content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
## this is a test script
# Sample 100 draws from the standard normal distribution.
a <- stats::rnorm(100)
# NOTE(review): `xlab` expects an axis-label string; the numeric pair
# c(-2, 2) looks like it was meant for `xlim`/`ylim` — confirm intent.
plot(a, xlab = c(-2, 2))
| /svm.R | no_license | Maitree1986/svmR | R | false | false | 62 | r | ## this is a test script
# Sample 100 draws from the standard normal distribution.
a <- rnorm(100)
# NOTE(review): `xlab` expects an axis-label string; c(-2,2) may have been
# meant for `xlim`/`ylim` — confirm with the author.
plot(a,xlab=c(-2,2))
|
# Auto-extracted example script for lmomco::plotlmrdia (from the package Rd
# examples); the "##D" lines below are the \dontrun{} portion, kept commented.
library(lmomco)
### Name: plotlmrdia
### Title: Plot L-moment Ratio Diagram
### Aliases: plotlmrdia
### Keywords: L-moment ratio diagram Graphics
### ** Examples
# Draw the theoretical L-moment ratio diagram with default settings.
plotlmrdia(lmrdia()) # simplest of all uses
## Not run:
##D # A more complex example follows.
##D # For a given mean, L-scale, L-skew, and L-kurtosis, let us use a sample size
##D # of 30 and using 500 simulations, set the L-moments in lmr and fit the Kappa.
##D T3 <- 0.34; T4 <- 0.21; n <- 30; nsim <- 500
##D lmr <- vec2lmom(c(10000,7500,T3,T4)); kap <- parkap(lmr)
##D
##D # Next, create vectors for storing simulated L-skew (t3) and L-kurtosis (t4)
##D t3 <- t4 <- vector(mode = "numeric")
##D
##D # Next, perform nsim simulations by randomly drawing from the Kappa distribution
##D # and compute the L-moments in sim.lmr and store the t3 and t4 of each sample.
##D for(i in 1:nsim) {
##D sim.lmr <- lmoms(rlmomco(n,kap))
##D t3[i] <- sim.lmr$ratios[3]; t4[i] <- sim.lmr$ratios[4]
##D }
##D
##D # Next, plot the diagram with a legend at a specified location, and "zoom"
##D # into the diagram by manually setting the axis limits.
##D plotlmrdia(lmrdia(), autolegend=TRUE, xleg=0.1, yleg=.41,
##D xlim=c(-.1,.5), ylim=c(-.1,.4), nopoints=TRUE, empty=TRUE)
##D
##D # Follow up by plotting the {t3,t4} values and the mean of these.
##D points(t3,t4)
##D points(mean(t3),mean(t4),pch=16,cex=3)
##D
##D # Now plot the trajectories of the distributions.
##D plotlmrdia(lmrdia(), add=TRUE)
##D
##D # Finally, plot crossing dashed lines at true values of L-skew and L-kurtosis.
##D lines(c(T3,T3),c(-1,1),col=8, lty=2)
##D lines(c(-1,1),c(T4,T4),col=8, lty=2) #
## End(Not run)
| /data/genthat_extracted_code/lmomco/examples/plotlmrdia.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 1,664 | r | library(lmomco)
### Name: plotlmrdia
### Title: Plot L-moment Ratio Diagram
### Aliases: plotlmrdia
### Keywords: L-moment ratio diagram Graphics
### ** Examples
# Draw the theoretical L-moment ratio diagram with default settings.
plotlmrdia(lmrdia()) # simplest of all uses
## Not run:
##D # A more complex example follows.
##D # For a given mean, L-scale, L-skew, and L-kurtosis, let us use a sample size
##D # of 30 and using 500 simulations, set the L-moments in lmr and fit the Kappa.
##D T3 <- 0.34; T4 <- 0.21; n <- 30; nsim <- 500
##D lmr <- vec2lmom(c(10000,7500,T3,T4)); kap <- parkap(lmr)
##D
##D # Next, create vectors for storing simulated L-skew (t3) and L-kurtosis (t4)
##D t3 <- t4 <- vector(mode = "numeric")
##D
##D # Next, perform nsim simulations by randomly drawing from the Kappa distribution
##D # and compute the L-moments in sim.lmr and store the t3 and t4 of each sample.
##D for(i in 1:nsim) {
##D sim.lmr <- lmoms(rlmomco(n,kap))
##D t3[i] <- sim.lmr$ratios[3]; t4[i] <- sim.lmr$ratios[4]
##D }
##D
##D # Next, plot the diagram with a legend at a specified location, and "zoom"
##D # into the diagram by manually setting the axis limits.
##D plotlmrdia(lmrdia(), autolegend=TRUE, xleg=0.1, yleg=.41,
##D xlim=c(-.1,.5), ylim=c(-.1,.4), nopoints=TRUE, empty=TRUE)
##D
##D # Follow up by plotting the {t3,t4} values and the mean of these.
##D points(t3,t4)
##D points(mean(t3),mean(t4),pch=16,cex=3)
##D
##D # Now plot the trajectories of the distributions.
##D plotlmrdia(lmrdia(), add=TRUE)
##D
##D # Finally, plot crossing dashed lines at true values of L-skew and L-kurtosis.
##D lines(c(T3,T3),c(-1,1),col=8, lty=2)
##D lines(c(-1,1),c(T4,T4),col=8, lty=2) #
## End(Not run)
|
# EX_4_16 (page 23): total variance of X by the law of total variance,
# Var(X) = E[Var(X|Y)] + Var(E[X|Y]), for a stick of length l.

l <- 8  # length of the stick

# Var(Y) for Y ~ Uniform(0, l): l^2 / 12.
vary <- function(l) {
  l^2 / 12
}

# Integrand for E[Var(X|Y)]: Var(X|Y = y) * density of Y = (y^2 / 12) * (1 / l).
# BUG FIX: the denominator previously hard-coded 8 instead of using `l`,
# so changing `l` above would have silently produced a wrong answer.
f <- function(y) {
  y^2 / (12 * l)
}

# Var(E[X|Y]) = Var(Y / 2) = Var(Y) / 4.
varxy <- vary(l) / 4
varxy  # print Var(E[X|Y])

# E[Var(X|Y)] by numerical integration over the support of Y.
integral <- integrate(f, 0, l)
# Use the full element name `value`; `$val` only worked via `$` partial matching.
Evarxy <- integral$value
Evarxy

# Law of total variance.
varx <- Evarxy + varxy
varx  # final calculation of Var(X)
| /Introduction_To_Probability_by_Dimitri_P._Bertsekas_And_John_N._Tsitsiklis/CH4/EX4.16/EX_4_16.R | permissive | FOSSEE/R_TBC_Uploads | R | false | false | 364 | r | #EX_4_16
#page 23
l<-8#define length of stick as 8
vary<-function(l)
{
return((l^2)/12)
}#function to calculate var(y)
f<-function(y)
{
return((y^2)/(12*8))
}
varxy<-1/4*vary(l)
varxy#to print var(x|y)
integral<-integrate(f,0,l)#to calculate E(var(x|y))
Evarxy<-integral$val
Evarxy
varx<-sum(Evarxy,varxy)
varx#to final calcualtion of var(x)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/individuals_defensive_actual_floor_stats.R
\name{individuals_defensive_actual_floor_stats}
\alias{individuals_defensive_actual_floor_stats}
\title{Individual's defensive actual statistics}
\usage{
individuals_defensive_actual_floor_stats(df1, df2, df3)
}
\arguments{
\item{df1}{Should be a Data Frame that represents the individual defensive statistics of the players. The parameter has to be in the format provided by the data_adjustment() function.}
\item{df2}{Should be a Data Frame that represents the team's statistics. The parameter has to be in the format provided by the team_stats() function.}
\item{df3}{Should be a Data Frame that represents the rival's statistics. The parameter has to be in the format provided by the team_stats() function.}
}
\value{
Data frame with the following individual defensive actual statistics
\itemize{
\item Defensive Stops (DStops)
\item Defensive Scores Possessions (DscPoss)
\item Defensive Possessions (DPoss)
\item Stops percentage (STOPS\%)
\item (TMDPossS\%)
\item Defensive Rating (DRtg)
}
}
\description{
The function allows the calculation of individual defensive actual statistics on court
}
\examples{
df1 <- data.frame("Name" = c("Witherspoon ","Team"), "MP" = c(14,200),
"DREB" = c(1,0), "FM" = c(4,0), "BLK" = c(0,0),"TOTAL FM" = c(4,0),
"FTO" = c(0,0),"STL" = c(1,1), "TOTAL FTO " = c(1,0), "FFTA" = c(0,0),
"DFGM" = c(1,0), "DFTM" = c(0,0))
df2 <- data.frame("G" = c(71), "MP" = c(17090), "FG" = c(3006),
"FGA" = c(6269),"Percentage FG" = c(0.48),"3P" = c(782),"3PA" = c(2242),
"Percentage 3P" = c(0.349),"2P" = c(2224), "2PA" = c(4027),
"Percentage 2P" = c(0.552),"FT" = c(1260),"FTA FG" = c(1728),
"Percentage FT" = c(0.729), "ORB" = c(757), "DRB" = c(2490),
"TRB" = c(3247), "AST" = c(1803), "STL" = c(612),"BLK" = c(468),
"TOV" = c(1077),"PF" = c(1471), "PTS" = c(8054), "+/-" = c(0))
df3 <- data.frame("G" = c(71), "MP" = c(17090), "FG" = c(2773),
"FGA" = c(6187),"Percentage FG" = c(0.448), "3P" = c(827),
"3PA" = c(2373), "Percentage 3P" = c(0.349), "2P" = c(1946),
"2PA" = c(3814), "Percentage 2P" = c(0.510), "FT" = c(1270),
"FTA FG" = c(1626), "Percentage FT" = c(0.781), "ORB" = c(668),
"DRB" = c(2333),"TRB" = c(3001), "AST" = c(1662),"STL" = c(585),
"BLK" = c(263), "TOV" = c(1130), "PF" = c(1544),
"PTS" = c(7643), "+/-" = c(0))
individuals_defensive_actual_floor_stats(df1,df2,df3)
}
\author{
Fco Javier Cantero \email{fco.cantero@edu.uah.es}
Juan José Cuadrado \email{jjcg@uah.es}
Universidad de Alcalá de Henares
}
| /man/individuals_defensive_actual_floor_stats.Rd | no_license | cran/AdvancedBasketballStats | R | false | true | 2,700 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/individuals_defensive_actual_floor_stats.R
\name{individuals_defensive_actual_floor_stats}
\alias{individuals_defensive_actual_floor_stats}
\title{Individual's defensive actual statistics}
\usage{
individuals_defensive_actual_floor_stats(df1, df2, df3)
}
\arguments{
\item{df1}{Should be a Data Frame that represents the individual defensive statistics of the players. The parameter has to be in the format provided by the data_adjustment() function.}
\item{df2}{Should be a Data Frame that represents the team's statistics. The parameter has to be in the format provided by the team_stats() function.}
\item{df3}{Should be a Data Frame that represents the rival's statistics. The parameter has to be in the format provided by the team_stats() function.}
}
\value{
Data frame with the following individual defensive actual statistics
\itemize{
\item Defensive Stops (DStops)
\item Defensive Scores Possessions (DscPoss)
\item Defensive Possessions (DPoss)
\item Stops percentage (STOPS\%)
\item (TMDPossS\%)
\item Defensive Rating (DRtg)
}
}
\description{
The function allows the calculation of individual defensive actual statistics on court
}
\examples{
df1 <- data.frame("Name" = c("Witherspoon ","Team"), "MP" = c(14,200),
"DREB" = c(1,0), "FM" = c(4,0), "BLK" = c(0,0),"TOTAL FM" = c(4,0),
"FTO" = c(0,0),"STL" = c(1,1), "TOTAL FTO " = c(1,0), "FFTA" = c(0,0),
"DFGM" = c(1,0), "DFTM" = c(0,0))
df2 <- data.frame("G" = c(71), "MP" = c(17090), "FG" = c(3006),
"FGA" = c(6269),"Percentage FG" = c(0.48),"3P" = c(782),"3PA" = c(2242),
"Percentage 3P" = c(0.349),"2P" = c(2224), "2PA" = c(4027),
"Percentage 2P" = c(0.552),"FT" = c(1260),"FTA FG" = c(1728),
"Percentage FT" = c(0.729), "ORB" = c(757), "DRB" = c(2490),
"TRB" = c(3247), "AST" = c(1803), "STL" = c(612),"BLK" = c(468),
"TOV" = c(1077),"PF" = c(1471), "PTS" = c(8054), "+/-" = c(0))
df3 <- data.frame("G" = c(71), "MP" = c(17090), "FG" = c(2773),
"FGA" = c(6187),"Percentage FG" = c(0.448), "3P" = c(827),
"3PA" = c(2373), "Percentage 3P" = c(0.349), "2P" = c(1946),
"2PA" = c(3814), "Percentage 2P" = c(0.510), "FT" = c(1270),
"FTA FG" = c(1626), "Percentage FT" = c(0.781), "ORB" = c(668),
"DRB" = c(2333),"TRB" = c(3001), "AST" = c(1662),"STL" = c(585),
"BLK" = c(263), "TOV" = c(1130), "PF" = c(1544),
"PTS" = c(7643), "+/-" = c(0))
individuals_defensive_actual_floor_stats(df1,df2,df3)
}
\author{
Fco Javier Cantero \email{fco.cantero@edu.uah.es}
Juan José Cuadrado \email{jjcg@uah.es}
Universidad de Alcalá de Henares
}
|
# read barcodes -----------------------------------------------------------

# Read the sample barcodes of a 96-well plate from the first sheet of an
# Excel file.
#
# Returns a tibble with columns:
#   Location - Luminex-style well id, e.g. "1(1,A1)"; numbering runs down
#              each plate column (A1 = 1, B1 = 2, ...).
#   Sample   - barcode found in that well; empty wells are dropped.
read_barcodes <- function(barcodes_path){
  barcodes_sheets <- excel_sheets(barcodes_path)
  # Replace with INPUT from shiny app or assume it's the first sheet
  barcodes_sheets_selected <- barcodes_sheets[1]
  # The plate grid starts after 3 header rows (isotype / column header rows).
  barcodes <-
    read_excel(barcodes_path,
               sheet = barcodes_sheets_selected,
               skip = 3,
               col_names = FALSE)
  colnames(barcodes) <- c("row", 1:12)
  barcodes <-
    barcodes %>%
    # across(everything(), ...) replaces the superseded mutate_all()
    mutate(across(everything(), as.character)) %>%
    pivot_longer(-row, names_to = "column", values_to = "Sample") %>%
    arrange(as.numeric(column)) %>%
    # row_number() replaces the old mutate(x = 1) + cumsum(x) counter idiom;
    # numbering happens before filtering so empty wells keep their slot.
    mutate(sample_nr = row_number(),
           Location = paste0(sample_nr, "(1,", row, column, ")")) %>%
    filter(!is.na(Sample)) %>%
    select(Location, Sample)
  return(barcodes)
}
# barcodes1 <- read_barcodes("data/200706_BARCODES_Ciao_4.xlsx")
# barcodes2 <- read_barcodes("data/200721_BARCODES_Ciao_33.xlsx")
# read isotypes -----------------------------------------------------------

# Derive the antibody isotype (IgG/IgA/IgM) assayed in each well of the plate.
# The isotype labels sit in sheet row 1 and the plate-column numbers they
# apply to sit in sheet row 3; labels are forward-filled across merged cells.
# Returns a tibble: Location ("1(1,A1)"-style well id), isotype.
read_isotypes <- function(barcodes_path){
  # prepare a full plate to join later with isotypes
  sample_nr <- 1:96
  row <- c("A", "B", "C", "D",
           "E", "F", "G", "H")
  column <- 1:12
  # crossing() sorts by column then row, so sample_nr 1..96 runs down each
  # plate column (A1 = 1, B1 = 2, ...), matching read_barcodes().
  full_plate <-
    crossing(column, row) %>%
    bind_cols(sample_nr = sample_nr) %>%
    mutate(Location = paste0(sample_nr, "(1,", row, column, ")")) %>%
    select(Location, column)
  # read isotypes from first row of excel document
  # 1st row on 1st sheet is read, if information is not there, it will fail
  barcodes_sheets <- excel_sheets(barcodes_path)
  # Replace with INPUT from shiny app or assume it's the first sheet
  barcodes_sheets_selected <- barcodes_sheets[1]
  barcodes_structure <-
    read_excel(barcodes_path,
               sheet = barcodes_sheets_selected,
               col_names = FALSE) %>%
    slice(c(1,3))  # row 1 = isotype labels, row 3 = plate-column numbers
  isotype_list <- as.character(unlist(barcodes_structure[1,]))
  column_list <- as.numeric(barcodes_structure[2,])
  isotypes <-
    tibble(isotype = isotype_list, column = column_list) %>%
    filter(!is.na(column)) %>%
    fill(isotype) %>%  # forward-fill labels over merged header cells
    left_join(full_plate, by = "column") %>%
    select(Location, isotype)
  return(isotypes)
}
# isotypes1 <- read_isotypes("data/200706_BARCODES_Ciao_4.xlsx")
# isotypes2 <- read_isotypes("data/200721_BARCODES_Ciao_33.xlsx")
# join barcodes and isotypes ----------------------------------------------

# Attach the per-well isotype to each barcoded sample, matching on the
# Luminex well id ("Location"). Wells without an isotype entry get NA.
join_barcodes_isotypes <- function(barcodes, isotypes){
  merged <- left_join(barcodes, isotypes, by = "Location")
  merged
}
# barcodes_isotypes1 <- join_barcodes_isotypes(barcodes1, isotypes1)
# barcodes_isotypes2 <- join_barcodes_isotypes(barcodes2, isotypes2)
# how many rows to skip ---------------------------------------------------

# Locate a data section inside a Luminex CSV export.
# `match` is the section label expected in column 2 (e.g. "Count", "Net MFI").
# Returns how many rows read_csv() must skip to land on that section.
# Note: the `match` parameter shadows base::match as a *variable*, but R
# still resolves the calls `match(...)` below to the base function.
skip_rows <- function(filepath, match){
  rows_until_data <-
    read_csv(filepath, col_names = FALSE, skip_empty_rows = FALSE) %>%
    pull(1) %>%
    match("DataType:", .) - 6 # empty rows are somehow read twice that's why 6 are subtracted
  # second pass: find the wanted section label relative to the data start
  count_row <-
    read_csv(filepath, skip = rows_until_data, skip_empty_rows = FALSE) %>%
    pull(2) %>%
    match(match, .)
  return(count_row+rows_until_data-1)
}
# get count data ----------------------------------------------------------

# Extract the bead "Count" section from a Luminex CSV and return one row per
# sample with one column per isotype/antigen combination (e.g. IgG_NP_count).
# `isotype_given` fills in the isotype for plates where the barcode sheet
# did not specify one (single-isotype runs).
get_count <- function(filepath, barcodes_isotypes, isotype_given=NA){
  barcodes_isotypes <-
    barcodes_isotypes %>%
    mutate(isotype = replace_na(isotype, isotype_given))
  n_samples <- length(barcodes_isotypes$Sample)
  count <-
    read_csv(filepath, skip = skip_rows(filepath, "Count"), skip_empty_rows = FALSE) %>%
    janitor::clean_names(case = "title", abbreviations = c("NP", "RBD", "HKU1")) %>%
    head(n_samples) %>%
    select(-Sample, -`Total Events`) %>%
    # NP/S2/S1/Empty are always present; RBD and HKU1 exist only on some
    # panels, hence the one_of() guards below.
    mutate(NP = as.numeric(NP),
           S2 = as.numeric(S2),
           S1 = as.numeric(S1),
           Empty = as.numeric(Empty)) %>%
    mutate_at(vars(one_of("RBD")), as.numeric) %>%
    mutate_at(vars(one_of("HKU1")), as.numeric) %>%
    pivot_longer(-Location, names_to = "target", values_to = "count") %>%
    left_join(barcodes_isotypes, by = "Location") %>%
    mutate(isotype_target = paste0(isotype, "_", target, "_count")) %>%
    # fix the output column order to IgG, IgA, IgM
    arrange(match(isotype, c("IgG", "IgA", "IgM"))) %>%
    select(-Location, -target, -isotype) %>%
    pivot_wider(names_from = isotype_target, values_from = count)
  return(count)
}
# igg_count1 <- get_count("data/200706_Ciao_4_IgG_20200707_104333.csv", barcodes_isotypes1, "IgG")
# iga_count1 <- get_count("data/200706_Ciao_4_IgA_20200707_141245.csv", barcodes_isotypes1, "IgA")
# igm_count1 <- get_count("data/200706_Ciao_4_IgM_20200707_164242.csv", barcodes_isotypes1, "IgM")
#
# count1 <-
# igg_count1 %>%
# left_join(iga_count1, by = c("Sample")) %>%
# left_join(igm_count1, by = c("Sample"))
#
# count2 <- get_count("data/200721_Ciao_33_IgA_G_M_20200721_144831.csv", barcodes_isotypes2)
# get net mfi data --------------------------------------------------------

# Extract the "Net MFI" section from a Luminex CSV and return, per sample,
# the raw net MFI (columns suffixed "_net_mfi") plus the signal-over-cutoff
# ratio (columns suffixed "_net_mfi_soc") for every isotype/antigen pair.
# A ratio > 1 means the signal exceeds the assay cutoff.
get_net_mfi <- function(filepath, barcodes_isotypes, isotype_given=NA){
  barcodes_isotypes <-
    barcodes_isotypes %>%
    mutate(isotype = replace_na(isotype, isotype_given))
  n_samples <- length(barcodes_isotypes$Sample)
  # Reshape the Net MFI block to one row per sample, one column per
  # isotype_antigen (same pipeline shape as get_count()).
  net_mfi <-
    read_csv(filepath, skip = skip_rows(filepath, "Net MFI")+1, skip_empty_rows = FALSE) %>%
    janitor::clean_names(case = "title", abbreviations = c("NP", "RBD", "HKU1")) %>%
    head(n_samples) %>%
    select(-Sample, -`Total Events`) %>%
    mutate(NP = as.numeric(NP),
           S2 = as.numeric(S2),
           S1 = as.numeric(S1),
           Empty = as.numeric(Empty)) %>%
    mutate_at(vars(one_of("RBD")), as.numeric) %>%
    mutate_at(vars(one_of("HKU1")), as.numeric) %>%
    pivot_longer(-Location, names_to = "target", values_to = "count") %>%
    left_join(barcodes_isotypes, by = "Location") %>%
    mutate(isotype_target = paste0(isotype, "_", target)) %>%
    select(-Location, -target, -isotype) %>%
    pivot_wider(names_from = isotype_target, values_from = count)
  # Per-sample empty-bead background, one row per isotype.
  net_mfi_empty <-
    net_mfi %>%
    select(Sample, ends_with("_Empty")) %>%
    pivot_longer(-Sample, names_to = "target", values_to = "empty") %>%
    separate(target, c("isotype", NA))
  # soc = signal over cutoff
  # cutoff fold over empty beads
  # defined by: mean + some factor * standard deviation
  # NOTE(review): these constants are assay-calibration values baked into the
  # code — presumably derived from a negative-control cohort; confirm their
  # provenance before changing them.
  cutoff <-
    tribble(
      ~target, ~cutoff,
      "IgG_NP", 7.7, # mean + 6*sd
      "IgG_RBD", 3.5, # mean + 4*sd
      "IgG_S1", 2.3, # mean + 3*sd
      "IgG_S2", 62.8, # mean + 6*sd
      "IgA_NP", 27.7, # mean + 6*sd
      "IgA_RBD", 13.9, # mean + 6*sd
      "IgA_S1", 6.7, # mean + 6*sd
      "IgA_S2", 35.8, # mean + 6*sd
      "IgM_NP", 49.2, # mean + 4*sd
      "IgM_RBD", 66.8, # mean + 4*sd
      "IgM_S1", 22.9, # mean + 4*sd
      "IgM_S2", 25.2, # mean + 4*sd
    )
  # calculate foe (fold over empty beads)
  # ratio = net_mfi / (cutoff * empty-bead background); targets without a
  # cutoff entry (e.g. Empty, HKU1) are dropped by the !is.na filter.
  net_mfi_soc <-
    net_mfi %>%
    pivot_longer(-Sample, names_to = "target", values_to = "net_mfi") %>%
    separate(target, c("isotype", "target")) %>%
    mutate(target = paste(isotype, target, sep = "_")) %>%
    left_join(net_mfi_empty, by = c("Sample", "isotype")) %>%
    left_join(cutoff, by = c("target")) %>%
    mutate(net_mfi_soc = net_mfi/(cutoff*empty)) %>%
    filter(!is.na(net_mfi_soc)) %>%
    select(Sample, target, net_mfi_soc) %>%
    pivot_wider(names_from = target, values_from = net_mfi_soc)
  # Combine raw values and ratios; suffixes disambiguate the column names.
  net_mfi_full <-
    net_mfi %>%
    left_join(net_mfi_soc, by = c("Sample"), suffix = c("_net_mfi", "_net_mfi_soc")) %>%
    select(Sample, starts_with("IgG"), starts_with("IgA"), starts_with("IgM"))
  return(net_mfi_full)
}
# igg_net_mfi1 <- get_net_mfi("data/200706_Ciao_4_IgG_20200707_104333.csv", barcodes_isotypes1, "IgG")
# iga_net_mfi1 <- get_net_mfi("data/200706_Ciao_4_IgA_20200707_141245.csv", barcodes_isotypes1, "IgA")
# igm_net_mfi1 <- get_net_mfi("data/200706_Ciao_4_IgM_20200707_164242.csv", barcodes_isotypes1, "IgM")
#
# net_mfi1 <-
# igg_net_mfi1 %>%
# left_join(iga_net_mfi1, by = c("Sample")) %>%
# left_join(igm_net_mfi1, by = c("Sample"))
#
# net_mfi2 <- get_net_mfi("data/200721_Ciao_33_IgA_G_M_20200721_144831.csv", barcodes_isotypes2)
# set flag ----------------------------------------------------------------

# Quality-control flags for one plate.
# Adds two columns to `net_mfi_soc`:
#   Fehler_count - comma-separated list of isotype/antigen measurements whose
#                  bead count fell below `min_count`
#   Fehler_empty - comma-separated list of isotypes whose empty-bead signal
#                  exceeded its `above_cutoff_*` threshold
# Refactor: the previous version triplicated ~20 lines per panel variant
# (with/without RBD and HKU1); a single loop now flags whichever count
# columns are actually present. It also replaces the fragile
# if_else(cond, x, NULL) idiom (NULL passed positionally as `false`) with an
# explicit NA_character_, which unite(na.rm = TRUE) drops identically.
set_flag <- function(net_mfi_soc, min_count, above_cutoff_IgG, above_cutoff_IgA, above_cutoff_IgM){
  isotypes <- c("IgG", "IgA", "IgM")
  targets <- c("NP", "S2", "S1", "RBD", "HKU1")
  # expand.grid cycles `target` fastest, so the combos come out as
  # IgG NP/S2/S1/RBD/HKU1, then IgA ..., then IgM ... — the same flag order
  # the hand-written branches produced.
  combos <- expand.grid(target = targets, isotype = isotypes,
                        stringsAsFactors = FALSE, KEEP.OUT.ATTRS = FALSE)
  flagged <- net_mfi_soc
  for (i in seq_len(nrow(combos))) {
    iso <- combos$isotype[i]
    tg <- combos$target[i]
    count_col <- paste0(iso, "_", tg, "_count")
    # Panels without RBD and/or HKU1 simply lack those columns; skip them.
    if (!count_col %in% names(flagged)) next
    flagged[[paste0(count_col, "_flag")]] <-
      if_else(as.numeric(flagged[[count_col]]) < min_count,
              paste(iso, tg), NA_character_)
  }
  flagged <- flagged %>%
    # unite() collapses the flag columns (in creation order) and places the
    # result where the first flag column sat, as before.
    unite(Fehler_count, ends_with("count_flag"), na.rm = TRUE, sep = ", ") %>%
    mutate(IgG_Empty_flag = if_else(IgG_Empty > above_cutoff_IgG, "IgG", NA_character_),
           IgA_Empty_flag = if_else(IgA_Empty > above_cutoff_IgA, "IgA", NA_character_),
           IgM_Empty_flag = if_else(IgM_Empty > above_cutoff_IgM, "IgM", NA_character_)) %>%
    unite(Fehler_empty, ends_with("empty_flag"), na.rm = TRUE, sep = ", ")
  return(flagged)
}
# net_mfi1 <- left_join(net_mfi1, count1, by = c("Sample"))
# net_mfi1_flagged <- set_flag(net_mfi1, 20, 40.05, 55.26, 539.74)
#
# net_mfi2 <- left_join(net_mfi2, count2, by = c("Sample"))
# net_mfi2_flagged <- set_flag(net_mfi2, 20, 40.05, 55.26, 539.74)
# test result -------------------------------------------------------------

# Classify each sample's serostatus from the signal-over-cutoff ratios.
# Per isotype/antigen: 1 = positive (ratio > 1), 0 = negative; for IgG on
# S1/NP/RBD there is an intermediate 0.5 when the ratio is above the
# grey-zone threshold (IgG_*_gw) but not yet above 1.
# The summary scores then drive the case_when() that assigns the final
# (German-labelled) "Serokonversion" category.
test_result <- function(net_mfi_soc, IgG_NP_gw, IgG_RBD_gw, IgG_S1_gw){
  # the result of each of the 12 parameter can either be
  # positive (1), negative (0) or intermediate (0.5)
  net_mfi_soc_result <-
    net_mfi_soc %>%
    mutate(IgG_Resultat_S1 = if_else(IgG_S1_net_mfi_soc > 1, 1, if_else(IgG_S1_net_mfi_soc > IgG_S1_gw, 0.5, 0)),
           IgA_Resultat_S1 = if_else(IgA_S1_net_mfi_soc > 1, 1, 0),
           IgM_Resultat_S1 = if_else(IgM_S1_net_mfi_soc > 1, 1, 0)) %>%
    mutate(IgG_Resultat_S2 = if_else(IgG_S2_net_mfi_soc > 1, 1, 0),
           IgA_Resultat_S2 = if_else(IgA_S2_net_mfi_soc > 1, 1, 0),
           IgM_Resultat_S2 = if_else(IgM_S2_net_mfi_soc > 1, 1, 0)) %>%
    mutate(IgG_Resultat_NP = if_else(IgG_NP_net_mfi_soc > 1, 1, if_else(IgG_NP_net_mfi_soc > IgG_NP_gw, 0.5, 0)),
           IgA_Resultat_NP = if_else(IgA_NP_net_mfi_soc > 1, 1, 0),
           IgM_Resultat_NP = if_else(IgM_NP_net_mfi_soc > 1, 1, 0)) %>%
    mutate(IgG_Resultat_RBD = if_else(IgG_RBD_net_mfi_soc > 1, 1, if_else(IgG_RBD_net_mfi_soc > IgG_RBD_gw, 0.5, 0)),
           IgA_Resultat_RBD = if_else(IgA_RBD_net_mfi_soc > 1, 1, 0),
           IgM_Resultat_RBD = if_else(IgM_RBD_net_mfi_soc > 1, 1, 0)) %>%
    # Summary scores:
    #   Resultat_sum          - sum of all 12 sub-results (0.5 counts half)
    #   Resultat_sum_IgG      - sum of the 4 IgG sub-results
    #   Resultat_n_pos        - number of fully positive (== 1) sub-results
    #   Resultat_n_pos_IgA_IgM- fully positive IgA/IgM sub-results only
    mutate(Resultat_sum = IgG_Resultat_S1+IgA_Resultat_S1+IgM_Resultat_S1+
             IgG_Resultat_S2+IgA_Resultat_S2+IgM_Resultat_S2+
             IgG_Resultat_NP+IgA_Resultat_NP+IgM_Resultat_NP+
             IgG_Resultat_RBD+IgA_Resultat_RBD+IgM_Resultat_RBD,
           Resultat_sum_IgG = IgG_Resultat_S1+IgG_Resultat_S2+IgG_Resultat_NP+IgG_Resultat_RBD,
           Resultat_n_pos = (IgG_Resultat_S1==1)+(IgA_Resultat_S1==1)+(IgM_Resultat_S1==1)+
             (IgG_Resultat_S2==1)+(IgA_Resultat_S2==1)+(IgM_Resultat_S2==1)+
             (IgG_Resultat_NP==1)+(IgA_Resultat_NP==1)+(IgM_Resultat_NP==1)+
             (IgG_Resultat_RBD==1)+(IgA_Resultat_RBD==1)+(IgM_Resultat_RBD==1),
           Resultat_n_pos_IgA_IgM = (IgA_Resultat_S1==1)+(IgM_Resultat_S1==1)+
             (IgA_Resultat_S2==1)+(IgM_Resultat_S2==1)+
             (IgA_Resultat_NP==1)+(IgM_Resultat_NP==1)+
             (IgA_Resultat_RBD==1)+(IgM_Resultat_RBD==1)) %>%
    # First matching rule wins (case_when evaluates top-down).
    mutate(Serokonversion = case_when(
      (Resultat_sum >= 2) & (Resultat_sum_IgG >= 0.5) ~ "Positiv, fortgeschritten",
      (Resultat_sum >= 2) & (Resultat_sum_IgG == 0) ~ "Positiv, partiell",
      (IgG_Resultat_S1 == 1 | IgG_Resultat_NP == 1 | IgG_Resultat_RBD == 1) | (Resultat_sum_IgG >= 0.5 & Resultat_n_pos_IgA_IgM == 1) ~ "Schwach reaktiv",
      Resultat_n_pos == 1 ~ "Indeterminat",
      TRUE ~ "Negativ"
    ))
  return(net_mfi_soc_result)
}
# net_mfi1_flagged_result <- test_result(net_mfi1_flagged, 0.884, 0.714, 0.895)
# create gt table ---------------------------------------------------------

# Render the per-sample ratio table as a {gt} table.
# Cell colouring:
#   blue   (#06AED5) - ratio > 1 (positive), for every isotype/antigen
#   yellow (#FFE74C) - IgG intermediate zone: at or above the antigen-specific
#                      grey-zone cutoff (NP 0.884, S1 0.895, RBD 0.714)
# RBD columns are only styled/formatted when present in `table`.
# Refactor: the previous version hand-wrote ~25 near-identical tab_style()
# calls, duplicated across two branches (with/without RBD), using the
# deprecated vars() selector; a single helper now applies each rule.
create_gt <- function(table){
  yellow <- "#FFE74C"
  blue <- "#06AED5"
  # Antigens present on this panel (RBD is optional; HKU1 is never styled,
  # matching the original behaviour).
  targets <- if (sum(str_detect(names(table), "RBD")) > 0) {
    c("NP", "S2", "S1", "RBD")
  } else {
    c("NP", "S2", "S1")
  }
  # Fill the cells of `col` whose value passes `test` with `color`.
  # Row indices are computed on the input data (gt preserves row order);
  # which() treats NA as "not selected", like the old rows-expressions.
  fill_where <- function(gt_tbl, col, test, color) {
    rows_hit <- which(test(table[[col]]))
    if (length(rows_hit) == 0) {
      return(gt_tbl)
    }
    tab_style(gt_tbl,
              style = cell_fill(color = color),
              locations = cells_body(columns = all_of(col), rows = rows_hit))
  }
  # IgG grey-zone (intermediate) cutoffs per antigen; S2 has none.
  igg_greyzone <- c(NP = 0.884, S1 = 0.895, RBD = 0.714)
  net_mfi_soc_clean_gt <-
    table %>%
    gt(rowname_col = "Sample") %>%
    tab_spanner_delim(delim = "_")
  # Yellow first, then blue: later gt styles take precedence, so cells > 1
  # end up blue exactly as in the original call sequence.
  for (tg in intersect(names(igg_greyzone), targets)) {
    cutoff <- igg_greyzone[[tg]]
    net_mfi_soc_clean_gt <- fill_where(net_mfi_soc_clean_gt,
                                       paste0("IgG_", tg),
                                       function(x) x >= cutoff,
                                       yellow)
  }
  for (iso in c("IgG", "IgA", "IgM")) {
    for (tg in targets) {
      net_mfi_soc_clean_gt <- fill_where(net_mfi_soc_clean_gt,
                                         paste0(iso, "_", tg),
                                         function(x) x > 1,
                                         blue)
    }
  }
  # One decimal place for every styled antigen column.
  net_mfi_soc_clean_gt <-
    net_mfi_soc_clean_gt %>%
    fmt_number(
      columns = ends_with(targets),
      decimals = 1
    )
  return(net_mfi_soc_clean_gt)
}
| /DIA/SARS-CoV-2_serology_v2/app-functions.R | permissive | medvir/shiny-server | R | false | false | 22,312 | r |
# read barcodes -----------------------------------------------------------

# Read the sample barcodes of a 96-well plate from the first sheet of an
# Excel file. Returns a tibble with Location (Luminex-style well id,
# "1(1,A1)"; numbering runs down each plate column) and Sample (barcode);
# empty wells are dropped.
read_barcodes <- function(barcodes_path){
  barcodes_sheets <- excel_sheets(barcodes_path)
  # Replace with INPUT from shiny app or assume it's the first sheet
  barcodes_sheets_selected <- barcodes_sheets[1]
  # The plate grid starts after 3 header rows (isotype / column header rows).
  barcodes <-
    read_excel(barcodes_path,
               sheet = barcodes_sheets_selected,
               skip = 3,
               col_names = FALSE)
  colnames(barcodes) <- c("row", 1:12)
  barcodes <-
    barcodes %>%
    mutate_all(as.character) %>%
    pivot_longer(-row, names_to = "column", values_to = "Sample") %>%
    arrange(as.numeric(column)) %>%
    # running well counter: mutate(1) + cumsum emulates row_number()
    mutate(sample_nr = 1,
           sample_nr = cumsum(sample_nr),
           Location = paste0(sample_nr, "(1,", row, column, ")")) %>%
    filter(!is.na(Sample)) %>%
    select(Location, Sample)
  return(barcodes)
}
# barcodes1 <- read_barcodes("data/200706_BARCODES_Ciao_4.xlsx")
# barcodes2 <- read_barcodes("data/200721_BARCODES_Ciao_33.xlsx")
# read isotypes -----------------------------------------------------------

# Derive the antibody isotype (IgG/IgA/IgM) assayed in each well.
# Labels sit in sheet row 1, the plate-column numbers they apply to in
# sheet row 3; labels are forward-filled over merged cells.
# Returns a tibble: Location ("1(1,A1)"-style well id), isotype.
read_isotypes <- function(barcodes_path){
  # prepare a full plate to join later with isotypes
  sample_nr <- 1:96
  row <- c("A", "B", "C", "D",
           "E", "F", "G", "H")
  column <- 1:12
  # crossing() sorts by column then row, so numbering runs down each plate
  # column (A1 = 1, B1 = 2, ...), matching read_barcodes().
  full_plate <-
    crossing(column, row) %>%
    bind_cols(sample_nr = sample_nr) %>%
    mutate(Location = paste0(sample_nr, "(1,", row, column, ")")) %>%
    select(Location, column)
  # read isotypes from first row of excel document
  # 1st row on 1st sheet is read, if information is not there, it will fail
  barcodes_sheets <- excel_sheets(barcodes_path)
  # Replace with INPUT from shiny app or assume it's the first sheet
  barcodes_sheets_selected <- barcodes_sheets[1]
  barcodes_structure <-
    read_excel(barcodes_path,
               sheet = barcodes_sheets_selected,
               col_names = FALSE) %>%
    slice(c(1,3))  # row 1 = isotype labels, row 3 = plate-column numbers
  isotype_list <- as.character(unlist(barcodes_structure[1,]))
  column_list <- as.numeric(barcodes_structure[2,])
  isotypes <-
    tibble(isotype = isotype_list, column = column_list) %>%
    filter(!is.na(column)) %>%
    fill(isotype) %>%  # forward-fill labels over merged header cells
    left_join(full_plate, by = "column") %>%
    select(Location, isotype)
  return(isotypes)
}
# isotypes1 <- read_isotypes("data/200706_BARCODES_Ciao_4.xlsx")
# isotypes2 <- read_isotypes("data/200721_BARCODES_Ciao_33.xlsx")
# join barcodes and isotypes ----------------------------------------------

# Attach the per-well isotype to each barcoded sample via the Luminex well
# id ("Location"). Wells without an isotype entry get NA.
join_barcodes_isotypes <- function(barcodes, isotypes){
  return(left_join(barcodes, isotypes, by = "Location"))
}
# barcodes_isotypes1 <- join_barcodes_isotypes(barcodes1, isotypes1)
# barcodes_isotypes2 <- join_barcodes_isotypes(barcodes2, isotypes2)
# how many rows to skip ---------------------------------------------------

# Locate a data section inside a Luminex CSV export.
# `match` is the section label expected in column 2 (e.g. "Count", "Net MFI").
# Returns how many rows read_csv() must skip to land on that section.
# Note: the `match` parameter shadows base::match as a *variable*, but R
# still resolves the calls `match(...)` below to the base function.
skip_rows <- function(filepath, match){
  rows_until_data <-
    read_csv(filepath, col_names = FALSE, skip_empty_rows = FALSE) %>%
    pull(1) %>%
    match("DataType:", .) - 6 # empty rows are somehow read twice that's why 6 are subtracted
  # second pass: find the wanted section label relative to the data start
  count_row <-
    read_csv(filepath, skip = rows_until_data, skip_empty_rows = FALSE) %>%
    pull(2) %>%
    match(match, .)
  return(count_row+rows_until_data-1)
}
# get count data ----------------------------------------------------------

# Extract the bead "Count" section from a Luminex CSV and return one row per
# sample with one column per isotype/antigen combination (e.g. IgG_NP_count).
# `isotype_given` fills in the isotype for single-isotype runs.
get_count <- function(filepath, barcodes_isotypes, isotype_given=NA){
  barcodes_isotypes <-
    barcodes_isotypes %>%
    mutate(isotype = replace_na(isotype, isotype_given))
  n_samples <- length(barcodes_isotypes$Sample)
  count <-
    read_csv(filepath, skip = skip_rows(filepath, "Count"), skip_empty_rows = FALSE) %>%
    janitor::clean_names(case = "title", abbreviations = c("NP", "RBD", "HKU1")) %>%
    head(n_samples) %>%
    select(-Sample, -`Total Events`) %>%
    # NP/S2/S1/Empty are always present; RBD and HKU1 exist only on some
    # panels, hence the one_of() guards below.
    mutate(NP = as.numeric(NP),
           S2 = as.numeric(S2),
           S1 = as.numeric(S1),
           Empty = as.numeric(Empty)) %>%
    mutate_at(vars(one_of("RBD")), as.numeric) %>%
    mutate_at(vars(one_of("HKU1")), as.numeric) %>%
    pivot_longer(-Location, names_to = "target", values_to = "count") %>%
    left_join(barcodes_isotypes, by = "Location") %>%
    mutate(isotype_target = paste0(isotype, "_", target, "_count")) %>%
    # fix the output column order to IgG, IgA, IgM
    arrange(match(isotype, c("IgG", "IgA", "IgM"))) %>%
    select(-Location, -target, -isotype) %>%
    pivot_wider(names_from = isotype_target, values_from = count)
  return(count)
}
# igg_count1 <- get_count("data/200706_Ciao_4_IgG_20200707_104333.csv", barcodes_isotypes1, "IgG")
# iga_count1 <- get_count("data/200706_Ciao_4_IgA_20200707_141245.csv", barcodes_isotypes1, "IgA")
# igm_count1 <- get_count("data/200706_Ciao_4_IgM_20200707_164242.csv", barcodes_isotypes1, "IgM")
#
# count1 <-
# igg_count1 %>%
# left_join(iga_count1, by = c("Sample")) %>%
# left_join(igm_count1, by = c("Sample"))
#
# count2 <- get_count("data/200721_Ciao_33_IgA_G_M_20200721_144831.csv", barcodes_isotypes2)
# get net mfi data --------------------------------------------------------
# Read the "Net MFI" section of a Luminex export, reshape it to one row per
# sample, and append signal-over-cutoff (soc) values for every target that
# has a defined cutoff.
#
# Args:
#   filepath: instrument CSV export containing a "Net MFI" section.
#   barcodes_isotypes: per-well table with Location, Sample and isotype columns.
#   isotype_given: isotype to assign when the plate layout carries none
#     (single-isotype runs); left NA for combined multi-isotype plates.
# Returns: tibble keyed by Sample with <isotype>_<target>(_net_mfi) columns
#   plus <isotype>_<target>_net_mfi_soc columns, ordered IgG / IgA / IgM.
get_net_mfi <- function(filepath, barcodes_isotypes, isotype_given=NA){
  # Fill missing isotype entries (single-isotype plates)
  barcodes_isotypes <-
    barcodes_isotypes %>%
    mutate(isotype = replace_na(isotype, isotype_given))
  n_samples <- length(barcodes_isotypes$Sample)
  net_mfi <-
    # +1 relative to the section offset: skip the section's own header row
    read_csv(filepath, skip = skip_rows(filepath, "Net MFI")+1, skip_empty_rows = FALSE) %>%
    janitor::clean_names(case = "title", abbreviations = c("NP", "RBD", "HKU1")) %>%
    head(n_samples) %>%
    select(-Sample, -`Total Events`) %>%
    mutate(NP = as.numeric(NP),
           S2 = as.numeric(S2),
           S1 = as.numeric(S1),
           Empty = as.numeric(Empty)) %>%
    # RBD/HKU1 columns only exist on some plate layouts; convert when present
    mutate_at(vars(one_of("RBD")), as.numeric) %>%
    mutate_at(vars(one_of("HKU1")), as.numeric) %>%
    pivot_longer(-Location, names_to = "target", values_to = "count") %>%
    left_join(barcodes_isotypes, by = "Location") %>%
    mutate(isotype_target = paste0(isotype, "_", target)) %>%
    select(-Location, -target, -isotype) %>%
    pivot_wider(names_from = isotype_target, values_from = count)
  # Empty-bead signal per sample and isotype (background reference)
  net_mfi_empty <-
    net_mfi %>%
    select(Sample, ends_with("_Empty")) %>%
    pivot_longer(-Sample, names_to = "target", values_to = "empty") %>%
    separate(target, c("isotype", NA))
  # soc = signal over cutoff
  # cutoff fold over empty beads
  # defined by: mean + some factor * standard deviation
  cutoff <-
    tribble(
      ~target, ~cutoff,
      "IgG_NP", 7.7, # mean + 6*sd
      "IgG_RBD", 3.5, # mean + 4*sd
      "IgG_S1", 2.3, # mean + 3*sd
      "IgG_S2", 62.8, # mean + 6*sd
      "IgA_NP", 27.7, # mean + 6*sd
      "IgA_RBD", 13.9, # mean + 6*sd
      "IgA_S1", 6.7, # mean + 6*sd
      "IgA_S2", 35.8, # mean + 6*sd
      "IgM_NP", 49.2, # mean + 4*sd
      "IgM_RBD", 66.8, # mean + 4*sd
      "IgM_S1", 22.9, # mean + 4*sd
      "IgM_S2", 25.2, # mean + 4*sd
    )
  # calculate foe (fold over empty beads)
  net_mfi_soc <-
    net_mfi %>%
    pivot_longer(-Sample, names_to = "target", values_to = "net_mfi") %>%
    separate(target, c("isotype", "target")) %>%
    mutate(target = paste(isotype, target, sep = "_")) %>%
    left_join(net_mfi_empty, by = c("Sample", "isotype")) %>%
    left_join(cutoff, by = c("target")) %>%
    # signal relative to (cutoff x empty-bead background)
    mutate(net_mfi_soc = net_mfi/(cutoff*empty)) %>%
    # targets without a defined cutoff (e.g. Empty, HKU1) drop out here
    filter(!is.na(net_mfi_soc)) %>%
    select(Sample, target, net_mfi_soc) %>%
    pivot_wider(names_from = target, values_from = net_mfi_soc)
  # Combine raw net MFI and soc values; shared column names get suffixed
  net_mfi_full <-
    net_mfi %>%
    left_join(net_mfi_soc, by = c("Sample"), suffix = c("_net_mfi", "_net_mfi_soc")) %>%
    select(Sample, starts_with("IgG"), starts_with("IgA"), starts_with("IgM"))
  return(net_mfi_full)
}
# igg_net_mfi1 <- get_net_mfi("data/200706_Ciao_4_IgG_20200707_104333.csv", barcodes_isotypes1, "IgG")
# iga_net_mfi1 <- get_net_mfi("data/200706_Ciao_4_IgA_20200707_141245.csv", barcodes_isotypes1, "IgA")
# igm_net_mfi1 <- get_net_mfi("data/200706_Ciao_4_IgM_20200707_164242.csv", barcodes_isotypes1, "IgM")
#
# net_mfi1 <-
# igg_net_mfi1 %>%
# left_join(iga_net_mfi1, by = c("Sample")) %>%
# left_join(igm_net_mfi1, by = c("Sample"))
#
# net_mfi2 <- get_net_mfi("data/200721_Ciao_33_IgA_G_M_20200721_144831.csv", barcodes_isotypes2)
# set flag ----------------------------------------------------------------
# Flag samples with unreliable measurements.
#
# Two quality-control text columns are produced:
#   * Fehler_count: lists every isotype/target whose bead count is below
#     `min_count` (too few beads for a reliable reading).
#   * Fehler_empty: lists every isotype whose empty-bead signal exceeds its
#     isotype-specific cutoff (high background).
#
# Which targets are checked depends on the columns present in `net_mfi_soc`:
# NP/S2/S1 always; RBD when RBD columns exist; HKU1 only when both RBD and
# HKU columns exist (mirrors the original three-branch logic, where HKU
# without RBD fell through to the NP/S2/S1-only case).
#
# Args:
#   net_mfi_soc: wide table with <isotype>_<target>_count and
#     <isotype>_Empty columns (one row per sample).
#   min_count: minimum acceptable bead count per target.
#   above_cutoff_IgG, above_cutoff_IgA, above_cutoff_IgM: empty-bead
#     background cutoffs per isotype.
# Returns: `net_mfi_soc` with the per-target flag helper columns collapsed
#   into the text columns Fehler_count and Fehler_empty.
set_flag <- function(net_mfi_soc, min_count, above_cutoff_IgG, above_cutoff_IgA, above_cutoff_IgM){
  isotypes <- c("IgG", "IgA", "IgM")
  has_rbd <- sum(str_detect(names(net_mfi_soc), "RBD")) > 0
  has_hku <- sum(str_detect(names(net_mfi_soc), "HKU")) > 0
  targets <- c("NP", "S2", "S1")
  if (has_rbd) targets <- c(targets, "RBD")
  if (has_rbd && has_hku) targets <- c(targets, "HKU1")
  flagged <- net_mfi_soc
  # Low bead-count flags, created target-within-isotype so the united
  # Fehler_count text lists IgG first, then IgA, then IgM (same order the
  # original mutate() calls produced).
  count_flag_cols <- character(0)
  for (iso in isotypes) {
    for (tgt in targets) {
      count_col <- paste0(iso, "_", tgt, "_count")
      flag_col <- paste0(count_col, "_flag")
      # NA_character_ (not NULL) is required: dplyr's type-stable if_else()
      # rejects NULL for `false`; unite(na.rm = TRUE) drops the NAs below.
      flagged[[flag_col]] <- if_else(as.numeric(flagged[[count_col]]) < min_count,
                                     paste(iso, tgt), NA_character_)
      count_flag_cols <- c(count_flag_cols, flag_col)
    }
  }
  flagged <- unite(flagged, Fehler_count, all_of(count_flag_cols), na.rm = TRUE, sep = ", ")
  # High empty-bead background flags, one per isotype
  empty_cutoffs <- c(IgG = above_cutoff_IgG, IgA = above_cutoff_IgA, IgM = above_cutoff_IgM)
  empty_flag_cols <- character(0)
  for (iso in isotypes) {
    flag_col <- paste0(iso, "_Empty_flag")
    flagged[[flag_col]] <- if_else(flagged[[paste0(iso, "_Empty")]] > empty_cutoffs[[iso]],
                                   iso, NA_character_)
    empty_flag_cols <- c(empty_flag_cols, flag_col)
  }
  flagged <- unite(flagged, Fehler_empty, all_of(empty_flag_cols), na.rm = TRUE, sep = ", ")
  return(flagged)
}
# net_mfi1 <- left_join(net_mfi1, count1, by = c("Sample"))
# net_mfi1_flagged <- set_flag(net_mfi1, 20, 40.05, 55.26, 539.74)
#
# net_mfi2 <- left_join(net_mfi2, count2, by = c("Sample"))
# net_mfi2_flagged <- set_flag(net_mfi2, 20, 40.05, 55.26, 539.74)
# test result -------------------------------------------------------------
# Derive a qualitative serology interpretation per sample from the
# signal-over-cutoff (soc) values.
#
# Each of the 12 isotype/target combinations is scored positive (1),
# intermediate (0.5 -- IgG only, via the grey-zone thresholds *_gw) or
# negative (0); the scores are then summed and mapped to a final
# German-language classification in `Serokonversion`.
#
# NOTE(review): the RBD soc columns (e.g. IgG_RBD_net_mfi_soc) are
# referenced unconditionally, so this function assumes an RBD-containing
# panel -- confirm before using it on plates without RBD beads.
#
# Args:
#   net_mfi_soc: wide table with <isotype>_<target>_net_mfi_soc columns.
#   IgG_NP_gw, IgG_RBD_gw, IgG_S1_gw: grey-zone lower bounds for the
#     intermediate (0.5) IgG calls.
# Returns: input with per-target result columns, summary scores and the
#   Serokonversion classification appended.
test_result <- function(net_mfi_soc, IgG_NP_gw, IgG_RBD_gw, IgG_S1_gw){
  # the result of each of the 12 parameter can either be
  # positive (1), negative (0) or intermediate (0.5)
  net_mfi_soc_result <-
    net_mfi_soc %>%
    mutate(IgG_Resultat_S1 = if_else(IgG_S1_net_mfi_soc > 1, 1, if_else(IgG_S1_net_mfi_soc > IgG_S1_gw, 0.5, 0)),
           IgA_Resultat_S1 = if_else(IgA_S1_net_mfi_soc > 1, 1, 0),
           IgM_Resultat_S1 = if_else(IgM_S1_net_mfi_soc > 1, 1, 0)) %>%
    mutate(IgG_Resultat_S2 = if_else(IgG_S2_net_mfi_soc > 1, 1, 0),
           IgA_Resultat_S2 = if_else(IgA_S2_net_mfi_soc > 1, 1, 0),
           IgM_Resultat_S2 = if_else(IgM_S2_net_mfi_soc > 1, 1, 0)) %>%
    mutate(IgG_Resultat_NP = if_else(IgG_NP_net_mfi_soc > 1, 1, if_else(IgG_NP_net_mfi_soc > IgG_NP_gw, 0.5, 0)),
           IgA_Resultat_NP = if_else(IgA_NP_net_mfi_soc > 1, 1, 0),
           IgM_Resultat_NP = if_else(IgM_NP_net_mfi_soc > 1, 1, 0)) %>%
    mutate(IgG_Resultat_RBD = if_else(IgG_RBD_net_mfi_soc > 1, 1, if_else(IgG_RBD_net_mfi_soc > IgG_RBD_gw, 0.5, 0)),
           IgA_Resultat_RBD = if_else(IgA_RBD_net_mfi_soc > 1, 1, 0),
           IgM_Resultat_RBD = if_else(IgM_RBD_net_mfi_soc > 1, 1, 0)) %>%
    # Summary scores: overall sum, IgG-only sum, number of clear positives,
    # and number of clear IgA/IgM positives (used to qualify weak reactions)
    mutate(Resultat_sum = IgG_Resultat_S1+IgA_Resultat_S1+IgM_Resultat_S1+
             IgG_Resultat_S2+IgA_Resultat_S2+IgM_Resultat_S2+
             IgG_Resultat_NP+IgA_Resultat_NP+IgM_Resultat_NP+
             IgG_Resultat_RBD+IgA_Resultat_RBD+IgM_Resultat_RBD,
           Resultat_sum_IgG = IgG_Resultat_S1+IgG_Resultat_S2+IgG_Resultat_NP+IgG_Resultat_RBD,
           Resultat_n_pos = (IgG_Resultat_S1==1)+(IgA_Resultat_S1==1)+(IgM_Resultat_S1==1)+
             (IgG_Resultat_S2==1)+(IgA_Resultat_S2==1)+(IgM_Resultat_S2==1)+
             (IgG_Resultat_NP==1)+(IgA_Resultat_NP==1)+(IgM_Resultat_NP==1)+
             (IgG_Resultat_RBD==1)+(IgA_Resultat_RBD==1)+(IgM_Resultat_RBD==1),
           Resultat_n_pos_IgA_IgM = (IgA_Resultat_S1==1)+(IgM_Resultat_S1==1)+
             (IgA_Resultat_S2==1)+(IgM_Resultat_S2==1)+
             (IgA_Resultat_NP==1)+(IgM_Resultat_NP==1)+
             (IgA_Resultat_RBD==1)+(IgM_Resultat_RBD==1)) %>%
    # Final call; case_when picks the first matching rule top-down
    mutate(Serokonversion = case_when(
      (Resultat_sum >= 2) & (Resultat_sum_IgG >= 0.5) ~ "Positiv, fortgeschritten",
      (Resultat_sum >= 2) & (Resultat_sum_IgG == 0) ~ "Positiv, partiell",
      (IgG_Resultat_S1 == 1 | IgG_Resultat_NP == 1 | IgG_Resultat_RBD == 1) | (Resultat_sum_IgG >= 0.5 & Resultat_n_pos_IgA_IgM == 1) ~ "Schwach reaktiv",
      Resultat_n_pos == 1 ~ "Indeterminat",
      TRUE ~ "Negativ"
    ))
  return(net_mfi_soc_result)
}
# net_mfi1_flagged_result <- test_result(net_mfi1_flagged, 0.884, 0.714, 0.895)
# create gt table ---------------------------------------------------------
# Render the result table as a colour-coded gt table.
#
# Cell colouring of the signal-over-cutoff columns:
#   * blue  (#06AED5): positive, value > 1 (all isotypes/targets)
#   * yellow (#FFE74C): IgG grey zone, value >= the hard-coded grey-zone
#     thresholds (NP 0.884, S1 0.895, RBD 0.714)
# Yellow is applied first and blue afterwards, so cells above 1 end up blue.
# The RBD styling/formatting is only emitted when RBD columns are present.
#
# Args:
#   table: wide result table with a Sample column and <isotype>_<target>
#     value columns (underscore-delimited names drive the column spanners).
# Returns: a gt table object.
create_gt <- function(table){
  # Branch on panel layout: with or without RBD columns
  if (sum(str_detect(names(table), "RBD")) > 0){
    net_mfi_soc_clean_gt <-
      table %>%
      gt(rowname_col = "Sample") %>%
      tab_spanner_delim(delim = "_") %>%
      # IgG grey-zone (yellow) then positive (blue) fills
      tab_style(
        style = cell_fill(color = "#FFE74C"),
        locations = cells_body(
          columns = vars(IgG_NP),
          rows = IgG_NP >= 0.884)
      ) %>%
      tab_style(
        style = cell_fill(color = "#06AED5"),
        locations = cells_body(
          columns = vars(IgG_NP),
          rows = IgG_NP > 1)
      ) %>%
      tab_style(
        style = cell_fill(color = "#06AED5"),
        locations = cells_body(
          columns = vars(IgG_S2),
          rows = IgG_S2 > 1)
      ) %>%
      tab_style(
        style = cell_fill(color = "#FFE74C"),
        locations = cells_body(
          columns = vars(IgG_S1),
          rows = IgG_S1 >= 0.895)
      ) %>%
      tab_style(
        style = cell_fill(color = "#06AED5"),
        locations = cells_body(
          columns = vars(IgG_S1),
          rows = IgG_S1 > 1)
      ) %>%
      tab_style(
        style = cell_fill(color = "#FFE74C"),
        locations = cells_body(
          columns = vars(IgG_RBD),
          rows = IgG_RBD >= 0.714)
      ) %>%
      tab_style(
        style = cell_fill(color = "#06AED5"),
        locations = cells_body(
          columns = vars(IgG_RBD),
          rows = IgG_RBD > 1)
      ) %>%
      # IgA and IgM: positive (blue) fills only
      tab_style(
        style = cell_fill(color = "#06AED5"),
        locations = cells_body(
          columns = vars(IgA_NP),
          rows = IgA_NP > 1)
      ) %>%
      tab_style(
        style = cell_fill(color = "#06AED5"),
        locations = cells_body(
          columns = vars(IgA_S2),
          rows = IgA_S2 > 1)
      ) %>%
      tab_style(
        style = cell_fill(color = "#06AED5"),
        locations = cells_body(
          columns = vars(IgA_S1),
          rows = IgA_S1 > 1)
      ) %>%
      tab_style(
        style = cell_fill(color = "#06AED5"),
        locations = cells_body(
          columns = vars(IgA_RBD),
          rows = IgA_RBD > 1)
      ) %>%
      tab_style(
        style = cell_fill(color = "#06AED5"),
        locations = cells_body(
          columns = vars(IgM_NP),
          rows = IgM_NP > 1)
      ) %>%
      tab_style(
        style = cell_fill(color = "#06AED5"),
        locations = cells_body(
          columns = vars(IgM_S2),
          rows = IgM_S2 > 1)
      ) %>%
      tab_style(
        style = cell_fill(color = "#06AED5"),
        locations = cells_body(
          columns = vars(IgM_S1),
          rows = IgM_S1 > 1)
      ) %>%
      tab_style(
        style = cell_fill(color = "#06AED5"),
        locations = cells_body(
          columns = vars(IgM_RBD),
          rows = IgM_RBD > 1)
      ) %>%
      # One decimal place for all value columns
      fmt_number(
        columns = c(ends_with("NP"),
                    ends_with("S2"),
                    ends_with("S1"),
                    ends_with("RBD")),
        decimals = 1
      )
  } else{
    net_mfi_soc_clean_gt <-
      table %>%
      gt(rowname_col = "Sample") %>%
      tab_spanner_delim(delim = "_") %>%
      # IgG grey-zone (yellow) then positive (blue) fills
      tab_style(
        style = cell_fill(color = "#FFE74C"),
        locations = cells_body(
          columns = vars(IgG_NP),
          rows = IgG_NP >= 0.884)
      ) %>%
      tab_style(
        style = cell_fill(color = "#06AED5"),
        locations = cells_body(
          columns = vars(IgG_NP),
          rows = IgG_NP > 1)
      ) %>%
      tab_style(
        style = cell_fill(color = "#06AED5"),
        locations = cells_body(
          columns = vars(IgG_S2),
          rows = IgG_S2 > 1)
      ) %>%
      tab_style(
        style = cell_fill(color = "#FFE74C"),
        locations = cells_body(
          columns = vars(IgG_S1),
          rows = IgG_S1 >= 0.895)
      ) %>%
      tab_style(
        style = cell_fill(color = "#06AED5"),
        locations = cells_body(
          columns = vars(IgG_S1),
          rows = IgG_S1 > 1)
      ) %>%
      # IgA and IgM: positive (blue) fills only
      tab_style(
        style = cell_fill(color = "#06AED5"),
        locations = cells_body(
          columns = vars(IgA_NP),
          rows = IgA_NP > 1)
      ) %>%
      tab_style(
        style = cell_fill(color = "#06AED5"),
        locations = cells_body(
          columns = vars(IgA_S2),
          rows = IgA_S2 > 1)
      ) %>%
      tab_style(
        style = cell_fill(color = "#06AED5"),
        locations = cells_body(
          columns = vars(IgA_S1),
          rows = IgA_S1 > 1)
      ) %>%
      tab_style(
        style = cell_fill(color = "#06AED5"),
        locations = cells_body(
          columns = vars(IgM_NP),
          rows = IgM_NP > 1)
      ) %>%
      tab_style(
        style = cell_fill(color = "#06AED5"),
        locations = cells_body(
          columns = vars(IgM_S2),
          rows = IgM_S2 > 1)
      ) %>%
      tab_style(
        style = cell_fill(color = "#06AED5"),
        locations = cells_body(
          columns = vars(IgM_S1),
          rows = IgM_S1 > 1)
      ) %>%
      # One decimal place for all value columns
      fmt_number(
        columns = c(ends_with("NP"),
                    ends_with("S2"),
                    ends_with("S1")),
        decimals = 1
      )
  }
  return(net_mfi_soc_clean_gt)
}
|
## Getting and Cleaning Data - Peer Assessment 1
## "You should create one R script called run_analysis.R that does the following.
## A. Merges the training and the test sets to create one data set.
## B. Extracts only the measurements on the mean and standard deviation for each measurement.
## C. Uses descriptive activity names to name the activities in the data set
## D. Appropriately labels the data set with descriptive activity names.
## E. Creates a second, independent tidy data set with the average of each variable for each activity and each subject."
## Step 0 - Acquire the data set:
##-------------------------------
## Assume data set is already downloaded, in the current working directory and uses the default name
## If assumption is false, throws a warning about missing file. Can use next two lines of commented code to download file
#fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
#download.file(url=fileUrl,destfile="getdata_projectfiles_UCI HAR Dataset.zip")
## Abort with an error if the zip archive is missing.
## BUG FIX: the original used a bare `stop` (no parentheses), which merely
## evaluates the function object and has no effect -- the script printed the
## message and then carried on, failing later with a confusing error.
if (!file.exists("getdata_projectfiles_UCI HAR Dataset.zip")){
  stop("'getdata_projectfiles_UCI HAR Dataset.zip' does not exist in current working directory - see Script for download.file command",
       call. = FALSE)
}
## Unzip the archive only when the extracted data is not already present,
## detected via the \UCI HAR Dataset\README.txt file inside it.
if (!file.exists("UCI HAR Dataset/README.txt")) {
  unzip(zipfile = "getdata_projectfiles_UCI HAR Dataset.zip")
}
## Step 1 - Import the data sets:
##-------------------------------
## Assemble the complete "test" set: subject IDs (subject_test.txt),
## activity codes (y_test.txt) and feature measurements (X_test.txt),
## combined column-wise into Subject | Activity | features.
subject_test <- read.table("UCI HAR Dataset/test/subject_test.txt")
y_test <- read.table("UCI HAR Dataset/test/y_test.txt")
X_test <- read.table("UCI HAR Dataset/test/X_test.txt")
test_set <- cbind(subject_test, y_test, X_test)
## Assemble the complete "training" set in exactly the same column order.
subject_train <- read.table("UCI HAR Dataset/train/subject_train.txt")
y_train <- read.table("UCI HAR Dataset/train/y_train.txt")
X_train <- read.table("UCI HAR Dataset/train/X_train.txt")
train_set <- cbind(subject_train, y_train, X_train)
## Step 2 - Merge the data sets:
##------------------------------
## Feature names come from features.txt (second column holds the names).
feature_headings <- read.table("UCI HAR Dataset/features.txt")
## Headings matching the Subject | Activity | features column layout.
data_headings <- c("Subject", "Activity", as.character(feature_headings[, 2]))
## The two sets share no key, so the "merge" is a simple row bind.
combined_set <- data.frame(rbind(test_set, train_set))
colnames(combined_set) <- data_headings
## ==================================================================
## A. "Merges the training and the test sets to create one data set."
## A. is now complete, "combined_set" is the one data set requested.
## ==================================================================
## Step 3 - Identify and Extract the Mean and STD columns:
##--------------------------------------------------------
## Identify which columns use the "mean(" or "std(" strings in them, using the "stringr" package
#install.packages("stringr")
## library() errors immediately when stringr is unavailable; the original
## require() only returns FALSE, letting the script fail later with a
## confusing "could not find function" message.
library(stringr)
## TRUE where a heading contains "mean(" or "std(" (strings taken from
## examination of features.txt and features_info.txt). data_headings is
## already a character vector, so no as.character() coercion is needed.
meanorstd <- str_detect(data_headings, "mean\\(") | str_detect(data_headings, "std\\(")
## Positions of the matching headings
meanorstd_cols <- which(meanorstd)
## Keep Subject, Activity and only the columns identified as Mean or Std
trimmed_set <- cbind(combined_set[,1:2], combined_set[,meanorstd_cols])
## ==========================================================================================
## B. Extracts only the measurements on the mean and standard deviation for each measurement.
## B. is now complete, the "trimmed_set" contains only the names and extracted Mean/Std cols
## ==========================================================================================
## Step 4 - Describe the Activities by name:
##-------------------------------------------
## activity_labels.txt maps activity codes (column 1) to names (column 2).
activity_labels <- read.table("UCI HAR Dataset/activity_labels.txt")
## Translate the numeric Activity codes into a named factor column:
## levels = the codes, labels = the corresponding activity names.
activity_label_col <- data.frame(factor(trimmed_set$Activity,
                                        levels = activity_labels[, 1],
                                        labels = activity_labels[, 2]))
colnames(activity_label_col) <- "Activity_Name"
## N.B. A new column is added rather than replacing Activity, because the
## instructions are ambiguous about replacing the numeric codes and the
## remaining steps assume both Activity and Activity_Name are present.
## (Replacing in place would be:
##  combined_set$Activity <- factor(combined_set$Activity, levels=activity_labels[,1], labels=activity_labels[,2]))
## Insert Activity_Name right after the Activity code column.
activitynamed_set <- data.frame(trimmed_set[, 1:2],
                                activity_label_col,
                                trimmed_set[, 3:ncol(trimmed_set)])
## =========================================================================
## C. Uses descriptive activity names to name the activities in the data set
## D. Appropriately labels the data set with descriptive activity names.
## C. & D. are now complete - the names have been taken from the files and
## now appear as a labeled columns, including the extra column
## Activity_Name, in the "activitynamed_set"
## =========================================================================
## Step 5 - Create the averages and tidy set:
##-------------------------------------------
## library() errors immediately when reshape2 is unavailable; the original
## require() only returns FALSE and would let the script fail later.
library(reshape2)
## Melt to long form keyed by Subject / Activity / Activity_Name
melted_set <- melt(activitynamed_set, id = c("Subject", "Activity", "Activity_Name"))
## Recast wide again, averaging every measured variable per
## Subject + Activity (+ Activity_Name) combination
tidy_set <- dcast(melted_set, formula = Subject + Activity + Activity_Name ~ variable, mean)
## Output the tidy data set as a text file, for upload
write.table(tidy_set, "tidy_set.txt", row.names = FALSE)
## =====================================================================================================================
## E. Creates a second, independent tidy data set with the average of each variable for each activity and each subject."
## E. is now complete - the tidy_set has one row for each Subject + Activity/Activity Name combination along with
## computed averages (mean) of all other Feature data.
## =====================================================================================================================
| /run_analysis.R | no_license | nechapman/GandCData-PeerAssess1 | R | false | false | 7,524 | r | ## Getting and Cleaning Data - Peer Assessment 1
## "You should create one R script called run_analysis.R that does the following.
## A. Merges the training and the test sets to create one data set.
## B. Extracts only the measurements on the mean and standard deviation for each measurement.
## C. Uses descriptive activity names to name the activities in the data set
## D. Appropriately labels the data set with descriptive activity names.
## E. Creates a second, independent tidy data set with the average of each variable for each activity and each subject."
## Step 0 - Acquire the data set:
##-------------------------------
## Assume data set is already downloaded, in the current working directory and uses the default name
## If assumption is false, throws a warning about missing file. Can use next two lines of commented code to download file
#fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
#download.file(url=fileUrl,destfile="getdata_projectfiles_UCI HAR Dataset.zip")
## Abort with an error if the zip archive is missing.
## BUG FIX: the original used a bare `stop` (no parentheses), which merely
## evaluates the function object and has no effect -- the script printed the
## message and then carried on, failing later with a confusing error.
if (!file.exists("getdata_projectfiles_UCI HAR Dataset.zip")){
  stop("'getdata_projectfiles_UCI HAR Dataset.zip' does not exist in current working directory - see Script for download.file command",
       call. = FALSE)
}
## Check if data set has already been extracted by looking for the \UCI HAR Dataset\README.txt file
## If it doesn't - unzip the (existing) zip file
## Unzip the archive only when the extracted dataset is not already present.
if (!file.exists("UCI HAR Dataset/README.txt")) {
  unzip(zipfile = "getdata_projectfiles_UCI HAR Dataset.zip")
}
## Step 1 - Import the data sets:
##-------------------------------
## Assemble the complete "test" set: subject IDs (subject_test.txt),
## activity codes (y_test.txt) and feature measurements (X_test.txt),
## combined column-wise into Subject | Activity | features.
subject_test <- read.table("UCI HAR Dataset/test/subject_test.txt")
y_test <- read.table("UCI HAR Dataset/test/y_test.txt")
X_test <- read.table("UCI HAR Dataset/test/X_test.txt")
test_set <- cbind(subject_test, y_test, X_test)
## Assemble the complete "training" set in exactly the same column order.
subject_train <- read.table("UCI HAR Dataset/train/subject_train.txt")
y_train <- read.table("UCI HAR Dataset/train/y_train.txt")
X_train <- read.table("UCI HAR Dataset/train/X_train.txt")
train_set <- cbind(subject_train, y_train, X_train)
## Step 2 - Merge the data sets:
##------------------------------
## Feature names come from features.txt (second column holds the names).
feature_headings <- read.table("UCI HAR Dataset/features.txt")
## Headings matching the Subject | Activity | features column layout.
data_headings <- c("Subject", "Activity", as.character(feature_headings[, 2]))
## The two sets share no key, so the "merge" is a simple row bind.
combined_set <- data.frame(rbind(test_set, train_set))
colnames(combined_set) <- data_headings
## ==================================================================
## A. "Merges the training and the test sets to create one data set."
## A. is now complete, "combined_set" is the one data set requested.
## ==================================================================
## Step 3 - Identify and Extract the Mean and STD columns:
##--------------------------------------------------------
## Identify which columns use the "mean(" or "std(" strings in them, using the "stringr" package
#install.packages("stringr")
## library() errors immediately when stringr is unavailable; the original
## require() only returns FALSE, letting the script fail later with a
## confusing "could not find function" message.
library(stringr)
## TRUE where a heading contains "mean(" or "std(" (strings taken from
## examination of features.txt and features_info.txt). data_headings is
## already a character vector, so no as.character() coercion is needed.
meanorstd <- str_detect(data_headings, "mean\\(") | str_detect(data_headings, "std\\(")
## Positions of the matching headings
meanorstd_cols <- which(meanorstd)
## Keep Subject, Activity and only the columns identified as Mean or Std
trimmed_set <- cbind(combined_set[,1:2], combined_set[,meanorstd_cols])
## ==========================================================================================
## B. Extracts only the measurements on the mean and standard deviation for each measurement.
## B. is now complete, the "trimmed_set" contains only the names and extracted Mean/Std cols
## ==========================================================================================
## Step 4 - Describe the Activities by name:
##-------------------------------------------
## Import the Activity Names from the "activity_labels.txt"
activity_labels <- read.table("UCI HAR Dataset/activity_labels.txt")
## Create a new Activity Label column using:
## i. Activity column in combined_set
## ii. Factor reference numbers (levels) from the first column in activity_labels
## iii.Factor reference names (labels) from the second column in activity_labels
activity_label_col <- data.frame(factor(trimmed_set$Activity, levels=activity_labels[,1],labels=activity_labels[,2]))
colnames(activity_label_col) <- "Activity_Name"
## N.B. I'm creating a new column as the instructions are ambigious about whether to Replace the Activity Numbers or not
## you could replace the existing column instead using:
## combined_set$Activity <- factor(combined_set1$Activity, levels=activity_labels[,1],labels=activity_labels[,2])
## This command would break some of the following script - as the presence of Activity and Activity_Name are assumed
## Add the Activity_Name column into the combined set (next to the Activity number)
activitynamed_set <- data.frame(trimmed_set[,1:2],activity_label_col,trimmed_set[,3:ncol(trimmed_set)])
## =========================================================================
## C. Uses descriptive activity names to name the activities in the data set
## D. Appropriately labels the data set with descriptive activity names.
## C. & D. are now complete - the names have been taken from the files and
## now appear as a labeled columns, including the extra column
## Activity_Name, in the "activitynamed_set"
## =========================================================================
## Step 5 - Create the averages and tidy set:
##-------------------------------------------
require(reshape2)
## Melt the data set (as per Reshaping Data lecture)
melted_set <- melt(activitynamed_set,id = c("Subject", "Activity", "Activity_Name"))
## Recast the data, with the Id columns as "Subject", "Activity", "Activity_Name" and compute Mean of all other entries
tidy_set <- dcast(melted_set, formula = Subject + Activity + Activity_Name ~ variable, mean)
## Output the tidy data set as a text file, for upload
write.table(tidy_set,"tidy_set.txt",row.names=FALSE)
## =====================================================================================================================
## E. Creates a second, independent tidy data set with the average of each variable for each activity and each subject."
## E. is now complete - the tidy_set has one row for each Subject + Activity/Activity Name combination along with
## computed averages (mean) of all other Feature data.
## =====================================================================================================================
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## This function creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
  ## Build a "cache-aware matrix": a list of accessor closures sharing the
  ## matrix `x` and a memo slot for its (lazily computed) inverse.
  cached_inverse <- NULL

  ## Replace the stored matrix and invalidate any stale cached inverse.
  set <- function(new_matrix) {
    x <<- new_matrix
    cached_inverse <<- NULL
  }

  ## Return the stored matrix.
  get <- function() {
    x
  }

  ## Record a freshly computed inverse in the cache.
  setinverse <- function(inverse) {
    cached_inverse <<- inverse
  }

  ## Return the cached inverse, or NULL when none has been stored yet.
  getinverse <- function() {
    cached_inverse
  }

  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
## Write a short comment describing this function
## This function computes the inverse of the special
## "matrix" returned by `makeCacheMatrix` above. If the inverse has
## already been calculated (and the matrix has not changed), then
## `cacheSolve` should retrieve the inverse from the cache
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x', where 'x' is the special
  ## "matrix" produced by makeCacheMatrix (must expose get/getinverse/setinverse).
  ## If the inverse is already cached it is returned without recomputation;
  ## otherwise it is computed, stored in the cache, and returned.
  ## Extra arguments in '...' are passed on to solve().
  m <- x$getinverse()
  if(!is.null(m)) {
    message("getting cached data")
    return(m)
  }
  data <- x$get()
  ## Bug fix: the original called the undefined function inverse();
  ## base::solve(a) computes the inverse of a square matrix.
  m <- solve(data, ...)
  x$setinverse(m)
  m
}
| /cachematrix.R | no_license | JJD129/ProgrammingAssignment2 | R | false | false | 1,190 | r | ## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## This function creates a special "matrix" object that can cache its inverse.
makeCacheMatrix <- function(x = matrix()) {
  ## Creates a cache-aware matrix wrapper: a list of closures over the
  ## matrix `x` and `m`, the memoised inverse (NULL until computed).
  m <- NULL
  set <- function(y) { ## replace the stored matrix and invalidate the cache
    x <<- y
    m <<- NULL
  }
  get <- function() x ## return the stored matrix
  setinverse <- function(inverse) m <<- inverse ## store a computed inverse
  getinverse <- function() m ## return the cached inverse, or NULL if unset
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse) ## expose the four accessors by name
}
## Write a short comment describing this function
## This function computes the inverse of the special
## "matrix" returned by `makeCacheMatrix` above. If the inverse has
## already been calculated (and the matrix has not changed), then
## `cacheSolve` should retrieve the inverse from the cache
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x', where 'x' is the special
  ## "matrix" produced by makeCacheMatrix (must expose get/getinverse/setinverse).
  ## If the inverse is already cached it is returned without recomputation;
  ## otherwise it is computed, stored in the cache, and returned.
  ## Extra arguments in '...' are passed on to solve().
  m <- x$getinverse()
  if(!is.null(m)) {
    message("getting cached data")
    return(m)
  }
  data <- x$get()
  ## Bug fix: the original called the undefined function inverse();
  ## base::solve(a) computes the inverse of a square matrix.
  m <- solve(data, ...)
  x$setinverse(m)
  m
}
|
#' @title
#' Test of group differences
#'
#' @description
#' One-way analysis (ANOVA or Kruskal-Wallis Test)
#' with post-hoc comparisons and plots
#'
#' @param formula use the form \code{Y ~ X} where \code{Y} is the response
#' variable, and \code{X} is the categorical explanatory variable. \code{X}
#' will be coerced to be a factor.
#' @param data a data frame.
#' @param method character. Either \code{"anova"}, or \code{"kw"} (see details).
#' @param digits Number of significant digits to print.
#' @param horizontal logical. If \code{TRUE}, boxplots are plotted horizontally.
#' @param posthoc logical. If \code{TRUE}, the default, perform pairwise post-hoc comparisons
#' (TukeyHSD for ANOVA and Conover Test for Kuskal Wallis). This test
#' will only be performed if there are 3 or more levels for X.
#' @import dplyr
#' @import ggplot2
#' @importFrom multcompView multcompLetters
#' @importFrom PMCMRplus kwAllPairsConoverTest
#' @importFrom stats TukeyHSD aov dist hclust kruskal.test mad median sd
#' @return a list with 3 components:
#' \describe{
#' \item{\code{result}}{omnibus test}
#' \item{\code{summarystats}}{summary statistics}
#' \item{\code{plot}}{ggplot2 graph}
#' }
#' @export
#' @details
#' The \code{groupdiff} function performs one of two analyses:
#' \describe{
#' \item{\code{anova}}{A one-way analysis of variance, with TukeyHSD
#' post-hoc comparisons.}
#' \item{\code{kw}}{A Kruskal Wallis Rank Sum Test, with Conover
#' Test post-hoc comparisons.}
#' }
#' In each case, summary statistics and a grouped boxplots are
#' provided. In the parametric case, the statistics are n, mean, and
#' standard deviation. In the nonparametric case the statistics are
#' n, median, and median absolute deviation. If \code{posthoc = TRUE},
#' pairwise comparisons are superimposed on the boxplots.
#' Groups that share a letter are not significantly different (p < .05),
#' controlling for multiple comparisons.
#' @seealso \link[PMCMRplus]{kwAllPairsConoverTest},
#' \link[multcompView]{multcompLetters}.
#' @examples
#' # parametric analysis
#' groupdiff(hp ~ gear, cars74)
#'
#' # nonparametric analysis
#' groupdiff(popularity ~ vehicle_style, cardata,
#' method="kw", horizontal=TRUE)
groupdiff <- function(formula, data,
                      method=c("anova", "kw"),
                      digits=2,
                      horizontal=FALSE,
                      posthoc=TRUE){
  # Parse the formula y ~ x: y is the response, x the grouping variable.
  x <- as.character(formula[[3]])
  y <- as.character(formula[[2]])
  data <- na.omit(data[c(x, y)])
  # Dummy binding so R CMD check does not flag the NSE use of 'Letters'
  # inside aes() below.
  Letters <- NULL
  data[x] <- factor(data[[x]])
  # Pairwise compact-letter displays need at least 3 groups.
  if (posthoc & length(levels(data[[x]])) < 3){
    message("Note: Post-hoc comparisons require 3 or more groups.\n")
    posthoc <- FALSE
  }
  # Order the groups by mean response; strip "-" from level names so they
  # line up with the labels returned by multcompLetters().
  data[x] <- reorder(data[[x]], data[[y]], mean, na.rm=TRUE)
  levels(data[[x]]) <- gsub("-", " ", levels(data[[x]]))
  # calculate offset used to place the comparison letters above the boxes
  if (posthoc){
    offset <- (max(data[[y]]) - min(data[[y]]))/10
  } else {
    offset <- 0
  }
  method <- match.arg(method)
  if(method == "anova"){
    # omnibus F test
    a <- aov(formula, data)
    fit <- unlist(summary(a))
    # Fix: avoid 'F' as a variable name (it shadows the FALSE shorthand).
    f_stat <- round(fit[7], digits)
    df1 <- fit[1]
    df2 <- fit[2]
    p_val <- fit[9]
    fit.result <- paste0("F(", df1, ", ", df2, ") = ", f_stat,
                        ", p-value = ", format.pval(p_val, digits))
    # summary statistics: n, mean, sd per group
    mystats <- data %>%
      group_by(.data[[x]]) %>%
      summarise(n = n(),
                mean = round(mean(.data[[y]], na.rm=TRUE), digits),
                sd = round(sd(.data[[y]], na.rm=TRUE), digits),
                .groups = 'drop') %>%
      as.data.frame()
    # posthoc comparisons (for more than 2 levels): TukeyHSD p-values are
    # converted to a compact letter display via multcompLetters().
    if (posthoc){
      tHSD <- TukeyHSD(a, ordered = FALSE, conf.level = 0.95)
      Tukey.levels <- tHSD[[1]][,4]
      Tukey.labels <- multcompLetters(Tukey.levels)['Letters']
      plot.labels <- names(Tukey.labels[['Letters']])
      plot.levels <- data.frame(plot.labels, Tukey.labels,
                                stringsAsFactors = FALSE,
                                row.names=NULL)
      plot.levels$y <- max(data[y], na.rm=TRUE) + offset
    }
    # plot: boxplots + group means (crosses) + overall mean (dashed line).
    # Fix: aes_string() is deprecated in ggplot2; use the .data pronoun
    # (already used in the dplyr calls above) instead.
    p <- ggplot(data, aes(x=.data[[x]], y=.data[[y]])) +
      geom_hline(yintercept=mean(data[[y]], na.rm=TRUE),
                 linetype="dashed",
                 color="darkgrey") +
      geom_boxplot(aes(fill=.data[[x]])) +
      geom_point(data=mystats,
                 mapping=aes(x=.data[[x]], y=.data$mean),
                 size=1, shape=3) +
      theme_minimal() +
      theme( plot.subtitle = element_text(size = 9),
             plot.caption = element_text(size=8),
             legend.position="none") +
      labs(title = "Group Differences",
           subtitle = paste("ANOVA:", fit.result),
           caption = "Crosses are group means. Dashed line is the mean value for all obs.")
    if(posthoc){
      p <- p + geom_text(data = plot.levels,
                         aes(x = plot.labels,
                             y = y,
                             label = Letters),
                         size=3, fontface="italic") +
        labs(caption = "Boxplots that share letters are not significantly different (TukeyHSD p < 0.05).\nCrosses are group means. Dashed line is the mean value for all obs.")
    }
    if(horizontal) p <- p + coord_flip()
  }
  if(method == "kw"){
    # omnibus Kruskal-Wallis rank sum test
    fit <- kruskal.test(formula, data)
    fit.result <- paste("Chi-squared = ",
                        round(fit$statistic, digits),
                        ", df =",
                        fit$parameter,
                        ", p-value =",
                        format.pval(fit$p.value, digits))
    # summary statistics: n, median, mad per group
    mystats <- data %>%
      group_by(.data[[x]]) %>%
      summarise(n = n(),
                median = round(median(.data[[y]], na.rm=TRUE), digits),
                mad = round(mad(.data[[y]], na.rm=TRUE), digits),
                .groups = 'drop') %>%
      as.data.frame()
    # posthoc: the Conover all-pairs p-values come back as a lower-triangular
    # matrix; pad and mirror it to a full symmetric matrix for multcompLetters().
    if(posthoc){
      phtest <- kwAllPairsConoverTest(formula, data)
      n <- ncol(phtest$p.value)
      w <- matrix(nrow=n+1, ncol=n+1)
      nms <- c(dimnames(phtest$p.value)[[2]], dimnames(phtest$p.value)[[1]][n])
      dimnames(w) <- list(nms, nms)
      w[2:nrow(w), 1:n] <- phtest$p.value
      makeSymm <- function(m) {
        m[upper.tri(m)] <- t(m)[upper.tri(m)]
        return(m)
      }
      w <- makeSymm(w)
      plot.levels <- data.frame(
        plot.labels = rownames(w),
        Letters = as.list(multcompLetters(w))$Letters,
        y = max(data[y], na.rm=TRUE) + offset
      )
    }
    # plot: boxplots + overall median (dashed line).
    p <- ggplot(data, aes(x=.data[[x]], y=.data[[y]])) +
      geom_hline(yintercept=median(data[[y]], na.rm=TRUE),
                 linetype="dashed",
                 color="darkgrey") +
      geom_boxplot(aes(fill = .data[[x]])) +
      theme_minimal() +
      theme( plot.subtitle = element_text(size = 9),
             plot.caption = element_text(size=8),
             legend.position="none") +
      labs(title = "Group Differences",
           subtitle = paste("Kruskal-Wallis:", fit.result),
           caption = "Dashed line is the median value for all obs.")
    if(posthoc){
      p <- p + geom_text(data = plot.levels,
                         aes(x = plot.labels,
                             y = y,
                             label = Letters),
                         size=3, fontface="italic") +
        labs(caption = "Boxplots that share letters are not significantly different (Conover Test p < 0.05).\nDashed line is the median value for all obs.")
    }
    if(horizontal) p <- p + coord_flip()
  }
  # return omnibus result, summary statistics, and the ggplot object
  result <- list(result = fit.result, summarystats = mystats, plot=p)
  return(result)
}
| /R/groupdiff.R | permissive | Rkabacoff/qacEDA | R | false | false | 7,654 | r | #' @title
#' Test of group differences
#'
#' @description
#' One-way analysis (ANOVA or Kruskal-Wallis Test)
#' with post-hoc comparisons and plots
#'
#' @param formula use the form \code{Y ~ X} where \code{Y} is the response
#' variable, and \code{X} is the categorical explanatory variable. \code{X}
#' will be coerced to be a factor.
#' @param data a data frame.
#' @param method character. Either \code{"anova"}, or \code{"kw"} (see details).
#' @param digits Number of significant digits to print.
#' @param horizontal logical. If \code{TRUE}, boxplots are plotted horizontally.
#' @param posthoc logical. If \code{TRUE}, the default, perform pairwise post-hoc comparisons
#' (TukeyHSD for ANOVA and Conover Test for Kuskal Wallis). This test
#' will only be performed if there are 3 or more levels for X.
#' @import dplyr
#' @import ggplot2
#' @importFrom multcompView multcompLetters
#' @importFrom PMCMRplus kwAllPairsConoverTest
#' @importFrom stats TukeyHSD aov dist hclust kruskal.test mad median sd
#' @return a list with 3 components:
#' \describe{
#' \item{\code{result}}{omnibus test}
#' \item{\code{summarystats}}{summary statistics}
#' \item{\code{plot}}{ggplot2 graph}
#' }
#' @export
#' @details
#' The \code{groupdiff} function performs one of two analyses:
#' \describe{
#' \item{\code{anova}}{A one-way analysis of variance, with TukeyHSD
#' post-hoc comparisons.}
#' \item{\code{kw}}{A Kruskal Wallis Rank Sum Test, with Conover
#' Test post-hoc comparisons.}
#' }
#' In each case, summary statistics and a grouped boxplots are
#' provided. In the parametric case, the statistics are n, mean, and
#' standard deviation. In the nonparametric case the statistics are
#' n, median, and median absolute deviation. If \code{posthoc = TRUE},
#' pairwise comparisons of superimposed on the boxplots.
#' Groups that share a letter are not significantly different (p < .05),
#' controlling for multiple comparisons.
#' @seealso \link[PMCMRplus]{kwAllPairsConoverTest},
#' \link[multcompView]{multcompLetters}.
#' @examples
#' # parametric analysis
#' groupdiff(hp ~ gear, cars74)
#'
#' # nonparametric analysis
#' groupdiff(popularity ~ vehicle_style, cardata,
#' method="kw", horizontal=TRUE)
groupdiff <- function(formula, data,
method=c("anova", "kw"),
digits=2,
horizontal=FALSE,
posthoc=TRUE){
x <- as.character(formula[[3]])
y <- as.character(formula[[2]])
data <- na.omit(data[c(x, y)])
Letters <- NULL
data[x] <- factor(data[[x]])
if (posthoc & length(levels(data[[x]])) < 3){
message("Note: Post-hoc comparisons require 3 or more groups.\n")
posthoc <- FALSE
}
data[x] <- reorder(data[[x]], data[[y]], mean, na.rm=TRUE)
levels(data[[x]]) <- gsub("-", " ", levels(data[[x]]))
# calculate offset
if (posthoc){
offset <- (max(data[[y]]) - min(data[[y]]))/10
} else {
offset <- 0
}
method <- match.arg(method)
if(method == "anova"){
# omnibus
a <- aov(formula, data)
fit <- unlist(summary(a))
F <- round(fit[7], digits)
df1 <- fit[1]
df2 <- fit[2]
p <- fit[9]
fit.result <- paste0("F(", df1, ", ", df2, ") = ", F,
", p-value = ", format.pval(p, digits))
# summary statistics
mystats <- data %>%
group_by(.data[[x]]) %>%
summarise(n = n(),
mean = round(mean(.data[[y]], na.rm=TRUE), digits),
sd = round(sd(.data[[y]], na.rm=TRUE), digits),
.groups = 'drop') %>%
as.data.frame()
# posthoc comparisons (for more than 2 levels)
if (posthoc){
tHSD <- TukeyHSD(a, ordered = FALSE, conf.level = 0.95)
Tukey.levels <- tHSD[[1]][,4]
Tukey.labels <- multcompLetters(Tukey.levels)['Letters']
plot.labels <- names(Tukey.labels[['Letters']])
plot.levels <- data.frame(plot.labels, Tukey.labels,
stringsAsFactors = FALSE,
row.names=NULL)
plot.levels$y <- max(data[y], na.rm=TRUE) + offset
}
# plot
p <- ggplot(data, aes_string(x=x, y=y)) +
geom_hline(yintercept=mean(data[[y]], na.rm=TRUE),
linetype="dashed",
color="darkgrey") +
geom_boxplot(aes_string(fill=x)) +
geom_point(data=mystats,
mapping=aes_string(x=x, y="mean"),
size=1, shape=3) +
theme_minimal() +
theme( plot.subtitle = element_text(size = 9),
plot.caption = element_text(size=8),
legend.position="none") +
labs(title = "Group Differences",
subtitle = paste("ANOVA:", fit.result),
caption = "Crosses are group means. Dashed line is the mean value for all obs.")
if(posthoc){
p <- p + geom_text(data = plot.levels,
aes(x = plot.labels,
y = y,
label = Letters),
size=3, fontface="italic") +
labs(caption = "Boxplots that share letters are not significantly different (TukeyHSD p < 0.05).\nCrosses are group means. Dashed line is the mean value for all obs.")
}
if(horizontal) p <- p + coord_flip()
}
if(method == "kw"){
# omnibus
fit <- kruskal.test(formula, data)
fit.result <- paste("Chi-squared = ",
round(fit$statistic, digits),
", df =",
fit$parameter,
", p-value =",
format.pval(fit$p.value, digits))
# summary statistics
mystats <- data %>%
group_by(.data[[x]]) %>%
summarise(n = n(),
median = round(median(.data[[y]], na.rm=TRUE), digits),
mad = round(mad(.data[[y]], na.rm=TRUE), digits),
.groups = 'drop') %>%
as.data.frame()
# posthoc
if(posthoc){
phtest <- kwAllPairsConoverTest(formula, data)
n <- ncol(phtest$p.value)
w <- matrix(nrow=n+1, ncol=n+1)
nms <- c(dimnames(phtest$p.value)[[2]], dimnames(phtest$p.value)[[1]][n])
dimnames(w) <- list(nms, nms)
w[2:nrow(w), 1:n] <- phtest$p.value
makeSymm <- function(m) {
m[upper.tri(m)] <- t(m)[upper.tri(m)]
return(m)
}
w <- makeSymm(w)
plot.levels <- data.frame(
plot.labels = rownames(w),
Letters = as.list(multcompLetters(w))$Letters,
y = max(data[y], na.rm=TRUE) + offset
)
}
# plot
p <- ggplot(data, aes_string(x=x, y=y)) +
geom_hline(yintercept=median(data[[y]], na.rm=TRUE),
linetype="dashed",
color="darkgrey") +
geom_boxplot(aes_string(fill = x)) +
theme_minimal() +
theme( plot.subtitle = element_text(size = 9),
plot.caption = element_text(size=8),
legend.position="none") +
labs(title = "Group Differences",
subtitle = paste("Kruskal-Wallis:", fit.result),
caption = "Dashed line is the median value for all obs.")
if(posthoc){
p <- p + geom_text(data = plot.levels,
aes(x = plot.labels,
y = y,
label = Letters),
size=3, fontface="italic") +
labs(caption = "Boxplots that share letters are not significantly different (Conover Test p < 0.05).\nDashed line is the median value for all obs.")
}
if(horizontal) p <- p + coord_flip()
}
# return results
result <- list(result = fit.result, summarystats = mystats, plot=p)
return(result)
}
|
5e64df12b558848c213941e5193dafed query02_query04_1344.qdimacs 343 564 | /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Database/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query02_query04_1344/query02_query04_1344.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 69 | r | 5e64df12b558848c213941e5193dafed query02_query04_1344.qdimacs 343 564 |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/spread.R
\name{spread_}
\alias{spread_}
\title{Standard-evaluation version of \code{spread}.}
\usage{
spread_(data, key_col, value_col, fill = NA, convert = FALSE, drop = TRUE)
}
\arguments{
\item{data}{A data frame.}
\item{key_col, value_col}{Strings giving names of key and value cols.}
\item{fill}{If set, missing values will be replaced with this value. Note
that there are two types of missingness in the input: explicit missing
values (i.e. \code{NA}), and implicit missings, rows that simply aren't
present. Both types of missing value will be replaced by \code{fill}.}
\item{convert}{If \code{TRUE}, \code{\link{type.convert}} with \code{asis =
TRUE} will be run on each of the new columns. This is useful if the value
column was a mix of variables that was coerced to a string. If the class of
the value column was factor or date, note that will not be true of the new
columns that are produced, which are coerced to character before type
conversion.}
\item{drop}{If \code{FALSE}, will keep factor levels that don't appear in the
data, filling in missing combinations with \code{fill}.}
}
\description{
This is a S3 generic.
}
\keyword{internal}
| /man/spread_.Rd | no_license | joshkatz/tidyr | R | false | true | 1,238 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/spread.R
\name{spread_}
\alias{spread_}
\title{Standard-evaluation version of \code{spread}.}
\usage{
spread_(data, key_col, value_col, fill = NA, convert = FALSE, drop = TRUE)
}
\arguments{
\item{data}{A data frame.}
\item{key_col, value_col}{Strings giving names of key and value cols.}
\item{fill}{If set, missing values will be replaced with this value. Note
that there are two types of missingness in the input: explicit missing
values (i.e. \code{NA}), and implicit missings, rows that simply aren't
present. Both types of missing value will be replaced by \code{fill}.}
\item{convert}{If \code{TRUE}, \code{\link{type.convert}} with \code{asis =
TRUE} will be run on each of the new columns. This is useful if the value
column was a mix of variables that was coerced to a string. If the class of
the value column was factor or date, note that will not be true of the new
columns that are produced, which are coerced to character before type
conversion.}
\item{drop}{If \code{FALSE}, will keep factor levels that don't appear in the
data, filling in missing combinations with \code{fill}.}
}
\description{
This is a S3 generic.
}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/coeff_lasso.R
\name{lasso_sg}
\alias{lasso_sg}
\title{Coefficient path (sg)}
\usage{
lasso_sg(lambdas, x, y, beta0, ts, ...)
}
\arguments{
\item{lambdas}{Values of the penalty term}
\item{x}{Predictors}
\item{y}{Response}
\item{beta0}{Initial guess of the regression coefficients}
\item{ts}{Stepsize for optimization method}
}
\description{
Coefficient path (sg)
}
| /man/lasso_sg.Rd | no_license | mcmtroffaes/bootlasso | R | false | true | 447 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/coeff_lasso.R
\name{lasso_sg}
\alias{lasso_sg}
\title{Coefficient path (sg)}
\usage{
lasso_sg(lambdas, x, y, beta0, ts, ...)
}
\arguments{
\item{lambdas}{Values of the penalty term}
\item{x}{Predictors}
\item{y}{Response}
\item{beta0}{Initial guess of the regression coefficients}
\item{ts}{Stepsize for optimization method}
}
\description{
Coefficient path (sg)
}
|
## Load the ODP Site 980 benthic isotope record (LR04 compilation).
## Fix: read via a full file path instead of the original setwd()/getwd()
## round-trip; changing the working directory is fragile (an error in
## read.table would leave the session stranded in the wrong directory).
fpath <- file.path('..', 'orig', 'LR04', 'ODP980')
ODP980 <- read.table(file.path(fpath, "ODP980.txt"), skip = 65, header = FALSE,
                     col.names = c("Depth.cm", "Age.yrs",
                                   "Cwuellerstorfid13C.PDB",
                                   "Cwuellerstorfid18O.PDB"),
                     sep = "")
| /data/ODP980.R | permissive | mcrucifix/pleistocene | R | false | false | 242 | r | fpath <- file.path('..', 'orig', 'LR04', 'ODP980')
currwd <- getwd()
setwd(fpath)
ODP980 <-read.table("ODP980.txt",skip=65,header=FALSE,col.names=c("Depth.cm","Age.yrs","Cwuellerstorfid13C.PDB","Cwuellerstorfid18O.PDB"),sep="")
setwd(currwd)
|
# Set the working directory so source() below can find the function file.
# NOTE(review): the hard-coded absolute path makes this script
# machine-specific; a project-relative path would be more portable.
# (Rename the user-name segment of the path for other machines.)
getwd()
setwd("C:/Users/miked/OneDrive/Statistics/R/Projects/nuiphao/Grinding-screen-panel-comparison/2014/code/Functions")
# currwd
getwd()
# Make some example data: the integers 1 through 10.
data<- seq(from=1, to=10, by=1)
data
# Import the function definitions from f_myfirstfunction.R into this session.
source("f_myfirstfunction.R")
# Call the first sourced function on the data.
my_function(data)
# Call the second sourced function and keep its result;
# presumably times2() doubles each element -- TODO confirm in the sourced file.
data2 <- times2(data)
#rename "your User Name here" based on your user name
#example: owner, Emily, Bill
getwd()
setwd("C:/Users/miked/OneDrive/Statistics/R/Projects/nuiphao/Grinding-screen-panel-comparison/2014/code/Functions")
# currwd
getwd()
#make some data
data<- seq(from=1, to=10, by=1)
data
#import the function
source("f_myfirstfunction.R")
#call the function
my_function(data)
#call the function
data2 <- times2(data)
|
## Fit Bayesian models (brms and raw Stan) to the simulated AR(1) WASH data.
library(brms)
library(dplyr)
library(mvtnorm)
library(rstan)
set.seed(7777)
## Reuse the cached simulation when available, otherwise regenerate it.
## generateAR1.rda / generateAR1.R are expected to provide sim_dflist
## (and, presumably, betas_df and covmat_df used in save() below -- TODO confirm).
if (file.exists("generateAR1.rda")){
	load("generateAR1.rda")
} else{
	source("generateAR1.R")
}
sim_data <- sim_dflist[[1]]
print(sim_data, n = 100)
## Long format for non-bayesian methods
## NOTE(review): gather() is from tidyr, which is not attached above --
## confirm tidyr is loaded elsewhere or switch to tidyr::pivot_longer().
services <- c("y1", "y2", "y3")
long_df <- (sim_data
	%>% select(c("hhid", "years", "wealthindex", services))
	%>% gather(service, status, services)
)
# Fitting on continous outcome: Gaussian model for y1 with household and
# year random intercepts and AR(1) residual autocorrelation.
brms1 <- brm(y1 ~ wealthindex + (1|hhid) + (1|years)
	, data = sim_data
	, chains = 2
	, cores = 4
	, autocor=cor_ar(formula = ~1, p = 1, cov = FALSE)
)
## Multivariate (three-response) Gaussian version of the same model.
brms2 <- brm(mvbind(y1, y2, y3) ~ wealthindex + (1|hhid) + (1|years)
	, data = sim_data
	, chains = 2
	, cores = 4
	, autocor=cor_ar(formula = ~1, p = 1, cov = FALSE)
)
## Brms binary response
## Weakly-informative priors declared per response (y1bin..y3bin):
## normal(0, 1) on regression coefficients, half-Cauchy(0, 5) on group SDs.
priors <- c(prior(normal(0, 1), class = b, resp = y1bin)
	, prior(normal(0, 1), class = b, resp = y2bin)
	, prior(normal(0, 1), class = b, resp = y3bin)
	, prior(normal(0, 1), class = b, coef = intercept, resp = y1bin)
	, prior(normal(0, 1), class = b, coef = intercept, resp = y2bin)
	, prior(normal(0, 1), class = b, coef = intercept, resp = y3bin)
	, prior(normal(0, 1), class = b, coef = wealthindex, resp = y1bin)
	, prior(normal(0, 1), class = b, coef = wealthindex, resp = y2bin)
	, prior(normal(0, 1), class = b, coef = wealthindex, resp = y3bin)
	, prior(cauchy(0, 5), class = sd, resp = y1bin)
	, prior(cauchy(0, 5), class = sd, resp = y2bin)
	, prior(cauchy(0, 5), class = sd, resp = y3bin)
	, prior(cauchy(0, 5), class = sd, group = hhid, resp = y1bin)
	, prior(cauchy(0, 5), class = sd, group = hhid, resp = y2bin)
	, prior(cauchy(0, 5), class = sd, group = hhid, resp = y3bin)
	, prior(cauchy(0, 5), class = sd, coef = Intercept, group = hhid, resp = y1bin)
	, prior(cauchy(0, 5), class = sd, coef = Intercept, group = hhid, resp = y2bin)
	, prior(cauchy(0, 5), class = sd, coef = Intercept, group = hhid, resp = y3bin)
	, prior(cauchy(0, 5), class = sd, group = years, resp = y1bin)
	, prior(cauchy(0, 5), class = sd, group = years, resp = y2bin)
	, prior(cauchy(0, 5), class = sd, group = years, resp = y3bin)
	, prior(cauchy(0, 5), class = sd, coef = Intercept, group = years, resp = y1bin)
	, prior(cauchy(0, 5), class = sd, coef = Intercept, group = years, resp = y2bin)
	, prior(cauchy(0, 5), class = sd, coef = Intercept, group = years, resp = y3bin)
	)
## Multivariate logistic model for the three binary indicators; "0 + intercept"
## puts the intercept under the b class so the normal(0, 1) prior applies to it.
brmsbin <- brm(mvbind(y1bin, y2bin, y3bin) ~ 0 + intercept + wealthindex + (1|hhid) + (1|years)
	, data = sim_data
	, chains = 2
	, cores = 4
	, family = list(bernoulli(link = "logit")
		, bernoulli(link = "logit")
		, bernoulli(link = "logit")
	)
	, prior = priors
	, autocor=cor_ar(formula = ~hhid|years, p = 1, cov = TRUE)
)
### Stan model: hand-written multivariate probit on the binary responses.
stan_df <- (sim_data
#	%>% filter(years==2007)
	%>% select(wealthindex, y1bin, y2bin, y3bin)
)
## Data list matching the declarations in mvt_probit.stan:
## N obs, K predictors (intercept + wealthindex), D responses.
standata <- list(
	N=nrow(stan_df)
	, K=2
	, D=ncol(stan_df)-1
	, y=cbind(stan_df$y1bin, stan_df$y2bin, stan_df$y3bin)
	, x=cbind(1, stan_df$wealthindex)
)
rt <- stanc("mvt_probit.stan")
sm <- stan_model(stanc_ret = rt, verbose=FALSE)
stan1 <- sampling(sm
	, data = standata
	, chains = 2
	, cores = 4
	, iter = 2000
	, thin = 1
	, seed = 101
)
print(stan1, pars=c("beta"))
## NOTE(review): betas_df and covmat_df are not defined in this script;
## presumably they come from generateAR1.rda -- verify before running.
save(file = "fitting.rda"
	, brms1
	, brms2
	, brmsbin
	, stan1
	, betas_df
	, covmat_df
	)
| /fitting.R | no_license | CYGUBICKO/washstan | R | false | false | 3,335 | r | library(brms)
library(dplyr)
library(mvtnorm)
library(rstan)
set.seed(7777)
if (file.exists("generateAR1.rda")){
load("generateAR1.rda")
} else{
source("generateAR1.R")
}
sim_data <- sim_dflist[[1]]
print(sim_data, n = 100)
## Long format for non-bayesian methods
services <- c("y1", "y2", "y3")
long_df <- (sim_data
%>% select(c("hhid", "years", "wealthindex", services))
%>% gather(service, status, services)
)
# Fitting on continous outcome
brms1 <- brm(y1 ~ wealthindex + (1|hhid) + (1|years)
, data = sim_data
, chains = 2
, cores = 4
, autocor=cor_ar(formula = ~1, p = 1, cov = FALSE)
)
brms2 <- brm(mvbind(y1, y2, y3) ~ wealthindex + (1|hhid) + (1|years)
, data = sim_data
, chains = 2
, cores = 4
, autocor=cor_ar(formula = ~1, p = 1, cov = FALSE)
)
## Brms binary response
priors <- c(prior(normal(0, 1), class = b, resp = y1bin)
, prior(normal(0, 1), class = b, resp = y2bin)
, prior(normal(0, 1), class = b, resp = y3bin)
, prior(normal(0, 1), class = b, coef = intercept, resp = y1bin)
, prior(normal(0, 1), class = b, coef = intercept, resp = y2bin)
, prior(normal(0, 1), class = b, coef = intercept, resp = y3bin)
, prior(normal(0, 1), class = b, coef = wealthindex, resp = y1bin)
, prior(normal(0, 1), class = b, coef = wealthindex, resp = y2bin)
, prior(normal(0, 1), class = b, coef = wealthindex, resp = y3bin)
, prior(cauchy(0, 5), class = sd, resp = y1bin)
, prior(cauchy(0, 5), class = sd, resp = y2bin)
, prior(cauchy(0, 5), class = sd, resp = y3bin)
, prior(cauchy(0, 5), class = sd, group = hhid, resp = y1bin)
, prior(cauchy(0, 5), class = sd, group = hhid, resp = y2bin)
, prior(cauchy(0, 5), class = sd, group = hhid, resp = y3bin)
, prior(cauchy(0, 5), class = sd, coef = Intercept, group = hhid, resp = y1bin)
, prior(cauchy(0, 5), class = sd, coef = Intercept, group = hhid, resp = y2bin)
, prior(cauchy(0, 5), class = sd, coef = Intercept, group = hhid, resp = y3bin)
, prior(cauchy(0, 5), class = sd, group = years, resp = y1bin)
, prior(cauchy(0, 5), class = sd, group = years, resp = y2bin)
, prior(cauchy(0, 5), class = sd, group = years, resp = y3bin)
, prior(cauchy(0, 5), class = sd, coef = Intercept, group = years, resp = y1bin)
, prior(cauchy(0, 5), class = sd, coef = Intercept, group = years, resp = y2bin)
, prior(cauchy(0, 5), class = sd, coef = Intercept, group = years, resp = y3bin)
)
brmsbin <- brm(mvbind(y1bin, y2bin, y3bin) ~ 0 + intercept + wealthindex + (1|hhid) + (1|years)
, data = sim_data
, chains = 2
, cores = 4
, family = list(bernoulli(link = "logit")
, bernoulli(link = "logit")
, bernoulli(link = "logit")
)
, prior = priors
, autocor=cor_ar(formula = ~hhid|years, p = 1, cov = TRUE)
)
### Stan model
stan_df <- (sim_data
# %>% filter(years==2007)
%>% select(wealthindex, y1bin, y2bin, y3bin)
)
standata <- list(
N=nrow(stan_df)
, K=2
, D=ncol(stan_df)-1
, y=cbind(stan_df$y1bin, stan_df$y2bin, stan_df$y3bin)
, x=cbind(1, stan_df$wealthindex)
)
rt <- stanc("mvt_probit.stan")
sm <- stan_model(stanc_ret = rt, verbose=FALSE)
stan1 <- sampling(sm
, data = standata
, chains = 2
, cores = 4
, iter = 2000
, thin = 1
, seed = 101
)
print(stan1, pars=c("beta"))
save(file = "fitting.rda"
, brms1
, brms2
, brmsbin
, stan1
, betas_df
, covmat_df
)
|
#' Standardize finaltable by amount or row total
#'
#' Divides peak areas by the amount (biomass, counts) of each floral sample in metadata, or by the sample sum to create percentages.
#' @param finaltable the wide data frame with samples in rows and compound names in columns, containing peak areas for floral samples only
#' @param metadata the data frame that contains meta data about the group, type, and other attributes of each sample
#' @param percent if TRUE, divide by row sums instead of amount
#' @return finaltable
#' @examples
#' data(GCMSfloral)
#' standardize_finaltable(finaltable, metadata, percent = FALSE)
#' @export
standardize_finaltable <- function(finaltable, metadata, percent = FALSE) {
    # Bug fix: ifelse() evaluates its test elementwise, so with the scalar
    # `percent` it returned only the FIRST divisor; indexing that length-1
    # vector by row() then produced NA for every row after the first.
    # A scalar condition needs a plain if/else.
    divisor <- if (percent) {
        rowSums(finaltable)
    } else {
        metadata[metadata$type == "floral", "amount"]
    }
    # Expand the per-sample divisor to the shape of finaltable and divide.
    return(finaltable / divisor[row(finaltable)])
}
| /R/standardize_finaltable.R | no_license | jmpowers/bouquet | R | false | false | 837 | r | #' Standardize finaltable by amount or row total
#'
#' Divides peak areas by the amount (biomass, counts) of each floral sample in metadata, or by the sample sum to create percentages.
#' @param finaltable the wide data frame with samples in rows and compound names in columns, containing peak areas for floral samples only
#' @param metadata the data frame that contains meta data about the group, type, and other attributes of each sample
#' @param percent if TRUE, divide by row sums instead of amount
#' @return finaltable
#' @examples
#' data(GCMSfloral)
#' standardize_finaltable(finaltable, metadata, percent = FALSE)
#' @export
standardize_finaltable <- function(finaltable, metadata, percent = FALSE) {
return(finaltable / ifelse(percent, rowSums(finaltable), metadata[metadata$type == "floral","amount"])[row(finaltable)])
}
|
## Exploratory plot 2: Global Active Power over the two target days.
## Read the full UCI household power file; "?" encodes missing values.
x<-read.table("household_power_consumption.txt",header=TRUE, sep=";", na.strings="?", colClasses=c(Date='factor', Time='factor', rep("numeric",7)))
## Combine Date and Time into a single POSIXlt timestamp for the x-axis.
time<-paste(x$Date, x$Time)
x$Time <- strptime(time, "%d/%m/%Y %H:%M:%S")
x$Date <- as.Date(x$Date, "%d/%m/%Y")
## Keep only observations from 2007-02-01 and 2007-02-02.
x <- subset(x, x$Date<="2007-02-02")
x <- subset(x, x$Date>="2007-02-01")
## Render the line plot to a 480x480 PNG.
png(file="plot2.png", width=480, height=480)
plot(x$Time, x$Global_active_power, type="l", xlab="", ylab="Global Active Power (kilowatts)")
dev.off()
time<-paste(x$Date, x$Time)
x$Time <- strptime(time, "%d/%m/%Y %H:%M:%S")
x$Date <- as.Date(x$Date, "%d/%m/%Y")
x <- subset(x, x$Date<="2007-02-02")
x <- subset(x, x$Date>="2007-02-01")
png(file="plot2.png", width=480, height=480)
plot(x$Time, x$Global_active_power, type="l", xlab="", ylab="Global Active Power (kilowatts)")
dev.off() |
# Load the required packages (quantmod pulls quotes from Yahoo Finance).
#install.packages("quantmod")
library(quantmod)
# Download daily quotes for Exxon Mobil, Volkswagen and Tencent Holdings.
getSymbols("XOM", from="2016-01-01", to="2019-10-02")
getSymbols("VOW3.DE", from="2016-01-01", to="2019-10-02")
getSymbols("TCEHY", from="2016-01-01", to="2019-10-02")
# Extract adjusted close prices as plain numeric vectors.
prices_exxon_mobil <- as.numeric(Ad(XOM))
prices_volkswagen <- as.numeric(Ad(VOW3.DE))
prices_tencent_holdings <- as.numeric(Ad(TCEHY))
# Price charts (Russian titles: "share price of ...").
plot(prices_exxon_mobil, type="l", main="Цена акции Exxon Mobil")
plot(prices_volkswagen, type="l", main="Цена акции Volkswagen")
plot(prices_tencent_holdings, type="l", main="Цена акции Tencent Holdings")
# Holding-period return over the whole sample: last price / first price - 1.
# NOTE(review): variable names carry the historical typo "terurn"; kept as-is
# because later sections reference these names.
annual_terurn_exxon_mobil <- tail(prices_exxon_mobil, 1)/head(prices_exxon_mobil, 1) - 1
annual_terurn_volkswagen <- tail(prices_volkswagen, 1)/head(prices_volkswagen, 1) - 1
annual_terurn_tencent_holdings <- tail(prices_tencent_holdings, 1)/head(prices_tencent_holdings, 1) - 1
annual_return <- c(annual_terurn_exxon_mobil,
                   annual_terurn_volkswagen,
                   annual_terurn_tencent_holdings)
annual_return
# Daily simple returns: (P_t - P_{t-1}) / P_{t-1}.
ret_exxon_mobil <- diff(prices_exxon_mobil)/head(prices_exxon_mobil, -1)
ret_volkswagen <- diff(prices_volkswagen)/head(prices_volkswagen, -1)
ret_tencent_holdings <- diff(prices_tencent_holdings)/head(prices_tencent_holdings, -1)
# Sample mean of daily returns per stock.
mean_of_returns_exxon_mobile <- mean(ret_exxon_mobil)
mean_of_returns_volkswagen <- mean(ret_volkswagen)
mean_of_returns_tencent_holdings <- mean(ret_tencent_holdings)
# Sample standard deviation (daily volatility) per stock.
standart_deviation_of_returns_exxon_mobile <- sd(ret_exxon_mobil)
standart_deviation_of_returns_volkswagen <- sd(ret_volkswagen)
standart_deviation_of_returns_tencent_holdings <- sd(ret_tencent_holdings)
mean_of_returns <- c(mean_of_returns_exxon_mobile,
                     mean_of_returns_volkswagen,
                     mean_of_returns_tencent_holdings)
standart_deviation_of_returns <- c(standart_deviation_of_returns_exxon_mobile,
                                   standart_deviation_of_returns_volkswagen,
                                   standart_deviation_of_returns_tencent_holdings)
mean_of_returns
standart_deviation_of_returns
# Sharpe ratio with the risk-free rate taken as zero: mean / sd of daily returns.
Sharpe_ratio <- mean_of_returns/standart_deviation_of_returns
Sharpe_ratio
# Value-at-Risk: the 5% empirical quantile of daily returns, i.e. with 95%
# confidence the one-day return will not be worse than this value.
# NOTE(review): indexing by 0.05*length(...) truncates to an integer and
# returns an empty result for samples shorter than 20; quantile() would be
# the robust alternative -- confirm before reuse on short samples.
VaR_exxon_mobil <- sort(ret_exxon_mobil)[0.05*length(ret_exxon_mobil)]
VaR_volkswagen <- sort(ret_volkswagen)[0.05*length(ret_volkswagen)]
VaR_tencent_holdings <- sort(ret_tencent_holdings)[0.05*length(ret_tencent_holdings)]
VaR <- c(VaR_exxon_mobil,
         VaR_volkswagen,
         VaR_tencent_holdings)
# Expected shortfall: the average of the worst outcomes, i.e. the mean of
# the returns lying at or below the 5% quantile used for VaR.
ES_exxon_mobil <- mean(ret_exxon_mobil[ret_exxon_mobil <= quantile(ret_exxon_mobil, 0.05)])
ES_volkswagen <- mean(ret_volkswagen[ret_volkswagen <= quantile(ret_volkswagen, 0.05)])
ES_tencent_holdings <- mean(ret_tencent_holdings[ret_tencent_holdings <= quantile(ret_tencent_holdings, 0.05)])
ES <- c(ES_exxon_mobil,
        ES_volkswagen,
        ES_tencent_holdings)
# Rolling historical VaR curve for Exxon Mobil.
N <- 100 # Length of the training window.
test_exxon_mobil <- ret_exxon_mobil[(N+1):length(ret_exxon_mobil)] # Test sample
VAR_exxon_mobil <- rep(0, length(test_exxon_mobil)) # Preallocate with zeros.
# For each test day the VaR estimate is the 5% empirical quantile of the
# preceding N returns.
for (i in (N+1):length(ret_exxon_mobil)){
  train_exxon_mobil <- ret_exxon_mobil[(i-N):(i-1)]
  VAR_exxon_mobil[i-N] <- quantile(train_exxon_mobil, 0.05)
}
plot(test_exxon_mobil, type="l", main = "Кривая VaR для Exxon Mobil")
lines(VAR_exxon_mobil, col="red")
# Rolling historical VaR curve for Volkswagen.
N <- 100 # Length of the training window.
test_volkswagen <- ret_volkswagen[(N+1):length(ret_volkswagen)] # Test sample
VAR_volkswagen <- rep(0, length(test_volkswagen)) # Preallocate with zeros.
for (i in (N+1):length(ret_volkswagen)){
  train_volkswagen <- ret_volkswagen[(i-N):(i-1)]
  VAR_volkswagen[i-N] <- quantile(train_volkswagen, 0.05)
}
plot(test_volkswagen, type="l", main = "Кривая VaR для Volkswagen")
lines(VAR_volkswagen, col="blue")
# Rolling historical VaR curve for Tencent Holdings.
N <- 100 # Length of the training window.
test_tencent_holdings <- ret_tencent_holdings[(N+1):length(ret_tencent_holdings)] # Test sample
VAR_tencent_holdings <- rep(0, length(test_tencent_holdings)) # Preallocate with zeros.
for (i in (N+1):length(ret_tencent_holdings)){
  train_tencent_holdings <- ret_tencent_holdings[(i-N):(i-1)]
  VAR_tencent_holdings[i-N] <- quantile(train_tencent_holdings, 0.05)
}
plot(test_tencent_holdings, type="l", main = "Кривая VaR для Tencent Holdings")
lines(VAR_tencent_holdings, col="green")
#Построение кривой VaR для Exxon Mobil при предпосылке о нормальности распределения доходностей
quantile(ret_exxon_mobil, 0.05)
qnorm(0.05, mean=mean(ret_exxon_mobil), sd=sd(ret_exxon_mobil))
N <- 100
test_exxon_mobil_normal <- ret_exxon_mobil[(N+1):length(ret_exxon_mobil)]
VAR_exxon_mobil_normal <- rep(0, length(test_exxon_mobil_normal))
for (i in (N+1):length(ret_exxon_mobil)){
train_exxon_mobil_normal <- ret_exxon_mobil[(i-N):(i-1)]
VAR_exxon_mobil_normal[i-N] <- qnorm(0.05, mean=mean(train_exxon_mobil_normal), sd=sd(train_exxon_mobil_normal))
}
plot(test_exxon_mobil_normal, type="l", main = "VaR Exxon Mobil для нормальных доходностей")
lines(VAR_exxon_mobil_normal, col="red")
#Тест Купика
L_exxon_mobil_normal <- length(test_exxon_mobil_normal)
K_exxon_mobil_normal <- sum(test_exxon_mobil_normal < VAR_exxon_mobil_normal)
a0_exxon_mobil_normal <- K_exxon_mobil_normal/L_exxon_mobil_normal
a <- 0.05
S_exxon_mobil_normal <- 2*log( (1-a0_exxon_mobil_normal)^(L_exxon_mobil_normal-
K_exxon_mobil_normal) *
a0_exxon_mobil_normal^K_exxon_mobil_normal ) - 2*log( (1-a)^(L_exxon_mobil_normal-K_exxon_mobil_normal) * a^K_exxon_mobil_normal)
pval_exxon_mobile_normal <- 1 - pchisq(S_exxon_mobil_normal, 1)
pval_exxon_mobile_normal
#Построение кривой VaR для Volkswagen при предпосылке о нормальности распределения доходностей
quantile(ret_volkswagen, 0.05)
qnorm(0.05, mean=mean(ret_volkswagen), sd=sd(ret_volkswagen))
N <- 100
test_volkswagen_normal <- ret_volkswagen[(N+1):length(ret_volkswagen)]
VAR_volkswagen_normal <- rep(0, length(test_volkswagen_normal))
for (i in (N+1):length(ret_volkswagen)){
train_volkswagen_normal <- ret_volkswagen[(i-N):(i-1)]
VAR_volkswagen_normal[i-N] <- qnorm(0.05, mean=mean(train_volkswagen_normal), sd=sd(train_volkswagen_normal))
}
plot(test_volkswagen_normal, type="l", main = "VaR Volkswagen для нормальных доходностей")
lines(VAR_volkswagen_normal, col="blue")
#Тест Купика
L_volkswagen_normal <- length(test_volkswagen_normal)
K_volkswagen_normal <- sum(test_volkswagen_normal < VAR_volkswagen_normal)
a0_volkswagen_normal <- K_volkswagen_normal/L_volkswagen_normal
a <- 0.05
S_volkswagen_normal <- 2*log( (1-a0_volkswagen_normal)^(L_volkswagen_normal-K_volkswagen_normal)
* a0_volkswagen_normal^K_volkswagen_normal) - 2*log( (1-a)^(L_volkswagen_normal
-K_volkswagen_normal)
* a^K_volkswagen_normal )
pval_volkswagen_normal <- 1 - pchisq(S_volkswagen_normal, 1)
pval_volkswagen_normal
#Построение кривой VaR для Tencent Holdings при предпосылке о нормальности распределения доходностей
quantile(ret_tencent_holdings, 0.05)
qnorm(0.05, mean=mean(ret_tencent_holdings), sd=sd(ret_tencent_holdings))
N <- 100
test_tencent_holdings_normal <- ret_tencent_holdings[(N+1):length(ret_tencent_holdings)]
VAR_tencent_holdings_normal <- rep(0, length(test_tencent_holdings_normal))
for (i in (N+1):length(ret_tencent_holdings)){
train_tencent_holdings_normal <- ret_tencent_holdings[(i-N):(i-1)]
VAR_tencent_holdings_normal[i-N] <- qnorm(0.05, mean=mean(train_tencent_holdings_normal), sd=sd(train_tencent_holdings_normal))
}
plot(test_tencent_holdings_normal, type="l", main = "VaR Tencent Holdings для нормальных доходностей")
lines(VAR_tencent_holdings_normal, col="blue")
#Тест Купика
L_tencent_holdings_normal <- length(test_tencent_holdings_normal)
K_tencent_holdings_normal <- sum(test_tencent_holdings_normal < VAR_tencent_holdings_normal)
a0_tencent_holdings_normal <- K_tencent_holdings_normal/L_tencent_holdings_normal
a <- 0.05
S_tencent_holdings_normal <- 2*log( (1-a0_tencent_holdings_normal)^(L_tencent_holdings_normal-K_tencent_holdings_normal)
* a0_tencent_holdings_normal^K_tencent_holdings_normal) - 2*log( (1-a)^(L_tencent_holdings_normal
-K_tencent_holdings_normal)
* a^K_tencent_holdings_normal )
pval_tencent_holdings_normal <- 1 - pchisq(S_tencent_holdings_normal, 1)
pval_tencent_holdings_normal
# Generalized hyperbolic (GHYP) distribution fitting.
#install.packages("ghyp")
library(ghyp)
# Fit a generalized hyperbolic distribution to Exxon Mobil returns.
ghyp_dist_exxon_mobil <- fit.ghypuv(ret_exxon_mobil, silent = TRUE)
summary(ghyp_dist_exxon_mobil)
hist(ghyp_dist_exxon_mobil)
qqghyp(ghyp_dist_exxon_mobil)
# Pick the best of ghyp/hyp/t/gauss by AIC and take its 5% quantile.
aic_exxon_mobil <- stepAIC.ghyp(ret_exxon_mobil, dist=c("ghyp", "hyp", "t", "gauss"), silent=TRUE)
aic_exxon_mobil$best.model
qghyp(0.05, object = aic_exxon_mobil$best.model)
# Rolling one-step-ahead 5% VaR: refit on every 100-day window (slow, ~1 min).
N <- 100
test_exxon_mobil_ghyp <- ret_exxon_mobil[(N+1):length(ret_exxon_mobil)]
VaR_exxon_mobil_ghyp <- rep(0, length(test_exxon_mobil_ghyp))
for (i in (N+1):length(ret_exxon_mobil)){
  train_exxon_mobil_ghyp <- ret_exxon_mobil[(i-N):(i-1)]
  model_exxon_mobil_ghyp <- stepAIC.ghyp(train_exxon_mobil_ghyp, dist=c("ghyp", "hyp", "t", "gauss"), silent=TRUE)$best.model
  # BUG FIX: the original called qghyp(0.05, object = model) with an
  # undefined `model`; use the model fitted on this window.
  VaR_exxon_mobil_ghyp[i-N] <- qghyp(0.05, object = model_exxon_mobil_ghyp)
}
# BUG FIX: title typo "Exxon Momil" corrected.
plot(test_exxon_mobil_ghyp, type="l", main = "Кривая VaR Exxon Mobil (GHYP) ")
lines(VaR_exxon_mobil_ghyp, col="red")
# Kupiec proportion-of-failures (POF) likelihood-ratio backtest.
L_exxon_mobil_ghyp <- length(test_exxon_mobil_ghyp)
K_exxon_mobil_ghyp <- sum(test_exxon_mobil_ghyp < VaR_exxon_mobil_ghyp)
a0_exxon_mobil_ghyp <- K_exxon_mobil_ghyp/L_exxon_mobil_ghyp
a <- 0.05
S_exxon_mobil_ghyp <- 2*log( (1-a0_exxon_mobil_ghyp)^(L_exxon_mobil_ghyp-K_exxon_mobil_ghyp) *
                               a0_exxon_mobil_ghyp^K_exxon_mobil_ghyp ) -
  2*log( (1-a)^(L_exxon_mobil_ghyp-K_exxon_mobil_ghyp) * a^K_exxon_mobil_ghyp )
pval_exxon_mobil_ghyp <- 1 - pchisq(S_exxon_mobil_ghyp, 1)
pval_exxon_mobil_ghyp
# Fit a generalized hyperbolic distribution to Volkswagen returns.
ghyp_dist_volkswagen <- fit.ghypuv(ret_volkswagen, silent = TRUE)
summary(ghyp_dist_volkswagen)
hist(ghyp_dist_volkswagen)
qqghyp(ghyp_dist_volkswagen)
# Pick the best of ghyp/hyp/t/gauss by AIC and take its 5% quantile.
aic_volkswagen <- stepAIC.ghyp(ret_volkswagen, dist=c("ghyp", "hyp", "t", "gauss"), silent=TRUE)
aic_volkswagen$best.model
qghyp(0.05, object = aic_volkswagen$best.model)
# Rolling one-step-ahead 5% VaR: refit on every 100-day window (slow, ~1 min).
N <- 100
test_volkswagen_ghyp <- ret_volkswagen[(N+1):length(ret_volkswagen)]
VaR_volkswagen_ghyp <- rep(0, length(test_volkswagen_ghyp))
for (i in (N+1):length(ret_volkswagen)){
  train_volkswagen_ghyp <- ret_volkswagen[(i-N):(i-1)]
  model_volkswagen_ghyp <- stepAIC.ghyp(train_volkswagen_ghyp, dist=c("ghyp", "hyp", "t", "gauss"), silent=TRUE)$best.model
  # BUG FIX: the original called qghyp(0.05, object = model) with an
  # undefined `model`; use the model fitted on this window.
  VaR_volkswagen_ghyp[i-N] <- qghyp(0.05, object = model_volkswagen_ghyp)
}
# BUG FIX: copy-pasted title said "Exxon Momil"; this section is Volkswagen.
plot(test_volkswagen_ghyp, type="l", main = "Кривая VaR Volkswagen (GHYP) ")
lines(VaR_volkswagen_ghyp, col="red")
# Kupiec proportion-of-failures (POF) likelihood-ratio backtest.
L_volkswagen_ghyp <- length(test_volkswagen_ghyp)
K_volkswagen_ghyp <- sum(test_volkswagen_ghyp < VaR_volkswagen_ghyp)
a0_volkswagen_ghyp <- K_volkswagen_ghyp/L_volkswagen_ghyp
a <- 0.05
S_volkswagen_ghyp <- 2*log( (1-a0_volkswagen_ghyp)^(L_volkswagen_ghyp-K_volkswagen_ghyp) *
                              a0_volkswagen_ghyp^K_volkswagen_ghyp ) -
  2*log( (1-a)^(L_volkswagen_ghyp-K_volkswagen_ghyp) * a^K_volkswagen_ghyp )
pval_volkswagen_ghyp <- 1 - pchisq(S_volkswagen_ghyp, 1)
pval_volkswagen_ghyp
# Fit a generalized hyperbolic distribution to Tencent Holdings returns.
ghyp_dist_tencent_holdings <- fit.ghypuv(ret_tencent_holdings, silent = TRUE)
summary(ghyp_dist_tencent_holdings)
hist(ghyp_dist_tencent_holdings)
qqghyp(ghyp_dist_tencent_holdings)
# Pick the best of ghyp/hyp/t/gauss by AIC and take its 5% quantile.
aic_tencent_holdings <- stepAIC.ghyp(ret_tencent_holdings, dist=c("ghyp", "hyp", "t", "gauss"), silent=TRUE)
aic_tencent_holdings$best.model
qghyp(0.05, object = aic_tencent_holdings$best.model)
# Rolling one-step-ahead 5% VaR: refit on every 100-day window (slow, ~1 min).
N <- 100
test_tencent_holdings_ghyp <- ret_tencent_holdings[(N+1):length(ret_tencent_holdings)]
VaR_tencent_holdings_ghyp <- rep(0, length(test_tencent_holdings_ghyp))
for (i in (N+1):length(ret_tencent_holdings)){
  train_tencent_holdings_ghyp <- ret_tencent_holdings[(i-N):(i-1)]
  model_tencent_holdings_ghyp <- stepAIC.ghyp(train_tencent_holdings_ghyp, dist=c("ghyp", "hyp", "t", "gauss"), silent=TRUE)$best.model
  # BUG FIX: the original called qghyp(0.05, object = model) with an
  # undefined `model`; use the model fitted on this window.
  VaR_tencent_holdings_ghyp[i-N] <- qghyp(0.05, object = model_tencent_holdings_ghyp)
}
plot(test_tencent_holdings_ghyp, type="l", main = "Кривая VaR Tencent Holdings (GHYP) ")
lines(VaR_tencent_holdings_ghyp, col="red")
# Kupiec proportion-of-failures (POF) likelihood-ratio backtest.
L_tencent_holdings_ghyp <- length(test_tencent_holdings_ghyp)
K_tencent_holdings_ghyp <- sum(test_tencent_holdings_ghyp < VaR_tencent_holdings_ghyp)
a0_tencent_holdings_ghyp <- K_tencent_holdings_ghyp/L_tencent_holdings_ghyp
a <- 0.05
S_tencent_holdings_ghyp <- 2*log( (1-a0_tencent_holdings_ghyp)^(L_tencent_holdings_ghyp-K_tencent_holdings_ghyp) *
                                    a0_tencent_holdings_ghyp^K_tencent_holdings_ghyp ) -
  2*log( (1-a)^(L_tencent_holdings_ghyp-K_tencent_holdings_ghyp) * a^K_tencent_holdings_ghyp )
pval_tencent_holdings_ghyp <- 1 - pchisq(S_tencent_holdings_ghyp, 1)
pval_tencent_holdings_ghyp
| /Mathematical Finance/Time Series analysis using R/Time series analysis. Stocks.R | no_license | liyanyan0713/Economics_Projects | R | false | false | 15,557 | r | #подгружаем необходимые пакеты
#install.packages("quantmod")
library(quantmod)
getSymbols("XOM", from="2016-01-01", to="2019-10-02")
getSymbols("VOW3.DE", from="2016-01-01", to="2019-10-02")
getSymbols("TCEHY", from="2016-01-01", to="2019-10-02")
#добавляем цены
prices_exxon_mobil <- as.numeric(Ad(XOM))
prices_volkswagen <- as.numeric(Ad(VOW3.DE))
prices_tencent_holdings <- as.numeric(Ad(TCEHY))
plot(prices_exxon_mobil, type="l", main="Цена акции Exxon Mobil")
plot(prices_volkswagen, type="l", main="Цена акции Volkswagen")
plot(prices_tencent_holdings, type="l", main="Цена акции Tencent Holdings")
#годоваядоходность
annual_terurn_exxon_mobil <- tail(prices_exxon_mobil, 1)/head(prices_exxon_mobil, 1) - 1
annual_terurn_volkswagen <- tail(prices_volkswagen, 1)/head(prices_volkswagen, 1) - 1
annual_terurn_tencent_holdings <- tail(prices_tencent_holdings, 1)/head(prices_tencent_holdings, 1) - 1
annual_return <- c(annual_terurn_exxon_mobil,
annual_terurn_volkswagen,
annual_terurn_tencent_holdings)
annual_return
ret_exxon_mobil <- diff(prices_exxon_mobil)/head(prices_exxon_mobil, -1)
ret_volkswagen <- diff(prices_volkswagen)/head(prices_volkswagen, -1)
ret_tencent_holdings <- diff(prices_tencent_holdings)/head(prices_tencent_holdings, -1)
mean_of_returns_exxon_mobile <- mean(ret_exxon_mobil)
mean_of_returns_volkswagen <- mean(ret_volkswagen)
mean_of_returns_tencent_holdings <- mean(ret_tencent_holdings)
standart_deviation_of_returns_exxon_mobile <- sd(ret_exxon_mobil)
standart_deviation_of_returns_volkswagen <- sd(ret_volkswagen)
standart_deviation_of_returns_tencent_holdings <- sd(ret_tencent_holdings)
mean_of_returns <- c(mean_of_returns_exxon_mobile,
mean_of_returns_volkswagen,
mean_of_returns_tencent_holdings)
standart_deviation_of_returns <- c(standart_deviation_of_returns_exxon_mobile,
standart_deviation_of_returns_volkswagen,
standart_deviation_of_returns_tencent_holdings)
mean_of_returns
standart_deviation_of_returns
#коэф. Шарпа
Sharpe_ratio <- mean_of_returns/standart_deviation_of_returns
Sharpe_ratio
#Value-at-Risk
#квантиль на уровне 5%, то есть доходность бумаги
#с 95% уровнем доверия будет не хуже, чем это значение
VaR_exxon_mobil <- sort(ret_exxon_mobil)[0.05*length(ret_exxon_mobil)]
VaR_volkswagen <- sort(ret_volkswagen)[0.05*length(ret_volkswagen)]
VaR_tencent_holdings <- sort(ret_tencent_holdings)[0.05*length(ret_tencent_holdings)]
VaR <- c(VaR_exxon_mobil,
VaR_volkswagen,
VaR_tencent_holdings)
#Expected shortfall - это среднее среди худшего
#в тех процентах, что левее квантиля, употреблённого в VaR
ES_exxon_mobil <- mean(ret_exxon_mobil[ret_exxon_mobil <= quantile(ret_exxon_mobil, 0.05)])
ES_volkswagen <- mean(ret_volkswagen[ret_volkswagen <= quantile(ret_volkswagen, 0.05)])
ES_tencent_holdings <- mean(ret_tencent_holdings[ret_tencent_holdings <= quantile(ret_tencent_holdings, 0.05)])
ES <- c(ES_exxon_mobil,
ES_volkswagen,
ES_tencent_holdings)
#кривая VaR для Exxon Mobil
N <- 100 # Длина тренировочной выборки.
test_exxon_mobil <- ret_exxon_mobil[(N+1):length(ret_exxon_mobil)] # Тестовая выборка
VAR_exxon_mobil <- rep(0, length(test_exxon_mobil)) # Запишем сюда пока нули.
for (i in (N+1):length(ret_exxon_mobil)){
train_exxon_mobil <- ret_exxon_mobil[(i-N):(i-1)]
VAR_exxon_mobil[i-N] <- quantile(train_exxon_mobil, 0.05)
}
plot(test_exxon_mobil, type="l", main = "Кривая VaR для Exxon Mobil")
lines(VAR_exxon_mobil, col="red")
#кривая VaR для Volkswagen
N <- 100 # Длина тренировочной выборки.
test_volkswagen <- ret_volkswagen[(N+1):length(ret_volkswagen)] # Тестовая выборка
VAR_volkswagen <- rep(0, length(test_volkswagen)) # Запишем сюда пока нули.
for (i in (N+1):length(ret_volkswagen)){
train_volkswagen <- ret_volkswagen[(i-N):(i-1)]
VAR_volkswagen[i-N] <- quantile(train_volkswagen, 0.05)
}
plot(test_volkswagen, type="l", main = "Кривая VaR для Volkswagen")
lines(VAR_volkswagen, col="blue")
#кривая VaR для Tencent Holdings
N <- 100 # Длина тренировочной выборки.
test_tencent_holdings <- ret_tencent_holdings[(N+1):length(ret_tencent_holdings)] # Тестовая выборка
VAR_tencent_holdings <- rep(0, length(test_tencent_holdings)) # Запишем сюда пока нули.
for (i in (N+1):length(ret_tencent_holdings)){
train_tencent_holdings <- ret_tencent_holdings[(i-N):(i-1)]
VAR_tencent_holdings[i-N] <- quantile(train_tencent_holdings, 0.05)
}
plot(test_tencent_holdings, type="l", main = "Кривая VaR для Tencent Holdings")
lines(VAR_tencent_holdings, col="green")
#Построение кривой VaR для Exxon Mobil при предпосылке о нормальности распределения доходностей
quantile(ret_exxon_mobil, 0.05)
qnorm(0.05, mean=mean(ret_exxon_mobil), sd=sd(ret_exxon_mobil))
N <- 100
test_exxon_mobil_normal <- ret_exxon_mobil[(N+1):length(ret_exxon_mobil)]
VAR_exxon_mobil_normal <- rep(0, length(test_exxon_mobil_normal))
for (i in (N+1):length(ret_exxon_mobil)){
train_exxon_mobil_normal <- ret_exxon_mobil[(i-N):(i-1)]
VAR_exxon_mobil_normal[i-N] <- qnorm(0.05, mean=mean(train_exxon_mobil_normal), sd=sd(train_exxon_mobil_normal))
}
plot(test_exxon_mobil_normal, type="l", main = "VaR Exxon Mobil для нормальных доходностей")
lines(VAR_exxon_mobil_normal, col="red")
#Тест Купика
L_exxon_mobil_normal <- length(test_exxon_mobil_normal)
K_exxon_mobil_normal <- sum(test_exxon_mobil_normal < VAR_exxon_mobil_normal)
a0_exxon_mobil_normal <- K_exxon_mobil_normal/L_exxon_mobil_normal
a <- 0.05
S_exxon_mobil_normal <- 2*log( (1-a0_exxon_mobil_normal)^(L_exxon_mobil_normal-
K_exxon_mobil_normal) *
a0_exxon_mobil_normal^K_exxon_mobil_normal ) - 2*log( (1-a)^(L_exxon_mobil_normal-K_exxon_mobil_normal) * a^K_exxon_mobil_normal)
pval_exxon_mobile_normal <- 1 - pchisq(S_exxon_mobil_normal, 1)
pval_exxon_mobile_normal
#Построение кривой VaR для Volkswagen при предпосылке о нормальности распределения доходностей
quantile(ret_volkswagen, 0.05)
qnorm(0.05, mean=mean(ret_volkswagen), sd=sd(ret_volkswagen))
N <- 100
test_volkswagen_normal <- ret_volkswagen[(N+1):length(ret_volkswagen)]
VAR_volkswagen_normal <- rep(0, length(test_volkswagen_normal))
for (i in (N+1):length(ret_volkswagen)){
train_volkswagen_normal <- ret_volkswagen[(i-N):(i-1)]
VAR_volkswagen_normal[i-N] <- qnorm(0.05, mean=mean(train_volkswagen_normal), sd=sd(train_volkswagen_normal))
}
plot(test_volkswagen_normal, type="l", main = "VaR Volkswagen для нормальных доходностей")
lines(VAR_volkswagen_normal, col="blue")
#Тест Купика
L_volkswagen_normal <- length(test_volkswagen_normal)
K_volkswagen_normal <- sum(test_volkswagen_normal < VAR_volkswagen_normal)
a0_volkswagen_normal <- K_volkswagen_normal/L_volkswagen_normal
a <- 0.05
S_volkswagen_normal <- 2*log( (1-a0_volkswagen_normal)^(L_volkswagen_normal-K_volkswagen_normal)
* a0_volkswagen_normal^K_volkswagen_normal) - 2*log( (1-a)^(L_volkswagen_normal
-K_volkswagen_normal)
* a^K_volkswagen_normal )
pval_volkswagen_normal <- 1 - pchisq(S_volkswagen_normal, 1)
pval_volkswagen_normal
#Построение кривой VaR для Tencent Holdings при предпосылке о нормальности распределения доходностей
quantile(ret_tencent_holdings, 0.05)
qnorm(0.05, mean=mean(ret_tencent_holdings), sd=sd(ret_tencent_holdings))
N <- 100
test_tencent_holdings_normal <- ret_tencent_holdings[(N+1):length(ret_tencent_holdings)]
VAR_tencent_holdings_normal <- rep(0, length(test_tencent_holdings_normal))
for (i in (N+1):length(ret_tencent_holdings)){
train_tencent_holdings_normal <- ret_tencent_holdings[(i-N):(i-1)]
VAR_tencent_holdings_normal[i-N] <- qnorm(0.05, mean=mean(train_tencent_holdings_normal), sd=sd(train_tencent_holdings_normal))
}
plot(test_tencent_holdings_normal, type="l", main = "VaR Tencent Holdings для нормальных доходностей")
lines(VAR_tencent_holdings_normal, col="blue")
#Тест Купика
L_tencent_holdings_normal <- length(test_tencent_holdings_normal)
K_tencent_holdings_normal <- sum(test_tencent_holdings_normal < VAR_tencent_holdings_normal)
a0_tencent_holdings_normal <- K_tencent_holdings_normal/L_tencent_holdings_normal
a <- 0.05
S_tencent_holdings_normal <- 2*log( (1-a0_tencent_holdings_normal)^(L_tencent_holdings_normal-K_tencent_holdings_normal)
* a0_tencent_holdings_normal^K_tencent_holdings_normal) - 2*log( (1-a)^(L_tencent_holdings_normal
-K_tencent_holdings_normal)
* a^K_tencent_holdings_normal )
pval_tencent_holdings_normal <- 1 - pchisq(S_tencent_holdings_normal, 1)
pval_tencent_holdings_normal
# Generalized hyperbolic (GHYP) distribution fitting.
#install.packages("ghyp")
library(ghyp)
# Fit a generalized hyperbolic distribution to Exxon Mobil returns.
ghyp_dist_exxon_mobil <- fit.ghypuv(ret_exxon_mobil, silent = TRUE)
summary(ghyp_dist_exxon_mobil)
hist(ghyp_dist_exxon_mobil)
qqghyp(ghyp_dist_exxon_mobil)
# Pick the best of ghyp/hyp/t/gauss by AIC and take its 5% quantile.
aic_exxon_mobil <- stepAIC.ghyp(ret_exxon_mobil, dist=c("ghyp", "hyp", "t", "gauss"), silent=TRUE)
aic_exxon_mobil$best.model
qghyp(0.05, object = aic_exxon_mobil$best.model)
# Rolling one-step-ahead 5% VaR: refit on every 100-day window (slow, ~1 min).
N <- 100
test_exxon_mobil_ghyp <- ret_exxon_mobil[(N+1):length(ret_exxon_mobil)]
VaR_exxon_mobil_ghyp <- rep(0, length(test_exxon_mobil_ghyp))
for (i in (N+1):length(ret_exxon_mobil)){
  train_exxon_mobil_ghyp <- ret_exxon_mobil[(i-N):(i-1)]
  model_exxon_mobil_ghyp <- stepAIC.ghyp(train_exxon_mobil_ghyp, dist=c("ghyp", "hyp", "t", "gauss"), silent=TRUE)$best.model
  # BUG FIX: the original called qghyp(0.05, object = model) with an
  # undefined `model`; use the model fitted on this window.
  VaR_exxon_mobil_ghyp[i-N] <- qghyp(0.05, object = model_exxon_mobil_ghyp)
}
# BUG FIX: title typo "Exxon Momil" corrected.
plot(test_exxon_mobil_ghyp, type="l", main = "Кривая VaR Exxon Mobil (GHYP) ")
lines(VaR_exxon_mobil_ghyp, col="red")
# Kupiec proportion-of-failures (POF) likelihood-ratio backtest.
L_exxon_mobil_ghyp <- length(test_exxon_mobil_ghyp)
K_exxon_mobil_ghyp <- sum(test_exxon_mobil_ghyp < VaR_exxon_mobil_ghyp)
a0_exxon_mobil_ghyp <- K_exxon_mobil_ghyp/L_exxon_mobil_ghyp
a <- 0.05
S_exxon_mobil_ghyp <- 2*log( (1-a0_exxon_mobil_ghyp)^(L_exxon_mobil_ghyp-K_exxon_mobil_ghyp) *
                               a0_exxon_mobil_ghyp^K_exxon_mobil_ghyp ) -
  2*log( (1-a)^(L_exxon_mobil_ghyp-K_exxon_mobil_ghyp) * a^K_exxon_mobil_ghyp )
pval_exxon_mobil_ghyp <- 1 - pchisq(S_exxon_mobil_ghyp, 1)
pval_exxon_mobil_ghyp
# Fit a generalized hyperbolic distribution to Volkswagen returns.
ghyp_dist_volkswagen <- fit.ghypuv(ret_volkswagen, silent = TRUE)
summary(ghyp_dist_volkswagen)
hist(ghyp_dist_volkswagen)
qqghyp(ghyp_dist_volkswagen)
# Pick the best of ghyp/hyp/t/gauss by AIC and take its 5% quantile.
aic_volkswagen <- stepAIC.ghyp(ret_volkswagen, dist=c("ghyp", "hyp", "t", "gauss"), silent=TRUE)
aic_volkswagen$best.model
qghyp(0.05, object = aic_volkswagen$best.model)
# Rolling one-step-ahead 5% VaR: refit on every 100-day window (slow, ~1 min).
N <- 100
test_volkswagen_ghyp <- ret_volkswagen[(N+1):length(ret_volkswagen)]
VaR_volkswagen_ghyp <- rep(0, length(test_volkswagen_ghyp))
for (i in (N+1):length(ret_volkswagen)){
  train_volkswagen_ghyp <- ret_volkswagen[(i-N):(i-1)]
  model_volkswagen_ghyp <- stepAIC.ghyp(train_volkswagen_ghyp, dist=c("ghyp", "hyp", "t", "gauss"), silent=TRUE)$best.model
  # BUG FIX: the original called qghyp(0.05, object = model) with an
  # undefined `model`; use the model fitted on this window.
  VaR_volkswagen_ghyp[i-N] <- qghyp(0.05, object = model_volkswagen_ghyp)
}
# BUG FIX: copy-pasted title said "Exxon Momil"; this section is Volkswagen.
plot(test_volkswagen_ghyp, type="l", main = "Кривая VaR Volkswagen (GHYP) ")
lines(VaR_volkswagen_ghyp, col="red")
# Kupiec proportion-of-failures (POF) likelihood-ratio backtest.
L_volkswagen_ghyp <- length(test_volkswagen_ghyp)
K_volkswagen_ghyp <- sum(test_volkswagen_ghyp < VaR_volkswagen_ghyp)
a0_volkswagen_ghyp <- K_volkswagen_ghyp/L_volkswagen_ghyp
a <- 0.05
S_volkswagen_ghyp <- 2*log( (1-a0_volkswagen_ghyp)^(L_volkswagen_ghyp-K_volkswagen_ghyp) *
                              a0_volkswagen_ghyp^K_volkswagen_ghyp ) -
  2*log( (1-a)^(L_volkswagen_ghyp-K_volkswagen_ghyp) * a^K_volkswagen_ghyp )
pval_volkswagen_ghyp <- 1 - pchisq(S_volkswagen_ghyp, 1)
pval_volkswagen_ghyp
# Fit a generalized hyperbolic distribution to Tencent Holdings returns.
ghyp_dist_tencent_holdings <- fit.ghypuv(ret_tencent_holdings, silent = TRUE)
summary(ghyp_dist_tencent_holdings)
hist(ghyp_dist_tencent_holdings)
qqghyp(ghyp_dist_tencent_holdings)
# Pick the best of ghyp/hyp/t/gauss by AIC and take its 5% quantile.
aic_tencent_holdings <- stepAIC.ghyp(ret_tencent_holdings, dist=c("ghyp", "hyp", "t", "gauss"), silent=TRUE)
aic_tencent_holdings$best.model
qghyp(0.05, object = aic_tencent_holdings$best.model)
# Rolling one-step-ahead 5% VaR: refit on every 100-day window (slow, ~1 min).
N <- 100
test_tencent_holdings_ghyp <- ret_tencent_holdings[(N+1):length(ret_tencent_holdings)]
VaR_tencent_holdings_ghyp <- rep(0, length(test_tencent_holdings_ghyp))
for (i in (N+1):length(ret_tencent_holdings)){
  train_tencent_holdings_ghyp <- ret_tencent_holdings[(i-N):(i-1)]
  model_tencent_holdings_ghyp <- stepAIC.ghyp(train_tencent_holdings_ghyp, dist=c("ghyp", "hyp", "t", "gauss"), silent=TRUE)$best.model
  # BUG FIX: the original called qghyp(0.05, object = model) with an
  # undefined `model`; use the model fitted on this window.
  VaR_tencent_holdings_ghyp[i-N] <- qghyp(0.05, object = model_tencent_holdings_ghyp)
}
plot(test_tencent_holdings_ghyp, type="l", main = "Кривая VaR Tencent Holdings (GHYP) ")
lines(VaR_tencent_holdings_ghyp, col="red")
# Kupiec proportion-of-failures (POF) likelihood-ratio backtest.
L_tencent_holdings_ghyp <- length(test_tencent_holdings_ghyp)
K_tencent_holdings_ghyp <- sum(test_tencent_holdings_ghyp < VaR_tencent_holdings_ghyp)
a0_tencent_holdings_ghyp <- K_tencent_holdings_ghyp/L_tencent_holdings_ghyp
a <- 0.05
S_tencent_holdings_ghyp <- 2*log( (1-a0_tencent_holdings_ghyp)^(L_tencent_holdings_ghyp-K_tencent_holdings_ghyp) *
                                    a0_tencent_holdings_ghyp^K_tencent_holdings_ghyp ) -
  2*log( (1-a)^(L_tencent_holdings_ghyp-K_tencent_holdings_ghyp) * a^K_tencent_holdings_ghyp )
pval_tencent_holdings_ghyp <- 1 - pchisq(S_tencent_holdings_ghyp, 1)
pval_tencent_holdings_ghyp
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## Create a special "matrix" object that caches its inverse.
##
## Returns a list of four closures sharing one environment:
##   set(B)        -- replace the stored matrix and invalidate the cache
##   get()         -- return the stored matrix
##   setInverse(I) -- store a computed inverse
##   getInverse()  -- return the cached inverse (NULL if not computed yet)
makeCacheMatrix <- function(A = matrix()) {
  # BUG FIX: the cache must live in this function's environment. The
  # original `M <<- NULL` created/overwrote a *global* `M`, so every cache
  # object produced by this factory shared a single inverse.
  M <- NULL
  set <- function(B) {
    A <<- B
    M <<- NULL  # a new matrix invalidates any cached inverse
  }
  get <- function() A
  setInverse <- function(I) M <<- I
  getInverse <- function() M
  list(set = set,
       get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}
## Write a short comment describing this function
## Return the inverse of the special "matrix" `A` made by makeCacheMatrix.
## If the inverse was already computed (and the matrix unchanged), the
## cached value is returned instead of recomputing it.
##
## A   : a cache object as returned by makeCacheMatrix
## ... : further arguments passed on to solve()
cacheSolve <- function(A, ...) {
  M <- A$getInverse()
  # BUG FIX: the original called the non-existent `isnull()`; the base R
  # predicate is `is.null()`.
  if (!is.null(M)) {
    message("getting cached matrix")
    return(M)
  }
  X <- A$get()
  M <- solve(X, ...)
  A$setInverse(M)
  M
}
| /cachematrix.R | no_license | lifeimay0214/ProgrammingAssignment2 | R | false | false | 650 | r | ## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## Create a special "matrix" object that caches its inverse.
##
## Returns a list of four closures sharing one environment:
##   set(B)        -- replace the stored matrix and invalidate the cache
##   get()         -- return the stored matrix
##   setInverse(I) -- store a computed inverse
##   getInverse()  -- return the cached inverse (NULL if not computed yet)
makeCacheMatrix <- function(A = matrix()) {
  # BUG FIX: the cache must live in this function's environment. The
  # original `M <<- NULL` created/overwrote a *global* `M`, so every cache
  # object produced by this factory shared a single inverse.
  M <- NULL
  set <- function(B) {
    A <<- B
    M <<- NULL  # a new matrix invalidates any cached inverse
  }
  get <- function() A
  setInverse <- function(I) M <<- I
  getInverse <- function() M
  list(set = set,
       get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}
## Write a short comment describing this function
## Return the inverse of the special "matrix" `A` made by makeCacheMatrix.
## If the inverse was already computed (and the matrix unchanged), the
## cached value is returned instead of recomputing it.
##
## A   : a cache object as returned by makeCacheMatrix
## ... : further arguments passed on to solve()
cacheSolve <- function(A, ...) {
  M <- A$getInverse()
  # BUG FIX: the original called the non-existent `isnull()`; the base R
  # predicate is `is.null()`.
  if (!is.null(M)) {
    message("getting cached matrix")
    return(M)
  }
  X <- A$get()
  M <- solve(X, ...)
  A$setInverse(M)
  M
}
|
library(ggplot2)
library(gtools)
library(reshape2)
# Draw `n` samples from a Dirichlet(alpha) distribution and plot each draw
# as a lollipop chart of its components, one facet per draw.
#   n     : number of Dirichlet draws
#   alpha : concentration parameter vector (length = number of items)
#   nrow  : number of facet rows in the grid
# Returns a ggplot object.
plot.dirichlet.data <- function(n, alpha, nrow)
{
  dat <- rdirichlet(n, alpha)
  melt.dat <- melt(dat)
  melt.dat[,"Var2"] <- as.factor(melt.dat[,"Var2"])
  colnames(melt.dat) <- c("draw", "item", "value")
  ggplot(data=melt.dat, aes(x=item, y=value, ymin=0, ymax=value)) +
    geom_linerange(colour="blue") + geom_point(colour="blue") +
    facet_wrap(~ draw, nrow=nrow) + ylim(0,1) +
    # FIX: opts()/theme_rect() were removed in ggplot2 0.9.2; the modern
    # equivalent is theme()/element_rect().
    theme(panel.border=element_rect(fill = NA, colour = "gray50"))
}
# Render plot.dirichlet.data(15, rep(alpha.scalar, 10), 3) for a symmetric
# 10-component Dirichlet with the given scalar concentration; when
# makepdf=TRUE the figure is written to ~/Desktop/alpha=<value>.pdf instead
# of the active graphics device.
make.dirichlet.plot <- function(alpha.scalar, makepdf=F)
{
  alpha <- rep(alpha.scalar, 10)
  filename <- sprintf("~/Desktop/alpha=%g.pdf", alpha.scalar)
  if (makepdf) {
    cat(filename)
    pdf(filename, height=7, width=10)
  }
  # print() is required so the ggplot renders inside a function/device.
  print(plot.dirichlet.data(15, alpha, 3))
  if (makepdf) dev.off()
}
# Generate the full set of Dirichlet PDF figures, one per concentration value.
make.dirichlet.plots <- function()
{
  # BUG FIX: the original called the undefined `make.plot()`; the helper
  # defined in this file is `make.dirichlet.plot()`.
  make.dirichlet.plot(1, makepdf=T)
  make.dirichlet.plot(10, makepdf=T)
  make.dirichlet.plot(100, makepdf=T)
  make.dirichlet.plot(0.1, makepdf=T)
  make.dirichlet.plot(0.01, makepdf=T)
  make.dirichlet.plot(0.001, makepdf=T)
}
plot.dirichlet.data(10,c(1,1,1,1,1,1),10)
| /lda/dirichlet/dirichlet.R | no_license | satishverma/RStuff | R | false | false | 1,060 | r | library(ggplot2)
library(gtools)
library(reshape2)
plot.dirichlet.data <- function(n, alpha, nrow)
{
dat <- rdirichlet(n, alpha)
melt.dat <- melt(dat)
melt.dat[,"Var2"] <- as.factor(melt.dat[,"Var2"])
colnames(melt.dat) <- c("draw", "item", "value")
ggplot(data=melt.dat, aes(x=item, y=value, ymin=0, ymax=value)) +
geom_linerange(colour="blue") + geom_point(colour="blue") +
facet_wrap(~ draw, nrow=nrow) + ylim(0,1) +
opts(panel.border=theme_rect(fill = NA, colour = "gray50"))
}
make.dirichlet.plot <- function(alpha.scalar, makepdf=F)
{
alpha <- rep(alpha.scalar, 10)
filename <- sprintf("~/Desktop/alpha=%g.pdf", alpha.scalar)
if (makepdf) {
cat(filename)
pdf(filename, height=7, width=10)
}
print(plot.dirichlet.data(15, alpha, 3))
if (makepdf) dev.off()
}
# Generate the full set of Dirichlet PDF figures, one per concentration value.
make.dirichlet.plots <- function()
{
  # BUG FIX: the original called the undefined `make.plot()`; the helper
  # defined in this file is `make.dirichlet.plot()`.
  make.dirichlet.plot(1, makepdf=T)
  make.dirichlet.plot(10, makepdf=T)
  make.dirichlet.plot(100, makepdf=T)
  make.dirichlet.plot(0.1, makepdf=T)
  make.dirichlet.plot(0.01, makepdf=T)
  make.dirichlet.plot(0.001, makepdf=T)
}
plot.dirichlet.data(10,c(1,1,1,1,1,1),10)
|
# Convert a MISO-style alternative-splicing event id of the form
# "TYPE$event_coordinates" into a GenomicRanges::GRanges spanning the
# event's alternative region. Coordinate parsing is delegated to the
# per-type position_praser_for_* helpers, each returning
# list(chr, start, end, strand) as character values.
cor_to_range=function(event_name_list){
  library(GenomicRanges)
  # Split "TYPE$event" into the splicing type and the coordinate string.
  ase <- unlist(strsplit(as.character(event_name_list),'\\$'))
  ase_type=ase[[1]]
  ase_event=ase[2]
  # Dispatch on type: SE skipped exon, A5SS/A3SS alternative 5'/3' splice
  # site, MXE mutually exclusive exons, RI retained intron.
  mRNA_position=switch(ase_type,
                       SE =position_praser_for_SE(ase_event),
                       A5SS=position_praser_for_A5SS(ase_event),
                       A3SS=position_praser_for_A3SS(ase_event),
                       MXE=position_praser_for_MXE(ase_event),
                       RI=position_praser_for_RI(ase_event)
  )
  print(mRNA_position)
  chr <- unlist(mRNA_position[1])
  # Drop the leading "chr" prefix; X/Y stay character, autosomes are coerced
  # to numeric. NOTE(review): seqnames therefore lack the "chr" prefix --
  # confirm downstream annotation uses the same naming convention.
  chr<-substr(chr,4,nchar(chr)+1)
  if(chr=="Y"|chr=="X"){
    chr=chr
  }else{
    chr<-as.numeric(chr)
  }
  five_primer_exon_mRNA_start <- as.numeric(as.character(mRNA_position[2]))
  three_primer_exon_end <- as.numeric(as.character(mRNA_position[3]))
  strand <- as.character(mRNA_position[4])
  # Dead code kept for reference: strand-to-integer conversion once used by
  # the biomaRt lookup below.
  # if(strand=="+"){
  #   # print(strand)
  #   #print("positive")
  #   strand=1
  # }else{
  #   # print("no")
  # }
  # if(strand=="-"){
  #   #print(strand)
  #   # print("minus")
  #   strand=-1
  # }
  # chrosome_coordinate=list(chr,five_primer_exon_mRNA_start,three_primer_exon_end,strand)
  # gene_name=getBM(attributes = c('hgnc_symbol'), filters = c('chromosome_name','start','end','strand'),values = chrosome_coordinate, mart = ensembl)
  #gene_name=getBM(attributes = c('mgi_symbol'), filters = c('chromosome_name','start','end','strand'),values = list(chr,five_primer_exon_mRNA_start,three_primer_exon_end,strand), mart = ensembl)
  # Build the 1-based inclusive genomic range on the parsed strand.
  query_range <- GRanges(chr,IRanges(five_primer_exon_mRNA_start,three_primer_exon_end),strand)
  return(query_range)
  # Dead code kept for reference: gene-name post-processing for the biomaRt
  # lookup above (unreachable after the return).
  # gene_name=unlist(gene_name)
  # how_many_name=length(gene_name)
  # if(how_many_name==1){
  #   return(gene_name)
  # }else{
  #   if(how_many_name==0){
  #     gene_name=NA
  #     return(gene_name)
  #   }else{
  #     gene_name=Reduce(function(...) paste(...,sep=" "),gene_name)
  #     return(gene_name)
  #   }
  # }
}
# Parse an A3SS (alternative 3' splice site) event string, e.g.
#   "chr10:101458291:101458615:+@chr10:101460624|101460730:101460815:+"
# The alternative region is the "start|end" pair inside the downstream
# (second) exon block. Returns list(chr, start, end, strand) as characters.
position_praser_for_A3SS=function(event_list){
  exons <- strsplit(as.character(event_list), "@", fixed = TRUE)[[1]]
  # The second exon block holds the alternative 3' splice site.
  downstream <- strsplit(exons[2], ":", fixed = TRUE)[[1]]
  alt_pair <- strsplit(downstream[2], "|", fixed = TRUE)[[1]]
  list(downstream[1], alt_pair[1], alt_pair[2], downstream[4])
}
# Parse an A5SS (alternative 5' splice site) event string, e.g.
#   "chr15:48623621:48623676|48623992:+@chr15:48624465:48624603:+"
# The alternative region is the "end1|end2" pair inside the upstream
# (first) exon block. Returns list(chr, start, end, strand) as characters.
position_praser_for_A5SS=function(event_list){
  exons <- strsplit(as.character(event_list), "@", fixed = TRUE)[[1]]
  # The first exon block holds the alternative 5' splice site.
  upstream <- strsplit(exons[1], ":", fixed = TRUE)[[1]]
  alt_pair <- strsplit(upstream[3], "|", fixed = TRUE)[[1]]
  list(upstream[1], alt_pair[1], alt_pair[2], upstream[4])
}
#' Parse an MXE (mutually exclusive exons) event string.
#'
#' Example input:
#'   MXE_chr11:43911275:43911378:+@chr11:43913591:43913679:+@chr11:43918745:43918904:+@chr11:43923066:43923275:+
#' (the leading "MXE_" tag is stripped by the caller before this is invoked).
#' Coordinates are taken from the SECOND exon block, i.e. the first of the
#' two mutually exclusive exons.
#'
#' @param event_list a single event string (character or factor).
#' @return list(chr, five_primer_exon_mRNA_start, three_primer_exon_end,
#'   strand), all as character strings.
position_praser_for_MXE <- function(event_list) {
  exon_blocks <- strsplit(as.character(event_list), split = "@", fixed = TRUE)[[1]]
  # Second block format: "chr:start:end:strand".
  fields <- strsplit(exon_blocks[2], split = ":", fixed = TRUE)[[1]]
  list(fields[1], fields[2], fields[3], fields[4])
}
#' Parse an RI (retained intron) event string.
#'
#' Example input:
#'   RI_chr12:125609251-125609315:+@chr12:125609448-125609570:+
#' (the leading "RI_" tag is stripped by the caller before this is invoked).
#' The returned interval runs from the END of the upstream exon to the
#' START of the downstream exon, i.e. it spans the retained intron.
#'
#' @param event_list a single event string (character or factor).
#' @return list(chr, five_primer_exon_mRNA_start, three_primer_exon_end,
#'   strand), all as character strings.
position_praser_for_RI <- function(event_list) {
  # Splitting the whole event on ":" yields:
  #   [1] chr, [2] "upStart-upEnd", [3] "+@chr..", [4] "dnStart-dnEnd", [5] strand
  fields <- strsplit(as.character(event_list), split = ":", fixed = TRUE)[[1]]
  chr <- fields[1]
  strand <- fields[5]
  # End of the upstream exon (second part of "start-end").
  intron_start <- strsplit(fields[2], split = "-", fixed = TRUE)[[1]][2]
  # Start of the downstream exon (first part of "start-end").
  intron_end <- strsplit(fields[4], split = "-", fixed = TRUE)[[1]][1]
  list(chr, intron_start, intron_end, strand)
}
#' Parse an SE (skipped exon) event string.
#'
#' Example input:
#'   SE_chr4:106067842:106068136:+@chr4:106111517:106111643:+@chr4:106155054:106158508:+
#' (the leading "SE_" tag is stripped by the caller before this is invoked).
#' Coordinates of the skipped (middle) exon — the SECOND exon block — are
#' returned.
#'
#' @param event_list a single event string (character or factor).
#' @return list(chr, five_primer_exon_mRNA_start, three_primer_exon_end,
#'   strand), all as character strings.
position_praser_for_SE <- function(event_list) {
  exon_blocks <- strsplit(as.character(event_list), split = "@", fixed = TRUE)[[1]]
  # Middle block format: "chr:start:end:strand".
  fields <- strsplit(exon_blocks[2], split = ":", fixed = TRUE)[[1]]
  list(fields[1], fields[2], fields[3], fields[4])
}
library(GenomicRanges)
ase <- unlist(strsplit(as.character(event_name_list),'\\$'))
ase_type=ase[[1]]
ase_event=ase[2]
mRNA_position=switch(ase_type,
SE =position_praser_for_SE(ase_event),
A5SS=position_praser_for_A5SS(ase_event),
A3SS=position_praser_for_A3SS(ase_event),
MXE=position_praser_for_MXE(ase_event),
RI=position_praser_for_RI(ase_event)
)
print(mRNA_position)
chr <- unlist(mRNA_position[1])
chr<-substr(chr,4,nchar(chr)+1)
if(chr=="Y"|chr=="X"){
chr=chr
}else{
chr<-as.numeric(chr)
}
five_primer_exon_mRNA_start <- as.numeric(as.character(mRNA_position[2]))
three_primer_exon_end <- as.numeric(as.character(mRNA_position[3]))
strand <- as.character(mRNA_position[4])
# if(strand=="+"){
# # print(strand)
# #print("positive")
# strand=1
# }else{
# # print("no")
# }
# if(strand=="-"){
# #print(strand)
# # print("minus")
# strand=-1
# }
# chrosome_coordinate=list(chr,five_primer_exon_mRNA_start,three_primer_exon_end,strand)
# gene_name=getBM(attributes = c('hgnc_symbol'), filters = c('chromosome_name','start','end','strand'),values = chrosome_coordinate, mart = ensembl)
#gene_name=getBM(attributes = c('mgi_symbol'), filters = c('chromosome_name','start','end','strand'),values = list(chr,five_primer_exon_mRNA_start,three_primer_exon_end,strand), mart = ensembl)
query_range <- GRanges(chr,IRanges(five_primer_exon_mRNA_start,three_primer_exon_end),strand)
return(query_range)
# gene_name=unlist(gene_name)
# how_many_name=length(gene_name)
# if(how_many_name==1){
# return(gene_name)
# }else{
# if(how_many_name==0){
# gene_name=NA
# return(gene_name)
# }else{
# gene_name=Reduce(function(...) paste(...,sep=" "),gene_name)
# return(gene_name)
# }
# }
}
position_praser_for_A3SS=function(event_list){
#A3SS_chr10:101458291:101458615:+@chr10:101460624|101460730:101460815:+
all_position <- unlist(strsplit(unlist(as.vector(event_list)),split='@'))
three_as_region=all_position[2]
three_as_region=unlist(strsplit(unlist(as.vector(three_as_region)),split=':'))
chr<- three_as_region[1]
strand<- three_as_region[4]
three_out=unlist(strsplit(three_as_region[2],split="\\|"))
five_primer_exon_mRNA_start=three_out[1]
three_primer_exon_end=three_out[2]
mRNA_position<- list(chr,five_primer_exon_mRNA_start,three_primer_exon_end,strand)
return(mRNA_position)
}
position_praser_for_A5SS=function(event_list){
#A5SS_chr15:48623621:48623676|48623992:+@chr15:48624465:48624603:+
all_position <- unlist(strsplit(unlist(as.vector(event_list)),split='@'))
five_as_region=all_position[1]
five_as_region=unlist(strsplit(unlist(as.vector(five_as_region)),split=':'))
chr<- five_as_region[1]
strand<- five_as_region[4]
five_out=unlist(strsplit(five_as_region[3],split="\\|"))
five_primer_exon_mRNA_start=five_out[1]
three_primer_exon_end=five_out[2]
mRNA_position<- list(chr,five_primer_exon_mRNA_start,three_primer_exon_end,strand)
return(mRNA_position)
}
position_praser_for_MXE=function(event_list){
#MXE_chr11:43911275:43911378:+@chr11:43913591:43913679:+@chr11:43918745:43918904:+@chr11:43923066:43923275:+
all_position <- unlist(strsplit(unlist(as.vector(event_list)),split='@'))
region=all_position[2]
region=unlist(strsplit(unlist(as.vector(region)),split=':'))
chr<-region[1]
strand<-region[4]
five_primer_exon_mRNA_start<-region[2]
three_primer_exon_end<-region[3]
mRNA_position<- list(chr,five_primer_exon_mRNA_start,three_primer_exon_end,strand)
return(mRNA_position)
}
position_praser_for_RI=function(event_list){
#RI_chr12:125609251-125609315:+@chr12:125609448-125609570:+
all_position <- unlist(strsplit(unlist(as.vector(event_list)),split=':'))
chr<-all_position[1]
strand<-all_position[5]
five_primer_exon_mRNA_start<-unlist(strsplit(unlist(as.vector(all_position[2])),split='-'))[2]
three_primer_exon_end<-unlist(strsplit(unlist(as.vector(all_position[4])),split='-'))[1]
mRNA_position<- list(chr,five_primer_exon_mRNA_start,three_primer_exon_end,strand)
return(mRNA_position)
}
position_praser_for_SE=function(event_list){
#SE_chr4:106067842:106068136:+@chr4:106111517:106111643:+@chr4:106155054:106158508:+
all_position <- unlist(strsplit(unlist(as.vector(event_list)),split='@'))
SE_region=unlist(strsplit(unlist(as.vector(all_position[2])),split=':'))
chr<-SE_region[1]
strand<-SE_region[4]
five_primer_exon_mRNA_start<-SE_region[2]
three_primer_exon_end<-SE_region[3]
mRNA_position<- list(chr,five_primer_exon_mRNA_start,three_primer_exon_end,strand)
return(mRNA_position)
} |
# Coordinates and Faceting
library(ggplot2)
# Data and Aesthetic
pl <- ggplot(mpg, aes(x=displ, y=hwy))
# Geometry
pl <- pl + geom_point()
pl
# x and y lim
pl2 <- pl + coord_cartesian(xlim=c(1,4), ylim=c(15,30))
pl2
# aspect ratios
pl2 <- pl + coord_fixed(ratio=1/3)
pl2
# Facet Grids (subplots)
pl + facet_grid(. ~ cyl) + theme_dark()
pl + facet_grid(drv ~ .) + theme_dark()
pl + facet_grid(drv ~ cyl) + theme_dark()
pl + facet_grid(cyl ~ drv) + theme_dark()
| /R/RPlots - Coordinates_and_Faceting.R | permissive | natitaw/write-to-learn | R | false | false | 473 | r | # Coordinates and Faceting
library(ggplot2)
# Data and Aesthetic
pl <- ggplot(mpg, aes(x=displ, y=hwy))
# Geometry
pl <- pl + geom_point()
pl
# x and y lim
pl2 <- pl + coord_cartesian(xlim=c(1,4), ylim=c(15,30))
pl2
# aspect ratios
pl2 <- pl + coord_fixed(ratio=1/3)
pl2
# Facet Grids (subplots)
pl + facet_grid(. ~ cyl) + theme_dark()
pl + facet_grid(drv ~ .) + theme_dark()
pl + facet_grid(drv ~ cyl) + theme_dark()
pl + facet_grid(cyl ~ drv) + theme_dark()
|
context("mlllogis")
## Data generation.
set.seed(313)
small_data <- actuar::rllogis(100, 2, 3)
tiny_data <- actuar::rllogis(10, 1, 1)
## Finds errors with na and data out of bounds.
expect_error(mlllogis(c(tiny_data, NA)))
expect_error(mlllogis(c(tiny_data, 0)))
expect_error(mlllogis(c(tiny_data, -1)))
# Check correctness
obj_1 <- mllogis(log(tiny_data))
obj_2 <- mlllogis(tiny_data)
expect_equal(
unname(obj_1[1]),
unname(log(obj_2)[1])
)
expect_equal(
unname(obj_1[2]),
unname(1 / (obj_2)[2])
)
## Checks that na.rm works as intended.
expect_equal(
coef(mlllogis(small_data)),
coef(mlllogis(c(small_data, NA), na.rm = TRUE))
)
## Is the log-likelihood correct?
est <- mlllogis(small_data, na.rm = TRUE)
expect_equal(
sum(actuar::dllogis(small_data, est[1], est[2], log = TRUE)),
attr(est, "logLik")
)
## Check class.
expect_equal(attr(est, "model"), "Loglogistic")
expect_equal(class(est), "univariateML")
# Check names.
expect_equal(names(est), c("shape", "rate"))
| /tests/testthat/test_mlllogis.R | permissive | tnagler/univariateML | R | false | false | 995 | r | context("mlllogis")
## Data generation.
set.seed(313)
small_data <- actuar::rllogis(100, 2, 3)
tiny_data <- actuar::rllogis(10, 1, 1)
## Finds errors with na and data out of bounds.
expect_error(mlllogis(c(tiny_data, NA)))
expect_error(mlllogis(c(tiny_data, 0)))
expect_error(mlllogis(c(tiny_data, -1)))
# Check correctness
obj_1 <- mllogis(log(tiny_data))
obj_2 <- mlllogis(tiny_data)
expect_equal(
unname(obj_1[1]),
unname(log(obj_2)[1])
)
expect_equal(
unname(obj_1[2]),
unname(1 / (obj_2)[2])
)
## Checks that na.rm works as intended.
expect_equal(
coef(mlllogis(small_data)),
coef(mlllogis(c(small_data, NA), na.rm = TRUE))
)
## Is the log-likelihood correct?
est <- mlllogis(small_data, na.rm = TRUE)
expect_equal(
sum(actuar::dllogis(small_data, est[1], est[2], log = TRUE)),
attr(est, "logLik")
)
## Check class.
expect_equal(attr(est, "model"), "Loglogistic")
expect_equal(class(est), "univariateML")
# Check names.
expect_equal(names(est), c("shape", "rate"))
|
#:# libraries
library(digest)
library(mlr)
library(OpenML)
library(farff)
#:# config
set.seed(1)
#:# data
dataset <- getOMLDataSet(data.name = "fri_c4_1000_10")
head(dataset$data)
#:# preprocessing
head(dataset$data)
#:# model
task = makeClassifTask(id = "task", data = dataset$data, target = "binaryClass")
lrn = makeLearner("classif.ranger", par.vals = list(), predict.type = "prob")
#:# hash
#:# d267013eb9c3295bce56064c6ca444e6
hash <- digest(list(task, lrn))
hash
#:# audit
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
ACC <- r$aggr
ACC
#:# session info
sink(paste0("sessionInfo.txt"))
sessionInfo()
sink()
| /models/openml_fri_c4_1000_10/classification_binaryClass/d267013eb9c3295bce56064c6ca444e6/code.R | no_license | pysiakk/CaseStudies2019S | R | false | false | 694 | r | #:# libraries
library(digest)
library(mlr)
library(OpenML)
library(farff)
#:# config
set.seed(1)
#:# data
dataset <- getOMLDataSet(data.name = "fri_c4_1000_10")
head(dataset$data)
#:# preprocessing
head(dataset$data)
#:# model
task = makeClassifTask(id = "task", data = dataset$data, target = "binaryClass")
lrn = makeLearner("classif.ranger", par.vals = list(), predict.type = "prob")
#:# hash
#:# d267013eb9c3295bce56064c6ca444e6
hash <- digest(list(task, lrn))
hash
#:# audit
cv <- makeResampleDesc("CV", iters = 5)
r <- mlr::resample(lrn, task, cv, measures = list(acc, auc, tnr, tpr, ppv, f1))
ACC <- r$aggr
ACC
#:# session info
sink(paste0("sessionInfo.txt"))
sessionInfo()
sink()
|
#' Bibliometric Analysis
#'
#' It performs a bibliometric analysis of a dataset imported from SCOPUS and Thomson Reuters' ISI Web of Knowledge databases.
#'
#' @param M is a bibliographic data frame obtained by the converting function \code{\link{convert2df}}.
#' It is a data matrix with cases corresponding to manuscripts and variables to Field Tag in the original SCOPUS and Thomson Reuters' ISI Web of Knowledge file.
#' @param sep is the field separator character. This character separates strings in each column of the data frame. The default is \code{sep = ";"}.
#' @return \code{biblioAnalysis} returns an object of \code{class} "bibliometrix".
#'
#' The functions \code{\link{summary}} and \code{\link{plot}} are used to obtain or print a summary and some useful plots of the results.
#'
#' An object of \code{class} "bibliometrix" is a list containing the following components:
#'
#' \tabular{lll}{
#' Articles \tab \tab the total number of manuscripts\cr
#' Authors \tab \tab the authors' frequency distribution\cr
#' AuthorsFrac \tab \tab the authors' frequency distribution (fractionalized)\cr
#' FirstAuthors \tab \tab first author of each manuscript\cr
#' nAUperPaper \tab \tab the number of authors per manuscript\cr
#' Apparences \tab \tab the number of author apparences\cr
#' nAuthors \tab \tab the number of authors\cr
#' AuMultiAuthoredArt \tab \tab the number of authors of multi authored articles\cr
#' Years \tab \tab pubblication year of each manuscript\cr
#' FirstAffiliation \tab \tab the affiliation of the first author\cr
#' Affiliations \tab \tab the frequency distribution of affiliations (of all co-authors for each paper)\cr
#' Aff_frac \tab \tab the fractionalized frequency distribution of affiliations (of all co-authors for each paper)\cr
#' CO \tab \tab the affiliation country of first author\cr
#' Countries \tab \tab the affiliation countries' frequency distribution\cr
#' TotalCitation \tab \tab the number of times each manuscript has been cited\cr
#' TCperYear \tab \tab the yearly average number of times each manuscript has been cited\cr
#' Sources \tab \tab the frequency distribution of sources (journals, books, etc.)\cr
#' DE \tab \tab the frequency distribution of authors' keywords\cr
#' ID \tab \tab the frequency distribution of keywords associated to the manuscript bySCOPUS and Thomson Reuters' ISI Web of Knowledge database}
#'
#'
#' @examples
#' data(scientometrics)
#'
#' results <- biblioAnalysis(scientometrics)
#'
#' summary(results, k = 10, pause = FALSE)
#'
#' @seealso \code{\link{convert2df}} to import and convert an ISI or SCOPUS Export file in a bibliographic data frame.
#' @seealso \code{\link{summary}} to obtain a summary of the results.
#' @seealso \code{\link{plot}} to draw some useful plots of the results.
#'
biblioAnalysis<-function(M,sep=";"){
# initialize variables
Authors=NULL
Authors_frac=NULL
FirstAuthors=NULL
PY=NULL
FAffiliation=NULL
Affiliation=NULL
Affiliation_frac=NULL
CO=rep(NA,dim(M)[1])
TC=NULL
TCperYear=NULL
SO=NULL
Country=NULL
DE=NULL
ID=NULL
# M is the dataframe
Tags<-names(M)
# temporal analyis
if ("PY" %in% Tags){Years=table(PY)}
# Author's distribution
if ("AU" %in% Tags){
listAU=strsplit(as.character(M$AU),sep)
listAU=lapply(listAU, function(l) trim.leading(l))
if (M$DB[1]=="ISI"){
listAU=lapply(listAU,function(l){
l=trim.leading(l)
l=gsub(" ","",l, fixed = TRUE)
})}
if (M$DB[1]=="SCOPUS"){
listAU=lapply(listAU,function(l){
l=trim.leading(l)
l=sub(" ",",",l, fixed = TRUE)
l=gsub(" ","",l, fixed = TRUE)})}
nAU=unlist(lapply(listAU,length)) # num. of authors per paper
fracAU=unlist(sapply(nAU,function(x){rep(1/x,x)})) # fractional frequencies
if (M$DB[1]=="ISI"){AU=gsub(" ", "", unlist(listAU), fixed = TRUE)} # delete spaces
if (M$DB[1]=="SCOPUS"){AU=sub(" ",",",unlist(listAU),fixed=TRUE);AU=gsub(" ","",AU,fixed=TRUE)}
Authors=sort(table(AU),decreasing=TRUE)
Authors_frac=aggregate(fracAU,by=list(AU),'sum')
names(Authors_frac)=c("Author","Frequency")
Authors_frac=Authors_frac[order(-Authors_frac$Frequency),]
FirstAuthors=lapply(listAU,function(l) l[[1]])
listAUU=strsplit(as.character(M$AU[nAU>1]),sep)
AuMultiAuthoredArt=length(unique(gsub(" ", "", unlist(listAUU), fixed = TRUE)))
}
#Total Citation Distribution
if ("TC" %in% Tags){
TC=as.numeric(M$TC)
PY=as.numeric(M$PY)
CurrentYear=as.numeric(format(Sys.Date(),"%Y"))
TCperYear=TC/(CurrentYear-PY)
}
# References
if ("CR" %in% Tags){CR=tableTag(M,"CR",sep)}
# ID Keywords
if ("ID" %in% Tags){ID=tableTag(M,"ID",sep)}
# DE Keywords
if ("DE" %in% Tags){DE=tableTag(M,"DE",sep)}
# Sources
if ("SO" %in% Tags){
SO=gsub(",","",M$SO,fixed=TRUE)
SO=sort(table(SO),decreasing = TRUE)
}
# All Affiliations, First Affiliation and Countries
if ("C1" %in% Tags){
AFF=gsub("\\[.*?\\] ", "", M$C1)
listAFF=strsplit(AFF,sep,fixed=TRUE)
nAFF=unlist(lapply(listAFF,length)) # num. of references per paper
listAFF[nAFF==0]="NA"
fracAFF=unlist(sapply(nAFF,function(x){rep(1/x,x)})) # fractional frequencies
AFF=trim.leading(unlist(listAFF)) # delete spaces
Affiliation=sort(table(AFF),decreasing=TRUE)
Affiliation_frac=aggregate(fracAFF,by=list(AFF),'sum')
names(Affiliation_frac)=c("Affiliation","Frequency")
Affiliation_frac=Affiliation_frac[order(-Affiliation_frac$Frequency),]
# First Affiliation
FAffiliation=lapply(listAFF,function(l) l[1])
# Countries
data("countries",envir=environment())
countries=as.character(countries[[1]])
if (M$DB[1]=="SCOPUS"){
FA=paste(FAffiliation,";",sep="")
RP=paste(M$RP,";",sep="")
countries=as.character(sapply(countries,function(s) paste0(s,";",collapse="")))}
else if (M$DB[1]=="ISI"){
#FA=paste(FAffiliation,".",sep="")
FA=FAffiliation
RP=paste(M$RP,".",sep="")
countries=as.character(sapply(countries,function(s) paste0(s,".",collapse="")))}
for (i in 1:length(countries)){
ind=which(regexpr(countries[i],FA,fixed=TRUE)!=-1)
if (length(ind)>0){CO[ind]=countries[i]}
indd=which(regexpr(countries[i],RP,fixed=TRUE)!=-1)
if (length(indd)>0){CO[indd]=countries[i]}
}
CO=gsub(";","",CO)
CO=gsub("\\.","",CO)
CO=gsub("UNITED STATES","USA",CO)
Country=sort(table(CO),decreasing = TRUE)
}
results=list(Articles=dim(M)[1], # Articles
Authors=Authors, # Authors' frequency distribution
AuthorsFrac=Authors_frac, # Authors' frequency distribution (fractionalized)
FirstAuthors=unlist(FirstAuthors),# First Author's list
nAUperPaper=nAU, # N. Authors per Paper
Apparences=sum(nAU), # Author apparences
nAuthors=dim(Authors), # N. of Authors
AuMultiAuthoredArt=AuMultiAuthoredArt, # N. of Authors of multi authored articles
Years=PY, # Years
FirstAffiliation=unlist(FAffiliation), # Affiliation of First Author
Affiliations=Affiliation, # Affiliations of all authors
Aff_frac=Affiliation_frac, # Affiliations of all authors (fractionalized)
CO=CO, # Country of each paper
Countries=Country, # Countries' frequency distribution
TotalCitation=TC, # Total Citations
TCperYear=TCperYear, # Total Citations per year
Sources=SO, # Sources
DE=DE,
ID=ID)
class(results)<-"bibliometrix"
return(results)
}
| /bibliometrix/R/biblioAnalysis.R | no_license | ingted/R-Examples | R | false | false | 7,956 | r | #' Bibliometric Analysis
#'
#' It performs a bibliometric analysis of a dataset imported from SCOPUS and Thomson Reuters' ISI Web of Knowledge databases.
#'
#' @param M is a bibliographic data frame obtained by the converting function \code{\link{convert2df}}.
#' It is a data matrix with cases corresponding to manuscripts and variables to Field Tag in the original SCOPUS and Thomson Reuters' ISI Web of Knowledge file.
#' @param sep is the field separator character. This character separates strings in each column of the data frame. The default is \code{sep = ";"}.
#' @return \code{biblioAnalysis} returns an object of \code{class} "bibliometrix".
#'
#' The functions \code{\link{summary}} and \code{\link{plot}} are used to obtain or print a summary and some useful plots of the results.
#'
#' An object of \code{class} "bibliometrix" is a list containing the following components:
#'
#' \tabular{lll}{
#' Articles \tab \tab the total number of manuscripts\cr
#' Authors \tab \tab the authors' frequency distribution\cr
#' AuthorsFrac \tab \tab the authors' frequency distribution (fractionalized)\cr
#' FirstAuthors \tab \tab first author of each manuscript\cr
#' nAUperPaper \tab \tab the number of authors per manuscript\cr
#' Apparences \tab \tab the number of author apparences\cr
#' nAuthors \tab \tab the number of authors\cr
#' AuMultiAuthoredArt \tab \tab the number of authors of multi authored articles\cr
#' Years \tab \tab pubblication year of each manuscript\cr
#' FirstAffiliation \tab \tab the affiliation of the first author\cr
#' Affiliations \tab \tab the frequency distribution of affiliations (of all co-authors for each paper)\cr
#' Aff_frac \tab \tab the fractionalized frequency distribution of affiliations (of all co-authors for each paper)\cr
#' CO \tab \tab the affiliation country of first author\cr
#' Countries \tab \tab the affiliation countries' frequency distribution\cr
#' TotalCitation \tab \tab the number of times each manuscript has been cited\cr
#' TCperYear \tab \tab the yearly average number of times each manuscript has been cited\cr
#' Sources \tab \tab the frequency distribution of sources (journals, books, etc.)\cr
#' DE \tab \tab the frequency distribution of authors' keywords\cr
#' ID \tab \tab the frequency distribution of keywords associated to the manuscript bySCOPUS and Thomson Reuters' ISI Web of Knowledge database}
#'
#'
#' @examples
#' data(scientometrics)
#'
#' results <- biblioAnalysis(scientometrics)
#'
#' summary(results, k = 10, pause = FALSE)
#'
#' @seealso \code{\link{convert2df}} to import and convert an ISI or SCOPUS Export file in a bibliographic data frame.
#' @seealso \code{\link{summary}} to obtain a summary of the results.
#' @seealso \code{\link{plot}} to draw some useful plots of the results.
#'
biblioAnalysis<-function(M,sep=";"){
# initialize variables
Authors=NULL
Authors_frac=NULL
FirstAuthors=NULL
PY=NULL
FAffiliation=NULL
Affiliation=NULL
Affiliation_frac=NULL
CO=rep(NA,dim(M)[1])
TC=NULL
TCperYear=NULL
SO=NULL
Country=NULL
DE=NULL
ID=NULL
# M is the dataframe
Tags<-names(M)
# temporal analyis
if ("PY" %in% Tags){Years=table(PY)}
# Author's distribution
if ("AU" %in% Tags){
listAU=strsplit(as.character(M$AU),sep)
listAU=lapply(listAU, function(l) trim.leading(l))
if (M$DB[1]=="ISI"){
listAU=lapply(listAU,function(l){
l=trim.leading(l)
l=gsub(" ","",l, fixed = TRUE)
})}
if (M$DB[1]=="SCOPUS"){
listAU=lapply(listAU,function(l){
l=trim.leading(l)
l=sub(" ",",",l, fixed = TRUE)
l=gsub(" ","",l, fixed = TRUE)})}
nAU=unlist(lapply(listAU,length)) # num. of authors per paper
fracAU=unlist(sapply(nAU,function(x){rep(1/x,x)})) # fractional frequencies
if (M$DB[1]=="ISI"){AU=gsub(" ", "", unlist(listAU), fixed = TRUE)} # delete spaces
if (M$DB[1]=="SCOPUS"){AU=sub(" ",",",unlist(listAU),fixed=TRUE);AU=gsub(" ","",AU,fixed=TRUE)}
Authors=sort(table(AU),decreasing=TRUE)
Authors_frac=aggregate(fracAU,by=list(AU),'sum')
names(Authors_frac)=c("Author","Frequency")
Authors_frac=Authors_frac[order(-Authors_frac$Frequency),]
FirstAuthors=lapply(listAU,function(l) l[[1]])
listAUU=strsplit(as.character(M$AU[nAU>1]),sep)
AuMultiAuthoredArt=length(unique(gsub(" ", "", unlist(listAUU), fixed = TRUE)))
}
#Total Citation Distribution
if ("TC" %in% Tags){
TC=as.numeric(M$TC)
PY=as.numeric(M$PY)
CurrentYear=as.numeric(format(Sys.Date(),"%Y"))
TCperYear=TC/(CurrentYear-PY)
}
# References
if ("CR" %in% Tags){CR=tableTag(M,"CR",sep)}
# ID Keywords
if ("ID" %in% Tags){ID=tableTag(M,"ID",sep)}
# DE Keywords
if ("DE" %in% Tags){DE=tableTag(M,"DE",sep)}
# Sources
if ("SO" %in% Tags){
SO=gsub(",","",M$SO,fixed=TRUE)
SO=sort(table(SO),decreasing = TRUE)
}
# All Affiliations, First Affiliation and Countries
if ("C1" %in% Tags){
AFF=gsub("\\[.*?\\] ", "", M$C1)
listAFF=strsplit(AFF,sep,fixed=TRUE)
nAFF=unlist(lapply(listAFF,length)) # num. of references per paper
listAFF[nAFF==0]="NA"
fracAFF=unlist(sapply(nAFF,function(x){rep(1/x,x)})) # fractional frequencies
AFF=trim.leading(unlist(listAFF)) # delete spaces
Affiliation=sort(table(AFF),decreasing=TRUE)
Affiliation_frac=aggregate(fracAFF,by=list(AFF),'sum')
names(Affiliation_frac)=c("Affiliation","Frequency")
Affiliation_frac=Affiliation_frac[order(-Affiliation_frac$Frequency),]
# First Affiliation
FAffiliation=lapply(listAFF,function(l) l[1])
# Countries
data("countries",envir=environment())
countries=as.character(countries[[1]])
if (M$DB[1]=="SCOPUS"){
FA=paste(FAffiliation,";",sep="")
RP=paste(M$RP,";",sep="")
countries=as.character(sapply(countries,function(s) paste0(s,";",collapse="")))}
else if (M$DB[1]=="ISI"){
#FA=paste(FAffiliation,".",sep="")
FA=FAffiliation
RP=paste(M$RP,".",sep="")
countries=as.character(sapply(countries,function(s) paste0(s,".",collapse="")))}
for (i in 1:length(countries)){
ind=which(regexpr(countries[i],FA,fixed=TRUE)!=-1)
if (length(ind)>0){CO[ind]=countries[i]}
indd=which(regexpr(countries[i],RP,fixed=TRUE)!=-1)
if (length(indd)>0){CO[indd]=countries[i]}
}
CO=gsub(";","",CO)
CO=gsub("\\.","",CO)
CO=gsub("UNITED STATES","USA",CO)
Country=sort(table(CO),decreasing = TRUE)
}
results=list(Articles=dim(M)[1], # Articles
Authors=Authors, # Authors' frequency distribution
AuthorsFrac=Authors_frac, # Authors' frequency distribution (fractionalized)
FirstAuthors=unlist(FirstAuthors),# First Author's list
nAUperPaper=nAU, # N. Authors per Paper
Apparences=sum(nAU), # Author apparences
nAuthors=dim(Authors), # N. of Authors
AuMultiAuthoredArt=AuMultiAuthoredArt, # N. of Authors of multi authored articles
Years=PY, # Years
FirstAffiliation=unlist(FAffiliation), # Affiliation of First Author
Affiliations=Affiliation, # Affiliations of all authors
Aff_frac=Affiliation_frac, # Affiliations of all authors (fractionalized)
CO=CO, # Country of each paper
Countries=Country, # Countries' frequency distribution
TotalCitation=TC, # Total Citations
TCperYear=TCperYear, # Total Citations per year
Sources=SO, # Sources
DE=DE,
ID=ID)
class(results)<-"bibliometrix"
return(results)
}
|
########################################################################################################
# Description: This script loads Acceleratometer Samsung Smartphone Data from UCI HAR Dataset
# and merges the training and test data sets together. It also assigns variable
# names, before pulling out columns only related to standard deviation and means.
# This script also joins the activity ID to the activity label for each row of
# data. After tidying this data set, the script then averages each column but
# grouped by activity and subject ID.
#
# Written: JAL, 2/22/2015
########################################################################################################
run_analysis <- function(){
#Set Filenames and directories
mainf <- "UCI HAR Dataset" #Main Folder
########################################################################
#STEP 1: Merge the training and the test sets to create one data set.
########################################################################
#Load label and Feature Files
activity_labels <- read.table(paste(mainf,"/activity_labels.txt",sep=""),col.names=c("id","activity"))
feature_labels <- read.table(paste(mainf,"/features.txt",sep=""),col.names=c("id","feature"))
#Load and Combine training/test sets for X,Y, and subject
#X Data Sets
x_train <- read.table(paste(mainf,"/train/X_train.txt",sep=""),col.names=feature_labels$feature)
x_test <- read.table(paste(mainf,"/test/X_test.txt",sep=""),col.names=feature_labels$feature)
x_total <- rbind(x_train,x_test)
#Clean Up the column names, where there are multiple periods from unallowed characters
colnames(x_total) <- gsub("\\.\\.","\\.",gsub("\\.\\.\\.","\\.",colnames(x_total)))
#Y Data Sets
y_train <- read.table(paste(mainf,"/train/Y_train.txt",sep=""),col.names=c("activity_id"))
y_test <- read.table(paste(mainf,"/test/Y_test.txt",sep=""),col.names=c("activity_id"))
y_total <- rbind(y_train,y_test)
#Subject Data Sets
subject_train <- read.table(paste(mainf,"/train/subject_train.txt",sep=""),col.names=c("subject_id"))
subject_test <- read.table(paste(mainf,"/test/subject_test.txt",sep=""),col.names=c("subject_id"))
subject_total <- rbind(subject_train,subject_test)
##Combine X,Y, and subject data sets
combined_partial <- cbind(subject_total,y_total,x_total)
### (STEP 3 Performed a Bit Early [Before Step 2], seems to make sense to
### do it here so there's one full table to work from)
########################################################################
#STEP 3: Extract only the measurements on the mean and standard
# deviation for each measurement.
########################################################################
#Use sqldf to join tables and display activity names from the activity_labels file
library(sqldf)
combined_full <- sqldf("
select
activity_labels.activity,
combined_partial.*
from
combined_partial
inner join activity_labels
on combined_partial.activity_id = activity_labels.id;
"
)
#Back to Step 2...
########################################################################
#STEP 2: Extract only the measurements on the mean and standard
# deviation for each measurement.
########################################################################
#Use grepl to find which colnames have "mean" or "std" in them
mean_std_ind <- grepl("mean|std",colnames(combined_full))
#Find "meanFreq" variables to exclude, which were caught by the "mean"
meanFreq_ind <- grepl("meanFreq",colnames(combined_full))
#Final Index Set, excluding "meanFreq" incidents
mean_std_ind_filtered <- mean_std_ind - meanFreq_ind
#Set to Logical
mean_std_ind_filtered <- sapply(as.data.frame(mean_std_ind_filtered),as.logical)
#Get Data from combined_full with colnames containing "mean" or "std"
combined_subset_mean_std <- combined_full[,mean_std_ind_filtered]
#Add Back in the first two columns from the combined_full data set, so
#that the subject number and activity info are still available
combined_tidy <- cbind(combined_full[,c(1,2)],combined_subset_mean_std)
#Rename Array to samsung_tidy
samsung_tidy <- combined_tidy
#STEP 3 ALREADY COMPLETED EARLIER
########################################################################
#STEP 4: Extract only the measurements on the mean and standard
# deviation for each measurement.
########################################################################
#STEP 4 ALREADY COMPLETED-
# The variable names were already set from the features file, and activity and subject_id were duly created
#
########################################################################
#STEP 5: From the data set in step 4, creates a second, independent
# tidy data set with the average of each variable for each
# activity and each subject.
########################################################################
#Load plyr library
library(plyr)
#Get Average of Each Column, Grouped by activity and subject_id
samsung_average_tidy <- ddply(combined_tidy,c("activity","subject_id"),numcolwise(mean))
#Add "Avg_" to the beginning of each variable name to show they're averages
samsung_average_colnames <- colnames(samsung_average_tidy)
colnames(samsung_average_tidy) <- c(samsung_average_colnames[1:2],paste("avg_",samsung_average_colnames[3:68],sep=""))
#Write Output File "step5.txt
write.table(samsung_average_tidy, "step5.txt", row.name = FALSE)
}
| /run_analysis.R | no_license | jlajeune/Getting_And_Cleaning_Data_Project | R | false | false | 5,918 | r | ########################################################################################################
# Description: This script loads Acceleratometer Samsung Smartphone Data from UCI HAR Dataset
# and merges the training and test data sets together. It also assigns variable
# names, before pulling out columns only related to standard deviation and means.
# This script also joins the activity ID to the activity label for each row of
# data. After tidying this data set, the script then averages each column but
# grouped by activity and subject ID.
#
# Written: JAL, 2/22/2015
########################################################################################################
## Load the UCI HAR (Samsung smartphone accelerometer) data set, merge the
## training and test splits, attach activity names, keep only mean/std
## measurements, and write per-activity/per-subject averages to "step5.txt".
##
## Expects the unzipped "UCI HAR Dataset" folder in the working directory.
## Returns: invisibly, nothing of interest; its purpose is the side effect of
## writing "step5.txt".
run_analysis <- function(){
  ## Main folder holding the unzipped data
  mainf <- "UCI HAR Dataset"

  ########################################################################
  # STEP 1: Merge the training and the test sets to create one data set.
  ########################################################################
  ## Activity and feature label files
  activity_labels <- read.table(file.path(mainf, "activity_labels.txt"), col.names = c("id", "activity"))
  feature_labels  <- read.table(file.path(mainf, "features.txt"), col.names = c("id", "feature"))

  ## X data sets (measurements), columns named from the feature labels
  x_train <- read.table(file.path(mainf, "train", "X_train.txt"), col.names = feature_labels$feature)
  x_test  <- read.table(file.path(mainf, "test", "X_test.txt"), col.names = feature_labels$feature)
  x_total <- rbind(x_train, x_test)

  ## Clean up the column names, where disallowed characters became runs of periods
  colnames(x_total) <- gsub("\\.\\.", "\\.", gsub("\\.\\.\\.", "\\.", colnames(x_total)))

  ## Y data sets (activity ids)
  y_train <- read.table(file.path(mainf, "train", "Y_train.txt"), col.names = c("activity_id"))
  y_test  <- read.table(file.path(mainf, "test", "Y_test.txt"), col.names = c("activity_id"))
  y_total <- rbind(y_train, y_test)

  ## Subject data sets
  subject_train <- read.table(file.path(mainf, "train", "subject_train.txt"), col.names = c("subject_id"))
  subject_test  <- read.table(file.path(mainf, "test", "subject_test.txt"), col.names = c("subject_id"))
  subject_total <- rbind(subject_train, subject_test)

  ## Combine subject, Y, and X data sets column-wise
  combined_partial <- cbind(subject_total, y_total, x_total)

  ########################################################################
  # STEP 3 (performed early so there is one full table to work from):
  # attach descriptive activity names via a join on activity_id.
  ########################################################################
  library(sqldf)
  combined_full <- sqldf("
select
activity_labels.activity,
combined_partial.*
from
combined_partial
inner join activity_labels
on combined_partial.activity_id = activity_labels.id;
"
  )

  ########################################################################
  # STEP 2: Extract only the measurements on the mean and standard
  #         deviation for each measurement.
  ########################################################################
  ## Columns whose names contain "mean" or "std", excluding "meanFreq"
  ## (a plain logical expression replaces the old subtraction + sapply/as.logical dance)
  mean_std_ind <- grepl("mean|std", colnames(combined_full))
  meanFreq_ind <- grepl("meanFreq", colnames(combined_full))
  mean_std_ind_filtered <- mean_std_ind & !meanFreq_ind
  combined_subset_mean_std <- combined_full[, mean_std_ind_filtered]

  ## Add back the first two columns (activity, subject_id) from the full table
  combined_tidy <- cbind(combined_full[, c(1, 2)], combined_subset_mean_std)
  samsung_tidy <- combined_tidy

  ########################################################################
  # STEP 4: descriptive variable names were already set from the features
  #         file; "activity" and "subject_id" were created above.
  ########################################################################

  ########################################################################
  # STEP 5: create a second tidy data set with the average of each
  #         variable for each activity and each subject.
  ########################################################################
  library(plyr)
  samsung_average_tidy <- ddply(combined_tidy, c("activity", "subject_id"), numcolwise(mean))

  ## Prefix measurement columns with "avg_" to show they are averages.
  ## (length() replaces the old hard-coded index 68, which silently broke if
  ## the number of retained columns changed.)
  samsung_average_colnames <- colnames(samsung_average_tidy)
  colnames(samsung_average_tidy) <- c(samsung_average_colnames[1:2],
                                      paste0("avg_", samsung_average_colnames[3:length(samsung_average_colnames)]))

  ## Write output file "step5.txt".
  ## Bug fix: the argument is "row.names"; "row.name" only worked via partial matching.
  write.table(samsung_average_tidy, "step5.txt", row.names = FALSE)
}
|
\name{REDseq-package}
\alias{REDseq-package}
\alias{REDseq}
\docType{package}
\title{
REDseq
}
\description{
REDSeq is a Bioconductor package for building genomic map of restriction enzyme sites REmap, assigning sequencing tags to RE sites using five different strategies, visualizing genome-wide distribution of differentially cut regions with the REmap as reference and the distance distribution of sequence tags to corresponding RE sites, generating count table for identifying statistically significant RE sites using edgeR or DEseq.
}
\details{
\tabular{ll}{
Package: \tab REDseq\cr
Type: \tab Package\cr
Version: \tab 1.0\cr
Date: \tab 2011-05-10\cr
License: \tab GPL\cr
LazyLoad: \tab yes\cr
}
Typical workflow: call \code{buildREmap} to build a genome-wide map of restriction enzyme (RE) sites, \code{assignSeq2REsite} to assign sequencing tags to RE sites, \code{summarizeByRE} or \code{summarizeBySeq} to build count tables, and \code{binom.test.REDseq} or \code{compareREseq} to identify statistically significant RE sites; see the examples below.
}
\author{
Lihua Julie Zhu
Maintainer:
Lihua Julie Zhu <julie.zhu@umassmed.edu>
}
\references{
1. Roberts, R.J., Restriction endonucleases. CRC Crit Rev Biochem, 1976. 4(2): p. 123-64.
2. Kessler, C. and V. Manta, Specificity of restriction endonucleases and DNA modification methyltransferases a review (Edition 3). Gene, 1990. 92(1-2): p. 1-248. \cr
3. Pingoud, A., J. Alves, and R. Geiger, Restriction enzymes. Methods Mol Biol, 1993. 16: p. 107-200. \cr
4. Anders, S. and W. Huber, Differential expression analysis for sequence count data. Genome Biol, 2010. 11(10): p. R106. \cr
5. Robinson, M.D., D.J. McCarthy, and G.K. Smyth, edgeR: a Bioconductor package for differential expression analysis of digital gene expression data. Bioinformatics, 2010. 26(1): p. 139-40. \cr
6. Zhu, L.J., et al., ChIPpeakAnno: a Bioconductor package to annotate ChIP-seq and ChIP-chip data. BMC Bioinformatics, 2010. 11: p. 237. \cr
7. Pages, H., BSgenome package. http://bioconductor.org/packages/2.8/bioc/\cr
vignettes/BSgenome/inst/doc/GenomeSearching.pdf \cr
8. Zhu, L.J., et al., REDseq: A Bioconductor package for Analyzing High Throughput Sequencing Data from Restriction Enzyme Digestion. (In preparation) \cr
}
\keyword{ package }
\seealso{
buildREmap, assignSeq2REsite, plotCutDistribution, distanceHistSeq2RE, summarizeByRE, summarizeBySeq, compareREseq, binom.test.REDseq, bam2GRanges
}
\examples{
if(interactive()){
library(ChIPpeakAnno)
REpatternFilePath = system.file("extdata", "examplePattern.fa", package="REDseq")
library(BSgenome.Celegans.UCSC.ce2)
buildREmap( REpatternFilePath, BSgenomeName=Celegans, outfile=tempfile(),chr='all')
bamfile = system.file("extdata", "example.bam", package="REDseq")
GRlistBam <- bam2GRanges(bamfile, bamindex = bamfile, subChr = 'chr12', pairedEndReads = FALSE, removeDuplicateReads = FALSE, minMapq = 10, maxFragmentWidth = 1000, blacklist = NULL, what = "mapq")
library(REDseq)
data(example.REDseq.list)
data(example.map.list)
r.unique = assignSeq2REsite(example.REDseq.list, example.map.list, cut.end ='5',
cut.offset = 1, seq.length = 36, allowed.offset = 5,
min.FragmentLength = 60, max.FragmentLength = 300,
partitionMultipleRE = "unique")
r.average = assignSeq2REsite(example.REDseq.list, example.map.list, cut.end ='5',
cut.offset = 1, seq.length = 36, allowed.offset = 5, min.FragmentLength = 60,
max.FragmentLength = 300, partitionMultipleRE = "average")
r.random = assignSeq2REsite(example.REDseq.list, example.map.list, cut.end ='5',
cut.offset = 1, seq.length = 36, allowed.offset = 5, min.FragmentLength = 60,
max.FragmentLength = 300, partitionMultipleRE = "random")
r.best = assignSeq2REsite(example.REDseq.list, example.map.list, cut.end ='5',
cut.offset = 1, seq.length = 36, allowed.offset = 5, min.FragmentLength = 60,
max.FragmentLength = 300, partitionMultipleRE = "best")
r.estimate = assignSeq2REsite(example.REDseq.list, example.map.list, cut.end ='5',
cut.offset = 1, seq.length = 36, allowed.offset = 5, min.FragmentLength = 60,
max.FragmentLength = 300, partitionMultipleRE = "estimate")
r.estimate$passed.filter
r.estimate$notpassed.filter
data(example.assignedREDseq)
plotCutDistribution(example.assignedREDseq,example.map.list,
chr="2", xlim =c(3012000, 3020000))
distanceHistSeq2RE(example.assignedREDseq,ylim=c(0,20))
summarizeByRE(example.assignedREDseq,by="Weight",sampleName="example")
REsummary =summarizeByRE(example.assignedREDseq,by="Weight")
binom.test.REDseq(REsummary)
}
}
| /man/REDseq-package.Rd | no_license | JunhuiLi11/REDseq | R | false | false | 4,289 | rd | \name{REDseq-package}
\alias{REDseq-package}
\alias{REDseq}
\docType{package}
\title{
REDseq
}
\description{
REDSeq is a Bioconductor package for building genomic map of restriction enzyme sites REmap, assigning sequencing tags to RE sites using five different strategies, visualizing genome-wide distribution of differentially cut regions with the REmap as reference and the distance distribution of sequence tags to corresponding RE sites, generating count table for identifying statistically significant RE sites using edgeR or DEseq.
}
\details{
\tabular{ll}{
Package: \tab REDseq\cr
Type: \tab Package\cr
Version: \tab 1.0\cr
Date: \tab 2011-05-10\cr
License: \tab GPL\cr
LazyLoad: \tab yes\cr
}
~~ An overview of how to use the package, including the most important functions ~~
}
\author{
Lihua Julie Zhu
Maintainer:
Lihua Julie Zhu <julie.zhu@umassmed.edu>
}
\references{
1. Roberts, R.J., Restriction endonucleases. CRC Crit Rev Biochem, 1976. 4(2): p. 123-64.
2. Kessler, C. and V. Manta, Specificity of restriction endonucleases and DNA modification methyltransferases a review (Edition 3). Gene, 1990. 92(1-2): p. 1-248. \cr
3. Pingoud, A., J. Alves, and R. Geiger, Restriction enzymes. Methods Mol Biol, 1993. 16: p. 107-200. \cr
4. Anders, S. and W. Huber, Differential expression analysis for sequence count data. Genome Biol, 2010. 11(10): p. R106. \cr
5. Robinson, M.D., D.J. McCarthy, and G.K. Smyth, edgeR: a Bioconductor package for differential expression analysis of digital gene expression data. Bioinformatics, 2010. 26(1): p. 139-40. \cr
6. Zhu, L.J., et al., ChIPpeakAnno: a Bioconductor package to annotate ChIP-seq and ChIP-chip data. BMC Bioinformatics, 2010. 11: p. 237. \cr
7. Pages, H., BSgenome package. http://bioconductor.org/packages/2.8/bioc/\cr
vignettes/BSgenome/inst/doc/GenomeSearching.pdf \cr
8. Zhu, L.J., et al., REDseq: A Bioconductor package for Analyzing High Throughput Sequencing Data from Restriction Enzyme Digestion. (In preparation) \cr
}
\keyword{ package }
\seealso{
buildREmap, assignSeq2REsite, plotCutDistribution, distanceHistSeq2RE, summarizeByRE, summarizeBySeq, compareREseq, binom.test.REDseq, bam2GRanges
}
\examples{
if(interactive()){
library(ChIPpeakAnno)
REpatternFilePath = system.file("extdata", "examplePattern.fa", package="REDseq")
library(BSgenome.Celegans.UCSC.ce2)
buildREmap( REpatternFilePath, BSgenomeName=Celegans, outfile=tempfile(),chr='all')
bamfile = system.file("extdata", "example.bam", package="REDseq")
GRlistBam <- bam2GRanges(bamfile, bamindex = bamfile, subChr = 'chr12', pairedEndReads = FALSE, removeDuplicateReads = FALSE, minMapq = 10, maxFragmentWidth = 1000, blacklist = NULL, what = "mapq")
library(REDseq)
data(example.REDseq.list)
data(example.map.list)
r.unique = assignSeq2REsite(example.REDseq.list, example.map.list, cut.end ='5',
cut.offset = 1, seq.length = 36, allowed.offset = 5,
min.FragmentLength = 60, max.FragmentLength = 300,
partitionMultipleRE = "unique")
r.average = assignSeq2REsite(example.REDseq.list, example.map.list, cut.end ='5',
cut.offset = 1, seq.length = 36, allowed.offset = 5, min.FragmentLength = 60,
max.FragmentLength = 300, partitionMultipleRE = "average")
r.random = assignSeq2REsite(example.REDseq.list, example.map.list, cut.end ='5',
cut.offset = 1, seq.length = 36, allowed.offset = 5, min.FragmentLength = 60,
max.FragmentLength = 300, partitionMultipleRE = "random")
r.best = assignSeq2REsite(example.REDseq.list, example.map.list, cut.end ='5',
cut.offset = 1, seq.length = 36, allowed.offset = 5, min.FragmentLength = 60,
max.FragmentLength = 300, partitionMultipleRE = "best")
r.estimate = assignSeq2REsite(example.REDseq.list, example.map.list, cut.end ='5',
cut.offset = 1, seq.length = 36, allowed.offset = 5, min.FragmentLength = 60,
max.FragmentLength = 300, partitionMultipleRE = "estimate")
r.estimate$passed.filter
r.estimate$notpassed.filter
data(example.assignedREDseq)
plotCutDistribution(example.assignedREDseq,example.map.list,
chr="2", xlim =c(3012000, 3020000))
distanceHistSeq2RE(example.assignedREDseq,ylim=c(0,20))
summarizeByRE(example.assignedREDseq,by="Weight",sampleName="example")
REsummary =summarizeByRE(example.assignedREDseq,by="Weight")
binom.test.REDseq(REsummary)
}
}
|
####################
## Purpose: This script holds useful modelling formulas
########################
require(data.table)
require(lme4)
source("FILEPATH/data_tests.R")
################### INV LOGIT #########################################
## Inverse of the logit link: maps any real value (vectorized) into (0, 1).
inv_logit <- function(x){
  denom <- 1 + exp(-x)
  1 / denom
}
################### MAKE R-SYNTAX FORMULA #########################################
## make formula with R ssyntax
## Build an R model formula "response ~ fixefs + (1|ranefs) + add_terms".
##
## Args:
##   response  character name of the response variable.
##   fixefs    optional character vector of fixed-effect terms.
##   ranefs    optional character vector of random-intercept grouping variables;
##             each becomes a "(1|g)" term.
##   add_terms optional character vector of extra terms (e.g. interactions)
##             appended verbatim at the end.
##
## Returns: a formula object (also messaged to the console).
make_formula <- function(response, fixefs=NULL, ranefs=NULL, add_terms=NULL){
  ## Right-hand side: fixed effects first, then random intercepts. The old
  ## version duplicated this paste logic across four nested branches.
  rhs_terms <- c(fixefs,
                 if (length(ranefs) > 0) paste0("(1|", ranefs, ")"))
  ## Intercept-only model when neither fixed nor random effects are supplied
  ## (matches the original's explicit "response~1").
  if (length(rhs_terms) == 0) {
    rhs_terms <- "1"
  }
  ## Extra user-supplied terms always go last.
  rhs_terms <- c(rhs_terms, add_terms)
  string_form <- gsub(" ", "", paste0(response, "~", paste(rhs_terms, collapse = "+")))
  message("Model formula:")
  message(" ", string_form)
  return(as.formula(string_form))
}
################### EXPAND DATA #########################################
## Arguments
# - starts_ends: a named list of charcter vectors of length 2.
# The name of the list is the new variable to be created.
# The first element is the variable of the starting value.
# The second element is the name of the variable of the end value
# ex: list(age=c("age_start", "age_end"), exposure=c("exposure_start", "exposure_end"))
#
# - var_values: a named list of numeric vectors of the values to split out at for a given variable
#
# - value_weights: a named list of numeric vectors, the same length as the corresponding vector in var_values. Weights applied to each
## Expand data 'integration' if a data point covers multiple discrete values
## Expand ("integrate out") rows whose start/end columns span several discrete
## values of a variable. For each dimension named in `starts_ends`, every row
## is duplicated once per covered value of `var_values`, and `split_wt` is
## scaled so the duplicated rows sum back to the original row's weight.
##
## Args:
##   df            data.table of raw rows.
##   starts_ends   named list; each element is c(start_col, end_col); the
##                 element name becomes the new single-value column.
##   var_values    named list of numeric vectors: the discrete values to split at.
##   value_weights optional named list of per-value weights (same lengths as
##                 var_values). NOTE(review): this branch is unfinished -- see below.
##
## Returns: expanded data.table, one row per (original row x covered value);
## temporary bookkeeping columns (temp_*, n_*, *_temp_seq) are left in place.
expand_data <- function(df, starts_ends, var_values, value_weights=NULL){
df <- copy(df)
################### CHECKS #########################################
## make sure certain cols don't already exist
new_cols<-c("temp_seq", paste0(names(starts_ends), "_temp_seq"), paste0("n_", names(starts_ends)), "split_wt")
lapply(new_cols, df=df, check_exists, not_exist=T) ##sy: check_exists function comes from data_tests.R script
## make sure certain cols do exist
lapply(unlist(starts_ends), df=df, check_exists)
################### SETUP #########################################
## create row id number
if("temp_seq" %in% names(df)){stop("Already a column called 'temp_seq' in data frame!")}
df[, temp_seq:=1:.N]
## create original weight column where each row is 1
df[, split_wt:=1]
################### LOOP THROUGH DIMENSION #########################################
## get number of values each data point covers in each dimension
for(d in 1:length(starts_ends)){
## store some input info
varname <- names(starts_ends)[d]
vals <- var_values[[d]]
cols <- starts_ends[[d]]
## create dim specific ind (for merging)
df[, paste0(varname, "_temp_seq"):=1:.N]
################### ROUND VALUES TO NEAREST VALUE #########################################
## create temporary roundedstart/end cols
## helper: snap a raw value to the nearest mesh value (ties -> first match)
get_nearest <- function(x, vals){
diffs <- abs(x - vals)
out <- unique(vals[diffs==min(diffs)])[1]
return(out)
}
## get temporary starting value
df[, paste0("temp_", cols[1]):=sapply(get(cols[1]), get_nearest, vals=vals)]
df[, paste0("temp_", cols[2]):=sapply(get(cols[2]), get_nearest, vals=vals)]
## make sure there are no end vals greater than starting vals
if(any(df[[paste0("temp_", cols[2])]]<df[[paste0("temp_", cols[1])]])){stop("Ending value smaller than starting value for ", names(starts_ends)[d])}
################### FIND NUMBER OF EXPANSIONS FOR EACH ROW #########################################
## get number of values to expand out between the start and end for each row
for(i in 1:nrow(df)){
## this line sums up the T/F for each val between the lower and upper for the row being looped through
n_bet <- sum(sapply(vals, FUN=data.table::between, lower=df[[paste0("temp_", cols[1])]][i], upper=df[[paste0("temp_", cols[2])]][i]))
df[i, paste0("n_", varname):=n_bet]
}
################### EXPAND ROWS #########################################
## expand the number of rows
## each original row id is repeated once per covered mesh value
expanded <- as.data.table(rep(df[[paste0(varname, "_temp_seq")]], times=df[[paste0("n_", varname)]]))
## get the new variable values
new_vals <- unlist(lapply(1:length(unique(df[[paste0(varname,"_temp_seq")]])), function(x){
loop_seq <- unique(df[[paste0(varname,"_temp_seq")]])[x]
## get minimum val for this value
start_val <- df[get(paste0(varname, "_temp_seq"))==loop_seq, get(paste0("temp_", cols[1]))]
## get the values
temp_vals <- var_values[[d]][var_values[[d]]>=start_val]
temp_vals <- temp_vals[1:(df[get(paste0(varname, "_temp_seq"))==loop_seq, get(paste0("n_", varname))])]
return(temp_vals)
}))
expanded <- cbind(expanded, new_vals)
setnames(expanded, c("V1", "new_vals"), c(paste0(varname, "_temp_seq"), varname))
df <- merge(expanded, df, by=paste0(varname, "_temp_seq"))
################### SCALE DOWN DATA WEIGHTS #########################################
## reduce weights
if(missing(value_weights)){
df[, split_wt:=split_wt/get(paste0("n_", varname))] ##sy: if each value given equal weight, just divide by number of splits performed
} else {
## merge weights onto df
wt_dt <- data.table(val=var_values[[d]], wt=value_weights[[d]])
## NOTE(review): this merge is incomplete -- by.x=paste0() evaluates to ""
## and split_wt is never rescaled by `wt`, so the value_weights branch
## appears unfinished. TODO: confirm the intended join keys (likely
## by.x=varname, by.y="val") before using value_weights.
df <- merge(df, wt_dt, by.x=paste0())
}
}
################### CLEAN OUT TEMP COLS #########################################
## NOTE(review): despite the banner above, temp columns are not removed here.
return(df)
}
################### MAKE FIXED EFFECTS MATRIX #########################################
## Build a fixed-effects design matrix from the columns named in `fixefs`.
## Factor/character covariates are expanded to dummy columns via
## model.matrix(); the intercept column model.matrix adds is dropped and only
## re-added when add_intercept is TRUE.
##
## Args:
##   df            data.frame/data.table containing the covariate columns.
##   fixefs        character vector of fixed-effect column names (NULL = none).
##   add_intercept prepend a column of 1s?
##
## Returns: numeric matrix with nrow(df) rows; a 0-column matrix when there
## are no fixed effects and no intercept, so downstream cbind() still works.
make_fixef_matrix <- function(df, fixefs=NULL, add_intercept=F){
df <- copy(df)
df <- as.data.table(df)
## dummy response so a two-sided model formula can be constructed
df[, temp_response:=1]
if(length(fixefs)>0){
fix_form <- paste0("temp_response~1+", paste(fixefs, collapse="+"))
## move fixef columns to the front -- presumably so the design-matrix
## column order follows `fixefs`; TODO confirm this is the intent
if(all(fixefs %in% names(df))){
setcolorder(df, fixefs)
}
X <- as.matrix(model.matrix(formula(fix_form), data=df))
##sy: drop first column, the intercept created by model.matrix. This gets recreated in toggle below
## NOTE(review): with a single remaining column, X[, -1] collapses to a
## vector and as.matrix() loses the column name.
X <- as.matrix(X[, -1])
} else {
#X<-as.matrix(rep(0, times=nrow(df)), ncol=1)
## no fixed effects: return an n x 0 matrix
X <- array(0, dim = c(nrow=nrow(df), 0))
}
if(add_intercept==T){
X <- cbind(rep(1, times=nrow(X)), X)
}
return(X)
}
################### MAKE RANDOM EFFECTS MATRIX #########################################
## create random effects matrix; training matrix to give random effects in case you're trying to make a prediction matrix
## Build a random-intercept design matrix Z (observations x group levels).
## With training_matrix=NULL, Z is built from the data using lme4's mkReTrms;
## otherwise a prediction matrix is built whose columns line up with the
## training matrix. Columns are named "<ranef>;value:<level>" so training and
## prediction levels can be matched by name.
##
## Args:
##   df              data.table containing the grouping columns.
##   form            formula containing (1|group) terms.
##   training_matrix optional Z from a previous call, used to fix column
##                   order/levels for prediction.
##
## Returns: numeric matrix; an n x 0 array when the formula has no (1|.) terms.
make_ranef_matrix <- function(df, form, training_matrix=NULL){
require(lme4) ## this gets the mkReTrms function
df <- copy(df)
## find random effects and response
## findbars() returns the (1|g) terms; element [[3]] of each is the grouping variable
ranefs <- unlist(as.character(lapply(findbars(form), "[[", 3)))
response <- as.character(form)[2] ## may not work in all cases
cols <- ranefs
if(response!=""){cols <- c(response, cols)}
#df<-df[, c(cols), with=F]
## a column literally named "x" would collide with the sapply argument below
if("x" %in% names(df)){
stop("Column named 'x' in your df, please rename!")
}
if(length(ranefs)>0){
## format values of ranef columns to keep track of them easily
## (each value becomes "<ranef>;value:<level>", matching the Z column names)
invisible(
ranef_order <- lapply(ranefs, function(x){
df[, (x):=paste0(x,";value:", get(x))]
})
)
if(is.null(training_matrix)){
## create matrix using lme4s functions
re_terms <- mkReTrms(findbars(as.formula(form)), model.frame(subbars(form), data=df))
Z <- as.matrix(t(re_terms[["Zt"]])) ## Z is returned transposed
flist <- unique(unlist(re_terms[["flist"]])) ## flist gets the correct order of random effects
Z <- Z[, flist] ## this re-orders Z to have correct order of random effects
} else {
df <- df[, ranefs, with=F]
message("Constructing prediction matrix..")
## lapply to evaluate where the training matrix values equal prediction matrix values
Z <- sapply(1:ncol(training_matrix), function(x){
ranef <- tstrsplit(colnames(training_matrix)[x], ";value:")[[1]]
as.numeric(df[[ranef]]==colnames(training_matrix)[x])
})
## assert that Z is a matrix
## (sapply collapses to a vector when df has a single row)
if(!is.matrix(Z)){Z <- matrix(Z, ncol=ncol(training_matrix))}
colnames(Z) <- colnames(training_matrix)
message("Done")
}
} else {
Z <- array(0, dim = c(nrow=nrow(df), 0)) ## return empty array if no random effects
}
return(Z)
}
################### GET NUMBER OF RANDOM EFFECTS IN EACH LEVEL #########################################
## Count the number of distinct levels of each random-effect grouping variable
## in `form` (one count per (1|g) term), as needed for TMB map/parameter setup.
## Returns a length-0 array when the formula has no random effects.
get_ranef_lvl_counts <- function(df, form){
  ## pull the grouping-variable names out of the (1|g) terms
  grouping_vars <- unlist(as.character(lapply(findbars(form), "[[", 3)))
  if (length(grouping_vars) == 0) {
    return(array(0, dim = 0))
  }
  vapply(grouping_vars, function(v) length(unique(df[[v]])), integer(1), USE.NAMES = FALSE)
}
################### CREATE MATRiX FOR DELTA SMOOTHING #########################################
## xvals must be a named list. Each element needs to be a numeric vector of the values to estimate differences between for a given dimension
## df must be a data.table of data with column names corresponding to the names of the elements in the xvals list
## Build a "delta" (piecewise-linear spline) design matrix. For each named
## dimension in xvals, column k records how far each data value has progressed
## through the interval (xvals[k-1], xvals[k]]: 0 before the interval, the
## partial distance when inside it, and the full interval width once past it.
## Coefficients on these columns therefore act as slope changes between mesh points.
##
## Args:
##   xvals named list of numeric mesh-point vectors; names must be df columns.
##   df    data.table with one column per name in xvals.
##
## Returns: matrix with one row per df row and columns "<dim>_2".."<dim>_K"
## for each dimension, cbind-ed across dimensions.
make_delta_matrix <- function(xvals, df){
## xvals needs to be a named list.
## checks
if(is.null(names(xvals))){stop("xvals must be a named list")}
if(!all(names(xvals) %in% names(df))){stop("Names of xvals not in data")}
## loop over each dimension
full_matrix <- lapply(1:length(xvals), function(d){
## loop over each value
sub_matrix <- sapply(2:length(xvals[[d]]), function(x){
xval <- xvals[[d]][x]
prev_xval <- xvals[[d]][x-1]
## stop if any values are less than the initial values
## NOTE(review): stop(message(...)) prints the text via message() and then
## raises an error with an empty message; stop(paste0(...)) was likely intended.
if(x==2){
if(any(df[[names(xvals)[d]]] < prev_xval)){stop(message("You have values in ", names(xvals)[d], " that are less than the minimum mesh point!"))}
}
## get binaries if row values are greater than or equal to the previous xval.
temp <- as.numeric(df[[names(xvals)[d]]] > prev_xval)
## get change from previous xval or change from the value in the data and the xval if the value in the data is between previous xval
change <- ifelse(data.table::between(df[[names(xvals)[d]]], prev_xval, xval, incbounds=F), df[[names(xvals)[d]]] - prev_xval, xval - prev_xval)
temp <- temp * change
return(temp)
})
## give colnames based on index for clarity
colnames(sub_matrix) <- paste0(names(xvals)[d], "_", 2:length(xvals[[d]]))
return(sub_matrix)
})
## bind the per-dimension matrices side by side
Reduce(cbind, full_matrix)
}
################### INTERACT DELTA VARS #########################################
## delta vars need to be continuous but it's useful to interact a continuous var with a categorical (or other continuos) one
## this functions creates an A matrix with interacted vars
## Interact each pair of columns in `interact_pairs` and append the resulting
## interaction column(s) to df. A numeric:categorical pair becomes one dummy
## column per category level multiplied by the numeric variable (first level
## dropped as the reference when drop_reference=TRUE); a numeric:numeric pair
## becomes a single product column.
##
## Args:
##   df             data.table with the columns named in interact_pairs.
##   interact_pairs list of length-2 character vectors of column names.
##   drop_reference drop the first category level for categorical pairs?
##
## Returns: list(intrxn_df = df with new columns, intrxn_cols = their names).
## Depends on make_ranef_matrix() defined in this file for the dummy expansion.
interact_delta_vars <- function(df, interact_pairs, drop_reference=T){
df <- copy(df)
invisible(
intrxn_cols <- unlist(lapply(1:length(interact_pairs), function(x){
## detect classes of pairs
pair <- interact_pairs[[x]]
pair_classes <- lapply(df[, c(pair), with=F], class)
message(" Printing detected classes for ", paste(pair, collapse=":"))
print(pair_classes)
char_col <- pair[pair_classes %in% c("factor", "character")]
## create interaction cols for character
if(length(char_col)>1){stop(paste0(length(char_col), " vars detected as character, need one numeric col"))}
if(length(char_col)>0){
## make binary ranef matrix of categorical
Z_full <- make_ranef_matrix(df, form=as.formula(paste0("~(1|", char_col, ")")))
## drop first column (reference col)
if(drop_reference==T){
Z_full <- Z_full[, -1]
}
## multiply each column by the continuous var
Z_full <- Z_full*df[[setdiff(pair, char_col)]]
colnames(Z_full) <- paste0(setdiff(pair, char_col), ":", colnames(Z_full))
intrxn_col <- colnames(Z_full)
Z_full <- as.data.table(Z_full)
df[, (intrxn_col):=Z_full]
} else {
## create interaction col if both cols are numeric/integer
intrxn_col <- paste0(pair, collapse=":")
df[, (intrxn_col):=get(pair[1]) * get(pair[2])]
}
return(intrxn_col)
}))
)
return(list(intrxn_df=df, intrxn_cols=intrxn_cols))
}
################### CREATE BASE MATRIX #########################################
## make a baseic matrix with sparsity besides tri-diagonal
## Build the two n_sites x n_sites pieces of a tri-diagonal precision
## structure (sparse elsewhere). With `rho` given, they encode the AR1-style
## precision (1 + rho^2 on the diagonal, -rho on the first off-diagonals);
## without it, they are a plain identity diagonal and a 0/1 off-diagonal mask.
##
## Args:
##   n_sites number of rows/columns.
##   rho     optional correlation parameter for the exponential-decay precision.
##
## Returns: list of two sparse "dgTMatrix" objects:
##   [[1]] the diagonal component, [[2]] the off-diagonal component.
make_tridiag_matrix <- function(n_sites, rho=NULL){
  m0 <- matrix(0, ncol = n_sites, nrow = n_sites)
  m1 <- copy(m0)
  ## if a correlation is given, create precision matrix for an exponential decay
  if(!is.null(rho)){
    m0[col(m0) == row(m0)] <- 1 + rho^2                        ## diagonals
    m1[col(m1) == row(m1) - 1 | col(m1) == row(m1) + 1] <- -rho ## off-diagonals
    m0 <- as(m0, "dgTMatrix")
    ## bug fix: m1 was previously left as a dense base matrix in this branch,
    ## unlike the rho=NULL branch; convert it so both branches return the
    ## same sparse class.
    m1 <- as(m1, "dgTMatrix")
  } else {
    m0[col(m0) == row(m0)] <- 1                                 ## diagonals
    m1[col(m1) == row(m1) - 1 | col(m1) == row(m1) + 1] <- 1    ## off-diagonals
    m0 <- as(m0, "dgTMatrix")
    m1 <- as(m1, "dgTMatrix")
  }
  return(list(m0, m1))
}
################### SIMULATE MV NORMAL W/ CHOLESKY #########################################
## Simulate from a multivariate normal parameterized by its sparse precision
## matrix, using a supernodal Cholesky factorization instead of inverting
## the precision.
##
## Args:
##   mu     mean vector (NULL = zero vector matching the precision dimension).
##   prec   sparse symmetric precision matrix (Matrix package class).
##   n.sims number of draws.
##
## Returns: n.sims x length(mu) matrix of draws; columns named like prec.
rmvnorm_prec <- function(mu = NULL, prec, n.sims) {
  if(is.null(mu)) {
    mu <- rep(0, dim(prec)[1])
  }
  ## i.i.d. standard normal seeds. (Previously MASS::mvrnorm(n, 0, 1), which
  ## produces the same N(0,1) draws but adds a needless MASS dependency and a
  ## 1x1 eigen-decomposition.)
  z <- matrix(rnorm(length(mu) * n.sims), ncol = n.sims)
  L <- Matrix::Cholesky(prec, super = TRUE)
  z <- Matrix::solve(L, z, system = "Lt") ## z = Lt^-1 %*% z
  z <- Matrix::solve(L, z, system = "Pt") ## z = Pt %*% z
  z <- as.matrix(z)
  outmat <- t(mu + z)
  colnames(outmat) <- colnames(prec)
  return(outmat)
}
################### GET TMB PARAMETER DRAWS #########################################
## Draw parameter vectors from the asymptotic (multivariate normal)
## distribution of a fitted TMB model:
##  - exactly 1 parameter: univariate normal draws,
##  - random effects present: draws from the joint precision via rmvnorm_prec(),
##  - fixed effects only: mvtnorm::rmvnorm on the fixed-effect covariance.
##
## Args:
##   model_object fitted TMB objective function (with $env populated).
##   n_draws      number of simulated parameter vectors.
##
## Returns: data.table with one column per parameter and n_draws rows.
simulate_tmb_draws <- function(model_object, n_draws=1000){
require(mvtnorm)
require(Matrix)
require(TMB)
## run report to get joint precision
report <- TMB::sdreport(model_object, getJointPrecision=T)
## estimates stored from best iteration
ests <- model_object$env$last.par.best
##s if only 1 fixed effect parameter
if(length(ests)==1){
se <- as.matrix(sqrt(report$cov.fixed))
par_draws <- rnorm(n=n_draws, mean=ests, sd=se)
par_draws <- data.table(par_draws)
names(par_draws) <- names(ests)
} else {
if(!is.null(model_object$env$random)){
#cov_mat<-as.matrix(Matrix::solve(report$jointPrecision)) ## this will break if Hessian not pos/def
## NOTE(review): `n.sim=` relies on partial argument matching to
## rmvnorm_prec's `n.sims` parameter -- spell it out if touched.
par_draws <- rmvnorm_prec(mu=ests, prec=report$jointPrecision, n.sim=n_draws)
} else {
## if only fixed effects
cov_mat <- report$cov.fixed
par_draws <- mvtnorm::rmvnorm(n=n_draws, mean=ests, sigma=cov_mat)
}
}
return(as.data.table(par_draws))
}
################### CALCULATE PREDICTIONS FROM DRAWS #########################################
## Evaluate a prediction expression (a string of R code in `prediction_math`)
## once per parameter draw, returning either the per-draw predictions (wide)
## or draw summaries (point estimate with SE, or with quantile bounds).
##
## Args:
##   prediction_math string of R code; symbols must match names in
##                   draw_list/data_list.
##   draw_list       named list of draw matrices (rows = draws).
##   data_list       named list of fixed (non-draw) objects used by the expression.
##   return_draws    TRUE -> wide data.table of per-draw predictions.
##   upper_lower     TRUE (with return_draws=FALSE) -> quantile bounds instead of SE.
##   mid_fun         name of the summary function for the point estimate.
##   quants          lower/upper quantile probabilities for the bounds.
##
## Returns: data.table of predictions, one row per prediction row.
##
## NOTE(review): uses eval(parse(text=...)) on caller-supplied strings
## (prediction_math, mid_fun); callers must fully control those inputs,
## since arbitrary code will be executed.
predict_draws <- function(prediction_math, draw_list, data_list=NULL,
return_draws=T, upper_lower=F,
mid_fun = "mean", quants = c(0.025, 0.975)){
################### SUB FUNCTIONS #########################################
## evaluate prediction_math for draw number x, returning a long data.table
calc_draw <- function(x, draw_list, temp_env, prediction_math){
sub_draws <- lapply(draw_list, "[", x, ) #function(n){ ## this syntax gets an entire row
# n[x]
# })
##s add draw vals to temporary environemnt
lapply(par_names, function(n){
temp_env[[n]]<-as.numeric(sub_draws[[n]])
})
## run the prediction formula
pred <- eval(parse(text=prediction_math), envir=temp_env)
## clear this draw's parameter values so they cannot leak into the next draw
rm(list = names(draw_list), envir = temp_env)
return(data.table(draw=paste0("draw", x-1), pred_row=1:length(pred), pred=as.numeric(pred)))
}
################### SETUP ENVIRONMENT #########################################
## get number of draws
n_draws <- unique(unlist(lapply(draw_list, nrow)))
## check to make sure number of draws are correct
if(length(n_draws)>1){stop("Varying number of draws supplied to elements of draw_list")}
## get names of objects
par_names <- names(draw_list)
data_names <- names(data_list)
## create a temporary environment to store values. Putting data here since data won't have draws (if data has draws, put it in draw_list)
temp_env <- new.env()
invisible(
lapply(data_names, function(x){
temp_env[[x]] <- data_list[[x]]
})
)
## convert draw list to matrix
draw_list <- lapply(draw_list, as.matrix)
################### CALCULATE DRAWS #########################################
message("Calculating draws...")
linpred_draws <- lapply(1:n_draws, calc_draw,
draw_list=draw_list, temp_env=temp_env, prediction_math=prediction_math) ## sub-args
#message("Done with pred lapply")
################### FORMAT AND RETURN #########################################
## format draw output
linpred_draws <- rbindlist(linpred_draws)
setkey(linpred_draws, pred_row)
if(return_draws==T){
## make wide on draws if returning draws
preds <- dcast(linpred_draws, formula = pred_row ~ draw, value.var = "pred")
} else {
## build the summary call, e.g. "mean(pred)", from mid_fun
mid_txt <- paste0(mid_fun,"(pred)")
if(upper_lower==T){
preds <- linpred_draws[, .(pred=eval(parse(text = mid_txt)), lower=quantile(pred, probs=quants[1]), upper=quantile(pred, probs=quants[2])), by="pred_row"]
} else {
## calculate mean and SD of draws if not returing draws and want SE
preds <- linpred_draws[, .(pred=eval(parse(text = mid_txt)), se=sd(pred)), by="pred_row"]
}
}
preds[, pred_row:=NULL]
message("Done")
return(preds)
}
#calc_draw(x=8, draw_list=draw_list, temp_env=temp_env, prediction_math=prediction_math)
################### SCALE VARIABLES #########################################
## get scaling factors first
## Compute the mean and standard deviation of one fixed-effect column,
## returned as a named vector c(mean=, sd=) for later (x - mean)/sd scaling.
get_scalers <- function(fixef, dt){
  ## warn about missing values up front (check_missing from data_tests.R)
  check_missing(fixef, dt, warn = T)
  column <- dt[[fixef]]
  c(mean = mean(column, na.rm = TRUE), sd = sd(column, na.rm = TRUE))
}
## Center and scale (z-score) each fixed-effect column of dt, storing the
## result in new "scaled_<name>" columns and returning the mean/sd used so
## the same transform can later be applied to prediction data (apply_scalers).
##
## Args:
##   dt      data.frame/data.table containing the fixef columns.
##   fixefs  character vector of column names to scale.
##   scalers unused; kept only for backward compatibility of the signature.
##
## Returns: list(scale_factors = named list of c(mean, sd) per fixef,
##               dt = copy of dt with the scaled columns added).
scale_fixefs <- function(dt, fixefs, scalers=NULL){
  dt <- as.data.table(copy(dt))
  ## compute mean/sd once per column (get_scalers also warns on missingness)
  scale_factors <- lapply(fixefs, get_scalers, dt)
  names(scale_factors) <- fixefs
  ## apply (x - mean)/sd using the stored factors. Same values scale() would
  ## produce, but yields plain numeric columns rather than 1-column matrices
  ## with "scaled:center"/"scaled:scale" attributes, and avoids recomputing
  ## the mean/sd a second time. (Dead commented-out code removed.)
  dt[, (paste0("scaled_", fixefs)) := lapply(fixefs, function(x){
    sf <- scale_factors[[x]]
    (get(x) - sf[["mean"]]) / sf[["sd"]]
  })]
  return(list(
    scale_factors = scale_factors,
    dt = dt
  ))
}
################### APPLY SCALERS #########################################
## Apply previously computed scaling factors (from scale_fixefs) to dt,
## creating a "scaled_<name>" column for every scaler whose variable exists.
##
## Args:
##   dt      data.table to rescale (not modified in place).
##   scalers named list of c(mean, sd) vectors, as returned by scale_fixefs.
##
## Returns: copy of dt with the scaled columns added.
apply_scalers <- function(dt, scalers){
  dt <- copy(dt)
  ## iterate over names directly: the old 1:length(scalers) pattern errors on
  ## an empty scaler list (1:0 iterates with out-of-bounds indices)
  for(var in names(scalers)){
    temp_scale <- scalers[[var]]
    if(var %in% names(dt)){
      message("Rescaling ", var)
      dt[, (paste0("scaled_", var)) := (get(var) - temp_scale[1]) / temp_scale[2]]
    }
  }
  return(dt)
}
################### COMPILE TMB MODEL #########################################
## Compile a TMB .cpp model and load the resulting shared library.
##
## Args:
##   x full path (with "/" separators) to the model's .cpp file.
##
## Side effects: compiles in the model's directory and dyn.load()s the dynlib.
compile_tmb <- function(x){
  require(TMB)
  require(data.table)
  folders <- unlist(tstrsplit(x, "/"))
  ## split the path into the directory holding the model and the file itself
  parent_folder <- paste0(paste0(folders[-length(folders)], collapse="/"), "/")
  model_file <- folders[length(folders)]
  model_name <- unlist(tstrsplit(model_file, ".cpp"))[1]
  ## compile from the model's directory; on.exit guarantees the working
  ## directory is restored even if compilation fails (previously a failed
  ## TMB::compile left the caller stranded in parent_folder)
  orig_wd <- getwd()
  setwd(parent_folder)
  on.exit(setwd(orig_wd), add = TRUE)
  TMB::compile(model_file)
  dyn.load(dynlib(model_name))
  invisible(model_name)
}
| /gbd_2019/risk_factors_code/cvd_risks/utility/model_helper_functions.R | no_license | Nermin-Ghith/ihme-modeling | R | false | false | 20,561 | r | ####################
## Purpose: This script holds useful modelling formulas
########################
require(data.table)
require(lme4)
source("FILEPATH/data_tests.R")
################### INV LOGIT #########################################
## Inverse of the logit link: maps any real value (vectorized) into (0, 1).
inv_logit <- function(x){
  denom <- 1 + exp(-x)
  1 / denom
}
################### MAKE R-SYNTAX FORMULA #########################################
## make formula with R ssyntax
## Build an R model formula "response ~ fixefs + (1|ranefs) + add_terms".
##
## Args:
##   response  character name of the response variable.
##   fixefs    optional character vector of fixed-effect terms.
##   ranefs    optional character vector of random-intercept grouping variables;
##             each becomes a "(1|g)" term.
##   add_terms optional character vector of extra terms (e.g. interactions)
##             appended verbatim at the end.
##
## Returns: a formula object (also messaged to the console).
make_formula <- function(response, fixefs=NULL, ranefs=NULL, add_terms=NULL){
  ## Right-hand side: fixed effects first, then random intercepts. The old
  ## version duplicated this paste logic across four nested branches.
  rhs_terms <- c(fixefs,
                 if (length(ranefs) > 0) paste0("(1|", ranefs, ")"))
  ## Intercept-only model when neither fixed nor random effects are supplied
  ## (matches the original's explicit "response~1").
  if (length(rhs_terms) == 0) {
    rhs_terms <- "1"
  }
  ## Extra user-supplied terms always go last.
  rhs_terms <- c(rhs_terms, add_terms)
  string_form <- gsub(" ", "", paste0(response, "~", paste(rhs_terms, collapse = "+")))
  message("Model formula:")
  message(" ", string_form)
  return(as.formula(string_form))
}
################### EXPAND DATA #########################################
## Arguments
# - starts_ends: a named list of charcter vectors of length 2.
# The name of the list is the new variable to be created.
# The first element is the variable of the starting value.
# The second element is the name of the variable of the end value
# ex: list(age=c("age_start", "age_end"), exposure=c("exposure_start", "exposure_end"))
#
# - var_values: a named list of numeric vectors of the values to split out at for a given variable
#
# - value_weights: a named list of numeric vectors, the same length as the corresponding vector in var_values. Weights applied to each
## Expand data 'integration' if a data point covers multiple discrete values
expand_data <- function(df, starts_ends, var_values, value_weights=NULL){
  ## Expand ("integrate out") rows whose [start, end] columns span several
  ## discrete values: each such row becomes one row per covered value, and the
  ## new `split_wt` column records the down-weight applied to each split row.
  ##
  ## Args:
  ##   df:            data.table of observations.
  ##   starts_ends:   named list; each element is c(start_col, end_col); the
  ##                  list name is the new variable created (see header above).
  ##   var_values:    named list of the discrete values for each dimension.
  ##   value_weights: optional per-value weights -- see NOTE below; that branch
  ##                  is currently unfinished.
  ## Returns: the expanded data.table (temporary helper columns included).
  df <- copy(df)
  ################### CHECKS #########################################
  ## make sure certain cols don't already exist
  new_cols<-c("temp_seq", paste0(names(starts_ends), "_temp_seq"), paste0("n_", names(starts_ends)), "split_wt")
  lapply(new_cols, df=df, check_exists, not_exist=T) ##sy: check_exists function comes from data_tests.R script
  ## make sure certain cols do exist
  lapply(unlist(starts_ends), df=df, check_exists)
  ################### SETUP #########################################
  ## create row id number
  if("temp_seq" %in% names(df)){stop("Already a column called 'temp_seq' in data frame!")}
  df[, temp_seq:=1:.N]
  ## create original weight column where each row is 1
  df[, split_wt:=1]
  ################### LOOP THROUGH DIMENSION #########################################
  ## get number of values each data point covers in each dimension
  for(d in 1:length(starts_ends)){
    ## store some input info
    varname <- names(starts_ends)[d]
    vals <- var_values[[d]]
    cols <- starts_ends[[d]]
    ## create dim specific ind (for merging)
    df[, paste0(varname, "_temp_seq"):=1:.N]
    ################### ROUND VALUES TO NEAREST VALUE #########################################
    ## create temporary rounded start/end cols; ties go to the first value
    get_nearest <- function(x, vals){
      diffs <- abs(x - vals)
      out <- unique(vals[diffs==min(diffs)])[1]
      return(out)
    }
    ## get temporary starting value (snapped to the closest mesh point)
    df[, paste0("temp_", cols[1]):=sapply(get(cols[1]), get_nearest, vals=vals)]
    df[, paste0("temp_", cols[2]):=sapply(get(cols[2]), get_nearest, vals=vals)]
    ## make sure there are no end vals greater than starting vals
    if(any(df[[paste0("temp_", cols[2])]]<df[[paste0("temp_", cols[1])]])){stop("Ending value smaller than starting value for ", names(starts_ends)[d])}
    ################### FIND NUMBER OF EXPANSIONS FOR EACH ROW #########################################
    ## get number of values to expand out between the start and end for each row
    for(i in 1:nrow(df)){
      ## this line sums up the T/F for each val between the lower and upper for the row being looped through
      n_bet <- sum(sapply(vals, FUN=data.table::between, lower=df[[paste0("temp_", cols[1])]][i], upper=df[[paste0("temp_", cols[2])]][i]))
      df[i, paste0("n_", varname):=n_bet]
    }
    ################### EXPAND ROWS #########################################
    ## expand the number of rows (each row id repeated once per covered value)
    expanded <- as.data.table(rep(df[[paste0(varname, "_temp_seq")]], times=df[[paste0("n_", varname)]]))
    ## get the new variable values
    new_vals <- unlist(lapply(1:length(unique(df[[paste0(varname,"_temp_seq")]])), function(x){
      loop_seq <- unique(df[[paste0(varname,"_temp_seq")]])[x]
      ## get minimum val for this value
      start_val <- df[get(paste0(varname, "_temp_seq"))==loop_seq, get(paste0("temp_", cols[1]))]
      ## get the values (the n consecutive mesh values starting at start_val)
      temp_vals <- var_values[[d]][var_values[[d]]>=start_val]
      temp_vals <- temp_vals[1:(df[get(paste0(varname, "_temp_seq"))==loop_seq, get(paste0("n_", varname))])]
      return(temp_vals)
    }))
    expanded <- cbind(expanded, new_vals)
    setnames(expanded, c("V1", "new_vals"), c(paste0(varname, "_temp_seq"), varname))
    df <- merge(expanded, df, by=paste0(varname, "_temp_seq"))
    ################### SCALE DOWN DATA WEIGHTS #########################################
    ## reduce weights
    if(missing(value_weights)){
      df[, split_wt:=split_wt/get(paste0("n_", varname))] ##sy: if each value given equal weight, just divide by number of splits performed
    } else {
      ## merge weights onto df
      ## NOTE(review): this branch is unfinished -- merge() below is called
      ## with by.x=paste0() (an empty character vector), which errors at
      ## runtime, and split_wt is never multiplied by the merged weight.
      ## Complete this before passing value_weights.
      wt_dt <- data.table(val=var_values[[d]], wt=value_weights[[d]])
      df <- merge(df, wt_dt, by.x=paste0())
    }
  }
  ################### CLEAN OUT TEMP COLS #########################################
  ## NOTE(review): despite this header, no cleanup is performed; the
  ## temp_seq / n_* / temp_* helper columns are returned to the caller --
  ## confirm whether that is intentional.
  return(df)
}
################### MAKE FIXED EFFECTS MATRIX #########################################
make_fixef_matrix <- function(df, fixefs=NULL, add_intercept=F){
  ## Build the fixed-effects design matrix for `fixefs` from `df`.
  ## A dummy response column lets model.matrix() expand factors/interactions.
  ## Returns an n x p matrix (p = 0 when no fixed effects are supplied);
  ## a leading column of 1s is prepended when add_intercept is TRUE.
  dt <- as.data.table(copy(df))
  dt[, temp_response:=1]
  if (length(fixefs) > 0) {
    design_formula <- paste0("temp_response~1+", paste(fixefs, collapse="+"))
    if (all(fixefs %in% names(dt))) {
      setcolorder(dt, fixefs)
    }
    design <- as.matrix(model.matrix(formula(design_formula), data=dt))
    ## Drop model.matrix()'s automatic intercept column; it is re-added below
    ## only when explicitly requested.
    design <- as.matrix(design[, -1])
  } else {
    ## No fixed effects: an n-row matrix with zero columns.
    design <- array(0, dim = c(nrow=nrow(dt), 0))
  }
  if (add_intercept == TRUE) {
    design <- cbind(rep(1, times=nrow(design)), design)
  }
  return(design)
}
################### MAKE RANDOM EFFECTS MATRIX #########################################
## create random effects matrix; training matrix to give random effects in case you're trying to make a prediction matrix
make_ranef_matrix <- function(df, form, training_matrix=NULL){
  ## Build a 0/1 design matrix for the random-intercept terms of `form`.
  ## If `training_matrix` is supplied, the returned matrix has exactly its
  ## columns (same order), so prediction rows line up with the fitted model.
  ## Returns an n x (total number of random-effect levels) matrix whose column
  ## names encode "<varname>;value:<level>".
  require(lme4) ## this gets the mkReTrms function
  df <- copy(df)
  ## find random effects and response
  ranefs <- unlist(as.character(lapply(findbars(form), "[[", 3)))
  response <- as.character(form)[2] ## may not work in all cases
  cols <- ranefs
  if(response!=""){cols <- c(response, cols)}
  #df<-df[, c(cols), with=F]
  if("x" %in% names(df)){
    stop("Column named 'x' in your df, please rename!")
  }
  if(length(ranefs)>0){
    ## format values of ranef columns to keep track of them easily:
    ## each level becomes "<varname>;value:<level>" so column names are
    ## globally unique across random-effect variables
    invisible(
      ranef_order <- lapply(ranefs, function(x){
        df[, (x):=paste0(x,";value:", get(x))]
      })
    )
    if(is.null(training_matrix)){
      ## create matrix using lme4s functions
      re_terms <- mkReTrms(findbars(as.formula(form)), model.frame(subbars(form), data=df))
      Z <- as.matrix(t(re_terms[["Zt"]])) ## Z is returned transposed
      flist <- unique(unlist(re_terms[["flist"]])) ## flist gets the correct order of random effects
      Z <- Z[, flist] ## this re-orders Z to have correct order of random effects
    } else {
      df <- df[, ranefs, with=F]
      message("Constructing prediction matrix..")
      ## lapply to evaluate where the training matrix values equal prediction matrix values
      ## (each training column name is "<varname>;value:<level>"; a prediction
      ## row gets a 1 where its formatted value equals that column's name)
      Z <- sapply(1:ncol(training_matrix), function(x){
        ranef <- tstrsplit(colnames(training_matrix)[x], ";value:")[[1]]
        as.numeric(df[[ranef]]==colnames(training_matrix)[x])
      })
      ## assert that Z is a matrix (sapply drops to a vector for 1-row data)
      if(!is.matrix(Z)){Z <- matrix(Z, ncol=ncol(training_matrix))}
      colnames(Z) <- colnames(training_matrix)
      message("Done")
    }
  } else {
    Z <- array(0, dim = c(nrow=nrow(df), 0)) ## return empty array if no random effects
  }
  return(Z)
}
################### GET NUMBER OF RANDOM EFFECTS IN EACH LEVEL #########################################
get_ranef_lvl_counts <- function(df, form){
  ## Count the distinct data levels of each random-effect grouping variable
  ## appearing in `form` (extracted from the bar terms via lme4::findbars).
  grouping_vars <- unlist(as.character(lapply(findbars(form), "[[", 3)))
  if (length(grouping_vars) == 0) {
    ## No random effects: keep the zero-length-array convention callers expect.
    return(array(0, dim=0))
  }
  vapply(grouping_vars, function(v) length(unique(df[[v]])), integer(1), USE.NAMES=FALSE)
}
################### CREATE MATRiX FOR DELTA SMOOTHING #########################################
## xvals must be a named list. Each element needs to be a numeric vector of the values to estimate differences between for a given dimension
## df must be a data.table of data with column names corresponding to the names of the elements in the xvals list
make_delta_matrix <- function(xvals, df){
  ## Build a "delta"/piecewise-linear basis matrix: for each dimension in
  ## `xvals`, column k records how far each observation has progressed through
  ## the interval (mesh[k-1], mesh[k]], capped at the interval width.
  ##
  ## Args:
  ##   xvals: named list; each element is the numeric mesh for one dimension.
  ##   df:    data with columns named after the elements of xvals.
  ## Returns: the column-bound matrix across all dimensions.
  ## checks
  if(is.null(names(xvals))){stop("xvals must be a named list")}
  if(!all(names(xvals) %in% names(df))){stop("Names of xvals not in data")}
  ## loop over each dimension
  full_matrix <- lapply(1:length(xvals), function(d){
    ## loop over each value
    sub_matrix <- sapply(2:length(xvals[[d]]), function(x){
      xval <- xvals[[d]][x]
      prev_xval <- xvals[[d]][x-1]
      ## stop if any values are less than the initial values
      if(x==2){
        ## BUG FIX: this previously used stop(message(...)), which prints the
        ## text but raises an error with no message; pass the text to stop().
        if(any(df[[names(xvals)[d]]] < prev_xval)){stop("You have values in ", names(xvals)[d], " that are less than the minimum mesh point!")}
      }
      ## get binaries if row values are greater than or equal to the previous xval.
      temp <- as.numeric(df[[names(xvals)[d]]] > prev_xval)
      ## get change from previous xval or change from the value in the data and the xval if the value in the data is between previous xval
      change <- ifelse(data.table::between(df[[names(xvals)[d]]], prev_xval, xval, incbounds=F), df[[names(xvals)[d]]] - prev_xval, xval - prev_xval)
      temp <- temp * change
      return(temp)
    })
    ## BUG FIX: sapply() drops to a plain vector when the mesh has only two
    ## points or the data has a single row; coerce back to a matrix so the
    ## colnames<- assignment below cannot fail.
    if(!is.matrix(sub_matrix)){sub_matrix <- matrix(sub_matrix, ncol=length(xvals[[d]])-1)}
    ## give colnames based on index for clarity
    colnames(sub_matrix) <- paste0(names(xvals)[d], "_", 2:length(xvals[[d]]))
    return(sub_matrix)
  })
  Reduce(cbind, full_matrix)
}
################### INTERACT DELTA VARS #########################################
## delta vars need to be continuous but it's useful to interact a continuous var with a categorical (or other continuos) one
## this functions creates an A matrix with interacted vars
interact_delta_vars <- function(df, interact_pairs, drop_reference=T){
  ## For each pair in `interact_pairs`, add interaction column(s) to `df`:
  ## numeric x numeric pairs get a single product column; numeric x
  ## categorical pairs get one column per category level (the continuous
  ## variable multiplied by a level indicator, built via make_ranef_matrix).
  ## Returns list(intrxn_df = augmented data, intrxn_cols = new column names).
  df <- copy(df)
  invisible(
    intrxn_cols <- unlist(lapply(1:length(interact_pairs), function(x){
      ## detect classes of pairs
      pair <- interact_pairs[[x]]
      pair_classes <- lapply(df[, c(pair), with=F], class)
      message(" Printing detected classes for ", paste(pair, collapse=":"))
      print(pair_classes)
      char_col <- pair[pair_classes %in% c("factor", "character")]
      ## create interaction cols for character
      if(length(char_col)>1){stop(paste0(length(char_col), " vars detected as character, need one numeric col"))}
      if(length(char_col)>0){
        ## make binary ranef matrix of categorical (sibling helper in this file)
        Z_full <- make_ranef_matrix(df, form=as.formula(paste0("~(1|", char_col, ")")))
        ## drop first column (reference col)
        ## NOTE(review): with exactly two levels, dropping a column leaves a
        ## vector (dims dropped) -- confirm downstream code tolerates that.
        if(drop_reference==T){
          Z_full <- Z_full[, -1]
        }
        ## multiply each column by the continuous var
        Z_full <- Z_full*df[[setdiff(pair, char_col)]]
        ## name new columns "<numeric>:<varname>;value:<level>"
        colnames(Z_full) <- paste0(setdiff(pair, char_col), ":", colnames(Z_full))
        intrxn_col <- colnames(Z_full)
        Z_full <- as.data.table(Z_full)
        df[, (intrxn_col):=Z_full]
      } else {
        ## create interaction col if both cols are numeric/integer
        intrxn_col <- paste0(pair, collapse=":")
        df[, (intrxn_col):=get(pair[1]) * get(pair[2])]
      }
      return(intrxn_col)
    }))
  )
  return(list(intrxn_df=df, intrxn_cols=intrxn_cols))
}
################### CREATE BASE MATRIX #########################################
## make a baseic matrix with sparsity besides tri-diagonal
make_tridiag_matrix <- function(n_sites, rho=NULL){
  ## Build n_sites x n_sites tridiagonal matrices as sparse "dgTMatrix"
  ## objects (the triplet format TMB consumes).
  ##
  ## With `rho` supplied: m0 has 1 + rho^2 on the diagonal and m1 has -rho on
  ## the off-diagonals (the two pieces of an exponential-decay precision
  ## structure). Without `rho`: m0 is the identity pattern and m1 a 0/1
  ## off-diagonal indicator.
  ## Returns: list(m0, m1).
  m0 <- matrix(0, ncol=n_sites, nrow=n_sites)
  m1 <- m0  ## base matrices are copy-on-modify; no explicit copy() needed
  ## if a correlation is given, create precision matrix for a exponential decay
  if(!is.null(rho)){
    m0[col(m0)==row(m0)] <- 1+rho^2 ## get diagnoals
    m1[col(m1)==row(m1)-1 | col(m1)==row(m1)+1]<- -rho ## get off-diagonals
    m0<-as(m0, "dgTMatrix")
    ## BUG FIX: m1 was previously returned as a dense base matrix in this
    ## branch, inconsistent with the no-rho branch below; convert it too.
    m1<-as(m1, "dgTMatrix")
  } else {
    m0[col(m0)==row(m0)]<-1 ## get diagnoals
    m1[col(m1)==row(m1)-1 | col(m1)==row(m1)+1]<- 1 ## get off-diagonals
    m0<-as(m0, "dgTMatrix")
    m1<-as(m1, "dgTMatrix")
  }
  return(list(m0, m1))
}
################### SIMULATE MV NORMAL W/ CHOLESKY #########################################
rmvnorm_prec <- function(mu = NULL, prec, n.sims) {
  ## Draw n.sims samples from MVN(mu, prec^-1) via a sparse Cholesky solve,
  ## avoiding an explicit (dense) inversion of the precision matrix.
  ##
  ## Args:
  ##   mu:     mean vector; defaults to zeros matching dim(prec).
  ##   prec:   sparse precision matrix (Matrix class).
  ##   n.sims: number of draws.
  ## Returns: n.sims x length(mu) matrix, one draw per row, columns named
  ## after prec's columns.
  if(is.null(mu)) {
    mu <- rep(0, dim(prec)[1])
  }
  ## iid standard normals, one column per requested simulation
  z <- matrix(MASS::mvrnorm(length(mu) * n.sims, 0, 1 ), ncol=n.sims)
  ## supernodal Cholesky factorisation P prec P' = L L'
  L <- Matrix::Cholesky(prec , super = TRUE)
  z <- Matrix::solve(L, z, system = "Lt") ## z = Lt^-1 %*% z
  z <- Matrix::solve(L, z, system = "Pt") ## z = Pt %*% z
  z <- as.matrix(z)
  outmat <- t(mu + z)
  colnames(outmat) <- colnames(prec)
  return(outmat)
}
################### GET TMB PARAMETER DRAWS #########################################
simulate_tmb_draws <- function(model_object, n_draws=1000){
  ## Draw parameter vectors from the (approximate, multivariate-normal)
  ## posterior of a fitted TMB model, centred at the best parameter estimates
  ## with the joint precision (or fixed-effect covariance) from sdreport().
  ##
  ## Args:
  ##   model_object: fitted TMB model object (from MakeADFun + optimisation).
  ##   n_draws:      number of simulated parameter vectors.
  ## Returns: data.table, one row per draw, one column per parameter.
  require(mvtnorm)
  require(Matrix)
  require(TMB)
  ## run report to get joint precision
  report <- TMB::sdreport(model_object, getJointPrecision=T)
  ## estimates stored from best iteration
  ests <- model_object$env$last.par.best
  ##s if only 1 fixed effect parameter
  if(length(ests)==1){
    se <- as.matrix(sqrt(report$cov.fixed))
    par_draws <- rnorm(n=n_draws, mean=ests, sd=se)
    par_draws <- data.table(par_draws)
    names(par_draws) <- names(ests)
  } else {
    if(!is.null(model_object$env$random)){
      ## random effects present: simulate via sparse Cholesky of the joint
      ## precision (rmvnorm_prec above); avoids inverting the full matrix.
      ## NOTE(review): `n.sim=` relies on R's partial argument matching of
      ## rmvnorm_prec's `n.sims` -- spell it out if that signature changes.
      #cov_mat<-as.matrix(Matrix::solve(report$jointPrecision)) ## this will break if Hessian not pos/def
      par_draws <- rmvnorm_prec(mu=ests, prec=report$jointPrecision, n.sim=n_draws)
    } else {
      ## if only fixed effects
      cov_mat <- report$cov.fixed
      par_draws <- mvtnorm::rmvnorm(n=n_draws, mean=ests, sigma=cov_mat)
    }
  }
  return(as.data.table(par_draws))
}
################### CALCULATE PREDICTIONS FROM DRAWS #########################################
predict_draws <- function(prediction_math, draw_list, data_list=NULL,
                          return_draws=T, upper_lower=F,
                          mid_fun = "mean", quants = c(0.025, 0.975)){
  ## Evaluate a prediction expression once per parameter draw.
  ##
  ## Args:
  ##   prediction_math: string of R code evaluated per draw (e.g. "X %*% betas").
  ##                    NOTE(review): eval(parse()) of a caller-supplied string
  ##                    -- only pass trusted expressions.
  ##   draw_list:       named list of draw matrices (one row per draw); names
  ##                    must match the parameter names used in prediction_math.
  ##   data_list:       named list of fixed objects (data) the expression
  ##                    references; placed once in the evaluation environment.
  ##   return_draws:    if T, return predictions wide on draws; otherwise
  ##                    summaries (mid_fun plus either se or lower/upper).
  ##   upper_lower:     if T (and return_draws==F), return quantile bounds.
  ##   mid_fun:         name of the summary function for the point estimate.
  ##   quants:          probabilities for the lower/upper quantiles.
  ################### SUB FUNCTIONS #########################################
  ## Evaluate the prediction expression for draw x. Relies on `par_names`
  ## from the enclosing function's scope (lexical scoping).
  calc_draw <- function(x, draw_list, temp_env, prediction_math){
    sub_draws <- lapply(draw_list, "[", x, ) #function(n){ ## this syntax gets an entire row
    # n[x]
    # })
    ##s add draw vals to temporary environment
    lapply(par_names, function(n){
      temp_env[[n]]<-as.numeric(sub_draws[[n]])
    })
    ## run the prediction formula
    pred <- eval(parse(text=prediction_math), envir=temp_env)
    ## drop this draw's parameter values so draws cannot leak into each other
    rm(list = names(draw_list), envir = temp_env)
    return(data.table(draw=paste0("draw", x-1), pred_row=1:length(pred), pred=as.numeric(pred)))
  }
  ################### SETUP ENVIRONMENT #########################################
  ## get number of draws
  n_draws <- unique(unlist(lapply(draw_list, nrow)))
  ## check to make sure number of draws are correct
  if(length(n_draws)>1){stop("Varying number of draws supplied to elements of draw_list")}
  ## get names of objects
  par_names <- names(draw_list)
  data_names <- names(data_list)
  ## create a temporary environment to store values. Putting data here since data won't have draws (if data has draws, put it in draw_list)
  temp_env <- new.env()
  invisible(
    lapply(data_names, function(x){
      temp_env[[x]] <- data_list[[x]]
    })
  )
  ## convert draw list to matrix
  draw_list <- lapply(draw_list, as.matrix)
  ################### CALCULATE DRAWS #########################################
  message("Calculating draws...")
  linpred_draws <- lapply(1:n_draws, calc_draw,
                          draw_list=draw_list, temp_env=temp_env, prediction_math=prediction_math) ## sub-args
  #message("Done with pred lapply")
  ################### FORMAT AND RETURN #########################################
  ## format draw output: stack per-draw tables long, keyed by prediction row
  linpred_draws <- rbindlist(linpred_draws)
  setkey(linpred_draws, pred_row)
  if(return_draws==T){
    ## make wide on draws if returning draws
    preds <- dcast(linpred_draws, formula = pred_row ~ draw, value.var = "pred")
  } else {
    ## summary function applied by name, e.g. "mean(pred)" or "median(pred)"
    mid_txt <- paste0(mid_fun,"(pred)")
    if(upper_lower==T){
      preds <- linpred_draws[, .(pred=eval(parse(text = mid_txt)), lower=quantile(pred, probs=quants[1]), upper=quantile(pred, probs=quants[2])), by="pred_row"]
    } else {
      ## calculate mean and SD of draws if not returing draws and want SE
      preds <- linpred_draws[, .(pred=eval(parse(text = mid_txt)), se=sd(pred)), by="pred_row"]
    }
  }
  preds[, pred_row:=NULL]
  message("Done")
  return(preds)
}
#calc_draw(x=8, draw_list=draw_list, temp_env=temp_env, prediction_math=prediction_math)
################### SCALE VARIABLES #########################################
## get scaling factors first
get_scalers <- function(fixef, dt){
  ## Mean and SD of one column, for later centring/scaling of predictions.
  ## check_missing() (sourced from data_tests.R) warns about problems first.
  check_missing(fixef, dt, warn=T)
  vals <- dt[[fixef]]
  c(mean=mean(vals, na.rm=T), sd=sd(vals, na.rm=T))
}
scale_fixefs <- function(dt, fixefs, scalers=NULL){
  ## Centre and scale the given fixed-effect columns, adding "scaled_<name>"
  ## columns, and return the per-column mean/sd so the identical transform can
  ## be applied to prediction data later (see apply_scalers()).
  ##
  ## Args:
  ##   dt:      data to scale (coerced to data.table; the input is not
  ##            modified by reference).
  ##   fixefs:  character vector of column names to scale.
  ##   scalers: currently unused; retained for backward compatibility of the
  ##            signature.
  ## Returns: list(scale_factors = named list of c(mean, sd) per column,
  ##               dt = data with the scaled columns added).
  dt <- copy(dt)
  dt <- as.data.table(dt)
  scale_factors <- lapply(fixefs, get_scalers, dt)
  names(scale_factors) <- fixefs
  ## scale() with defaults computes (x - mean(x)) / sd(x), matching the
  ## factors recorded above.
  dt[, (paste0("scaled_", fixefs)):=lapply(fixefs, function(x){scale(get(x))})]
  return(list(
    scale_factors=scale_factors,
    dt=dt
  ))
}
################### APPLY SCALERS #########################################
apply_scalers <- function(dt, scalers){
  ## Apply previously-computed scaling factors (from scale_fixefs()) to `dt`,
  ## adding a "scaled_<name>" column for every scaler whose variable exists.
  ##
  ## Args:
  ##   dt:      data to rescale (copied; not modified by reference).
  ##   scalers: named list of c(mean, sd) vectors, names = column names.
  ## Returns: the data with scaled columns added.
  dt <- copy(dt)
  ## seq_along() instead of 1:length(): the latter iterates c(1, 0) and
  ## errors when `scalers` is empty.
  for(s in seq_along(scalers)){
    var <- names(scalers[s])
    temp_scale <- scalers[[s]]
    if(var %in% names(dt)){
      message("Rescaling ", var)
      ## temp_scale[1] = mean, temp_scale[2] = sd (as produced by get_scalers)
      dt[, (paste0("scaled_", var)):=(get(var)-temp_scale[1])/temp_scale[2]]
    }
  }
  return(dt)
}
################### COMPILE TMB MODEL #########################################
compile_tmb <- function(x){
  ## Compile a TMB C++ model template and load the resulting shared library.
  ##
  ## Args:
  ##   x: path to the model's .cpp file (e.g. "folder/model.cpp").
  ## Side effects: compiles the model in its own folder and dyn.load()s it.
  require(TMB)
  require(data.table)
  folders <- unlist(tstrsplit(x, "/"))
  ## get parent folder that holds model, and model name
  parent_folder <- paste0(paste0(folders[-length(folders)], collapse="/"), "/")
  model_file <- folders[length(folders)]
  ## fixed=TRUE: split on the literal ".cpp", not the regex "." (any char) + "cpp"
  model_name <- unlist(tstrsplit(model_file, ".cpp", fixed=TRUE))[1]
  ## record the current wd and restore it even if compilation errors part-way
  orig_wd <- getwd()
  on.exit(setwd(orig_wd), add=TRUE)
  setwd(parent_folder)
  TMB::compile(model_file)
  dyn.load(dynlib(model_name))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{madelon}
\alias{madelon}
\title{An artificial dataset called MADELON}
\format{
A list of three elements:
\describe{
\item{data}{2000 by 500 matrix of 2000 objects with 500 features}
\item{decision}{vector of 2000 decisions (labels 0/1)}
\item{IG.2D}{example 2D IG computed using \code{ComputeMaxInfoGains}}
}
}
\source{
\url{https://archive.ics.uci.edu/ml/datasets/Madelon}
}
\usage{
madelon
}
\description{
An artificial dataset containing data points grouped in 32 clusters placed
on the vertices of a five dimensional hypercube and randomly labeled 0/1.
}
\details{
The five dimensions constitute 5 informative features.
15 linear combinations of those features are added to form a set of 20
(redundant) informative features.
There are 480 distractor features called 'probes' having no predictive
power.
Included is the original training set with label -1 changed to 0.
}
\keyword{datasets}
| /man/madelon.Rd | no_license | cran/MDFS | R | false | true | 1,005 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{madelon}
\alias{madelon}
\title{An artificial dataset called MADELON}
\format{
A list of three elements:
\describe{
\item{data}{2000 by 500 matrix of 2000 objects with 500 features}
\item{decision}{vector of 2000 decisions (labels 0/1)}
\item{IG.2D}{example 2D IG computed using \code{ComputeMaxInfoGains}}
}
}
\source{
\url{https://archive.ics.uci.edu/ml/datasets/Madelon}
}
\usage{
madelon
}
\description{
An artificial dataset containing data points grouped in 32 clusters placed
on the vertices of a five dimensional hypercube and randomly labeled 0/1.
}
\details{
The five dimensions constitute 5 informative features.
15 linear combinations of those features are added to form a set of 20
(redundant) informative features.
There are 480 distractor features called 'probes' having no predictive
power.
Included is the original training set with label -1 changed to 0.
}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/anim.plots.R
\name{anim.hist}
\alias{anim.hist}
\title{Draw an animated histogram.}
\usage{
anim.hist(x, times, speed = 1, show = TRUE, use.times = TRUE,
window = t, window.process = NULL, density = NULL, angle = NULL,
col = NULL, border = NULL, ...)
}
\arguments{
\item{x, density, angle, col, border, ...}{parameters passed to \code{\link{hist}}.}
\item{times, show, speed, use.times, window, window.process}{see
\code{\link{anim.plot}}.}
}
\description{
Draw an animated histogram.
}
\details{
Parameters \code{x, density, angle, col} and \code{border} are all
"chunked", i.e. first recycled to the length of \code{times} or \code{x}
(whichever is longer), then split according to the unique values of \code{times}.
See \code{\link{anim.plot}} for more details.
}
\examples{
anim.hist(rep(rnorm(5000), 7), times=rep(1:7, each=5000),
breaks=c(5,10,20,50,100,200, 500, 1000))
}
| /man/anim.hist.Rd | no_license | anhnguyendepocen/anim.plots | R | false | true | 969 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/anim.plots.R
\name{anim.hist}
\alias{anim.hist}
\title{Draw an animated histogram.}
\usage{
anim.hist(x, times, speed = 1, show = TRUE, use.times = TRUE,
window = t, window.process = NULL, density = NULL, angle = NULL,
col = NULL, border = NULL, ...)
}
\arguments{
\item{x, density, angle, col, border, ...}{parameters passed to \code{\link{hist}}.}
\item{times, show, speed, use.times, window, window.process}{see
\code{\link{anim.plot}}.}
}
\description{
Draw an animated histogram.
}
\details{
Parameters \code{x, density, angle, col} and \code{border} are all
"chunked", i.e. first recycled to the length of \code{times} or \code{x}
(whichever is longer), then split according to the unique values of \code{times}.
See \code{\link{anim.plot}} for more details.
}
\examples{
anim.hist(rep(rnorm(5000), 7), times=rep(1:7, each=5000),
breaks=c(5,10,20,50,100,200, 500, 1000))
}
|
#' Number of possible combinations
#'
#' Calculates, for every size from 1 to \code{max(x)}, how many combinations
#' of that size can be drawn from the elements of \code{x}.
#' @param x A numeric vector
#' @return A named numeric vector (names are the combination sizes)
#' @export
#' @examples
#' genesPerPathway <- c(2, 4, 5, 8, 5, 8)
#' completness(genesPerPathway)
completness <- function(x){
  sizes <- seq_len(max(x))
  ## choose() is vectorised over k, so no explicit vapply loop is needed
  counts <- choose(length(x), sizes)
  names(counts) <- as.character(sizes)
  counts
}
| /R/completness.R | no_license | llrs/GSEAdv | R | false | false | 421 | r | #' Number of possible combinations
#'
#' Calculates the number of combinations of each size.
#' @param x A numeric vector
#' @return A named numeric vector
#' @export
#' @examples
#' genesPerPathway <- c(2, 4, 5, 8, 5, 8)
#' completness(genesPerPathway)
completness <- function(x){
  ## For each size 1..max(x), the number of size-k combinations of the
  ## elements of x; returned as a numeric vector named by size.
  sizes <- seq_len(max(x))
  stats::setNames(choose(length(x), sizes), as.character(sizes))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BiCopHfuncDeriv2.R
\name{BiCopHfuncDeriv2}
\alias{BiCopHfuncDeriv2}
\title{Second Derivatives of the h-Function of a Bivariate Copula}
\usage{
BiCopHfuncDeriv2(u1, u2, family, par, par2 = 0, deriv = "par", obj = NULL,
check.pars = TRUE)
}
\arguments{
\item{u1, u2}{numeric vectors of equal length with values in [0,1].}
\item{family}{integer; single number or vector of size \code{length(u1)};
defines the bivariate copula family: \cr
\code{0} = independence copula \cr
\code{1} = Gaussian copula \cr
\code{2} = Student t copula (t-copula) \cr
\code{3} = Clayton copula \cr
\code{4} = Gumbel copula \cr
\code{5} = Frank copula \cr
\code{6} = Joe copula \cr
\code{13} = rotated Clayton copula (180 degrees; ``survival Clayton'') \cr
\code{14} = rotated Gumbel copula (180 degrees; ``survival Gumbel'') \cr
\code{16} = rotated Joe copula (180 degrees; ``survival Joe'') \cr
\code{23} = rotated Clayton copula (90 degrees) \cr
\code{24} = rotated Gumbel copula (90 degrees) \cr
\code{26} = rotated Joe copula (90 degrees) \cr
\code{33} = rotated Clayton copula (270 degrees) \cr
\code{34} = rotated Gumbel copula (270 degrees) \cr
\code{36} = rotated Joe copula (270 degrees) \cr}
\item{par}{numeric; single number or vector of size \code{length(u1)};
copula parameter.}
\item{par2}{integer; single number or vector of size \code{length(u1)};
second parameter for the t-Copula; default is \code{par2 = 0}, should be a
positive integer for the Student's t copula \code{family = 2}.}
\item{deriv}{Derivative argument \cr
\code{"par"} = second derivative with respect to
the first parameter (default)\cr
\code{"par2"} = second derivative with respect to
the second parameter (only available for the t-copula) \cr
\code{"u2"} = second derivative with respect to
the second argument \code{u2} \cr
\code{"par1par2"} = second derivative with respect to
the first and second parameter (only available for the t-copula) \cr
\code{"par1u2"} = second derivative with respect to
the first parameter and the second argument \cr
\code{"par2u2"} = second derivative with respect to the second parameter
and the second argument (only available for the t-copula) \cr}
\item{obj}{\code{BiCop} object containing the family and parameter
specification.}
\item{check.pars}{logical; default is \code{TRUE}; if \code{FALSE}, checks
for family/parameter-consistency are omitted (should only be used with
care).}
}
\value{
A numeric vector of the second-order conditional bivariate copula
derivative
\itemize{
\item of the copula \code{family}
\item with parameter(s) \code{par}, \code{par2}
\item with respect to \code{deriv}
\item evaluated at \code{u1} and \code{u2}.
}
}
\description{
This function evaluates the second derivative of a given conditional
parametric bivariate copula (h-function) with respect to its parameter(s)
and/or its arguments.
}
\details{
If the family and parameter specification is stored in a \code{\link{BiCop}}
object \code{obj}, the alternative version \cr
\preformatted{BiCopHfuncDeriv2(u1, u2, obj, deriv = "par")}
can be used.
}
\examples{
## simulate from a bivariate Student-t copula
set.seed(123)
cop <- BiCop(family = 2, par = -0.7, par2 = 4)
simdata <- BiCopSim(100, cop)
## second derivative of the conditional bivariate t-copula
## with respect to the first parameter
u1 <- simdata[,1]
u2 <- simdata[,2]
BiCopHfuncDeriv2(u1, u2, cop, deriv = "par")
## estimate a Student-t copula for the simulated data
cop <- BiCopEst(u1, u2, family = 2)
## and evaluate the derivative of the conditional copula
## w.r.t. the second argument u2
BiCopHfuncDeriv2(u1, u2, cop, deriv = "u2")
}
\author{
Ulf Schepsmeier, Jakob Stoeber
}
\references{
Schepsmeier, U. and J. Stoeber (2014). Derivatives and Fisher
information of bivariate copulas. Statistical Papers, 55 (2), 525-542. \cr
\url{http://link.springer.com/article/10.1007/s00362-013-0498-x}.
}
\seealso{
\code{\link{RVineGrad}}, \code{\link{RVineHessian}},
\code{\link{BiCopDeriv}}, \code{\link{BiCopDeriv2}},
\code{\link{BiCopHfuncDeriv}}, \code{\link{BiCop}}
}
| /man/BiCopHfuncDeriv2.Rd | no_license | ulf85/VineCopula | R | false | true | 4,112 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BiCopHfuncDeriv2.R
\name{BiCopHfuncDeriv2}
\alias{BiCopHfuncDeriv2}
\title{Second Derivatives of the h-Function of a Bivariate Copula}
\usage{
BiCopHfuncDeriv2(u1, u2, family, par, par2 = 0, deriv = "par", obj = NULL,
check.pars = TRUE)
}
\arguments{
\item{u1, u2}{numeric vectors of equal length with values in [0,1].}
\item{family}{integer; single number or vector of size \code{length(u1)};
defines the bivariate copula family: \cr
\code{0} = independence copula \cr
\code{1} = Gaussian copula \cr
\code{2} = Student t copula (t-copula) \cr
\code{3} = Clayton copula \cr
\code{4} = Gumbel copula \cr
\code{5} = Frank copula \cr
\code{6} = Joe copula \cr
\code{13} = rotated Clayton copula (180 degrees; ``survival Clayton'') \cr
\code{14} = rotated Gumbel copula (180 degrees; ``survival Gumbel'') \cr
\code{16} = rotated Joe copula (180 degrees; ``survival Joe'') \cr
\code{23} = rotated Clayton copula (90 degrees) \cr
\code{24} = rotated Gumbel copula (90 degrees) \cr
\code{26} = rotated Joe copula (90 degrees) \cr
\code{33} = rotated Clayton copula (270 degrees) \cr
\code{34} = rotated Gumbel copula (270 degrees) \cr
\code{36} = rotated Joe copula (270 degrees) \cr}
\item{par}{numeric; single number or vector of size \code{length(u1)};
copula parameter.}
\item{par2}{integer; single number or vector of size \code{length(u1)};
second parameter for the t-Copula; default is \code{par2 = 0}, should be a
positive integer for the Student's t copula \code{family = 2}.}
\item{deriv}{Derivative argument \cr
\code{"par"} = second derivative with respect to
the first parameter (default)\cr
\code{"par2"} = second derivative with respect to
the second parameter (only available for the t-copula) \cr
\code{"u2"} = second derivative with respect to
the second argument \code{u2} \cr
\code{"par1par2"} = second derivative with respect to
the first and second parameter (only available for the t-copula) \cr
\code{"par1u2"} = second derivative with respect to
the first parameter and the second argument \cr
\code{"par2u2"} = second derivative with respect to the second parameter
and the second argument (only available for the t-copula) \cr}
\item{obj}{\code{BiCop} object containing the family and parameter
specification.}
\item{check.pars}{logical; default is \code{TRUE}; if \code{FALSE}, checks
for family/parameter-consistency are omitted (should only be used with
care).}
}
\value{
A numeric vector of the second-order conditional bivariate copula
derivative
\itemize{
\item of the copula \code{family}
\item with parameter(s) \code{par}, \code{par2}
\item with respect to \code{deriv}
\item evaluated at \code{u1} and \code{u2}.
}
}
\description{
This function evaluates the second derivative of a given conditional
parametric bivariate copula (h-function) with respect to its parameter(s)
and/or its arguments.
}
\details{
If the family and parameter specification is stored in a \code{\link{BiCop}}
object \code{obj}, the alternative version \cr
\preformatted{BiCopHfuncDeriv2(u1, u2, obj, deriv = "par")}
can be used.
}
\examples{
## simulate from a bivariate Student-t copula
set.seed(123)
cop <- BiCop(family = 2, par = -0.7, par2 = 4)
simdata <- BiCopSim(100, cop)
## second derivative of the conditional bivariate t-copula
## with respect to the first parameter
u1 <- simdata[,1]
u2 <- simdata[,2]
BiCopHfuncDeriv2(u1, u2, cop, deriv = "par")
## estimate a Student-t copula for the simulated data
cop <- BiCopEst(u1, u2, family = 2)
## and evaluate the derivative of the conditional copula
## w.r.t. the second argument u2
BiCopHfuncDeriv2(u1, u2, cop, deriv = "u2")
}
\author{
Ulf Schepsmeier, Jakob Stoeber
}
\references{
Schepsmeier, U. and J. Stoeber (2014). Derivatives and Fisher
information of bivariate copulas. Statistical Papers, 55 (2), 525-542. \cr
\url{http://link.springer.com/article/10.1007/s00362-013-0498-x}.
}
\seealso{
\code{\link{RVineGrad}}, \code{\link{RVineHessian}},
\code{\link{BiCopDeriv}}, \code{\link{BiCopDeriv2}},
\code{\link{BiCopHfuncDeriv}}, \code{\link{BiCop}}
}
|
# Name:     load_xml.R
# Purpose: Read xml and load into an Oracle table
# Developer
# 12/19/2016 (htu) - initial creation
#
# 1. load the required libraries
# Clear All
# NOTE(review): rm(list=ls()) wipes the caller's entire workspace; avoid in
# scripts that may be source()d from an interactive session.
rm(list=ls())
# check if packages installed and then install if necessary
packages <- c('XML','ROracle','plyr','RCurl')
if (length(setdiff(packages, rownames(installed.packages()))) > 0) {
  install.packages(setdiff(packages, rownames(installed.packages())))
}
library("XML") # Load the package required to read XML files.
library("methods") # Also load the other required package.
library(ROracle)
library(plyr)
library(RCurl)
# NOTE(review): hard-coded, user-specific working directory -- breaks for
# anyone else running this script; prefer a relative path or an argument.
setwd("C:/Users/hanming.h.tu/Google Drive/ACN/Codes/R")
source("libs/Func_comm.R")
# 2. read the xml file, then explore the parsed tree interactively.
# Give the input file name to the function.
# xfn <- 'C:/Users/hanming.h.tu/Google Drive/ACN/C/A/QMDR5_3/define_xml_2_0_releasepackage20140424/sdtm/define2-0-0-example-sdtm.xml'
# url <- 'https://drive.google.com/open?id=0B6yBXwWAdpcTWTFJWlRWWVItc1k'
# x1 <- getURL(url)
xfn <- 'data/define2_sdtm.xml'
#rst <- xmlTreeParse(file = xfn, addAttributeNamespaces = TRUE, useInternalNode=TRUE)
# rst <- xmlParse(file = xfn, addAttributeNamespaces = TRUE, useInternalNode=TRUE)
# rst <- xmlParse(file = url)
rst <- xmlParse(file = xfn)
top <- xmlRoot(rst)
ta <- xmlAttrs(top)
# children of the root, with XML comment nodes filtered out
t1 <- top[which(names(top) != "comment")]
#Root Node's children
xmlSize(t1[[1]]) #number of nodes in each child
xmlSApply(t1[[1]], xmlName) #name(s)
xmlSApply(t1[[1]], xmlAttrs) #attribute(s)
xmlSApply(t1[[1]], xmlSize) #size
xpathSApply(top, "//ODM", xmlGetAttr, 'ODMVersion')
v1 <- xpathSApply(t1, "//ODM/Study/GlobalVariables/StudyName", xmlValue)
# https://hopstat.wordpress.com/2014/01/14/faster-xml-conversion-to-data-frames/
# NOTE(review): xmlToDF is not defined in this script or its sourced files
# visible here (it comes from the blog post above) -- confirm it is provided
# by libs/Func_comm.R before relying on this line.
xmlToDF(t1, xpath='/Study/GlobalVariables')
# NOTE(review): `url` is commented out above, so this line errors at runtime
# (object 'url' not found); it presumably meant `xfn` -- confirm before use.
ldply(xmlToList(url), function(x) { data.frame(x[!names(x)=="comment"]) } )
for (n in names.XMLNode(top)) {
  if (n == "comment") { next }
  print(n)
}
pl <- xmlSApply(top[which(names(top) != "comment")], function(x) xmlSApply(x[which(names(x) != "comment")], xmlValue))
pl <- xmlSApply(top, function(x) xmlSApply(x, xmlAttrs))
top[c("comment")]
xpathSApply(rst, "/ODM/Study/GlobalVariables/StudyName" , xmlValue)
ns <- getNodeSet(rst, '/ODM/Study')
element_cnt <-length(ns)
# BUG FIX: removed a stray trailing ")" that made this line a syntax error.
strings<-paste(sapply(ns, function(x) { xmlValue(x) }),collapse="|")
# 3. create the data frame
df1 <- data.frame( XML_ID = 1,
XML_TYPE = 'Define',
XML_DOC = rst,
CREATED = '12/19/2016',
RUN_ID = 1,
JOB_ID = 1
)
#
# 3. connect to Oracle database
#
con <- get_conn("std_mdr", "std_mdr", "adevscan.adevgns.orst.com",service_name="adevpdb")
#
# 4. create temporary tables
#
Sys.setenv(TZ = "EST")
Sys.setenv(ORA_SDTZ = "EST")
tb1 <- "T_XMLS"
tb2 <- "QT_XMLS"
if (dbExistsTable(con, tb1, schema = NULL)) {
rs1 <- dbSendQuery(con, paste("drop table ", tb1))
}
dbWriteTable(con,tb1,df1)
#
# 5. insert the records to target tables
#
cmd1 <- paste("insert into ", tb2, "select * from ", tb1)
r1.tru <- dbSendQuery(con, paste("truncate table", tb2))
r1.ins <- dbSendQuery(con, cmd1)
if (dbHasCompleted(r1.ins)) {
r1.cmt <- dbSendQuery(con, "commit")
}
# End of the program
| /development/R/scripts/load_xml.R | permissive | phuse-org/phuse-scripts | R | false | false | 3,323 | r | # Name:
# Purpose: Read xml and load into an Oracle table
# Developer
# 12/19/2016 (htu) - initial creation
#
# 1. load the required libraries
# Clear All
rm(list=ls())  # NOTE: wipes the workspace; discouraged in shared scripts but kept from the original
# check if packages installed and then install if necessary
packages <- c('XML','ROracle','plyr','RCurl')
if (length(setdiff(packages, rownames(installed.packages()))) > 0) {
  install.packages(setdiff(packages, rownames(installed.packages())))
}
library("XML")     # Load the package required to read XML files.
library("methods") # Also load the other required package.
library(ROracle)   # Oracle database driver (DBI interface)
library(plyr)      # ldply()
library(RCurl)     # getURL(); only needed for the commented-out URL download path
setwd("C:/Users/hanming.h.tu/Google Drive/ACN/Codes/R")
source("libs/Func_comm.R")  # provides get_conn(); presumably also xmlToDF() -- TODO confirm
# 2. read the xml file
# Give the input file name to the function.
# xfn <- 'C:/Users/hanming.h.tu/Google Drive/ACN/C/A/QMDR5_3/define_xml_2_0_releasepackage20140424/sdtm/define2-0-0-example-sdtm.xml'
# url <- 'https://drive.google.com/open?id=0B6yBXwWAdpcTWTFJWlRWWVItc1k'
# x1 <- getURL(url)
xfn <- 'data/define2_sdtm.xml'
#rst <- xmlTreeParse(file = xfn, addAttributeNamespaces = TRUE, useInternalNode=TRUE)
# rst <- xmlParse(file = xfn, addAttributeNamespaces = TRUE, useInternalNode=TRUE)
# rst <- xmlParse(file = url)
rst <- xmlParse(file = xfn)   # internal-node document (XMLInternalDocument)
top <- xmlRoot(rst)           # root node (<ODM> for a define.xml file)
ta <- xmlAttrs(top)           # root attributes
t1 <- top[which(names(top) != "comment")]  # root children, XML comment nodes excluded
#Root Node's children
xmlSize(t1[[1]]) #number of nodes in each child
xmlSApply(t1[[1]], xmlName) #name(s)
xmlSApply(t1[[1]], xmlAttrs) #attribute(s)
xmlSApply(t1[[1]], xmlSize) #size
xpathSApply(top, "//ODM", xmlGetAttr, 'ODMVersion')
# FIX: xpathSApply() needs the parsed document (or a single node), not the node list t1
v1 <- xpathSApply(rst, "//ODM/Study/GlobalVariables/StudyName", xmlValue)
# https://hopstat.wordpress.com/2014/01/14/faster-xml-conversion-to-data-frames/
xmlToDF(t1, xpath='/Study/GlobalVariables')  # xmlToDF() is not part of XML; assumed sourced from Func_comm.R -- TODO confirm
# FIX: 'url' is never assigned (its definition is commented out above); parse the local file instead
ldply(xmlToList(xfn), function(x) { data.frame(x[!names(x)=="comment"]) } )
# FIX: use names() -- names.XMLNode() is an S3 method for (non-internal) XMLNode objects
for (n in names(top)) {
  if (n == "comment") { next }
  print(n)
}
pl <- xmlSApply(top[which(names(top) != "comment")], function(x) xmlSApply(x[which(names(x) != "comment")], xmlValue))
pl <- xmlSApply(top, function(x) xmlSApply(x, xmlAttrs))
top[c("comment")]
xpathSApply(rst, "/ODM/Study/GlobalVariables/StudyName" , xmlValue)
ns <- getNodeSet(rst, '/ODM/Study')
element_cnt <- length(ns)
# FIX: removed an extra closing parenthesis -- the original line did not parse
strings <- paste(sapply(ns, function(x) { xmlValue(x) }), collapse="|")
# 3. create the data frame
# FIX: store the serialized XML text rather than the external-pointer document object,
# otherwise the column cannot be written to a database table
df1 <- data.frame( XML_ID = 1,
                   XML_TYPE = 'Define',
                   XML_DOC = saveXML(rst),
                   CREATED = '12/19/2016',
                   RUN_ID = 1,
                   JOB_ID = 1
)
#
# 4. connect to Oracle database
#
con <- get_conn("std_mdr", "std_mdr", "adevscan.adevgns.orst.com",service_name="adevpdb")
#
# 5. create temporary tables
#
Sys.setenv(TZ = "EST")
Sys.setenv(ORA_SDTZ = "EST")
tb1 <- "T_XMLS"
tb2 <- "QT_XMLS"
# drop the staging table if it exists, then rewrite it from df1
if (dbExistsTable(con, tb1, schema = NULL)) {
  rs1 <- dbSendQuery(con, paste("drop table ", tb1))
}
dbWriteTable(con,tb1,df1)
#
# 6. insert the records to target tables
#
cmd1 <- paste("insert into ", tb2, "select * from ", tb1)
r1.tru <- dbSendQuery(con, paste("truncate table", tb2))
r1.ins <- dbSendQuery(con, cmd1)
if (dbHasCompleted(r1.ins)) {
  r1.cmt <- dbSendQuery(con, "commit")
}
# End of the program
|
# Homework script: k-means clustering of the Wisconsin breast-cancer data
# and comparison of the clusters against the known diagnosis.
# install.packages("tidyverse")
# install.packages("ggplot2")
# install.packages("class")
library(tidyverse)
library(ggplot2)   # NOTE: already attached by tidyverse; kept as in the original
library(dplyr)     # NOTE: already attached by tidyverse; kept as in the original
library(class)     # kNN utilities (not used in this question)
setwd("c:/temp/cpsc375hw3")
# 2
# a. Load the data. Column 1 ("Code") is the anonymized subject code and will not be used here.
# Columns 2-10 are the 9 features. Column 11 is the diagnosis: [B]enign or [M]alignant.
data <- read_csv("breast-cancer-wisconsin.csv")
# 2ai. How many total cases are there in the data?: ___
nrow(data)
# 2aii. How many [B]enign cases are there in the data?: ___
nrow(data %>% filter(Class == "B"))
# 2aiii. How many [M]alignant cases are there in the data?: ___
nrow(data %>% filter(Class == "M"))
# b. Run the k-means clustering algorithm using all the rows and all the 9 features.
# Use k=2, nstart=10.
# i. What should be the value of k? k = ___ (already given)
# ii. Give R code:
# k-means on the 9 feature columns only (Code and Class excluded)
km <- kmeans(data[,2:10], centers = 2, nstart = 10)
data$cluster <- km$cluster
data$cluster <- as.factor(data$cluster)  # factor so it can be used for grouping/plotting
# c. Evaluation: Compare the resulting clusters with the known diagnosis .
# i. What is the contingency table of your clustering?
# (Note: you can arbitrarily assign cluster 1/2 to Benign/Malignant)
# class == "B", cluster = 1
kM1 <- nrow(data %>% filter(Class == "M" & cluster == 1))
kB1 <- nrow(data %>% filter(Class == "B" & cluster == 1))
# class == "M", cluster = 2
kM2 <- nrow(data %>% filter(Class == "M" & cluster == 2))
kB2 <- nrow(data %>% filter(Class == "B" & cluster == 2))
# matrix() fills column-wise: column 1 = cluster 1 counts (M then B),
# column 2 = cluster 2 counts, matching the dimnames set below
contingencyTable <- matrix(c(c(kM1, kB1), c(kM2, kB2)), nrow = 2, ncol = 2)
colnames(contingencyTable) <- paste("Predicted/Cluster", sep = "", c(1, 2))
rownames(contingencyTable) <- paste("Actual/", sep = "", c("M", "B"))
| /hw3q2.R | no_license | rauldoe/cpsc375hw3 | R | false | false | 1,720 | r | # install.packages("tidyverse")
# Homework script: k-means clustering of the Wisconsin breast-cancer data
# and comparison of the clusters against the known diagnosis.
# install.packages("ggplot2")
# install.packages("class")
library(tidyverse)
library(ggplot2)   # NOTE: already attached by tidyverse; kept as in the original
library(dplyr)     # NOTE: already attached by tidyverse; kept as in the original
library(class)     # kNN utilities (not used in this question)
setwd("c:/temp/cpsc375hw3")
# 2
# a. Load the data. Column 1 ("Code") is the anonymized subject code and will not be used here.
# Columns 2-10 are the 9 features. Column 11 is the diagnosis: [B]enign or [M]alignant.
data <- read_csv("breast-cancer-wisconsin.csv")
# 2ai. How many total cases are there in the data?: ___
nrow(data)
# 2aii. How many [B]enign cases are there in the data?: ___
nrow(data %>% filter(Class == "B"))
# 2aiii. How many [M]alignant cases are there in the data?: ___
nrow(data %>% filter(Class == "M"))
# b. Run the k-means clustering algorithm using all the rows and all the 9 features.
# Use k=2, nstart=10.
# i. What should be the value of k? k = ___ (already given)
# ii. Give R code:
# k-means on the 9 feature columns only (Code and Class excluded)
km <- kmeans(data[,2:10], centers = 2, nstart = 10)
data$cluster <- km$cluster
data$cluster <- as.factor(data$cluster)  # factor so it can be used for grouping/plotting
# c. Evaluation: Compare the resulting clusters with the known diagnosis .
# i. What is the contingency table of your clustering?
# (Note: you can arbitrarily assign cluster 1/2 to Benign/Malignant)
# class == "B", cluster = 1
kM1 <- nrow(data %>% filter(Class == "M" & cluster == 1))
kB1 <- nrow(data %>% filter(Class == "B" & cluster == 1))
# class == "M", cluster = 2
kM2 <- nrow(data %>% filter(Class == "M" & cluster == 2))
kB2 <- nrow(data %>% filter(Class == "B" & cluster == 2))
# matrix() fills column-wise: column 1 = cluster 1 counts (M then B),
# column 2 = cluster 2 counts, matching the dimnames set below
contingencyTable <- matrix(c(c(kM1, kB1), c(kM2, kB2)), nrow = 2, ncol = 2)
colnames(contingencyTable) <- paste("Predicted/Cluster", sep = "", c(1, 2))
rownames(contingencyTable) <- paste("Actual/", sep = "", c("M", "B"))
|
############################### ETMUN bipartite Networks STEP 1 ###################
#
#
# DESCRIPTION : Work on the bipartite ETMUN (affiliation) graph.
#               Numerical summary of the graph dimensions (different filterings)
#
#
############################################################################## PG June 2020
### ==== LOAD PACKAGES AND DATA ====
setwd("~/Chap4_NetworkAnalysis/Chap4_NetworkAnalysis/4.2.InterOrgaNet/ETMUN_Inter")
# Packages
library(tidyverse)
library(tidylog)
library(igraph)
library(gridExtra)
library(Matrix)
##### Data
## Membership : table City-Asso. (for edges). EUROPE frame filtered (check STEP0 in Data folder)
MembershipEtmun <- readRDS("~/Chap4_NetworkAnalysis/Chap4_NetworkAnalysis/Data/ETMUN/ETMUN_Membership_europe.RDS")
## Information on associations (for nodes)
AssoEtmun <- read.csv2("~/Chap4_NetworkAnalysis/Chap4_NetworkAnalysis/Data/ETMUN/BD_ETMUN_OrganizationsWithMembersCities.csv",
                       stringsAsFactors = F)
### ==== CREATE NETWORK DATA ====
# Node label = "<geonameId>_<asciiName>_<CountryCode>"
MembershipEtmun$label <- paste(MembershipEtmun$geonameId, MembershipEtmun$asciiName,MembershipEtmun$CountryCode, sep = "_")
## Create matrix
# edge list: one row per (city, association) pair, weight = number of memberships
edgelist <- MembershipEtmun %>%
  filter(!is.na(geonameId)) %>%
  select( geonameId, Code_Network) %>%
  group_by(geonameId,Code_Network)%>%
  summarise(weight = n()) # some weight > 1 because several members of an association are (representing the same city) in the same locality
#(eg. Municipality of Barcelona + Metropolitan area of Barcelona)
## Pct edges with weight > 1
edgeSup1 <- edgelist %>% filter(weight > 1)
nrow(edgeSup1)/ nrow(edgelist)
edgelistnw <- edgelist %>% mutate(weight = 1) # edgelist non weighted (transform weight > 1)
## Convert edgelist into a matrix (rows = associations, columns = cities)
# with possible multiple affiliations of a city in the same organisation (weighted)
#em = edges matrix
em <- edgelist %>%
  pivot_wider(names_from = geonameId, values_from = weight, values_fill = list(weight = 0)) %>%
  column_to_rownames(var="Code_Network") %>%
  as.matrix()
# Remove duplicate, only unique localities in each association (unweighted, binary)
emnw <- edgelistnw %>%
  pivot_wider(names_from = geonameId, values_from = weight, values_fill = list(weight = 0)) %>%
  column_to_rownames(var="Code_Network") %>%
  as.matrix()
## Filter to keep only cities present in two asso (column sums > 1 = degree > 1)
# with possible multiple affiliations of a city in the same organisation (weighted)
em2 <- em[,colSums(em) > 1]
em2f <- round((ncol(em)- ncol(em2))/ncol(em)*100, 0) # remove 85% of cities. Remain 1976 cities in more than one association
# Remove duplicate, only unique localities in each association (unweighted)
em2nw <- emnw[,colSums(emnw) > 1]
em2nwf <- round((ncol(emnw)- ncol(em2nw))/ncol(emnw)*100, 0) # remove 88% of cities. Remain 1545 cities in more than one association
# Remove WWCAM ASSO with only one european city
assoremove <- "18332"
#Filter out the association on the matrix that keep city of degree 1
emnw <- emnw[!rownames(emnw) %in% assoremove, ]
emnw <- emnw[,colSums(emnw) > 0]  # drop cities left with no membership after the removal
#Filter out the association on the matrix that keep city of more than 1 degree (only cities involved at least in 2 associations)
em2nw <- em2nw[!rownames(em2nw) %in% assoremove, ]
em2nw <- em2nw[,colSums(em2nw) > 1]
### Choice to work only on emnw and em2nw, that are binary matrices
### ==== COMPUTE BASIC GLOBAL INDEXES ====
# Summarise a bipartite (association x city) incidence matrix in one row:
# density, diameter, size, number of components, orders and degree statistics.
graphlevelindex <- function(matrix){
  require(igraph)
  require(Matrix)
  # Orders of the two modes and size of the graph
  n_assoc  <- nrow(matrix)    # number of associations (rows)
  n_cities <- ncol(matrix)    # number of cities (columns)
  n_links  <- nnzero(matrix)  # non-zero cells = affiliation links
  # Bipartite density: realised links over all possible asso-city pairs
  dens <- n_links / (n_cities * n_assoc)
  # Undirected bipartite graph, used for diameter and connected components
  bip <- graph.incidence(matrix, directed = FALSE)
  diam <- diameter(bip, directed = FALSE)
  n_comp <- components(bip)$no
  # Degree = number of non-zero cells per column (city) / per row (association)
  city_deg <- colSums(matrix != 0)
  asso_deg <- rowSums(matrix != 0)
  # One-row data frame (French column names kept for downstream export)
  data.frame(Densité = dens,
             Diamètre = diam,
             Taille = n_links,
             NbComp = n_comp,
             NbVilles = n_cities,
             NbAssos = n_assoc,
             MeanDegreeCity = mean(city_deg),
             MedDegreeCity = median(city_deg),
             MeanDegreeAsso = mean(asso_deg),
             MedDegreeAsso = median(asso_deg))
}
### apply on matrices
# full network with duplicates (multiple members per city) kept within associations
dimem <- graphlevelindex(em)
# duplicates kept within associations, but cities with degree 1 removed
dimem2 <- graphlevelindex(em2)
# full network without duplicates within associations
dimemnw <- graphlevelindex(emnw)
# without duplicates within associations and cities with degree 1 removed
dimem2nw <- graphlevelindex(em2nw)
## Results in a df (only the two binary matrices are kept)
dfnetworklevel <- rbind(dimemnw, dimem2nw) %>% mutate_all(~round(., digits = 3))
# Prepare the data frame for export (labels in French, used as-is in the PDF table)
NameFilter <- c("Filtrage des doublons\ndans les villes membres\nde chaque association",
                "Filtrage des doublons\ndans les villes membres\nde chaque association\n&\nFiltrage des villes\nprésentes dans une seule\nassociation")
TypeGraph <- c("Binaire", "Binaire")
TypeGraph
# Pct cities filtered (0 for the full network, em2nwf for the degree-filtered one)
Citiesfilter <- c(0,em2nwf)
# Insert description of filtering
dfnetworklevel <- dfnetworklevel %>% mutate(TypeGraph = TypeGraph) %>%
  mutate(Filtre = NameFilter) %>%
  mutate(Citiesfilter = Citiesfilter)%>%
  mutate(NomMatrix = c("emnw", "em2nw"))%>%
  select(Filtre,NomMatrix,TypeGraph,Densité, Diamètre,NbComp, Taille,NbVilles, Citiesfilter, everything())
# French display names, in the same order as the columns selected above
Varnames <- c("Filtrage du graphe",
              "Nom de la matrice",
              "Type de matrice",
              "Densité",
              "Diamètre",
              "Nb composantes",
              "Taille\n(nb de liens)",
              "Nb de Villes",
              "Pct villes filtrées",
              "Nb d'associations",
              "Degré moyen\ndes villes",
              "Degré médian\ndes villes",
              "Degré moyen\ndes associations",
              "Degré médian\ndes associations")
colnames(dfnetworklevel) <- Varnames
## export df as pdf (transposed: one indicator per row)
dfexport <- t(dfnetworklevel)
pdf(file= "OUT/dfBipartiEtmun_index.pdf", height = 7, width =6.5 )
grid.table(dfexport)
dev.off()
#### SAVE RDS the em2nw matrix for STEP2
saveRDS(em2nw, "DataProd/Etmun_em2nw.rds")
saveRDS(emnw, "DataProd/Etmun_emnw.rds")
| /4.2.InterOrgaNet/ETMUN_inter/STEP1_2m_dimensions_ETMUN.R | no_license | pgourdongeo/Chap4_NetworkAnalysis | R | false | false | 6,580 | r | ############################### ETMUN bipartite Networks STEP 1 ###################
#
#
# DESCRIPTION : Work on the bipartite ETMUN (affiliation) graph.
#               Numerical summary of the graph dimensions (different filterings)
#
#
############################################################################## PG June 2020
### ==== LOAD PACKAGES AND DATA ====
setwd("~/Chap4_NetworkAnalysis/Chap4_NetworkAnalysis/4.2.InterOrgaNet/ETMUN_Inter")
# Packages
library(tidyverse)
library(tidylog)
library(igraph)
library(gridExtra)
library(Matrix)
##### Data
## Membership : table City-Asso. (for edges). EUROPE frame filtered (check STEP0 in Data folder)
MembershipEtmun <- readRDS("~/Chap4_NetworkAnalysis/Chap4_NetworkAnalysis/Data/ETMUN/ETMUN_Membership_europe.RDS")
## Information on associations (for nodes)
AssoEtmun <- read.csv2("~/Chap4_NetworkAnalysis/Chap4_NetworkAnalysis/Data/ETMUN/BD_ETMUN_OrganizationsWithMembersCities.csv",
                       stringsAsFactors = F)
### ==== CREATE NETWORK DATA ====
# Node label = "<geonameId>_<asciiName>_<CountryCode>"
MembershipEtmun$label <- paste(MembershipEtmun$geonameId, MembershipEtmun$asciiName,MembershipEtmun$CountryCode, sep = "_")
## Create matrix
# edge list: one row per (city, association) pair, weight = number of memberships
edgelist <- MembershipEtmun %>%
  filter(!is.na(geonameId)) %>%
  select( geonameId, Code_Network) %>%
  group_by(geonameId,Code_Network)%>%
  summarise(weight = n()) # some weight > 1 because several members of an association are (representing the same city) in the same locality
#(eg. Municipality of Barcelona + Metropolitan area of Barcelona)
## Pct edges with weight > 1
edgeSup1 <- edgelist %>% filter(weight > 1)
nrow(edgeSup1)/ nrow(edgelist)
edgelistnw <- edgelist %>% mutate(weight = 1) # edgelist non weighted (transform weight > 1)
## Convert edgelist into a matrix (rows = associations, columns = cities)
# with possible multiple affiliations of a city in the same organisation (weighted)
#em = edges matrix
em <- edgelist %>%
  pivot_wider(names_from = geonameId, values_from = weight, values_fill = list(weight = 0)) %>%
  column_to_rownames(var="Code_Network") %>%
  as.matrix()
# Remove duplicate, only unique localities in each association (unweighted, binary)
emnw <- edgelistnw %>%
  pivot_wider(names_from = geonameId, values_from = weight, values_fill = list(weight = 0)) %>%
  column_to_rownames(var="Code_Network") %>%
  as.matrix()
## Filter to keep only cities present in two asso (column sums > 1 = degree > 1)
# with possible multiple affiliations of a city in the same organisation (weighted)
em2 <- em[,colSums(em) > 1]
em2f <- round((ncol(em)- ncol(em2))/ncol(em)*100, 0) # remove 85% of cities. Remain 1976 cities in more than one association
# Remove duplicate, only unique localities in each association (unweighted)
em2nw <- emnw[,colSums(emnw) > 1]
em2nwf <- round((ncol(emnw)- ncol(em2nw))/ncol(emnw)*100, 0) # remove 88% of cities. Remain 1545 cities in more than one association
# Remove WWCAM ASSO with only one european city
assoremove <- "18332"
#Filter out the association on the matrix that keep city of degree 1
emnw <- emnw[!rownames(emnw) %in% assoremove, ]
emnw <- emnw[,colSums(emnw) > 0]  # drop cities left with no membership after the removal
#Filter out the association on the matrix that keep city of more than 1 degree (only cities involved at least in 2 associations)
em2nw <- em2nw[!rownames(em2nw) %in% assoremove, ]
em2nw <- em2nw[,colSums(em2nw) > 1]
### Choice to work only on emnw and em2nw, that are binary matrices
### ==== COMPUTE BASIC GLOBAL INDEXES ====
# Summarise a bipartite (association x city) incidence matrix in one row:
# density, diameter, size, number of components, orders and degree statistics.
graphlevelindex <- function(matrix){
  require(igraph)
  require(Matrix)
  # Orders of the two modes and size of the graph
  n_assoc  <- nrow(matrix)    # number of associations (rows)
  n_cities <- ncol(matrix)    # number of cities (columns)
  n_links  <- nnzero(matrix)  # non-zero cells = affiliation links
  # Bipartite density: realised links over all possible asso-city pairs
  dens <- n_links / (n_cities * n_assoc)
  # Undirected bipartite graph, used for diameter and connected components
  bip <- graph.incidence(matrix, directed = FALSE)
  diam <- diameter(bip, directed = FALSE)
  n_comp <- components(bip)$no
  # Degree = number of non-zero cells per column (city) / per row (association)
  city_deg <- colSums(matrix != 0)
  asso_deg <- rowSums(matrix != 0)
  # One-row data frame (French column names kept for downstream export)
  data.frame(Densité = dens,
             Diamètre = diam,
             Taille = n_links,
             NbComp = n_comp,
             NbVilles = n_cities,
             NbAssos = n_assoc,
             MeanDegreeCity = mean(city_deg),
             MedDegreeCity = median(city_deg),
             MeanDegreeAsso = mean(asso_deg),
             MedDegreeAsso = median(asso_deg))
}
### apply on matrices
# full network with duplicates (multiple members per city) kept within associations
dimem <- graphlevelindex(em)
# duplicates kept within associations, but cities with degree 1 removed
dimem2 <- graphlevelindex(em2)
# full network without duplicates within associations
dimemnw <- graphlevelindex(emnw)
# without duplicates within associations and cities with degree 1 removed
dimem2nw <- graphlevelindex(em2nw)
## Results in a df (only the two binary matrices are kept)
dfnetworklevel <- rbind(dimemnw, dimem2nw) %>% mutate_all(~round(., digits = 3))
# Prepare the data frame for export (labels in French, used as-is in the PDF table)
NameFilter <- c("Filtrage des doublons\ndans les villes membres\nde chaque association",
                "Filtrage des doublons\ndans les villes membres\nde chaque association\n&\nFiltrage des villes\nprésentes dans une seule\nassociation")
TypeGraph <- c("Binaire", "Binaire")
TypeGraph
# Pct cities filtered (0 for the full network, em2nwf for the degree-filtered one)
Citiesfilter <- c(0,em2nwf)
# Insert description of filtering
dfnetworklevel <- dfnetworklevel %>% mutate(TypeGraph = TypeGraph) %>%
  mutate(Filtre = NameFilter) %>%
  mutate(Citiesfilter = Citiesfilter)%>%
  mutate(NomMatrix = c("emnw", "em2nw"))%>%
  select(Filtre,NomMatrix,TypeGraph,Densité, Diamètre,NbComp, Taille,NbVilles, Citiesfilter, everything())
# French display names, in the same order as the columns selected above
Varnames <- c("Filtrage du graphe",
              "Nom de la matrice",
              "Type de matrice",
              "Densité",
              "Diamètre",
              "Nb composantes",
              "Taille\n(nb de liens)",
              "Nb de Villes",
              "Pct villes filtrées",
              "Nb d'associations",
              "Degré moyen\ndes villes",
              "Degré médian\ndes villes",
              "Degré moyen\ndes associations",
              "Degré médian\ndes associations")
colnames(dfnetworklevel) <- Varnames
## export df as pdf (transposed: one indicator per row)
dfexport <- t(dfnetworklevel)
pdf(file= "OUT/dfBipartiEtmun_index.pdf", height = 7, width =6.5 )
grid.table(dfexport)
dev.off()
#### SAVE RDS the em2nw matrix for STEP2
saveRDS(em2nw, "DataProd/Etmun_em2nw.rds")
saveRDS(emnw, "DataProd/Etmun_emnw.rds")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sem.R
\name{sem}
\alias{sem}
\title{Standard Error of the Mean}
\usage{
sem(x, na.rm = FALSE)
}
\arguments{
\item{x}{A numeric vector or an R object, which is coercible to one by as.vector(x, "numeric")}
\item{na.rm}{logical. Should missing values be removed?}
}
\description{
Computes the standard error of the mean (SEM)
}
\examples{
sem(1:4)
}
\seealso{
sd for standard deviation
}
| /man/sem.Rd | no_license | opelr/opelR | R | false | true | 464 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sem.R
\name{sem}
\alias{sem}
\title{Standard Error of the Mean}
\usage{
sem(x, na.rm = FALSE)
}
\arguments{
\item{x}{A numeric vector or an R object, which is coercible to one by as.vector(x, "numeric")}
\item{na.rm}{logical. Should missing values be removed?}
}
\description{
Computes the standard error of the mean (SEM)
}
\examples{
sem(1:4)
}
\seealso{
sd for standard deviation
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gsMethods.R, R/gsqplot.R
\name{summary.gsDesign}
\alias{summary.gsDesign}
\alias{print.gsDesign}
\alias{gsBoundSummary}
\alias{xprint}
\alias{print.gsBoundSummary}
\alias{gsBValue}
\alias{gsDelta}
\alias{gsRR}
\alias{gsHR}
\alias{gsCPz}
\title{Bound Summary and Z-transformations}
\usage{
\method{summary}{gsDesign}(object, information = FALSE, timeunit = "months", ...)
\method{print}{gsDesign}(x, ...)
gsBoundSummary(
x,
deltaname = NULL,
logdelta = FALSE,
Nname = NULL,
digits = 4,
ddigits = 2,
tdigits = 0,
timename = "Month",
prior = normalGrid(mu = x$delta/2, sigma = 10/sqrt(x$n.fix)),
POS = FALSE,
ratio = NULL,
exclude = c("B-value", "Spending", "CP", "CP H1", "PP"),
r = 18,
...
)
xprint(
x,
include.rownames = FALSE,
hline.after = c(-1, which(x$Value == x[1, ]$Value) - 1, nrow(x)),
...
)
\method{print}{gsBoundSummary}(x, row.names = FALSE, digits = 4, ...)
gsBValue(z, i, x, ylab = "B-value", ...)
gsDelta(z, i, x, ylab = NULL, ...)
gsRR(z, i, x, ratio = 1, ylab = "Approximate risk ratio", ...)
gsHR(z, i, x, ratio = 1, ylab = "Approximate hazard ratio", ...)
gsCPz(z, i, x, theta = NULL, ylab = NULL, ...)
}
\arguments{
\item{object}{An item of class \code{gsDesign} or \code{gsSurv}}
\item{information}{indicator of whether \code{n.I} in \code{object}
represents statistical information rather than sample size or event counts.}
\item{timeunit}{Text string with time units used for time-to-event designs
created with \code{gsSurv()}}
\item{...}{This allows many optional arguments that are standard when
calling \code{plot} for \code{gsBValue}, \code{gsDelta}, \code{gsHR},
\code{gsRR} and \code{gsCPz}}
\item{x}{An item of class \code{gsDesign} or \code{gsSurv}, except for
\code{print.gsBoundSummary()} where \code{x} is an object created by
\code{gsBoundSummary()} and \code{xprint()} which is used with \code{xtable}
(see examples)}
\item{deltaname}{Natural parameter name. If default \code{NULL} is used,
routine will default to \code{"HR"} when class is \code{gsSurv} or if
\code{nFixSurv} was input when creating \code{x} with \code{gsDesign()}.}
\item{logdelta}{Indicates whether natural parameter is the natural logarithm
of the actual parameter. For example, the relative risk or odds-ratio would
be put on the logarithmic scale since the asymptotic behavior is 'more
normal' than a non-transformed value. As with \code{deltaname}, the default
will be changed to true if \code{x} has class \code{gsDesign} or if
\code{nFixSurv>0} was input when \code{x} was created by \code{gsDesign()};
that is, the natural parameter for a time-to-event endpoint will be on the
logarithmic scale.}
\item{Nname}{This will normally be changed to \code{"N"} or, if a
time-to-event endpoint is used, \code{"Events"}. Other immediate possibility
are \code{"Deaths"} or \code{"Information"}.}
\item{digits}{Number of digits past the decimal to be printed in the body of
the table.}
\item{ddigits}{Number of digits past the decimal to be printed for the
natural parameter delta.}
\item{tdigits}{Number of digits past the decimal point to be shown for
estimated timing of each analysis.}
\item{timename}{Text string indicating time unit.}
\item{prior}{A prior distribution for the standardized effect size. Must be
of the format produced by \code{normalGrid()}, but can reflect an arbitrary
prior distribution. The default reflects a normal prior centered half-way
between the null and alternate hypothesis with the variance being equivalent
to the treatment effect estimate if 1 percent of the sample size for a fixed
design were sampled. The prior is intended to be relatively uninformative.
This input will only be applied if \code{POS=TRUE} is input.}
\item{POS}{This is an indicator of whether or not probability of success
(POS) should be estimated at baseline or at each interim based on the prior
distribution input in \code{prior}. The prior probability of success before
the trial starts is the power of the study averaged over the prior
distribution for the standardized effect size. The POS after an interim
analysis assumes the interim test statistic is an unknown value between the
futility and efficacy bounds. Based on this, a posterior distribution for
the standardized parameter is computed and the conditional power of the
trial is averaged over this posterior distribution.}
\item{ratio}{Sample size ratio assumed for experimental to control treatment
group sample sizes. This only matters when \code{x} for a binomial or
time-to-event endpoint where \code{gsRR} or \code{gsHR} are used for
approximating the treatment effect if a test statistic falls on a study
bound.}
\item{exclude}{A list of test statistics to be excluded from design boundary
summary produced; see details or examples for a list of all possible output
values. A value of \code{NULL} produces all available summaries.}
\item{r}{See \code{\link{gsDesign}}. This is an integer used to control the
degree of accuracy of group sequential calculations which will normally not
be changed.}
\item{include.rownames}{indicator of whether or not to include row names in
output.}
\item{hline.after}{table lines after which horizontal separation lines
should be set; default is to put lines between each analysis as well as at
the top and bottom of the table.}
\item{row.names}{indicator of whether or not to print row names}
\item{z}{A vector of z-statistics}
\item{i}{A vector containing the analysis for each element in \code{z}; each
element must be in 1 to \code{x$k}, inclusive}
\item{ylab}{Used when functions are passed to \code{plot.gsDesign} to
establish default y-axis labels}
\item{theta}{A scalar value representing the standardized effect size used
for conditional power calculations; see \code{gsDesign}; if NULL,
conditional power is computed at the estimated interim treatment effect
based on \code{z}}
}
\value{
\code{gsBValue()}, \code{gsDelta()}, \code{gsHR()} and
\code{gsCPz()} each returns a vector containing the B-values, approximate
treatment effect (see details), approximate hazard ratio and conditional
power, respectively, for each value specified by the interim test statistics
in \code{z} at interim analyses specified in \code{i}.
\code{summary} returns a text string summarizing the design at a high level.
This may be used with \code{gsBoundSummary} for a nicely formatted, concise
group sequential design description.
\code{gsBoundSummary} returns a table in a data frame providing a variety of
boundary characteristics. The tabular format makes formatting particularly
amenable to place in documents either through direct creation of readable by
Word (see the \code{rtf} package) or to a csv format readable by spreadsheet
software using \code{write.csv}.
\code{print.gsDesign} prints an overall summary a group sequential design.
While the design description is complete, the format is not as `document
friendly' as \code{gsBoundSummary}.
\code{print.gsBoundSummary} is a simple extension of \code{print.data.frame}
intended for objects created with \code{gsBoundSummary}. The only extension
is to make the default to not print row names. This is probably `not good R
style' but may be helpful for many lazy R programmers like the author.
}
\description{
A tabular summary of a group sequential design's bounds and their properties
are often useful. The 'vintage' \code{print.gsDesign()} function provides a
complete but minimally formatted summary of a group sequential design
derived by \code{gsDesign()}. A brief description of the overall design can
also be useful (\code{summary.gsDesign()}. A tabular summary of boundary
characteristics oriented only towards LaTeX output is produced by
\code{\link{xtable.gsSurv}}. More flexibility is provided by
\code{gsBoundSummary()} which produces a tabular summary of a
user-specifiable set of package-provided boundary properties in a data
frame. This can also be used to along with functions such as
\code{\link{print.data.frame}()}, \code{\link{write.table}()},
\code{\link{write.csv}()}, \code{\link{write.csv2}()} or, from the RTF
package, \code{addTable.RTF()} (from the rtf package) to produce console or
R Markdown output or output to a variety of file types. \code{xprint()} is
provided for LaTeX output by setting default options for
\code{\link{print.xtable}()} when producing tables summarizing design
bounds.
Individual transformation of z-value test statistics for interim and final
analyses are obtained from \code{gsBValue()}, \code{gsDelta()},
\code{gsHR()} and \code{gsCPz()} for B-values, approximate treatment effect
(see details), approximate hazard ratio and conditional power, respectively.
The \code{print.gsDesign} function is intended to provide an easier output
to review than is available from a simple list of all the output components.
The \code{gsBoundSummary} function is intended to provide a summary of
boundary characteristics that is often useful for evaluating boundary
selection; this outputs an extension of the \code{data.frame} class that
sets up default printing without row names using
\code{print.gsBoundSummary}. \code{summary.gsDesign}, on the other hand,
provides a summary of the overall design at a higher level; this provides
characteristics not included in the \code{gsBoundSummary} summary and no
detail concerning interim analysis bounds.
In brief, the computed descriptions of group sequential design bounds are as
follows: \code{Z:} Standardized normal test statistic at design bound.
\code{p (1-sided):} 1-sided p-value for \code{Z}. This will be computed as
the probability of a greater EXCEPT for lower bound when a 2-sided design is
being summarized.
\code{delta at bound:} Approximate value of the natural parameter at the
bound. The approximate standardized effect size at the bound is generally
computed as \code{Z/sqrt(n)}. Calling this \code{theta}, this is translated
to the \code{delta} using the values \code{delta0} and \code{delta1} from
the input \code{x} by the formula \code{delta0 +
(delta1-delta0)/theta1*theta} where \code{theta1} is the alternate
hypothesis value of the standardized parameter. Note that this value will be
exponentiated in the case of relative risks, hazard ratios or when the user
specifies \code{logdelta=TRUE}. In the case of hazard ratios, the value is
computed instead by \code{gsHR()} to be consistent with
\code{plot.gsDesign()}. Similarly, the value is computed by \code{gsRR()}
when the relative risk is the natural parameter.
\code{Spending: }Incremental error spending at each given analysis. For
asymmetric designs, futility bound will have beta-spending summarized.
Efficacy bound always has alpha-spending summarized.
\code{B-value: }\code{sqrt(t)*Z} where \code{t} is the proportion of
information at the analysis divided by the final analysis planned
information. The expected value for B-values is directly proportional to
\code{t}.
\code{CP: }Conditional power under the estimated treatment difference
assuming the interim Z-statistic is at the study bound
\code{CP H1: }Conditional power under the alternate hypothesis treatment
effect assuming the interim test statistic is at the study bound.
\code{PP: }Predictive power assuming the interim test statistic is at the
study bound and the input prior distribution for the standardized effect
size. This is the conditional power averaged across the posterior
distribution for the treatment effect given the interim test statistic
value. \code{P{Cross if delta=xx}: }For each of the parameter values in
\code{x}, the probability of crossing either bound given that treatment
effect is computed. This value is cumulative for each bound. For example,
the probability of crossing the efficacy bound at or before the analysis of
interest.
}
\note{
The gsDesign technical manual is available at
\url{https://keaven.github.io/gsd-tech-manual/}.
}
\examples{
library(ggplot2)
# survival endpoint using gsSurv
# generally preferred over nSurv since time computations are shown
xgs <- gsSurv(lambdaC = .2, hr = .5, eta = .1, T = 2, minfup = 1.5)
gsBoundSummary(xgs, timename = "Year", tdigits = 1)
summary(xgs)
# survival endpoint using nSurvival
# NOTE: generally recommend gsSurv above for this!
ss <- nSurvival(
lambda1 = .2, lambda2 = .1, eta = .1, Ts = 2, Tr = .5,
sided = 1, alpha = .025, ratio = 2
)
xs <- gsDesign(nFixSurv = ss$n, n.fix = ss$nEvents, delta1 = log(ss$lambda2 / ss$lambda1))
gsBoundSummary(xs, logdelta = TRUE, ratio = ss$ratio)
# generate some of the above summary statistics for the upper bound
z <- xs$upper$bound
# B-values
gsBValue(z = z, i = 1:3, x = xs)
# hazard ratio
gsHR(z = z, i = 1:3, x = xs)
# conditional power at observed treatment effect
gsCPz(z = z[1:2], i = 1:2, x = xs)
# conditional power at H1 treatment effect
gsCPz(z = z[1:2], i = 1:2, x = xs, theta = xs$delta)
# information-based design
xinfo <- gsDesign(delta = .3, delta1 = .3)
gsBoundSummary(xinfo, Nname = "Information")
# show all available boundary descriptions
gsBoundSummary(xinfo, Nname = "Information", exclude = NULL)
# add intermediate parameter value
xinfo <- gsProbability(d = xinfo, theta = c(0, .15, .3))
class(xinfo) # note this is still a gsDesign class object
gsBoundSummary(xinfo, Nname = "Information")
# now look at a binomial endpoint; specify H0 treatment difference as p1-p2=.05
# now treatment effect at bound (say, thetahat) is transformed to
# xp$delta0 + xp$delta1*(thetahat-xp$delta0)/xp$delta
np <- nBinomial(p1 = .15, p2 = .10)
xp <- gsDesign(n.fix = np, endpoint = "Binomial", delta1 = .05)
summary(xp)
gsBoundSummary(xp, deltaname = "p[C]-p[E]")
# estimate treatment effect at lower bound
# by setting delta0=0 (default) and delta1 above in gsDesign
# treatment effect at bounds is scaled to these differences
# in this case, this is the difference in event rates
gsDelta(z = xp$lower$bound, i = 1:3, xp)
# binomial endpoint with risk ratio estimates
n.fix <- nBinomial(p1 = .3, p2 = .15, scale = "RR")
xrr <- gsDesign(k = 2, n.fix = n.fix, delta1 = log(.15 / .3), endpoint = "Binomial")
gsBoundSummary(xrr, deltaname = "RR", logdelta = TRUE)
gsRR(z = xp$lower$bound, i = 1:3, xrr)
plot(xrr, plottype = "RR")
# delta is odds-ratio: sample size slightly smaller than for relative risk or risk difference
n.fix <- nBinomial(p1 = .3, p2 = .15, scale = "OR")
xOR <- gsDesign(k = 2, n.fix = n.fix, delta1 = log(.15 / .3 / .85 * .7), endpoint = "Binomial")
gsBoundSummary(xOR, deltaname = "OR", logdelta = TRUE)
# for nice LaTeX table output, use xprint
xprint(xtable::xtable(gsBoundSummary(xOR, deltaname = "OR", logdelta = TRUE),
caption = "Table caption."))
}
\references{
Jennison C and Turnbull BW (2000), \emph{Group Sequential
Methods with Applications to Clinical Trials}. Boca Raton: Chapman and Hall.
}
\seealso{
\link{gsDesign}, \link{plot.gsDesign},
\code{\link{gsProbability}}, \code{\link{xtable.gsSurv}}
}
\author{
Keaven Anderson \email{keaven_anderson@merck.com}
}
\keyword{design}
| /man/gsBoundSummary.Rd | no_license | cran/gsDesign | R | false | true | 15,408 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gsMethods.R, R/gsqplot.R
\name{summary.gsDesign}
\alias{summary.gsDesign}
\alias{print.gsDesign}
\alias{gsBoundSummary}
\alias{xprint}
\alias{print.gsBoundSummary}
\alias{gsBValue}
\alias{gsDelta}
\alias{gsRR}
\alias{gsHR}
\alias{gsCPz}
\title{Bound Summary and Z-transformations}
\usage{
\method{summary}{gsDesign}(object, information = FALSE, timeunit = "months", ...)
\method{print}{gsDesign}(x, ...)
gsBoundSummary(
x,
deltaname = NULL,
logdelta = FALSE,
Nname = NULL,
digits = 4,
ddigits = 2,
tdigits = 0,
timename = "Month",
prior = normalGrid(mu = x$delta/2, sigma = 10/sqrt(x$n.fix)),
POS = FALSE,
ratio = NULL,
exclude = c("B-value", "Spending", "CP", "CP H1", "PP"),
r = 18,
...
)
xprint(
x,
include.rownames = FALSE,
hline.after = c(-1, which(x$Value == x[1, ]$Value) - 1, nrow(x)),
...
)
\method{print}{gsBoundSummary}(x, row.names = FALSE, digits = 4, ...)
gsBValue(z, i, x, ylab = "B-value", ...)
gsDelta(z, i, x, ylab = NULL, ...)
gsRR(z, i, x, ratio = 1, ylab = "Approximate risk ratio", ...)
gsHR(z, i, x, ratio = 1, ylab = "Approximate hazard ratio", ...)
gsCPz(z, i, x, theta = NULL, ylab = NULL, ...)
}
\arguments{
\item{object}{An item of class \code{gsDesign} or \code{gsSurv}}
\item{information}{indicator of whether \code{n.I} in \code{object}
represents statistical information rather than sample size or event counts.}
\item{timeunit}{Text string with time units used for time-to-event designs
created with \code{gsSurv()}}
\item{...}{This allows many optional arguments that are standard when
calling \code{plot} for \code{gsBValue}, \code{gsDelta}, \code{gsHR},
\code{gsRR} and \code{gsCPz}}
\item{x}{An item of class \code{gsDesign} or \code{gsSurv}, except for
\code{print.gsBoundSummary()} where \code{x} is an object created by
\code{gsBoundSummary()} and \code{xprint()} which is used with \code{xtable}
(see examples)}
\item{deltaname}{Natural parameter name. If default \code{NULL} is used,
routine will default to \code{"HR"} when class is \code{gsSurv} or if
\code{nFixSurv} was input when creating \code{x} with \code{gsDesign()}.}
\item{logdelta}{Indicates whether natural parameter is the natural logarithm
of the actual parameter. For example, the relative risk or odds-ratio would
be put on the logarithmic scale since the asymptotic behavior is 'more
normal' than a non-transformed value. As with \code{deltaname}, the default
will be changed to true if \code{x} has class \code{gsDesign} or if
\code{nFixSurv>0} was input when \code{x} was created by \code{gsDesign()};
that is, the natural parameter for a time-to-event endpoint will be on the
logarithmic scale.}
\item{Nname}{This will normally be changed to \code{"N"} or, if a
time-to-event endpoint is used, \code{"Events"}. Other immediate possibility
are \code{"Deaths"} or \code{"Information"}.}
\item{digits}{Number of digits past the decimal to be printed in the body of
the table.}
\item{ddigits}{Number of digits past the decimal to be printed for the
natural parameter delta.}
\item{tdigits}{Number of digits past the decimal point to be shown for
estimated timing of each analysis.}
\item{timename}{Text string indicating time unit.}
\item{prior}{A prior distribution for the standardized effect size. Must be
of the format produced by \code{normalGrid()}, but can reflect an arbitrary
prior distribution. The default reflects a normal prior centered half-way
between the null and alternate hypothesis with the variance being equivalent
to the treatment effect estimate if 1 percent of the sample size for a fixed
design were sampled. The prior is intended to be relatively uninformative.
This input will only be applied if \code{POS=TRUE} is input.}
\item{POS}{This is an indicator of whether or not probability of success
(POS) should be estimated at baseline or at each interim based on the prior
distribution input in \code{prior}. The prior probability of success before
the trial starts is the power of the study averaged over the prior
distribution for the standardized effect size. The POS after an interim
analysis assumes the interim test statistic is an unknown value between the
futility and efficacy bounds. Based on this, a posterior distribution for
the standardized parameter is computed and the conditional power of the
trial is averaged over this posterior distribution.}
\item{ratio}{Sample size ratio assumed for experimental to control treatment
group sample sizes. This only matters when \code{x} for a binomial or
time-to-event endpoint where \code{gsRR} or \code{gsHR} are used for
approximating the treatment effect if a test statistic falls on a study
bound.}
\item{exclude}{A list of test statistics to be excluded from design boundary
summary produced; see details or examples for a list of all possible output
values. A value of \code{NULL} produces all available summaries.}
\item{r}{See \code{\link{gsDesign}}. This is an integer used to control the
degree of accuracy of group sequential calculations which will normally not
be changed.}
\item{include.rownames}{indicator of whether or not to include row names in
output.}
\item{hline.after}{table lines after which horizontal separation lines
should be set; default is to put lines between each analysis as well as at
the top and bottom of the table.}
\item{row.names}{indicator of whether or not to print row names}
\item{z}{A vector of z-statistics}
\item{i}{A vector containing the analysis for each element in \code{z}; each
element must be in 1 to \code{x$k}, inclusive}
\item{ylab}{Used when functions are passed to \code{plot.gsDesign} to
establish default y-axis labels}
\item{theta}{A scalar value representing the standardized effect size used
for conditional power calculations; see \code{gsDesign}; if NULL,
conditional power is computed at the estimated interim treatment effect
based on \code{z}}
}
\value{
\code{gsBValue()}, \code{gsDelta()}, \code{gsHR()} and
\code{gsCPz()} each returns a vector containing the B-values, approximate
treatment effect (see details), approximate hazard ratio and conditional
power, respectively, for each value specified by the interim test statistics
in \code{z} at interim analyses specified in \code{i}.
\code{summary} returns a text string summarizing the design at a high level.
This may be used with \code{gsBoundSummary} for a nicely formatted, concise
group sequential design description.
\code{gsBoundSummary} returns a table in a data frame providing a variety of
boundary characteristics. The tabular format makes formatting particularly
amenable to place in documents either through direct creation of readable by
Word (see the \code{rtf} package) or to a csv format readable by spreadsheet
software using \code{write.csv}.
\code{print.gsDesign} prints an overall summary a group sequential design.
While the design description is complete, the format is not as `document
friendly' as \code{gsBoundSummary}.
\code{print.gsBoundSummary} is a simple extension of \code{print.data.frame}
intended for objects created with \code{gsBoundSummary}. The only extension
is to make the default to not print row names. This is probably `not good R
style' but may be helpful for many lazy R programmers like the author.
}
\description{
A tabular summary of a group sequential design's bounds and their properties
are often useful. The 'vintage' \code{print.gsDesign()} function provides a
complete but minimally formatted summary of a group sequential design
derived by \code{gsDesign()}. A brief description of the overall design can
also be useful (\code{summary.gsDesign()}. A tabular summary of boundary
characteristics oriented only towards LaTeX output is produced by
\code{\link{xtable.gsSurv}}. More flexibility is provided by
\code{gsBoundSummary()} which produces a tabular summary of a
user-specifiable set of package-provided boundary properties in a data
frame. This can also be used to along with functions such as
\code{\link{print.data.frame}()}, \code{\link{write.table}()},
\code{\link{write.csv}()}, \code{\link{write.csv2}()} or, from the RTF
package, \code{addTable.RTF()} (from the rtf package) to produce console or
R Markdown output or output to a variety of file types. \code{xprint()} is
provided for LaTeX output by setting default options for
\code{\link{print.xtable}()} when producing tables summarizing design
bounds.
Individual transformation of z-value test statistics for interim and final
analyses are obtained from \code{gsBValue()}, \code{gsDelta()},
\code{gsHR()} and \code{gsCPz()} for B-values, approximate treatment effect
(see details), approximate hazard ratio and conditional power, respectively.
The \code{print.gsDesign} function is intended to provide an easier output
to review than is available from a simple list of all the output components.
The \code{gsBoundSummary} function is intended to provide a summary of
boundary characteristics that is often useful for evaluating boundary
selection; this outputs an extension of the \code{data.frame} class that
sets up default printing without row names using
\code{print.gsBoundSummary}. \code{summary.gsDesign}, on the other hand,
provides a summary of the overall design at a higher level; this provides
characteristics not included in the \code{gsBoundSummary} summary and no
detail concerning interim analysis bounds.
In brief, the computed descriptions of group sequential design bounds are as
follows: \code{Z:} Standardized normal test statistic at design bound.
\code{p (1-sided):} 1-sided p-value for \code{Z}. This will be computed as
the probability of a greater value EXCEPT for the lower bound when a 2-sided
design is being summarized.
\code{delta at bound:} Approximate value of the natural parameter at the
bound. The approximate standardized effect size at the bound is generally
computed as \code{Z/sqrt(n)}. Calling this \code{theta}, this is translated
to the \code{delta} using the values \code{delta0} and \code{delta1} from
the input \code{x} by the formula \code{delta0 +
(delta1-delta0)/theta1*theta} where \code{theta1} is the alternate
hypothesis value of the standardized parameter. Note that this value will be
exponentiated in the case of relative risks, hazard ratios or when the user
specifies \code{logdelta=TRUE}. In the case of hazard ratios, the value is
computed instead by \code{gsHR()} to be consistent with
\code{plot.gsDesign()}. Similarly, the value is computed by \code{gsRR()}
when the relative risk is the natural parameter.
\code{Spending: }Incremental error spending at each given analysis. For
asymmetric designs, futility bound will have beta-spending summarized.
Efficacy bound always has alpha-spending summarized.
\code{B-value: }\code{sqrt(t)*Z} where \code{t} is the proportion of
information at the analysis divided by the final analysis planned
information. The expected value for B-values is directly proportional to
\code{t}.
\code{CP: }Conditional power under the estimated treatment difference
assuming the interim Z-statistic is at the study bound
\code{CP H1: }Conditional power under the alternate hypothesis treatment
effect assuming the interim test statistic is at the study bound.
\code{PP: }Predictive power assuming the interim test statistic is at the
study bound and the input prior distribution for the standardized effect
size. This is the conditional power averaged across the posterior
distribution for the treatment effect given the interim test statistic
value. \code{P{Cross if delta=xx}: }For each of the parameter values in
\code{x}, the probability of crossing either bound given that treatment
effect is computed. This value is cumulative for each bound. For example,
the probability of crossing the efficacy bound at or before the analysis of
interest.
}
\note{
The gsDesign technical manual is available at
\url{https://keaven.github.io/gsd-tech-manual/}.
}
\examples{
library(ggplot2)
# survival endpoint using gsSurv
# generally preferred over nSurv since time computations are shown
xgs <- gsSurv(lambdaC = .2, hr = .5, eta = .1, T = 2, minfup = 1.5)
gsBoundSummary(xgs, timename = "Year", tdigits = 1)
summary(xgs)
# survival endpoint using nSurvival
# NOTE: generally recommend gsSurv above for this!
ss <- nSurvival(
lambda1 = .2, lambda2 = .1, eta = .1, Ts = 2, Tr = .5,
sided = 1, alpha = .025, ratio = 2
)
xs <- gsDesign(nFixSurv = ss$n, n.fix = ss$nEvents, delta1 = log(ss$lambda2 / ss$lambda1))
gsBoundSummary(xs, logdelta = TRUE, ratio = ss$ratio)
# generate some of the above summary statistics for the upper bound
z <- xs$upper$bound
# B-values
gsBValue(z = z, i = 1:3, x = xs)
# hazard ratio
gsHR(z = z, i = 1:3, x = xs)
# conditional power at observed treatment effect
gsCPz(z = z[1:2], i = 1:2, x = xs)
# conditional power at H1 treatment effect
gsCPz(z = z[1:2], i = 1:2, x = xs, theta = xs$delta)
# information-based design
xinfo <- gsDesign(delta = .3, delta1 = .3)
gsBoundSummary(xinfo, Nname = "Information")
# show all available boundary descriptions
gsBoundSummary(xinfo, Nname = "Information", exclude = NULL)
# add intermediate parameter value
xinfo <- gsProbability(d = xinfo, theta = c(0, .15, .3))
class(xinfo) # note this is still a gsDesign class object
gsBoundSummary(xinfo, Nname = "Information")
# now look at a binomial endpoint; specify H0 treatment difference as p1-p2=.05
# now treatment effect at bound (say, thetahat) is transformed to
# xp$delta0 + xp$delta1*(thetahat-xp$delta0)/xp$delta
np <- nBinomial(p1 = .15, p2 = .10)
xp <- gsDesign(n.fix = np, endpoint = "Binomial", delta1 = .05)
summary(xp)
gsBoundSummary(xp, deltaname = "p[C]-p[E]")
# estimate treatment effect at lower bound
# by setting delta0=0 (default) and delta1 above in gsDesign
# treatment effect at bounds is scaled to these differences
# in this case, this is the difference in event rates
gsDelta(z = xp$lower$bound, i = 1:3, xp)
# binomial endpoint with risk ratio estimates
n.fix <- nBinomial(p1 = .3, p2 = .15, scale = "RR")
xrr <- gsDesign(k = 2, n.fix = n.fix, delta1 = log(.15 / .3), endpoint = "Binomial")
gsBoundSummary(xrr, deltaname = "RR", logdelta = TRUE)
gsRR(z = xp$lower$bound, i = 1:3, xrr)
plot(xrr, plottype = "RR")
# delta is odds-ratio: sample size slightly smaller than for relative risk or risk difference
n.fix <- nBinomial(p1 = .3, p2 = .15, scale = "OR")
xOR <- gsDesign(k = 2, n.fix = n.fix, delta1 = log(.15 / .3 / .85 * .7), endpoint = "Binomial")
gsBoundSummary(xOR, deltaname = "OR", logdelta = TRUE)
# for nice LaTeX table output, use xprint
xprint(xtable::xtable(gsBoundSummary(xOR, deltaname = "OR", logdelta = TRUE),
caption = "Table caption."))
}
\references{
Jennison C and Turnbull BW (2000), \emph{Group Sequential
Methods with Applications to Clinical Trials}. Boca Raton: Chapman and Hall.
}
\seealso{
\link{gsDesign}, \link{plot.gsDesign},
\code{\link{gsProbability}}, \code{\link{xtable.gsSurv}}
}
\author{
Keaven Anderson \email{keaven_anderson@merck.com}
}
\keyword{design}
|
# Question 2
#######################################################################################################################
#
# What type of variable is genhlth?
#
#######################################################################################################################
1 numerical, continuous
2 numerical, discrete
3 categorical (not ordinal)
4 categorical, ordinal
Answer - 4 categorical, ordinal | /dataCamp/openCourses/dataAnalysisAndStatisticalInference/2_introductionToData/5_question2.R | permissive | odonnmi/learnNPractice | R | false | false | 426 | r | # Question 2
#######################################################################################################################
#
# What type of variable is genhlth?
#
#######################################################################################################################
1 numerical, continuous
2 numerical, discrete
3 categorical (not ordinal)
4 categorical, ordinal
Answer - 4 categorical, ordinal |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/run_ora_on_eset.R
\name{get_entrezg_from_gsymbols}
\alias{get_entrezg_from_gsymbols}
\title{Get entrez from gene symbols}
\usage{
get_entrezg_from_gsymbols(object, load_org.xx.xx.db)
}
\arguments{
\item{object}{eset}
\item{load_org.xx.xx.db}{An organism annotation (OrgDb) object, e.g.
\code{org.Hs.eg.db::org.Hs.eg.db}, used to map gene symbols to Entrez
gene identifiers}
}
\value{
vector with entrez gene ids
}
\description{
Get entrez from gene symbols
}
\examples{
require(magrittr)
if (require(autonomics.data) & require(org.Hs.eg.db)){
object <- autonomics.data::billing2016
load_org.xx.xx.db <- org.Hs.eg.db::org.Hs.eg.db
object \%>\% get_entrezg_from_gsymbols(load_org.xx.xx.db)
}
if (require(atkin.2014) & require(org.Hs.eg.db)){
atkin.2014::soma[1:10, ] \%>\%
get_entrezg_from_gsymbols(org.Hs.eg.db::org.Hs.eg.db)
}
}
| /autonomics.ora/man/get_entrezg_from_gsymbols.Rd | no_license | bhagwataditya/autonomics0 | R | false | true | 810 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/run_ora_on_eset.R
\name{get_entrezg_from_gsymbols}
\alias{get_entrezg_from_gsymbols}
\title{Get entrez from gene symbols}
\usage{
get_entrezg_from_gsymbols(object, load_org.xx.xx.db)
}
\arguments{
\item{object}{eset}
\item{load_org.xx.xx.db}{An organism annotation (OrgDb) object, e.g.
\code{org.Hs.eg.db::org.Hs.eg.db}, used to map gene symbols to Entrez
gene identifiers}
}
\value{
vector with entrez gene ids
}
\description{
Get entrez from gene symbols
}
\examples{
require(magrittr)
if (require(autonomics.data) & require(org.Hs.eg.db)){
object <- autonomics.data::billing2016
load_org.xx.xx.db <- org.Hs.eg.db::org.Hs.eg.db
object \%>\% get_entrezg_from_gsymbols(load_org.xx.xx.db)
}
if (require(atkin.2014) & require(org.Hs.eg.db)){
atkin.2014::soma[1:10, ] \%>\%
get_entrezg_from_gsymbols(org.Hs.eg.db::org.Hs.eg.db)
}
}
|
# This file defines and describes the model to approximate
# There must be declarations for all the model parameters, e.g. the path
# and the command to execute it or the model dimensions d and k.
# The model described in this file is the one in the param_search directory.
source("R/Model.R")
# KPI: smallest cross-correlation output (positions 71-105 of the raw
# simulation output vector), floored at 0.001 so downstream code never
# receives a zero or negative value.
min.cross.corr <- function(outputs){
  cross.corrs <- outputs[71:105]
  max(0.001, min(cross.corrs))
}
# KPI: how many of the cross-correlation outputs (positions 71-105 of the
# raw simulation output vector) exceed the 0.7 threshold.
num.over.thresh <- function(outputs){
  cross.corrs <- outputs[71:105]
  length(which(cross.corrs > 0.7))
}
# KPI: fraction of the cross-correlation outputs (positions 71-105 of the
# raw simulation output vector) that exceed the 0.7 threshold.
ratio.over.thresh <- function(outputs) {
  cross.corrs <- outputs[71:105]
  length(which(cross.corrs > 0.7)) / length(cross.corrs)
}
# KPI extractors: each function maps the raw simulation output vector to
# one key performance indicator. Only the ratio-over-threshold KPI is
# active; the commented alternatives are kept for quick switching.
#KPI.extractors <- c(num.over.thresh,ratio.over.thresh)
KPI.extractors <- c(ratio.over.thresh)
#KPI.extractors <- NULL
# Model identification, file locations and the simulator command line.
model_name = "GynCycle"
path = "/home/marco/Uni/param_search"
cmd_path = "/home/marco/Uni/param_search/tmp"
bounds_file = "bounds.csv" # input variables bounds
param_file = "lambda.txt" # input values file for a simulation
default_param_file = "default_lambda.txt" # default input values file
sim_command = "./Test"
command_args = paste0("-overrideFile=", param_file)
# Model Dimensions
d = 76 # inputs
n_outputs = 105
# k: output dimension of the approximated model -- the number of KPIs
# when extractors are defined, otherwise the raw number of outputs.
k = ifelse(is.null(KPI.extractors),n_outputs,length(KPI.extractors) )
# Parses the captured output of a simulation run and returns the output
# values as a numeric vector.
#
# res is expected to hold exactly 5 lines; lines of the form
# "<name> = v1,v2,..." with <name> one of SqrNormDiff, AvgDiff or
# CrossCorr are collected and returned concatenated in that fixed order
# (other lines are ignored). If res does not have 5 lines the run is
# treated as failed and rep(NA, k) is returned (k is a global).
parseSimulationResults = function(res) {
  if (length(res) != 5) return(rep(NA, k))
  outputs = list(SqrNormDiff = c(), AvgDiff = c(), CrossCorr = c())
  for (i in seq_along(res)) {   # seq_along() is safe even for empty input
    line = strsplit(res[i], " = ")[[1]]
    if (line[1] %in% c("SqrNormDiff", "AvgDiff", "CrossCorr")) {
      # as.double() is vectorized, so the per-element sapply() is not
      # needed; this also drops the spurious names that sapply()
      # attached to every parsed value
      outputs[[line[1]]] = as.double(strsplit(line[2], ",")[[1]])
    }
  }
  return(c(outputs$SqrNormDiff, outputs$AvgDiff, outputs$CrossCorr))
}
# Reads the default input values file (one "lambda[i]=<value>" line per
# input) and returns the values as an unnamed numeric vector, in file
# order. cmd_path and default_param_file are globals.
read.default.inputs = function() {
  default_filename = file.path(cmd_path, default_param_file)
  # readLines() replaces the readChar()/strsplit() pair and is robust to
  # a missing or extra trailing newline
  lambda = readLines(default_filename)
  # vapply() preallocates instead of growing a vector inside a loop and,
  # unlike 1:length(lambda), handles an empty file correctly
  vapply(lambda, function(line) as.double(strsplit(line, "=")[[1]][2]),
         numeric(1), USE.NAMES = FALSE)
}
# Writes the simulation input file from the input vector X = (x1, ..., xd),
# one "lambda[i]=<value>" line per input dimension. d, cmd_path and
# param_file are globals.
write.input.file = function(X) {
  filename = file.path(cmd_path, param_file)
  # sprintf() is vectorized, so the whole file body is built in one call
  # instead of growing a character vector inside a loop
  txt = sprintf("lambda[%d]=%f", seq_len(d), X[seq_len(d)])
  # writeLines() accepts a path directly and closes the connection
  # itself, so the connection cannot leak if writing fails
  writeLines(txt, filename)
}
# Assemble the Model object (class sourced from R/Model.R) from the
# configuration values and helper functions declared above.
model = Model$new(model_name, path, cmd_path,
                  bounds_file,param_file,default_param_file,
                  sim_command, command_args,d,k, parse.output=parseSimulationResults,
                  read.default=read.default.inputs, write.input=write.input.file,
                  KPI.extractors=KPI.extractors)
| /init_model.R | no_license | espositomarco/SFA-falsification | R | false | false | 2,877 | r | # This file defines and describes the model to approximate
# There must be declarations for all the model parameters, e.g. the path
# and the command to execute it or the model dimensions d and k.
# The model described in this file is the one in the param_search directory.
source("R/Model.R")
# KPI: smallest cross-correlation output (positions 71-105 of the raw
# simulation output vector), floored at 0.001 so downstream code never
# receives a zero or negative value.
min.cross.corr <- function(outputs){
  cross.corrs <- outputs[71:105]
  max(0.001, min(cross.corrs))
}
# KPI: how many of the cross-correlation outputs (positions 71-105 of the
# raw simulation output vector) exceed the 0.7 threshold.
num.over.thresh <- function(outputs){
  cross.corrs <- outputs[71:105]
  length(which(cross.corrs > 0.7))
}
# KPI: fraction of the cross-correlation outputs (positions 71-105 of the
# raw simulation output vector) that exceed the 0.7 threshold.
ratio.over.thresh <- function(outputs) {
  cross.corrs <- outputs[71:105]
  length(which(cross.corrs > 0.7)) / length(cross.corrs)
}
# KPI extractors: each function maps the raw simulation output vector to
# one key performance indicator. Only the ratio-over-threshold KPI is
# active; the commented alternatives are kept for quick switching.
#KPI.extractors <- c(num.over.thresh,ratio.over.thresh)
KPI.extractors <- c(ratio.over.thresh)
#KPI.extractors <- NULL
# Model identification, file locations and the simulator command line.
model_name = "GynCycle"
path = "/home/marco/Uni/param_search"
cmd_path = "/home/marco/Uni/param_search/tmp"
bounds_file = "bounds.csv" # input variables bounds
param_file = "lambda.txt" # input values file for a simulation
default_param_file = "default_lambda.txt" # default input values file
sim_command = "./Test"
command_args = paste0("-overrideFile=", param_file)
# Model Dimensions
d = 76 # inputs
n_outputs = 105
# k: output dimension of the approximated model -- the number of KPIs
# when extractors are defined, otherwise the raw number of outputs.
k = ifelse(is.null(KPI.extractors),n_outputs,length(KPI.extractors) )
# Parses the captured output of a simulation run and returns the output
# values as a numeric vector.
#
# res is expected to hold exactly 5 lines; lines of the form
# "<name> = v1,v2,..." with <name> one of SqrNormDiff, AvgDiff or
# CrossCorr are collected and returned concatenated in that fixed order
# (other lines are ignored). If res does not have 5 lines the run is
# treated as failed and rep(NA, k) is returned (k is a global).
parseSimulationResults = function(res) {
  if (length(res) != 5) return(rep(NA, k))
  outputs = list(SqrNormDiff = c(), AvgDiff = c(), CrossCorr = c())
  for (i in seq_along(res)) {   # seq_along() is safe even for empty input
    line = strsplit(res[i], " = ")[[1]]
    if (line[1] %in% c("SqrNormDiff", "AvgDiff", "CrossCorr")) {
      # as.double() is vectorized, so the per-element sapply() is not
      # needed; this also drops the spurious names that sapply()
      # attached to every parsed value
      outputs[[line[1]]] = as.double(strsplit(line[2], ",")[[1]])
    }
  }
  return(c(outputs$SqrNormDiff, outputs$AvgDiff, outputs$CrossCorr))
}
# Reads the default input values file (one "lambda[i]=<value>" line per
# input) and returns the values as an unnamed numeric vector, in file
# order. cmd_path and default_param_file are globals.
read.default.inputs = function() {
  default_filename = file.path(cmd_path, default_param_file)
  # readLines() replaces the readChar()/strsplit() pair and is robust to
  # a missing or extra trailing newline
  lambda = readLines(default_filename)
  # vapply() preallocates instead of growing a vector inside a loop and,
  # unlike 1:length(lambda), handles an empty file correctly
  vapply(lambda, function(line) as.double(strsplit(line, "=")[[1]][2]),
         numeric(1), USE.NAMES = FALSE)
}
# Writes the simulation input file from the input vector X = (x1, ..., xd),
# one "lambda[i]=<value>" line per input dimension. d, cmd_path and
# param_file are globals.
write.input.file = function(X) {
  filename = file.path(cmd_path, param_file)
  # sprintf() is vectorized, so the whole file body is built in one call
  # instead of growing a character vector inside a loop
  txt = sprintf("lambda[%d]=%f", seq_len(d), X[seq_len(d)])
  # writeLines() accepts a path directly and closes the connection
  # itself, so the connection cannot leak if writing fails
  writeLines(txt, filename)
}
# Assemble the Model object (class sourced from R/Model.R) from the
# configuration values and helper functions declared above.
model = Model$new(model_name, path, cmd_path,
                  bounds_file,param_file,default_param_file,
                  sim_command, command_args,d,k, parse.output=parseSimulationResults,
                  read.default=read.default.inputs, write.input=write.input.file,
                  KPI.extractors=KPI.extractors)
|
# Reads a spike-train recording from an HDF5 file and builds the "s"
# spike-data object via construct.s(), copying any optional experiment
# metadata fields present in the file (dose, treatment, units, well,
# genotype, pup, trt.div) onto the result before computing the number of
# active electrodes with get.num.AE().
h5.read.spikes.dh <-
function (h5file, ids = NULL, time.interval = 1, beg = NULL,
    end = NULL, corr.breaks)
{
    ## Split vector v into chunks whose lengths are given by counts;
    ## used to regroup the flat spike-time vector into per-channel
    ## spike trains. Map() always returns a list, unlike the previous
    ## apply() call, which silently collapsed equal-length chunks into
    ## a matrix.
    chop <- function(v, counts) {
        stopifnot(sum(counts) == length(v))
        chunk.end <- cumsum(counts)
        chunk.beg <- c(1, 1 + chunk.end[-length(chunk.end)])
        Map(function(b, e) v[b:e], chunk.beg, chunk.end)
    }
    ## Read the entire HDF5 file into a list.
    data <- h5read(path.expand(h5file), name = "/")
    ## Regroup spikes by channel; get.array.info() expects the names to
    ## carry each channel title.
    spikes <- chop(as.vector(data$spikes), data$sCount)
    names(spikes) <- data$names
    arrayinfo <- get.array.info(data)
    layout <- arrayinfo$layout
    if (missing(corr.breaks)) {
        corr.breaks <- arrayinfo$corr.breaks
    }
    s <- construct.s(spikes, ids, time.interval, beg, end, corr.breaks,
        layout, filename = h5file)
    ## Copy whichever optional experiment metadata fields are present in
    ## the file onto the result (replaces seven copy-pasted if() blocks).
    optional.fields <- c("dose", "treatment", "units", "well",
        "genotype", "pup", "trt.div")
    for (field in intersect(optional.fields, names(data))) {
        s[[field]] <- data[[field]]
    }
    s <- get.num.AE(s)
    s
}
| /R/h5.read.spikes.dh.R | no_license | dianaransomhall/meadq | R | false | false | 1,463 | r | h5.read.spikes.dh <-
function (h5file, ids = NULL, time.interval = 1, beg = NULL,
end = NULL, corr.breaks)
{
chop <- function(v, counts) {
stopifnot(sum(counts) == length(v))
end <- cumsum(counts)
beg <- c(1, 1 + end[-length(end)])
begend <- cbind(beg, end)
apply(begend, 1, function(x) v[x[1]:x[2]])
}
#reads in data:
data <- h5read(path.expand(h5file), name = "/")
#chop breaks up the spikes into their respective channels
spikes <- chop(as.vector(data$spikes), data$sCount)
names(spikes) <- data$names
#get.array.info() expects the names to just have each their channel title
arrayinfo <- get.array.info(data)
layout <- arrayinfo$layout
if (missing(corr.breaks)) {
corr.breaks <- arrayinfo$corr.breaks
}
s <- construct.s(spikes, ids, time.interval, beg, end, corr.breaks,
layout, filename = h5file)
names.data<-names(data)
if (is.element("dose", names.data) ){
s$dose<-data$dose
}
if (is.element("treatment", names.data) ){
s$treatment<-data$treatment
}
if (is.element("units", names.data) ){
s$units<-data$units
}
if (is.element("well", names.data) ){
s$well<-data$well
}
if (is.element("genotype", names.data) ){
s$genotype<-data$genotype
}
if (is.element("pup", names.data) ){
s$pup<-data$pup
}
if (is.element("trt.div", names.data) ){
s$trt.div<-data$trt.div
}
s<-get.num.AE(s)
s
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fars_functions.R
\name{fars_read_years}
\alias{fars_read_years}
\title{Read FARS years}
\usage{
fars_read_years(years)
}
\arguments{
\item{years}{A vector with a list of years}
}
\value{
A data.frame including entries in data by month, or NULL if the
\code{year} is not valid
}
\description{
Ancillary function used by \code{fars_summarize_years}
}
\examples{
\dontrun{fars_read_years(2015)}
}
\seealso{
\link{fars_read}
\link{make_filename}
\link{fars_summarize_years}
}
| /man/fars_read_years.Rd | no_license | jpmaillard/fars | R | false | true | 553 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fars_functions.R
\name{fars_read_years}
\alias{fars_read_years}
\title{Read FARS years}
\usage{
fars_read_years(years)
}
\arguments{
\item{years}{A vector with a list of years}
}
\value{
A data.frame including entries in data by month, or NULL if the
\code{year} is not valid
}
\description{
Ancillary function used by \code{fars_summarize_years}
}
\examples{
\dontrun{fars_read_years(2015)}
}
\seealso{
\link{fars_read}
\link{make_filename}
\link{fars_summarize_years}
}
|
#==============================================================================
#TITLE : plot4.R
#DESCRIPTION : Exploratory Data Analysis - Course Project 2
# 1. Download emissions data
# 2. Explore how emissions from coal combustion sources changed in US
# 3. Draw a bar plot between total emissions for each year for coal
# combustion sources
#AUTHOR : Rajesh Thallam
#DATE : 1/25/2015
#VERSION : 0.1
#USAGE : draw.plot4()
#NOTES : Script can be executed in R console
#R_VERSION : R version 3.1.1 (2014-07-10)
#==============================================================================
# import libraries
library(ggplot2)
# helper method: logging to the console
p <- function(...) {
  # Timestamped console logger for this script: prefix every message with
  # the script name and the current wall-clock time.
  stamp <- format(Sys.time(), "[%Y-%m-%d %H:%M:%S]")
  cat("[plot4.R]", stamp, ..., "\n")
}
# helper method: downloading data if not available
# Download the NEI zip archive (if not already present) and extract it.
#
# Side effects: creates the "data" directory, downloads dataset.zip into it,
# and unzips the archive there.  Uses the logger p() defined above.
#
# Returns: the data directory path ("data").
download.data <- function() {
  url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
  download.dir <- "data"
  data.dir <- "data"
  zip.file <- file.path(download.dir, "dataset.zip")
  # download data
  if (!file.exists(download.dir)) {
    dir.create(download.dir)
  }
  if (!file.exists(zip.file)) {
    # mode = "wb" is required for binary files (zip) on Windows; the default
    # text mode silently corrupts the archive there
    download.file(url, zip.file, mode = "wb")
  } else {
    p("Data already exists")
  }
  # extract data; use the data.dir variable instead of repeating the literal
  if (file.exists(zip.file)) {
    unzip(zip.file, exdir = data.dir, overwrite = TRUE)
  }
  data.dir
}
# main function
# draw plot4 function
# Build plot4.png: total PM2.5 emissions per year from coal combustion
# related sources, as a faceless bar chart saved to the working directory.
# Relies on p() for logging and download.data() for fetching the data set.
draw.plot4 <- function() {
  p("Exploratory Data Analysis Project 2")
  p("Starting up...")
  p("Preparing to draw plot 4")
  # download and extract data
  p("Downloading and extracting data files")
  download.data()
  # read national emissions data
  p("Reading emissions data")
  NEI <- readRDS("data/summarySCC_PM25.rds")
  # read source code classification data
  p("Reading source code classification data")
  SCC <- readRDS("data/Source_Classification_Code.rds")
  # identify coal combustion-related source classification codes
  coal.src <- SCC[grepl("Fuel Comb.*Coal", SCC$EI.Sector), ]
  # total emissions per year, restricted to those codes
  p("Preparing plot data by aggregating emissions for each year for coal combustion sources")
  coal.rows <- NEI[(NEI$SCC %in% coal.src$SCC), ]
  coal.emissions <- aggregate(Emissions ~ year, coal.rows, sum)
  # open png device to draw the plot
  p("Open PNG file to draw the plot")
  png(filename = "plot4.png", height = 480, width = 480)
  # draw bar plot to show total emission for each year
  p("Draw bar plot to show total emission for each year for coal combustion sources")
  fig <- ggplot(coal.emissions, aes(x = factor(year), y = Emissions)) +
    geom_bar(stat = "identity") +
    xlab("Year") +
    ylab(expression("Total PM"[2.5]*" emissions (in tons)")) +
    ggtitle("Emissions from coal combustion related sources")
  print(fig)
  # close the png device
  p("Close PNG file")
  dev.off()
}
# draw plot: actually invoke the function -- the original line "draw.plot4"
# only referenced the function object (cf. the USAGE header: draw.plot4())
# and never produced the plot when the script was sourced
draw.plot4()
#TITLE : plot4.R
#DESCRIPTION : Exploratory Data Analysis - Course Project 2
# 1. Download emissions data
# 2. Explore how emissions from coal combustion sources changed in US
# 3. Draw a bar plot between total emissions for each year for coal
# combustion sources
#AUTHOR : Rajesh Thallam
#DATE : 1/25/2015
#VERSION : 0.1
#USAGE : draw.plot4()
#NOTES : Script can be executed in R console
#R_VERSION : R version 3.1.1 (2014-07-10)
#==============================================================================
# import libraries
library(ggplot2)
# helper method: logging to the console
p <- function(...) {
cat("[plot4.R]", format(Sys.time(), "[%Y-%m-%d %H:%M:%S]"),..., "\n")
}
# helper method: downloading data if not available
download.data <- function() {
url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
download.dir <- "data"
data.dir <- "data"
zip.file <- file.path(download.dir, "dataset.zip")
# download data
if(!file.exists(download.dir)) { dir.create(download.dir) }
if(!file.exists(zip.file)) { download.file(url, zip.file) }
else { p("Data already exists") }
# extract data
if(file.exists(zip.file)) { unzip(zip.file, exdir = "data", overwrite = TRUE) }
data.dir
}
# main function
# draw plot4 function
draw.plot4 <- function() {
p("Exploratory Data Analysis Project 2")
p("Starting up...")
p("Preparing to draw plot 4")
# download and extract data
p("Downloading and extracting data files")
download.data()
# read national emissions data
p("Reading emissions data")
NEI <- readRDS("data/summarySCC_PM25.rds")
# read source code classification data
p("Reading source code classification data")
SCC <- readRDS("data/Source_Classification_Code.rds")
# find coal combustion-related sources
coal.combustion.source <- SCC[grepl("Fuel Comb.*Coal", SCC$EI.Sector),]
# aggregate total emissions for each year for coal combustion sources
p("Preparing plot data by aggregating emissions for each year for coal combustion sources")
coal.emissions <- aggregate(
Emissions ~ year,
NEI[(NEI$SCC %in% coal.combustion.source$SCC), ],
sum)
# open png device to draw the plot
p("Open PNG file to draw the plot")
png(
filename = "plot4.png",
height = 480,
width = 480
)
# draw bar plot to show total emission for each year for coal combustion sources
p("Draw bar plot to show total emission for each year for coal combustion sources")
plot <-
ggplot(
coal.emissions,
aes( x = factor(year), y = Emissions)
) +
geom_bar(stat="identity") +
xlab("Year") +
ylab(expression("Total PM"[2.5]*" emissions (in tons)")) +
ggtitle("Emissions from coal combustion related sources")
print(plot)
# close the png device
p("Close PNG file")
dev.off()
}
# draw plot
draw.plot4 |
library(tidyverse)
library(here)
# This file brings in assets (data and CSS) from the assets repo (https://github.com/rfortherestofus/assets).
# Get Data ----------------------------------------------------------------
# NHANES example data used by the frequencies lesson; written under the
# project root via here() so the destination is independent of the working
# directory at run time.
download.file("https://github.com/rfortherestofus/assets/raw/master/data/nhanes.csv",
destfile = here("frequencies-three-ways", "nhanes.csv"))
# Get CSS -----------------------------------------------------------------
# Shared stylesheet applied to the course R Markdown documents.
download.file("https://raw.githubusercontent.com/rfortherestofus/course-assets/master/style/style.css",
destfile = here("assets", "style.css"))
library(here)
# This file brings in assets (data and CSS) from the assets repo (https://github.com/rfortherestofus/assets).
# Get Data ----------------------------------------------------------------
download.file("https://github.com/rfortherestofus/assets/raw/master/data/nhanes.csv",
destfile = here("frequencies-three-ways", "nhanes.csv"))
# Get CSS -----------------------------------------------------------------
download.file("https://raw.githubusercontent.com/rfortherestofus/course-assets/master/style/style.css",
destfile = here("assets", "style.css")) |
# resp_times.r
# Get the times at every response for whole data frame
# Keep only response events within the first n_trials trials and the first
# 180 s of cumulative time, with response times binned at 1-s resolution.
#
# Args:
#   df:       data frame with columns sujeto, evento, sesion, cde,
#             cum_trial and cum_dt (cumulative time within trial)
#   n_trials: number of trials to retain
#
# Returns: data frame with columns sujeto, evento, sesion, cde, bins, cum_trial
resp_times <- function(df, n_trials) {
  kept <- df %>% filter(cum_trial <= n_trials, cum_dt <= 180)
  kept$bins <- get_bins(kept$cum_dt, 0, 180, 1)
  # responses are coded as evento 1 or 11; all other events are dropped
  is.resp <- as.logical(ifelse(kept$evento == 1 | kept$evento == 11, 1, 0))
  kept <- kept[is.resp, ]
  kept %>% select(sujeto, evento, sesion, cde, bins, cum_trial)
}
| /analysis/r/resp_times.r | permissive | jealcalat/Generalization_decrement_data-analysis | R | false | false | 505 | r | # resp_times.r
# Get the times at every response for whole data frame
resp_times <- function(df,n_trials){
df <- df %>% filter(cum_trial <= n_trials,
cum_dt <= 180)
df$bins <- df$cum_dt %>%
get_bins(., 0, 180, 1)
df <- df %>%
{
var <- .
event <- var %>% `[[`("evento")
boolean <- ifelse(event == 1 | event == 11,1,0) %>% as.logical()
var <- var[boolean, ]
var
}
df %>% select(sujeto,evento,sesion,cde,bins,cum_trial)
}
|
# 1st method: read from a relative path.  The path must be a quoted string;
# the original call read.csv(./data/salesqty.csv) was a syntax error.
omni1 = read.csv("./data/salesqty.csv")
head(omni1)
# 2nd method: from a CSV file chosen interactively
omni2 = read.csv(file.choose())
head(omni2)
# 3rd method: from a Google Sheet
library(gsheet)
url = "https://docs.google.com/spreadsheets/d/1h7HU0X_Q4T5h5D1Q36qoK40Tplz94x_HZYHOJJC_edU/edit#gid=1595306231"
omni3 = as.data.frame(gsheet::gsheet2tbl(url))
head(omni3)
# fit a linear model and predict sales for new price/promotion combinations
fit2 = lm(sales ~ price + promotion, data = omni3)
(ndata2 = data.frame(price = c(60, 70), promotion = c(300, 400)))
p2sales = predict(fit2, newdata = ndata2)
cbind(ndata2, p2sales)
# assumption checks
par(mfrow = c(2, 2))
plot(fit2)
par(mfrow = c(1, 1))
plot(fit2, which = 1) # no pattern, equal variance
plot(fit2, 2)         # residuals are normally distributed
plot(fit2, 3)         # no hetero-scedasticity
plot(fit2, 4)         # flags outliers which affect the model
# refit without the influential observations (rows 11, 14, 15); the original
# referenced an undefined object `omni` -- the sheet data here is omni3
omni3[-c(11, 14, 15), ]
fit3 = lm(sales ~ price + promotion, data = omni3[-c(11, 14, 15), ])
plot(fit3, 4)
summary(fit3)
omni1 = read.csv(./data/salesqty.csv)
head(omni1)
#2nd Method : from CSV file
omni2 = read.csv(file.choose())
head(omni2)
#3rd Method : from gsheet
library(gsheet)
url = "https://docs.google.com/spreadsheets/d/1h7HU0X_Q4T5h5D1Q36qoK40Tplz94x_HZYHOJJC_edU/edit#gid=1595306231"
omni3 = as.data.frame(gsheet::gsheet2tbl(url))
head(omni3)
#create a dataframe of new sample values
fit2 = lm(sales ~ price + promotion, data=omni3)
(ndata2 = data.frame(price=c(60,70), promotion=c(300,400)))
p2sales = predict(fit2, newdata=ndata2)
cbind(ndata2, p2sales)
#assumption
par(mfrow=c(2,2))
plot(fit2)
par(mfrow=c(1,1))
plot(fit2,which=1) # no pattern, equal variance
plot(fit2,2) # Residuals are normally distributed
plot(fit2,3) # No hetero-scedascity
plot(fit2,4) # tells outliers which affect model
omni[-c(11,14,15),]
fit3 = lm(sales ~ price + promotion, data=omni[-c(11,14,15),])
plot(fit3,4)
summary(fit3) |
#!/usr/bin/Rscript
library(biomaRt)
library(org.Hs.eg.db)
library(optparse)
# Now annotate IDs
# EncaPSULATE THIS Bit in a function when I can be bothered
# NOTE(review): `inData` is not defined anywhere in this file; this block will
# fail at run time unless that object is created beforehand -- confirm where
# it is supposed to come from.
mart <- useMart(biomart = "ensembl", dataset = "hsapiens_gene_ensembl")
results <- getBM(attributes = c("description", "external_gene_id"), filters = "ensembl_gene_id",
    values = row.names(inData), mart = mart)
# NOTE(review): setwd() inside a script makes it non-portable; consider taking
# this directory as a command-line option instead.
setwd('~/Documents/FredCSC/reformattedFiles/symbolConversion/')
# Map gene symbols to Entrez gene IDs using org.Hs.eg.db.
#
# Args:
#   inFile: path to a tab-delimited file whose first column (named "From")
#           holds gene symbols, one per row.
#
# Returns: the input table merged with a `holder` column of matched Entrez
#          IDs ("noMatch" for symbols with no alias mapping).
#
# Related maps: org.Hs.egALIAS2EG, org.Hs.egGENENAME, org.Hs.egSYMBOL2EG
geneNameToID <- function(inFile) {
  table <- read.delim(inFile)
  IDvector <- as.character(table[, 1])
  # spell out "length": the original used the partial match len=, which only
  # works by accident of R's partial argument matching
  holder <- vector(mode = "character", length = length(IDvector))
  # check that gene names match a gene ID
  for (i in seq_along(IDvector)) {
    gene <- IDvector[i]
    print(gene)
    x <- try(as.character(mget(gene, org.Hs.egALIAS2EG)))
    if (inherits(x, "try-error")) {
      # record failures explicitly: the original assigned "noMatch" to x but
      # never stored it, leaving an empty string in holder for failed lookups
      holder[i] <- "noMatch"
    } else {
      holder[i] <- x
    }
  }
  orfs <- cbind(IDvector, holder)
  result <- merge.data.frame(orfs, table, by.x = "IDvector", by.y = "From")
  return(result)
}
# Command-line interface definition (optparse).
option_list <- list(
make_option(c("-e", "--explain"), action="store_true", default=FALSE,
help="Takes the vector "),
make_option(c("-i", "--inFile"), action="store",type = 'character', default='~/Documents/FredCSC/reformattedFiles/130829_gseaExpression.gct',
help="A tab delimited text file of listing the geneID you wish to convert"),
make_option(c("-o", "--outFile"), action="store", type='character', default='output.txt',
help="The file you wish to output results as a tab delimited text file")
)
opt <- parse_args(OptionParser(option_list=option_list))
# NOTE(review): opt$inFile is parsed but never used; the script hard-codes
# ziskin.txt below instead.  Confirm which input is intended.
inFileZ = 'ziskin.txt'
inFileLgr = 'lgr5.txt'
inFileEph2 = 'ephB2.txt'
outFile = opt$outFile
out = geneNameToID(inFileZ)
# NOTE(review): `y` and `data` are not defined in this file -- this merge will
# fail at run time; `out` from the line above is probably what was meant.
result = merge.data.frame(y, data, by.x='IDvector', by.y='NAME')
write.table(result, outFile, sep='\t')
write.table(result, outFile, sep='\t') | /PhD/130830_convertGeneIDs.R | no_license | dvbrown/Rscripts | R | false | false | 1,953 | r | #!/usr/bin/Rscript
library(biomaRt)
library(org.Hs.eg.db)
library(optparse)
# Now annotate IDs
# EncaPSULATE THIS Bit in a function when I can be bothered
mart <- useMart(biomart = "ensembl", dataset = "hsapiens_gene_ensembl")
results <- getBM(attributes = c("description", "external_gene_id"), filters = "ensembl_gene_id",
values = row.names(inData), mart = mart)
setwd('~/Documents/FredCSC/reformattedFiles/symbolConversion/')
geneNameToID <- function (inFile) {
#Takes a matrix of gene Symbols as argument and returns the geneID mappings.
#org.Hs.egALIAS2EG, org.Hs.egGENENAME, org.Hs.egSYMBOL2EG
table = read.delim(inFile)
IDvector = as.character(table[,1])
holder = vector(mode = 'character', len=length(IDvector))
i = 1
#check that gene names match a gene ID
for (gene in IDvector) {
print(gene)
x = try(as.character(mget(gene, org.Hs.egALIAS2EG)))
if(class(x) == "try-error") {x = 'noMatch'}
else {holder[i] = x}
i = i + 1
}
orfs = cbind(IDvector, holder)
result = merge.data.frame(orfs, table, by.x='IDvector', by.y='From')
return (result)
}
option_list <- list(
make_option(c("-e", "--explain"), action="store_true", default=FALSE,
help="Takes the vector "),
make_option(c("-i", "--inFile"), action="store",type = 'character', default='~/Documents/FredCSC/reformattedFiles/130829_gseaExpression.gct',
help="A tab delimited text file of listing the geneID you wish to convert"),
make_option(c("-o", "--outFile"), action="store", type='character', default='output.txt',
help="The file you wish to output results as a tab delimited text file")
)
opt <- parse_args(OptionParser(option_list=option_list))
inFileZ = 'ziskin.txt'
inFileLgr = 'lgr5.txt'
inFileEph2 = 'ephB2.txt'
outFile = opt$outFile
out = geneNameToID(inFileZ)
result = merge.data.frame(y, data, by.x='IDvector', by.y='NAME')
write.table(result, outFile, sep='\t') |
\name{loafercreek}
\alias{loafercreek}
\alias{gopheridge}
\alias{mineralKing}
\docType{data}
\title{Example \code{SoilProfilecollection} Objects Returned by \code{fetchNASIS}.}
\description{Several examples of soil profile collections returned by \code{fetchNASIS(from='pedons')} as \code{SoilProfileCollection} objects.}
\usage{
data(loafercreek)
data(gopheridge)
data(mineralKing)
}
\examples{
\donttest{
if(require("aqp")) {
# load example dataset
data("gopheridge")
# what kind of object is this?
class(gopheridge)
# how many profiles?
length(gopheridge)
# there are 60 profiles, this calls for a split plot
par(mar=c(0,0,0,0), mfrow=c(2,1))
# plot soil colors
plot(gopheridge[1:30, ], name='hzname', color='soil_color')
plot(gopheridge[31:60, ], name='hzname', color='soil_color')
# need a larger top margin for legend
par(mar=c(0,0,4,0), mfrow=c(2,1))
# generate colors based on clay content
plot(gopheridge[1:30, ], name='hzname', color='clay')
plot(gopheridge[31:60, ], name='hzname', color='clay')
# single row and no labels
par(mar=c(0,0,0,0), mfrow=c(1,1))
# plot soils sorted by depth to contact
plot(gopheridge, name='', print.id=FALSE, plot.order=order(gopheridge$bedrckdepth))
# plot first 10 profiles
plot(gopheridge[1:10, ], name='hzname', color='soil_color', label='pedon_id', id.style='side')
# add rock fragment data to plot:
addVolumeFraction(gopheridge[1:10, ], colname='total_frags_pct')
# add diagnostic horizons
addDiagnosticBracket(gopheridge[1:10, ], kind='argillic horizon', col='red', offset=-0.4)
## loafercreek
data("loafercreek")
# plot first 10 profiles
plot(loafercreek[1:10, ], name='hzname', color='soil_color', label='pedon_id', id.style='side')
# add rock fragment data to plot:
addVolumeFraction(loafercreek[1:10, ], colname='total_frags_pct')
# add diagnostic horizons
addDiagnosticBracket(loafercreek[1:10, ], kind='argillic horizon', col='red', offset=-0.4)
}
}
}
\keyword{datasets}
| /misc/man-deprecated/loafercreek.Rd | no_license | Emory-ENVS-SihiLab/soilDB | R | false | false | 2,035 | rd | \name{loafercreek}
\alias{loafercreek}
\alias{gopheridge}
\alias{mineralKing}
\docType{data}
\title{Example \code{SoilProfilecollection} Objects Returned by \code{fetchNASIS}.}
\description{Several examples of soil profile collections returned by \code{fetchNASIS(from='pedons')} as \code{SoilProfileCollection} objects.}
\usage{
data(loafercreek)
data(gopheridge)
data(mineralKing)
}
\examples{
\donttest{
if(require("aqp")) {
# load example dataset
data("gopheridge")
# what kind of object is this?
class(gopheridge)
# how many profiles?
length(gopheridge)
# there are 60 profiles, this calls for a split plot
par(mar=c(0,0,0,0), mfrow=c(2,1))
# plot soil colors
plot(gopheridge[1:30, ], name='hzname', color='soil_color')
plot(gopheridge[31:60, ], name='hzname', color='soil_color')
# need a larger top margin for legend
par(mar=c(0,0,4,0), mfrow=c(2,1))
# generate colors based on clay content
plot(gopheridge[1:30, ], name='hzname', color='clay')
plot(gopheridge[31:60, ], name='hzname', color='clay')
# single row and no labels
par(mar=c(0,0,0,0), mfrow=c(1,1))
# plot soils sorted by depth to contact
plot(gopheridge, name='', print.id=FALSE, plot.order=order(gopheridge$bedrckdepth))
# plot first 10 profiles
plot(gopheridge[1:10, ], name='hzname', color='soil_color', label='pedon_id', id.style='side')
# add rock fragment data to plot:
addVolumeFraction(gopheridge[1:10, ], colname='total_frags_pct')
# add diagnostic horizons
addDiagnosticBracket(gopheridge[1:10, ], kind='argillic horizon', col='red', offset=-0.4)
## loafercreek
data("loafercreek")
# plot first 10 profiles
plot(loafercreek[1:10, ], name='hzname', color='soil_color', label='pedon_id', id.style='side')
# add rock fragment data to plot:
addVolumeFraction(loafercreek[1:10, ], colname='total_frags_pct')
# add diagnostic horizons
addDiagnosticBracket(loafercreek[1:10, ], kind='argillic horizon', col='red', offset=-0.4)
}
}
}
\keyword{datasets}
|
# Action callback for the rp.textentry widgets: echo the current entry value.
# rpanel requires action functions to return the panel object; the original
# implicitly returned print()'s value (panel$option) instead, which triggers
# an rpanel warning and can lose panel state.
callback <- function(panel)
{
  print(panel$option)
  panel
}
# Two demo panels: a single-field text entry and a multi-field text entry.
panel1 <- rp.control()
rp.textentry(panel1, option, labels="Your name:", initval="-", action=callback, width=40)
# rpanel creates panels with rp.control(); rp_window() is not a function in
# the package and would fail at run time.
panel2 <- rp.control()
rp.textentry(panel2, option, labels=c("Your height:", "Your weight:"), initval=c("H", "W"), action=callback, width=20)
| /demo/rp.textentry.r | no_license | cran/rpanel | R | false | false | 322 | r | callback <- function(panel)
{
print(panel$option)
}
panel1 <- rp.control()
rp.textentry(panel1, option, labels="Your name:", initval="-", action=callback, width=40)
panel2 <- rp_window()
rp.textentry(panel2, option, labels=c("Your height:", "Your weight:"), initval=c("H", "W"), action=callback, width=20)
|
# trait MEANS #
library(plyr)
library(ggplot2)
library(reshape2)
traits <- read.csv("data/harvest/traits.csv", header=T)
#traits <- subset(traits, treatment != "flooded")
# subset by species
traits.A <- subset(traits, species == "acacia")
traits.C <- subset(traits, species == "cas")
traits.E <- subset(traits, species == "euc")
# Bar chart of trait means (+/- SEM) by treatment and CO2, faceted by trait,
# saved as a PNG named after the species.  `species` is passed as a bare
# symbol and captured with substitute() for the file name and title.
plot.means_facet <- function(df, species) {
  figureDir <- "C:/Users/James/Desktop/stuff/glasshouse/glasshouse proj/output/figures/traits"
  species <- deparse(substitute(species))
  outDir <- figureDir
  dir.create(outDir, recursive = TRUE)
  # long format, dropping incomplete observations
  long <- na.omit(melt(df))
  # per-group mean and standard error of the mean
  stats <- ddply(long, .(CO2, treatment, variable), summarise,
                 mean = mean(value),
                 sem = sd(value) / sqrt(length(value)))
  stats <- transform(stats, lower = mean - sem, upper = mean + sem)
  png(sprintf("%s/%s_traitfacet.png", outDir, species), width = 1500, height = 900)
  fig <- ggplot(stats, aes(treatment, mean, fill = CO2)) +
    geom_bar(stat = "identity", position = "dodge") +
    facet_wrap(~ variable, scales = "free") +
    geom_errorbar(aes(ymax = upper, ymin = lower),
                  position = position_dodge(0.9),
                  data = stats) +
    ggtitle(paste(species))
  print(fig)
  dev.off()
}
plot.means_facet(traits.A, acacia)
plot.means_facet(traits.C, casuarina)
plot.means_facet(traits.E, eucalyptus)
# Faceted histogram of melted trait values, one panel per treatment, saved as
# a PNG under output/figures/<species>/.  `species` is passed as a bare symbol.
plot.hists_facet <- function(df, species) {
  figureDir <- "C:/Users/James/Desktop/stuff/glasshouse/glasshouse proj/output/figures"
  species <- deparse(substitute(species))
  outDir <- sprintf("%s/%s", figureDir, species)
  dir.create(outDir, recursive = TRUE)
  melted <- melt(df)
  png(sprintf("%s/%s_traitfacet.png", outDir, species), width = 1500, height = 900)
  # plot the data melted from the df argument: the original plotted the
  # global `blah` (and referenced an undefined `trait` in the title), making
  # the function depend on objects outside its own scope and printed the
  # figure twice
  plot <- ggplot(melted, aes(value, fill = CO2)) +
    geom_histogram(binwidth = 3) +
    facet_wrap(~ treatment, scales = "free") +
    ggtitle(paste(species))
  print(plot)
  dev.off()
}
# Exploratory scratch code: histogram of flooded-treatment trait values for
# acacia.  NOTE(review): hist.plot is an empty stub, so its call is a no-op;
# the inline plotting below duplicates what plot.hists_facet was meant to do.
blah <- melt(traits.A)
blah <- subset(blah, treatment == "flooded")
hist.plot <- function(df) {
}
hist.plot(blah)
plot <- ggplot(blah, aes(value, fill=CO2))
plot <- plot + geom_histogram(binwidth = 3)
plot <- plot + facet_wrap(~ treatment, scales = "free")
print(plot)
| /scripts/trait_means.R | no_license | jamesrlawson/waterloggingCO2 | R | false | false | 2,539 | r | # trait MEANS #
library(plyr)
library(ggplot2)
library(reshape2)
traits <- read.csv("data/harvest/traits.csv", header=T)
#traits <- subset(traits, treatment != "flooded")
# subset by species
traits.A <- subset(traits, species == "acacia")
traits.C <- subset(traits, species == "cas")
traits.E <- subset(traits, species == "euc")
plot.means_facet <- function(df, species) {
figureDir <- "C:/Users/James/Desktop/stuff/glasshouse/glasshouse proj/output/figures/traits"
species <- deparse(substitute(species))
outDir <- figureDir
dir.create(outDir, recursive=TRUE)
df_melted <- melt(df)
df_melted <- na.omit(df_melted)
stats <- ddply(df_melted, .(CO2, treatment, variable), summarise,
mean = mean(value),
sem = sd(value)/sqrt(length(value)))
stats <- transform(stats, lower=mean-sem, upper=mean+sem)
png(sprintf("%s/%s_traitfacet.png", outDir, species), width = 1500, height = 900)
plot <- ggplot(stats, aes(treatment, mean, fill=CO2))
plot <- plot + geom_bar(stat = "identity", position="dodge")
plot <- plot + facet_wrap(~ variable, scales = "free")
plot <- plot + geom_errorbar(aes(ymax=upper,
ymin=lower),
position=position_dodge(0.9),
data=stats)
plot <- plot + ggtitle(paste(species))
print(plot)
dev.off()
}
plot.means_facet(traits.A, acacia)
plot.means_facet(traits.C, casuarina)
plot.means_facet(traits.E, eucalyptus)
plot.hists_facet <- function(df, species) {
figureDir <- "C:/Users/James/Desktop/stuff/glasshouse/glasshouse proj/output/figures"
species <- deparse(substitute(species))
outDir <- sprintf("%s/%s", figureDir, species)
dir.create(outDir, recursive=TRUE)
melted <- melt(df)
png(sprintf("%s/%s_traitfacet.png", outDir, species), width = 1500, height = 900)
plot <- ggplot(blah, aes(value, fill=CO2))
plot <- plot + geom_histogram(binwidth = 3)
plot <- plot + facet_wrap(~ treatment, scales = "free")
print(plot)
plot <- plot + ggtitle(paste(species, trait))
print(plot)
dev.off()
}
blah <- melt(traits.A)
blah <- subset(blah, treatment == "flooded")
hist.plot <- function(df) {
}
hist.plot(blah)
plot <- ggplot(blah, aes(value, fill=CO2))
plot <- plot + geom_histogram(binwidth = 3)
plot <- plot + facet_wrap(~ treatment, scales = "free")
print(plot)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/package.R
\name{xml_parse_data}
\alias{xml_parse_data}
\title{Convert R parse data to XML}
\usage{
xml_parse_data(x, includeText = NA, pretty = FALSE)
}
\arguments{
\item{x}{
an expression returned from \code{\link{parse}}, or a function or other
object with source reference information
}
\item{includeText}{
logical; whether to include the text of parsed items in the result
}
\item{pretty}{Whether to pretty-indent the XML output. It has a small
overhead which probably only matters for very large source files.}
}
\value{
An XML string representing the parse data. See details below.
}
\description{
In recent R versions the parser can attach source code location
information to the parsed expressions. This information is often
useful for static analysis, e.g. code linting. It can be accessed
via the \code{\link[utils]{getParseData}} function.
}
\details{
\code{xml_parse_data} converts this information to an XML tree.
The R parser's token names are preserved in the XML as much as
possible, but some of them are not valid XML tag names, so they are
renamed, see the \code{\link{xml_parse_token_map}} vector for the
mapping.
The top XML tag is \code{<exprlist>}, which is a list of
expressions, each expression is an \code{<expr>} tag. Each tag
has attributes that define the location: \code{line1}, \code{col1},
\code{line2}, \code{col2}. These are from the \code{\link{getParseData}}
data frame column names.
See an example below. See also the README at
\url{https://github.com/r-lib/xmlparsedata#readme}
for examples on how to search the XML tree with the \code{xml2} package
and XPath expressions.
}
\examples{
code <- "function(a = 1, b = 2) {\\n a + b\\n}\\n"
expr <- parse(text = code, keep.source = TRUE)
# The base R way:
getParseData(expr)
cat(xml_parse_data(expr, pretty = TRUE))
}
\seealso{
\code{\link{xml_parse_token_map}} for the token names.
\url{https://github.com/r-lib/xmlparsedata#readme} for more
information and use cases.
}
| /man/xml_parse_data.Rd | permissive | AshesITR/xmlparsedata | R | false | true | 2,056 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/package.R
\name{xml_parse_data}
\alias{xml_parse_data}
\title{Convert R parse data to XML}
\usage{
xml_parse_data(x, includeText = NA, pretty = FALSE)
}
\arguments{
\item{x}{
an expression returned from \code{\link{parse}}, or a function or other
object with source reference information
}
\item{includeText}{
logical; whether to include the text of parsed items in the result
}
\item{pretty}{Whether to pretty-indent the XML output. It has a small
overhead which probably only matters for very large source files.}
}
\value{
An XML string representing the parse data. See details below.
}
\description{
In recent R versions the parser can attach source code location
information to the parsed expressions. This information is often
useful for static analysis, e.g. code linting. It can be accessed
via the \code{\link[utils]{getParseData}} function.
}
\details{
\code{xml_parse_data} converts this information to an XML tree.
The R parser's token names are preserved in the XML as much as
possible, but some of them are not valid XML tag names, so they are
renamed, see the \code{\link{xml_parse_token_map}} vector for the
mapping.
The top XML tag is \code{<exprlist>}, which is a list of
expressions, each expression is an \code{<expr>} tag. Each tag
has attributes that define the location: \code{line1}, \code{col1},
\code{line2}, \code{col2}. These are from the \code{\link{getParseData}}
data frame column names.
See an example below. See also the README at
\url{https://github.com/r-lib/xmlparsedata#readme}
for examples on how to search the XML tree with the \code{xml2} package
and XPath expressions.
}
\examples{
code <- "function(a = 1, b = 2) {\\n a + b\\n}\\n"
expr <- parse(text = code, keep.source = TRUE)
# The base R way:
getParseData(expr)
cat(xml_parse_data(expr, pretty = TRUE))
}
\seealso{
\code{\link{xml_parse_token_map}} for the token names.
\url{https://github.com/r-lib/xmlparsedata#readme} for more
information and use cases.
}
|
# Auto-generated (RcppDeepState-style) recording wrapper around the compiled
# metadynminer3d routine fe3dp13.  Before delegating to the C++ entry point,
# it appends the full argument list to the `data.env` environment in the
# global workspace so each invocation's inputs can be replayed later.
function (cv1, cv2, cv3, width1, width2, width3, heights, x,
    y, z, p1, p3, tmin, tmax)
{
    # log this call's arguments into data.env$fe3dp13 (a growing list)
    e <- get("data.env", .GlobalEnv)
    e[["fe3dp13"]][[length(e[["fe3dp13"]]) + 1]] <- list(cv1 = cv1,
        cv2 = cv2, cv3 = cv3, width1 = width1, width2 = width2,
        width3 = width3, heights = heights, x = x, y = y, z = z,
        p1 = p1, p3 = p3, tmin = tmin, tmax = tmax)
    # forward unchanged to the compiled implementation
    .Call("_metadynminer3d_fe3dp13", PACKAGE = "metadynminer3d",
        cv1, cv2, cv3, width1, width2, width3, heights, x, y,
        z, p1, p3, tmin, tmax)
}
| /valgrind_test_dir/fe3dp13-test.R | no_license | akhikolla/RcppDeepStateTest | R | false | false | 546 | r | function (cv1, cv2, cv3, width1, width2, width3, heights, x,
y, z, p1, p3, tmin, tmax)
{
e <- get("data.env", .GlobalEnv)
e[["fe3dp13"]][[length(e[["fe3dp13"]]) + 1]] <- list(cv1 = cv1,
cv2 = cv2, cv3 = cv3, width1 = width1, width2 = width2,
width3 = width3, heights = heights, x = x, y = y, z = z,
p1 = p1, p3 = p3, tmin = tmin, tmax = tmax)
.Call("_metadynminer3d_fe3dp13", PACKAGE = "metadynminer3d",
cv1, cv2, cv3, width1, width2, width3, heights, x, y,
z, p1, p3, tmin, tmax)
}
|
## The first function, makeCacheMatrix, returns a list of 4 functions:
## set, get, setinv, and getinv functions
## takes as input and matrix ,x, and returns a list of 4 functions:
## set, get, setinv, and getinv functions .
## The <<- operator is used to set the value of i to null in the parent (global)
## Environment
# Wrap a matrix in a closure that can cache its inverse.
#
# Returns a list of four accessors:
#   set(y)      replace the stored matrix and invalidate the cached inverse
#   get()       return the stored matrix
#   setinv(inv) store a computed inverse in the cache
#   getinv()    return the cached inverse, or NULL if none has been stored
makeCacheMatrix <- function(x = matrix()) {
  cached.inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      cached.inverse <<- NULL
    },
    get = function() x,
    setinv = function(inv) cached.inverse <<- inv,
    getinv = function() cached.inverse
  )
}
## This function checks to see if we have already cached the inverse of the
## matrix x. If it is the cache , it alerts the user that it is getting the
## cached inverse before displaying it. If not, it computes the inverse using the
## Solve function .
## If it is called again, the cacheMatrix function has it now stored in cache
## and it wont have to be re-calcualted
# Return the inverse of the special "matrix" created by makeCacheMatrix,
# computing it with solve() on the first call and reusing the cached value
# (with a console message) on subsequent calls.
#
# Args:
#   x:   object returned by makeCacheMatrix
#   ...: further arguments forwarded to solve()
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  i <- x$getinv()
  if (!is.null(i)) {
    message("getting cached data")
    return(i)
  }
  data <- x$get()
  # forward ... to solve(); the original accepted ... but silently ignored it
  i <- solve(data, ...)
  x$setinv(i)
  i
}
| /cachematrix.R | no_license | StevenOshry/ProgrammingAssignment2 | R | false | false | 1,364 | r | ## The first function, makeCacheMatrix, returns a list of 4 functions:
## set, get, setinv, and getinv functions
## takes as input and matrix ,x, and returns a list of 4 functions:
## set, get, setinv, and getinv functions .
## The <<- operator is used to set the value of i to null in the parent (global)
## Environment
makeCacheMatrix <- function(x = matrix()) {
i <- NULL
set <- function(y) {
x <<- y
i <<- NULL
}
get <- function() x
setinv <- function(inv) i <<- inv
getinv <- function() i
list(set = set, get = get,
setinv = setinv,
getinv = getinv)
}
## This function checks to see if we have already cached the inverse of the
## matrix x. If it is the cache , it alerts the user that it is getting the
## cached inverse before displaying it. If not, it computes the inverse using the
## Solve function .
## If it is called again, the cacheMatrix function has it now stored in cache
## and it wont have to be re-calcualted
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
i <- x$getinv()
if(!is.null(i)) {
message("getting cached data")
return(i)
}
data <- x$get()
i <- solve(data )
x$setinv(i)
i
}
|
# Attempt 2: PCA-in-groups
# The point of this file:
# Clearly, some sort of variable selection must be done. Early attempts at PCA
# were, shall we say, less than promising. In this file, I use Zsuzsa's grouped
# variables to do a smaller PCA-in-groups.
# Load in data
# NOTE(review): hard-coded absolute path + setwd() makes this machine-specific;
# consider a relative path or here::here() when this leaves exploration stage.
filepath <- '/home/beeb/Documents/Data_Science/News Competition/OnlineNewsPopularity'
setwd(filepath)
library(reshape2)
library(ggplot2)
library(dplyr)
newspop <- read.csv('news_popularity_training.csv')
# Split these two into the groups Zsuzsa made
# NOTE(review): `names` shadows base::names for the rest of the script (it
# still works because function lookup is separate, but it is confusing).
# The numeric column indices below assume the CSV's column order is fixed.
names <- names(newspop)
x.words <- names[c(4:8,13)]
x.links <- names[c(9:10,30:32)]
x.dig <- names[11:12]
x.time <- names[c(3,33:40)]
x.key <- names[14:29]
x.NLP <- names[41:61]
newspop$t <- as.factor(newspop$popularity)
# WHAT IS THIS WITCHCRAFT???
#Create binary from y variable
# One-hot encodes the outcome: adds indicator columns pop1..popK, one per
# distinct popularity level.
for(t in sort(unique(newspop[,'popularity']))) {
  newspop[paste("pop",t,sep="")] <- ifelse( newspop[,'popularity'] == t , 1 , 0 )
}
# Clever, Zsuzsa. Very clever.
groups <- list()
groups[[1]] <- x.words
groups[[2]] <- x.links
groups[[3]] <- x.dig
groups[[4]] <- x.time
groups[[5]] <- x.key
groups[[6]] <- x.NLP
# Pull each variable group out of the full data frame as its own data frame.
grouped.data <- lapply(groups, function(y) {
  return(select(newspop, one_of(y)))
})
# Pairwise correlation matrix within each variable group.
cors <- lapply(grouped.data, cor)
# Looking at these correlations, I'm not 100% convinced we've grouped these correctly
# eg, look at
cors[[1]]
# Seems quite clear that n_non_stop_words, n_unique_tokens, n_non_stop_unique_tokens,
# and average_token_length should go together; the other two not so much. same is true
# if you look at
cors[[2]]
# But we can come back to that, all we'd have to do is re-write a few lines of code.
# PCA within each group; each scree plot is titled with the group's column count.
# NOTE(review): prcomp() is called with defaults, so variables are centered but
# not scaled -- confirm that is intended given very different variable scales.
grouped.data.pca <- lapply(grouped.data, function(x) {
  pr <- prcomp(x)
  plot(pr$sdev, main = length(x))
  return(pr)
})
# Irritatingly, it seems like the breakdown with the least-neat selection of PC's is
# the sixth group, which is arguably the most important (check plots!)
# The plan: take one PC from the first group; two from the second; one from the third;
# get rid of the fourth entirely, it's not useful; three from the fifth; shall we say 8
# from the sixth? That's 88% of the variance.
# Assemble the reduced design matrix: 15 PC score columns + is_weekend + the
# popularity columns (note grep('pop', ...) also matches `popularity` itself,
# which is relied on later for scoring).
newdata <- data.frame(grouped.data.pca[[1]]$x[,1],
                      grouped.data.pca[[2]]$x[,1:2],
                      grouped.data.pca[[3]]$x[,1],
                      grouped.data.pca[[5]]$x[,1:3],
                      grouped.data.pca[[6]]$x[,1:8],
                      newspop$is_weekend,
                      newspop[,grep('pop', names(newspop))])
# Something must be done about these variable names. These are like the most confusing
# thing ever.
# Meh.
# Labels labels labels. Names names names.
# Rename the 15 PC-score columns to PC1..PC15 (again shadowing base::names).
names <- paste0('PC', 1:15)
names(newdata)[1:15] <- names
# 80/20 train/test split. NOTE(review): no set.seed(), so accuracy varies run
# to run (hence the three different figures quoted at the bottom).
training.sample <- sample(nrow(newdata), nrow(newdata)*0.8)
newdata.train <- newdata[training.sample,]
newdata.test <- newdata[setdiff(1:nrow(newdata), training.sample),]
# Predictors: the 15 PCs plus column 16 (newspop.is_weekend).
xvals <- newdata.train[,1:16]
# One column of predicted probabilities per popularity class (5 classes).
hmm <- matrix(NA, nrow=nrow(newdata.test), ncol = 5)
j <- 1
# Fit one one-vs-rest logistic regression per pop1..pop5 indicator column.
# NOTE(review): assign() writes modelN / predictN into the global environment,
# and `t` is reused as a plain variable (it is also base R's transpose).
for(i in grep('pop[0-9]', names(newdata.train))) {
  t <- newdata.train[,i]
  current <- cbind(xvals, t)
  # Main effects plus is_weekend interactions with every predictor.
  model <- glm(t ~ . + newspop.is_weekend*., data = current, family=binomial)
  predict.model <- predict(model, newdata=newdata.test, type='response')
  assign(paste0('model', i), model)
  assign(paste0('predict', i), predict.model)
  hmm[,j] <- predict.model
  j <- j + 1
}
colnames(hmm) <- names(newdata.test)[grep('pop[0-9]', names(newdata.test))]
# This is not looking bad, actually.
# Predicted class = index of the one-vs-rest model with the highest probability.
# NOTE(review): `which` shadows base::which from here on.
which <- apply(hmm, 1, which.max)
table(newdata.test$popularity, which)
# Overall test-set accuracy (fraction of rows where prediction == truth).
correct <- rep(0, nrow(newdata.test))
correct[newdata.test$popularity == which] <- 1
sum(correct)/length(correct)
# I ran the code three times,and came up with 48.5%, 48.5%, 48.1% accuracy.
# This is marginally better than we would have done using only PREDICT ALL AS 2
| /Feature engineering/feature engineering v2-2.R | no_license | abarciauskas-bgse/kaggle-onlinenewspopularity | R | false | false | 3,864 | r | # Attempt 2: PCA-in-groups
# The point of this file:
# Clearly, some sort of variable selection must be done. Early attempts at PCA
# were, shall we say, less than promising. In this file, I use Zsuzsa's grouped
# variables to do a smaller PCA-in-groups.
# Load in data
filepath <- '/home/beeb/Documents/Data_Science/News Competition/OnlineNewsPopularity'
setwd(filepath)
library(reshape2)
library(ggplot2)
library(dplyr)
newspop <- read.csv('news_popularity_training.csv')
# Split these two into the groups Zsuzsa made
names <- names(newspop)
x.words <- names[c(4:8,13)]
x.links <- names[c(9:10,30:32)]
x.dig <- names[11:12]
x.time <- names[c(3,33:40)]
x.key <- names[14:29]
x.NLP <- names[41:61]
newspop$t <- as.factor(newspop$popularity)
# WHAT IS THIS WITCHCRAFT???
#Create binary from y variable
for(t in sort(unique(newspop[,'popularity']))) {
newspop[paste("pop",t,sep="")] <- ifelse( newspop[,'popularity'] == t , 1 , 0 )
}
# Clever, Zsuzsa. Very clever.
groups <- list()
groups[[1]] <- x.words
groups[[2]] <- x.links
groups[[3]] <- x.dig
groups[[4]] <- x.time
groups[[5]] <- x.key
groups[[6]] <- x.NLP
grouped.data <- lapply(groups, function(y) {
return(select(newspop, one_of(y)))
})
cors <- lapply(grouped.data, cor)
# Looking at these correlations, I'm not 100% convinced we've grouped these correctly
# eg, look at
cors[[1]]
# Seems quite clear that n_non_stop_words, n_unique_tokens, n_non_stop_unique_tokens,
# and average_token_length should go together; the other two not so much. same is true
# if you look at
cors[[2]]
# But we can come back to that, all we'd have to do is re-write a few lines of code.
grouped.data.pca <- lapply(grouped.data, function(x) {
pr <- prcomp(x)
plot(pr$sdev, main = length(x))
return(pr)
})
# Irritatingly, it seems like the breakdown with the least-neat selection of PC's is
# the sixth group, which is arguably the most important (check plots!)
# The plan: take one PC from the first group; two from the second; one from the third;
# get rid of the fourth entirely, it's not useful; three from the fifth; shall we say 8
# from the sixth? That's 88% of the variance.
newdata <- data.frame(grouped.data.pca[[1]]$x[,1],
grouped.data.pca[[2]]$x[,1:2],
grouped.data.pca[[3]]$x[,1],
grouped.data.pca[[5]]$x[,1:3],
grouped.data.pca[[6]]$x[,1:8],
newspop$is_weekend,
newspop[,grep('pop', names(newspop))])
# Something must be done about these variable names. These are like the most confusing
# thing ever.
# Meh.
# Labels labels labels. Names names names.
names <- paste0('PC', 1:15)
names(newdata)[1:15] <- names
training.sample <- sample(nrow(newdata), nrow(newdata)*0.8)
newdata.train <- newdata[training.sample,]
newdata.test <- newdata[setdiff(1:nrow(newdata), training.sample),]
xvals <- newdata.train[,1:16]
hmm <- matrix(NA, nrow=nrow(newdata.test), ncol = 5)
j <- 1
for(i in grep('pop[0-9]', names(newdata.train))) {
t <- newdata.train[,i]
current <- cbind(xvals, t)
model <- glm(t ~ . + newspop.is_weekend*., data = current, family=binomial)
predict.model <- predict(model, newdata=newdata.test, type='response')
assign(paste0('model', i), model)
assign(paste0('predict', i), predict.model)
hmm[,j] <- predict.model
j <- j + 1
}
colnames(hmm) <- names(newdata.test)[grep('pop[0-9]', names(newdata.test))]
# This is not looking bad, actually.
which <- apply(hmm, 1, which.max)
table(newdata.test$popularity, which)
correct <- rep(0, nrow(newdata.test))
correct[newdata.test$popularity == which] <- 1
sum(correct)/length(correct)
# I ran the code three times,and came up with 48.5%, 48.5%, 48.1% accuracy.
# This is marginally better than we would have done using only PREDICT ALL AS 2
|
## Data Loading and Processing
##
## Reads the UCI household power consumption data, keeps 2007-02-01/02, and
## writes four exploratory plots (plot1.png .. plot4.png) at 480x480.
##
## Fixes in this revision:
##  * the bare `----PlotN----` separator lines were real R expressions
##    (subtraction chains referencing undefined symbols such as `Plot1`) and
##    crashed the script when sourced -- they are comments now;
##  * a dangling `plot4 <-` silently captured the return value of par(), and
##    the other `plotN <-` assignments captured NULL / side-effect values;
##    the assignments are removed;
##  * install.packages() and View() are interactive-only and are commented out.
# install.packages("data.table")   # run once, interactively, if needed
library(dplyr)
setwd("C:/Users/lsharifi/Desktop/Rot2/coursera/A4-w1")
# "?" encodes missing values in this dataset
power <- read.table('household_power_consumption.txt', sep=";", header=TRUE,
                    na.strings="?", stringsAsFactors=FALSE)
# View(power)   # interactive inspection only
# Convert date and time variables to Date/Time class
power$Time <- strptime(paste(power$Date, power$Time), "%d/%m/%Y %H:%M:%S")
power$Date <- as.Date(power$Date, "%d/%m/%Y")
# We will only be using data from the dates 2007-02-01 and 2007-02-02
dates <- as.Date(c("2007-02-01", "2007-02-02"), "%Y-%m-%d")
power <- subset(power, Date %in% dates)

# --------------------------------- Plot 1 ---------------------------------
hist(power$Global_active_power, main = paste("Global Active Power"), col="red",
     ylab="Frequency", xlab="Global Active Power (kilowatts)")
dev.copy(png, file="plot1.png", width=480, height=480)
dev.off()

# --------------------------------- Plot 2 ---------------------------------
plot(power$Time, power$Global_active_power, type="l", xlab="",
     ylab="Global Active Power (kilowatts)")
dev.copy(png, file="plot2.png", width=480, height=480)
dev.off()

# --------------------------------- Plot 3 ---------------------------------
plot(power$Time, power$Sub_metering_1, type="l", xlab="", ylab="Energy sub metering")
lines(power$Time, power$Sub_metering_2, col="red")
lines(power$Time, power$Sub_metering_3, col="blue")
legend("topright", col=c("black","red","blue"),
       c("Sub_metering_1","Sub_metering_2", "Sub_metering_3"),
       lty=c(1,1), lwd=c(1,1))
dev.copy(png, file="plot3.png", width=480, height=480)
dev.off()

# --------------------------------- Plot 4 ---------------------------------
par(mfrow=c(2,2))
## PLOT 4.1
plot(power$Time, power$Global_active_power, type="l", xlab="",
     ylab="Global Active Power")
## PLOT 4.2
plot(power$Time, power$Voltage, type="l", xlab="datetime", ylab="Voltage")
## PLOT 4.3
plot(power$Time, power$Sub_metering_1, type="l", xlab="", ylab="Energy sub metering")
lines(power$Time, power$Sub_metering_2, col="red")
lines(power$Time, power$Sub_metering_3, col="blue")
# bty removes the box, cex shrinks the text, spacing added after the labels so
# the legend renders correctly in the small panel
legend("topright", col=c("black","red","blue"),
       c("Sub_metering_1 ","Sub_metering_2 ", "Sub_metering_3 "),
       lty=c(1,1), bty="n", cex=.5)
## PLOT 4.4
plot(power$Time, power$Global_reactive_power, type="l", xlab="datetime",
     ylab="Global_reactive_power")
# OUTPUT
dev.copy(png, file="plot4.png", width=480, height=480)
dev.off()
| /A4-w1.R | no_license | layaSharifi/ExData_Plotting1 | R | false | false | 2,751 | r |
##Data Loading and Processing
install.packages("data.table")
library(dplyr)
setwd("C:/Users/lsharifi/Desktop/Rot2/coursera/A4-w1")
power<- read.table ('household_power_consumption.txt', sep=";" , header=TRUE, na.strings="?",
stringsAsFactors=FALSE
)
View(power)
#convert date and time variables to Date/Time class
power$Time <- strptime(paste(power$Date, power$Time), "%d/%m/%Y %H:%M:%S")
power$Date <- as.Date(power$Date, "%d/%m/%Y")
# We will only be using data from the dates 2007-02-01 and 2007-02-02
dates <- as.Date(c("2007-02-01", "2007-02-02"), "%Y-%m-%d")
power <- subset(power, Date %in% dates)
power
--------------------------------------------------------------Plot1------------------------------------------------
plot1 <- hist(power$Global_active_power, main = paste("Global Active Power"), col="red", ylab="Frequency" , xlab="Global Active Power (kilowatts)")
dev.copy(png, file="plot1.png", width=480, height=480)
dev.off()
--------------------------------------------------------------Plot2------------------------------------------------
plot2<- plot(power$Time,power$Global_active_power, type="l", xlab="", ylab="Global Active Power (kilowatts)")
dev.copy(png, file="plot2.png", width=480, height=480)
dev.off()
--------------------------------------------------------------Plot3------------------------------------------------
plot3 <- plot(power$Time,power$Sub_metering_1, type="l", xlab="", ylab="Energy sub metering")
lines(power$Time,power$Sub_metering_2,col="red")
lines(power$Time,power$Sub_metering_3,col="blue")
legend("topright", col=c("black","red","blue"), c("Sub_metering_1","Sub_metering_2", "Sub_metering_3"),lty=c(1,1), lwd=c(1,1))
dev.copy(png, file="plot3.png", width=480, height=480)
dev.off()
-------------------------------------------------------------Plot4----------------------------------------------------
plot4 <-
par(mfrow=c(2,2))
##PLOT 1
plot(power$Time,power$Global_active_power, type="l", xlab="", ylab="Global Active Power")
##PLOT 2
plot(power$Time,power$Voltage, type="l", xlab="datetime", ylab="Voltage")
##PLOT 3
plot(power$Time,power$Sub_metering_1, type="l", xlab="", ylab="Energy sub metering")
lines(power$Time,power$Sub_metering_2,col="red")
lines(power$Time,power$Sub_metering_3,col="blue")
legend("topright", col=c("black","red","blue"), c("Sub_metering_1 ","Sub_metering_2 ", "Sub_metering_3 "),lty=c(1,1), bty="n", cex=.5) #bty removes the box, cex shrinks the text, spacing added after labels so it renders correctly
#PLOT 4
plot(power$Time,power$Global_reactive_power, type="l", xlab="datetime", ylab="Global_reactive_power")
#OUTPUT
dev.copy(png, file="plot4.png", width=480, height=480)
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/elasticsearchservice_operations.R
\name{elasticsearchservice_describe_elasticsearch_domain}
\alias{elasticsearchservice_describe_elasticsearch_domain}
\title{Returns domain configuration information about the specified
Elasticsearch domain, including the domain ID, domain endpoint, and
domain ARN}
\usage{
elasticsearchservice_describe_elasticsearch_domain(DomainName)
}
\arguments{
\item{DomainName}{[required] The name of the Elasticsearch domain for which you want information.}
}
\description{
Returns domain configuration information about the specified
Elasticsearch domain, including the domain ID, domain endpoint, and
domain ARN.
}
\section{Request syntax}{
\preformatted{svc$describe_elasticsearch_domain(
DomainName = "string"
)
}
}
\keyword{internal}
| /cran/paws.analytics/man/elasticsearchservice_describe_elasticsearch_domain.Rd | permissive | johnnytommy/paws | R | false | true | 845 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/elasticsearchservice_operations.R
\name{elasticsearchservice_describe_elasticsearch_domain}
\alias{elasticsearchservice_describe_elasticsearch_domain}
\title{Returns domain configuration information about the specified
Elasticsearch domain, including the domain ID, domain endpoint, and
domain ARN}
\usage{
elasticsearchservice_describe_elasticsearch_domain(DomainName)
}
\arguments{
\item{DomainName}{[required] The name of the Elasticsearch domain for which you want information.}
}
\description{
Returns domain configuration information about the specified
Elasticsearch domain, including the domain ID, domain endpoint, and
domain ARN.
}
\section{Request syntax}{
\preformatted{svc$describe_elasticsearch_domain(
DomainName = "string"
)
}
}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ml_classification_gbt_classifier.R,
% R/ml_model_gradient_boosted_trees.R, R/ml_regression_gbt_regressor.R
\name{ml_gbt_classifier}
\alias{ml_gbt_classifier}
\alias{ml_gradient_boosted_trees}
\alias{ml_gradient_boosted_trees}
\alias{ml_gbt_regressor}
\title{Spark ML -- Gradient Boosted Trees}
\usage{
ml_gbt_classifier(x, formula = NULL, max_iter = 20L, max_depth = 5L,
step_size = 0.1, subsampling_rate = 1, feature_subset_strategy = "auto",
min_instances_per_node = 1L, max_bins = 32L, min_info_gain = 0,
loss_type = "logistic", seed = NULL, thresholds = NULL,
checkpoint_interval = 10L, cache_node_ids = FALSE,
max_memory_in_mb = 256L, features_col = "features", label_col = "label",
prediction_col = "prediction", probability_col = "probability",
raw_prediction_col = "rawPrediction",
uid = random_string("gbt_classifier_"), ...)
ml_gradient_boosted_trees(x, formula = NULL, type = c("auto", "regression",
"classification"), features_col = "features", label_col = "label",
prediction_col = "prediction", probability_col = "probability",
raw_prediction_col = "rawPrediction", checkpoint_interval = 10L,
loss_type = c("auto", "logistic", "squared", "absolute"), max_bins = 32L,
max_depth = 5L, max_iter = 20L, min_info_gain = 0,
min_instances_per_node = 1L, step_size = 0.1, subsampling_rate = 1,
feature_subset_strategy = "auto", seed = NULL, thresholds = NULL,
cache_node_ids = FALSE, max_memory_in_mb = 256L,
uid = random_string("gradient_boosted_trees_"), response = NULL,
features = NULL, ...)
ml_gbt_regressor(x, formula = NULL, max_iter = 20L, max_depth = 5L,
step_size = 0.1, subsampling_rate = 1, feature_subset_strategy = "auto",
min_instances_per_node = 1L, max_bins = 32L, min_info_gain = 0,
loss_type = "squared", seed = NULL, checkpoint_interval = 10L,
cache_node_ids = FALSE, max_memory_in_mb = 256L,
features_col = "features", label_col = "label",
prediction_col = "prediction", uid = random_string("gbt_regressor_"), ...)
}
\arguments{
\item{x}{A \code{spark_connection}, \code{ml_pipeline}, or a \code{tbl_spark}.}
\item{formula}{Used when \code{x} is a \code{tbl_spark}. R formula as a character string or a formula. This is used to transform the input dataframe before fitting, see \link{ft_r_formula} for details.}
\item{max_iter}{Maximum number of iterations.}
\item{max_depth}{Maximum depth of the tree (>= 0); that is, the maximum
number of nodes separating any leaves from the root of the tree.}
\item{step_size}{Step size (a.k.a. learning rate) in interval (0, 1] for shrinking the contribution of each estimator. (default = 0.1)}
\item{subsampling_rate}{Fraction of the training data used for learning each decision tree, in range (0, 1]. (default = 1.0)}
\item{feature_subset_strategy}{The number of features to consider for splits at each tree node. See details for options.}
\item{min_instances_per_node}{Minimum number of instances each child must
have after split.}
\item{max_bins}{The maximum number of bins used for discretizing
continuous features and for choosing how to split on features at
each node. More bins give higher granularity.}
\item{min_info_gain}{Minimum information gain for a split to be considered
at a tree node. Should be >= 0, defaults to 0.}
\item{loss_type}{Loss function which GBT tries to minimize. Supported: \code{"squared"} (L2) and \code{"absolute"} (L1) (default = squared) for regression and \code{"logistic"} (default) for classification. For \code{ml_gradient_boosted_trees}, setting \code{"auto"}
will default to the appropriate loss type based on model type.}
\item{seed}{Seed for random numbers.}
\item{thresholds}{Thresholds in multi-class classification to adjust the probability of predicting each class. Array must have length equal to the number of classes, with values > 0 excepting that at most one value may be 0. The class with largest value \code{p/t} is predicted, where \code{p} is the original probability of that class and \code{t} is the class's threshold.}
\item{checkpoint_interval}{Set checkpoint interval (>= 1) or disable checkpoint (-1).
E.g. 10 means that the cache will get checkpointed every 10 iterations, defaults to 10.}
\item{cache_node_ids}{If \code{FALSE}, the algorithm will pass trees to executors to match instances with nodes.
If \code{TRUE}, the algorithm will cache node IDs for each instance. Caching can speed up training of deeper trees.
Defaults to \code{FALSE}.}
\item{max_memory_in_mb}{Maximum memory in MB allocated to histogram aggregation.
If too small, then 1 node will be split per iteration,
and its aggregates may exceed this size. Defaults to 256.}
\item{features_col}{Features column name, as a length-one character vector. The column should be single vector column of numeric values. Usually this column is output by \code{\link{ft_r_formula}}.}
\item{label_col}{Label column name. The column should be a numeric column. Usually this column is output by \code{\link{ft_r_formula}}.}
\item{prediction_col}{Prediction column name.}
\item{probability_col}{Column name for predicted class conditional probabilities.}
\item{raw_prediction_col}{Raw prediction (a.k.a. confidence) column name.}
\item{uid}{A character string used to uniquely identify the ML estimator.}
\item{...}{Optional arguments; see Details.}
\item{type}{The type of model to fit. \code{"regression"} treats the response
as a continuous variable, while \code{"classification"} treats the response
as a categorical variable. When \code{"auto"} is used, the model type is
inferred based on the response variable type -- if it is a numeric type,
then regression is used; classification otherwise.}
\item{response}{(Deprecated) The name of the response column (as a length-one character vector.)}
\item{features}{(Deprecated) The name of features (terms) to use for the model fit.}
}
\value{
The object returned depends on the class of \code{x}.
\itemize{
\item \code{spark_connection}: When \code{x} is a \code{spark_connection}, the function returns an instance of a \code{ml_predictor} object. The object contains a pointer to
a Spark \code{Predictor} object and can be used to compose
\code{Pipeline} objects.
\item \code{ml_pipeline}: When \code{x} is a \code{ml_pipeline}, the function returns a \code{ml_pipeline} with
the predictor appended to the pipeline.
\item \code{tbl_spark}: When \code{x} is a \code{tbl_spark}, a predictor is constructed then
immediately fit with the input \code{tbl_spark}, returning a prediction model.
\item \code{tbl_spark}, with \code{formula}: specified When \code{formula}
is specified, the input \code{tbl_spark} is first transformed using a
\code{RFormula} transformer before being fit by
the predictor. The object returned in this case is a \code{ml_model} which is a
wrapper of a \code{ml_pipeline_model}.
}
}
\description{
Perform binary classification and regression using gradient boosted trees. Multiclass classification is not supported yet.
}
\details{
When \code{x} is a \code{tbl_spark} and \code{formula} (alternatively, \code{response} and \code{features}) is specified, the function returns a \code{ml_model} object wrapping a \code{ml_pipeline_model} which contains data pre-processing transformers, the ML predictor, and, for classification models, a post-processing transformer that converts predictions into class labels. For classification, an optional argument \code{predicted_label_col} (defaults to \code{"predicted_label"}) can be used to specify the name of the predicted label column. In addition to the fitted \code{ml_pipeline_model}, \code{ml_model} objects also contain a \code{ml_pipeline} object where the ML predictor stage is an estimator ready to be fit against data. This is utilized by \code{\link{ml_save}} with \code{type = "pipeline"} to facilitate model refresh workflows.
The supported options for \code{feature_subset_strategy} are
\itemize{
\item \code{"auto"}: Choose automatically for task: If \code{num_trees == 1}, set to \code{"all"}. If \code{num_trees > 1} (forest), set to \code{"sqrt"} for classification and to \code{"onethird"} for regression.
\item \code{"all"}: use all features
\item \code{"onethird"}: use 1/3 of the features
\item \code{"sqrt"}: use sqrt(number of features)
\item \code{"log2"}: use log2(number of features)
\item \code{"n"}: when \code{n} is in the range (0, 1.0], use n * number of features. When \code{n} is in the range (1, number of features), use \code{n} features. (default = \code{"auto"})
}
\code{ml_gradient_boosted_trees} is a wrapper around \code{ml_gbt_regressor.tbl_spark} and \code{ml_gbt_classifier.tbl_spark} and calls the appropriate method based on model type.
}
\examples{
\dontrun{
sc <- spark_connect(master = "local")
iris_tbl <- sdf_copy_to(sc, iris, name = "iris_tbl", overwrite = TRUE)
partitions <- iris_tbl \%>\%
sdf_partition(training = 0.7, test = 0.3, seed = 1111)
iris_training <- partitions$training
iris_test <- partitions$test
gbt_model <- iris_training \%>\%
ml_gradient_boosted_trees(Sepal_Length ~ Petal_Length + Petal_Width)
pred <- sdf_predict(iris_test, gbt_model)
ml_regression_evaluator(pred, label_col = "Sepal_Length")
}
}
\seealso{
See \url{http://spark.apache.org/docs/latest/ml-classification-regression.html} for
more information on the set of supervised learning algorithms.
Other ml algorithms: \code{\link{ml_aft_survival_regression}},
\code{\link{ml_decision_tree_classifier}},
\code{\link{ml_generalized_linear_regression}},
\code{\link{ml_isotonic_regression}},
\code{\link{ml_linear_regression}},
\code{\link{ml_linear_svc}},
\code{\link{ml_logistic_regression}},
\code{\link{ml_multilayer_perceptron_classifier}},
\code{\link{ml_naive_bayes}},
\code{\link{ml_one_vs_rest}},
\code{\link{ml_random_forest_classifier}}
}
| /man/ml_gradient_boosted_trees.Rd | permissive | shabbybanks/sparklyr | R | false | true | 9,938 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ml_classification_gbt_classifier.R,
% R/ml_model_gradient_boosted_trees.R, R/ml_regression_gbt_regressor.R
\name{ml_gbt_classifier}
\alias{ml_gbt_classifier}
\alias{ml_gradient_boosted_trees}
\alias{ml_gradient_boosted_trees}
\alias{ml_gbt_regressor}
\title{Spark ML -- Gradient Boosted Trees}
\usage{
ml_gbt_classifier(x, formula = NULL, max_iter = 20L, max_depth = 5L,
step_size = 0.1, subsampling_rate = 1, feature_subset_strategy = "auto",
min_instances_per_node = 1L, max_bins = 32L, min_info_gain = 0,
loss_type = "logistic", seed = NULL, thresholds = NULL,
checkpoint_interval = 10L, cache_node_ids = FALSE,
max_memory_in_mb = 256L, features_col = "features", label_col = "label",
prediction_col = "prediction", probability_col = "probability",
raw_prediction_col = "rawPrediction",
uid = random_string("gbt_classifier_"), ...)
ml_gradient_boosted_trees(x, formula = NULL, type = c("auto", "regression",
"classification"), features_col = "features", label_col = "label",
prediction_col = "prediction", probability_col = "probability",
raw_prediction_col = "rawPrediction", checkpoint_interval = 10L,
loss_type = c("auto", "logistic", "squared", "absolute"), max_bins = 32L,
max_depth = 5L, max_iter = 20L, min_info_gain = 0,
min_instances_per_node = 1L, step_size = 0.1, subsampling_rate = 1,
feature_subset_strategy = "auto", seed = NULL, thresholds = NULL,
cache_node_ids = FALSE, max_memory_in_mb = 256L,
uid = random_string("gradient_boosted_trees_"), response = NULL,
features = NULL, ...)
ml_gbt_regressor(x, formula = NULL, max_iter = 20L, max_depth = 5L,
step_size = 0.1, subsampling_rate = 1, feature_subset_strategy = "auto",
min_instances_per_node = 1L, max_bins = 32L, min_info_gain = 0,
loss_type = "squared", seed = NULL, checkpoint_interval = 10L,
cache_node_ids = FALSE, max_memory_in_mb = 256L,
features_col = "features", label_col = "label",
prediction_col = "prediction", uid = random_string("gbt_regressor_"), ...)
}
\arguments{
\item{x}{A \code{spark_connection}, \code{ml_pipeline}, or a \code{tbl_spark}.}
\item{formula}{Used when \code{x} is a \code{tbl_spark}. R formula as a character string or a formula. This is used to transform the input dataframe before fitting, see \link{ft_r_formula} for details.}
\item{max_iter}{Maximum number of iterations.}
\item{max_depth}{Maximum depth of the tree (>= 0); that is, the maximum
number of nodes separating any leaves from the root of the tree.}
\item{step_size}{Step size (a.k.a. learning rate) in interval (0, 1] for shrinking the contribution of each estimator. (default = 0.1)}
\item{subsampling_rate}{Fraction of the training data used for learning each decision tree, in range (0, 1]. (default = 1.0)}
\item{feature_subset_strategy}{The number of features to consider for splits at each tree node. See details for options.}
\item{min_instances_per_node}{Minimum number of instances each child must
have after split.}
\item{max_bins}{The maximum number of bins used for discretizing
continuous features and for choosing how to split on features at
each node. More bins give higher granularity.}
\item{min_info_gain}{Minimum information gain for a split to be considered
at a tree node. Should be >= 0, defaults to 0.}
\item{loss_type}{Loss function which GBT tries to minimize. Supported: \code{"squared"} (L2) and \code{"absolute"} (L1) (default = squared) for regression and \code{"logistic"} (default) for classification. For \code{ml_gradient_boosted_trees}, setting \code{"auto"}
will default to the appropriate loss type based on model type.}
\item{seed}{Seed for random numbers.}
\item{thresholds}{Thresholds in multi-class classification to adjust the probability of predicting each class. Array must have length equal to the number of classes, with values > 0 excepting that at most one value may be 0. The class with largest value \code{p/t} is predicted, where \code{p} is the original probability of that class and \code{t} is the class's threshold.}
\item{checkpoint_interval}{Set checkpoint interval (>= 1) or disable checkpoint (-1).
E.g. 10 means that the cache will get checkpointed every 10 iterations, defaults to 10.}
\item{cache_node_ids}{If \code{FALSE}, the algorithm will pass trees to executors to match instances with nodes.
If \code{TRUE}, the algorithm will cache node IDs for each instance. Caching can speed up training of deeper trees.
Defaults to \code{FALSE}.}
\item{max_memory_in_mb}{Maximum memory in MB allocated to histogram aggregation.
If too small, then 1 node will be split per iteration,
and its aggregates may exceed this size. Defaults to 256.}
\item{features_col}{Features column name, as a length-one character vector. The column should be single vector column of numeric values. Usually this column is output by \code{\link{ft_r_formula}}.}
\item{label_col}{Label column name. The column should be a numeric column. Usually this column is output by \code{\link{ft_r_formula}}.}
\item{prediction_col}{Prediction column name.}
\item{probability_col}{Column name for predicted class conditional probabilities.}
\item{raw_prediction_col}{Raw prediction (a.k.a. confidence) column name.}
\item{uid}{A character string used to uniquely identify the ML estimator.}
\item{...}{Optional arguments; see Details.}
\item{type}{The type of model to fit. \code{"regression"} treats the response
as a continuous variable, while \code{"classification"} treats the response
as a categorical variable. When \code{"auto"} is used, the model type is
inferred based on the response variable type -- if it is a numeric type,
then regression is used; classification otherwise.}
\item{response}{(Deprecated) The name of the response column (as a length-one character vector.)}
\item{features}{(Deprecated) The name of features (terms) to use for the model fit.}
}
\value{
The object returned depends on the class of \code{x}.
\itemize{
\item \code{spark_connection}: When \code{x} is a \code{spark_connection}, the function returns an instance of a \code{ml_predictor} object. The object contains a pointer to
a Spark \code{Predictor} object and can be used to compose
\code{Pipeline} objects.
\item \code{ml_pipeline}: When \code{x} is a \code{ml_pipeline}, the function returns a \code{ml_pipeline} with
the predictor appended to the pipeline.
\item \code{tbl_spark}: When \code{x} is a \code{tbl_spark}, a predictor is constructed then
immediately fit with the input \code{tbl_spark}, returning a prediction model.
\item \code{tbl_spark}, with \code{formula}: specified When \code{formula}
is specified, the input \code{tbl_spark} is first transformed using a
\code{RFormula} transformer before being fit by
the predictor. The object returned in this case is a \code{ml_model} which is a
wrapper of a \code{ml_pipeline_model}.
}
}
\description{
Perform binary classification and regression using gradient boosted trees. Multiclass classification is not supported yet.
}
\details{
When \code{x} is a \code{tbl_spark} and \code{formula} (alternatively, \code{response} and \code{features}) is specified, the function returns a \code{ml_model} object wrapping a \code{ml_pipeline_model} which contains data pre-processing transformers, the ML predictor, and, for classification models, a post-processing transformer that converts predictions into class labels. For classification, an optional argument \code{predicted_label_col} (defaults to \code{"predicted_label"}) can be used to specify the name of the predicted label column. In addition to the fitted \code{ml_pipeline_model}, \code{ml_model} objects also contain a \code{ml_pipeline} object where the ML predictor stage is an estimator ready to be fit against data. This is utilized by \code{\link{ml_save}} with \code{type = "pipeline"} to facilitate model refresh workflows.
The supported options for \code{feature_subset_strategy} are
\itemize{
\item \code{"auto"}: Choose automatically for task: If \code{num_trees == 1}, set to \code{"all"}. If \code{num_trees > 1} (forest), set to \code{"sqrt"} for classification and to \code{"onethird"} for regression.
\item \code{"all"}: use all features
\item \code{"onethird"}: use 1/3 of the features
\item \code{"sqrt"}: use sqrt(number of features)
\item \code{"log2"}: use log2(number of features)
\item \code{"n"}: when \code{n} is in the range (0, 1.0], use n * number of features. When \code{n} is in the range (1, number of features), use \code{n} features. (default = \code{"auto"})
}
\code{ml_gradient_boosted_trees} is a wrapper around \code{ml_gbt_regressor.tbl_spark} and \code{ml_gbt_classifier.tbl_spark} and calls the appropriate method based on model type.
}
\examples{
\dontrun{
sc <- spark_connect(master = "local")
iris_tbl <- sdf_copy_to(sc, iris, name = "iris_tbl", overwrite = TRUE)
partitions <- iris_tbl \%>\%
sdf_partition(training = 0.7, test = 0.3, seed = 1111)
iris_training <- partitions$training
iris_test <- partitions$test
gbt_model <- iris_training \%>\%
ml_gradient_boosted_trees(Sepal_Length ~ Petal_Length + Petal_Width)
pred <- sdf_predict(iris_test, gbt_model)
ml_regression_evaluator(pred, label_col = "Sepal_Length")
}
}
\seealso{
See \url{http://spark.apache.org/docs/latest/ml-classification-regression.html} for
more information on the set of supervised learning algorithms.
Other ml algorithms: \code{\link{ml_aft_survival_regression}},
\code{\link{ml_decision_tree_classifier}},
\code{\link{ml_generalized_linear_regression}},
\code{\link{ml_isotonic_regression}},
\code{\link{ml_linear_regression}},
\code{\link{ml_linear_svc}},
\code{\link{ml_logistic_regression}},
\code{\link{ml_multilayer_perceptron_classifier}},
\code{\link{ml_naive_bayes}},
\code{\link{ml_one_vs_rest}},
\code{\link{ml_random_forest_classifier}}
}
|
## this script scrapes data about the episode results across each GBBO series
## and exports the list to a json + the data frame to a csv
library(rvest)
library(purrr)
library(dplyr)
library(tidyr)
library(readr)
url_base <- "https://en.wikipedia.org/wiki/The_Great_British_Bake_Off_(series_%d)"
## get the episode results across series
## Scrape the episode-results table for one GBBO series from Wikipedia.
## `series`: integer series number, interpolated into `url_base`.
## Returns the third <table> on the page (the per-episode results chart)
## as a data frame; header = FALSE keeps the raw header rows so they can
## be dropped downstream.
get_results <- function(series) {
  # Progress message. Fix: `sep` must be an argument to cat(), not c();
  # inside c() it silently became a fourth vector element named "sep".
  cat("on your mark...", "get set...", "BAKE!", sep = " ")
  page <- read_html(sprintf(url_base, series))
  page %>%
    html_nodes(xpath = '//*[@id="mw-content-text"]/div/table') %>%
    html_table(fill = TRUE, header = FALSE) %>%
    .[[3]]
}
## Bind the per-series results tables (series 1-9) into one data frame.
## NOTE(review): .id = 'series' records each element's *position* as a
## character column; here positions 1-9 happen to equal the series numbers.
results_df <- map_dfr(.x = 1:9, .f = get_results, .id = 'series') %>%
# renaming the range X2:X11 yields columns episode_1 ... episode_10
select(series,
baker = X1,
episode_ = X2:X11) %>%
group_by(series) %>%
# drop the first two rows of every series table (raw header rows kept
# because html_table() was called with header = FALSE)
filter(!row_number() %in% c(1:2)) %>%
ungroup()
## tons of wrangling here
## Reshape to long format: one row per (series, episode, baker).
results_df2 <- results_df %>%
# wide -> long: episode_1..episode_10 collapse into an `episode` key column
gather(episode, result, -series, -baker) %>%
# split "episode_3" into the discarded prefix and the episode number
separate(episode, into = c("drop", "episode")) %>%
select(series, episode, baker, result) %>%
mutate(series = as.integer(series),
episode = as.integer(episode),
# series 2 lists "Jo"; normalise to her full name "Joanne"
baker = ifelse(series == 2 & baker == "Jo", "Joanne", baker)) %>%
arrange(series, baker, episode) %>%
drop_na(result) # this gets rid of episodes that don't apply to that series
## Carry each baker's elimination status forward and normalise labels.
results <- results_df2 %>%
# mark the episode in which a baker leaves (eliminated or withdrew)
mutate(gone = case_when(
result == "OUT" ~ "OUT",
result == "LEFT" ~ "LEFT",
is.na(result) ~ NA_character_)) %>%
group_by(series, baker) %>%
# propagate OUT/LEFT down to all later episodes for that baker
fill(gone, .direction = c("down")) %>%
# NOTE(review): funs() is deprecated (dplyr >= 0.8); across() is the
# modern replacement -- left as-is here to preserve behaviour exactly
mutate_all(funs(na_if(., ""))) %>%
mutate(new_result = coalesce(result, gone)) %>%
# bakers still present with no recorded result were simply "IN" that week
replace_na(list(new_result = "IN")) %>%
# keep only the first OUT/LEFT per baker; filled-down repeats become NA
mutate(new_result2 = case_when(
new_result == "OUT" & lag(new_result) == "OUT" ~ NA_character_,
new_result == "LEFT" & lag(new_result) == "LEFT" ~ NA_character_,
TRUE ~ new_result)) %>%
select(series, episode, baker, result = new_result2) %>%
# harmonise label spellings that vary across series
# NOTE(review): data is still grouped by (series, baker) after this
# pipeline -- no ungroup(); confirm downstream code expects that
mutate(result = case_when(
result %in% c("Runner up", "Runner Up", "Runner-Up", "Third Place") ~ "RUNNER-UP",
result == "SB" ~ "STAR BAKER",
TRUE ~ result
))
## export the tidy episode results to data-raw/results.csv
write_csv(results, here::here("data-raw", "results.csv"))
| /data-raw/results-scrape.R | permissive | mai-n-coleman/bakeoff | R | false | false | 2,235 | r | ## this script scrapes data about the episode results across each GBBO series
## and exports the list to a json + the data frame to a csv
library(rvest)
library(purrr)
library(dplyr)
library(tidyr)
library(readr)
url_base <- "https://en.wikipedia.org/wiki/The_Great_British_Bake_Off_(series_%d)"
## get the episode results across series
## Scrape the episode-results table for one GBBO series from Wikipedia.
## `series`: integer series number, interpolated into `url_base`.
## Returns the third <table> on the page (the per-episode results chart)
## as a data frame; header = FALSE keeps the raw header rows so they can
## be dropped downstream.
get_results <- function(series) {
  # Progress message. Fix: `sep` must be an argument to cat(), not c();
  # inside c() it silently became a fourth vector element named "sep".
  cat("on your mark...", "get set...", "BAKE!", sep = " ")
  page <- read_html(sprintf(url_base, series))
  page %>%
    html_nodes(xpath = '//*[@id="mw-content-text"]/div/table') %>%
    html_table(fill = TRUE, header = FALSE) %>%
    .[[3]]
}
## Bind the per-series results tables (series 1-9) into one data frame.
## NOTE(review): .id = 'series' records each element's *position* as a
## character column; here positions 1-9 happen to equal the series numbers.
results_df <- map_dfr(.x = 1:9, .f = get_results, .id = 'series') %>%
# renaming the range X2:X11 yields columns episode_1 ... episode_10
select(series,
baker = X1,
episode_ = X2:X11) %>%
group_by(series) %>%
# drop the first two rows of every series table (raw header rows kept
# because html_table() was called with header = FALSE)
filter(!row_number() %in% c(1:2)) %>%
ungroup()
## tons of wrangling here
## Reshape to long format: one row per (series, episode, baker).
results_df2 <- results_df %>%
# wide -> long: episode_1..episode_10 collapse into an `episode` key column
gather(episode, result, -series, -baker) %>%
# split "episode_3" into the discarded prefix and the episode number
separate(episode, into = c("drop", "episode")) %>%
select(series, episode, baker, result) %>%
mutate(series = as.integer(series),
episode = as.integer(episode),
# series 2 lists "Jo"; normalise to her full name "Joanne"
baker = ifelse(series == 2 & baker == "Jo", "Joanne", baker)) %>%
arrange(series, baker, episode) %>%
drop_na(result) # this gets rid of episodes that don't apply to that series
## Carry each baker's elimination status forward and normalise labels.
results <- results_df2 %>%
# mark the episode in which a baker leaves (eliminated or withdrew)
mutate(gone = case_when(
result == "OUT" ~ "OUT",
result == "LEFT" ~ "LEFT",
is.na(result) ~ NA_character_)) %>%
group_by(series, baker) %>%
# propagate OUT/LEFT down to all later episodes for that baker
fill(gone, .direction = c("down")) %>%
# NOTE(review): funs() is deprecated (dplyr >= 0.8); across() is the
# modern replacement -- left as-is here to preserve behaviour exactly
mutate_all(funs(na_if(., ""))) %>%
mutate(new_result = coalesce(result, gone)) %>%
# bakers still present with no recorded result were simply "IN" that week
replace_na(list(new_result = "IN")) %>%
# keep only the first OUT/LEFT per baker; filled-down repeats become NA
mutate(new_result2 = case_when(
new_result == "OUT" & lag(new_result) == "OUT" ~ NA_character_,
new_result == "LEFT" & lag(new_result) == "LEFT" ~ NA_character_,
TRUE ~ new_result)) %>%
select(series, episode, baker, result = new_result2) %>%
# harmonise label spellings that vary across series
# NOTE(review): data is still grouped by (series, baker) after this
# pipeline -- no ungroup(); confirm downstream code expects that
mutate(result = case_when(
result %in% c("Runner up", "Runner Up", "Runner-Up", "Third Place") ~ "RUNNER-UP",
result == "SB" ~ "STAR BAKER",
TRUE ~ result
))
## export the tidy episode results to data-raw/results.csv
write_csv(results, here::here("data-raw", "results.csv"))
|
## Convert an AnnData .h5ad file (first command-line argument) to a
## Seurat .rds file written alongside the input.
library(sceasy)

h5adPath <- commandArgs(trailingOnly = TRUE)[1]
# Fail fast when the script is invoked without an input path.
if (is.na(h5adPath)) {
  stop("Usage: Rscript make_seurat.R <path/to/file.h5ad>", call. = FALSE)
}

# Fix: anchor the extension match. In gsub(".h5ad", ...) the "." is a
# regex wildcard and the unanchored pattern could also rewrite matches
# mid-filename; sub("\\.h5ad$", ...) replaces only a trailing extension.
rdsPath <- sub("\\.h5ad$", ".rds", h5adPath)

sceasy::convertFormat(h5adPath, from = "anndata", to = "seurat",
                      outFile = rdsPath, main_layer = "data")
| /backend/corpora/dataset_processing/make_seurat.R | permissive | isabella232/corpora-data-portal | R | false | false | 191 | r | library(sceasy)
## Read the input .h5ad path from the command line and convert it to a
## Seurat .rds file written alongside the input.
h5adPath <- commandArgs(trailingOnly = TRUE)[1]
# Fail fast when the script is invoked without an input path.
if (is.na(h5adPath)) {
  stop("Usage: Rscript make_seurat.R <path/to/file.h5ad>", call. = FALSE)
}
# Fix: anchor the extension match -- in gsub(".h5ad", ...) the "." is a
# regex wildcard and the unanchored pattern could also match mid-filename.
sceasy::convertFormat(h5adPath, from = "anndata", to = "seurat",
                      outFile = sub("\\.h5ad$", ".rds", h5adPath),
                      main_layer = "data")
|
#' @importFrom R6 R6Class
NULL
#' EnergyPlus IDD object
#'
#' `IddObject` is an abstraction of a single object in an [Idd] object. It
#' provides more detail methods to query field properties. `IddObject` can only
#' be created from the parent [Idd] object, using `$object()`,
#' `$object_in_group()` and other equivalent. This is because that
#' initialization of an `IddObject` needs some shared data from parent [Idd]
#' object.
#'
#' There are lots of properties for every class and field. For details on the
#' meaning of each property, please see the heading comments in the
#' `Energy+.idd` file in the EnergyPlus installation path.
#'
#' @docType class
#' @name IddObject
#' @seealso [Idd] Class
#' @author Hongyuan Jia
NULL
#' Create an `IddObject` object.
#'
#' `idd_object()` takes a parent `Idd` object, a class name, and returns a
#' corresponding [IddObject]. For details, see [IddObject].
#'
#' @param parent An [Idd] object or a valid input for [use_idd()].
#' @param class A valid class name (a string).
#' @return An [IddObject] object.
#' @export
#' @examples
#' \dontrun{
#' idd <- use_idd(8.8, download = "auto")
#'
#' # get an IddObject using class name
#' idd_object(idd, "Material")
#' idd_object(8.8, "Material")
#' }
#'
# idd_object {{{
idd_object <- function (parent, class) {
# Argument order is intentionally swapped relative to IddObject$new(),
# which takes (class, parent): putting the data-bearing `parent` first
# keeps this helper consistent with other constructors and pipe-friendly.
IddObject$new(class, parent)
}
# }}}
#' @export
# IddObject {{{
IddObject <- R6::R6Class(classname = "IddObject", cloneable = FALSE,
public = list(
# INITIALIZE {{{
#' @description
#' Create an `IddObject` object
#'
#' @details
#' Note that an `IddObject` can be created from the parent [Idd] object,
#' using `$object()`, [idd_object] and other equivalent.
#'
#' @param class A single integer specifying the class index or a single
#' string specifying the class name.
#' @param parent An [Idd] object or a valid input for [use_idd()].
#'
#' @return An `IddObject` object.
#'
#' @examples
#' \dontrun{
#' surf <- IddObject$new("BuildingSurface:Detailed", use_idd(8.8, download = "auto"))
#' }
#'
initialize = function (class, parent) {
if (missing(parent)) {
abort("error_iddobject_missing_parent",
paste("IddObject can only be created based on a parent Idd object.",
"Please give `parent`, which should be either an IDD version or an `Idd` object."
)
)
} else {
private$m_parent <- use_idd(parent)
}
assert(!is.null(class))
private$m_class_id <- get_idd_class(private$idd_env(), class, underscore = TRUE)$class_id
},
# }}}
# META {{{
# version {{{
#' @description
#' Get the version of parent `Idd`
#'
#' @details
#' `$version()` returns the version of parent `Idd` in a
#' [base::numeric_version()] format. This makes it easy to direction
#' compare versions of different `IddObject`s, e.g. `iddobj$version() > 8.6` or
#' `iddobj1$version() > iddobj2$version()`.
#'
#' @return A [base::numeric_version()] object.
#'
#' @examples
#' \dontrun{
#' # get version
#' surf$version()
#' }
#'
version = function ()
iddobj_version(self, private),
# }}}
# parent {{{
#' @description
#' Get parent [Idd]
#'
#' @details
#' `$parent()` returns parent [Idd] object.
#'
#' @return A [Idd] object.
#'
#' @examples
#' \dontrun{
#' surf$parent()
#' }
#'
parent = function ()
iddobj_parent(self, private),
# }}}
# }}}
# CLASS PROPERTY GETTERS {{{
# group_name {{{
#' @description
#' Get the group name
#'
#' @details
#' `$group_name()` returns the group name of current `IddObject`.
#'
#' @return A single string.
#'
#' @examples
#' \dontrun{
#' surf$group_name()
#' }
#'
group_name = function ()
iddobj_group_name(self, private),
# }}}
# group_index {{{
#' @description
#' Get the group index
#'
#' @details
#' `$group_index()` returns the group index of current `IddObject`. A
#' group index is just an integer indicating its appearance order in the
#' [Idd].
#'
#' @return A single integer.
#'
#' @examples
#' \dontrun{
#' surf$group_index()
#' }
#'
group_index = function ()
iddobj_group_index(self, private),
# }}}
# class_name {{{
#' @description
#' Get the class name of current `IddObject`
#'
#' @details
#' `$class_name()` returns the class name of current `IddObject`.
#'
#' @return A single string.
#'
#' @examples
#' \dontrun{
#' surf$class_name()
#' }
#'
class_name = function ()
iddobj_class_name(self, private),
# }}}
# class_index {{{
#' @description
#' Get the class index
#'
#' @details
#' `$class_index()` returns the class index of current `IddObject`. A
#' class index is just an integer indicating its appearance order in the
#' [Idd].
#'
#' @return A single integer.
#'
#' @examples
#' \dontrun{
#' surf$class_index()
#' }
#'
class_index = function ()
iddobj_class_index(self, private),
# }}}
# class_format {{{
#' @description
#' Get the class format
#'
#' @details
#' `$class_format()` returns the format of this IDD class. This format
#' indicator is currently not used by eplusr.
#'
#' @note
#' Some classes have special format when saved in the IDFEditor with the
#' special format option enabled. Those special format includes
#' "singleLine", "vertices", "compactSchedule", "fluidProperties",
#' "viewFactors" and "spectral". eplusr can handle all those format when
#' parsing IDF files. However, when saved, all classes are formatted in
#' standard way.
#'
#' @return A single character.
#'
#' @examples
#' \dontrun{
#' surf$class_format()
#' }
#'
class_format = function ()
iddobj_class_format(self, private),
# }}}
# min_fields {{{
#' @description
#' Get the minimum field number of current class
#'
#' @details
#' `$min_fields()` returns the minimum fields required for current class.
#' If no required, `0` is returned.
#'
#' @return A single integer.
#'
#' @examples
#' \dontrun{
#' surf$min_fields()
#' }
#'
min_fields = function ()
iddobj_min_fields(self, private),
# }}}
# num_fields {{{
#' @description
#' Get the total field number of current class
#'
#' @details
#' `$num_fields()` returns current total number of fields in current
#' class.
#'
#' @note
#' This number may change if the class is extensible and after
#' `$add_extensible_group()` or `$del_extensible_group()`.
#'
#' @return A single integer.
#'
#' @examples
#' \dontrun{
#' surf$num_fields()
#' }
#'
num_fields = function ()
iddobj_num_fields(self, private),
# }}}
# memo {{{
#' @description
#' Get the memo string of current class
#'
#' @details
#' `$memo()` returns memo of current class, usually a brief description
#' of this class.
#'
#' @return A character vector.
#'
#' @examples
#' \dontrun{
#' surf$memo()
#' }
#'
memo = function ()
iddobj_memo(self, private),
# }}}
# num_extensible{{{
#' @description
#' Get the field number of the extensible group in current class
#'
#' @details
#' `$num_extensible()` returns the field number of the extensible group
#' in current class.
#'
#' An extensible group is a set of fields that should be treated as a
#' whole, such like the X, Y and Z vertices of a building surfaces. An
#' extensible group should be added or deleted together.
#'
#' If there is no extensible group in current class, `0` is returned.
#'
#' @return A single integer.
#'
#' @examples
#' \dontrun{
#' surf$num_extensible()
#' }
#'
num_extensible = function ()
iddobj_num_extensible(self, private),
# }}}
# first_extensible_index {{{
#' @description
#' Get the minimum field number of current class
#'
#' @details
#' `$first_extensible_index()` returns the field index of first
#' extensible field in current class.
#'
#' An extensible group is a set of fields that should be treated as a
#' whole, such like the X, Y and Z vertices of a building surfaces. An
#' extensible group should be added or deleted together.
#'
#' If there is no extensible group in current class, `0` is returned.
#'
#' @return A single integer.
#'
#' @examples
#' \dontrun{
#' surf$first_extensible_index()
#' }
#'
first_extensible_index = function ()
iddobj_first_extensible_index(self, private),
# }}}
# extensible_group_num{{{
#' @description
#' Get the number of extensible groups in current class
#'
#' @details
#' `$extensible_group_num()` returns the number of extensible groups in
#' current class.
#'
#' An extensible group is a set of fields that should be treated as a
#' whole, such like the X, Y and Z vertices of a building surfaces. An
#' extensible group should be added or deleted together.
#'
#' If there is no extensible group in current class, `0` is returned.
#'
#' @return A single integer.
#'
#' @examples
#' \dontrun{
#' surf$extensible_group_num()
#' }
#'
extensible_group_num = function ()
iddobj_extensible_group_num(self, private),
# }}}
# }}}
# EXTENSIBLE GROUP {{{
# add_extensible_group {{{
#' @description
#' Add extensible groups in current class
#'
#' @details
#' `$add_extensible_groups()` adds extensible groups in this class.
#'
#' An extensible group is a set of fields that should be treated as a
#' whole, such like the X, Y and Z vertices of a building surfaces. An
#' extensible group should be added or deleted together.
#'
#' An error will be issued if current class contains no extensible
#' group.
#'
#' @param num An integer indicating the number of extensible groups to
#' be added.
#'
#' @return The modified `IddObject` itself.
#'
#' @examples
#' \dontrun{
#' # field number before adding
#' surf$num_fields()
#' # extensible group number before adding
#' surf$extensible_group_num()
#'
#' # add 2 more extensible groups
#' surf$add_extensible_group(2)
#'
#' # field number after adding
#' surf$num_fields()
#' # extensible group number after adding
#' surf$extensible_group_num()
#' }
#'
add_extensible_group = function (num = 1L)
iddobj_add_extensible_group(self, private, num),
# }}}
# del_extensible_group {{{
#' @description
#' Delete extensible groups in current class
#'
#' @details
#' `$del_extensible_groups()` deletes extensible groups in this class.
#'
#' An extensible group is a set of fields that should be treated as a
#' whole, such like the X, Y and Z vertices of a building surfaces. An
#' extensible group should be added or deleted together.
#'
#' An error will be issued if current class contains no extensible
#' group.
#'
#' @param num An integer indicating the number of extensible groups to
#' be deleted.
#'
#' @return The modified `IddObject` itself.
#'
#' @examples
#' \dontrun{
#' # field number before deleting
#' surf$num_fields()
#' # extensible group number before deleting
#' surf$extensible_group_num()
#'
#' # delete 2 more extensible groups
#' surf$del_extensible_group(2)
#'
#' # field number after deleting
#' surf$num_fields()
#' # extensible group number after deleting
#' surf$extensible_group_num()
#' }
#'
del_extensible_group = function (num = 1L)
iddobj_del_extensible_group(self, private, num),
# }}}
# }}}
# CLASS PROPERTY ASSERTIONS {{{
# has_name {{{
#' @description
#' Check if current class has name attribute
#'
#' @details
#' `$has_name()` return `TRUE` if current class has name attribute, and
#' `FALSE` otherwise.
#'
#' A class with name attribute means that objects in this class can have
#' names.
#'
#' @return A single logical value (`TRUE` or `FALSE`).
#'
#' @examples
#' \dontrun{
#' surf$has_name()
#' }
#'
has_name = function ()
iddobj_has_name(self, private),
# }}}
# is_required {{{
#' @description
#' Check if current class is required
#'
#' @details
#' `$is_required()` returns `TRUE` if current class is required and
#' `FALSE` otherwise.
#'
#' A required class means that for any model, there should be at least
#' one object in this class. One example is `Building` class.
#'
#' @return A single logical value (`TRUE` or `FALSE`).
#'
#' @examples
#' \dontrun{
#' surf$is_required()
#' }
#'
is_required = function ()
iddobj_is_required(self, private),
# }}}
# is_unique {{{
#' @description
#' Check if current class is unique
#'
#' @details
#' `$is_unique()` returns `TRUE` if current class is unique and
#' `FALSE` otherwise.
#'
#' A unique class means that for any model, there should be at most
#' one object in this class. One example is `Building` class.
#'
#' @return A single logical value (`TRUE` or `FALSE`).
#'
#' @examples
#' \dontrun{
#' surf$is_unique()
#' }
#'
is_unique = function ()
iddobj_is_unique(self, private),
# }}}
# is_extensible {{{
#' @description
#' Check if current class is extensible
#'
#' @details
#' `$is_extensible()` returns `TRUE` if current class is extensible and
#' `FALSE` otherwise.
#'
#' An extensible class means that there are a certain number of fields
#' in this class that can be dynamically added or deleted, such as the
#' X, Y and Z vertices of a building surface.
#'
#' @return A single logical value (`TRUE` or `FALSE`).
#'
#' @examples
#' \dontrun{
#' surf$is_extensible()
#' }
#'
is_extensible = function ()
iddobj_is_extensible(self, private),
# }}}
# }}}
# FIELD PROPERTY GETTERS {{{
# field_name {{{
#' @description
#' Get field names
#'
#' @details
#' `$field_name()` returns a character vector of names of fields
#' specified by field indices in current class.
#'
#' @param index An integer vector of field indices. If `NULL`, names of
#' all fields in this class are returned. Default: `NULL`.
#'
#' @param unit If `TRUE`, the units of those fields are also returned.
#' Default: `FALSE`.
#'
#' @param in_ip If `in_ip`, corresponding imperial units are returned.
#' It only has effect when `unit` is `TRUE`. Default:
#' `eplusr_option("view_in_ip")`.
#'
#' @return A character vector.
#'
#' @examples
#' \dontrun{
#' # get all field names
#' surf$field_name()
#'
#' # get field units also
#' surf$field_name(unit = TRUE)
#'
#' # get field units in IP
#' surf$field_name(unit = TRUE)
#'
#' # change field name to lower-style
#' surf$field_name(unit = TRUE, in_ip = TRUE)
#' }
#'
field_name = function (index = NULL, unit = FALSE, in_ip = eplusr_option("view_in_ip"))
iddobj_field_name(self, private, index, unit, in_ip),
# }}}
# field_index {{{
#' @description
#' Get field indices
#'
#' @details
#' `$field_index()` returns an integer vector of names of fields
#' specified by field names in current class.
#'
#' @param name A character vector of field names. Can be in
#' "lower-style", i.e. all spaces and dashes is replaced by
#' underscores. If `NULL`, indices of all fields in this class
#' are returned. Default: `NULL`.
#'
#' @return An integer vector.
#'
#' @examples
#' \dontrun{
#' # get all field indices
#' surf$field_index()
#'
#' # get field indices for specific fields
#' surf$field_index(c("number of vertices", "vertex 10 z-coordinate"))
#' }
#'
field_index = function (name = NULL)
iddobj_field_index(self, private, name),
# }}}
# field_type {{{
#' @description
#' Get field types
#'
#' @details
#' `$field_type()` returns a character vector of field types of
#' specified fields in current class. All possible values are:
#'
#' * `"integer"`
#' * `"real"`
#' * `"alpha"` (arbitrary string)
#' * `"choice"` (alpha with specific list of choices)
#' * `"object-list"` (link to a list of objects defined elsewhere)
#' * `"external-list"` (uses a special list from an external source)
#' * `"node"` (name used in connecting HVAC components).
#'
#' @param which An integer vector of field indices or a character vector
#' of field names in current class. If `NULL`, all fields in this
#' class are used. Default: `NULL`.
#'
#' @return A character vector.
#'
#' @examples
#' \dontrun{
#' # get all field types
#' surf$field_type()
#'
#' # get field types for specific fields
#' surf$field_type(c("name", "zone name", "vertex 10 z-coordinate"))
#' }
#'
field_type = function (which = NULL)
iddobj_field_type(self, private, which = which),
# }}}
# field_note {{{
#' @description
#' Get field notes
#'
#' @details
#' `$field_note()` returns a list of character vectors that contains
#' field notes of specified fields in current class, usually serving as
#' field descriptions. If no notes are found for current fields, `NULL`
#' is returned.
#'
#' @param which An integer vector of field indices or a character vector
#' of field names in current class. If `NULL`, all fields in this
#' class are used. Default: `NULL`.
#'
#' @return A list of character vectors.
#'
#' @examples
#' \dontrun{
#' # get all field notes
#' surf$field_note()
#'
#' # get field types for specific fields
#' surf$field_note(c("name", "zone name", "vertex 10 z-coordinate"))
#' }
#'
field_note = function (which = NULL)
iddobj_field_note(self, private, which),
# }}}
# field_unit {{{
#' @description
#' Get field units
#'
#' @details
#' `$field_unit()` returns a character vector that contains units of
#' specified fields in current class. If there is no unit found for
#' current field, `NA` is returned.
#'
#' @param which An integer vector of field indices or a character vector
#' of field names in current class. If `NULL`, all fields in this
#' class are used. Default: `NULL`.
#'
#' @param in_ip If `in_ip`, corresponding imperial units are returned.
#' Default: `eplusr_option("view_in_ip")`.
#'
#' @return A character vector.
#'
#' @examples
#' \dontrun{
#' # get all field units
#' surf$field_unit()
#'
#' # get field units for specific fields
#' surf$field_unit(c("name", "zone name", "vertex 10 z-coordinate"))
#' }
#'
field_unit = function (which = NULL, in_ip = eplusr_option("view_in_ip"))
iddobj_field_unit(self, private, which, in_ip),
# }}}
# field_default {{{
#' @description
#' Get field default value
#'
#' @details
#' `$field_default()` returns a list that contains default values of
#' specified fields in current class. If there is no default value found
#' for current field, `NA` is returned.
#'
#' @note
#' The type of each default value will be consistent with field
#' definition. However, for numeric fields with default values being
#' `"autosize"` or `"autocalculate"`, the type of returned values will
#' be character.
#'
#' @param which An integer vector of field indices or a character vector
#' of field names in current class. If `NULL`, all fields in this
#' class are used. Default: `NULL`.
#'
#' @param in_ip If `in_ip`, values in corresponding imperial units are
#' returned. Default: `eplusr_option("view_in_ip")`.
#'
#' @return A character vector.
#'
#' @examples
#' \dontrun{
#' # get all field default values
#' surf$field_default()
#'
#' # get default values for specific fields
#' surf$field_default(c("name", "zone name", "vertex 10 z-coordinate"))
#' }
#'
field_default = function (which = NULL, in_ip = eplusr_option("view_in_ip"))
iddobj_field_default(self, private, which, in_ip),
# }}}
# field_choice {{{
#' @description
#' Get choices of field values
#'
#' @details
#' `$field_value()` returns a list of character vectors that contains
#' choices of specified field values in current class. If there is no
#' choice found for current field, `NULL` is returned.
#'
#' @param which An integer vector of field indices or a character vector
#' of field names in current class. If `NULL`, all fields in this
#' class are used. Default: `NULL`.
#'
#' @return A list of character vectors.
#'
#' @examples
#' \dontrun{
#' # get all field value choices
#' surf$field_choice()
#'
#' # get field value choices for specific fields
#' surf$field_choice(c("name", "sun exposure", "wind exposure"))
#' }
#'
field_choice = function (which = NULL)
iddobj_field_choice(self, private, which),
# }}}
# field_range {{{
#' @description
#' Get field value ranges
#'
#' @details
#' `$field_range()` returns a list of value ranges of specified fields
#' in current class.
#'
#' Every range has four components:
#'
#' * `minimum`: lower limit
#' * `lower_incbounds`: `TRUE` if the lower limit should be included
#' * `maximum`: upper limit
#' * `upper_incbounds`: `TRUE` if the upper limit should be included
#'
#' For fields of character type,
#'
#' * `minimum` and `maximum` are always set to `NA`
#' * `lower_incbounds` and `upper_incbounds` are always set to `FALSE`
#'
#' For fields of numeric types with no specified ranges,
#'
#' * `minimum` is set to `-Inf`
#' * `lower_incbounds` is set to `FALSE`
#' * `upper` is set to `Inf`
#' * `upper_incbounds` is set to `FALSE`
#'
#' The field range is printed in number interval denotation.
#'
#' @param which An integer vector of field indices or a character vector
#' of field names in current class. If `NULL`, all fields in this
#' class are used. Default: `NULL`.
#'
#' @return A list of ranges.
#'
#' @examples
#' \dontrun{
#' # get all field value ranges
#' surf$field_range()
#'
#' # get value ranges for specific fields
#' surf$field_range(c("name", "number of vertices", "vertex 10 z-coordinate"))
#' }
#'
field_range = function (which = NULL)
iddobj_field_range(self, private, which),
# }}}
# field_relation {{{
#' @description
#' Extract the relationship among fields
#'
#' @details
#' Many fields in [Idd] can be referred by others. For example, the
#' `Outside Layer` and other fields in `Construction` class refer to the
#' `Name` field in `Material` class and other material related classes.
#' Here it means that the `Outside Layer` field **refers to** the `Name`
#' field and the `Name` field is **referred by** the `Outside Layer`.
#'
#' `$field_relation()` provides a simple interface to get this kind of
#' relation. It takes a field specification and a relation
#' direction, and returns an `IddRelation` object which contains data
#' presenting such relation above.
#'
#' `$field_relation()` returns a list of references for those fields
#' that have the `object-list` and/or `reference` and
#' `reference-class-name` attribute. Basically, it is a list of two
#' elements `ref_to` and `ref_by`. Underneath, `ref_to` and `ref_by`
#' are [data.table][data.table::data.table()]s which contain source
#' field data and reference field data with custom printing method. For
#' instance, if `iddobj$field_relation(c(1, 2), "ref_to")` gives results
#' below:
#'
#' ```
#' -- Refer to Others ---------------------
#' +- Field: <1: Field 1>
#' | v~~~~~~~~~~~~~~~~~~
#' | \- Class: <Class 2>
#' | \- Field: <2: Field 2>
#' |
#' \- Field: <2: Field 2>
#' ```
#'
#' This means that `Field 2` in current class does not refer to any other fields.
#' But `Field 1` in current class refers to `Field 2` in class named `Class 2`.
#'
#' @param which An integer vector of field indices or a character vector
#' of field names in current class. If `NULL`, all fields in this
#' class are used. Default: `NULL`.
#'
#' @param direction The relation direction to extract. Should be one of
#' `"all"`, `"ref_to"` or `"ref_by"`.
#'
#' @param class A character vector of class names used for searching
#' relations. Default: `NULL`.
#'
#' @param group A character vector of group names used for searching
#' relations. Default: `NULL`.
#'
#' @param depth If > 0, the relation is searched recursively. A
#' simple example of recursive reference: one material named
#' `mat` is referred by a construction named `const`, and `const`
#' is also referred by a surface named `surf`. If `NULL`,
#' all possible recursive relations are returned. Default: `0`.
#'
#' @param keep If `TRUE`, all input fields are returned regardless they
#' have any relations with other objects or not. If `FALSE`, only
#' fields in input that have relations with other objects are
#' returned. Default: `FALSE`.
#'
#' @return An `IddRelation` object.
#'
#' @examples
#' \dontrun{
#' # get field relation for specific fields
#' surf$field_relation(c("name", "zone name", "vertex 10 z-coordinate"))
#' }
#'
field_relation = function (which = NULL, direction = c("all", "ref_by", "ref_to"), class = NULL, group = NULL, depth = 0L, keep = FALSE)
iddobj_field_relation(self, private, which, match.arg(direction), class = class, group = group, depth = depth, keep = keep),
# }}}
# field_possible {{{
#' @description
#' Get field possible values
#'
#' @details
#' `$field_possible()` returns all possible values for specified fields,
#' including auto-value (`Autosize`, `Autocalculate`, and `NA` if not
#' applicable), and results from `$field_default()`, `$field_range()`,
#' `$field_choice()`. Underneath, it returns a data.table with custom
#' printing method. For instance, if `iddobj$field_possible(c(4, 2))`
#' gives results below:
#'
#' ```
#' -- 4: Field 4 ----------
#' * Auto value: <NA>
#' * Default: <NA>
#' * Choice:
#' - "Key1"
#' - "Key2"
#'
#' -- 2: Field 2 ----------
#' * Auto value: "Autosize"
#' * Default: 2
#' * Choice: <NA>
#' ```
#'
#' This means that `Field 4` in current class cannot be "autosized" or
#' "autocalculated", and it does not have any default value. Its value should be
#' a choice from `"Key1"` or `"Key2"`. For `Field 2` in current class, it has a
#' default value of `2` but can also be filled with value `"Autosize"`.
#'
#' @param which An integer vector of field indices or a character vector
#' of field names in current class. If `NULL`, all fields in this
#' class are used. Default: `NULL`.
#'
#' @return A `IddFieldPossible` object which is a
#' [data.table::data.table()] with 9 columns.
#'
#' @examples
#' \dontrun{
#' # get field possible values for specific fields
#' surf$field_possible(6:10)
#' }
#'
field_possible = function (which = NULL)
iddobj_field_possible(self, private, which),
# }}}
# }}}
# FIELD PROPERTY ASSERTIONS {{{
# is_valid_field_num {{{
#' @description
#' Check if input is a valid field number
#'
#' @details
#' `$is_valid_field_num()` returns `TRUE` if input `num` is acceptable
#' as a total number of fields in this class. Extensible property is
#' considered.
#'
#' For instance, the total number of fields defined in IDD for class
#' `BuildingSurfaces:Detailed` is 390. However, 396 is still a valid
#' field number for this class as the number of field in the extensible
#' group is 3.
#'
#' @param num An integer vector to test.
#'
#' @return A logical vector.
#'
#' @examples
#' \dontrun{
#' surf$is_valid_field_num(c(10, 14, 100))
#' }
#'
is_valid_field_num = function (num)
iddobj_is_valid_field_num(self, private, num),
# }}}
# is_extensible_index {{{
#' @description
#' Check if input field index indicates an extensible field
#'
#' @details
#' `$is_extensible_index()` returns `TRUE` if input `index` indicates an
#' index of extensible field in current class.
#'
#' Extensible fields mean that these fields can be dynamically added or
#' deleted, such like the X, Y and Z vertices of a building surface.
#'
#' @param index An integer vector of field indices.
#'
#' @return A logical vector.
#'
#' @examples
#' \dontrun{
#' surf$is_extensible_index(c(10, 14, 100))
#' }
#'
is_extensible_index = function (index)
iddobj_is_extensible_index(self, private, index),
# }}}
# is_valid_field_name {{{
#' @description
#' Check if input character is a valid field name
#'
#' @details
#' `$is_valid_field_name()` returns `TRUE` if `name` is a valid field
#' name **WITHOUT** unit. Note `name` can be given in underscore style,
#' e.g. `"outside_layer"` is equivalent to `"Outside Layer"`.
#'
#' @param name A character vector to test.
#'
#' @param strict If `TRUE`, only exact match is accepted. Default:
#' `FALSE`.
#'
#' @return A logical vector.
#'
#' @examples
#' \dontrun{
#' surf$is_valid_field_name(c("name", "sun_exposure"))
#'
#' # exact match
#' surf$is_valid_field_name(c("Name", "Sun_Exposure"), strict = TRUE)
#' }
#'
is_valid_field_name = function (name, strict = FALSE)
iddobj_is_valid_field_name(self, private, name, strict),
# }}}
# is_valid_field_index {{{
#' @description
#' Check if input integer is a valid field index
#'
#' @details
#' `$is_valid_field_index()` returns `TRUE` if `index` is a valid field
#' index. For extensible class, `TRUE` is always returned.
#'
#' @param index An integer vector to test.
#'
#' @return A logical vector.
#'
#' @examples
#' \dontrun{
#' surf$is_valid_field_index(1:10)
#' }
#'
is_valid_field_index = function (index)
iddobj_is_valid_field_index(self, private, index),
# }}}
# is_autosizable_field {{{
#' @description
#' Check if input field can be autosized
#'
#' @details
#' `$is_autosizable_field()` returns `TRUE` if input field can be
#' assigned to `autosize`.
#'
#' @param which An integer vector of field indices or a character vector
#' of field names in current class. If `NULL`, all fields in this
#' class are used. Default: `NULL`.
#'
#' @return A logical vector.
#'
#' @examples
#' \dontrun{
#' surf$is_autosizable_field()
#'
#' surf$is_autosizable_field(c("name", "sun_exposure"))
#' }
#'
is_autosizable_field = function (which = NULL)
iddobj_is_autosizable_field(self, private, which),
# }}}
# is_autocalculatable_field {{{
#' @description
#' Check if input field can be autocalculated
#'
#' @details
#' `$is_autocalculatable_field()` returns `TRUE` if input field can be
#' assigned to `autocalculate`.
#'
#' @param which An integer vector of field indices or a character vector
#' of field names in current class. If `NULL`, all fields in this
#' class are used. Default: `NULL`.
#'
#' @return A logical vector.
#'
#' @examples
#' \dontrun{
#' surf$is_autocalculatable_field()
#'
#' surf$is_autocalculatable_field(c("name", "sun_exposure"))
#' }
#'
is_autocalculatable_field = function (which = NULL)
iddobj_is_autocalculatable_field(self, private, which),
# }}}
# is_numeric_field {{{
#' @description
#' Check if input field value should be numeric
#'
#' @details
#' `$is_numeric_field()` returns `TRUE` if the value of input field
    #' should be numeric (an integer or a real number).
#'
#' @param which An integer vector of field indices or a character vector
#' of field names in current class. If `NULL`, all fields in this
#' class are used. Default: `NULL`.
#'
#' @return A logical vector.
#'
#' @examples
#' \dontrun{
#' surf$is_numeric_field()
#'
#' surf$is_numeric_field(c("name", "sun_exposure"))
#' }
#'
is_numeric_field = function (which = NULL)
iddobj_is_numeric_field(self, private, which),
# }}}
# is_real_field {{{
#' @description
#' Check if input field value should be a real number
#'
#' @details
#' `$is_real_field()` returns `TRUE` if the field value should be a real
#' number but not an integer.
#'
#' @param which An integer vector of field indices or a character vector
#' of field names in current class. If `NULL`, all fields in this
#' class are used. Default: `NULL`.
#'
#' @return A logical vector.
#'
#' @examples
#' \dontrun{
#' surf$is_real_field()
#'
#' surf$is_real_field(c("name", "number of vertices"))
#' }
#'
is_real_field = function (which = NULL)
iddobj_is_real_field(self, private, which),
# }}}
# is_integer_field {{{
#' @description
#' Check if input field value should be an integer
#'
#' @details
    #' `$is_integer_field()` returns `TRUE` if the field value should be an
#' integer.
#'
#' @param which An integer vector of field indices or a character vector
#' of field names in current class. If `NULL`, all fields in this
#' class are used. Default: `NULL`.
#'
#' @return A logical vector.
#'
#' @examples
#' \dontrun{
#' surf$is_integer_field()
#'
#' surf$is_integer_field(c("name", "number of vertices"))
#' }
#'
is_integer_field = function (which = NULL)
iddobj_is_integer_field(self, private, which),
# }}}
# is_required_field {{{
#' @description
#' Check if input field is required
#'
#' @details
#' `$is_required_field()` returns `TRUE` if the field is required.
#'
#' @param which An integer vector of field indices or a character vector
#' of field names in current class. If `NULL`, all fields in this
#' class are used. Default: `NULL`.
#'
#' @return A logical vector.
#'
#' @examples
#' \dontrun{
#' surf$is_required_field()
#'
#' surf$is_required_field(c("name", "number of vertices"))
#' }
#'
is_required_field = function (which = NULL)
iddobj_is_required_field(self, private, which),
# }}}
# has_ref {{{
#' @description
#' Check if input field can refer to or can be referred by other fields
#'
#' @details
#' `$has_ref()` returns `TRUE` if input field refers to or can be referred
#' by other fields.
#'
#' @param which An integer vector of field indices or a character vector
#' of field names in current class. If `NULL`, all fields in this
#' class are used. Default: `NULL`.
#'
#' @param class A character vector of class names used for searching
#' relations. Default: `NULL`.
#'
#' @param group A character vector of group names used for searching
#' relations. Default: `NULL`.
#'
#' @param depth If > 0, the relation is searched recursively. A
#' simple example of recursive reference: one material named
#' `mat` is referred by a construction named `const`, and `const`
#' is also referred by a surface named `surf`. If `NULL`,
#' all possible recursive relations are returned. Default: `0`.
#'
#' @return A logical vector.
#'
#' @examples
#' \dontrun{
#' surf$has_ref()
#'
#' surf$has_ref(c("name", "zone name"))
#' }
#'
has_ref = function (which = NULL, class = NULL, group = NULL, depth = 0L)
iddobj_has_ref(self, private, which, class = class, group = group, depth = depth),
# }}}
# has_ref_to {{{
#' @description
#' Check if input field can refer to other fields
#'
#' @details
#' `$has_ref_to()` returns `TRUE` if input field can refer to other
#' fields.
#'
#' @param which An integer vector of field indices or a character vector
#' of field names in current class. If `NULL`, all fields in this
#' class are used. Default: `NULL`.
#'
#' @param class A character vector of class names used for searching
#' relations. Default: `NULL`.
#'
#' @param group A character vector of group names used for searching
#' relations. Default: `NULL`.
#'
#' @param depth If > 0, the relation is searched recursively. A
#' simple example of recursive reference: one material named
#' `mat` is referred by a construction named `const`, and `const`
#' is also referred by a surface named `surf`. If `NULL`,
#' all possible recursive relations are returned. Default: `0`.
#'
#' @return A logical vector.
#'
#' @examples
#' \dontrun{
#' surf$has_ref_to()
#'
#' surf$has_ref_to(c("name", "zone name"))
#' }
#'
has_ref_to = function (which = NULL, class = NULL, group = NULL, depth = 0L)
iddobj_has_ref_to(self, private, which, class = class, group = group, depth = depth),
# }}}
# has_ref_by {{{
#' @description
#' Check if input field can be referred by other fields
#'
#' @details
#' `$has_ref_by()` returns `TRUE` if input field can be referred by
#' other fields.
#'
#' @param which An integer vector of field indices or a character vector
#' of field names in current class. If `NULL`, all fields in this
#' class are used. Default: `NULL`.
#'
#' @param class A character vector of class names used for searching
#' relations. Default: `NULL`.
#'
#' @param group A character vector of group names used for searching
#' relations. Default: `NULL`.
#'
#' @param depth If > 0, the relation is searched recursively. A
#' simple example of recursive reference: one material named
#' `mat` is referred by a construction named `const`, and `const`
#' is also referred by a surface named `surf`. If `NULL`,
#' all possible recursive relations are returned. Default: `0`.
#'
#' @return A logical vector.
#'
#' @examples
#' \dontrun{
#' surf$has_ref_by()
#'
#' surf$has_ref_by(c("name", "zone name"))
#' }
#'
has_ref_by = function (which = NULL, class = NULL, group = NULL, depth = 0L)
iddobj_has_ref_by(self, private, which, class = class, group = group, depth = depth),
# }}}
# }}}
# DATA EXTRACTION {{{
# to_table {{{
#' @description
#' Format an `IddObject` as a data.frame
#'
#' @details
#' `$to_table()` returns a [data.table][data.table::data.table()] that
#' contains basic data of current class.
#' The returned [data.table][data.table::data.table()] has 3 columns:
#'
#' * `class`: Character type. Current class name.
#' * `index`: Integer type. Field indexes.
#' * `field`: Character type. Field names.
#'
#' @param all If `TRUE`, all available fields defined in IDD for
#' specified class will be returned. If `FALSE`, only the minimum
#' field number is returned. Default: `FALSE`.
#'
#' @return A [data.table][data.table::data.table()] with 3 columns.
#'
#' @examples
#' \dontrun{
#' surf$to_table()
#'
#' surf$to_table(TRUE)
#' }
#'
to_table = function (all = FALSE)
iddobj_to_table(self, private, all),
# }}}
# to_string {{{
#' @description
#' Format an `IdfObject` as a character vector
#'
#' @details
#' `$to_string()` returns the text format of current class. The returned
#' character vector can be pasted into an IDF file as an empty object of
#' specified class.
#'
#' @param comment A character vector to be used as comments of returned
#' string format object.
#' @param leading Leading spaces added to each field. Default: `4L`.
#' @param sep_at The character width to separate value string and field
#' string. Default: `29L` which is the same as IDF Editor.
#' @param all If `TRUE`, all available fields defined in IDD for
#' specified class will be returned. Default: `FALSE`.
#'
#' @return A character vector.
#'
#' @examples
#' \dontrun{
#' # get text format of class BuildingSurface:Detailed
#' surf$to_string()
#'
#' # tweak output formatting
#' surf$to_string(leading = 0, sep_at = 0)
#'
#' # add comments
#' surf$to_string(c("This", "will", "be", "comments"))
#' }
#'
to_string = function (comment = NULL, leading = 4L, sep_at = 29L, all = FALSE)
iddobj_to_string(self, private, comment, leading, sep_at = sep_at, all = all),
# }}}
# }}}
# print {{{
#' @description
#' Print `IddObject` object
#'
#' @details
#' `$print()` prints the `IddObject` object giving the information of
#' class name, class properties, field indices and field names.
#'
#' `$print()` prints the IddObject. Basically, the print output can be
#' divided into 4 parts:
#'
#' * CLASS: IDD class name of current object in format `<IddObject: CLASS>`.
#' * MEMO: brief description of the IDD class.
#' * PROPERTY: properties of the IDD class, including name of group it
#' belongs to, whether it is an unique or required class and current
#' total fields. The fields may increase if the IDD class is
#' extensible, such as `Branch`, `ZoneList` and etc.
#' * FIELDS: fields of current IDD class. Required fields are marked
#' with stars (`*`). If the class is extensible, only the first
#' extensible group will be printed and two ellipses will be shown at
#' the bottom. Fields in the extensible group will be marked with an
#' arrow down surrounded by angle brackets (`<v>`).
#'
#' @param brief If `TRUE`, only class name part is printed. Default:
#' `FALSE`.
#'
#' @return The `IddObject` object itself, invisibly.
#'
#' @examples
#' \dontrun{
#' surf
#'
#' surf$print(brief = TRUE)
#' }
#'
print = function (brief = FALSE)
iddobj_print(self, private, brief)
# }}}
),
    private = list(
        # PRIVATE FIELDS {{{
        # m_parent: the parent Idd object this IddObject was created from
        m_parent = NULL,
        # m_class_id: integer id of the IDD class this object describes
        m_class_id = NULL,
        # }}}
        # PRIVATE FUNCTIONS {{{
        # private environment of the parent Idd object
        idd_priv = function () {
            ._get_private(private$m_parent)
        },
        # the shared IDD data environment stored in the parent Idd's private
        idd_env = function () {
            .subset2(._get_private(private$m_parent), "m_idd_env")
        }
        # }}}
    )
)
# }}}
# iddobj_version {{{
iddobj_version <- function (self, private) {
    # IDD version of the parent Idd object, read from its private env
    private$idd_priv()$m_version
}
# }}}
# iddobj_parent {{{
iddobj_parent <- function (self, private) {
    # the parent Idd object this IddObject was created from
    private$m_parent
}
# }}}
# iddobj_group_index {{{
iddobj_group_index <- function (self, private) {
    # look up the group id of current class in the IDD class table
    private$idd_env()$class[class_id == private$m_class_id, group_id]
}
# }}}
# iddobj_group_name {{{
iddobj_group_name <- function (self, private) {
    # map the group id to its name via a keyed join on the group table
    grp_id <- iddobj_group_index(self, private)
    private$idd_env()$group[J(grp_id), on = "group_id", group_name]
}
# }}}
# iddobj_class_index {{{
iddobj_class_index <- function (self, private) {
    # the stored class id is used directly as the class index
    private$m_class_id
}
# }}}
# iddobj_class_name {{{
iddobj_class_name <- function (self, private) {
    # keyed join on class id to retrieve the class name
    private$idd_env()$class[J(private$m_class_id), on = "class_id", class_name]
}
# }}}
# iddobj_class_data {{{
iddobj_class_data <- function (self, private) {
    # one-row data.table with all class-level properties of current class
    private$idd_env()$class[J(private$m_class_id), on = "class_id"]
}
# }}}
# iddobj_class_format {{{
iddobj_class_format <- function (self, private) {
    # format spec of the class as recorded in the class table
    iddobj_class_data(self, private)$format
}
# }}}
# iddobj_min_fields {{{
iddobj_min_fields <- function (self, private) {
    # minimum number of fields required for this class
    iddobj_class_data(self, private)$min_fields
}
# }}}
# iddobj_num_fields {{{
iddobj_num_fields <- function (self, private) {
    # current total number of fields defined for this class
    iddobj_class_data(self, private)$num_fields
}
# }}}
# iddobj_memo {{{
iddobj_memo <- function (self, private) {
    # memo (description) lines of the class; 'memo' is a list column,
    # hence the [[1L]] to unwrap the single row
    iddobj_class_data(self, private)$memo[[1L]]
}
# }}}
# iddobj_num_extensible {{{
iddobj_num_extensible <- function (self, private) {
    # number of fields in one extensible group (0 if not extensible)
    iddobj_class_data(self, private)$num_extensible
}
# }}}
# iddobj_first_extensible_index {{{
iddobj_first_extensible_index <- function (self, private) {
    # field index where the first extensible group starts
    iddobj_class_data(self, private)$first_extensible
}
# }}}
# iddobj_extensible_group_num {{{
iddobj_extensible_group_num <- function (self, private) {
    # current count of extensible groups in this class
    iddobj_class_data(self, private)$num_extensible_group
}
# }}}
# iddobj_add_extensible_group {{{
iddobj_add_extensible_group <- function (self, private, num) {
    # Append 'num' extensible group(s) to the field data of current class.
    # 'num' must be a single positive integer.
    assert(is_count(num))
    # add_idd_extensible_group() operates on the shared IDD environment; its
    # return value is not used afterwards (the original code captured it in a
    # local 'iddenv' right after a dead read of the same environment -- both
    # assignments were no-ops and have been dropped).
    add_idd_extensible_group(private$idd_env(), private$m_class_id, num, strict = TRUE)
    verbose_info(num, " extensible group(s) added")
    # return the IddObject itself to allow method chaining
    self
}
# }}}
# iddobj_del_extensible_group {{{
iddobj_del_extensible_group <- function (self, private, num) {
    # Delete 'num' extensible group(s) from the field data of current class.
    # 'num' must be a single positive integer.
    assert(is_count(num))
    # del_idd_extensible_group() operates on the shared IDD environment; its
    # return value is not used afterwards (the original code captured it in a
    # local 'iddenv' right after a dead read of the same environment -- both
    # assignments were no-ops and have been dropped).
    del_idd_extensible_group(private$idd_env(), private$m_class_id, num, strict = TRUE)
    verbose_info(num, " extensible group(s) deleted")
    # return the IddObject itself to allow method chaining
    self
}
# }}}
# iddobj_has_name {{{
iddobj_has_name <- function (self, private) {
    # TRUE if objects of this class have a name field
    iddobj_class_data(self, private)$has_name
}
# }}}
# iddobj_is_required {{{
iddobj_is_required <- function (self, private) {
    # TRUE if the class is marked as a required object in the IDD
    iddobj_class_data(self, private)$required_object
}
# }}}
# iddobj_is_unique {{{
iddobj_is_unique <- function (self, private) {
    # TRUE if the class is marked as an unique object in the IDD
    iddobj_class_data(self, private)$unique_object
}
# }}}
# iddobj_is_extensible {{{
iddobj_is_extensible <- function (self, private) {
    # a class is extensible when it defines at least one extensible field
    iddobj_class_data(self, private)$num_extensible > 0L
}
# }}}
# iddobj_field_data {{{
iddobj_field_data <- function (self, private, which = NULL, property = NULL, underscore = FALSE) {
    # Fetch field data of current class from the parent IDD environment.
    # 'which' selects fields by index or name; NULL means every field.
    # 'property' optionally names extra field-property columns to return.
    # 'underscore' allows underscore-style field names in 'which'.
    all <- is.null(which)  # idiomatic replacement of `if (...) TRUE else FALSE`
    get_idd_field(private$idd_env(), private$m_class_id, which,
        property, all = all, underscore = underscore, no_ext = TRUE
    )
}
# }}}
# iddobj_field_name {{{
iddobj_field_name <- function (self, private, index = NULL, unit = FALSE, in_ip = eplusr_option("view_in_ip")) {
    # Return field names, optionally with units appended.
    # 'index': field indices (NULL = all); 'unit': append units to names;
    # 'in_ip': when appending units, use IP instead of SI ones.
    if (!is.null(index)) assert(are_count(index))
    if (unit) {
        # Temporarily switch the global unit-view option so format_name()
        # renders the requested unit system. The branch is only entered when
        # the current option differs from 'in_ip', so the previous value is
        # exactly '!in_ip', which on.exit() restores.
        if (eplusr_option("view_in_ip") != in_ip) {
            eplusr_option(view_in_ip = in_ip)
            on.exit(eplusr_option(view_in_ip = !in_ip), add = TRUE)
        }
        res <- format_name(iddobj_field_data(self, private, index, c("units", "ip_units")))
    } else {
        # no unit requested: plain field names from the field table
        res <- iddobj_field_data(self, private, index)$field_name
    }
    res
}
# }}}
# iddobj_field_index {{{
iddobj_field_index <- function (self, private, name = NULL) {
    # Map field names (underscore style accepted) to their field indices.
    if (!is.null(name)) assert(is.character(name))
    fld <- iddobj_field_data(self, private, name, underscore = TRUE)
    fld$field_index
}
# }}}
# iddobj_field_type {{{
iddobj_field_type <- function (self, private, which = NULL) {
    # declared IDD type (e.g. "integer", "real") of each selected field
    iddobj_field_data(self, private, which, "type", underscore = TRUE)$type
}
# }}}
# iddobj_field_note {{{
iddobj_field_note <- function (self, private, which = NULL) {
    # IDD note (description) of each selected field; 'note' is a list column
    iddobj_field_data(self, private, which, "note", underscore = TRUE)$note
}
# }}}
# iddobj_field_unit {{{
iddobj_field_unit <- function (self, private, which = NULL, in_ip = eplusr_option("view_in_ip")) {
    # Fetch both SI and IP unit columns, then pick one according to 'in_ip'.
    fld <- iddobj_field_data(self, private, which, c("units", "ip_units"), underscore = TRUE)
    if (in_ip) fld$ip_units else fld$units
}
# }}}
# iddobj_field_default {{{
iddobj_field_default <- function (self, private, which = NULL, in_ip = eplusr_option("view_in_ip")) {
    # Return default values of selected fields as a list, converting
    # numeric defaults from SI to IP units when 'in_ip' is TRUE.
    fld <- iddobj_field_data(self, private, which, underscore = TRUE,
        c("default_chr", "default_num", "units", "ip_units", "type_enum")
    )
    if (in_ip) fld <- field_default_to_unit(fld, "si", "ip")
    # rename default columns so the generic value-list extractor can be reused
    setnames(fld, c("default_chr", "default_num"), c("value_chr", "value_num"))
    get_value_list(fld)
}
# }}}
# iddobj_field_choice {{{
iddobj_field_choice <- function (self, private, which = NULL) {
    # valid choices of each selected field; 'choice' is a list column
    iddobj_field_data(self, private, which, "choice", underscore = TRUE)$choice
}
# }}}
# iddobj_field_range {{{
iddobj_field_range <- function (self, private, which = NULL) {
    # Build one 'ranger' object per field describing its numeric range.
    fld <- iddobj_field_data(self, private, which, c("type_enum", "has_range",
        "minimum", "lower_incbounds", "maximum", "upper_incbounds"), underscore = TRUE)
    # set limits to Inf for numeric values that do not have ranges
    fld[type_enum < IDDFIELD_TYPE$choice & has_range == FALSE,
        `:=`(maximum = Inf, minimum = -Inf)]
    # assign a list column of range objects by reference, one per field id
    fld[, `:=`(range = list(ranger(minimum, lower_incbounds, maximum, upper_incbounds))), by = field_id]
    fld$range
}
# }}}
# iddobj_field_relation {{{
iddobj_field_relation <- function (self, private, which = NULL, direction = c("all", "ref_to", "ref_by"),
                                   class = NULL, group = NULL, depth = 0L, keep = FALSE) {
    # Reference relations (ref_to / ref_by) of fields in current class.
    direction <- match.arg(direction)
    # Resolve the query target once: whole class when 'which' is NULL,
    # otherwise the requested fields' ids.
    if (is.null(which)) {
        cls_id <- private$m_class_id
        fld_id <- NULL
    } else {
        cls_id <- NULL
        fld_id <- get_idd_field(private$idd_env(), private$m_class_id, which)$field_id
    }
    get_iddobj_relation(private$idd_env(), cls_id, fld_id, name = TRUE,
        direction = direction, depth = depth, keep_all = keep,
        class = class, group = group)
}
# }}}
# iddobj_field_possible {{{
iddobj_field_possible <- function (self, private, which = NULL) {
    # Summarise possible values (auto fields, defaults, choices, ranges)
    # of the selected fields; delegates to get_iddobj_possible() after
    # resolving the field ids.
    fld <- iddobj_field_data(self, private, which, FIELD_COLS$property, underscore = TRUE)
    get_iddobj_possible(private$idd_env(), field_id = fld$field_id)
}
# }}}
# iddobj_is_valid_field_num {{{
iddobj_is_valid_field_num <- function (self, private, num) {
    # Vectorized over 'num': each element is tested against the class's
    # field-count constraints, taking extensible groups into account.
    assert(are_count(num))
    cls <- iddobj_class_data(self, private)
    !(
        # it should be FALSE when num is
        # 1. less than min-fields OR
        cls$min_fields > num |
        # 2. larger than num-fields but not extensible OR
        (cls$num_extensible == 0L & num > cls$num_fields) |
        # 3. larger than num-fields and is extensible but not have full
        # extensible groups (R's %% always yields a non-negative result
        # here, so the test also behaves for num below num_fields)
        (cls$num_extensible > 0L &
            ((num - cls$num_fields) %% cls$num_extensible) != 0L
        )
    )
}
# }}}
# iddobj_is_extensible_index {{{
iddobj_is_extensible_index <- function (self, private, index) {
    # Vectorized: TRUE for indices at or beyond the first extensible field.
    assert(are_count(index))
    cls <- iddobj_class_data(self, private)
    if (cls$num_extensible == 0L) {
        # non-extensible class: no index can be extensible
        rep(FALSE, length(index))
    } else {
        index >= cls$first_extensible
    }
}
# }}}
# iddobj_is_valid_field_name {{{
iddobj_is_valid_field_name <- function (self, private, name, strict = FALSE) {
    # Validate field names; non-strict mode also accepts underscore-style
    # lowercase names (e.g. "outside_layer" for "Outside Layer").
    fld <- iddobj_field_data(self, private, underscore = TRUE)
    nm <- as.character(name)
    exact <- nm %chin% fld$field_name
    if (isTRUE(strict)) return(exact)
    exact | nm %chin% lower_name(fld$field_name)
}
# }}}
# iddobj_is_valid_field_index {{{
iddobj_is_valid_field_index <- function (self, private, index) {
    # Vectorized: an index is valid when it does not exceed the current
    # total field count of the class.
    # NOTE(review): the roxygen doc states extensible classes always return
    # TRUE, but no extensible handling is visible here -- presumably
    # 'num_fields' already reflects the current (extended) total; confirm.
    assert(are_count(index))
    index <= iddobj_class_data(self, private)$num_fields
}
# }}}
# iddobj_is_autosizable_field {{{
iddobj_is_autosizable_field <- function (self, private, which) {
    # TRUE for fields flagged as autosizable in the IDD
    iddobj_field_data(self, private, which, "autosizable", underscore = TRUE)$autosizable
}
# }}}
# iddobj_is_autocalculatable_field {{{
iddobj_is_autocalculatable_field <- function (self, private, which) {
    # TRUE for fields flagged as autocalculatable in the IDD
    iddobj_field_data(self, private, which, "autocalculatable", underscore = TRUE)$autocalculatable
}
# }}}
# iddobj_is_numeric_field {{{
iddobj_is_numeric_field <- function (self, private, which) {
    # numeric fields are those typed either "integer" or "real"
    iddobj_field_type(self, private, which) %chin% c("integer", "real")
}
# }}}
# iddobj_is_integer_field {{{
iddobj_is_integer_field <- function (self, private, which) {
    # TRUE for fields whose declared type is "integer"
    iddobj_field_type(self, private, which) == "integer"
}
# }}}
# iddobj_is_real_field {{{
iddobj_is_real_field <- function (self, private, which) {
    # TRUE for fields whose declared type is "real"
    iddobj_field_type(self, private, which) == "real"
}
# }}}
# iddobj_is_required_field {{{
iddobj_is_required_field <- function (self, private, which) {
    # TRUE for fields flagged as required in the IDD
    iddobj_field_data(self, private, which, "required_field", underscore = TRUE)$required_field
}
# }}}
# iddobj_has_ref {{{
iddobj_has_ref <- function (self, private, which = NULL, class = NULL, group = NULL,
                            depth = 0L, type = c("all", "ref_to", "ref_by")) {
    # Whether fields have reference relations with other fields.
    # 'type' picks the direction: "ref_to" (field refers to others),
    # "ref_by" (field is referred by others) or "all" (either direction).
    type <- match.arg(type)
    if (is.null(which)) {
        # all fields: query relations at class level
        rel <- get_iddobj_relation(private$idd_env(), private$m_class_id,
            class = class, group = group, depth = depth, direction = type)
    } else {
        # specific fields: resolve their ids first, then query at field level
        fld <- get_idd_field(private$idd_env(), private$m_class_id, which)
        rel <- get_iddobj_relation(private$idd_env(), NULL, fld$field_id,
            class = class, group = group, depth = depth, direction = type)
    }
    # A field "has ref" when its relation rows contain at least one
    # non-missing counterpart field id.
    if (type == "all") {
        rel$ref_to[, list(.N > 0 && any(!is.na(src_field_id))), by = "field_id"]$V1 |
        rel$ref_by[, list(.N > 0 && any(!is.na(field_id))), by = "src_field_id"]$V1
    } else if (type == "ref_to") {
        rel$ref_to[, list(.N > 0 && any(!is.na(src_field_id))), by = "field_id"]$V1
    } else {
        rel$ref_by[, list(.N > 0 && any(!is.na(field_id))), by = "src_field_id"]$V1
    }
}
# }}}
# iddobj_has_ref_by {{{
iddobj_has_ref_by <- function (self, private, which = NULL, class = NULL, group = NULL, depth = 0L) {
    # convenience wrapper: fields referred BY other fields
    iddobj_has_ref(self, private, which, class = class, group = group, depth = depth, type = "ref_by")
}
# }}}
# iddobj_has_ref_to {{{
iddobj_has_ref_to <- function (self, private, which = NULL, class = NULL, group = NULL, depth = 0L) {
    # convenience wrapper: fields that refer TO other fields
    iddobj_has_ref(self, private, which, class = class, group = group, depth = depth, type = "ref_to")
}
# }}}
# iddobj_to_table {{{
iddobj_to_table <- function (self, private, all = FALSE) {
    # data.table (class/index/field columns) representation of this class;
    # 'all' = include every field instead of only the minimum required
    get_iddobj_table(private$idd_env(), private$m_class_id, all)
}
# }}}
# iddobj_to_string {{{
iddobj_to_string <- function (self, private, comment = NULL, leading = 4L, sep_at = 29L, all = FALSE) {
    # IDF-style text of an empty object of this class; formatting knobs
    # ('leading', 'sep_at') mirror IDF Editor defaults
    get_iddobj_string(private$idd_env(), private$m_class_id, comment = comment,
        leading = leading, sep_at = sep_at, all = all
    )
}
# }}}
# iddobj_print {{{
iddobj_print <- function (self, private, brief = FALSE) {
    # Print the IddObject: class-name header, memo, class properties and a
    # field listing. When 'brief' is TRUE, only the header is printed.
    # CLASS {{{
    cls <- iddobj_class_data(self, private)
    cli::cat_line(paste0("<IddObject: ", surround(cls$class_name), ">"))
    if (brief) return(invisible(self))
    # memo {{{
    cli::cat_rule("MEMO")
    if (is.null(cls$memo[[1L]])) {
        cli::cat_line(" <No Memo>\n")
    } else {
        cli::cat_line(" \"", paste0(cls$memo[[1L]], collapse = "\n"), "\"\n")
    }
    # }}}
    # property {{{
    cli::cat_rule("PROPERTIES")
    grp <- private$idd_env()$group[J(cls$group_id), on = "group_id", group_name]
    cli::cat_line(" * ", c(
        paste0("Group: ", surround(grp)),
        paste0("Unique: ", cls$unique_object),
        paste0("Required: ", cls$required_object),
        paste0("Total fields: ", cls$num_fields)
    ))
    cli::cat_line()
    # }}}
    # }}}
    # FIELD {{{
    cli::cat_rule("FIELDS")
    # calculate number of fields to print
    if (cls$num_extensible) {
        # only print the first extensible group: show up to the later of the
        # last required field and the end of the first extensible group
        set(cls, NULL, "num_print",
            pmax(cls$last_required, cls$first_extensible + cls$num_extensible - 1L)
        )
    } else {
        set(cls, NULL, "num_print", cls$num_fields)
    }
    fld <- iddobj_field_data(self, private, seq_len(cls$num_print), c("extensible_group", "required_field"))
    set(fld, NULL, "name", format_name(fld, prefix = FALSE))
    # required fields are marked via format_index(required = TRUE)
    set(fld, NULL, "index", format_index(fld, required = TRUE, pad_char = "0"))
    set(fld, NULL, "ext", "")
    # extensible fields get a trailing "<v>" (arrow-down) marker
    fld[extensible_group > 0L, ext := paste0(" <", cli::symbol$arrow_down, ">")]
    cli::cat_line(" ", fld$index, ": ", fld$name, fld$ext)
    # trailing ellipsis hints that more extensible groups can follow
    if (cls$num_extensible) cli::cat_line(" ......")
    # }}}
}
# }}}
#' Format an IddObject
#'
#' Format an [IddObject] into a string of an empty object of current class.
#' It is formatted exactly the same as in IDF Editor.
#'
#' @param x An [IddObject] object.
#' @param all If `TRUE`, all fields in current class are returned, otherwise
#' only minimum fields are returned.
#' @param comment A character vector to be used as comments of returned string
#' format object. If `NULL`, no comments are inserted. Default: `NULL`.
#' @param leading Leading spaces added to each field. Default: `4`.
#' @param sep_at The character width to separate value string and field string.
#' Default: `29` which is the same as IDF Editor.
#' @param ... Further arguments passed to or from other methods.
#' @return A single length character vector.
#' @examples
#' \dontrun{
#' cat(format(use_idd(8.8, download = "auto")$Material, leading = 0))
#' }
#'
#' @export
# format.IddObject {{{
format.IddObject <- function (x, comment = NULL, leading = 4L, sep_at = 29L, all = FALSE, ...) {
    # Collapse the per-line string representation into a single string.
    lines <- x$to_string(comment = comment, leading = leading, sep_at = sep_at, all = all)
    paste0(lines, collapse = "\n")
}
# }}}
#' Coerce an IddObject into a Character Vector
#'
#' Coerce an [IddObject] into an empty object of current class in a character
#' vector format. It is formatted exactly the same as in IDF Editor.
#'
#' @inheritParams format.IddObject
#' @return A character vector.
#' @examples
#' \dontrun{
#' as.character(use_idd(8.8, download = "auto")$Material, leading = 0)
#' }
#'
#' @export
# as.character.IddObject {{{
as.character.IddObject <- function (x, comment = NULL, leading = 4L, sep_at = 29L, all = FALSE, ...) {
    # delegate to the public $to_string() method; one element per line
    x$to_string(comment = comment, leading = leading, sep_at = sep_at, all = all)
}
# }}}
#' @export
# str.IddObject {{{
str.IddObject <- function (object, brief = FALSE, ...) {
    # str() on an IddObject reuses its print method
    object$print(brief)
}
# }}}
#' @export
# ==.IddObject {{{
`==.IddObject` <- function (e1, e2) {
    # Two IddObjects are equal when they come from the same Idd (matched by
    # the parent's log uuid) and describe the same class id.
    if (!is_iddobject(e2)) return(FALSE)
    p1 <- ._get_private(e1)
    p2 <- ._get_private(e2)
    same_parent <- identical(
        ._get_private(p1$m_parent)$m_log$uuid,
        ._get_private(p2$m_parent)$m_log$uuid
    )
    same_parent && identical(p1$m_class_id, p2$m_class_id)
}
#' @export
`!=.IddObject` <- function (e1, e2) {
    # logical negation of the `==` method defined above
    !`==.IddObject`(e1, e2)
}
# }}}
| /R/idd_object.R | permissive | lukas-rokka/eplusr | R | false | false | 65,732 | r | #' @importFrom R6 R6Class
NULL
#' EnergyPlus IDD object
#'
#' `IddObject` is an abstraction of a single object in an [Idd] object. It
#' provides more detail methods to query field properties. `IddObject` can only
#' be created from the parent [Idd] object, using `$object()`,
#' `$object_in_group()` and other equivalent. This is because that
#' initialization of an `IddObject` needs some shared data from parent [Idd]
#' object.
#'
#' There are lots of properties for every class and field. For details on the
#' meaning of each property, please see the heading comments in the
#' `Energy+.idd` file in the EnergyPlus installation path.
#'
#' @docType class
#' @name IddObject
#' @seealso [Idd] Class
#' @author Hongyuan Jia
NULL
#' Create an `IddObject` object.
#'
#' `idd_object()` takes a parent `Idd` object, a class name, and returns a
#' corresponding [IddObject]. For details, see [IddObject].
#'
#' @param parent An [Idd] object or a valid input for [use_idd()].
#' @param class A valid class name (a string).
#' @return An [IddObject] object.
#' @export
#' @examples
#' \dontrun{
#' idd <- use_idd(8.8, download = "auto")
#'
#' # get an IddObject using class name
#' idd_object(idd, "Material")
#' idd_object(8.8, "Material")
#' }
#'
# idd_object {{{
idd_object <- function (parent, class) {
IddObject$new(class, parent)
}
# }}}
#' @export
# IddObject {{{
IddObject <- R6::R6Class(classname = "IddObject", cloneable = FALSE,
public = list(
# INITIALIZE {{{
#' @description
#' Create an `IddObject` object
#'
#' @details
#' Note that an `IddObject` can be created from the parent [Idd] object,
#' using `$object()`, [idd_object] and other equivalent.
#'
#' @param class A single integer specifying the class index or a single
#' string specifying the class name.
#' @param parent An [Idd] object or a valid input for [use_idd()].
#'
#' @return An `IddObject` object.
#'
#' @examples
#' \dontrun{
#' surf <- IddObject$new("BuildingSurface:Detailed", use_idd(8.8, download = "auto"))
#' }
#'
        initialize = function (class, parent) {
            # a parent Idd is mandatory: an IddObject shares class/field data
            # with its parent instead of owning a copy
            if (missing(parent)) {
                abort("error_iddobject_missing_parent",
                    paste("IddObject can only be created based on a parent Idd object.",
                        "Please give `parent`, which should be either an IDD version or an `Idd` object."
                    )
                )
            } else {
                # accepts anything use_idd() can resolve, e.g. an Idd object
                # or an IDD version
                private$m_parent <- use_idd(parent)
            }
            assert(!is.null(class))
            # only the class id is stored; all other data is queried on demand
            # from the parent's IDD environment. `underscore = TRUE` allows
            # underscore-style class names (see `$field_index()` docs).
            private$m_class_id <- get_idd_class(private$idd_env(), class, underscore = TRUE)$class_id
        },
# }}}
# META {{{
# version {{{
#' @description
#' Get the version of parent `Idd`
#'
#' @details
#' `$version()` returns the version of parent `Idd` in a
#' [base::numeric_version()] format. This makes it easy to direction
#' compare versions of different `IddObject`s, e.g. `iddobj$version() > 8.6` or
#' `iddobj1$version() > iddobj2$version()`.
#'
#' @return A [base::numeric_version()] object.
#'
#' @examples
#' \dontrun{
#' # get version
#' surf$version()
#' }
#'
version = function ()
iddobj_version(self, private),
# }}}
# parent {{{
#' @description
#' Get parent [Idd]
#'
#' @details
#' `$parent()` returns parent [Idd] object.
#'
#' @return A [Idd] object.
#'
#' @examples
#' \dontrun{
#' surf$parent()
#' }
#'
parent = function ()
iddobj_parent(self, private),
# }}}
# }}}
# CLASS PROPERTY GETTERS {{{
# group_name {{{
#' @description
#' Get the group name
#'
#' @details
#' `$group_name()` returns the group name of current `IddObject`.
#'
#' @return A single string.
#'
#' @examples
#' \dontrun{
#' surf$group_name()
#' }
#'
group_name = function ()
iddobj_group_name(self, private),
# }}}
# group_index {{{
#' @description
#' Get the group index
#'
#' @details
#' `$group_index()` returns the group index of current `IddObject`. A
#' group index is just an integer indicating its appearance order in the
#' [Idd].
#'
#' @return A single integer.
#'
#' @examples
#' \dontrun{
#' surf$group_index()
#' }
#'
group_index = function ()
iddobj_group_index(self, private),
# }}}
# class_name {{{
#' @description
#' Get the class name of current `IddObject`
#'
#' @details
#' `$class_name()` returns the class name of current `IddObject`.
#'
#' @return A single string.
#'
#' @examples
#' \dontrun{
#' surf$class_name()
#' }
#'
class_name = function ()
iddobj_class_name(self, private),
# }}}
# class_index {{{
#' @description
#' Get the class index
#'
#' @details
#' `$class_index()` returns the class index of current `IddObject`. A
#' class index is just an integer indicating its appearance order in the
#' [Idd].
#'
#' @return A single integer.
#'
#' @examples
#' \dontrun{
#' surf$class_index()
#' }
#'
class_index = function ()
iddobj_class_index(self, private),
# }}}
        # class_format {{{
#' @description
#' Get the class format
#'
#' @details
#' `$class_format()` returns the format of this IDD class. This format
#' indicator is currently not used by eplusr.
#'
#' @note
#' Some classes have special format when saved in the IDFEditor with the
#' special format option enabled. Those special format includes
#' "singleLine", "vertices", "compactSchedule", "fluidProperties",
#' "viewFactors" and "spectral". eplusr can handle all those format when
#' parsing IDF files. However, when saved, all classes are formatted in
#' standard way.
#'
#' @return A single character.
#'
#' @examples
#' \dontrun{
#' surf$class_format()
#' }
#'
class_format = function ()
iddobj_class_format(self, private),
# }}}
# min_fields {{{
#' @description
#' Get the minimum field number of current class
#'
#' @details
#' `$min_fields()` returns the minimum fields required for current class.
#' If no required, `0` is returned.
#'
#' @return A single integer.
#'
#' @examples
#' \dontrun{
#' surf$min_fields()
#' }
#'
min_fields = function ()
iddobj_min_fields(self, private),
# }}}
# num_fields {{{
#' @description
#' Get the total field number of current class
#'
#' @details
#' `$num_fields()` returns current total number of fields in current
#' class.
#'
#' @note
#' This number may change if the class is extensible and after
#' `$add_extensible_group()` or `$del_extensible_group()`.
#'
#' @return A single integer.
#'
#' @examples
#' \dontrun{
#' surf$num_fields()
#' }
#'
num_fields = function ()
iddobj_num_fields(self, private),
# }}}
# memo {{{
#' @description
#' Get the memo string of current class
#'
#' @details
#' `$memo()` returns memo of current class, usually a brief description
#' of this class.
#'
#' @return A character vector.
#'
#' @examples
#' \dontrun{
#' surf$memo()
#' }
#'
memo = function ()
iddobj_memo(self, private),
# }}}
# num_extensible{{{
#' @description
#' Get the field number of the extensible group in current class
#'
#' @details
#' `$num_extensible()` returns the field number of the extensible group
#' in current class.
#'
#' An extensible group is a set of fields that should be treated as a
#' whole, such like the X, Y and Z vertices of a building surfaces. An
#' extensible group should be added or deleted together.
#'
#' If there is no extensible group in current class, `0` is returned.
#'
#' @return A single integer.
#'
#' @examples
#' \dontrun{
#' surf$num_extensible()
#' }
#'
num_extensible = function ()
iddobj_num_extensible(self, private),
# }}}
# first_extensible_index {{{
#' @description
#' Get the minimum field number of current class
#'
#' @details
#' `$first_extensible_index()` returns the field index of first
#' extensible field in current class.
#'
#' An extensible group is a set of fields that should be treated as a
#' whole, such like the X, Y and Z vertices of a building surfaces. An
#' extensible group should be added or deleted together.
#'
#' If there is no extensible group in current class, `0` is returned.
#'
#' @return A single integer.
#'
#' @examples
#' \dontrun{
#' surf$first_extensible_index()
#' }
#'
first_extensible_index = function ()
iddobj_first_extensible_index(self, private),
# }}}
# extensible_group_num{{{
#' @description
#' Get the number of extensible groups in current class
#'
#' @details
#' `$extensible_group_num()` returns the number of extensible groups in
#' current class.
#'
#' An extensible group is a set of fields that should be treated as a
#' whole, such like the X, Y and Z vertices of a building surfaces. An
#' extensible group should be added or deleted together.
#'
#' If there is no extensible group in current class, `0` is returned.
#'
#' @return A single integer.
#'
#' @examples
#' \dontrun{
#' surf$extensible_group_num()
#' }
#'
extensible_group_num = function ()
iddobj_extensible_group_num(self, private),
# }}}
# }}}
# EXTENSIBLE GROUP {{{
# add_extensible_group {{{
#' @description
#' Add extensible groups in current class
#'
#' @details
#' `$add_extensible_groups()` adds extensible groups in this class.
#'
#' An extensible group is a set of fields that should be treated as a
#' whole, such like the X, Y and Z vertices of a building surfaces. An
#' extensible group should be added or deleted together.
#'
#' An error will be issued if current class contains no extensible
#' group.
#'
#' @param num An integer indicating the number of extensible groups to
#' be added.
#'
#' @return The modified `IddObject` itself.
#'
#' @examples
#' \dontrun{
#' # field number before adding
#' surf$num_fields()
#' # extensible group number before adding
#' surf$extensible_group_num()
#'
#' # add 2 more extensible groups
#' surf$add_extensible_group(2)
#'
#' # field number after adding
#' surf$num_fields()
#' # extensible group number after adding
#' surf$extensible_group_num()
#' }
#'
add_extensible_group = function (num = 1L)
iddobj_add_extensible_group(self, private, num),
# }}}
# del_extensible_group {{{
#' @description
#' Delete extensible groups in current class
#'
#' @details
#' `$del_extensible_groups()` deletes extensible groups in this class.
#'
#' An extensible group is a set of fields that should be treated as a
#' whole, such like the X, Y and Z vertices of a building surfaces. An
#' extensible group should be added or deleted together.
#'
#' An error will be issued if current class contains no extensible
#' group.
#'
#' @param num An integer indicating the number of extensible groups to
#' be deleted.
#'
#' @return The modified `IddObject` itself.
#'
#' @examples
#' \dontrun{
#' # field number before deleting
#' surf$num_fields()
#' # extensible group number before deleting
#' surf$extensible_group_num()
#'
#' # delete 2 more extensible groups
#' surf$del_extensible_group(2)
#'
#' # field number after deleting
#' surf$num_fields()
#' # extensible group number after deleting
#' surf$extensible_group_num()
#' }
#'
del_extensible_group = function (num = 1L)
iddobj_del_extensible_group(self, private, num),
# }}}
# }}}
# CLASS PROPERTY ASSERTIONS {{{
# has_name {{{
#' @description
#' Check if current class has name attribute
#'
#' @details
#' `$has_name()` return `TRUE` if current class has name attribute, and
#' `FALSE` otherwise.
#'
#' A class with name attribute means that objects in this class can have
#' names.
#'
#' @return A single logical value (`TRUE` or `FALSE`).
#'
#' @examples
#' \dontrun{
#' surf$has_name()
#' }
#'
has_name = function ()
iddobj_has_name(self, private),
# }}}
# is_required {{{
#' @description
#' Check if current class is required
#'
#' @details
#' `$is_required()` returns `TRUE` if current class is required and
#' `FALSE` otherwise.
#'
#' A required class means that for any model, there should be at least
#' one object in this class. One example is `Building` class.
#'
#' @return A single logical value (`TRUE` or `FALSE`).
#'
#' @examples
#' \dontrun{
#' surf$is_required()
#' }
#'
is_required = function ()
iddobj_is_required(self, private),
# }}}
# is_unique {{{
#' @description
#' Check if current class is unique
#'
#' @details
#' `$is_unique()` returns `TRUE` if current class is unique and
#' `FALSE` otherwise.
#'
#' A unique class means that for any model, there should be at most
#' one object in this class. One example is `Building` class.
#'
#' @return A single logical value (`TRUE` or `FALSE`).
#'
#' @examples
#' \dontrun{
#' surf$is_unique()
#' }
#'
is_unique = function ()
iddobj_is_unique(self, private),
# }}}
# is_extensible {{{
#' @description
#' Check if current class is extensible
#'
#' @details
#' `$is_extensible()` returns `TRUE` if current class is extensible and
#' `FALSE` otherwise.
#'
        #' An extensible class means that there is a certain number of fields
        #' in this class that can be dynamically added or deleted, such as the
        #' X, Y and Z vertices of a building surface.
#'
#' @return A single logical value (`TRUE` or `FALSE`).
#'
#' @examples
#' \dontrun{
#' surf$is_extensible()
#' }
#'
is_extensible = function ()
iddobj_is_extensible(self, private),
# }}}
# }}}
# FIELD PROPERTY GETTERS {{{
# field_name {{{
#' @description
#' Get field names
#'
#' @details
#' `$field_name()` returns a character vector of names of fields
#' specified by field indices in current class.
#'
#' @param index An integer vector of field indices. If `NULL`, names of
#' all fields in this class are returned. Default: `NULL`.
#'
#' @param unit If `TRUE`, the units of those fields are also returned.
#' Default: `FALSE`.
#'
        #' @param in_ip If `TRUE`, corresponding imperial units are returned.
#' It only has effect when `unit` is `TRUE`. Default:
#' `eplusr_option("view_in_ip")`.
#'
#' @return A character vector.
#'
#' @examples
#' \dontrun{
#' # get all field names
#' surf$field_name()
#'
#' # get field units also
#' surf$field_name(unit = TRUE)
#'
        #' # get field units in IP units
        #' surf$field_name(unit = TRUE, in_ip = TRUE)
#' }
#'
field_name = function (index = NULL, unit = FALSE, in_ip = eplusr_option("view_in_ip"))
iddobj_field_name(self, private, index, unit, in_ip),
# }}}
# field_index {{{
#' @description
#' Get field indices
#'
#' @details
        #' `$field_index()` returns an integer vector of indices of fields
        #' specified by field names in current class.
#'
#' @param name A character vector of field names. Can be in
        #'        "lower-style", i.e. all spaces and dashes are replaced by
#' underscores. If `NULL`, indices of all fields in this class
#' are returned. Default: `NULL`.
#'
#' @return An integer vector.
#'
#' @examples
#' \dontrun{
#' # get all field indices
#' surf$field_index()
#'
#' # get field indices for specific fields
#' surf$field_index(c("number of vertices", "vertex 10 z-coordinate"))
#' }
#'
field_index = function (name = NULL)
iddobj_field_index(self, private, name),
# }}}
# field_type {{{
#' @description
#' Get field types
#'
#' @details
#' `$field_type()` returns a character vector of field types of
#' specified fields in current class. All possible values are:
#'
#' * `"integer"`
#' * `"real"`
#' * `"alpha"` (arbitrary string)
#' * `"choice"` (alpha with specific list of choices)
#' * `"object-list"` (link to a list of objects defined elsewhere)
#' * `"external-list"` (uses a special list from an external source)
#' * `"node"` (name used in connecting HVAC components).
#'
#' @param which An integer vector of field indices or a character vector
#' of field names in current class. If `NULL`, all fields in this
#' class are used. Default: `NULL`.
#'
#' @return A character vector.
#'
#' @examples
#' \dontrun{
#' # get all field types
#' surf$field_type()
#'
#' # get field types for specific fields
#' surf$field_type(c("name", "zone name", "vertex 10 z-coordinate"))
#' }
#'
field_type = function (which = NULL)
iddobj_field_type(self, private, which = which),
# }}}
# field_note {{{
#' @description
#' Get field notes
#'
#' @details
#' `$field_note()` returns a list of character vectors that contains
#' field notes of specified fields in current class, usually serving as
#' field descriptions. If no notes are found for current fields, `NULL`
#' is returned.
#'
#' @param which An integer vector of field indices or a character vector
#' of field names in current class. If `NULL`, all fields in this
#' class are used. Default: `NULL`.
#'
#' @return A list of character vectors.
#'
#' @examples
#' \dontrun{
#' # get all field notes
#' surf$field_note()
#'
#' # get field types for specific fields
#' surf$field_note(c("name", "zone name", "vertex 10 z-coordinate"))
#' }
#'
field_note = function (which = NULL)
iddobj_field_note(self, private, which),
# }}}
# field_unit {{{
#' @description
#' Get field units
#'
#' @details
#' `$field_unit()` returns a character vector that contains units of
#' specified fields in current class. If there is no unit found for
#' current field, `NA` is returned.
#'
#' @param which An integer vector of field indices or a character vector
#' of field names in current class. If `NULL`, all fields in this
#' class are used. Default: `NULL`.
#'
        #' @param in_ip If `TRUE`, corresponding imperial units are returned.
#' Default: `eplusr_option("view_in_ip")`.
#'
#' @return A character vector.
#'
#' @examples
#' \dontrun{
#' # get all field units
#' surf$field_unit()
#'
#' # get field units for specific fields
#' surf$field_unit(c("name", "zone name", "vertex 10 z-coordinate"))
#' }
#'
field_unit = function (which = NULL, in_ip = eplusr_option("view_in_ip"))
iddobj_field_unit(self, private, which, in_ip),
# }}}
# field_default {{{
#' @description
#' Get field default value
#'
#' @details
#' `$field_default()` returns a list that contains default values of
#' specified fields in current class. If there is no default value found
#' for current field, `NA` is returned.
#'
#' @note
#' The type of each default value will be consistent with field
#' definition. However, for numeric fields with default values being
#' `"autosize"` or `"autocalculate"`, the type of returned values will
#' be character.
#'
#' @param which An integer vector of field indices or a character vector
#' of field names in current class. If `NULL`, all fields in this
#' class are used. Default: `NULL`.
#'
        #' @param in_ip If `TRUE`, values in corresponding imperial units are
#' returned. Default: `eplusr_option("view_in_ip")`.
#'
        #' @return A list.
#'
#' @examples
#' \dontrun{
#' # get all field default values
#' surf$field_default()
#'
#' # get default values for specific fields
#' surf$field_default(c("name", "zone name", "vertex 10 z-coordinate"))
#' }
#'
field_default = function (which = NULL, in_ip = eplusr_option("view_in_ip"))
iddobj_field_default(self, private, which, in_ip),
# }}}
# field_choice {{{
#' @description
#' Get choices of field values
#'
#' @details
        #' `$field_choice()` returns a list of character vectors that contains
#' choices of specified field values in current class. If there is no
#' choice found for current field, `NULL` is returned.
#'
#' @param which An integer vector of field indices or a character vector
#' of field names in current class. If `NULL`, all fields in this
#' class are used. Default: `NULL`.
#'
#' @return A list of character vectors.
#'
#' @examples
#' \dontrun{
#' # get all field value choices
#' surf$field_choice()
#'
#' # get field value choices for specific fields
#' surf$field_choice(c("name", "sun exposure", "wind exposure"))
#' }
#'
field_choice = function (which = NULL)
iddobj_field_choice(self, private, which),
# }}}
# field_range {{{
#' @description
#' Get field value ranges
#'
#' @details
#' `$field_range()` returns a list of value ranges of specified fields
#' in current class.
#'
#' Every range has four components:
#'
#' * `minimum`: lower limit
#' * `lower_incbounds`: `TRUE` if the lower limit should be included
#' * `maximum`: upper limit
#' * `upper_incbounds`: `TRUE` if the upper limit should be included
#'
#' For fields of character type,
#'
#' * `minimum` and `maximum` are always set to `NA`
#' * `lower_incbounds` and `upper_incbounds` are always set to `FALSE`
#'
#' For fields of numeric types with no specified ranges,
#'
#' * `minimum` is set to `-Inf`
#' * `lower_incbounds` is set to `FALSE`
#' * `upper` is set to `Inf`
#' * `upper_incbounds` is set to `FALSE`
#'
#' The field range is printed in number interval denotation.
#'
#' @param which An integer vector of field indices or a character vector
#' of field names in current class. If `NULL`, all fields in this
#' class are used. Default: `NULL`.
#'
#' @return A list of ranges.
#'
#' @examples
#' \dontrun{
#' # get all field value ranges
#' surf$field_range()
#'
#' # get value ranges for specific fields
#' surf$field_range(c("name", "number of vertices", "vertex 10 z-coordinate"))
#' }
#'
field_range = function (which = NULL)
iddobj_field_range(self, private, which),
# }}}
# field_relation {{{
#' @description
#' Extract the relationship among fields
#'
#' @details
#' Many fields in [Idd] can be referred by others. For example, the
#' `Outside Layer` and other fields in `Construction` class refer to the
#' `Name` field in `Material` class and other material related classes.
#' Here it means that the `Outside Layer` field **refers to** the `Name`
#' field and the `Name` field is **referred by** the `Outside Layer`.
#'
#' `$field_relation()` provides a simple interface to get this kind of
#' relation. It takes a field specification and a relation
#' direction, and returns an `IddRelation` object which contains data
#' presenting such relation above.
#'
#' `$field_relation()` returns a list of references for those fields
#' that have the `object-list` and/or `reference` and
#' `reference-class-name` attribute. Basically, it is a list of two
#' elements `ref_to` and `ref_by`. Underneath, `ref_to` and `ref_by`
#' are [data.table][data.table::data.table()]s which contain source
#' field data and reference field data with custom printing method. For
#' instance, if `iddobj$field_relation(c(1, 2), "ref_to")` gives results
#' below:
#'
#' ```
#' -- Refer to Others ---------------------
#' +- Field: <1: Field 1>
#' | v~~~~~~~~~~~~~~~~~~
#' | \- Class: <Class 2>
#' | \- Field: <2: Field 2>
#' |
#' \- Field: <2: Field 2>
#' ```
#'
#' This means that `Field 2` in current class does not refer to any other fields.
#' But `Field 1` in current class refers to `Field 2` in class named `Class 2`.
#'
#' @param which An integer vector of field indices or a character vector
#' of field names in current class. If `NULL`, all fields in this
#' class are used. Default: `NULL`.
#'
#' @param direction The relation direction to extract. Should be one of
#' `"all"`, `"ref_to"` or `"ref_by"`.
#'
#' @param class A character vector of class names used for searching
#' relations. Default: `NULL`.
#'
#' @param group A character vector of group names used for searching
#' relations. Default: `NULL`.
#'
#' @param depth If > 0, the relation is searched recursively. A
#' simple example of recursive reference: one material named
#' `mat` is referred by a construction named `const`, and `const`
#' is also referred by a surface named `surf`. If `NULL`,
#' all possible recursive relations are returned. Default: `0`.
#'
#' @param keep If `TRUE`, all input fields are returned regardless they
#' have any relations with other objects or not. If `FALSE`, only
#' fields in input that have relations with other objects are
#' returned. Default: `FALSE`.
#'
#' @return An `IddRelation` object.
#'
#' @examples
#' \dontrun{
#' # get field relation for specific fields
#' surf$field_relation(c("name", "zone name", "vertex 10 z-coordinate"))
#' }
#'
field_relation = function (which = NULL, direction = c("all", "ref_by", "ref_to"), class = NULL, group = NULL, depth = 0L, keep = FALSE)
iddobj_field_relation(self, private, which, match.arg(direction), class = class, group = group, depth = depth, keep = keep),
# }}}
# field_possible {{{
#' @description
#' Get field possible values
#'
#' @details
#' `$field_possible()` returns all possible values for specified fields,
#' including auto-value (`Autosize`, `Autocalculate`, and `NA` if not
#' applicable), and results from `$field_default()`, `$field_range()`,
#' `$field_choice()`. Underneath, it returns a data.table with custom
#' printing method. For instance, if `iddobj$field_possible(c(4, 2))`
#' gives results below:
#'
#' ```
#' -- 4: Field 4 ----------
#' * Auto value: <NA>
#' * Default: <NA>
#' * Choice:
#' - "Key1"
#' - "Key2"
#'
#' -- 2: Field 2 ----------
#' * Auto value: "Autosize"
#' * Default: 2
#' * Choice: <NA>
#' ```
#'
#' This means that `Field 4` in current class cannot be "autosized" or
#' "autocalculated", and it does not have any default value. Its value should be
#' a choice from `"Key1"` or `"Key2"`. For `Field 2` in current class, it has a
#' default value of `2` but can also be filled with value `"Autosize"`.
#'
#' @param which An integer vector of field indices or a character vector
#' of field names in current class. If `NULL`, all fields in this
#' class are used. Default: `NULL`.
#'
#' @return A `IddFieldPossible` object which is a
#' [data.table::data.table()] with 9 columns.
#'
#' @examples
#' \dontrun{
#' # get field possible values for specific fields
#' surf$field_possible(6:10)
#' }
#'
field_possible = function (which = NULL)
iddobj_field_possible(self, private, which),
# }}}
# }}}
# FIELD PROPERTY ASSERTIONS {{{
# is_valid_field_num {{{
#' @description
#' Check if input is a valid field number
#'
#' @details
#' `$is_valid_field_num()` returns `TRUE` if input `num` is acceptable
#' as a total number of fields in this class. Extensible property is
#' considered.
#'
#' For instance, the total number of fields defined in IDD for class
#' `BuildingSurfaces:Detailed` is 390. However, 396 is still a valid
#' field number for this class as the number of field in the extensible
#' group is 3.
#'
#' @param num An integer vector to test.
#'
#' @return A logical vector.
#'
#' @examples
#' \dontrun{
#' surf$is_valid_field_num(c(10, 14, 100))
#' }
#'
is_valid_field_num = function (num)
iddobj_is_valid_field_num(self, private, num),
# }}}
# is_extensible_index {{{
#' @description
#' Check if input field index indicates an extensible field
#'
#' @details
#' `$is_extensible_index()` returns `TRUE` if input `index` indicates an
#' index of extensible field in current class.
#'
#' Extensible fields mean that these fields can be dynamically added or
#' deleted, such like the X, Y and Z vertices of a building surface.
#'
#' @param index An integer vector of field indices.
#'
#' @return A logical vector.
#'
#' @examples
#' \dontrun{
#' surf$is_extensible_index(c(10, 14, 100))
#' }
#'
is_extensible_index = function (index)
iddobj_is_extensible_index(self, private, index),
# }}}
# is_valid_field_name {{{
#' @description
#' Check if input character is a valid field name
#'
#' @details
#' `$is_valid_field_name()` returns `TRUE` if `name` is a valid field
#' name **WITHOUT** unit. Note `name` can be given in underscore style,
#' e.g. `"outside_layer"` is equivalent to `"Outside Layer"`.
#'
#' @param name A character vector to test.
#'
#' @param strict If `TRUE`, only exact match is accepted. Default:
#' `FALSE`.
#'
#' @return A logical vector.
#'
#' @examples
#' \dontrun{
#' surf$is_valid_field_name(c("name", "sun_exposure"))
#'
#' # exact match
#' surf$is_valid_field_name(c("Name", "Sun_Exposure"), strict = TRUE)
#' }
#'
is_valid_field_name = function (name, strict = FALSE)
iddobj_is_valid_field_name(self, private, name, strict),
# }}}
# is_valid_field_index {{{
#' @description
#' Check if input integer is a valid field index
#'
#' @details
#' `$is_valid_field_index()` returns `TRUE` if `index` is a valid field
#' index. For extensible class, `TRUE` is always returned.
#'
#' @param index An integer vector to test.
#'
#' @return A logical vector.
#'
#' @examples
#' \dontrun{
#' surf$is_valid_field_index(1:10)
#' }
#'
is_valid_field_index = function (index)
iddobj_is_valid_field_index(self, private, index),
# }}}
# is_autosizable_field {{{
#' @description
#' Check if input field can be autosized
#'
#' @details
#' `$is_autosizable_field()` returns `TRUE` if input field can be
#' assigned to `autosize`.
#'
#' @param which An integer vector of field indices or a character vector
#' of field names in current class. If `NULL`, all fields in this
#' class are used. Default: `NULL`.
#'
#' @return A logical vector.
#'
#' @examples
#' \dontrun{
#' surf$is_autosizable_field()
#'
#' surf$is_autosizable_field(c("name", "sun_exposure"))
#' }
#'
is_autosizable_field = function (which = NULL)
iddobj_is_autosizable_field(self, private, which),
# }}}
# is_autocalculatable_field {{{
#' @description
#' Check if input field can be autocalculated
#'
#' @details
#' `$is_autocalculatable_field()` returns `TRUE` if input field can be
#' assigned to `autocalculate`.
#'
#' @param which An integer vector of field indices or a character vector
#' of field names in current class. If `NULL`, all fields in this
#' class are used. Default: `NULL`.
#'
#' @return A logical vector.
#'
#' @examples
#' \dontrun{
#' surf$is_autocalculatable_field()
#'
#' surf$is_autocalculatable_field(c("name", "sun_exposure"))
#' }
#'
is_autocalculatable_field = function (which = NULL)
iddobj_is_autocalculatable_field(self, private, which),
# }}}
# is_numeric_field {{{
#' @description
#' Check if input field value should be numeric
#'
#' @details
#' `$is_numeric_field()` returns `TRUE` if the value of input field
#' should be numeric ( an integer or a real number).
#'
#' @param which An integer vector of field indices or a character vector
#' of field names in current class. If `NULL`, all fields in this
#' class are used. Default: `NULL`.
#'
#' @return A logical vector.
#'
#' @examples
#' \dontrun{
#' surf$is_numeric_field()
#'
#' surf$is_numeric_field(c("name", "sun_exposure"))
#' }
#'
is_numeric_field = function (which = NULL)
iddobj_is_numeric_field(self, private, which),
# }}}
# is_real_field {{{
#' @description
#' Check if input field value should be a real number
#'
#' @details
#' `$is_real_field()` returns `TRUE` if the field value should be a real
#' number but not an integer.
#'
#' @param which An integer vector of field indices or a character vector
#' of field names in current class. If `NULL`, all fields in this
#' class are used. Default: `NULL`.
#'
#' @return A logical vector.
#'
#' @examples
#' \dontrun{
#' surf$is_real_field()
#'
#' surf$is_real_field(c("name", "number of vertices"))
#' }
#'
is_real_field = function (which = NULL)
iddobj_is_real_field(self, private, which),
# }}}
# is_integer_field {{{
#' @description
#' Check if input field value should be an integer
#'
#' @details
        #' `$is_integer_field()` returns `TRUE` if the field value should be an
#' integer.
#'
#' @param which An integer vector of field indices or a character vector
#' of field names in current class. If `NULL`, all fields in this
#' class are used. Default: `NULL`.
#'
#' @return A logical vector.
#'
#' @examples
#' \dontrun{
#' surf$is_integer_field()
#'
#' surf$is_integer_field(c("name", "number of vertices"))
#' }
#'
is_integer_field = function (which = NULL)
iddobj_is_integer_field(self, private, which),
# }}}
# is_required_field {{{
#' @description
#' Check if input field is required
#'
#' @details
#' `$is_required_field()` returns `TRUE` if the field is required.
#'
#' @param which An integer vector of field indices or a character vector
#' of field names in current class. If `NULL`, all fields in this
#' class are used. Default: `NULL`.
#'
#' @return A logical vector.
#'
#' @examples
#' \dontrun{
#' surf$is_required_field()
#'
#' surf$is_required_field(c("name", "number of vertices"))
#' }
#'
is_required_field = function (which = NULL)
iddobj_is_required_field(self, private, which),
# }}}
# has_ref {{{
#' @description
#' Check if input field can refer to or can be referred by other fields
#'
#' @details
#' `$has_ref()` returns `TRUE` if input field refers to or can be referred
#' by other fields.
#'
#' @param which An integer vector of field indices or a character vector
#' of field names in current class. If `NULL`, all fields in this
#' class are used. Default: `NULL`.
#'
#' @param class A character vector of class names used for searching
#' relations. Default: `NULL`.
#'
#' @param group A character vector of group names used for searching
#' relations. Default: `NULL`.
#'
#' @param depth If > 0, the relation is searched recursively. A
#' simple example of recursive reference: one material named
#' `mat` is referred by a construction named `const`, and `const`
#' is also referred by a surface named `surf`. If `NULL`,
#' all possible recursive relations are returned. Default: `0`.
#'
#' @return A logical vector.
#'
#' @examples
#' \dontrun{
#' surf$has_ref()
#'
#' surf$has_ref(c("name", "zone name"))
#' }
#'
has_ref = function (which = NULL, class = NULL, group = NULL, depth = 0L)
iddobj_has_ref(self, private, which, class = class, group = group, depth = depth),
# }}}
# has_ref_to {{{
#' @description
#' Check if input field can refer to other fields
#'
#' @details
#' `$has_ref_to()` returns `TRUE` if input field can refer to other
#' fields.
#'
#' @param which An integer vector of field indices or a character vector
#' of field names in current class. If `NULL`, all fields in this
#' class are used. Default: `NULL`.
#'
#' @param class A character vector of class names used for searching
#' relations. Default: `NULL`.
#'
#' @param group A character vector of group names used for searching
#' relations. Default: `NULL`.
#'
#' @param depth If > 0, the relation is searched recursively. A
#' simple example of recursive reference: one material named
#' `mat` is referred by a construction named `const`, and `const`
#' is also referred by a surface named `surf`. If `NULL`,
#' all possible recursive relations are returned. Default: `0`.
#'
#' @return A logical vector.
#'
#' @examples
#' \dontrun{
#' surf$has_ref_to()
#'
#' surf$has_ref_to(c("name", "zone name"))
#' }
#'
has_ref_to = function (which = NULL, class = NULL, group = NULL, depth = 0L) {
    # Thin binding: all work is delegated to the file-level helper.
    iddobj_has_ref_to(self, private, which, class = class, group = group, depth = depth)
},
# }}}
# has_ref_by {{{
#' @description
#' Check if input field can be referred by other fields
#'
#' @details
#' `$has_ref_by()` returns `TRUE` if input field can be referred by
#' other fields.
#'
#' @param which An integer vector of field indices or a character vector
#' of field names in current class. If `NULL`, all fields in this
#' class are used. Default: `NULL`.
#'
#' @param class A character vector of class names used for searching
#' relations. Default: `NULL`.
#'
#' @param group A character vector of group names used for searching
#' relations. Default: `NULL`.
#'
#' @param depth If > 0, the relation is searched recursively. A
#' simple example of recursive reference: one material named
#' `mat` is referred by a construction named `const`, and `const`
#' is also referred by a surface named `surf`. If `NULL`,
#' all possible recursive relations are returned. Default: `0`.
#'
#' @return A logical vector.
#'
#' @examples
#' \dontrun{
#' surf$has_ref_by()
#'
#' surf$has_ref_by(c("name", "zone name"))
#' }
#'
has_ref_by = function (which = NULL, class = NULL, group = NULL, depth = 0L) {
    # Thin binding: all work is delegated to the file-level helper.
    iddobj_has_ref_by(self, private, which, class = class, group = group, depth = depth)
},
# }}}
# }}}
# DATA EXTRACTION {{{
# to_table {{{
#' @description
#' Format an `IddObject` as a data.frame
#'
#' @details
#' `$to_table()` returns a [data.table][data.table::data.table()] that
#' contains basic data of current class.
#' The returned [data.table][data.table::data.table()] has 3 columns:
#'
#' * `class`: Character type. Current class name.
#' * `index`: Integer type. Field indexes.
#' * `field`: Character type. Field names.
#'
#' @param all If `TRUE`, all available fields defined in IDD for
#' specified class will be returned. If `FALSE`, only the minimum
#' field number is returned. Default: `FALSE`.
#'
#' @return A [data.table][data.table::data.table()] with 3 columns.
#'
#' @examples
#' \dontrun{
#' surf$to_table()
#'
#' surf$to_table(TRUE)
#' }
#'
to_table = function (all = FALSE) {
    # Thin binding: table construction happens in the file-level helper.
    iddobj_to_table(self, private, all)
},
# }}}
# to_string {{{
#' @description
#' Format an `IdfObject` as a character vector
#'
#' @details
#' `$to_string()` returns the text format of current class. The returned
#' character vector can be pasted into an IDF file as an empty object of
#' specified class.
#'
#' @param comment A character vector to be used as comments of returned
#' string format object.
#' @param leading Leading spaces added to each field. Default: `4L`.
#' @param sep_at The character width to separate value string and field
#' string. Default: `29L` which is the same as IDF Editor.
#' @param all If `TRUE`, all available fields defined in IDD for
#' specified class will be returned. Default: `FALSE`.
#'
#' @return A character vector.
#'
#' @examples
#' \dontrun{
#' # get text format of class BuildingSurface:Detailed
#' surf$to_string()
#'
#' # tweak output formatting
#' surf$to_string(leading = 0, sep_at = 0)
#'
#' # add comments
#' surf$to_string(c("This", "will", "be", "comments"))
#' }
#'
to_string = function (comment = NULL, leading = 4L, sep_at = 29L, all = FALSE) {
    # Thin binding: text formatting happens in the file-level helper.
    iddobj_to_string(self, private, comment, leading, sep_at = sep_at, all = all)
},
# }}}
# }}}
# print {{{
#' @description
#' Print `IddObject` object
#'
#' @details
#' `$print()` prints the `IddObject` object giving the information of
#' class name, class properties, field indices and field names.
#'
#' `$print()` prints the IddObject. Basically, the print output can be
#' divided into 4 parts:
#'
#' * CLASS: IDD class name of current object in format `<IddObject: CLASS>`.
#' * MEMO: brief description of the IDD class.
#' * PROPERTY: properties of the IDD class, including name of group it
#' belongs to, whether it is an unique or required class and current
#' total fields. The fields may increase if the IDD class is
#' extensible, such as `Branch`, `ZoneList` and etc.
#' * FIELDS: fields of current IDD class. Required fields are marked
#' with stars (`*`). If the class is extensible, only the first
#' extensible group will be printed and two ellipses will be shown at
#' the bottom. Fields in the extensible group will be marked with an
#' arrow down surrounded by angle brackets (`<v>`).
#'
#' @param brief If `TRUE`, only class name part is printed. Default:
#' `FALSE`.
#'
#' @return The `IddObject` object itself, invisibly.
#'
#' @examples
#' \dontrun{
#' surf
#'
#' surf$print(brief = TRUE)
#' }
#'
print = function (brief = FALSE) {
    # Thin binding: printing happens in the file-level helper.
    iddobj_print(self, private, brief)
}
# }}}
),
private = list(
# PRIVATE FIELDS {{{
# m_parent: presumably the parent Idd (R6) object this IddObject belongs to
m_parent = NULL,
# m_class_id: integer id of the IDD class this object wraps
m_class_id = NULL,
# }}}
# PRIVATE FUNCTIONS {{{
# Return the private environment of the parent object
idd_priv = function () {
._get_private(private$m_parent)
},
# Return the parent's IDD data environment (group/class/field tables)
idd_env = function () {
.subset2(._get_private(private$m_parent), "m_idd_env")
}
# }}}
)
)
# }}}
# iddobj_version {{{
# Version of the parent Idd this class definition comes from.
iddobj_version <- function (self, private) {
private$idd_priv()$m_version
}
# }}}
# iddobj_parent {{{
# The parent Idd object itself.
iddobj_parent <- function (self, private) {
private$m_parent
}
# }}}
# iddobj_group_index {{{
# Integer id of the group this class belongs to (lookup on class_id).
iddobj_group_index <- function (self, private) {
private$idd_env()$class[class_id == private$m_class_id, group_id]
}
# }}}
# iddobj_group_name {{{
# Group name resolved via a keyed data.table join on the group table.
iddobj_group_name <- function (self, private) {
grp_id <- iddobj_group_index(self, private)
private$idd_env()$group[J(grp_id), on = "group_id", group_name]
}
# }}}
# iddobj_class_index {{{
# Integer id of this class.
iddobj_class_index <- function (self, private) {
private$m_class_id
}
# }}}
# iddobj_class_name {{{
# Class name resolved via a keyed join on the class table.
iddobj_class_name <- function (self, private) {
private$idd_env()$class[J(private$m_class_id), on = "class_id", class_name]
}
# }}}
# iddobj_class_data {{{
# The full one-row class record; all accessors below pull columns from it.
iddobj_class_data <- function (self, private) {
private$idd_env()$class[J(private$m_class_id), on = "class_id"]
}
# }}}
# iddobj_class_format {{{
# Class output format string.
iddobj_class_format <- function (self, private) {
iddobj_class_data(self, private)$format
}
# }}}
# iddobj_min_fields {{{
# Minimum number of fields required for an object of this class.
iddobj_min_fields <- function (self, private) {
iddobj_class_data(self, private)$min_fields
}
# }}}
# iddobj_num_fields {{{
# Current total number of fields defined for this class.
iddobj_num_fields <- function (self, private) {
iddobj_class_data(self, private)$num_fields
}
# }}}
# iddobj_memo {{{
# Memo (description) text; stored as a list column, hence the [[1L]].
iddobj_memo <- function (self, private) {
iddobj_class_data(self, private)$memo[[1L]]
}
# }}}
# iddobj_num_extensible {{{
# Number of fields in one extensible group (0 when not extensible).
iddobj_num_extensible <- function (self, private) {
iddobj_class_data(self, private)$num_extensible
}
# }}}
# iddobj_first_extensible_index {{{
# Index of the first extensible field.
iddobj_first_extensible_index <- function (self, private) {
iddobj_class_data(self, private)$first_extensible
}
# }}}
# iddobj_extensible_group_num {{{
# Current number of extensible groups.
iddobj_extensible_group_num <- function (self, private) {
iddobj_class_data(self, private)$num_extensible_group
}
# }}}
# iddobj_add_extensible_group {{{
iddobj_add_extensible_group <- function (self, private, num) {
    # Add `num` extensible field group(s) to this class definition in the
    # parent IDD environment, then return `self` for method chaining.
    #
    # Fix: the original assigned `._get_private(private$m_parent)$m_idd_env`
    # to `iddenv` and immediately overwrote it on the next line -- a dead
    # store, since `private$idd_env()` resolves to the same environment.
    # The redundant lookup is removed.
    assert(is_count(num))
    # NOTE(review): the return value is discarded here -- the IDD tables are
    # presumably updated by reference inside add_idd_extensible_group();
    # confirm against its definition.
    add_idd_extensible_group(private$idd_env(), private$m_class_id, num, strict = TRUE)
    verbose_info(num, " extensible group(s) added")
    self
}
# }}}
# iddobj_del_extensible_group {{{
iddobj_del_extensible_group <- function (self, private, num) {
    # Delete `num` extensible field group(s) from this class definition in
    # the parent IDD environment, then return `self` for method chaining.
    #
    # Fix: the original assigned `._get_private(private$m_parent)$m_idd_env`
    # to `iddenv` and immediately overwrote it on the next line -- a dead
    # store, since `private$idd_env()` resolves to the same environment.
    # The redundant lookup is removed.
    assert(is_count(num))
    # NOTE(review): the return value is discarded here -- the IDD tables are
    # presumably updated by reference inside del_idd_extensible_group();
    # confirm against its definition.
    del_idd_extensible_group(private$idd_env(), private$m_class_id, num, strict = TRUE)
    verbose_info(num, " extensible group(s) deleted")
    self
}
# }}}
# iddobj_has_name {{{
# TRUE if objects of this class carry a name field.
iddobj_has_name <- function (self, private) {
iddobj_class_data(self, private)$has_name
}
# }}}
# iddobj_is_required {{{
# TRUE if at least one object of this class is required in a model.
iddobj_is_required <- function (self, private) {
iddobj_class_data(self, private)$required_object
}
# }}}
# iddobj_is_unique {{{
# TRUE if only a single object of this class may exist in a model.
iddobj_is_unique <- function (self, private) {
iddobj_class_data(self, private)$unique_object
}
# }}}
# iddobj_is_extensible {{{
# Extensible classes have a positive extensible-group size.
iddobj_is_extensible <- function (self, private) {
iddobj_class_data(self, private)$num_extensible > 0L
}
# }}}
# iddobj_field_data {{{
iddobj_field_data <- function (self, private, which = NULL, property = NULL, underscore = FALSE) {
    # Fetch field data of this class from the parent IDD environment.
    # `which`: field indices/names; NULL means all currently defined fields.
    # `property`: extra field columns to include; `underscore`: match names
    # in underscore style. `no_ext = TRUE` prevents extensible expansion.
    #
    # Idiom fix: `if (is.null(which)) TRUE else FALSE` collapsed to the
    # equivalent `is.null(which)`.
    all <- is.null(which)
    get_idd_field(private$idd_env(), private$m_class_id, which,
        property, all = all, underscore = underscore, no_ext = TRUE
    )
}
# }}}
# iddobj_field_name {{{
# Names of the specified fields; with unit = TRUE the unit string is
# appended, honoring the requested SI/IP display mode.
iddobj_field_name <- function (self, private, index = NULL, unit = FALSE, in_ip = eplusr_option("view_in_ip")) {
if (!is.null(index)) assert(are_count(index))
if (unit) {
# Temporarily switch the global view_in_ip option when it differs from
# the requested mode. Restoring with `!in_ip` is valid because the
# option is logical and the guard ensured the current value differs.
if (eplusr_option("view_in_ip") != in_ip) {
eplusr_option(view_in_ip = in_ip)
on.exit(eplusr_option(view_in_ip = !in_ip), add = TRUE)
}
res <- format_name(iddobj_field_data(self, private, index, c("units", "ip_units")))
} else {
res <- iddobj_field_data(self, private, index)$field_name
}
res
}
# }}}
# iddobj_field_index {{{
# Indices of the fields matching `name` (underscore-style matching).
iddobj_field_index <- function (self, private, name = NULL) {
if (!is.null(name)) assert(is.character(name))
iddobj_field_data(self, private, name, underscore = TRUE)$field_index
}
# }}}
# iddobj_field_type {{{
# IDD type strings of the specified fields.
iddobj_field_type <- function (self, private, which = NULL) {
iddobj_field_data(self, private, which, "type", underscore = TRUE)$type
}
# }}}
# iddobj_field_note {{{
# Note text of the specified fields.
iddobj_field_note <- function (self, private, which = NULL) {
iddobj_field_data(self, private, which, "note", underscore = TRUE)$note
}
# }}}
# iddobj_field_unit {{{
iddobj_field_unit <- function (self, private, which = NULL, in_ip = eplusr_option("view_in_ip")) {
    # Units of the specified fields: SI by default, IP when requested.
    fld <- iddobj_field_data(self, private, which, c("units", "ip_units"), underscore = TRUE)
    if (in_ip) fld$ip_units else fld$units
}
# }}}
# iddobj_field_default {{{
# Default values of the specified fields as a list, converted to IP units
# on request. Columns are renamed to the value_chr/value_num pair that
# get_value_list() expects.
iddobj_field_default <- function (self, private, which = NULL, in_ip = eplusr_option("view_in_ip")) {
fld <- iddobj_field_data(self, private, which, underscore = TRUE,
c("default_chr", "default_num", "units", "ip_units", "type_enum")
)
if (in_ip) fld <- field_default_to_unit(fld, "si", "ip")
setnames(fld, c("default_chr", "default_num"), c("value_chr", "value_num"))
get_value_list(fld)
}
# }}}
# iddobj_field_choice {{{
# Choice lists of the specified fields.
iddobj_field_choice <- function (self, private, which = NULL) {
iddobj_field_data(self, private, which, "choice", underscore = TRUE)$choice
}
# }}}
# iddobj_field_range {{{
# Value ranges of the specified fields as a list of ranger objects.
iddobj_field_range <- function (self, private, which = NULL) {
fld <- iddobj_field_data(self, private, which, c("type_enum", "has_range",
"minimum", "lower_incbounds", "maximum", "upper_incbounds"), underscore = TRUE)
# set limits to Inf for numeric values that do not have ranges
fld[type_enum < IDDFIELD_TYPE$choice & has_range == FALSE,
`:=`(maximum = Inf, minimum = -Inf)]
# build one ranger per field; by = field_id keeps one result per row
fld[, `:=`(range = list(ranger(minimum, lower_incbounds, maximum, upper_incbounds))), by = field_id]
fld$range
}
# }}}
# iddobj_field_relation {{{
iddobj_field_relation <- function (self, private, which = NULL, direction = c("all", "ref_to", "ref_by"),
    class = NULL, group = NULL, depth = 0L, keep = FALSE) {
    # Reference relations of this class (which = NULL) or of specific
    # fields, in the requested direction. The two cases of the original
    # if/else differed only in the class-id/field-id pair passed to
    # get_iddobj_relation(), so those ids are computed up front and a
    # single call is made.
    direction <- match.arg(direction)
    if (is.null(which)) {
        cls_id <- private$m_class_id
        fld_id <- NULL
    } else {
        cls_id <- NULL
        fld_id <- get_idd_field(private$idd_env(), private$m_class_id, which)$field_id
    }
    get_iddobj_relation(private$idd_env(), cls_id, fld_id, name = TRUE,
        direction = direction, depth = depth, keep_all = keep,
        class = class, group = group)
}
# }}}
# iddobj_field_possible {{{
# All possible values (auto-fields, defaults, choices, ranges) per field.
iddobj_field_possible <- function (self, private, which = NULL) {
fld <- iddobj_field_data(self, private, which, FIELD_COLS$property, underscore = TRUE)
get_iddobj_possible(private$idd_env(), field_id = fld$field_id)
}
# }}}
# iddobj_is_valid_field_num {{{
# Whether `num` is an acceptable total field count for this class.
# Vectorized over `num`, hence the element-wise `|` / `&` operators.
# NOTE(review): for extensible classes condition 3 also rejects counts
# *below* num-fields that do not land on a whole extensible group; the
# inline comment only mentions counts larger than num-fields -- confirm
# which behavior is intended.
iddobj_is_valid_field_num <- function (self, private, num) {
assert(are_count(num))
cls <- iddobj_class_data(self, private)
!(
# it should be FALSE when num is
# 1. less than min-fields OR
cls$min_fields > num |
# 2. larger than num-fields but not extensible OR
(cls$num_extensible == 0L & num > cls$num_fields) |
# 3. larger than num-fields and is extensible but not have full
# extensible groups
(cls$num_extensible > 0L &
((num - cls$num_fields) %% cls$num_extensible) != 0L
)
)
}
# }}}
# iddobj_is_extensible_index {{{
iddobj_is_extensible_index <- function (self, private, index) {
    # For each index, TRUE when it falls inside the extensible region of
    # this class; all-FALSE for non-extensible classes.
    assert(are_count(index))
    cls <- iddobj_class_data(self, private)
    if (!cls$num_extensible) {
        rep(FALSE, length(index))
    } else {
        index >= cls$first_extensible
    }
}
# }}}
# iddobj_is_valid_field_name {{{
# Whether each `name` matches a field name of this class. With
# strict = FALSE both the display name and its lower_name() form are
# accepted.
# NOTE(review): `name` itself is not normalized before comparison --
# presumably callers pass either the exact or the lowered form; confirm.
iddobj_is_valid_field_name <- function (self, private, name, strict = FALSE) {
fld <- iddobj_field_data(self, private, underscore = TRUE)
name <- as.character(name)
if (isTRUE(strict)) {
name %chin% fld$field_name
} else {
name %chin% fld$field_name | name %chin% lower_name(fld$field_name)
}
}
# }}}
# iddobj_is_valid_field_index {{{
# Whether each index does not exceed the current number of fields.
iddobj_is_valid_field_index <- function (self, private, index) {
assert(are_count(index))
index <= iddobj_class_data(self, private)$num_fields
}
# }}}
# iddobj_is_autosizable_field {{{
# TRUE for fields flagged autosizable in the IDD.
iddobj_is_autosizable_field <- function (self, private, which) {
iddobj_field_data(self, private, which, "autosizable", underscore = TRUE)$autosizable
}
# }}}
# iddobj_is_autocalculatable_field {{{
# TRUE for fields flagged autocalculatable in the IDD.
iddobj_is_autocalculatable_field <- function (self, private, which) {
iddobj_field_data(self, private, which, "autocalculatable", underscore = TRUE)$autocalculatable
}
# }}}
# iddobj_is_numeric_field {{{
# TRUE for integer or real fields.
iddobj_is_numeric_field <- function (self, private, which) {
iddobj_field_type(self, private, which) %chin% c("integer", "real")
}
# }}}
# iddobj_is_integer_field {{{
# TRUE for integer fields only.
iddobj_is_integer_field <- function (self, private, which) {
iddobj_field_type(self, private, which) == "integer"
}
# }}}
# iddobj_is_real_field {{{
# TRUE for real fields only.
iddobj_is_real_field <- function (self, private, which) {
iddobj_field_type(self, private, which) == "real"
}
# }}}
# iddobj_is_required_field {{{
# TRUE for fields flagged as required in the IDD.
iddobj_is_required_field <- function (self, private, which) {
iddobj_field_data(self, private, which, "required_field", underscore = TRUE)$required_field
}
# }}}
# iddobj_has_ref {{{
# Per-field flags of whether a field participates in a reference relation.
# type = "ref_to": the field points at other fields; "ref_by": the field
# is the source of other fields' references; "all": either direction.
iddobj_has_ref <- function (self, private, which = NULL, class = NULL, group = NULL,
depth = 0L, type = c("all", "ref_to", "ref_by")) {
type <- match.arg(type)
if (is.null(which)) {
# whole class: query relations by class id
rel <- get_iddobj_relation(private$idd_env(), private$m_class_id,
class = class, group = group, depth = depth, direction = type)
} else {
# specific fields: resolve them to field ids first
fld <- get_idd_field(private$idd_env(), private$m_class_id, which)
rel <- get_iddobj_relation(private$idd_env(), NULL, fld$field_id,
class = class, group = group, depth = depth, direction = type)
}
# A field "has" a relation when its group of rows in the relation table
# contains at least one non-NA counterpart id.
if (type == "all") {
rel$ref_to[, list(.N > 0 && any(!is.na(src_field_id))), by = "field_id"]$V1 |
rel$ref_by[, list(.N > 0 && any(!is.na(field_id))), by = "src_field_id"]$V1
} else if (type == "ref_to") {
rel$ref_to[, list(.N > 0 && any(!is.na(src_field_id))), by = "field_id"]$V1
} else {
rel$ref_by[, list(.N > 0 && any(!is.na(field_id))), by = "src_field_id"]$V1
}
}
# }}}
# iddobj_has_ref_by {{{
# Convenience wrapper: referred-by direction only.
iddobj_has_ref_by <- function (self, private, which = NULL, class = NULL, group = NULL, depth = 0L) {
iddobj_has_ref(self, private, which, class = class, group = group, depth = depth, type = "ref_by")
}
# }}}
# iddobj_has_ref_to {{{
# Convenience wrapper: refer-to direction only.
iddobj_has_ref_to <- function (self, private, which = NULL, class = NULL, group = NULL, depth = 0L) {
iddobj_has_ref(self, private, which, class = class, group = group, depth = depth, type = "ref_to")
}
# }}}
# iddobj_to_table {{{
# Basic class data (class/index/field columns) as a data.table.
iddobj_to_table <- function (self, private, all = FALSE) {
get_iddobj_table(private$idd_env(), private$m_class_id, all)
}
# }}}
# iddobj_to_string {{{
# Text (IDF) representation of an empty object of this class.
iddobj_to_string <- function (self, private, comment = NULL, leading = 4L, sep_at = 29L, all = FALSE) {
get_iddobj_string(private$idd_env(), private$m_class_id, comment = comment,
leading = leading, sep_at = sep_at, all = all
)
}
# }}}
# iddobj_print {{{
iddobj_print <- function (self, private, brief = FALSE) {
    # Print the IddObject in four sections: class header, memo, properties
    # and fields. With brief = TRUE only the class header is printed.
    #
    # Fix: the roxygen contract of $print() states it returns the IddObject
    # itself, invisibly, but the original only did so in the `brief` branch
    # and otherwise returned the value of the last cat_line() call. An
    # explicit invisible(self) is now returned on every path
    # (str.IddObject relies on the return value of $print()).
    # CLASS {{{
    cls <- iddobj_class_data(self, private)
    cli::cat_line(paste0("<IddObject: ", surround(cls$class_name), ">"))
    if (brief) return(invisible(self))
    # memo {{{
    cli::cat_rule("MEMO")
    if (is.null(cls$memo[[1L]])) {
        cli::cat_line(" <No Memo>\n")
    } else {
        cli::cat_line(" \"", paste0(cls$memo[[1L]], collapse = "\n"), "\"\n")
    }
    # }}}
    # property {{{
    cli::cat_rule("PROPERTIES")
    grp <- private$idd_env()$group[J(cls$group_id), on = "group_id", group_name]
    cli::cat_line(" * ", c(
        paste0("Group: ", surround(grp)),
        paste0("Unique: ", cls$unique_object),
        paste0("Required: ", cls$required_object),
        paste0("Total fields: ", cls$num_fields)
    ))
    cli::cat_line()
    # }}}
    # }}}
    # FIELD {{{
    cli::cat_rule("FIELDS")
    # calculate number of fields to print
    if (cls$num_extensible) {
        # only print the first extensible group
        set(cls, NULL, "num_print",
            pmax(cls$last_required, cls$first_extensible + cls$num_extensible - 1L)
        )
    } else {
        set(cls, NULL, "num_print", cls$num_fields)
    }
    fld <- iddobj_field_data(self, private, seq_len(cls$num_print), c("extensible_group", "required_field"))
    set(fld, NULL, "name", format_name(fld, prefix = FALSE))
    set(fld, NULL, "index", format_index(fld, required = TRUE, pad_char = "0"))
    set(fld, NULL, "ext", "")
    # mark extensible fields with a bracketed down arrow
    fld[extensible_group > 0L, ext := paste0(" <", cli::symbol$arrow_down, ">")]
    cli::cat_line(" ", fld$index, ": ", fld$name, fld$ext)
    if (cls$num_extensible) cli::cat_line(" ......")
    # }}}
    invisible(self)
}
# }}}
#' Format an IddObject
#'
#' Format an [IddObject] into a string of an empty object of current class.
#' It is formatted exactly the same as in IDF Editor.
#'
#' @param x An [IddObject] object.
#' @param all If `TRUE`, all fields in current class are returned, otherwise
#' only minimum fields are returned.
#' @param comment A character vector to be used as comments of returned string
#' format object. If `NULL`, no comments are inserted. Default: `NULL`.
#' @param leading Leading spaces added to each field. Default: `4`.
#' @param sep_at The character width to separate value string and field string.
#' Default: `29` which is the same as IDF Editor.
#' @param ... Further arguments passed to or from other methods.
#' @return A single length character vector.
#' @examples
#' \dontrun{
#' cat(format(use_idd(8.8, download = "auto")$Material, leading = 0))
#' }
#'
#' @export
# format.IddObject {{{
format.IddObject <- function (x, comment = NULL, leading = 4L, sep_at = 29L, all = FALSE, ...) {
    # Collapse the character vector produced by $to_string() into a single
    # newline-joined string.
    lines <- x$to_string(comment = comment, leading = leading, sep_at = sep_at, all = all)
    paste0(lines, collapse = "\n")
}
# }}}
#' Coerce an IddObject into a Character Vector
#'
#' Coerce an [IddObject] into an empty object of current class in a character
#' vector format. It is formatted exactly the same as in IDF Editor.
#'
#' @inheritParams format.IddObject
#' @return A character vector.
#' @examples
#' \dontrun{
#' as.character(use_idd(8.8, download = "auto")$Material, leading = 0)
#' }
#'
#' @export
# as.character.IddObject {{{
# S3 method: delegate to the R6 $to_string() method.
as.character.IddObject <- function (x, comment = NULL, leading = 4L, sep_at = 29L, all = FALSE, ...) {
x$to_string(comment = comment, leading = leading, sep_at = sep_at, all = all)
}
# }}}
#' @export
# str.IddObject {{{
# S3 method: str() simply prints the object (optionally brief).
str.IddObject <- function (object, brief = FALSE, ...) {
object$print(brief)
}
# }}}
#' @export
# ==.IddObject {{{
# Two IddObjects are equal when they come from the very same Idd instance
# (compared via the parent's log uuid) and wrap the same class id.
`==.IddObject` <- function (e1, e2) {
# anything that is not an IddObject can never be equal
if (!is_iddobject(e2)) return(FALSE)
identical(
._get_private(._get_private(e1)$m_parent)$m_log$uuid,
._get_private(._get_private(e2)$m_parent)$m_log$uuid
) &&
identical(._get_private(e1)$m_class_id, ._get_private(e2)$m_class_id)
}
#' @export
`!=.IddObject` <- function (e1, e2) {
    # Logical negation of the `==.IddObject` method.
    !`==.IddObject`(e1, e2)
}
# }}}
|
library(tidyverse) # CRAN v1.3.0
library(lubridate) # CRAN v1.7.9
library(jsonlite) # CRAN v1.7.0
library(textfeatures) # CRAN v0.3.3
library(emo) # [github::hadley/emo] v0.0.0.9000
source("R/count_utils.R")
#######################################################################
# #
# Import data Telegram chat from JSON file #
# #
#######################################################################
# Parse the exported Telegram chat; flatten = TRUE collapses nested data
# frames into plain columns.
tg <- fromJSON("data-raw/result.json", flatten = TRUE)
tg_df <- tg$messages
glimpse(tg_df)
View(tg_df)
#######################################################################
# #
# Data Preparation #
# #
#######################################################################
# Drop messages sent by "Sri" while keeping rows that have no sender.
tg_df <- tg_df %>%
filter(is.na(from) | from != "Sri")
dim(tg_df)
# Number of distinct senders remaining in the chat.
tg_df %>%
distinct(from) %>%
count()
#################### Function to tidy the message #####################
tidy_text <- function(text) {
    # Flatten a Telegram "text" entry -- either a plain string or a list
    # mixing strings and entity records such as list(type = ..., text = ...)
    # -- into a single string, dropping the "type" fields.
    flat <- unlist(text)
    if (is.list(flat)) {
        # Elements that resisted one round of flattening: flatten again,
        # then join everything that is not a "type" field.
        parts <- unlist(flat)
        paste(parts[names(parts) != "type"], collapse = " ")
    } else if (length(flat) > 1) {
        # Already an atomic vector with entity names attached.
        paste(flat[names(flat) != "type"], collapse = " ")
    } else {
        # Single plain string (or empty input): return unchanged.
        flat
    }
}
#######################################################################
# #
# Feature Engineering #
# #
#######################################################################
# Derive analysis columns: normalized message type, flattened text,
# calendar breakdowns, a single `media` category, reply/emoji flags and
# simple text features.
tg_clean_df <- tg_df %>%
mutate(type = if_else(type != "message", action, type),
text = map(text, tidy_text) %>% unlist(),
datetime = as_datetime(date),
date = as_date(date),
hour = hour(datetime),
day = wday(date, week_start = 1, label = TRUE),
month = month(date, label = TRUE),
year = year(date),
# case_when is evaluated top to bottom; the "File not" branches
# classify media whose file was excluded from the export.
media = case_when(is.na(file) & !is.na(photo) ~ "photo",
str_detect(file, "File not") & str_detect(mime_type, "pdf") ~ "pdf",
str_detect(file, "File not") & str_detect(mime_type, "video") & media_type == "animation" ~ "animation",
str_detect(file, "File not") & str_detect(mime_type, "video") & media_type == "video_file" ~ "video",
str_detect(file, "File not") & str_detect(mime_type, "application") ~ "others",
str_detect(file, "stickers") ~ "sticker",
str_detect(mime_type, "video") ~ "video",
str_detect(mime_type, "text") ~ "file",
str_detect(mime_type, "audio") ~ "audio",
str_detect(mime_type, "jpeg") | str_detect(mime_type, "jpg") | str_detect(mime_type, "png") ~ "image",
str_detect(mime_type, "image") ~ str_remove_all(mime_type, "image/"),
TRUE ~ mime_type
),
is_reply = !is.na(reply_to_message_id),
any_emoji = emo::ji_detect(text) | !is.na(sticker_emoji),
is_emoji_only = !is.na(sticker_emoji),
emoji = emo::ji_extract_all(text),
n_char = n_chars(text),
n_url = n_urls(text)
) %>%
select(id:from, datetime, media, sticker_emoji, emoji, hour:is_emoji_only, n_char:n_url)
glimpse(tg_clean_df)
View(tg_clean_df)
| /R/telegram_group_chat.R | no_license | aephidayatuloh/tg | R | false | false | 3,843 | r | library(tidyverse) # CRAN v1.3.0
library(lubridate) # CRAN v1.7.9
library(jsonlite) # CRAN v1.7.0
library(textfeatures) # CRAN v0.3.3
library(emo) # [github::hadley/emo] v0.0.0.9000
source("R/count_utils.R")
#######################################################################
# #
# Import data Telegram chat from JSON file #
# #
#######################################################################
tg <- fromJSON("data-raw/result.json", flatten = TRUE)
tg_df <- tg$messages
glimpse(tg_df)
View(tg_df)
#######################################################################
# #
# Data Preparation #
# #
#######################################################################
tg_df <- tg_df %>%
filter(is.na(from) | from != "Sri")
dim(tg_df)
tg_df %>%
distinct(from) %>%
count()
#################### Function to tidy the message #####################
tidy_text <- function(text){
if(is.list(unlist(text))){
x <- unlist(unlist(text))
paste(x[names(x) != "type"], collapse = " ")
} else if(length(unlist(text)) > 1){
x <- unlist(text)
paste(x[names(x) != "type"], collapse = " ")
} else {
unlist(text)
}
}
#######################################################################
# #
# Feature Engineering #
# #
#######################################################################
tg_clean_df <- tg_df %>%
mutate(type = if_else(type != "message", action, type),
text = map(text, tidy_text) %>% unlist(),
datetime = as_datetime(date),
date = as_date(date),
hour = hour(datetime),
day = wday(date, week_start = 1, label = TRUE),
month = month(date, label = TRUE),
year = year(date),
media = case_when(is.na(file) & !is.na(photo) ~ "photo",
str_detect(file, "File not") & str_detect(mime_type, "pdf") ~ "pdf",
str_detect(file, "File not") & str_detect(mime_type, "video") & media_type == "animation" ~ "animation",
str_detect(file, "File not") & str_detect(mime_type, "video") & media_type == "video_file" ~ "video",
str_detect(file, "File not") & str_detect(mime_type, "application") ~ "others",
str_detect(file, "stickers") ~ "sticker",
str_detect(mime_type, "video") ~ "video",
str_detect(mime_type, "text") ~ "file",
str_detect(mime_type, "audio") ~ "audio",
str_detect(mime_type, "jpeg") | str_detect(mime_type, "jpg") | str_detect(mime_type, "png") ~ "image",
str_detect(mime_type, "image") ~ str_remove_all(mime_type, "image/"),
TRUE ~ mime_type
),
is_reply = !is.na(reply_to_message_id),
any_emoji = emo::ji_detect(text) | !is.na(sticker_emoji),
is_emoji_only = !is.na(sticker_emoji),
emoji = emo::ji_extract_all(text),
n_char = n_chars(text),
n_url = n_urls(text)
) %>%
select(id:from, datetime, media, sticker_emoji, emoji, hour:is_emoji_only, n_char:n_url)
glimpse(tg_clean_df)
View(tg_clean_df)
|
library(pheatmap)
library(mclust)
library(data.table)
### get parameters
# NOTE(review): the five command-line arguments below are parsed but never
# used -- the input files are hard-coded a few lines further down. Confirm
# whether the CLI interface is still intended.
args = commandArgs(trailingOnly=TRUE)
signal_mat_file = args[1]
signal_mat_tp_file = args[2]
qPCR_mat_file = args[3]
qPCR_mat_tp_file = args[4]
output_file_name = args[5]
# hard-coded inputs: raw background-subtracted signal and its qPCR
# linear-model normalized counterpart
signal_mat_file_raw = 'ctcf.qPCR.randbg.blackrm.idsort.sigmat.bgsub.txt'
signal_mat_file_norm = 'ctcf.qPCR.randbg.blackrm.idsort.sigmat.bgsub.LMqPCRnorm.txt'
# column indices of the six replicate pairs to compare
used_replicates = c(5,6, 8,9, 10,11, 12,13, 15,17, 18,20)
### function
getR2 = function(x1, x2) {
    # Coefficient of determination of x2 against x1: 1 - SS_res/SS_tot.
    # Means are used instead of sums, which is equivalent because both
    # are taken over the same number of points.
    ss_res <- mean((x1 - x2)^2)
    ss_tot <- mean((x1 - mean(x1))^2)
    1 - ss_res / ss_tot
}
### get input mat
chipseq_sig_mat_raw = as.data.frame(fread(signal_mat_file_raw))
chipseq_sig_mat_norm = as.data.frame(fread(signal_mat_file_norm))
# select rows BEFORE the column subset below: column 4 of the full table
# (presumably a region id/name column containing "ctcf" -- confirm) is
# dropped by the subset on the next line
used_row = (grepl("ctcf", chipseq_sig_mat_raw[,4]))
chipseq_sig_mat_raw = chipseq_sig_mat_raw[,used_replicates]
chipseq_sig_mat_norm = chipseq_sig_mat_norm[,used_replicates]
r2_mat = c()
### raw R2
# odd indices 1,3,...,11 pair each replicate column with its neighbor
r2_vec = c()
for (i in seq(1,11,2)){
r2_tmp = getR2(chipseq_sig_mat_raw[used_row,i],chipseq_sig_mat_raw[used_row,i+1])
print(r2_tmp)
r2_vec = c(r2_vec, r2_tmp)
}
r2_mat = cbind(r2_mat, r2_vec)
### qPCR R2
# same replicate pairing on the qPCR-normalized matrix
r2_vec = c()
for (i in seq(1,11,2)){
r2_tmp = getR2(chipseq_sig_mat_norm[used_row,i],chipseq_sig_mat_norm[used_row,i+1])
print(r2_tmp)
r2_vec = c(r2_vec, r2_tmp)
}
r2_mat = cbind(r2_mat, r2_vec)
### BGnorm
# scale the second replicate by its column mean relative to the mean of
# columns 11/12 of the subsetted matrix -- NOTE(review): presumably the
# reference replicate pair; confirm the intended reference columns
r2_vec = c()
for (i in seq(1,11,2)){
r2_tmp = getR2(chipseq_sig_mat_raw[used_row,i],chipseq_sig_mat_raw[used_row,i+1]/mean(chipseq_sig_mat_raw[,i+1])*mean(chipseq_sig_mat_raw[,11]/2+chipseq_sig_mat_raw[,12]/2))
print(r2_tmp)
r2_vec = c(r2_vec, r2_tmp)
}
r2_mat = cbind(r2_mat, r2_vec)
colnames(r2_mat) = c('RAW', 'qPCR_LMnorm', 'BGnorm')
rownames(r2_mat) = c('0A', '4A', '6A', '12A', '18A', '24A')
write.table(r2_mat, 'between_replicates_R2.txt', quote=F, sep='\t', col.names=T, row.names=T)
library(ggplot2)
library(RColorBrewer)
# Reshape the R2 matrix into long (Method, R2) format for ggplot.
x_2c = c()
# column order c(1,3,2) puts the methods in plotting order:
# RAW, BGnorm, qPCR_LMnorm
for (i in c(1,3,2)){
x1 = as.data.frame(rep(colnames(r2_mat)[i], dim(r2_mat)[1]))
x2 = r2_mat[,i]
x12 = cbind(x1,x2)
colnames(x12) = c('Method','R2')
x_2c = rbind(x_2c, x12)
}
### all R2
pdf('R2_boxplot.pdf', width=5, height=4)
x_2c$R2 = as.numeric(x_2c$R2)
# keep factor levels in insertion order so the x axis follows the loop order
x_2c$Method = factor(x_2c$Method, levels = unique(x_2c$Method),ordered = TRUE)
p = ggplot(data = x_2c, aes(x=Method, y=(R2)))
p = p + geom_boxplot(aes(fill = Method))
p = p + geom_point(aes(y=(R2), group=Method), position = position_dodge(width=0.1))
p = p + scale_fill_manual(values=rep(c('gray', 'orange1', 'dodgerblue1', 'dodgerblue'),each = 1))
#p = p + scale_fill_brewer(palette="YlGnBu", direction=-1)
#p = p + theme(panel.background = element_blank(), panel.border = element_rect(colour = "black", fill=NA, size=1.5))
#p = p + theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust=1, size = 10))
p = p + ylim(0, 1)
plot(p)
dev.off()
| /02_22_2020_pipeline/get_replicates_R2.R | no_license | guanjue/CTCF_Auxin_clustering | R | false | false | 2,791 | r | library(pheatmap)
library(mclust)
library(data.table)
### get parameters
args = commandArgs(trailingOnly=TRUE)
signal_mat_file = args[1]
signal_mat_tp_file = args[2]
qPCR_mat_file = args[3]
qPCR_mat_tp_file = args[4]
output_file_name = args[5]
signal_mat_file_raw = 'ctcf.qPCR.randbg.blackrm.idsort.sigmat.bgsub.txt'
signal_mat_file_norm = 'ctcf.qPCR.randbg.blackrm.idsort.sigmat.bgsub.LMqPCRnorm.txt'
used_replicates = c(5,6, 8,9, 10,11, 12,13, 15,17, 18,20)
### function
getR2 = function(x1,x2){
r2 = 1-mean((x1-x2)^2)/mean((x1-mean(x1))^2)
return(r2)
}
### get input mat
chipseq_sig_mat_raw = as.data.frame(fread(signal_mat_file_raw))
chipseq_sig_mat_norm = as.data.frame(fread(signal_mat_file_norm))
used_row = (grepl("ctcf", chipseq_sig_mat_raw[,4]))
chipseq_sig_mat_raw = chipseq_sig_mat_raw[,used_replicates]
chipseq_sig_mat_norm = chipseq_sig_mat_norm[,used_replicates]
r2_mat = c()
### raw R2
r2_vec = c()
for (i in seq(1,11,2)){
r2_tmp = getR2(chipseq_sig_mat_raw[used_row,i],chipseq_sig_mat_raw[used_row,i+1])
print(r2_tmp)
r2_vec = c(r2_vec, r2_tmp)
}
r2_mat = cbind(r2_mat, r2_vec)
### qPCR R2
r2_vec = c()
for (i in seq(1,11,2)){
r2_tmp = getR2(chipseq_sig_mat_norm[used_row,i],chipseq_sig_mat_norm[used_row,i+1])
print(r2_tmp)
r2_vec = c(r2_vec, r2_tmp)
}
r2_mat = cbind(r2_mat, r2_vec)
### BGnorm
r2_vec = c()
for (i in seq(1,11,2)){
r2_tmp = getR2(chipseq_sig_mat_raw[used_row,i],chipseq_sig_mat_raw[used_row,i+1]/mean(chipseq_sig_mat_raw[,i+1])*mean(chipseq_sig_mat_raw[,11]/2+chipseq_sig_mat_raw[,12]/2))
print(r2_tmp)
r2_vec = c(r2_vec, r2_tmp)
}
r2_mat = cbind(r2_mat, r2_vec)
colnames(r2_mat) = c('RAW', 'qPCR_LMnorm', 'BGnorm')
rownames(r2_mat) = c('0A', '4A', '6A', '12A', '18A', '24A')
write.table(r2_mat, 'between_replicates_R2.txt', quote=F, sep='\t', col.names=T, row.names=T)
library(ggplot2)
library(RColorBrewer)
x_2c = c()
for (i in c(1,3,2)){
x1 = as.data.frame(rep(colnames(r2_mat)[i], dim(r2_mat)[1]))
x2 = r2_mat[,i]
x12 = cbind(x1,x2)
colnames(x12) = c('Method','R2')
x_2c = rbind(x_2c, x12)
}
### all R2
pdf('R2_boxplot.pdf', width=5, height=4)
x_2c$R2 = as.numeric(x_2c$R2)
x_2c$Method = factor(x_2c$Method, levels = unique(x_2c$Method),ordered = TRUE)
p = ggplot(data = x_2c, aes(x=Method, y=(R2)))
p = p + geom_boxplot(aes(fill = Method))
p = p + geom_point(aes(y=(R2), group=Method), position = position_dodge(width=0.1))
p = p + scale_fill_manual(values=rep(c('gray', 'orange1', 'dodgerblue1', 'dodgerblue'),each = 1))
#p = p + scale_fill_brewer(palette="YlGnBu", direction=-1)
#p = p + theme(panel.background = element_blank(), panel.border = element_rect(colour = "black", fill=NA, size=1.5))
#p = p + theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust=1, size = 10))
p = p + ylim(0, 1)
plot(p)
dev.off()
|
# SVR
# Fit a support vector regression on the Level/Salary data and visualise
# the fitted curve at two resolutions.
setwd("/media/user/Edison/datascience@BGU/MyProjects/Udemy-Course/Project1")
# Importing the dataset
load("Polynomial.regression.RData")
# keep only the columns used below (Level and Salary)
dataset = dataset[2:3]
# Splitting the dataset into the Training set and Test set
# # install.packages('caTools')
# library(caTools)
# set.seed(123)
# split = sample.split(dataset$Salary, SplitRatio = 2/3)
# training_set = subset(dataset, split == TRUE)
# test_set = subset(dataset, split == FALSE)
# Feature Scaling
# training_set = scale(training_set)
# test_set = scale(test_set)
# Fitting SVR to the dataset
#install.packages('e1071')
library(e1071)
# epsilon-regression with an RBF kernel; e1071::svm() scales inputs by default
regressor = svm(formula = Salary ~ .,
data = dataset,
type = 'eps-regression',
kernel = 'radial')
# Predicting a new result
y_pred = predict(regressor, data.frame(Level = 6.5))
# Visualising the SVR results
# install.packages('ggplot2')
library(ggplot2)
ggplot() +
geom_point(aes(x = dataset$Level, y = dataset$Salary),
colour = 'red') +
geom_line(aes(x = dataset$Level, y = predict(regressor, newdata = dataset)),
colour = 'blue') +
ggtitle('Truth or Bluff (SVR)') +
xlab('Level') +
ylab('Salary')
# Visualising the SVR results (for higher resolution and smoother curve)
# install.packages('ggplot2')
library(ggplot2)
# denser Level grid for a smoother prediction curve
x_grid = seq(min(dataset$Level), max(dataset$Level), 0.1)
ggplot() +
geom_point(aes(x = dataset$Level, y = dataset$Salary),
colour = 'red') +
geom_line(aes(x = x_grid, y = predict(regressor, newdata = data.frame(Level = x_grid))),
colour = 'blue') +
ggtitle('Truth or Bluff (SVR)') +
xlab('Level') +
ylab('Salary')
| /Udemy-Machine Learning A-Z/support.vector.regression.R | no_license | getachew67/Data-Science-using-R | R | false | false | 1,656 | r | # SVR
setwd("/media/user/Edison/datascience@BGU/MyProjects/Udemy-Course/Project1")
# Importing the dataset
load("Polynomial.regression.RData")
dataset = dataset[2:3]
# Splitting the dataset into the Training set and Test set
# # install.packages('caTools')
# library(caTools)
# set.seed(123)
# split = sample.split(dataset$Salary, SplitRatio = 2/3)
# training_set = subset(dataset, split == TRUE)
# test_set = subset(dataset, split == FALSE)
# Feature Scaling
# training_set = scale(training_set)
# test_set = scale(test_set)
# Fitting SVR to the dataset
#install.packages('e1071')
library(e1071)
regressor = svm(formula = Salary ~ .,
data = dataset,
type = 'eps-regression',
kernel = 'radial')
# Predicting a new result
y_pred = predict(regressor, data.frame(Level = 6.5))
# Visualising the SVR results
# install.packages('ggplot2')
library(ggplot2)
ggplot() +
geom_point(aes(x = dataset$Level, y = dataset$Salary),
colour = 'red') +
geom_line(aes(x = dataset$Level, y = predict(regressor, newdata = dataset)),
colour = 'blue') +
ggtitle('Truth or Bluff (SVR)') +
xlab('Level') +
ylab('Salary')
# Visualising the SVR results (for higher resolution and smoother curve)
# install.packages('ggplot2')
library(ggplot2)
x_grid = seq(min(dataset$Level), max(dataset$Level), 0.1)
ggplot() +
geom_point(aes(x = dataset$Level, y = dataset$Salary),
colour = 'red') +
geom_line(aes(x = x_grid, y = predict(regressor, newdata = data.frame(Level = x_grid))),
colour = 'blue') +
ggtitle('Truth or Bluff (SVR)') +
xlab('Level') +
ylab('Salary')
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/columns.R
\name{update_column}
\alias{update_column}
\alias{move_column}
\title{Update a column in a GitHub project}
\usage{
update_column(column, name, project, repo, user, org, ...)
move_column(column, position, after, project, repo, user, org, ...)
}
\arguments{
\item{column}{(integer or string) Either the column number or name.}
\item{name}{(string, optional) The new name for the column.}
\item{project}{(integer or string) Either the project number or name.}
\item{repo}{(string, optional) The repository specified in the format:
\code{owner/repo}.}
\item{user}{(string, optional) The login of the user.}
\item{org}{(string, optional) The name of the organization.}
\item{...}{Parameters passed to \code{\link[=gh_request]{gh_request()}}.}
\item{position}{(string, optional) Either \code{"first"} or \code{"last"}.}
\item{after}{(integer or string, optional) An ID or name of another column to
place this one after.}
}
\value{
\code{update_column()} returns a list of the column properties.
\strong{Column Properties:}
\itemize{
\item \strong{id}: The ID of the column.
\item \strong{name}: The name given to the column.
\item \strong{created_at}: When it was created.
\item \strong{updated_at}: When it was last updated.
}
}
\description{
\code{update_column()} can be used to change the column name in a project in
GitHub. \code{move_column()} can be used to reorder the columns.
}
\details{
You can update a column associated with either a repository, user or
organization, by supplying them as an input, as long as you have appropriate
permissions.
You can move a column by either specifying the position, either \code{"first"} or
\code{"last"}, or by specifying another column to place it after.
For more details see the GitHub API documentation:
\itemize{
\item \url{https://docs.github.com/en/free-pro-team@latest/rest/reference/projects#update-an-existing-project-column}
\item \url{https://docs.github.com/en/free-pro-team@latest/rest/reference/projects#move-a-project-column}
}
}
\examples{
\dontrun{
# Update the name of a column in a repository project
update_column(
column = "Test column",
name = "Updated test column",
project = "Test project",
repo = "ChadGoymer/githapi"
)
# Move a column to the first position in a user's project
move_column(
name = "Test column",
position = "first",
user = "ChadGoymer"
)
# Move a column after another one in an organization's project
move_column(
name = "Test column",
after = "Test column 2",
org = "HairyCoos"
)
}
}
| /man/update_column.Rd | permissive | jfontestad/githapi | R | false | true | 2,650 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/columns.R
\name{update_column}
\alias{update_column}
\alias{move_column}
\title{Update a column in a GitHub project}
\usage{
update_column(column, name, project, repo, user, org, ...)
move_column(column, position, after, project, repo, user, org, ...)
}
\arguments{
\item{column}{(integer or string) Either the column number or name.}
\item{name}{(string, optional) The new name for the column.}
\item{project}{(integer or string) Either the project number or name.}
\item{repo}{(string, optional) The repository specified in the format:
\code{owner/repo}.}
\item{user}{(string, optional) The login of the user.}
\item{org}{(string, optional) The name of the organization.}
\item{...}{Parameters passed to \code{\link[=gh_request]{gh_request()}}.}
\item{position}{(string, optional) Either \code{"first"} or \code{"last"}.}
\item{after}{(integer or string, optional) An ID or name of another column to
place this one after.}
}
\value{
\code{update_column()} returns a list of the column properties.
\strong{Column Properties:}
\itemize{
\item \strong{id}: The ID of the column.
\item \strong{name}: The name given to the column.
\item \strong{created_at}: When it was created.
\item \strong{updated_at}: When it was last updated.
}
}
\description{
\code{update_column()} can be used to change the column name in a project in
GitHub. \code{move_column()} can be used to reorder the columns.
}
\details{
You can update a column associated with either a repository, user or
organization, by supplying them as an input, as long as you have appropriate
permissions.
You can move a column by either specifying the position, either \code{"first"} or
\code{"last"}, or by specifying another column to place it after.
For more details see the GitHub API documentation:
\itemize{
\item \url{https://docs.github.com/en/free-pro-team@latest/rest/reference/projects#update-an-existing-project-column}
\item \url{https://docs.github.com/en/free-pro-team@latest/rest/reference/projects#move-a-project-column}
}
}
\examples{
\dontrun{
# Update the name of a column in a repository project
update_column(
column = "Test column",
name = "Updated test column",
project = "Test project",
repo = "ChadGoymer/githapi"
)
# Move a column to the first position in a user's project
move_column(
name = "Test column",
position = "first",
user = "ChadGoymer"
)
# Move a column after another one in an organization's project
move_column(
name = "Test column",
after = "Test column 2",
org = "HairyCoos"
)
}
}
|
#### README ############
## There are 9 sections in this code
## Section 1) download and process the FPKM and sample info from GEO
## Section 2) quality control and feature selection
## Section 3) clustering cells by gene markers
## Section 4) clustering cells without gene markers, based on the 10.3k expressed genes that were used to construct the trajectory by Monocle3
## Section 5 (OPTIONAL): constructing the trajectory using Monocle
## Section 6) select the global ~3k HVG
## Section 7) prepare inputs for QuanTC with 3198 genes and 131 cells
## Section 8) cluster the 131 cells of 4531 genes using different methods, and plot the cell clusters
## Section 9) construct and visualize the trajectory using the scater package
## last update 2/8/2022
## by Holly Yang ##############
setwd('F:/projects/BioTIP/result/GSE52583')
####################################################################################
### Section 1) download and process the FPKM and sample info from GEO             ##
## columns "age", "cells", "cellName" are from GEO                                ##
## column "putative_cell_type" is from the published Supplementary Table 3        ##
## factor "cell_type" merges both age and putative_cell_type                      ##
## "CellType" is added in Section 3, the biomarker-based cell clusters            ##
## "Cluster" is added in Section 4, the unsupervised cell clusters (num_clusters=4) ##
####################################################################################
{ ## Section 1: download GSE52583, assemble per-cell metadata (cli) and the FPKM matrix
library(GEOquery)
## Two platforms are returned as a list of ExpressionSets (GPL13112, GPL16417).
GSE52583 <- getGEO(GEO = 'GSE52583', filename = NULL, destdir = '../../data/GSE52583_lung_cellfate_usedbyMojtahedi2016/GSE52583GEOquery',
GSElimits = NULL, GSEMatrix = TRUE, AnnotGPL = FALSE, getGPL = TRUE)
class(GSE52583) #[1] "list"
names(GSE52583)
# [1] "GSE52583-GPL13112_series_matrix.txt.gz" "GSE52583-GPL16417_series_matrix.txt.gz"
class(GSE52583[[1]]) #[1] "ExpressionSet"
save(GSE52583, file= '../../data/GSE52583_lung_cellfate_usedbyMojtahedi2016/GSE52583GEOquery/FPKM.GSE52583_ExpressionSet_list.rData',compress=T)
## generate meta_table for cells ################
## Combine the phenotype tables of both platforms, then drop columns that
## carry a single constant value across all 201 samples.
cli <- rbind(pData(GSE52583[[1]]), pData(GSE52583[[2]]))
dim(cli) # [1] 201 46
toDelete <- NULL
for(i in 1:ncol(cli)) {
if(length(table(cli[,i])) ==1) toDelete <- c(toDelete, i)
}
cli <- cli[,-toDelete]
dim(cli) #[1] 201 10
## Parse the embryonic day out of the characteristics field; day 107 is adult.
cli$age <- unlist(lapply(cli$characteristics_ch1.2, function(x) unlist(strsplit(as.character(x), split=" day "))[2]))
cli$age[which(cli$age=='107')]='Adult'
cli$genotype <- unlist(lapply(cli$characteristics_ch1.3, function(x) unlist(strsplit(as.character(x), split=": "))[2]))
table(cli$age, cli$genotype )
#       Sftpc-Cre-ERT2-rtta -/- tetO-HIST1H2BJ-GFP+/-) wild type
#  14.5                                              0        45
#  16.5                                              0        27
#  18.5                                              0        83
#  Adult                                            46         0
cli$replicate <- unlist(lapply(cli$title, function(x) unlist(strsplit(as.character(x), split=", "))[2]))
cli$cells <- unlist(lapply(cli$title, function(x) unlist(strsplit(as.character(x), split=", "))[3]))
table(cli$cells)
#200 cell bulk control  no cell control     single cells
#                2                1              198
rownames(cli) <- cli$geo_accession
# NOTE(review): 'cli[,c]' subsets with the function 'c' and will error at run
# time; presumably a column-index vector was intended here -- confirm and fix.
cli <- cli[,c]
## Derive a short cell name from the supplementary-file URL
## (fields 2-4 of the underscore-separated basename, before "_IL").
tmp <- unlist(lapply(cli$supplementary_file_1, function(x) unlist(strsplit(as.character(x), split="suppl/"))[2]))
tmp <- unlist(lapply(tmp, function(x) unlist(strsplit(x, split="_IL"))[1]))
tmp1 <- unlist(lapply(tmp, function(x) unlist(strsplit(x, split="_"))[2]))
tmp2 <- unlist(lapply(tmp, function(x) unlist(strsplit(x, split="_"))[3]))
tmp3 <- unlist(lapply(tmp, function(x) unlist(strsplit(x, split="_"))[4]))
cli$cellName <- paste(tmp1,tmp2,tmp3,sep="_")
colnames(cli)[8] <- 'biosample'
colnames(cli)[9] <- 'SRX'
cli <- cli[,c(6,7,11:15)]
## Treutlein 2014 Supplementary Data 3: match the published putative cell
## types to cli by cell name.
Data3 <- read.table('../../data/GSE52583_lung_cellfate_usedbyMojtahedi2016/Treutlein2014/Treurlein2014_SData3.txt',header=T,sep='\t')
dim(Data3) #[1]    82 23275
colnames(Data3)[1:10]
# [1] "cell_name"           "time_point"          "sample"
# [4] "putative_cell_type"  "X0610005C13Rik"      "X0610007C21Rik"
# [7] "X0610007L01Rik"      "X0610007N19Rik"      "X0610007P08Rik"
# [10] "X0610007P14Rik"
table(Data3$putative_cell_type)
#     AT1      AT2       BP     bulk ciliated    Clara
#      41       12       13        2        3       11
cli$putative_cell_type <- Data3[match(cli$cellName,Data3$cell_name),]$putative_cell_type
# write.table(cli, file='GSE52583_sample_annotation_xy.txt', sep='\t')
## generate matrix of FPKM ######################
## Read every per-sample Cufflinks output (*.gz) and collect the FPKM column;
## all files share the same gene order, so a plain cbind is sufficient.
myDir <- "F:/projects/BioTIP/data/GSE52583_lung_cellfate_usedbyMojtahedi2016/GSE52583_RAW/"
COLs <- c('tracking_id', 'class_code', 'nearest_ref_id', 'gene_id', 'gene_short_name', 'tss_id',
'locus', 'length','coverage','FPKM','FPKM_conf_lo','FPKM_conf_hi','FPKM_status')
files <- list.files(path = myDir, pattern='*.gz')
(n <- length(files)) # 201
tmp <- read.table(file=paste0(myDir,files[1]),sep='\t',header=FALSE, comment="")
colnames(tmp) <- COLs
FPKM <- NULL
for(i in 1:n)
{
tmp <- read.table(file=paste0(myDir,files[i]),sep='\t',header=FALSE, comment="")
colnames(tmp) <- COLs
FPKM <- cbind(FPKM, tmp$FPKM)
}
rownames(FPKM) <- tmp$gene_id
colnames(FPKM) <- unlist(lapply(files, function(x) unlist(strsplit(x, split="_"))[1]))
dim(FPKM) #[1] 23837 201
write(rownames(FPKM), file= '../../data/GSE52583_lung_cellfate_usedbyMojtahedi2016/Treutlein2014/FPKM_correctSymbol.txt')
# save(FPKM, file='FPKM_matrix_nofilter.rData', compress=T)
## Sanity check: metadata rows and FPKM columns are in the same (GSM) order.
any( rownames(cli)!=colnames(FPKM)) #[1] FALSE
}
################################################################################################################
## section 2) Quality control and feature selection
##   GEO-downloaded FPKM matrix                                                                 23837 201
##   remove spike-in, mitochondrial and non-annotated transcripts                               23231 201
##   collapse the FPKM values of duplicated symbols by their mean value                         22854 201
##   cell-quality filtering by removing cells with
####      either very low mRNA recovery or potential doublets or triplets                       22854 196
##   feature selection based on the mean-variance relationship in trajectory construction       10359 196
##   feature selection (in section 7) of expressed coding genes, miRNAs and lincRNAs            10251 196
####   feature selection (in section 7) of HVGs                                                  3198 196
##   focusing on cells along the AT2 trajectory                                                  3198 131
##################################################################################################################
library(monocle)
{ ## Section 2 (part 1): spike-ins, gene annotation via biomaRt, and transcript filtering
## Median FPKM per cell is 0 for every cell, i.e. the downloaded FPKM values
## have been centralized per cell.
cell_median_FPKM <- apply(FPKM, 2, function(df) median(df, na.rm=TRUE) )
table(cell_median_FPKM==0) # TRUE 201 !!!! FPKM has been centralized per cell !!!
## Set aside the 92 ERCC spike-in rows for later use.
genes.ERCC <- grep("ERCC", rownames(FPKM),value=TRUE)
length(genes.ERCC) # 92
FPKM.ERCC <- FPKM[genes.ERCC,]
dim(FPKM.ERCC) #   92 201
save(FPKM.ERCC, file='FPKM.ERCC.RData') #!!!!!!!!!!!!
### housekeeping genes given in Extended Data Fig. 3
HK <- c("Gusb", "Tbp","Ppih", "Tfrc", "Sdha", "Pgk1", "B2m", "Ldha", "Gapdh", "Hsp90ab1", "Actb")
all(HK %in% rownames(FPKM)) #[1] TRUE
## 2.1) prepare annotation table ------------------------------
{ # https://ivanek.github.io/analysisOfGenomicsDataWithR/03_AnnotationResources_html.html
# https://www.biostars.org/p/147351/
# https://www.biostars.org/p/147351/
library(biomaRt) #biomaRt_2.42.0
ensembl <- useMart("ensembl", dataset="mmusculus_gene_ensembl")
grep("Synonyms",listAttributes(ensembl), value=T)
## First pass: query Ensembl by MGI symbol for all FPKM rownames.
annot<-getBM(c("ensembl_gene_id", "mgi_symbol", "chromosome_name", "strand", "start_position", "end_position","gene_biotype"),
filters= "mgi_symbol", value=rownames(FPKM), mart=ensembl)
dim(annot) #[1] 20990     7
table(rownames(FPKM) %in% annot$mgi_symbol)
# FALSE  TRUE
#  2875 20962
x <- which(!rownames(FPKM) %in% annot$mgi_symbol); length(x) # 2875
# load the annotation database
# NOTE(review): org.Mm.eg_dbconn() comes from the org.Mm.eg.db package, which
# is not attached anywhere in this script -- confirm it is loaded beforehand.
# set up your query genes (the symbols Ensembl did not recognize)
queryGeneNames <- rownames(FPKM)[x]
# use sql to get alias table and gene_info table (contains the symbols)
# first open the database connection
#library(DBI) #DBI_1.1.0
dbCon <- org.Mm.eg_dbconn()
# write your SQL query
sqlQuery <- 'SELECT * FROM alias, gene_info WHERE alias._id == gene_info._id;'
# execute the query on the database
aliasSymbol <- dbGetQuery(dbCon, sqlQuery)
dim(aliasSymbol) #[1] 149181      5
# subset to get your results: (alias_symbol, symbol) pairs for the queries
result <- aliasSymbol[which(aliasSymbol[,2] %in% queryGeneNames),c(2,5)]
dim(result) # [1] 2476    2
length(unique(result[,1]) ) #[1] 2430
unique(result[duplicated(result[,1]),1])
#[1] "Ang3"          "Egfbp2"        "Aim1"          "Stra13"        "Odz2"
# [6] "Odz1"          "Odz3"          "Prl2c4"        "Nat6"          "Rab1"
#[11] "Sip1"          "1700058G18Rik" "Abp1"          "Klra22"        "B3gnt1"
#[16] "G6b"           "ORF61"         "Dbc1"          "Clca4"         "Siglec5"
#[21] "6430411K18Rik" "Mll2"          "6430706D22Rik" "A730008H23Rik" "2810408M09Rik"
#[26] "Stxbp3a"       "Nrp"           "Dear1"         "Duxbl"         "Plac9"
#[31] "H2afb1"        "Gmcl1l"        "Cldn25"        "Cml3"          "Mir193"
#[36] "5430421N21Rik" "Snord116"      "Rbmy1a1"
# manually correct unrecognized symbols with alias symbol: drop the rows
# (by row name) that map an alias to more than one official symbol #!!!!!!!!!!!!!!!!!!!!!!!
tmp <- unique(c(549,11855,21429,24529,12342,21477, 25735,25741,25746,26420,
10086, 35195,38777,35170,4730,11893,68927, 66645, 58335, 80156, 82624,
83976, 96333, 56001,59911, 77808,57346, 98574, 107119,42860,107291,
116138,116186,116234,116825,125298,125445, 118088, 118604,124424,
119423,12090, 38360, 125467, 125484, 125487))
result <- result[-which(rownames(result) %in% as.character(tmp)),]
dim(result) # [1] 2430    2
length(unique(result[,1]) ) #[1] 2430
## Second pass: re-query Ensembl with the resolved official symbols.
annot2 <- getBM(c("ensembl_gene_id", "mgi_symbol", "chromosome_name", "strand", "start_position", "end_position","gene_biotype"),
filters= "mgi_symbol", value=result[,"symbol"], mart=ensembl)
annot <- rbind(annot, annot2)
dim(annot) # [1] 23213     7
rm(annot2)
# save(annot, file="ensembl_annot_GSE52583_full.RData", compress=T)
## Map annotation rows back to the symbols used as FPKM rownames
## (replace official symbols by the aliases that appear in the FPKM matrix).
GSE52583.gene.id <- annot$mgi_symbol
tmp <- match(result$symbol, GSE52583.gene.id)
length(tmp) # 2430
GSE52583.gene.id[tmp[!is.na(tmp)]] <- result$alias_symbol[!is.na(tmp)]
annot$GSE52583.gene.id <- GSE52583.gene.id
# correct the wrong chromosome_name (patch/scaffold names like CHR_*, JH58*, GL45*)
x <- grep("CHR", annot$chromosome_name); length(x) # 311
for(i in 1:length(x))
{
annot[x[i],"chromosome_name"] <- getBM("chromosome_name",
filters= "mgi_symbol", value=annot$mgi_symbol[x[i]], mart=ensembl)
}
dim(annot) # 23213     8
table(annot$chromosome_name)
x <- grep("JH58", annot$chromosome_name); length(x) # 6
for(i in 1:length(x))
{
annot[x[i],"chromosome_name"] <- getBM("chromosome_name",
filters= "mgi_symbol", value=annot$mgi_symbol[x[i]], mart=ensembl)
}
dim(annot) # 23213     8
table(annot$chromosome_name)
x <- grep("GL45", annot$chromosome_name); length(x) # 8
for(i in 1:length(x))
{
annot[x[i],"chromosome_name"] <- getBM("chromosome_name",
filters= "mgi_symbol", value=annot$mgi_symbol[x[i]], mart=ensembl)
}
dim(annot) # 23213     8
table(annot$chromosome_name)
## Manually assign the few scaffolds that could not be resolved automatically.
x <- grep("GL45", annot$chromosome_name); length(x) # 4
annot[x,"chromosome_name"] <- c(1, "X", "X","X")
x <- grep("JH58", annot$chromosome_name); length(x) # 3
annot[x,"chromosome_name"] <- c(4, 5, 4)
annot[x[1] ,"gene_biotype"] <- "lncRNA"
## Harmonize column names / strand coding and build a UCSC-style locus string.
colnames(annot)[which(colnames(annot)=="mgi_symbol")] <- 'gene_short_name'
annot$strand[which(annot$strand=="1")] ="+"
annot$strand[which(annot$strand=="-1")] ="-"
annot$locus <- paste0("chr",annot$chromosome_name,":",annot$start_position,"-",annot$end_position,":",annot$strand)
GRanges(annot$locus)
#GRanges object with 23213 ranges and 0 metadata columns:
# save(annot, file="ensembl_annot_GSE52583_full.RData", compress=T) # !!!!!!!!!
}
## 2.2) Remove non-annotated transcripts, including spike, mito transcripts ------------------------------
{
FPKM <- FPKM[which(rownames(FPKM) %in% GSE52583.gene.id),]
dim(FPKM) #23231  / filtered: [1] 15897 201
### collapse the FPKM values for duplicated gene symbol by the mean value ---------------------------
# BUG FIX: the original compared rownames(FPKM) against the integer index x[i]
# (coerced to character), which can never match a gene symbol, so duplicated
# rows were never actually averaged. Compare against the duplicated symbol
# itself (rownames(tmp)[i]) instead.
x <- which(duplicated(rownames(FPKM))) ; length(x) #[1] 377
tmp <- FPKM[x,]     # the 2nd (and later) occurrence of each duplicated symbol
FPKM <- FPKM[-x,]   # keep only the first occurrence of each symbol
for(i in seq_along(x))
{
  y <- which(rownames(FPKM)==rownames(tmp)[i])
  FPKM[y,] <- apply(rbind(FPKM[y,], tmp[i,]), 2, mean)
}
dim(FPKM) # 22854  / filtered: [1] 15782 201
}
## 2.3) arbitrarily take the first annotation for the duplicated annotations --------------------
{
annot <- annot[-which(duplicated(annot$GSE52583.gene.id)),]
rownames(annot) <- annot$GSE52583.gene.id
annot <- annot[rownames(FPKM),]
## Build two Monocle CellDataSets: one on the FPKM values (tobit family) used
## only to convert relative expression to absolute transcript counts, and the
## final one on those counts (negative binomial family).
pd <- new("AnnotatedDataFrame", data = cli)
fd <- new("AnnotatedDataFrame", data = annot)
annot.FPKM <- newCellDataSet(FPKM, phenoData = pd, featureData = fd,
lowerDetectionLimit = 1,
expressionFamily=tobit()) # Tobits are truncated normal distributions.
cds <- relative2abs(annot.FPKM, method = "num_genes")
annot.cds <- newCellDataSet(cds, phenoData = pd, featureData = fd,
lowerDetectionLimit = 1,
expressionFamily=negbinomial.size()) # Negative binomial distribution with fixed variance (which is automatically calculated by Monocle). Recommended
# save(annot.cds, file="GSE52583_annot.cds_allgene.RData", compress=TRUE) #++++++++++++++++++++++++++++++
}
## 2.4) cell quality control --------------------
library(monocle)
{
## Keep only true single cells (drops the bulk and no-cell controls).
valid_cells <- row.names(subset(pData(annot.cds),
cells == "single cells"
))
annot.cds <- annot.cds[,valid_cells]
annot.cds <- estimateSizeFactors(annot.cds)
annot.cds <- estimateDispersions(annot.cds)
#Removing 291 outliers
dim(annot.cds)
#Features  Samples
#   22854      198
# cut left tail by setting an expression threshold of 2^(-18) !!!!!
annot.cds <- detectGenes(annot.cds, min_expr = 2^(-18))
print(head(fData(annot.cds)))
expressed_genes <- row.names(subset(fData(annot.cds),
num_cells_expressed >= 10))
length(expressed_genes) #[1] 11333 genes expressed in at least 10 cells
## It's also good to look at the distribution of mRNA totals across the cells:
pData(annot.cds)$Total_mRNAs <- Matrix::colSums(exprs(annot.cds)) #!!!
summary(pData(annot.cds)$Total_mRNAs)
#   Min. 1st Qu.  Median    Mean 3rd Qu.    Max.
#    728    7703   11310   13023   16333   30088
# We've gone ahead and removed the few cells with either very low mRNA recovery or far more mRNA that the typical cell. -------------------------
# Often, doublets or triplets have roughly twice the mRNA recovered as true single cells,
annot.cds <- annot.cds[,pData(annot.cds)$Total_mRNAs < 30000] # !!!!!!!!!!!!!!!
table(pData(annot.cds)$age)  ## only remove 1 E14.5 cell !!!!!!!!!!!!!!
# 14.5  16.5  18.5 Adult
#   44    27    80    46
## Per-age upper/lower bounds: mean +/- 2 SD on the log10 total-mRNA scale,
## then take the loosest bounds across ages.
upper_bound <- lower_bound <- NULL
# NOTE(review): pData()$age was built above as a character vector; levels() on
# a character returns NULL, in which case this loop body never runs -- the
# printed bounds below suggest age is a factor at this point; confirm.
x <- levels(pData(annot.cds)$age)
for(i in x)
{
tmp <- which(pData(annot.cds)$age==i)
u_bound <- 10^(mean(log10(pData(annot.cds)$Total_mRNAs[tmp])) +
2*sd(log10(pData(annot.cds)$Total_mRNAs[tmp])))
l_bound <- 10^(mean(log10(pData(annot.cds)$Total_mRNAs[tmp])) -
2*sd(log10(pData(annot.cds)$Total_mRNAs[tmp])))
upper_bound <- max(upper_bound,u_bound)
lower_bound <- min(lower_bound,l_bound)
}
lower_bound #[1] 1633.736
upper_bound # [1] 32035.42
qplot(Total_mRNAs, data = pData(annot.cds), color = age, geom =
"density", ylab="Density") +
geom_vline(xintercept = lower_bound) +
geom_vline(xintercept = upper_bound)
dev.copy2pdf(file="density_Total_mRNAs.pdf")
# so the latter filter is another means of excluding all but single cells from the analysis.
# Such filtering is handy if your protocol doesn't allow direct visualization of cells after they've been captured.
# Note that these thresholds are specific to this dataset.
annot.cds <- annot.cds[,pData(annot.cds)$Total_mRNAs > lower_bound &
pData(annot.cds)$Total_mRNAs < upper_bound]
annot.cds <- detectGenes(annot.cds, min_expr = 0.1)
dim(annot.cds)
# Features  Samples
#    22854      196
table(pData(annot.cds)$age) ## here remove 2 Adult cells !!!!!!!!!!!!!!
# 14.5  16.5  18.5 Adult
#   45    27    80    44
# Log-transform each value in the expression matrix.
L <- log(exprs(annot.cds[expressed_genes,]))
any(L==-Inf) #[1] TRUE
# Standardize each gene, so that they are all on the same scale,
# Then melt the data with plyr so we can plot it easily
melted_dens_df <- melt(Matrix::t(scale(Matrix::t(L))))
# verify the data follows a distribution that is roughly lognormal
# Plot the distribution of the standardized gene expression values.
qplot(value, geom = "density", data = melted_dens_df) +
stat_function(fun = dnorm, size = 0.5, color = 'red') +
xlab("Standardized log(FPKM), 196 single cells") +
ylab("Density")
dev.copy2pdf(file="density_Standardized_logcds.pdf")
## Flag the expressed genes in the feature data for later use.
fData(annot.cds)$expressed_genes <- (row.names(fData(annot.cds)) %in% expressed_genes)
# save(annot.cds, file="GSE52583_annot.cds_Standardized.RData", compress=TRUE) #++++++++++++++++++++++++++++++
}
## 2.5) feature selection based on mean gene expression abundance ----------------------
## focus on lncRNA, coding genes, and miRNAs
{
length(fData(annot.cds)$expressed_genes) # 11333
# expressed_genes was only used to exclude two Adult cells !!!!!!!!!!!!!!!!!!
# annot.cds <- annot.cds[fData(annot.cds)$expressed_genes, ]
# dim(annot.cds)
#Features  Samples
#   11333      196
table(pData(annot.cds)$putative_cell_type)
#     AT1      AT2       BP     bulk ciliated    Clara
#      41       12       13        0        3       11
## Merge age with putative cell type into a single ordered factor; adult
## cells without a published type are all AT2 in this dataset.
pData(annot.cds)$cell_type <- paste(pData(annot.cds)$age, pData(annot.cds)$putative_cell_type, sep="_") #!!!!
pData(annot.cds)$cell_type[which(pData(annot.cds)$cell_type=="Adult_NA")] = "Adult_AT2" #!!!!
pData(annot.cds)$cell_type <- factor(pData(annot.cds)$cell_type,
levels =c("14.5_NA", "16.5_NA","18.5_BP","18.5_ciliated","18.5_Clara","18.5_AT1","18.5_AT2","Adult_AT2"))
## Clustering cells (without marker genes) --------------------------
disp_table <- dispersionTable(annot.cds) # Retrieve a table of values specifying the mean-variance relationship
dim(disp_table) #[1] 10365     4
hist(log(disp_table$mean_expression), 100)
# filter genes based on average expression level --------------------------------
unsup_clustering_genes <- subset(disp_table, mean_expression >= 0.01)
dim(unsup_clustering_genes) # [1] 10359     4  ######
annot.cds <- setOrderingFilter(annot.cds, unsup_clustering_genes$gene_id)
table(fData(annot.cds)$use_for_ordering)
#FALSE  TRUE
#12676 10359
plot_ordering_genes(annot.cds)
dev.copy2pdf(file="scarePlot_averageHighgene_Standardized.pdf")
}
}
###################################################
## section 3) clustering cells by gene markers   ##
## (adds pData column "CellType")                ##
###################################################
{
## Feature-data row names for marker genes: AT1 markers (Ager, S100a6, Pdpn)
## and AT2 markers (Sftpc, Lyz2).
AT1_id <- row.names(subset(fData(annot.cds), gene_short_name == "Ager")) # %in% c("Pdpn","Ager","Aqp5") ))
AT1_id2 <- row.names(subset(fData(annot.cds), gene_short_name == "S100a6")) # %in% c("Pdpn","Ager","Aqp5") ))
AT1_id3 <- row.names(subset(fData(annot.cds), gene_short_name == "Pdpn")) # %in% c("Pdpn","Ager","Aqp5") ))
AT2_id <- row.names(subset(fData(annot.cds), gene_short_name == "Sftpc")) #%in% c("Sftpc","Muc1","Sftpb","Abca3","Lyz2")))
AT2_id2 <- row.names(subset(fData(annot.cds), gene_short_name == "Lyz2")) #%in% c("Sftpc","Muc1","Sftpb","Abca3","Lyz2")))
## AT1 := Ager+ & S100a6+ & Lyz2-;  AT2 := Pdpn- & Sftpc+ (threshold 1).
cth <- newCellTypeHierarchy()
cth <- addCellType(cth, "AT1", classify_func =
function(x) { x[AT1_id,] > 1 & x[AT1_id2,] > 1  & x[AT2_id2,] < 1})
cth <- addCellType(cth, "AT2", classify_func = function(x)
{ x[AT1_id3,] < 1 & x[AT2_id,] > 1 })
annot.cds <- classifyCells(annot.cds, cth, frequency_thresh =NULL)
colnames(pData(annot.cds))
# [1] "platform_id"         "instrument_model"    "age"                 "genotype"
# [5] "replicate"           "cells"               "cellName"            "putative_cell_type"
# [9] "remove_by_RECC"      "Size_Factor"         "num_genes_expressed" "Total_mRNAs"
# [13] "cell_type"           "CellType"
## Cross-tabulate the marker-based calls against the published cell types.
table(pData(annot.cds)$CellType, pData(annot.cds)$putative_cell_type) #!!!!!!!!!!!!
#             AT1 AT2 BP bulk ciliated Clara
#  Ambiguous    0   0  1    0        2     1
#  AT1         38   0  9    0        0     3
#  AT2          0  11  1    0        1     2
#  Unknown      3   1  2    0        0     5
}
########################################################
## section 4) clustering cells without gene markers   ##
## (adds pData column "Cluster", k = 4)               ##
########################################################
{
pc <- plot_pc_variance_explained(annot.cds, return_all = T) # norm_method='log'
plot(pc$p)
pc$variance_explained[1:3]*100
# [1] 8.703065 3.243314 2.495040
## tSNE on the top 6 PCs, then density-peak clustering into 4 clusters.
annot.cds <- reduceDimension(annot.cds, max_components = 3, num_dim = 6,
norm_method = "log", # because the exprs values are the downloaded FPKM, log transform is applied !!!!!!!!!!!!!!!!!!!
reduction_method = 'tSNE', verbose = T)
colnames(pData(annot.cds))  # NO change
annot.cds <- clusterCells(annot.cds, num_clusters = 4)
# save(annot.cds, file="GSE52583_annot.cds_Clustered.RData", compress=TRUE) #++++++++++++++++++++++++++++++
############# plot -------------
pc <- plot_pc_variance_explained(annot.cds, return_all = T) # norm_method='log'
plot(pc$p)
pc$variance_explained[1:3]*100
#[1] 8.703065 3.243314 2.495040
pdf(file="PCA_averageHighgene_standarded_cds_thresholded.pdf")
plot_cell_clusters(annot.cds, color_by = 'as.factor(cell_type)')
plot_cell_clusters(annot.cds, color_by = 'as.factor(Cluster)')
g <- plot_cell_clusters(annot.cds, color_by = 'as.factor(cell_type)', plot=FALSE)
g + geom_point(aes_string(color = pData(annot.cds)$cell_type,
shape=pData(annot.cds)$Cluster))
dev.off()
}
###################################################################
## Section 5 (OPTIONAL): constructing trajectory using Monocle   ##
###################################################################
{
annot.cds@expressionFamily@vfamily #[1] "negbinomial.size"
## Rank the ordering genes by differential expression across the unsupervised
## clusters, then build the DDRTree trajectory and order cells in pseudotime.
clustering_DEG_genes <- differentialGeneTest(annot.cds[which(fData(annot.cds)$use_for_ordering),],
fullModelFormulaStr = '~Cluster',
relative_expr = TRUE, # by default, Whether to transform expression into relative values.
cores = 1)
save(clustering_DEG_genes, file='clustering_DEG_genes.age.Cluster.RData') #!!!!!!!!!!!!
ordering_genes.Cluster <-
row.names(clustering_DEG_genes)[order(clustering_DEG_genes$qval)]
table(fData(annot.cds)$use_for_ordering)
# FALSE  TRUE
# 12495 10359
annot.cds <- setOrderingFilter(annot.cds,
ordering_genes = ordering_genes.Cluster)
annot.cds <- reduceDimension(annot.cds, method = 'DDRTree') ##### !!! there are different methods !!!
annot.cds <- orderCells(annot.cds)
# NOTE(review): GM_state() is not defined in this file -- presumably the
# root-state helper from the Monocle tutorial; confirm it is sourced first.
annot.cds <- orderCells(annot.cds, root_state = GM_state(annot.cds))
# save(annot.cds, file="GSE52583_annot.cds_Clustered_thresholded.RData", compress=TRUE) #++++++++++++++++++++++++++++++
# ## additionally DE test for the feature selection in section 7
# clustering_DEG_genes <- differentialGeneTest(annot.cds[x,],
#                                              fullModelFormulaStr = '~cell_type',
#                                              relative_expr = TRUE, # by default, Whether to transform expression into relative values.
#                                              cores = 1)
# save(clustering_DEG_genes, file='clustering_DEG_genes.age.cellType.RData') #!!!!!!!!!!!!!!
pdf(file="pca_averageHighgene_Standardized_trajectory.pdf")
plot_cell_trajectory(annot.cds, color_by = "age")
plot_cell_trajectory(annot.cds, color_by = "cell_type")
plot_cell_trajectory(annot.cds, color_by = "Cluster")
plot_cell_trajectory(annot.cds, color_by = "Pseudotime")
dev.off()
}
# ### backup all selected transcripts ########
# load(file="GSE52583_annot.cds_Clustered_thresholded.RData")
#
# dat <- exprs(annot.cds)
# dim(dat) #22854 196
# dat <- dat[which(fData(annot.cds)$use_for_ordering),]
# dim(dat)
# #[1] 10359 196
# save(dat, file="F:/projects/BioTIP/doc/2020_/Applications/input.Rdata/GSE52583/GSE52583_monocle_counts.RData")
# cli <- pData(annot.cds)
# dim(cli) # 196 21
# save(cli, file='F:/projects/BioTIP/doc/2020_/Applications/input.Rdata/GSE52583/GSE52583_cli.RData')
# df <- fData(annot.cds)[rownames(dat),]
# dim(df) # 10359 11
# save(df, file='F:/projects/BioTIP/doc/2020_/Applications/input.Rdata/GSE52583/GSE52583_GeneFeature.RData')
# rm(dat, cli, df)
######################################################
## Section 6) Global ~3k HVG section ##
## of coding genes, lncRNAs, and miRNAs ##
## based on variance of FPKM across all cells ##
######################################################
{
## 7.1) HVG selection ---------------------
## Keep ordering genes of the biotypes lncRNA/miRNA/protein_coding, then
## select highly variable genes (variance of log2(count+1) > 0.5 across cells).
{
#load(file="GSE52583_annot.cds_Clustered_thresholded.RData")
table(fData(annot.cds)$use_for_ordering)
# FALSE  TRUE
# 12495 10359
# select coding genes, lncRNAs, and miRNAs for the downstream analysis ------------------------
table(fData(annot.cds)$use_for_ordering, fData(annot.cds)$gene_biotype %in% c('lncRNA', 'miRNA','protein_coding') )
#        FALSE  TRUE
#  FALSE   380 12115
#  TRUE    108 10251
## alternatively, could select genes based on differential expression across time or clusters
# x <- which(fData(annot.cds)$num_cells_expressed>= 10 & fData(annot.cds)$use_for_ordering &
#              fData(annot.cds)$gene_biotype %in% c('lncRNA', 'miRNA','protein_coding'))
# length(x) #8900
#
#
# load(file='clustering_DEG_genes.age.cellType.RData')
# selected_genes.age.cellType <-
#   row.names(clustering_DEG_genes)[which(clustering_DEG_genes$qval<0.05)]
#
# load(file='clustering_DEG_genes.Cluster.RData')
# selected_genes.Cluster <-
#   row.names(clustering_DEG_genes)[which(clustering_DEG_genes$qval<0.05)]
# HVG <- union(selected_genes.Cluster[1:4000],
#              selected_genes.age.cellType[1:4000]
# )
# if(any(is.na(HVG))) HVG <- HVG[-which(is.na(HVG))]
# length(HVG) # 4531
x <- which(fData(annot.cds)$use_for_ordering &
fData(annot.cds)$gene_biotype %in% c('lncRNA', 'miRNA','protein_coding') )
length(x) # 10251
# NOTE(review): rowVars() is not in base R (typically from matrixStats or a
# package that re-exports it); it is not loaded in this script -- confirm.
vars <- rowVars(log2(exprs(annot.cds[x,])+1))
hist(vars, 100)
table(vars>0.5)
# FALSE  TRUE
#  7053  3198
HVG <- rownames(annot.cds)[x][which(vars>0.5)]
}
## 7.2) export all 196 cells -------------------------
{
cli <- pData(annot.cds)
cli$GEO.access <- rownames(cli)
dim(cli) # 196 22
logmat <- log2(exprs(annot.cds)[HVG,]+1)
dim(logmat) # [1] 3198 196
sce <- SingleCellExperiment(logmat)
colData(sce) <- DataFrame(age = cli$age,
cellName = cli$cellName,
GEO.access = cli$GEO.access,
putative_cell_type = cli$putative_cell_type,
C_by_marker = cli$CellType,
C_Monocle.k4 = cli$Cluster,
Pseudotime = cli$Pseudotime,
Size_Factor = cli$Size_Factor)
rowData(sce) <- fData(annot.cds)[rownames(sce),]
names(assays(sce)) = 'logFPKM'
dim(sce) # 3198 196
save(sce, file='BioTIP_GSE52583_robustness/sce.RData') #!!!!!!!!!!!
}
## 7.3) focusing on cells along the AT2 trajectory -------------------------
{
cli <- pData(annot.cds)
cli$GEO.access <- rownames(cli)
y <- which(cli$age=="18.5" & cli$CellType !="AT2" )
length(y) # 65
cli <- cli[-y,]; dim(cli) # 131 21
# remove the factor levels of 0
tmp <- as.vector(cli$putative_cell_type)
tmp <- factor(tmp, levels=c('BP', 'ciliated', 'Clara','AT2' ))
logmat <- log2(exprs(annot.cds)[HVG,-y]+1)
dim(logmat) # [1] 4531 131
colnames(logmat) <- rownames(cli) <- cli$cellName
sce <- SingleCellExperiment(logmat)
colData(sce) <- DataFrame(age = cli$age,
cellName = cli$cellName,
GEO.access = cli$GEO.access,
putative_cell_type = tmp,
C_by_marker = cli$CellType,
C_Monocle.k4 = cli$Cluster,
Pseudotime = cli$Pseudotime,
Size_Factor = cli$Size_Factor)
rowData(sce) <- fData(annot.cds)[rownames(sce),]
names(assays(sce)) = 'logFPKM'
dim(sce) # 3198 131
save(sce, file='BioTIP_GSE52583_robustness/AT2.sce.RData') #!!!!!!!!!!!
}
}
#################################################################
## Section 7) Prepare inputs for QuanTC on 131 cells ##
#################################################################
## cell-cell similarity matrix for 131 cells #---------------------
# refer to http://127.0.0.1:11637/library/SC3/doc/SC3.html
# needs to customize ks accordingly per dataset; the larger the range the longer the running time.
# In this case, ks=3:8 are tested,
# and for the related soft-thresholding clustering (QuanTC method),
# we take the average of the Consensus.Cluster-agreeable clustering results of k=4:10 to get cell-cell similarity matrix M
# Purpose: for each of the two SCE objects saved in Section 6, run SC3 consensus
# clustering (8.2) and write the text/CSV input files QuanTC expects (8.3).
library(SC3)
files = c('AT2.sce.RData', 'sce.RData')
for(j in files){
load(file= paste0('BioTIP_GSE52583_robustness/',j))
## write into QuanTC inputs files #---------------------------
if(j=='AT2.sce.RData') QuanTC.input.dir = "F:/projects/QuanTC/QuanTC-modified/Input/GSE52583.AT2/" else {
QuanTC.input.dir = "F:/projects/QuanTC/QuanTC-modified/Input/GSE52583/"
}
## 8.2) calculate cell-cell similarity and estimate the number of clusters ----------------------------
{
sce.sc3 = sce
rowData(sce.sc3)$feature_symbol <- rownames(sce.sc3)
# remove features with duplicated names
any(duplicated(rowData(sce.sc3)$feature_symbol)) # F
range(assays(sce)$logFPKM)
# [1] 0.00000 13.97788
### to run SC3 successfully, transform sparse matrix to matrix !!!!!!!!!!!!!
logcounts(sce.sc3) <- as.matrix(assays(sce)$logFPKM)
# NOTE(review): sum(x, 2) adds the literal 2 to the total rather than operating
# per column; a per-cell dropout fraction (e.g. via colSums) was likely
# intended here — confirm. The expression is only inspected interactively.
sum(logcounts(sce.sc3)<1e-16,2)/nrow(logcounts(sce.sc3))>0.95 # TRUE therefore no cell being filtered
counts(sce.sc3) <- as.matrix(2^assays(sce)$logFPKM -1)
# NOT repeat !!!!
# # biology: boolean parameter, defines whether to compute differentially expressed genes, marker genes and cell outliers.
set.seed(2020)
sce.sc3 <- sc3(sce.sc3, ks = 3:8, biology = FALSE) # svm_max = 5000 is default!!!
# Setting SC3 parameters...
# Your dataset contains more than 2000 cells. Adjusting the nstart parameter of kmeans to 50 for faster performance...
# Calculating distances between the cells...
# Performing transformations and calculating eigenvectors...
# Performing k-means clustering...
# Calculating consensus matrix...
traceback()
# When the sce.sc3 object is prepared for clustering, SC3 can also estimate the optimal number of clusters k in the dataset
# NOT repeat, runs 10 mins !!!!
sce.sc3 <- sc3_estimate_k(sce.sc3)
str(metadata(sce.sc3)$sc3)
# $ k_estimation : num 5
# to save space, keep only the first assay (logFPKM) before saving
assayNames(sce.sc3)
#[1] "logFPKM" "logcounts" "counts"
assays(sce.sc3) <- assays(sce.sc3)[1]
if(j=='AT2.sce.RData') save(sce.sc3, file='AT2.sce_SC3.RData') else save(sce.sc3, file='sce_SC3.RData') ##!!!!!!!!!!!!!!!!!!!
gc()
# END DO NOT REPEAT !!!!!!!!!!!!!
}
## 8.3) writing input files for QuanTC -----------------------------------------
# NOTE(review): this inner loop reuses the variable name 'j' of the enclosing
# for-loop, so the whole 8.3 block runs once per OUTER iteration (writing the
# same files twice) and clobbers the outer loop variable — confirm intent.
{
for(j in files){
load(file= paste0('BioTIP_GSE52583_robustness/',j))
## write into QuanTC inputs files #---------------------------
if(j=='AT2.sce.RData') {
QuanTC.input.dir = "F:/projects/QuanTC/QuanTC-modified/Input/GSE52583.AT2/"
load(file='AT2.sce_SC3.RData')}
else {
QuanTC.input.dir = "F:/projects/QuanTC/QuanTC-modified/Input/GSE52583/"
load(file='sce_SC3.RData')
}
# consensus matrices at k = 3, 5, 7 from SC3
M_3 = (sce.sc3@metadata$sc3$consensus$`3`$consensus)
M_5 = (sce.sc3@metadata$sc3$consensus$`5`$consensus)
M_7 = (sce.sc3@metadata$sc3$consensus$`7`$consensus)
# take average of the Consensus.Cluster-agreeable clustering results to get cell-cell similarity matrix M
M = (M_3+M_5+M_7)/3
# NOTE(review): write.csv() ignores col.names (it always writes a header and
# emits a warning); if QuanTC needs a headerless matrix, use
# write.table(..., sep=',', col.names=FALSE) instead — confirm.
write.csv(M, file=paste0(QuanTC.input.dir,'Treutlein2014_cell-cell.csv'), row.names=F, col.names=F)
logmat <- as.matrix(assays(sce)$logFPKM)
dim(logmat) # [1] 4720 131
# NOTE(review): recorded dimension (4720) disagrees with the 3198-gene HVG set
# saved in Section 6 — likely from an earlier run; confirm.
write.table(round(logmat,4), file= paste0(QuanTC.input.dir,'Treutlein2014_log2.FPKM.txt'), row.names=FALSE, col.names=FALSE, sep='\t')
write.table(rownames(logmat), file= paste0(QuanTC.input.dir,'Treutlein2014_gene_name.txt'), row.names=FALSE, quote=FALSE, col.names=FALSE)
cli <- colData(sce)
write.table(cli$cellName %>% as.vector(),
file= paste0(QuanTC.input.dir,'Treutlein2014_cell_name.txt'), row.names=FALSE, quote=FALSE, col.names=FALSE)
write.table(cli$C_by_marker %>% as.vector(),
file= paste0(QuanTC.input.dir,'Treutlein2014_CellType.txt'), row.names=FALSE, quote=FALSE, col.names=FALSE)
# encode collection age as an ordinal pseudo-time label (1..4) for QuanTC
true_label <- as.vector(cli$age)
true_label[which(true_label=='14.5')]=1 # pseudo time, numeric values
true_label[which(true_label=='16.5')]=2
true_label[which(true_label=='18.5')]=3
true_label[which(true_label=='Adult')]=4
write.table(true_label,
file= paste0(QuanTC.input.dir,'Treutlein2014_CellAge.txt'), row.names=FALSE, quote=FALSE, col.names=FALSE)
}
}
}
####################################################################################
## section 8) cluster 131 cells of 4531 genes using different methods ##
## Note that excluded cells are which(cli$age=="18.5" & cli$CellType !="AT2" ) ##
####################################################################################
# Purpose: cluster the AT2-trajectory cells with four independent methods —
# scran SNN-graph (8.1), SC3 consensus (8.2), Seurat/Leiden at three
# resolutions (8.3), QuanTC soft-thresholding (8.4) — store each labeling as a
# colData column of 'sce', and plot all clusterings on a common t-SNE (8.5).
setwd('F:/projects/BioTIP/result/GSE52583')
library(dplyr)
library(scater)
library(scran)
library(SC3)
library(Seurat) #Seurat version 4.0.6
#library(leiden)
#library(monocle3)
j='AT2.sce.RData'
subDir = 'BioTIP_GSE52583_robustness/'
QuanTCDir = 'QuanTC_Output/AT2/'
{
parameters = list()
parameters$k = 10 # An integer number of nearest neighboring cells to use when creating the k nearest neighbor graph for Louvain/Leiden/SNNGraph clustering.
################################################################################
## 8.0) load the R object
################################################################################
load(paste0(subDir,'/',j))
sce
# dim: 3198 131
# metadata(0):
# assays(1): logFPKM
# rownames(4531): 0610007P08Rik 0610007P22Rik ... Zyx l7Rn6
# rowData names(11): ensembl_gene_id gene_short_name ...
# num_cells_expressed use_for_ordering
# colnames: NULL
# colData names(8): age cellName ... Pseudotime Size_Factor
# reducedDimNames(0):
# altExpNames(0):
table(colData(sce)$C_by_marker)
# Ambiguous AT1 AT2 Unknown
# 2 13 77 39
table(colData(sce)$age)
# 14.5 16.5 18.5 Adult
# 45 27 15 44
########################################################################################
# 8.1) # extract SNNGraph clusters (by scran) with two settings for the parameter k
# The parameter k indicates the number of nearest neighbors to consider during graph construction, which
# we set to 5 for small number of cells (e.g., <500) and 10 to large number of sequenced cells (e.g., 4k).
# https://nbisweden.github.io/single-cell_sib_scilifelab/session-clustering/clustering.html
########################################################################################
## Calculate size factors and normalize has been excluded because the downloaded FPKM has been scaled
# sce <- scran::computeSumFactors(sce, min.mean = 0.1, assay.type ='logFPKM')
# sce <- scater::normalize(sce)
# logcounts(sce) <- as.matrix(logcounts(sce))
{
## Fit variance trend and apply denoising PCA
new.trend <- scran::modelGeneVarByPoisson(x = sce, assay.type ='logFPKM')
# means <- rowMeans(assays(sce)$logFPKM)
# vars <- rowVars(assays(sce)$logFPKM)
# fit <- scran::fitTrendVar(means, vars)
# fit$trend <- new.trend
dec <- scran::modelGeneVar(sce, assay.type ='logFPKM')
set.seed(123)
sce <- scran::denoisePCA(sce, technical = new.trend, assay.type ='logFPKM')
reducedDimNames(sce) #[1] "PCA"
# k: An integer scalar specifying the number of nearest neighbors to consider during graph construction.
SNNGraph.ID <- scran::buildSNNGraph(sce, k= parameters$k, use.dimred = 'PCA')
SNNGraph.ID <- igraph::cluster_walktrap(SNNGraph.ID)$membership
# check the agreement between new and original clusters using the SNNGraph method
table(as.vector(sce$age), SNNGraph.ID)
# SNNGraph.ID
# 1 2 3
# 14.5 0 40 5
# 16.5 1 2 24
# 18.5 12 0 3
# Adult 44 0 0
colData(sce)$C_SNNGraph_k10 = factor(SNNGraph.ID)
# NOTE(review): the k=20 graph below is immediately overwritten by the k=8 one,
# so only k=8 contributes to C_SNNGraph_k8 — presumably intentional ("the same"
# result was observed interactively), but confirm.
SNNGraph.ID <- scran::buildSNNGraph(sce, k= 20, use.dimred = 'PCA') # the same
SNNGraph.ID <- scran::buildSNNGraph(sce, k= 8, use.dimred = 'PCA') # the same
SNNGraph.ID <- igraph::cluster_walktrap(SNNGraph.ID)$membership
table(as.vector(sce$age), SNNGraph.ID)
# SNNGraph.ID
# 1 2 3 4 5 6 7
# 14.5 7 0 0 0 16 0 22
# 16.5 24 0 0 0 2 1 0
# 18.5 2 0 0 11 0 2 0
# Adult 0 12 22 1 0 9 0
colData(sce)$C_SNNGraph_k8 = factor(SNNGraph.ID)
#save(sce, file=paste0(subDir,'/',j), compress=TRUE) # !!!!!!!!!!!!!!!!!
}
################################################################
# 8.2) # extract consensus clusters
# refer to http://127.0.0.1:11637/library/SC3/doc/SC3.html
# needs to customize ks accordingly per dataset; the larger the range the longer the running time.
# In this case, ks=3,5,7 are tested,
# and for the related soft-thresholding clustering (QuanTC method),
# we take the average of the Consensus.Cluster-agreeable clustering results of k=3,5,7 to get cell-cell similarity matrix M
##################################################################
if(j=="AT2.sce.RData") load('AT2.sce_SC3.RData') else load('sce_SC3.RData') #!!!!!!!!!!!!!!!
{
sce.sc3 # optimal num = 5 by SC3
## manually pick the optimal matches to follow up
table(as.vector(sce$age), colData(sce.sc3)$sc3_5_clusters)
# 1 2 3 4 5
# 14.5 0 0 0 37 8
# 16.5 0 0 27 0 0
# 18.5 14 0 0 0 1
# Adult 0 44 0 0 0
# load(file='sce_E8.25_HEP.RData')
# copy the SC3 labelings for k = 3, 5, 7 onto the working sce object
colData(sce)$C_consensus_ks3 = colData(sce.sc3)$sc3_3_clusters
colData(sce)$C_consensus_ks5 = colData(sce.sc3)$sc3_5_clusters
colData(sce)$C_consensus_ks7 = colData(sce.sc3)$sc3_7_clusters
rm(sce.sc3)
# save(sce, file=paste0(subDir,'/',j), compress=TRUE) # !!!!!!!!!!!!!!!!!
}
################################################################
# 8.3) # extract Leiden clustering (using Seurat)
# https://satijalab.org/seurat/articles/get_started.html
# Leiden requires the leidenalg python.
# We apply the Seurat package, by setting the algorithm =4 in the function FindClusters()
# This parameter decides the algorithm for modularity optimization
# (1 = original Louvain algorithm; 2 = Louvain algorithm with multilevel refinement;
# 3 = SLM algorithm; 4 = Leiden algorithm).
# The resolution parameter: use a value above (below) 1.0 if you want to obtain a larger (smaller) number of communities.
# Seurat author recommended that:
# We find that setting this parameter between 0.4-1.2 typically returns good results for single-cell datasets of around 3K cells.
###################################################################
# generate a pseudo count to run Seurat
{
logcounts(sce) <- assays(sce)$logFPKM
counts(sce) <- as.matrix(2^logcounts(sce)-1)
# convert from SingleCellExperiment
sce.seurat <- as.Seurat(sce)
# Warning: Feature names cannot have underscores ('_'), replacing with dashes ('-')
sce.seurat
# Computes the k.param nearest neighbors for a given dataset
ndim = dim(sce.seurat[['PCA']])[2]
sce.seurat <- FindNeighbors(sce.seurat, reduction = "PCA", k.param = parameters$k, dims = 1:ndim)
sce.seurat <- FindClusters(sce.seurat, resolution = 0.4, algorithm = 4) # smaller number of communities
table(Idents(sce.seurat), as.vector(colData(sce)$age))
# 14.5 16.5 18.5 Adult
# 1 0 0 11 35
# 2 6 24 2 0
# 3 24 0 0 0
# 4 15 2 0 0
# 5 0 1 2 9
colData(sce)$C_Leiden_0.4 = Idents(sce.seurat)
sce.seurat <- FindClusters(sce.seurat, resolution = 0.8, algorithm = 4) # medium resolution -> more communities
table(Idents(sce.seurat), as.vector(colData(sce)$age))
# 14.5 16.5 18.5 Adult
# 1 6 24 2 0
# 2 24 0 0 0
# 3 0 0 0 22
# 4 15 2 0 0
# 5 0 0 11 1
# 6 0 0 0 12
# 7 0 1 2 9
colData(sce)$C_Leiden_0.8 = Idents(sce.seurat)
sce.seurat <- FindClusters(sce.seurat, resolution = 1.2, algorithm = 4) # highest resolution -> largest number of communities
table(Idents(sce.seurat), as.vector(colData(sce)$age))
# 14.5 16.5 18.5 Adult
# 1 24 0 0 0
# 2 0 0 0 22
# 3 6 10 2 0
# 4 15 2 0 0
# 5 0 14 0 0
# 6 0 0 11 1
# 7 0 1 2 9
# 8 0 0 0 12
colData(sce)$C_Leiden_1.2 = Idents(sce.seurat)
# In this case, remove the pseudo counts
counts(sce) <- logcounts(sce) <- NULL
## save(sce, file=paste0(subDir,'/',j), compress=TRUE) # !!!!!!!!!!!!!!!!!
rm(sce.seurat)
}
################################################################
# 8.4 ) # extract the QUANTC-assigned clusters
# k=4 was optimized by the QuanTC pipeline
# refer to QuanTC_soft.thresholding_clusters.m
##################################################################
{
# C_TC.txt: per-cell cluster IDs produced by the (external) QuanTC MATLAB run
C_TC <- read.table(paste0(QuanTCDir,'C_TC.txt'))
C_TC <- C_TC[,1]
length(C_TC) #[1] 131
# index_TC.txt: indices of the transition cells (TC)
index_TC <- read.table(paste0(QuanTCDir,'index_TC.txt'))
index_TC <- index_TC[,1]
unique(C_TC[index_TC]) # 5 verified the C_TC is the cluster ID generated by QuanTC
## relabel the QuanTC cluster IDs (TC last) to be consistent with those shown in Fig S2
tmp <- data.frame(C_TC)
tmp <- tmp %>% mutate(C_TC = replace(C_TC, C_TC == 1, 'C1'))
tmp <- tmp %>% mutate(C_TC = replace(C_TC, C_TC == 2, 'C2'))
tmp <- tmp %>% mutate(C_TC = replace(C_TC, C_TC == 3, 'C3'))
tmp <- tmp %>% mutate(C_TC = replace(C_TC, C_TC == 4, 'C4'))
tmp <- tmp %>% mutate(C_TC = replace(C_TC, C_TC == 5, 'TC'))
table(as.vector(sce$age), tmp[,1])
# C1 C2 C3 C4 TC
# 14.5 36 0 0 0 9
# 16.5 0 0 13 0 14
# 18.5 0 0 0 2 13
# Adult 0 43 0 0 1
colData(sce)$C_Soft <- tmp[,1]
}
## save(sce, file=paste0(subDir,'/',j), compress=TRUE) # !!!!!!!!!!!!!!!!!
## 8.5) plot the different clustering results on a shared t-SNE layout
####################################################################
library(scater)
sce <- runTSNE(sce, dimred="PCA")
{
x <- grep('C_', colnames(colData(sce)))
(n=length(x)) # 11
pdf(file=paste0(subDir,"/TSNE.",j,"_clustering_methods.pdf"), width=10, height=9)
gridExtra::grid.arrange(
plotReducedDim(sce, dimred='TSNE', colour_by='putative_cell_type', #add_legend=FALSE,
text_by='putative_cell_type', text_size = 4, text_colour='black', point_size=0.5) +
ggtitle('putative_cell_type') #+ ylim(5,20)
,plotReducedDim(sce, dimred='TSNE',colour_by='C_by_marker', #add_legend=FALSE,
text_by='C_by_marker', text_size = 4, text_colour='black', point_size=0.5) +
ggtitle('C_by gene markers') #+ ylim(5,20)
,plotReducedDim(sce, dimred='TSNE',colour_by='C_Monocle.k4', #add_legend=FALSE,
text_by='C_Monocle.k4', text_size = 4, text_colour='black', point_size=0.5) +
ggtitle('C_Monocle.k4 with 10k genes') #+ ylim(5,20)
,plotReducedDim(sce, dimred='TSNE',colour_by='C_consensus_ks3', #add_legend=FALSE,
text_by='C_consensus_ks3', text_size = 4, text_colour='black', point_size=0.5) +
ggtitle('C_consensus_ks3') #+ ylim(5,20)
,plotReducedDim(sce, dimred='TSNE',colour_by='C_consensus_ks5', #add_legend=FALSE,
text_by='C_consensus_ks5', text_size = 4, text_colour='black', point_size=0.5) +
ggtitle('consensus_ks5') #+ ylim(5,20)
,plotReducedDim(sce, dimred='TSNE',colour_by='C_consensus_ks7', #add_legend=FALSE,
text_by='C_consensus_ks7', text_size = 4, text_colour='black', point_size=0.5) +
ggtitle('consensus_ks7') #+ ylim(5,20)
,plotReducedDim(sce, dimred='TSNE',colour_by='C_Leiden_0.4', #add_legend=FALSE,
text_by='C_Leiden_0.4', text_size = 4, text_colour='black', point_size=0.5) +
ggtitle('Leiden_0.4') #+ ylim(5,20)
,plotReducedDim(sce, dimred='TSNE',colour_by='C_Leiden_0.8', #add_legend=FALSE,
text_by='C_Leiden_0.8', text_size = 4, text_colour='black', point_size=0.5) +
ggtitle('Leiden_0.8') #+ ylim(5,20)
,plotReducedDim(sce, dimred='TSNE',colour_by='C_Leiden_1.2', #add_legend=FALSE,
text_by='C_Leiden_1.2', text_size = 4, text_colour='black', point_size=0.5) +
ggtitle('Leiden_1.2') #+ ylim(5,20)
,ncol=3
)
gridExtra::grid.arrange(
plotReducedDim(sce, dimred='TSNE', colour_by='C_SNNGraph_k10', #add_legend=FALSE,
text_by='C_SNNGraph_k10', text_size = 4, text_colour='black', point_size=0.5) +
ggtitle('C_SNNGraph_k10')
,plotReducedDim(sce, dimred='TSNE',colour_by='C_SNNGraph_k8', #add_legend=FALSE,
text_by='C_SNNGraph_k8', text_size = 4, text_colour='black', point_size=0.5) +
ggtitle('C_SNNGraph_k8') #+ ylim(5,20)
,plotReducedDim(sce, dimred='TSNE',colour_by='C_Soft', #add_legend=FALSE,
text_by='C_Soft', text_size = 4, text_colour='black', point_size=0.5) +
ggtitle('soft-thresholding clusting') #+ ylim(5,20)
,plotReducedDim(sce, dimred='TSNE',colour_by='age', #add_legend=FALSE,
text_by='age', text_size = 4, text_colour='black', point_size=0.5) +
ggtitle('Collection time') #+ ylim(5,20)
,ncol=3, nrow=3)
dev.off()
}
}
############################################################################
## section 9) construct and visualize the trajectory using scater package
############################################################################
# Purpose: for each saved SCE (AT2-only and all cells), (re)compute PCA/t-SNE
# if absent, build a minimum spanning tree (MST) over cluster centroids in PCA
# space, and overlay the MST edges on the t-SNE embedding as a coarse
# trajectory. One PDF is written per object.
fid = c('AT2.sce.RData','sce.RData')
for(k in 1:length(fid)){
load(paste0('BioTIP_GSE52583_robustness/',fid[k]))
# recompute the denoised PCA only if it is not already stored on the object
if(!'PCA' %in% reducedDimNames(sce)){
new.trend <- scran::modelGeneVarByPoisson(x = sce, assay.type ='logFPKM')
dec <- scran::modelGeneVar(sce, assay.type ='logFPKM')
set.seed(123)
sce <- scran::denoisePCA(sce, technical = new.trend, assay.type ='logFPKM')
reducedDimNames(sce) #[1] "PCA"
}
if(!'TSNE' %in% reducedDimNames(sce)){
sce <- runTSNE(sce, dimred="PCA")
}
## add the label to show (age + putative cell type) on the plots
colData(sce)$label = paste(sce$age, sce$putative_cell_type, sep='_')
library(scater)
# pseudo counts
#counts(sce) = 2^(logcounts(sce))
pdf(file=paste0("BioTIP_GSE52583_robustness/trajectory_",ncol(sce),"cells.pdf"))
# aggregate cells into pseudo-bulk centroids: by age for the full object,
# by the Leiden (res 0.4) clusters for the AT2-only object
if(fid[k]=='sce.RData') {
by.cluster <- aggregateAcrossCells(sce, ids=sce$age, use.assay.type='logFPKM')} else {
by.cluster <- aggregateAcrossCells(sce, ids=sce$C_Leiden_0.4, use.assay.type='logFPKM')
}
# MST over the pairwise distances of the cluster centroids in PCA space
centroids <- reducedDim(by.cluster, "PCA")
dmat <- dist(centroids)
dmat <- as.matrix(dmat)
g <- igraph::graph.adjacency(dmat, mode = "undirected", weighted = TRUE)
mst <- igraph::minimum.spanning.tree(g)
plot(mst)
# translate each MST edge into a t-SNE line segment for plotting
pairs <- Matrix::which(mst[] > 0, arr.ind=TRUE)
coords <- reducedDim(by.cluster, "TSNE")
group <- rep(seq_len(nrow(pairs)), 2)
stuff <- data.frame(rbind(coords[pairs[,1],], coords[pairs[,2],]), group)
plotTSNE(sce, colour_by="age",
text_by="label", text_size = 8)
plotTSNE(sce, colour_by="age",
text_by="label", text_size = 8) +
geom_line(data=stuff, mapping=aes(x=X1, y=X2, group=group))
dev.off()
}
# sanity check: cross-tabulate age vs. the Leiden clusters on the AT2 object
k=1
load(paste0('BioTIP_GSE52583_robustness/',fid[k]))
table(sce$age, sce$C_Leiden_0.4)
# 1 2 3 4 5
# 14.5 0 6 24 15 0
# 16.5 0 24 0 2 1
# 18.5 11 2 0 0 2
# Adult 35 0 0 0 9
| /examples/code/lung_Treutlein2014/1_FPKM_preprocess.R | no_license | xyang2uchicago/BioTIP | R | false | false | 52,215 | r | #### README ############
## There are 9 sections in this code
## Section 1) download and process the FPKM and sample infor from GEO
## section 2) Quanlity control and feature selection
## section 3) clustering cells by gene markers
## section 4) clustering cells without gene markers based on 10.3k expressed genes that were used to construct trajectory by Monocle3
## Section 5 (OPTINAL): constructing trajectory using Monocle
## Section 6) select Global ~3k HVG
## Section 7) Prepare inputs for QuanTC with 3198 genes and 131 cells
## section 8) cluster 131 cells of 4531 genes using different methods, and plot the cell clusters
## scetion 9) construct and visualize teh trajectory using scater package
## last update 2/8/2022
## by Holly Yang ##############
setwd('F:/projects/BioTIP/result/GSE52583')
####################################################################################
### Section 1 ) download and process the FPKM and sample infor from GEO ##
## columns "age" cells" "cellName" are from GEO ##
## column "putative_cell_type" are from the publishgd Stable 3 ##
## factor "cell_type" merges both age and putative_cell_type ##
## "CellType" is added in Section 4, the biomarker-based cell clusters ##
## "Cluster" is added in Section 5, the unsupervised cell clusters (k=6) ##
####################################################################################
{
library(GEOquery)
GSE52583 <- getGEO(GEO = 'GSE52583', filename = NULL, destdir = '../../data/GSE52583_lung_cellfate_usedbyMojtahedi2016/GSE52583GEOquery',
GSElimits = NULL, GSEMatrix = TRUE, AnnotGPL = FALSE, getGPL = TRUE)
class(GSE52583) #[1] "list"
names(GSE52583)
# [1] "GSE52583-GPL13112_series_matrix.txt.gz" "GSE52583-GPL16417_series_matrix.txt.gz"
class(GSE52583[[1]]) #[1] "ExpressionSet"
save(GSE52583, file= '../../data/GSE52583_lung_cellfate_usedbyMojtahedi2016/GSE52583GEOquery/FPKM.GSE52583_ExpressionSet_list.rData',compress=T)
## generate meta_table for cells ################
cli <- rbind(pData(GSE52583[[1]]), pData(GSE52583[[2]]))
dim(cli) # [1] 201 46
toDelete <- NULL
for(i in 1:ncol(cli)) {
if(length(table(cli[,i])) ==1) toDelete <- c(toDelete, i)
}
cli <- cli[,-toDelete]
dim(cli) #[1] 201 10
cli$age <- unlist(lapply(cli$characteristics_ch1.2, function(x) unlist(strsplit(as.character(x), split=" day "))[2]))
cli$age[which(cli$age=='107')]='Adult'
cli$genotype <- unlist(lapply(cli$characteristics_ch1.3, function(x) unlist(strsplit(as.character(x), split=": "))[2]))
table(cli$age, cli$genotype )
# Sftpc-Cre-ERT2-rtta -/- tetO-HIST1H2BJ-GFP+/-) wild type
# 14.5 0 45
# 16.5 0 27
# 18.5 0 83
# Adult 46 0
cli$replicate <- unlist(lapply(cli$title, function(x) unlist(strsplit(as.character(x), split=", "))[2]))
cli$cells <- unlist(lapply(cli$title, function(x) unlist(strsplit(as.character(x), split=", "))[3]))
table(cli$cells)
#200 cell bulk control no cell control single cells
# 2 1 198
rownames(cli) <- cli$geo_accession
cli <- cli[,c]
tmp <- unlist(lapply(cli$supplementary_file_1, function(x) unlist(strsplit(as.character(x), split="suppl/"))[2]))
tmp <- unlist(lapply(tmp, function(x) unlist(strsplit(x, split="_IL"))[1]))
tmp1 <- unlist(lapply(tmp, function(x) unlist(strsplit(x, split="_"))[2]))
tmp2 <- unlist(lapply(tmp, function(x) unlist(strsplit(x, split="_"))[3]))
tmp3 <- unlist(lapply(tmp, function(x) unlist(strsplit(x, split="_"))[4]))
cli$cellName <- paste(tmp1,tmp2,tmp3,sep="_")
colnames(cli)[8] <- 'biosample'
colnames(cli)[9] <- 'SRX'
cli <- cli[,c(6,7,11:15)]
## Treutlein2014 suppl data 3, match to cli
Data3 <- read.table('../../data/GSE52583_lung_cellfate_usedbyMojtahedi2016/Treutlein2014/Treurlein2014_SData3.txt',header=T,sep='\t')
dim(Data3) #[1] 82 23275
colnames(Data3)[1:10]
# [1] "cell_name" "time_point" "sample"
# [4] "putative_cell_type" "X0610005C13Rik" "X0610007C21Rik"
# [7] "X0610007L01Rik" "X0610007N19Rik" "X0610007P08Rik"
# [10] "X0610007P14Rik"
table(Data3$putative_cell_type)
# AT1 AT2 BP bulk ciliated Clara
# 41 12 13 2 3 11
cli$putative_cell_type <- Data3[match(cli$cellName,Data3$cell_name),]$putative_cell_type
# write.table(cli, file='GSE52583_sample_annotation_xy.txt', sep='\t')
## generate matrix of FPKM ######################
myDir <- "F:/projects/BioTIP/data/GSE52583_lung_cellfate_usedbyMojtahedi2016/GSE52583_RAW/"
COLs <- c('tracking_id', 'class_code', 'nearest_ref_id', 'gene_id', 'gene_short_name', 'tss_id',
'locus', 'length','coverage','FPKM','FPKM_conf_lo','FPKM_conf_hi','FPKM_status')
files <- list.files(path = myDir, pattern='*.gz')
(n <- length(files)) # 201
tmp <- read.table(file=paste0(myDir,files[1]),sep='\t',header=FALSE, comment="")
colnames(tmp) <- COLs
FPKM <- NULL
for(i in 1:n)
{
tmp <- read.table(file=paste0(myDir,files[i]),sep='\t',header=FALSE, comment="")
colnames(tmp) <- COLs
FPKM <- cbind(FPKM, tmp$FPKM)
}
rownames(FPKM) <- tmp$gene_id
colnames(FPKM) <- unlist(lapply(files, function(x) unlist(strsplit(x, split="_"))[1]))
dim(FPKM) #[1] 23837 201
write(rownames(FPKM), file= '../../data/GSE52583_lung_cellfate_usedbyMojtahedi2016/Treutlein2014/FPKM_correctSymbol.txt')
# save(FPKM, file='FPKM_matrix_nofilter.rData', compress=T)
any( rownames(cli)!=colnames(FPKM)) #[1] FALSE
}
################################################################################################################
## section 2) Quanlity control and feature selection
## GEO downloaded FPKM matrix 23837 201
## remove_spike, mito, non-annotated transcripts 23231 201
## collapse the FPKM values for duplicated symbol by the mean value 22854 201
## cell_quality_filtering by removing
#### either very low mRNA recovery or potential doublets or triplets 22854 196
## feature selection based on mean-variance relationship in trajectory construction 10359 196
## feature selection (in section 7) of coding gene, miRNA and lincRNA (expressed) 10251 196
#### feature selection (in section 7) of HVGs 3198 196
## focusing on cells along the AT2 trajectory 3198 131
##################################################################################################################
library(monocle)
{
cell_median_FPKM <- apply(FPKM, 2, function(df) median(df, na.rm=TRUE) )
table(cell_median_FPKM==0) # TRUE 201 !!!! FPKM has been centralized per cell !!!
genes.ERCC <- grep("ERCC", rownames(FPKM),value=TRUE)
length(genes.ERCC) # 92
FPKM.ERCC <- FPKM[genes.ERCC,]
dim(FPKM.ERCC) # 92 201
save(FPKM.ERCC, file='FPKM.ERCC.RData') #!!!!!!!!!!!!
### housekeeping genes given in extend Data Fig 3
HK <- c("Gusb", "Tbp","Ppih", "Tfrc", "Sdha", "Pgk1", "B2m", "Ldha", "Gapdh", "Hsp90ab1", "Actb")
all(HK %in% rownames(FPKM)) #[1] TRUE
## 2.1) prepare annotate table ------------------------------
{ # https://ivanek.github.io/analysisOfGenomicsDataWithR/03_AnnotationResources_html.html
# https://www.biostars.org/p/147351/
# https://www.biostars.org/p/147351/
library(biomaRt) #biomaRt_2.42.0
ensembl <- useMart("ensembl", dataset="mmusculus_gene_ensembl")
grep("Synonyms",listAttributes(ensembl), value=T)
annot<-getBM(c("ensembl_gene_id", "mgi_symbol", "chromosome_name", "strand", "start_position", "end_position","gene_biotype"),
filters= "mgi_symbol", value=rownames(FPKM), mart=ensembl)
dim(annot) #[1] 20990 7
table(rownames(FPKM) %in% annot$mgi_symbol)
# FALSE TRUE
# 2875 20962
x <- which(!rownames(FPKM) %in% annot$mgi_symbol); length(x) # 2875
# load the annotation database
# set up your query genes
queryGeneNames <- rownames(FPKM)[x]
# use sql to get alias table and gene_info table (contains the symbols)
# first open the database connection
#library(DBI) #DBI_1.1.0
dbCon <- org.Mm.eg_dbconn()
# write your SQL query
sqlQuery <- 'SELECT * FROM alias, gene_info WHERE alias._id == gene_info._id;'
# execute the query on the database
aliasSymbol <- dbGetQuery(dbCon, sqlQuery)
dim(aliasSymbol) #[1] 149181 5
# subset to get your results
result <- aliasSymbol[which(aliasSymbol[,2] %in% queryGeneNames),c(2,5)]
dim(result) # [1] 2476 2
length(unique(result[,1]) ) #[1] 2430
unique(result[duplicated(result[,1]),1])
#[1] "Ang3" "Egfbp2" "Aim1" "Stra13" "Odz2"
# [6] "Odz1" "Odz3" "Prl2c4" "Nat6" "Rab1"
#[11] "Sip1" "1700058G18Rik" "Abp1" "Klra22" "B3gnt1"
#[16] "G6b" "ORF61" "Dbc1" "Clca4" "Siglec5"
#[21] "6430411K18Rik" "Mll2" "6430706D22Rik" "A730008H23Rik" "2810408M09Rik"
#[26] "Stxbp3a" "Nrp" "Dear1" "Duxbl" "Plac9"
#[31] "H2afb1" "Gmcl1l" "Cldn25" "Cml3" "Mir193"
#[36] "5430421N21Rik" "Snord116" "Rbmy1a1"
# manually correct unrecognized symbles with alias Symbol #!!!!!!!!!!!!!!!!!!!!!!!
tmp <- unique(c(549,11855,21429,24529,12342,21477, 25735,25741,25746,26420,
10086, 35195,38777,35170,4730,11893,68927, 66645, 58335, 80156, 82624,
83976, 96333, 56001,59911, 77808,57346, 98574, 107119,42860,107291,
116138,116186,116234,116825,125298,125445, 118088, 118604,124424,
119423,12090, 38360, 125467, 125484, 125487))
## Patch the gene annotation table: drop alias rows that could not be resolved,
## re-query Ensembl (biomaRt::getBM) for the remaining symbols, and manually
## correct chromosome names that came back as patch/haplotype scaffolds.
result <- result[-which(rownames(result) %in% as.character(tmp)),]
dim(result) # [1] 2430 2
length(unique(result[,1]) ) #[1] 2430
# Second biomaRt query for the 2430 alias-resolved symbols; appended to annot.
annot2 <- getBM(c("ensembl_gene_id", "mgi_symbol", "chromosome_name", "strand", "start_position", "end_position","gene_biotype"),
                filters= "mgi_symbol", value=result[,"symbol"], mart=ensembl)
annot <- rbind(annot, annot2)
dim(annot) # [1] 23213 7
rm(annot2)
# save(annot, file="ensembl_annot_GSE52583_full.RData", compress=T)
# Build the gene-id vector used to match the downloaded FPKM matrix: start from
# the mgi symbols, then substitute the dataset's alias symbols where available.
GSE52583.gene.id <- annot$mgi_symbol
tmp <- match(result$symbol, GSE52583.gene.id)
length(tmp) # 2430
GSE52583.gene.id[tmp[!is.na(tmp)]] <- result$alias_symbol[!is.na(tmp)]
annot$GSE52583.gene.id <- GSE52583.gene.id
# correct the wrong chromosome_name
# "CHR*" entries are alternate-assembly scaffolds; re-query each gene one at a
# time so getBM returns its primary-assembly chromosome.
x <- grep("CHR", annot$chromosome_name); length(x) # 311
for(i in 1:length(x))
{
  annot[x[i],"chromosome_name"] <- getBM("chromosome_name",
                filters= "mgi_symbol", value=annot$mgi_symbol[x[i]], mart=ensembl)
}
dim(annot) # 23213 8
table(annot$chromosome_name)
x <- grep("JH58", annot$chromosome_name); length(x) # 6
for(i in 1:length(x))
{
  annot[x[i],"chromosome_name"] <- getBM("chromosome_name",
                filters= "mgi_symbol", value=annot$mgi_symbol[x[i]], mart=ensembl)
}
dim(annot) # 23213 8
table(annot$chromosome_name)
x <- grep("GL45", annot$chromosome_name); length(x) # 8
for(i in 1:length(x))
{
  annot[x[i],"chromosome_name"] <- getBM("chromosome_name",
                filters= "mgi_symbol", value=annot$mgi_symbol[x[i]], mart=ensembl)
}
dim(annot) # 23213 8
table(annot$chromosome_name)
# NOTE(review): the hard-coded replacements below assume a fixed number and
# ordering of the leftover GL45/JH58 rows for this exact biomaRt snapshot;
# re-verify the grep results before reusing with a newer Ensembl release.
x <- grep("GL45", annot$chromosome_name); length(x) # 4
annot[x,"chromosome_name"] <- c(1, "X", "X","X")
x <- grep("JH58", annot$chromosome_name); length(x) # 3
annot[x,"chromosome_name"] <- c(4, 5, 4)
annot[x[1] ,"gene_biotype"] <- "lncRNA"
# Rename to monocle's expected feature column and convert strand to +/-.
colnames(annot)[which(colnames(annot)=="mgi_symbol")] <- 'gene_short_name'
annot$strand[which(annot$strand=="1")] ="+"
annot$strand[which(annot$strand=="-1")] ="-"
# Compose a UCSC-style locus string and sanity-check it parses as GRanges.
annot$locus <- paste0("chr",annot$chromosome_name,":",annot$start_position,"-",annot$end_position,":",annot$strand)
GRanges(annot$locus)
#GRanges object with 23213 ranges and 0 metadata columns:
# save(annot, file="ensembl_annot_GSE52583_full.RData", compress=T) # !!!!!!!!!
}
## 2.2) Remove non-annotated transcripts, including spike, mito transcripts ------------------------------
## Keep only FPKM rows whose symbol is in the curated annotation, then collapse
## rows with duplicated gene symbols by averaging their FPKM per cell.
{
FPKM <- FPKM[which(rownames(FPKM) %in% GSE52583.gene.id),]
dim(FPKM) #23231 / filtered: [1] 15897 201
### collapse the FPKM values for duplicated gene symbol by the mean value ---------------------------
x <- which(duplicated(rownames(FPKM))) ; length(x) #[1] 377
tmp <- FPKM[x,]           # the duplicate copies (rownames = duplicated symbols)
FPKM <- FPKM[-x,]         # keep one copy of each symbol
# BUG FIX: the original matched with `rownames(FPKM)==x[i]`, comparing gene
# symbols against a numeric row index — never TRUE, so the duplicates were
# silently dropped without averaging. Match on the duplicated symbol instead.
dup.symbol <- rownames(tmp)
for(i in seq_along(x))
{
  y <- which(rownames(FPKM)==dup.symbol[i])
  # NOTE(review): for a symbol duplicated more than twice this folds copies in
  # sequentially (a weighted running mean), not a simple mean of all copies.
  FPKM[y,] <- apply(rbind(FPKM[y,], tmp[i,]), 2, mean)
}
dim(FPKM) # 22854 / filtered: [1] 15782 201
}
## 2.3) arbitrarily take the first annotation for the duplicated annotations --------------------
## Align the annotation table to the FPKM rows, then build the monocle
## CellDataSet: first with a tobit family (FPKM are censored/continuous),
## convert relative FPKM to estimated absolute counts with relative2abs(),
## and rebuild with a negative-binomial family on the count-like values.
{
annot <- annot[-which(duplicated(annot$GSE52583.gene.id)),]
rownames(annot) <- annot$GSE52583.gene.id
# Reorder/subset annot so featureData rows match the expression matrix rows.
annot <- annot[rownames(FPKM),]
pd <- new("AnnotatedDataFrame", data = cli)
fd <- new("AnnotatedDataFrame", data = annot)
annot.FPKM <- newCellDataSet(FPKM, phenoData = pd, featureData = fd,
                       lowerDetectionLimit = 1,
                       expressionFamily=tobit()) # Tobits are truncated normal distributions.
cds <- relative2abs(annot.FPKM, method = "num_genes")
annot.cds <- newCellDataSet(cds, phenoData = pd, featureData = fd,
                      lowerDetectionLimit = 1,
                      expressionFamily=negbinomial.size()) # Negative binomial distribution with fixed variance (which is automatically calculated by Monocle). Recommended
# save(annot.cds, file="GSE52583_annot.cds_allgene.RData", compress=TRUE) #++++++++++++++++++++++++++++++
}
## 2.4) cell quality control --------------------
## Keep single cells only; estimate size factors/dispersions; filter cells by
## total-mRNA bounds (mean +/- 2 sd on log10 scale, per age group); then check
## that standardized log expression is roughly normal.
library(monocle)
{
valid_cells <- row.names(subset(pData(annot.cds),
                                cells == "single cells"
))
annot.cds <- annot.cds[,valid_cells]
annot.cds <- estimateSizeFactors(annot.cds)
annot.cds <- estimateDispersions(annot.cds)
#Removing 291 outliers
dim(annot.cds)
#Features  Samples
# 22854 198
# cut left tail by setting an expression threshold of 2^(-18) !!!!!
annot.cds <- detectGenes(annot.cds, min_expr = 2^(-18))
print(head(fData(annot.cds)))
expressed_genes <- row.names(subset(fData(annot.cds),
                                    num_cells_expressed >= 10))
length(expressed_genes) #[1] 11333 genes expressed in at least 10 cells
## It's also good to look at the distribution of mRNA totals across the cells:
pData(annot.cds)$Total_mRNAs <- Matrix::colSums(exprs(annot.cds)) #!!!
summary(pData(annot.cds)$Total_mRNAs)
#  Min. 1st Qu.  Median    Mean 3rd Qu.    Max.
#   728    7703   11310   13023   16333   30088
# We've gone ahead and removed the few cells with either very low mRNA recovery or far more mRNA that the typical cell. -------------------------
# Often, doublets or triplets have roughly twice the mRNA recovered as true single cells,
annot.cds <- annot.cds[,pData(annot.cds)$Total_mRNAs < 30000] # !!!!!!!!!!!!!!!
table(pData(annot.cds)$age) ## only remove 1 E14.5 cell !!!!!!!!!!!!!!
# 14.5  16.5  18.5 Adult
#   44    27    80    46
# Compute per-age-group bounds; the loosest (max upper / min lower) across
# groups is used. min(NULL, x) / max(NULL, x) return x, so the NULL init works.
upper_bound <- lower_bound <- NULL
x <- levels(pData(annot.cds)$age)
for(i in x)
{
  tmp <- which(pData(annot.cds)$age==i)
  u_bound <- 10^(mean(log10(pData(annot.cds)$Total_mRNAs[tmp])) +
                   2*sd(log10(pData(annot.cds)$Total_mRNAs[tmp])))
  l_bound <- 10^(mean(log10(pData(annot.cds)$Total_mRNAs[tmp])) -
                   2*sd(log10(pData(annot.cds)$Total_mRNAs[tmp])))
  upper_bound <- max(upper_bound,u_bound)
  lower_bound <- min(lower_bound,l_bound)
}
lower_bound #[1] 1633.736
upper_bound # [1] 32035.42
qplot(Total_mRNAs, data = pData(annot.cds), color = age, geom =
        "density", ylab="Density") +
  geom_vline(xintercept = lower_bound) +
  geom_vline(xintercept = upper_bound)
dev.copy2pdf(file="density_Total_mRNAs.pdf")
# so the latter filter is another means of excluding all but single cells from the analysis.
# Such filtering is handy if your protocol doesn't allow directly visualization of cell after they've been captured.
# Note that these thresholds of 3095 and 38890 mRNAs are specific to this dataset.
annot.cds <- annot.cds[,pData(annot.cds)$Total_mRNAs > lower_bound &
                         pData(annot.cds)$Total_mRNAs < upper_bound]
annot.cds <- detectGenes(annot.cds, min_expr = 0.1)
dim(annot.cds)
# Features  Samples
#    22854      196
table(pData(annot.cds)$age) ## here remove 2 Adult cells !!!!!!!!!!!!!!
# 14.5  16.5  18.5 Adult
#   45    27    80    44
# Log-transform each value in the expression matrix.
# NOTE(review): natural log of zeros yields -Inf (confirmed below); the
# standardization/plot tolerates it but downstream use of L should not.
L <- log(exprs(annot.cds[expressed_genes,]))
any(L==-Inf) #[1] TRUE
# Standardize each gene, so that they are all on the same scale,
# Then melt the data with plyr so we can plot it easily
melted_dens_df <- melt(Matrix::t(scale(Matrix::t(L))))
# verify the data follows a distribution that is roughly lognormal
# Plot the distribution of the standardized gene expression values.
qplot(value, geom = "density", data = melted_dens_df) +
  stat_function(fun = dnorm, size = 0.5, color = 'red') +
  xlab("Standardized log(FPKM), 196 single cells") +
  ylab("Density")
dev.copy2pdf(file="density_Standardized_logcds.pdf")
fData(annot.cds)$expressed_genes <- (row.names(fData(annot.cds)) %in% expressed_genes)
# save(annot.cds, file="GSE52583_annot.cds_Standardized.RData", compress=TRUE) #++++++++++++++++++++++++++++++
}
## 2.5) feature selection based on mean gene expression abundance ----------------------
## focus on lncRNA, coding genes, and miRNAs
## Builds the combined age+cell-type label, then selects ordering genes by
## mean expression for monocle's unsupervised clustering.
{
# NOTE(review): this is the length of the logical column (all features, 22854),
# not the 11333 expressed genes the comment suggests — confirm intent.
length(fData(annot.cds)$expressed_genes) # 11333
# expressed_genes was only used to exclude two Adult cells !!!!!!!!!!!!!!!!!!
# annot.cds <- annot.cds[fData(annot.cds)$expressed_genes, ]
# dim(annot.cds)
#Features  Samples
#   11333      196
table(pData(annot.cds)$putative_cell_type)
# AT1      AT2       BP     bulk ciliated    Clara
#  41       12       13        0        3       11
# Combine collection age and putative cell type into one factor; Adult cells
# with no putative type are assumed AT2.
pData(annot.cds)$cell_type <- paste(pData(annot.cds)$age, pData(annot.cds)$putative_cell_type, sep="_") #!!!!
pData(annot.cds)$cell_type[which(pData(annot.cds)$cell_type=="Adult_NA")] = "Adult_AT2" #!!!!
pData(annot.cds)$cell_type <- factor(pData(annot.cds)$cell_type,
                                     levels =c("14.5_NA", "16.5_NA","18.5_BP","18.5_ciliated","18.5_Clara","18.5_AT1","18.5_AT2","Adult_AT2"))
## Clustering cells (without marker genes) --------------------------
disp_table <- dispersionTable(annot.cds) # Retrieve a table of values specifying the mean-variance relationship
dim(disp_table) #[1] 10365 4
hist(log(disp_table$mean_expression), 100)
# filter genes based on average expression level --------------------------------
unsup_clustering_genes <- subset(disp_table, mean_expression >= 0.01)
dim(unsup_clustering_genes) # [1] 10359 4  ######
annot.cds <- setOrderingFilter(annot.cds, unsup_clustering_genes$gene_id)
table(fData(annot.cds)$use_for_ordering)
#FALSE  TRUE
#12676 10359
plot_ordering_genes(annot.cds)
dev.copy2pdf(file="scarePlot_averageHighgene_Standardized.pdf")
}
}
###################################################
## section 3)  clustering cells by gene markers  ##
###################################################
## Classify cells as AT1/AT2 using hard-coded marker genes with an expression
## cutoff of 1 (FPKM-derived units) via monocle's CellTypeHierarchy.
{
AT1_id <- row.names(subset(fData(annot.cds), gene_short_name == "Ager")) # %in% c("Pdpn","Ager","Aqp5") ))
AT1_id2 <- row.names(subset(fData(annot.cds), gene_short_name == "S100a6")) # %in% c("Pdpn","Ager","Aqp5") ))
AT1_id3 <- row.names(subset(fData(annot.cds), gene_short_name == "Pdpn")) # %in% c("Pdpn","Ager","Aqp5") ))
AT2_id <- row.names(subset(fData(annot.cds), gene_short_name == "Sftpc")) #%in% c("Sftpc","Muc1","Sftpb","Abca3","Lyz2")))
AT2_id2 <- row.names(subset(fData(annot.cds), gene_short_name == "Lyz2")) #%in% c("Sftpc","Muc1","Sftpb","Abca3","Lyz2")))
cth <- newCellTypeHierarchy()
# AT1: Ager+ and S100a6+ but Lyz2-  ;  AT2: Pdpn- and Sftpc+
cth <- addCellType(cth, "AT1", classify_func =
                     function(x) { x[AT1_id,] > 1 & x[AT1_id2,] > 1 & x[AT2_id2,] < 1})
cth <- addCellType(cth, "AT2", classify_func = function(x)
{ x[AT1_id3,] < 1 & x[AT2_id,] > 1 })
annot.cds <- classifyCells(annot.cds, cth, frequency_thresh =NULL)
colnames(pData(annot.cds))
# [1] "platform_id"         "instrument_model"    "age"                 "genotype"
# [5] "replicate"           "cells"               "cellName"            "putative_cell_type"
# [9] "remove_by_RECC"      "Size_Factor"         "num_genes_expressed" "Total_mRNAs"
# [13] "cell_type"           "CellType"
table(pData(annot.cds)$CellType, pData(annot.cds)$putative_cell_type) #!!!!!!!!!!!!
#             AT1 AT2 BP bulk ciliated Clara
#  Ambiguous    0   0  1    0        2     1
#  AT1         38   0  9    0        0     3
#  AT2          0  11  1    0        1     2
#  Unknown      3   1  2    0        0     5
}
########################################################
## section 4)  clustering cells without gene markers  ##
########################################################
## tSNE-based unsupervised clustering (monocle) on 6 principal components,
## then plots clusters colored by cell_type and by cluster id.
{
pc <- plot_pc_variance_explained(annot.cds, return_all = T) # norm_method='log'
plot(pc$p)
pc$variance_explained[1:3]*100
# [1] 8.703065 3.243314 2.495040
annot.cds <- reduceDimension(annot.cds, max_components = 3, num_dim = 6,
                             norm_method = "log", # because the exprs values are the downloaded FPKM, log transform is applied !!!!!!!!!!!!!!!!!!!
                             reduction_method = 'tSNE', verbose = T)
colnames(pData(annot.cds)) # NO change
annot.cds <- clusterCells(annot.cds, num_clusters = 4)
# save(annot.cds, file="GSE52583_annot.cds_Clustered.RData", compress=TRUE) #++++++++++++++++++++++++++++++
############# plot -------------
pc <- plot_pc_variance_explained(annot.cds, return_all = T) # norm_method='log'
plot(pc$p)
pc$variance_explained[1:3]*100
#[1] 8.703065 3.243314 2.495040
pdf(file="PCA_averageHighgene_standarded_cds_thresholded.pdf")
plot_cell_clusters(annot.cds, color_by = 'as.factor(cell_type)')
plot_cell_clusters(annot.cds, color_by = 'as.factor(Cluster)')
g <- plot_cell_clusters(annot.cds, color_by = 'as.factor(cell_type)', plot=FALSE)
g + geom_point(aes_string(color = pData(annot.cds)$cell_type,
                          shape=pData(annot.cds)$Cluster))
dev.off()
}
###################################################################
## Section 5 (OPTINAL): constructing trajectory using Monocle    ##
###################################################################
## Differential-expression test across clusters to pick ordering genes, then
## DDRTree dimension reduction and pseudotime ordering.
{
annot.cds@expressionFamily@vfamily #[1] "negbinomial.size"
clustering_DEG_genes <- differentialGeneTest(annot.cds[which(fData(annot.cds)$use_for_ordering),],
                                             fullModelFormulaStr = '~Cluster',
                                             relative_expr = TRUE, # by default, Whether to transform expression into relative values.
                                             cores = 1)
save(clustering_DEG_genes, file='clustering_DEG_genes.age.Cluster.RData') #!!!!!!!!!!!!
ordering_genes.Cluster <-
  row.names(clustering_DEG_genes)[order(clustering_DEG_genes$qval)]
table(fData(annot.cds)$use_for_ordering)
# FALSE  TRUE
# 12495 10359
annot.cds <- setOrderingFilter(annot.cds,
                               ordering_genes = ordering_genes.Cluster)
annot.cds <- reduceDimension(annot.cds, method = 'DDRTree') ##### !!! there are different methods !!!
annot.cds <- orderCells(annot.cds)
# NOTE(review): GM_state() is not defined in this file section — presumably a
# helper (defined elsewhere) that picks the root state for pseudotime; confirm.
annot.cds <- orderCells(annot.cds, root_state = GM_state(annot.cds))
# save(annot.cds, file="GSE52583_annot.cds_Clustered_thresholded.RData", compress=TRUE) #++++++++++++++++++++++++++++++
# ## additionally DE test for the feature selction in section 7
# clustering_DEG_genes <- differentialGeneTest(annot.cds[x,],
#                                              fullModelFormulaStr = '~cell_type',
#                                              relative_expr = TRUE, # by default, Whether to transform expression into relative values.
#                                              cores = 1)
# save(clustering_DEG_genes, file='clustering_DEG_genes.age.cellType.RData') #!!!!!!!!!!!!!!
pdf(file="pca_averageHighgene_Standardized_trajectory.pdf")
plot_cell_trajectory(annot.cds, color_by = "age")
plot_cell_trajectory(annot.cds, color_by = "cell_type")
plot_cell_trajectory(annot.cds, color_by = "Cluster")
plot_cell_trajectory(annot.cds, color_by = "Pseudotime")
dev.off()
}
# ### backup all selected transcripts ########
# load(file="GSE52583_annot.cds_Clustered_thresholded.RData")
#
# dat <- exprs(annot.cds)
# dim(dat) #22854 196
# dat <- dat[which(fData(annot.cds)$use_for_ordering),]
# dim(dat)
# #[1] 10359 196
# save(dat, file="F:/projects/BioTIP/doc/2020_/Applications/input.Rdata/GSE52583/GSE52583_monocle_counts.RData")
# cli <- pData(annot.cds)
# dim(cli) # 196 21
# save(cli, file='F:/projects/BioTIP/doc/2020_/Applications/input.Rdata/GSE52583/GSE52583_cli.RData')
# df <- fData(annot.cds)[rownames(dat),]
# dim(df) # 10359 11
# save(df, file='F:/projects/BioTIP/doc/2020_/Applications/input.Rdata/GSE52583/GSE52583_GeneFeature.RData')
# rm(dat, cli, df)
######################################################
## Section 6) Global ~3k HVG section ##
## of coding genes, lncRNAs, and miRNAs ##
## based on variance of FPKM across all cells ##
######################################################
## Section 6 body: pick ~3.2k highly variable genes (HVG) among expressed
## coding/lncRNA/miRNA features (variance of log2(FPKM+1) > 0.5), then export
## SingleCellExperiment objects for all 196 cells and for the AT2 trajectory.
{
## 7.1) HVG selection ---------------------
{
#load(file="GSE52583_annot.cds_Clustered_thresholded.RData")
table(fData(annot.cds)$use_for_ordering)
# FALSE  TRUE
# 12495 10359
# select coding genes, lncRNAs, and miRNAs for the downstream analysis ------------------------
table(fData(annot.cds)$use_for_ordering, fData(annot.cds)$gene_biotype %in% c('lncRNA', 'miRNA','protein_coding') )
#        FALSE  TRUE
# FALSE   380 12115
# TRUE    108 10251
## alternatively, could select genes based on differentially expression across time or clusters
# x <- which(fData(annot.cds)$num_cells_expressed>= 10 & fData(annot.cds)$use_for_ordering &
#              fData(annot.cds)$gene_biotype %in% c('lncRNA', 'miRNA','protein_coding'))
# length(x) #8900
#
#
# load(file='clustering_DEG_genes.age.cellType.RData')
# selected_genes.age.cellType <-
#   row.names(clustering_DEG_genes)[which(clustering_DEG_genes$qval<0.05)]
#
# load(file='clustering_DEG_genes.Cluster.RData')
# selected_genes.Cluster <-
#   row.names(clustering_DEG_genes)[which(clustering_DEG_genes$qval<0.05)]
# HVG <- union(selected_genes.Cluster[1:4000],
#              selected_genes.age.cellType[1:4000]
# )
# if(any(is.na(HVG))) HVG <- HVG[-which(is.na(HVG))]
# length(HVG) # 4531
x <- which(fData(annot.cds)$use_for_ordering &
             fData(annot.cds)$gene_biotype %in% c('lncRNA', 'miRNA','protein_coding') )
length(x) # 10251
# rowVars is from matrixStats (loaded elsewhere); variance on log2(FPKM+1).
vars <- rowVars(log2(exprs(annot.cds[x,])+1))
hist(vars, 100)
table(vars>0.5)
# FALSE  TRUE
#  7053  3198
HVG <- rownames(annot.cds)[x][which(vars>0.5)]
}
## 7.2) export all 196 cells -------------------------
{
cli <- pData(annot.cds)
cli$GEO.access <- rownames(cli)
dim(cli) # 196 22
logmat <- log2(exprs(annot.cds)[HVG,]+1)
dim(logmat) # [1] 3198 196
sce <- SingleCellExperiment(logmat)
colData(sce) <- DataFrame(age = cli$age,
                          cellName = cli$cellName,
                          GEO.access = cli$GEO.access,
                          putative_cell_type = cli$putative_cell_type,
                          C_by_marker = cli$CellType,
                          C_Monocle.k4 = cli$Cluster,
                          Pseudotime = cli$Pseudotime,
                          Size_Factor = cli$Size_Factor)
rowData(sce) <- fData(annot.cds)[rownames(sce),]
names(assays(sce)) = 'logFPKM'
dim(sce) # 3198 196
save(sce, file='BioTIP_GSE52583_robustness/sce.RData') #!!!!!!!!!!!
}
## 7.3) focusing on cells along the AT2 trajectory -------------------------
## Drops E18.5 cells not classified AT2 (keeps 131 cells) and rebuilds sce.
{
cli <- pData(annot.cds)
cli$GEO.access <- rownames(cli)
y <- which(cli$age=="18.5" & cli$CellType !="AT2" )
length(y) # 65
cli <- cli[-y,]; dim(cli) # 131 21
# remove the factor levels of 0
tmp <- as.vector(cli$putative_cell_type)
tmp <- factor(tmp, levels=c('BP', 'ciliated', 'Clara','AT2' ))
logmat <- log2(exprs(annot.cds)[HVG,-y]+1)
# NOTE(review): stale comment — with the HVG set above this is 3198 x 131.
dim(logmat) # [1] 4531 131
colnames(logmat) <- rownames(cli) <- cli$cellName
sce <- SingleCellExperiment(logmat)
colData(sce) <- DataFrame(age = cli$age,
                          cellName = cli$cellName,
                          GEO.access = cli$GEO.access,
                          putative_cell_type = tmp,
                          C_by_marker = cli$CellType,
                          C_Monocle.k4 = cli$Cluster,
                          Pseudotime = cli$Pseudotime,
                          Size_Factor = cli$Size_Factor)
rowData(sce) <- fData(annot.cds)[rownames(sce),]
names(assays(sce)) = 'logFPKM'
dim(sce) # 3198 131
save(sce, file='BioTIP_GSE52583_robustness/AT2.sce.RData') #!!!!!!!!!!!
}
}
#################################################################
## Section 7) Prepare inputs for QuanTC on 131 cells ##
#################################################################
## cell-cell similarity matrix for 131 cells #---------------------
# refer to http://127.0.0.1:11637/library/SC3/doc/SC3.html
# needs to customize ks accordingly per dataset; the larger the range, the longer the running time.
# In this case, ks=3:8 are tested,
# and for the related soft-thresholding clustering (QuanTC method),
# we take the average of the Consensus.Cluster-agreeable clustering results of k=4:10 to get the cell-cell similarity matrix M
library(SC3)
## For each exported sce (AT2 subset and all cells): run SC3 consensus
## clustering, save the SC3 object, then write QuanTC input files
## (cell-cell similarity, expression, gene/cell names, cell age labels).
files = c('AT2.sce.RData', 'sce.RData')
for(j in files){
  load(file= paste0('BioTIP_GSE52583_robustness/',j))
  ## write into QuanTC inputs files #---------------------------
  if(j=='AT2.sce.RData') QuanTC.input.dir = "F:/projects/QuanTC/QuanTC-modified/Input/GSE52583.AT2/" else {
    QuanTC.input.dir = "F:/projects/QuanTC/QuanTC-modified/Input/GSE52583/"
  }
  ## 8.2) calculate cell-cell similarity and estimate the number of clusters ----------------------------
  {
    sce.sc3 = sce
    rowData(sce.sc3)$feature_symbol <- rownames(sce.sc3)
    # remove features with duplicated names
    any(duplicated(rowData(sce.sc3)$feature_symbol)) # F
    range(assays(sce)$logFPKM)
    # [1]  0.00000 13.97788
    ### to run SC3 successfully, transform sparsematrix to matrix !!!!!!!!!!!!!
    logcounts(sce.sc3) <- as.matrix(assays(sce)$logFPKM)
    # NOTE(review): sum(x, 2) adds the scalar 2 into the total rather than
    # computing per-column sums — this check likely intended colSums(); verify.
    sum(logcounts(sce.sc3)<1e-16,2)/nrow(logcounts(sce.sc3))>0.95 # TRUE therefore no cell being filtered
    counts(sce.sc3) <- as.matrix(2^assays(sce)$logFPKM -1)
    # NOT repeat !!!!
    # # biology: boolean parameter, defines whether to compute differentially expressed genes, marker genes and cell outliers.
    set.seed(2020)
    sce.sc3 <- sc3(sce.sc3, ks = 3:8, biology = FALSE) # svm_max = 5000 is default!!!
    # Setting SC3 parameters...
    # Your dataset contains more than 2000 cells. Adjusting the nstart parameter of kmeans to 50 for faster performance...
    # Calculating distances between the cells...
    # Performing transformations and calculating eigenvectors...
    # Performing k-means clustering...
    # Calculating consensus matrix...
    traceback()
    # When the sce.sc3 object is prepared for clustering, SC3 can also estimate the optimal number of clusters k in the dataset
    # NOT repeat, runs 10 mins !!!!
    sce.sc3 <- sc3_estimate_k(sce.sc3)
    str(metadata(sce.sc3)$sc3)
    # $ k_estimation : num 5
    # to save space, transform back matrix to sparse matrix
    assayNames(sce.sc3)
    #[1] "logFPKM"   "logcounts" "counts"
    assays(sce.sc3) <- assays(sce.sc3)[1]
    if(j=='AT2.sce.RData') save(sce.sc3, file='AT2.sce_SC3.RData') else save(sce.sc3, file='sce_SC3.RData') ##!!!!!!!!!!!!!!!!!!!
    gc()
    # END DO NOT REPET !!!!!!!!!!!!!
  }
  ## 8.3) writing input files for QuanTC -----------------------------------------
  # NOTE(review): this inner loop reuses the OUTER loop variable `j` and
  # iterates over `files` again, so the QuanTC inputs are (re)written for both
  # datasets on every outer iteration. The outer loop still visits both files
  # (R snapshots the for-sequence), but the work is redundant — consider a
  # different loop variable or hoisting this block out of the outer loop.
  {
    for(j in files){
      load(file= paste0('BioTIP_GSE52583_robustness/',j))
      ## write into QuanTC inputs files #---------------------------
      if(j=='AT2.sce.RData') {
        QuanTC.input.dir = "F:/projects/QuanTC/QuanTC-modified/Input/GSE52583.AT2/"
        load(file='AT2.sce_SC3.RData')}
      else {
        QuanTC.input.dir = "F:/projects/QuanTC/QuanTC-modified/Input/GSE52583/"
        load(file='sce_SC3.RData')
      }
      M_3 = (sce.sc3@metadata$sc3$consensus$`3`$consensus)
      M_5 = (sce.sc3@metadata$sc3$consensus$`5`$consensus)
      M_7 = (sce.sc3@metadata$sc3$consensus$`7`$consensus)
      # take average of the Consensus.Cluster-agreeable clustering results to get cell-cell similarity matrix M
      M = (M_3+M_5+M_7)/3
      # NOTE(review): write.csv() ignores col.names= (warns); use write.table
      # with sep="," if header-less output is required.
      write.csv(M, file=paste0(QuanTC.input.dir,'Treutlein2014_cell-cell.csv'), row.names=F, col.names=F)
      logmat <- as.matrix(assays(sce)$logFPKM)
      dim(logmat) # [1] 4720 131
      write.table(round(logmat,4), file= paste0(QuanTC.input.dir,'Treutlein2014_log2.FPKM.txt'), row.names=FALSE, col.names=FALSE, sep='\t')
      write.table(rownames(logmat), file= paste0(QuanTC.input.dir,'Treutlein2014_gene_name.txt'), row.names=FALSE, quote=FALSE, col.names=FALSE)
      cli <- colData(sce)
      write.table(cli$cellName %>% as.vector(),
                  file= paste0(QuanTC.input.dir,'Treutlein2014_cell_name.txt'), row.names=FALSE, quote=FALSE, col.names=FALSE)
      write.table(cli$C_by_marker %>% as.vector(),
                  file= paste0(QuanTC.input.dir,'Treutlein2014_CellType.txt'), row.names=FALSE, quote=FALSE, col.names=FALSE)
      true_label <- as.vector(cli$age)
      true_label[which(true_label=='14.5')]=1 # pseudo time, numeric values
      true_label[which(true_label=='16.5')]=2
      true_label[which(true_label=='18.5')]=3
      true_label[which(true_label=='Adult')]=4
      write.table(true_label,
                  file= paste0(QuanTC.input.dir,'Treutlein2014_CellAge.txt'), row.names=FALSE, quote=FALSE, col.names=FALSE)
    }
  }
}
####################################################################################
## section 8) cluster 131 cells of 4531 genes using different methods ##
## Note that excluded cells are which(cli$age=="18.5" & cli$CellType !="AT2" ) ##
####################################################################################
## Section 8 setup: working directory, libraries, and the dataset to process.
# NOTE(review): absolute setwd() makes the script machine-specific.
setwd('F:/projects/BioTIP/result/GSE52583')
library(dplyr)
library(scater)
library(scran)
library(SC3)
library(Seurat) #Seurat version 4.0.6
#library(leiden)
#library(monocle3)
# Process the 131-cell AT2 subset; paths for its QuanTC outputs.
j='AT2.sce.RData'
subDir = 'BioTIP_GSE52583_robustness/'
QuanTCDir = 'QuanTC_Output/AT2/'
## Section 8 body: cluster the 131 AT2-trajectory cells with several methods
## (scran SNN graph, SC3 consensus, Seurat Leiden, QuanTC soft-thresholding),
## store each labeling as a colData column C_*, and plot them on tSNE.
{
parameters = list()
parameters$k = 10  # An integer number of nearest neighboring cells to use when creating the k nearest neighbor graph for Louvain/Leiden/SNNGraph clustering.
################################################################################
## 8.0) load the R object
################################################################################
load(paste0(subDir,'/',j))
sce
# dim: 3198 131
# metadata(0):
# assays(1): logFPKM
# rownames(4531): 0610007P08Rik 0610007P22Rik ... Zyx l7Rn6
# rowData names(11): ensembl_gene_id gene_short_name ...
#   num_cells_expressed use_for_ordering
# colnames: NULL
# colData names(8): age cellName ... Pseudotime Size_Factor
# reducedDimNames(0):
# altExpNames(0):
table(colData(sce)$C_by_marker)
# Ambiguous       AT1       AT2   Unknown
#         2        13        77        39
table(colData(sce)$age)
# 14.5  16.5  18.5 Adult
#   45    27    15    44
########################################################################################
# 8.1) # extract SNNGraph clusters (by scran) with two settings for the parameter k
# The parameter k indicates the number of nearest neighbors to consider during graph construction, which
# we set to 5 for small number of cells (e.g., <500) and 10 to large number of sequenced cells (e.g., 4k).
# https://nbisweden.github.io/single-cell_sib_scilifelab/session-clustering/clustering.html
########################################################################################
## Calculate size factors and normalize has been excluded becasue the downloaded FPKM has been scaled
# sce <- scran::computeSumFactors(sce, min.mean = 0.1, assay.type ='logFPKM')
# sce <- scater::normalize(sce)
# logcounts(sce) <- as.matrix(logcounts(sce))
{
  ## Fit variance trend and apply denoising PCA
  new.trend <- scran::modelGeneVarByPoisson(x = sce, assay.type ='logFPKM')
  # means <- rowMeans(assays(sce)$logFPKM)
  # vars <- rowVars(assays(sce)$logFPKM)
  # fit <- scran::fitTrendVar(means, vars)
  # fit$trend <- new.trend
  dec <- scran::modelGeneVar(sce, assay.type ='logFPKM')
  set.seed(123)
  sce <- scran::denoisePCA(sce, technical = new.trend, assay.type ='logFPKM')
  reducedDimNames(sce) #[1] "PCA"
  # k: An integer scalar specifying the number of nearest neighbors to consider during graph construction.
  SNNGraph.ID <- scran::buildSNNGraph(sce, k= parameters$k, use.dimred = 'PCA')
  SNNGraph.ID <- igraph::cluster_walktrap(SNNGraph.ID)$membership
  # check the agreeement between new and original clusters using the SNNGraph method
  table(as.vector(sce$age), SNNGraph.ID)
  #       SNNGraph.ID
  #         1  2  3
  # 14.5   0 40  5
  # 16.5   1  2 24
  # 18.5  12  0  3
  # Adult 44  0  0
  colData(sce)$C_SNNGraph_k10 = factor(SNNGraph.ID)
  # NOTE(review): the k=20 graph below is built and immediately overwritten by
  # the k=8 graph on the next line — only k=8 membership is kept.
  SNNGraph.ID <- scran::buildSNNGraph(sce, k= 20, use.dimred = 'PCA') # the same
  SNNGraph.ID <- scran::buildSNNGraph(sce, k= 8, use.dimred = 'PCA') # the same
  SNNGraph.ID <- igraph::cluster_walktrap(SNNGraph.ID)$membership
  table(as.vector(sce$age), SNNGraph.ID)
  #        SNNGraph.ID
  #          1  2  3  4  5  6  7
  # 14.5    7  0  0  0 16  0 22
  # 16.5   24  0  0  0  2  1  0
  # 18.5    2  0  0 11  0  2  0
  # Adult   0 12 22  1  0  9  0
  colData(sce)$C_SNNGraph_k8 = factor(SNNGraph.ID)
  #save(sce, file=paste0(subDir,'/',j), compress=TRUE)   # !!!!!!!!!!!!!!!!!
}
################################################################
# 8.2) # extract consensus clusters
# refer to http://127.0.0.1:11637/library/SC3/doc/SC3.html
# needs to customize ks accordingly per dataset, the larger range the longer running time.
# In this case, ks=3,5,7 are tested,
# and for the related soft-thresholding clustering (QuanTC method),
# we had take average of the Consensus.Cluster-agreeable clustering results of k=3,5,7 to get cell-cell similarity matrix M
##################################################################
if(j=="AT2.sce.RData") load('AT2.sce_SC3.RData') else load('sce_SC3.RData')  #!!!!!!!!!!!!!!!
{
  sce.sc3  # optimale num =5 by SC3
  ## manually pick the optimal matches to follow up
  table(as.vector(sce$age), colData(sce.sc3)$sc3_5_clusters)
  #         1  2  3  4  5
  # 14.5   0  0  0 37  8
  # 16.5   0  0 27  0  0
  # 18.5  14  0  0  0  1
  # Adult  0 44  0  0  0
  # load(file='sce_E8.25_HEP.RData')
  colData(sce)$C_consensus_ks3 = colData(sce.sc3)$sc3_3_clusters
  colData(sce)$C_consensus_ks5 = colData(sce.sc3)$sc3_5_clusters
  colData(sce)$C_consensus_ks7 = colData(sce.sc3)$sc3_7_clusters
  rm(sce.sc3)
  # save(sce, file=paste0(subDir,'/',j), compress=TRUE)   # !!!!!!!!!!!!!!!!!
}
################################################################
# 8.3) # extract Leiden clustering (using Seurat)
# https://satijalab.org/seurat/articles/get_started.html
# Leiden requires the leidenalg python.
# We apply the Seurat packge, by setting the algorithm =4 in the function FindClusters()
# This parameter decides the algorithm for modularity optimization
# (1 = original Louvain algorithm; 2 = Louvain algorithm with multilevel refinement;
# 3 = SLM algorithm; 4 = Leiden algorithm).
# The resolution parameter: use a value above (below) 1.0 if you want to obtain a larger (smaller) number of communities.
# Seurat author recommended that:
# We find that setting this parameter between 0.4-1.2 typically returns good results for single-cell datasets of around 3K cells.
###################################################################
# generate a pseudo count to run Seurat
{
  logcounts(sce) <- assays(sce)$logFPKM
  counts(sce) <- as.matrix(2^logcounts(sce)-1)
  # convert from SingleCellExperiment
  sce.seurat <- as.Seurat(sce)
  # Warning: Feature names cannot have underscores ('_'), replacing with dashes ('-')
  sce.seurat
  # Computes the k.param nearest neighbors for a given dataset
  ndim = dim(sce.seurat[['PCA']])[2]
  sce.seurat <- FindNeighbors(sce.seurat, reduction = "PCA", k.param = parameters$k, dims = 1:ndim)
  sce.seurat <- FindClusters(sce.seurat, resolution = 0.4, algorithm = 4) # smaller number of communities
  table(Idents(sce.seurat), as.vector(colData(sce)$age))
  #     14.5 16.5 18.5 Adult
  # 1    0    0   11    35
  # 2    6   24    2     0
  # 3   24    0    0     0
  # 4   15    2    0     0
  # 5    0    1    2     9
  colData(sce)$C_Leiden_0.4 = Idents(sce.seurat)
  sce.seurat <- FindClusters(sce.seurat, resolution = 0.8, algorithm = 4) # larger number of communities
  table(Idents(sce.seurat), as.vector(colData(sce)$age))
  #    14.5 16.5 18.5 Adult
  # 1    6   24    2     0
  # 2   24    0    0     0
  # 3    0    0    0    22
  # 4   15    2    0     0
  # 5    0    0   11     1
  # 6    0    0    0    12
  # 7    0    1    2     9
  colData(sce)$C_Leiden_0.8 = Idents(sce.seurat)
  sce.seurat <- FindClusters(sce.seurat, resolution = 1.2, algorithm = 4) # larger number of communities
  table(Idents(sce.seurat), as.vector(colData(sce)$age))
  #    14.5 16.5 18.5 Adult
  # 1   24    0    0     0
  # 2    0    0    0    22
  # 3    6   10    2     0
  # 4   15    2    0     0
  # 5    0   14    0     0
  # 6    0    0   11     1
  # 7    0    1    2     9
  # 8    0    0    0    12
  colData(sce)$C_Leiden_1.2 = Idents(sce.seurat)
  # In this case, remove the pseudo counts
  counts(sce) <- logcounts(sce) <- NULL
  ## save(sce, file=paste0(subDir,'/',j), compress=TRUE)   # !!!!!!!!!!!!!!!!!
  rm(sce.seurat)
}
################################################################
# 8.4 ) # extract the QUANTC-assigned clusters
# k=4 was optimalized by QuanTC pipeline
# refer to QuanTC_soft.thresholding_clusters.m
##################################################################
{
  C_TC <- read.table(paste0(QuanTCDir,'C_TC.txt'))
  C_TC <- C_TC[,1]
  length(C_TC) #[1] 131
  index_TC <- read.table(paste0(QuanTCDir,'index_TC.txt'))
  index_TC <- index_TC[,1]
  unique(C_TC[index_TC]) # 5 verified the C_TC is the cluster ID generated by QuanTC
  ## relabel the QuanTC cluster IDs (TC = transition cells is last) to be consistent with those shown in Fig S2
  tmp <- data.frame(C_TC)
  tmp <- tmp %>% mutate(C_TC = replace(C_TC, C_TC == 1, 'C1'))
  tmp <- tmp %>% mutate(C_TC = replace(C_TC, C_TC == 2, 'C2'))
  tmp <- tmp %>% mutate(C_TC = replace(C_TC, C_TC == 3, 'C3'))
  tmp <- tmp %>% mutate(C_TC = replace(C_TC, C_TC == 4, 'C4'))
  tmp <- tmp %>% mutate(C_TC = replace(C_TC, C_TC == 5, 'TC'))
  table(as.vector(sce$age), tmp[,1])
  #        C1 C2 C3 C4 TC
  # 14.5  36  0  0  0  9
  # 16.5   0  0 13  0 14
  # 18.5   0  0  0  2 13
  # Adult  0 43  0  0  1
  colData(sce)$C_Soft <- tmp[,1]
}
## save(sce, file=paste0(subDir,'/',j), compress=TRUE)   # !!!!!!!!!!!!!!!!!
## 8.5) plot different clustering results
####################################################################
library(scater)
sce <- runTSNE(sce, dimred="PCA")
{
  x <- grep('C_', colnames(colData(sce)))
  (n=length(x)) # 11
  pdf(file=paste0(subDir,"/TSNE.",j,"_clustering_methods.pdf"), width=10, height=9)
  gridExtra::grid.arrange(
    plotReducedDim(sce, dimred='TSNE', colour_by='putative_cell_type', #add_legend=FALSE,
                   text_by='putative_cell_type', text_size = 4, text_colour='black', point_size=0.5) +
      ggtitle('putative_cell_type')  #+ ylim(5,20)
    ,plotReducedDim(sce, dimred='TSNE',colour_by='C_by_marker', #add_legend=FALSE,
                    text_by='C_by_marker', text_size = 4, text_colour='black', point_size=0.5) +
      ggtitle('C_by gene markers')  #+ ylim(5,20)
    ,plotReducedDim(sce, dimred='TSNE',colour_by='C_Monocle.k4', #add_legend=FALSE,
                    text_by='C_Monocle.k4', text_size = 4, text_colour='black', point_size=0.5) +
      ggtitle('C_Monocle.k4 with 10k genes')  #+ ylim(5,20)
    ,plotReducedDim(sce, dimred='TSNE',colour_by='C_consensus_ks3', #add_legend=FALSE,
                    text_by='C_consensus_ks3', text_size = 4, text_colour='black', point_size=0.5) +
      ggtitle('C_consensus_ks3')  #+ ylim(5,20)
    ,plotReducedDim(sce, dimred='TSNE',colour_by='C_consensus_ks5', #add_legend=FALSE,
                    text_by='C_consensus_ks5', text_size = 4, text_colour='black', point_size=0.5) +
      ggtitle('consensus_ks5')  #+ ylim(5,20)
    ,plotReducedDim(sce, dimred='TSNE',colour_by='C_consensus_ks7', #add_legend=FALSE,
                    text_by='C_consensus_ks7', text_size = 4, text_colour='black', point_size=0.5) +
      ggtitle('consensus_ks7')  #+ ylim(5,20)
    ,plotReducedDim(sce, dimred='TSNE',colour_by='C_Leiden_0.4', #add_legend=FALSE,
                    text_by='C_Leiden_0.4', text_size = 4, text_colour='black', point_size=0.5) +
      ggtitle('Leiden_0.4')  #+ ylim(5,20)
    ,plotReducedDim(sce, dimred='TSNE',colour_by='C_Leiden_0.8', #add_legend=FALSE,
                    text_by='C_Leiden_0.8', text_size = 4, text_colour='black', point_size=0.5) +
      ggtitle('Leiden_0.8')  #+ ylim(5,20)
    ,plotReducedDim(sce, dimred='TSNE',colour_by='C_Leiden_1.2', #add_legend=FALSE,
                    text_by='C_Leiden_1.2', text_size = 4, text_colour='black', point_size=0.5) +
      ggtitle('Leiden_1.2')  #+ ylim(5,20)
    ,ncol=3
  )
  gridExtra::grid.arrange(
    plotReducedDim(sce, dimred='TSNE', colour_by='C_SNNGraph_k10', #add_legend=FALSE,
                   text_by='C_SNNGraph_k10', text_size = 4, text_colour='black', point_size=0.5) +
      ggtitle('C_SNNGraph_k10')
    ,plotReducedDim(sce, dimred='TSNE',colour_by='C_SNNGraph_k8', #add_legend=FALSE,
                    text_by='C_SNNGraph_k8', text_size = 4, text_colour='black', point_size=0.5) +
      ggtitle('C_SNNGraph_k8')  #+ ylim(5,20)
    ,plotReducedDim(sce, dimred='TSNE',colour_by='C_Soft', #add_legend=FALSE,
                    text_by='C_Soft', text_size = 4, text_colour='black', point_size=0.5) +
      ggtitle('soft-thresholding clusting')  #+ ylim(5,20)
    ,plotReducedDim(sce, dimred='TSNE',colour_by='age', #add_legend=FALSE,
                    text_by='age', text_size = 4, text_colour='black', point_size=0.5) +
      ggtitle('Collection time')  #+ ylim(5,20)
    ,ncol=3, nrow=3)
  dev.off()
}
}
############################################################################
## section 9) construct and visualize the trajectory using scater package
############################################################################
## For each exported sce: (re)compute PCA/tSNE if absent, aggregate cells per
## grouping (age for the full set, Leiden clusters for the AT2 subset), build
## a minimum spanning tree over cluster centroids in PCA space, and overlay
## its edges on the tSNE plot.
fid = c('AT2.sce.RData','sce.RData')
for(k in 1:length(fid)){
  load(paste0('BioTIP_GSE52583_robustness/',fid[k]))
  if(!'PCA' %in% reducedDimNames(sce)){
    new.trend <- scran::modelGeneVarByPoisson(x = sce, assay.type ='logFPKM')
    dec <- scran::modelGeneVar(sce, assay.type ='logFPKM')
    set.seed(123)
    sce <- scran::denoisePCA(sce, technical = new.trend, assay.type ='logFPKM')
    reducedDimNames(sce) #[1] "PCA"
  }
  if(!'TSNE' %in% reducedDimNames(sce)){
    sce <- runTSNE(sce, dimred="PCA")
  }
  ## add the label to show
  colData(sce)$label = paste(sce$age, sce$putative_cell_type, sep='_')
  library(scater)
  # pseudo counts
  #counts(sce) = 2^(logcounts(sce))
  # PDF name embeds the cell count, so the two datasets write distinct files.
  pdf(file=paste0("BioTIP_GSE52583_robustness/trajectory_",ncol(sce),"cells.pdf"))
  if(fid[k]=='sce.RData') {
    by.cluster <- aggregateAcrossCells(sce, ids=sce$age, use.assay.type='logFPKM')} else {
      by.cluster <- aggregateAcrossCells(sce, ids=sce$C_Leiden_0.4, use.assay.type='logFPKM')
    }
  # NOTE(review): assumes aggregateAcrossCells carries the PCA/TSNE reduced
  # dims through to the aggregated object — confirm for the scater version used.
  centroids <- reducedDim(by.cluster, "PCA")
  dmat <- dist(centroids)
  dmat <- as.matrix(dmat)
  g <- igraph::graph.adjacency(dmat, mode = "undirected", weighted = TRUE)
  mst <- igraph::minimum.spanning.tree(g)
  plot(mst)
  # Each MST edge becomes a segment between the two cluster centroids in tSNE.
  pairs <- Matrix::which(mst[] > 0, arr.ind=TRUE)
  coords <- reducedDim(by.cluster, "TSNE")
  group <- rep(seq_len(nrow(pairs)), 2)
  stuff <- data.frame(rbind(coords[pairs[,1],], coords[pairs[,2],]), group)
  plotTSNE(sce, colour_by="age",
           text_by="label", text_size = 8)
  plotTSNE(sce, colour_by="age",
           text_by="label", text_size = 8) +
    geom_line(data=stuff, mapping=aes(x=X1, y=X2, group=group))
  dev.off()
}
# Sanity check on the AT2 subset: Leiden (res=0.4) clusters vs collection age.
k=1
load(paste0('BioTIP_GSE52583_robustness/',fid[k]))
table(sce$age, sce$C_Leiden_0.4)
#        1  2  3  4  5
# 14.5   0  6 24 15  0
# 16.5   0 24  0  2  1
# 18.5  11  2  0  0  2
# Adult 35  0  0  0  9
|
cat("\014") # Clear your console
rm(list = ls()) #clear your environment
# NOTE(review): rm(list = ls()) and setwd() in a script are generally
# discouraged (they assume/alter interactive session state); kept here as
# this is a one-off analysis script
########################## Load in header file ######################## #
setwd("~/git/of-dollars-and-data")
source(file.path(paste0(getwd(),"/header.R")))
########################## Load in Libraries ########################## #
library(MASS)
library(tidyr)
library(dplyr)
########################## Start Program Here ######################### #
#Initial setup for full simulation
# Found this online for correlated samples:
#
# http://stats.stackexchange.com/questions/82261/generating-correlated-distributions-with-a-certain-mean-and-standard-deviation
#
# Set the initial client capital and number of simulations
n_simulations <- 10000  # Monte Carlo paths per parameter combination
initial_client_capital <- 1 * 10^6  # both clients start with $1M
n_years <- 40  # investment horizon in years
# This seed allows us to have reproducible random sampling
set.seed(12345)
# Set the mean and sd of the returns for the market
mu_market <- 0.1  # 10% mean annual market return
sd_market <- 0.2  # 20% annual volatility
# run_sim(): Monte Carlo comparison of a hedge-fund client versus an
# index-fund client over n_years of correlated annual returns.
#
# For each of n_simulations paths, both clients start with
# initial_client_capital; the index-fund client pays market_management_fee
# each year, while the hedge-fund client pays fees according to the flags
# below.  Prints and returns the fraction of simulations in which the
# hedge-fund client finishes ahead of the index-fund client, net of fees.
#
# Depends on globals defined earlier in this script: n_simulations,
# initial_client_capital, n_years, mu_market, sd_market, and on
# MASS::mvrnorm().
#
# Arguments:
#   hf_outperformance              - hedge fund's annual mean-return edge over the market
#   hf_management_fee              - management fee as a fraction of start-of-year assets
#   hf_performance_fee             - performance fee as a fraction of qualifying gains
#   hf_performance_above_benchmark - 1: performance fee only on return above the market;
#                                    0: on any positive hedge-fund return
#   management_and_performance_fee - 1: charge management fee PLUS performance fee;
#                                    0: charge the GREATER of the two
#   hf_deduct_fees                 - 1: in paths where no performance fee was earned,
#                                    credit that year's management fee against next
#                                    year's fee (only used by the greater-of-fees model)
#   market_management_fee          - index fund expense ratio
#   hf_corr_to_market              - correlation between hedge-fund and market returns
run_sim <- function(hf_outperformance,
hf_management_fee,
hf_performance_fee,
hf_performance_above_benchmark,
management_and_performance_fee,
hf_deduct_fees,
market_management_fee,
hf_corr_to_market
){
# Set the mean and standard deviation for the hedge fund
# This assumes some level of outperformance by the hedge fund
mu_hf <- mu_market + hf_outperformance
sd_hf <- sd_market
# Correlation between the market and the hedge fund (~0.9 based on recent data)
rho <- hf_corr_to_market
# Set the correlation matrix
cor_matrix <- matrix(c(1, rho,
rho, 1),
ncol = 2, byrow = TRUE)
# Set the variance matrix
# (outer product of the two standard deviations: entry [i, j] = sd_i * sd_j)
var_matrix <- c(sd_market, sd_hf) %*% t(c(sd_market, sd_hf))
# Set the covariance matrix
cov_matrix <- var_matrix * cor_matrix
# Initialize value path matrices
# One is for the client of the hedge fund and one is for the client of the index fund
client_hf_matrix <- matrix(NA, nrow = n_simulations, ncol = n_years)
client_market_matrix <- matrix(NA, nrow = n_simulations, ncol = n_years)
# Simulate each year
for (i in 1:n_years){
# Generate the correlated returns for each simulation
# Note ret[, 1] == market return, ret[, 2] == hedge fund return
# empirical = TRUE rescales the draws so the SAMPLE mean/covariance
# match mu and Sigma exactly within each year
ret <- mvrnorm(n_simulations, mu = c(mu_market, mu_hf),
Sigma = cov_matrix,
empirical = TRUE)
# Determine if there is any ret_above_benchmark for each simulation
if (hf_performance_above_benchmark == 1){
ret_above_benchmark <- pmax(0, ret[, 2] - ret[, 1])
} else {
ret_above_benchmark <- pmax(0, ret[, 2])
}
if (i == 1){
# Year 1: grow the initial capital; fees are based on initial_client_capital
client_market_matrix[, i] <- initial_client_capital * (1 + ret[, 1]) * (1 - market_management_fee)
management_fee <- initial_client_capital * hf_management_fee
if (management_and_performance_fee == 0){
# Greater-of-fees model: the fund keeps whichever fee is larger
performance_fee <- initial_client_capital * ret_above_benchmark * hf_performance_fee
hf_final_fee <- pmax(management_fee, performance_fee)
} else {
# Additive model: performance fee is charged on gains net of the
# management fee (floored at zero), then both fees are collected
performance_fee <- pmax(0, ((initial_client_capital * ret_above_benchmark - management_fee) * hf_performance_fee))
hf_final_fee <- management_fee + performance_fee
}
client_hf_matrix[, i] <- (initial_client_capital * (1 + ret[, 2])) - hf_final_fee
# Carry-forward credit: in paths where no performance fee was earned,
# remember this year's management fee to offset next year's fee
if (hf_deduct_fees == 1){
fee_deduction <- sapply(performance_fee, function(x){ ifelse(x == 0, management_fee, 0)})
}else {
fee_deduction <- 0
}
} else {
# Years 2+: compound last year's balances; hedge-fund fees are based
# on the prior year-end balance
client_market_matrix[, i] <- client_market_matrix[, (i - 1)] * (1 + ret[, 1]) * (1 - market_management_fee)
management_fee <- client_hf_matrix[, (i - 1)] * hf_management_fee
if (management_and_performance_fee == 0){
performance_fee <- client_hf_matrix[, (i - 1)] * ret_above_benchmark * hf_performance_fee
# Apply any credit carried over from the previous year, never below zero
hf_final_fee <- pmax(0, pmax(management_fee, performance_fee) - fee_deduction)
} else {
performance_fee <- pmax(0, ((client_hf_matrix[, (i - 1)] * ret_above_benchmark - management_fee) * hf_performance_fee))
hf_final_fee <- management_fee + performance_fee
}
client_hf_matrix[, i] <- (client_hf_matrix[, (i - 1)] * (1 + ret[, 2])) - hf_final_fee
# Refresh the carry-forward credit for the next iteration
if (hf_deduct_fees == 1){
fee_deduction <- sapply(performance_fee, function(x){ ifelse(x == 0, management_fee, 0)})
}else {
fee_deduction <- 0
}
}
}
# Report and return the share of paths where the hedge-fund client wins
print(paste0("The hedge fund outperformed the market (net of fees) in ", sum(client_hf_matrix[, n_years] > client_market_matrix[, n_years])/n_simulations * 100, "% of simulations"))
return(sum(client_hf_matrix[, n_years] > client_market_matrix[, n_years])/n_simulations)
}
# Code to do a quick sample run for testing
# I could delete it and do a checkout later in Git, but I am lazy
# run_sim(
# hf_outperformance = 0.01,
# hf_management_fee = 0,
# hf_performance_fee = 0.5,
# hf_performance_above_benchmark = 1,
# management_and_performance_fee = 0,
# hf_deduct_fees = 0,
# market_management_fee = 0.0005,
# hf_corr_to_market = 0.9
# )
# Create a matrix to store the results
# (zero-row data.frame; rows are appended by index inside the loops below)
results <- data.frame(hf_outperformance = numeric(),
hf_management_fee = numeric(),
hf_performance_fee = numeric(),
hf_performance_above_benchmark = integer(),
management_and_performance_fee = integer(),
hf_deduct_fees = integer(),
market_management_fee = numeric(),
hf_corr_to_market = numeric(),
hf_outperform_pct = numeric(),
scenario = integer(),
mu_market = numeric(),
sd_market = numeric()
)
# Loop through outperformance, correlation, and other sensitivities
# o = outperformance (0% to 4%), c = hedge fund / market correlation,
# scenario = fee structure (see below)
# NOTE(review): the loop variable `c` shadows base::c inside the loop body;
# harmless here since c() is not called inside, but easy to trip over
i <- 1
for (o in seq(0, 0.04, by = 0.01)){
for (c in c(0, 0.5, 0.9)){
for (scenario in seq(1, 3, by = 1)){
# Fee-structure scenarios:
#   1: 1% management OR 30% performance fee above benchmark (greater of
#      the two), with unearned management fees credited forward
#   2: 2% management PLUS 20% performance fee on any positive return
#   3: 1% management fee only, no performance fee
if (scenario == 1){
mf <- 0.01
pf <- 0.3
pab <- 1
map <- 0
df <- 1
} else if (scenario == 2){
mf <- 0.02
pf <- 0.2
pab <- 0
map <- 1
df <- 0
} else if (scenario == 3){
mf <- 0.01
pf <- 0.0
pab <- 0
map <- 1
df <- 0
}
# Record the parameter combination alongside its simulated result
results[i, "hf_outperformance"] <- o
results[i, "hf_management_fee"] <- mf
results[i, "hf_performance_fee"] <- pf
results[i, "hf_performance_above_benchmark"] <- pab
results[i, "management_and_performance_fee"] <- map
results[i, "hf_deduct_fees"] <- df
results[i, "market_management_fee"] <- 0.0005
results[i, "hf_corr_to_market"] <- c
results[i, "scenario"] <- scenario
results[i, "mu_market"] <- mu_market
results[i, "sd_market"] <- sd_market
# Run the simulation for this combination and store the win rate
results[i, "hf_outperform_pct"] <- run_sim(
hf_outperformance = o,
hf_management_fee = mf,
hf_performance_fee = pf,
hf_performance_above_benchmark = pab,
management_and_performance_fee = map,
hf_deduct_fees = df,
market_management_fee = 0.0005,
hf_corr_to_market = c
)
i <- i + 1
print(i)  # progress indicator
}
}
}
# Save down RDS given the loops above take a few minutes to run
# (localdir is presumably defined in header.R -- confirm)
saveRDS(results, paste0(localdir, "13-hf-correlation-results.Rds"))
# ############################ End ################################## #
| /build/13-simulate-correlated-hedge-fund-returns.R | no_license | ishanbose/of-dollars-and-data | R | false | false | 8,419 | r | cat("\014") # Clear your console
rm(list = ls()) #clear your environment
########################## Load in header file ######################## #
setwd("~/git/of-dollars-and-data")
source(file.path(paste0(getwd(),"/header.R")))
########################## Load in Libraries ########################## #
library(MASS)
library(tidyr)
library(dplyr)
########################## Start Program Here ######################### #
#Initial setup for full simulation
# Found this online for correlated samples:
#
# http://stats.stackexchange.com/questions/82261/generating-correlated-distributions-with-a-certain-mean-and-standard-deviation
#
# Set the initial client capital and number of simulations
n_simulations <- 10000
initial_client_capital <- 1 * 10^6
n_years <- 40
# This seed allows us to have reproducible random sampling
set.seed(12345)
# Set the mean and sd of the returns for the market
mu_market <- 0.1
sd_market <- 0.2
# run_sim(): Monte Carlo comparison of a hedge-fund client versus an
# index-fund client over n_years of correlated annual returns.
#
# For each of n_simulations paths, both clients start with
# initial_client_capital; the index-fund client pays market_management_fee
# each year, while the hedge-fund client pays fees according to the flags
# below.  Prints and returns the fraction of simulations in which the
# hedge-fund client finishes ahead of the index-fund client, net of fees.
#
# Depends on globals defined earlier in this script: n_simulations,
# initial_client_capital, n_years, mu_market, sd_market, and on
# MASS::mvrnorm().
#
# Arguments:
#   hf_outperformance              - hedge fund's annual mean-return edge over the market
#   hf_management_fee              - management fee as a fraction of start-of-year assets
#   hf_performance_fee             - performance fee as a fraction of qualifying gains
#   hf_performance_above_benchmark - 1: performance fee only on return above the market;
#                                    0: on any positive hedge-fund return
#   management_and_performance_fee - 1: charge management fee PLUS performance fee;
#                                    0: charge the GREATER of the two
#   hf_deduct_fees                 - 1: in paths where no performance fee was earned,
#                                    credit that year's management fee against next
#                                    year's fee (only used by the greater-of-fees model)
#   market_management_fee          - index fund expense ratio
#   hf_corr_to_market              - correlation between hedge-fund and market returns
run_sim <- function(hf_outperformance,
hf_management_fee,
hf_performance_fee,
hf_performance_above_benchmark,
management_and_performance_fee,
hf_deduct_fees,
market_management_fee,
hf_corr_to_market
){
# Set the mean and standard deviation for the hedge fund
# This assumes some level of outperformance by the hedge fund
mu_hf <- mu_market + hf_outperformance
sd_hf <- sd_market
# Correlation between the market and the hedge fund (~0.9 based on recent data)
rho <- hf_corr_to_market
# Set the correlation matrix
cor_matrix <- matrix(c(1, rho,
rho, 1),
ncol = 2, byrow = TRUE)
# Set the variance matrix
# (outer product of the two standard deviations: entry [i, j] = sd_i * sd_j)
var_matrix <- c(sd_market, sd_hf) %*% t(c(sd_market, sd_hf))
# Set the covariance matrix
cov_matrix <- var_matrix * cor_matrix
# Initialize value path matrices
# One is for the client of the hedge fund and one is for the client of the index fund
client_hf_matrix <- matrix(NA, nrow = n_simulations, ncol = n_years)
client_market_matrix <- matrix(NA, nrow = n_simulations, ncol = n_years)
# Simulate each year
for (i in 1:n_years){
# Generate the correlated returns for each simulation
# Note ret[, 1] == market return, ret[, 2] == hedge fund return
# empirical = TRUE rescales the draws so the SAMPLE mean/covariance
# match mu and Sigma exactly within each year
ret <- mvrnorm(n_simulations, mu = c(mu_market, mu_hf),
Sigma = cov_matrix,
empirical = TRUE)
# Determine if there is any ret_above_benchmark for each simulation
if (hf_performance_above_benchmark == 1){
ret_above_benchmark <- pmax(0, ret[, 2] - ret[, 1])
} else {
ret_above_benchmark <- pmax(0, ret[, 2])
}
if (i == 1){
# Year 1: grow the initial capital; fees are based on initial_client_capital
client_market_matrix[, i] <- initial_client_capital * (1 + ret[, 1]) * (1 - market_management_fee)
management_fee <- initial_client_capital * hf_management_fee
if (management_and_performance_fee == 0){
# Greater-of-fees model: the fund keeps whichever fee is larger
performance_fee <- initial_client_capital * ret_above_benchmark * hf_performance_fee
hf_final_fee <- pmax(management_fee, performance_fee)
} else {
# Additive model: performance fee is charged on gains net of the
# management fee (floored at zero), then both fees are collected
performance_fee <- pmax(0, ((initial_client_capital * ret_above_benchmark - management_fee) * hf_performance_fee))
hf_final_fee <- management_fee + performance_fee
}
client_hf_matrix[, i] <- (initial_client_capital * (1 + ret[, 2])) - hf_final_fee
# Carry-forward credit: in paths where no performance fee was earned,
# remember this year's management fee to offset next year's fee
if (hf_deduct_fees == 1){
fee_deduction <- sapply(performance_fee, function(x){ ifelse(x == 0, management_fee, 0)})
}else {
fee_deduction <- 0
}
} else {
# Years 2+: compound last year's balances; hedge-fund fees are based
# on the prior year-end balance
client_market_matrix[, i] <- client_market_matrix[, (i - 1)] * (1 + ret[, 1]) * (1 - market_management_fee)
management_fee <- client_hf_matrix[, (i - 1)] * hf_management_fee
if (management_and_performance_fee == 0){
performance_fee <- client_hf_matrix[, (i - 1)] * ret_above_benchmark * hf_performance_fee
# Apply any credit carried over from the previous year, never below zero
hf_final_fee <- pmax(0, pmax(management_fee, performance_fee) - fee_deduction)
} else {
performance_fee <- pmax(0, ((client_hf_matrix[, (i - 1)] * ret_above_benchmark - management_fee) * hf_performance_fee))
hf_final_fee <- management_fee + performance_fee
}
client_hf_matrix[, i] <- (client_hf_matrix[, (i - 1)] * (1 + ret[, 2])) - hf_final_fee
# Refresh the carry-forward credit for the next iteration
if (hf_deduct_fees == 1){
fee_deduction <- sapply(performance_fee, function(x){ ifelse(x == 0, management_fee, 0)})
}else {
fee_deduction <- 0
}
}
}
# Report and return the share of paths where the hedge-fund client wins
print(paste0("The hedge fund outperformed the market (net of fees) in ", sum(client_hf_matrix[, n_years] > client_market_matrix[, n_years])/n_simulations * 100, "% of simulations"))
return(sum(client_hf_matrix[, n_years] > client_market_matrix[, n_years])/n_simulations)
}
# Code to do a quick sample run for testing
# I could delete it and do a checkout later in Git, but I am lazy
# run_sim(
# hf_outperformance = 0.01,
# hf_management_fee = 0,
# hf_performance_fee = 0.5,
# hf_performance_above_benchmark = 1,
# management_and_performance_fee = 0,
# hf_deduct_fees = 0,
# market_management_fee = 0.0005,
# hf_corr_to_market = 0.9
# )
# Create a matrix to store the results
results <- data.frame(hf_outperformance = numeric(),
hf_management_fee = numeric(),
hf_performance_fee = numeric(),
hf_performance_above_benchmark = integer(),
management_and_performance_fee = integer(),
hf_deduct_fees = integer(),
market_management_fee = numeric(),
hf_corr_to_market = numeric(),
hf_outperform_pct = numeric(),
scenario = integer(),
mu_market = numeric(),
sd_market = numeric()
)
# Loop through outperformance, correlation, and other sensitivities
i <- 1
for (o in seq(0, 0.04, by = 0.01)){
for (c in c(0, 0.5, 0.9)){
for (scenario in seq(1, 3, by = 1)){
if (scenario == 1){
mf <- 0.01
pf <- 0.3
pab <- 1
map <- 0
df <- 1
} else if (scenario == 2){
mf <- 0.02
pf <- 0.2
pab <- 0
map <- 1
df <- 0
} else if (scenario == 3){
mf <- 0.01
pf <- 0.0
pab <- 0
map <- 1
df <- 0
}
results[i, "hf_outperformance"] <- o
results[i, "hf_management_fee"] <- mf
results[i, "hf_performance_fee"] <- pf
results[i, "hf_performance_above_benchmark"] <- pab
results[i, "management_and_performance_fee"] <- map
results[i, "hf_deduct_fees"] <- df
results[i, "market_management_fee"] <- 0.0005
results[i, "hf_corr_to_market"] <- c
results[i, "scenario"] <- scenario
results[i, "mu_market"] <- mu_market
results[i, "sd_market"] <- sd_market
results[i, "hf_outperform_pct"] <- run_sim(
hf_outperformance = o,
hf_management_fee = mf,
hf_performance_fee = pf,
hf_performance_above_benchmark = pab,
management_and_performance_fee = map,
hf_deduct_fees = df,
market_management_fee = 0.0005,
hf_corr_to_market = c
)
i <- i + 1
print(i)
}
}
}
# Save down RDS given the loops above take a few minutes to run
saveRDS(results, paste0(localdir, "13-hf-correlation-results.Rds"))
# ############################ End ################################## #
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cloudfront_operations.R
\name{cloudfront_get_streaming_distribution}
\alias{cloudfront_get_streaming_distribution}
\title{Gets information about a specified RTMP distribution, including the
distribution configuration}
\usage{
cloudfront_get_streaming_distribution(Id)
}
\arguments{
\item{Id}{[required] The streaming distribution's ID.}
}
\value{
A list with the following syntax:\preformatted{list(
StreamingDistribution = list(
Id = "string",
ARN = "string",
Status = "string",
LastModifiedTime = as.POSIXct(
"2015-01-01"
),
DomainName = "string",
ActiveTrustedSigners = list(
Enabled = TRUE|FALSE,
Quantity = 123,
Items = list(
list(
AwsAccountNumber = "string",
KeyPairIds = list(
Quantity = 123,
Items = list(
"string"
)
)
)
)
),
StreamingDistributionConfig = list(
CallerReference = "string",
S3Origin = list(
DomainName = "string",
OriginAccessIdentity = "string"
),
Aliases = list(
Quantity = 123,
Items = list(
"string"
)
),
Comment = "string",
Logging = list(
Enabled = TRUE|FALSE,
Bucket = "string",
Prefix = "string"
),
TrustedSigners = list(
Enabled = TRUE|FALSE,
Quantity = 123,
Items = list(
"string"
)
),
PriceClass = "PriceClass_100"|"PriceClass_200"|"PriceClass_All",
Enabled = TRUE|FALSE
)
),
ETag = "string"
)
}
}
\description{
Gets information about a specified RTMP distribution, including the
distribution configuration.
}
\section{Request syntax}{
\preformatted{svc$get_streaming_distribution(
Id = "string"
)
}
}
\keyword{internal}
| /cran/paws.networking/man/cloudfront_get_streaming_distribution.Rd | permissive | TWarczak/paws | R | false | true | 1,898 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cloudfront_operations.R
\name{cloudfront_get_streaming_distribution}
\alias{cloudfront_get_streaming_distribution}
\title{Gets information about a specified RTMP distribution, including the
distribution configuration}
\usage{
cloudfront_get_streaming_distribution(Id)
}
\arguments{
\item{Id}{[required] The streaming distribution's ID.}
}
\value{
A list with the following syntax:\preformatted{list(
StreamingDistribution = list(
Id = "string",
ARN = "string",
Status = "string",
LastModifiedTime = as.POSIXct(
"2015-01-01"
),
DomainName = "string",
ActiveTrustedSigners = list(
Enabled = TRUE|FALSE,
Quantity = 123,
Items = list(
list(
AwsAccountNumber = "string",
KeyPairIds = list(
Quantity = 123,
Items = list(
"string"
)
)
)
)
),
StreamingDistributionConfig = list(
CallerReference = "string",
S3Origin = list(
DomainName = "string",
OriginAccessIdentity = "string"
),
Aliases = list(
Quantity = 123,
Items = list(
"string"
)
),
Comment = "string",
Logging = list(
Enabled = TRUE|FALSE,
Bucket = "string",
Prefix = "string"
),
TrustedSigners = list(
Enabled = TRUE|FALSE,
Quantity = 123,
Items = list(
"string"
)
),
PriceClass = "PriceClass_100"|"PriceClass_200"|"PriceClass_All",
Enabled = TRUE|FALSE
)
),
ETag = "string"
)
}
}
\description{
Gets information about a specified RTMP distribution, including the
distribution configuration.
}
\section{Request syntax}{
\preformatted{svc$get_streaming_distribution(
Id = "string"
)
}
}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/classes.R
\name{show,goc-method}
\alias{show,goc-method}
\alias{show,grain-method}
\alias{show,corridor-method}
\title{Show a \code{grainscape} object}
\usage{
\S4method{show}{goc}(object)
\S4method{show}{grain}(object)
\S4method{show}{corridor}(object)
}
\arguments{
\item{object}{A \code{\link[=goc-class]{goc}},
\code{\link[=grain-class]{grain}}, or \code{corridor} object.}
}
\description{
Custom \code{show} method to safely print the contents of a \code{goc} or
\code{grain} object.
}
| /grainscape/man/show.Rd | no_license | akhikolla/InformationHouse | R | false | true | 553 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/classes.R
\name{show,goc-method}
\alias{show,goc-method}
\alias{show,grain-method}
\alias{show,corridor-method}
\title{Show a \code{grainscape} object}
\usage{
\S4method{show}{goc}(object)
\S4method{show}{grain}(object)
\S4method{show}{corridor}(object)
}
\arguments{
\item{object}{A \code{\link[=goc-class]{goc}},
\code{\link[=grain-class]{grain}}, or \code{corridor} object.}
}
\description{
Custom \code{show} method to safely print the contents of a \code{goc} or
\code{grain} object.
}
|
## ----
## title: "Post_bedtools_analysis"
## input: bedtools coverage results for individual samples
## author: "binhe"
## created: "02/18/2016"
## modified: "04/03/2016", "10/18/2017"
## output: html_document
## ---
# 1. load packages
library(data.table)
library(hexbin)
# 2. load data and annotation
anno <- fread("../data/annotation/C_glabrata_gene_for_mapping_s02-m07-r04.bed")
# NOTE(review): dir()'s `pattern` is a regular expression, not a glob --
# "*.cov.txt" happens to work but "\\.cov\\.txt$" would be the correct form
files <- dir(path="../output/bedtools_cov", pattern="*.cov.txt",full.names=T)
raw <- lapply( files, fread )
# 3. extract information
# (V-column positions below presumably follow the bedtools coverage output
# format -- confirm against the upstream pipeline)
gene.names <- anno$V4
gene.l <- raw[[1]]$V11  # feature lengths, taken from the first sample (assumes identical row order across files -- confirm)
read <- sapply( raw, function(x) x$V9 )  # read counts per feature, one column per sample
frac <- sapply( raw, function(x) x$V12 )  # fraction of feature covered, one column per sample
# 4. Calculate length normalized coverage for each feature
# first multiply # of reads by the read length (75 bp; an earlier version of
# this comment said 50, but the code uses 75 -- confirm which is correct)
# then divide by gene length
norm.cov <- read * 75 / gene.l
# 5. now plot the actual fraction $frac against the expected $norm.cov
pdf(file = paste("../output/bedtools_fraction_coverage_QC_", format(Sys.time(), "%Y-%m-%d"), ".pdf", sep = ""))
hbin <- hexbin(log(norm.cov+0.0001,10), frac+0.0001, xbins = 40, xlab = "log10 normalized coverage", ylab = "% of gene body covered")
plot(hbin, colorcut = c(seq(0,0.1,length=10),1))
dev.off()
# 6. output the count matrix
# get basename "S1" etc.
names <- sapply( strsplit( basename(files), split=".", fixed = TRUE ), "[", 1 )
read <- as.data.table(read)
names(read) <- names
read$gene.names <- gene.names
write.table(as.data.frame(read), file=paste("../output/Ex009_reads_per_transcript_", format(Sys.time(), "%Y-%m-%d"), ".txt", sep = ""), quote=F, row.names=F, sep="\t")
write.table(as.data.frame(frac), file=paste("../output/Ex009_fraction_covered_per_feature_", format(Sys.time(), "%Y-%m-%d"), ".txt", sep = ""), quote=F, row.names=F, sep="\t")
| /E009-Pi-timecourse/01-Cgla-HBZ-2017/02-RNAseq-analysis/code/Post_bedtools_analysis.R | no_license | hezhaobin/more-pho | R | false | false | 1,769 | r | ## ----
## title: "Post_bedtools_analysis"
## input: bedtools coverage results for individual samples
## author: "binhe"
## created: "02/18/2016"
## modified: "04/03/2016", "10/18/2017"
## output: html_document
## ---
# 1. load packages
library(data.table)
library(hexbin)
# 2. load data and annotation
anno <- fread("../data/annotation/C_glabrata_gene_for_mapping_s02-m07-r04.bed")
files <- dir(path="../output/bedtools_cov", pattern="*.cov.txt",full.names=T)
raw <- lapply( files, fread )
# 3. extract information
gene.names <- anno$V4
gene.l <- raw[[1]]$V11
read <- sapply( raw, function(x) x$V9 )
frac <- sapply( raw, function(x) x$V12 )
# 4. Calculate length normalized coverage for each feature
# first multiple # of reads by the length of reads = 50
# then divide by gene length
norm.cov <- read * 75 / gene.l
# 5. now plot the actual fraction $frac against the expected $norm.cov
pdf(file = paste("../output/bedtools_fraction_coverage_QC_", format(Sys.time(), "%Y-%m-%d"), ".pdf", sep = ""))
hbin <- hexbin(log(norm.cov+0.0001,10), frac+0.0001, xbins = 40, xlab = "log10 normalized coverage", ylab = "% of gene body covered")
plot(hbin, colorcut = c(seq(0,0.1,length=10),1))
dev.off()
# 6. output the count matrix
# get basename "S1" etc.
names <- sapply( strsplit( basename(files), split=".", fixed = TRUE ), "[", 1 )
read <- as.data.table(read)
names(read) <- names
read$gene.names <- gene.names
write.table(as.data.frame(read), file=paste("../output/Ex009_reads_per_transcript_", format(Sys.time(), "%Y-%m-%d"), ".txt", sep = ""), quote=F, row.names=F, sep="\t")
write.table(as.data.frame(frac), file=paste("../output/Ex009_fraction_covered_per_feature_", format(Sys.time(), "%Y-%m-%d"), ".txt", sep = ""), quote=F, row.names=F, sep="\t")
|
# KOMP confounding
# Exploratory analysis of confounding between NO2, NO, O3 and temperature
# across Baden-Wuerttemberg (BW) air-quality stations.
library(tidyverse)
library(lubridate)
library(broom)
library(xts)
# Load station measurements (17 BW stations) and the compensated dataset
load("/Users/alfloeffler/Documents/Luftqualitaet/Analysen/NO2/BW_17_stationen.RData")
summary(BW_station_data)
load("~/Documents/Luftqualitaet/Analysen/NO2/BW_stations_komp_WG.RData")
BW_stations_komp_WG %>% summary()
BW_data <- BW_stations_komp_WG
# Add a numeric year column derived from the timestamp
DATEN <- BW_data%>%
mutate(yearID = BW_data$datetime %>%
format_ISO8601(precision = "y") %>%as.numeric() )
DATEN <- DATEN %>% filter(yearID %in% 2014:2019)
# Mean-impute missing pollutant readings (each `.` is the full column vector)
DATEN$NO2 <- DATEN$NO2 %>% replace_na(mean(.,na.rm= TRUE))
DATEN$NO_ <- DATEN$NO_ %>% replace_na(mean(.,na.rm= TRUE))
DATEN$O3 <- DATEN$O3 %>% replace_na(mean(.,na.rm= TRUE))
summary(DATEN)
# Select the data from 2015 to 2020
Daten_15_20 <- BW_station_data %>%
dplyr::select(name,yearID,NO_,NO2) %>%
filter(yearID %in% 2015:2020)
head(Daten_15_20,2)
# Single-station example: slope of NO2 ~ NO for station "Alb"
# NOTE(review): single_slope is computed but not used further below
single_slope <- Daten_15_20 %>% filter(name == "Alb") %>%
lm(NO2 ~ NO_,data = .) %>%
.$coef %>%
.[2]
# Drop records containing NA
Daten_15_20 <- Daten_15_20 %>% na.omit()
# Per-station regression NO2 ~ NO; column names are resolved within each
# group via dplyr data masking
Regr_no2.no <-Daten_15_20 %>% group_by(name)%>%
summarise(slp = lm(NO2 ~ NO_)$coeff[2],
intcpt = lm (NO2 ~ NO_)$coeff[1])
NROW(Daten_15_20)
# Per-station correlation between NO2 and NO
Korr <- Daten_15_20 %>% group_by(name) %>%
summarise (Korrelation =cor(NO2,NO_))
Korr %>% arrange(Korrelation) # between 0.1 and 0.62
# further correlations
# NO2 & O3
Daten_NO2_O3 <- BW_station_data %>%
dplyr::select(yearID,name,datetime,NO2,O3) %>% na.omit()
Korr_NO2_O3 <- Daten_NO2_O3 %>% group_by(name)%>%
summarise (Korrelation =cor(NO2,O3)) %>% arrange(Korrelation)
# NO2 & Temp
NROW(DATEN)#393454
# Drop records from a defective temperature sensor (readings of 45+ degrees)
DATEN_temp <- DATEN %>% filter (Temp < 45)
DATEN_temp <- DATEN_temp %>% na.omit()
# Per-station correlation of each pollutant with temperature
Korrelationen <- DATEN_temp %>% group_by(name)%>%
summarise(Kor_no2.temp= cor(NO2,Temp),Kor_no.temp = cor(NO_,Temp),
Kor_o3.temp = cor(O3,Temp))
# Spot check for station "Rt_"
DATEN_temp %>% filter(name == "Rt_") %>%
summarise (cor(O3,Temp))
| /Komp_confounding.R | no_license | aloes2512/NO2 | R | false | false | 1,999 | r | # KOMP confounding
library(tidyverse)
library(lubridate)
library(broom)
library(xts)
load("/Users/alfloeffler/Documents/Luftqualitaet/Analysen/NO2/BW_17_stationen.RData")
summary(BW_station_data)
load("~/Documents/Luftqualitaet/Analysen/NO2/BW_stations_komp_WG.RData")
BW_stations_komp_WG %>% summary()
BW_data <- BW_stations_komp_WG
DATEN <- BW_data%>%
mutate(yearID = BW_data$datetime %>%
format_ISO8601(precision = "y") %>%as.numeric() )
DATEN <- DATEN %>% filter(yearID %in% 2014:2019)
DATEN$NO2 <- DATEN$NO2 %>% replace_na(mean(.,na.rm= TRUE))
DATEN$NO_ <- DATEN$NO_ %>% replace_na(mean(.,na.rm= TRUE))
DATEN$O3 <- DATEN$O3 %>% replace_na(mean(.,na.rm= TRUE))
summary(DATEN)
# Auswahl der Daten von 2015 bis 2020
Daten_15_20 <- BW_station_data %>%
dplyr::select(name,yearID,NO_,NO2) %>%
filter(yearID %in% 2015:2020)
head(Daten_15_20,2)
single_slope <- Daten_15_20 %>% filter(name == "Alb") %>%
lm(NO2 ~ NO_,data = .) %>%
.$coef %>%
.[2]
# Datensätze mit NA eliminieren
Daten_15_20 <- Daten_15_20 %>% na.omit()
Regr_no2.no <-Daten_15_20 %>% group_by(name)%>%
summarise(slp = lm(NO2 ~ NO_)$coeff[2],
intcpt = lm (NO2 ~ NO_)$coeff[1])
NROW(Daten_15_20)
Korr <- Daten_15_20 %>% group_by(name) %>%
summarise (Korrelation =cor(NO2,NO_))
Korr %>% arrange(Korrelation) # zwischen 0.1 und 0.62
#weitere Korrelationen
#NO2 & O3
Daten_NO2_O3 <- BW_station_data %>%
dplyr::select(yearID,name,datetime,NO2,O3) %>% na.omit()
Korr_NO2_O3 <- Daten_NO2_O3 %>% group_by(name)%>%
summarise (Korrelation =cor(NO2,O3)) %>% arrange(Korrelation)
# NO2 & Temp
NROW(DATEN)#393454
# Datensätze mit defektem Temperaturfühler eliminieren
DATEN_temp <- DATEN %>% filter (Temp < 45)
DATEN_temp <- DATEN_temp %>% na.omit()
Korrelationen <- DATEN_temp %>% group_by(name)%>%
summarise(Kor_no2.temp= cor(NO2,Temp),Kor_no.temp = cor(NO_,Temp),
Kor_o3.temp = cor(O3,Temp))
DATEN_temp %>% filter(name == "Rt_") %>%
summarise (cor(O3,Temp))
|
# Unroot the phylogeny stored in 2485_0.txt and write the result back out.
library(ape)

# Read the Newick-format tree, remove its root, and save the unrooted copy.
phylo_tree <- read.tree("2485_0.txt")
phylo_unrooted <- unroot(phylo_tree)
write.tree(phylo_unrooted, file = "2485_0_unrooted.txt")
testtree <- read.tree("2485_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="2485_0_unrooted.txt") |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/drive_objects.R
\name{ParentReference}
\alias{ParentReference}
\title{ParentReference Object}
\usage{
ParentReference(id = NULL, isRoot = NULL, parentLink = NULL,
selfLink = NULL)
}
\arguments{
\item{id}{The ID of the parent}
\item{isRoot}{Whether or not the parent is the root folder}
\item{parentLink}{A link to the parent}
\item{selfLink}{A link back to this reference}
}
\value{
ParentReference object
}
\description{
ParentReference Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
A reference to a file's parent.
}
\seealso{
Other ParentReference functions: \code{\link{parents.insert}}
}
| /googledrivev2.auto/man/ParentReference.Rd | permissive | GVersteeg/autoGoogleAPI | R | false | true | 716 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/drive_objects.R
\name{ParentReference}
\alias{ParentReference}
\title{ParentReference Object}
\usage{
ParentReference(id = NULL, isRoot = NULL, parentLink = NULL,
selfLink = NULL)
}
\arguments{
\item{id}{The ID of the parent}
\item{isRoot}{Whether or not the parent is the root folder}
\item{parentLink}{A link to the parent}
\item{selfLink}{A link back to this reference}
}
\value{
ParentReference object
}
\description{
ParentReference Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
A reference to a file's parent.
}
\seealso{
Other ParentReference functions: \code{\link{parents.insert}}
}
|
#Process Modes User Script
#Jonathan H. Morgan
#18 May 2020
# Example driver for process_modes(): computes modal ratings ("evaluation"
# output) from the Duke 2010 sentiment-session data.
#Clear Out Console Script
cat("\014")
#Clearing Old Data
# NOTE(review): rm(list = ls()) wipes the interactive environment; acceptable
# here as a one-off analysis script, but avoid in reusable code
rm(list = ls())
gc()
#Setting Working Directory
setwd("~/Desktop/ACT/Duke10/Scripts and Visualizations/")
getwd()
#Calling Function
source("~/Desktop/ACT/ACT R Functions/modes_processing_18May2020.R")
#Loading Dataset I am analyzing
load('duke10_usif_12May2020.Rda')
#Isolating my concept list.
#Note: The concept list is represented as separate object in the event that the analyst wants to examine only a subset of the concepts contained in the entire dataset.
concept <- sessions$Concept
#Creating a subset dataset containing two columns.
#Importantly, the function assumes that the first column of this dataset is the concept label, and the second lists the ratings associated with the labels.
# NOTE(review): columns 52 and 53 are selected by position -- presumably the
# evaluation label/rating pair; confirm against the dataset codebook
concept_data <- sessions[c(52,53)]
#The name used to label the output dataset
name <- c('evaluation_modes')
#Running the Function
process_modes(concept, concept_data, name)
| /ACT Modal Analyses/modes_processing_example_18May2020.R | no_license | jhm18/Generative-Social-Science | R | false | false | 1,000 | r | #Process Modes User Script
#Jonathan H. Morgan
#18 May 2020
# Interactive analysis script: load the Duke10 USIF sessions and run
# process_modes() over the concept labels and their ratings.
#Clear Out Console Script ("\014" clears the RStudio console)
cat("\014")
#Clearing Old Data
# NOTE(review): clears the entire global environment; run top-level only.
rm(list = ls())
gc()
#Setting Working Directory
# NOTE(review): machine-specific path.
setwd("~/Desktop/ACT/Duke10/Scripts and Visualizations/")
getwd()
#Calling Function
# Provides process_modes(), called at the end of this script.
source("~/Desktop/ACT/ACT R Functions/modes_processing_18May2020.R")
#Loading Dataset I am analyzing
# Expected to provide the `sessions` data.frame referenced below.
load('duke10_usif_12May2020.Rda')
#Isolating my concept list.
#Note: The concept list is represented as a separate object in the event that the analyst wants to examine only a subset of the concepts contained in the entire dataset.
concept <- sessions$Concept
#Creating a subset dataset containing two columns.
#Importantly, the function assumes that the first column of this dataset is the concept label, and the second lists the ratings associated with the labels.
# Positional selection of columns 52 and 53 -- confirm against the dataset.
concept_data <- sessions[c(52,53)]
#The name used to label the output dataset
name <- c('evaluation_modes')
#Running the Function
process_modes(concept, concept_data, name)
|
#' read Stock Synthesis data file
#'
#' Read Stock Synthesis data file into list object in R. This function is a
#' wrapper which calls SS_readdat_2.00, SS_readdat_3.00, SS_readdat_3.24, or SS_readdat_3.30
#' (and potentially additional functions in the future). This setup allows those
#' functions to be cleaner (if somewhat redundant) than a single function that
#' attempts to do everything. Returned datlist is mostly consistent across versions.
#'
#'
#' @param file Filename either with full path or relative to working directory.
#' @param version SS version number.
#' Currently "2.00", "3.00", "3.24" or "3.30" are supported,
#' either as character or numeric values (noting that numeric 3.30 = 3.3). If
#' version is NULL, the version (3.24 or 3.30) will be looked for on the first
#' line of the file.
#' @param verbose Should there be verbose output while running the file?
#' Default=TRUE.
#' @param echoall Debugging tool (not fully implemented) of echoing blocks of
#' data as it is being read.
#' @param section Which data set to read. Only applies for a data.ss_new file
#' created by Stock Synthesis. Allows the choice of either expected values
#' (section=2) or bootstrap data (section=3+). Leaving default of section=NULL
#' will read input data, (equivalent to section=1).
#' @author Ian G. Taylor, Allan C. Hicks, Neil L. Klaer, Kelli F. Johnson,
#' Chantel R. Wetzel
#' @export
#' @seealso \code{\link{SS_readdat_2.00}}, \code{\link{SS_readdat_3.00}},
#' \code{\link{SS_readdat_3.24}}, \code{\link{SS_readdat_3.30}},
#' \code{\link{SS_readctl}}, \code{\link{SS_readctl_3.24}}
#' \code{\link{SS_readstarter}}, \code{\link{SS_readforecast}},
#' \code{\link{SS_writestarter}},
#' \code{\link{SS_writeforecast}}, \code{\link{SS_writedat}}
SS_readdat <- function(file, version=NULL, verbose=TRUE,echoall=FALSE,section=NULL){
  # automatic testing of version number ----
  if(is.null(version)) {
    # look for 3.24 or 3.30 at the top of the chosen file;
    # version markers look like "#V3.24"/"#V3.30", so characters 3-6 of a
    # token hold the version number
    version <- scan(file, what=character(), nlines=5, quiet=!verbose)
    version <- substring(version,3,6)
    version <- version[version %in% c("3.24", "3.30")]
    # FIX(review): several tokens can match (e.g. the marker repeated in the
    # first lines); keep only the first so `nver` below is a scalar and the
    # if() chain does not fail with a "length > 1" error
    if(length(version) > 1){
      version <- version[1]
    }
    # if that fails, look for data.ss_new file in the same directory
    if(length(version) > 0){
      if(verbose)cat("assuming version", version, "based on first five lines of data file\n")
    }else{
      newfile <- file.path(dirname(file), "data.ss_new")
      if(file.exists(newfile)){
        version <- scan(newfile, what=character(), nlines=1, quiet=!verbose)
        version <- substring(version,3,6)
        # FIX(review): the first line of data.ss_new may contain several
        # whitespace-separated tokens; use the first one only
        if(length(version) > 1){
          version <- version[1]
        }
        if(verbose)cat("assuming version", version, "based on first line of data.ss_new\n")
      }else{
        stop("input 'version' required due to missing value at top of", file)
      }
    }
  }
  nver <- as.numeric(substring(version,1,4))
  # FIX(review): fail with a clear message instead of the cryptic
  # "missing value where TRUE/FALSE needed" when the version is unparseable
  if(length(nver) != 1 || is.na(nver)){
    stop("could not interpret SS version '", paste(version, collapse=" "),
         "' for file ", file)
  }
  if(verbose) cat("Char version is ", version, "\n")
  if(verbose) cat("Numeric version is ", nver, "\n")
  # dispatch to the version-specific reader ----
  if(nver < 3){
    # SS version 2.00
    datlist <- SS_readdat_2.00(file=file, verbose=verbose,
                               echoall=echoall, section=section)
  }else if(nver < 3.2){
    # SS version 3.00
    datlist <- SS_readdat_3.00(file=file, verbose=verbose,
                               echoall=echoall, section=section)
  }else if(nver < 3.3){
    # SS version 3.24
    datlist <- SS_readdat_3.24(file=file, verbose=verbose,
                               echoall=echoall, section=section)
  }else{
    # SS version 3.30
    datlist <- SS_readdat_3.30(file=file, verbose=verbose,
                               echoall=echoall, section=section)
  }
  # return the result
  return(datlist)
}
| /R/SS_readdat.R | no_license | amart/r4ss | R | false | false | 3,726 | r | #' read Stock Synthesis data file
#'
#' Read Stock Synthesis data file into list object in R. This function is a
#' wrapper which calls SS_readdat_2.00, SS_readdat_3.00, SS_readdat_3.24, or SS_readdat_3.30
#' (and potentially additional functions in the future). This setup allows those
#' functions to be cleaner (if somewhat redundant) than a single function that
#' attempts to do everything. Returned datlist is mostly consistent across versions.
#'
#'
#' @param file Filename either with full path or relative to working directory.
#' @param version SS version number.
#' Currently "2.00", "3.00", "3.24" or "3.30" are supported,
#' either as character or numeric values (noting that numeric 3.30 = 3.3). If
#' version is NULL, the version (3.24 or 3.30) will be looked for on the first
#' line of the file.
#' @param verbose Should there be verbose output while running the file?
#' Default=TRUE.
#' @param echoall Debugging tool (not fully implemented) of echoing blocks of
#' data as it is being read.
#' @param section Which data set to read. Only applies for a data.ss_new file
#' created by Stock Synthesis. Allows the choice of either expected values
#' (section=2) or bootstrap data (section=3+). Leaving default of section=NULL
#' will read input data, (equivalent to section=1).
#' @author Ian G. Taylor, Allan C. Hicks, Neil L. Klaer, Kelli F. Johnson,
#' Chantel R. Wetzel
#' @export
#' @seealso \code{\link{SS_readdat_2.00}}, \code{\link{SS_readdat_3.00}},
#' \code{\link{SS_readdat_3.24}}, \code{\link{SS_readdat_3.30}},
#' \code{\link{SS_readctl}}, \code{\link{SS_readctl_3.24}}
#' \code{\link{SS_readstarter}}, \code{\link{SS_readforecast}},
#' \code{\link{SS_writestarter}},
#' \code{\link{SS_writeforecast}}, \code{\link{SS_writedat}}
SS_readdat <- function(file, version=NULL, verbose=TRUE,echoall=FALSE,section=NULL){
# automatic testing of version number ----
if(is.null(version)) {
# look for 3.24 or 3.30 at the top of the chosen file
version <- scan(file, what=character(), nlines=5, quiet=!verbose)
version <- substring(version,3,6)
version <- version[version %in% c("3.24", "3.30")]
# if that fails, look for data.ss_new file in the same directory
if(length(version) > 0){
if(verbose)cat("assuming version", version, "based on first five lines of data file\n")
}else{
newfile <- file.path(dirname(file), "data.ss_new")
if(file.exists(newfile)){
version <- scan(newfile, what=character(), nlines=1, quiet=!verbose)
version <- substring(version,3,6)
if(verbose)cat("assuming version", version, "based on first line of data.ss_new\n")
}else{
stop("input 'version' required due to missing value at top of", file)
}
}
}
nver <- as.numeric(substring(version,1,4))
if(verbose) cat("Char version is ", version, "\n")
if(verbose) cat("Numeric version is ", nver, "\n")
# call function for SS version 2.00 ----
if(nver<3){
datlist <- SS_readdat_2.00(file=file, verbose=verbose,
echoall=echoall, section=section)
}
# call function for SS version 3.00 ----
if((nver>=3)&&(nver<3.2)){
datlist <- SS_readdat_3.00(file=file, verbose=verbose,
echoall=echoall, section=section)
}
# call function for SS version 3.24 ----
if((nver>=3.2)&&(nver<3.3)){
datlist <- SS_readdat_3.24(file=file, verbose=verbose,
echoall=echoall, section=section)
}
# call function for SS version 3.30 ----
if(nver>=3.3){
datlist <- SS_readdat_3.30(file=file, verbose=verbose,
echoall=echoall, section=section)
}
# return the result
return(datlist)
}
|
# NOTE(review): auto-generated SEMOSS temp script. It evaluates inside an
# environment/list referenced by a hash-named object (defined elsewhere),
# sets ROOT to a machine-specific path, and clears the workspace as a side
# effect of the rm(list = ls()) call. Not meant for manual editing.
with(aa53f79bdffed440e9878c0a57e2678e5, {ROOT <- 'D:/SEMOSS_v4.0.0_x64/SEMOSS_v4.0.0_x64/semosshome/db/Atadata2__3b3e4a3b-d382-4e98-9950-9b4e8b308c1c/version/80bb2a25-ac5d-47d0-abfc-b3f3811f0936';rm(list=ls())});
#' Calculate the single entropy
#' @description
#' To calculate the entropy of a single branch, the data is first separated into two sections: a feature section containing the attributes, and a class section containing the classification results in the dataset. The two inputs of this function, features and class, correspond to these sections. For each unique class present in the class section, the \eqn{-p_i \log p_i} term (natural logarithm) is calculated, and these terms are summed over all classes to obtain the total entropy of the branch.
#' @param features Attribute data
#' @param class class data
#' @return A numeric entropy value
#' @export
get_entropy<-function(features,class)
{
  # Number of observations; mirrors the original, which sizes the
  # probabilities by length(features) (assumed equal to length(class) --
  # TODO confirm with callers).
  total <- length(features)
  # Robustness fix: an empty branch has zero entropy (the previous
  # 1:length(...) loop produced NA/NaN for empty input).
  if (total == 0) {
    return(0)
  }
  # Count occurrences of each distinct class label. Vectorised replacement
  # for the former preallocated loop; same counts in the same order.
  counts <- vapply(unique(class), function(lab) sum(class == lab), numeric(1))
  prob <- counts / total
  # -p * log(p) per class; the guard keeps a zero probability from producing
  # NaN via log(0).
  # NOTE(review): the natural log is used here although the description once
  # said log2; kept for backward compatibility (information-gain rankings
  # are invariant to the log base).
  one_ent <- prob * ifelse(prob > 0, log(prob), 0)
  entropy <- -sum(one_ent)
  return(entropy)
}
| /SoySauce/R/get_entropy.R | no_license | cherishwsx/SoySaucePackage | R | false | false | 1,064 | r | #' Calculate the single entropy
#' @description
#' To calculate entropy for single branch, the data is first separated into two sections. One is feature section which contains the attributes and the other one is class section which contains the classification result in the dataset. The two inputs in this function feature and class are corresponded to these sections. For each unique class existed in the class section, $-p_ilog_2p_i$ term is calculated and summed for all classes to obtain total initial single entropy.
#' @param features Attribute data
#' @param class class data
#' @return A numeric entropy value
#' @export
get_entropy<-function(features,class)
{
total <- length(features)
type <- rep(NA, length(unique(class)))
prob <- rep(NA, length(unique(class)))
one_ent <- rep(NA, length(unique(class)))
for(ii in 1:length(unique(class))) {
type[ii] <- sum(class==unique(class)[ii])
prob[ii] <- type[ii] / total
one_ent[ii] <- prob[ii]*(ifelse(prob[ii] > 0, log(prob[ii]), 0))
}
entropy <- -sum(one_ent)
return(entropy)
}
|
# Data preparation for the 1B150 pottery "Network" tab.
# NOTE(review): `readwd` is expected to be defined earlier in the app.
# distance matrix (pairwise DTW distances between 2D outlines)
distM = read.csv(paste0(readwd,"distM_2Doutlines_dtw_1B150_20210802.csv"), row.names = 1)
# circumference data (loads `mat_circumference_scaled`, a list of matrices)
load(paste0(readwd, "circumference_scaled_Rim_1B150_selectedERlist_20210802.RData"))
# Largest matrix dimension across all circumference matrices; used to size
# the "clicked outline" placeholder data frames below.
rowN = max(unlist(lapply(mat_circumference_scaled, dim)))
names(mat_circumference_scaled) = names(distM)
# process matrix data: normalise all distances to [0, 1]
distM = as.matrix(distM)
distM = distM/max(distM)
# data clicked: empty x/y containers, filled when the user clicks outlines
dfplot_clicked_i = data.frame(matrix(ncol = 2, nrow = rowN))
colnames(dfplot_clicked_i) = c("x_val",
                               "y_val")
dfplot_clicked_j = dfplot_clicked_i
# Tab Network #####
# high and low membership, marginal cases for 1B150 clustering
# (hand-picked sherds illustrating central / peripheral / marginal cluster
# membership; the "peipheral" spelling is kept because it is data, and other
# code may match on these exact strings)
special_set_1B150 = data.frame(
  "filename" = c("SADR021655.jpg", "SADR021680.jpg", "SADR020634.jpg", "SADR011104.jpg", "SADR020954.jpg", "SADR021208.jpg", "SADR010608.jpg", "SADR011077.jpg", "SADR011076.jpg"),
  "label" = c("central", "central", "central", "peipheral", "peipheral & marginal", "peipheral & marginal", "marginal", "marginal", "marginal")
)
# measures data (NOTE(review): absolute, machine-specific path)
data_df = as.data.frame(read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_pointMeasures_Rim_1B150_selectedERlist_20210928.csv"))
data_df$filename = as.character(data_df$filename)
data_df$Type = as.character(data_df$Type)
rownames(data_df) = data_df$filename
# obs order initial: remember the original row order before the merge()
# calls below, which re-sort rows by the "filename" key.
obs_order_initial = as.character(data_df$filename)
# # Include the data from the Fuzzy Rules #####
# Flattness: rim diameter relative to wall thickness
data_df$Rim_flattness_med = data_df$Rim_Diam_extra_half/data_df$Rim_median.WT
data_df$Rim_flattness_max = data_df$Rim_Diam_extra_half/data_df$Rim_max.WT
data_df$WT_LipBot = data_df$Rim_lip_width_scaled/data_df$Rim_bottom_width_scaled # included in FIS
# From FIS data created
data_df_FIS_created = as.data.frame(read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_extracted_FIS_1B150_1B170_1A100_1C100_20210802.csv"))
data_df_FIS_created$filename = as.character(data_df_FIS_created$filename)
# as.data.frame(t(data_df_FIS_created[180,]))
# Drop vertical-cut columns from this frame; kept aside for later use.
data_df_FIS_created[,(colnames(data_df_FIS_created) %in% c("VC.lengths", "VC.sides", "VC.Height.Ratio"))] = NULL # use later for vertical cut
# Coerce factor columns to numeric via character (avoids converting the
# underlying factor level codes instead of the printed values).
data_df_FIS_created$distTop.Out = as.numeric(as.character(data_df_FIS_created$distTop.Out))
data_df_FIS_created$distTop.In = as.numeric(as.character(data_df_FIS_created$distTop.In))
data_df_FIS_created$CutsSymmetric = as.numeric(as.character(data_df_FIS_created$CutsSymmetric))
data_df_FIS_created$VC.Outline.Ratio = as.numeric(as.character(data_df_FIS_created$VC.Outline.Ratio))
# impute values: sentinel codes for "not detected"; -0.1 / -1 are assumed to
# lie outside each variable's natural range -- TODO confirm with FIS spec
data_df_FIS_created$CutsSymmetric[is.na(data_df_FIS_created$CutsSymmetric)] = -0.1
data_df_FIS_created$distTop.In[is.na(data_df_FIS_created$distTop.In)] = -1
data_df_FIS_created$distTop.Out[is.na(data_df_FIS_created$distTop.Out)] = -1
data_df = merge(data_df, data_df_FIS_created, by = "filename")
# data FIS for ellipse variable
data_df_FIS_ellipse = as.data.frame(read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_extracted_FIS_1B150_1B170_1A100_1C100_20210920.csv"))
data_df_FIS_ellipse$filename = as.character(data_df_FIS_ellipse$filename)
data_df = merge(data_df, data_df_FIS_ellipse, by = "filename")
# From FIS-1 (rim orientation: everted / straight / inverted memberships)
data_df_FIS_1 = as.data.frame(read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_extracted_FIS_1_1B150_1B170_1A100_1C100_20210802.csv"))
data_df_FIS_1$filename = as.character(data_df_FIS_1$filename)
data_df_FIS_1 = data_df_FIS_1[data_df_FIS_1$filename %in% data_df$filename, ]
#
# data for FIS plots by cluster
dataplot_FIS_1 = data.frame("filenaam" = data_df_FIS_1$filename, "everted.rim" = data_df_FIS_1$everted.rim, "straight.rim" = data_df_FIS_1$straight.rim, "inverted.rim" = data_df_FIS_1$inverted.rim) # , "hard_cl" = 0
dataplot_FIS_1$selected = 0
#
# Keep only the final support/description columns and tag them with the
# FIS number before merging into the master frame.
data_df_FIS_1 = data_df_FIS_1[, which(colnames(data_df_FIS_1) %in% c("filename", "final.support", "final.descr"))]
data_df_FIS_1$final.descr = as.character(data_df_FIS_1$final.descr)
colnames(data_df_FIS_1)[2:3] = c("final.support.FIS_1", "final.descr.FIS_1")
data_df = merge(data_df, data_df_FIS_1, by = "filename")
# From FIS-2 (horizontally flattened rim or not)
data_df_FIS_2 = as.data.frame(read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_extracted_FIS_2_1B150_1B170_1A100_1C100_20210802.csv"))
data_df_FIS_2$filename = as.character(data_df_FIS_2$filename)
data_df_FIS_2 = data_df_FIS_2[data_df_FIS_2$filename %in% data_df$filename, ]
#
# data for FIS plots by cluster
dataplot_FIS_2 = data.frame("filenaam" = data_df_FIS_2$filename, "horizontally.flattened.rim" = data_df_FIS_2$horizontally.flattened.rim, "not.horizontally.flattened.rim" = data_df_FIS_2$not.horizontally.flattened.rim) # , "hard_cl" = 0
dataplot_FIS_2$selected = 0
#
data_df_FIS_2 = data_df_FIS_2[, which(colnames(data_df_FIS_2) %in% c("filename", "final.support", "final.descr"))]
data_df_FIS_2$final.descr = as.character(data_df_FIS_2$final.descr)
colnames(data_df_FIS_2)[2:3] = c("final.support.FIS_2", "final.descr.FIS_2")
data_df = merge(data_df, data_df_FIS_2, by = "filename")
# From FIS-3 (rounded rim or not)
data_df_FIS_3 = as.data.frame(read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_extracted_FIS_3_1B150_1B170_1A100_1C100_20210802.csv"))
data_df_FIS_3$filename = as.character(data_df_FIS_3$filename)
data_df_FIS_3 = data_df_FIS_3[data_df_FIS_3$filename %in% data_df$filename, ]
#
# data for FIS plots by cluster
dataplot_FIS_3 = data.frame("filenaam" = data_df_FIS_3$filename, "rounded.rim" = data_df_FIS_3$rounded.rim, "not.rounded.rim" = data_df_FIS_3$not.rounded.rim) # , "hard_cl" = 0
dataplot_FIS_3$selected = 0
#
data_df_FIS_3 = data_df_FIS_3[, which(colnames(data_df_FIS_3) %in% c("filename", "final.support", "final.descr"))]
data_df_FIS_3$final.descr = as.character(data_df_FIS_3$final.descr)
colnames(data_df_FIS_3)[2:3] = c("final.support.FIS_3", "final.descr.FIS_3")
data_df = merge(data_df, data_df_FIS_3, by = "filename")
# From FIS-4 (vertically flattened exterior / interior)
data_df_FIS_4 = as.data.frame(read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_extracted_FIS_4_1B150_1B170_1A100_1C100_20210802.csv"))
data_df_FIS_4$filename = as.character(data_df_FIS_4$filename)
data_df_FIS_4 = data_df_FIS_4[data_df_FIS_4$filename %in% data_df$filename, ]
#
# data for FIS plots by cluster (exterior side)
dataplot_FIS_4.ext = data.frame("filenaam" = data_df_FIS_4$filename, "vertically.flattened.exterior" = data_df_FIS_4$vertically.flattened.exterior, "not.vertically.flattened.exterior" = data_df_FIS_4$not.vertically.flattened.exterior) # , "hard_cl" = 0
dataplot_FIS_4.ext$selected = 0
#
# data for FIS plots by cluster (interior side)
dataplot_FIS_4.int = data.frame("filenaam" = data_df_FIS_4$filename, "vertically.flattened.interior" = data_df_FIS_4$vertically.flattened.interior, "not.vertically.flattened.interior" = data_df_FIS_4$not.vertically.flattened.interior) # , "hard_cl" = 0
dataplot_FIS_4.int$selected = 0
#
# FIS-4 keeps separate support/description per side plus a combined label.
data_df_FIS_4 = data_df_FIS_4[, which(colnames(data_df_FIS_4) %in% c("filename", "final.support.ext", "final.descr.ext", "final.support.int", "final.descr.int", "HC.int.ext"))]
data_df_FIS_4$final.descr.ext = as.character(data_df_FIS_4$final.descr.ext)
data_df_FIS_4$final.descr.int = as.character(data_df_FIS_4$final.descr.int)
colnames(data_df_FIS_4)[2:6] = c("final.support.ext.FIS_4", "final.descr.ext.FIS_4", "final.support.int.FIS_4", "final.descr.int.FIS_4", "final.descr.both.sides.FIS_4")
data_df = merge(data_df, data_df_FIS_4, by = "filename")
# NOTE(review): FIS-5 (cut rim) is deliberately disabled below; references to
# FIS-5 frames further down are commented out as well.
# # From FIS-5
# data_df_FIS_5 = as.data.frame(read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_extracted_FIS_5_1B150_1B170_1A100_1C100_20210802.csv"))
# data_df_FIS_5$filename = as.character(data_df_FIS_5$filename)
# data_df_FIS_5 = data_df_FIS_5[data_df_FIS_5$filename %in% data_df$filename, ]
# #
# # data for FIS plots by cluster
# dataplot_FIS_5 = data.frame("filenaam" = data_df_FIS_5$filename, "cut.rim" = data_df_FIS_5$cut.rim, "not.cut.rim" = data_df_FIS_5$not.cut.rim) # , "hard_cl" = 0
# dataplot_FIS_5$selected = 0
# #
# data_df_FIS_5 = data_df_FIS_5[, which(colnames(data_df_FIS_5) %in% c("filename", "final.support", "final.descr"))]
# data_df_FIS_5$final.descr = as.character(data_df_FIS_5$final.descr)
# colnames(data_df_FIS_5)[2:3] = c("final.support.FIS_5", "final.descr.FIS_5")
# data_df = merge(data_df, data_df_FIS_5, by = "filename")
# From FIS-6 (thickened rim or not)
data_df_FIS_6 = as.data.frame(read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_extracted_FIS_6_1B150_1B170_1A100_1C100_20210802.csv"))
data_df_FIS_6$filename = as.character(data_df_FIS_6$filename)
data_df_FIS_6 = data_df_FIS_6[data_df_FIS_6$filename %in% data_df$filename, ]
#
# data for FIS plots by cluster
dataplot_FIS_6 = data.frame("filenaam" = data_df_FIS_6$filename, "thickened.rim" = data_df_FIS_6$thickened.rim, "non.thickened.rim" = data_df_FIS_6$non.thickened.rim) # , "hard_cl" = 0
dataplot_FIS_6$selected = 0
#
data_df_FIS_6 = data_df_FIS_6[, which(colnames(data_df_FIS_6) %in% c("filename", "final.support", "final.descr"))]
data_df_FIS_6$final.descr = as.character(data_df_FIS_6$final.descr)
colnames(data_df_FIS_6)[2:3] = c("final.support.FIS_6", "final.descr.FIS_6")
data_df = merge(data_df, data_df_FIS_6, by = "filename")
# extra from FIS-6: pick the larger of the two protrusion-length candidates,
# then scale by the manually measured rim height
data_df$rim.out.prot.length.selected = ifelse(data_df$rim.out.prot.length > data_df$rim.out.prot.length.2, data_df$rim.out.prot.length, data_df$rim.out.prot.length.2)
# NOTE(review): the condition below compares Rim_incl_min_sign (not
# rim.inn.prot.length) against rim.inn.prot.length.2, which breaks the
# symmetry with the exterior line above -- confirm this is intentional.
data_df$rim.inn.prot.length.selected = ifelse(data_df$Rim_incl_min_sign > data_df$rim.inn.prot.length.2, data_df$rim.inn.prot.length, data_df$rim.inn.prot.length.2)
data_df$rim.out.prot.length.selected_scaled = data_df$rim.out.prot.length.selected/data_df$Rim_height_manual
data_df$rim.inn.prot.length.selected_scaled = data_df$rim.inn.prot.length.selected/data_df$Rim_height_manual
# From FIS-7 (complicated vs simple rim)
data_df_FIS_7 = as.data.frame(read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_extracted_FIS_7_471-sherds_20210928.csv"))
data_df_FIS_7$filename = as.character(data_df_FIS_7$filename)
data_df_FIS_7 = data_df_FIS_7[data_df_FIS_7$filename %in% data_df$filename, ]
#
# data for FIS plots by cluster
dataplot_FIS_7 = data.frame("filenaam" = data_df_FIS_7$filename, "complicated.rim" = data_df_FIS_7$complicated.rim, "simple.rim" = data_df_FIS_7$simple.rim) # , "hard_cl" = 0
dataplot_FIS_7$selected = 0
#
data_df_FIS_7 = data_df_FIS_7[, which(colnames(data_df_FIS_7) %in% c("filename", "final.support", "final.descr"))]
data_df_FIS_7$final.descr = as.character(data_df_FIS_7$final.descr)
colnames(data_df_FIS_7)[2:3] = c("final.support.FIS_7", "final.descr.FIS_7")
data_df = merge(data_df, data_df_FIS_7, by = "filename")
# order data: restore the pre-merge row order saved in obs_order_initial
# (the merge() calls above sorted rows by filename)
obs_order_initial_id = match(obs_order_initial, data_df$filename) # added for the expected ordering
data_df = data_df[obs_order_initial_id, ]
# get plot data: labels frame (type variant TV plus a 0/1 full-profile flag)
df_labels = as.data.frame(as.character(data_df$TV), stringsAsFactors = FALSE)
data_df$filename = as.character(data_df$filename)
colnames(df_labels) = "TV"
rownames(df_labels) = data_df$filename
df_labels$TV = as.character(df_labels$TV)
df_labels$binaryLabel = data_df$is.full.profile
df_labels$binaryLabel = replace(df_labels$binaryLabel, df_labels$binaryLabel == TRUE, 1)
df_labels$binaryLabel = replace(df_labels$binaryLabel, df_labels$binaryLabel != 1, 0)
# colum = c("Rim_incl_sin_mean","rim.out.prot.length","rim.inn.prot.length", "rim.out.diff.length","rim.inn.diff.length","block.out.perC","block.perC", "trapez.perC","form.factor","aspect.ratio","roundness",
#           "Rim_HorizCut_WT","Rim_mean.WT","Rim_median.WT","Rim_sd.WT","Rim_min.WT","Rim_max.WT","RimWT_margin","Rim_Diam_extra_half","Rim_extent.1","Rim_curl",
#           "Rim_elongation.min","Rim_elongation.max","Rim_elongation.avg","Rim_radius.ratio","Rim_eccentricity","Rim_incl_sin_min","Rim_incl_sin_max","Rim_height_manual",
#           "WT.mean_norm", "WT.med_norm", "WT.var_norm", "WT.skew_norm", "WT.kurt_norm", "WT.BotMed_norm", "Rim_flattness_med", "Rim_flattness_max")
# Columns excluded from the numeric plotting frame: categorical/bookkeeping
# columns and the textual FIS descriptions.
columCateg = c("filename", "TV", "Type", "filewd", "set", "is.full.profile", "sherd_rim.diameter.check", "Rim_HorizCut.Inn_Cont",
               "inERlist", "ContextCompare", "Year", "Site", "sherd_listlines.out", "sherd_listlines.inn", "Rim_HorizCut.Out_Cont",
               "Rim_HorizCut.Inn_Width", "Rim_HorizCut.Out_Width")
columFISdescr = c("final.descr.FIS_1", "final.descr.FIS_2", "final.descr.FIS_3", "final.descr.ext.FIS_4", "final.descr.int.FIS_4", "final.descr.both.sides.FIS_4", "final.descr.FIS_6", "final.descr.FIS_7") # "final.descr.FIS_5",
df_dataplot = data_df[, !(colnames(data_df) %in% columCateg)]
df_dataplot = df_dataplot[, !(colnames(df_dataplot) %in% columFISdescr)]
rownames(df_dataplot) = data_df$filename
## Beeplot Data ##
bee_dataplot = df_dataplot
# do not normalise pv, sherd_inclination, roundness, form.factor, slope.WT, MDLHratio, Rim_incl_sin
# NOTE(review): "form.factor" appears twice in exclV; harmless for %in%,
# but probably a leftover from editing the list.
exclV = c("Rim_incl_sin_mean", "form.factor", "roundness", "form.factor", "Rim_extent.1", "Rim_eccentricity", "Rim_incl_sin_min", "Rim_incl_sin_max")
# exclV = c("roundness", "form.factor", "Rim_extent.1", "Rim_eccentricity",
#           "Rim_incl_sin_mean", "Rim_incl_sin_min", "Rim_incl_sin_max",
#           # "Rim_WT.mean.norm", "Rim_WT.med.norm", "Rim_WT.var.norm",
#           "Below.Rim_incl_sin_min", "Below.Rim_incl_sin_max", "Below.Rim_incl_sin_mean",
#           # 'FIS.1_mf.1.out','FIS.1_mf.1.avg','FIS.1_mf.1.inn','FIS.1_mf.2.out','FIS.1_mf.2.avg','FIS.1_mf.2.inn','FIS.1_mf.3.out','FIS.1_mf.3.avg','FIS.1_mf.3.inn','FIS.1_mf.4.out','FIS.1_mf.4.avg','FIS.1_mf.4.inn','straight.rim','slightly.bent.rim','quite.bent.rim','profoundly.bent.rim','straight.rim.sign','straight.rim.descr','slightly.bent.rim.sign','slightly.bent.rim.descr','quite.bent.rim.sign','quite.bent.rim.descr','profoundly.bent.rim.sign','profoundly.bent.rim.descr','final.support','interm.1.descr','interm.2.descr','final.descr',
#           'final.support.FIS_1', 'final.support.FIS_2', 'final.support.FIS_3', 'final.support.ext.FIS_4', 'final.support.int.FIS_4', 'final.support.FIS_5', 'final.support.FIS_6'
#           ) # 'FIS6.var.1.inn','FIS6.var.1.out','FIS6.var.2.global','FIS6.var.3.global'
bee_dataplot = bee_dataplot[,!(names(bee_dataplot) %in% exclV)]
# Min-max normalise every remaining column to [0, 1]; constant columns
# (max == min) are left untouched to avoid division by zero.
vmin = apply(bee_dataplot, 2, min, na.rm = TRUE)
vmax = apply(bee_dataplot, 2, max, na.rm = TRUE)
for (i in 1:dim(bee_dataplot)[2]) {
  if ((vmax[[i]] - vmin[[i]]) > 0) {
    bee_dataplot[,i] = (bee_dataplot[,i] - vmin[[i]]) / (vmax[[i]] - vmin[[i]])
  } else {
    bee_dataplot[,i] = bee_dataplot[,i]
  }
}
# Merge bee_dataplot with non numeric data (the excluded columns are
# appended back un-normalised)
bee_dataplot = cbind.data.frame(rownames(bee_dataplot), bee_dataplot, df_dataplot[,(names(df_dataplot) %in% exclV)])
names(bee_dataplot)[1] = "filenaam"
bee_dataplot$filenaam = as.character(bee_dataplot$filenaam)
# Long format for plotting; melt() comes from reshape2/data.table, loaded
# elsewhere in the app -- TODO confirm which.
bee_melted = melt(bee_dataplot, id = "filenaam")
bee_melted$selected = 0
# # Melt data
# bee_melted = melt(bee_dataplot, id = "filenaam")
# bee_melted$selected = 0
# order of observations
obs_order = as.character(bee_dataplot$filenaam)
# obs_order = unlist(strsplit(obs_order, ".jpg"))
# data for the Global plotMeasures
df_MuVo_all = as.data.frame(read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210612_multivocality/df_pointMeasures_multivocality_all.csv"))
df_MuVo_all = df_MuVo_all[df_MuVo_all$filename %in% data_df$filename, ]
dataplotMeasures = cbind.data.frame(df_MuVo_all$filename, df_MuVo_all$scale, df_MuVo_all$sherd_HWratio, df_MuVo_all$sherd_inclination, df_MuVo_all$sherd_rimDiam, df_MuVo_all$sherd_WThick1, df_MuVo_all$sherd_height)
colnames(dataplotMeasures) = c("filename", "scale", "sherd_HW", "sherd_inclination", "sherd_rim_diameter", "sherd_WTat2.3", "sherd_height") # + "rim_diameter_check" + inclide WT just below rim => rim width?
# Convert pixel measures to centimetres: the drawing scale bar is 3 cm long.
dataplotMeasures$scaleCM = 3
dataplotMeasures$sherd_height = dataplotMeasures$scaleCM*dataplotMeasures$sherd_height/dataplotMeasures$scale
dataplotMeasures$sherd_rim_diameter = dataplotMeasures$scaleCM*dataplotMeasures$sherd_rim_diameter/dataplotMeasures$scale
dataplotMeasures$sherd_WTat2.3 = dataplotMeasures$scaleCM*dataplotMeasures$sherd_WTat2.3/dataplotMeasures$scale
dataplotMeasures_extra = data.frame("filename" = data_df$filename, "sherd_rim.diameter.check" = data_df$sherd_rim.diameter.check, "is.full.profile" = data_df$is.full.profile)
dataplotMeasures = merge(dataplotMeasures, dataplotMeasures_extra, by = "filename")
# order for the dataplotMeasures (merge sorted rows by filename; realign)
obs_order_id = match(obs_order, dataplotMeasures$filename)
dataplotMeasures = dataplotMeasures[obs_order_id, ]
# jitter position: fixed seed, zero offsets -- reproducible "jitter" for
# ggplot2's position_jitter (ggplot2 loaded elsewhere in the app)
jitterpos = position_jitter(width = 0, height = 0, seed = 1)
# Data for Cluster Results
# Minkowski p = 4
# cluster_df_10 = as.data.frame(read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_cluster_results_Mink_v2_ncl-10_83of100-1B150_20210802.csv"), stingsAsFactors = FALSE)
cluster_df_10 = as.data.frame(read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_cluster_results_L4_RimDiam_inclBR_Mink_v2_ncl-3_83of100-1B150_20210802.csv"), stingsAsFactors = FALSE)
# cluster_df_10 = as.data.frame(read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_cluster_results_L4_RimDiam_inclBR_FIS_Mink_ncl-7_83of100-1B150_20210802.csv"), stingsAsFactors = FALSE)
cluster_df_10 = cluster_df_10[,-(which(colnames(cluster_df_10) %in% c("labelsTV","labelsType","input")))] # "dist_from1", "dist_from2", "input"
colnames(cluster_df_10)[1] = "filenaam"
# cluster_df_8 = as.data.frame(read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_cluster_results_Mink_v2_ncl-8_83of100-1B150_20210802.csv"), stingsAsFactors = FALSE)
# cluster_df_8 = as.data.frame(read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_cluster_results_L4_RimDiam_inclBR_Mink_ncl-4_83of100-1B150_20210802.csv"), stingsAsFactors = FALSE)
cluster_df_8 = as.data.frame(read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_cluster_results_L4_RimDiam_inclBR_Mink_v2_ncl-7_83of100-1B150_20210802.csv"), stingsAsFactors = FALSE)
# cluster_df_8 = as.data.frame(read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_cluster_results_L4_RimDiam_inclBR_FIS_Mink_ncl-5_83of100-1B150_20210802.csv"), stingsAsFactors = FALSE)
cluster_df_8 = cluster_df_8[,-(which(colnames(cluster_df_8) %in% c("labelsTV","labelsType","input")))] # "dist_from1", "dist_from2", "input"
colnames(cluster_df_8)[1] = "filenaam"
# cluster_df_6 = as.data.frame(read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_cluster_results_Mink_v2_ncl-6_83of100-1B150_20210802.csv"), stingsAsFactors = FALSE)
# cluster_df_6 = as.data.frame(read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_cluster_results_L4_RimDiam_Mink_ncl-7_83of100-1B150_20210802.csv"), stingsAsFactors = FALSE)
cluster_df_6 = as.data.frame(read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_cluster_results_L4_RimDiam_inclBR_Mink_v2_ncl-2_83of100-1B150_20210802.csv"), stingsAsFactors = FALSE)
# cluster_df_6 = as.data.frame(read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_cluster_results_L4_RimDiam_inclBR_FIS_Mink_ncl-4_83of100-1B150_20210802.csv"), stingsAsFactors = FALSE)
cluster_df_6 = cluster_df_6[,-(which(colnames(cluster_df_6) %in% c("labelsTV","labelsType","input")))] # "dist_from1", "dist_from2", "input"
colnames(cluster_df_6)[1] = "filenaam"
# cluster_df_4 = as.data.frame(read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_cluster_results_Mink_v2_ncl-4_83of100-1B150_20210802.csv"), stingsAsFactors = FALSE)
# cluster_df_4 = as.data.frame(read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_cluster_results_L4_RimDiam_Mink_ncl-5_83of100-1B150_20210802.csv"), stingsAsFactors = FALSE)
cluster_df_4 = as.data.frame(read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_cluster_results_L4_RimDiam_inclBR_Mink_v2_ncl-4_83of100-1B150_20210802.csv"), stingsAsFactors = FALSE)
# cluster_df_4 = as.data.frame(read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_cluster_results_L4_RimDiam_inclBR_FIS_Mink_ncl-3_83of100-1B150_20210802.csv"), stingsAsFactors = FALSE)
cluster_df_4 = cluster_df_4[,-(which(colnames(cluster_df_4) %in% c("labelsTV","labelsType","input")))] # "dist_from1", "dist_from2", "input"
colnames(cluster_df_4)[1] = "filenaam"
# Choose which clustering result is attached to the FIS tables.
# NOTE(review): cluster_df_4 is (re)built just above, but cluster_df_10 is
# selected here — confirm this is intentional.
cluster_df_selected = cluster_df_10
# merge FIS and cluster data ## <<<<-----------
dataplot_FIS_1_ncls = merge(dataplot_FIS_1, cluster_df_selected, by = "filenaam")
dataplot_FIS_2_ncls = merge(dataplot_FIS_2, cluster_df_selected, by = "filenaam")
dataplot_FIS_3_ncls = merge(dataplot_FIS_3, cluster_df_selected, by = "filenaam")
dataplot_FIS_4.ext_ncls = merge(dataplot_FIS_4.ext, cluster_df_selected, by = "filenaam")
dataplot_FIS_4.int_ncls = merge(dataplot_FIS_4.int, cluster_df_selected, by = "filenaam")
# dataplot_FIS_5_ncls = merge(dataplot_FIS_5, cluster_df_selected, by = "filenaam")
dataplot_FIS_6_ncls = merge(dataplot_FIS_6, cluster_df_selected, by = "filenaam")
dataplot_FIS_7_ncls = merge(dataplot_FIS_7, cluster_df_selected, by = "filenaam")
# Reorder every dataplot_FIS_*_ncls table to the shared observation order
# (merge() sorts by the key, so each table must be re-matched to obs_order).
obs_order_FIS_id = match(obs_order, dataplot_FIS_1_ncls$filenaam)
dataplot_FIS_1_ncls = dataplot_FIS_1_ncls[obs_order_FIS_id, ]
obs_order_FIS_id = match(obs_order, dataplot_FIS_2_ncls$filenaam)
dataplot_FIS_2_ncls = dataplot_FIS_2_ncls[obs_order_FIS_id, ]
obs_order_FIS_id = match(obs_order, dataplot_FIS_3_ncls$filenaam)
dataplot_FIS_3_ncls = dataplot_FIS_3_ncls[obs_order_FIS_id, ]
obs_order_FIS_id = match(obs_order, dataplot_FIS_4.ext_ncls$filenaam)
dataplot_FIS_4.ext_ncls = dataplot_FIS_4.ext_ncls[obs_order_FIS_id, ]
obs_order_FIS_id = match(obs_order, dataplot_FIS_4.int_ncls$filenaam)
dataplot_FIS_4.int_ncls = dataplot_FIS_4.int_ncls[obs_order_FIS_id, ]
# obs_order_FIS_id = match(obs_order, dataplot_FIS_5_ncls$filenaam)
# dataplot_FIS_5_ncls = dataplot_FIS_5_ncls[obs_order_FIS_id, ]
# FIX: the match() for FIS_6 was commented out while its reorder stayed
# active, so FIS_6 rows were ordered with the indices computed for
# FIS_4.int; recompute the match for FIS_6 itself.
obs_order_FIS_id = match(obs_order, dataplot_FIS_6_ncls$filenaam)
dataplot_FIS_6_ncls = dataplot_FIS_6_ncls[obs_order_FIS_id, ]
obs_order_FIS_id = match(obs_order, dataplot_FIS_7_ncls$filenaam)
dataplot_FIS_7_ncls = dataplot_FIS_7_ncls[obs_order_FIS_id, ]
# One wide table: filenaam + cluster columns from FIS_1, then the two FIS
# value columns (cols 2:3) of every other FIS table; rows are row-aligned
# by the re-ordering above.
df_FIStable = cbind.data.frame(dataplot_FIS_1_ncls[,1:4], dataplot_FIS_2_ncls[,2:3], dataplot_FIS_3_ncls[,2:3], dataplot_FIS_4.ext_ncls[,2:3], dataplot_FIS_4.int_ncls[,2:3], dataplot_FIS_6_ncls[,2:3], dataplot_FIS_7_ncls[,2:3]) # dataplot_FIS_5_ncls[,2:3],
# STAD links: edge list of the STAD network. Exactly one of the read.csv()
# alternatives below is active; the others are kept for switching between
# distance measures / feature sets.
# # lines manhattan
# df_links_Measures = read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_links_STAD_ScaledPeriphery_Manh_83of100-1B150_20210802.csv")
# # lines chebyshev
# df_links_Measures = read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_links_STAD_ScaledPeriphery_Cheb_83of100-1B150_20210802.csv")
# # lines canberra
# df_links_Measures = read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_links_STAD_ScaledPeriphery_Canb_83of100-1B150_20210802.csv")
# # lines Minkowski p=4 <<---
# df_links_Measures = read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_links_STAD_ScaledPeriphery_Mink_83of100-1B150_20210802.csv")
# 2D DTW
# df_links_Measures = read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_links_STAD_ScaledHeight_dtw_1B150_20210802.csv")
# jaccard
#
# # efa
#
# # measures
#
# # descriptors
#
# combined
# df_links_Measures = read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_links_STAD_Combined_Rims_Manh_83of100-1B150_20210802.csv")
# df_links_Measures = read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_links_STAD_Combined_Rims_Mink_83of100-1B150_20210802.csv")
# df_links_Measures = read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_links_STAD_Combined_Rims_Eucl_83of100-1B150_20210802.csv")
# df_links_Measures = read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_links_STAD_Combined_L4_G3_Manh_83of100-1B150_20210802.csv")
# df_links_Measures = read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_links_STAD_Combined_L4_G4_Manh_83of100-1B150_20210802.csv")
# df_links_Measures = read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_links_STAD_Combined_L4_G7_Manh_83of100-1B150_20210802.csv")
# df_links_Measures = read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_links_STAD_Combined_L4_G3_Mink_83of100-1B150_20210802.csv")
# df_links_Measures = read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_links_STAD_Combined_L4_G4_Mink_83of100-1B150_20210802.csv")
# df_links_Measures = read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_links_STAD_Combined_L4_G7_Mink_83of100-1B150_20210802.csv")
# df_links_Measures = read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_links_STAD_Combined_L4_G3_Eucl_83of100-1B150_20210802.csv")
# df_links_Measures = read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_links_STAD_Combined_L4_G4_Eucl_83of100-1B150_20210802.csv")
# df_links_Measures = read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_links_STAD_Combined_L4_G7_Eucl_83of100-1B150_20210802.csv")
# df_links_Measures = read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_links_STAD_Combined_L4_RimDiam_inclBR_Mink_83of100-1B150_20210802.csv")
# df_links_Measures = read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_links_STAD_Combined_L4_RimDiam_Mink_83of100-1B150_20210802.csv")
# active choice: combined features, normalised to [0, 1], Minkowski distance
df_links_Measures = read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_links_STAD_Combined_normAll01_L4_RimDiam_inclBR_Mink_83of100-1B150_20210802.csv")
# df_links_Measures = read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_links_STAD_Combined_normAll01_L4_RimDiam_Mink_83of100-1B150_20210802.csv")
# df_links_Measures = read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_links_STAD_Combined_normAll01_L4_RimDiam_inclBR_FIS_Mink_83of100-1B150_20210802.csv")
# UI Colours ###
# Start the per-sherd colour table from the sherd file names, attach the
# label columns, and recode the binary label as two fixed hex colours.
df_colorAll = data.frame(filenaam = as.character(bee_dataplot$filenaam))
df_colorAll = cbind.data.frame(df_colorAll, df_labels)
# orange for label "1" (full profile), blue otherwise
df_colorAll$binaryLabel = ifelse(df_colorAll$binaryLabel == "1", "#f28e2c", "#4e79a7")
# variables that can be chosen as colour groupings in the UI
groupLabels = c("binaryLabel", "TV", names(bee_dataplot))
# multiple colours palette display.brewer.all()
# brewerset = brewer.pal(11, name = "BrBG")[c(2,3,4,8,9,10)]
# c('#800000','#9A6324','#808000', '#469990','#000075','#000000',
# '#e6194B','#f58231','#ffe119','#bfef45','#3cb44b','#42d4f4','#4363d8','#911eb4','#f032e6','#a9a9a9',
# '#fabed4','#ffd8b1','#fffac8','#aaffc3','#dcbeff','#ffffff') # # https://sashamaps.net/docs/resources/20-colors/
# instead of the shiny yellow #ffe119 use #e5c700
# instead of the cyan #42d4f4 use #0db6db
# Lookup table mapping an opacity percentage (100 = opaque ... 0 = fully
# transparent) to its two-digit uppercase hex alpha suffix, as appended to
# "#RRGGBB" colours elsewhere in this script.
# Generated programmatically instead of the former 101-entry hand-typed
# vector; floor(v*255/100 + 0.5) is round-half-up, which reproduces the
# standard alpha table exactly (e.g. 70% -> "B3", 50% -> "80").
alphaHEXcode = data.frame("value" = 100:0,
"code" = sprintf("%02X", floor(100:0 * 255 / 100 + 0.5)),
stringsAsFactors = FALSE)
# Categorical base palette (22 distinct colours) used for types, FIS
# descriptors, etc.; see https://sashamaps.net/docs/resources/20-colors/
brewerset_Cat = c('#808000', '#469990','#dcbeff', '#9A6324','#000000',
'#e6194B','#f58231','#e5c700','#bfef45','#3cb44b','#0db6db','#4363d8','#911eb4','#f032e6','#a9a9a9',
'#fabed4','#ffd8b1','#fffac8','#aaffc3','#000075', '#ffffff','#800000')
# brewerset_Clusters = c('#000000', '#db6d00', '#006ddb', '#920000',
# '#8f4e00', '#ffdf4d', '#676767', '#009999',
# '#ff6db6', '#490092')
# Fixed 10-colour palette for hard-cluster colouring (max 10 clusters)
brewerset_Clusters = c('#000000', '#db6d00', '#006ddb', '#920000',
'#754a3c', '#ffdf4d', '#676767', '#009999',
'#ff6db6', '#490092')
# ramp over the categorical palette; also reused in the loop below
newcol_Cat = colorRampPalette(brewerset_Cat)
colorset = newcol_Cat(length(unique(df_colorAll$TV))) # one colour per TV (type-variant) category
# colour lookup per TV value, merged back onto the per-sherd table
# (NOTE: merge() re-sorts df_colorAll by TV; row order is restored later via match())
df_colorTV = cbind.data.frame(sort(unique(df_colorAll$TV)), colorset)
names(df_colorTV) = c("TV", "col")
df_colorTV$TV = as.character(df_colorTV$TV)
df_colorAll = merge(df_colorAll, df_colorTV, by = "TV")
names(df_colorAll)[4] = "TV_col"
# continuous (diverging) ramp for numeric variables, used in the loop below
brewerset_Con = brewer.pal(11, name = "BrBG")
newcol_Con = colorRampPalette(brewerset_Con)
# For each scaled variable in bee_dataplot (groupLabels[4:], i.e. skipping
# filenaam/binaryLabel/TV), build a per-unique-value colour from the
# continuous BrBG ramp and merge it onto df_colorAll as an extra column.
# NOTE(review): each merge() re-sorts df_colorAll; downstream code restores
# the row order with match(), so the order here is not relied upon.
for(i in 4:length(groupLabels)) {
# values of the i-th variable for all sherds
vari = bee_dataplot[, names(bee_dataplot) %in% groupLabels[i]]
# one ramp colour per unique (sorted) value
colorset = newcol_Con(length(unique(vari)))
df_colorVari = cbind.data.frame(sort(unique(vari)), colorset)
names(df_colorVari)[1] = names(bee_dataplot)[(names(bee_dataplot) %in% groupLabels[i])]
names(df_colorVari)[2] = "col"
# head(df_colorVari)
# filenaam + this variable, to join the colour back per sherd
df_colorFili = bee_dataplot[ , c(1, which(names(bee_dataplot) %in% groupLabels[i]))] # 1=>bee_dataplot$filenaam
# head(df_colorFili)
df_colorI = merge(df_colorFili, df_colorVari, by = names(bee_dataplot)[(names(bee_dataplot) %in% groupLabels[i])])
# head(df_colorI)
# drop the variable itself; keep filenaam + colour (renamed to the variable name)
df_colorI = df_colorI[,-1]
names(df_colorI)[2] = names(bee_dataplot)[(names(bee_dataplot) %in% groupLabels[i])]
# head(df_colorI)
df_colorAll = merge(df_colorAll, df_colorI, by = "filenaam")
}
rm(df_colorI, df_colorFili, df_colorVari)
# All selected data enriched: variables Original, variables Scaled, variable Colors, filewd
# df_dataplot, bee_dataplot, df_colorAll
# Build df_All: original values + "_Sc"-suffixed scaled values + "_col"
# colour columns + file location, joined on filenaam.
df_All = cbind.data.frame(rownames(df_dataplot), df_dataplot)
names(df_All)[1] = "filenaam"
# suffix scaled columns so they do not clash with the originals after merge
names(bee_dataplot)[2:length(names(bee_dataplot))] = paste0(names(bee_dataplot)[2:length(names(bee_dataplot))], "_Sc")
df_All = merge(df_All, bee_dataplot, by = "filenaam")
# suffix the colour columns added by the loop above (cols 5+ of df_colorAll)
names(df_colorAll)[5:length(names(df_colorAll))] = paste0(names(df_colorAll)[5:length(names(df_colorAll))], "_col")
df_colorAll = as.data.frame(lapply(df_colorAll, as.character))
df_All = merge(df_All, df_colorAll, by = "filenaam")
# image directory per sherd
df_filewd = data_df[ ,c("filename","filewd")]
names(df_filewd)[1] = "filenaam"
df_All = merge(df_All, df_filewd, by = "filenaam")
df_All$filenaam = as.character(df_All$filenaam)
# highlight the special (central/peripheral/marginal) sherds in black, rest grey
df_All$plain_col = ifelse(df_All$filenaam %in% special_set_1B150$filename, "#000000", "#d6d6d6")
df_All$TV = as.character(df_All$TV)
# clean some data from the environment
rm(df_colorAll, df_colorTV, vmin, vmax) # df_colorCentr,
# For FIS 1-3: assign one categorical colour per final descriptor (palette
# entries 2..k+1 of brewerset_Cat), plus an alpha-blended variant whose
# transparency encodes the rule's final support (0-1, mapped to 0-100%).
# The merge at the end also carries every remaining column of data_df_FIS_*
# into df_All (only support/value/code are dropped).
# NOTE(review): assumes at most 21 distinct descriptors per FIS; more would
# index past brewerset_Cat and yield NA colours — confirm upstream bound.
## descriptors in FIS 1
df_descriptors = cbind.data.frame(unique(data_df_FIS_1$final.descr.FIS_1), brewerset_Cat[2:(1+length(unique(data_df_FIS_1$final.descr.FIS_1)))])
names(df_descriptors) = c("final.descr.FIS_1", "final.descr.FIS_1_col")
df_descriptors$final.descr.FIS_1 = as.character(df_descriptors$final.descr.FIS_1)
df_colorI = merge(data_df_FIS_1, df_descriptors, by = "final.descr.FIS_1")
# support (0-1) -> integer percentage -> hex alpha via alphaHEXcode
df_colorI$value = round(df_colorI$final.support.FIS_1*100,0)
df_colorI = merge(df_colorI, alphaHEXcode, by = "value")
df_colorI$final.descr.FIS_1_col_alpha = paste0(df_colorI$final.descr.FIS_1_col, df_colorI$code)
df_colorI = df_colorI[,-which(colnames(df_colorI) %in% c("final.support.FIS_1", "value", "code"))]
names(df_colorI)[which(names(df_colorI) == "filename")] = "filenaam"
df_All = merge(df_All, df_colorI, by = "filenaam")
## descriptors in FIS 2 (same pattern as FIS 1)
df_descriptors = cbind.data.frame(unique(data_df_FIS_2$final.descr.FIS_2), brewerset_Cat[2:(1+length(unique(data_df_FIS_2$final.descr.FIS_2)))])
names(df_descriptors) = c("final.descr.FIS_2", "final.descr.FIS_2_col")
df_descriptors$final.descr.FIS_2 = as.character(df_descriptors$final.descr.FIS_2)
df_colorI = merge(data_df_FIS_2, df_descriptors, by = "final.descr.FIS_2")
df_colorI$value = round(df_colorI$final.support.FIS_2*100,0)
df_colorI = merge(df_colorI, alphaHEXcode, by = "value")
df_colorI$final.descr.FIS_2_col_alpha = paste0(df_colorI$final.descr.FIS_2_col, df_colorI$code)
df_colorI = df_colorI[,-which(colnames(df_colorI) %in% c("final.support.FIS_2", "value", "code"))]
names(df_colorI)[which(names(df_colorI) == "filename")] = "filenaam"
df_All = merge(df_All, df_colorI, by = "filenaam")
## descriptors in FIS 3 (same pattern as FIS 1)
df_descriptors = cbind.data.frame(unique(data_df_FIS_3$final.descr.FIS_3), brewerset_Cat[2:(1+length(unique(data_df_FIS_3$final.descr.FIS_3)))])
names(df_descriptors) = c("final.descr.FIS_3", "final.descr.FIS_3_col")
df_descriptors$final.descr.FIS_3 = as.character(df_descriptors$final.descr.FIS_3)
df_colorI = merge(data_df_FIS_3, df_descriptors, by = "final.descr.FIS_3")
df_colorI$value = round(df_colorI$final.support.FIS_3*100,0)
df_colorI = merge(df_colorI, alphaHEXcode, by = "value")
df_colorI$final.descr.FIS_3_col_alpha = paste0(df_colorI$final.descr.FIS_3_col, df_colorI$code)
df_colorI = df_colorI[,-which(colnames(df_colorI) %in% c("final.support.FIS_3", "value", "code"))]
names(df_colorI)[which(names(df_colorI) == "filename")] = "filenaam"
df_All = merge(df_All, df_colorI, by = "filenaam")
## descriptors in FIS 4 — same colouring pattern as FIS 1-3, but applied
## separately to the exterior and interior descriptors of data_df_FIS_4.
## NOTE(review): unlike FIS 1-3, only filename + descriptor + colour are
## kept here; the *_col_alpha column is computed and then discarded by the
## positive column selection — confirm whether that is intentional.
# exterior
df_descriptors = cbind.data.frame(unique(data_df_FIS_4$final.descr.ext.FIS_4), brewerset_Cat[2:(1+length(unique(data_df_FIS_4$final.descr.ext.FIS_4)))])
names(df_descriptors) = c("final.descr.ext.FIS_4", "final.descr.ext.FIS_4_col")
df_descriptors$final.descr.ext.FIS_4 = as.character(df_descriptors$final.descr.ext.FIS_4)
df_colorI = merge(data_df_FIS_4, df_descriptors, by = "final.descr.ext.FIS_4")
df_colorI$value = round(df_colorI$final.support.ext.FIS_4*100,0)
df_colorI = merge(df_colorI, alphaHEXcode, by = "value")
df_colorI$final.descr.ext.FIS_4_col_alpha = paste0(df_colorI$final.descr.ext.FIS_4_col, df_colorI$code)
df_colorI = df_colorI[,which(colnames(df_colorI) %in% c("filename", "final.descr.ext.FIS_4", "final.descr.ext.FIS_4_col"))]
names(df_colorI)[which(names(df_colorI) == "filename")] = "filenaam"
df_All = merge(df_All, df_colorI, by = "filenaam")
# interior
df_descriptors = cbind.data.frame(unique(data_df_FIS_4$final.descr.int.FIS_4), brewerset_Cat[2:(1+length(unique(data_df_FIS_4$final.descr.int.FIS_4)))])
names(df_descriptors) = c("final.descr.int.FIS_4", "final.descr.int.FIS_4_col")
df_descriptors$final.descr.int.FIS_4 = as.character(df_descriptors$final.descr.int.FIS_4)
df_colorI = merge(data_df_FIS_4, df_descriptors, by = "final.descr.int.FIS_4")
df_colorI$value = round(df_colorI$final.support.int.FIS_4*100,0)
df_colorI = merge(df_colorI, alphaHEXcode, by = "value")
df_colorI$final.descr.int.FIS_4_col_alpha = paste0(df_colorI$final.descr.int.FIS_4_col, df_colorI$code)
df_colorI = df_colorI[,which(colnames(df_colorI) %in% c("filename", "final.descr.int.FIS_4", "final.descr.int.FIS_4_col"))]
names(df_colorI)[which(names(df_colorI) == "filename")] = "filenaam"
df_All = merge(df_All, df_colorI, by = "filenaam")
# ## descriptors in FIS 5
# df_descriptors = cbind.data.frame(unique(data_df_FIS_5$final.descr.FIS_5), brewerset_Cat[2:(1+length(unique(data_df_FIS_5$final.descr.FIS_5)))])
# names(df_descriptors) = c("final.descr.FIS_5", "final.descr.FIS_5_col")
# df_descriptors$final.descr.FIS_5 = as.character(df_descriptors$final.descr.FIS_5)
# df_colorI = merge(data_df_FIS_5, df_descriptors, by = "final.descr.FIS_5")
# df_colorI$value = round(df_colorI$final.support.FIS_5*100,0)
# df_colorI = merge(df_colorI, alphaHEXcode, by = "value")
# df_colorI$final.descr.FIS_5_col_alpha = paste0(df_colorI$final.descr.FIS_5_col, df_colorI$code)
# df_colorI = df_colorI[,-which(colnames(df_colorI) %in% c("final.support.FIS_5", "value", "code"))]
# names(df_colorI)[which(names(df_colorI) == "filename")] = "filenaam"
# df_All = merge(df_All, df_colorI, by = "filenaam")
## descriptors in FIS 6 — same pattern as FIS 1-3: descriptor colour plus
## alpha-blended variant encoding the final support; all remaining
## data_df_FIS_6 columns are carried into df_All.
df_descriptors = cbind.data.frame(unique(data_df_FIS_6$final.descr.FIS_6), brewerset_Cat[2:(1+length(unique(data_df_FIS_6$final.descr.FIS_6)))])
names(df_descriptors) = c("final.descr.FIS_6", "final.descr.FIS_6_col")
df_descriptors$final.descr.FIS_6 = as.character(df_descriptors$final.descr.FIS_6)
df_colorI = merge(data_df_FIS_6, df_descriptors, by = "final.descr.FIS_6")
df_colorI$value = round(df_colorI$final.support.FIS_6*100,0)
df_colorI = merge(df_colorI, alphaHEXcode, by = "value")
df_colorI$final.descr.FIS_6_col_alpha = paste0(df_colorI$final.descr.FIS_6_col, df_colorI$code)
df_colorI = df_colorI[,-which(colnames(df_colorI) %in% c("final.support.FIS_6", "value", "code"))]
names(df_colorI)[which(names(df_colorI) == "filename")] = "filenaam"
df_All = merge(df_All, df_colorI, by = "filenaam")
## descriptors in FIS 7 (same pattern as FIS 6)
df_descriptors = cbind.data.frame(unique(data_df_FIS_7$final.descr.FIS_7), brewerset_Cat[2:(1+length(unique(data_df_FIS_7$final.descr.FIS_7)))])
names(df_descriptors) = c("final.descr.FIS_7", "final.descr.FIS_7_col")
df_descriptors$final.descr.FIS_7 = as.character(df_descriptors$final.descr.FIS_7)
df_colorI = merge(data_df_FIS_7, df_descriptors, by = "final.descr.FIS_7")
df_colorI$value = round(df_colorI$final.support.FIS_7*100,0)
df_colorI = merge(df_colorI, alphaHEXcode, by = "value")
df_colorI$final.descr.FIS_7_col_alpha = paste0(df_colorI$final.descr.FIS_7_col, df_colorI$code)
df_colorI = df_colorI[,-which(colnames(df_colorI) %in% c("final.support.FIS_7", "value", "code"))]
names(df_colorI)[which(names(df_colorI) == "filename")] = "filenaam"
df_All = merge(df_All, df_colorI, by = "filenaam")
# hard cluster results coloring ##
## 10 clusters: one fixed colour per hard cluster; the membership degree of
## the assigned cluster is encoded as the colour's alpha channel.
df_colorCluster = cbind.data.frame(unique(cluster_df_10$hard_cl), brewerset_Clusters[seq_along(unique(cluster_df_10$hard_cl))])
names(df_colorCluster) = c("hard_cl", "Cluster_10_col")
df_colorCluster$hard_cl = as.character(df_colorCluster$hard_cl)
df_colorI = merge(cluster_df_10, df_colorCluster, by = "hard_cl")
# membership (0-1) of the assigned cluster, as an integer percentage;
# membership columns are assumed to start at column 3, ordered by cluster id
vecAlpha = data.frame("filenaam" = df_colorI$filenaam, "value" = rep(1, nrow(df_colorI)))
for (rown in seq_len(nrow(vecAlpha))) {  # seq_len(): safe on 0 rows, unlike 1:n
vecAlpha[rown, "value"] = round(df_colorI[rown, as.numeric(df_colorI$hard_cl[rown]) + 2]*100, 0)
}
# scale values into [20, 100] for clearer visualisation; guard against a
# zero range (all memberships equal), which would otherwise yield NaN and
# silently drop every row in the merge with alphaHEXcode
minVal = min(vecAlpha$value); maxVal = max(vecAlpha$value)
if (maxVal > minVal) {
vecAlpha$value = round(20 + (100 - 20)*(vecAlpha$value - minVal) / (maxVal - minVal),0)
} else {
vecAlpha$value = rep(100, nrow(vecAlpha))
}
vecAlpha = merge(vecAlpha, alphaHEXcode, by = "value")
colnames(vecAlpha)[which(colnames(vecAlpha) == "value")] = "value_10"
df_colorI = merge(df_colorI, vecAlpha, by = "filenaam")
colnames(df_colorI)[which(colnames(df_colorI) == "code")] = "code_10"
# colour + alpha suffix = RGBA hex
df_colorI$Cluster_10_Membership_col = paste0(df_colorI$Cluster_10_col, df_colorI$code_10)
colnames(df_colorI)[which(colnames(df_colorI) == "hard_cl")] = "hard_cl_10"
df_colorI = df_colorI[, c("filenaam", "hard_cl_10", "Cluster_10_col", "value_10", "code_10", "Cluster_10_Membership_col")]
df_All = merge(df_All, df_colorI, by = "filenaam")
## 8 clusters — same colouring scheme as the 10-cluster block above
df_colorCluster = cbind.data.frame(unique(cluster_df_8$hard_cl), brewerset_Clusters[seq_along(unique(cluster_df_8$hard_cl))])
names(df_colorCluster) = c("hard_cl", "Cluster_8_col")
df_colorCluster$hard_cl = as.character(df_colorCluster$hard_cl)
df_colorI = merge(cluster_df_8, df_colorCluster, by = "hard_cl")
# membership of the assigned cluster as an integer percentage
vecAlpha = data.frame("filenaam" = df_colorI$filenaam, "value" = rep(1, nrow(df_colorI)))
for (rown in seq_len(nrow(vecAlpha))) {  # seq_len(): safe on 0 rows, unlike 1:n
vecAlpha[rown, "value"] = round(df_colorI[rown, as.numeric(df_colorI$hard_cl[rown]) + 2]*100, 0)
}
# scale into [20, 100]; guard zero range (would yield NaN and drop rows)
minVal = min(vecAlpha$value); maxVal = max(vecAlpha$value)
if (maxVal > minVal) {
vecAlpha$value = round(20 + (100 - 20)*(vecAlpha$value - minVal) / (maxVal - minVal),0)
} else {
vecAlpha$value = rep(100, nrow(vecAlpha))
}
vecAlpha = merge(vecAlpha, alphaHEXcode, by = "value")
colnames(vecAlpha)[which(colnames(vecAlpha) == "value")] = "value_8"
df_colorI = merge(df_colorI, vecAlpha, by = "filenaam")
colnames(df_colorI)[which(colnames(df_colorI) == "code")] = "code_8"
df_colorI$Cluster_8_Membership_col = paste0(df_colorI$Cluster_8_col, df_colorI$code_8)
colnames(df_colorI)[which(colnames(df_colorI) == "hard_cl")] = "hard_cl_8"
df_colorI = df_colorI[, c("filenaam", "hard_cl_8", "Cluster_8_col", "value_8", "code_8", "Cluster_8_Membership_col")]
df_All = merge(df_All, df_colorI, by = "filenaam")
## 6 clusters — same colouring scheme as the 10-cluster block above
df_colorCluster = cbind.data.frame(unique(cluster_df_6$hard_cl), brewerset_Clusters[seq_along(unique(cluster_df_6$hard_cl))])
names(df_colorCluster) = c("hard_cl", "Cluster_6_col")
df_colorCluster$hard_cl = as.character(df_colorCluster$hard_cl)
df_colorI = merge(cluster_df_6, df_colorCluster, by = "hard_cl")
# membership of the assigned cluster as an integer percentage
vecAlpha = data.frame("filenaam" = df_colorI$filenaam, "value" = rep(1, nrow(df_colorI)))
for (rown in seq_len(nrow(vecAlpha))) {  # seq_len(): safe on 0 rows, unlike 1:n
vecAlpha[rown, "value"] = round(df_colorI[rown, as.numeric(df_colorI$hard_cl[rown]) + 2]*100, 0)
}
# scale into [20, 100]; guard zero range (would yield NaN and drop rows)
minVal = min(vecAlpha$value); maxVal = max(vecAlpha$value)
if (maxVal > minVal) {
vecAlpha$value = round(20 + (100 - 20)*(vecAlpha$value - minVal) / (maxVal - minVal),0)
} else {
vecAlpha$value = rep(100, nrow(vecAlpha))
}
vecAlpha = merge(vecAlpha, alphaHEXcode, by = "value")
colnames(vecAlpha)[which(colnames(vecAlpha) == "value")] = "value_6"
df_colorI = merge(df_colorI, vecAlpha, by = "filenaam")
colnames(df_colorI)[which(colnames(df_colorI) == "code")] = "code_6"
df_colorI$Cluster_6_Membership_col = paste0(df_colorI$Cluster_6_col, df_colorI$code_6)
colnames(df_colorI)[which(colnames(df_colorI) == "hard_cl")] = "hard_cl_6"
df_colorI = df_colorI[, c("filenaam", "hard_cl_6", "Cluster_6_col", "value_6", "code_6", "Cluster_6_Membership_col")]
df_All = merge(df_All, df_colorI, by = "filenaam")
## 4 clusters — same colouring scheme as the 10-cluster block above
df_colorCluster = cbind.data.frame(unique(cluster_df_4$hard_cl), brewerset_Clusters[seq_along(unique(cluster_df_4$hard_cl))])
names(df_colorCluster) = c("hard_cl", "Cluster_4_col")
df_colorCluster$hard_cl = as.character(df_colorCluster$hard_cl)
df_colorI = merge(cluster_df_4, df_colorCluster, by = "hard_cl")
# membership of the assigned cluster as an integer percentage
vecAlpha = data.frame("filenaam" = df_colorI$filenaam, "value" = rep(1, nrow(df_colorI)))
for (rown in seq_len(nrow(vecAlpha))) {  # seq_len(): safe on 0 rows, unlike 1:n
vecAlpha[rown, "value"] = round(df_colorI[rown, as.numeric(df_colorI$hard_cl[rown]) + 2]*100, 0)
}
# scale into [20, 100]; guard zero range (would yield NaN and drop rows)
minVal = min(vecAlpha$value); maxVal = max(vecAlpha$value)
if (maxVal > minVal) {
vecAlpha$value = round(20 + (100 - 20)*(vecAlpha$value - minVal) / (maxVal - minVal),0)
} else {
vecAlpha$value = rep(100, nrow(vecAlpha))
}
vecAlpha = merge(vecAlpha, alphaHEXcode, by = "value")
colnames(vecAlpha)[which(colnames(vecAlpha) == "value")] = "value_4"
df_colorI = merge(df_colorI, vecAlpha, by = "filenaam")
colnames(df_colorI)[which(colnames(df_colorI) == "code")] = "code_4"
df_colorI$Cluster_4_Membership_col = paste0(df_colorI$Cluster_4_col, df_colorI$code_4)
colnames(df_colorI)[which(colnames(df_colorI) == "hard_cl")] = "hard_cl_4"
df_colorI = df_colorI[, c("filenaam", "hard_cl_4", "Cluster_4_col", "value_4", "code_4", "Cluster_4_Membership_col")]
df_All = merge(df_All, df_colorI, by = "filenaam")
# Add the columns that are not yet added in the df_All ###
data_df_categ = data_df[,which(colnames(data_df) %in% columCateg)]
# NOTE(review): assumes the first column selected by columCateg is the file
# name — confirm columCateg's ordering upstream
colnames(data_df_categ)[1] = "filenaam"
# context label = ContextCompare with its first 3 characters stripped
# (presumably a sort prefix — verify against the CSV)
data_df_categ$ContextLabel = as.character(data_df_categ$ContextCompare)
data_df_categ$ContextLabel = substring(data_df_categ$ContextLabel, 4)
df_All = merge(df_All, data_df_categ, by = "filenaam")
# reorder the dataframe back to the shared observation order (merges above re-sorted it)
obs_order_id = match(obs_order, df_All$filenaam) # added for the expected ordering
df_All = df_All[obs_order_id, ] # added for the expected ordering
rownames(df_All) = NULL # added for the expected ordering
# UI lookup: display name -> name of the corresponding "_col" column in
# df_All, used to pick the node-colouring variable in the interface.
listcolors = list("plain" = "plain_col", "full profile" = "binaryLabel", "TV" = "TV_col",
"Cluster (10)" = "Cluster_10_col", "Cluster_Membership (10)" = "Cluster_10_Membership_col",
"Cluster (8)" = "Cluster_8_col", "Cluster_Membership (8)" = "Cluster_8_Membership_col",
"Cluster (6)" = "Cluster_6_col", "Cluster_Membership (6)" = "Cluster_6_Membership_col",
"Cluster (4)" = "Cluster_4_col", "Cluster_Membership (4)" = "Cluster_4_Membership_col",
"rim.out.prot.length" = "rim.out.prot.length_col",
"rim.inn.prot.length" = "rim.inn.prot.length_col", "rim.out.diff.length" = "rim.out.diff.length_col", "rim.inn.diff.length" = "rim.inn.diff.length_col",
"rim.inn.prot.length.selected" = "rim.inn.prot.length.selected_col", "rim.out.prot.length.selected" = "rim.out.prot.length.selected_col",
"rim.inn.prot.length.selected_scaled" = "rim.inn.prot.length.selected_scaled_col", "rim.out.prot.length.selected_scaled" = "rim.out.prot.length.selected_scaled_col",
"block.perC" = "block.perC_col", "trapez.perC" = "trapez.perC_col",
"Rim_HorizCut.Out_WT" = "Rim_HorizCut.Out_WT_col", "Rim_HorizCut.Inn_WT" = "Rim_HorizCut.Inn_WT_col",
"Rim_mean.WT" = "Rim_mean.WT_col", "Rim_median.WT" = "Rim_median.WT_col",
"Rim_sd.WT" = "Rim_sd.WT_col", "Rim_min.WT" = "Rim_min.WT_col", "Rim_max.WT" = "Rim_max.WT_col",
"RimWT_margin" = "RimWT_margin_col", "Rim_Diam_extra_half" = "Rim_Diam_extra_half_col", "Rim_curl" = "Rim_curl_col",
"Rim_elongation.min" = "Rim_elongation.min_col", "Rim_elongation.max" = "Rim_elongation.max_col", "Rim_elongation.avg" = "Rim_elongation.avg_col",
"Rim_radius.ratio" = "Rim_radius.ratio_col", "Rim_height_manual" = "Rim_height_manual_col", "Rim_incl_sin_mean" = "Rim_incl_sin_mean_col",
"form.factor" = "form.factor_col", "aspect.ratio" = "aspect.ratio_col", "roundness" = "roundness_col",
"Rim_extent.1" = "Rim_extent.1_col", "Rim_eccentricity" = "Rim_eccentricity_col", "Rim_incl_sin_min" = "Rim_incl_sin_min_col", "Rim_incl_sin_max" = "Rim_incl_sin_max_col",
"Rim_mass.centre.x" = "Rim_mass.centre.x_col", "Rim_mass.centre.y" = "Rim_mass.centre.y_col",
"Rim_majoraxis" = "Rim_majoraxis_col", "Rim_WT.mean.norm" = "Rim_WT.mean.norm_col", "Rim_WT.med.norm" = "Rim_WT.med.norm_col",
"Rim_WT.var.norm" = "Rim_WT.var.norm_col", "Rim_WT.skew.norm" = "Rim_WT.skew.norm_col", "Rim_WT.kurt.norm" = "Rim_WT.kurt.norm_col",
"Rim_WT.BotMed.norm" = "Rim_WT.BotMed.norm_col", "Below.Rim_incl_sin_min" = "Below.Rim_incl_sin_min_col",
"Below.Rim_incl_sin_max" = "Below.Rim_incl_sin_max_col", "Below.Rim_incl_sin_mean" = "Below.Rim_incl_sin_mean_col",
"Below.Rim_incl_min_sign" = "Below.Rim_incl_min_sign_col", "Below.Rim_incl_max_sign" = "Below.Rim_incl_max_sign_col",
"Below.Rim_incl_mean_sign" = "Below.Rim_incl_mean_sign_col",
"Rim_flattness_med" = "Rim_flattness_med_col", "Rim_flattness_max" = "Rim_flattness_max_col", "CutsSymmetric" = "CutsSymmetric_col", "WT_LipBot" = "WT_LipBot_col",
"ellipse.perC" = "ellipse.perC_col",
# 'FIS.1_mf.1.out' = 'FIS.1_mf.1.out_col', 'FIS.1_mf.1.avg' = 'FIS.1_mf.1.avg_col', 'FIS.1_mf.1.inn' = 'FIS.1_mf.1.inn_col', 'FIS.1_mf.2.out' = 'FIS.1_mf.2.out_col',
# 'FIS.1_mf.2.avg' = 'FIS.1_mf.2.avg_col', 'FIS.1_mf.2.inn' = 'FIS.1_mf.2.inn_col', 'FIS.1_mf.3.out' = 'FIS.1_mf.3.out_col', 'FIS.1_mf.3.avg' = 'FIS.1_mf.3.avg_col',
# 'FIS.1_mf.3.inn' = 'FIS.1_mf.3.inn_col', 'FIS.1_mf.4.out' = 'FIS.1_mf.4.out_col', 'FIS.1_mf.4.avg' = 'FIS.1_mf.4.avg_col', 'FIS.1_mf.4.inn' = 'FIS.1_mf.4.inn_col',
# 'straight.rim' = 'straight.rim_col', 'slightly.bent.rim' = 'slightly.bent.rim_col', 'quite.bent.rim' = 'quite.bent.rim_col', 'profoundly.bent.rim' = 'profoundly.bent.rim_col',
# 'straight.rim.sign' = 'straight.rim.sign_col', 'straight.rim.descr' = 'straight.rim.descr_col', 'slightly.bent.rim.sign' = 'slightly.bent.rim.sign_col',
# 'slightly.bent.rim.descr' = 'slightly.bent.rim.descr_col', 'quite.bent.rim.sign' = 'quite.bent.rim.sign_col', 'quite.bent.rim.descr' = 'quite.bent.rim.descr_col',
# 'profoundly.bent.rim.sign' = 'profoundly.bent.rim.sign_col', 'profoundly.bent.rim.descr' = 'profoundly.bent.rim.descr_col', 'final.support' = 'final.support_col',
# 'interm.1.descr' = 'interm.1.descr_col', 'interm.2.descr' = 'interm.2.descr_col', 'final.descr' = 'final.descr_col',
'final.support.FIS_1' = 'final.support.FIS_1_col', 'final.support.FIS_2' = 'final.support.FIS_2_col', 'final.support.FIS_3' = 'final.support.FIS_3_col', 'final.support.ext.FIS_4' = 'final.support.ext.FIS_4_col', 'final.support.int.FIS_4' = 'final.support.int.FIS_4_col', 'final.support.FIS_6' = 'final.support.FIS_6_col', # 'final.support.FIS_5' = 'final.support.FIS_5_col',
'final.descr.FIS_1' = 'final.descr.FIS_1_col', 'final.descr.FIS_1_alpha' = 'final.descr.FIS_1_col_alpha',
'final.descr.FIS_2' = 'final.descr.FIS_2_col', 'final.descr.FIS_2_alpha' = 'final.descr.FIS_2_col_alpha',
'final.descr.FIS_3' = 'final.descr.FIS_3_col', 'final.descr.FIS_3_alpha' = 'final.descr.FIS_3_col_alpha',
'final.descr.ext.FIS_4' = 'final.descr.ext.FIS_4_col', 'final.descr.ext.FIS_4_alpha' = 'final.descr.ext.FIS_4_col_alpha',
'final.descr.int.FIS_4' = 'final.descr.int.FIS_4_col', 'final.descr.int.FIS_4_alpha' = 'final.descr.int.FIS_4_col_alpha',
# 'final.descr.FIS_5' = 'final.descr.FIS_5_col', 'final.descr.FIS_5_alpha' = 'final.descr.FIS_5_col_alpha',
'final.descr.FIS_6' = 'final.descr.FIS_6_col', 'final.descr.FIS_6_alpha' = 'final.descr.FIS_6_col_alpha',
'final.descr.FIS_7' = 'final.descr.FIS_7_col', 'final.descr.FIS_7_alpha' = 'final.descr.FIS_7_col_alpha'
)
# Context data: number of sherds per archaeological context, the site code
# extracted from the context label, and whether the context was checked
# (inERlist); sorted by site for display.
tbl_contexts = as.data.frame(table(df_All$ContextLabel)) # df_All$ContextCompare
tbl_contexts$Var1 = as.character(tbl_contexts$Var1)
# site code = second "-"-separated token of the context label; vectorised
# strsplit() replaces the former per-row loop, which also broke on an empty
# table because of 1:nrow() (NA if a label has no "-", same as before)
tbl_contexts$Site = vapply(strsplit(tbl_contexts$Var1, "-"), function(parts) parts[2], character(1))
colnames(tbl_contexts) = c("Context", "sherds", "Site")
# one row per context with its inERlist flag
tbl_inERlist = unique(cbind.data.frame(df_All$ContextLabel,df_All$inERlist))
colnames(tbl_inERlist) = c("Context", "Checked")
tbl_contexts = merge(tbl_contexts, tbl_inERlist, by = "Context")
tbl_contexts = tbl_contexts[order(tbl_contexts$Site),c("Site", "Context", "Checked", "sherds")]
# Size of node data as the certainty of belonging to the group
# add in df_All?
# Node size = membership degree (0-1) of the assigned 10-cluster solution,
# scaled to 0-20 pixels.
df_nodesize = data.frame("filenaam" = df_All$filenaam, "nodesize" = 0)
for (cl_i in unique(cluster_df_10$hard_cl)) {
df_cl_i = subset(cluster_df_10, cluster_df_10$hard_cl == cl_i)
# here we can normalise inside the cluster if we want ....
# NOTE(review): cl_i+2 assumes membership columns start at column 3 and are
# ordered by numeric cluster id — confirm against the cluster CSV layout
df_nodesize[c(match(df_cl_i$filenaam, df_nodesize$filenaam)), "nodesize"] = round(df_cl_i[,cl_i+2]*20,0)
} # the order is correct because we have taken the filenaam data from 'df_All'
### Network Data ###
## wd: directory holding the node images (one entry per sherd) ##
# wd = as.character(df_All$filewd.y) # original profile image
wd = rep("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Scripts_VesselMorphology/img_rim_20201711", length(data_df$filename)) # rim image
# wd = rep("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Scripts_VesselMorphology/img_cropped_Profile/img_cropped_Pr_1B150_20211112", length(data_df$filename)) # cropped profile image
## images: file name of each node image ##
# pic = as.character(df_All$filenaam) # original image
pic = paste0("img_rim_", as.character(data_df$filename)) # rim image
# pic = paste0("remade_", as.character(data_df$filename)) # cropped profile image
## base64-encode every image for embedding in the network widget ##
# pre-allocated result vector: growing txt inside the loop (txt = NULL;
# txt[i] = ...) copies the whole vector on every iteration
txt = character(length(pic))
for (i in seq_along(pic)) {
img_file = paste0(wd[i], "/", pic[i])
# read the raw bytes of the image and base64-encode them
txt[i] = RCurl::base64Encode(readBin(img_file, 'raw', file.info(img_file)[1, 'size']), 'txt')
}
# node attributes
sizeGroup = 20 # for all sherds having the same size
| /R/get_data.R | permissive | kafetzakid/morphotypeShiny | R | false | false | 55,134 | r | # distance matrix
distM = read.csv(paste0(readwd,"distM_2Doutlines_dtw_1B150_20210802.csv"), row.names = 1)
# circumference data
load(paste0(readwd, "circumference_scaled_Rim_1B150_selectedERlist_20210802.RData"))
rowN = max(unlist(lapply(mat_circumference_scaled, dim)))
names(mat_circumference_scaled) = names(distM)
# process matrix data
distM = as.matrix(distM)
distM = distM/max(distM)
# data clicked
dfplot_clicked_i = data.frame(matrix(ncol = 2, nrow = rowN))
colnames(dfplot_clicked_i) = c("x_val",
"y_val")
dfplot_clicked_j = dfplot_clicked_i
# Tab Network #####
# high and low membership, marginal cases for the 1B150 clustering:
# hand-picked sherds labelled as central / peripheral / marginal examples
special_set_1B150 = data.frame(
"filename" = c("SADR021655.jpg", "SADR021680.jpg", "SADR020634.jpg", "SADR011104.jpg", "SADR020954.jpg", "SADR021208.jpg", "SADR010608.jpg", "SADR011077.jpg", "SADR011076.jpg"),
"label" = c("central", "central", "central", "peipheral", "peipheral & marginal", "peipheral & marginal", "marginal", "marginal", "marginal")
)
# measures data: per-sherd point measurements; filename doubles as the row key
data_df = as.data.frame(read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_pointMeasures_Rim_1B150_selectedERlist_20210928.csv"))
data_df$filename = as.character(data_df$filename)
data_df$Type = as.character(data_df$Type)
rownames(data_df) = data_df$filename
# obs order initial: remembered so merges (which reorder rows) can be undone later
obs_order_initial = as.character(data_df$filename)
# # Include the data from the Fuzzy Rules #####
# Flattness: ratios of rim diameter to wall thickness, plus lip/bottom width ratio
data_df$Rim_flattness_med = data_df$Rim_Diam_extra_half/data_df$Rim_median.WT
data_df$Rim_flattness_max = data_df$Rim_Diam_extra_half/data_df$Rim_max.WT
data_df$WT_LipBot = data_df$Rim_lip_width_scaled/data_df$Rim_bottom_width_scaled # included in FIS
# From FIS data created: variables extracted for the fuzzy inference systems
data_df_FIS_created = as.data.frame(read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_extracted_FIS_1B150_1B170_1A100_1C100_20210802.csv"))
data_df_FIS_created$filename = as.character(data_df_FIS_created$filename)
# as.data.frame(t(data_df_FIS_created[180,]))
data_df_FIS_created[,(colnames(data_df_FIS_created) %in% c("VC.lengths", "VC.sides", "VC.Height.Ratio"))] = NULL # use later for vertical cut
# coerce the cut-related columns to numeric (read in as factors/characters)
data_df_FIS_created$distTop.Out = as.numeric(as.character(data_df_FIS_created$distTop.Out))
data_df_FIS_created$distTop.In = as.numeric(as.character(data_df_FIS_created$distTop.In))
data_df_FIS_created$CutsSymmetric = as.numeric(as.character(data_df_FIS_created$CutsSymmetric))
data_df_FIS_created$VC.Outline.Ratio = as.numeric(as.character(data_df_FIS_created$VC.Outline.Ratio))
# impute values: negative sentinels mark missing cut measurements
# (presumably "no cut detected" -- confirm against the FIS extraction script)
data_df_FIS_created$CutsSymmetric[is.na(data_df_FIS_created$CutsSymmetric)] = -0.1
data_df_FIS_created$distTop.In[is.na(data_df_FIS_created$distTop.In)] = -1
data_df_FIS_created$distTop.Out[is.na(data_df_FIS_created$distTop.Out)] = -1
data_df = merge(data_df, data_df_FIS_created, by = "filename")
# data FIS for ellipse variable (separate, later extraction run)
data_df_FIS_ellipse = as.data.frame(read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_extracted_FIS_1B150_1B170_1A100_1C100_20210920.csv"))
data_df_FIS_ellipse$filename = as.character(data_df_FIS_ellipse$filename)
data_df = merge(data_df, data_df_FIS_ellipse, by = "filename")
# From FIS-1 (rim orientation: everted / straight / inverted).
# Pattern repeated for every FIS below: read results, restrict to the sherds in
# data_df, keep a small membership table for plotting, then merge the final
# support + descriptor (renamed with a FIS suffix) into data_df.
data_df_FIS_1 = as.data.frame(read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_extracted_FIS_1_1B150_1B170_1A100_1C100_20210802.csv"))
data_df_FIS_1$filename = as.character(data_df_FIS_1$filename)
data_df_FIS_1 = data_df_FIS_1[data_df_FIS_1$filename %in% data_df$filename, ]
#
# data for FIS plots by cluster
dataplot_FIS_1 = data.frame("filenaam" = data_df_FIS_1$filename, "everted.rim" = data_df_FIS_1$everted.rim, "straight.rim" = data_df_FIS_1$straight.rim, "inverted.rim" = data_df_FIS_1$inverted.rim) # , "hard_cl" = 0
dataplot_FIS_1$selected = 0
#
data_df_FIS_1 = data_df_FIS_1[, which(colnames(data_df_FIS_1) %in% c("filename", "final.support", "final.descr"))]
data_df_FIS_1$final.descr = as.character(data_df_FIS_1$final.descr)
colnames(data_df_FIS_1)[2:3] = c("final.support.FIS_1", "final.descr.FIS_1")
data_df = merge(data_df, data_df_FIS_1, by = "filename")
# From FIS-2 (horizontally flattened rim or not); same pattern as FIS-1
data_df_FIS_2 = as.data.frame(read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_extracted_FIS_2_1B150_1B170_1A100_1C100_20210802.csv"))
data_df_FIS_2$filename = as.character(data_df_FIS_2$filename)
data_df_FIS_2 = data_df_FIS_2[data_df_FIS_2$filename %in% data_df$filename, ]
#
# data for FIS plots by cluster
dataplot_FIS_2 = data.frame("filenaam" = data_df_FIS_2$filename, "horizontally.flattened.rim" = data_df_FIS_2$horizontally.flattened.rim, "not.horizontally.flattened.rim" = data_df_FIS_2$not.horizontally.flattened.rim) # , "hard_cl" = 0
dataplot_FIS_2$selected = 0
#
data_df_FIS_2 = data_df_FIS_2[, which(colnames(data_df_FIS_2) %in% c("filename", "final.support", "final.descr"))]
data_df_FIS_2$final.descr = as.character(data_df_FIS_2$final.descr)
colnames(data_df_FIS_2)[2:3] = c("final.support.FIS_2", "final.descr.FIS_2")
data_df = merge(data_df, data_df_FIS_2, by = "filename")
# From FIS-3 (rounded rim or not); same pattern as FIS-1
data_df_FIS_3 = as.data.frame(read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_extracted_FIS_3_1B150_1B170_1A100_1C100_20210802.csv"))
data_df_FIS_3$filename = as.character(data_df_FIS_3$filename)
data_df_FIS_3 = data_df_FIS_3[data_df_FIS_3$filename %in% data_df$filename, ]
#
# data for FIS plots by cluster
dataplot_FIS_3 = data.frame("filenaam" = data_df_FIS_3$filename, "rounded.rim" = data_df_FIS_3$rounded.rim, "not.rounded.rim" = data_df_FIS_3$not.rounded.rim) # , "hard_cl" = 0
dataplot_FIS_3$selected = 0
#
data_df_FIS_3 = data_df_FIS_3[, which(colnames(data_df_FIS_3) %in% c("filename", "final.support", "final.descr"))]
data_df_FIS_3$final.descr = as.character(data_df_FIS_3$final.descr)
colnames(data_df_FIS_3)[2:3] = c("final.support.FIS_3", "final.descr.FIS_3")
data_df = merge(data_df, data_df_FIS_3, by = "filename")
# From FIS-4 (vertically flattened exterior / interior); same pattern as FIS-1,
# but with separate support + descriptor for exterior and interior sides
data_df_FIS_4 = as.data.frame(read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_extracted_FIS_4_1B150_1B170_1A100_1C100_20210802.csv"))
data_df_FIS_4$filename = as.character(data_df_FIS_4$filename)
data_df_FIS_4 = data_df_FIS_4[data_df_FIS_4$filename %in% data_df$filename, ]
#
# data for FIS plots by cluster (exterior memberships)
dataplot_FIS_4.ext = data.frame("filenaam" = data_df_FIS_4$filename, "vertically.flattened.exterior" = data_df_FIS_4$vertically.flattened.exterior, "not.vertically.flattened.exterior" = data_df_FIS_4$not.vertically.flattened.exterior) # , "hard_cl" = 0
dataplot_FIS_4.ext$selected = 0
#
# data for FIS plots by cluster (interior memberships)
dataplot_FIS_4.int = data.frame("filenaam" = data_df_FIS_4$filename, "vertically.flattened.interior" = data_df_FIS_4$vertically.flattened.interior, "not.vertically.flattened.interior" = data_df_FIS_4$not.vertically.flattened.interior) # , "hard_cl" = 0
dataplot_FIS_4.int$selected = 0
#
data_df_FIS_4 = data_df_FIS_4[, which(colnames(data_df_FIS_4) %in% c("filename", "final.support.ext", "final.descr.ext", "final.support.int", "final.descr.int", "HC.int.ext"))]
data_df_FIS_4$final.descr.ext = as.character(data_df_FIS_4$final.descr.ext)
data_df_FIS_4$final.descr.int = as.character(data_df_FIS_4$final.descr.int)
colnames(data_df_FIS_4)[2:6] = c("final.support.ext.FIS_4", "final.descr.ext.FIS_4", "final.support.int.FIS_4", "final.descr.int.FIS_4", "final.descr.both.sides.FIS_4")
data_df = merge(data_df, data_df_FIS_4, by = "filename")
# # From FIS-5
# data_df_FIS_5 = as.data.frame(read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_extracted_FIS_5_1B150_1B170_1A100_1C100_20210802.csv"))
# data_df_FIS_5$filename = as.character(data_df_FIS_5$filename)
# data_df_FIS_5 = data_df_FIS_5[data_df_FIS_5$filename %in% data_df$filename, ]
# #
# # data for FIS plots by cluster
# dataplot_FIS_5 = data.frame("filenaam" = data_df_FIS_5$filename, "cut.rim" = data_df_FIS_5$cut.rim, "not.cut.rim" = data_df_FIS_5$not.cut.rim) # , "hard_cl" = 0
# dataplot_FIS_5$selected = 0
# #
# data_df_FIS_5 = data_df_FIS_5[, which(colnames(data_df_FIS_5) %in% c("filename", "final.support", "final.descr"))]
# data_df_FIS_5$final.descr = as.character(data_df_FIS_5$final.descr)
# colnames(data_df_FIS_5)[2:3] = c("final.support.FIS_5", "final.descr.FIS_5")
# data_df = merge(data_df, data_df_FIS_5, by = "filename")
# From FIS-6 (thickened rim or not); same pattern as FIS-1
data_df_FIS_6 = as.data.frame(read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_extracted_FIS_6_1B150_1B170_1A100_1C100_20210802.csv"))
data_df_FIS_6$filename = as.character(data_df_FIS_6$filename)
data_df_FIS_6 = data_df_FIS_6[data_df_FIS_6$filename %in% data_df$filename, ]
#
# data for FIS plots by cluster
dataplot_FIS_6 = data.frame("filenaam" = data_df_FIS_6$filename, "thickened.rim" = data_df_FIS_6$thickened.rim, "non.thickened.rim" = data_df_FIS_6$non.thickened.rim) # , "hard_cl" = 0
dataplot_FIS_6$selected = 0
#
data_df_FIS_6 = data_df_FIS_6[, which(colnames(data_df_FIS_6) %in% c("filename", "final.support", "final.descr"))]
data_df_FIS_6$final.descr = as.character(data_df_FIS_6$final.descr)
colnames(data_df_FIS_6)[2:3] = c("final.support.FIS_6", "final.descr.FIS_6")
data_df = merge(data_df, data_df_FIS_6, by = "filename")
# extra from FIS-6: pick the larger of the two candidate protrusion lengths,
# then scale by the manually measured rim height
data_df$rim.out.prot.length.selected = ifelse(data_df$rim.out.prot.length > data_df$rim.out.prot.length.2, data_df$rim.out.prot.length, data_df$rim.out.prot.length.2)
# NOTE(review): the condition below compares Rim_incl_min_sign, breaking the
# parallel with the outer-protrusion line above (which compares
# rim.out.prot.length); likely a copy-paste slip -- confirm whether it should
# read rim.inn.prot.length > rim.inn.prot.length.2 before changing.
data_df$rim.inn.prot.length.selected = ifelse(data_df$Rim_incl_min_sign > data_df$rim.inn.prot.length.2, data_df$rim.inn.prot.length, data_df$rim.inn.prot.length.2)
data_df$rim.out.prot.length.selected_scaled = data_df$rim.out.prot.length.selected/data_df$Rim_height_manual
data_df$rim.inn.prot.length.selected_scaled = data_df$rim.inn.prot.length.selected/data_df$Rim_height_manual
# From FIS-7 (complicated vs simple rim); same pattern as FIS-1
data_df_FIS_7 = as.data.frame(read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_extracted_FIS_7_471-sherds_20210928.csv"))
data_df_FIS_7$filename = as.character(data_df_FIS_7$filename)
data_df_FIS_7 = data_df_FIS_7[data_df_FIS_7$filename %in% data_df$filename, ]
#
# data for FIS plots by cluster
dataplot_FIS_7 = data.frame("filenaam" = data_df_FIS_7$filename, "complicated.rim" = data_df_FIS_7$complicated.rim, "simple.rim" = data_df_FIS_7$simple.rim) # , "hard_cl" = 0
dataplot_FIS_7$selected = 0
#
data_df_FIS_7 = data_df_FIS_7[, which(colnames(data_df_FIS_7) %in% c("filename", "final.support", "final.descr"))]
data_df_FIS_7$final.descr = as.character(data_df_FIS_7$final.descr)
colnames(data_df_FIS_7)[2:3] = c("final.support.FIS_7", "final.descr.FIS_7")
data_df = merge(data_df, data_df_FIS_7, by = "filename")
# order data: the merges above sorted rows by filename; restore the initial order
obs_order_initial_id = match(obs_order_initial, data_df$filename) # added for the expected ordering
data_df = data_df[obs_order_initial_id, ]
# get plot data: per-sherd labels keyed by filename
df_labels = as.data.frame(as.character(data_df$TV), stringsAsFactors = FALSE)
data_df$filename = as.character(data_df$filename)
colnames(df_labels) = "TV"
rownames(df_labels) = data_df$filename
df_labels$TV = as.character(df_labels$TV)
# binaryLabel: 1 for full profiles, 0 otherwise. The two-step replace()
# (rather than a single ifelse) preserves the column's coercion behaviour
# whatever type is.full.profile comes in as -- do not "simplify".
df_labels$binaryLabel = data_df$is.full.profile
df_labels$binaryLabel = replace(df_labels$binaryLabel, df_labels$binaryLabel == TRUE, 1)
df_labels$binaryLabel = replace(df_labels$binaryLabel, df_labels$binaryLabel != 1, 0)
# colum = c("Rim_incl_sin_mean","rim.out.prot.length","rim.inn.prot.length", "rim.out.diff.length","rim.inn.diff.length","block.out.perC","block.perC", "trapez.perC","form.factor","aspect.ratio","roundness",
# "Rim_HorizCut_WT","Rim_mean.WT","Rim_median.WT","Rim_sd.WT","Rim_min.WT","Rim_max.WT","RimWT_margin","Rim_Diam_extra_half","Rim_extent.1","Rim_curl",
# "Rim_elongation.min","Rim_elongation.max","Rim_elongation.avg","Rim_radius.ratio","Rim_eccentricity","Rim_incl_sin_min","Rim_incl_sin_max","Rim_height_manual",
# "WT.mean_norm", "WT.med_norm", "WT.var_norm", "WT.skew_norm", "WT.kurt_norm", "WT.BotMed_norm", "Rim_flattness_med", "Rim_flattness_max")
# Bookkeeping / categorical columns that must not enter the numeric plot data.
columCateg = c("filename", "TV", "Type", "filewd", "set", "is.full.profile", "sherd_rim.diameter.check", "Rim_HorizCut.Inn_Cont",
"inERlist", "ContextCompare", "Year", "Site", "sherd_listlines.out", "sherd_listlines.inn", "Rim_HorizCut.Out_Cont",
"Rim_HorizCut.Inn_Width", "Rim_HorizCut.Out_Width")
# Textual FIS descriptor columns, excluded for the same reason.
columFISdescr = c("final.descr.FIS_1", "final.descr.FIS_2", "final.descr.FIS_3", "final.descr.ext.FIS_4", "final.descr.int.FIS_4", "final.descr.both.sides.FIS_4", "final.descr.FIS_6", "final.descr.FIS_7") # "final.descr.FIS_5",
# Drop both exclusion sets in one logical-mask subset (equivalent to two passes).
drop_cols = c(columCateg, columFISdescr)
df_dataplot = data_df[, !(colnames(data_df) %in% drop_cols)]
rownames(df_dataplot) = data_df$filename
## Beeplot Data ##
bee_dataplot = df_dataplot
# Variables listed in exclV are NOT min-max normalised below; they are added
# back on their original scale after normalisation.
# do not normalise pv, sherd_inclination, roundness, form.factor, slope.WT, MDLHratio, Rim_incl_sin
exclV = c("Rim_incl_sin_mean", "form.factor", "roundness", "form.factor", "Rim_extent.1", "Rim_eccentricity", "Rim_incl_sin_min", "Rim_incl_sin_max")
# exclV = c("roundness", "form.factor", "Rim_extent.1", "Rim_eccentricity",
#           "Rim_incl_sin_mean", "Rim_incl_sin_min", "Rim_incl_sin_max",
#           # "Rim_WT.mean.norm", "Rim_WT.med.norm", "Rim_WT.var.norm",
#           "Below.Rim_incl_sin_min", "Below.Rim_incl_sin_max", "Below.Rim_incl_sin_mean",
#           # 'FIS.1_mf.1.out','FIS.1_mf.1.avg','FIS.1_mf.1.inn','FIS.1_mf.2.out','FIS.1_mf.2.avg','FIS.1_mf.2.inn','FIS.1_mf.3.out','FIS.1_mf.3.avg','FIS.1_mf.3.inn','FIS.1_mf.4.out','FIS.1_mf.4.avg','FIS.1_mf.4.inn','straight.rim','slightly.bent.rim','quite.bent.rim','profoundly.bent.rim','straight.rim.sign','straight.rim.descr','slightly.bent.rim.sign','slightly.bent.rim.descr','quite.bent.rim.sign','quite.bent.rim.descr','profoundly.bent.rim.sign','profoundly.bent.rim.descr','final.support','interm.1.descr','interm.2.descr','final.descr',
#           'final.support.FIS_1', 'final.support.FIS_2', 'final.support.FIS_3', 'final.support.ext.FIS_4', 'final.support.int.FIS_4', 'final.support.FIS_5', 'final.support.FIS_6'
#           ) # 'FIS6.var.1.inn','FIS6.var.1.out','FIS6.var.2.global','FIS6.var.3.global'
bee_dataplot = bee_dataplot[,!(names(bee_dataplot) %in% exclV)]
## Min-max normalise every column of bee_dataplot to [0, 1]. ##
# vapply() computes the per-column extrema directly on the data.frame;
# the previous apply() call first coerced the whole frame to a matrix.
# (Assumes all remaining columns are numeric, as the categorical columns
# were excluded above.)
vmin = vapply(bee_dataplot, min, numeric(1), na.rm = TRUE)
vmax = vapply(bee_dataplot, max, numeric(1), na.rm = TRUE)
for (i in seq_len(ncol(bee_dataplot))) {
  rng = vmax[[i]] - vmin[[i]]
  # Constant columns (rng == 0) are left untouched to avoid division by zero.
  if (rng > 0) {
    bee_dataplot[, i] = (bee_dataplot[, i] - vmin[[i]]) / rng
  }
}
# Merge bee_dataplot with non numeric data: re-attach the excluded (exclV)
# columns on their original scale, with filenaam as the first column
bee_dataplot = cbind.data.frame(rownames(bee_dataplot), bee_dataplot, df_dataplot[,(names(df_dataplot) %in% exclV)])
names(bee_dataplot)[1] = "filenaam"
bee_dataplot$filenaam = as.character(bee_dataplot$filenaam)
# long format for the beeswarm-style plots
# (melt() assumed to come from reshape2/reshape, loaded elsewhere -- confirm)
bee_melted = melt(bee_dataplot, id = "filenaam")
bee_melted$selected = 0
# # Melt data
# bee_melted = melt(bee_dataplot, id = "filenaam")
# bee_melted$selected = 0
# order of observations: canonical sherd order used to align all later tables
obs_order = as.character(bee_dataplot$filenaam)
# obs_order = unlist(strsplit(obs_order, ".jpg"))
# data for the Global plotMeasures: multivocality point measures, restricted to
# the sherds present in data_df
df_MuVo_all = as.data.frame(read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210612_multivocality/df_pointMeasures_multivocality_all.csv"))
df_MuVo_all = df_MuVo_all[df_MuVo_all$filename %in% data_df$filename, ]
dataplotMeasures = cbind.data.frame(df_MuVo_all$filename, df_MuVo_all$scale, df_MuVo_all$sherd_HWratio, df_MuVo_all$sherd_inclination, df_MuVo_all$sherd_rimDiam, df_MuVo_all$sherd_WThick1, df_MuVo_all$sherd_height)
colnames(dataplotMeasures) = c("filename", "scale", "sherd_HW", "sherd_inclination", "sherd_rim_diameter", "sherd_WTat2.3", "sherd_height") # + "rim_diameter_check" + inclide WT just below rim => rim width?
# convert pixel measures to centimetres via the drawn scale marker
# (assumes the scale marker represents 3 cm -- TODO confirm)
dataplotMeasures$scaleCM = 3
dataplotMeasures$sherd_height = dataplotMeasures$scaleCM*dataplotMeasures$sherd_height/dataplotMeasures$scale
dataplotMeasures$sherd_rim_diameter = dataplotMeasures$scaleCM*dataplotMeasures$sherd_rim_diameter/dataplotMeasures$scale
dataplotMeasures$sherd_WTat2.3 = dataplotMeasures$scaleCM*dataplotMeasures$sherd_WTat2.3/dataplotMeasures$scale
dataplotMeasures_extra = data.frame("filename" = data_df$filename, "sherd_rim.diameter.check" = data_df$sherd_rim.diameter.check, "is.full.profile" = data_df$is.full.profile)
dataplotMeasures = merge(dataplotMeasures, dataplotMeasures_extra, by = "filename")
# order for the dataplotMeasures: align with the canonical obs_order
obs_order_id = match(obs_order, dataplotMeasures$filename)
dataplotMeasures = dataplotMeasures[obs_order_id, ]
# jitter position: zero-width/height jitter with a fixed seed (deterministic plots)
jitterpos = position_jitter(width = 0, height = 0, seed = 1)
# Data for Cluster Results
# Minkowski p = 4
# Each CSV is read with stringsAsFactors = FALSE (the original code passed a
# misspelled "stingsAsFactors" to as.data.frame(), which silently ignored it).
# Label/input columns are dropped with a logical mask: the previous
# -(which(...)) form would have dropped ALL columns if none matched.
# cluster_df_10 = as.data.frame(read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_cluster_results_Mink_v2_ncl-10_83of100-1B150_20210802.csv"), stingsAsFactors = FALSE)
cluster_df_10 = read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_cluster_results_L4_RimDiam_inclBR_Mink_v2_ncl-3_83of100-1B150_20210802.csv", stringsAsFactors = FALSE)
# cluster_df_10 = as.data.frame(read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_cluster_results_L4_RimDiam_inclBR_FIS_Mink_ncl-7_83of100-1B150_20210802.csv"), stingsAsFactors = FALSE)
cluster_df_10 = cluster_df_10[, !(colnames(cluster_df_10) %in% c("labelsTV","labelsType","input"))] # "dist_from1", "dist_from2", "input"
colnames(cluster_df_10)[1] = "filenaam"
# cluster_df_8 = as.data.frame(read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_cluster_results_Mink_v2_ncl-8_83of100-1B150_20210802.csv"), stingsAsFactors = FALSE)
# cluster_df_8 = as.data.frame(read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_cluster_results_L4_RimDiam_inclBR_Mink_ncl-4_83of100-1B150_20210802.csv"), stingsAsFactors = FALSE)
cluster_df_8 = read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_cluster_results_L4_RimDiam_inclBR_Mink_v2_ncl-7_83of100-1B150_20210802.csv", stringsAsFactors = FALSE)
# cluster_df_8 = as.data.frame(read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_cluster_results_L4_RimDiam_inclBR_FIS_Mink_ncl-5_83of100-1B150_20210802.csv"), stingsAsFactors = FALSE)
cluster_df_8 = cluster_df_8[, !(colnames(cluster_df_8) %in% c("labelsTV","labelsType","input"))] # "dist_from1", "dist_from2", "input"
colnames(cluster_df_8)[1] = "filenaam"
# cluster_df_6 = as.data.frame(read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_cluster_results_Mink_v2_ncl-6_83of100-1B150_20210802.csv"), stingsAsFactors = FALSE)
# cluster_df_6 = as.data.frame(read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_cluster_results_L4_RimDiam_Mink_ncl-7_83of100-1B150_20210802.csv"), stingsAsFactors = FALSE)
cluster_df_6 = read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_cluster_results_L4_RimDiam_inclBR_Mink_v2_ncl-2_83of100-1B150_20210802.csv", stringsAsFactors = FALSE)
# cluster_df_6 = as.data.frame(read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_cluster_results_L4_RimDiam_inclBR_FIS_Mink_ncl-4_83of100-1B150_20210802.csv"), stingsAsFactors = FALSE)
cluster_df_6 = cluster_df_6[, !(colnames(cluster_df_6) %in% c("labelsTV","labelsType","input"))] # "dist_from1", "dist_from2", "input"
colnames(cluster_df_6)[1] = "filenaam"
# cluster_df_4 = as.data.frame(read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_cluster_results_Mink_v2_ncl-4_83of100-1B150_20210802.csv"), stingsAsFactors = FALSE)
# cluster_df_4 = as.data.frame(read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_cluster_results_L4_RimDiam_Mink_ncl-5_83of100-1B150_20210802.csv"), stingsAsFactors = FALSE)
cluster_df_4 = read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_cluster_results_L4_RimDiam_inclBR_Mink_v2_ncl-4_83of100-1B150_20210802.csv", stringsAsFactors = FALSE)
# cluster_df_4 = as.data.frame(read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_cluster_results_L4_RimDiam_inclBR_FIS_Mink_ncl-3_83of100-1B150_20210802.csv"), stingsAsFactors = FALSE)
cluster_df_4 = cluster_df_4[, !(colnames(cluster_df_4) %in% c("labelsTV","labelsType","input"))] # "dist_from1", "dist_from2", "input"
colnames(cluster_df_4)[1] = "filenaam"
# clustering used by the app
cluster_df_selected = cluster_df_10
# merge FIS and cluster data: attach the selected hard clustering to every
# per-FIS membership table ## <<<<-----------
dataplot_FIS_1_ncls = merge(dataplot_FIS_1, cluster_df_selected, by = "filenaam")
dataplot_FIS_2_ncls = merge(dataplot_FIS_2, cluster_df_selected, by = "filenaam")
dataplot_FIS_3_ncls = merge(dataplot_FIS_3, cluster_df_selected, by = "filenaam")
dataplot_FIS_4.ext_ncls = merge(dataplot_FIS_4.ext, cluster_df_selected, by = "filenaam")
dataplot_FIS_4.int_ncls = merge(dataplot_FIS_4.int, cluster_df_selected, by = "filenaam")
# dataplot_FIS_5_ncls = merge(dataplot_FIS_5, cluster_df_selected, by = "filenaam")
dataplot_FIS_6_ncls = merge(dataplot_FIS_6, cluster_df_selected, by = "filenaam")
dataplot_FIS_7_ncls = merge(dataplot_FIS_7, cluster_df_selected, by = "filenaam")
# order obs dataplot_FIS_*_ncls: align every FIS/cluster table to obs_order.
# BUG FIX: the match() for dataplot_FIS_6_ncls had been commented out, so
# FIS_6 was reordered with the index computed for dataplot_FIS_4.int_ncls;
# its own match() is restored here.
obs_order_FIS_id = match(obs_order, dataplot_FIS_1_ncls$filenaam)
dataplot_FIS_1_ncls = dataplot_FIS_1_ncls[obs_order_FIS_id, ]
obs_order_FIS_id = match(obs_order, dataplot_FIS_2_ncls$filenaam)
dataplot_FIS_2_ncls = dataplot_FIS_2_ncls[obs_order_FIS_id, ]
obs_order_FIS_id = match(obs_order, dataplot_FIS_3_ncls$filenaam)
dataplot_FIS_3_ncls = dataplot_FIS_3_ncls[obs_order_FIS_id, ]
obs_order_FIS_id = match(obs_order, dataplot_FIS_4.ext_ncls$filenaam)
dataplot_FIS_4.ext_ncls = dataplot_FIS_4.ext_ncls[obs_order_FIS_id, ]
obs_order_FIS_id = match(obs_order, dataplot_FIS_4.int_ncls$filenaam)
dataplot_FIS_4.int_ncls = dataplot_FIS_4.int_ncls[obs_order_FIS_id, ]
# obs_order_FIS_id = match(obs_order, dataplot_FIS_5_ncls$filenaam)
# dataplot_FIS_5_ncls = dataplot_FIS_5_ncls[obs_order_FIS_id, ]
obs_order_FIS_id = match(obs_order, dataplot_FIS_6_ncls$filenaam)
dataplot_FIS_6_ncls = dataplot_FIS_6_ncls[obs_order_FIS_id, ]
obs_order_FIS_id = match(obs_order, dataplot_FIS_7_ncls$filenaam)
dataplot_FIS_7_ncls = dataplot_FIS_7_ncls[obs_order_FIS_id, ]
# one wide table: id/memberships of FIS-1 plus support columns of the other FIS
df_FIStable = cbind.data.frame(dataplot_FIS_1_ncls[,1:4], dataplot_FIS_2_ncls[,2:3], dataplot_FIS_3_ncls[,2:3], dataplot_FIS_4.ext_ncls[,2:3], dataplot_FIS_4.int_ncls[,2:3], dataplot_FIS_6_ncls[,2:3], dataplot_FIS_7_ncls[,2:3]) # dataplot_FIS_5_ncls[,2:3],
# STAD links
# # lines manhattan
# df_links_Measures = read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_links_STAD_ScaledPeriphery_Manh_83of100-1B150_20210802.csv")
# # lines chebyshev
# df_links_Measures = read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_links_STAD_ScaledPeriphery_Cheb_83of100-1B150_20210802.csv")
# # lines canberra
# df_links_Measures = read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_links_STAD_ScaledPeriphery_Canb_83of100-1B150_20210802.csv")
# # lines Minkowski p=4 <<---
# df_links_Measures = read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_links_STAD_ScaledPeriphery_Mink_83of100-1B150_20210802.csv")
# 2D DTW
# df_links_Measures = read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_links_STAD_ScaledHeight_dtw_1B150_20210802.csv")
# jaccard
#
# # efa
#
# # measures
#
# # descriptors
#
# combined
# df_links_Measures = read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_links_STAD_Combined_Rims_Manh_83of100-1B150_20210802.csv")
# df_links_Measures = read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_links_STAD_Combined_Rims_Mink_83of100-1B150_20210802.csv")
# df_links_Measures = read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_links_STAD_Combined_Rims_Eucl_83of100-1B150_20210802.csv")
# df_links_Measures = read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_links_STAD_Combined_L4_G3_Manh_83of100-1B150_20210802.csv")
# df_links_Measures = read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_links_STAD_Combined_L4_G4_Manh_83of100-1B150_20210802.csv")
# df_links_Measures = read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_links_STAD_Combined_L4_G7_Manh_83of100-1B150_20210802.csv")
# df_links_Measures = read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_links_STAD_Combined_L4_G3_Mink_83of100-1B150_20210802.csv")
# df_links_Measures = read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_links_STAD_Combined_L4_G4_Mink_83of100-1B150_20210802.csv")
# df_links_Measures = read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_links_STAD_Combined_L4_G7_Mink_83of100-1B150_20210802.csv")
# df_links_Measures = read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_links_STAD_Combined_L4_G3_Eucl_83of100-1B150_20210802.csv")
# df_links_Measures = read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_links_STAD_Combined_L4_G4_Eucl_83of100-1B150_20210802.csv")
# df_links_Measures = read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_links_STAD_Combined_L4_G7_Eucl_83of100-1B150_20210802.csv")
# df_links_Measures = read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_links_STAD_Combined_L4_RimDiam_inclBR_Mink_83of100-1B150_20210802.csv")
# df_links_Measures = read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_links_STAD_Combined_L4_RimDiam_Mink_83of100-1B150_20210802.csv")
# STAD edge list in use: combined measures normalised to [0, 1], Minkowski
# distance (the alternative distance/feature variants are commented out above)
df_links_Measures = read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_links_STAD_Combined_normAll01_L4_RimDiam_inclBR_Mink_83of100-1B150_20210802.csv")
# df_links_Measures = read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_links_STAD_Combined_normAll01_L4_RimDiam_Mink_83of100-1B150_20210802.csv")
# df_links_Measures = read.csv("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Results_VM_paper1/20210802_VMpaper/df_links_STAD_Combined_normAll01_L4_RimDiam_inclBR_FIS_Mink_83of100-1B150_20210802.csv")
# UI Colours ###
# per-sherd colour table, keyed by filenaam, seeded with the label columns
df_colorAll = data.frame(bee_dataplot$filenaam)
names(df_colorAll) = "filenaam"
df_colorAll$filenaam = as.character(df_colorAll$filenaam)
df_colorAll = cbind.data.frame(df_colorAll, df_labels)
# orange (#f28e2c) for full profiles (binaryLabel == '1'), blue otherwise
df_colorAll$binaryLabel = ifelse(df_colorAll$binaryLabel == '1', "#f28e2c", "#4e79a7")
# grouping variables for which colour columns are generated below
groupLabels = c("binaryLabel", "TV", names(bee_dataplot))
# multiple colours palette display.brewer.all()
# brewerset = brewer.pal(11, name = "BrBG")[c(2,3,4,8,9,10)]
# c('#800000','#9A6324','#808000', '#469990','#000075','#000000',
# '#e6194B','#f58231','#ffe119','#bfef45','#3cb44b','#42d4f4','#4363d8','#911eb4','#f032e6','#a9a9a9',
# '#fabed4','#ffd8b1','#fffac8','#aaffc3','#dcbeff','#ffffff') # # https://sashamaps.net/docs/resources/20-colors/
# instead of the shiny yellow #ffe119 use #e5c700
# instead of the cyan #42d4f4 use #0db6db
# lookup table: opacity percentage (100..0) -> two-digit hex alpha suffix
alphaHEXcode = data.frame("value" = 100:0,
"code" = c("FF","FC","FA","F7","F5","F2","F0","ED","EB","E8","E6","E3","E0","DE","DB","D9","D6",
"D4","D1","CF","CC","C9","C7","C4","C2","BF","BD","BA","B8","B5","B3","B0","AD","AB","A8",
"A6","A3","A1","9E","9C","99","96","94","91","8F","8C","8A","87","85","82","80","7D","7A","78","75","73",
"70","6E","6B","69","66","63","61","5E","5C","59","57","54","52","4F","4D","4A","47","45","42","40","3D","3B","38",
"36","33","30","2E","2B","29","26","24","21","1F","1C","1A","17","14","12","0F","0D","0A","08","05","03","00"))
# categorical palette for descriptor / type colours
brewerset_Cat = c('#808000', '#469990','#dcbeff', '#9A6324','#000000',
'#e6194B','#f58231','#e5c700','#bfef45','#3cb44b','#0db6db','#4363d8','#911eb4','#f032e6','#a9a9a9',
'#fabed4','#ffd8b1','#fffac8','#aaffc3','#000075', '#ffffff','#800000')
# brewerset_Clusters = c('#000000', '#db6d00', '#006ddb', '#920000',
#                        '#8f4e00', '#ffdf4d', '#676767', '#009999',
#                        '#ff6db6', '#490092')
# fixed palette for cluster colours
brewerset_Clusters = c('#000000', '#db6d00', '#006ddb', '#920000',
'#754a3c', '#ffdf4d', '#676767', '#009999',
'#ff6db6', '#490092')
# one colour per unique TV value, interpolated from the categorical palette
newcol_Cat = colorRampPalette(brewerset_Cat)
colorset = newcol_Cat(length(unique(df_colorAll$TV))) # TV color categories
df_colorTV = cbind.data.frame(sort(unique(df_colorAll$TV)), colorset)
names(df_colorTV) = c("TV", "col")
df_colorTV$TV = as.character(df_colorTV$TV)
df_colorAll = merge(df_colorAll, df_colorTV, by = "TV")
names(df_colorAll)[4] = "TV_col"
# continuous BrBG ramp used for the numeric variables in the loop below
# (brewer.pal from RColorBrewer -- assumed loaded elsewhere)
brewerset_Con = brewer.pal(11, name = "BrBG")
newcol_Con = colorRampPalette(brewerset_Con)
# For each remaining grouping variable (index 4 onward: skips binaryLabel, TV
# and filenaam, handled above), build a value -> colour lookup from the
# continuous BrBG ramp and merge the per-sherd colour column into df_colorAll.
for(i in 4:length(groupLabels)) {
# values of the current variable for every sherd
vari = bee_dataplot[, names(bee_dataplot) %in% groupLabels[i]]
# one ramp colour per distinct value, assigned in sorted order
colorset = newcol_Con(length(unique(vari)))
df_colorVari = cbind.data.frame(sort(unique(vari)), colorset)
names(df_colorVari)[1] = names(bee_dataplot)[(names(bee_dataplot) %in% groupLabels[i])]
names(df_colorVari)[2] = "col"
# head(df_colorVari)
# filenaam + current variable, to attach colours per sherd
df_colorFili = bee_dataplot[ , c(1, which(names(bee_dataplot) %in% groupLabels[i]))] # 1=>bee_dataplot$filenaam
# head(df_colorFili)
df_colorI = merge(df_colorFili, df_colorVari, by = names(bee_dataplot)[(names(bee_dataplot) %in% groupLabels[i])])
# head(df_colorI)
# drop the value column, keep filenaam + colour (renamed to the variable name)
df_colorI = df_colorI[,-1]
names(df_colorI)[2] = names(bee_dataplot)[(names(bee_dataplot) %in% groupLabels[i])]
# head(df_colorI)
df_colorAll = merge(df_colorAll, df_colorI, by = "filenaam")
}
rm(df_colorI, df_colorFili, df_colorVari)
rm(df_colorI, df_colorFili, df_colorVari)
# All selected data enriched: variables Original, variables Scaled, variable Colors, filewd
# df_dataplot, bee_dataplot, df_colorAll
df_All = cbind.data.frame(rownames(df_dataplot), df_dataplot)
names(df_All)[1] = "filenaam"
names(bee_dataplot)[2:length(names(bee_dataplot))] = paste0(names(bee_dataplot)[2:length(names(bee_dataplot))], "_Sc")
df_All = merge(df_All, bee_dataplot, by = "filenaam")
names(df_colorAll)[5:length(names(df_colorAll))] = paste0(names(df_colorAll)[5:length(names(df_colorAll))], "_col")
df_colorAll = as.data.frame(lapply(df_colorAll, as.character))
df_All = merge(df_All, df_colorAll, by = "filenaam")
df_filewd = data_df[ ,c("filename","filewd")]
names(df_filewd)[1] = "filenaam"
df_All = merge(df_All, df_filewd, by = "filenaam")
df_All$filenaam = as.character(df_All$filenaam)
df_All$plain_col = ifelse(df_All$filenaam %in% special_set_1B150$filename, "#000000", "#d6d6d6")
df_All$TV = as.character(df_All$TV)
# clean some data from the environmnet
rm(df_colorAll, df_colorTV, vmin, vmax) # df_colorCentr,
## descriptors in FIS 1
# Pattern repeated for every FIS: map each final descriptor to a categorical
# colour, convert the fuzzy support (0..1) to a two-digit hex alpha, build a
# colour-with-alpha column, and merge both into df_All.
df_descriptors = cbind.data.frame(unique(data_df_FIS_1$final.descr.FIS_1), brewerset_Cat[2:(1+length(unique(data_df_FIS_1$final.descr.FIS_1)))])
names(df_descriptors) = c("final.descr.FIS_1", "final.descr.FIS_1_col")
df_descriptors$final.descr.FIS_1 = as.character(df_descriptors$final.descr.FIS_1)
df_colorI = merge(data_df_FIS_1, df_descriptors, by = "final.descr.FIS_1")
# support rounded to a whole percentage, looked up in the alpha table
df_colorI$value = round(df_colorI$final.support.FIS_1*100,0)
df_colorI = merge(df_colorI, alphaHEXcode, by = "value")
df_colorI$final.descr.FIS_1_col_alpha = paste0(df_colorI$final.descr.FIS_1_col, df_colorI$code)
df_colorI = df_colorI[,-which(colnames(df_colorI) %in% c("final.support.FIS_1", "value", "code"))]
names(df_colorI)[which(names(df_colorI) == "filename")] = "filenaam"
df_All = merge(df_All, df_colorI, by = "filenaam")
## descriptors in FIS 2 (same pattern as FIS 1)
df_descriptors = cbind.data.frame(unique(data_df_FIS_2$final.descr.FIS_2), brewerset_Cat[2:(1+length(unique(data_df_FIS_2$final.descr.FIS_2)))])
names(df_descriptors) = c("final.descr.FIS_2", "final.descr.FIS_2_col")
df_descriptors$final.descr.FIS_2 = as.character(df_descriptors$final.descr.FIS_2)
df_colorI = merge(data_df_FIS_2, df_descriptors, by = "final.descr.FIS_2")
df_colorI$value = round(df_colorI$final.support.FIS_2*100,0)
df_colorI = merge(df_colorI, alphaHEXcode, by = "value")
df_colorI$final.descr.FIS_2_col_alpha = paste0(df_colorI$final.descr.FIS_2_col, df_colorI$code)
df_colorI = df_colorI[,-which(colnames(df_colorI) %in% c("final.support.FIS_2", "value", "code"))]
names(df_colorI)[which(names(df_colorI) == "filename")] = "filenaam"
df_All = merge(df_All, df_colorI, by = "filenaam")
## descriptors in FIS 3 (same pattern as FIS 1)
df_descriptors = cbind.data.frame(unique(data_df_FIS_3$final.descr.FIS_3), brewerset_Cat[2:(1+length(unique(data_df_FIS_3$final.descr.FIS_3)))])
names(df_descriptors) = c("final.descr.FIS_3", "final.descr.FIS_3_col")
df_descriptors$final.descr.FIS_3 = as.character(df_descriptors$final.descr.FIS_3)
df_colorI = merge(data_df_FIS_3, df_descriptors, by = "final.descr.FIS_3")
df_colorI$value = round(df_colorI$final.support.FIS_3*100,0)
df_colorI = merge(df_colorI, alphaHEXcode, by = "value")
df_colorI$final.descr.FIS_3_col_alpha = paste0(df_colorI$final.descr.FIS_3_col, df_colorI$code)
df_colorI = df_colorI[,-which(colnames(df_colorI) %in% c("final.support.FIS_3", "value", "code"))]
names(df_colorI)[which(names(df_colorI) == "filename")] = "filenaam"
df_All = merge(df_All, df_colorI, by = "filenaam")
## descriptors in FIS 4
# exterior
df_descriptors = cbind.data.frame(unique(data_df_FIS_4$final.descr.ext.FIS_4), brewerset_Cat[2:(1+length(unique(data_df_FIS_4$final.descr.ext.FIS_4)))])
names(df_descriptors) = c("final.descr.ext.FIS_4", "final.descr.ext.FIS_4_col")
df_descriptors$final.descr.ext.FIS_4 = as.character(df_descriptors$final.descr.ext.FIS_4)
df_colorI = merge(data_df_FIS_4, df_descriptors, by = "final.descr.ext.FIS_4")
df_colorI$value = round(df_colorI$final.support.ext.FIS_4*100,0)
df_colorI = merge(df_colorI, alphaHEXcode, by = "value")
df_colorI$final.descr.ext.FIS_4_col_alpha = paste0(df_colorI$final.descr.ext.FIS_4_col, df_colorI$code)
df_colorI = df_colorI[,which(colnames(df_colorI) %in% c("filename", "final.descr.ext.FIS_4", "final.descr.ext.FIS_4_col"))]
names(df_colorI)[which(names(df_colorI) == "filename")] = "filenaam"
df_All = merge(df_All, df_colorI, by = "filenaam")
# interior
df_descriptors = cbind.data.frame(unique(data_df_FIS_4$final.descr.int.FIS_4), brewerset_Cat[2:(1+length(unique(data_df_FIS_4$final.descr.int.FIS_4)))])
names(df_descriptors) = c("final.descr.int.FIS_4", "final.descr.int.FIS_4_col")
df_descriptors$final.descr.int.FIS_4 = as.character(df_descriptors$final.descr.int.FIS_4)
df_colorI = merge(data_df_FIS_4, df_descriptors, by = "final.descr.int.FIS_4")
df_colorI$value = round(df_colorI$final.support.int.FIS_4*100,0)
df_colorI = merge(df_colorI, alphaHEXcode, by = "value")
df_colorI$final.descr.int.FIS_4_col_alpha = paste0(df_colorI$final.descr.int.FIS_4_col, df_colorI$code)
df_colorI = df_colorI[,which(colnames(df_colorI) %in% c("filename", "final.descr.int.FIS_4", "final.descr.int.FIS_4_col"))]
names(df_colorI)[which(names(df_colorI) == "filename")] = "filenaam"
df_All = merge(df_All, df_colorI, by = "filenaam")
# ## descriptors in FIS 5
# df_descriptors = cbind.data.frame(unique(data_df_FIS_5$final.descr.FIS_5), brewerset_Cat[2:(1+length(unique(data_df_FIS_5$final.descr.FIS_5)))])
# names(df_descriptors) = c("final.descr.FIS_5", "final.descr.FIS_5_col")
# df_descriptors$final.descr.FIS_5 = as.character(df_descriptors$final.descr.FIS_5)
# df_colorI = merge(data_df_FIS_5, df_descriptors, by = "final.descr.FIS_5")
# df_colorI$value = round(df_colorI$final.support.FIS_5*100,0)
# df_colorI = merge(df_colorI, alphaHEXcode, by = "value")
# df_colorI$final.descr.FIS_5_col_alpha = paste0(df_colorI$final.descr.FIS_5_col, df_colorI$code)
# df_colorI = df_colorI[,-which(colnames(df_colorI) %in% c("final.support.FIS_5", "value", "code"))]
# names(df_colorI)[which(names(df_colorI) == "filename")] = "filenaam"
# df_All = merge(df_All, df_colorI, by = "filenaam")
## descriptors in FIS 6
df_descriptors = cbind.data.frame(unique(data_df_FIS_6$final.descr.FIS_6), brewerset_Cat[2:(1+length(unique(data_df_FIS_6$final.descr.FIS_6)))])
names(df_descriptors) = c("final.descr.FIS_6", "final.descr.FIS_6_col")
df_descriptors$final.descr.FIS_6 = as.character(df_descriptors$final.descr.FIS_6)
df_colorI = merge(data_df_FIS_6, df_descriptors, by = "final.descr.FIS_6")
df_colorI$value = round(df_colorI$final.support.FIS_6*100,0)
df_colorI = merge(df_colorI, alphaHEXcode, by = "value")
df_colorI$final.descr.FIS_6_col_alpha = paste0(df_colorI$final.descr.FIS_6_col, df_colorI$code)
df_colorI = df_colorI[,-which(colnames(df_colorI) %in% c("final.support.FIS_6", "value", "code"))]
names(df_colorI)[which(names(df_colorI) == "filename")] = "filenaam"
df_All = merge(df_All, df_colorI, by = "filenaam")
## descriptors in FIS 7
df_descriptors = cbind.data.frame(unique(data_df_FIS_7$final.descr.FIS_7), brewerset_Cat[2:(1+length(unique(data_df_FIS_7$final.descr.FIS_7)))])
names(df_descriptors) = c("final.descr.FIS_7", "final.descr.FIS_7_col")
df_descriptors$final.descr.FIS_7 = as.character(df_descriptors$final.descr.FIS_7)
df_colorI = merge(data_df_FIS_7, df_descriptors, by = "final.descr.FIS_7")
df_colorI$value = round(df_colorI$final.support.FIS_7*100,0)
df_colorI = merge(df_colorI, alphaHEXcode, by = "value")
df_colorI$final.descr.FIS_7_col_alpha = paste0(df_colorI$final.descr.FIS_7_col, df_colorI$code)
df_colorI = df_colorI[,-which(colnames(df_colorI) %in% c("final.support.FIS_7", "value", "code"))]
names(df_colorI)[which(names(df_colorI) == "filename")] = "filenaam"
df_All = merge(df_All, df_colorI, by = "filenaam")
# hard cluster results coloring ##
# For each fuzzy-clustering solution (10 / 8 / 6 / 4 clusters) assign a
# categorical color per hard cluster and encode the membership degree of the
# assigned cluster as transparency (alpha), rescaled into [20, 100] for
# clearer visualisation. The four copy-paste sections are factored into one
# helper; the columns added to df_All (names, order, content) are unchanged.
append_cluster_colors = function(df_all, cluster_df, k) {
  # df_all    : accumulating data frame, keyed by "filenaam".
  # cluster_df: clustering result with "hard_cl", "filenaam" and k membership
  #             columns; after the merge below, membership of cluster i is
  #             addressed positionally at column i + 2 — this assumes the
  #             original column layout of cluster_df_* (TODO confirm upstream).
  # k         : number of clusters (used only to build output column names).
  col_nm   = paste0("Cluster_", k, "_col")
  memb_nm  = paste0("Cluster_", k, "_Membership_col")
  hard_nm  = paste0("hard_cl_", k)
  value_nm = paste0("value_", k)
  code_nm  = paste0("code_", k)
  cl_levels = unique(cluster_df$hard_cl)
  df_colorCluster = cbind.data.frame(cl_levels, brewerset_Clusters[1:length(cl_levels)])
  names(df_colorCluster) = c("hard_cl", col_nm)
  df_colorCluster$hard_cl = as.character(df_colorCluster$hard_cl)
  dfc = merge(cluster_df, df_colorCluster, by = "hard_cl")
  # Membership probability of the assigned (hard) cluster, as a percentage.
  vecAlpha = data.frame("filenaam" = dfc$filenaam, "value" = rep(1, nrow(dfc)))
  for (rown in seq_len(nrow(vecAlpha))) {
    vecAlpha[rown, "value"] = round(dfc[rown, as.numeric(dfc$hard_cl[rown]) + 2] * 100, 0)
  }
  # scale values for clearer visualisation: we scale in [20, 100]
  # NOTE(review): if all memberships are identical this divides by zero.
  minVal = min(vecAlpha$value); maxVal = max(vecAlpha$value)
  vecAlpha$value = round(20 + (100 - 20) * (vecAlpha$value - minVal) / (maxVal - minVal), 0)
  vecAlpha = merge(vecAlpha, alphaHEXcode, by = "value")
  colnames(vecAlpha)[which(colnames(vecAlpha) == "value")] = value_nm
  dfc = merge(dfc, vecAlpha, by = "filenaam")
  colnames(dfc)[which(colnames(dfc) == "code")] = code_nm
  # Base cluster color + alpha code = membership-shaded color.
  dfc[[memb_nm]] = paste0(dfc[[col_nm]], dfc[[code_nm]])
  colnames(dfc)[which(colnames(dfc) == "hard_cl")] = hard_nm
  dfc = dfc[, c("filenaam", hard_nm, col_nm, value_nm, code_nm, memb_nm)]
  merge(df_all, dfc, by = "filenaam")
}
## 10 clusters
df_All = append_cluster_colors(df_All, cluster_df_10, 10)
## 8 clusters
df_All = append_cluster_colors(df_All, cluster_df_8, 8)
## 6 clusters
df_All = append_cluster_colors(df_All, cluster_df_6, 6)
## 4 clusters
df_All = append_cluster_colors(df_All, cluster_df_4, 4)
# Add the columns that are not yet added in the df_All ###
# Attach the categorical metadata columns listed in columCateg, then derive a
# human-readable context label and restore the expected observation order.
data_df_categ = data_df[,which(colnames(data_df) %in% columCateg)]
colnames(data_df_categ)[1] = "filenaam"
# ContextLabel = ContextCompare with its first 3 characters stripped
# (presumably a sort/ordering prefix — TODO confirm upstream).
data_df_categ$ContextLabel = as.character(data_df_categ$ContextCompare)
data_df_categ$ContextLabel = substring(data_df_categ$ContextLabel, 4)
df_All = merge(df_All, data_df_categ, by = "filenaam")
# reorder the dataframe (merge() sorted rows by "filenaam"; put them back in
# the externally defined obs_order)
obs_order_id = match(obs_order, df_All$filenaam) # added for the expected ordering
df_All = df_All[obs_order_id, ] # added for the expected ordering
rownames(df_All) = NULL # added for the expected ordering
# Lookup list mapping each plot-legend label to the name of the df_All column
# that holds its color. Commented entries are retired/optional color schemes
# kept for reference.
listcolors = list("plain" = "plain_col", "full profile" = "binaryLabel", "TV" = "TV_col",
# Cluster colorings (hard cluster color and membership-shaded variant).
"Cluster (10)" = "Cluster_10_col", "Cluster_Membership (10)" = "Cluster_10_Membership_col",
"Cluster (8)" = "Cluster_8_col", "Cluster_Membership (8)" = "Cluster_8_Membership_col",
"Cluster (6)" = "Cluster_6_col", "Cluster_Membership (6)" = "Cluster_6_Membership_col",
"Cluster (4)" = "Cluster_4_col", "Cluster_Membership (4)" = "Cluster_4_Membership_col",
# Per-variable colorings (morphometric rim/profile descriptors).
"rim.out.prot.length" = "rim.out.prot.length_col",
"rim.inn.prot.length" = "rim.inn.prot.length_col", "rim.out.diff.length" = "rim.out.diff.length_col", "rim.inn.diff.length" = "rim.inn.diff.length_col",
"rim.inn.prot.length.selected" = "rim.inn.prot.length.selected_col", "rim.out.prot.length.selected" = "rim.out.prot.length.selected_col",
"rim.inn.prot.length.selected_scaled" = "rim.inn.prot.length.selected_scaled_col", "rim.out.prot.length.selected_scaled" = "rim.out.prot.length.selected_scaled_col",
"block.perC" = "block.perC_col", "trapez.perC" = "trapez.perC_col",
"Rim_HorizCut.Out_WT" = "Rim_HorizCut.Out_WT_col", "Rim_HorizCut.Inn_WT" = "Rim_HorizCut.Inn_WT_col",
"Rim_mean.WT" = "Rim_mean.WT_col", "Rim_median.WT" = "Rim_median.WT_col",
"Rim_sd.WT" = "Rim_sd.WT_col", "Rim_min.WT" = "Rim_min.WT_col", "Rim_max.WT" = "Rim_max.WT_col",
"RimWT_margin" = "RimWT_margin_col", "Rim_Diam_extra_half" = "Rim_Diam_extra_half_col", "Rim_curl" = "Rim_curl_col",
"Rim_elongation.min" = "Rim_elongation.min_col", "Rim_elongation.max" = "Rim_elongation.max_col", "Rim_elongation.avg" = "Rim_elongation.avg_col",
"Rim_radius.ratio" = "Rim_radius.ratio_col", "Rim_height_manual" = "Rim_height_manual_col", "Rim_incl_sin_mean" = "Rim_incl_sin_mean_col",
"form.factor" = "form.factor_col", "aspect.ratio" = "aspect.ratio_col", "roundness" = "roundness_col",
"Rim_extent.1" = "Rim_extent.1_col", "Rim_eccentricity" = "Rim_eccentricity_col", "Rim_incl_sin_min" = "Rim_incl_sin_min_col", "Rim_incl_sin_max" = "Rim_incl_sin_max_col",
"Rim_mass.centre.x" = "Rim_mass.centre.x_col", "Rim_mass.centre.y" = "Rim_mass.centre.y_col",
"Rim_majoraxis" = "Rim_majoraxis_col", "Rim_WT.mean.norm" = "Rim_WT.mean.norm_col", "Rim_WT.med.norm" = "Rim_WT.med.norm_col",
"Rim_WT.var.norm" = "Rim_WT.var.norm_col", "Rim_WT.skew.norm" = "Rim_WT.skew.norm_col", "Rim_WT.kurt.norm" = "Rim_WT.kurt.norm_col",
"Rim_WT.BotMed.norm" = "Rim_WT.BotMed.norm_col", "Below.Rim_incl_sin_min" = "Below.Rim_incl_sin_min_col",
"Below.Rim_incl_sin_max" = "Below.Rim_incl_sin_max_col", "Below.Rim_incl_sin_mean" = "Below.Rim_incl_sin_mean_col",
"Below.Rim_incl_min_sign" = "Below.Rim_incl_min_sign_col", "Below.Rim_incl_max_sign" = "Below.Rim_incl_max_sign_col",
"Below.Rim_incl_mean_sign" = "Below.Rim_incl_mean_sign_col",
"Rim_flattness_med" = "Rim_flattness_med_col", "Rim_flattness_max" = "Rim_flattness_max_col", "CutsSymmetric" = "CutsSymmetric_col", "WT_LipBot" = "WT_LipBot_col",
"ellipse.perC" = "ellipse.perC_col",
# Retired FIS.1 membership-function colorings, kept for reference:
# 'FIS.1_mf.1.out' = 'FIS.1_mf.1.out_col', 'FIS.1_mf.1.avg' = 'FIS.1_mf.1.avg_col', 'FIS.1_mf.1.inn' = 'FIS.1_mf.1.inn_col', 'FIS.1_mf.2.out' = 'FIS.1_mf.2.out_col',
# 'FIS.1_mf.2.avg' = 'FIS.1_mf.2.avg_col', 'FIS.1_mf.2.inn' = 'FIS.1_mf.2.inn_col', 'FIS.1_mf.3.out' = 'FIS.1_mf.3.out_col', 'FIS.1_mf.3.avg' = 'FIS.1_mf.3.avg_col',
# 'FIS.1_mf.3.inn' = 'FIS.1_mf.3.inn_col', 'FIS.1_mf.4.out' = 'FIS.1_mf.4.out_col', 'FIS.1_mf.4.avg' = 'FIS.1_mf.4.avg_col', 'FIS.1_mf.4.inn' = 'FIS.1_mf.4.inn_col',
# 'straight.rim' = 'straight.rim_col', 'slightly.bent.rim' = 'slightly.bent.rim_col', 'quite.bent.rim' = 'quite.bent.rim_col', 'profoundly.bent.rim' = 'profoundly.bent.rim_col',
# 'straight.rim.sign' = 'straight.rim.sign_col', 'straight.rim.descr' = 'straight.rim.descr_col', 'slightly.bent.rim.sign' = 'slightly.bent.rim.sign_col',
# 'slightly.bent.rim.descr' = 'slightly.bent.rim.descr_col', 'quite.bent.rim.sign' = 'quite.bent.rim.sign_col', 'quite.bent.rim.descr' = 'quite.bent.rim.descr_col',
# 'profoundly.bent.rim.sign' = 'profoundly.bent.rim.sign_col', 'profoundly.bent.rim.descr' = 'profoundly.bent.rim.descr_col', 'final.support' = 'final.support_col',
# 'interm.1.descr' = 'interm.1.descr_col', 'interm.2.descr' = 'interm.2.descr_col', 'final.descr' = 'final.descr_col',
# FIS support and descriptor colorings (FIS 5 excluded, see commented entries).
'final.support.FIS_1' = 'final.support.FIS_1_col', 'final.support.FIS_2' = 'final.support.FIS_2_col', 'final.support.FIS_3' = 'final.support.FIS_3_col', 'final.support.ext.FIS_4' = 'final.support.ext.FIS_4_col', 'final.support.int.FIS_4' = 'final.support.int.FIS_4_col', 'final.support.FIS_6' = 'final.support.FIS_6_col', # 'final.support.FIS_5' = 'final.support.FIS_5_col',
'final.descr.FIS_1' = 'final.descr.FIS_1_col', 'final.descr.FIS_1_alpha' = 'final.descr.FIS_1_col_alpha',
'final.descr.FIS_2' = 'final.descr.FIS_2_col', 'final.descr.FIS_2_alpha' = 'final.descr.FIS_2_col_alpha',
'final.descr.FIS_3' = 'final.descr.FIS_3_col', 'final.descr.FIS_3_alpha' = 'final.descr.FIS_3_col_alpha',
'final.descr.ext.FIS_4' = 'final.descr.ext.FIS_4_col', 'final.descr.ext.FIS_4_alpha' = 'final.descr.ext.FIS_4_col_alpha',
'final.descr.int.FIS_4' = 'final.descr.int.FIS_4_col', 'final.descr.int.FIS_4_alpha' = 'final.descr.int.FIS_4_col_alpha',
# 'final.descr.FIS_5' = 'final.descr.FIS_5_col', 'final.descr.FIS_5_alpha' = 'final.descr.FIS_5_col_alpha',
'final.descr.FIS_6' = 'final.descr.FIS_6_col', 'final.descr.FIS_6_alpha' = 'final.descr.FIS_6_col_alpha',
'final.descr.FIS_7' = 'final.descr.FIS_7_col', 'final.descr.FIS_7_alpha' = 'final.descr.FIS_7_col_alpha'
)
# Context data
# Summary table of sherd counts per archaeological context, with the site
# extracted from the context label and the "checked" flag (inERlist) attached.
tbl_contexts = as.data.frame(table(df_All$ContextLabel)) # df_All$ContextCompare
tbl_contexts$Var1 = as.character(tbl_contexts$Var1)
tbl_contexts$Site = ""
# Site is the second "-"-separated token of the context label.
# NOTE(review): 1:dim(...)[1] breaks when the table is empty; assumed non-empty.
for (tbli in 1:dim(tbl_contexts)[1]) {
tbl_contexts$Site[tbli] = unlist(strsplit(tbl_contexts[tbli,"Var1"], "-"))[2]
}
colnames(tbl_contexts) = c("Context", "sherds", "Site")
tbl_inERlist = unique(cbind.data.frame(df_All$ContextLabel,df_All$inERlist))
colnames(tbl_inERlist) = c("Context", "Checked")
tbl_contexts = merge(tbl_contexts, tbl_inERlist, by = "Context")
tbl_contexts = tbl_contexts[order(tbl_contexts$Site),c("Site", "Context", "Checked", "sherds")]
# Size of node data as the certainty of belonging to the group
# add in df_All?
# Node size = membership probability of the assigned (hard) cluster * 20,
# using the 10-cluster solution. Membership columns are addressed positionally
# (cluster cl_i at column cl_i + 2 of cluster_df_10 — TODO confirm layout).
df_nodesize = data.frame("filenaam" = df_All$filenaam, "nodesize" = 0)
for (cl_i in unique(cluster_df_10$hard_cl)) {
df_cl_i = subset(cluster_df_10, cluster_df_10$hard_cl == cl_i)
# here we can normalise inside the cluster if we want ....
df_nodesize[c(match(df_cl_i$filenaam, df_nodesize$filenaam)), "nodesize"] = round(df_cl_i[,cl_i+2]*20,0)
} # the order is correct because we have taken the filenaam data from 'df_All'
### Network Data ###
## wd ##
# Hard-coded local image directory, replicated once per sherd. Alternative
# image sources (original profile / cropped profile) kept below for reference.
# wd = as.character(df_All$filewd.y) # original profile image
wd = rep("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Scripts_VesselMorphology/img_rim_20201711", length(data_df$filename)) # rim image
# wd = rep("C:/Users/u0112360/Documents/____/Sagalassos/__PhD/Articles/_Typology/Scripts_VesselMorphology/img_cropped_Profile/img_cropped_Pr_1B150_20211112", length(data_df$filename)) # cropped profile image
## images ##
# pic = as.character(df_All$filenaam) # original image
pic = paste0("img_rim_", as.character(data_df$filename)) # rim image
# pic = paste0("remade_", as.character(data_df$filename)) # cropped profile image
## base64 images ##
# Read every image as raw bytes and base64-encode it for embedding in the
# network visualisation. The result vector is preallocated and each path is
# built once per iteration (the original grew `txt` with txt[i] <- ... and
# rebuilt the path twice per file).
txt = character(length(pic))
for (i in seq_along(pic)) {
  img_path = file.path(wd[i], pic[i])
  txt[i] = RCurl::base64Encode(readBin(img_path, 'raw', file.size(img_path)), 'txt')
}
# node attributes
sizeGroup = 20 # for all sherds having the same size
|
## Wisconsin Longitudinal Study: download the public Stata export, cache it as
## .RData, and extract the 1992 purpose-in-life survey items.
setwd("~/Dropbox/Wisconsin Study") # NOTE(review): hard-coded path; script is machine-specific
url <- "http://www.ssc.wisc.edu/wlsresearch/data/downloads/wls-pub-13.03.stata.zip"
f <- file.path(getwd(), "wls-pub-13.03.stata.zip")
# Download and extract only once; the original re-downloaded the large archive
# on every run.
if (!file.exists(f)) {
  download.file(url, f, mode = "wb")
  unzip("wls-pub-13.03.stata.zip")
}
# input Stata file
library(foreign)
# Build the .RData cache only once; read.dta() on the full export is slow.
if (!file.exists("wisconsin.RData")) {
  wisconsin <- read.dta("wls_pub_13_03.dta")
  save(wisconsin, file = "wisconsin.RData")
}
### from now on will load from the RData file
load("wisconsin.RData")
dim(wisconsin)
str(wisconsin)
# NOTE(review): lib.loc is machine-specific; plain library(dplyr) is portable.
library("dplyr", lib.loc = "~/Library/R/3.2/library")
wisconsin <- tbl_df(wisconsin) # tbl_df() is deprecated; as_tibble() in modern dplyr
# 1992 mail survey: id, sex, and the purpose-in-life items.
purpose1992Mail <- select(wisconsin, idpub, sexrsp, mn039rer, mn049rer,
                          mn041rer, mn042rer, mn043rer, mn044rer, mn045rer)
# TODO(review): the variable list for the 1992 phone survey was never filled
# in; select() with no columns returns a zero-column table.
purpose1992Phone <- select(wisconsin, )
View(purpose1992Mail) # fixed: was View(purpose), but `purpose` is never defined
| /wisconsin code.R | no_license | yissylevi/work | R | false | false | 711 | r | setwd("~/Dropbox/Wisconsin Study")
url <- "http://www.ssc.wisc.edu/wlsresearch/data/downloads/wls-pub-13.03.stata.zip"
f <- file.path(getwd(), "wls-pub-13.03.stata.zip")
download.file(url, f, mode = "wb")
unzip("wls-pub-13.03.stata.zip")
# input Stata file
library(foreign)
wisconsin <- read.dta("wls_pub_13_03.dta")
save(wisconsin, file = "wisconsin.RData")
###from now on will load from the Rdata file
load("wisconsin.RData")
dim(wisconsin)
str(wisconsin)
library("dplyr", lib.loc="~/Library/R/3.2/library")
wisconsin<-tbl_df(wisconsin)
purpose1992Mail<-select(wisconsin, idpub, sexrsp, mn039rer, mn049rer, mn041rer, mn042rer, mn043rer, mn044rer, mn045rer)
purpose1992Phone<-select(wisconsin, )
View(purpose)
|
\name{mtaBin_next}
\alias{mtaBin_next}
\alias{print.mtaBin_next}
\title{Optimal dose determination for MTA with binary outcomes}
\description{
\code{mtaBin_next} is used to determine the next optimal dose to administer in a Phase I/II clinical trial for Molecularly Targeted Agent using the design proposed by Riviere et al. entitled "Phase I/II Dose-Finding Design for Molecularly Targeted Agent: Plateau Determination using Adaptive Randomization".
}
\usage{
mtaBin_next(ngroups=1, group_cur=1, ndose, prior_tox, prior_eff, tox_max,
eff_min, cohort_start, cohort, final=FALSE, method="MTA-RA",
s_1=function(n_cur){0.2}, s_2=0.07, group_pat, id_dose, toxicity, tite=TRUE,
efficacy, time_follow, time_eff, time_full, cycle, c_tox=0.90, c_eff=0.40,
seed = 8)
}
\arguments{
\item{ngroups}{Number of groups for the dose-finding process leading to the recommendation of different dose levels. Several groups of efficacy (e.g. based on biomarker) sharing the same toxicity can be considered. The default value is set at 1.}
\item{group_cur}{Group number for which the estimation and the optimal dose determination is required by the function. The default value is set at 1.}
\item{ndose}{Number of dose levels.}
\item{prior_tox}{A vector of initial guesses of toxicity probabilities associated with the doses. Must be of same length as \code{ndose}.}
\item{prior_eff}{A vector of initial guesses of efficacy probabilities associated with the doses for \code{group_cur}. Must be of same length as \code{ndose}.}
\item{tox_max}{Toxicity upper bound, i.e. maximum acceptable toxicity probability.}
\item{eff_min}{Efficacy lower bound, i.e. minimum acceptable efficacy probability.}
\item{cohort_start}{Cohort size for the start-up phase.}
\item{cohort}{Cohort size for the model phase.}
\item{final}{A boolean with value TRUE if the trial is finished and the recommended dose for further phases should be given, or FALSE (default value) if the dose determination is performed for the next cohort of patients.}
\item{method}{A character string to specify the method for dose allocation (<=> plateau determination). The default method "MTA-RA" use adaptive randomization on posterior probabilities for the plateau location. Method based on difference in efficacy probabilities is specified by "MTA-PM".}
\item{s_1}{A function of the number of patients included, used for adaptive randomization in plateau determination; only used if the estimation method chosen is "MTA-RA". The default function is function(n_cur){0.2}.}
\item{s_2}{Cutoff value for plateau determination, only used if the estimation method chosen is "MTA-PM". Can be seen as the minimal efficacy difference of practical importance. The default value is 0.07.}
\item{group_pat}{A vector indicating the group number associated with each patient included in the trial.}
\item{id_dose}{A vector indicating the dose levels administered to each patient included in the trial. Must be of same length as \code{group_pat}.}
\item{toxicity}{A vector of observed toxicities (DLTs) for each patient included in the trial. Must be of same length as \code{group_pat}.}
\item{tite}{A boolean indicating if the efficacy is considered as a time-to-event (default value TRUE), or if it is a binary outcome (FALSE).}
\item{efficacy}{A vector of observed efficacies for each patient included in the trial. Must be of same length as \code{group_pat}. This argument is used/required only if tite=FALSE. The observed efficacies of patients belonging to other groups than \code{group_cur} should also be filled (although not used) in the same order as \code{group_pat} (NA can be put).}
\item{time_follow}{A vector of follow-up times for each patient included in the trial. Must be of same length as \code{group_pat}. This argument is used/required only if tite=TRUE.}
\item{time_eff}{A vector of times-to-efficacy for each patient included in the trial. If no efficacy was observed for a patient, must be filled with +Inf. Must be of same length as \code{group_pat}. This argument is used/required only if tite=TRUE.}
\item{time_full}{Full follow-up time window. This argument is used only if tite=TRUE.}
\item{cycle}{Minimum waiting time between two dose cohorts (usually a toxicity cycle). This argument is used only if tite=TRUE.}
\item{c_tox}{Toxicity threshold for decision rules. The default value is set at 0.90.}
\item{c_eff}{Efficacy threshold for decision rules. The default value is set at 0.40.}
\item{seed}{Seed of the random number generator. Default value is set at 8.}
}
\value{
An object of class "mtaBin_next" is returned, consisting of determination of the next optimal dose level to administer and estimations. Objects generated by \code{mtaBin_next} contain at least the following components:
\item{prior_tox}{Prior toxicities.}
\item{prior_eff}{Prior efficacies.}
\item{pat_incl_group}{Number of patients included.}
\item{n_tox_tot}{Number of observed toxicities.}
\item{pi}{Estimated toxicity probabilities (if the start-up ended).}
\item{ptox_inf}{Estimated probabilities that the toxicity probability is inferior to \code{tox_max} (if the start-up ended).}
\item{n_eff}{Number of observed efficacies.}
\item{resp}{Estimated efficacy probabilities (if the start-up ended).}
\item{1-qeff_inf}{Estimated probabilities that the efficacy probability is superior to \code{eff_min} (if the start-up ended).}
\item{proba_tau}{Posterior probabilities for the plateau location.}
\item{group_cur}{Current Group for dose determination.}
\item{in_startup}{Whether the start-up phase has ended or not.}
\item{cdose}{NEXT RECOMMENDED DOSE.}
\item{ngroups}{Number of groups.}
\item{final}{Maximum sample size reached.}
\item{method}{Allocation method.}
\item{tox_max}{Toxicity upper bound (if the start-up ended).}
\item{eff_min}{Efficacy lower bound (if the start-up ended).}
\item{c_tox}{Toxicity threshold (if the start-up ended).}
\item{c_eff}{Efficacy threshold (if the start-up ended).}
\item{tite}{Type of outcome for efficacy (time-to-event or binary).}
\item{time_full}{If efficacy is a time-to-event, the full follow-up time is also reported.}
\item{cycle}{If efficacy is a time-to-event, the minimum waiting time between two dose cohorts (cycle) is also reported.}
}
\references{
Riviere, M-K., Yuan, Y., Jourdan, J-H., Dubois, F., and Zohar, S. Phase I/II Dose-Finding Design for Molecularly Targeted Agent: Plateau Determination using Adaptive Randomization.
}
\note{The "MTA-PM" method is not implemented for non-binary efficacy, as "MTA-RA" is recommended for general use.}
\author{Jacques-Henri Jourdan and Marie-Karelle Riviere-Jourdan \email{eldamjh@gmail.com}}
\seealso{
\code{\link{mtaBin_sim}}.
}
\examples{
prior_tox = c(0.02, 0.06, 0.12, 0.20, 0.30, 0.40)
prior_eff = c(0.12, 0.20, 0.30, 0.40, 0.50, 0.59)
group_pat_1 = rep(1,33)
id_dose_1 = c(1,1,1,2,2,2,3,3,3,4,4,4,5,5,5,4,4,4,5,5,5,6,6,6,3,3,3,4,4,4,3,3,3)
tox_1 = c(0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,1,0,0,0,0,0,1,0,0,0,0)
time_follow_1 = c(rep(7,30),6.8,5,3.5)
time_eff_1 = c(rep(+Inf,8),4,+Inf,+Inf,+Inf,3,6,+Inf,+Inf,2,+Inf,+Inf,4.5,+Inf,
+Inf,3.2,+Inf,+Inf,2.4,6.1,+Inf,5.8,+Inf,+Inf,2.1,3.6)
eff_2 = c(0,0,0,0,0,0,0,0,1,0,0,0,1,1,0,0,1,0,0,1,0,0,1,0,0,1,1,0,1,0,0,1,1)
group_pat_3 = c(1,2,3,2,1,2,3,1,2,3,3,2,2,1,3,1,2,3,1,2,3,3,3,2,1,1,2,1,2,2)
id_dose_3 = c(1,1,1,1,1,1,1,1,2,1,2,2,2,2,2,2,3,2,2,3,3,3,3,3,1,1,2,1,2,2)
toxicity_3 = c(0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0)
efficacy_3 = c(NA,0,NA,0,NA,1,NA,NA,0,NA,NA,1,0,NA,NA,NA,0,NA,NA,1,NA,NA,NA,
0,NA,NA,0,NA,1,1)
s_1=function(n_cur){0.2*(1-n_cur/60)}
\donttest{
# One group, time-to-event
mta1 = mtaBin_next(ngroups=1, group_cur=1, ndose=6, prior_tox=prior_tox,
prior_eff=prior_eff, tox_max=0.35, eff_min=0.20, cohort_start=3,
cohort=3, method="MTA-PM", group_pat=group_pat_1, id_dose=id_dose_1,
toxicity=tox_1, tite=TRUE, time_follow=time_follow_1,
time_eff=time_eff_1, time_full=7, cycle=3, c_tox=0.90, c_eff=0.40)
mta1
# One group, binary
mta2 = mtaBin_next(ngroups=1, group_cur=1, ndose=6, prior_tox=prior_tox,
prior_eff=prior_eff, tox_max=0.35, eff_min=0.20, cohort_start=3,
cohort=3, final = TRUE, method="MTA-RA", group_pat=group_pat_1,
id_dose=id_dose_1, toxicity=tox_1, tite=FALSE, efficacy=eff_2,
seed = 190714)
mta2
# Three groups, binary
mta3 = mtaBin_next(ngroups=3, group_cur=2, ndose=6, prior_tox=prior_tox,
prior_eff=prior_eff, tox_max=0.35, eff_min=0.20, cohort_start=3,
cohort=3, final = FALSE, s_1=s_1, group_pat=group_pat_3,
id_dose=id_dose_3, toxicity=toxicity_3, tite=FALSE, efficacy=efficacy_3)
mta3
}
# Dummy example, running quickly
useless = mtaBin_next(ngroups=1, group_cur=1, ndose=4,
prior_tox=c(0.12,0.20,0.30,0.40), prior_eff=c(0.20,0.30,0.40,0.50),
tox_max=0.35, eff_min=0.20, cohort_start=3, cohort=3,
group_pat=rep(1,9), id_dose=c(1,1,1,2,2,2,2,2,2),
toxicity=c(0,0,0,1,0,0,0,0,0), efficacy=c(0,0,0,0,0,1,0,1,0), tite=FALSE)
}
| /fuzzedpackages/dfmta/man/mtaBin_next.Rd | no_license | akhikolla/testpackages | R | false | false | 9,048 | rd | \name{mtaBin_next}
\alias{mtaBin_next}
\alias{print.mtaBin_next}
\title{Optimal dose determination for MTA with binary outcomes}
\description{
\code{mtaBin_next} is used to determine the next optimal dose to administer in a Phase I/II clinical trial for Molecularly Targeted Agent using the design proposed by Riviere et al. entitled "Phase I/II Dose-Finding Design for Molecularly Targeted Agent: Plateau Determination using Adaptive Randomization".
}
\usage{
mtaBin_next(ngroups=1, group_cur=1, ndose, prior_tox, prior_eff, tox_max,
eff_min, cohort_start, cohort, final=FALSE, method="MTA-RA",
s_1=function(n_cur){0.2}, s_2=0.07, group_pat, id_dose, toxicity, tite=TRUE,
efficacy, time_follow, time_eff, time_full, cycle, c_tox=0.90, c_eff=0.40,
seed = 8)
}
\arguments{
\item{ngroups}{Number of groups for the dose-finding process leading to the recommendation of different dose levels. Several groups of efficacy (e.g. based on biomarker) sharing the same toxicity can be considered. The default value is set at 1.}
\item{group_cur}{Group number for which the estimation and the optimal dose determination is required by the function. The default value is set at 1.}
\item{ndose}{Number of dose levels.}
\item{prior_tox}{A vector of initial guesses of toxicity probabilities associated with the doses. Must be of same length as \code{ndose}.}
\item{prior_eff}{A vector of initial guesses of efficacy probabilities associated with the doses for \code{group_cur}. Must be of same length as \code{ndose}.}
\item{tox_max}{Toxicity upper bound, i.e. maximum acceptable toxicity probability.}
\item{eff_min}{Efficacy lower bound, i.e. minimum acceptable efficacy probability.}
\item{cohort_start}{Cohort size for the start-up phase.}
\item{cohort}{Cohort size for the model phase.}
\item{final}{A boolean with value TRUE if the trial is finished and the recommended dose for further phases should be given, or FALSE (default value) if the dose determination is performed for the next cohort of patients.}
\item{method}{A character string to specify the method for dose allocation (<=> plateau determination). The default method "MTA-RA" use adaptive randomization on posterior probabilities for the plateau location. Method based on difference in efficacy probabilities is specified by "MTA-PM".}
\item{s_1}{A function of the number of patients included, used for adaptive randomization in plateau determination; only used if the estimation method chosen is "MTA-RA". The default function is function(n_cur){0.2}.}
\item{s_2}{Cutoff value for plateau determination, only used if the estimation method chosen is "MTA-PM". Can be seen as the minimal efficacy difference of practical importance. The default value is 0.07.}
\item{group_pat}{A vector indicating the group number associated with each patient included in the trial.}
\item{id_dose}{A vector indicating the dose levels administered to each patient included in the trial. Must be of same length as \code{group_pat}.}
\item{toxicity}{A vector of observed toxicities (DLTs) for each patient included in the trial. Must be of same length as \code{group_pat}.}
\item{tite}{A boolean indicating if the efficacy is considered as a time-to-event (default value TRUE), or if it is a binary outcome (FALSE).}
\item{efficacy}{A vector of observed efficacies for each patient included in the trial. Must be of same length as \code{group_pat}. This argument is used/required only if tite=FALSE. The observed efficacies of patients belonging to other groups than \code{group_cur} should also be filled (although not used) in the same order as \code{group_pat} (NA can be put).}
\item{time_follow}{A vector of follow-up times for each patient included in the trial. Must be of same length as \code{group_pat}. This argument is used/required only if tite=TRUE.}
\item{time_eff}{A vector of times-to-efficacy for each patient included in the trial. If no efficacy was observed for a patient, must be filled with +Inf. Must be of same length as \code{group_pat}. This argument is used/required only if tite=TRUE.}
\item{time_full}{Full follow-up time window. This argument is used only if tite=TRUE.}
\item{cycle}{Minimum waiting time between two dose cohorts (usually a toxicity cycle). This argument is used only if tite=TRUE.}
\item{c_tox}{Toxicity threshold for decision rules. The default value is set at 0.90.}
\item{c_eff}{Efficacy threshold for decision rules. The default value is set at 0.40.}
\item{seed}{Seed of the random number generator. Default value is set at 8.}
}
\value{
An object of class "mtaBin_next" is returned, consisting of determination of the next optimal dose level to administer and estimations. Objects generated by \code{mtaBin_next} contain at least the following components:
\item{prior_tox}{Prior toxicities.}
\item{prior_eff}{Prior efficacies.}
\item{pat_incl_group}{Number of patients included.}
\item{n_tox_tot}{Number of observed toxicities.}
\item{pi}{Estimated toxicity probabilities (if the start-up ended).}
\item{ptox_inf}{Estimated probabilities that the toxicity probability is inferior to \code{tox_max} (if the start-up ended).}
\item{n_eff}{Number of observed efficacies.}
\item{resp}{Estimated efficacy probabilities (if the start-up ended).}
\item{1-qeff_inf}{Estimated probabilities that the efficacy probability is superior to \code{eff_min} (if the start-up ended).}
\item{proba_tau}{Posterior probabilities for the plateau location.}
\item{group_cur}{Current Group for dose determination.}
\item{in_startup}{Start-up phase is ended or not.}
\item{cdose}{NEXT RECOMMENDED DOSE.}
\item{ngroups}{Number of groups.}
\item{final}{Maximum sample size reached.}
\item{method}{Allocation method.}
\item{tox_max}{Toxicity upper bound (if the start-up ended).}
\item{eff_min}{Efficacy lower bound (if the start-up ended).}
\item{c_tox}{Toxicity threshold (if the start-up ended).}
\item{c_eff}{Efficacy threshold (if the start-up ended).}
\item{tite}{Type of outcome for efficacy (time-to-event or binary).}
\item{time_full}{If efficacy is a time-to-event, full follow-up time is also reminded.}
\item{cycle}{If efficacy is a time-to-event, minimum waiting time between two dose cohorts (cycle) is also reminded.}
}
\references{
Riviere, M-K., Yuan, Y., Jourdan, J-H., Dubois, F., and Zohar, S. Phase I/II Dose-Finding Design for Molecularly Targeted Agent: Plateau Determination using Adaptive Randomization.
}
\note{The "MTA-PM" method is not implemented for non-binary efficacy, as "MTA-RA" is recommended for general use.}
\author{Jacques-Henri Jourdan and Marie-Karelle Riviere-Jourdan \email{eldamjh@gmail.com}}
\seealso{
\code{\link{mtaBin_sim}}.
}
\examples{
prior_tox = c(0.02, 0.06, 0.12, 0.20, 0.30, 0.40)
prior_eff = c(0.12, 0.20, 0.30, 0.40, 0.50, 0.59)
group_pat_1 = rep(1,33)
id_dose_1 = c(1,1,1,2,2,2,3,3,3,4,4,4,5,5,5,4,4,4,5,5,5,6,6,6,3,3,3,4,4,4,3,3,3)
tox_1 = c(0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,1,0,0,0,0,0,1,0,0,0,0)
time_follow_1 = c(rep(7,30),6.8,5,3.5)
time_eff_1 = c(rep(+Inf,8),4,+Inf,+Inf,+Inf,3,6,+Inf,+Inf,2,+Inf,+Inf,4.5,+Inf,
+Inf,3.2,+Inf,+Inf,2.4,6.1,+Inf,5.8,+Inf,+Inf,2.1,3.6)
eff_2 = c(0,0,0,0,0,0,0,0,1,0,0,0,1,1,0,0,1,0,0,1,0,0,1,0,0,1,1,0,1,0,0,1,1)
group_pat_3 = c(1,2,3,2,1,2,3,1,2,3,3,2,2,1,3,1,2,3,1,2,3,3,3,2,1,1,2,1,2,2)
id_dose_3 = c(1,1,1,1,1,1,1,1,2,1,2,2,2,2,2,2,3,2,2,3,3,3,3,3,1,1,2,1,2,2)
toxicity_3 = c(0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0)
efficacy_3 = c(NA,0,NA,0,NA,1,NA,NA,0,NA,NA,1,0,NA,NA,NA,0,NA,NA,1,NA,NA,NA,
0,NA,NA,0,NA,1,1)
s_1=function(n_cur){0.2*(1-n_cur/60)}
\donttest{
# One group, time-to-event
mta1 = mtaBin_next(ngroups=1, group_cur=1, ndose=6, prior_tox=prior_tox,
prior_eff=prior_eff, tox_max=0.35, eff_min=0.20, cohort_start=3,
cohort=3, method="MTA-PM", group_pat=group_pat_1, id_dose=id_dose_1,
toxicity=tox_1, tite=TRUE, time_follow=time_follow_1,
time_eff=time_eff_1, time_full=7, cycle=3, c_tox=0.90, c_eff=0.40)
mta1
# One group, binary
mta2 = mtaBin_next(ngroups=1, group_cur=1, ndose=6, prior_tox=prior_tox,
prior_eff=prior_eff, tox_max=0.35, eff_min=0.20, cohort_start=3,
cohort=3, final = TRUE, method="MTA-RA", group_pat=group_pat_1,
id_dose=id_dose_1, toxicity=tox_1, tite=FALSE, efficacy=eff_2,
seed = 190714)
mta2
# Three groups, binary
mta3 = mtaBin_next(ngroups=3, group_cur=2, ndose=6, prior_tox=prior_tox,
prior_eff=prior_eff, tox_max=0.35, eff_min=0.20, cohort_start=3,
cohort=3, final = FALSE, s_1=s_1, group_pat=group_pat_3,
id_dose=id_dose_3, toxicity=toxicity_3, tite=FALSE, efficacy=efficacy_3)
mta3
}
# Dummy example, running quickly
useless = mtaBin_next(ngroups=1, group_cur=1, ndose=4,
prior_tox=c(0.12,0.20,0.30,0.40), prior_eff=c(0.20,0.30,0.40,0.50),
tox_max=0.35, eff_min=0.20, cohort_start=3, cohort=3,
group_pat=rep(1,9), id_dose=c(1,1,1,2,2,2,2,2,2),
toxicity=c(0,0,0,1,0,0,0,0,0), efficacy=c(0,0,0,0,0,1,0,1,0), tite=FALSE)
}
|
# gg is the output of load_ldsc_gene_groups()
# Builds a cell-type annotation table: one row per (file, celltype) pair,
# with the human-readable ("written") name resolved for each cell type.
# Side effect: writes the table to celltype_annot.csv in the working
# directory. Returns the assembled data.frame.
get.celltype.annot <- function(gg){
  library(stringi)
  files=names(gg)
  count=0
  for(ff in files){
    count=count+1
    # One row per cell type found in this file; writtenName filled in next.
    tmp = data.frame(file=ff,celltypes=names(gg[[ff]]),writtenName="")
    # get.written.cellnames() is a project helper defined elsewhere.
    tmp$writtenName = get.written.cellnames(files=tmp$file,cellnames=tmp$celltypes)
    if(count==1){
      print("creating annew")
      celltype_annot = tmp
    }else{
      print("appending")
      # NOTE(review): growing a data.frame with rbind() inside a loop is
      # O(n^2); consider do.call(rbind, lapply(...)) if the input grows.
      celltype_annot = rbind(celltype_annot,tmp)
    }
  }
  write.csv(celltype_annot,file="celltype_annot.csv")
  return(celltype_annot)
} | /get.celltype.annot.r | no_license | neurogenomics/LDSC_Celltyping | R | false | false | 634 | r | # gg is the output of load_ldsc_gene_groups()
get.celltype.annot <- function(gg){
  # Build the cell-type annotation table from the output of
  # load_ldsc_gene_groups(): one row per (file, celltype) pair, with the
  # human-readable name resolved via the project helper
  # get.written.cellnames().
  #
  # Args:
  #   gg: named list, one element per LDSC results file; the names of each
  #       element are the cell types it contains.
  # Returns the assembled data.frame; as a side effect it is also written
  # to celltype_annot.csv in the working directory.
  library(stringi)
  # Build one data.frame per file, then bind once at the end. This replaces
  # the original rbind()-in-a-loop, which is O(n^2) in the number of files,
  # and drops the per-iteration debug prints.
  per_file <- lapply(names(gg), function(ff) {
    tmp <- data.frame(file = ff, celltypes = names(gg[[ff]]), writtenName = "")
    tmp$writtenName <- get.written.cellnames(files = tmp$file, cellnames = tmp$celltypes)
    tmp
  })
  celltype_annot <- do.call(rbind, per_file)
  write.csv(celltype_annot, file = "celltype_annot.csv")
  return(celltype_annot)
} |
# Trains a binary "drunk tweet" classifier: three parallel 1-D CNN branches
# read the same tokenized text, each with its own embedding layer and a
# different convolution window (kernel sizes 7 / 8 / 9 -- the "tres janelas"
# = three windows of the file name), merged by concatenation into a single
# sigmoid output.
library(keras)
library(readr)
library(stringr)
library(purrr)
library(tibble)
library(dplyr)
library(tools)
# Project helpers -- presumably these (plus the .RData below) provide
# maxlen, vocab_size, max_sequence, max_sequence_types, train_vec, test_vec,
# dados_train and dados_test; TODO confirm.
source(file_path_as_absolute("utils/getDados.R"))
source(file_path_as_absolute("utils/tokenizer.R"))
load("rdas/sequences.RData")
# tfruns hyperparameter flags (overridable per run).
# NOTE(review): dropout1/dropout2 are declared but never used below.
FLAGS <- flags(
  flag_numeric("dropout1", 0.2),
  flag_numeric("dropout2", 0.2),
  flag_integer("dense_units1", 128),
  flag_integer("dense_units2", 128),
  flag_integer("epochs", 3),
  flag_integer("batch_size", 64)
)
# Data Preparation --------------------------------------------------------
# Parameters --------------------------------------------------------------
embedding_dims <- 50
filters <- 32
kernel_size <- 7
hidden_dims <- 50
# Three inputs of identical shape; the same sequences are fed to all three
# at fit/predict time (one input per convolution window).
main_input3 <- layer_input(shape = c(maxlen), dtype = "int32")
main_input4 <- layer_input(shape = c(maxlen), dtype = "int32")
main_input5 <- layer_input(shape = c(maxlen), dtype = "int32")
# Branch 1: window of `kernel_size` tokens, max-pooled over time.
ccn_out3 <- main_input3 %>%
  layer_embedding(vocab_size, embedding_dims, input_length = maxlen) %>%
  layer_conv_1d(
    filters, kernel_size,
    padding = "valid", activation = "relu", strides = 1
  ) %>%
  layer_global_max_pooling_1d()
# Branch 2: window of kernel_size + 1 tokens.
ccn_out4 <- main_input4 %>%
  layer_embedding(vocab_size, embedding_dims, input_length = maxlen) %>%
  layer_conv_1d(
    filters, kernel_size + 1,
    padding = "valid", activation = "relu", strides = 1
  ) %>%
  layer_global_max_pooling_1d()
# Branch 3: window of kernel_size + 2 tokens.
ccn_out5 <- main_input5 %>%
  layer_embedding(vocab_size, embedding_dims, input_length = maxlen) %>%
  layer_conv_1d(
    filters, kernel_size + 2,
    padding = "valid", activation = "relu", strides = 1
  ) %>%
  layer_global_max_pooling_1d()
# NOTE(review): the four objects below (entity/type auxiliary branches) are
# built but never wired into keras_model() -- dead code. Left in place here;
# safe to delete after confirming run-to-run reproducibility does not depend
# on layer-creation order.
auxiliary_input <- layer_input(shape = c(max_sequence))
entities_out <- auxiliary_input %>%
  layer_dense(units = FLAGS$dense_units1, activation = 'relu')
auxiliary_input_types <- layer_input(shape = c(max_sequence_types))
types_out <- auxiliary_input_types %>%
  layer_dense(units = FLAGS$dense_units1, activation = 'relu')
# Merge the three CNN branches and classify.
main_output <- layer_concatenate(c(ccn_out3, ccn_out4, ccn_out5)) %>%
  layer_dense(units = FLAGS$dense_units2, activation = 'relu') %>%
  layer_dense(units = 1, activation = 'sigmoid')
model <- keras_model(
  inputs = c(main_input3, main_input4, main_input5),
  outputs = main_output
)
# Compile model
model %>% compile(
  loss = "binary_crossentropy",
  optimizer = "adam",
  metrics = "accuracy"
)
# The same parsed text is supplied to all three model inputs.
history <- model %>%
  fit(
    x = list(train_vec$new_textParser, train_vec$new_textParser, train_vec$new_textParser),
    y = array(dados_train$resposta),
    batch_size = FLAGS$batch_size,
    epochs = FLAGS$epochs,
    validation_split = 0.2
  )
history
predictions <- model %>% predict(list(test_vec$new_textParser, test_vec$new_textParser, test_vec$new_textParser))
#predictions
# Threshold the sigmoid outputs at 0.5 by rounding.
predictions2 <- round(predictions, 0)
# NOTE(review): confusionMatrix() comes from caret, which is not attached in
# this file -- presumably loaded by one of the sourced utils; verify.
matriz <- confusionMatrix(data = as.factor(predictions2), as.factor(dados_test$resposta), positive="1")
matriz
print(paste("F1 ", matriz$byClass["F1"] * 100, "Precisao ", matriz$byClass["Precision"] * 100, "Recall ", matriz$byClass["Recall"] * 100, "Acuracia ", matriz$overall["Accuracy"] * 100))
| /modelos/dadosok/cnn_drunk_tres_janelas.R | no_license | MarcosGrzeca/drunktweets | R | false | false | 3,103 | r | library(keras)
# (Continuation of the three-window CNN training script; library(keras) is
# loaded on the preceding line.) Three parallel 1-D CNN branches over the
# same tokenized text, kernel widths 7 / 8 / 9, concatenated into one
# sigmoid output for binary "drunk tweet" classification.
library(readr)
library(stringr)
library(purrr)
library(tibble)
library(dplyr)
library(tools)
# Project helpers -- presumably these (plus the .RData below) provide
# maxlen, vocab_size, train_vec, test_vec, dados_train, dados_test, etc.;
# TODO confirm.
source(file_path_as_absolute("utils/getDados.R"))
source(file_path_as_absolute("utils/tokenizer.R"))
load("rdas/sequences.RData")
# tfruns hyperparameter flags.
# NOTE(review): dropout1/dropout2 are declared but never used below.
FLAGS <- flags(
  flag_numeric("dropout1", 0.2),
  flag_numeric("dropout2", 0.2),
  flag_integer("dense_units1", 128),
  flag_integer("dense_units2", 128),
  flag_integer("epochs", 3),
  flag_integer("batch_size", 64)
)
# Data Preparation --------------------------------------------------------
# Parameters --------------------------------------------------------------
embedding_dims <- 50
filters <- 32
kernel_size <- 7
hidden_dims <- 50
# Same-shaped inputs; identical sequences are fed to all three below.
main_input3 <- layer_input(shape = c(maxlen), dtype = "int32")
main_input4 <- layer_input(shape = c(maxlen), dtype = "int32")
main_input5 <- layer_input(shape = c(maxlen), dtype = "int32")
ccn_out3 <- main_input3 %>%
  layer_embedding(vocab_size, embedding_dims, input_length = maxlen) %>%
  layer_conv_1d(
    filters, kernel_size,
    padding = "valid", activation = "relu", strides = 1
  ) %>%
  layer_global_max_pooling_1d()
ccn_out4 <- main_input4 %>%
  layer_embedding(vocab_size, embedding_dims, input_length = maxlen) %>%
  layer_conv_1d(
    filters, kernel_size + 1,
    padding = "valid", activation = "relu", strides = 1
  ) %>%
  layer_global_max_pooling_1d()
ccn_out5 <- main_input5 %>%
  layer_embedding(vocab_size, embedding_dims, input_length = maxlen) %>%
  layer_conv_1d(
    filters, kernel_size + 2,
    padding = "valid", activation = "relu", strides = 1
  ) %>%
  layer_global_max_pooling_1d()
# NOTE(review): the entity/type auxiliary branches below are never wired
# into keras_model() -- dead code.
auxiliary_input <- layer_input(shape = c(max_sequence))
entities_out <- auxiliary_input %>%
  layer_dense(units = FLAGS$dense_units1, activation = 'relu')
auxiliary_input_types <- layer_input(shape = c(max_sequence_types))
types_out <- auxiliary_input_types %>%
  layer_dense(units = FLAGS$dense_units1, activation = 'relu')
main_output <- layer_concatenate(c(ccn_out3, ccn_out4, ccn_out5)) %>%
  layer_dense(units = FLAGS$dense_units2, activation = 'relu') %>%
  layer_dense(units = 1, activation = 'sigmoid')
model <- keras_model(
  inputs = c(main_input3, main_input4, main_input5),
  outputs = main_output
)
# Compile model
model %>% compile(
  loss = "binary_crossentropy",
  optimizer = "adam",
  metrics = "accuracy"
)
history <- model %>%
  fit(
    x = list(train_vec$new_textParser, train_vec$new_textParser, train_vec$new_textParser),
    y = array(dados_train$resposta),
    batch_size = FLAGS$batch_size,
    epochs = FLAGS$epochs,
    validation_split = 0.2
  )
history
predictions <- model %>% predict(list(test_vec$new_textParser, test_vec$new_textParser, test_vec$new_textParser))
#predictions
# Threshold sigmoid outputs at 0.5 by rounding.
predictions2 <- round(predictions, 0)
# NOTE(review): confusionMatrix() is from caret, not attached here; verify
# it is loaded by a sourced util.
matriz <- confusionMatrix(data = as.factor(predictions2), as.factor(dados_test$resposta), positive="1")
matriz
print(paste("F1 ", matriz$byClass["F1"] * 100, "Precisao ", matriz$byClass["Precision"] * 100, "Recall ", matriz$byClass["Recall"] * 100, "Acuracia ", matriz$overall["Accuracy"] * 100))
|
# Project 1 - Plot 2
# Plots Global Active Power (kilowatts) over time for 2007-02-01/02 from
# the household power consumption data set and writes it to plot2.png.
# Read the data file ("?" and empty strings are treated as missing).
filename <- "power_data/household_power_consumption.txt"
message("Reading data from ", filename)
pdata <- read.table(filename, header=TRUE, sep=";", na.strings=c("?",""))
# set class type for date column
message("Set date type")
pdata$Date <- as.Date(pdata$Date, format="%d/%m/%Y")
# subset to the two days of interest
message("Subset to dates of interest. ")
pdata <- pdata[pdata$Date=="2007-02-01" | pdata$Date=="2007-02-02",]
message("Number of rows = ", nrow(pdata))
# Combine Date and Time into one timestamp column. as.POSIXct() is used so
# an atomic date-time vector (not a POSIXlt list) is stored in the frame.
pdata$Time <- as.POSIXct(strptime(paste(pdata$Date, pdata$Time), format = "%Y-%m-%d %H:%M:%S"))
# plot the time series to a PNG device
message("Creating plot2.png")
png(filename="plot2.png", type="cairo")
plot(pdata$Time, pdata$Global_active_power, type="l", ylab="Global Active Power (kilowatts)", xlab="")
dev.off()
message("Complete.")
| /plot2.R | no_license | RAHansen/ExData_Plotting1 | R | false | false | 885 | r | # Project 1 - Plot 2
# Read the data file
# ("?" and empty strings in the raw file are treated as missing values)
filename = "power_data/household_power_consumption.txt"
message("Reading data from ", filename)
pdata <- read.table(filename, header=TRUE, sep=";", na.strings=c("?",""))
# set class type for date column
message("Set date type")
pdata$Date <- as.Date(pdata$Date, format="%d/%m/%Y")
# subset to dates of interest (2007-02-01 and 2007-02-02)
message("Subset to dates of interest. ")
pdata <- pdata[pdata$Date=="2007-02-01" | pdata$Date=="2007-02-02",]
message("Number of rows = ", nrow(pdata))
# set type for time column
# NOTE(review): strptime() returns POSIXlt; as.POSIXct() is the usual choice
# for a data.frame column, though plot() handles either.
pdata$Time <- strptime(paste(pdata$Date, pdata$Time), format = "%Y-%m-%d %H:%M:%S")
# plot time series to a PNG device
message("Creating plot2.png")
png(filename="plot2.png", type="cairo")
plot(pdata$Time, pdata$Global_active_power, type="l", ylab="Global Active Power (kilowatts)", xlab="")
dev.off()
message("Complete.")
|
#' @title Get URL of a Fandom
#'
#' @description For a given fandom in a certain medium,
#' this function gives the URL of its story page on fanfiction.net.
#'
#' @param story Name of the fandom to look up. It is treated as a regular
#' expression and matched against the titles listed for \code{type}; the
#' first match wins.
#' @param type Which type of media? Must be one of the names of
#' \code{typeMedia}, e.g. ("anime", "book", "cartoon", "comic", "game",
#' "misc", "tv")
#' @param max.entries How many Entries to scrape? Passed through unchanged.
#' @return returns a list with 2 entries:
#' the URL of the fandom page as a string, and
#' max.entries
#' @examples
#' getUrl("Zoolander","movie")
#' getUrl("Kingdom Hearts","game", max.entries=10)
getUrl <- function(story, type, max.entries = NA) {
  if (!(type %in% names(typeMedia))) stop("Unsupported Type: use from list")
  # grep() treats `story` as a regex; partial matches count.
  rowVal <- grep(story, typeMedia[[type]]$title)
  # Fail loudly instead of silently building ".../NA" when nothing matches.
  if (length(rowVal) == 0) {
    stop("No fandom matching '", story, "' found for type '", type, "'")
  }
  href <- typeMedia[[type]]$href[rowVal[1]]
  # make desired url
  url <- paste0("https://www.fanfiction.net", href)
  list(url = url, max.entries = max.entries)
}
| /R/getUrl.R | no_license | ekmaus19/absentfan | R | false | false | 953 | r | #' @title Get URL of a Fandom
#'
#' @description Given a fandom name and its medium, return the address of
#' that fandom's story-listing page on fanfiction.net.
#'
#' @param story Which Fandom would you like the URL for?
#' @param type Which type of media? Must be one of the following
#' ("anime", "book", "cartoon", "comic", "game", "misc", "tv")
#' @param max.entries How many Entries to scrape?
#' @return A list with 2 entries: the URL of the fandom page as a string,
#' and the max.entries value passed in.
#' @examples
#' getUrl("Zoolander","movie")
#' getUrl("Kingdom Hearts","game", max.entries=10)
getUrl <- function(story, type, max.entries = NA) {
  if (!(type %in% names(typeMedia))) stop("Unsupported Type: use from list")
  # Index of the first title matching `story` within this medium (NA if none).
  hit <- grep(story, typeMedia[[type]]$title)[1]
  # Assemble the absolute URL from the relative href.
  page_url <- paste0("https://www.fanfiction.net", typeMedia[[type]]$href[hit])
  return(list(url = page_url, max.entries = max.entries))
}
|
# Volcano plots for the WT-vs-Ts65Dn and T1-vs-T2 differential-expression
# result tables (DESeq2 output; see URL below). Each plot is saved as both
# PNG and SVG in the working directory.
library(ggplot2)
library(dplyr)
# Data here: http://regulomics.mimuw.edu.pl/~ilona/metatranscriptomics_in_DS/DeSeq2/
#setwd('/mnt/chr3/People/Ilona/Analyses_microbiomes_gene_exprs/2021_10_02_volcano_plots/')
# WT vs DS volcano plot ----
# FALSE spelled out (the original used F, which is an ordinary reassignable
# binding).
data <- read.csv('wt_tri_pvals.csv', stringsAsFactors = FALSE, sep = ',')
# Colour points by direction of change among FDR-significant genes.
data$color <- 'not_significant'
data$color[data$padjFDR < 0.05 & data$log2fold < 0] <- 'Ts65Dn_UpRegulated'
data$color[data$padjFDR < 0.05 & data$log2fold > 0] <- 'WT_UpRegulated'
data$log10padjFDR <- log10(data$padjFDR)
ggplot(data, aes(x=log2fold, y=-log10padjFDR, color=color)) +
  geom_point() +
  scale_color_manual("", values = c("not_significant" = 'grey',
                                    "Ts65Dn_UpRegulated" = "#ff3030",
                                    "WT_UpRegulated" = "#00bfff"),
                     labels = c("not significant","Ts65Dn up-regulated", "WT up-regulated")) +
  # NOTE(review): `size` in element_line() is deprecated since ggplot2 3.4
  # (use `linewidth`); kept for compatibility with older ggplot2.
  theme(panel.background = element_rect(fill = "white",colour = "white"),
        panel.grid.major.y = element_line(size = 0.5, linetype = 'solid',colour = "gray90")) +
  ylab('-log10 (p adj FDR)')
ggsave('volcano_plot_WT_DS.png', width=4, height = 4)
ggsave('volcano_plot_WT_DS.svg', width=4, height = 4)
# T1 vs T2 volcano plot ----
# (A stray interactive `colnames(data)` call was removed here: when the
# script is sourced it printed nothing and had no effect.)
data <- read.csv('all4_all16_pvals.csv', stringsAsFactors = FALSE, sep = ',')
data <- data %>% arrange(padjFDR)
# Mark the 100 genes with the smallest adjusted p-values as significant,
# then split them by direction of change.
data$color <- 'not_significant'
data$color[1:100] <- 'significant'
data$color[data$color == 'significant' & data$log2fold > 0] <- 'T1_UpRegulated'
data$color[data$color == 'significant' & data$log2fold < 0] <- 'T2_UpRegulated'
data$log_parpv <- log(data$DESeq2parpv)
ggplot(data, aes(x=log2fold, y=-log_parpv, color=color)) +
  geom_point() +
  scale_color_manual("", values = c("not_significant" = 'grey',
                                    "T1_UpRegulated" = "#ffb90f",
                                    "T2_UpRegulated" = "#B8860B"),
                     labels = c("not significant","T1 up-regulated", "T2 up-regulated")) +
  theme(panel.background = element_rect(fill = "white",colour = "white"),
        panel.grid.major.y = element_line(size = 0.5, linetype = 'solid',colour = "gray90")) +
  ylab('-log (p)')
ggsave('volcano_plot_T1_T2.png', width=4, height = 4)
ggsave('volcano_plot_T1_T2.svg', width=4, height = 4)
| /data_visualisation/volcano_plot.R | no_license | ilona-grabowicz/metatranscriptomics_in_DS | R | false | false | 2,296 | r | library(ggplot2)
# (Continuation of the volcano-plot script; library(ggplot2) is loaded on
# the preceding line.) Draws WT-vs-Ts65Dn and T1-vs-T2 volcano plots and
# saves each as PNG and SVG.
library(dplyr)
# Data here: http://regulomics.mimuw.edu.pl/~ilona/metatranscriptomics_in_DS/DeSeq2/
#setwd('/mnt/chr3/People/Ilona/Analyses_microbiomes_gene_exprs/2021_10_02_volcano_plots/')
# WT vs DS volcano plot
# NOTE(review): prefer stringsAsFactors = FALSE over the reassignable F.
data <- read.csv('wt_tri_pvals.csv', stringsAsFactors = F, sep = ',')
# Colour points by direction of change among FDR-significant genes.
data$color <- 'not_significant'
data$color [data$padjFDR<0.05 & data$log2fold < 0] <- 'Ts65Dn_UpRegulated'
data$color [data$padjFDR<0.05 & data$log2fold > 0] <- 'WT_UpRegulated'
data$log10padjFDR <- log10(data$padjFDR)
ggplot(data, aes(x=log2fold, y=-log10padjFDR, color=color)) +
  geom_point( ) +
  scale_color_manual("", values = c("not_significant" = 'grey',
                                    "Ts65Dn_UpRegulated" = "#ff3030",
                                    "WT_UpRegulated" = "#00bfff"),
                     labels = c("not significant","Ts65Dn up-regulated", "WT up-regulated")) +
  theme(panel.background = element_rect(fill = "white",colour = "white"),
        panel.grid.major.y = element_line(size = 0.5, linetype = 'solid',colour = "gray90")) +
  ylab('-log10 (p adj FDR)')
ggsave('volcano_plot_WT_DS.png', width=4, height = 4)
ggsave('volcano_plot_WT_DS.svg', width=4, height = 4)
# T1 vs T2 volcano plot
# NOTE(review): the colnames() call below is a leftover interactive check;
# it prints nothing when the script is sourced.
colnames(data)
data <- read.csv('all4_all16_pvals.csv', stringsAsFactors = F, sep = ',')
data <- data %>% arrange(padjFDR)
# Mark the 100 smallest adjusted p-values as significant, then split them
# by direction of change.
data$color <- 'not_significant'
data$color [1:100] <- 'significant'
data$color [data$color=='significant' & data$log2fold > 0] <- 'T1_UpRegulated'
data$color [data$color=='significant' & data$log2fold < 0] <- 'T2_UpRegulated'
data$log_parpv <- log(data$DESeq2parpv)
ggplot(data, aes(x=log2fold, y=-log_parpv, color=color)) +
  geom_point( ) +
  scale_color_manual("", values = c("not_significant" = 'grey',
                                    "T1_UpRegulated" = "#ffb90f",
                                    "T2_UpRegulated" = "#B8860B"),
                     labels = c("not significant","T1 up-regulated", "T2 up-regulated")) +
  theme(panel.background = element_rect(fill = "white",colour = "white"),
        panel.grid.major.y = element_line(size = 0.5, linetype = 'solid',colour = "gray90")) +
  ylab('-log (p)')
ggsave('volcano_plot_T1_T2.png', width=4, height = 4)
ggsave('volcano_plot_T1_T2.svg', width=4, height = 4)
|
library(shiny)
library(shinydashboard)
library(tidyverse)
library(openxlsx)
library(DT)
library(shinyWidgets)
# --- Data load & cleaning -------------------------------------------------
# Reads the legislation spreadsheet and the topic lookup, then normalizes
# column names, Year, Status, Local and State before the app starts.
policing_data <- read.xlsx("Completed Policing Legislation-4SB.xlsx", detectDates = TRUE)
topics_data <- read.csv("topics_data.csv")
# cleaning up column names
policing_data <- rename(policing_data, "LawNum" = `Law.Number.(for.title.of.pdf)`,
                        "Status" = `Status.(failed/enacted/pending)`,
                        "Local" = Local.Level)
# Creating Year column (taking last 4 characters of Date column)
# For observations with typo in Date column/missing Date, used first 4 characters of law number
policing_data <- policing_data %>%
  mutate(Year = substr(policing_data$Date, 1, 4)) %>%
  mutate(Year = if_else(Year > "2100" | Year < "2017" | is.na(Year), substr(policing_data$LawNum, 1, 4), Year))
# Fixing typo in Year column
policing_data <- policing_data %>%
  mutate(Year = if_else(LawNum == "WI 21 2020", "2020", Year))
# Changing Year column class to numeric
# NAs introduced by coercion here (law numbers that do not start with a year)
policing_data$Year <- as.numeric(policing_data$Year)
# Cleaning Status column: collapse case variants to enacted/pending/failed.
# NOTE(review): the `== TRUE` comparisons are redundant -- str_detect()
# already returns a logical.
policing_data <- policing_data %>%
  mutate(Status = if_else(str_detect(Status, "(?i)enacted") == TRUE, "enacted", Status)) %>%
  mutate(Status = if_else(str_detect(Status, "(?i)pending") == TRUE, "pending", Status)) %>%
  mutate(Status = if_else(str_detect(Status, "(?i)failed") == TRUE, "failed", Status))
# Changing empty spaces in Local column to NA
# NOTE(review): only the exact value " " is caught, and this runs before the
# trimws() below -- multi-space blanks would survive as "".
policing_data$Local[policing_data$Local == " "] <- NA
# Cleaning Local column typo
policing_data <- policing_data %>%
  mutate(Local = if_else(Local == "Berkeley City Counci;", "Berkeley City Council", Local))
# Cleaning State column typo
policing_data <- policing_data %>%
  mutate(State = if_else(str_detect(State, "Minne"), "Minnesota", State),
         State = if_else(str_detect(State, "fornia"), "California", State))
# Removing extra spaces in Local and State columns
policing_data$State <- trimws(policing_data$State)
policing_data$Local <- trimws(policing_data$Local)
# Normalize the "body cam" topic label so the topic filter matches.
policing_data$Topic <-
  str_replace(policing_data$Topic, "\\bbody cam\\b", "body cameras")
# Changing order of columns (making Notes column last, moving Year next to Date)
policing_data <- policing_data %>%
  select(State:Title, Year, Date:Notes)
# UI: app title, a sidebar of filter widgets, and the results table.
# Each filter <select> is rendered server-side (uiOutput) so that its
# paired action button can rebuild it at its default value.
ui <- fluidPage(
  setBackgroundColor(color = "LemonChiffon"),
  h2("Policing Legislation Registry"),
  sidebarLayout(
    sidebarPanel(
      h4("Narrow results by:"),
      uiOutput('resetable_state'),
      actionButton("reset_state", "Reset state filter"),
      br(),
      br(),
      uiOutput('resetable_local'),
      actionButton("reset_local", "Reset city/county filter"),
      br(),
      br(),
      uiOutput('resetable_status'),
      actionButton("reset_status", "Reset status filter"),
      br(),
      br(),
      uiOutput('resetable_topic'),
      actionButton("reset_topic", "Reset topic filter"),
      br(),
      br(),
      # Year range slider; bounds come from the cleaned data.
      # TRUE spelled out (the original used the reassignable shorthand T),
      # and the stray trailing comma after sliderInput() was removed.
      sliderInput(
        inputId = "year",
        label = "Year",
        min = min(policing_data$Year, na.rm = TRUE),
        max = max(policing_data$Year, na.rm = TRUE),
        sep = "",
        value = c(min(policing_data$Year, na.rm = TRUE), 2021)
      )
    ),
    mainPanel(
      fluidRow(
        column(width = 8,
               box(div(DT::dataTableOutput("policing_table"),
                       style = "width: 75%"), width = NULL)
        )
      )
    )
  )
)
# Server logic: a chain of reactives applies each filter in sequence
# (state -> city/county -> status -> topic -> year); each filter widget is
# rendered server-side so its reset button can rebuild it at its default.
server <- function(input, output, session) {
  # Rows restricted to the chosen state ("All" = passthrough).
  filtered_state <- reactive({
    if(input$state == "All"){
      policing_data
    } else {
      policing_data %>%
        filter(State == input$state)
    }
  })
  # Then restricted to the chosen city/county.
  filtered_local <- reactive({
    if(input$local == "All"){
      filtered_state()
    } else {
      filtered_state() %>%
        filter(Local == input$local)
    }
  })
  # Then restricted to the chosen status (enacted/pending/failed).
  filtered_status <- reactive({
    if(input$status == "All"){
      filtered_local()
    } else {
      filtered_local() %>%
        filter(Status == input$status)
    }
  })
  # Then restricted by topic: case-insensitive literal substring match
  # (fixed(), not a regex) against the free-text Topic column.
  filtered_topic <- reactive({
    if(input$topic == "All"){
      filtered_status()
    } else {
      filtered_status() %>%
        filter(str_detect(Topic, fixed(as.character(input$topic), ignore_case = TRUE)))
    }
  })
  # Finally restricted to the selected year range (inclusive).
  filtered_year <- reactive({
    filtered_topic() %>%
      filter(Year >= input$year[1] & Year <= input$year[2])
  })
  # The fully filtered table shown in the main panel.
  output$policing_table <- DT::renderDataTable({
    datatable(
      # options = list(
      #   scrollX=TRUE,
      #   autoWidth = TRUE
      # ),
      data = filtered_year(), rownames = FALSE
    )
  })
  # Each resetable_* output below re-renders its selectInput when the
  # matching reset button is pressed; changing the wrapping div's id on
  # every press forces Shiny to rebuild the input at its default value.
  output$resetable_state <- renderUI({
    times <- input$reset_state
    div(id = letters[(times %% length(letters))+1],
        selectInput(
          inputId = "state",
          label = "State:",
          choices = c("All", unique(policing_data$State)),
          #multiple = TRUE,
          selected = "All"
        )
    )
  })
  ### How to make Local filter auto-update choices depending on what state is selected?
  # NOTE(review): the draft observer below has two latent bugs if enabled:
  # it references input$State (should be input$state) and local_choice
  # (should be local_choices).
  # local_choices <- reactiveValues()
  #
  # observe({
  #   if(input$state == "All"){
  #     local_choices$local <- unique(policing_data$Local)
  #   } else {
  #     tmp <- policing_data %>%
  #       filter(State == input$State)
  #     local_choices$local <- unique(tmp$Local[!is.na(tmp$Local)])
  #   }
  #   updateSelectInput(session, "local", choices = local_choice$local)
  # })
  # City/county choices are currently all locals regardless of state.
  output$resetable_local <- renderUI({
    # lchoices <- local_choices$local
    times <- input$reset_local
    div(id = letters[(times %% length(letters))+1],
        selectInput(
          inputId = "local",
          label = "City/county:",
          #choices = c(unique(filtered_state()$Local))
          choices = c("All", unique(policing_data$Local)),
          #choices = "",
          selected = "All"
        )
    )
  })
  output$resetable_status <- renderUI({
    times <- input$reset_status
    div(id = letters[(times %% length(letters))+1],
        selectInput(
          inputId = "status",
          label = "Status (failed/enacted/pending):",
          choices = c("All", unique(policing_data$Status)),
          # There's an NA choice since some observations don't have a Status listed
          #multiple = TRUE,
          selected = "All"
        )
    )
  })
  output$resetable_topic <- renderUI({
    times <- input$reset_topic
    div(id = letters[(times %% length(letters))+1],
        selectInput(
          inputId = "topic",
          label = "Topic of bill:",
          choices = c("All", as.character(unique(topics_data$topic))),
          selected = "All"
        )
    )
  })
}
# TODO: add a control that clears all filters at once.
# TODO: make the city/county choices update based on the selected state
# (see the commented-out observer in server()).
# Run the application
shinyApp(ui = ui, server = server)
| /police_legislation_db.R | no_license | gugek/policing_legislation_database | R | false | false | 7,841 | r | library(shiny)
library(shinydashboard)
library(tidyverse)
library(openxlsx)
library(DT)
library(shinyWidgets)
# --- Data load & cleaning -------------------------------------------------
# Reads the legislation spreadsheet and the topic lookup, then normalizes
# column names, Year, Status, Local and State before the app starts.
policing_data <- read.xlsx("Completed Policing Legislation-4SB.xlsx", detectDates = TRUE)
topics_data <- read.csv("topics_data.csv")
# cleaning up column names
policing_data <- rename(policing_data, "LawNum" = `Law.Number.(for.title.of.pdf)`,
                        "Status" = `Status.(failed/enacted/pending)`,
                        "Local" = Local.Level)
# Creating Year column (taking last 4 characters of Date column)
# For observations with typo in Date column/missing Date, used first 4 characters of law number
policing_data <- policing_data %>%
  mutate(Year = substr(policing_data$Date, 1, 4)) %>%
  mutate(Year = if_else(Year > "2100" | Year < "2017" | is.na(Year), substr(policing_data$LawNum, 1, 4), Year))
# Fixing typo in Year column
policing_data <- policing_data %>%
  mutate(Year = if_else(LawNum == "WI 21 2020", "2020", Year))
# Changing Year column class to numeric
# NAs introduced by coercion here (law numbers that do not start with a year)
policing_data$Year <- as.numeric(policing_data$Year)
# Cleaning Status column: collapse case variants to enacted/pending/failed.
# NOTE(review): the `== TRUE` comparisons are redundant -- str_detect()
# already returns a logical.
policing_data <- policing_data %>%
  mutate(Status = if_else(str_detect(Status, "(?i)enacted") == TRUE, "enacted", Status)) %>%
  mutate(Status = if_else(str_detect(Status, "(?i)pending") == TRUE, "pending", Status)) %>%
  mutate(Status = if_else(str_detect(Status, "(?i)failed") == TRUE, "failed", Status))
# Changing empty spaces in Local column to NA
# NOTE(review): only the exact value " " is caught, and this runs before the
# trimws() below -- multi-space blanks would survive as "".
policing_data$Local[policing_data$Local == " "] <- NA
# Cleaning Local column typo
policing_data <- policing_data %>%
  mutate(Local = if_else(Local == "Berkeley City Counci;", "Berkeley City Council", Local))
# Cleaning State column typo
policing_data <- policing_data %>%
  mutate(State = if_else(str_detect(State, "Minne"), "Minnesota", State),
         State = if_else(str_detect(State, "fornia"), "California", State))
# Removing extra spaces in Local and State columns
policing_data$State <- trimws(policing_data$State)
policing_data$Local <- trimws(policing_data$Local)
# Normalize the "body cam" topic label so the topic filter matches.
policing_data$Topic <-
  str_replace(policing_data$Topic, "\\bbody cam\\b", "body cameras")
# Changing order of columns (making Notes column last, moving Year next to Date)
policing_data <- policing_data %>%
  select(State:Title, Year, Date:Notes)
# UI: app title, a sidebar of filter widgets, and the results table.
# Each filter <select> is rendered server-side (uiOutput) so that its
# paired action button can rebuild it at its default value.
ui <- fluidPage(
  setBackgroundColor(color = "LemonChiffon"),
  h2("Policing Legislation Registry"),
  sidebarLayout(
    sidebarPanel(
      h4("Narrow results by:"),
      uiOutput('resetable_state'),
      actionButton("reset_state", "Reset state filter"),
      br(),
      br(),
      uiOutput('resetable_local'),
      actionButton("reset_local", "Reset city/county filter"),
      br(),
      br(),
      uiOutput('resetable_status'),
      actionButton("reset_status", "Reset status filter"),
      br(),
      br(),
      uiOutput('resetable_topic'),
      actionButton("reset_topic", "Reset topic filter"),
      br(),
      br(),
      # Year range slider; bounds come from the cleaned data.
      # TRUE spelled out (the original used the reassignable shorthand T),
      # and the stray trailing comma after sliderInput() was removed.
      sliderInput(
        inputId = "year",
        label = "Year",
        min = min(policing_data$Year, na.rm = TRUE),
        max = max(policing_data$Year, na.rm = TRUE),
        sep = "",
        value = c(min(policing_data$Year, na.rm = TRUE), 2021)
      )
    ),
    mainPanel(
      fluidRow(
        column(width = 8,
               box(div(DT::dataTableOutput("policing_table"),
                       style = "width: 75%"), width = NULL)
        )
      )
    )
  )
)
# Server logic: a chain of reactives applies each filter in sequence
# (state -> city/county -> status -> topic -> year); each filter widget is
# rendered server-side so its reset button can rebuild it at its default.
server <- function(input, output, session) {
  # Rows restricted to the chosen state ("All" = passthrough).
  filtered_state <- reactive({
    if(input$state == "All"){
      policing_data
    } else {
      policing_data %>%
        filter(State == input$state)
    }
  })
  # Then restricted to the chosen city/county.
  filtered_local <- reactive({
    if(input$local == "All"){
      filtered_state()
    } else {
      filtered_state() %>%
        filter(Local == input$local)
    }
  })
  # Then restricted to the chosen status (enacted/pending/failed).
  filtered_status <- reactive({
    if(input$status == "All"){
      filtered_local()
    } else {
      filtered_local() %>%
        filter(Status == input$status)
    }
  })
  # Then restricted by topic: case-insensitive literal substring match
  # (fixed(), not a regex) against the free-text Topic column.
  filtered_topic <- reactive({
    if(input$topic == "All"){
      filtered_status()
    } else {
      filtered_status() %>%
        filter(str_detect(Topic, fixed(as.character(input$topic), ignore_case = TRUE)))
    }
  })
  # Finally restricted to the selected year range (inclusive).
  filtered_year <- reactive({
    filtered_topic() %>%
      filter(Year >= input$year[1] & Year <= input$year[2])
  })
  # The fully filtered table shown in the main panel.
  output$policing_table <- DT::renderDataTable({
    datatable(
      # options = list(
      #   scrollX=TRUE,
      #   autoWidth = TRUE
      # ),
      data = filtered_year(), rownames = FALSE
    )
  })
  # Each resetable_* output below re-renders its selectInput when the
  # matching reset button is pressed; changing the wrapping div's id on
  # every press forces Shiny to rebuild the input at its default value.
  output$resetable_state <- renderUI({
    times <- input$reset_state
    div(id = letters[(times %% length(letters))+1],
        selectInput(
          inputId = "state",
          label = "State:",
          choices = c("All", unique(policing_data$State)),
          #multiple = TRUE,
          selected = "All"
        )
    )
  })
  ### How to make Local filter auto-update choices depending on what state is selected?
  # NOTE(review): the draft observer below has two latent bugs if enabled:
  # it references input$State (should be input$state) and local_choice
  # (should be local_choices).
  # local_choices <- reactiveValues()
  #
  # observe({
  #   if(input$state == "All"){
  #     local_choices$local <- unique(policing_data$Local)
  #   } else {
  #     tmp <- policing_data %>%
  #       filter(State == input$State)
  #     local_choices$local <- unique(tmp$Local[!is.na(tmp$Local)])
  #   }
  #   updateSelectInput(session, "local", choices = local_choice$local)
  # })
  # City/county choices are currently all locals regardless of state.
  output$resetable_local <- renderUI({
    # lchoices <- local_choices$local
    times <- input$reset_local
    div(id = letters[(times %% length(letters))+1],
        selectInput(
          inputId = "local",
          label = "City/county:",
          #choices = c(unique(filtered_state()$Local))
          choices = c("All", unique(policing_data$Local)),
          #choices = "",
          selected = "All"
        )
    )
  })
  output$resetable_status <- renderUI({
    times <- input$reset_status
    div(id = letters[(times %% length(letters))+1],
        selectInput(
          inputId = "status",
          label = "Status (failed/enacted/pending):",
          choices = c("All", unique(policing_data$Status)),
          # There's an NA choice since some observations don't have a Status listed
          #multiple = TRUE,
          selected = "All"
        )
    )
  })
  output$resetable_topic <- renderUI({
    times <- input$reset_topic
    div(id = letters[(times %% length(letters))+1],
        selectInput(
          inputId = "topic",
          label = "Topic of bill:",
          choices = c("All", as.character(unique(topics_data$topic))),
          selected = "All"
        )
    )
  })
}
# TODO(review): consider a single "clear all filters" control instead of
# one reset button per filter.
# TODO(review): make the city/county choices update based on the selected
# state (see the commented-out observer in server above).
# Run the application
shinyApp(ui = ui, server = server)
|
library(ggplot2)
library(grid)
library(RColorBrewer)
########### Example plots #################
####Bar
##1 color
#ggplot(data = mtcars, mapping = aes(factor(cyl))) +
# geom_bar() +
# ylim(c(0, 50)) +
# labs(title = "Title")
##3 colors
#ggplot(data = mtcars, mapping = aes(x = factor(cyl), fill = factor(cyl))) +
# geom_bar() +
# labs(title = "Title")
##5 colors (stacked)
#ggplot(data = diamonds, mapping = aes(clarity, fill = cut)) +
# geom_bar() +
# scale_y_continuous(expand = c(0, 0), limits = c(0, 15000)) +
# xlab("Clarity") +
# ylab("Count") +
# labs(
# title = "Diamond Clarity",
# subtitle = "Something Informative About Diamonds",
# caption = "The Source of Diamond Data"
# )
##5 colors (dodged)
#ggplot(data = diamonds, mapping = aes(clarity, fill = cut)) +
# geom_bar(position = "dodge") +
# scale_y_continuous(expand = c(0, 0), limits = c(0, 6000)) +
# xlab("Clarity") +
# ylab("Count") +
# labs(
# title = "Diamond Clarity",
# subtitle = "Something Informative About Diamonds",
# caption = "The Source of Diamond Data"
# )
####Scatter
## 1 Color
#ggplot(data = diamonds, mapping = aes(x = carat, y = price)) +
# geom_point() +
# scale_y_continuous(expand = c(0, 0)) +
# labs(title = "Title")
#ggplot(data = diamonds, mapping = aes(x = carat, y = price)) +
# geom_point(alpha = 0.1) +
# scale_y_continuous(expand = c(0, 0)) +
# labs(title = "Title",
# subtitle = "alpha = 0.1 adds transparency to overlapping points")
#ggplot(data = diamonds, mapping = aes(x = carat, y = price)) +
# geom_hex() +
# labs(title = "Title",
# subtitle = "geom_hex adds clarity to overlapping points")
##3 colors
#ggplot(data = mtcars, mapping = aes(x = wt, y = mpg)) +
# geom_point(aes(colour = factor(cyl))) +
# labs(title = "Title")
##9 colors
#dsamp <- diamonds[sample(nrow(diamonds), 1000), ]
#ggplot(data = dsamp, mapping = aes(x = carat, y = price, color = clarity)) +
# geom_point(size = 3) +
# scale_y_continuous(expand = c(0, 0), limits = c(0, 20000)) +
# labs(title = "Title") +
# xlab("Carat") +
# ylab("Price (USD)")
###Line
##3 colors
#library(tidyverse)
#mtcars %>%
# select(mpg, disp, hp, wt) %>%
# gather(-mpg, key = variable, value = value) %>%
# ggplot(mapping = aes(mpg, value, color = variable)) +
# geom_line(size = 1) +
# labs(title = "Title")
###Facet Grid
#ggplot(mtcars, aes(mpg, wt)) +
# geom_point() +
# ggtitle("Title") +
# facet_grid(vs ~ am, margins = TRUE)
###Histogram
#ggplot(data = diamonds, mapping = aes(x = depth)) +
# geom_histogram() +
# scale_y_continuous(expand = c(0, 0)) +
# labs(title = "Title")
####################################
#resize window to 650 px width
#quartz.options(width = 8.33333333333333, height = 5.55555555555556, dpi = 72)
# For windows, uncomment below line (and comment out above line)
windows.options(width = 8.33333333333333, height = 5.55555555555556)
#################### redefine default ggplot theme ###################
theme_new <- theme_set(theme_bw())
theme_new <- theme_update(
line = element_line(colour = "#000000",
size = 0.5,
linetype = 1L,
lineend = "butt"),
rect = element_rect(fill = "#FFFFFF",
colour = "#000000",
size = 0.5,
linetype = 1L),
text = element_text(family = "Lato",
face = "plain",
colour = "#000000",
size = 12L,
hjust = 0.5,
vjust = 0.5,
angle = 0,
lineheight = 0.9,
margin = structure(c(0, 0, 0, 0),
unit = "pt",
valid.unit = 8L,
class = c("margin", "unit")),
debug = FALSE),
title = element_text(family = NULL,
face = NULL,
colour = NULL,
size = 18L,
hjust = NULL,
vjust = NULL,
angle = NULL,
lineheight = NULL,
margin = structure(c(0, 0, 0, 0),
unit = "pt",
valid.unit = 8L,
class = c("margin", "unit")),
debug = FALSE),
axis.text = element_text(family = NULL,
face = NULL,
colour = NULL,
size = 12L,
hjust = NULL,
vjust = NULL,
angle = NULL,
lineheight = NULL,
margin = structure(c(0, 0, 0, 0),
unit = "pt",
valid.unit = 8L,
class = c("margin", "unit")),
debug = FALSE),
strip.text = element_text(family = NULL,
face = "bold",
colour = NULL,
size = 14L,
hjust = NULL,
vjust = NULL,
angle = NULL,
lineheight = NULL,
margin = structure(c(0, 0, 0, 0),
unit = "pt",
valid.unit = 8L,
class = c("margin", "unit")),
debug = FALSE),
axis.line = element_line(colour = NULL,
size = NULL,
linetype = NULL,
lineend = NULL),
axis.text.x = element_text(family = NULL,
face = NULL,
colour = NULL,
size = NULL,
hjust = NULL,
vjust = NULL,
angle = NULL,
lineheight = NULL,
margin = structure(c(0, 0, 0, 0),
unit = "pt",
valid.unit = 8L,
class = c("margin", "unit")),
debug = FALSE),
axis.text.y = element_text(family = NULL,
face = NULL,
colour = NULL,
size = NULL,
hjust = NULL,
vjust = NULL,
angle = NULL,
lineheight = NULL,
margin = structure(c(0, 0, 0, 0),
unit = "pt",
valid.unit = 8L,
class = c("margin", "unit")),
debug = FALSE),
axis.ticks = element_line(colour = NULL,
size = NULL,
linetype = NULL,
lineend = NULL),
axis.title.x = element_text(family = NULL,
face = NULL,
colour = NULL,
size = NULL,
hjust = NULL,
vjust = NULL,
angle = NULL,
lineheight = NULL,
margin = structure(c(10, 0, 0, 0),
unit = "pt",
valid.unit = 8L,
class = c("margin", "unit")),
debug = FALSE),
axis.title.y = element_text(family = NULL,
face = NULL,
colour = NULL,
size = NULL,
hjust = NULL,
vjust = NULL,
angle = NULL,
lineheight = NULL,
margin = structure(c(0, 10, 0, 0),
unit = "pt",
valid.unit = 8L,
class = c("margin", "unit")),
debug = FALSE),
axis.ticks.length = unit(4L, "points"),
legend.background = element_blank(),
legend.spacing = unit(8L, "points"),
legend.key = element_rect(fill = NULL,
colour = NULL,
size = 0L,
linetype = NULL),
legend.key.size = unit(10L, "points"),
legend.key.height = NULL,
legend.key.width = NULL,
legend.text = element_text(family = NULL,
face = NULL,
colour = NULL,
size = 12L,
hjust = NULL,
vjust = NULL,
angle = NULL,
lineheight = NULL,
margin = structure(c(0, 0, 0, 0),
unit = "pt",
valid.unit = 8L,
class = c("margin", "unit")),
debug = FALSE),
legend.text.align = NULL,
legend.title = element_blank(),
legend.title.align = NULL,
legend.position = "top",
legend.direction = "horizontal",
legend.justification = NULL,
legend.box = "horizontal",
panel.background = element_blank(),
panel.border = element_blank(),
panel.grid.major = element_line(colour = NULL,
size = NULL,
linetype = NULL,
lineend = NULL),
panel.grid.minor = element_line(colour = NULL,
size = NULL,
linetype = NULL,
lineend = NULL),
panel.spacing = unit(2L, "points"))
theme_new <- theme_update(panel.spacing.x = unit(0,"lines"),
panel.spacing.y = unit(0,"lines"),
panel.ontop = FALSE,
strip.background = element_rect(fill = "#dedddd",
colour = NULL,
size = NULL,
linetype = 0L),
strip.text.x = element_text(family = NULL,
face = NULL,
colour = NULL,
size = NULL,
hjust = NULL,
vjust = NULL,
angle = NULL,
lineheight = NULL,
margin = structure(c(0, 0, 0, 0),
unit = "pt",
valid.unit = 8L,
class = c("margin", "unit")),
debug = FALSE),
strip.text.y = element_text(family = NULL,
face = NULL,
colour = NULL,
size = NULL,
hjust = NULL,
vjust = NULL,
angle = NULL,
lineheight = NULL,
margin = structure(c(0, 0, 0, 0),
unit = "pt",
valid.unit = 8L,
class = c("margin", "unit")),
debug = FALSE),
strip.switch.pad.grid = unit(0,"lines"),
strip.switch.pad.wrap = unit(0,"lines"),
plot.background = element_rect(fill = NULL,
colour = NULL,
size = NULL,
linetype = NULL),
plot.title = element_text(family = NULL,
face = NULL,
colour = NULL,
size = NULL,
hjust = 0,
vjust = NULL,
angle = NULL,
lineheight = NULL,
margin = structure(c(0, 0, 0, 0),
unit = "pt",
valid.unit = 8L,
class = c("margin", "unit")),
debug = FALSE),
plot.margin = unit(c(10L, 10L, 10L, 10L), "points"),
axis.line.x = element_line(colour = NULL,
size = NULL,
linetype = NULL,
lineend = NULL),
axis.line.y = element_blank(), axis.title = element_text(family = NULL,
face = "italic",
colour = NULL,
size = 13L,
hjust = NULL,
vjust = NULL,
angle = NULL,
lineheight = NULL,
margin = NULL,
debug = NULL),
axis.ticks.x = element_line(colour = NULL,
size = NULL,
linetype = NULL,
lineend = NULL),
axis.ticks.y = element_blank(),
panel.grid = element_line(colour = NULL,
size = NULL,
linetype = NULL,
lineend = NULL),
panel.grid.major.x = element_blank(),
panel.grid.major.y = element_line(colour = "#DEDDDD",
size = NULL,
linetype = NULL,
lineend = NULL),
panel.grid.minor.x = element_blank(),
panel.grid.minor.y = element_blank())
#############################
# Override ggplot2's default discrete colour/fill scales so every plot picks
# up the custom "Set1" palette (up to 9 colours) without an explicit scale.
scale_colour_discrete <- function(...) scale_colour_custom(..., palette = "Set1")
scale_fill_discrete <- function(...) scale_fill_custom(... , palette = "Set1")
#################### Functions to Define custom colours #####################
# Palette registry, mirroring RColorBrewer's naming: diverging, qualitative
# and sequential palette names with the maximum number of colours each one
# supports.  custom.pal()/pal_name() below look names up in `namelist`.
divlist <- c("BrBG","PiYG","PRGn","PuOr","RdBu","RdGy","RdYlBu","RdYlGn","Spectral")
quallist <- c("Accent","Dark2","Paired","Pastel1","Pastel2","Set1","Set2","Set3")
seqlist <- c("Blues","BuGn","BuPu","GnBu","Greens","Greys","Oranges","OrRd",
"PuBu","PuBuGn","PuRd","Purples","RdPu","Reds","YlGn","YlGnBu","YlOrBr","YlOrRd")
# Maximum colour counts, aligned element-wise with the three name vectors.
divnum <- rep(11, length(divlist))
qualnum <- c( 8, 8, 12, 9, 8, 9, 8, 12)
seqnum <- rep(9, length(seqlist))
namelist <- c(divlist,quallist,seqlist)
maxcolours <- c(divnum,qualnum,seqnum)
catlist <- rep(c("div","qual","seq"), c(length(divlist),length(quallist), length(seqlist)))
# Summary table (rows = palette names) for interactive inspection.
custom.pal.info <- data.frame(maxcolours = maxcolours, category = catlist, row.names = namelist)
#' Return n colours from a named custom palette.
#'
#' Mirrors RColorBrewer::brewer.pal(): `name` must appear in the file-level
#' `namelist`; n is clamped to [3, maxcolours[name]] with a warning on
#' either side.  Only "Set1" and "Set2" have actual colour definitions;
#' any other registered name yields NULL, as in the original switch().
#'
#' @param n Number of colours requested.
#' @param name Palette name (e.g. "Set1", "Set2").
#' @return Character vector of upper-case hex colours, or NULL.
custom.pal <- function(n, name){
if (!(name %in% namelist)) {
stop(paste(name, "is not a valid palette name for custom.pal\n"))
}
if (n < 3) {
warning("minimal value for n is 3, returning requested palette with 3 different levels\n")
return(custom.pal(3, name))
}
if (n > maxcolours[which(name == namelist)]) {
warning(paste("n too large, allowed maximum for palette",name,"is", maxcolours[which(name == namelist)]),
"\nReturning the palette you asked for with that many colours\n")
return(custom.pal(maxcolours[which(name == namelist)], name))
}
# Base colours for "Set1".  Round-trip through col2rgb()/rgb() so the
# returned hex strings are byte-identical to the ones the original 60-line
# rgb() switch produced (rgb() emits upper-case hex).
set1_base <- c("#1696d2", "#fdbf11", "#000000", "#ec008b", "#d2d2d2",
"#55B748", "#5c5859", "#db2b27", "#761548")
set1 <- vapply(set1_base, function(cl) {
v <- col2rgb(cl)
rgb(v[1], v[2], v[3], maxColorValue = 255)
}, character(1), USE.NAMES = FALSE)
set2 <- rgb(c(154, 21, 112), c(62, 107, 130), c(37, 144, 89),
maxColorValue = 255)
switch(name,
Set1 = set1[seq_len(n)],
# Set2 only defines up to 3 levels; larger n falls through to NULL
# exactly as the original switch() did.
Set2 = switch(n, set2[1], set2[seq_len(2)], set2[seq_len(3)])
)
}
#' Resolve a palette specification to a palette name.
#'
#' @param palette Either a palette name (character) or a numeric index into
#'   the palette list for the given type.
#' @param type One of "div", "qual" or "seq"; only used when `palette` is
#'   numeric.
#' @return A single palette name.  Unknown character names fall back to
#'   "Set1" with a warning.
pal_name <- function(palette, type) {
if (is.character(palette)) {
# Bug fix: this previously checked RColorBrewer:::namelist, an object
# that does not exist in RColorBrewer; the valid names live in this
# file's own `namelist`.
if (!palette %in% namelist) {
warning("Unknown palette ", palette)
palette <- "Set1"
}
return(palette)
}
switch(type,
div = divlist,
qual = quallist,
seq = seqlist,
stop("Unknown palette type. Should be 'div', 'qual' or 'seq'",
call. = FALSE)
)[palette]
}
# Returns a palette *function*: given n, it yields n colours from the
# resolved palette.  For n < 3 custom.pal() deliberately over-fetches (its
# minimum is 3) and warns, so the warning is suppressed and the result is
# truncated back to n.
custom_pal <- function(type = "seq", palette = 1) {
pal <- pal_name(palette, type)
function(n) {
colours <- if (n < 3) {
suppressWarnings(custom.pal(n, pal))
} else {
custom.pal(n, pal)
}
colours[seq_len(n)]
}
}
#' Discrete colour scale backed by the custom palettes defined in this file.
#'
#' @param ... Passed on to ggplot2::discrete_scale().
#' @param type Palette category: "div", "qual" or "seq".
#' @param palette Palette name or numeric index within `type`.
scale_colour_custom <- function(..., type = "seq", palette = 1) {
discrete_scale("colour", "custom", custom_pal(type, palette), ...)
}
#' Discrete fill scale backed by the custom palettes defined in this file.
#'
#' @param ... Passed on to ggplot2::discrete_scale().
#' @param type Palette category: "div", "qual" or "seq".
#' @param palette Palette name or numeric index within `type`.
#' @export
#' @rdname scale_custom
scale_fill_custom <- function(..., type = "seq", palette = 1) {
discrete_scale("fill", "custom", custom_pal(type, palette), ...)
}
| /urban_ggplot_theme_new_formatting.R | no_license | dc0sic/urban_R_theme | R | false | false | 21,065 | r | library(ggplot2)
library(grid)
library(RColorBrewer)
########### Example plots #################
####Bar
##1 color
#ggplot(data = mtcars, mapping = aes(factor(cyl))) +
# geom_bar() +
# ylim(c(0, 50)) +
# labs(title = "Title")
##3 colors
#ggplot(data = mtcars, mapping = aes(x = factor(cyl), fill = factor(cyl))) +
# geom_bar() +
# labs(title = "Title")
##5 colors (stacked)
#ggplot(data = diamonds, mapping = aes(clarity, fill = cut)) +
# geom_bar() +
# scale_y_continuous(expand = c(0, 0), limits = c(0, 15000)) +
# xlab("Clarity") +
# ylab("Count") +
# labs(
# title = "Diamond Clarity",
# subtitle = "Something Informative About Diamonds",
# caption = "The Source of Diamond Data"
# )
##5 colors (dodged)
#ggplot(data = diamonds, mapping = aes(clarity, fill = cut)) +
# geom_bar(position = "dodge") +
# scale_y_continuous(expand = c(0, 0), limits = c(0, 6000)) +
# xlab("Clarity") +
# ylab("Count") +
# labs(
# title = "Diamond Clarity",
# subtitle = "Something Informative About Diamonds",
# caption = "The Source of Diamond Data"
# )
####Scatter
## 1 Color
#ggplot(data = diamonds, mapping = aes(x = carat, y = price)) +
# geom_point() +
# scale_y_continuous(expand = c(0, 0)) +
# labs(title = "Title")
#ggplot(data = diamonds, mapping = aes(x = carat, y = price)) +
# geom_point(alpha = 0.1) +
# scale_y_continuous(expand = c(0, 0)) +
# labs(title = "Title",
# subtitle = "alpha = 0.1 adds transparency to overlapping points")
#ggplot(data = diamonds, mapping = aes(x = carat, y = price)) +
# geom_hex() +
# labs(title = "Title",
# subtitle = "geom_hex adds clarity to overlapping points")
##3 colors
#ggplot(data = mtcars, mapping = aes(x = wt, y = mpg)) +
# geom_point(aes(colour = factor(cyl))) +
# labs(title = "Title")
##9 colors
#dsamp <- diamonds[sample(nrow(diamonds), 1000), ]
#ggplot(data = dsamp, mapping = aes(x = carat, y = price, color = clarity)) +
# geom_point(size = 3) +
# scale_y_continuous(expand = c(0, 0), limits = c(0, 20000)) +
# labs(title = "Title") +
# xlab("Carat") +
# ylab("Price (USD)")
###Line
##3 colors
#library(tidyverse)
#mtcars %>%
# select(mpg, disp, hp, wt) %>%
# gather(-mpg, key = variable, value = value) %>%
# ggplot(mapping = aes(mpg, value, color = variable)) +
# geom_line(size = 1) +
# labs(title = "Title")
###Facet Grid
#ggplot(mtcars, aes(mpg, wt)) +
# geom_point() +
# ggtitle("Title") +
# facet_grid(vs ~ am, margins = TRUE)
###Histogram
#ggplot(data = diamonds, mapping = aes(x = depth)) +
# geom_histogram() +
# scale_y_continuous(expand = c(0, 0)) +
# labs(title = "Title")
####################################
#resize window to 650 px width
#quartz.options(width = 8.33333333333333, height = 5.55555555555556, dpi = 72)
# For windows, uncomment below line (and comment out above line)
windows.options(width = 8.33333333333333, height = 5.55555555555556)
#################### redefine default ggplot theme ###################
theme_new <- theme_set(theme_bw())
theme_new <- theme_update(
line = element_line(colour = "#000000",
size = 0.5,
linetype = 1L,
lineend = "butt"),
rect = element_rect(fill = "#FFFFFF",
colour = "#000000",
size = 0.5,
linetype = 1L),
text = element_text(family = "Lato",
face = "plain",
colour = "#000000",
size = 12L,
hjust = 0.5,
vjust = 0.5,
angle = 0,
lineheight = 0.9,
margin = structure(c(0, 0, 0, 0),
unit = "pt",
valid.unit = 8L,
class = c("margin", "unit")),
debug = FALSE),
title = element_text(family = NULL,
face = NULL,
colour = NULL,
size = 18L,
hjust = NULL,
vjust = NULL,
angle = NULL,
lineheight = NULL,
margin = structure(c(0, 0, 0, 0),
unit = "pt",
valid.unit = 8L,
class = c("margin", "unit")),
debug = FALSE),
axis.text = element_text(family = NULL,
face = NULL,
colour = NULL,
size = 12L,
hjust = NULL,
vjust = NULL,
angle = NULL,
lineheight = NULL,
margin = structure(c(0, 0, 0, 0),
unit = "pt",
valid.unit = 8L,
class = c("margin", "unit")),
debug = FALSE),
strip.text = element_text(family = NULL,
face = "bold",
colour = NULL,
size = 14L,
hjust = NULL,
vjust = NULL,
angle = NULL,
lineheight = NULL,
margin = structure(c(0, 0, 0, 0),
unit = "pt",
valid.unit = 8L,
class = c("margin", "unit")),
debug = FALSE),
axis.line = element_line(colour = NULL,
size = NULL,
linetype = NULL,
lineend = NULL),
axis.text.x = element_text(family = NULL,
face = NULL,
colour = NULL,
size = NULL,
hjust = NULL,
vjust = NULL,
angle = NULL,
lineheight = NULL,
margin = structure(c(0, 0, 0, 0),
unit = "pt",
valid.unit = 8L,
class = c("margin", "unit")),
debug = FALSE),
axis.text.y = element_text(family = NULL,
face = NULL,
colour = NULL,
size = NULL,
hjust = NULL,
vjust = NULL,
angle = NULL,
lineheight = NULL,
margin = structure(c(0, 0, 0, 0),
unit = "pt",
valid.unit = 8L,
class = c("margin", "unit")),
debug = FALSE),
axis.ticks = element_line(colour = NULL,
size = NULL,
linetype = NULL,
lineend = NULL),
axis.title.x = element_text(family = NULL,
face = NULL,
colour = NULL,
size = NULL,
hjust = NULL,
vjust = NULL,
angle = NULL,
lineheight = NULL,
margin = structure(c(10, 0, 0, 0),
unit = "pt",
valid.unit = 8L,
class = c("margin", "unit")),
debug = FALSE),
axis.title.y = element_text(family = NULL,
face = NULL,
colour = NULL,
size = NULL,
hjust = NULL,
vjust = NULL,
angle = NULL,
lineheight = NULL,
margin = structure(c(0, 10, 0, 0),
unit = "pt",
valid.unit = 8L,
class = c("margin", "unit")),
debug = FALSE),
axis.ticks.length = unit(4L, "points"),
legend.background = element_blank(),
legend.spacing = unit(8L, "points"),
legend.key = element_rect(fill = NULL,
colour = NULL,
size = 0L,
linetype = NULL),
legend.key.size = unit(10L, "points"),
legend.key.height = NULL,
legend.key.width = NULL,
legend.text = element_text(family = NULL,
face = NULL,
colour = NULL,
size = 12L,
hjust = NULL,
vjust = NULL,
angle = NULL,
lineheight = NULL,
margin = structure(c(0, 0, 0, 0),
unit = "pt",
valid.unit = 8L,
class = c("margin", "unit")),
debug = FALSE),
legend.text.align = NULL,
legend.title = element_blank(),
legend.title.align = NULL,
legend.position = "top",
legend.direction = "horizontal",
legend.justification = NULL,
legend.box = "horizontal",
panel.background = element_blank(),
panel.border = element_blank(),
panel.grid.major = element_line(colour = NULL,
size = NULL,
linetype = NULL,
lineend = NULL),
panel.grid.minor = element_line(colour = NULL,
size = NULL,
linetype = NULL,
lineend = NULL),
panel.spacing = unit(2L, "points"))
theme_new <- theme_update(panel.spacing.x = unit(0,"lines"),
panel.spacing.y = unit(0,"lines"),
panel.ontop = FALSE,
strip.background = element_rect(fill = "#dedddd",
colour = NULL,
size = NULL,
linetype = 0L),
strip.text.x = element_text(family = NULL,
face = NULL,
colour = NULL,
size = NULL,
hjust = NULL,
vjust = NULL,
angle = NULL,
lineheight = NULL,
margin = structure(c(0, 0, 0, 0),
unit = "pt",
valid.unit = 8L,
class = c("margin", "unit")),
debug = FALSE),
strip.text.y = element_text(family = NULL,
face = NULL,
colour = NULL,
size = NULL,
hjust = NULL,
vjust = NULL,
angle = NULL,
lineheight = NULL,
margin = structure(c(0, 0, 0, 0),
unit = "pt",
valid.unit = 8L,
class = c("margin", "unit")),
debug = FALSE),
strip.switch.pad.grid = unit(0,"lines"),
strip.switch.pad.wrap = unit(0,"lines"),
plot.background = element_rect(fill = NULL,
colour = NULL,
size = NULL,
linetype = NULL),
plot.title = element_text(family = NULL,
face = NULL,
colour = NULL,
size = NULL,
hjust = 0,
vjust = NULL,
angle = NULL,
lineheight = NULL,
margin = structure(c(0, 0, 0, 0),
unit = "pt",
valid.unit = 8L,
class = c("margin", "unit")),
debug = FALSE),
plot.margin = unit(c(10L, 10L, 10L, 10L), "points"),
axis.line.x = element_line(colour = NULL,
size = NULL,
linetype = NULL,
lineend = NULL),
axis.line.y = element_blank(), axis.title = element_text(family = NULL,
face = "italic",
colour = NULL,
size = 13L,
hjust = NULL,
vjust = NULL,
angle = NULL,
lineheight = NULL,
margin = NULL,
debug = NULL),
axis.ticks.x = element_line(colour = NULL,
size = NULL,
linetype = NULL,
lineend = NULL),
axis.ticks.y = element_blank(),
panel.grid = element_line(colour = NULL,
size = NULL,
linetype = NULL,
lineend = NULL),
panel.grid.major.x = element_blank(),
panel.grid.major.y = element_line(colour = "#DEDDDD",
size = NULL,
linetype = NULL,
lineend = NULL),
panel.grid.minor.x = element_blank(),
panel.grid.minor.y = element_blank())
#############################
#Redefine default discrete colors, up to 9 colors.
scale_colour_discrete <- function(...) scale_colour_custom(..., palette = "Set1")
scale_fill_discrete <- function(...) scale_fill_custom(... , palette = "Set1")
#################### Functions to Define custom colours #####################
divlist <- c("BrBG","PiYG","PRGn","PuOr","RdBu","RdGy","RdYlBu","RdYlGn","Spectral")
quallist <- c("Accent","Dark2","Paired","Pastel1","Pastel2","Set1","Set2","Set3")
seqlist <- c("Blues","BuGn","BuPu","GnBu","Greens","Greys","Oranges","OrRd",
"PuBu","PuBuGn","PuRd","Purples","RdPu","Reds","YlGn","YlGnBu","YlOrBr","YlOrRd")
divnum <- rep(11, length(divlist))
qualnum <- c( 8, 8, 12, 9, 8, 9, 8, 12)
seqnum <- rep(9, length(seqlist))
namelist <- c(divlist,quallist,seqlist)
maxcolours <- c(divnum,qualnum,seqnum)
catlist <- rep(c("div","qual","seq"), c(length(divlist),length(quallist), length(seqlist)))
custom.pal.info <- data.frame(maxcolours = maxcolours, category = catlist, row.names = namelist)
#' Return n colours from a named custom palette.
#'
#' Mirrors RColorBrewer::brewer.pal(): `name` must appear in the file-level
#' `namelist`; n is clamped to [3, maxcolours[name]] with a warning on
#' either side.  Only "Set1" and "Set2" have actual colour definitions;
#' any other registered name yields NULL, as in the original switch().
#'
#' @param n Number of colours requested.
#' @param name Palette name (e.g. "Set1", "Set2").
#' @return Character vector of upper-case hex colours, or NULL.
custom.pal <- function(n, name){
if (!(name %in% namelist)) {
stop(paste(name, "is not a valid palette name for custom.pal\n"))
}
if (n < 3) {
warning("minimal value for n is 3, returning requested palette with 3 different levels\n")
return(custom.pal(3, name))
}
if (n > maxcolours[which(name == namelist)]) {
warning(paste("n too large, allowed maximum for palette",name,"is", maxcolours[which(name == namelist)]),
"\nReturning the palette you asked for with that many colours\n")
return(custom.pal(maxcolours[which(name == namelist)], name))
}
# Base colours for "Set1".  Round-trip through col2rgb()/rgb() so the
# returned hex strings are byte-identical to the ones the original 60-line
# rgb() switch produced (rgb() emits upper-case hex).
set1_base <- c("#1696d2", "#fdbf11", "#000000", "#ec008b", "#d2d2d2",
"#55B748", "#5c5859", "#db2b27", "#761548")
set1 <- vapply(set1_base, function(cl) {
v <- col2rgb(cl)
rgb(v[1], v[2], v[3], maxColorValue = 255)
}, character(1), USE.NAMES = FALSE)
set2 <- rgb(c(154, 21, 112), c(62, 107, 130), c(37, 144, 89),
maxColorValue = 255)
switch(name,
Set1 = set1[seq_len(n)],
# Set2 only defines up to 3 levels; larger n falls through to NULL
# exactly as the original switch() did.
Set2 = switch(n, set2[1], set2[seq_len(2)], set2[seq_len(3)])
)
}
#' Resolve a palette specification to a palette name.
#'
#' @param palette Either a palette name (character) or a numeric index into
#'   the palette list for the given type.
#' @param type One of "div", "qual" or "seq"; only used when `palette` is
#'   numeric.
#' @return A single palette name.  Unknown character names fall back to
#'   "Set1" with a warning.
pal_name <- function(palette, type) {
if (is.character(palette)) {
# Bug fix: this previously checked RColorBrewer:::namelist, an object
# that does not exist in RColorBrewer; the valid names live in this
# file's own `namelist`.
if (!palette %in% namelist) {
warning("Unknown palette ", palette)
palette <- "Set1"
}
return(palette)
}
switch(type,
div = divlist,
qual = quallist,
seq = seqlist,
stop("Unknown palette type. Should be 'div', 'qual' or 'seq'",
call. = FALSE)
)[palette]
}
# Returns a palette *function*: given n, it yields n colours from the
# resolved palette.  For n < 3 custom.pal() deliberately over-fetches (its
# minimum is 3) and warns, so the warning is suppressed and the result is
# truncated back to n.
custom_pal <- function(type = "seq", palette = 1) {
pal <- pal_name(palette, type)
function(n) {
colours <- if (n < 3) {
suppressWarnings(custom.pal(n, pal))
} else {
custom.pal(n, pal)
}
colours[seq_len(n)]
}
}
#' Discrete colour scale backed by the custom palettes defined in this file.
#'
#' @param ... Passed on to ggplot2::discrete_scale().
#' @param type Palette category: "div", "qual" or "seq".
#' @param palette Palette name or numeric index within `type`.
scale_colour_custom <- function(..., type = "seq", palette = 1) {
discrete_scale("colour", "custom", custom_pal(type, palette), ...)
}
#' Discrete fill scale backed by the custom palettes defined in this file.
#'
#' @param ... Passed on to ggplot2::discrete_scale().
#' @param type Palette category: "div", "qual" or "seq".
#' @param palette Palette name or numeric index within `type`.
#' @export
#' @rdname scale_custom
scale_fill_custom <- function(..., type = "seq", palette = 1) {
discrete_scale("fill", "custom", custom_pal(type, palette), ...)
}
|
\name{exampleRAD}
\alias{exampleRAD}
\alias{exampleRAD_mapping}
\alias{Msi01genes}
\docType{data}
\title{
Miniature Datasets for Testing polyRAD Functions
}
\description{
\code{exampleRAD} and \code{exampleRAD_mapping} are two very small
simulated \code{"\link{RADdata}"} datasets for testing polyRAD
functions. Each has four loci. \code{exampleRAD} is a
natural population of 100 individuals with a mix of diploid and tetraploid
loci, with 80 individuals diploid and 20 individuals triploid.
\code{exampleRAD_mapping} is a diploid BC1 mapping population with two parents
and 100 progeny.
\code{Msi01genes} is a \code{"RADdata"} object with 585 taxa and 24 loci,
containing real data from \emph{Miscanthus sinensis}, obtained by using
\code{\link{VCF2RADdata}} on the file Msi01genes.vcf. Most individuals
in \code{Msi01genes} are diploid, with three haploids and one triploid.
}
\usage{
data(exampleRAD)
data(exampleRAD_mapping)
data(Msi01genes)
}
\format{
See the format described in \code{"\link{RADdata}"}.
}
\source{
Randomly generated using a script available in
polyRAD/extdata/simulate_rad_data.R.
\emph{M. sinensis} sequencing data available at
\url{https://www.ncbi.nlm.nih.gov/bioproject/PRJNA207721}, with full genotype
calls at \doi{10.13012/B2IDB-1402948_V1}.
}
\examples{
data(exampleRAD)
exampleRAD
data(exampleRAD_mapping)
exampleRAD_mapping
data(Msi01genes)
Msi01genes
}
\keyword{datasets}
| /man/exampleRAD.Rd | no_license | lvclark/polyRAD | R | false | false | 1,419 | rd | \name{exampleRAD}
\alias{exampleRAD}
\alias{exampleRAD_mapping}
\alias{Msi01genes}
\docType{data}
\title{
Miniature Datasets for Testing polyRAD Functions
}
\description{
\code{exampleRAD} and \code{exampleRAD_mapping} are two very small
simulated \code{"\link{RADdata}"} datasets for testing polyRAD
functions. Each has four loci. \code{exampleRAD} is a
natural population of 100 individuals with a mix of diploid and tetraploid
loci, with 80 individuals diploid and 20 individuals triploid.
\code{exampleRAD_mapping} is a diploid BC1 mapping population with two parents
and 100 progeny.
\code{Msi01genes} is a \code{"RADdata"} object with 585 taxa and 24 loci,
containing real data from \emph{Miscanthus sinensis}, obtained by using
\code{\link{VCF2RADdata}} on the file Msi01genes.vcf. Most individuals
in \code{Msi01genes} are diploid, with three haploids and one triploid.
}
\usage{
data(exampleRAD)
data(exampleRAD_mapping)
data(Msi01genes)
}
\format{
See the format described in \code{"\link{RADdata}"}.
}
\source{
Randomly generated using a script available in
polyRAD/extdata/simulate_rad_data.R.
\emph{M. sinensis} sequencing data available at
\url{https://www.ncbi.nlm.nih.gov/bioproject/PRJNA207721}, with full genotype
calls at \doi{10.13012/B2IDB-1402948_V1}.
}
\examples{
data(exampleRAD)
exampleRAD
data(exampleRAD_mapping)
exampleRAD_mapping
data(Msi01genes)
Msi01genes
}
\keyword{datasets}
|
##############################################################################
########## EDA
##############################################################################
# Exploratory analysis of the Supreme Court citation network.
# Assumes ERCM_MPLE_cl.RData supplies scc1 (per-case metadata, one row per
# opinion), adjacency.matrix (directed citation matrix, rows cite columns)
# and mq.matrix (pairwise ideological distances) -- TODO confirm.
load("ERCM_MPLE_cl.RData")
# Number of new cases every year
counts<- table(scc1$year)
barplot(counts, main="Number of new Cases every Year", xlab="Year", ylab="Frequency")
################# outdegree distribution
# Outdegree = citations made by each case (row sums of the adjacency matrix).
library(ggplot2)
outdegree.plot<- qplot(rowSums(adjacency.matrix), geom="histogram", binwidth = 1,
main = "Outdegree Distribution",
xlab = "Outdegree", ylab="Frequency",
fill=I("lightblue"),
col=I("black"), ylim=c(0,1000),
xlim=c(0,50)) + theme(axis.text=element_text(size=24),axis.title=element_text(size=26),
plot.title=element_text(size=28, face='bold', hjust=0.5))
# Maximum outdegree (printed when run interactively).
max(rowSums(adjacency.matrix))
################# indegree distribution
# Indegree = citations received by each case (column sums).
indegree.plot<- qplot(colSums(adjacency.matrix), geom="histogram", binwidth = 1,
main = "Indegree Distribution",
xlab = "Indegree", ylab="Frequency",
fill=I("lightblue"),
col=I("black"), ylim=c(0,1000),
xlim=c(0,50)) + theme(axis.text=element_text(size=24),axis.title=element_text(size=26),
plot.title=element_text(size=28, face='bold', hjust=0.5))
max(colSums(adjacency.matrix))
# Stack the two histograms vertically in one figure.
library(gridExtra)
grid.arrange(indegree.plot, outdegree.plot, ncol=1, nrow=2)
###################################################################
### Table 1 in Paper
## number of ties
sum(adjacency.matrix) # 112939
# timepoints
max(scc1$id) # 2645
# number cases
dim(adjacency.matrix)[1] #10020
# number mutual ties (element-wise product with the transpose counts
# reciprocated citations)
sum(adjacency.matrix*t(adjacency.matrix)) #56
### number triangles
library(statnet)
summary(adjacency.matrix ~ triangle) # 252544
########################
## cases in each era
# Raw case counts per chief-justice era; the divisor in each "cases per
# year" line is the era's length in years.
### hughes
hughes<- sum(scc1$Hughes==1) # 628
# cases per year
hughes/5 # 125.6
### stone
stone<- sum(scc1$Stone==1) # 756
# cases per year
stone/5 #151.2
### vinson
vinson<- sum(scc1$Vinson==1) #789
# cases per year
vinson/8 #98.25
### warren
warren<- sum(scc1$Warren==1) #2149
# cases per year
warren/17 # 126.41
### burger
burger<- sum(scc1$Burger==1) #2805
burger/18 # 155.06
### rehnquist
rehnquist<- sum(scc1$Rehnquist==1) # 2022
rehnquist/19 #106.42
### roberts
roberts<- sum(scc1$Roberts==1) # 871
roberts/10 #87.1
# Some additional exploration
cid <- which(scc1$usCite == '542 U.S. 507') # Getting row/col for specific case (this one is Hamdi v Rumsfeld)
mqs <- mq.matrix[which(adjacency.matrix[cid,] == 1), cid] # Getting all the ideological distances to the cited opinions
range(mqs)
# Change first argument to subset data (currently rehnquist court); change function to get statistic of interest (currently range)
# NOTE(review): since range() returns length 2, sapply() yields a 2 x n
# matrix here; density() then treats it as one flat vector of values.
mqdist <- sapply(which(scc1$Rehnquist == 1), function(x){range(mq.matrix[which(adjacency.matrix[x,] == 1), x])})
plot(density(mqdist, na.rm = T)) # Plotting
abline(v = range(mqs))
| /RCode/EDA.R | no_license | desmarais-lab/Supreme_Court_Citation_Network | R | false | false | 3,178 | r | ##############################################################################
##############################################################################
########## EDA
##############################################################################
## loads scc1 (case-level data), adjacency.matrix (directed citations) and
## mq.matrix (used below) into the workspace
load("ERCM_MPLE_cl.RData")
# Number of new cases every year
counts<- table(scc1$year)
barplot(counts, main="Number of new Cases every Year", xlab="Year", ylab="Frequency")
################# outdegree distribution
## outdegree = citations MADE by a case (row sums of the adjacency matrix)
library(ggplot2)
outdegree.plot<- qplot(rowSums(adjacency.matrix), geom="histogram", binwidth = 1,
main = "Outdegree Distribution",
xlab = "Outdegree", ylab="Frequency",
fill=I("lightblue"),
col=I("black"), ylim=c(0,1000),
xlim=c(0,50)) + theme(axis.text=element_text(size=24),axis.title=element_text(size=26),
plot.title=element_text(size=28, face='bold', hjust=0.5))
max(rowSums(adjacency.matrix))
################# indegree distribution
## indegree = citations RECEIVED by a case (column sums)
indegree.plot<- qplot(colSums(adjacency.matrix), geom="histogram", binwidth = 1,
main = "Indegree Distribution",
xlab = "Indegree", ylab="Frequency",
fill=I("lightblue"),
col=I("black"), ylim=c(0,1000),
xlim=c(0,50)) + theme(axis.text=element_text(size=24),axis.title=element_text(size=26),
plot.title=element_text(size=28, face='bold', hjust=0.5))
max(colSums(adjacency.matrix))
library(gridExtra)
## stack the two degree histograms vertically
grid.arrange(indegree.plot, outdegree.plot, ncol=1, nrow=2)
###################################################################
### Table 1 in Paper
## number of ties
sum(adjacency.matrix) # 112939
# timepoints
max(scc1$id) # 2645
# number cases
dim(adjacency.matrix)[1] #10020
# number mutual ties
## NOTE(review): sum(A * t(A)) counts each reciprocated dyad once per direction
sum(adjacency.matrix*t(adjacency.matrix)) #56
### number triangles
library(statnet)
summary(adjacency.matrix ~ triangle) # 252544
########################
## cases in each era
### hughes
hughes<- sum(scc1$Hughes==1) # 628
# cases per year
hughes/5 # 125.6
### stone
stone<- sum(scc1$Stone==1) # 756
# cases per year
stone/5 #151.2
### vinson
vinson<- sum(scc1$Vinson==1) #789
# cases per year
vinson/8 #98.25
### warren
warren<- sum(scc1$Warren==1) #2149
# cases per year
warren/17 # 126.41
### burger
burger<- sum(scc1$Burger==1) #2805
burger/18 # 155.06
### rehnquist
rehnquist<- sum(scc1$Rehnquist==1) # 2022
rehnquist/19 #106.42
### roberts
roberts<- sum(scc1$Roberts==1) # 871
roberts/10 #87.1
# Some additional exploration
cid <- which(scc1$usCite == '542 U.S. 507') # Getting row/col for specific case (this one is Hamdi v Rumsfeld)
mqs <- mq.matrix[which(adjacency.matrix[cid,] == 1), cid] # Getting all the ideological distances to the cited opinions
range(mqs)
# Change first argument to subset data (currently rehnquist court); change function to get statistic of interest (currently range)
mqdist <- sapply(which(scc1$Rehnquist == 1), function(x){range(mq.matrix[which(adjacency.matrix[x,] == 1), x])})
plot(density(mqdist, na.rm = T)) # Plotting
abline(v = range(mqs))
|
################################################################################################################
## detach specific loaded package
################################################################################################################
## Detach a single attached package from the search path (repeatedly, in case
## it was attached more than once). `pkg` may be a bare symbol (default) or a
## character string when `character.only = TRUE`, mirroring library()'s API.
detach_package <- function(pkg, character.only = FALSE) {
  if (!character.only) {
    # capture the unquoted package name as a string
    pkg <- deparse(substitute(pkg))
  }
  target <- paste("package", pkg, sep = ":")
  # keep detaching until no entry for this package remains on the search path
  while (target %in% search()) {
    detach(target, unload = TRUE, character.only = TRUE)
  }
}
################################################################################################################
## detach all loaded packages
################################################################################################################
## Detach every attached package except the standard base set, leaving the
## search path in a fresh-session-like state.
detachAllPackages <- function() {
  ## the packages attached in every vanilla R session -- never detach these
  keep <- c("package:stats",
            "package:graphics",
            "package:grDevices",
            "package:utils",
            "package:datasets",
            "package:methods",
            "package:base")
  ## search-path entries that are package attachments (names starting "package:")
  attached <- search()[grepl("^package:", search())]
  extras <- setdiff(attached, keep)
  if (length(extras) > 0) {
    lapply(extras, detach, character.only = TRUE)
  }
}
################################################################################################################
## Obtain table from mySQL databases using Sting's account
################################################################################################################
## Returns the full contents of `table_name` in `database` as a data.frame.
## SECURITY NOTE(review): credentials are hard-coded in source -- move them to a
## config file or environment variables. `table_name` is pasted directly into
## the SQL string, so call only with trusted table names (SQL injection risk).
get_sting_table <- function(database, table_name){
  library(RMySQL)
  drv <- dbDriver("MySQL")
  dbcon <- dbConnect(MySQL(), user="sting", password="Pass1234", host = 'bioserv.clsnet.intranet', dbname = database, port = 3307)
  rs1 = dbSendQuery(dbcon, paste0('select * from ', table_name))
  df_out <- dbFetch(rs1, n = -1)  # n = -1: fetch all remaining rows
  # dbListConnections(dbDriver( drv = "MySQL"))
  suppressWarnings(dbDisconnect(dbcon))
  # lapply(dbListConnections(dbDriver( drv = "MySQL")), dbDisconnect)
  df_out
}
################################################################################################################
## Obtain query from mySQL databases using Sting's account
################################################################################################################
## Runs an arbitrary SQL `query` against `database` and returns the result as a
## data.frame. SECURITY NOTE(review): credentials are hard-coded in source --
## move them to a config file or environment variables.
get_sting_query <- function(database, query){
  library(RMySQL)
  drv <- dbDriver("MySQL")
  dbcon <- dbConnect(MySQL(), user="sting", password="Pass1234", host = 'bioserv.clsnet.intranet', dbname = database, port = 3307)
  rs1 = dbSendQuery(dbcon, query)
  df_out <- dbFetch(rs1, n = -1)  # n = -1: fetch all remaining rows
  # dbListConnections(dbDriver( drv = "MySQL"))
  suppressWarnings(dbDisconnect(dbcon))
  # lapply(dbListConnections(dbDriver( drv = "MySQL")), dbDisconnect)
  df_out
}
################################################################################################################
## Obtain table from mySQL databases using Ryan's account
################################################################################################################
## Same as get_sting_table() but with the 'jwang' account. Returns the full
## contents of `table_name` in `database` as a data.frame.
## SECURITY NOTE(review): hard-coded credentials; `table_name` is string-pasted
## into the SQL (injection risk) -- trusted inputs only.
get_ryan_table <- function(database, table_name){
  library(RMySQL)
  drv <- dbDriver("MySQL")
  dbcon <- dbConnect(MySQL(), user="jwang", password="Pass1234", host = 'bioserv.clsnet.intranet', dbname = database, port = 3307)
  rs1 = dbSendQuery(dbcon, paste0('select * from ', table_name))
  df_out <- dbFetch(rs1, n = -1)  # n = -1: fetch all remaining rows
  # dbListConnections(dbDriver( drv = "MySQL"))
  suppressWarnings(dbDisconnect(dbcon))
  # lapply(dbListConnections(dbDriver( drv = "MySQL")), dbDisconnect)
  df_out
}
################################################################################################################
## Obtain query from mySQL databases using Ryan's account
################################################################################################################
## Same as get_sting_query() but with the 'jwang' account. Runs `query` against
## `database` and returns the result as a data.frame.
## SECURITY NOTE(review): hard-coded credentials -- move to config/env vars.
get_ryan_query <- function(database, query){
  library(RMySQL)
  drv <- dbDriver("MySQL")
  dbcon <- dbConnect(MySQL(), user="jwang", password="Pass1234", host = 'bioserv.clsnet.intranet', dbname = database, port = 3307)
  rs1 = dbSendQuery(dbcon, query)
  df_out <- dbFetch(rs1, n = -1)  # n = -1: fetch all remaining rows
  # dbListConnections(dbDriver( drv = "MySQL"))
  suppressWarnings(dbDisconnect(dbcon))
  # lapply(dbListConnections(dbDriver( drv = "MySQL")), dbDisconnect)
  df_out
}
################################################################################################################
## Compute tnt (time to next treatment) from therapy data of a SINGLE subject, using the line numbers
## start_line and end_line.
## input:
##    1. df_therapy_by_subject, a data.frame of therapy of a single subject. It MUST contain these columns:
##       'masterdeid', 'line_num', 'regimen_start', 'deathdate', 'lastcontactdate'.
##    2. start_line, line number of the start line.
##    3. end_line, line number of the end line; it may also be 'max', equivalent to overall survival from
##       start of the start line.
## output:
##    1. tnt_event, a data.frame containing tnt and event columns with a single row.
##    2. df_therapy_by_subject, the input data.frame with tnt and event columns appended (same value on
##       every row).
## rule:
##    1. event is either start of the end_line, or death when the end_line is not available; censored when
##       no deathdate is available.
##    2. if event, tnt is the time between start of start_line and start of end_line (or death);
##       if censored, tnt is the time between start of start_line and the last contact date.
## FIXES vs previous version:
##    * '|' -> '||' in the start-point guard: with '|', a missing start_line produced a zero-length if()
##      condition (and a duplicated one a length-2 condition), crashing instead of returning NA.
##    * the triplicated death-vs-censoring fallback is factored into one local helper.
##    * removed the unused library(plyr) (dlply appeared only in old debug comments).
################################################################################################################
get_tnt <- function(df_therapy_by_subject, start_line = 1, end_line = 'max'){
  regimen_start <- as.Date(df_therapy_by_subject$regimen_start, format = '%Y-%m-%d')
  deathdate <- as.Date(df_therapy_by_subject$deathdate, format = '%Y-%m-%d')
  lastcontactdate <- as.Date(df_therapy_by_subject$lastcontactdate, format = '%Y-%m-%d')
  line_num <- df_therapy_by_subject$line_num
  ## fallback end point at the last observed line: death if a death date is
  ## recorded there (event = 1), otherwise censoring at last contact (event = 0)
  followup_endpoint <- function(){
    end_idx <- which.max(line_num)
    if(!is.na(deathdate[end_idx])){
      list(endpoint = deathdate[end_idx], event = 1)
    }else{
      list(endpoint = lastcontactdate[end_idx], event = 0)
    }
  }
  ## start point: start of start_line; NA when that line is absent, duplicated or undated
  start_idx <- which(line_num == start_line)
  if(length(start_idx) != 1 || is.na(regimen_start[start_idx])){
    startpoint <- NA
  }else{
    startpoint <- regimen_start[start_idx]
  }
  ## end point
  if(end_line == 'max'){
    ep <- followup_endpoint()
  }else{
    end_idx <- which(line_num == end_line)
    if(length(end_idx) != 1 || is.na(regimen_start[end_idx])){
      ## requested end line is absent/undated: fall back to death / last contact
      ep <- followup_endpoint()
    }else{
      ep <- list(endpoint = regimen_start[end_idx], event = 1)
    }
  }
  endpoint <- ep$endpoint
  event <- ep$event
  ## any remaining error (e.g. NA start point) yields tnt = NA; filter downstream
  tnt <- tryCatch(endpoint - startpoint + 1, error = function(e)return(NA))
  tnt_event <- data.frame(tnt = as.numeric(tnt), event = event)
  df_therapy_by_subject <- data.frame(df_therapy_by_subject, tnt_event)
  list(tnt_event = tnt_event, df_therapy_by_subject = df_therapy_by_subject)
}
################################################################################################################
## Obtain therapy combinations from start_line to end_line.
## input:
##    1. df_therapy_by_subject, a data.frame of therapy of a single subject. It MUST contain these columns:
##       'masterdeid', 'line_num'; optional for tnt calculation: 'regimen_start', 'deathdate',
##       'lastcontactdate'.
##    2. start_line, line number of the start line.
##    3. end_line, line number of the end line; it may also be 'max', in which case the last line is
##       included. An explicit numeric end_line is EXCLUSIVE (drugs of the end line are not counted).
## output: a character string of all unique drugs used between start_line and end_line, sorted and
##       collapsed with ','; NA when no treatment falls in the range.
## FIX: the 'max' branch previously used which.max(line_num), which returns the POSITION of the maximum
## and only equals the maximum line number when lines happen to be numbered 1..n; lines numbered e.g.
## c(2, 3) silently dropped the last line. Now uses max(line_num).
################################################################################################################
get_therapy <- function(df_therapy_by_subject, start_line, end_line){
  therapy <- df_therapy_by_subject$therapies
  line_num <- df_therapy_by_subject$line_num
  if(end_line == 'max'){
    end_line <- max(line_num)
    ## inclusive of the last line
    lines_idx <- which(line_num >= start_line & line_num <= end_line)
  }else{
    ## exclusive of the explicit end line
    lines_idx <- which(line_num >= start_line & line_num < end_line)
  }
  if(length(lines_idx) == 0){
    cat('no treatment is found between', start_line, 'and', end_line, 'inclusively. NA will be returned as therapy\n')
    out_therapy <- NA
  }else{
    ## merge all per-line drug strings, then dedupe and sort the individual drugs
    out_therapy <- paste(therapy[lines_idx], collapse = ',')
    out_therapy <- paste(sort(unique(strsplit(out_therapy, ',')[[1]])), collapse = ',')
  }
  out_therapy
}
################################################################################################################
## A wrapper function to obtain therapies and tnts for multiple subjects using get_tnt() and get_therapy().
## input:
##    1. df_therapy, a data.frame of therapy of one or more subjects. It MUST contain these columns:
##       'masterdeid', 'line_num', 'regimen_start', 'deathdate', 'lastcontactdate'.
##    2. start_line, line number of the start line.
##    3. end_line, line number of the end line; may be 'max' (last line included).
## output:
##    1. df_therapy_out, a data.frame with columns masterdeid, therapies, tnt, event -- one row per
##       subject, ordered by masterdeid.
## NOTE: rewritten with base split()/lapply(); plyr::dlply added nothing here, so the plyr dependency
## is dropped.
################################################################################################################
get_therapy_tnt_df <- function(df_therapy, start_line = 1, end_line = 'max'){
  ## one summary row per subject
  ls_therapy <- lapply(split(df_therapy, df_therapy$masterdeid), function(x){
    tnt_event <- get_tnt(x, start_line = start_line, end_line = end_line)$tnt_event
    therapies <- get_therapy(x, start_line = start_line, end_line = end_line)
    data.frame(masterdeid = x$masterdeid[1], therapies = therapies, tnt_event)
  })
  df_therapy_out <- do.call(rbind, ls_therapy)
  df_therapy_out <- data.frame(df_therapy_out)
  rownames(df_therapy_out) <- NULL
  df_therapy_out
}
################################################################################################################
## Compute tnt of each line from therapy data of one subject.
## input:
##    1. df_therapy_by_subject, a data.frame of therapy for ONE subject. It MUST contain the columns
##       'line_num', 'regimen_start', 'deathdate', 'lastcontactdate' (dates as 'YYYY-MM-DD' strings).
## output:
##    1. df_therapy_by_subject, the input data.frame with per-line tnt and event columns appended.
## rule:
##    1. event is either start of the next line, or death when there is no next line;
##       censored when no deathdate is available.
##    2. if event, tnt is the gap from the current line's start to the next line's start (or to death);
##       if censored, tnt runs to the last contact date. All gaps are inclusive (+1 day).
##    3. if the next line is missing while not the max line, NA is returned for that line.
## NOTE: rewritten with base lapply(); the plyr dependency (llply) was unnecessary and is dropped.
################################################################################################################
get_tnt_by_line <- function(df_therapy_by_subject){
  regimen_start <- as.Date(df_therapy_by_subject$regimen_start, format = '%Y-%m-%d')
  deathdate <- as.Date(df_therapy_by_subject$deathdate, format = '%Y-%m-%d')
  lastcontactdate <- as.Date(df_therapy_by_subject$lastcontactdate, format = '%Y-%m-%d')
  line_num <- df_therapy_by_subject$line_num
  maxline <- max(line_num)
  ls_tnt_event <- lapply(df_therapy_by_subject$line_num, function(line){
    line_idx <- which(line_num == line)
    if(line < maxline){
      ## event characterized by the start of the next line of therapy
      nextline_idx <- which(line_num == (line + 1))
      if(length(nextline_idx) != 1){
        ## next line absent or duplicated: cannot compute this line's tnt
        tnt <- NA
        event <- NA
      }else{
        tnt <- regimen_start[nextline_idx] - regimen_start[line_idx] + 1
        event <- 1
      }
    }else if(!is.na(deathdate[line_idx])){
      ## last line with a recorded death: event
      tnt <- deathdate[line_idx] - regimen_start[line_idx] + 1
      event <- 1
    }else{
      ## last line, still alive: censored at last contact date
      tnt <- lastcontactdate[line_idx] - regimen_start[line_idx] + 1
      event <- 0
    }
    data.frame(tnt = as.numeric(tnt), event = event)
  })
  tnt_event <- do.call(rbind, ls_tnt_event)
  data.frame(df_therapy_by_subject, tnt_event)
}
################################################################################################################
## Compute tnt and event by single line in batch.
## A wrapper that applies get_tnt_by_line() to every subject (grouped by masterdeid) and stacks the
## results. Rewritten with base split()/lapply() to drop the plyr dependency.
################################################################################################################
get_tnt_df_by_line <- function(df_therapy){
  ## one augmented data.frame per subject, then stack and renumber rows
  ls_therapy <- lapply(split(df_therapy, df_therapy$masterdeid), get_tnt_by_line)
  df_therapy <- do.call(rbind, ls_therapy)
  rownames(df_therapy) <- NULL
  df_therapy
}
################################################################################################################
## Convert drug rule output file format such that columns are drugs in registry_name, rows are masterdeid
## input:
##    1. recommend_table. The long format of drug recommendations
##       column1, masterdeid; column2, registry_name; column3, recommendation.
##
## output:
##    1. df_recommend. A matrix, columns are individual drugs in registry_name (n = 100), rows are subjects'
##       masterdeid.
## Encoding: Benefit = 1, Lack Of Benefit = -1, Indeterminate / DoNotReport = 0;
## drugs with no recommendation for a patient end up as 0 (NA backfilled below).
################################################################################################################
format_recommend <- function(recommend_table){
  # recommend_table <- legacy_table
  library(plyr)
  ## universe of drug names, fetched from the registry lookup table so every
  ## patient row gets the same, sorted column set
  query <- 'select * from cmi_to_registry_agent_lookup'
  reg_cmi_mapping <- get_ryan_query('registry_freeze_18_aug_2015', query)
  # drug_cmi_name <- data.frame(cmi_name = sort(as.character(reg_cmi_mapping$cmi_name)))
  drug_registry_name <- data.frame(registry_name = sort(unique(as.character(reg_cmi_mapping$registry_name))))
  ## numeric encoding of the text recommendations
  recommend_table$prediction_num[recommend_table$recommendation == 'Benefit'] <- 1
  recommend_table$prediction_num[recommend_table$recommendation == 'Lack Of Benefit'] <- -1
  recommend_table$prediction_num[recommend_table$recommendation == 'Indeterminate'
                                 |recommend_table$recommendation == 'DoNotReport'] <- 0
  ## per patient: merge against the full drug universe (all.y = T) so columns align
  ls_byPatient_recommend <- dlply(recommend_table, .variables = 'masterdeid', function(masterdeid){
    out <- data.frame(registry_name = masterdeid$registry_name,
                      prediction = masterdeid$prediction_num)
    out <- merge(out, drug_registry_name, by = 'registry_name', all.y = T)
    out$registry_name <- as.character(out$registry_name)
    out <- out[order(out$registry_name), ]
    dim(out)  # NOTE(review): leftover no-op debug line
    ## @@ some drugs are in different drug-rule groups, remove if duplicated for now, assuming there is no conflict
    if(any(duplicated(out$registry_name))){
      idx_dups <- which(duplicated(out$registry_name))
      out <- out[-idx_dups, ]
    }
    out
  })
  ## stack the per-patient prediction vectors into a masterdeid x drug matrix
  ls_recommend <- llply(ls_byPatient_recommend, function(x){
    as.integer(as.character(x$prediction))
  })
  df_recommend <- do.call(rbind, ls_recommend)
  df_recommend[is.na(df_recommend)] <- 0  # no recommendation -> neither (0)
  colnames(df_recommend) <- drug_registry_name$registry_name
  rownames(df_recommend) <- names(ls_byPatient_recommend)
  df_recommend
}
################################################################################################################
## convert AccessionNumber to masterdeid
################################################################################################################
## Maps lab accession numbers to master patient ids via a lookup fetched from
## the ms1 database (one query per call). Unmatched accession numbers yield NA.
## The value of the final assignment is returned invisibly.
access_to_masterdeid <- function(AccessionNumber){
  query <- 'select ms.accessionnumber, cp.masterpatientid as masterdeid
  from ms1.casepatient cp
  join ms1.ms1case ms on ms.casepatientid=cp.casepatientid;'
  lookup_table <- get_ryan_query('ms1', query)
  masterdeid <- lookup_table$masterdeid[match(AccessionNumber, lookup_table$accessionnumber)]
}
################################################################################################################
## convert cmi_names to registry_names
## cmi to registry: n to 1 mapping. This conversion will NOT yield conflict.
################################################################################################################
## Translates CMI drug names to registry drug names via the lookup table in the
## registry freeze database; unmatched names map to NA. Returned invisibly.
drugname_cmi_to_registry <- function(cmi_names){
  query <- 'select * from cmi_to_registry_agent_lookup'
  reg_cmi_mapping <- get_ryan_query('registry_freeze_18_aug_2015', query)
  registry_name <- reg_cmi_mapping$registry_name[match(cmi_names, reg_cmi_mapping$cmi_name)]
}
################################################################################################################
## convert registry_names to cmi_names
## cmi to registry: n to 1 mapping. This conversion will potentially yield conflict.
################################################################################################################
## Reverse lookup of drugname_cmi_to_registry(). Because several CMI names can
## map to one registry name, match() returns only the FIRST matching cmi_name
## for each input; unmatched names map to NA. Returned invisibly.
drugname_registry_to_cmi <- function(registry_names){
  query <- 'select * from cmi_to_registry_agent_lookup'
  reg_cmi_mapping <- get_ryan_query('registry_freeze_18_aug_2015', query)
  cmi_names <- reg_cmi_mapping$cmi_name[match(registry_names, reg_cmi_mapping$registry_name)]
}
################################################################################################################
## Classify one patient's line of therapy against the drug-rule output.
## input:
##    1. therapy_line, a character vector of drugs (registry_name) used in the line of therapy.
##    2. recommend_line, a named numeric vector of rule output, names are drugs (registry_name):
##       -1 = Lack Of Benefit, 0 = Indeterminate/DoNotReport, 1 = Benefit.
## rule: any received drug flagged Lack Of Benefit (-1)      -> 'unmatched'
##       otherwise any received drug flagged Benefit (1)      -> 'matched'
##       otherwise (only 0s / drugs unknown to the rules)     -> 'neither'
################################################################################################################
is_matched <- function(therapy_line, recommend_line){
  ## recommendations for the drugs actually received
  ## (drugs absent from the rule output come back as NA)
  received_recs <- recommend_line[match(therapy_line, names(recommend_line))]
  if (any(received_recs == -1, na.rm = TRUE)) {
    return('unmatched')   # a single Lack Of Benefit drug overrides everything
  }
  if (sum(received_recs, na.rm = TRUE) == 0) {
    return('neither')     # no Benefit drug among those received
  }
  'matched'
}
################################################################################################################
## Compute matched/unmatched info and add a column to the therapy table.
## input:
##    1. df_therapy: rows are individual cases; must have a 'therapies' column of comma-collapsed drugs.
##    2. df_recommend: MUST be a matrix (colnames of a dataframe will be corrupted). Rows are individual
##       cases in the SAME ORDER as df_therapy; rownames are masterdeid, colnames are drug registry_names.
## output: df_therapy with a character column 'followed' appended
##       ('matched' / 'unmatched' / 'neither' per is_matched()).
## FIXES: vapply(seq_len(n), ..., character(1)) replaces unlist(llply(1:n, ...)) -- drops the plyr
## dependency, guarantees a character result, and no longer iterates c(1, 0) on an empty data.frame.
################################################################################################################
get_matched_df <- function(df_therapy, df_recommend){
  n <- nrow(df_therapy)
  followup <- vapply(seq_len(n), function(i){
    ## drugs this case received, split out of the comma-collapsed string
    therapy <- strsplit(as.character(df_therapy$therapies[i]), ',')[[1]]
    recommend <- df_recommend[i, ]
    names(recommend) <- colnames(df_recommend)
    is_matched(therapy, recommend)
  }, character(1))
  data.frame(df_therapy, followed = followup)
}
################################################################################################################
## Compute matched/unmatched/neither for a single case. Improved counterpart of is_matched() that keys
## on druggroup instead of registry_name.
## input:
##    1. received  <- c('druggroup1', 'druggoup2', ...)
##    2. recommend <- data.frame(druggroup, recommendation)
## rule: any received drug recommended Lack Of Benefit (<0) -> 'unmatched'
##       otherwise any received drug recommended Benefit    -> 'matched'
##       otherwise (only 0s / drug groups unknown to rules) -> 'neither'
################################################################################################################
get_match_binary <- function(received, recommend){
  ## recommendation per received drug group (NA when the rules do not cover it)
  recs <- recommend$recommendation[match(received, recommend$druggroup)]
  verdict <- 'matched'
  if (any(recs < 0, na.rm = TRUE)) {
    verdict <- 'unmatched'   # Lack Of Benefit trumps everything else
  } else if (sum(recs, na.rm = TRUE) == 0) {
    verdict <- 'neither'     # nothing recommended for or against
  }
  verdict
}
################################################################################################################
## Compute a weighted match score for a single case by pairing each received drug group with its rule
## recommendation and its prior weight (weights normalized over the received drugs via convert_weight()).
## input:
##    1. received  <- c('druggroup1', 'druggoup2', ...)
##    2. recommend <- data.frame(druggroup, recommendation)
##    3. wt_star   <- data.frame(druggroup, weight)
##    4. adjust_recommend = c(T, F)  # scale weights by benefit- vs lackOfBenefit-driven multipliers
##    5. if adjust_recommend == T, recom_weight = c(lackofbenefit, benefit)  # sums to 1
## output: match_score, a numeric in [-1, 1].
################################################################################################################
get_match_score <- function(received, recommend, wt_star, adjust_recommend, recom_weight){
  ## look up recommendation and weight for every received drug group;
  ## the zero-length guard only fires when `received` is empty
  rec_idx <- match(received, recommend$druggroup)
  wt_idx <- match(received, wt_star$druggroup)
  if(length(rec_idx) == 0 | length(wt_idx) == 0){
    recommendation <- NA
    weight <- NA
  }else{
    recommendation <- recommend$recommendation[rec_idx]
    weight <- wt_star$weight[wt_idx]
  }
  if(adjust_recommend){
    ## scale each weight by the lack-of-benefit (recom_weight[1]) or
    ## benefit (recom_weight[2]) multiplier; uncovered drugs stay NA
    weight <- sapply(seq_along(weight), function(i){
      if(is.na(recommendation[i])){
        NA
      }else if(recommendation[i] < 0){
        weight[i] * recom_weight[1]
      }else{
        weight[i] * recom_weight[2]
      }
    })
  }
  ## convert_weight() is defined elsewhere in this file -- normalizes the weights
  weight <- convert_weight(weight)
  sum(recommendation * weight, na.rm = TRUE)
}
################################################################################################################
## A wrapper to compute match_score in batches using get_match_score() / get_match_binary(). Only
## suitable for a single drug-rule version at a single weight combination; for method = 'score' the
## raw score is rescaled from [-1, 1] to [0, 1].
## IMPORTANT!! match the order between df_therapy and ls_recommend for input.
## input:
##    1. df_therapy. contains a column named 'druggroups' (comma-collapsed drug groups per case).
##    2. ls_recommend. drug-rule recommendations organized by masterdeid, SAME ORDER as df_therapy;
##       each element has columns 'drugRule' and 'recom'.
##    3. method = c('binary', 'score')
##    4. if method == 'score', wt_star / adjust_recommend / recom_weight are passed to get_match_score().
## output: df_therapy with a 'match_score' column appended.
## FIX: base lapply(seq_len(...)) replaces plyr::llply(1:n, ...) -- the old code silently relied on
## plyr being attached by the caller (this function never loaded it) and iterated c(1, 0) on empty input.
################################################################################################################
get_match_score_df <- function(df_therapy, ls_recommend, method, wt_star, adjust_recommend, recom_weight){
  n_sample <- length(ls_recommend)
  match_scores <- lapply(seq_len(n_sample), function(i){
    received <- strsplit(df_therapy$druggroups[i], ',')[[1]]
    recommend <- data.frame(druggroup = ls_recommend[[i]]$drugRule,
                            recommendation = ls_recommend[[i]]$recom,
                            stringsAsFactors = FALSE)
    if(method == 'score'){
      match_score <- get_match_score(received, recommend, wt_star,
                                     adjust_recommend = adjust_recommend, recom_weight = recom_weight)
      ## raw score lies in [-1, 1]; rescale to [0, 1]
      match_score <- (match_score - -1)/2
    }else if(method == 'binary'){
      match_score <- get_match_binary(received, recommend)
    }
  })
  df_therapy$match_score <- unlist(match_scores)
  df_therapy
}
################################################################################################################
## text processing functions to clean up clinical information
################################################################################################################
## Strip leading and trailing runs of whitespace and/or punctuation from each
## element of x; interior punctuation (e.g. "2/3", "a-b") is preserved.
trim <- function(x) {
  gsub("^(\\s|[[:punct:]])+|([[:punct:]]|\\s)+$", "", x)
}
## clean up unknowns
## Normalize the many spellings of missingness ("NA", "NULL", "unknown...",
## "Not ...") to the single label "Unknown". (Function name keeps its historic
## typo because other cleaners in this file call it.)
## NOTE(review): the NA|NULL alternatives are unanchored, so they also fire on
## substrings inside longer values -- confirm inputs are whole field values.
convert_unkowns <- function(x) {
  gsub('NA|NULL|(^[Uu]nknown.*$)|(^[Nn]ot.*$)', 'Unknown', x)
}
## clean up grade
## Normalize a tumour-grade string: trim edge whitespace/punctuation, collapse
## spaces around '/' (e.g. "2 / 3" -> "2/3"), then map missing-value spellings
## to "Unknown" via convert_unkowns().
convert_grade <- function(x) {
  tidy <- gsub('\\s*/\\s*', '/', trim(x))
  convert_unkowns(tidy)
}
## clean up stage
## consolidate I, II, and IV; only remove trailing numbers for III
## Sub-stages such as 'IA', 'IIB2', 'IVA' collapse to their Roman numeral.
## Stage III is special: when collapseIII is FALSE only TRAILING DIGITS are
## stripped (e.g. 'IIIA2' -> 'IIIA'), keeping the sub-stage letter -- unless a
## bare 'III' already exists in x, in which case the function falls back to
## full collapsing to avoid mixing granularities. Finally, missing-value
## spellings are normalized via convert_unkowns().
convert_stage <- function(x, collapseIII = T) {
  x <- gsub('^I[A-D1-9]+$', 'I', x)
  x <- gsub('^II[A-D1-9]+$', 'II', x)
  if(collapseIII){
    x <- gsub('^III[A-D1-9]+$', 'III', x)
  }else if(any(grepl('^III$', x))){
    cat('detected \'III\', set collapseIII to TRUE\n')
    x <- gsub('^III[A-D1-9]+$', 'III', x)
  }else{
    ## strip trailing digits only, keep the sub-stage letter
    x <- ifelse(grepl('^III[A-D1-9]+$', x), gsub('[1-9]+$', '', x), x)
  }
  x <- gsub('^IV[A-D1-9]+$', 'IV', x)
  x <- convert_unkowns(x)
  x
}
################################################################################################################
## generate the survfit and coxph objects
################################################################################################################
## Fits (1) a multivariable Cox model on ALL columns of df_clinical_info
## (which must contain 'time' and 'event' columns) and (2) a Kaplan-Meier
## survfit stratified only by plot_covariate, for plotting via kmplots().
## Returns a list(fitCox, fitSurv, colid_covariate); the value of the final
## assignment is returned invisibly.
surv_fit <- function(df_clinical_info, plot_covariate){
  library(survival)
  # df_clinical_info <- df_plot1
  # plot_covariate <- 'followed'
  ## refuse to fit if any 'Unknown' level slipped through upstream cleaning
  is_unknown <- any(sapply(df_clinical_info, function(x) grepl('Unknown', x)))
  if(is_unknown){
    stop('unknowns detected, please remove the corresponding samples and rerun!\n')
  }
  ## with covariates intended for adjustment
  fitCox <- coxph(Surv(time, event) ~ ., data = df_clinical_info)
  ## KM plot just for 'followed'
  surv_formula <- as.formula(paste('Surv(time, event) ~ ', plot_covariate, sep = ''))
  fitSurv <- survfit(surv_formula, data = df_clinical_info)
  ## need to pass colid_covariate to the kmplots function to locate HR and p-values from the coef table
  colid_covariate <- which(colnames(df_clinical_info) == plot_covariate)
  z <- list(fitCox = fitCox,
            fitSurv = fitSurv,
            colid_covariate = colid_covariate)
}
################################################################################################################
## plot KM curves using the surv_fit output
## input:
##    1. surv_fit, output from surv_fit(): fitCox, fitSurv, and colid_covariate (the column index of the
##       target covariate in the clinical data).
##    2. main, plot title.
##    3. display_coef_idx: when the target covariate has more than two levels, the row index/indices of
##       summary(fitCox)$coef to display in the legend.
##    4. simplify: suppress the legend and median annotations; drawSegment: still draw median segments
##       in simplified mode; xlim: x-axis limits.
## FIXES vs previous version:
##    * `surv_fit$fixCox` typo corrected to `surv_fit$fitCox` (the old code silently assigned NULL).
##    * confidence interval now indexed as conf.int[rowid, 3]/[rowid, 4]; the old flat indices [3]/[4]
##      picked elements column-major and were wrong whenever the Cox model had more than one coefficient.
################################################################################################################
kmplots <- function(
  surv_fit, main, display_coef_idx = NULL, simplify = FALSE, drawSegment = TRUE, xlim = NULL
){
  library(survival)
  ## BUG FIX: was surv_fit$fixCox (typo), which yielded NULL
  fitCox <- surv_fit$fitCox
  fitSurv <- surv_fit$fitSurv
  colid_covariate <- surv_fit$colid_covariate
  fitCox_sum <- summary(fitCox)
  fitSurv_sum <- summary(fitSurv)
  ## prepare for KM plot
  line_colors <- c('red', 'blue', 'green', 'pink', 'orange', 'purple', 'cyan')
  ## strata labels: text after '=' in names like "followed=matched"
  surv_strata <- toupper(sapply(names(fitSurv$strata), function(x)strsplit(x, '=')[[1]][2]))
  n_levels <- length(fitSurv$strata)
  if(n_levels > 7) stop('plot_covaraite has more than 7 levels, plot will be ugly, please check covariates!\n')
  if(n_levels > 2 & is.null(display_coef_idx)){
    stop('more than two levels in the selected covariate, please manually input the row numbers of the desired coeffients to display by setting display_coef_idx!\n')
  }else if(!is.null(display_coef_idx)){
    rowid <- display_coef_idx
  }else{
    ## heuristic: assumes the first two data columns are time/event, so
    ## covariate column k maps to coefficient row k - 2 -- TODO confirm
    rowid <- colid_covariate - 2
  }
  ## start plotting
  par(mar = c(4,4,5,2))
  plot(
    fitSurv, col = line_colors[1:n_levels], lwd = 4,
    mark.time = T,
    xlab = 'Time', ylab = 'Proportion Event Free',
    main = main,
    cex.main = 1, cex = 1, cex.lab = 1,
    xaxs = 'i', yaxs = 'i', # remove extra space between plot and axis
    xlim = xlim
  )
  if(!simplify){
    legend('topright',
           legend = c(paste0(surv_strata, ' (n = ', fitSurv$n, ', event = ', fitSurv_sum$table[, 4], ')'),
                      paste0(
                        'HR = ', round(fitCox_sum$coef[rowid,2], 3),
                        ## BUG FIX: index the covariate's ROW of the conf.int matrix
                        ' (', round(fitCox_sum$conf.int[rowid, 3], 3), '-', round(fitCox_sum$conf.int[rowid, 4], 3),')'
                        #', p-value =', round(fitCox_sum$coef[rowid,5], 3)
                      ),
                      paste0('log-rank test p-value =', round(fitCox_sum$sctest[3], 3))),
           col = c(line_colors[1:n_levels], rep('white', length(rowid) + 1)), lty = 1, lwd = 2, bty='n', cex = 0.8)
    ## add segments and text of median survival time
    x_del_position <- (-1)^(1:7) * 0.1
    # y_del_position <- floor(((1:7)-1)/2) * 0.1
    y_del_position <- (1:7) * 0.1 - 0.1
    idx = which(colnames(fitSurv_sum$table) == 'median')
    lapply(1:n_levels, function(x){
      segments(fitSurv_sum$table[x,idx], -0.1, fitSurv_sum$table[x,idx], 0.5, col = line_colors[x], lty = 2, lwd = 2)
      text(fitSurv_sum$table[x,idx] + x_del_position[x], y_del_position[x], round(fitSurv_sum$table[x,idx], 2), col = line_colors[x])
    })
    segments(0, 0.5, max(fitSurv_sum$table[,idx]), 0.5, col = 'black', lty = 2, lwd = 2)
  }else{
    if(drawSegment){
      ## add segments of median survival time (no legend in simplified mode)
      x_del_position <- (-1)^(1:7) * 0.1
      # y_del_position <- floor(((1:7)-1)/2) * 0.1
      y_del_position <- (1:7) * 0.1 - 0.1
      idx = which(colnames(fitSurv_sum$table) == 'median')
      lapply(1:n_levels, function(x){
        segments(fitSurv_sum$table[x,idx], -0.1, fitSurv_sum$table[x,idx], 0.5, col = line_colors[x], lty = 2, lwd = 2)
      })
      segments(0, 0.5, max(fitSurv_sum$table[,idx]), 0.5, col = 'black', lty = 2, lwd = 2)
    }
  }
}
################################################################################################################
## Demographic table generator
## input:
## 1. dat, a dataframe with all demographic covariates to summarize, column names will be
## displayed in the final table, pick your favourite as input
## 2. y_covariate, the target covariate to split as columns, must be two levels
## 3. x_covariate, a vector of demographic covariates of interest
## 4. category_test, whether to carry out proportion test on each category
## output:
## a demographic table
##example function usage:
## get_demogr_table(regova,"firstline_aftercollection",c("Age","Grade","Stage","Race"),TRUE)
################################################################################################################
get_demogr_tina <- function(dat, y_covariate, x_covariate, category_test = TRUE){
  ## Build a demographic summary table as a character matrix.
  ## Args:
  ##   dat           - data.frame holding the y_covariate and x_covariate columns.
  ##   y_covariate   - name of the grouping factor (>= 2 levels); its levels
  ##                   become the table columns.
  ##   x_covariate   - character vector of covariate names to summarize as rows.
  ##   category_test - if TRUE, run prop.test on every level of each
  ##                   categorical covariate.
  ## Returns: character matrix; row 1 is the header (group levels + 'P-value'),
  ##   row 2 the per-group sample sizes, then one block per covariate.
  ##   Numeric covariates: mean (sd) per group, p from t.test (2 groups) or
  ##   one-way ANOVA (>2 groups). Categorical covariates: count(%) per group,
  ##   p from fisher.test (overall) plus optional per-level prop.test.
  y_cov <- dat[, y_covariate]
  x_cov <- dat[, x_covariate]
  n_y_level <- nlevels(y_cov)
  n_table <- table(y_cov)

  ## ---- numeric covariates: mean (sd) and a group-comparison p-value ----
  x_num_idx <- which(sapply(x_cov, is.numeric))
  has_num <- length(x_num_idx) != 0
  if (has_num) {
    x_num <- data.frame(x_cov[, x_num_idx])
    x_num_names <- colnames(x_cov)[x_num_idx]
    colnames(x_num) <- x_num_names
    n_x_num <- ncol(x_num)
    if (n_y_level < 2) {
      stop('y_covariate must have at least two levels to compute the p values.\n')
    } else if (n_y_level == 2) {
      num_p <- round(sapply(1:n_x_num, function(x_idx){
        t.test(x_num[, x_idx] ~ y_cov)$p.value
      }), 4)
    } else {
      ## BUG FIX: round() previously had no digits argument here, which
      ## truncated ANOVA p-values to 0 or 1
      num_p <- round(sapply(1:n_x_num, function(x_idx){
        aov_sum <- summary(aov(x_num[, x_idx] ~ y_cov))
        aov_sum[[1]]$`Pr(>F)`[1]
      }), 4)
    }
    ls_num_tables <- lapply(1:n_x_num, function(x_idx){
      num_mean <- tapply(x_num[, x_idx], y_cov, mean, na.rm = TRUE)
      num_sd <- tapply(x_num[, x_idx], y_cov, sd, na.rm = TRUE)
      paste(num_mean, '(', num_sd, ')')
    })
    names(ls_num_tables) <- x_num_names
    x_num_table <- do.call(rbind, ls_num_tables)
    x_num_table <- cbind(x_num_table, num_p)
    x_num_table <- cbind(rownames(x_num_table), x_num_table)
  }

  ## ---- categorical covariates: count(%) and fisher/prop-test p-values ----
  has_cat <- ncol(x_cov) > length(x_num_idx)
  if (has_cat) {
    if (!has_num) {
      x_cat <- data.frame(x_cov)
      x_cat_names <- colnames(x_cov)
    } else {
      x_cat <- data.frame(x_cov[, -x_num_idx])
      x_cat_names <- colnames(x_cov[, -x_num_idx])
    }
    colnames(x_cat) <- x_cat_names
    n_x_cat <- ncol(x_cat)
    ## one contingency table per covariate, headed by a title row of NAs
    ls_cat_tables <- lapply(1:n_x_cat, function(x_idx){
      tab1 <- table(x_cat[, x_idx], y_cov)
      tab2 <- rbind(NA, tab1)
      rownames(tab2)[1] <- x_cat_names[x_idx]
      tab2
    })
    fisher_p <- sapply(ls_cat_tables, function(x) {
      p_value <- tryCatch(fisher.test(x[-1, ], workspace = 2e8)$p.value,
                          error = function(e) return(NA))
      round(p_value, 4)
    })
    ls_cat_tables <- lapply(1:n_x_cat, function(x){
      nx <- nrow(ls_cat_tables[[x]])
      if (category_test) {
        prop_p <- sapply(2:nx, function(x_row) {
          p_value <- tryCatch(prop.test(ls_cat_tables[[x]][x_row, ], n_table)$p.value,
                              error = function(e) return(NA))
          round(p_value, 4)
        })
        out <- cbind(ls_cat_tables[[x]], c(fisher_p[x], prop_p))
      } else {
        out <- cbind(ls_cat_tables[[x]], c(fisher_p[x], rep(NA, nx - 1)))
      }
      ## blank spacer row between covariate blocks
      rbind(NA, out)
    })
    x_cat_table_tmp <- do.call(rbind, ls_cat_tables)
    x_cat_table <- sapply(1:(ncol(x_cat_table_tmp) - 1), function(x){
      paste0(x_cat_table_tmp[, x], '(', round(x_cat_table_tmp[, x]/n_table[x]*100, 1), '%)')
    })
    x_cat_table <- cbind(x_cat_table, x_cat_table_tmp[, ncol(x_cat_table_tmp)])
    x_cat_table[grep('NA', x_cat_table)] <- NA
    x_cat_table <- cbind(rownames(x_cat_table_tmp), x_cat_table)
  }

  ## ---- assemble; previously errored when either part was absent ----
  n_row <- c('n', paste0(n_table), NA)
  x_table <- rbind(c(NA, levels(y_cov), 'P-value'), n_row)
  if (has_num) x_table <- rbind(x_table, x_num_table)
  if (has_cat) x_table <- rbind(x_table, x_cat_table)
  rownames(x_table) <- NULL
  colnames(x_table) <- NULL
  x_table[grep('^$', x_table)] <- NA
  return(x_table)
}
################################################################################################################
## Format individual subject for drug time line plotting
################################################################################################################
format_drug_line <- function(df_therapy_by_subject, unit = 'month'){
  ## Convert one subject's drug-therapy records into timeline coordinates
  ## relative to the specimen collection date (time 0).
  ## Args:
  ##   df_therapy_by_subject - data.frame with columns masterdeid, registry_name,
  ##     drugstart, drugend, collectiondate, determinationdate ('%Y-%m-%d' strings).
  ##   unit - 'month' (30-day months) or 'day'.
  ## Returns (invisibly): data.frame with start/end/duration points in `unit`,
  ##   plotEndPoint (a 15-day stub when the true duration is unknown or zero),
  ##   and registry_name refactored so earlier-starting drugs plot on top.
  df_therapy_by_subject$drugstart <- as.Date(df_therapy_by_subject$drugstart, format = '%Y-%m-%d')
  df_therapy_by_subject$drugend <- as.Date(df_therapy_by_subject$drugend, format = '%Y-%m-%d')
  df_therapy_by_subject$collectiondate <- as.Date(df_therapy_by_subject$collectiondate, format = '%Y-%m-%d')
  df_therapy_by_subject$determinationdate <- as.Date(df_therapy_by_subject$determinationdate, format = '%Y-%m-%d')
  ## BUG FIX: an unrecognized unit previously left unitToDivide undefined,
  ## producing an obscure "object not found" error later
  if (unit == 'month') {
    unitToDivide <- 30
  } else if (unit == 'day') {
    unitToDivide <- 1
  } else {
    stop("unit must be 'month' or 'day'")
  }
  df_therapy_by_subject <- data.frame(
    masterdeid = df_therapy_by_subject$masterdeid,
    registry_name = df_therapy_by_subject$registry_name,
    collectionPoint = 0,
    drugStartPoint = as.numeric(df_therapy_by_subject$drugstart - df_therapy_by_subject$collectiondate)/unitToDivide,
    drugEndPoint = as.numeric(df_therapy_by_subject$drugend - df_therapy_by_subject$collectiondate)/unitToDivide,
    drugDuration = as.numeric(df_therapy_by_subject$drugend - df_therapy_by_subject$drugstart)/unitToDivide,
    determinationPoint = as.numeric(df_therapy_by_subject$determinationdate - df_therapy_by_subject$collectiondate)/unitToDivide
  )
  ## sort by start then end; base order() replaces plyr::arrange, which was
  ## called here without plyr ever being loaded in this function
  ord <- order(df_therapy_by_subject$drugStartPoint, df_therapy_by_subject$drugEndPoint)
  df_therapy_by_subject <- df_therapy_by_subject[ord, , drop = FALSE]
  rownames(df_therapy_by_subject) <- NULL
  ## unknown duration: missing end date (NA) or a zero-length record
  df_therapy_by_subject$unknownDuration <- is.na(df_therapy_by_subject$drugDuration) |
    df_therapy_by_subject$drugDuration == 0
  ## draw a fixed 15-day stub when the real end point cannot be trusted
  df_therapy_by_subject$plotEndPoint <- ifelse(df_therapy_by_subject$unknownDuration,
                                               df_therapy_by_subject$drugStartPoint + 15/unitToDivide,
                                               df_therapy_by_subject$drugEndPoint)
  ## order factor levels so the earliest-starting drug becomes the last level
  ## (and therefore the topmost row in the ggplot)
  levels_order <- df_therapy_by_subject$registry_name[order(df_therapy_by_subject$drugStartPoint,
                                                            df_therapy_by_subject$drugEndPoint,
                                                            decreasing = TRUE)]
  levels_order <- levels_order[!duplicated(levels_order, fromLast = TRUE)]
  df_therapy_by_subject$registry_name <- factor(df_therapy_by_subject$registry_name,
                                                levels = levels_order)
  invisible(df_therapy_by_subject)
}
################################################################################################################
## plot the drug time line using the output from format_drug_line() as input
################################################################################################################
plot_drug_line <- function(df_format){
  ## Render the per-subject drug timeline built by format_drug_line().
  ## Each drug is a horizontal segment coloured by the unknownDuration flag;
  ## a purple vertical line marks collection (time 0) and a black one the
  ## determination date.
  ## Returns (invisibly) a list with the ggplot object under $plot_drugs.
  library(ggplot2)
  timeline <- ggplot(df_format, aes(colour = unknownDuration)) +
    geom_segment(aes(x = drugStartPoint, xend = plotEndPoint,
                     y = registry_name, yend = registry_name),
                 size = 5) +
    labs(x = 'Time (months)', y = NULL,
         title = paste0('masterdeid: ', df_format$masterdeid[1])) +
    geom_vline(xintercept = 0, color = 'purple') +
    geom_vline(xintercept = df_format$determinationPoint[1], color = 'black') +
    theme(title = element_text(size = 20),
          axis.title = element_text(size = 20),
          axis.text = element_text(size = 15),
          axis.text.x = element_text(angle = 90, hjust = 1),
          panel.grid = element_line(size = 1),
          legend.position = 'none')
  z <- list(plot_drugs = timeline)
}
################################################################################################################
## ovarian cancer registry ion data process appended functions
################################################################################################################
################################################################################################################
## To generate all combinations of weight matrix for given options and druggroups
## input:
## 1. weight_options <- c(1, 0.6, 0.3)
## 2. druggroup <- c('dg1', 'dg2', 'dg3', 'dg4', 'dg5')
## output:
## a matrix. columns are druggroup names, rows are weight combinations.
################################################################################################################
generate_weight_matrix <- function(weight_options, druggroup){
  ## Enumerate every assignment (with repetition) of the candidate weight
  ## values to the drug groups.
  ## Args:
  ##   weight_options - numeric vector of candidate weight values.
  ##   druggroup      - character vector of drug-group names (matrix columns).
  ## Returns: a matrix with one row per weight combination and one column per
  ##   drug group; also prints a short summary via cat().
  library(gtools)
  n_opts <- length(weight_options)
  n_groups <- length(druggroup)
  wt_matrix <- permutations(n = n_opts, r = n_groups, weight_options,
                            repeats.allowed = TRUE)
  colnames(wt_matrix) <- druggroup
  cat(n_groups, 'drug groups\n')
  cat(n_opts, 'values for each drug group\n')
  cat(nrow(wt_matrix), 'combinations\n')
  wt_matrix
}
################################################################################################################
## weight conversion to sum to 1
################################################################################################################
convert_weight <- function(wt){
  ## Normalize a weight vector so its non-NA entries sum to one.
  ## NA entries stay NA; returned invisibly (as in the original assignment form).
  invisible(wt / sum(wt, na.rm = TRUE))
}
################################################################################################################
## convert masterpatientid to masterdeid
################################################################################################################
convert_masterdeid <- function(masterpatientid){
  ## Map masterpatientid values to masterdeid via the ion_reg_ova_case_lst
  ## lookup table in the bioinfo_projects database.
  ## NOTE(review): requires get_sting_query() from this file and DB access.
  ## Unmatched ids yield NA; returned invisibly.
  query <- 'select * from ion_reg_ova_case_lst;'
  lookup <- get_sting_query('bioinfo_projects', query)
  invisible(lookup$masterdeid[match(masterpatientid, lookup$masterpatientid)])
}
################################################################################################################
## convert registry_names to druggroups
## mapping_table <- data.frame(registry_name, druggroup_name)
################################################################################################################
convert_registry_to_druggroup <- function(registry_names, mapping_table){
  ## Translate registry drug names into drug-group names.
  ## Args:
  ##   registry_names - character vector of registry_name values.
  ##   mapping_table  - data.frame with columns registry_name and druggroup_name.
  ## Returns (invisibly): character vector of drug groups, NA where no mapping
  ##   exists.
  hit <- match(registry_names, mapping_table$registry_name)
  invisible(as.character(mapping_table$druggroup_name[hit]))
}
################################################################################################################
## Followings are archived functions. DO NOT use
################################################################################################################
################################################################################################################
## Convert drug rule output file format such that columns are drugs in cmi_name, rows are AccessionNumber
## @@@ Important to note that there are cases where a single drug recommendation might be derived from two or more
## @@@ independent drug rules, which might lead to conflict recommendation. Need to resolve this later.
################################################################################################################
format_recommend_akiv <- function(drug_rule_out){
## ARCHIVED (see the section header above) -- do not use in new code.
## Pivot long drug-rule output into a wide matrix:
## rows = AccessionNumber, columns = drugs (cmi_name),
## values = 1 (Benefit), -1 (Lack Of Benefit), 0 (Indeterminate / DoNotReport
## / no recommendation at all).
## NOTE(review): a single drug recommended by several independent rules can
## carry conflicting predictions; duplicates are dropped arbitrarily below.
## Requires DB access via get_ryan_query() and the plyr package.
library(plyr)
## full reference drug list from the registry lookup table
query <- 'select * from cmi_to_registry_agent_lookup'
reg_cmi_mapping <- get_ryan_query('registry_freeze_18_aug_2015', query)
drug_cmi_name <- data.frame(cmi_name = sort(as.character(reg_cmi_mapping$cmi_name)))
#drug_registry_name <- data.frame(registry_name = sort(as.character(reg_cmi_mapping$registry_name)))
# colnames(drug_rule_out) <- c('AccessionNumber', 'drug_group', 'xx', 'xxx', 'prediction', 'marker', 'cmi_name', 'xxxx', 'xxxxx', 'xxxxxx')
## encode the textual prediction as -1 / 0 / 1
drug_rule_out$prediction_num <- drug_rule_out$prediction
drug_rule_out$prediction_num[drug_rule_out$prediction == 'Benefit'] <- 1
drug_rule_out$prediction_num[drug_rule_out$prediction == 'Lack Of Benefit'] <- -1
drug_rule_out$prediction_num[drug_rule_out$prediction == 'Indeterminate'
|drug_rule_out$prediction == 'DoNotReport'] <- 0
## one data.frame of predictions per patient, right-joined against the full
## drug list so every patient ends up with the same set of columns
ls_byPatient_recommend <- dlply(drug_rule_out, .variables = 'AccessionNumber', function(AccessionNumber){
out <- data.frame(cmi_name = AccessionNumber$cmi_name,
prediction = AccessionNumber$prediction_num)
out <- merge(out, drug_cmi_name, by = 'cmi_name', all.y = T)
out$cmi_name <- as.character(out$cmi_name)
out <- out[order(out$cmi_name), ]
## @@ some drugs are in different drug-rule groups, remove duplicated for now, assuming there is no conflict
if(any(duplicated(out$cmi_name))){
idx_dups <- which(duplicated(out$cmi_name))
out <- out[-idx_dups, ]
}
out
})
## collapse each per-patient data.frame to an integer prediction vector
ls_recommend <- llply(ls_byPatient_recommend, function(x){
as.integer(as.character(x$prediction))
})
df_recommend <- do.call(rbind, ls_recommend)
## drugs with no recommendation default to 0
df_recommend[is.na(df_recommend)] <- 0
colnames(df_recommend) <- drug_cmi_name$cmi_name
rownames(df_recommend) <- names(ls_byPatient_recommend)
df_recommend
}
################################################################################################################
## Demographic table generator using xtable
## input:
## 1. df_demographics, a dataframe with all demographic covariates to summarize, column names will be
## displayed in the final table, pick your favourite as input
## 2. y_covariate, the target covariate to split as columns, must be at least two levels
## 3. x_covariate, a vector of demographic covariates of interest
## 4. category_test, whether to carry out proportion test on each category
## 5. caption, table name
## 6. outfile, output directory and filename
################################################################################################################
get_demogr_ryan <- function(
df_demographics, y_covariate, x_covariate, category_test=TRUE,
caption = caption,
outfile = ''
){
## ARCHIVED -- superseded by get_demogr_tina(); kept for reference only.
## Build a demographic table and print it as LaTeX via xtable.
## Args:
##   df_demographics - data.frame holding y_covariate and x_covariate columns.
##   y_covariate     - name of the grouping factor (>= 2 levels).
##   x_covariate     - covariate names to summarize as rows.
##   category_test   - if TRUE, prop.test per level of each categorical covariate.
##   caption         - table caption. NOTE(review): the default `caption = caption`
##                     is self-referential and errors whenever the argument is
##                     omitted -- supply it explicitly.
##   outfile         - path handed to print.xtable ('' prints to the console).
## Numeric covariates are reported as median(IQR) with wilcox.test (2 groups)
## or one-way ANOVA (>2 groups). NOTE(review): in the ANOVA branch the p-value
## is rounded with no digits argument, collapsing it to 0/1 -- likely a bug.
# caption <- 'my table'
# df_demographics <- df_plot4[, -c(1:2)]
# df_demographics <- data.frame(age1 = sample(df_plot4$age),
# df_demographics)
# df_demographics <- df_demographics[, -(4:5)]
# y_covariate <- 'followed'
library(xtable)
library(plyr)
y_idx <- which(colnames(df_demographics) == y_covariate)
y_cov <- df_demographics[, y_idx]
x_cov <- df_demographics[, x_covariate]
n_y_level <- nlevels(y_cov)
nColumns <- n_y_level + 2
##
n_table <- table(y_cov)
## process numeric variables
x_num_idx <- which(sapply(x_cov, is.numeric))
x_num <- data.frame(x_cov[, x_num_idx])
x_num_names <- colnames(x_cov)[x_num_idx]
colnames(x_num) <- x_num_names
n_x_num <- ncol(x_num)
if(n_y_level < 2){
stop('y_covariate must have at least two levels to compute the p values.\n')
}else if(n_y_level == 2){
num_p <- round(sapply(1:n_x_num, function(x_idx){
wilcox.test(x_num[, x_idx] ~ y_cov)$p.value
}), 4)
}else{
## NOTE(review): round() without digits truncates these p-values to 0/1
num_p <- round(sapply(1:n_x_num, function(x_idx){
aov_sum <- summary(aov(x_num[, x_idx] ~ y_cov))
aov_sum[[1]]$`Pr(>F)`[1]
}))
}
## median (Q1-Q3) per group for each numeric covariate
ls_num_tables <- lapply(1:n_x_num, function(x_idx){
tab <- tapply(x_num[,x_idx], y_cov, median, na.rm = T)
iqr_low <- tapply(x_num[,x_idx], y_cov, function(x)quantile(x, 0.25, na.rm = T))
iqr_high <- tapply(x_num[,x_idx], y_cov, function(x)quantile(x, 0.75, na.rm = T))
tab <- paste0(tab, '(', iqr_low, '-', iqr_high, ')')
})
names(ls_num_tables) <- x_num_names
x_num_table <- do.call(rbind, ls_num_tables)
x_num_table <- cbind(x_num_table, num_p)
## process categorical variables
x_cat <- data.frame(x_cov[, -x_num_idx])
x_cat_names <- colnames(x_cov[-x_num_idx])
colnames(x_cat) <- x_cat_names
n_x_cat <- ncol(x_cat)
## one contingency table per covariate, headed by a title row of NAs
ls_cat_tables <- lapply(1:n_x_cat, function(x_idx){
tab1 <- table(x_cat[,x_idx], y_cov)
tab2 <- rbind(NA, tab1)
rownames(tab2)[1] <- x_cat_names[x_idx]
tab2
})
## overall association p-value per covariate (NA when fisher.test fails)
fisher_p <- sapply(ls_cat_tables, function(x) {
p_value <- tryCatch(fisher.test(x[-1,], workspace = 2e8)$p.value, error = function(e)return(NA))
p_value <- round(p_value, 4)
})
## append the p-value column and a spacer row to each covariate block
ls_cat_tables <- lapply(1:n_x_cat, function(x){
nx <- nrow(ls_cat_tables[[x]])
if (category_test) {
prop_p <- sapply(2:nx, function(x_row) {
p_value <- tryCatch(prop.test(ls_cat_tables[[x]][x_row,],n_table)$p.value, error = function(e)return(NA))
p_value <- round(p_value, 4)
})
out <- cbind(ls_cat_tables[[x]], c(fisher_p[x], prop_p))
out <- rbind(NA, out)
out
} else {
out <- cbind(ls_cat_tables[[x]], c(fisher_p[x], rep(NA, nx-1)))
out <- rbind(NA, out)
out
}
})
x_cat_table_tmp <- do.call(rbind, ls_cat_tables)
## format counts as "count(percent%)" per group column
x_cat_table <- sapply(1:(ncol(x_cat_table_tmp)-1), function(x){
paste0(x_cat_table_tmp[, x], '(', round(x_cat_table_tmp[, x]/n_table[x]*100, 1), '%)')
})
x_cat_table <- cbind(x_cat_table, x_cat_table_tmp[,ncol(x_cat_table_tmp)])
x_cat_table[grep('NA', x_cat_table)] <- NA
## assemble header, sample-size row, numeric block, categorical block
n_table <- c('n', paste0('(', n_table, ')'), NA)
x_num_table <- cbind(rownames(x_num_table), x_num_table)
x_cat_table <- cbind(rownames(x_cat_table_tmp), x_cat_table)
x_table <- rbind(n_table, x_num_table, x_cat_table)
x_table <- rbind(c(NA, levels(y_cov), 'P-value'), x_table)
rownames(x_table) <- NULL
colnames(x_table) <- NULL
x_table[grep('^$', x_table)] <- NA
## side effect: print the LaTeX table (to console or `outfile`)
align <- paste0(c('ll', rep('c', ncol(x_table)-1)), collapse = '')
print(xtable(x_table, align=align, caption = caption),
include.rownames=FALSE, include.colnames=FALSE,
hline.after=c(0, 2, nrow(x_table)),
file = outfile,
#tabular.environment="longtable",
caption.placement = 'top', size = 'scriptsize')
}
| /clinical_data/clinical_data_pipeline.R | no_license | wjdddj/my_R_respository | R | false | false | 52,537 | r | ################################################################################################################
## detach specific loaded package
################################################################################################################
detach_package <- function(pkg, character.only = FALSE) {
  ## Detach (and unload) a single package from the search path.
  ## Accepts an unquoted package name by default, or a character string when
  ## character.only = TRUE.
  if (!character.only) {
    pkg <- deparse(substitute(pkg))
  }
  target <- paste0("package:", pkg)
  ## a package can appear on the search path more than once; detach all copies
  while (target %in% search()) {
    detach(target, unload = TRUE, character.only = TRUE)
  }
}
################################################################################################################
## detach all loaded packages
################################################################################################################
detachAllPackages <- function() {
  ## Detach every attached package except those that ship attached with base R.
  ## Note: packages are detached but their namespaces are NOT unloaded.
  keep <- c("package:stats",
            "package:graphics",
            "package:grDevices",
            "package:utils",
            "package:datasets",
            "package:methods",
            "package:base")
  attached <- grep("^package:", search(), value = TRUE)
  extras <- setdiff(attached, keep)
  if (length(extras) > 0)
    lapply(extras, detach, character.only = TRUE)
}
################################################################################################################
## Obtain table from mySQL databases using Sting's account
################################################################################################################
get_sting_table <- function(database, table_name){
  ## Fetch an entire table from a MySQL database (account: sting).
  ## WARNING: credentials are hard-coded in source; move to a config/env var.
  ## NOTE: table_name is pasted into the SQL unescaped -- internal use only.
  library(RMySQL)
  dbcon <- dbConnect(MySQL(), user = "sting", password = "Pass1234",
                     host = 'bioserv.clsnet.intranet', dbname = database, port = 3307)
  ## guarantee the connection is released even if the query fails
  ## (it was previously leaked on error)
  on.exit(suppressWarnings(dbDisconnect(dbcon)), add = TRUE)
  rs1 <- dbSendQuery(dbcon, paste0('select * from ', table_name))
  df_out <- dbFetch(rs1, n = -1)
  dbClearResult(rs1)
  df_out
}
################################################################################################################
## Obtain query from mySQL databases using Sting's account
################################################################################################################
get_sting_query <- function(database, query){
  ## Run an arbitrary SQL query against a MySQL database (account: sting)
  ## and return the full result set as a data.frame.
  ## WARNING: credentials are hard-coded in source; move to a config/env var.
  library(RMySQL)
  dbcon <- dbConnect(MySQL(), user = "sting", password = "Pass1234",
                     host = 'bioserv.clsnet.intranet', dbname = database, port = 3307)
  ## guarantee the connection is released even if the query fails
  ## (it was previously leaked on error)
  on.exit(suppressWarnings(dbDisconnect(dbcon)), add = TRUE)
  rs1 <- dbSendQuery(dbcon, query)
  df_out <- dbFetch(rs1, n = -1)
  dbClearResult(rs1)
  df_out
}
################################################################################################################
## Obtain table from mySQL databases using Ryan's account
################################################################################################################
get_ryan_table <- function(database, table_name){
  ## Fetch an entire table from a MySQL database (account: jwang).
  ## WARNING: credentials are hard-coded in source; move to a config/env var.
  ## NOTE: table_name is pasted into the SQL unescaped -- internal use only.
  library(RMySQL)
  dbcon <- dbConnect(MySQL(), user = "jwang", password = "Pass1234",
                     host = 'bioserv.clsnet.intranet', dbname = database, port = 3307)
  ## guarantee the connection is released even if the query fails
  ## (it was previously leaked on error)
  on.exit(suppressWarnings(dbDisconnect(dbcon)), add = TRUE)
  rs1 <- dbSendQuery(dbcon, paste0('select * from ', table_name))
  df_out <- dbFetch(rs1, n = -1)
  dbClearResult(rs1)
  df_out
}
################################################################################################################
## Obtain query from mySQL databases using Ryan's account
################################################################################################################
get_ryan_query <- function(database, query){
  ## Run an arbitrary SQL query against a MySQL database (account: jwang)
  ## and return the full result set as a data.frame.
  ## WARNING: credentials are hard-coded in source; move to a config/env var.
  library(RMySQL)
  dbcon <- dbConnect(MySQL(), user = "jwang", password = "Pass1234",
                     host = 'bioserv.clsnet.intranet', dbname = database, port = 3307)
  ## guarantee the connection is released even if the query fails
  ## (it was previously leaked on error)
  on.exit(suppressWarnings(dbDisconnect(dbcon)), add = TRUE)
  rs1 <- dbSendQuery(dbcon, query)
  df_out <- dbFetch(rs1, n = -1)
  dbClearResult(rs1)
  df_out
}
################################################################################################################
## Compute tnt from therapy data of a SINGLE subject, using input of line numbers of start_line and end_line.
## input:
## 1. df_therapy_by_subject, a data.frame of therapy of a single subject. It MUST contain these columns:
## 'masterdeid', 'line_num', 'regimen_start', 'deathdate', 'lastcontactdate'.
## 2. start_line, line number of the start line.
## 3. end_line, line number of the end line, it could also be 'max', equivalent to overall survival from start
## of the start line.
##
## output:
## 1. tnt_event, a data.frame contains tnt and event columns with only one row of data.
## 2. df_therapy_by_subject, the updated input data.frame with tnt and event columns added. The tnt and event
## will have the same value among rows.
##
## rule:
## 1. event is considered as either start of the end_line, or death when end_line is not available;
## censored when no deathdate available.
## 2. if event, tnt is the time difference between start of start_line and start of end_line, or time difference
## between start of start_line and death; if censored, tnt is the time difference between start of start_line and
## last contact date.
################################################################################################################
get_tnt <- function(df_therapy_by_subject, start_line = 1, end_line = 'max'){
  ## Compute time-to-next-treatment (tnt) for ONE subject, from the start of
  ## `start_line` to the start of `end_line` (or to death / last contact).
  ## Args:
  ##   df_therapy_by_subject - one subject's therapy rows with columns
  ##     masterdeid, line_num, regimen_start, deathdate, lastcontactdate
  ##     ('%Y-%m-%d' strings).
  ##   start_line - line number marking the start point.
  ##   end_line   - line number marking the end point, or 'max' for overall
  ##                survival from the start of start_line.
  ## Rules:
  ##   - event = 1 when the end line starts, or on death when the end line is
  ##     unavailable; event = 0 (censored at last contact) otherwise.
  ##   - tnt = endpoint - startpoint + 1 (days); NA when the start line or its
  ##     regimen_start is missing.
  ## Returns (invisibly): list(tnt_event = 1-row data.frame(tnt, event),
  ##   df_therapy_by_subject = input with tnt/event columns appended).
  regimen_start <- as.Date(df_therapy_by_subject$regimen_start, format = '%Y-%m-%d')
  deathdate <- as.Date(df_therapy_by_subject$deathdate, format = '%Y-%m-%d')
  lastcontactdate <- as.Date(df_therapy_by_subject$lastcontactdate, format = '%Y-%m-%d')
  line_num <- df_therapy_by_subject$line_num

  ## helper: terminal endpoint of the latest line -- death (event) or last
  ## contact (censored). Replaces three duplicated copies of this logic.
  resolve_terminal <- function(){
    idx <- which.max(line_num)
    if (!is.na(deathdate[idx])) {
      list(endpoint = deathdate[idx], event = 1)
    } else {
      list(endpoint = lastcontactdate[idx], event = 0)
    }
  }

  ## BUG FIX: the original used `|` here, so a missing start_line produced a
  ## zero-length condition and if() errored instead of returning NA
  start_idx <- which(line_num == start_line)
  if (length(start_idx) != 1 || is.na(regimen_start[start_idx])) {
    startpoint <- NA
  } else {
    startpoint <- regimen_start[start_idx]
  }

  ## end point: start of end_line when it exists; otherwise fall back to
  ## death / last contact of the latest line
  end_idx <- if (end_line == 'max') integer(0) else which(line_num == end_line)
  if (end_line != 'max' && length(end_idx) == 1 && !is.na(regimen_start[end_idx])) {
    endpoint <- regimen_start[end_idx]
    event <- 1
  } else {
    term <- resolve_terminal()
    endpoint <- term$endpoint
    event <- term$event
  }

  ## any remaining error is mapped to NA and can be filtered out downstream
  tnt <- tryCatch(endpoint - startpoint + 1, error = function(e) return(NA))
  tnt_event <- data.frame(tnt = as.numeric(tnt), event = event)
  df_therapy_by_subject <- data.frame(df_therapy_by_subject, tnt_event)
  z <- list(tnt_event = tnt_event, df_therapy_by_subject = df_therapy_by_subject)
}
################################################################################################################
## Obtain therapy combinations from start_line to end_line.
## input:
## 1. df_therapy_by_subject, a data.frame of therapy of a single subject. It MUST contain these columns:
## 'masterdeid', 'line_num'; optional for tnt calculation: 'regimen_start', 'deathdate', 'lastcontactdate'.
## 2. start_line, line number of the start line.
## 3. end_line, line number of the end line, it could also be 'max', the last line will be included.
##
## output: a character string of all drugs used between start_line and end_line, collapsed with ','.
################################################################################################################
get_therapy <- function(df_therapy_by_subject, start_line, end_line){
  ## Collect the unique, sorted set of drug names given between start_line and
  ## end_line for one subject.
  ## Args:
  ##   df_therapy_by_subject - data.frame with columns therapies (comma-joined
  ##     drug names per line) and line_num.
  ##   start_line - first therapy line to include (inclusive).
  ##   end_line   - 'max' to include through the last line (inclusive), or a
  ##                line number treated as an EXCLUSIVE upper bound.
  ## Returns: a single comma-collapsed string of sorted unique drug names, or
  ##   NA (with a console message) when no lines fall in the range.
  therapy <- df_therapy_by_subject$therapies
  line_num <- df_therapy_by_subject$line_num
  if (end_line == 'max') {
    ## BUG FIX: was which.max(line_num) (the INDEX of the maximum), which
    ## silently dropped lines whenever line numbers were not 1..n contiguous
    lines_idx <- which(line_num >= start_line & line_num <= max(line_num))
  } else {
    lines_idx <- which(line_num >= start_line & line_num < end_line)
  }
  if (length(lines_idx) == 0) {
    cat('no treatment is found between', start_line, 'and', end_line, 'inclusively. NA will be returned as therapy\n')
    out_therapy <- NA
  } else {
    out_therapy <- paste(therapy[lines_idx], collapse = ',')
    out_therapy <- paste(sort(unique(strsplit(out_therapy, ',')[[1]])), collapse = ',')
  }
  out_therapy
}
################################################################################################################
## A wrapper function to obtain therapies and tnts for multiple subjects using get_tnt() and get_therapy().
## input:
## 1. df_therapy, a data.frame of therapy of a single or multiple subjects. It MUST contain these columns:
## 'masterdeid', 'line_num', 'regimen_start', 'deathdate', 'lastcontactdate'.
## 2. start_line, line number of the start line.
## 3. end_line, line number of the end line, it could also be 'max', the last line will be included.
##
## output:
## 1. df_therapy_out, a data.frame with three columns, masterdeid, therapies, tnt, event.
################################################################################################################
get_therapy_tnt_df <- function(df_therapy, start_line = 1, end_line = 'max'){
  ## Per-subject wrapper around get_tnt() and get_therapy().
  ## Splits df_therapy on masterdeid and returns one row per subject with
  ## columns masterdeid, therapies, tnt, event.
  ## NOTE(review): relies on plyr::dlply and the sibling functions above;
  ## df_therapy must carry the columns both of them require.
  library(plyr)
  per_subject <- dlply(df_therapy, .variables = 'masterdeid', function(subj){
    tnt_event <- get_tnt(subj, start_line = start_line, end_line = end_line)$tnt_event
    drugs <- get_therapy(subj, start_line = start_line, end_line = end_line)
    data.frame(masterdeid = subj$masterdeid[1], therapies = drugs, tnt_event)
  })
  out <- data.frame(do.call(rbind, per_subject))
  rownames(out) <- NULL
  out
}
################################################################################################################
## Compute tnt of each line from therapy data of each subject.
## input:
## 1. df_therapy_by_subject, a data.frame of therapy by subject. It MUST contain these columns with name
## 'line_num', 'regimen_start', 'deathdate', 'lastcontactdate'.
##
## output:
## 1. df_therapy_by_subject, the updated input data.frame with tnt and event columns added.
##
## rule:
## 1. event is considered as either start of the nextline, or death when nextline is not available;
## censored when no deathdate available.
## 2. if event, tnt is the time difference between start of current and start of next line or time difference
## between start of current line and death; if censored, tnt is the time difference between start of current and
## last contact date.
## 3. if nextline is not available while not max line, NA will be returned.
################################################################################################################
get_tnt_by_line <- function(df_therapy_by_subject){
  ## Compute time-to-next-treatment (tnt) and event status for EVERY therapy
  ## line of a single subject.
  ## Required columns: line_num, regimen_start, deathdate, lastcontactdate
  ## ('%Y-%m-%d' strings).
  ## Rules per line:
  ##   - a later line exists        -> event = 1, tnt = next start - start + 1
  ##   - last line, death recorded  -> event = 1, tnt = death - start + 1
  ##   - last line, no death        -> event = 0, tnt = last contact - start + 1
  ##   - the immediately-next line is missing -> tnt = event = NA
  ## Returns (invisibly): input data.frame with tnt and event columns appended.
  regimen_start <- as.Date(df_therapy_by_subject$regimen_start, format = '%Y-%m-%d')
  deathdate <- as.Date(df_therapy_by_subject$deathdate, format = '%Y-%m-%d')
  lastcontactdate <- as.Date(df_therapy_by_subject$lastcontactdate, format = '%Y-%m-%d')
  line_num <- df_therapy_by_subject$line_num
  maxline <- max(line_num)
  ## base lapply replaces plyr::llply -- identical behavior, one less hard
  ## package dependency for a simple list map
  ls_tnt_event <- lapply(df_therapy_by_subject$line_num, function(line){
    line_idx <- which(line_num == line)
    if (line < maxline) {
      ## event: the next line of therapy started
      nextline_idx <- which(line_num == (line + 1))
      if (length(nextline_idx) != 1) {
        tnt <- NA
        event <- NA
      } else {
        tnt <- regimen_start[nextline_idx] - regimen_start[line_idx] + 1
        event <- 1
      }
    } else if (!is.na(deathdate[line_idx])) {
      ## event: death after the last recorded line
      tnt <- deathdate[line_idx] - regimen_start[line_idx] + 1
      event <- 1
    } else {
      ## censored at last contact date
      tnt <- lastcontactdate[line_idx] - regimen_start[line_idx] + 1
      event <- 0
    }
    data.frame(tnt = as.numeric(tnt), event = event)
  })
  tnt_event <- do.call(rbind, ls_tnt_event)
  df_therapy_by_subject <- data.frame(df_therapy_by_subject, tnt_event)
}
################################################################################################################
## Compute tnt and event by single line in batch
## A wrapper function to compute multiple subjects for get_tnt_by_line()
################################################################################################################
get_tnt_df_by_line <- function(df_therapy){
  ## Apply get_tnt_by_line() to every subject (split on masterdeid) and
  ## reassemble the per-subject results into a single data.frame.
  ## NOTE(review): relies on plyr::dlply and the sibling function above.
  library(plyr)
  per_subject <- dlply(df_therapy, .variables = 'masterdeid', function(subj){
    get_tnt_by_line(subj)
  })
  out <- do.call(rbind, per_subject)
  rownames(out) <- NULL
  out
}
################################################################################################################
## Convert drug rule output file format such that columns are drugs in registry_name, rows are masterdeid
## input:
## 1. recommend_table. The long format of drug recommendations
## column1, masterdeid; column2, registry_name; column3, recommendation.
##
## output:
## 1. df_recommend. A matrix, columns are individual drugs in registry_name (n = 100), rows are subjects'
## masterdeid.
################################################################################################################
format_recommend <- function(recommend_table){
  ## Pivot a long recommendation table into a wide matrix:
  ## rows = masterdeid, columns = all registry drug names (from the DB lookup),
  ## values = 1 (Benefit), -1 (Lack Of Benefit), 0 (Indeterminate /
  ## DoNotReport / no recommendation).
  ## Args: recommend_table with columns masterdeid, registry_name, recommendation.
  ## NOTE(review): needs DB access via get_ryan_query() and the plyr package.
  library(plyr)
  query <- 'select * from cmi_to_registry_agent_lookup'
  reg_cmi_mapping <- get_ryan_query('registry_freeze_18_aug_2015', query)
  drug_registry_name <- data.frame(registry_name = sort(unique(as.character(reg_cmi_mapping$registry_name))))
  ## ROBUSTNESS FIX: initialize the column explicitly -- partial conditional
  ## assignment onto a nonexistent column can fail on length mismatch when the
  ## trailing rows match no category
  recommend_table$prediction_num <- NA
  recommend_table$prediction_num[recommend_table$recommendation == 'Benefit'] <- 1
  recommend_table$prediction_num[recommend_table$recommendation == 'Lack Of Benefit'] <- -1
  recommend_table$prediction_num[recommend_table$recommendation == 'Indeterminate'
                                 |recommend_table$recommendation == 'DoNotReport'] <- 0
  ls_byPatient_recommend <- dlply(recommend_table, .variables = 'masterdeid', function(masterdeid){
    out <- data.frame(registry_name = masterdeid$registry_name,
                      prediction = masterdeid$prediction_num)
    ## right-join against the full drug list so every patient gets all columns
    out <- merge(out, drug_registry_name, by = 'registry_name', all.y = TRUE)
    out$registry_name <- as.character(out$registry_name)
    out <- out[order(out$registry_name), ]
    ## @@ some drugs are in different drug-rule groups; drop duplicates,
    ## assuming no conflicting predictions (removed stray debug call dim(out))
    if (any(duplicated(out$registry_name))) {
      idx_dups <- which(duplicated(out$registry_name))
      out <- out[-idx_dups, ]
    }
    out
  })
  ls_recommend <- llply(ls_byPatient_recommend, function(x){
    as.integer(as.character(x$prediction))
  })
  df_recommend <- do.call(rbind, ls_recommend)
  ## drugs with no recommendation default to 0
  df_recommend[is.na(df_recommend)] <- 0
  colnames(df_recommend) <- drug_registry_name$registry_name
  rownames(df_recommend) <- names(ls_byPatient_recommend)
  df_recommend
}
################################################################################################################
## convert AccessionNumber to masterdeid
################################################################################################################
access_to_masterdeid <- function(AccessionNumber){
## Map MS1 accession numbers to masterdeid values via the ms1 database.
## NOTE(review): requires get_ryan_query() from this file and DB access.
## Unmatched accession numbers yield NA; the result is returned invisibly
## (the last statement is an assignment).
query <- 'select ms.accessionnumber, cp.masterpatientid as masterdeid
from ms1.casepatient cp
join ms1.ms1case ms on ms.casepatientid=cp.casepatientid;'
lookup_table <- get_ryan_query('ms1', query)
masterdeid <- lookup_table$masterdeid[match(AccessionNumber, lookup_table$accessionnumber)]
}
################################################################################################################
## convert cmi_names to registry_names
## cmi to registry: n to 1 mapping. This conversion will NOT yield conflict.
################################################################################################################
## Translate CMI drug names to registry drug names.
## cmi -> registry is an n-to-1 mapping, so this direction cannot conflict.
##
## @param cmi_names character vector of cmi_name values.
## @return registry_name for each input, NA when unmapped. Needs DB access
##   via get_ryan_query().
drugname_cmi_to_registry <- function(cmi_names){
  query <- 'select * from cmi_to_registry_agent_lookup'
  reg_cmi_mapping <- get_ryan_query('registry_freeze_18_aug_2015', query)
  registry_name <- reg_cmi_mapping$registry_name[match(cmi_names, reg_cmi_mapping$cmi_name)]
  ## return visibly (assignment as the last expression returns invisibly)
  registry_name
}
################################################################################################################
## convert registry_names to cmi_names
## cmi to registry: n to 1 mapping. This conversion will potentially yield conflict.
################################################################################################################
## Translate registry drug names to CMI drug names.
## registry -> cmi is 1-to-n: match() keeps the FIRST matching cmi_name, so
## any conflict is silently resolved by lookup-table row order (see header
## comment in the original file).
##
## @param registry_names character vector of registry_name values.
## @return cmi_name for each input, NA when unmapped. Needs DB access.
drugname_registry_to_cmi <- function(registry_names){
  query <- 'select * from cmi_to_registry_agent_lookup'
  reg_cmi_mapping <- get_ryan_query('registry_freeze_18_aug_2015', query)
  cmi_names <- reg_cmi_mapping$cmi_name[match(registry_names, reg_cmi_mapping$registry_name)]
  ## return visibly (assignment as the last expression returns invisibly)
  cmi_names
}
################################################################################################################
## Obtain matched and unmatched information for one patient. Need to make sure the naming for drugs are
## consistent among cmi and registry and ion.
## input:
## 1. therapy_line, a vector of drugs (registry_name) used in the line of therapy
## 2. recommend_line, a vector of recommendation output from drug rules, names are drugs (registry_name).
## -1, lack of benefit; 0, neither; 1, benefit.
##
## rule: if any drug the subject used is recommended as Lack Of Benefit (-1), return unmatched
## otherwise if any drug the subject used is recommended as Benefit (1), return matched
## otherwise if all drugs the subject used are recommended as either Indeterminate or DoNotReport (0),
## return neither.
################################################################################################################
## Classify one line of therapy against drug-rule recommendations.
##
## @param therapy_line drugs received (registry_name) in this line.
## @param recommend_line named numeric vector of recommendations:
##   -1 lack of benefit, 0 indeterminate/do-not-report, 1 benefit.
## @return 'unmatched' if any received drug is lack-of-benefit, 'matched'
##   if none is and at least one is benefit, otherwise 'neither'.
is_matched <- function(therapy_line, recommend_line){
  preds <- recommend_line[match(therapy_line, names(recommend_line))]
  ## a single lack-of-benefit drug dominates the whole line
  if(any(preds == -1, na.rm = TRUE)) return('unmatched')
  ## no benefit drug either (all zeros / unrecognized drugs) -> neither
  if(sum(preds, na.rm = TRUE) == 0) return('neither')
  'matched'
}
################################################################################################################
## compute matched unmatched info and add a column to the therapy table.
## input:
## 1. df_therapy: rows are individual cases. It must have a column of 'therapies' that contains drugs used in
## this line, collapsed by ','.
## 2. df_recommend: It MUST be a matrix!(colnames of a dataframe will be corrupted). Rows are individual cases
## ordered in the same way as df_therapy. rownames is masterdeid, colnames are registry_name of the drugs.
################################################################################################################
## Append a 'followed' column (matched/unmatched/neither) to a therapy table.
##
## @param df_therapy data.frame with a 'therapies' column: drugs received in
##   the line, collapsed by ','.
## @param df_recommend numeric MATRIX of recommendations, rows aligned
##   one-to-one with df_therapy rows, colnames = registry_name (a data.frame
##   would corrupt the column names, per the original header comment).
## @return df_therapy with the per-row match status appended as 'followed'.
get_matched_df <- function(df_therapy, df_recommend){
  library(plyr)
  n <- nrow(df_therapy)
  ## seq_len() instead of 1:n so a zero-row input yields no iterations
  ## (1:0 would wrongly iterate over c(1, 0))
  followup <- unlist(llply(seq_len(n), function(i){
    # cat(i, ' ')
    therapy <- strsplit(as.character(df_therapy$therapies[i]), ',')[[1]]
    recommend <- df_recommend[i,]
    ## subsetting a matrix row drops names; restore them for is_matched()
    names(recommend) <- colnames(df_recommend)
    is_matched(therapy, recommend)
  }))
  df_therapy <- data.frame(df_therapy,
                           followed = followup)
  ## return visibly (assignment as the last expression returns invisibly)
  df_therapy
}
################################################################################################################
## To compute matched/unmatched/neither for a single case. An improved version of is_matched(). Match on
## druggroup instead of registry_name.
## input:
## 1. received <- c('druggroup1', 'druggoup2', ...)
## 2. recommend <- data.frame(druggroup, recommendation)
## rule: if any drug the subject used is recommended as Lack Of Benefit (-1), return unmatched
## otherwise if any drug the subject used is recommended as Benefit (1), return matched
## otherwise if all drugs the subject used are recommended as either Indeterminate or DoNotReport (0),
## return neither.
################################################################################################################
## Classify one case by matching received druggroups against rule output.
## Improved counterpart of is_matched(): keys on druggroup, not registry_name.
##
## @param received character vector of druggroup names the patient received.
## @param recommend data.frame(druggroup, recommendation) with -1/0/1 codes.
## @return 'unmatched' if any received drug has a negative recommendation,
##   'matched' if none does and the recommendations sum positive, otherwise
##   'neither' (all zero / drugs absent from the rule output).
get_match_binary <- function(received, recommend){
  #received <- c('F', 'G')
  #recommend <- data.frame(druggroup = c('A', 'B', 'C', 'D', 'E'),
  #                        recommendation = c(1,1,0,-1,0))
  idx <- match(received, recommend$druggroup)
  scores <- recommend$recommendation[idx]
  ## a single lack-of-benefit drug dominates the whole line
  if(any(scores < 0, na.rm = TRUE)) return('unmatched')
  ## only zeros / unmatched drugs left -> neither
  if(sum(scores, na.rm = TRUE) == 0) return('neither')
  'matched'
}
################################################################################################################
## To compute match_score for a single case by matching weights and recommendations to received drugs. weights
## were normalized within received drugs.
## input:
## 1. received <- c('druggroup1', 'druggoup2', ...)
## 2. recommend <- data.frame(druggroup, recommendation)
## 3. wt_star <- data.frame(druggroup, weight)
## 4. adjust_recommend = c(T, F) # whether to adjust the weight by benefit driven or lackOfBenefit driven.
## 5. if adjust_recommend == T, recom_weight = c(lackofbenefit, benefit) #lackofbenefit + benefit = 1
## output:
## match_score. A numeric variable range [-1, 1]
################################################################################################################
## Weighted match score for a single case, in [-1, 1].
##
## @param received character vector of druggroups the patient received.
## @param recommend data.frame(druggroup, recommendation) with -1/0/1 codes.
## @param wt_star data.frame(druggroup, weight) of drug importance weights.
## @param adjust_recommend logical; if TRUE, scale each weight by
##   recom_weight according to the sign of the recommendation.
## @param recom_weight length-2 numeric c(lackofbenefit, benefit) summing
##   to 1; only used when adjust_recommend is TRUE.
## @return sum(recommendation * weight) over received drugs, where weights
##   are renormalized to sum to 1 within the received drugs.
get_match_score <- function(received, recommend, wt_star, adjust_recommend, recom_weight){
  #received <- df_therapy_2nd$druggroups[1]
  match_idx1 <- match(received, recommend$druggroup)
  match_idx2 <- match(received, wt_star$druggroup)
  ## scalar condition, so use short-circuit '||' (the original used the
  ## vectorized '|'); match() output length is 0 only when received is empty
  if(length(match_idx1) == 0 || length(match_idx2) == 0){
    recommendation <- NA
    weight <- NA
  }else{
    recommendation <- recommend$recommendation[match_idx1]
    weight <- wt_star$weight[match_idx2]
  }
  if(adjust_recommend){
    ## re-weight each drug depending on whether it is benefit- or
    ## lack-of-benefit-driven; seq_along() is safe for length-0 weight
    weight <- sapply(seq_along(weight), function(i){
      if(is.na(recommendation[i])){
        z <- NA
      }else if(recommendation[i] < 0){
        z <- weight[i] * recom_weight[1]
      }else{
        z <- weight[i] * recom_weight[2]
      }
      z
    })
  }
  ## renormalize weights to sum to 1 across the received drugs
  weight <- convert_weight(weight)
  match_score <- sum(recommendation * weight, na.rm = T)
  match_score
}
################################################################################################################
## A wrapper function to compute match_score in batches using get_match_score(). Only suitable for a single
## version of drugrule at a single version of weight combinations. A bigger wrapper is needed to run different
## combinations. It also scale the match_scores from [-1, 1] to [0, 1].
## IMPORTANT!! match the order between df_therapy and ls_recommend for input.
## input:
## 1. df_therapy. contains a column named 'druggroups'.
## 2. ls_recommend. contains drug rule recommendations organized by masterdeid. It MUST be in the same order
## as df_therapy. ls_recommend should contain columns, named 'drugRule', 'recom'.
## 3. method = c('binary', 'score')
## 4. if method == 'score', adjust_recommend and recom_weight are inherited from get_match_score()
## output:
## df_therapy. the same data.frame as input with a new column named 'match_scores' appended.
################################################################################################################
## Batch wrapper around get_match_score() / get_match_binary() for one
## drug-rule version at one weight combination. Score output is rescaled
## from [-1, 1] to [0, 1].
## IMPORTANT: df_therapy rows and ls_recommend elements MUST be in the same
## patient order; no key-based alignment is performed here.
get_match_score_df <- function(df_therapy, ls_recommend, method, wt_star, adjust_recommend, recom_weight){
# ls_recommend <- ls_recommend[match(inter_masterdeid, names(ls_recommend))]
# df_therapy <- df_therapy_2nd[match(inter_masterdeid, df_therapy_2nd$masterdeid), ]
# wt_star <- data.frame(druggroup = colnames(weight_matrix),
#                      weight = convert_weight(c(0.3,0.3,0.6,0.3,0.3)), stringsAsFactors = F)
n_sample <- length(ls_recommend)
## llply preserves order, so scores stay aligned with df_therapy rows
match_scores <- llply(1:n_sample, function(i){
# cat(i,'\n')
received <- strsplit(df_therapy$druggroups[i], ',')[[1]]
recommend <- data.frame(druggroup = ls_recommend[[i]]$drugRule,
                        recommendation = ls_recommend[[i]]$recom,
                        stringsAsFactors = F)
if(method == 'score'){
match_score <- get_match_score(received, recommend, wt_star, adjust_recommend = adjust_recommend, recom_weight = recom_weight)
match_score <- (match_score - -1)/2 ## recommendation score is between -1 and 1, scale to 0 and 1.
}else if(method == 'binary'){
## 'binary' returns a character label instead of a numeric score
match_score <- get_match_binary(received, recommend)
}
})
df_therapy$match_score <- unlist(match_scores)
df_therapy
}
################################################################################################################
## text processing functions to clean up clinical information
################################################################################################################
## Strip any run of whitespace and/or punctuation from BOTH ends of each
## string in x (vectorized); interior punctuation is preserved.
trim <- function(x) {
  gsub("^(\\s|[[:punct:]])+|([[:punct:]]|\\s)+$", "", x)
}
## Collapse the various "missing" spellings (NA, NULL, Unknown..., Not...)
## to the single token 'Unknown'.
## NOTE(review): 'NA' and 'NULL' match as SUBSTRINGS too (e.g. inside 'DNA')
## -- behavior kept as-is; confirm before tightening the pattern.
convert_unkowns <- function(x) {
  gsub('NA|NULL|(^[Uu]nknown.*$)|(^[Nn]ot.*$)', 'Unknown', x)
}
## Clean up a tumour-grade string: trim edge whitespace/punctuation,
## normalize spacing around '/' (e.g. '2 / 3' -> '2/3'), then standardize
## unknown spellings via convert_unkowns().
convert_grade <- function(x) {
  convert_unkowns(gsub('\\s*/\\s*', '/', trim(x)))
}
## clean up stage labels
## I, II and IV substages (e.g. 'IIB', 'IV1') always collapse to the bare
## numeral. For III the caller chooses: collapse fully (collapseIII = TRUE),
## or keep substage letters and drop only trailing digits (e.g. 'IIIA2' ->
## 'IIIA') when collapseIII = FALSE.
convert_stage <- function(x, collapseIII = T) {
x <- gsub('^I[A-D1-9]+$', 'I', x)
x <- gsub('^II[A-D1-9]+$', 'II', x)
if(collapseIII){
x <- gsub('^III[A-D1-9]+$', 'III', x)
}else if(any(grepl('^III$', x))){
## a bare 'III' is already present: partial stripping would mix
## granularities, so force the full collapse and tell the user
cat('detected \'III\', set collapseIII to TRUE\n')
x <- gsub('^III[A-D1-9]+$', 'III', x)
}else{
## keep substage letters, drop only trailing digits
x <- ifelse(grepl('^III[A-D1-9]+$', x), gsub('[1-9]+$', '', x), x)
}
x <- gsub('^IV[A-D1-9]+$', 'IV', x)
x <- convert_unkowns(x)
x
}
################################################################################################################
## generate the survfit and coxph objects
################################################################################################################
## Fit a multivariable Cox model plus a KM fit stratified on one covariate,
## for downstream plotting with kmplots().
##
## @param df_clinical_info data.frame with 'time', 'event' and covariate
##   columns; must contain no 'Unknown' entries anywhere.
## @param plot_covariate name (string) of the covariate to stratify the KM
##   curves on; the Cox model adjusts for ALL columns.
## @return list(fitCox, fitSurv, colid_covariate) where colid_covariate is
##   the column index of plot_covariate, used by kmplots() to locate the HR
##   row in the coefficient table.
surv_fit <- function(df_clinical_info, plot_covariate){
  library(survival)
  # df_clinical_info <- df_plot1
  # plot_covariate <- 'followed'
  ## refuse to fit with unknowns: they would silently become factor levels
  is_unknown <- any(sapply(df_clinical_info, function(x) grepl('Unknown', x)))
  if(is_unknown){
    stop('unknowns detected, please remove the corresponding samples and rerun!\n')
  }
  ## with covariates intended for adjustment
  fitCox <- coxph(Surv(time, event) ~ ., data = df_clinical_info)
  ## KM fit stratified on the single plotting covariate only
  surv_formula <- as.formula(paste('Surv(time, event) ~ ', plot_covariate, sep = ''))
  fitSurv <- survfit(surv_formula, data = df_clinical_info)
  ## need to pass colid_covariate to the kmplots function to locate HR and p-values from the coef table
  colid_covariate <- which(colnames(df_clinical_info) == plot_covariate)
  z <- list(fitCox = fitCox,
            fitSurv = fitSurv,
            colid_covariate = colid_covariate)
  ## return visibly (assignment as the last expression returns invisibly)
  z
}
################################################################################################################
## plot KM curves using the surv_fit output
## input:
## 1. surv_fit, output from surv_fit(), contains coxph(), survfit(), as well as colid_covariate the column
## index of the target covariate
## 2. display_coef_idx, when levels of target covariates are more than two, pick the desired contrast to
## display by inputing the index of them from the summary(fitCox)$coef table
################################################################################################################
## Draw KM curves from a surv_fit() result, annotated with HR, p-value and
## median survival time segments.
##
## @param surv_fit list from surv_fit(): fitCox, fitSurv, colid_covariate.
## @param main plot title.
## @param display_coef_idx row index(es) into the Cox coefficient table to
##   display; required when the stratifying covariate has > 2 levels.
## @param simplify if TRUE, skip the legend/annotation block.
## @param drawSegment when simplify = TRUE, still draw the median-survival
##   segments if TRUE.
## @param xlim optional x-axis limits passed through to plot().
kmplots <- function(
  surv_fit, main, display_coef_idx = NULL, simplify = F, drawSegment = T, xlim = NULL
){
  library(survival)
  ## BUG FIX: was surv_fit$fixCox (typo), which silently assigned NULL; the
  ## plot still worked only because the summaries below re-extracted from
  ## surv_fit directly. Extract once and use the locals consistently.
  fitCox <- surv_fit$fitCox
  fitSurv <- surv_fit$fitSurv
  colid_covariate <- surv_fit$colid_covariate
  fitCox_sum <- summary(fitCox)
  fitSurv_sum <- summary(fitSurv)
  ## prepare for KM plot: one colour per stratum, stratum label after '='
  line_colors <- c('red', 'blue', 'green', 'pink', 'orange', 'purple', 'cyan')
  surv_strata <- toupper(sapply(names(fitSurv$strata), function(x)strsplit(x, '=')[[1]][2]))
  n_levels <- length(fitSurv$strata)
  if(n_levels > 7) stop('plot_covariate has more than 7 levels, plot will be ugly, please check covariates!\n')
  ## scalar condition: use short-circuit '&&' (original used vectorized '&')
  if(n_levels > 2 && is.null(display_coef_idx)){
    stop('more than two levels in the selected covariate, please manually input the row numbers of the desired coeffients to display by setting display_coef_idx!\n')
  }else if(!is.null(display_coef_idx)){
    rowid <- display_coef_idx
  }else{
    ## assumes df_clinical_info columns are (time, event, covariates...), so
    ## covariate k maps to coefficient row k - 2 -- TODO confirm with surv_fit()
    rowid <- colid_covariate - 2
  }
  ## start plotting
  par(mar = c(4,4,5,2))
  plot(
    fitSurv, col = line_colors[1:n_levels], lwd = 4,
    mark.time = T,
    xlab = 'Time', ylab = 'Proportion Event Free',
    main = main,
    cex.main = 1, cex = 1, cex.lab = 1,
    xaxs = 'i', yaxs = 'i', # remove extra space between plot and axis
    xlim = xlim
  )
  if(simplify == F){
    ## legend: one entry per stratum (n / events) plus HR with 95% CI and
    ## the log-rank p-value (drawn in white so only the text shows)
    legend('topright',
           legend = c(paste0(surv_strata, ' (n = ', fitSurv$n, ', event = ', fitSurv_sum$table[, 4], ')'),
                      paste0(
                        'HR = ', round(fitCox_sum$coef[rowid,2], 3),
                        ' (', round(fitCox_sum$conf.int[3], 3), '-', round(fitCox_sum$conf.int[4], 3),')'
                        #', p-value =', round(fitCox_sum$coef[rowid,5], 3)
                      ),
                      paste0('log-rank test p-value =', round(fitCox_sum$sctest[3], 3))),
           col = c(line_colors[1:n_levels], rep('white', length(rowid) + 1)), lty = 1, lwd = 2, bty='n', cex = 0.8)
    ## add segments and text of median survival time
    x_del_position <- (-1)^(1:7) * 0.1
    # y_del_position <- floor(((1:7)-1)/2) * 0.1
    y_del_position <- (1:7) * 0.1 - 0.1
    idx = which(colnames(fitSurv_sum$table) == 'median')
    lapply(1:n_levels, function(x){
      segments(fitSurv_sum$table[x,idx], -0.1, fitSurv_sum$table[x,idx], 0.5, col = line_colors[x], lty = 2, lwd = 2)
      text(fitSurv_sum$table[x,idx] + x_del_position[x], y_del_position[x], round(fitSurv_sum$table[x,idx], 2), col = line_colors[x])
    })
    segments(0, 0.5, max(fitSurv_sum$table[,idx]), 0.5, col = 'black', lty = 2, lwd = 2)
  }else{
    if(drawSegment){
      ## add segments of median survival time (no legend in simplified mode)
      x_del_position <- (-1)^(1:7) * 0.1
      # y_del_position <- floor(((1:7)-1)/2) * 0.1
      y_del_position <- (1:7) * 0.1 - 0.1
      idx = which(colnames(fitSurv_sum$table) == 'median')
      lapply(1:n_levels, function(x){
        segments(fitSurv_sum$table[x,idx], -0.1, fitSurv_sum$table[x,idx], 0.5, col = line_colors[x], lty = 2, lwd = 2)
      })
      segments(0, 0.5, max(fitSurv_sum$table[,idx]), 0.5, col = 'black', lty = 2, lwd = 2)
    }
  }
}
################################################################################################################
## Demographic table generator
## input:
## 1. dat, a dataframe with all demographic covariates to summarize, column names will be
## displayed in the final table, pick your favourite as input
## 2. y_covariate, the target covariate to split as columns, must be two levels
## 3. x_covariate, a vector of demographic covariates of interest
## 4. category_test, whether to carry out proportion test on each category
## output:
## a demographic table
##example function usage:
## get_demogr_table(regova,"firstline_aftercollection",c("Age","Grade","Stage","Race"),TRUE)
################################################################################################################
## Build a demographics summary table split by y_covariate (columns).
## Numeric covariates: mean (sd) per group plus a t-test (2 groups) or
## one-way ANOVA (>2 groups) p-value. Categorical covariates: count (%)
## per group plus a Fisher exact p-value and, optionally, per-category
## proportion tests. Returns a character matrix ready for printing.
## NOTE(review): y_covariate is expected to be a factor (nlevels()/levels()
## are used) -- confirm callers pass factors, not character vectors.
get_demogr_tina <- function(dat, y_covariate, x_covariate, category_test=TRUE){
# dat=regova
# y_covariate="firstline_aftercollection"
# x_covariate=c("agecat","grade","stage","racecat")
# category_test=TRUE
#library(xtable)
library(plyr)
#y_idx <- which(colnames(df_demographics) == y_covariate)
y_cov <- dat[, y_covariate]
x_cov <- dat[,x_covariate]
n_y_level <- nlevels(y_cov)
nColumns <- n_y_level + 2
## per-group sample sizes, reused as denominators for the percentages
n_table <- table(y_cov)
## process numeric variables
x_num_idx <- which(sapply(x_cov, is.numeric))
if (length(x_num_idx)!=0) {
x_num <- data.frame(x_cov[, x_num_idx])
x_num_names <- colnames(x_cov)[x_num_idx]
colnames(x_num) <- x_num_names
n_x_num <- ncol(x_num)
if(n_y_level < 2){
stop('y_covariate must have at least two levels to compute the p values.\n')
}else if(n_y_level == 2){
## two groups: t-test per numeric covariate
num_p <- round(sapply(1:n_x_num, function(x_idx){
#wilcox.test(x_num[, x_idx] ~ y_cov)$p.value
t.test(x_num[, x_idx] ~ y_cov)$p.value
}), 4)
}else{
## more than two groups: one-way ANOVA p-value
num_p <- round(sapply(1:n_x_num, function(x_idx){
aov_sum <- summary(aov(x_num[, x_idx] ~ y_cov))
aov_sum[[1]]$`Pr(>F)`[1]
}))
}
## one "mean (sd)" row per numeric covariate
ls_num_tables <- lapply(1:n_x_num, function(x_idx){
#tab <- tapply(x_num[,x_idx], y_cov, median)
#iqr_low <- tapply(x_num[,x_idx], y_cov, function(x)quantile(x, 0.25))
#iqr_high <- tapply(x_num[,x_idx], y_cov, function(x)quantile(x, 0.75))
#tab <- paste0(tab, '(', iqr_low, '-', iqr_high, ')')
num_mean <- tapply(x_num[,x_idx], y_cov, mean, na.rm = T)
num_sd <- tapply(x_num[,x_idx], y_cov, sd, na.rm = T)
num_mean <- paste(num_mean, '(',num_sd ,')')
})
names(ls_num_tables) <- x_num_names
x_num_table <- do.call(rbind, ls_num_tables)
x_num_table <- cbind(x_num_table, num_p)
}
if (ncol(x_cov)>length(x_num_idx)) {
## process categorical variables
if (length(x_num_idx)==0) {
x_cat <- data.frame(x_cov)
x_cat_names <- colnames(x_cov)
colnames(x_cat) <- x_cat_names
} else {
x_cat <- data.frame(x_cov[, -x_num_idx])
x_cat_names <- colnames(x_cov[,-x_num_idx])
colnames(x_cat) <- x_cat_names
}
n_x_cat <- ncol(x_cat)
## one count table per categorical covariate; the NA header row holds the
## covariate name so categories indent under it in the final table
ls_cat_tables <- lapply(1:n_x_cat, function(x_idx){
tab1 <- table(x_cat[,x_idx], y_cov)
tab2 <- rbind(NA, tab1)
rownames(tab2)[1] <- x_cat_names[x_idx]
tab2
})
## overall association test per covariate; NA when Fisher fails (e.g. too
## large a table even with the enlarged workspace)
fisher_p <- sapply(ls_cat_tables, function(x) {
p_value <- tryCatch(fisher.test(x[-1,], workspace = 2e8)$p.value, error = function(e)return(NA))
p_value <- round(p_value, 4)
})
ls_cat_tables <- lapply(1:n_x_cat, function(x){
nx <- nrow(ls_cat_tables[[x]])
if (category_test) {
## per-category two-sample proportion tests against the group sizes
prop_p <- sapply(2:nx, function(x_row) {
p_value <- tryCatch(prop.test(ls_cat_tables[[x]][x_row,],n_table)$p.value, error = function(e)return(NA))
p_value <- round(p_value, 4)
})
out <- cbind(ls_cat_tables[[x]], c(fisher_p[x], prop_p))
out <- rbind(NA, out)
out
} else {
out <- cbind(ls_cat_tables[[x]], c(fisher_p[x], rep(NA, nx-1)))
out <- rbind(NA, out)
out
}
})
x_cat_table_tmp <- do.call(rbind, ls_cat_tables)
## format counts as "count(percent%)"; last column (p-values) kept as-is
x_cat_table <- sapply(1:(ncol(x_cat_table_tmp)-1), function(x){
paste0(x_cat_table_tmp[, x], '(', round(x_cat_table_tmp[, x]/n_table[x]*100, 1), '%)')
})
x_cat_table <- cbind(x_cat_table, x_cat_table_tmp[,ncol(x_cat_table_tmp)])
x_cat_table[grep('NA', x_cat_table)] <- NA
}
## assemble: header row, n row, numeric rows, categorical rows
n_table <- c('n', paste0(n_table), NA)
x_num_table <- cbind(rownames(x_num_table), x_num_table)
x_cat_table <- cbind(rownames(x_cat_table_tmp), x_cat_table)
x_table <- rbind(n_table, x_num_table, x_cat_table)
x_table <- rbind(c(NA, levels(y_cov), 'P-value'), x_table)
rownames(x_table) <- NULL
colnames(x_table) <- NULL
x_table[grep('^$', x_table)] <- NA
return(x_table)
#align <- paste0(c('ll', rep('c', ncol(x_table)-1)), collapse = '')
#print(xtable(x_table, align=align, caption = caption), include.rownames=FALSE, include.colnames=FALSE,hline.after=c(0, 2, nrow(x_table)), file = outfile, caption.placement = 'top', size = 'scriptsize')
}
################################################################################################################
## Format individual subject for drug time line plotting
################################################################################################################
## Prepare one subject's therapy records for timeline plotting.
## All dates become offsets from the specimen collection date (time 0), in
## months (30-day months) or days. Drugs with missing/zero duration are
## flagged (unknownDuration) and given a short stub end point so they still
## render; registry_name becomes a factor ordered for a tidy y-axis.
## NOTE(review): uses arrange() -- presumably plyr's, which callers load;
## confirm plyr (or dplyr) is attached before calling.
format_drug_line <- function(df_therapy_by_subject, unit = 'month'){
# df_therapy_by_subject <- ls_drug_by_subject[[13]]
df_therapy_by_subject$drugstart <- as.Date(df_therapy_by_subject$drugstart, format = '%Y-%m-%d')
df_therapy_by_subject$drugend <- as.Date(df_therapy_by_subject$drugend, format = '%Y-%m-%d')
df_therapy_by_subject$collectiondate <- as.Date(df_therapy_by_subject$collectiondate, format = '%Y-%m-%d')
df_therapy_by_subject$determinationdate <- as.Date(df_therapy_by_subject$determinationdate, format = '%Y-%m-%d')
## divisor converting day differences to the requested unit
if(unit == 'month'){
unitToDivide = 30
}else if(unit == 'day'){
unitToDivide = 1
}
## all time points are expressed relative to the collection date (= 0)
df_therapy_by_subject <- data.frame(
masterdeid = df_therapy_by_subject$masterdeid,
registry_name = df_therapy_by_subject$registry_name,
collectionPoint = 0,
drugStartPoint = as.numeric(df_therapy_by_subject$drugstart - df_therapy_by_subject$collectiondate)/unitToDivide,
drugEndPoint = as.numeric(df_therapy_by_subject$drugend - df_therapy_by_subject$collectiondate)/unitToDivide,
drugDuration = as.numeric(df_therapy_by_subject$drugend - df_therapy_by_subject$drugstart)/unitToDivide,
determinationPoint = as.numeric(df_therapy_by_subject$determinationdate - df_therapy_by_subject$collectiondate)/unitToDivide
)
df_therapy_by_subject <- arrange(df_therapy_by_subject, drugStartPoint, drugEndPoint)
## flag drugs whose duration is unknown (NA) or recorded as zero
df_therapy_by_subject$unknownDuration <- ifelse(is.na(df_therapy_by_subject$drugDuration) | df_therapy_by_subject$drugDuration == 0,
                                                T, F)
## unknown-duration drugs get a 15-day stub so the segment is visible
df_therapy_by_subject$plotEndPoint <- ifelse(df_therapy_by_subject$unknownDuration == T,
                                             df_therapy_by_subject$drugStartPoint + 15/unitToDivide,
                                             df_therapy_by_subject$drugEndPoint)
## order factor levels so the earliest-starting drug plots at the top
levels_order = df_therapy_by_subject$registry_name[order(df_therapy_by_subject$drugStartPoint,
                                                         df_therapy_by_subject$drugEndPoint,
                                                         decreasing = T)]
levels_order <- levels_order[which(!duplicated(levels_order, fromLast = T))]
df_therapy_by_subject$registry_name <- factor(
df_therapy_by_subject$registry_name,
levels = levels_order)
df_format <- df_therapy_by_subject
}
################################################################################################################
## plot the drug time line using the output from format_drug_line() as input
################################################################################################################
## Plot one subject's drug timeline (output of format_drug_line()).
## x axis: time relative to specimen collection (purple line at 0; black
## line at the determination date). Drugs with unknown duration are
## colour-coded via the unknownDuration flag.
##
## @param df_format one subject's table from format_drug_line().
## @return list(plot_drugs = ggplot object), returned visibly.
plot_drug_line <- function(df_format){
  library(ggplot2)
  plot_drugs <- ggplot(df_format, aes(colour = unknownDuration)) +
    geom_segment(aes(x = drugStartPoint, xend = plotEndPoint, y = registry_name, yend = registry_name),
                 size = 5) +
    labs(x = 'Time (months)', y = NULL,
         title = paste0('masterdeid: ', df_format$masterdeid[1])) +
    geom_vline(xintercept = 0, color = 'purple') +
    geom_vline(xintercept = df_format$determinationPoint[1], color = 'black') +
    theme(title = element_text(size = 20), axis.title = element_text(size = 20), axis.text = element_text(size = 15),
          axis.text.x = element_text(angle = 90, hjust = 1),
          panel.grid = element_line(size = 1),
          legend.position='none')
  z <- list(plot_drugs = plot_drugs)
  ## return visibly (assignment as the last expression returns invisibly)
  z
}
################################################################################################################
## ovarian cancer registry ion data process appended functions
################################################################################################################
################################################################################################################
## To generate all combinations of weight matrix for given options and druggroups
## input:
## 1. weight_options <- c(1, 0.6, 0.3)
## 2. druggroup <- c('dg1', 'dg2', 'dg3', 'dg4', 'dg5')
## output:
## a matrix. columns are druggroup names, rows are weight combinations.
################################################################################################################
## Enumerate every weight combination over the given druggroups.
##
## @param weight_options numeric vector of candidate weight values.
## @param druggroup character vector of druggroup names (output columns).
## @return matrix with one column per druggroup and one row per combination
##   (permutations with repetition: length(weight_options)^length(druggroup)
##   rows). Also prints a short size summary.
generate_weight_matrix <- function(weight_options, druggroup){
  library(gtools)
  n_values <- length(weight_options)
  n_drugs <- length(druggroup)
  wt_matrix <- permutations(n = n_values, r = n_drugs, weight_options, repeats.allowed = T)
  colnames(wt_matrix) <- druggroup
  cat(n_drugs, 'drug groups\n')
  cat(n_values, 'values for each drug group\n')
  cat(nrow(wt_matrix), 'combinations\n')
  wt_matrix
}
################################################################################################################
## weight conversion to sum to 1
################################################################################################################
## Normalize a weight vector so it sums to 1. NAs are excluded from the
## total but preserved in the output at their original positions.
##
## @param wt numeric vector of raw weights.
## @return wt / sum(wt, na.rm = TRUE), returned visibly (the original ended
##   on an assignment, which returns its value invisibly).
convert_weight <- function(wt){
  wt_star <- wt/sum(wt, na.rm = T)
  wt_star
}
################################################################################################################
## convert masterpatientid to masterdeid
################################################################################################################
## Map masterpatientid(s) to masterdeid via the ion/registry ovarian case
## list. NOTE(review): relies on get_sting_query() -- presumably a DB helper
## analogous to get_ryan_query() used elsewhere in this file; confirm the
## name is not a typo.
##
## @param masterpatientid vector of master patient ids.
## @return masterdeid per input id, NA where unmapped; returned visibly.
convert_masterdeid <- function(masterpatientid){
  query <- 'select * from ion_reg_ova_case_lst;'
  masterdeid_mapping <- get_sting_query('bioinfo_projects', query)
  masterdeid <- masterdeid_mapping$masterdeid[match(masterpatientid, masterdeid_mapping$masterpatientid)]
  ## return visibly (assignment as the last expression returns invisibly)
  masterdeid
}
################################################################################################################
## convert registry_names to druggroups
## mapping_table <- data.frame(registry_name, druggroup_name)
################################################################################################################
## Map registry drug names to druggroup names via a lookup table.
##
## @param registry_names character vector of registry_name values.
## @param mapping_table data.frame with columns registry_name and
##   druggroup_name.
## @return character vector of druggroup names, NA for unmapped drugs (NAs
##   are deliberately kept so positions stay aligned with the input).
convert_registry_to_druggroup <- function(registry_names, mapping_table){
  #registry_names <- strsplit(df_therapy_ken$therapies[1], ',')[[1]]
  #mapping_table <- reg_druggroup_mapping
  druggroup <- as.character(mapping_table$druggroup_name[match(registry_names, mapping_table$registry_name)])
  ## return visibly (assignment as the last expression returns invisibly)
  druggroup
}
################################################################################################################
## Followings are archived functions. DO NOT use
################################################################################################################
################################################################################################################
## Convert drug rule output file format such that columns are drugs in cmi_name, rows are AccessionNumber
## @@@ Important to note that there are cases where a single drug recommendation might be derived from two or more
## @@@ independent drug rules, which might lead to conflict recommendation. Need to resolve this later.
################################################################################################################
## ARCHIVED -- do not use. Legacy variant of format_recommend() that keys on
## AccessionNumber and cmi_name instead of masterdeid / registry_name.
## Output: numeric matrix, rows = AccessionNumber, columns = cmi_name drugs,
## values 1 / -1 / 0 as in format_recommend().
## @@@ Important to note that there are cases where a single drug recommendation might be derived from two or more
## @@@ independent drug rules, which might lead to conflict recommendation. Need to resolve this later.
format_recommend_akiv <- function(drug_rule_out){
library(plyr)
## full CMI drug list defines the output columns
query <- 'select * from cmi_to_registry_agent_lookup'
reg_cmi_mapping <- get_ryan_query('registry_freeze_18_aug_2015', query)
drug_cmi_name <- data.frame(cmi_name = sort(as.character(reg_cmi_mapping$cmi_name)))
#drug_registry_name <- data.frame(registry_name = sort(as.character(reg_cmi_mapping$registry_name)))
# colnames(drug_rule_out) <- c('AccessionNumber', 'drug_group', 'xx', 'xxx', 'prediction', 'marker', 'cmi_name', 'xxxx', 'xxxxx', 'xxxxxx')
## recode text predictions to the signed numeric scale used downstream
drug_rule_out$prediction_num <- drug_rule_out$prediction
drug_rule_out$prediction_num[drug_rule_out$prediction == 'Benefit'] <- 1
drug_rule_out$prediction_num[drug_rule_out$prediction == 'Lack Of Benefit'] <- -1
drug_rule_out$prediction_num[drug_rule_out$prediction == 'Indeterminate'
                             |drug_rule_out$prediction == 'DoNotReport'] <- 0
## one data.frame per case, padded (all.y = T) to the full drug list
ls_byPatient_recommend <- dlply(drug_rule_out, .variables = 'AccessionNumber', function(AccessionNumber){
out <- data.frame(cmi_name = AccessionNumber$cmi_name,
                  prediction = AccessionNumber$prediction_num)
out <- merge(out, drug_cmi_name, by = 'cmi_name', all.y = T)
out$cmi_name <- as.character(out$cmi_name)
out <- out[order(out$cmi_name), ]
## @@ some drugs are in different drug-rule groups, remove duplicated for now, assuming there is no conflict
if(any(duplicated(out$cmi_name))){
idx_dups <- which(duplicated(out$cmi_name))
out <- out[-idx_dups, ]
}
out
})
## stack the per-case prediction vectors into a case x drug matrix
ls_recommend <- llply(ls_byPatient_recommend, function(x){
as.integer(as.character(x$prediction))
})
df_recommend <- do.call(rbind, ls_recommend)
## missing recommendations default to 0 (neither benefit nor lack)
df_recommend[is.na(df_recommend)] <- 0
colnames(df_recommend) <- drug_cmi_name$cmi_name
rownames(df_recommend) <- names(ls_byPatient_recommend)
df_recommend
}
################################################################################################################
## Demographic table generator using xtable
## input:
## 1. df_demographics, a dataframe with all demographic covariates to summarize, column names will be
## displayed in the final table, pick your favourite as input
## 2. y_covariate, the target covariate to split as columns, must be at least two levels
## 3. x_covariate, a vector of demographic covariates of interest
## 4. category_test, whether to carry out proportion test on each category
## 5. caption, table name
## 6. outfile, output directory and filename
################################################################################################################
## ARCHIVED -- do not use (superseded by get_demogr_tina()).
## Demographic table generator that prints a LaTeX table via xtable.
## Numeric covariates: median (IQR) per group plus Wilcoxon (2 groups) or
## ANOVA (>2 groups) p-value. Categorical covariates: count (%) per group
## plus Fisher exact p-value and optional per-category proportion tests.
## NOTE(review): unlike get_demogr_tina(), this assumes at least one
## numeric AND one categorical covariate are present -- no guards.
get_demogr_ryan <- function(
df_demographics, y_covariate, x_covariate, category_test=TRUE,
caption = caption,
outfile = ''
){
# caption <- 'my table'
# df_demographics <- df_plot4[, -c(1:2)]
# df_demographics <- data.frame(age1 = sample(df_plot4$age),
#                               df_demographics)
# df_demographics <- df_demographics[, -(4:5)]
# y_covariate <- 'followed'
library(xtable)
library(plyr)
y_idx <- which(colnames(df_demographics) == y_covariate)
y_cov <- df_demographics[, y_idx]
x_cov <- df_demographics[, x_covariate]
n_y_level <- nlevels(y_cov)
nColumns <- n_y_level + 2
## per-group sample sizes, reused as percentage denominators
n_table <- table(y_cov)
## process numeric variables
x_num_idx <- which(sapply(x_cov, is.numeric))
x_num <- data.frame(x_cov[, x_num_idx])
x_num_names <- colnames(x_cov)[x_num_idx]
colnames(x_num) <- x_num_names
n_x_num <- ncol(x_num)
if(n_y_level < 2){
stop('y_covariate must have at least two levels to compute the p values.\n')
}else if(n_y_level == 2){
## two groups: Wilcoxon rank-sum test per numeric covariate
num_p <- round(sapply(1:n_x_num, function(x_idx){
wilcox.test(x_num[, x_idx] ~ y_cov)$p.value
}), 4)
}else{
## more than two groups: one-way ANOVA p-value
num_p <- round(sapply(1:n_x_num, function(x_idx){
aov_sum <- summary(aov(x_num[, x_idx] ~ y_cov))
aov_sum[[1]]$`Pr(>F)`[1]
}))
}
## one "median(q1-q3)" row per numeric covariate
ls_num_tables <- lapply(1:n_x_num, function(x_idx){
tab <- tapply(x_num[,x_idx], y_cov, median, na.rm = T)
iqr_low <- tapply(x_num[,x_idx], y_cov, function(x)quantile(x, 0.25, na.rm = T))
iqr_high <- tapply(x_num[,x_idx], y_cov, function(x)quantile(x, 0.75, na.rm = T))
tab <- paste0(tab, '(', iqr_low, '-', iqr_high, ')')
})
names(ls_num_tables) <- x_num_names
x_num_table <- do.call(rbind, ls_num_tables)
x_num_table <- cbind(x_num_table, num_p)
## process categorical variables
x_cat <- data.frame(x_cov[, -x_num_idx])
x_cat_names <- colnames(x_cov[-x_num_idx])
colnames(x_cat) <- x_cat_names
n_x_cat <- ncol(x_cat)
## one count table per categorical covariate; the NA header row carries the
## covariate name so its categories indent under it in the final table
ls_cat_tables <- lapply(1:n_x_cat, function(x_idx){
tab1 <- table(x_cat[,x_idx], y_cov)
tab2 <- rbind(NA, tab1)
rownames(tab2)[1] <- x_cat_names[x_idx]
tab2
})
## overall association per covariate; NA when Fisher's test errors out
fisher_p <- sapply(ls_cat_tables, function(x) {
p_value <- tryCatch(fisher.test(x[-1,], workspace = 2e8)$p.value, error = function(e)return(NA))
p_value <- round(p_value, 4)
})
ls_cat_tables <- lapply(1:n_x_cat, function(x){
nx <- nrow(ls_cat_tables[[x]])
if (category_test) {
## per-category proportion tests against the group sizes
prop_p <- sapply(2:nx, function(x_row) {
p_value <- tryCatch(prop.test(ls_cat_tables[[x]][x_row,],n_table)$p.value, error = function(e)return(NA))
p_value <- round(p_value, 4)
})
out <- cbind(ls_cat_tables[[x]], c(fisher_p[x], prop_p))
out <- rbind(NA, out)
out
} else {
out <- cbind(ls_cat_tables[[x]], c(fisher_p[x], rep(NA, nx-1)))
out <- rbind(NA, out)
out
}
})
x_cat_table_tmp <- do.call(rbind, ls_cat_tables)
## format counts as "count(percent%)"; last column (p-values) kept as-is
x_cat_table <- sapply(1:(ncol(x_cat_table_tmp)-1), function(x){
paste0(x_cat_table_tmp[, x], '(', round(x_cat_table_tmp[, x]/n_table[x]*100, 1), '%)')
})
x_cat_table <- cbind(x_cat_table, x_cat_table_tmp[,ncol(x_cat_table_tmp)])
x_cat_table[grep('NA', x_cat_table)] <- NA
## assemble: header row, n row, numeric rows, categorical rows
n_table <- c('n', paste0('(', n_table, ')'), NA)
x_num_table <- cbind(rownames(x_num_table), x_num_table)
x_cat_table <- cbind(rownames(x_cat_table_tmp), x_cat_table)
x_table <- rbind(n_table, x_num_table, x_cat_table)
x_table <- rbind(c(NA, levels(y_cov), 'P-value'), x_table)
rownames(x_table) <- NULL
colnames(x_table) <- NULL
x_table[grep('^$', x_table)] <- NA
## emit the LaTeX table (side effect); nothing useful is returned
align <- paste0(c('ll', rep('c', ncol(x_table)-1)), collapse = '')
print(xtable(x_table, align=align, caption = caption),
      include.rownames=FALSE, include.colnames=FALSE,
      hline.after=c(0, 2, nrow(x_table)),
      file = outfile,
      #tabular.environment="longtable",
      caption.placement = 'top', size = 'scriptsize')
}
|
# Unit tests for insertRowAndKeepAttr(): inserting a row into a matrix
# must preserve user-defined attributes and any custom S3 class.
library('testthat')
context('insertRowAndKeepAttr')
# Inserting a row must keep a custom attribute and grow the row count by one.
test_that("Keeps attributes", {
test <- matrix(1:4, ncol=2)
attr(test, 'wow') <- 1000
test <- insertRowAndKeepAttr(test, 1)
expect_equal(attr(test, 'wow'), 1000)
expect_equal(nrow(test), 3)
})
# Inserting a row must keep a prepended S3 class in first position.
test_that("Keeps class", {
test <- matrix(1:4, ncol=2)
attr(test, 'wow') <- 1000
class(test) <- c("new_class", class(test))
test <- insertRowAndKeepAttr(test, 1)
expect_true("new_class" == class(test)[1])
})
| /tests/testthat/test-insertRowAndKeepAttr.R | no_license | Holzhauer/Gmisc | R | false | false | 479 | r | library('testthat')
context('insertRowAndKeepAttr')
# insertRowAndKeepAttr() must not strip user attributes when inserting a row.
test_that("Keeps attributes", {
test <- matrix(1:4, ncol=2)
attr(test, 'wow') <- 1000
test <- insertRowAndKeepAttr(test, 1)
expect_equal(attr(test, 'wow'), 1000)
expect_equal(nrow(test), 3)
})
# insertRowAndKeepAttr() must keep a prepended custom class in first position.
test_that("Keeps class", {
test <- matrix(1:4, ncol=2)
attr(test, 'wow') <- 1000
class(test) <- c("new_class", class(test))
test <- insertRowAndKeepAttr(test, 1)
expect_true("new_class" == class(test)[1])
})
|
#' draws a graph of multiple names over time
#'
#' @param my_names list of names
#' @param the_sex Male or female, both by default
#'
#' @return a plot
#' @export
#' @import dplyr
#' @import ggplot2
#'
#' @examples
#' draw_names(c("Clément","Bertrand"),"M")
#'
# Plot yearly counts for the requested names as one line per name.
# Relies on a dataset `mydata` (assumed columns: name, sex, year, n --
# TODO confirm it is lazy-loaded by the package) and on dplyr/ggplot2.
draw_names <- function(my_names, the_sex = c("M", "F")) {
  yearly_counts <- mydata %>%
    filter(name %in% my_names, sex %in% the_sex) %>%
    group_by(year, name) %>%
    summarise(count = sum(n))
  # return the ggplot object directly (same object the caller received before)
  ggplot(data = yearly_counts, aes(x = year, y = count, colour = name)) +
    geom_line(aes(group = name))
}
| /R/draw_names.R | no_license | clemonster/nameplay | R | false | false | 590 | r |
#' draws a graph of multiple names over time
#'
#' @param my_names list of names
#' @param the_sex Male or female, both by default
#'
#' @return a plot
#' @export
#' @import dplyr
#' @import ggplot2
#'
#' @examples
#' draw_names(c("Clément","Bertrand"),"M")
#'
draw_names <- function(my_names, the_sex = c("M","F")){
names_count <- mydata %>%
filter(name %in% my_names, sex %in% the_sex) %>%
group_by(year,name) %>%
summarise(count = sum(n))
plot <- ggplot(data = names_count,
aes(x = year, y = count, colour = name)) +
geom_line()
return(plot)
}
|
# Predict each value of a binary (0/1) series by majority rule over the
# k preceding observations, and return the mean absolute prediction error.
#
# x: numeric vector of 0s and 1s
# k: window length; position i+k is predicted as 1 when at least half of
#    x[i..(i+k-1)] are 1, else 0
#
# Returns the proportion of mispredicted values among x[(k+1):n].
# (bug fix: a stray '_label~sumx@' token made the original unparsable;
#  also replaced the O(n*k) loop with O(n) sliding-window sums)
preda <- function(x, k) {
  n <- length(x)
  stopifnot(k >= 1, n > k)
  k2 <- k / 2
  # sliding window sums via cumulative sums:
  # sum(x[i:(i + k - 1)]) == csum[i + k] - csum[i]
  csum <- c(0, cumsum(x))
  win <- csum[(k + 1):n] - csum[1:(n - k)]
  pred <- as.integer(win >= k2)
  mean(abs(pred - x[(k + 1):n]))
}
| /Art of R Programming Code/Ch2/preda.R | no_license | abhi8893/Intensive-R | R | false | false | 291 | r | preda <- function(x,k) {
n <- length(x)
k2 <- k/2
# the vector pred will contain our predicted values
pred <- vector(length=n-k)
for (i in seq_len(n - k)) {
# majority vote over the window x[i .. i+k-1]
# (bug fix: removed a stray '_label~sumx@' token that broke parsing)
if (sum(x[i:(i+(k-1))]) >= k2) pred[i] <- 1 else pred[i] <- 0
}
return(mean(abs(pred-x[(k+1):n])))
}
\name{eulerian}
\alias{eulerian}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Method for finding an eulerian path or cycle.
}
\description{
An eulerian path is a path in a graph which visits every edge exactly once. This function returns an eulerian path from a graph (if there is any). It works for both directed and undirected graphs.
}
\usage{
eulerian(graph, start = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{graph}{
a \code{graphNEL} object.
}
\item{start}{
\code{character} or \code{NULL}. The name of the start node of an eulerian path.
}
}
\details{
If \code{start} is not \code{NULL}, then eulerian returns a path starting from it. Otherwise, the start node is automatically selected.
}
\value{
A character vector representing an eulerian path/cycle in \code{graph}. Each entry in the vector represents the name of a node in the graph.
}
\author{
Ashis Saha
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\examples{
require(graph)
require(eulerian)
g <- new("graphNEL", nodes=LETTERS[1:4], edgemode="undirected")
g <- addEdge(graph=g, from=LETTERS[1:3], to=LETTERS[2:4])
ep <- eulerian(g)
g <- new("graphNEL", nodes=as.character(1:10), edgemode="directed")
g <- addEdge(graph=g, from=c("1","2","2","3","4","5","6","6","7","8","9","10"),
to=c("10","1","6","2","2","4","5","8","9","7","6","3"))
ep <- eulerian(g, "6")
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
| /eulerian/man/eulerian.Rd | no_license | alorchhota/eulerian | R | false | false | 1,605 | rd | \name{eulerian}
\alias{eulerian}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Method for finding an eulerian path or cycle.
}
\description{
An eulerian path is a path in a graph which visits every edge exactly once. This function returns an eulerian path from a graph (if there is any). It works for both directed and undirected graphs.
}
\usage{
eulerian(graph, start = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{graph}{
a \code{graphNEL} object.
}
\item{start}{
\code{character} or \code{NULL}. The name of the start node of an eulerian path.
}
}
\details{
If \code{start} is not \code{NULL}, then eulerian returns a path starting from it. Otherwise, the start node is automatically selected.
}
\value{
A character vector representing an eulerian path/cycle in \code{graph}. Each entry in the vector represents the name of a node in the graph.
}
\author{
Ashis Saha
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\examples{
require(graph)
require(eulerian)
g <- new("graphNEL", nodes=LETTERS[1:4], edgemode="undirected")
g <- addEdge(graph=g, from=LETTERS[1:3], to=LETTERS[2:4])
ep <- eulerian(g)
g <- new("graphNEL", nodes=as.character(1:10), edgemode="directed")
g <- addEdge(graph=g, from=c("1","2","2","3","4","5","6","6","7","8","9","10"),
to=c("10","1","6","2","2","4","5","8","9","7","6","3"))
ep <- eulerian(g, "6")
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
## Functions to cache the inverse of a matrix.
## This function creates a special "matrix" object that can cache its inverse.
## Build a matrix wrapper that can memoise its inverse.
## Returns a list of four closures sharing one environment:
##   set(y)   -- replace the matrix and invalidate the cached inverse
##   get()    -- return the stored matrix
##   setInv() -- store a computed inverse
##   getInv() -- return the cached inverse, or NULL when not yet computed
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL  # stored inverse is stale once x changes
  }
  get <- function() {
    x
  }
  setInv <- function(solveMatrix) {
    cached_inverse <<- solveMatrix
  }
  getInv <- function() {
    cached_inverse
  }
  list(set = set, get = get, setInv = setInv, getInv = getInv)
}
## This function computes the inverse of the matrix.
## Return the inverse of the special "matrix" created by makeCacheMatrix().
## Uses the cached inverse when one is available, otherwise computes it
## with solve() and stores it for subsequent calls.
##
## x   : list produced by makeCacheMatrix()
## ... : further arguments forwarded to solve()
cacheSolve <- function(x, ...) {
  invr <- x$getInv()
  # return cache if the matrix hasn't changed (set() clears the cache)
  if (!is.null(invr)) {
    message("getting cached data")
    return(invr)
  }
  # cache miss: compute, store, and return the inverse
  data <- x$get()
  invr <- solve(data, ...)  # bug fix: '...' was accepted but never forwarded
  x$setInv(invr)
  invr
}
| /cachematrix.R | no_license | Shashank9830/ProgrammingAssignment2 | R | false | false | 736 | r | ## Functions to cache the inverse of a matrix.
## This function creates a special "matrix" object that can cache its inverse.
## Create a special "matrix" wrapper that caches its inverse.
## Returns a list of four closures sharing one environment:
##   set(y)   -- replace the matrix and drop any cached inverse
##   get()    -- return the stored matrix
##   setInv() -- store a computed inverse
##   getInv() -- return the cached inverse, or NULL when not yet computed
makeCacheMatrix <- function(x = matrix()) {
invr <- NULL
set <- function(y){
x <<- y
invr <<- NULL # the cached inverse is stale once x changes
}
get <- function() x
setInv <- function(solveMatrix) invr <<- solveMatrix
getInv <- function() invr
list(set = set, get = get, setInv = setInv, getInv = getInv)
}
## This function computes the inverse of the matrix.
## Compute (or fetch from cache) the inverse of a makeCacheMatrix() object.
## NOTE(review): '...' is accepted here but never forwarded to solve();
## consider solve(data, ...) so extra solver arguments take effect.
cacheSolve <- function(x, ...) {
invr <- x$getInv()
# return cache if matrix hasn't changed
if(!is.null(invr)){
message("getting cached data")
return(invr)
}
# calculate new inverse
data <- x$get()
invr <- solve(data)
x$setInv(invr)
invr
}
|
## plot3.R -- energy sub-metering over a two-day window
## Input : household.txt (';'-separated; the first data row holds the real
##         column names, hence the manual header handling below)
## Output: plot3.png (480 x 480)
data <- read.table("household.txt", sep = ";")
# first row carries the column names: promote it and drop it from the data
colnames(data) <- as.character(unlist(data[1, ]))
data <- data[-1, ]
# restrict to observations between 2007-02-01 and 2007-02-02 (inclusive);
# the character bounds are coerced to Date by the comparison operators
dateStart <- "2007-02-01"
dateEnd <- "2007-02-02"
data$Date <- as.Date(strptime(data$Date, format = "%d/%m/%Y"))
data2 <- subset(data, data$Date <= dateEnd & data$Date >= dateStart)
# combine date and time into a single POSIXct timestamp for the x axis
data2$newtime <- as.POSIXct(paste(data2$Date, data2$Time), format = "%Y-%m-%d %H:%M:%S")
# third plot: the three sub-metering series on one set of axes
png(filename = "plot3.png", width = 480, height = 480)
plot(data2$newtime,
     as.numeric(as.character(data2$Sub_metering_1)),
     type = "l",
     xlab = "",
     ylab = "Energy Sub metering")
# bug fix: convert the remaining series to numeric explicitly instead of
# handing character vectors to lines() and relying on silent coercion
lines(data2$newtime, as.numeric(as.character(data2$Sub_metering_2)), col = "red")
lines(data2$newtime, as.numeric(as.character(data2$Sub_metering_3)), col = "blue")
legend("topright",
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       col = c("black", "red", "blue"), lty = 1, cex = 0.8)
dev.off() | /plot3.R | no_license | mcb17/ExData_Plotting1 | R | false | false | 997 | r | data <- read.table("household.txt", sep = ";")
# rename column names
colnames(data) <- as.character(unlist(data[1,]))
data = data[-1,]
# extract data with date between 2007-02-01 and 2007-02-02
dateStart <- "2007-02-01"
dateEnd <- "2007-02-02"
data$Date <- as.Date(strptime((data$Date), format = "%d/%m/%Y"))
data2 <- subset(data, data$Date <= dateEnd & data$Date >= dateStart)
# combine date and time into one POSIXct timestamp for the x axis
data2$newtime <- as.POSIXct(paste(data2$Date, data2$Time), format="%Y-%m-%d %H:%M:%S")
# Third plot
png(filename = "plot3.png",
width = 480, height = 480)
plot(data2$newtime ,
as.numeric(as.character(data2$Sub_metering_1)),
type = "l",
xlab = "",
ylab = "Energy Sub metering")
# NOTE(review): Sub_metering_2/3 are passed to lines() as character
# vectors; an explicit as.numeric(as.character(...)) would be safer.
lines(data2$newtime, as.character(data2$Sub_metering_2), col = "red")
lines(data2$newtime, as.character(data2$Sub_metering_3), col = "blue")
legend("topright",
legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
col=c("black", "red", "blue"), lty = 1:1, cex = 0.8)
dev.off() |
# Simulate one martingale betting run against a game with a house edge.
#
# Starting from 'startMoney', repeatedly stake 'bet' on an event whose win
# probability is (0.5 - houseEdge/2).  A win banks the stake and resets it
# to 'baseBet'; a loss deducts the stake and multiplies it by 'betIncrease'.
# The run ends when the next stake can no longer be covered.
#
# startMoney  : initial bankroll
# baseBet     : stake placed after every win (default 1)
# betIncrease : multiplier applied to the stake after a loss (default 2)
# houseEdge   : casino edge; win probability is 0.5 - houseEdge/2
#
# Returns a list with:
#   maxMoney       -- highest bankroll reached during the run
#   maxMoneyAtStep -- step at which that maximum was first reached
#   steps          -- total number of bets placed
martingaleRun <- function(startMoney, baseBet = 1, betIncrease = 2.0, houseEdge = 0.01) {
  money <- startMoney
  bet <- baseBet
  winProb <- 0.5 - houseEdge / 2
  maxMoney <- startMoney
  maxMoneyAtStep <- 0
  step <- 0
  while (bet < money) {
    step <- step + 1
    if (runif(1) < winProb) {
      money <- money + bet
      bet <- baseBet
    } else {
      money <- money - bet
      bet <- bet * betIncrease
    }
    if (money > maxMoney) {
      maxMoney <- money
      maxMoneyAtStep <- step
    }
  }
  # bug fix: return the list visibly (the original ended on an assignment,
  # whose value is returned invisibly); dead 'mVector' code removed
  list(maxMoney = maxMoney, maxMoneyAtStep = maxMoneyAtStep, steps = step)
}
| /martingale.R | no_license | Coornail/r-martingale-simulator | R | false | false | 800 | r | martingaleRun <- function(startMoney, baseBet = 1, betIncrease = 2.0, houseEdge=0.01) {
mVector <- c();
money <- startMoney;
bet <- baseBet;
bettingAgainst <- (0.5-(houseEdge/2));
maxMoney <- startMoney;
maxMoneyAtStep <- 0;
step <- 0;
while (bet < money) {
step <- step + 1;
#mVector[step] <- money;
if (runif(1) < bettingAgainst) {
money <- money + bet;
bet <- baseBet;
} else {
money <- money - bet;
bet <- bet * betIncrease;
}
if (money > maxMoney) {
maxMoney <- money;
maxMoneyAtStep <- step;
}
}
#result <- list("maxMoney" = maxMoney, "maxMoneyAtStep" = maxMoneyAtStep, "steps" = step, "bets" = mVector);
result <- list("maxMoney" = maxMoney, "maxMoneyAtStep" = maxMoneyAtStep, "steps" = step);
}
|
library(quantmod)
library(ggplot2)
library(plyr)
library(grid)
library(gridExtra)
library(scales)
library(xlsx)
library(rgdal)
library(gdata)
library(sp)
# Carga data barril brent
getSymbols('DCOILBRENTEU', src='FRED')
# Carga datos Gasolina 95 y Gasoleo A de 2000 a 2015
precioCombust <- read.csv('src/precio_combustible.csv', header = T, sep = ';')
#Funcion para obtener como objeto Date todas las fechas del dataset precioCombust
# Build Date objects (first day of the month) from year and month vectors,
# zero-padding single-digit months so "%Y-%m-%d" parsing succeeds.
createDate <- function(anio, mes) {
  month_str <- ifelse(nchar(mes) == 1, paste0("0", mes), as.character(mes))
  as.Date(paste(anio, month_str, "01", sep = "-"), format = "%Y-%m-%d")
}
#creamos dataset con la fecha correcta del dataset precioCombust
#con formato aaaa-mm
combustibles <- data.frame(fecha = createDate(precioCombust$ANYO, precioCombust$MES),
tipo = precioCombust$GASOLINA,
precio = precioCombust$PRECIO)
#Agrupamos mensualmente el dataset del Brent y lo filtramos
#desde el primer mes del 2000 hasta ahora y formateamos la fecha en formato aaaa-mm
brent_monthly <- to.monthly(DCOILBRENTEU['2000::'])[,c(4)]
df.brent <- data.frame(fecha = as.Date(format(index(brent_monthly), "%Y-%m-%d")),
tipo = c('brent'),
precio = coredata(brent_monthly)[,1])
#Concatenamos df.brent bajo combustible con el tipo brent
df.total <- rbind(combustibles, df.brent)
################################## GRAFICAS ###########################################
graficaGasolina <- ggplot(combustibles, aes(fecha, precio, shape = tipo, colour = tipo)) +
geom_line(aes(group = tipo)) +
theme(legend.position = "top", axis.text.x = element_blank(), axis.ticks = element_blank(), axis.title.x = element_blank()) +
scale_x_date(labels = date_format("%Y"), breaks = "3 month")
graficaBrent <- ggplot(df.brent, aes(fecha, precio, shape = tipo, colour = tipo)) +
geom_line(aes(group = tipo)) +
theme(legend.position = "top", axis.text.x = element_text(angle=90)) +
scale_x_date(labels = date_format("%Y-%b"), breaks = "3 month")
grid.arrange(graficaGasolina, graficaBrent, ncol=1)
############################## CORRELACIONES ################################
cor(combustibles[combustibles$tipo == 95, c('precio')], df.brent$precio)
cor(combustibles[combustibles$tipo == 'GASOLEO_A', c('precio')], df.brent$precio)
# Parece que hay más correlación entre el diesel y el brent que con el gasoil 95 históricamente
########################### ANALISIS PRECIO GASOLINA EN MADRID ##########################
# Vamos a analizar los precios de los combustibles en Madrid
# mediante un mapa de calor para cada tipo
#Con el fichero descargado con los datos de ambos combustibles,
#creamos el dataframe
#Fuente: http://geoportalgasolineras.es/
precMedio.gasoleo <- read.xls('src/PRECIOS_SHP_23022015.xls', sheet = "promedio_gasoleo", header = TRUE, colClasses=c("Provincia"= "character","Localidad"= "character","TIPO"= "character","GEOCODIGO"= "character"),stringsAsFactors=FALSE)
DatosGasoleo <- precMedio.gasoleo[,1:5] # Realizo esta seleccion ya que ponia muchas columnas sin dato a la izquierda de la talba
precMedio.gasolina <- read.xls('src/PRECIOS_SHP_23022015.xls', sheet = "promedio_gasolina", header = TRUE, colClasses=c("Provincia"= "character","Localidad"= "character","TIPO"= "character","GEOCODIGO"= "character"), stringsAsFactors=FALSE)
DatosGasolina <- precMedio.gasolina[,1:5] # Realizo esta seleccion ya que ponia muchas columnas sin dato a la izquierda de la talba
#Leyenda
#Venta
##P: Venta al público en general.
##R: Venta restringida a socios o
#Remisión (Rem.):
##dm: Datos procedentes del distribuidor minorista.
##OM: Datos procedentes del operador mayorista.
#Cargamos el shapefile con los datos geoespaciales de los municipios de Madrid
municipios <- readOGR(dsn = "src/shapefile", layer = "municipios")
########################## FORTIFY DEL DATAFRAME ############################################
municipios@data$id <- rownames(municipios@data)
municipios.df <- fortify(municipios)
municipios.df <- join(municipios.df, municipios@data, by="id")
########################## JOIN CON LOS PRECIOS ##############################################
municipios.df <- join(municipios.df, DatosGasolina, by = c('GEOCODIGO'), type = "inner")
municipios.df <- join(municipios.df, DatosGasoleo, by = c('GEOCODIGO', 'Localidad', 'Provincia'), type = 'inner')
######################### GGPLOT PRECIOS DE GASOLINA #################################
plotGasolina <- ggplot(data=municipios.df, aes(x=long, y=lat, group=group))
plotGasolina <- plotGasolina + geom_polygon(aes(fill = PrecioGasolina)) # draw polygons
plotGasolina <- plotGasolina + theme(legend.position = "bottom", axis.ticks = element_blank(), axis.text.x = element_blank(), axis.title.x = element_blank(), axis.text.y = element_blank(), axis.title.y = element_blank())
plotGasolina <- plotGasolina + labs(title="Precio medio Gasolina 95 por municipio")
#plotGasolina <- plotGasolina + geom_path(color="grey", linestyle=2) # draw boundaries
plotGasolina <- plotGasolina + coord_equal()
plotGasolina <- plotGasolina + scale_fill_gradient(low = "#F5FBEF", high = "#38610B", space = "Lab", na.value = "grey50", guide = "colourbar")
######################### GGPLOT PRECIOS DE GASOLEO #################################
plotGasoleo <- ggplot(data=municipios.df, aes(x=long, y=lat, group=group))
plotGasoleo <- plotGasoleo + geom_polygon(aes(fill = PrecioGasoleo)) # draw polygons
plotGasoleo <- plotGasoleo + theme(legend.position = "bottom", axis.ticks = element_blank(), axis.text.x = element_blank(), axis.title.x = element_blank(), axis.text.y = element_blank(), axis.title.y = element_blank())
plotGasoleo <- plotGasoleo + labs(title="Precio medio Gasoleo A por municipio")
#plotGasoleo <- plotGasoleo + geom_path(color="grey", linestyle=2)# draw boundaries
plotGasoleo <- plotGasoleo + coord_equal()
plotGasoleo <- plotGasoleo + scale_fill_gradient(low = "#FBEFEF", high = "#610B0B", space = "Lab", na.value = "grey50", guide = "colourbar")
#Ambos plot dividiendo el grid en 2
grid.arrange(plotGasolina, plotGasoleo, ncol=2)
| /main.R | no_license | David-Carrasco/Brent-Oil-Prices-Analysis | R | false | false | 6,206 | r | library(quantmod)
library(ggplot2)
library(plyr)
library(grid)
library(gridExtra)
library(scales)
library(xlsx)
library(rgdal)
library(gdata)
library(sp)
# Carga data barril brent
getSymbols('DCOILBRENTEU', src='FRED')
# Carga datos Gasolina 95 y Gasoleo A de 2000 a 2015
precioCombust <- read.csv('src/precio_combustible.csv', header = T, sep = ';')
#Funcion para obtener como objeto Date todas las fechas del dataset precioCombust
createDate <- function(anio, mes){
date_month <- ifelse(nchar(mes) == 1, paste(anio, mes, sep='-0'), paste(anio, mes, sep='-'))
return(as.Date(paste(date_month, c('01'), sep='-'), format="%Y-%m-%d"))
}
#creamos dataset con la fecha correcta del dataset precioCombust
#con formato aaaa-mm
combustibles <- data.frame(fecha = createDate(precioCombust$ANYO, precioCombust$MES),
tipo = precioCombust$GASOLINA,
precio = precioCombust$PRECIO)
#Agrupamos mensualmente el dataset del Brent y lo filtramos
#desde el primer mes del 2000 hasta ahora y formateamos la fecha en formato aaaa-mm
brent_monthly <- to.monthly(DCOILBRENTEU['2000::'])[,c(4)]
df.brent <- data.frame(fecha = as.Date(format(index(brent_monthly), "%Y-%m-%d")),
tipo = c('brent'),
precio = coredata(brent_monthly)[,1])
#Concatenamos df.brent bajo combustible con el tipo brent
df.total <- rbind(combustibles, df.brent)
################################## GRAFICAS ###########################################
graficaGasolina <- ggplot(combustibles, aes(fecha, precio, shape = tipo, colour = tipo)) +
geom_line(aes(group = tipo)) +
theme(legend.position = "top", axis.text.x = element_blank(), axis.ticks = element_blank(), axis.title.x = element_blank()) +
scale_x_date(labels = date_format("%Y"), breaks = "3 month")
graficaBrent <- ggplot(df.brent, aes(fecha, precio, shape = tipo, colour = tipo)) +
geom_line(aes(group = tipo)) +
theme(legend.position = "top", axis.text.x = element_text(angle=90)) +
scale_x_date(labels = date_format("%Y-%b"), breaks = "3 month")
grid.arrange(graficaGasolina, graficaBrent, ncol=1)
############################## CORRELACIONES ################################
cor(combustibles[combustibles$tipo == 95, c('precio')], df.brent$precio)
cor(combustibles[combustibles$tipo == 'GASOLEO_A', c('precio')], df.brent$precio)
# Parece que hay más correlación entre el diesel y el brent que con el gasoil 95 históricamente
########################### ANALISIS PRECIO GASOLINA EN MADRID ##########################
# Vamos a analizar los precios de los combustibles en Madrid
# mediante un mapa de calor para cada tipo
#Con el fichero descargado con los datos de ambos combustibles,
#creamos el dataframe
#Fuente: http://geoportalgasolineras.es/
precMedio.gasoleo <- read.xls('src/PRECIOS_SHP_23022015.xls', sheet = "promedio_gasoleo", header = TRUE, colClasses=c("Provincia"= "character","Localidad"= "character","TIPO"= "character","GEOCODIGO"= "character"),stringsAsFactors=FALSE)
DatosGasoleo <- precMedio.gasoleo[,1:5] # Realizo esta seleccion ya que ponia muchas columnas sin dato a la izquierda de la talba
precMedio.gasolina <- read.xls('src/PRECIOS_SHP_23022015.xls', sheet = "promedio_gasolina", header = TRUE, colClasses=c("Provincia"= "character","Localidad"= "character","TIPO"= "character","GEOCODIGO"= "character"), stringsAsFactors=FALSE)
DatosGasolina <- precMedio.gasolina[,1:5] # Realizo esta seleccion ya que ponia muchas columnas sin dato a la izquierda de la talba
#Leyenda
#Venta
##P: Venta al público en general.
##R: Venta restringida a socios o
#Remisión (Rem.):
##dm: Datos procedentes del distribuidor minorista.
##OM: Datos procedentes del operador mayorista.
#Cargamos el shapefile con los datos geoespaciales de los municipios de Madrid
municipios <- readOGR(dsn = "src/shapefile", layer = "municipios")
########################## FORTIFY DEL DATAFRAME ############################################
municipios@data$id <- rownames(municipios@data)
municipios.df <- fortify(municipios)
municipios.df <- join(municipios.df, municipios@data, by="id")
########################## JOIN CON LOS PRECIOS ##############################################
municipios.df <- join(municipios.df, DatosGasolina, by = c('GEOCODIGO'), type = "inner")
municipios.df <- join(municipios.df, DatosGasoleo, by = c('GEOCODIGO', 'Localidad', 'Provincia'), type = 'inner')
######################### GGPLOT PRECIOS DE GASOLINA #################################
plotGasolina <- ggplot(data=municipios.df, aes(x=long, y=lat, group=group))
plotGasolina <- plotGasolina + geom_polygon(aes(fill = PrecioGasolina)) # draw polygons
plotGasolina <- plotGasolina + theme(legend.position = "bottom", axis.ticks = element_blank(), axis.text.x = element_blank(), axis.title.x = element_blank(), axis.text.y = element_blank(), axis.title.y = element_blank())
plotGasolina <- plotGasolina + labs(title="Precio medio Gasolina 95 por municipio")
#plotGasolina <- plotGasolina + geom_path(color="grey", linestyle=2) # draw boundaries
plotGasolina <- plotGasolina + coord_equal()
plotGasolina <- plotGasolina + scale_fill_gradient(low = "#F5FBEF", high = "#38610B", space = "Lab", na.value = "grey50", guide = "colourbar")
######################### GGPLOT PRECIOS DE GASOLEO #################################
plotGasoleo <- ggplot(data=municipios.df, aes(x=long, y=lat, group=group))
plotGasoleo <- plotGasoleo + geom_polygon(aes(fill = PrecioGasoleo)) # draw polygons
plotGasoleo <- plotGasoleo + theme(legend.position = "bottom", axis.ticks = element_blank(), axis.text.x = element_blank(), axis.title.x = element_blank(), axis.text.y = element_blank(), axis.title.y = element_blank())
plotGasoleo <- plotGasoleo + labs(title="Precio medio Gasoleo A por municipio")
#plotGasoleo <- plotGasoleo + geom_path(color="grey", linestyle=2)# draw boundaries
plotGasoleo <- plotGasoleo + coord_equal()
plotGasoleo <- plotGasoleo + scale_fill_gradient(low = "#FBEFEF", high = "#610B0B", space = "Lab", na.value = "grey50", guide = "colourbar")
#Ambos plot dividiendo el grid en 2
grid.arrange(plotGasolina, plotGasoleo, ncol=2)
|
context("JSS article 2013")
suppressPackageStartupMessages(library("texreg"))
# example models from ?lm
ctl <- c(4.17,5.58,5.18,6.11,4.50,4.61,5.17,4.53,5.33,5.14)
trt <- c(4.81,4.17,4.41,3.59,5.87,3.83,6.03,4.89,4.32,4.69)
group <- gl(2, 10, 20, labels = c("Ctl", "Trt"))
weight <- c(ctl, trt)
m1 <- lm(weight ~ group)
m2 <- lm(weight ~ group - 1)
test_that("texreg returns output as in the JSS 2013 article", {
# Simple screenreg example
expect_equal(output <- screenreg(list(m1, m2)),
readRDS("../files/jss_screenreg_lm.RDS"))
# saveRDS(output, "../files/jss_screenreg_lm.RDS")
# texreg example with dcolumn and booktabs usage and table float options
expect_equal(output <- texreg(list(m1, m2),
dcolumn = TRUE,
booktabs = TRUE,
use.packages = FALSE,
label = "tab:3",
caption = "Two linear models.",
float.pos = "bh"),
readRDS("../files/jss_texreg_dcolumn_booktabs.RDS"))
# saveRDS(output, "../files/jss_texreg_dcolumn_booktabs.RDS")
# Bold coefficients, custom note, omit.coef, and coefficient customization
# (difference to JSS: dollar signs around GOF values; but appearance otherwise identical)
expect_equal(output <- texreg(list(m1, m2),
label = "tab:4",
caption = "Bolded coefficients, custom notes, three digits.",
float.pos = "h",
return.string = TRUE,
bold = 0.05,
stars = 0,
custom.note = "Coefficients with $p < 0.05$ in \\textbf{bold}.",
digits = 3,
leading.zero = FALSE,
omit.coef = "Inter"),
readRDS("../files/jss_texreg_bold_customnote_digits.RDS"))
# saveRDS(output, "../files/jss_texreg_bold_customnote_digits.RDS")
# GLS example; custom names, reordering, single.row, 'extract' arguments
# (difference to JSS: the paper reports results using 'no.margin = TRUE', but it's not in the code example)
# (difference to JSS: the version used in the paper counts 11 places left of the right bracket; this is now correctly counted as 9)
expect_equal({
library("nlme")
m3 <- gls(follicles ~ sin(2 * pi * Time) + cos(2 * pi * Time), Ovary,
correlation = corAR1(form = ~ 1 | Mare))
table <- texreg(list(m1, m3),
custom.coef.names = c(
"Intercept",
"Control",
"$\\sin(2 \\cdot \\pi \\cdot \\mbox{time})$",
"$\\cos(2 \\cdot \\pi \\cdot \\mbox{time})$"
),
custom.model.names = c("OLS model", "GLS model"),
reorder.coef = c(1, 3, 4, 2),
caption = "Multiple model types, custom names, and single row.",
label = "tab:5",
stars = c(0.01, 0.001),
dcolumn = TRUE,
booktabs = TRUE,
use.packages = FALSE,
no.margin = TRUE,
single.row = TRUE,
include.adjrs = FALSE,
include.bic = FALSE)
},
readRDS("../files/jss_texreg_gls.RDS")
)
# saveRDS(table, "../files/jss_texreg_gls.RDS")
# How to use "robust" standard errors with texreg
expect_equal({
library("sandwich")
library("lmtest")
hc <- vcovHC(m2)
ct <- coeftest(m2, vcov = hc)
se <- ct[, 2]
pval <- ct[, 4]
output <- texreg(m2, override.se = se, override.pvalues = pval)
},
readRDS("../files/jss_texreg_robust.RDS")
)
# saveRDS(output, "../files/jss_texreg_robust.RDS")
# Creating Word-readable HTML files using htmlreg
expect_equal({
output <- htmlreg(list(m1, m2, m3),
inline.css = FALSE,
doctype = TRUE,
html.tag = TRUE,
head.tag = TRUE,
body.tag = TRUE)
},
readRDS("../files/jss_htmlreg_word.RDS")
)
# saveRDS(output, "../files/jss_htmlreg_word.RDS")
# Compatibility with Markdown
expect_equal({
output <- htmlreg(list(m1, m2, m3), star.symbol = "\\*", center = TRUE)
},
readRDS("../files/jss_htmlreg_markdown.RDS")
)
# saveRDS(output, "../files/jss_htmlreg_markdown.RDS")
# How to write a complete extension for linear models
expect_equal({
extract.lm <- function(model, include.rsquared = TRUE,
include.adjrs = TRUE, include.nobs = TRUE, ...) {
s <- summary(model, ...)
names <- rownames(s$coef)
co <- s$coef[, 1]
se <- s$coef[, 2]
pval <- s$coef[, 4]
gof <- numeric()
gof.names <- character()
gof.decimal <- logical()
if (include.rsquared == TRUE) {
rs <- s$r.squared
gof <- c(gof, rs)
gof.names <- c(gof.names, "R$^2$")
gof.decimal <- c(gof.decimal, TRUE)
}
if (include.adjrs == TRUE) {
adj <- s$adj.r.squared
gof <- c(gof, adj)
gof.names <- c(gof.names, "Adj.\\ R$^2$")
gof.decimal <- c(gof.decimal, TRUE)
}
if (include.nobs == TRUE) {
n <- nobs(model)
gof <- c(gof, n)
gof.names <- c(gof.names, "Num.\\ obs.")
gof.decimal <- c(gof.decimal, FALSE)
}
tr <- createTexreg(
coef.names = names,
coef = co,
se = se,
pvalues = pval,
gof.names = gof.names,
gof = gof,
gof.decimal = gof.decimal
)
return(tr)
}
setMethod("extract", signature = className("lm", "stats"), definition = extract.lm)
},
"extract"
)
})
| /tests/testthat/test-jss.R | no_license | leifeld/texreg | R | false | false | 6,058 | r | context("JSS article 2013")
suppressPackageStartupMessages(library("texreg"))
# example models from ?lm
ctl <- c(4.17,5.58,5.18,6.11,4.50,4.61,5.17,4.53,5.33,5.14)
trt <- c(4.81,4.17,4.41,3.59,5.87,3.83,6.03,4.89,4.32,4.69)
group <- gl(2, 10, 20, labels = c("Ctl", "Trt"))
weight <- c(ctl, trt)
m1 <- lm(weight ~ group)
m2 <- lm(weight ~ group - 1)
test_that("texreg returns output as in the JSS 2013 article", {
# Simple screenreg example
expect_equal(output <- screenreg(list(m1, m2)),
readRDS("../files/jss_screenreg_lm.RDS"))
# saveRDS(output, "../files/jss_screenreg_lm.RDS")
# texreg example with dcolumn and booktabs usage and table float options
expect_equal(output <- texreg(list(m1, m2),
dcolumn = TRUE,
booktabs = TRUE,
use.packages = FALSE,
label = "tab:3",
caption = "Two linear models.",
float.pos = "bh"),
readRDS("../files/jss_texreg_dcolumn_booktabs.RDS"))
# saveRDS(output, "../files/jss_texreg_dcolumn_booktabs.RDS")
# Bold coefficients, custom note, omit.coef, and coefficient customization
# (difference to JSS: dollar signs around GOF values; but appearance otherwise identical)
expect_equal(output <- texreg(list(m1, m2),
label = "tab:4",
caption = "Bolded coefficients, custom notes, three digits.",
float.pos = "h",
return.string = TRUE,
bold = 0.05,
stars = 0,
custom.note = "Coefficients with $p < 0.05$ in \\textbf{bold}.",
digits = 3,
leading.zero = FALSE,
omit.coef = "Inter"),
readRDS("../files/jss_texreg_bold_customnote_digits.RDS"))
# saveRDS(output, "../files/jss_texreg_bold_customnote_digits.RDS")
# GLS example; custom names, reordering, single.row, 'extract' arguments
# (difference to JSS: the paper reports results using 'no.margin = TRUE', but it's not in the code example)
# (difference to JSS: the version used in the paper counts 11 places left of the right bracket; this is now correctly counted as 9)
expect_equal({
library("nlme")
m3 <- gls(follicles ~ sin(2 * pi * Time) + cos(2 * pi * Time), Ovary,
correlation = corAR1(form = ~ 1 | Mare))
table <- texreg(list(m1, m3),
custom.coef.names = c(
"Intercept",
"Control",
"$\\sin(2 \\cdot \\pi \\cdot \\mbox{time})$",
"$\\cos(2 \\cdot \\pi \\cdot \\mbox{time})$"
),
custom.model.names = c("OLS model", "GLS model"),
reorder.coef = c(1, 3, 4, 2),
caption = "Multiple model types, custom names, and single row.",
label = "tab:5",
stars = c(0.01, 0.001),
dcolumn = TRUE,
booktabs = TRUE,
use.packages = FALSE,
no.margin = TRUE,
single.row = TRUE,
include.adjrs = FALSE,
include.bic = FALSE)
},
readRDS("../files/jss_texreg_gls.RDS")
)
# saveRDS(table, "../files/jss_texreg_gls.RDS")
# How to use "robust" standard errors with texreg
expect_equal({
library("sandwich")
library("lmtest")
hc <- vcovHC(m2)
ct <- coeftest(m2, vcov = hc)
se <- ct[, 2]
pval <- ct[, 4]
output <- texreg(m2, override.se = se, override.pvalues = pval)
},
readRDS("../files/jss_texreg_robust.RDS")
)
# saveRDS(output, "../files/jss_texreg_robust.RDS")
# Creating Word-readable HTML files using htmlreg
expect_equal({
output <- htmlreg(list(m1, m2, m3),
inline.css = FALSE,
doctype = TRUE,
html.tag = TRUE,
head.tag = TRUE,
body.tag = TRUE)
},
readRDS("../files/jss_htmlreg_word.RDS")
)
# saveRDS(output, "../files/jss_htmlreg_word.RDS")
# Compatibility with Markdown
expect_equal({
output <- htmlreg(list(m1, m2, m3), star.symbol = "\\*", center = TRUE)
},
readRDS("../files/jss_htmlreg_markdown.RDS")
)
# saveRDS(output, "../files/jss_htmlreg_markdown.RDS")
# How to write a complete extension for linear models
expect_equal({
extract.lm <- function(model, include.rsquared = TRUE,
include.adjrs = TRUE, include.nobs = TRUE, ...) {
s <- summary(model, ...)
names <- rownames(s$coef)
co <- s$coef[, 1]
se <- s$coef[, 2]
pval <- s$coef[, 4]
gof <- numeric()
gof.names <- character()
gof.decimal <- logical()
if (include.rsquared == TRUE) {
rs <- s$r.squared
gof <- c(gof, rs)
gof.names <- c(gof.names, "R$^2$")
gof.decimal <- c(gof.decimal, TRUE)
}
if (include.adjrs == TRUE) {
adj <- s$adj.r.squared
gof <- c(gof, adj)
gof.names <- c(gof.names, "Adj.\\ R$^2$")
gof.decimal <- c(gof.decimal, TRUE)
}
if (include.nobs == TRUE) {
n <- nobs(model)
gof <- c(gof, n)
gof.names <- c(gof.names, "Num.\\ obs.")
gof.decimal <- c(gof.decimal, FALSE)
}
tr <- createTexreg(
coef.names = names,
coef = co,
se = se,
pvalues = pval,
gof.names = gof.names,
gof = gof,
gof.decimal = gof.decimal
)
return(tr)
}
setMethod("extract", signature = className("lm", "stats"), definition = extract.lm)
},
"extract"
)
})
|
#---------------------------
# tabs4-subgroup-ses.R
#
# Kishor Das (kishorisrt@gmail.com)
#
# estimate the effects of WASH
# interventions on easq
# subgroup analysis by socio-
# economic status
#-----------------------------
#----------------------------
# input files :
# washb-bangladesh-easq-year2.dta
# washb-bangladesh-subgroupvar.dta
# output files:
# sub-ses.Rdata
#---------------------------
#---------------------------
# preamble
#---------------------------
# NOTE(review): rm(list = ls()) wipes the user's entire workspace when this
# script is sourced interactively; prefer running it in a fresh R session
rm(list = ls())
library(foreign) # for read.dta() function
library(plyr) # for rename() function
library(washb) # for washb_glm(), used for the subgroup GLM estimates below
## desired format: round to d decimals and print with exactly d digits
## after the decimal point (vectorized; returns a character vector)
dformat <- function(x, d) {
  # adding zero turns IEEE negative zero (-0) into +0, so values that
  # round to zero print as "0.00" rather than "-0.00"
  rounded <- round(x, d) + 0.000
  formatC(rounded, format = "f", digits = d)
}
#-------------------------------
# load the analysis dataset
#-------------------------------
# NOTE(review): absolute Windows paths tie this script to one machine and
# user account -- consider a configurable data directory (file.path())
easq<-read.dta("C:\\Users\\kdas\\Dropbox\\WASHB-Bangladesh-data\\2-child-development-outcomes-datasets\\washb-bangladesh-easq-year2.dta")
subvar<-read.dta("C:\\Users\\kdas\\Dropbox\\WASHB-Bangladesh-data\\2-child-development-outcomes-datasets\\washb-bangladesh-subgroupvar.dta")
# keep identifiers, design variables, child covariates and the four EASQ z-scores
easq<- subset(easq,
select=c("dataid","childid","tchild","clusterid",
"block","arm","svydate","sex","dob","agedays",
"ageyears","agegroup","fracode","res","care","resage",
"z_com","z_motor","z_personal","z_combined" ))
# drop the z_ prefix so the outcome columns read com / motor / personal / combined
easq<- rename(easq,
replace=c("z_com"="com",
"z_motor"="motor",
"z_personal"="personal",
"z_combined"="combined"
))
# attach the subgroup variables (catasset is used below) by household id
easq<-merge(easq,subvar,by="dataid")
# the seven randomized arms, in the order the table rows are reported
group<-c("Control","Water","Sanitation","Handwashing","WSH","Nutrition","WSH+N")
#-----------------------------------------------------------------
# subgroup analysis for all four EASQ z-scores
# (communication, gross motor, personal-social, combined)
#
# For each outcome one character matrix is built with columns:
#   a   = arm label (LaTeX-indented)
#   b/e = N with a non-missing score, Low/High asset subgroup
#   c/f = "mean (SD)" of the score, Low/High asset subgroup
#   d/g = adjusted mean difference "est (lcb, ucb)" per subgroup
#   h   = treatment x subgroup interaction p-value (tr...:VHigh term)
#
# The procedure was previously copy-pasted once per outcome; it is
# factored into make_outcome_section() so all four tables are built
# by the same code. Output (the saved sub_ses object) is unchanged.
#-----------------------------------------------------------------

# format an estimate with its confidence bounds as "est (lb, ub)"
fmt_ci <- function(est, lb, ub) {
  paste0(dformat(est, 2), " (", dformat(lb, 2), ", ", dformat(ub, 2), ")")
}

# single-row header data frame used to separate the outcome blocks
glue_row <- function(header) {
  data.frame(a = header, b = "", c = "", d = "", e = "", f = "", g = "", h = "")
}

# build the 9-row results matrix for one outcome vector y
# (y must be aligned with the rows of easq, e.g. easq$com)
make_outcome_section <- function(y) {
  contrast_rows <- c("N+WSH vs WSH", "N+WSH vs N")
  arm_lab <- paste0("\u005cMyIndent ",
                    c("Control", "Water", "Sanitation", "Handwashing", "WSH",
                      "Nutrition", "Nutrition+WSH", contrast_rows))
  low_n <- low_mean <- high_n <- high_mean <- NULL
  mdiff_low <- mdiff_high <- pval <- NULL

  # unadjusted N and mean (SD) by arm within each asset subgroup
  for (arm in group) {
    i_low  <- easq$arm == arm & easq$catasset == "Low"
    i_high <- easq$arm == arm & easq$catasset == "High"
    low_n[arm]     <- dformat(sum(!is.na(y[i_low])), 0)
    low_mean[arm]  <- paste0(dformat(mean(y[i_low], na.rm = TRUE), 2),
                             " (", dformat(sd(y[i_low], na.rm = TRUE), 2), ")")
    high_n[arm]    <- dformat(sum(!is.na(y[i_high])), 0)
    high_mean[arm] <- paste0(dformat(mean(y[i_high], na.rm = TRUE), 2),
                             " (", dformat(sd(y[i_high], na.rm = TRUE), 2), ")")
  }
  # the two head-to-head contrast rows carry no descriptive statistics,
  # and the Control row carries no contrast estimates
  low_n[contrast_rows] <- ""
  low_mean[contrast_rows] <- ""
  high_n[contrast_rows] <- ""
  high_mean[contrast_rows] <- ""
  mdiff_low["Control"] <- ""
  mdiff_high["Control"] <- ""
  pval["Control"] <- ""

  # GLM with a treatment x subgroup interaction; lincom row 1 is the
  # effect among Low-asset households, row 2 among High-asset households
  run_contrast <- function(ref, arm) {
    reg <- washb_glm(Y = y, tr = easq$arm, pair = easq$block, id = easq$block,
                     W = easq["catasset"], V = "catasset",
                     contrast = c(ref, arm))
    list(low  = fmt_ci(reg$lincom$est[1], reg$lincom$est.lb[1], reg$lincom$est.ub[1]),
         high = fmt_ci(reg$lincom$est[2], reg$lincom$est.lb[2], reg$lincom$est.ub[2]),
         p    = dformat(reg$fit[paste0("tr", arm, ":VHigh"), "Pr(>|z|)"], 3))
  }

  # each intervention arm vs Control
  for (arm in group[2:7]) {
    res <- run_contrast("Control", arm)
    mdiff_low[arm]  <- res$low
    mdiff_high[arm] <- res$high
    pval[arm]       <- res$p
  }
  # row N+WSH vs WSH
  res <- run_contrast("WSH", "WSH+N")
  mdiff_low["N+WSH vs WSH"]  <- res$low
  mdiff_high["N+WSH vs WSH"] <- res$high
  pval["N+WSH vs WSH"]       <- res$p
  # row N+WSH vs N
  res <- run_contrast("Nutrition", "WSH+N")
  mdiff_low["N+WSH vs N"]  <- res$low
  mdiff_high["N+WSH vs N"] <- res$high
  pval["N+WSH vs N"]       <- res$p

  cbind(a = arm_lab, b = low_n, c = low_mean, d = mdiff_low,
        e = high_n, f = high_mean, g = mdiff_high, h = pval)
}

com_df      <- make_outcome_section(easq$com)
mot_df      <- make_outcome_section(easq$motor)
per_df      <- make_outcome_section(easq$personal)
combined_df <- make_outcome_section(easq$combined)

#----------------------------------------------------
# combining all the results into a single data frame
#----------------------------------------------------
sub_ses <- rbind(
  glue_row("\u005ctextbf{Communication z-score}"),
  com_df,
  glue_row("\u005ctextbf{Gross Motor z-score}"),
  mot_df,
  glue_row("\u005ctextbf{Personal-social z-score}"),
  per_df,
  glue_row("\u005ctextbf{Combined z-score}"),
  combined_df
)
save(sub_ses,file="C:\\Users\\kdas\\Dropbox\\WASHB-cognitive-development-analysis\\results\\raw\\sub-ses.Rdata")
# tabs4-subgroup-ses.R
#
# Kishor Das (kishorisrt@gmail.com)
#
# estimate the effects of WASH
# interventions on easq
# subgroup analysis by socio-
# economic status
#-----------------------------
#----------------------------
# input files :
# washb-bangladesh-easq-year2.dta
# washb-bangladesh-subgroupvar.dta
# output files:
# sub-ses.Rdata
#---------------------------
#---------------------------
# preamble
#---------------------------
# NOTE(review): rm(list = ls()) wipes the user's entire workspace when this
# script is sourced interactively; prefer running it in a fresh R session
rm(list = ls())
library(foreign) # for read.dta() function
library(plyr) # for rename() function
library(washb) # for washb_glm(), used for the subgroup GLM estimates below
## desired format: round to d decimals and print with exactly d digits
## after the decimal point (vectorized; returns a character vector)
dformat <- function(x, d) {
  # adding zero turns IEEE negative zero (-0) into +0, so values that
  # round to zero print as "0.00" rather than "-0.00"
  rounded <- round(x, d) + 0.000
  formatC(rounded, format = "f", digits = d)
}
#-------------------------------
# load the analysis dataset
#-------------------------------
# NOTE(review): absolute Windows paths tie this script to one machine and
# user account -- consider a configurable data directory (file.path())
easq<-read.dta("C:\\Users\\kdas\\Dropbox\\WASHB-Bangladesh-data\\2-child-development-outcomes-datasets\\washb-bangladesh-easq-year2.dta")
subvar<-read.dta("C:\\Users\\kdas\\Dropbox\\WASHB-Bangladesh-data\\2-child-development-outcomes-datasets\\washb-bangladesh-subgroupvar.dta")
# keep identifiers, design variables, child covariates and the four EASQ z-scores
easq<- subset(easq,
select=c("dataid","childid","tchild","clusterid",
"block","arm","svydate","sex","dob","agedays",
"ageyears","agegroup","fracode","res","care","resage",
"z_com","z_motor","z_personal","z_combined" ))
# drop the z_ prefix so the outcome columns read com / motor / personal / combined
easq<- rename(easq,
replace=c("z_com"="com",
"z_motor"="motor",
"z_personal"="personal",
"z_combined"="combined"
))
# attach the subgroup variables (catasset is used below) by household id
easq<-merge(easq,subvar,by="dataid")
# the seven randomized arms, in the order the table rows are reported
group<-c("Control","Water","Sanitation","Handwashing","WSH","Nutrition","WSH+N")
#-----------------------------------------------------------------
# subgroup analysis for all four EASQ z-scores
# (communication, gross motor, personal-social, combined)
#
# For each outcome one character matrix is built with columns:
#   a   = arm label (LaTeX-indented)
#   b/e = N with a non-missing score, Low/High asset subgroup
#   c/f = "mean (SD)" of the score, Low/High asset subgroup
#   d/g = adjusted mean difference "est (lcb, ucb)" per subgroup
#   h   = treatment x subgroup interaction p-value (tr...:VHigh term)
#
# The procedure was previously copy-pasted once per outcome; it is
# factored into make_outcome_section() so all four tables are built
# by the same code. Output (the saved sub_ses object) is unchanged.
#-----------------------------------------------------------------

# format an estimate with its confidence bounds as "est (lb, ub)"
fmt_ci <- function(est, lb, ub) {
  paste0(dformat(est, 2), " (", dformat(lb, 2), ", ", dformat(ub, 2), ")")
}

# single-row header data frame used to separate the outcome blocks
glue_row <- function(header) {
  data.frame(a = header, b = "", c = "", d = "", e = "", f = "", g = "", h = "")
}

# build the 9-row results matrix for one outcome vector y
# (y must be aligned with the rows of easq, e.g. easq$com)
make_outcome_section <- function(y) {
  contrast_rows <- c("N+WSH vs WSH", "N+WSH vs N")
  arm_lab <- paste0("\u005cMyIndent ",
                    c("Control", "Water", "Sanitation", "Handwashing", "WSH",
                      "Nutrition", "Nutrition+WSH", contrast_rows))
  low_n <- low_mean <- high_n <- high_mean <- NULL
  mdiff_low <- mdiff_high <- pval <- NULL

  # unadjusted N and mean (SD) by arm within each asset subgroup
  for (arm in group) {
    i_low  <- easq$arm == arm & easq$catasset == "Low"
    i_high <- easq$arm == arm & easq$catasset == "High"
    low_n[arm]     <- dformat(sum(!is.na(y[i_low])), 0)
    low_mean[arm]  <- paste0(dformat(mean(y[i_low], na.rm = TRUE), 2),
                             " (", dformat(sd(y[i_low], na.rm = TRUE), 2), ")")
    high_n[arm]    <- dformat(sum(!is.na(y[i_high])), 0)
    high_mean[arm] <- paste0(dformat(mean(y[i_high], na.rm = TRUE), 2),
                             " (", dformat(sd(y[i_high], na.rm = TRUE), 2), ")")
  }
  # the two head-to-head contrast rows carry no descriptive statistics,
  # and the Control row carries no contrast estimates
  low_n[contrast_rows] <- ""
  low_mean[contrast_rows] <- ""
  high_n[contrast_rows] <- ""
  high_mean[contrast_rows] <- ""
  mdiff_low["Control"] <- ""
  mdiff_high["Control"] <- ""
  pval["Control"] <- ""

  # GLM with a treatment x subgroup interaction; lincom row 1 is the
  # effect among Low-asset households, row 2 among High-asset households
  run_contrast <- function(ref, arm) {
    reg <- washb_glm(Y = y, tr = easq$arm, pair = easq$block, id = easq$block,
                     W = easq["catasset"], V = "catasset",
                     contrast = c(ref, arm))
    list(low  = fmt_ci(reg$lincom$est[1], reg$lincom$est.lb[1], reg$lincom$est.ub[1]),
         high = fmt_ci(reg$lincom$est[2], reg$lincom$est.lb[2], reg$lincom$est.ub[2]),
         p    = dformat(reg$fit[paste0("tr", arm, ":VHigh"), "Pr(>|z|)"], 3))
  }

  # each intervention arm vs Control
  for (arm in group[2:7]) {
    res <- run_contrast("Control", arm)
    mdiff_low[arm]  <- res$low
    mdiff_high[arm] <- res$high
    pval[arm]       <- res$p
  }
  # row N+WSH vs WSH
  res <- run_contrast("WSH", "WSH+N")
  mdiff_low["N+WSH vs WSH"]  <- res$low
  mdiff_high["N+WSH vs WSH"] <- res$high
  pval["N+WSH vs WSH"]       <- res$p
  # row N+WSH vs N
  res <- run_contrast("Nutrition", "WSH+N")
  mdiff_low["N+WSH vs N"]  <- res$low
  mdiff_high["N+WSH vs N"] <- res$high
  pval["N+WSH vs N"]       <- res$p

  cbind(a = arm_lab, b = low_n, c = low_mean, d = mdiff_low,
        e = high_n, f = high_mean, g = mdiff_high, h = pval)
}

com_df      <- make_outcome_section(easq$com)
mot_df      <- make_outcome_section(easq$motor)
per_df      <- make_outcome_section(easq$personal)
combined_df <- make_outcome_section(easq$combined)

#----------------------------------------------------
# combining all the results into a single data frame
#----------------------------------------------------
sub_ses <- rbind(
  glue_row("\u005ctextbf{Communication z-score}"),
  com_df,
  glue_row("\u005ctextbf{Gross Motor z-score}"),
  mot_df,
  glue_row("\u005ctextbf{Personal-social z-score}"),
  per_df,
  glue_row("\u005ctextbf{Combined z-score}"),
  combined_df
)
save(sub_ses,file="C:\\Users\\kdas\\Dropbox\\WASHB-cognitive-development-analysis\\results\\raw\\sub-ses.Rdata")
#' @title
#' Print Method for Seroincidence Object
#'
#' @description
#' Custom \code{\link{print}} function to show output of the seroincidence calculator \code{\link{estimateSeroincidence}}.
#'
#' @param x A list containing output of function \code{\link{estimateSeroincidence}}.
#' @param ... Additional arguments affecting the summary produced.
#'
#' @return
#' Invisibly returns the input object \code{x}, following the standard
#' contract for \code{print} methods.
#'
#' @examples
#'
#' \dontrun{
#' # estimate seroincidence
#' seroincidence <- estimateSeroincidence(...)
#'
#' # print the seroincidence object to the console
#' print(seroincidence)
#'
#' # or simply type (appropriate print method will be invoked automatically)
#' seroincidence
#' }
#'
#' @export
print.seroincidence <- function(x, ...)
{
    # Header describing the estimation setup stored on the object.
    cat("Seroincidence object estimated given the following setup:\n")
    cat(paste("a) Antibodies :", paste(x[["Antibodies"]], collapse = ", ")), "\n")
    cat(paste("b) Strata :", paste(x[["Strata"]], collapse = ", ")), "\n")
    censorLimits <- x[["CensorLimits"]]
    # Render each censor limit as "name = value", comma-separated.
    cat(paste("c) Censor limits:", paste(sapply(names(censorLimits), FUN = function(name) {
        paste(name, censorLimits[name], sep = " = ")
    }), collapse = ", "), "\n"))
    cat("\n")
    cat("This object is a list containing the following items:\n")
    cat("Fits - List of outputs of \"optim\" function per stratum.\n")
    cat("Antibodies - Input parameter antibodies of function \"estimateSeroincidence\".\n")
    cat("Strata - Input parameter strata of function \"estimateSeroincidence\".\n")
    cat("CensorLimits - Input parameter censorLimits of function \"estimateSeroincidence\".\n")
    cat("\n")
    cat("Call summary function to obtain output results.\n")
    # Print methods should return their argument invisibly (see ?print) so
    # the object can flow through pipelines without being echoed twice.
    invisible(x)
}
| /R/print.seroincidence.R | no_license | cran/seroincidence | R | false | false | 1,636 | r | #' @title
#' Print Method for Seroincidence Object
#'
#' @description
#' Custom \code{\link{print}} function to show output of the seroincidence calculator \code{\link{estimateSeroincidence}}.
#'
#' @param x A list containing output of function \code{\link{estimateSeroincidence}}.
#' @param ... Additional arguments affecting the summary produced.
#'
#' @examples
#'
#' \dontrun{
#' # estimate seroincidence
#' seroincidence <- estimateSeroincidence(...)
#'
#' # print the seroincidence object to the console
#' print(seroincidence)
#'
#' # or simply type (appropriate print method will be invoked automatically)
#' seroincidence
#' }
#'
#' @export
print.seroincidence <- function(x, ...)
{
    # Header describing the estimation setup stored on the object.
    cat("Seroincidence object estimated given the following setup:\n")
    cat(paste("a) Antibodies :", paste(x[["Antibodies"]], collapse = ", ")), "\n")
    cat(paste("b) Strata :", paste(x[["Strata"]], collapse = ", ")), "\n")
    censorLimits <- x[["CensorLimits"]]
    # Render each censor limit as "name = value", comma-separated.
    cat(paste("c) Censor limits:", paste(sapply(names(censorLimits), FUN = function(name) {
        paste(name, censorLimits[name], sep = " = ")
    }), collapse = ", "), "\n"))
    cat("\n")
    cat("This object is a list containing the following items:\n")
    cat("Fits - List of outputs of \"optim\" function per stratum.\n")
    cat("Antibodies - Input parameter antibodies of function \"estimateSeroincidence\".\n")
    cat("Strata - Input parameter strata of function \"estimateSeroincidence\".\n")
    cat("CensorLimits - Input parameter censorLimits of function \"estimateSeroincidence\".\n")
    cat("\n")
    cat("Call summary function to obtain output results.\n")
    # Print methods should return their argument invisibly (see ?print) so
    # the object can flow through pipelines without being echoed twice.
    invisible(x)
}
|
# Location of local schools with geocodes: a data frame from the AmesHousing
# package whose School, Longitude and Latitude columns are read (as a global)
# by distance_to_schools() below.
ames_schools <- AmesHousing::ames_schools
# Function to compute the distance from a single point to every school.
#
# Args:
#   lon, lat: scalar coordinates (degrees) of the point of interest.
# Returns:
#   Named numeric vector of straight-line distances in degree space, one per
#   school; names come from ames_schools$School (read from the global env).
# NOTE(review): Euclidean distance on raw lon/lat is not great-circle
# distance; adequate for ranking nearby schools, not for true distances.
distance_to_schools <- function(lon, lat) {
  # Vectorized point-to-set distance. The original built the full
  # (k+1) x (k+1) pairwise matrix via dist() and kept only one column;
  # computing that column directly gives identical values in O(k).
  res <- sqrt((lon - ames_schools$Longitude)^2 +
                (lat - ames_schools$Latitude)^2)
  names(res) <- ames_schools$School
  res
}
# Add column indicating nearest school (categorical)
# Build the house-by-school distance matrix in one pass. vapply() with a
# fixed-size template replaces the original grow-by-rbind() loop, which
# copied the whole matrix on every iteration (quadratic in nrow(ames)).
res <- t(vapply(
  seq_len(nrow(ames)),
  function(i) distance_to_schools(ames$Longitude[i], ames$Latitude[i]),
  FUN.VALUE = numeric(nrow(ames_schools))
))
# For each house (row), take the column index of the smallest distance and
# map it back to a school name; store as a factor column on `ames`.
nearest_school <- ames_schools$School[apply(res, MARGIN = 1, FUN = which.min)]
ames$nearest_school <- as.factor(nearest_school)
# Fit a random forest (ranger, impurity-based importance) predicting
# Sale_Price from all other columns of `ames` -- including the
# nearest_school factor added above -- then plot the top 20 variable
# importances with vip. The seed fixes the forest's internal randomness
# for reproducibility.
set.seed(159)
rfo <- ranger(Sale_Price ~ ., data = ames, importance = "impurity")
vip::vip(rfo, num_features = 20)
| /docs/distance_to_schools.R | no_license | bgreenwell/intro-ml-r | R | false | false | 766 | r | # Location of local schools with geocodes
# School geocodes from the AmesHousing package; School, Longitude and
# Latitude columns are read (as a global) by distance_to_schools() below.
ames_schools <- AmesHousing::ames_schools
# Function to compute the distance from a single point to every school.
#
# Args:
#   lon, lat: scalar coordinates (degrees) of the point of interest.
# Returns:
#   Named numeric vector of straight-line distances in degree space, one per
#   school; names come from ames_schools$School (read from the global env).
# NOTE(review): Euclidean distance on raw lon/lat is not great-circle
# distance; adequate for ranking nearby schools, not for true distances.
distance_to_schools <- function(lon, lat) {
  # Vectorized point-to-set distance. The original built the full
  # (k+1) x (k+1) pairwise matrix via dist() and kept only one column;
  # computing that column directly gives identical values in O(k).
  res <- sqrt((lon - ames_schools$Longitude)^2 +
                (lat - ames_schools$Latitude)^2)
  names(res) <- ames_schools$School
  res
}
# Add column indicating nearest school (categorical)
# Build the house-by-school distance matrix in one pass. vapply() with a
# fixed-size template replaces the original grow-by-rbind() loop, which
# copied the whole matrix on every iteration (quadratic in nrow(ames)).
res <- t(vapply(
  seq_len(nrow(ames)),
  function(i) distance_to_schools(ames$Longitude[i], ames$Latitude[i]),
  FUN.VALUE = numeric(nrow(ames_schools))
))
# For each house (row), take the column index of the smallest distance and
# map it back to a school name; store as a factor column on `ames`.
nearest_school <- ames_schools$School[apply(res, MARGIN = 1, FUN = which.min)]
ames$nearest_school <- as.factor(nearest_school)
# Fit a random forest (ranger, impurity-based importance) predicting
# Sale_Price from all other columns of `ames` -- including the
# nearest_school factor added above -- then plot the top 20 variable
# importances with vip. The seed fixes the forest's internal randomness
# for reproducibility.
set.seed(159)
rfo <- ranger(Sale_Price ~ ., data = ames, importance = "impurity")
vip::vip(rfo, num_features = 20)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.