content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source("../../../scripts/h2o-r-test-setup.R")

# Test the bernoulli XGBoost implementation: build a synthetic binary
# classification dataset, run an H2O XGBoost grid search over a small
# hyper-parameter space, and verify the expected number of models is built.
test.XGBoost.bernoulli.SyntheticData <- function() {
  # Generate dataset
  # (adapted from http://www.stat.missouri.edu/~speckman/stat461/boost.R)
  set.seed(3719)
  n <- 2000
  # Ten standard-normal predictors V1, ..., V10
  X <- matrix(rnorm(10 * n), n, 10)
  # y = +1 if sum_i x_{ij}^2 > chi-square median on 10 df, else -1
  y <- rep(-1, n)
  y[apply(X * X, 1, sum) > qchisq(.5, 10)] <- 1
  # Assign names to the columns of X
  dimnames(X)[[2]] <- paste0("V", 1:10)
  # Convert to data.frame and add the response
  train.data <- as.data.frame(X)
  train.data$y <- y

  # Now repeat for 10000 test rows
  n <- 10000
  X <- matrix(rnorm(10 * n), n, 10)
  y <- rep(-1, n)
  y[apply(X * X, 1, sum) > qchisq(.5, 10)] <- 1
  dimnames(X)[[2]] <- paste0("V", 1:10)
  test.data <- as.data.frame(X)
  test.data$y <- y

  # Put training and test data together for xgboost below and
  # recode the response from {-1, +1} to {0, 1}
  train.data2 <- train.data
  train.data2$y[train.data2$y < 0] <- 0
  test.data2 <- test.data
  test.data2$y[test.data2$y < 0] <- 0
  all.data2 <- rbind(train.data2, test.data2)

  # Parse data to H2O
  print("Parse data to H2O")
  system.time(alldata <- as.h2o(all.data2, destination_frame = "alldata"))
  system.time(test <- as.h2o(test.data2, destination_frame = "test"))
  str(alldata)

  myX <- paste0("V", 1:10)
  myY <- "y"
  # Binary classification: the response must be a factor
  test[, myY] <- as.factor(test[, myY])
  alldata[, myY] <- as.factor(alldata[, myY])

  # Run H2O-XGBoost grid job over a 1 x 1 x 3 x 2 hyper-parameter space
  print("H2O XGBoost grid search")
  grid_space <- list()
  grid_space$ntrees <- c(5)
  grid_space$min_rows <- c(2)
  grid_space$max_depth <- c(1, 2, 3)
  grid_space$learn_rate <- c(1, .1)
  # No nbins in XGBoost! grid_space$nbins <- c(20)
  grid_space$distribution <- "bernoulli"
  system.time(tru.xgboost <- h2o.grid("xgboost", x = myX, y = myY, training_frame = alldata, hyper_params = grid_space))

  num_models <- length(tru.xgboost@model_ids)
  # Fix: use paste()'s default space separator; the original
  # paste(..., sep = '') printed "...created:6" with no space.
  print(paste("Number of xgboost models created:", num_models))
  # 3 max_depth values x 2 learn_rate values = 6 models expected
  expect_equal(num_models, 6)
  print("XGBoost models summary")
  print(tru.xgboost)
}

doTest("XGBoost Grid Test: Synthetic dataset with Bernoulli distribution H2O vs R", test.XGBoost.bernoulli.SyntheticData)
| /h2o-r/tests/testdir_algos/xgboost/runit_XGBoost_bernoulli_SyntheticData.R | permissive | h2oai/h2o-3 | R | false | false | 2,650 | r | setwd(normalizePath(dirname(R.utils::commandArgs(asValues=TRUE)$"f")))
source("../../../scripts/h2o-r-test-setup.R")
# This test is to check bernoulli xgboost implementation,
# It creates a synthetic dataset, runs xgboost grid in H2O and R and compares aucs
test.XGBoost.bernoulli.SyntheticData <- function() {
# Generate dataset
# http://www.stat.missouri.edu/~speckman/stat461/boost.R
set.seed(3719)
n <- 2000
# Generate variables V1, ... V10
X <- matrix(rnorm(10*n), n, 10)
# y = +1 if sum_i x_{ij}^2 > chisq median on 10 df
y <- rep(-1, n)
y[apply(X*X, 1, sum) > qchisq(.5, 10)] <- 1
# Assign names to the columns of X:
dimnames(X)[[2]] <- c("V1", "V2", "V3", "V4", "V5", "V6", "V7", "V8", "V9", "V10")
# Convert to data.frame
train.data <- as.data.frame(X)
# Add y
train.data$y <- y
# Now repeat for 10000 test data
n <- 10000
X <- matrix(rnorm(10*n), n, 10)
y <- rep(-1, n)
y[apply(X*X, 1, sum) > qchisq(.5, 10)] <- 1
dimnames(X)[[2]] <- c("V1", "V2", "V3", "V4", "V5", "V6", "V7", "V8", "V9", "V10")
test.data <- as.data.frame(X)
test.data$y <- y
# Need to put training and test data together for xgboost below and convert
# to 0-1 data
train.data2 <- train.data
train.data2$y[train.data2$y < 0] <- 0
test.data2 <- test.data
test.data2$y[test.data2$y < 0] <- 0
all.data2 <- rbind(train.data2, test.data2)
# Parse data to H2O
print("Parse data to H2O")
system.time(alldata <- as.h2o(all.data2, destination_frame = "alldata"))
system.time(test <- as.h2o(test.data2, destination_frame = "test"))
str(alldata)
myX <- c("V1", "V2", "V3", "V4", "V5", "V6", "V7", "V8", "V9", "V10")
myY <- "y"
test[,myY] <- as.factor(test[,myY])
alldata[,myY] <- as.factor(alldata[,myY])
# Run H2O-XGBoost grid job
print("H2O XGBoost grid search")
grid_space <- list()
grid_space$ntrees <- c(5)
grid_space$min_rows <- c(2)
grid_space$max_depth <- c(1,2,3)
grid_space$learn_rate <- c(1,.1)
# No nbins in XGBoost! grid_space$nbins <- c(20)
grid_space$distribution <- "bernoulli"
system.time(tru.xgboost <- h2o.grid("xgboost", x = myX, y = myY, training_frame = alldata, hyper_params = grid_space))
num_models <- length(tru.xgboost@model_ids)
print(paste("Number of xgboost models created:", num_models,sep ='') )
expect_equal(num_models,6)
print("XGBoost models summary")
print(tru.xgboost)
}
doTest("XGBoost Grid Test: Synthetic dataset with Bernoulli distribution H2O vs R", test.XGBoost.bernoulli.SyntheticData)
|
library(pxR)        # read.px(): PC-Axis files (used below for Irish CSO data)
library(tidyverse)
library(readxl)
# alt pop data -----------------------------------------------------------------------------------------------
# Wide spreadsheets: one country per row, one column per year.
# altpops: total population; pop_perc: a percentage used below to scale
# total population into an age/sex subgroup (presumably female share --
# TODO confirm against the source workbook).
altpops <- read_xlsx("Downloaded data files/population_total.xlsx")
pop_perc <- read_xlsx("Downloaded data files/pop_female_perc.xlsx")
# Ireland births -----------------------------------------------------------------
# CSO PC-Axis extract of births by single year of mother's age, 2007-2017.
Ire_births<-as.data.frame(read.px("Downloaded data files/IRE2007-2017.px"))
# Manually keyed birth counts (ages 15-20) for 2001-2007.
# NOTE(review): 2007 appears both here and in the px file (named 2007-2017);
# bind_rows() below will keep both -- confirm this is not double counting.
Ire2007<- tibble(Code="IRE", Year=2007, Age=c(15:20), sumBirths=c(69,158,397,723,1158,1359), Country="Ireland")
Ire2006<- tibble(Code="IRE", Year=2006, Age=c(15:20), sumBirths=c(48,151,365,676,1095,1325), Country="Ireland")
Ire2005<- tibble(Code="IRE", Year=2005, Age=c(15:20), sumBirths=c(42,182,388,772,1043,1252), Country="Ireland")
Ire2004<- tibble(Code="IRE", Year=2004, Age=c(15:20), sumBirths=c(53,202,399,779,1127,1339), Country="Ireland")
Ire2003<- tibble(Code="IRE", Year=2003, Age=c(15:20), sumBirths=c(58,187,489,852,1217,1394), Country="Ireland")
Ire2002<- tibble(Code="IRE", Year=2002, Age=c(15:20), sumBirths=c(63,225,504,932,1254,1517), Country="Ireland")
Ire2001<- tibble(Code="IRE", Year=2001, Age=c(15:20), sumBirths=c(67,214,520,975,1311,1530), Country="Ireland")
# Pre-2001 published under-20 rates (per 1000, hence the /1000 rescale).
Ireprerates <- tibble(Year=c(1985:2000), Country="Ireland", agegrp=3, agecat="Under 20", rate=c(16.6,16.4,16.1,15.3,14.8,16.7,17.1,16.9,16.3,15.0,15.1,16.7,17.5,19.1,20.0,19.3)) %>%
mutate(rate=rate/1000)
# Tidy the px extract: keep total births for both sexes, ages 15-20
# ("15 and under" is recoded as 15), then append the manual 2001-2007 rows.
Ire_tidy <- Ire_births %>%
filter(Statistic=="All Births (Number)" & Sex.of.Child == "Both sexes") %>%
select(Year, Age.of.Mother, value) %>%
mutate(Age.of.Mother = gsub(" years", "", Age.of.Mother),
Age.of.Mother = ifelse(Age.of.Mother=="15 and under", "15", Age.of.Mother)) %>%
filter(as.numeric(Age.of.Mother)>=15 & as.numeric(Age.of.Mother)<=20) %>%
# NOTE(review): Age and Total are created here but the select() below keeps
# the raw `value` column as sumBirths and drops Total -- confirm `value`
# is already numeric as intended.
mutate(Age=as.numeric(Age.of.Mother), Total=as.numeric(value), Year=as.numeric(as.character(Year)), Country="Ireland", Code="IRE") %>%
select(Code, Country, Year, Age, sumBirths = value) %>%
as_tibble() %>%
bind_rows(Ire2007, Ire2006, Ire2005, Ire2004, Ire2003, Ire2002, Ire2001) %>%
arrange(Year, Age)
# Australia births ------------------------------------------------------------------------
# Manually keyed 2004 birth counts by single year of age (15-19).
Aus2004 <- tibble(Code="AUS", Year=2004, Age=15:19, sumBirths=c(356,886,380,502,572), Country="Australia")
# 1984-2003: nuptial + ex-nuptial confinements are read separately and summed
# per Age x Year. NOTE: gather() is superseded by pivot_longer(); kept as-is
# to preserve behavior.
Aus1984to2003 <- read_tsv("Downloaded data files/Ausnuptcon.txt") %>%
gather("Year", "Total", 2:10) %>%
bind_rows(read_tsv("Downloaded data files/Ausexnuptcon.txt") %>%
gather("Year", "Total", 2:10)) %>%
group_by(Age, Year) %>%
dplyr::summarise(sumBirths=sum(Total)) %>%
mutate(Year=as.numeric(Year), Code = "AUS", Country = "Australia")
# Recent years from the ABS workbook; rows 2:7 are the age rows of Table 1.3.
Aus_births <- read_excel("Downloaded data files/AUS teenage births.xlsx", sheet="Table 1.3", skip=5)
Aus_tidy <- Aus_births[2:7,] %>%
select(Age = 1, 2:12) %>%
# "Younger than 15 years" is coded as age 14; other labels drop " years".
mutate(Age = ifelse(Age=="Younger than 15 years", 14, as.numeric(gsub(" years","",Age)))) %>%
gather("Year", "Total", 2:12) %>%
mutate(Age=as.numeric(Age),
sumBirths=as.numeric(Total),
Year=as.numeric(as.character(Year)),
Country="Australia",
Code="AUS") %>%
select(Code, Country, Year, Age, sumBirths) %>%
bind_rows(Aus2004, Aus1984to2003) %>%
arrange(Year, Age)
# Pre-2004 published under-20 rates (per 1000; unlike Ireprerates this is
# NOT divided by 1000 -- TODO confirm the intended unit downstream).
Ausprerates <- tibble(Year=c(1985:2003),
Country="Australia", agegrp=3, agecat="Under 20",
rate = c(22.8,21.8,20.6,20.3,20.6,22.1,22.1,22.0,20.9,20.7,20.4,20.1,19.8,18.9,18.5,17.7,17.7,17.4,16.3))
# NZ pop from HMD ---------------------------------------------------------
# readpop() and popAgeGrps are defined elsewhere in the project (not in this
# file); readpop presumably loads Human Mortality Database population counts.
NZ_pop <- readpop("NZL_NP")
# Female population summed into the age groups defined by popAgeGrps.
sumNZ_pop <- NZ_pop %>% merge(popAgeGrps) %>%
group_by(Code, Year, agegrp) %>%
summarise(sumPops = sum(Female)) %>%
ungroup() %>%
mutate(Code = "NZL")
# NOTE(review): U_20_pop is defined LATER in this script (under-20
# pregnancies section). This assignment only works if that section has
# already been run in the current workspace -- the script is order-dependent.
New_NZ_pop <- sumNZ_pop %>%
filter(agegrp == 'Under 20',
Year > 1989) %>%
right_join(U_20_pop %>% mutate(Year = as.numeric(Year)), by = c("Year", "agegrp")) %>%
mutate(Code = "NZL")
# New Zealand - abortions, stillbirths and livebirths combined ----------------------------------
# ** under 20 pregnancies -------------------------------------------------
# Published total pregnancy rates; first 26 data rows only.
NZ_import_rates <- read_csv("Downloaded data files/NZ_totalpregrates.csv", skip=2, col_names=c("Year", "Under 15", "Under 20"))[1:26,]
# Total NZ population per year, long format.
totpop_NZ <- altpops %>% filter(country == "New Zealand") %>% select(Country = country, `1990`:`2017`) %>%
gather("Year", "totpop", -1)
# Estimated under-20 population = total population x percentage / 100;
# missing percentages are filled with the column mean.
# NOTE(review): `.$pop_perc` inside mutate() mixes the magrittr dot with a
# bare column reference -- works here, but `pop_perc` alone would be cleaner.
U_20_pop <- pop_perc %>% filter(country == "New Zealand") %>% select(Country = country, `1990`:`2017`) %>%
gather("Year", "pop_perc", -1) %>%
mutate(pop_perc = ifelse(is.na(.$pop_perc), mean(pop_perc, na.rm = TRUE), pop_perc)) %>%
left_join(totpop_NZ, by = c("Country", "Year")) %>%
mutate(sumPops = totpop * pop_perc / 100, agegrp = "Under 20") %>%
select(Country, Year, agegrp, sumPops)
# Under-20 pregnancy rate per 1000 from the published totals.
NZ_totalrates <- NZ_import_rates %>%
select(Year, `Under 20`) %>%
gather("agegrp", "totalPregs", -1) %>%
left_join(U_20_pop, by = c("Year", "agegrp")) %>%
mutate(pRate = 1000*totalPregs/sumPops,
Year = as.numeric(Year),
Code = "NZL")
# NOTE(review): `X1` is readr < 2.0's default name for an unnamed first
# column; readr >= 2.0 names it "...1", which would break these rename()s.
NZ_import_births <- read_csv("Downloaded data files/NZ_births_age.csv",
skip = 1) %>% rename(Year = X1)
NZ_import_abo <- read_csv("Downloaded data files/NZ_abortions_agegrp.csv",
skip = 1) %>% rename(Year = X1)
# Synthetic pregnancy rate = (births to under-20s + abortions 15-19) per
# 1000 under-20 population, 1990-2013.
# NOTE(review): Code = 30 is numeric here while "NZL" is used above --
# confirm which coding downstream consumers expect.
NZ_synth_rates <- NZ_import_births %>%
select(-`20 years`) %>%
pivot_longer(cols = -1, names_to = "agegrp", values_to = "births") %>%
group_by(Year) %>%
dplyr::summarise(births = sum(births)) %>%
filter(!is.na(births),
Year >1989) %>%
left_join(
NZ_import_abo %>%
select(Year, abortions = `15 - 19 years`) %>%
filter(Year > 1989),
by = "Year"
) %>%
mutate(Year = as.numeric(Year)) %>%
left_join(
New_NZ_pop, by = "Year"
) %>%
filter(Year<2014) %>%
mutate(sumPops.x = zoo::na.approx(sumPops.x),
pRate = (births + abortions)/sumPops.x * 1000,
Code = 30,
agegrp = "Under 20") %>%
select(Code, Country, Year, pRate)
# NOTE(review): the result of this join is never assigned -- it is computed
# and discarded (GDP_cap comes from elsewhere in the project). Presumably
# either meant for interactive inspection or a missing assignment; confirm.
NZ_synth_rates %>%
left_join(GDP_cap, by = c("Country", "Year"))
# SD_import_u20 <- synthData_u20_filt
# Append the NZ series to the previously assembled under-20 data
# (SD_import_u20 must already exist in the workspace; see line above).
synthData_u20_filt <- bind_rows(SD_import_u20, NZ_synth_rates)
# ** Under-18 births ------------------------------------------------------
# Under-18 birth rate per 1000 for New Zealand: sum births to mothers aged
# under 18 (the 19- and 20-year columns are dropped), divide by the
# HMD-based under-18 female population, 1990-2013.
NZ_under_18 <- NZ_import_births %>%
  select(-`19 years`, -`20 years`) %>%
  pivot_longer(cols = -1, names_to = "agegrp", values_to = "births") %>%
  group_by(Year) %>%
  summarise(births = sum(births)) %>%
  filter(!is.na(births),
         Year > 1989) %>%
  mutate(Year = as.numeric(Year)) %>%
  left_join(
    sumNZ_pop %>% filter(agegrp == 'Under 18'), by = "Year"
  ) %>%
  filter(Year < 2014) %>%
  mutate(Code = 30,
         agegrp = "Under 18",
         Country = "New Zealand",
         # Fix: namespace na.approx -- zoo is never attached via library()
         # in this script (only zoo::na.approx is used above), so the bare
         # call would fail with "could not find function".
         sumPops = zoo::na.approx(sumPops),
         rate = births / sumPops * 1000) %>%
  select(Code, Country, Year, rate)

# Combine the under-20 pregnancy rates with the under-18 birth rates.
NZ_rates <- left_join(NZ_synth_rates, NZ_under_18, by = c("Code", "Country", "Year"))

# SD_u18_import <- synthData_u18_filt
# Append NZ to the previously assembled under-18 data
# (SD_u18_import must already exist in the workspace; see line above).
synthData_u18_filt <- bind_rows(SD_u18_import, NZ_under_18)

# Rebuild the no-Scotland dataset, replacing any stale NZ rows.
SD_import <- sd_noScot %>% filter(Country != "New Zealand")
sd_noScot <- bind_rows(SD_import, NZ_rates)

# Persist everything downstream analyses load.
save(synthData, synthData_u18_filt, synthData_u20_filt, u_18_ccodes_f, u_20_ccodes_f, sd_noScot, file = "Data/synth_data_c.rdata")
| /R/Combining extra preg data.R | permissive | andrewbaxter439/teen-preg-project | R | false | false | 7,387 | r | library(pxR)
library(tidyverse)
library(readxl)
# alt pop data -----------------------------------------------------------------------------------------------
altpops <- read_xlsx("Downloaded data files/population_total.xlsx")
pop_perc <- read_xlsx("Downloaded data files/pop_female_perc.xlsx")
# Ireland births -----------------------------------------------------------------
Ire_births<-as.data.frame(read.px("Downloaded data files/IRE2007-2017.px"))
Ire2007<- tibble(Code="IRE", Year=2007, Age=c(15:20), sumBirths=c(69,158,397,723,1158,1359), Country="Ireland")
Ire2006<- tibble(Code="IRE", Year=2006, Age=c(15:20), sumBirths=c(48,151,365,676,1095,1325), Country="Ireland")
Ire2005<- tibble(Code="IRE", Year=2005, Age=c(15:20), sumBirths=c(42,182,388,772,1043,1252), Country="Ireland")
Ire2004<- tibble(Code="IRE", Year=2004, Age=c(15:20), sumBirths=c(53,202,399,779,1127,1339), Country="Ireland")
Ire2003<- tibble(Code="IRE", Year=2003, Age=c(15:20), sumBirths=c(58,187,489,852,1217,1394), Country="Ireland")
Ire2002<- tibble(Code="IRE", Year=2002, Age=c(15:20), sumBirths=c(63,225,504,932,1254,1517), Country="Ireland")
Ire2001<- tibble(Code="IRE", Year=2001, Age=c(15:20), sumBirths=c(67,214,520,975,1311,1530), Country="Ireland")
Ireprerates <- tibble(Year=c(1985:2000), Country="Ireland", agegrp=3, agecat="Under 20", rate=c(16.6,16.4,16.1,15.3,14.8,16.7,17.1,16.9,16.3,15.0,15.1,16.7,17.5,19.1,20.0,19.3)) %>%
mutate(rate=rate/1000)
Ire_tidy <- Ire_births %>%
filter(Statistic=="All Births (Number)" & Sex.of.Child == "Both sexes") %>%
select(Year, Age.of.Mother, value) %>%
mutate(Age.of.Mother = gsub(" years", "", Age.of.Mother),
Age.of.Mother = ifelse(Age.of.Mother=="15 and under", "15", Age.of.Mother)) %>%
filter(as.numeric(Age.of.Mother)>=15 & as.numeric(Age.of.Mother)<=20) %>%
mutate(Age=as.numeric(Age.of.Mother), Total=as.numeric(value), Year=as.numeric(as.character(Year)), Country="Ireland", Code="IRE") %>%
select(Code, Country, Year, Age, sumBirths = value) %>%
as_tibble() %>%
bind_rows(Ire2007, Ire2006, Ire2005, Ire2004, Ire2003, Ire2002, Ire2001) %>%
arrange(Year, Age)
# Australia births ------------------------------------------------------------------------
Aus2004 <- tibble(Code="AUS", Year=2004, Age=15:19, sumBirths=c(356,886,380,502,572), Country="Australia")
Aus1984to2003 <- read_tsv("Downloaded data files/Ausnuptcon.txt") %>%
gather("Year", "Total", 2:10) %>%
bind_rows(read_tsv("Downloaded data files/Ausexnuptcon.txt") %>%
gather("Year", "Total", 2:10)) %>%
group_by(Age, Year) %>%
dplyr::summarise(sumBirths=sum(Total)) %>%
mutate(Year=as.numeric(Year), Code = "AUS", Country = "Australia")
Aus_births <- read_excel("Downloaded data files/AUS teenage births.xlsx", sheet="Table 1.3", skip=5)
Aus_tidy <- Aus_births[2:7,] %>%
select(Age = 1, 2:12) %>%
mutate(Age = ifelse(Age=="Younger than 15 years", 14, as.numeric(gsub(" years","",Age)))) %>%
gather("Year", "Total", 2:12) %>%
mutate(Age=as.numeric(Age),
sumBirths=as.numeric(Total),
Year=as.numeric(as.character(Year)),
Country="Australia",
Code="AUS") %>%
select(Code, Country, Year, Age, sumBirths) %>%
bind_rows(Aus2004, Aus1984to2003) %>%
arrange(Year, Age)
Ausprerates <- tibble(Year=c(1985:2003),
Country="Australia", agegrp=3, agecat="Under 20",
rate = c(22.8,21.8,20.6,20.3,20.6,22.1,22.1,22.0,20.9,20.7,20.4,20.1,19.8,18.9,18.5,17.7,17.7,17.4,16.3))
# NZ pop from HMD ---------------------------------------------------------
NZ_pop <- readpop("NZL_NP")
sumNZ_pop <- NZ_pop %>% merge(popAgeGrps) %>%
group_by(Code, Year, agegrp) %>%
summarise(sumPops = sum(Female)) %>%
ungroup() %>%
mutate(Code = "NZL")
New_NZ_pop <- sumNZ_pop %>%
filter(agegrp == 'Under 20',
Year > 1989) %>%
right_join(U_20_pop %>% mutate(Year = as.numeric(Year)), by = c("Year", "agegrp")) %>%
mutate(Code = "NZL")
# New Zealand - abortions, stillbirths and livebirths combined ----------------------------------
# ** under 20 pregnancies -------------------------------------------------
NZ_import_rates <- read_csv("Downloaded data files/NZ_totalpregrates.csv", skip=2, col_names=c("Year", "Under 15", "Under 20"))[1:26,]
totpop_NZ <- altpops %>% filter(country == "New Zealand") %>% select(Country = country, `1990`:`2017`) %>%
gather("Year", "totpop", -1)
U_20_pop <- pop_perc %>% filter(country == "New Zealand") %>% select(Country = country, `1990`:`2017`) %>%
gather("Year", "pop_perc", -1) %>%
mutate(pop_perc = ifelse(is.na(.$pop_perc), mean(pop_perc, na.rm = TRUE), pop_perc)) %>%
left_join(totpop_NZ, by = c("Country", "Year")) %>%
mutate(sumPops = totpop * pop_perc / 100, agegrp = "Under 20") %>%
select(Country, Year, agegrp, sumPops)
NZ_totalrates <- NZ_import_rates %>%
select(Year, `Under 20`) %>%
gather("agegrp", "totalPregs", -1) %>%
left_join(U_20_pop, by = c("Year", "agegrp")) %>%
mutate(pRate = 1000*totalPregs/sumPops,
Year = as.numeric(Year),
Code = "NZL")
NZ_import_births <- read_csv("Downloaded data files/NZ_births_age.csv",
skip = 1) %>% rename(Year = X1)
NZ_import_abo <- read_csv("Downloaded data files/NZ_abortions_agegrp.csv",
skip = 1) %>% rename(Year = X1)
NZ_synth_rates <- NZ_import_births %>%
select(-`20 years`) %>%
pivot_longer(cols = -1, names_to = "agegrp", values_to = "births") %>%
group_by(Year) %>%
dplyr::summarise(births = sum(births)) %>%
filter(!is.na(births),
Year >1989) %>%
left_join(
NZ_import_abo %>%
select(Year, abortions = `15 - 19 years`) %>%
filter(Year > 1989),
by = "Year"
) %>%
mutate(Year = as.numeric(Year)) %>%
left_join(
New_NZ_pop, by = "Year"
) %>%
filter(Year<2014) %>%
mutate(sumPops.x = zoo::na.approx(sumPops.x),
pRate = (births + abortions)/sumPops.x * 1000,
Code = 30,
agegrp = "Under 20") %>%
select(Code, Country, Year, pRate)
NZ_synth_rates %>%
left_join(GDP_cap, by = c("Country", "Year"))
# SD_import_u20 <- synthData_u20_filt
synthData_u20_filt <- bind_rows(SD_import_u20, NZ_synth_rates)
# ** Under-18 births ------------------------------------------------------
NZ_under_18 <- NZ_import_births %>%
select(-`19 years`, -`20 years`) %>%
pivot_longer(cols = -1, names_to = "agegrp", values_to = "births") %>%
group_by(Year) %>%
summarise(births = sum(births)) %>%
filter(!is.na(births),
Year >1989) %>%
mutate(Year = as.numeric(Year)) %>%
left_join(
sumNZ_pop %>% filter(agegrp == 'Under 18'), by = "Year"
) %>%
filter(Year<2014) %>%
mutate(Code = 30,
agegrp = "Under 18",
Country = "New Zealand",
sumPops = na.approx(sumPops),
rate = births/sumPops * 1000) %>%
select(Code, Country, Year, rate)
NZ_rates <- left_join(NZ_synth_rates, NZ_under_18 , by = c("Code", "Country", "Year"))
# SD_u18_import <- synthData_u18_filt
synthData_u18_filt <- bind_rows(SD_u18_import, NZ_under_18)
SD_import <- sd_noScot %>% filter(Country != "New Zealand")
sd_noScot <- bind_rows(SD_import, NZ_rates)
save(synthData, synthData_u18_filt, synthData_u20_filt, u_18_ccodes_f, u_20_ccodes_f, sd_noScot, file = "Data/synth_data_c.rdata")
|
# Simulation study: empirical coverage rate, median width, and out-of-range
# probability for six interval procedures (Wald, inverted LRT, and four
# bootstrap variants) applied to the mean and shape parameters of an
# inverse-Gaussian sample. rinvgauss(), intervals(), muiscovered(),
# lambdaiscovered(), isout() and intwidth() all come from functions.R.
n <- 25         # sample size per replicate
mu <- 5         # true inverse-Gaussian mean
lambda <- 2     # true inverse-Gaussian shape
M <- 10000      # Monte Carlo replicates
M1 <- 2000      # bootstrap replicates (read inside functions.R)
source("./functions.R")

# One Monte Carlo sample per column.
y <- matrix(rinvgauss(n * M, mean = mu, shape = lambda), nrow = n)
# intervals() returns 24 numbers per sample: (lower, upper) pairs for
# 6 procedures x 2 parameters, reshaped to 2 x 12 x M.
ints <- apply(y, 2, intervals)
dim(ints) <- c(2, 12, M)
muints <- ints[, 2 * (1:6) - 1, ]   # odd slots: intervals for mu
lambdaints <- ints[, 2 * (1:6), ]   # even slots: intervals for lambda

# Per-procedure summaries across the M replicates.
mucoverrate <- apply(apply(muints, c(2, 3), muiscovered), 1, sum) / M
lambdacoverrate <- apply(apply(lambdaints, c(2, 3), lambdaiscovered), 1, sum) / M
# Proportion of intervals flagged by isout() (see functions.R for criterion).
muoutp <- apply(apply(muints, c(2, 3), isout), 1, sum) / M
lambdaoutp <- apply(apply(lambdaints, c(2, 3), isout), 1, sum) / M
muwidth <- apply(apply(muints, c(2, 3), intwidth), 1, median)
lambdawidth <- apply(apply(lambdaints, c(2, 3), intwidth), 1, median)

# Shared procedure labels (previously duplicated in both data.frame calls).
methods <- c("wald", "inv_lrt", "boot1", "boot2", "boot3", "boot4")
dmu <- data.frame(coverage_rate = mucoverrate, median_width = muwidth, prob_out = muoutp, row.names = methods)
dlambda <- data.frame(coverage_rate = lambdacoverrate, median_width = lambdawidth, prob_out = lambdaoutp, row.names = methods)

# Write one summary table per parameter. paste0() replaces paste(sep = "")
# and quote = FALSE replaces the reassignable shorthand F.
out1 <- paste0("./n=", n, "_lambda=", lambda, "_mu.csv")
write.csv(dmu, out1, quote = FALSE)
out2 <- paste0("./n=", n, "_lambda=", lambda, "_lambda.csv")
write.csv(dlambda, out2, quote = FALSE)
| /STAT520/STAT520exam2YifanZhu/Codes/run.R | no_license | fanne-stat/Homework | R | false | false | 1,216 | r | n = 25
mu = 5
lambda = 2
M = 10000
M1 = 2000
source("./functions.R")
y <- matrix(rinvgauss(n*M, mean = mu, shape = lambda), nrow = n)
ints <- apply(y, 2, intervals)
dim(ints) <- c(2,12, M)
muints <- ints[,2*(1:6)-1,]
lambdaints <- ints[,2*(1:6),]
mucoverrate <- apply(apply(muints, c(2,3), muiscovered),1,sum)/M
lambdacoverrate <- apply(apply(lambdaints, c(2,3), lambdaiscovered),1,sum)/M
muoutp <- apply(apply(muints, c(2,3), isout), 1, sum)/M
lambdaoutp <- apply(apply(lambdaints, c(2,3), isout), 1, sum)/M
muwidth <- apply(apply(muints, c(2,3), intwidth), 1, median)
lambdawidth <- apply(apply(lambdaints, c(2,3), intwidth), 1, median)
dmu <- data.frame(coverage_rate = mucoverrate, median_width = muwidth, prob_out = muoutp, row.names = c("wald", "inv_lrt", "boot1", "boot2", "boot3", "boot4"))
dlambda <- data.frame(coverage_rate = lambdacoverrate, median_width = lambdawidth, prob_out = lambdaoutp, row.names = c("wald", "inv_lrt", "boot1", "boot2", "boot3", "boot4"))
out1 <- paste("./n=", n, "_lambda=",lambda, "_mu.csv", sep="")
write.csv(dmu, out1, quote = F)
out2 <- paste("./n=", n, "_lambda=",lambda, "_lambda.csv", sep="")
write.csv(dlambda, out2, quote = F)
|
library(tidyverse)
## Set file path
file_path <- "Input Dataset/Cleaned Dataset/Supermarket_Data_Classification.csv"
## Read data from a file
supermarket_data_class <- read_csv(file_path)
#### Feature selection using Boruta ####
## Install the package
##install.packages("Boruta")
library(Boruta)
## Set Pseudo Random Number Generator
## (Boruta's shadow-feature permutations are stochastic; seed for reproducibility)
set.seed(1234)
## Implement Boruta
## doTrace = 1 prints progress; features are judged against permuted
## "shadow" copies of themselves.
sm_boruta <- Boruta(class~., data = supermarket_data_class, doTrace = 1)
## Check the performance of Boruta
## (prints which attributes were confirmed/rejected/tentative)
print(sm_boruta)
### Feature selection using Random Forest ###
library(randomForest)
library(caret)
## Implement Random Forest
sm_rf <- randomForest(class~., data = supermarket_data_class)
## Generate the importance value for features
## varImp() is caret's scaled view; importance() is randomForest's raw measure.
varImp(sm_rf)
importance(sm_rf)
## Plot the importance value of features
varImpPlot(sm_rf)
| /Tasks Collaboration/Sumit/Feature Selection.R | permissive | dgquintero/Data-Science-with-R | R | false | false | 818 | r | library(tidyverse)
## Set file path
file_path <- "Input Dataset/Cleaned Dataset/Supermarket_Data_Classification.csv"
## Read data from a file
supermarket_data_class <- read_csv(file_path)
#### Feature selection using Boruta ####
## Install the package
##install.packages("Boruta")
library(Boruta)
## Set Pseudo Random Number Generator
set.seed(1234)
## Implement Boruta
sm_boruta <- Boruta(class~., data = supermarket_data_class, doTrace = 1)
## Check the performance of Boruta
print(sm_boruta)
### Feature selection using Random Forest ###
library(randomForest)
library(caret)
## Implement Random Forest
sm_rf <- randomForest(class~., data = supermarket_data_class)
## Generate the importance value for features
varImp(sm_rf)
importance(sm_rf)
## Plot the importance value of features
varImpPlot(sm_rf)
|
#' @title Plot interaction effects of (generalized) linear (mixed) models
#' @name sjp.int
#'
#' @references \itemize{
#' \item Aiken and West (1991). Multiple Regression: Testing and Interpreting Interactions.
#' \item Brambor T, Clark WR and Golder M (2006) Understanding Interaction Models: Improving Empirical Analyses. Political Analysis 14: 63-82 \href{https://files.nyu.edu/mrg217/public/pa_final.pdf}{download}
#' \item Esarey J, Sumner JL (2015) Marginal Effects in Interaction Models: Determining and Controlling the False Positive Rate. \href{http://jee3.web.rice.edu/interaction-overconfidence.pdf}{download}
#' \item Fox J (2003) Effect displays in R for generalised linear models. Journal of Statistical Software 8:15, 1–27, \href{http://www.jstatsoft.org/v08/i15/}{<http://www.jstatsoft.org/v08/i15/>}
#' \item Hayes AF (2012) PROCESS: A versatile computational tool for observed variable mediation, moderation, and conditional process modeling [White paper] \href{http://imaging.mrc-cbu.cam.ac.uk/statswiki/FAQ/SobelTest?action=AttachFile&do=get&target=process.pdf}{download}
#' \item \href{http://www.theanalysisfactor.com/interpreting-interactions-in-regression/}{Grace-Martin K: Interpreting Interactions in Regression}
#' \item \href{http://www.theanalysisfactor.com/clarifications-on-interpreting-interactions-in-regression/}{Grace-Martin K: Clarifications on Interpreting Interactions in Regression}
#' }
#'
#' @seealso \href{http://www.strengejacke.de/sjPlot/sjp.int/}{sjPlot manual: sjp.int}
#'
#' @description Plot regression (predicted values) or probability lines (predicted probabilities) of
#' significant interaction terms to better understand effects
#' of moderations in regression models. This function accepts following fitted model classes:
#' \itemize{
#' \item linear models (\code{\link{lm}})
#' \item generalized linear models (\code{\link{glm}})
#' \item linear mixed effects models (\code{\link[lme4]{lmer}})
#' \item generalized linear mixed effects models (\code{\link[lme4]{glmer}})
#' \item non-linear mixed effects models (\code{\link[lme4]{nlmer}})
#' \item linear mixed effects models (\code{\link[nlme]{lme}}, but only for \code{type = "eff"})
#' \item generalized least squares models (\code{\link[nlme]{gls}}, but only for \code{type = "eff"})
#' \item panel data estimators (\code{\link[plm]{plm}})
#' }
#' Note that beside interaction terms, also the single predictors of each interaction (main effects)
#' must be included in the fitted model as well. Thus, \code{lm(dep ~ pred1 * pred2)} will work,
#' but \code{lm(dep ~ pred1:pred2)} won't!
#'
#' @param fit the fitted (generalized) linear (mixed) model object, including interaction terms. Accepted model
#' classes are
#' \itemize{
#' \item linear models (\code{\link{lm}})
#' \item generalized linear models (\code{\link{glm}})
#' \item linear mixed effects models (\code{\link[lme4]{lmer}})
#' \item generalized linear mixed effects models (\code{\link[lme4]{glmer}})
#' \item non-linear mixed effects models (\code{\link[lme4]{nlmer}})
#' \item linear mixed effects models (\code{\link[nlme]{lme}}, but only for \code{type = "eff"})
#' \item generalized least squares models (\code{\link[nlme]{gls}}, but only for \code{type = "eff"})
#' \item panel data estimators (\code{\link[plm]{plm}})
#' }
#' @param type interaction plot type. Use one of following values:
#' \describe{
#' \item{\code{type = "eff"}}{(default) plots the overall moderation effect on the response value. See 'Details'.}
#' \item{\code{type = "cond"}}{plots the mere \emph{change} of the moderating effect on the response value (conditional effect). See 'Details'.}
#' \item{\code{type = "emm"}}{plots the estimated marginal means (least square means). If this type is chosen, not all function arguments are applicable. See 'Details'.}
#' }
#' @param int.term name of interaction term of \code{fit} (as character), which should be plotted
#' when using \code{type = "eff"}. By default, this argument will be ignored
#' (i.e. \code{int.term = NULL}). See 'Details'.
#' @param int.plot.index numeric vector with index numbers that indicate which
#' interaction terms should be plotted in case the \code{fit} has more than
#' one interaction. By default, this value is \code{NULL}, hence all interactions
#' are plotted.
#' @param diff if \code{FALSE} (default), the minimum and maximum interaction effects of the moderating variable
#' is shown (one line each). if \code{TRUE}, only the difference between minimum and maximum interaction effect
#' is shown (single line). Only applies to \code{type = "cond"}.
#' @param mdrt.values indicates which values of the moderator variable should be
#' used when plotting the interaction effects.
#' \describe{
#' \item{\code{"minmax"}}{(default) minimum and maximum values (lower and upper bounds) of the moderator are used to plot the interaction between independent variable and moderator.}
#' \item{\code{"meansd"}}{uses the mean value of the moderator as well as one standard deviation below and above mean value to plot the effect of the moderator on the independent variable (following the convention suggested by Cohen and Cohen and popularized by Aiken and West, i.e. using the mean, the value one standard deviation above, and the value one standard deviation below the mean as values of the moderator, see \href{http://www.theanalysisfactor.com/3-tips-interpreting-moderation/}{Grace-Martin K: 3 Tips to Make Interpreting Moderation Effects Easier}).}
#' \item{\code{"zeromax"}}{is similar to the \code{"minmax"} option, however, \code{0} is always used as minimum value for the moderator. This may be useful for predictors that don't have an empirical zero-value, but absence of moderation should be simulated by using 0 as minimum.}
#' \item{\code{"quart"}}{calculates and uses the quartiles (lower, median and upper) of the moderator value.}
#' \item{\code{"all"}}{uses all values of the moderator variable. Note that this option only applies to \code{type = "eff"}, for numeric moderator values.}
#' }
#' @param swap.pred if \code{TRUE}, the predictor on the x-axis and the moderator value in an interaction are
#' swapped. For \code{type = "eff"}, the first interaction term is used as moderator and the second term
#' is plotted at the x-axis. For \code{type = "cond"}, the interaction's predictor with less unique values is
#' printed along the x-axis. Default is \code{FALSE}, so the second predictor in an interaction, respectively
#' the predictor with more unique values is printed along the x-axis.
#' @param plevel indicates at which p-value an interaction term is considered as \emph{significant},
#' i.e. at which p-level an interaction term will be considered for plotting. Default is
#' 0.1 (10 percent), hence, non-significant interactions are excluded by default. This
#' argument does not apply to \code{type = "eff"}.
#' @param title default title used for the plots. Should be a character vector
#' of same length as interaction plots to be plotted. Default value is \code{NULL}, which means that each plot's title
#' includes the dependent variable as well as the names of the interaction terms.
#' @param fill.color fill color of the shaded area between the minimum and maximum lines. Default is \code{"grey"}.
#' Either set \code{fill.color} to \code{NULL} or use 0 for \code{fill.alpha} if you want to hide the shaded area.
#' @param fill.alpha alpha value (transparancy) of the shaded area between the minimum and maximum lines. Default is 0.4.
#' Use either 0 or set \code{fill.color} to \code{NULL} if you want to hide the shaded area.
#' @param geom.colors vector of color values or name of a valid color brewer palette.
#' If not a color brewer palette name, \code{geom.colors} must be of same
#' length as moderator values used in the plot (see \code{mdrt.values}).
#' See also 'Details' in \code{\link{sjp.grpfrq}}.
#' @param axis.title a default title used for the x-axis. Should be a character vector
#' of same length as interaction plots to be plotted. Default value is \code{NULL},
#' which means that each plot's x-axis uses the predictor's name as title.
#' @param axis.labels character vector with value labels of the interaction, used
#' to label the x-axis. Only applies to \code{type = "emm"}.
#' @param legend.title title of the diagram's legend. A character vector of same length as
#' amount of interaction plots to be plotted (i.e. one vector element for each
#' plot's legend title).
#' @param legend.labels labels for the guide/legend. Either a character vector of same length as
#' amount of legend labels of the plot, or a \code{list} of character vectors, if more than one
#' interaction plot is plotted (i.e. one vector of legend labels for each interaction plot).
#' Default is \code{NULL}, so the name of the predictor with min/max-effect is used
#' as legend label.
#' @param show.ci may be a numeric or logical value. If \code{show.ci} is logical and
#'          \code{TRUE}, a 95\% confidence region will be plotted. If \code{show.ci}
#'          is numeric, it must be a number between 0 and 1, indicating the proportion
#'          for the confidence region (e.g. \code{show.ci = 0.9} plots a 90\% CI).
#'          Only applies to \code{type = "emm"} or \code{type = "eff"}.
#'
#' @inheritParams sjp.grpfrq
#' @inheritParams sjp.frq
#' @inheritParams sjp.lmer
#' @inheritParams sjp.glmer
#'
#' @return (Invisibly) returns the ggplot-objects with the complete plot-list (\code{plot.list})
#'           as well as the data frames that were used for setting up the ggplot-objects (\code{data.list}).
#'
#' @details \describe{
#' \item{\code{type = "eff"}}{plots the overall effects (marginal effects) of the interaction, with all remaining
#' covariates set to the mean. Effects are calculated using the \code{\link[effects]{effect}}-
#' function from the \pkg{effects}-package.
#' You can pass further arguments down to \code{allEffects} for flexible
#' function call via the \code{...}-argument.
#' }
#' \item{\code{type = "cond"}}{plots the effective \emph{change} or \emph{impact}
#' (conditional effect) on a dependent variable of a moderation effect, as
#' described by Grace-Martin, i.e. the difference of the moderation effect on the
#' dependent variable in \emph{presence} and \emph{absence} of the moderating effect
#' (\emph{simple slope} plot or \emph{conditional effect}, see Hayes 2012). All
#' remaining predictors are set to zero (i.e. ignored and not adjusted for).
#' Hence, this plot type may be used especially for \emph{binary or dummy coded}
#' moderator values (see also Esarey and Summer 2015).
#' This type \emph{does not} show the overall effect (marginal mean, i.e. adjusted
#' for all other predictors and covariates) of interactions on the result of Y. Use
#' \code{type = "eff"} for effect displays similar to the \code{\link[effects]{effect}}-function
#' from the \pkg{effects}-package.
#' }
#' \item{\code{type = "emm"}}{plots the estimated marginal means of repeated measures designs,
#' like two-way repeated measures AN(C)OVA. In detail, this type plots estimated marginal means
#' (also called \emph{least square means} or \emph{marginal means}) of (significant) interaction terms.
#' The fitted models may be linear (mixed effects)
#' models of class \code{\link{lm}} or \code{\link[lme4]{merMod}}. This function may be used, for example,
#' to plot differences in interventions between control and treatment groups over multiple time points.
#' }
#' }
#' The argument \code{int.term} only applies to \code{type = "eff"} and can be used
#' to select a specific interaction term of the model that should be plotted. The function
#' then calls \code{effect(int.term, fit)} to compute effects for this specific interaction
#' term only. This approach is recommended, when the fitted model contains many observations
#' and/or variables, which may slow down the effect-computation dramatically. In such cases,
#' consider computing effects for selected interaction terms only with \code{int.terms}.
#' See 'Examples'.
#'
#' @note Note that beside interaction terms, also the single predictors of each interaction (main effects)
#' must be included in the fitted model as well. Thus, \code{lm(dep ~ pred1 * pred2)} will work,
#' but \code{lm(dep ~ pred1:pred2)} won't! \cr \cr
#' For \code{type = "emm"}, all interaction terms have to be factors.
#' Furthermore, for \code{type = "eff"}, predictors of interactions that are introduced first into the model
#' are used as grouping variable, while the latter predictor is printed along the x-axis
#' (i.e. lm(y~a+b+a:b) means that "a" is used as grouping variable and "b" is plotted along the x-axis).
#'
#' @examples
#' # Note that the data sets used in this example may not be perfectly suitable for
#' # fitting linear models. I just used them because they are part of the R-software.
#'
#' # fit "dummy" model. Note that moderator should enter
#' # first the model, followed by predictor. Else, use
#' # argument "swap.pred" to change predictor on
#' # x-axis with moderator
#' fit <- lm(weight ~ Diet * Time, data = ChickWeight)
#'
#' # show summary to see significant interactions
#' summary(fit)
#'
#' # plot regression line of interaction terms, including value labels
#' sjp.int(fit, type = "eff", show.values = TRUE)
#'
#'
#' # load sample data set
#' library(sjmisc)
#' data(efc)
#' # create data frame with variables that should be included
#' # in the model
#' mydf <- data.frame(usage = efc$tot_sc_e,
#' sex = efc$c161sex,
#' education = efc$c172code,
#' burden = efc$neg_c_7,
#' dependency = efc$e42dep)
#' # convert gender predictor to factor
#' mydf$sex <- relevel(factor(mydf$sex), ref = "2")
#' # fit "dummy" model
#' fit <- lm(usage ~ .*., data = mydf)
#' summary(fit)
#'
#' # plot interactions. note that type = "cond" only considers
#' # significant interactions by default. use "plevel" to
#' # adjust p-level sensitivity
#' sjp.int(fit, type = "cond")
#'
#' # plot only selected interaction term for
#' # type = "eff"
#' sjp.int(fit, type = "eff", int.term = "sex*education")
#'
#' # plot interactions, using mean and sd as moderator
#' # values to calculate interaction effect
#' sjp.int(fit, type = "eff", mdrt.values = "meansd")
#' sjp.int(fit, type = "cond", mdrt.values = "meansd")
#'
#' # plot interactions, including those with p-value up to 0.1
#' sjp.int(fit, type = "cond", plevel = 0.1)
#'
#' # -------------------------------
#' # Predictors for negative impact of care.
#' # Data from the EUROFAMCARE sample dataset
#' # -------------------------------
#' library(sjmisc)
#' data(efc)
#' # create binary response
#' y <- ifelse(efc$neg_c_7 < median(stats::na.omit(efc$neg_c_7)), 0, 1)
#' # create data frame for fitted model
#' mydf <- data.frame(y = as.factor(y),
#' sex = as.factor(efc$c161sex),
#' barthel = as.numeric(efc$barthtot))
#' # fit model
#' fit <- glm(y ~ sex * barthel, data = mydf, family = binomial(link = "logit"))
#' # plot interaction, increase p-level sensitivity
#' sjp.int(fit, type = "eff", legend.labels = get_labels(efc$c161sex), plevel = 0.1)
#' sjp.int(fit, type = "cond", legend.labels = get_labels(efc$c161sex), plevel = 0.1)
#'
#' \dontrun{
#' # -------------------------------
#' # Plot estimated marginal means
#' # -------------------------------
#' # load sample data set
#' library(sjmisc)
#' data(efc)
#' # create data frame with variables that should be included
#' # in the model
#' mydf <- data.frame(burden = efc$neg_c_7,
#' sex = efc$c161sex,
#' education = efc$c172code)
#' # convert gender predictor to factor
#' mydf$sex <- factor(mydf$sex)
#' mydf$education <- factor(mydf$education)
#' # name factor levels and dependent variable
#' levels(mydf$sex) <- c("female", "male")
#' levels(mydf$education) <- c("low", "mid", "high")
#' mydf$burden <- set_label(mydf$burden, "care burden")
#' # fit "dummy" model
#' fit <- lm(burden ~ .*., data = mydf)
#' summary(fit)
#'
#' # plot marginal means of interactions, no interaction found
#' sjp.int(fit, type = "emm")
#' # plot marginal means of interactions, including those with p-value up to 1
#' sjp.int(fit, type = "emm", plevel = 1)
#' # swap predictors
#' sjp.int(fit, type = "emm", plevel = 1, swap.pred = TRUE)
#'
#' # -------------------------------
#' # Plot effects
#' # -------------------------------
#' # add continuous variable
#' mydf$barthel <- efc$barthtot
#' # re-fit model with continuous variable
#' fit <- lm(burden ~ .*., data = mydf)
#'
#' # plot effects
#' sjp.int(fit, type = "eff", show.ci = TRUE)
#'
#' # plot effects, faceted
#' sjp.int(fit, type = "eff", int.plot.index = 3, show.ci = TRUE, facet.grid = TRUE)}
#'
#' @import ggplot2
#' @import sjmisc
#' @importFrom stats family quantile
#' @importFrom effects allEffects effect
#' @export
sjp.int <- function(fit,
type = c("eff", "cond", "emm"),
int.term = NULL,
int.plot.index = NULL,
mdrt.values = c("minmax", "meansd", "zeromax", "quart", "all"),
swap.pred = FALSE,
plevel = 0.1,
diff = FALSE,
title = NULL,
axis.title = NULL,
axis.labels = NULL,
legend.title = NULL,
legend.labels = NULL,
wrap.title = 50,
wrap.legend.labels = 20,
wrap.legend.title = 20,
geom.colors = "Set1",
geom.size = NULL,
fill.color = "grey",
fill.alpha = 0.3,
show.values = FALSE,
show.ci = FALSE,
p.kr = TRUE,
grid.breaks = NULL,
xlim = NULL,
ylim = NULL,
y.offset = 0.07,
digits = 2,
facet.grid = FALSE,
prnt.plot = TRUE,
...) {
# -----------------------------------------------------------
# match arguments
# -----------------------------------------------------------
type <- match.arg(type)
mdrt.values <- match.arg(mdrt.values)
# -----------------------------------------------------------
# check class of fitted model
# -----------------------------------------------------------
c.f <- class(fit)
fun <- "lm"
if (any(c.f == "glm")) {
fun <- "glm"
} else if (any(c.f == "lm")) {
fun <- "lm"
} else if (any(c.f == "plm")) {
fun <- "plm"
} else if (any(c.f == "glmerMod")) {
fun <- "glmer"
} else if (any(c.f == "nlmerMod")) {
fun <- "nlmer"
} else if (any(c.f == "lmerMod") || any(c.f == "merModLmerTest")) {
fun <- "lmer"
} else if (any(c.f == "lme")) {
fun <- "lme"
if (type != "eff") {
message("Only 'type = \"eff\"' supports objects of class 'nlme::lme'. Defaulting type to \"eff\".")
type <- "eff"
}
} else if (any(c.f == "gls")) {
fun <- "gls"
if (type != "eff") {
message("Only 'type = \"eff\"' supports objects of class 'nlme::gls'. Defaulting type to \"eff\".")
type <- "eff"
}
}
# --------------------------------------------------------
# check default geom.size
# --------------------------------------------------------
if (is.null(geom.size)) geom.size = .7
# ------------------------
# check if suggested package is available
# ------------------------
if ((fun == "lmer" || fun == "glmer" || fun == "nlmer") && !requireNamespace("lme4", quietly = TRUE)) {
stop("Package `lme4` needed for this function to work. Please install it.", call. = FALSE)
}
if (fun == "plm" && !"package:plm" %in% search()) {
stop("Package `plm` needs to be loaded for this function to work... Use `library(plm)` and call this function again.", call. = FALSE)
}
# -----------------------------------------------------------
# argument check
# -----------------------------------------------------------
if (is.null(fill.color)) {
fill.color <- "white"
fill.alpha <- 0
}
# gridbreaks
if (is.null(grid.breaks)) gridbreaks.x <- gridbreaks.y <- ggplot2::waiver()
# check matching argument combinations
if (type == "cond" && mdrt.values == "all") {
message("`mdrt.values = \"all\"` only applies to `type = \"eff\". Defaulting `mdrt.values` to `minmax`.")
mdrt.values <- "minmax"
}
# ------------------------
# do we have glm? if so, get link family. make exceptions
# for specific models that don't have family function
# ------------------------
fitfam <- get_glm_family(fit)
# --------------------------------------------------------
# create logical for family
# --------------------------------------------------------
binom_fam <- fitfam$is_bin
# --------------------------------------------------------
# plot estimated marginal means?
# --------------------------------------------------------
if (type == "emm") {
# ------------------------
# multiple purpose of show.ci parameter. if logical,
# sets default CI to 0.95, else show.ci also may be
# numeric
# ------------------------
if (!is.null(show.ci) && !is.logical(show.ci)) {
show.ci <- TRUE
warning("argument `show.ci` must be logical for `type = 'emm'`.", call. = F)
}
return(sjp.emm(fit, swap.pred, plevel, title, geom.colors, geom.size,
axis.title, axis.labels, legend.title, legend.labels,
show.values, digits, show.ci, p.kr, wrap.title,
wrap.legend.title, wrap.legend.labels, y.offset, ylim,
grid.breaks, facet.grid, prnt.plot))
}
# --------------------------------------------------------
# list labels
# --------------------------------------------------------
if (!is.null(legend.labels) && !is.list(legend.labels)) legend.labels <- list(legend.labels)
if (!is.null(legend.title) && is.list(legend.title)) legend.title <- unlist(legend.title)
# --------------------------------------------------------
# plot moderation effeczs?
# --------------------------------------------------------
if (type == "eff") {
return(sjp.eff.int(fit, int.term, int.plot.index, mdrt.values, swap.pred, plevel,
title, fill.alpha, geom.colors, geom.size, axis.title,
legend.title, legend.labels, show.values, wrap.title, wrap.legend.labels,
wrap.legend.title, xlim, ylim, y.offset, grid.breaks,
show.ci, p.kr, facet.grid, prnt.plot, fun, ...))
}
# -----------------------------------------------------------
# set axis title
# -----------------------------------------------------------
y_title <- NULL
if ((fun == "glm" || fun == "glmer")) {
if (binom_fam)
y_title <- "Change in Predicted Probability"
else
y_title <- "Change in Incidents Rates"
}
# -----------------------------------------------------------
# get all (significant) interaction terms from model
# the function "getInteractionTerms" checks if a fitted
# model contains any interaction terms that are significant
# at the level specified by "plevel". returns NULL, if model
# contains no interaction terms or no significant interaction term.
# else, information on model and interaction terms is returned
# -----------------------------------------------------------
git <- getInteractionTerms(fit, fun, plevel, p.kr)
# check return value
if (is.null(git)) return(invisible(NULL))
# -----------------------------------------------------------
# init variables from return values
# -----------------------------------------------------------
# b0 <- git[["b0"]]
estimates.names <- git[["estimates.names"]]
estimates <- git[["estimates"]]
fitdat <- git[["fitdat"]]
# init vector that saves ggplot objects
plotlist <- list()
dflist <- list()
# -----------------------------------------------------------
# when we have linear mixed effects models and both interaction
# terms are factors, we may have the same interaction term names
# multiples times - thus, remove redundant duplicates
# -----------------------------------------------------------
intnames <- unique(git[["intnames"]])
# check if we have selected plots only, and remove any plots
# that should not be plotted. but be careful for out of bound index!
if (!is.null(int.plot.index) && !any(int.plot.index > length(intnames))) intnames <- intnames[int.plot.index]
# -----------------------------------------------------------
# get model frame, needed later for label detection
# -----------------------------------------------------------
modfram <- stats::model.frame(fit)
# -----------------------------------------------------------
# Now iterate all significant interaction terms
# and manually calculate the linear regression by inserting
# the estimates of each term and the associated interaction term,
# i.e.: y = b0 + (b1 * pred1) + (b2 * pred2) + (b3 * pred1 * pred2)
# -----------------------------------------------------------
for (cnt in 1:length(intnames)) {
# -----------------------------------------------------------
# first, retrieve and split interaction term so we know
# the two predictor variables of the interaction term
# -----------------------------------------------------------
interactionterms <- unlist(strsplit(intnames[cnt], ":"))
labx <- c()
# Label on y-axis is name of dependent variable
laby <- paste0("Change in ", sjmisc::get_label(modfram[[git[["depvar.label"]]]],
def.value = git[["depvar.label"]]))
# -----------------------------------------------------------
# find estimates (beta values) for each single predictor of
# the interaction as well as of the interaction term
# -----------------------------------------------------------
b1 <- as.numeric(estimates[match(interactionterms[1], estimates.names)])
b2 <- as.numeric(estimates[match(interactionterms[2], estimates.names)])
b3 <- as.numeric(estimates[match(intnames[cnt], estimates.names)])
# -----------------------------------------------------------
# check whether each predictor was included in the model
# as single term as well
# -----------------------------------------------------------
if (is.na(b1) || is.na(b2) || is.na(b3)) {
stop("Predictors of interaction terms (main effects) must be included as single term as well. See Note in ?sjp.int", call. = FALSE)
}
# -----------------------------------------------------------
# retrieve number of unique values in each predictor variable.
# depending on the amount of values the variable for the x-axis
# is chosen. In this case, we use the predictor with the higher
# number of unique values on the x-axis.
# -----------------------------------------------------------
# retrieve values as data frame
df_pred1uniquevals <- unique(stats::na.omit(fitdat[, interactionterms[1]]))
df_pred2uniquevals <- unique(stats::na.omit(fitdat[, interactionterms[2]]))
# convert data frame to numeric vector
pred1uniquevals <- pred2uniquevals <- as.numeric(c())
pred1uniquevals <- sort(as.numeric(sapply(df_pred1uniquevals, as.numeric)))
pred2uniquevals <- sort(as.numeric(sapply(df_pred2uniquevals, as.numeric)))
# init data frame
intdf <- c()
# -----------------------------------------------------------
# choose x-value according to higher number of unique values
# choose minimum and maximum value from predictor that has
# a "smaller range" (i.e. less unique values)
# or swap predictors on axes if requested
# -----------------------------------------------------------
if (swap.pred) {
useFirstPredOnY <- ifelse(length(pred1uniquevals) > length(pred2uniquevals), F, T)
} else {
useFirstPredOnY <- ifelse(length(pred1uniquevals) > length(pred2uniquevals), T, F)
}
# -----------------------------------------------------------
# calculate regression line
# -----------------------------------------------------------
if (useFirstPredOnY) {
labx <- sjmisc::get_label(modfram[[interactionterms[1]]],
def.value = interactionterms[1])
predy <- interactionterms[2]
# -----------------------------------------------------------
# define predictor and moderator values
# -----------------------------------------------------------
pred.value <- pred1uniquevals
mod.value <- pred2uniquevals
# -----------------------------------------------------------
# define predictor beta
# -----------------------------------------------------------
b.pred <- b1
} else {
labx <- sjmisc::get_label(modfram[[interactionterms[2]]],
def.value = interactionterms[2])
predy <- interactionterms[1]
# -----------------------------------------------------------
# define predictor and moderator values
# -----------------------------------------------------------
pred.value <- pred2uniquevals
mod.value <- pred1uniquevals
# -----------------------------------------------------------
# define predictor beta
# -----------------------------------------------------------
b.pred <- b2
}
# -----------------------------------------------------------
# Check whether moderator value has enough unique values
# for quartiles
# -----------------------------------------------------------
mdrt.values <- mv_check(mdrt.values, mod.value)
# -----------------------------------------------------------
# check which values of moderator should be plotted, i.e. if
# lower/upper bound (min-max) or mean and standard-deviation
# should be used as valus for the moderator.
# see http://www.theanalysisfactor.com/3-tips-interpreting-moderation/
# -----------------------------------------------------------
if (mdrt.values == "minmax") {
mw <- NA
ymin <- min(mod.value, na.rm = T)
ymax <- max(mod.value, na.rm = T)
} else if (mdrt.values == "meansd") {
mw <- mean(mod.value, na.rm = T)
ymin <- mw - sd(mod.value, na.rm = T)
ymax <- mw + sd(mod.value, na.rm = T)
} else if (mdrt.values == "zeromax") {
mw <- NA
ymin <- 0
ymax <- max(mod.value, na.rm = T)
} else if (mdrt.values == "quart") {
qu <- as.vector(stats::quantile(mod.value, na.rm = T))
mw <- qu[3]
ymin <- qu[2]
ymax <- qu[4]
}
# -----------------------------------------------------------
# Create data frame for plotting the interactions by
# manually calculating the linear regression by inserting
# the estimates of each term and the associated interaction term,
# i.e.: y = b0 + (b1 * pred1) + (b2 * pred2) + (b3 * pred1 * pred2)
# -----------------------------------------------------------
# We now calculate the conditional effect of predictor 1 under absence
# (or lowest impact) of predictor 2 on the dependent variable. Thus,
# the slope for predictor 2 is not calculated. see
# http://www.theanalysisfactor.com/interpreting-interactions-in-regression/
# http://www.theanalysisfactor.com/clarifications-on-interpreting-interactions-in-regression/
# ------------------------------
miny <- (b.pred * pred.value) + (b3 * pred.value * ymin)
# ------------------------------
# here we calculate the conditional effect of predictor 1 under presence
# (or strongest impact) of predictor 2 on the dependent variable. Thus,
# the slope for predictor 2 only is not needed. see references above
# ------------------------------
maxy <- (b.pred * pred.value) + (b3 * pred.value * ymax)
# store in df
tmp <- data.frame(x = pred.value, y = miny, ymin = miny, ymax = maxy, grp = "min")
intdf <- as.data.frame(rbind(intdf, tmp))
# store in df
tmp <- data.frame(x = pred.value, y = maxy, ymin = miny, ymax = maxy, grp = "max")
intdf <- as.data.frame(rbind(intdf, tmp))
# store in df
if (mdrt.values == "meansd" || mdrt.values == "quart") {
# ------------------------------
# here we calculate the effect of predictor 1 under presence
# of mean of predictor 2 on the dependent variable. Thus, the slope for
# predictor 2 only is not needed. see references above
# ------------------------------
mittelwert <- (b.pred * pred.value) + (b3 * pred.value * mw)
tmp <- data.frame(x = pred.value, y = mittelwert, ymin = miny, ymax = maxy, grp = "mean")
intdf <- as.data.frame(rbind(intdf, tmp))
}
# -----------------------------------------------------------
# convert df-values to numeric
# -----------------------------------------------------------
if (fun == "lm" || fun == "lmer" || fun == "lme") {
intdf$x <- sjmisc::to_value(intdf$x, keep.labels = F)
intdf$y <- sjmisc::to_value(intdf$y, keep.labels = F)
intdf$ymin <- sjmisc::to_value(intdf$ymin, keep.labels = F)
intdf$ymax <- sjmisc::to_value(intdf$ymax, keep.labels = F)
intdf$ydiff <- intdf$ymax - intdf$ymin
# -----------------------------------------------------------
# retrieve lowest and highest x and y position to determine
# the scale limits
# -----------------------------------------------------------
if (is.null(ylim)) {
if (diff) {
lowerLim.y <- floor(min(intdf$ydiff, na.rm = T))
upperLim.y <- ceiling(max(intdf$ydiff, na.rm = T))
} else {
lowerLim.y <- floor(min(intdf$y, na.rm = T))
upperLim.y <- ceiling(max(intdf$y, na.rm = T))
}
} else {
lowerLim.y <- ylim[1]
upperLim.y <- ylim[2]
}
} else {
invlink <- stats::family(fit)
intdf$x <- sjmisc::to_value(intdf$x, keep.labels = F)
intdf$y <- invlink$linkinv(eta = sjmisc::to_value(intdf$y, keep.labels = F))
intdf$ymin <- invlink$linkinv(eta = sjmisc::to_value(intdf$ymin, keep.labels = F))
intdf$ymax <- invlink$linkinv(eta = sjmisc::to_value(intdf$ymax, keep.labels = F))
intdf$ydiff <- invlink$linkinv(eta = intdf$ymax - intdf$ymin)
}
# -----------------------------------------------------------
# retrieve lowest and highest x and y position to determine
# the scale limits
# -----------------------------------------------------------
if (is.null(ylim)) {
if (binom_fam) {
lowerLim.y <- as.integer(floor(10 * min(intdf$y, na.rm = T) * .9)) / 10
upperLim.y <- as.integer(ceiling(10 * max(intdf$y, na.rm = T) * 1.1)) / 10
} else {
if (diff) {
lowerLim.y <- floor(min(intdf$ydiff, na.rm = T))
upperLim.y <- ceiling(max(intdf$ydiff, na.rm = T))
} else {
lowerLim.y <- floor(min(intdf$y, na.rm = T))
upperLim.y <- ceiling(max(intdf$y, na.rm = T))
}
}
} else {
lowerLim.y <- ylim[1]
upperLim.y <- ylim[2]
}
# -----------------------------------------------------------
# check x-axis limits
# -----------------------------------------------------------
if (!is.null(xlim)) {
lowerLim.x <- xlim[1]
upperLim.x <- xlim[2]
} else {
lowerLim.x <- floor(min(intdf$x, na.rm = T))
upperLim.x <- ceiling(max(intdf$x, na.rm = T))
}
# -----------------------------------------------------------
# check whether user defined grid breaks / tick marks are used
# -----------------------------------------------------------
if (!is.null(grid.breaks)) {
gridbreaks.x <- seq(lowerLim.x, upperLim.x, by = grid.breaks)
gridbreaks.y <- seq(lowerLim.y, upperLim.y, by = grid.breaks)
}
# -----------------------------------------------------------
# prepare plot title and axis titles
# -----------------------------------------------------------
if (is.null(title)) {
labtitle <- paste0("Conditional effect of ",
interactionterms[ifelse(isTRUE(useFirstPredOnY), 1, 2)],
" (by ",
interactionterms[ifelse(isTRUE(useFirstPredOnY), 2, 1)],
") on ", git[["depvar.label"]])
} else {
# copy plot counter
l_nr <- cnt
# check if we have enough labels. if not, use last labels
if (l_nr > length(title)) l_nr <- length(title)
# set legend labels for plot
labtitle <- title[l_nr]
}
# -----------------------------------------------------------
# get model frame, needed for label detection
# -----------------------------------------------------------
modfram <- stats::model.frame(fit)
modfound <- modfram[[predy]]
# -----------------------------------------------------------
# legend labels
# -----------------------------------------------------------
if (is.null(legend.labels)) {
# ---------------------------------
# find moderator variable in data
# ---------------------------------
if (!is.null(modfound)) {
lLabels <- sjmisc::get_labels(modfound, attr.only = F)
} else {
lLabels <- NULL
}
# if we still have no labels, prepare generic labels
if (is.null(lLabels)) {
if (mdrt.values == "minmax") {
lLabels <- c(paste0("lower bound of ", predy), paste0("upper bound of ", predy))
} else if (mdrt.values == "meansd") {
lLabels <- c(paste0("lower sd of ", predy), paste0("upper sd of ", predy), paste0("mean of ", predy))
} else if (mdrt.values == "quart") {
lLabels <- c(paste0("lower quartile of ", predy), paste0("upper quartile of ", predy), paste0("median of ", predy))
} else {
lLabels <- c(paste0("0 for ", predy), paste0("upper bound of ", predy))
}
} else {
if (mdrt.values == "minmax") {
lLabels <- lLabels[c(1, length(lLabels))]
} else if (mdrt.values == "meansd") {
lLabels <- c(paste0("lower sd of ", predy), paste0("upper sd of ", predy), paste0("mean of ", predy))
} else if (mdrt.values == "quart") {
lLabels <- c(paste0("lower quartile of ", predy), paste0("upper quartile of ", predy), paste0("median of ", predy))
} else {
lLabels <- c(paste0("0 for ", predy), lLabels[length(lLabels)])
}
}
} else {
# copy plot counter
l_nr <- cnt
# check if we have enough labels. if not, use last labels
if (l_nr > length(legend.labels)) l_nr <- length(legend.labels)
# set legend labels for plot
lLabels <- legend.labels[[l_nr]]
}
# -----------------------------------------------------------
# legend titles
# -----------------------------------------------------------
if (is.null(legend.title)) {
lTitle <- sjmisc::get_label(modfound, def.value = predy)
} else {
# copy plot counter
l_nr <- cnt
# check if we have enough legend titles, if not, use last legend title
if (l_nr > length(legend.title)) l_nr <- length(legend.title)
# set legend title for plot
lTitle <- legend.title[l_nr]
}
# -----------------------------------------------------------
# x axis titles
# -----------------------------------------------------------
if (!is.null(axis.title)) {
# copy plot counter
l_nr <- cnt
# check if we have enough axis titles, if not, use last legend title
if (l_nr > length(axis.title)) l_nr <- length(axis.title)
# set axis title
labx <- axis.title[l_nr]
}
if (!is.null(y_title)) laby <- y_title
# -----------------------------------------------------------
# prepare annotation labels
# -----------------------------------------------------------
# wrap title
labtitle <- sjmisc::word_wrap(labtitle, wrap.title)
# wrap legend labels
lLabels <- sjmisc::word_wrap(lLabels, wrap.legend.labels)
# wrap legend title
lTitle <- sjmisc::word_wrap(lTitle, wrap.legend.title)
# -----------------------------------------------------------
# prepare base plot of interactions
# -----------------------------------------------------------
if (diff) {
baseplot <- ggplot(intdf, aes(x = x, y = ydiff)) +
# -----------------------------------------------------------
# add a shaded region between minimun
# and maximum curve of interactions
# -----------------------------------------------------------
geom_ribbon(aes(ymin = 0, ymax = ydiff),
fill = fill.color,
alpha = fill.alpha) +
geom_line(size = geom.size)
# -----------------------------------------------------------
# show value labels
# -----------------------------------------------------------
if (show.values) {
baseplot <- baseplot +
geom_text(aes(label = round(ydiff, 1)),
nudge_y = y.offset,
show.legend = FALSE)
}
} else {
baseplot <- ggplot(intdf, aes(x = x, y = y, colour = grp))
# the shaded area between line only if plots are not faceted
if (!facet.grid) {
baseplot <- baseplot +
# add a shaded region between minimun and maximum curve of interactions
geom_ribbon(aes(ymin = ymin, ymax = ymax, colour = NULL),
fill = fill.color,
alpha = fill.alpha,
show.legend = FALSE)
}
# add line
baseplot <- baseplot + geom_line()
# ------------------------------------------------------------
# plot value labels
# ------------------------------------------------------------
if (show.values) {
baseplot <- baseplot +
geom_point() +
geom_text(aes(label = round(y, 1)),
nudge_y = y.offset,
show.legend = FALSE)
}
}
# ------------------------------------------------------------------------------------
# check whether only diff-line is shown or upper and lower boundaries. in the latter
# case, show legend, else hide legend
# ------------------------------------------------------------------------------------
if (diff) {
col.len <- 1
lLabels <- NULL
lTitle <- NULL
} else {
if (mdrt.values == "minmax" || mdrt.values == "zeromax") {
col.len <- 2
} else {
col.len <- 3
}
}
# ------------------------------------------------------------------------------------
# build plot object with theme and labels
# ------------------------------------------------------------------------------------
baseplot <- baseplot +
# set plot and axis titles
labs(title = labtitle, x = labx, y = laby, colour = lTitle) +
# set axis scale breaks
scale_x_continuous(limits = c(lowerLim.x, upperLim.x), breaks = gridbreaks.x) +
scale_y_continuous(limits = c(lowerLim.y, upperLim.y), breaks = gridbreaks.y)
# ---------------------------------------------------------
# facet grids?
# ---------------------------------------------------------
if (facet.grid && !diff) baseplot <- baseplot + facet_grid(~grp)
# ---------------------------------------------------------
# set geom colors
# ---------------------------------------------------------
baseplot <- sj.setGeomColors(baseplot, geom.colors, col.len, !is.null(lLabels), lLabels)
# ---------------------------------------------------------
# Check whether ggplot object should be returned or plotted
# ---------------------------------------------------------
if (prnt.plot) graphics::plot(baseplot)
# concatenate plot object
plotlist[[length(plotlist) + 1]] <- baseplot
dflist[[length(dflist) + 1]] <- intdf
}
# -------------------------------------
# return results
# -------------------------------------
invisible(structure(class = "sjpint",
list(plot.list = plotlist,
data.list = dflist)))
}
#' @importFrom stats na.omit model.frame
# Internal worker for sjp.int(type = "eff"): plots the overall (marginal)
# effect of each interaction term of a fitted model, computed via the
# effects-package (effects::allEffects() resp. effects::effect() when a
# single `int.term` is requested). For every interaction term one
# ggplot-object and its underlying data frame are built; both are
# returned invisibly as `plot.list` and `data.list`.
#
# NOTE(review): `plevel` appears in the signature but is never referenced
# in this function's body (effect-displays are not filtered by
# significance); it is kept for API-compatibility with sjp.int().
sjp.eff.int <- function(fit,
                        int.term = NULL,
                        int.plot.index = NULL,
                        mdrt.values = "minmax",
                        swap.pred = FALSE,
                        plevel = 0.05,
                        title = NULL,
                        fill.alpha = 0.3,
                        geom.colors = "Set1",
                        geom.size = 0.7,
                        axis.title = NULL,
                        legend.title = NULL,
                        legend.labels = NULL,
                        show.values = FALSE,
                        wrap.title = 50,
                        wrap.legend.labels = 20,
                        wrap.legend.title = 20,
                        xlim = NULL,
                        ylim = NULL,
                        y.offset = 0.07,
                        grid.breaks = NULL,
                        show.ci = FALSE,
                        p.kr = FALSE,
                        facet.grid = FALSE,
                        prnt.plot = TRUE,
                        fun,
                        ...) {
  # --------------------------------------------------------
  # check default geom.size
  # --------------------------------------------------------
  if (is.null(geom.size)) geom.size = .7
  # ------------------------
  # check if suggested package is available
  # ------------------------
  if (!requireNamespace("effects", quietly = TRUE)) {
    stop("Package `effects` needed for this function to work. Please install it.", call. = FALSE)
  }
  # gridbreaks
  if (is.null(grid.breaks)) gridbreaks.x <- gridbreaks.y <- ggplot2::waiver()
  # init default
  binom_fam <- FALSE
  # ------------------------
  # multiple purpose of show.ci parameter. if logical,
  # sets default CI to 0.95, else show.ci also may be
  # numeric
  # ------------------------
  if (!is.null(show.ci) && !is.logical(show.ci)) {
    eci <- show.ci
    show.ci = TRUE
  } else {
    eci <- 0.95
  }
  # ------------------------
  # calculate effects of higher order terms and
  # check if fitted model contains any interaction terms
  # allEffects returns a list, with all interaction effects
  # (or higher order terms) as separate list element. each list
  # element contains the higher-order-term of the fitted model,
  # where the 'term' attribute of interaction terms have a "*".
  # So we just need to look at each 'term' attribute of each
  # list element and see if there is a "*"...
  # ------------------------
  if (is.null(int.term)) {
    eff <- effects::allEffects(fit, KR = F)
    int <- unlist(lapply(eff, function(x) grep("*", x['term'], fixed = T)))
  } else {
    eff <- effects::effect(int.term, fit, KR = F)
    int <- grep("*", eff$term, fixed = T)
  }
  if (length(int) == 0) {
    warning("No interaction term found in fitted model...", call. = FALSE)
    return(invisible(NULL))
  }
  # ------------------------
  # retrieve position of interaction terms in effects-object
  # ------------------------
  if (is.null(int.term)) {
    intpos <- which(as.vector(sapply(eff, function(x) sjmisc::str_contains(x['term'], "*"))))
  } else {
    intpos <- 1
  }
  # select only specific plots
  if (!is.null(int.plot.index) && !any(int.plot.index > length(intpos))) intpos <- intpos[int.plot.index]
  # init vector that saves ggplot objects
  plotlist <- list()
  dflist <- list()
  # -----------------------------------------------------------
  # iterate all interaction terms
  # -----------------------------------------------------------
  for (i in 1:length(intpos)) {
    # -----------------------------------------------------------
    # copy "eff" object, so we don't confuse with effect-return-
    # value from single term and multiple terms
    # -----------------------------------------------------------
    if (is.null(int.term)) {
      dummy.eff <- eff[[intpos[i]]]
    } else {
      dummy.eff <- eff
    }
    # -----------------------------------------------------------
    # retrieve data frame
    # -----------------------------------------------------------
    intdf <- data.frame(dummy.eff)
    # -----------------------------------------------------------
    # save response, predictor and moderator names
    # -----------------------------------------------------------
    pred_x.name <- colnames(intdf)[ifelse(isTRUE(swap.pred), 1, 2)]
    moderator.name <- colnames(intdf)[ifelse(isTRUE(swap.pred), 2, 1)]
    response.name <- dummy.eff$response
    # prepare axis titles
    labx <- sjmisc::get_label(stats::model.frame(fit)[[pred_x.name]], def.value = pred_x.name)
    # check whether x-axis-predictor is a factor or not
    x_is_factor <- is.factor(intdf[[pred_x.name]]) || (length(unique(na.omit(intdf[[pred_x.name]]))) < 3)
    mod_is_factor <- is.factor(intdf[[moderator.name]])
    # -----------------------------------------------------------
    # check for moderator values, but only, if moderator
    # is no factor value. In this case, we can choose
    # the values for continuous moderator intentionally,
    # e.g. only min/max, or mean and sd. We don't need these
    # values for categorical moderator values.
    # -----------------------------------------------------------
    if (!mod_is_factor) {
      # retrieve moderator value
      modval <- dummy.eff$data[[moderator.name]]
      # retrieve predictor value
      predval <- dummy.eff$data[[pred_x.name]]
      # -----------------------------------------------------------
      # Check whether moderator value has enough unique values
      # for quartiles
      # -----------------------------------------------------------
      mdrt.values <- mv_check(mdrt.values, modval)
      # we have more than two values, so re-calculate effects, just using
      # min and max value of moderator.
      if (mdrt.values == "minmax" && length(unique(intdf[[moderator.name]])) > 2) {
        # retrieve min and max values
        mv.min <- min(modval, na.rm = T)
        mv.max <- max(modval, na.rm = T)
        # re-compute effects, prepare xlevels
        xl1 <- list(x = c(mv.min, mv.max))
        # we have more than two values, so re-calculate effects, just using
        # 0 and max value of moderator.
      } else if (mdrt.values == "zeromax" && length(unique(intdf[[moderator.name]])) > 2) {
        # retrieve max values
        mv.max <- max(modval, na.rm = T)
        # re-compute effects, prepare xlevels
        xl1 <- list(x = c(0, mv.max))
        # compute mean +/- sd
      } else if (mdrt.values == "meansd") {
        # retrieve mean and sd
        mv.mean <- round(mean(modval, na.rm = T), 2)
        mv.sd <- round(sd(modval, na.rm = T), 2)
        # re-compute effects, prepare xlevels
        xl1 <- list(x = c(mv.mean - mv.sd, mv.mean, mv.mean + mv.sd))
      } else if (mdrt.values == "all") {
        # re-compute effects, prepare xlevels
        xl1 <- list(x = as.vector((unique(sort(modval, na.last = NA)))))
      } else if (mdrt.values == "quart") {
        # re-compute effects, prepare xlevels
        xl1 <- list(x = as.vector(stats::quantile(modval, na.rm = T)))
      }
      # NOTE(review): if none of the branches above matched (e.g.
      # mdrt.values = "minmax" or "zeromax" with a moderator that has
      # exactly 2 unique values), `xl1` is undefined here and the next
      # line errors - TODO confirm upstream callers guarantee a match.
      # change list name to moderator value name
      names(xl1) <- moderator.name
      # add values of interaction term
      # first, get all unique values
      prvl <- sort(unique(stats::na.omit(predval)))
      # add them to list as well
      xl2 <- list(y = prvl)
      # change list name
      names(xl2) <- pred_x.name
      # combine lists
      if (is.null(int.term)) {
        # re-compute effects
        eff.tmp <- effects::allEffects(fit, xlevels = c(xl1, xl2), KR = p.kr,
                                       confidence.level = eci, ...)
        # reset data frame
        intdf <- data.frame(eff.tmp[[intpos[i]]])
      } else {
        # re-compute effects
        eff.tmp <- effects::effect(int.term, fit, xlevels = c(xl1, xl2),
                                   KR = p.kr, confidence.level = eci, ...)
        # reset data frame
        intdf <- data.frame(eff.tmp)
      }
      # -----------------------------------------------------------
      # check for predictor values on x-axis. if it
      # is no factor, select whole range of possible
      # values.
      # -----------------------------------------------------------
    } else if (!x_is_factor) {
      # retrieve predictor value
      predval <- dummy.eff$data[[pred_x.name]]
      # add values of interaction term
      # first, get all unique values
      prvl <- sort(unique(stats::na.omit(predval)))
      # add them to list as well
      xl <- list(x = prvl)
      # change list name
      names(xl) <- pred_x.name
      # combine lists
      if (is.null(int.term)) {
        # re-compute effects
        eff.tmp <- effects::allEffects(fit, xlevels = xl, KR = p.kr,
                                       confidence.level = eci, ...)
        # reset data frame
        intdf <- data.frame(eff.tmp[[intpos[i]]])
      } else {
        # re-compute effects
        eff.tmp <- effects::effect(int.term, fit, xlevels = xl, KR = p.kr,
                                   confidence.level = eci, ...)
        # reset data frame
        intdf <- data.frame(eff.tmp)
      }
    }
    # -----------------------------------------------------------
    # change column names
    # -----------------------------------------------------------
    if (swap.pred) {
      colnames(intdf) <- c("x", "grp", "y", "se", "conf.low", "conf.high")
    } else {
      colnames(intdf) <- c("grp", "x", "y", "se", "conf.low", "conf.high")
    }
    # -----------------------------------------------------------
    # effects-package creates "NA" factor levels, which
    # need to be removed
    # -----------------------------------------------------------
    intdf <- droplevels(intdf)
    # group as factor
    intdf$grp <- factor(intdf$grp, levels = unique(as.character(intdf$grp)))
    # reset labels
    x_labels <- NULL
    lLabels <- NULL
    # does model have labels? we want these if x is a factor.
    # first we need to know whether we have a model-data-frame
    if (x_is_factor) {
      # do we have a factor with level-labels for "x"?
      # if yes, use these as labels
      if (!sjmisc::is_num_fac(intdf$x)) {
        x_labels <- levels(intdf$x)
      } else {
        x_labels <- sjmisc::get_labels(stats::model.frame(fit)[[pred_x.name]], attr.only = F)
      }
    }
    # make sure x is numeric
    intdf$x <- sjmisc::to_value(intdf$x, keep.labels = F)
    # get name of response, for axis title
    yaxisname <- sjmisc::get_label(stats::model.frame(fit)[[response.name]],
                                   def.value = response.name)
    # -----------------------------------------------------------
    # check if we have linear regression
    # -----------------------------------------------------------
    if (fun == "lm" || fun == "lmer" || fun == "lme" || fun == "gls") {
      # Label on y-axis is name of dependent variable
      y_title <- sprintf("Predicted values of %s", yaxisname)
      # -----------------------------------------------------------
      # retrieve lowest and highest x and y position to determine
      # the scale limits
      # -----------------------------------------------------------
      if (is.null(ylim)) {
        if (show.ci) {
          lowerLim.y <- floor(min(intdf$conf.low, na.rm = T))
          upperLim.y <- ceiling(max(intdf$conf.high, na.rm = T))
        } else {
          lowerLim.y <- floor(min(intdf$y, na.rm = T))
          upperLim.y <- ceiling(max(intdf$y, na.rm = T))
        }
      } else {
        lowerLim.y <- ylim[1]
        upperLim.y <- ylim[2]
      }
    } else {
      # ------------------------
      # do we have glm? if so, get link family. make exceptions
      # for specific models that don't have family function
      # ------------------------
      fitfam <- get_glm_family(fit)
      # --------------------------------------------------------
      # create logical for family
      # --------------------------------------------------------
      binom_fam <- fitfam$is_bin
      poisson_fam <- fitfam$is_pois
      # --------------------------------------------------------
      # Label on y-axis is fixed
      # --------------------------------------------------------
      # for logistic reg.
      if (binom_fam)
        y_title <- sprintf("Predicted probabilities for %s", yaxisname)
      else if (poisson_fam)
        y_title <- sprintf("Predicted incidents for %s", yaxisname)
      else
        y_title <- sprintf("Predicted values for %s", yaxisname)
      # -----------------------------------------------------------
      # retrieve lowest and highest x and y position to determine
      # the scale limits
      # -----------------------------------------------------------
      if (is.null(ylim)) {
        if (binom_fam) {
          if (show.ci) {
            lowerLim.y <- as.integer(floor(10 * min(intdf$conf.low, na.rm = T) * .9)) / 10
            upperLim.y <- as.integer(ceiling(10 * max(intdf$conf.high, na.rm = T) * 1.1)) / 10
          } else {
            lowerLim.y <- as.integer(floor(10 * min(intdf$y, na.rm = T) * .9)) / 10
            upperLim.y <- as.integer(ceiling(10 * max(intdf$y, na.rm = T) * 1.1)) / 10
          }
        } else {
          if (show.ci) {
            lowerLim.y <- floor(min(intdf$conf.low, na.rm = T))
            upperLim.y <- ceiling(max(intdf$conf.high, na.rm = T))
          } else {
            lowerLim.y <- floor(min(intdf$y, na.rm = T))
            upperLim.y <- ceiling(max(intdf$y, na.rm = T))
          }
        }
      } else {
        lowerLim.y <- ylim[1]
        upperLim.y <- ylim[2]
      }
    }
    # -----------------------------------------------------------
    # check x-axis limits
    # -----------------------------------------------------------
    if (!is.null(xlim)) {
      lowerLim.x <- xlim[1]
      upperLim.x <- xlim[2]
    } else {
      lowerLim.x <- floor(min(intdf$x, na.rm = T))
      upperLim.x <- ceiling(max(intdf$x, na.rm = T))
    }
    # -----------------------------------------------------------
    # check whether user defined grid breaks / tick marks are used
    # -----------------------------------------------------------
    if (!is.null(grid.breaks)) {
      gridbreaks.x <- seq(lowerLim.x, upperLim.x, by = grid.breaks)
      gridbreaks.y <- seq(lowerLim.y, upperLim.y, by = grid.breaks)
    } else if (x_is_factor) {
      gridbreaks.x <- sort(unique(intdf$x))
    } else {
      gridbreaks.x <- gridbreaks.y <- ggplot2::waiver()
    }
    # -----------------------------------------------------------
    # prepare plot title and axis titles
    # -----------------------------------------------------------
    if (is.null(title)) {
      labtitle <- paste0("Interaction effect of ", moderator.name, " and ",
                         pred_x.name, " on ", response.name)
    } else {
      # copy plot counter
      l_nr <- i
      # check if we have enough labels. if not, use last labels
      if (l_nr > length(title)) l_nr <- length(title)
      # set legend labels for plot
      labtitle <- title[l_nr]
    }
    # -----------------------------------------------------------
    # legend labels
    # -----------------------------------------------------------
    if (is.null(legend.labels)) {
      # try to get labels, but only for factors
      if (mod_is_factor) {
        lLabels <- sjmisc::get_labels(stats::model.frame(fit)[[moderator.name]],
                                      attr.only = F)
      }
      # if we still have no labels, get values from group
      if (is.null(lLabels)) lLabels <- unique(as.character(intdf$grp))
    } else {
      # copy plot counter
      l_nr <- i
      # check if we have enough labels. if not, use last labels
      if (l_nr > length(legend.labels)) l_nr <- length(legend.labels)
      # set legend labels for plot
      lLabels <- legend.labels[[l_nr]]
    }
    # -----------------------------------------------------------
    # prepare facet-labels
    # -----------------------------------------------------------
    if (length(unique(intdf$grp)) == length(lLabels) && isTRUE(facet.grid)) {
      levels(intdf$grp) <- lLabels
    }
    # -----------------------------------------------------------
    # legend titles
    # -----------------------------------------------------------
    if (is.null(legend.title)) {
      lTitle <- sjmisc::get_label(stats::model.frame(fit)[[moderator.name]],
                                  def.value = moderator.name)
    } else {
      # copy plot counter
      l_nr <- i
      # check if we have enough legend titles, if not, use last legend title
      if (l_nr > length(legend.title)) l_nr <- length(legend.title)
      # set legend title for plot
      lTitle <- legend.title[l_nr]
    }
    # -----------------------------------------------------------
    # x axis titles
    # -----------------------------------------------------------
    if (!is.null(axis.title)) {
      # copy plot counter
      l_nr <- i
      # check if we have enough axis titles, if not, use last legend title
      if (l_nr > length(axis.title)) l_nr <- length(axis.title)
      # set axis title
      labx <- axis.title[l_nr]
    }
    # y-axis title.
    laby <- y_title
    # -----------------------------------------------------------
    # wrap titles
    # -----------------------------------------------------------
    labtitle <- sjmisc::word_wrap(labtitle, wrap.title)
    labx <- sjmisc::word_wrap(labx, wrap.title)
    laby <- sjmisc::word_wrap(laby, wrap.title)
    # wrap legend labels
    lLabels <- sjmisc::word_wrap(lLabels, wrap.legend.labels)
    # wrap legend title
    lTitle <- sjmisc::word_wrap(lTitle, wrap.legend.title)
    # ------------------------------------------------------------
    # start plot
    # ------------------------------------------------------------
    baseplot <- ggplot(intdf, aes(x = x, y = y, colour = grp))
    # ------------------------------------------------------------
    # confidence interval?
    # ------------------------------------------------------------
    if (show.ci) {
      if (x_is_factor) {
        # -------------------------------------------------
        # for factors, we add error bars instead of
        # continuous confidence region
        # -------------------------------------------------
        baseplot <- baseplot +
          geom_errorbar(aes(ymin = conf.low, ymax = conf.high, colour = grp),
                        width = 0, show.legend = FALSE) +
          geom_point()
      } else {
        # -------------------------------------------------
        # for continuous variables, we add continuous
        # confidence region instead of error bars
        # -------------------------------------------------
        baseplot <- baseplot +
          geom_ribbon(aes(ymin = conf.low, ymax = conf.high, colour = NULL, fill = grp),
                      alpha = fill.alpha, show.legend = FALSE)
      }
    }
    baseplot <- baseplot + geom_line(size = geom.size)
    # ------------------------------------------------------------
    # plot value labels
    # ------------------------------------------------------------
    if (show.values) {
      # don't need geom_point, because point-layer already
      # added with x_is_factor
      if (!x_is_factor) baseplot <- baseplot + geom_point()
      # add value label text
      baseplot <- baseplot +
        geom_text(aes(label = round(y, 1)), nudge_y = y.offset, show.legend = FALSE)
    }
    # ------------------------------------------------------------------------------------
    # build plot object with theme and labels
    # ------------------------------------------------------------------------------------
    baseplot <- baseplot +
      # set plot and axis titles
      labs(title = labtitle, x = labx, y = laby, colour = lTitle)
    # we have specified labels for factors on x-axis only...
    if (x_is_factor && !is.null(x_labels)) {
      baseplot <- baseplot +
        scale_x_continuous(limits = c(lowerLim.x, upperLim.x),
                           breaks = gridbreaks.x,
                           labels = x_labels)
    } else {
      # ...else, we use waiver-labels
      baseplot <- baseplot +
        scale_x_continuous(limits = c(lowerLim.x, upperLim.x),
                           breaks = gridbreaks.x)
    }
    # ------------------------
    # for logistic regression, use
    # 0 to 1 scale limits and percentage scale
    # ------------------------
    if (binom_fam) {
      baseplot <- baseplot +
        scale_y_continuous(limits = c(lowerLim.y, upperLim.y),
                           breaks = gridbreaks.y,
                           labels = scales::percent)
    } else {
      baseplot <- baseplot +
        # set axis scale breaks
        scale_y_continuous(limits = c(lowerLim.y, upperLim.y),
                           breaks = gridbreaks.y)
    }
    # ---------------------------------------------------------
    # facet grids?
    # ---------------------------------------------------------
    if (facet.grid) baseplot <- baseplot + facet_grid(~grp)
    # ---------------------------------------------------------
    # set geom colors
    # ---------------------------------------------------------
    baseplot <- sj.setGeomColors(baseplot,
                                 geom.colors,
                                 pal.len = length(unique(stats::na.omit(intdf$grp))),
                                 show.legend = !is.null(lLabels) & !facet.grid,
                                 lLabels)
    # ---------------------------------------------------------
    # Check whether ggplot object should be returned or plotted
    # ---------------------------------------------------------
    if (prnt.plot) graphics::plot(baseplot)
    # concatenate plot object
    plotlist[[length(plotlist) + 1]] <- baseplot
    dflist[[length(dflist) + 1]] <- intdf
  }
  # -------------------------------------
  # return results
  # -------------------------------------
  invisible(structure(class = c("sjPlot", "sjpint"),
                      list(plot.list = plotlist,
                           data.list = dflist)))
}
#' @importFrom stats quantile
# Validate the requested moderator-value strategy against the data.
# `mdrt.values = "quart"` needs at least 3 distinct quantile cut points;
# when the moderator's range is too small for that, fall back to
# "minmax" and inform the user.
#
# @param mdrt.values character; requested strategy ("quart", "minmax", ...).
# @param x numeric vector; the moderator variable.
# @return the (possibly corrected) `mdrt.values` strategy.
mv_check <- function(mdrt.values, x) {
  # count distinct quantile cut points (min, quartiles, max); use TRUE
  # instead of the reassignable shorthand T
  mvc <- length(unique(as.vector(stats::quantile(x, na.rm = TRUE))))
  if (mdrt.values == "quart" && mvc < 3) {
    # tell user that quart won't work
    message("Could not compute quartiles, too small range of moderator variable. Defaulting `mdrt.values` to `minmax`.")
    mdrt.values <- "minmax"
  }
  # implicit return of the (possibly adjusted) strategy
  mdrt.values
}
# get all (significant) interaction terms from model
# the function "getInteractionTerms" checks if a fitted
# model contains any interaction terms that are significant
# at the level specified by "plevel". returns NULL, if model
# contains no interaction terms or no significant interaction term.
# else, information on model and interaction terms is returned
#
# @param fit fitted model; supported are lm, glm, plm, lme, gls (linear
#        branch) and lmer, glmer, nlmer (mixed-models branch).
# @param fun character name of the estimation function ("lm", "glmer", ...)
#        used to decide how coefficients and model data are retrieved.
# @param plevel numeric; interaction terms with p-values >= plevel are
#        dropped from the result.
# @param p.kr logical; use Kenward-Roger approximation when computing
#        p-values of merMod objects.
# @return list with `intnames`, `estimates`, `estimates.names`, `b0`,
#         `fitdat` and `depvar.label`, or invisible(NULL) when no
#         (significant) interaction term was found.
#' @importFrom stats model.matrix model.frame
getInteractionTerms <- function(fit, fun, plevel, p.kr) {
  # -----------------------------------------------------------
  # retrieve coefficients
  # -----------------------------------------------------------
  coef.tab <- summary(fit)$coefficients
  pval <- rep(0, times = nrow(coef.tab) - 1)
  # -----------------------------------------------------------
  # Help-function that removes the AsIs "I(...)" wrapper from
  # term names, so e.g. "I(a * b)" becomes "a:b".
  # -----------------------------------------------------------
  remove_I <- function(xnames) {
    fpos <- grep("I(", xnames, fixed = TRUE)
    # only test whether any match was found; the former
    # `length(fpos) > 0 && fpos > 0` errored when several terms
    # matched (length > 1 condition in `&&`, an error since R 4.3)
    if (length(fpos) > 0) {
      xnames <- sub("I(", "", xnames, fixed = TRUE)
      xnames <- sub(")", "", xnames, fixed = TRUE)
      xnames <- sub(" * ", ":", xnames, fixed = TRUE)
    }
    xnames
  }
  # -----------------------------------------------------------
  # prepare values for (generalized) linear models
  # -----------------------------------------------------------
  if (fun == "lm" || fun == "glm" || fun == "plm" || fun == "lme" || fun == "gls") {
    # -----------------------------------------------------------
    # retrieve amount and names of predictor variables and
    # of dependent variable
    # -----------------------------------------------------------
    if (fun == "plm") {
      # plm objects have different structure than (g)lm
      depvar.label <- attr(attr(attr(fit$model, "terms"), "dataClasses"), "names")[1]
      # retrieve model matrix
      fitdat <- data.frame(cbind(as.vector(fit$model[, 1]), stats::model.matrix(fit)))
    } else {
      depvar.label <- colnames(stats::model.frame(fit))[1]
      # retrieve model matrix
      fitdat <- data.frame(stats::model.matrix(fit))
    }
    # -----------------------------------------------------------
    # retrieve p-values, without intercept
    # -----------------------------------------------------------
    if (ncol(coef.tab) > 3) pval <- coef.tab[-1, 4]
    # -----------------------------------------------------------
    # retrieve estimates, without intercept
    # -----------------------------------------------------------
    estimates <- coef.tab[-1, 1]
    # -----------------------------------------------------------
    # need to remove "I(...)"?
    # -----------------------------------------------------------
    estimates.names <- names(estimates)
    estimates.names <- remove_I(estimates.names)
    it <- estimates.names
    # -----------------------------------------------------------
    # retrieve estimate of intercept
    # -----------------------------------------------------------
    b0 <- coef.tab[1, 1]
    # -----------------------------------------------------------
    # prepare values for (generalized) linear mixed effects models
    # -----------------------------------------------------------
  } else if (fun == "lmer" || fun == "glmer" || fun == "nlmer") {
    # -----------------------------------------------------------
    # retrieve amount and names of predictor variables and
    # of dependent variable
    # -----------------------------------------------------------
    depvar.label <- colnames(stats::model.frame(fit))[1]
    # -----------------------------------------------------------
    # retrieve p-values, without intercept
    # -----------------------------------------------------------
    pval <- get_lmerMod_pvalues(fit, p.kr)[-1]
    # -----------------------------------------------------------
    # retrieve estimates, without intercept
    # -----------------------------------------------------------
    estimates <- unname(lme4::fixef(fit)[-1])
    estimates.names <- names(lme4::fixef(fit)[-1])
    # -----------------------------------------------------------
    # retrieve model matrix with all relevant predictors
    # -----------------------------------------------------------
    fitdat <- stats::model.matrix(fit)
    # -----------------------------------------------------------
    # need to remove "I(...)"?
    # -----------------------------------------------------------
    estimates.names <- remove_I(estimates.names)
    it <- estimates.names
    # -----------------------------------------------------------
    # retrieve estimate of intercept
    # -----------------------------------------------------------
    b0 <- unname(lme4::fixef(fit)[1])
  } else {
    stop("Unsupported model-class. This type of regression is not yet supported by 'sjp.int'.", call. = FALSE)
  }
  # -----------------------------------------------------------
  # find all significant interactions
  # we start looking for significant p-values beginning
  # with the first interaction, not the first single term!
  # thus, the starting point is first position after all single
  # predictor variables
  # -----------------------------------------------------------
  # find interaction terms, which contain a colon, in row names
  # (grep(...)[1] yields NA when nothing matched)
  firstit <- grep(":", it, fixed = TRUE)[1]
  # check whether we have any interaction terms included at all
  if (is.null(firstit) || is.na(firstit) || firstit == 0) {
    warning("No interaction term found in fitted model...", call. = FALSE)
    return(invisible(NULL))
  }
  # save names of interaction predictor variables into this object
  intnames <- c()
  int.dropped <- c()
  non.p.dropped <- FALSE
  for (i in firstit:length(pval)) {
    if (pval[i] < plevel) {
      intnames <- c(intnames, it[i])
    } else {
      non.p.dropped <- TRUE
      int.dropped <- c(int.dropped, it[i], "\n")
    }
  }
  # check for any significant interactions, stop if nothing found
  if (is.null(intnames)) {
    warning("No significant interactions found... Try to adjust `plevel` argument.", call. = FALSE)
    return(invisible(NULL))
  } else if (non.p.dropped) {
    message(sprintf("Following non-significant interaction terms were omitted from the output:\n%s\nUse `plevel` to show more interaction terms.",
                    paste(int.dropped, collapse = "")))
  }
  return(list(intnames = intnames,
              estimates = estimates,
              estimates.names = estimates.names,
              b0 = b0,
              fitdat = fitdat,
              depvar.label = depvar.label))
}
#' @title Plot interaction effects of (generalized) linear (mixed) models
#' @name sjp.int
#'
#' @references \itemize{
#' \item Aiken and West (1991). Multiple Regression: Testing and Interpreting Interactions.
#' \item Brambor T, Clark WR and Golder M (2006) Understanding Interaction Models: Improving Empirical Analyses. Political Analysis 14: 63-82 \href{https://files.nyu.edu/mrg217/public/pa_final.pdf}{download}
#' \item Esarey J, Sumner JL (2015) Marginal Effects in Interaction Models: Determining and Controlling the False Positive Rate. \href{http://jee3.web.rice.edu/interaction-overconfidence.pdf}{download}
#' \item Fox J (2003) Effect displays in R for generalised linear models. Journal of Statistical Software 8:15, 1–27, \href{http://www.jstatsoft.org/v08/i15/}{<http://www.jstatsoft.org/v08/i15/>}
#' \item Hayes AF (2012) PROCESS: A versatile computational tool for observed variable mediation, moderation, and conditional process modeling [White paper] \href{http://imaging.mrc-cbu.cam.ac.uk/statswiki/FAQ/SobelTest?action=AttachFile&do=get&target=process.pdf}{download}
#' \item \href{http://www.theanalysisfactor.com/interpreting-interactions-in-regression/}{Grace-Martin K: Interpreting Interactions in Regression}
#' \item \href{http://www.theanalysisfactor.com/clarifications-on-interpreting-interactions-in-regression/}{Grace-Martin K: Clarifications on Interpreting Interactions in Regression}
#' }
#'
#' @seealso \href{http://www.strengejacke.de/sjPlot/sjp.int/}{sjPlot manual: sjp.int}
#'
#' @description Plot regression (predicted values) or probability lines (predicted probabilities) of
#' significant interaction terms to better understand effects
#' of moderations in regression models. This function accepts following fitted model classes:
#' \itemize{
#' \item linear models (\code{\link{lm}})
#' \item generalized linear models (\code{\link{glm}})
#' \item linear mixed effects models (\code{\link[lme4]{lmer}})
#' \item generalized linear mixed effects models (\code{\link[lme4]{glmer}})
#' \item non-linear mixed effects models (\code{\link[lme4]{nlmer}})
#' \item linear mixed effects models (\code{\link[nlme]{lme}}, but only for \code{type = "eff"})
#' \item generalized least squares models (\code{\link[nlme]{gls}}, but only for \code{type = "eff"})
#' \item panel data estimators (\code{\link[plm]{plm}})
#' }
#' Note that beside interaction terms, also the single predictors of each interaction (main effects)
#' must be included in the fitted model as well. Thus, \code{lm(dep ~ pred1 * pred2)} will work,
#' but \code{lm(dep ~ pred1:pred2)} won't!
#'
#' @param fit the fitted (generalized) linear (mixed) model object, including interaction terms. Accepted model
#' classes are
#' \itemize{
#' \item linear models (\code{\link{lm}})
#' \item generalized linear models (\code{\link{glm}})
#' \item linear mixed effects models (\code{\link[lme4]{lmer}})
#' \item generalized linear mixed effects models (\code{\link[lme4]{glmer}})
#' \item non-linear mixed effects models (\code{\link[lme4]{nlmer}})
#' \item linear mixed effects models (\code{\link[nlme]{lme}}, but only for \code{type = "eff"})
#' \item generalized least squares models (\code{\link[nlme]{gls}}, but only for \code{type = "eff"})
#' \item panel data estimators (\code{\link[plm]{plm}})
#' }
#' @param type interaction plot type. Use one of following values:
#' \describe{
#' \item{\code{type = "eff"}}{(default) plots the overall moderation effect on the response value. See 'Details'.}
#' \item{\code{type = "cond"}}{plots the mere \emph{change} of the moderating effect on the response value (conditional effect). See 'Details'.}
#' \item{\code{type = "emm"}}{plots the estimated marginal means (least square means). If this type is chosen, not all function arguments are applicable. See 'Details'.}
#' }
#' @param int.term name of interaction term of \code{fit} (as character), which should be plotted
#' when using \code{type = "eff"}. By default, this argument will be ignored
#' (i.e. \code{int.term = NULL}). See 'Details'.
#' @param int.plot.index numeric vector with index numbers that indicate which
#' interaction terms should be plotted in case the \code{fit} has more than
#' one interaction. By default, this value is \code{NULL}, hence all interactions
#' are plotted.
#' @param diff if \code{FALSE} (default), the minimum and maximum interaction effects of the moderating variable
#' is shown (one line each). if \code{TRUE}, only the difference between minimum and maximum interaction effect
#' is shown (single line). Only applies to \code{type = "cond"}.
#' @param mdrt.values indicates which values of the moderator variable should be
#' used when plotting the interaction effects.
#' \describe{
#' \item{\code{"minmax"}}{(default) minimum and maximum values (lower and upper bounds) of the moderator are used to plot the interaction between independent variable and moderator.}
#' \item{\code{"meansd"}}{uses the mean value of the moderator as well as one standard deviation below and above mean value to plot the effect of the moderator on the independent variable (following the convention suggested by Cohen and Cohen and popularized by Aiken and West, i.e. using the mean, the value one standard deviation above, and the value one standard deviation below the mean as values of the moderator, see \href{http://www.theanalysisfactor.com/3-tips-interpreting-moderation/}{Grace-Martin K: 3 Tips to Make Interpreting Moderation Effects Easier}).}
#' \item{\code{"zeromax"}}{is similar to the \code{"minmax"} option, however, \code{0} is always used as minimum value for the moderator. This may be useful for predictors that don't have an empirical zero-value, but absence of moderation should be simulated by using 0 as minimum.}
#' \item{\code{"quart"}}{calculates and uses the quartiles (lower, median and upper) of the moderator value.}
#' \item{\code{"all"}}{uses all values of the moderator variable. Note that this option only applies to \code{type = "eff"}, for numeric moderator values.}
#' }
#' @param swap.pred if \code{TRUE}, the predictor on the x-axis and the moderator value in an interaction are
#' swapped. For \code{type = "eff"}, the first interaction term is used as moderator and the second term
#' is plotted at the x-axis. For \code{type = "cond"}, the interaction's predictor with fewer unique values is
#' printed along the x-axis. Default is \code{FALSE}, so the second predictor in an interaction, respectively
#' the predictor with more unique values is printed along the x-axis.
#' @param plevel indicates at which p-value an interaction term is considered as \emph{significant},
#' i.e. at which p-level an interaction term will be considered for plotting. Default is
#' 0.1 (10 percent), hence, non-significant interactions are excluded by default. This
#' argument does not apply to \code{type = "eff"}.
#' @param title default title used for the plots. Should be a character vector
#' of same length as interaction plots to be plotted. Default value is \code{NULL}, which means that each plot's title
#' includes the dependent variable as well as the names of the interaction terms.
#' @param fill.color fill color of the shaded area between the minimum and maximum lines. Default is \code{"grey"}.
#' Either set \code{fill.color} to \code{NULL} or use 0 for \code{fill.alpha} if you want to hide the shaded area.
#' @param fill.alpha alpha value (transparency) of the shaded area between the minimum and maximum lines. Default is 0.4.
#' Use either 0 or set \code{fill.color} to \code{NULL} if you want to hide the shaded area.
#' @param geom.colors vector of color values or name of a valid color brewer palette.
#' If not a color brewer palette name, \code{geom.colors} must be of same
#' length as moderator values used in the plot (see \code{mdrt.values}).
#' See also 'Details' in \code{\link{sjp.grpfrq}}.
#' @param axis.title a default title used for the x-axis. Should be a character vector
#' of same length as interaction plots to be plotted. Default value is \code{NULL},
#' which means that each plot's x-axis uses the predictor's name as title.
#' @param axis.labels character vector with value labels of the interaction, used
#' to label the x-axis. Only applies to \code{type = "emm"}.
#' @param legend.title title of the diagram's legend. A character vector of same length as
#' amount of interaction plots to be plotted (i.e. one vector element for each
#' plot's legend title).
#' @param legend.labels labels for the guide/legend. Either a character vector of same length as
#' amount of legend labels of the plot, or a \code{list} of character vectors, if more than one
#' interaction plot is plotted (i.e. one vector of legend labels for each interaction plot).
#' Default is \code{NULL}, so the name of the predictor with min/max-effect is used
#' as legend label.
#' @param show.ci may be a numeric or logical value. If \code{show.ci} is logical and
#' \code{TRUE}, a 95\% confidence region will be plotted. If \code{show.ci}
#' is numeric, must be a number between 0 and 1, indicating the proportion
#' for the confidence region (e.g. \code{show.ci = 0.9} plots a 90\% CI).
#' Only applies to \code{type = "emm"} or \code{type = "eff"}.
#'
#' @inheritParams sjp.grpfrq
#' @inheritParams sjp.frq
#' @inheritParams sjp.lmer
#' @inheritParams sjp.glmer
#'
#' @return (Invisibly) returns the ggplot-objects with the complete plot-list (\code{plot.list})
#' as well as the data frames that were used for setting up the ggplot-objects (\code{data.list}).
#'
#' @details \describe{
#' \item{\code{type = "eff"}}{plots the overall effects (marginal effects) of the interaction, with all remaining
#' covariates set to the mean. Effects are calculated using the \code{\link[effects]{effect}}-
#' function from the \pkg{effects}-package.
#' You can pass further arguments down to \code{allEffects} for flexible
#' function call via the \code{...}-argument.
#' }
#' \item{\code{type = "cond"}}{plots the effective \emph{change} or \emph{impact}
#' (conditional effect) on a dependent variable of a moderation effect, as
#' described by Grace-Martin, i.e. the difference of the moderation effect on the
#' dependent variable in \emph{presence} and \emph{absence} of the moderating effect
#' (\emph{simple slope} plot or \emph{conditional effect}, see Hayes 2012). All
#' remaining predictors are set to zero (i.e. ignored and not adjusted for).
#' Hence, this plot type may be used especially for \emph{binary or dummy coded}
#' moderator values (see also Esarey and Summer 2015).
#' This type \emph{does not} show the overall effect (marginal mean, i.e. adjusted
#' for all other predictors and covariates) of interactions on the result of Y. Use
#' \code{type = "eff"} for effect displays similar to the \code{\link[effects]{effect}}-function
#' from the \pkg{effects}-package.
#' }
#' \item{\code{type = "emm"}}{plots the estimated marginal means of repeated measures designs,
#' like two-way repeated measures AN(C)OVA. In detail, this type plots estimated marginal means
#' (also called \emph{least square means} or \emph{marginal means}) of (significant) interaction terms.
#' The fitted models may be linear (mixed effects)
#' models of class \code{\link{lm}} or \code{\link[lme4]{merMod}}. This function may be used, for example,
#' to plot differences in interventions between control and treatment groups over multiple time points.
#' }
#' }
#' The argument \code{int.term} only applies to \code{type = "eff"} and can be used
#' to select a specific interaction term of the model that should be plotted. The function
#' then calls \code{effect(int.term, fit)} to compute effects for this specific interaction
#' term only. This approach is recommended, when the fitted model contains many observations
#' and/or variables, which may slow down the effect-computation dramatically. In such cases,
#' consider computing effects for selected interaction terms only with \code{int.terms}.
#' See 'Examples'.
#'
#' @note Note that beside interaction terms, also the single predictors of each interaction (main effects)
#' must be included in the fitted model as well. Thus, \code{lm(dep ~ pred1 * pred2)} will work,
#' but \code{lm(dep ~ pred1:pred2)} won't! \cr \cr
#' For \code{type = "emm"}, all interaction terms have to be factors.
#' Furthermore, for \code{type = "eff"}, predictors of interactions that are introduced first into the model
#' are used as grouping variable, while the latter predictor is printed along the x-axis
#' (i.e. lm(y~a+b+a:b) means that "a" is used as grouping variable and "b" is plotted along the x-axis).
#'
#' @examples
#' # Note that the data sets used in this example may not be perfectly suitable for
#' # fitting linear models. I just used them because they are part of the R-software.
#'
#' # fit "dummy" model. Note that moderator should enter
#' # first the model, followed by predictor. Else, use
#' # argument "swap.pred" to change predictor on
#' # x-axis with moderator
#' fit <- lm(weight ~ Diet * Time, data = ChickWeight)
#'
#' # show summary to see significant interactions
#' summary(fit)
#'
#' # plot regression line of interaction terms, including value labels
#' sjp.int(fit, type = "eff", show.values = TRUE)
#'
#'
#' # load sample data set
#' library(sjmisc)
#' data(efc)
#' # create data frame with variables that should be included
#' # in the model
#' mydf <- data.frame(usage = efc$tot_sc_e,
#' sex = efc$c161sex,
#' education = efc$c172code,
#' burden = efc$neg_c_7,
#' dependency = efc$e42dep)
#' # convert gender predictor to factor
#' mydf$sex <- relevel(factor(mydf$sex), ref = "2")
#' # fit "dummy" model
#' fit <- lm(usage ~ .*., data = mydf)
#' summary(fit)
#'
#' # plot interactions. note that type = "cond" only considers
#' # significant interactions by default. use "plevel" to
#' # adjust p-level sensitivity
#' sjp.int(fit, type = "cond")
#'
#' # plot only selected interaction term for
#' # type = "eff"
#' sjp.int(fit, type = "eff", int.term = "sex*education")
#'
#' # plot interactions, using mean and sd as moderator
#' # values to calculate interaction effect
#' sjp.int(fit, type = "eff", mdrt.values = "meansd")
#' sjp.int(fit, type = "cond", mdrt.values = "meansd")
#'
#' # plot interactions, including those with p-value up to 0.1
#' sjp.int(fit, type = "cond", plevel = 0.1)
#'
#' # -------------------------------
#' # Predictors for negative impact of care.
#' # Data from the EUROFAMCARE sample dataset
#' # -------------------------------
#' library(sjmisc)
#' data(efc)
#' # create binary response
#' y <- ifelse(efc$neg_c_7 < median(stats::na.omit(efc$neg_c_7)), 0, 1)
#' # create data frame for fitted model
#' mydf <- data.frame(y = as.factor(y),
#' sex = as.factor(efc$c161sex),
#' barthel = as.numeric(efc$barthtot))
#' # fit model
#' fit <- glm(y ~ sex * barthel, data = mydf, family = binomial(link = "logit"))
#' # plot interaction, increase p-level sensitivity
#' sjp.int(fit, type = "eff", legend.labels = get_labels(efc$c161sex), plevel = 0.1)
#' sjp.int(fit, type = "cond", legend.labels = get_labels(efc$c161sex), plevel = 0.1)
#'
#' \dontrun{
#' # -------------------------------
#' # Plot estimated marginal means
#' # -------------------------------
#' # load sample data set
#' library(sjmisc)
#' data(efc)
#' # create data frame with variables that should be included
#' # in the model
#' mydf <- data.frame(burden = efc$neg_c_7,
#' sex = efc$c161sex,
#' education = efc$c172code)
#' # convert gender predictor to factor
#' mydf$sex <- factor(mydf$sex)
#' mydf$education <- factor(mydf$education)
#' # name factor levels and dependent variable
#' levels(mydf$sex) <- c("female", "male")
#' levels(mydf$education) <- c("low", "mid", "high")
#' mydf$burden <- set_label(mydf$burden, "care burden")
#' # fit "dummy" model
#' fit <- lm(burden ~ .*., data = mydf)
#' summary(fit)
#'
#' # plot marginal means of interactions, no interaction found
#' sjp.int(fit, type = "emm")
#' # plot marginal means of interactions, including those with p-value up to 1
#' sjp.int(fit, type = "emm", plevel = 1)
#' # swap predictors
#' sjp.int(fit, type = "emm", plevel = 1, swap.pred = TRUE)
#'
#' # -------------------------------
#' # Plot effects
#' # -------------------------------
#' # add continuous variable
#' mydf$barthel <- efc$barthtot
#' # re-fit model with continuous variable
#' fit <- lm(burden ~ .*., data = mydf)
#'
#' # plot effects
#' sjp.int(fit, type = "eff", show.ci = TRUE)
#'
#' # plot effects, faceted
#' sjp.int(fit, type = "eff", int.plot.index = 3, show.ci = TRUE, facet.grid = TRUE)}
#'
#' @import ggplot2
#' @import sjmisc
#' @importFrom stats family quantile
#' @importFrom effects allEffects effect
#' @export
sjp.int <- function(fit,
                    type = c("eff", "cond", "emm"),
                    int.term = NULL,
                    int.plot.index = NULL,
                    mdrt.values = c("minmax", "meansd", "zeromax", "quart", "all"),
                    swap.pred = FALSE,
                    plevel = 0.1,
                    diff = FALSE,
                    title = NULL,
                    axis.title = NULL,
                    axis.labels = NULL,
                    legend.title = NULL,
                    legend.labels = NULL,
                    wrap.title = 50,
                    wrap.legend.labels = 20,
                    wrap.legend.title = 20,
                    geom.colors = "Set1",
                    geom.size = NULL,
                    fill.color = "grey",
                    fill.alpha = 0.3,
                    show.values = FALSE,
                    show.ci = FALSE,
                    p.kr = TRUE,
                    grid.breaks = NULL,
                    xlim = NULL,
                    ylim = NULL,
                    y.offset = 0.07,
                    digits = 2,
                    facet.grid = FALSE,
                    prnt.plot = TRUE,
                    ...) {
  # -----------------------------------------------------------
  # match arguments against their allowed choices
  # -----------------------------------------------------------
  type <- match.arg(type)
  mdrt.values <- match.arg(mdrt.values)
  # -----------------------------------------------------------
  # check class of fitted model. "fun" encodes the model family
  # and later controls link-inverse transformation and axis titles.
  # lme/gls objects are only supported by the effects-based plot,
  # so "type" is forced to "eff" for them.
  # -----------------------------------------------------------
  c.f <- class(fit)
  fun <- "lm"
  if (any(c.f == "glm")) {
    fun <- "glm"
  } else if (any(c.f == "lm")) {
    fun <- "lm"
  } else if (any(c.f == "plm")) {
    fun <- "plm"
  } else if (any(c.f == "glmerMod")) {
    fun <- "glmer"
  } else if (any(c.f == "nlmerMod")) {
    fun <- "nlmer"
  } else if (any(c.f == "lmerMod") || any(c.f == "merModLmerTest")) {
    fun <- "lmer"
  } else if (any(c.f == "lme")) {
    fun <- "lme"
    if (type != "eff") {
      message("Only 'type = \"eff\"' supports objects of class 'nlme::lme'. Defaulting type to \"eff\".")
      type <- "eff"
    }
  } else if (any(c.f == "gls")) {
    fun <- "gls"
    if (type != "eff") {
      message("Only 'type = \"eff\"' supports objects of class 'nlme::gls'. Defaulting type to \"eff\".")
      type <- "eff"
    }
  }
  # --------------------------------------------------------
  # check default geom.size (line size for geom_line)
  # --------------------------------------------------------
  if (is.null(geom.size)) geom.size = .7
  # ------------------------
  # check if suggested package is available
  # ------------------------
  if ((fun == "lmer" || fun == "glmer" || fun == "nlmer") && !requireNamespace("lme4", quietly = TRUE)) {
    stop("Package `lme4` needed for this function to work. Please install it.", call. = FALSE)
  }
  # NOTE(review): plm must be *attached* (not just installed), hence the
  # search()-based check instead of requireNamespace().
  if (fun == "plm" && !"package:plm" %in% search()) {
    stop("Package `plm` needs to be loaded for this function to work... Use `library(plm)` and call this function again.", call. = FALSE)
  }
  # -----------------------------------------------------------
  # argument check: a NULL fill color means "hide shaded area",
  # implemented as fully transparent white
  # -----------------------------------------------------------
  if (is.null(fill.color)) {
    fill.color <- "white"
    fill.alpha <- 0
  }
  # gridbreaks: without user-defined breaks, let ggplot choose them
  if (is.null(grid.breaks)) gridbreaks.x <- gridbreaks.y <- ggplot2::waiver()
  # check matching argument combinations
  if (type == "cond" && mdrt.values == "all") {
    message("`mdrt.values = \"all\"` only applies to `type = \"eff\". Defaulting `mdrt.values` to `minmax`.")
    mdrt.values <- "minmax"
  }
  # ------------------------
  # do we have glm? if so, get link family. make exceptions
  # for specific models that don't have family function
  # ------------------------
  fitfam <- get_glm_family(fit)
  # --------------------------------------------------------
  # create logical for family (binomial-type models get
  # probability-scaled y-axes later on)
  # --------------------------------------------------------
  binom_fam <- fitfam$is_bin
  # --------------------------------------------------------
  # plot estimated marginal means? if so, delegate everything
  # to sjp.emm() and return its result directly
  # --------------------------------------------------------
  if (type == "emm") {
    # ------------------------
    # multiple purpose of show.ci parameter. if logical,
    # sets default CI to 0.95, else show.ci also may be
    # numeric. for "emm", only a logical value is accepted.
    # ------------------------
    if (!is.null(show.ci) && !is.logical(show.ci)) {
      show.ci <- TRUE
      warning("argument `show.ci` must be logical for `type = 'emm'`.", call. = F)
    }
    return(sjp.emm(fit, swap.pred, plevel, title, geom.colors, geom.size,
                   axis.title, axis.labels, legend.title, legend.labels,
                   show.values, digits, show.ci, p.kr, wrap.title,
                   wrap.legend.title, wrap.legend.labels, y.offset, ylim,
                   grid.breaks, facet.grid, prnt.plot))
  }
  # --------------------------------------------------------
  # list labels: normalize legend labels to a list (one element
  # per interaction plot) and legend titles to a plain vector
  # --------------------------------------------------------
  if (!is.null(legend.labels) && !is.list(legend.labels)) legend.labels <- list(legend.labels)
  if (!is.null(legend.title) && is.list(legend.title)) legend.title <- unlist(legend.title)
  # --------------------------------------------------------
  # plot moderation effects? if so, delegate to sjp.eff.int()
  # --------------------------------------------------------
  if (type == "eff") {
    return(sjp.eff.int(fit, int.term, int.plot.index, mdrt.values, swap.pred, plevel,
                       title, fill.alpha, geom.colors, geom.size, axis.title,
                       legend.title, legend.labels, show.values, wrap.title, wrap.legend.labels,
                       wrap.legend.title, xlim, ylim, y.offset, grid.breaks,
                       show.ci, p.kr, facet.grid, prnt.plot, fun, ...))
  }
  # -----------------------------------------------------------
  # from here on: type = "cond" (conditional effect / simple slopes).
  # set axis title depending on model family
  # -----------------------------------------------------------
  y_title <- NULL
  if ((fun == "glm" || fun == "glmer")) {
    if (binom_fam)
      y_title <- "Change in Predicted Probability"
    else
      y_title <- "Change in Incidents Rates"
  }
  # -----------------------------------------------------------
  # get all (significant) interaction terms from model
  # the function "getInteractionTerms" checks if a fitted
  # model contains any interaction terms that are significant
  # at the level specified by "plevel". returns NULL, if model
  # contains no interaction terms or no significant interaction term.
  # else, information on model and interaction terms is returned
  # -----------------------------------------------------------
  git <- getInteractionTerms(fit, fun, plevel, p.kr)
  # check return value
  if (is.null(git)) return(invisible(NULL))
  # -----------------------------------------------------------
  # init variables from return values
  # -----------------------------------------------------------
  # b0 <- git[["b0"]]
  estimates.names <- git[["estimates.names"]]
  estimates <- git[["estimates"]]
  fitdat <- git[["fitdat"]]
  # init vector that saves ggplot objects
  plotlist <- list()
  dflist <- list()
  # -----------------------------------------------------------
  # when we have linear mixed effects models and both interaction
  # terms are factors, we may have the same interaction term names
  # multiples times - thus, remove redundant duplicates
  # -----------------------------------------------------------
  intnames <- unique(git[["intnames"]])
  # check if we have selected plots only, and remove any plots
  # that should not be plotted. but be careful for out of bound index!
  if (!is.null(int.plot.index) && !any(int.plot.index > length(intnames))) intnames <- intnames[int.plot.index]
  # -----------------------------------------------------------
  # get model frame, needed later for label detection
  # -----------------------------------------------------------
  modfram <- stats::model.frame(fit)
  # -----------------------------------------------------------
  # Now iterate all significant interaction terms
  # and manually calculate the linear regression by inserting
  # the estimates of each term and the associated interaction term,
  # i.e.: y = b0 + (b1 * pred1) + (b2 * pred2) + (b3 * pred1 * pred2)
  # -----------------------------------------------------------
  for (cnt in 1:length(intnames)) {
    # -----------------------------------------------------------
    # first, retrieve and split interaction term so we know
    # the two predictor variables of the interaction term
    # -----------------------------------------------------------
    interactionterms <- unlist(strsplit(intnames[cnt], ":"))
    labx <- c()
    # Label on y-axis is name of dependent variable
    laby <- paste0("Change in ", sjmisc::get_label(modfram[[git[["depvar.label"]]]],
                                                   def.value = git[["depvar.label"]]))
    # -----------------------------------------------------------
    # find estimates (beta values) for each single predictor of
    # the interaction as well as of the interaction term
    # -----------------------------------------------------------
    b1 <- as.numeric(estimates[match(interactionterms[1], estimates.names)])
    b2 <- as.numeric(estimates[match(interactionterms[2], estimates.names)])
    b3 <- as.numeric(estimates[match(intnames[cnt], estimates.names)])
    # -----------------------------------------------------------
    # check whether each predictor was included in the model
    # as single term as well
    # -----------------------------------------------------------
    if (is.na(b1) || is.na(b2) || is.na(b3)) {
      stop("Predictors of interaction terms (main effects) must be included as single term as well. See Note in ?sjp.int", call. = FALSE)
    }
    # -----------------------------------------------------------
    # retrieve number of unique values in each predictor variable.
    # depending on the amount of values the variable for the x-axis
    # is chosen. In this case, we use the predictor with the higher
    # number of unique values on the x-axis.
    # -----------------------------------------------------------
    # retrieve values as data frame
    df_pred1uniquevals <- unique(stats::na.omit(fitdat[, interactionterms[1]]))
    df_pred2uniquevals <- unique(stats::na.omit(fitdat[, interactionterms[2]]))
    # convert data frame to numeric vector
    pred1uniquevals <- pred2uniquevals <- as.numeric(c())
    pred1uniquevals <- sort(as.numeric(sapply(df_pred1uniquevals, as.numeric)))
    pred2uniquevals <- sort(as.numeric(sapply(df_pred2uniquevals, as.numeric)))
    # init data frame
    intdf <- c()
    # -----------------------------------------------------------
    # choose x-value according to higher number of unique values
    # choose minimum and maximum value from predictor that has
    # a "smaller range" (i.e. less unique values)
    # or swap predictors on axes if requested
    # -----------------------------------------------------------
    if (swap.pred) {
      useFirstPredOnY <- ifelse(length(pred1uniquevals) > length(pred2uniquevals), F, T)
    } else {
      useFirstPredOnY <- ifelse(length(pred1uniquevals) > length(pred2uniquevals), T, F)
    }
    # -----------------------------------------------------------
    # calculate regression line
    # -----------------------------------------------------------
    if (useFirstPredOnY) {
      labx <- sjmisc::get_label(modfram[[interactionterms[1]]],
                                def.value = interactionterms[1])
      predy <- interactionterms[2]
      # -----------------------------------------------------------
      # define predictor (x-axis) and moderator values
      # -----------------------------------------------------------
      pred.value <- pred1uniquevals
      mod.value <- pred2uniquevals
      # -----------------------------------------------------------
      # define predictor beta
      # -----------------------------------------------------------
      b.pred <- b1
    } else {
      labx <- sjmisc::get_label(modfram[[interactionterms[2]]],
                                def.value = interactionterms[2])
      predy <- interactionterms[1]
      # -----------------------------------------------------------
      # define predictor (x-axis) and moderator values
      # -----------------------------------------------------------
      pred.value <- pred2uniquevals
      mod.value <- pred1uniquevals
      # -----------------------------------------------------------
      # define predictor beta
      # -----------------------------------------------------------
      b.pred <- b2
    }
    # -----------------------------------------------------------
    # Check whether moderator value has enough unique values
    # for quartiles (may downgrade mdrt.values)
    # -----------------------------------------------------------
    mdrt.values <- mv_check(mdrt.values, mod.value)
    # -----------------------------------------------------------
    # check which values of moderator should be plotted, i.e. if
    # lower/upper bound (min-max) or mean and standard-deviation
    # should be used as values for the moderator.
    # see http://www.theanalysisfactor.com/3-tips-interpreting-moderation/
    # -----------------------------------------------------------
    if (mdrt.values == "minmax") {
      mw <- NA
      ymin <- min(mod.value, na.rm = T)
      ymax <- max(mod.value, na.rm = T)
    } else if (mdrt.values == "meansd") {
      mw <- mean(mod.value, na.rm = T)
      ymin <- mw - sd(mod.value, na.rm = T)
      ymax <- mw + sd(mod.value, na.rm = T)
    } else if (mdrt.values == "zeromax") {
      mw <- NA
      ymin <- 0
      ymax <- max(mod.value, na.rm = T)
    } else if (mdrt.values == "quart") {
      qu <- as.vector(stats::quantile(mod.value, na.rm = T))
      mw <- qu[3]
      ymin <- qu[2]
      ymax <- qu[4]
    }
    # -----------------------------------------------------------
    # Create data frame for plotting the interactions by
    # manually calculating the linear regression by inserting
    # the estimates of each term and the associated interaction term,
    # i.e.: y = b0 + (b1 * pred1) + (b2 * pred2) + (b3 * pred1 * pred2)
    # -----------------------------------------------------------
    # We now calculate the conditional effect of predictor 1 under absence
    # (or lowest impact) of predictor 2 on the dependent variable. Thus,
    # the slope for predictor 2 is not calculated. see
    # http://www.theanalysisfactor.com/interpreting-interactions-in-regression/
    # http://www.theanalysisfactor.com/clarifications-on-interpreting-interactions-in-regression/
    # ------------------------------
    miny <- (b.pred * pred.value) + (b3 * pred.value * ymin)
    # ------------------------------
    # here we calculate the conditional effect of predictor 1 under presence
    # (or strongest impact) of predictor 2 on the dependent variable. Thus,
    # the slope for predictor 2 only is not needed. see references above
    # ------------------------------
    maxy <- (b.pred * pred.value) + (b3 * pred.value * ymax)
    # store in df
    tmp <- data.frame(x = pred.value, y = miny, ymin = miny, ymax = maxy, grp = "min")
    intdf <- as.data.frame(rbind(intdf, tmp))
    # store in df
    tmp <- data.frame(x = pred.value, y = maxy, ymin = miny, ymax = maxy, grp = "max")
    intdf <- as.data.frame(rbind(intdf, tmp))
    # store in df
    if (mdrt.values == "meansd" || mdrt.values == "quart") {
      # ------------------------------
      # here we calculate the effect of predictor 1 under presence
      # of mean of predictor 2 on the dependent variable. Thus, the slope for
      # predictor 2 only is not needed. see references above
      # ------------------------------
      mittelwert <- (b.pred * pred.value) + (b3 * pred.value * mw)
      tmp <- data.frame(x = pred.value, y = mittelwert, ymin = miny, ymax = maxy, grp = "mean")
      intdf <- as.data.frame(rbind(intdf, tmp))
    }
    # -----------------------------------------------------------
    # convert df-values to numeric. for linear models the values
    # are used as-is; for other families they are transformed with
    # the model's inverse link function below.
    # -----------------------------------------------------------
    if (fun == "lm" || fun == "lmer" || fun == "lme") {
      intdf$x <- sjmisc::to_value(intdf$x, keep.labels = F)
      intdf$y <- sjmisc::to_value(intdf$y, keep.labels = F)
      intdf$ymin <- sjmisc::to_value(intdf$ymin, keep.labels = F)
      intdf$ymax <- sjmisc::to_value(intdf$ymax, keep.labels = F)
      intdf$ydiff <- intdf$ymax - intdf$ymin
      # -----------------------------------------------------------
      # retrieve lowest and highest x and y position to determine
      # the scale limits
      # NOTE(review): these limits are re-computed (and overwritten)
      # in the block following this if/else as well.
      # -----------------------------------------------------------
      if (is.null(ylim)) {
        if (diff) {
          lowerLim.y <- floor(min(intdf$ydiff, na.rm = T))
          upperLim.y <- ceiling(max(intdf$ydiff, na.rm = T))
        } else {
          lowerLim.y <- floor(min(intdf$y, na.rm = T))
          upperLim.y <- ceiling(max(intdf$y, na.rm = T))
        }
      } else {
        lowerLim.y <- ylim[1]
        upperLim.y <- ylim[2]
      }
    } else {
      # apply inverse link function, so y-values are on the
      # response scale (e.g. predicted probabilities for logit)
      invlink <- stats::family(fit)
      intdf$x <- sjmisc::to_value(intdf$x, keep.labels = F)
      intdf$y <- invlink$linkinv(eta = sjmisc::to_value(intdf$y, keep.labels = F))
      intdf$ymin <- invlink$linkinv(eta = sjmisc::to_value(intdf$ymin, keep.labels = F))
      intdf$ymax <- invlink$linkinv(eta = sjmisc::to_value(intdf$ymax, keep.labels = F))
      intdf$ydiff <- invlink$linkinv(eta = intdf$ymax - intdf$ymin)
    }
    # -----------------------------------------------------------
    # retrieve lowest and highest x and y position to determine
    # the scale limits
    # -----------------------------------------------------------
    if (is.null(ylim)) {
      if (binom_fam) {
        # probability scale: pad limits by 10% and round to 1 decimal
        lowerLim.y <- as.integer(floor(10 * min(intdf$y, na.rm = T) * .9)) / 10
        upperLim.y <- as.integer(ceiling(10 * max(intdf$y, na.rm = T) * 1.1)) / 10
      } else {
        if (diff) {
          lowerLim.y <- floor(min(intdf$ydiff, na.rm = T))
          upperLim.y <- ceiling(max(intdf$ydiff, na.rm = T))
        } else {
          lowerLim.y <- floor(min(intdf$y, na.rm = T))
          upperLim.y <- ceiling(max(intdf$y, na.rm = T))
        }
      }
    } else {
      lowerLim.y <- ylim[1]
      upperLim.y <- ylim[2]
    }
    # -----------------------------------------------------------
    # check x-axis limits
    # -----------------------------------------------------------
    if (!is.null(xlim)) {
      lowerLim.x <- xlim[1]
      upperLim.x <- xlim[2]
    } else {
      lowerLim.x <- floor(min(intdf$x, na.rm = T))
      upperLim.x <- ceiling(max(intdf$x, na.rm = T))
    }
    # -----------------------------------------------------------
    # check whether user defined grid breaks / tick marks are used
    # -----------------------------------------------------------
    if (!is.null(grid.breaks)) {
      gridbreaks.x <- seq(lowerLim.x, upperLim.x, by = grid.breaks)
      gridbreaks.y <- seq(lowerLim.y, upperLim.y, by = grid.breaks)
    }
    # -----------------------------------------------------------
    # prepare plot title and axis titles
    # -----------------------------------------------------------
    if (is.null(title)) {
      labtitle <- paste0("Conditional effect of ",
                         interactionterms[ifelse(isTRUE(useFirstPredOnY), 1, 2)],
                         " (by ",
                         interactionterms[ifelse(isTRUE(useFirstPredOnY), 2, 1)],
                         ") on ", git[["depvar.label"]])
    } else {
      # copy plot counter
      l_nr <- cnt
      # check if we have enough labels. if not, use last labels
      if (l_nr > length(title)) l_nr <- length(title)
      # set legend labels for plot
      labtitle <- title[l_nr]
    }
    # -----------------------------------------------------------
    # get model frame, needed for label detection
    # -----------------------------------------------------------
    modfram <- stats::model.frame(fit)
    modfound <- modfram[[predy]]
    # -----------------------------------------------------------
    # legend labels
    # -----------------------------------------------------------
    if (is.null(legend.labels)) {
      # ---------------------------------
      # find moderator variable in data
      # ---------------------------------
      if (!is.null(modfound)) {
        lLabels <- sjmisc::get_labels(modfound, attr.only = F)
      } else {
        lLabels <- NULL
      }
      # if we still have no labels, prepare generic labels
      if (is.null(lLabels)) {
        if (mdrt.values == "minmax") {
          lLabels <- c(paste0("lower bound of ", predy), paste0("upper bound of ", predy))
        } else if (mdrt.values == "meansd") {
          lLabels <- c(paste0("lower sd of ", predy), paste0("upper sd of ", predy), paste0("mean of ", predy))
        } else if (mdrt.values == "quart") {
          lLabels <- c(paste0("lower quartile of ", predy), paste0("upper quartile of ", predy), paste0("median of ", predy))
        } else {
          lLabels <- c(paste0("0 for ", predy), paste0("upper bound of ", predy))
        }
      } else {
        if (mdrt.values == "minmax") {
          lLabels <- lLabels[c(1, length(lLabels))]
        } else if (mdrt.values == "meansd") {
          lLabels <- c(paste0("lower sd of ", predy), paste0("upper sd of ", predy), paste0("mean of ", predy))
        } else if (mdrt.values == "quart") {
          lLabels <- c(paste0("lower quartile of ", predy), paste0("upper quartile of ", predy), paste0("median of ", predy))
        } else {
          lLabels <- c(paste0("0 for ", predy), lLabels[length(lLabels)])
        }
      }
    } else {
      # copy plot counter
      l_nr <- cnt
      # check if we have enough labels. if not, use last labels
      if (l_nr > length(legend.labels)) l_nr <- length(legend.labels)
      # set legend labels for plot
      lLabels <- legend.labels[[l_nr]]
    }
    # -----------------------------------------------------------
    # legend titles
    # -----------------------------------------------------------
    if (is.null(legend.title)) {
      lTitle <- sjmisc::get_label(modfound, def.value = predy)
    } else {
      # copy plot counter
      l_nr <- cnt
      # check if we have enough legend titles, if not, use last legend title
      if (l_nr > length(legend.title)) l_nr <- length(legend.title)
      # set legend title for plot
      lTitle <- legend.title[l_nr]
    }
    # -----------------------------------------------------------
    # x axis titles
    # -----------------------------------------------------------
    if (!is.null(axis.title)) {
      # copy plot counter
      l_nr <- cnt
      # check if we have enough axis titles, if not, use last legend title
      if (l_nr > length(axis.title)) l_nr <- length(axis.title)
      # set axis title
      labx <- axis.title[l_nr]
    }
    if (!is.null(y_title)) laby <- y_title
    # -----------------------------------------------------------
    # prepare annotation labels
    # -----------------------------------------------------------
    # wrap title
    labtitle <- sjmisc::word_wrap(labtitle, wrap.title)
    # wrap legend labels
    lLabels <- sjmisc::word_wrap(lLabels, wrap.legend.labels)
    # wrap legend title
    lTitle <- sjmisc::word_wrap(lTitle, wrap.legend.title)
    # -----------------------------------------------------------
    # prepare base plot of interactions
    # -----------------------------------------------------------
    if (diff) {
      # single line showing the difference between max and min curve
      baseplot <- ggplot(intdf, aes(x = x, y = ydiff)) +
        # -----------------------------------------------------------
        # add a shaded region between minimum
        # and maximum curve of interactions
        # -----------------------------------------------------------
        geom_ribbon(aes(ymin = 0, ymax = ydiff),
                    fill = fill.color,
                    alpha = fill.alpha) +
        geom_line(size = geom.size)
      # -----------------------------------------------------------
      # show value labels
      # -----------------------------------------------------------
      if (show.values) {
        baseplot <- baseplot +
          geom_text(aes(label = round(ydiff, 1)),
                    nudge_y = y.offset,
                    show.legend = FALSE)
      }
    } else {
      baseplot <- ggplot(intdf, aes(x = x, y = y, colour = grp))
      # the shaded area between line only if plots are not faceted
      if (!facet.grid) {
        baseplot <- baseplot +
          # add a shaded region between minimum and maximum curve of interactions
          geom_ribbon(aes(ymin = ymin, ymax = ymax, colour = NULL),
                      fill = fill.color,
                      alpha = fill.alpha,
                      show.legend = FALSE)
      }
      # add line
      baseplot <- baseplot + geom_line()
      # ------------------------------------------------------------
      # plot value labels
      # ------------------------------------------------------------
      if (show.values) {
        baseplot <- baseplot +
          geom_point() +
          geom_text(aes(label = round(y, 1)),
                    nudge_y = y.offset,
                    show.legend = FALSE)
      }
    }
    # ------------------------------------------------------------------------------------
    # check whether only diff-line is shown or upper and lower boundaries. in the latter
    # case, show legend, else hide legend
    # ------------------------------------------------------------------------------------
    if (diff) {
      col.len <- 1
      lLabels <- NULL
      lTitle <- NULL
    } else {
      if (mdrt.values == "minmax" || mdrt.values == "zeromax") {
        col.len <- 2
      } else {
        col.len <- 3
      }
    }
    # ------------------------------------------------------------------------------------
    # build plot object with theme and labels
    # ------------------------------------------------------------------------------------
    baseplot <- baseplot +
      # set plot and axis titles
      labs(title = labtitle, x = labx, y = laby, colour = lTitle) +
      # set axis scale breaks
      scale_x_continuous(limits = c(lowerLim.x, upperLim.x), breaks = gridbreaks.x) +
      scale_y_continuous(limits = c(lowerLim.y, upperLim.y), breaks = gridbreaks.y)
    # ---------------------------------------------------------
    # facet grids? (one panel per moderator group; not sensible for diff-plots)
    # ---------------------------------------------------------
    if (facet.grid && !diff) baseplot <- baseplot + facet_grid(~grp)
    # ---------------------------------------------------------
    # set geom colors
    # ---------------------------------------------------------
    baseplot <- sj.setGeomColors(baseplot, geom.colors, col.len, !is.null(lLabels), lLabels)
    # ---------------------------------------------------------
    # Check whether ggplot object should be returned or plotted
    # ---------------------------------------------------------
    if (prnt.plot) graphics::plot(baseplot)
    # concatenate plot object
    plotlist[[length(plotlist) + 1]] <- baseplot
    dflist[[length(dflist) + 1]] <- intdf
  }
  # -------------------------------------
  # return results (invisibly): all plots plus the data frames
  # used to build them
  # -------------------------------------
  invisible(structure(class = "sjpint",
                      list(plot.list = plotlist,
                           data.list = dflist)))
}
#' Plot interaction effects of (generalized) linear (mixed) models
#'
#' Internal worker: computes marginal effects via the \code{effects} package
#' and builds one ggplot per interaction term found in \code{fit} (or for the
#' single term given in \code{int.term}). Continuous moderators are reduced to
#' representative values according to \code{mdrt.values} ("minmax", "zeromax",
#' "meansd", "all" or "quart"). Returns (invisibly) a classed list with the
#' ggplot objects (\code{plot.list}) and their underlying data frames
#' (\code{data.list}).
#'
#' @importFrom stats na.omit model.frame
sjp.eff.int <- function(fit,
                        int.term = NULL,
                        int.plot.index = NULL,
                        mdrt.values = "minmax",
                        swap.pred = FALSE,
                        plevel = 0.05,
                        title = NULL,
                        fill.alpha = 0.3,
                        geom.colors = "Set1",
                        geom.size = 0.7,
                        axis.title = NULL,
                        legend.title = NULL,
                        legend.labels = NULL,
                        show.values = FALSE,
                        wrap.title = 50,
                        wrap.legend.labels = 20,
                        wrap.legend.title = 20,
                        xlim = NULL,
                        ylim = NULL,
                        y.offset = 0.07,
                        grid.breaks = NULL,
                        show.ci = FALSE,
                        p.kr = FALSE,
                        facet.grid = FALSE,
                        prnt.plot = TRUE,
                        fun,
                        ...) {
  # --------------------------------------------------------
  # check default geom.size
  # --------------------------------------------------------
  if (is.null(geom.size)) geom.size = .7
  # ------------------------
  # check if suggested package is available
  # ------------------------
  if (!requireNamespace("effects", quietly = TRUE)) {
    stop("Package `effects` needed for this function to work. Please install it.", call. = FALSE)
  }
  # gridbreaks
  if (is.null(grid.breaks)) gridbreaks.x <- gridbreaks.y <- ggplot2::waiver()
  # init default
  binom_fam <- FALSE
  # ------------------------
  # multiple purpose of show.ci parameter. if logical,
  # sets default CI to 0.95, else show.ci also may be
  # numeric
  # ------------------------
  if (!is.null(show.ci) && !is.logical(show.ci)) {
    eci <- show.ci
    show.ci = TRUE
  } else {
    eci <- 0.95
  }
  # ------------------------
  # calculate effects of higher order terms and
  # check if fitted model contains any interaction terms
  # allEffects returns a list, with all interaction effects
  # (or higher order terms) as separate list element. each list
  # element contains the higher-order-term of the fitted model,
  # where the 'term' attribute of interaction terms have a "*".
  # So we just need to look at each 'term' attribute of each
  # list element and see if there is a "*"...
  # ------------------------
  if (is.null(int.term)) {
    eff <- effects::allEffects(fit, KR = F)
    int <- unlist(lapply(eff, function(x) grep("*", x['term'], fixed = T)))
  } else {
    eff <- effects::effect(int.term, fit, KR = F)
    int <- grep("*", eff$term, fixed = T)
  }
  if (length(int) == 0) {
    warning("No interaction term found in fitted model...", call. = FALSE)
    return(invisible(NULL))
  }
  # ------------------------
  # retrieve position of interaction terms in effects-object
  # ------------------------
  if (is.null(int.term)) {
    intpos <- which(as.vector(sapply(eff, function(x) sjmisc::str_contains(x['term'], "*"))))
  } else {
    intpos <- 1
  }
  # select only specific plots
  if (!is.null(int.plot.index) && !any(int.plot.index > length(intpos))) intpos <- intpos[int.plot.index]
  # init vector that saves ggplot objects
  plotlist <- list()
  dflist <- list()
  # -----------------------------------------------------------
  # iterate all interaction terms
  # NOTE(review): seq_along(intpos) would be safer than 1:length(intpos),
  # though intpos is guaranteed non-empty at this point.
  # -----------------------------------------------------------
  for (i in 1:length(intpos)) {
    # -----------------------------------------------------------
    # copy "eff" object, so we don't confuse with effect-return-
    # value from single term and multiple terms
    # -----------------------------------------------------------
    if (is.null(int.term)) {
      dummy.eff <- eff[[intpos[i]]]
    } else {
      dummy.eff <- eff
    }
    # -----------------------------------------------------------
    # retrieve data frame
    # -----------------------------------------------------------
    intdf <- data.frame(dummy.eff)
    # -----------------------------------------------------------
    # save response, predictor and moderator names
    # (swap.pred exchanges which term goes on the x-axis)
    # -----------------------------------------------------------
    pred_x.name <- colnames(intdf)[ifelse(isTRUE(swap.pred), 1, 2)]
    moderator.name <- colnames(intdf)[ifelse(isTRUE(swap.pred), 2, 1)]
    response.name <- dummy.eff$response
    # prepare axis titles
    labx <- sjmisc::get_label(stats::model.frame(fit)[[pred_x.name]], def.value = pred_x.name)
    # check whether x-axis-predictor is a factor or not
    x_is_factor <- is.factor(intdf[[pred_x.name]]) || (length(unique(na.omit(intdf[[pred_x.name]]))) < 3)
    mod_is_factor <- is.factor(intdf[[moderator.name]])
    # -----------------------------------------------------------
    # check for moderator values, but only, if moderator
    # is no factor value. In this case, we can choose
    # the values for continuous moderator intentionally,
    # e.g. only min/max, or mean and sd. We don't need these
    # values for categorical moderator values.
    # -----------------------------------------------------------
    if (!mod_is_factor) {
      # retrieve moderator value
      modval <- dummy.eff$data[[moderator.name]]
      # retrieve predictor value
      predval <- dummy.eff$data[[pred_x.name]]
      # -----------------------------------------------------------
      # Check whether moderator value has enough unique values
      # for quartiles
      # -----------------------------------------------------------
      mdrt.values <- mv_check(mdrt.values, modval)
      # NOTE(review): if the continuous moderator has exactly 2 unique
      # values and mdrt.values is "minmax" or "zeromax", none of the
      # branches below assigns xl1, and `names(xl1)` further down would
      # fail with an "object not found" error — confirm whether that
      # path can occur with real data.
      # we have more than two values, so re-calculate effects, just using
      # min and max value of moderator.
      if (mdrt.values == "minmax" && length(unique(intdf[[moderator.name]])) > 2) {
        # retrieve min and max values
        mv.min <- min(modval, na.rm = T)
        mv.max <- max(modval, na.rm = T)
        # re-compute effects, prepare xlevels
        xl1 <- list(x = c(mv.min, mv.max))
        # we have more than two values, so re-calculate effects, just using
        # 0 and max value of moderator.
      } else if (mdrt.values == "zeromax" && length(unique(intdf[[moderator.name]])) > 2) {
        # retrieve max values
        mv.max <- max(modval, na.rm = T)
        # re-compute effects, prepare xlevels
        xl1 <- list(x = c(0, mv.max))
        # compute mean +/- sd
      } else if (mdrt.values == "meansd") {
        # retrieve mean and sd
        mv.mean <- round(mean(modval, na.rm = T), 2)
        mv.sd <- round(sd(modval, na.rm = T), 2)
        # re-compute effects, prepare xlevels
        xl1 <- list(x = c(mv.mean - mv.sd, mv.mean, mv.mean + mv.sd))
      } else if (mdrt.values == "all") {
        # re-compute effects, prepare xlevels
        xl1 <- list(x = as.vector((unique(sort(modval, na.last = NA)))))
      } else if (mdrt.values == "quart") {
        # re-compute effects, prepare xlevels
        xl1 <- list(x = as.vector(stats::quantile(modval, na.rm = T)))
      }
      # change list name to moderator value name
      names(xl1) <- moderator.name
      # add values of interaction term
      # first, get all unqiue values
      prvl <- sort(unique(stats::na.omit(predval)))
      # add them to list as well
      xl2 <- list(y = prvl)
      # change list name
      names(xl2) <- pred_x.name
      # combine lists
      if (is.null(int.term)) {
        # re-compute effects
        eff.tmp <- effects::allEffects(fit, xlevels = c(xl1, xl2), KR = p.kr,
                                       confidence.level = eci, ...)
        # reset data frame
        intdf <- data.frame(eff.tmp[[intpos[i]]])
      } else {
        # re-compute effects
        eff.tmp <- effects::effect(int.term, fit, xlevels = c(xl1, xl2),
                                   KR = p.kr, confidence.level = eci, ...)
        # reset data frame
        intdf <- data.frame(eff.tmp)
      }
      # -----------------------------------------------------------
      # check for predictor values on x-axis. if it
      # is no factor, select whole range of possible
      # values.
      # -----------------------------------------------------------
    } else if (!x_is_factor) {
      # retrieve predictor value
      predval <- dummy.eff$data[[pred_x.name]]
      # add values of interaction term
      # first, get all unqiue values
      prvl <- sort(unique(stats::na.omit(predval)))
      # add them to list as well
      xl <- list(x = prvl)
      # change list name
      names(xl) <- pred_x.name
      # combine lists
      if (is.null(int.term)) {
        # re-compute effects
        eff.tmp <- effects::allEffects(fit, xlevels = xl, KR = p.kr,
                                       confidence.level = eci, ...)
        # reset data frame
        intdf <- data.frame(eff.tmp[[intpos[i]]])
      } else {
        # re-compute effects
        eff.tmp <- effects::effect(int.term, fit, xlevels = xl, KR = p.kr,
                                   confidence.level = eci, ...)
        # reset data frame
        intdf <- data.frame(eff.tmp)
      }
    }
    # -----------------------------------------------------------
    # change column names to the fixed scheme the plotting code uses
    # -----------------------------------------------------------
    if (swap.pred) {
      colnames(intdf) <- c("x", "grp", "y", "se", "conf.low", "conf.high")
    } else {
      colnames(intdf) <- c("grp", "x", "y", "se", "conf.low", "conf.high")
    }
    # -----------------------------------------------------------
    # effects-package creates "NA" factor levels, which
    # need to be removed
    # -----------------------------------------------------------
    intdf <- droplevels(intdf)
    # group as factor
    intdf$grp <- factor(intdf$grp, levels = unique(as.character(intdf$grp)))
    # reset labels
    x_labels <- NULL
    lLabels <- NULL
    # does model have labels? we want these if x is a factor.
    # first we need to know whether we have a model-data-frame
    if (x_is_factor) {
      # do we have a factor with level-labels for "x"?
      # if yes, use these as labels
      if (!sjmisc::is_num_fac(intdf$x)) {
        x_labels <- levels(intdf$x)
      } else {
        x_labels <- sjmisc::get_labels(stats::model.frame(fit)[[pred_x.name]], attr.only = F)
      }
    }
    # make sure x is numeric
    intdf$x <- sjmisc::to_value(intdf$x, keep.labels = F)
    # get name of response, for axis title
    yaxisname <- sjmisc::get_label(stats::model.frame(fit)[[response.name]],
                                   def.value = response.name)
    # -----------------------------------------------------------
    # check if we have linear regression
    # -----------------------------------------------------------
    if (fun == "lm" || fun == "lmer" || fun == "lme" || fun == "gls") {
      # Label on y-axis is name of dependent variable
      y_title <- sprintf("Predicted values of %s", yaxisname)
      # -----------------------------------------------------------
      # retrieve lowest and highest x and y position to determine
      # the scale limits
      # -----------------------------------------------------------
      if (is.null(ylim)) {
        if (show.ci) {
          lowerLim.y <- floor(min(intdf$conf.low, na.rm = T))
          upperLim.y <- ceiling(max(intdf$conf.high, na.rm = T))
        } else {
          lowerLim.y <- floor(min(intdf$y, na.rm = T))
          upperLim.y <- ceiling(max(intdf$y, na.rm = T))
        }
      } else {
        lowerLim.y <- ylim[1]
        upperLim.y <- ylim[2]
      }
    } else {
      # ------------------------
      # do we have glm? if so, get link family. make exceptions
      # for specific models that don't have family function
      # ------------------------
      fitfam <- get_glm_family(fit)
      # --------------------------------------------------------
      # create logical for family
      # --------------------------------------------------------
      binom_fam <- fitfam$is_bin
      poisson_fam <- fitfam$is_pois
      # --------------------------------------------------------
      # Label on y-axis is fixed
      # --------------------------------------------------------
      # for logistic reg.
      if (binom_fam)
        y_title <- sprintf("Predicted probabilities for %s", yaxisname)
      else if (poisson_fam)
        y_title <- sprintf("Predicted incidents for %s", yaxisname)
      else
        y_title <- sprintf("Predicted values for %s", yaxisname)
      # -----------------------------------------------------------
      # retrieve lowest and highest x and y position to determine
      # the scale limits. binomial models get limits padded by
      # +/- 10 % and rounded to one decimal (probability scale).
      # -----------------------------------------------------------
      if (is.null(ylim)) {
        if (binom_fam) {
          if (show.ci) {
            lowerLim.y <- as.integer(floor(10 * min(intdf$conf.low, na.rm = T) * .9)) / 10
            upperLim.y <- as.integer(ceiling(10 * max(intdf$conf.high, na.rm = T) * 1.1)) / 10
          } else {
            lowerLim.y <- as.integer(floor(10 * min(intdf$y, na.rm = T) * .9)) / 10
            upperLim.y <- as.integer(ceiling(10 * max(intdf$y, na.rm = T) * 1.1)) / 10
          }
        } else {
          if (show.ci) {
            lowerLim.y <- floor(min(intdf$conf.low, na.rm = T))
            upperLim.y <- ceiling(max(intdf$conf.high, na.rm = T))
          } else {
            lowerLim.y <- floor(min(intdf$y, na.rm = T))
            upperLim.y <- ceiling(max(intdf$y, na.rm = T))
          }
        }
      } else {
        lowerLim.y <- ylim[1]
        upperLim.y <- ylim[2]
      }
    }
    # -----------------------------------------------------------
    # check x-axis limits
    # -----------------------------------------------------------
    if (!is.null(xlim)) {
      lowerLim.x <- xlim[1]
      upperLim.x <- xlim[2]
    } else {
      lowerLim.x <- floor(min(intdf$x, na.rm = T))
      upperLim.x <- ceiling(max(intdf$x, na.rm = T))
    }
    # -----------------------------------------------------------
    # check whether user defined grid breaks / tick marks are used
    # -----------------------------------------------------------
    if (!is.null(grid.breaks)) {
      gridbreaks.x <- seq(lowerLim.x, upperLim.x, by = grid.breaks)
      gridbreaks.y <- seq(lowerLim.y, upperLim.y, by = grid.breaks)
    } else if (x_is_factor) {
      gridbreaks.x <- sort(unique(intdf$x))
    } else {
      gridbreaks.x <- gridbreaks.y <- ggplot2::waiver()
    }
    # -----------------------------------------------------------
    # prepare plot title and axis titles
    # -----------------------------------------------------------
    if (is.null(title)) {
      labtitle <- paste0("Interaction effect of ", moderator.name, " and ",
                         pred_x.name, " on ", response.name)
    } else {
      # copy plot counter
      l_nr <- i
      # check if we have enough labels. if not, use last labels
      if (l_nr > length(title)) l_nr <- length(title)
      # set legend labels for plot
      labtitle <- title[l_nr]
    }
    # -----------------------------------------------------------
    # legend labels
    # -----------------------------------------------------------
    if (is.null(legend.labels)) {
      # try to get labels, but only for factors
      if (mod_is_factor) {
        lLabels <- sjmisc::get_labels(stats::model.frame(fit)[[moderator.name]],
                                      attr.only = F)
      }
      # if we still have no labels, get values from group
      if (is.null(lLabels)) lLabels <- unique(as.character(intdf$grp))
    } else {
      # copy plot counter
      l_nr <- i
      # check if we have enough labels. if not, use last labels
      if (l_nr > length(legend.labels)) l_nr <- length(legend.labels)
      # set legend labels for plot
      lLabels <- legend.labels[[l_nr]]
    }
    # -----------------------------------------------------------
    # prepare facet-labels
    # -----------------------------------------------------------
    if (length(unique(intdf$grp)) == length(lLabels) && isTRUE(facet.grid)) {
      levels(intdf$grp) <- lLabels
    }
    # -----------------------------------------------------------
    # legend titles
    # -----------------------------------------------------------
    if (is.null(legend.title)) {
      lTitle <- sjmisc::get_label(stats::model.frame(fit)[[moderator.name]],
                                  def.value = moderator.name)
    } else {
      # copy plot counter
      l_nr <- i
      # check if we have enough legend titles, if not, use last legend title
      if (l_nr > length(legend.title)) l_nr <- length(legend.title)
      # set legend title for plot
      lTitle <- legend.title[l_nr]
    }
    # -----------------------------------------------------------
    # x axis titles
    # -----------------------------------------------------------
    if (!is.null(axis.title)) {
      # copy plot counter
      l_nr <- i
      # check if we have enough axis titles, if not, use last legend title
      if (l_nr > length(axis.title)) l_nr <- length(axis.title)
      # set axis title
      labx <- axis.title[l_nr]
    }
    # y-axis title.
    laby <- y_title
    # -----------------------------------------------------------
    # wrap titles
    # -----------------------------------------------------------
    labtitle <- sjmisc::word_wrap(labtitle, wrap.title)
    labx <- sjmisc::word_wrap(labx, wrap.title)
    laby <- sjmisc::word_wrap(laby, wrap.title)
    # wrap legend labels
    lLabels <- sjmisc::word_wrap(lLabels, wrap.legend.labels)
    # wrap legend title
    lTitle <- sjmisc::word_wrap(lTitle, wrap.legend.title)
    # ------------------------------------------------------------
    # start plot
    # ------------------------------------------------------------
    baseplot <- ggplot(intdf, aes(x = x, y = y, colour = grp))
    # ------------------------------------------------------------
    # confidence interval?
    # ------------------------------------------------------------
    if (show.ci) {
      if (x_is_factor) {
        # -------------------------------------------------
        # for factors, we add error bars instead of
        # continuous confidence region
        # -------------------------------------------------
        baseplot <- baseplot +
          geom_errorbar(aes(ymin = conf.low, ymax = conf.high, colour = grp),
                        width = 0, show.legend = FALSE) +
          geom_point()
      } else {
        # -------------------------------------------------
        # for continuous variables, we add continuous
        # confidence region instead of error bars
        # -------------------------------------------------
        baseplot <- baseplot +
          geom_ribbon(aes(ymin = conf.low, ymax = conf.high, colour = NULL, fill = grp),
                      alpha = fill.alpha, show.legend = FALSE)
      }
    }
    # NOTE(review): newer ggplot2 (>= 3.4) deprecates `size` for lines in
    # favour of `linewidth` — confirm the targeted ggplot2 version.
    baseplot <- baseplot + geom_line(size = geom.size)
    # ------------------------------------------------------------
    # plot value labels
    # ------------------------------------------------------------
    if (show.values) {
      # don't need geom_point, because point-layer already
      # added with x_is_factor
      if (!x_is_factor) baseplot <- baseplot + geom_point()
      # add value label text
      baseplot <- baseplot +
        geom_text(aes(label = round(y, 1)), nudge_y = y.offset, show.legend = FALSE)
    }
    # ------------------------------------------------------------------------------------
    # build plot object with theme and labels
    # ------------------------------------------------------------------------------------
    baseplot <- baseplot +
      # set plot and axis titles
      labs(title = labtitle, x = labx, y = laby, colour = lTitle)
    # we have specified labels for factors on x-axis only...
    if (x_is_factor && !is.null(x_labels)) {
      baseplot <- baseplot +
        scale_x_continuous(limits = c(lowerLim.x, upperLim.x),
                           breaks = gridbreaks.x,
                           labels = x_labels)
    } else {
      # ...else, we use waiver-labels
      baseplot <- baseplot +
        scale_x_continuous(limits = c(lowerLim.x, upperLim.x),
                           breaks = gridbreaks.x)
    }
    # ------------------------
    # for logistic regression, use
    # 0 to 1 scale limits and percentage scale
    # ------------------------
    if (binom_fam) {
      baseplot <- baseplot +
        scale_y_continuous(limits = c(lowerLim.y, upperLim.y),
                           breaks = gridbreaks.y,
                           labels = scales::percent)
    } else {
      baseplot <- baseplot +
        # set axis scale breaks
        scale_y_continuous(limits = c(lowerLim.y, upperLim.y),
                           breaks = gridbreaks.y)
    }
    # ---------------------------------------------------------
    # facet grids?
    # ---------------------------------------------------------
    if (facet.grid) baseplot <- baseplot + facet_grid(~grp)
    # ---------------------------------------------------------
    # set geom colors
    # ---------------------------------------------------------
    baseplot <- sj.setGeomColors(baseplot,
                                 geom.colors,
                                 pal.len = length(unique(stats::na.omit(intdf$grp))),
                                 show.legend = !is.null(lLabels) & !facet.grid,
                                 lLabels)
    # ---------------------------------------------------------
    # Check whether ggplot object should be returned or plotted
    # ---------------------------------------------------------
    if (prnt.plot) graphics::plot(baseplot)
    # concatenate plot object
    plotlist[[length(plotlist) + 1]] <- baseplot
    dflist[[length(dflist) + 1]] <- intdf
  }
  # -------------------------------------
  # return results (invisible classed list of plots and data)
  # -------------------------------------
  invisible(structure(class = c("sjPlot", "sjpint"),
                      list(plot.list = plotlist,
                           data.list = dflist)))
}
#' @importFrom stats quantile
# Validate the requested moderator-value strategy against the data: quartile
# splits need at least three distinct quantile values, otherwise fall back
# to the "minmax" strategy (with an informative message).
mv_check <- function(mdrt.values, x) {
  # number of distinct quantile values of the moderator
  n_distinct_quart <- length(unique(as.vector(stats::quantile(x, na.rm = TRUE))))
  if (mdrt.values == "quart" && n_distinct_quart < 3) {
    # quartiles collapse for (near-)constant moderators; use min/max instead
    message("Could not compute quartiles, too small range of moderator variable. Defaulting `mdrt.values` to `minmax`.")
    mdrt.values <- "minmax"
  }
  mdrt.values
}
# get all (significant) interaction terms from model
# the function "getInteractionTerms" checks if a fitted
# model contains any interaction terms that are significant
# at the level specified by "plevel". returns NULL, if model
# contains no interaction terms or no significant interaction term.
# else, information on model and interaction terms is returned
#
# Args:
#   fit    - fitted model (lm/glm/plm/lme/gls, or lme4 lmer/glmer/nlmer)
#   fun    - character name of the fitting function, used for dispatch
#   plevel - p-value threshold below which an interaction term is kept
#   p.kr   - forwarded to get_lmerMod_pvalues() for merMod models
# Returns: list(intnames, estimates, estimates.names, b0, fitdat,
#   depvar.label), or invisible(NULL) with a warning when the model has
#   no (significant) interaction terms.
#' @importFrom stats model.matrix model.frame
getInteractionTerms <- function(fit, fun, plevel, p.kr) {
  # -----------------------------------------------------------
  # retrieve coefficient table; pval stays all-zero as fallback
  # when the summary has no p-value column
  # -----------------------------------------------------------
  coef.tab <- summary(fit)$coefficients
  pval <- rep(0, times = nrow(coef.tab) - 1)
  # -----------------------------------------------------------
  # Helper that strips the AsIs "I(...)" wrapper from term names
  # so interactions can be matched via ":".
  # BUG FIX: the original guard `length(fpos) > 0 && fpos > 0` used the
  # scalar `&&` on grep()'s result, which can hold several positions;
  # since R 4.3 `&&` errors on length > 1 conditions (and before that
  # silently inspected only the first element). grep() never returns
  # non-positive indices, so the length check alone suffices.
  # NOTE(review): sub() replaces only the first occurrence per element;
  # names with several "I(" wrappers would be cleaned incompletely —
  # behavior kept as in the original.
  # -----------------------------------------------------------
  remove_I <- function(xnames) {
    fpos <- grep("I(", xnames, fixed = TRUE)
    if (length(fpos) > 0) {
      xnames <- sub("I(", "", xnames, fixed = TRUE)
      xnames <- sub(")", "", xnames, fixed = TRUE)
      xnames <- sub(" * ", ":", xnames, fixed = TRUE)
    }
    return(xnames)
  }
  # -----------------------------------------------------------
  # prepare values for (generalized) linear models
  # -----------------------------------------------------------
  if (fun == "lm" || fun == "glm" || fun == "plm" || fun == "lme" || fun == "gls") {
    # -----------------------------------------------------------
    # retrieve name of dependent variable and the model matrix
    # -----------------------------------------------------------
    if (fun == "plm") {
      # plm objects have different structure than (g)lm
      depvar.label <- attr(attr(attr(fit$model, "terms"), "dataClasses"), "names")[1]
      # retrieve model matrix, with the response prepended as first column
      fitdat <- data.frame(cbind(as.vector(fit$model[, 1]), stats::model.matrix(fit)))
    } else {
      depvar.label <- colnames(stats::model.frame(fit))[1]
      # retrieve model matrix
      fitdat <- data.frame(stats::model.matrix(fit))
    }
    # -----------------------------------------------------------
    # retrieve p-values (4th column, when present), without intercept
    # -----------------------------------------------------------
    if (ncol(coef.tab) > 3) pval <- coef.tab[-1, 4]
    # -----------------------------------------------------------
    # retrieve estimates, without intercept
    # -----------------------------------------------------------
    estimates <- coef.tab[-1, 1]
    # -----------------------------------------------------------
    # need to remove "I(...)"?
    # -----------------------------------------------------------
    estimates.names <- remove_I(names(estimates))
    it <- estimates.names
    # -----------------------------------------------------------
    # retrieve estimate of intercept
    # -----------------------------------------------------------
    b0 <- coef.tab[1, 1]
    # -----------------------------------------------------------
    # prepare values for (generalized) linear mixed effects models
    # -----------------------------------------------------------
  } else if (fun == "lmer" || fun == "glmer" || fun == "nlmer") {
    # name of dependent variable
    depvar.label <- colnames(stats::model.frame(fit))[1]
    # p-values, without intercept (optionally Kenward-Roger based)
    pval <- get_lmerMod_pvalues(fit, p.kr)[-1]
    # fixed-effects estimates and their names, without intercept
    estimates <- unname(lme4::fixef(fit)[-1])
    estimates.names <- names(lme4::fixef(fit)[-1])
    # model matrix with all relevant predictors
    fitdat <- stats::model.matrix(fit)
    # need to remove "I(...)"?
    estimates.names <- remove_I(estimates.names)
    it <- estimates.names
    # estimate of intercept
    b0 <- unname(lme4::fixef(fit)[1])
  } else {
    stop("Unsupported model-class. This type of regression is not yet supported by 'sjp.int'.", call. = FALSE)
  }
  # -----------------------------------------------------------
  # find all significant interactions. Interaction terms contain
  # a colon and always follow the single terms in the coefficient
  # table, so the first colon marks where to start scanning.
  # -----------------------------------------------------------
  firstit <- grep(":", it, fixed = TRUE)[1]
  # check whether we have any interaction terms included at all
  if (is.null(firstit) || is.na(firstit) || firstit == 0) {
    warning("No interaction term found in fitted model...", call. = FALSE)
    return(invisible(NULL))
  }
  # save names of interaction predictor variables into this object
  intnames <- c()
  int.dropped <- c()
  non.p.dropped <- FALSE
  for (i in firstit:length(pval)) {
    if (pval[i] < plevel) {
      intnames <- c(intnames, it[i])
    } else {
      non.p.dropped <- TRUE
      int.dropped <- c(int.dropped, it[i], "\n")
    }
  }
  # check for any significant interactions, stop if nothing found
  if (is.null(intnames)) {
    warning("No significant interactions found... Try to adjust `plevel` argument.", call. = FALSE)
    return(invisible(NULL))
  } else if (non.p.dropped) {
    message(sprintf("Following non-significant interaction terms were omitted from the output:\n%s\nUse `plevel` to show more interaction terms.",
                    paste(int.dropped, collapse = "")))
  }
  return(list(intnames = intnames,
              estimates = estimates,
              estimates.names = estimates.names,
              b0 = b0,
              fitdat = fitdat,
              depvar.label = depvar.label))
}
# Load the dataset (first column of the CSV holds the row names).
# BUG FIX: the original passed `row.name = 1`, relying on R's partial
# argument matching; the documented argument is `row.names`.
meteo <- read.csv("meteo.csv", row.names = 1)
# Keep only the variables strongly correlated with y:
# |corr(y, Xn)| > 0.4 (Spearman). Compute the correlation row once
# instead of twice as in the original.
cor.y <- cor(meteo, method = "spearman")[1, ]
meteo.slim <- meteo[abs(cor.y) > 0.4]
ncol(meteo.slim)
head(meteo.slim)
# Fix the seed so that the sample used to split into train and test
# always yields the same result.
set.seed(23)
# Split into train and test sets. In total, only the first 5000 days
# of the dataset are used.
n <- 5000
# training indices (75% of the first n rows)
indtrain <- sample(1:n, round(0.75*n))
meteo.slim.train <- meteo.slim[indtrain,]
# test indices (the remaining 25%)
indtest <- setdiff(1:n, indtrain)
meteo.slim.test <- meteo.slim[indtest,]
# Training dataset: meteo.slim.train
# Test dataset: meteo.slim.test
# Convert the target variable into a categorical one:
# rain = 1 when precipitation y >= 1, otherwise 0.
rain = ifelse(meteo.slim$y < 1, 0, 1)
meteo.slim$rain = as.factor(rain)
# Load the library needed to fit decision trees
library(tree)
# Drop 'y' and keep only 'rain' (plus the predictors); since 'rain' is a
# factor, tree() fits a classification tree.
meteo.rain <- meteo.slim[,2:ncol(meteo.slim)]
head(meteo.rain)
# Train a tree allowed to grow without limits
# (mincut = 1, minsize = 2, mindev = 0 => fully grown tree)
t.tree = tree(formula = rain ~ .,
              data = meteo.rain,
              subset = indtrain,
              control = tree.control(length(indtrain),
                                     mincut = 1,
                                     minsize = 2,
                                     mindev = 0))
# Plot the tree
plot(t.tree)
text(t.tree, pretty = F)
# Count the terminal leaves
print(paste("El arbol completo tiene",
            length(t.tree$frame$var[t.tree$frame$var == '<leaf>']), "hojas"))
# Now use cross-validation to prune the full tree,
# making it more generalizable, i.e. more useful for prediction.
# Set up the cross-validation
tree.cv <- cv.tree(object = t.tree,
                   FUN = prune.tree,
                   K = 10)
# Plot the CV error as a function of tree size.
# NOTE(review): tree.cv$dev is the CV deviance; dividing by the number of
# training cases gives a mean deviance, not literally an RMSE as the
# y-axis label suggests — confirm the intended metric.
plot(tree.cv$size, tree.cv$dev / length(indtrain), type = "b",
     xlab = "Tree Size", ylab = "CV-RMSE",
     xlim = c(0,10))
# Prune the full tree according to the cross-validation results
opt.tree <- prune.tree(t.tree, best = 6)
# Plot the optimal tree
plot(opt.tree)
text(opt.tree, pretty = F)
summary(opt.tree)
# Prepare the train and test datasets for the classification task
meteo.rain.train <- meteo.rain[indtrain,]
meteo.rain.test <- meteo.rain[-indtrain,]
# Store the class predictions of the pruned tree
pred.train <- predict(opt.tree, newdata = meteo.rain.train, type = "class")
pred.test <- predict(opt.tree, newdata = meteo.rain.test, type = "class")
# Prepare the training dataset containing only the rainy days.
# 'cont' in the name means ~continuous (rain is no longer a factor).
meteo.cont.rain <- meteo.slim[meteo.slim$rain == 1,]
# Drop the trailing 'rain' column.
# BUG FIX (latent): the original used `x[1:ncol(x)-1]`, which evaluates to
# `x[0:(ncol(x) - 1)]` due to operator precedence and only worked because
# a 0 column index is silently ignored; make the intent explicit.
meteo.cont.rain <- meteo.cont.rain[seq_len(ncol(meteo.cont.rain) - 1)]
head(meteo.cont.rain)
nrow(meteo.cont.rain)
# Prepare the training dataset containing both rainy and non-rainy days.
# 'cont' in the name means ~continuous (rain is no longer a factor).
meteo.cont <- meteo.slim[seq_len(ncol(meteo.slim) - 1)]
head(meteo.cont)
nrow(meteo.cont)
# Train a tree allowed to grow without limits,
# using the complete training dataset (rainy and non-rainy days).
# FIX: the original call passed `method = "cubist"`, which tree::tree()
# does not support (it only distinguishes "recursive.partition" and
# "model.frame"); the value was silently ignored, so removing the
# misleading argument does not change the fitted tree.
full.tree <- tree(formula = y ~ .,
                  data = meteo.cont,
                  subset = indtrain,
                  control = tree.control(length(indtrain),
                                         mincut = 1,
                                         minsize = 2,
                                         mindev = 0))
# Plot the tree
plot(full.tree)
text(full.tree, pretty = FALSE)
# Count the terminal leaves
print(paste("El arbol completo tiene",
            length(full.tree$frame$var[full.tree$frame$var == '<leaf>']), "hojas"))
# Now use cross-validation to prune the full tree,
# making it more generalizable, i.e. more useful for prediction.
# Set up the cross-validation
full.tree.cv <- cv.tree(object = full.tree,
                        FUN = prune.tree,
                        K = 10)
# Plot the CV error as a function of tree size
# (dev / n is a mean deviance; see note on the earlier CV plot)
plot(full.tree.cv$size, full.tree.cv$dev / length(indtrain), type = "b",
     xlab = "Tree Size", ylab = "CV-RMSE",
     xlim = c(0,30))
# Prune the full tree according to the cross-validation results
opt.tree.full <- prune.tree(full.tree, best = 10)
# Plot the optimal tree
plot(opt.tree.full)
text(opt.tree.full, pretty = F)
summary(opt.tree.full)
print(opt.tree.full)
# Prepare the train and test datasets for the continuous target
meteo.cont.full.train <- meteo.cont[indtrain,]
meteo.cont.full.test <- meteo.cont[-indtrain,]
# Store the predictions of the optimal tree trained on all events (full)
pred.cont.full.train = predict(opt.tree.full, newdata = meteo.cont.full.train)
pred.cont.full.test = predict(opt.tree.full, newdata = meteo.cont.full.test)
# Train a tree allowed to grow without limits,
# using the training dataset with rainy days only.
# NOTE(review): `subset = indtrain` indexes rows of meteo.cont.rain, which
# has fewer rows than the 5000-row frame indtrain was drawn from; indices
# past nrow(meteo.cont.rain) produce NA rows (later removed via na.omit).
# Confirm this subsetting is intentional.
rain.tree = tree(formula = y ~ .,
                 data = meteo.cont.rain,
                 subset = indtrain,
                 method = "cubist", # NOTE(review): tree() only recognizes "recursive.partition"/"model.frame"; this value is silently ignored
                 control = tree.control(length(indtrain),
                                        mincut = 1,
                                        minsize = 2,
                                        mindev = 0))
# Plot the tree
plot(rain.tree)
text(rain.tree, pretty = F)
# Count the terminal leaves
print(paste("El arbol completo entrenado solo con días de lluvia tiene",
            length(rain.tree$frame$var[rain.tree$frame$var == '<leaf>']), "hojas"))
# Now use cross-validation to prune the full tree,
# making it more generalizable, i.e. more useful for prediction.
# Set up the cross-validation
rain.tree.cv <- cv.tree(object = rain.tree,
                        FUN = prune.tree,
                        K = 10)
# Plot the CV error as a function of tree size
plot(rain.tree.cv$size, rain.tree.cv$dev / length(indtrain), type = "b",
     xlab = "Tree Size", ylab = "CV-RMSE",
     xlim = c(0,10))
# Prune the full tree according to the cross-validation results.
# The optimal tree should have 5 terminal leaves.
opt.tree.rain <- prune.tree(rain.tree, best = 5)
# Plot the optimal tree
plot(opt.tree.rain)
text(opt.tree.rain, pretty = F)
print(opt.tree.rain)
# Using only 5 leaves might reduce the tree's capacity to describe the
# variability of the rainfall amount. Also try 10 leaves.
opt.tree.rain.10 <- prune.tree(rain.tree, best = 10)
# Plot the optimal tree
plot(opt.tree.rain.10)
text(opt.tree.rain.10, pretty = F)
print(opt.tree.rain.10)
# Prepare the train and test datasets (na.omit drops the NA rows produced
# by the out-of-range subset indices noted above)
meteo.cont.rain.train <- na.omit(meteo.cont.rain[indtrain,])
meteo.cont.rain.test <- na.omit(meteo.cont.rain[-indtrain,])
# Store the predictions of the optimal (5-leaf) tree.
# NOTE(review): both lines predict on meteo.cont.full.test, so
# pred.cont.rain.test.all and pred.cont.rain.test are identical — confirm
# whether one was meant to use meteo.cont.rain.test instead.
pred.cont.rain.test.all = predict(opt.tree.rain, newdata = meteo.cont.full.test)
pred.cont.rain.test = predict(opt.tree.rain, newdata = meteo.cont.full.test)
# Store the predictions of the 10-leaf tree (same duplication as above)
pred.cont.rain.test.all.10 = predict(opt.tree.rain.10, newdata = meteo.cont.full.test)
pred.cont.rain.test.10 = predict(opt.tree.rain.10, newdata = meteo.cont.full.test)
# Measure the accuracy of the binary rain/no-rain classification
print("Accuracy de la clasificación lluvia/no-lluvia")
100*sum(diag(table(meteo.rain.test[,"rain"], pred.test))) / length(pred.test)
# Also eyeball the first 20 predictions against the original test dataset
meteo.rain.test[1:20,"rain"]
pred.test[1:20]
# And the confusion matrix, to evaluate the false positives/negatives
table(pred.test, meteo.rain.test$rain)
table(pred.test)
table(meteo.rain.test$rain)
# Build the complete prediction: the factor levels "0"/"1" map to 1/2
# under as.numeric(), so subtracting 1 recovers the 0/1 occurrence mask,
# which then gates the continuous amount (no-rain days forced to 0).
pred.test.num <- as.numeric(pred.test) - 1
pred.cont.full.test.complete <- pred.test.num*pred.cont.full.test
# Plot the original values against the prediction
plot(pred.cont.full.test.complete, meteo.cont.full.test[,'y'])
abline(0,1)
# RMSE - lower is better
rmse.full <- sqrt(mean((meteo.cont.full.test[,'y'] - pred.cont.full.test.complete)^2))
rmse.full
# Correlation - higher is better
corr.full <- cor(meteo.cont.full.test[,'y'], pred.cont.full.test.complete, method = "spearman")
corr.full
# Variance ratio - better when close to 1
variance.ratio.full <- var(pred.cont.full.test.complete) / var(meteo.cont.full.test[,'y'])
variance.ratio.full
# Build the complete prediction (occurrence mask x rain-only-tree amounts)
pred.test.num <- as.numeric(pred.test) - 1
pred.cont.rain.test.complete <- pred.test.num*pred.cont.rain.test
# Plot the original values against the prediction
plot(pred.cont.rain.test.complete, meteo.cont.full.test[,'y'])
abline(0,1)
# RMSE - lower is better
rmse.rain <- sqrt(mean((meteo.cont.full.test[,'y'] - pred.cont.rain.test.complete)^2))
rmse.rain
# Correlation - higher is better
corr.rain <- cor(meteo.cont.full.test[,'y'], pred.cont.rain.test.complete, method = "spearman")
corr.rain
# Variance ratio - better when close to 1
variance.ratio.rain <- var(pred.cont.rain.test.complete) / var(meteo.cont.full.test[,'y'])
variance.ratio.rain
# Same evaluation, but with the 10-leaf tree.
# Build the complete prediction (occurrence mask x 10-leaf-tree amounts)
pred.test.num <- as.numeric(pred.test) - 1
pred.cont.rain.test.complete.10 <- pred.test.num*pred.cont.rain.test.10
# Plot the original values against the prediction
plot(pred.cont.rain.test.complete.10, meteo.cont.full.test[,'y'])
abline(0,1)
# RMSE - lower is better
rmse.rain <- sqrt(mean((meteo.cont.full.test[,'y'] - pred.cont.rain.test.complete.10)^2))
rmse.rain
# Correlation - higher is better
corr.rain <- cor(meteo.cont.full.test[,'y'], pred.cont.rain.test.complete.10, method = "spearman")
corr.rain
# Variance ratio - better when close to 1.
# BUG FIX: the original computed var(pred.cont.rain.test.complete), i.e.
# the 5-leaf prediction, so this ratio never evaluated the 10-leaf tree.
variance.ratio.rain <- var(pred.cont.rain.test.complete.10) / var(meteo.cont.full.test[,'y'])
variance.ratio.rain
library(randomForest)
# Train/test datasets already prepared:
# Classification:
# - All days (rain and no-rain):
# - training: meteo.rain.train
# - test: meteo.rain.test
# Continuous prediction:
# - All days (rain and no-rain):
# - training: meteo.cont.full.train
# - test: meteo.cont.full.test
# - Rainy days only:
# - training: meteo.cont.rain.train
# - test: meteo.cont.rain.test
# Train a random forest to predict rain occurrence
rf <- randomForest(rain ~ .,
data = meteo.rain.train,
ntree = 1000,
importance=TRUE,
proximity=TRUE)
rf
# Plot the OOB error as a function of the number of trees
# (err.rate[, 1] is the overall OOB error column)
plot(rf$err.rate[, 1], type = "l", xlab = "no. trees", ylab = "OOBerror",ylim = c(0.13,0.16))
grid()
# Retrain with 180 trees (the OOB curve flattens there) and predict on test
rf.opt <- randomForest(rain ~ .,
data = meteo.rain.train,
ntree = 180,
importance=TRUE,
proximity=TRUE)
rf.opt
pred.rf.test = predict(rf.opt, meteo.rain.test)
# Train a random forest for the rain amount (continuous y)
# using all events (rain and no-rain days)
rf.cont.full <- randomForest(y ~ .,
data = meteo.cont.full.train,
ntree = 1000,
importance=TRUE,
proximity=TRUE)
rf.cont.full
# Plot the OOB error (MSE for regression forests) vs. number of trees
plot(rf.cont.full$mse, type = "l", xlab = "no. trees", ylab = "OOBerror", ylim = c(25,30))
grid()
# Retrain with 200 trees and predict on the full test set
rf.cont.full.opt <- randomForest(y ~ .,
data = meteo.cont.full.train,
ntree = 200,
importance=TRUE,
proximity=TRUE)
rf.cont.full.opt
pred.rf.full.opt.test = predict(rf.cont.full.opt, meteo.cont.full.test)
# Train a random forest for the rain amount (continuous y)
# using rainy days only
rf.cont.rain <- randomForest(y ~ .,
data = meteo.cont.rain.train,
ntree = 1000,
importance=TRUE,
proximity=TRUE)
rf.cont.rain
# Plot the OOB error (MSE) vs. number of trees
plot(rf.cont.rain$mse, type = "l", xlab = "no. trees", ylab = "OOBerror", ylim = c(110,120))
grid()
# Retrain with 200 trees and predict; note the prediction is made on the
# full test set (rain and no-rain days), to be masked by the classifier later
rf.cont.rain.opt <- randomForest(y ~ .,
data = meteo.cont.rain.train,
ntree = 200,
importance=TRUE,
proximity=TRUE)
rf.cont.rain.opt
pred.rf.cont.rain.opt = predict(rf.cont.rain.opt, meteo.cont.full.test)
# Measure the accuracy of the binary rain/no-rain classification
print("Accuracy de la clasificación lluvia/no-lluvia")
100*sum(diag(table(meteo.rain.test[,"rain"], pred.rf.test))) / length(pred.rf.test)
# Confusion matrix, to assess false positives/negatives
table(pred.rf.test, meteo.rain.test$rain)
# Also eyeball the first 20 predictions against the original test labels
meteo.rain.test[1:20,"rain"]
pred.rf.test[1:20]
table(pred.rf.test)
table(meteo.rain.test$rain)
# Complete prediction: RF amount (full model) times RF occurrence (0/1)
pred.complete.full <- pred.rf.full.opt.test * (as.numeric(pred.rf.test) - 1)
# Plot the observed values against the prediction
plot(pred.complete.full, meteo.cont.full.test[,'y'])
abline(0,1)
# RMSE - lower is better
rmse.rf.full <- sqrt(mean((meteo.cont.full.test[,'y'] - pred.complete.full)^2))
rmse.rf.full
# Correlation - higher is better
corr.rf.full <- cor(meteo.cont.full.test[,'y'], pred.complete.full, method = "spearman")
corr.rf.full
# Variance ratio - better if close to 1
variance.ratio.rf.full <- var(pred.complete.full) / var(meteo.cont.full.test[,'y'])
variance.ratio.rf.full
# Complete prediction: RF amount (rain-only model) times RF occurrence (0/1)
pred.complete.rain <- pred.rf.cont.rain.opt * (as.numeric(pred.rf.test) - 1)
# Plot the observed values against the prediction
plot(pred.complete.rain, meteo.cont.full.test[,'y'])
abline(0,1)
# RMSE - lower is better
rmse.rf.rain <- sqrt(mean((meteo.cont.full.test[,'y'] - pred.complete.rain)^2))
# FIX: print the random-forest RMSE just computed; the original printed the
# stale tree-based 'rmse.rain' left over from the decision-tree section
rmse.rf.rain
# Correlation - higher is better
corr.rf.rain <- cor(meteo.cont.full.test[,'y'], pred.complete.rain, method = "spearman")
corr.rf.rain
# Variance ratio - better if close to 1
variance.ratio.rf.rain <- var(pred.complete.rain) / var(meteo.cont.full.test[,'y'])
variance.ratio.rf.rain
# Train/test datasets already prepared:
# Classification:
# - All days (rain and no-rain):
# - training: meteo.rain.train
# - test: meteo.rain.test
# Continuous prediction:
# - All days (rain and no-rain):
# - training: meteo.cont.full.train
# - test: meteo.cont.full.test
# - Rainy days only:
# - training: meteo.cont.rain.train
# - test: meteo.cont.rain.test
# Fit a logistic-regression model for rain occurrence
lin.model <- glm(rain ~ .,
data = meteo.rain.train,
family = binomial(link = "logit"))
lin.model
# Turn the continuous output into a categorical one (rain/no-rain)
# using a 0.5 threshold (fitted.values are on the probability scale)
out.lin.model <- lin.model$fitted.values
out.bin.lin.model <- as.double(out.lin.model > 0.5)
# Histogram of the model output
hist(out.lin.model, main = "Histograma de salida del modelo")
# Histogram of the model output (as a categorical variable)
hist(out.bin.lin.model, main = "Histograma de salida del modelo (con variable categorica)")
# Rain/no-rain classification on the test set
out.test.rain <- predict(object = lin.model, newdata = meteo.rain.test);
# NOTE(review): predict() is called without type = "response", so
# out.test.rain is on the logit (link) scale; thresholding at 0.5 here
# corresponds to a probability of ~0.62, not 0.5. A tuned link-scale
# threshold (-0.03) is applied later, so this is left as-is — confirm intent.
out.bin.test.rain <- as.double(out.test.rain > 0.5)
# Hit rate (test)
print("Accuracy sobre el datset de test:")
100*sum(diag(table(meteo.rain.test$rain, out.bin.test.rain))) / length(out.bin.test.rain)
print(paste("Días con lluvia (1) y sin lluvia (0) en el dataset de test:"))
table(meteo.rain.test$rain)
# Test confusion matrix
print("Matriz de confusión de test:")
table(meteo.rain.test$rain, out.bin.test.rain)
# Fit the model for the rain amount.
# Only rainy days are used because the Gamma family
# requires strictly positive responses
lin.model.cont <- glm(y ~ .,
data = meteo.cont.rain.train,
family = Gamma(link = "inverse"))
lin.model.cont
# Inspect the model output
out.lin.model.cont <- lin.model.cont$fitted.values
# Histogram of the model output
hist(out.lin.model.cont,
breaks = 100,
xlim = c(0,100),
main = "Histograma de salida del modelo")
# How the model predicts on the test set
# with rainy days only
out.test.rain.cont <- predict(object = lin.model.cont,
newdata = meteo.cont.rain.test,
type = "response")
out.test.rain.cont[1:20]
# How the model predicts on the test set
# with both rainy and non-rainy days
out.train.rain.cont.all <- predict(object = lin.model.cont,
newdata = meteo.cont.full.test,
type = "response")
out.train.rain.cont.all[1:20]
# Build the complete predictions:
# multiply the rain/no-rain classification by the predicted amount
glm.complete.rain <- out.train.rain.cont.all * (as.numeric(out.bin.test.rain))
# Print the first 20 predictions next to the observed values
for (i in 1:20){
print(paste(
format(round(glm.complete.rain[i],2), nsmall = 2), " ",
format(round(meteo.cont.full.test[i,1],2), nsmall = 2))
)
}
# Plot the observed values against the prediction
plot(glm.complete.rain, meteo.cont.full.test[,'y'])
abline(0,1)
# RMSE - lower is better
rmse.lm.full <- sqrt(mean((meteo.cont.full.test[,'y'] - glm.complete.rain)^2))
rmse.lm.full
# Correlation - higher is better
corr.lm.full <- cor(meteo.cont.full.test[,'y'], glm.complete.rain, method = "spearman")
corr.lm.full
# Variance ratio - better if close to 1
variance.ratio.lm.rain <- var(glm.complete.rain) / var(meteo.cont.full.test[,'y'])
variance.ratio.lm.rain
# Train/test datasets already prepared:
# Classification:
# - All days (rain and no-rain):
# - training: meteo.rain.train
# - test: meteo.rain.test
# Continuous prediction:
# - All days (rain and no-rain):
# - training: meteo.cont.full.train
# - test: meteo.cont.full.test
#
# - Rainy days only:
# - training: meteo.cont.rain.train
# - test: meteo.cont.rain.test
# Look at the magnitude of the variables (column means across the 38 columns)
plot(1:38,apply(meteo.cont.full.train,2,mean),
xlab = "variable",
ylab = "range [a.u.]",
main = "Mean values of the dataset variables")
# Use scale so all variables share the same order of magnitude
# (important for distance-based KNN)
# Training dataset (for KNN, effectively the reference/calibration set)
meteo.cont.full.train.scale <- scale(meteo.cont.full.train, center = TRUE, scale = TRUE)
# Test dataset
meteo.cont.full.test.scale <- scale(meteo.cont.full.test, center = TRUE, scale = TRUE)
# Look at the variable magnitudes again after scaling
plot(1:38,apply(meteo.cont.full.train.scale,2,mean),
xlab = "variable",
ylab = "range [a.u.]",
main = "Mean values of the dataset variables")
# Load the required libraries
library(FNN)
library(caret)
# 10-fold cross-validation to tune the number of neighbours
ctrl <- trainControl(method = "cv",
number = 10)
# Scan over the number of nearest neighbours (k = 1..50)
knn <- train(y ~ .,
data = meteo.cont.full.train,
method = "knn",
preProcess = c("center", "scale"),
trControl = ctrl,
tuneGrid = expand.grid(k = 1:50))
plot(knn)
# knn.reg(train, test = NULL, y, k = 3, algorithm=c("kd_tree", "cover_tree", "brute"))
# Predict on the test set using k = 20 nearest neighbours
# (column 1 is the target y, hence the [,-1])
k = 20
pred.knn.opt <- knn.reg(train = scale(meteo.cont.full.train[,-1]),
test = scale(meteo.cont.full.test[,-1]),
y = meteo.cont.full.train$y,
k = k)
# Same again, but with a single neighbour (k = 1)
k = 1
pred.knn.1 <- knn.reg(train = scale(meteo.cont.full.train[,-1]),
test = scale(meteo.cont.full.test[,-1]),
y = meteo.cont.full.train$y,
k = k)
# Plot the observed values against the prediction - k = 20 case
plot(pred.knn.opt$pred, meteo.cont.full.test[,'y'])
abline(0,1)
# RMSE - lower is better
rmse.knn.full <- sqrt(mean((meteo.cont.full.test[,'y'] - pred.knn.opt$pred)^2))
rmse.knn.full
# Correlation - higher is better
corr.knn.full <- cor(meteo.cont.full.test[,'y'], pred.knn.opt$pred, method = "spearman")
corr.knn.full
# Variance ratio - better if close to 1
variance.ratio.knn.full <- var(pred.knn.opt$pred) / var(meteo.cont.full.test[,'y'])
variance.ratio.knn.full
# Plot the observed values against the prediction - k = 1 case
plot(pred.knn.1$pred, meteo.cont.full.test[,'y'])
abline(0,1)
# RMSE - lower is better
rmse.knn.full.1 <- sqrt(mean((meteo.cont.full.test[,'y'] - pred.knn.1$pred)^2))
rmse.knn.full.1
# Correlation - higher is better
corr.knn.full.1 <- cor(meteo.cont.full.test[,'y'], pred.knn.1$pred, method = "spearman")
corr.knn.full.1
# Variance ratio - better if close to 1
variance.ratio.knn.full.1 <- var(pred.knn.1$pred) / var(meteo.cont.full.test[,'y'])
variance.ratio.knn.full.1
# Re-threshold the GLM classifier output at -0.03 (on the logit/link scale,
# since out.test.rain was produced without type = "response")
out.bin.test.rain.003 <- as.double(out.test.rain > -0.03)
# Test confusion matrix
print("Matriz de confusión de test:")
table(meteo.rain.test$rain, out.bin.test.rain.003)
# GLM classification + regression-tree amounts
pred.glm.tree <- out.bin.test.rain.003*pred.cont.rain.test
# Plot the observed values against the prediction
plot(pred.glm.tree, meteo.cont.full.test[,'y'])
abline(0,1)
# RMSE - lower is better
rmse.glm.tree <- sqrt(mean((meteo.cont.full.test[,'y'] - pred.glm.tree)^2))
rmse.glm.tree
# Correlation - higher is better
corr.glm.tree <- cor(meteo.cont.full.test[,'y'], pred.glm.tree, method = "spearman")
corr.glm.tree
# Variance ratio - better if close to 1
variance.glm.tree <- var(pred.glm.tree) / var(meteo.cont.full.test[,'y'])
variance.glm.tree
# GLM classification + random-forest amounts
pred.glm.rf <- out.bin.test.rain.003*pred.rf.cont.rain.opt
# Plot the observed values against the prediction
plot(pred.glm.rf, meteo.cont.full.test[,'y'])
abline(0,1)
# RMSE - lower is better
rmse.glm.rf <- sqrt(mean((meteo.cont.full.test[,'y'] - pred.glm.rf)^2))
rmse.glm.rf
# Correlation - higher is better
corr.glm.rf <- cor(meteo.cont.full.test[,'y'], pred.glm.rf, method = "spearman")
corr.glm.rf
# Variance ratio - better if close to 1
variance.glm.rf <- var(pred.glm.rf) / var(meteo.cont.full.test[,'y'])
variance.glm.rf
# GLM classification + GLM (Gamma) amounts
pred.glm.glm <- out.bin.test.rain.003*out.train.rain.cont.all
# Plot the observed values against the prediction
plot(pred.glm.glm, meteo.cont.full.test[,'y'])
abline(0,1)
# RMSE - lower is better
rmse.glm.glm <- sqrt(mean((meteo.cont.full.test[,'y'] - pred.glm.glm)^2))
rmse.glm.glm
# Correlation - higher is better
corr.glm.glm <- cor(meteo.cont.full.test[,'y'], pred.glm.glm, method = "spearman")
corr.glm.glm
# Variance ratio - better if close to 1
variance.glm.glm <- var(pred.glm.glm) / var(meteo.cont.full.test[,'y'])
variance.glm.glm
# GLM classification + KNN amounts (k = 20)
pred.glm.knn.20 <- out.bin.test.rain.003*pred.knn.opt$pred
# Plot the observed values against the prediction
plot(pred.glm.knn.20, meteo.cont.full.test[,'y'])
abline(0,1)
# RMSE - lower is better
rmse.glm.knn.20 <- sqrt(mean((meteo.cont.full.test[,'y'] - pred.glm.knn.20)^2))
rmse.glm.knn.20
# Correlation - higher is better
corr.glm.knn.20 <- cor(meteo.cont.full.test[,'y'], pred.glm.knn.20, method = "spearman")
corr.glm.knn.20
# Variance ratio - better if close to 1
variance.glm.knn.20 <- var(pred.glm.knn.20) / var(meteo.cont.full.test[,'y'])
variance.glm.knn.20
# GLM classification + KNN amounts (k = 1)
pred.glm.knn.1 <- out.bin.test.rain.003*pred.knn.1$pred
# Plot the observed values against the prediction
plot(pred.glm.knn.1, meteo.cont.full.test[,'y'])
abline(0,1)
# RMSE - lower is better
rmse.glm.knn.1 <- sqrt(mean((meteo.cont.full.test[,'y'] - pred.glm.knn.1)^2))
rmse.glm.knn.1
# Correlation - higher is better
corr.glm.knn.1 <- cor(meteo.cont.full.test[,'y'], pred.glm.knn.1, method = "spearman")
corr.glm.knn.1
# Variance ratio - better if close to 1
variance.glm.knn.1 <- var(pred.glm.knn.1) / var(meteo.cont.full.test[,'y'])
variance.glm.knn.1
| /DataMining/Practica3/Practica3_NicoloTrevisani.r | no_license | NTrevisani/DataScienceMaster | R | false | false | 24,674 | r | # Cargo el dataset
# Load the dataset; first column holds the row names
# FIX: spell out 'row.names' — the original 'row.name' only worked through
# R's partial argument matching
meteo <- read.csv("meteo.csv", row.names = 1)
# Keep only the variables with a large correlation with y:
# |corr(y, Xn)| > 0.4
# Hoist the correlation vector so cor() runs once instead of twice
target.cor <- cor(meteo, method = "spearman")[1,]
meteo.slim <- meteo[target.cor > 0.4 | target.cor < -0.4]
ncol(meteo.slim)
head(meteo.slim)
# Fix the seed so the train/test split is reproducible
set.seed(23)
# Split into train and test sets. Only the first 5000 days are used
n <- 5000
# training index
indtrain <- sample(1:n, round(0.75*n))
meteo.slim.train <- meteo.slim[indtrain,]
# test index
indtest <- setdiff(1:n, indtrain)
meteo.slim.test <- meteo.slim[indtest,]
# Training dataset: meteo.slim.train
# Test dataset: meteo.slim.test
# Turn the target variable into a categorical one (rain if y >= 1)
rain <- ifelse(meteo.slim$y < 1, 0, 1)
meteo.slim$rain <- as.factor(rain)
# Load the library needed for trees
library(tree)
# Drop 'y' and keep only 'rain'
meteo.rain <- meteo.slim[,2:ncol(meteo.slim)]
head(meteo.rain)
# Grow an unrestricted classification tree
# (mincut/minsize/mindev settings allow it to grow to purity)
t.tree = tree(formula = rain ~ .,
data = meteo.rain,
subset = indtrain,
control = tree.control(length(indtrain),
mincut = 1,
minsize = 2,
mindev = 0))
# Plot the tree
plot(t.tree)
text(t.tree, pretty = F)
# Count the terminal leaves
print(paste("El arbol completo tiene",
length(t.tree$frame$var[t.tree$frame$var == '<leaf>']), "hojas"))
# Now use cross-validation to prune the full tree,
# making it more generalisable / more useful for prediction
tree.cv <- cv.tree(object = t.tree,
FUN = prune.tree,
K = 10)
# Plot the deviance as a function of tree size
plot(tree.cv$size, tree.cv$dev / length(indtrain), type = "b",
xlab = "Tree Size", ylab = "CV-RMSE",
xlim = c(0,10))
# Prune the full tree according to the cross-validation results
opt.tree <- prune.tree(t.tree, best = 6)
# Plot the optimal tree
plot(opt.tree)
text(opt.tree, pretty = F)
summary(opt.tree)
# Prepare the train and test datasets
meteo.rain.train <- meteo.rain[indtrain,]
meteo.rain.test <- meteo.rain[-indtrain,]
# Store the optimal tree's predictions
pred.train = predict(opt.tree, newdata = meteo.rain.train, type = "class")
pred.test = predict(opt.tree, newdata = meteo.rain.test, type = "class")
# Build the training dataset with rainy days only.
# 'cont' in the name means ~continuous (rain is no longer a factor)
meteo.cont.rain <- meteo.slim[meteo.slim$rain == 1,]
# Drop the last column ("rain"). The original x[1:ncol(x)-1] only worked via
# the precedence quirk (1:n)-1 == 0:(n-1), where the 0 index is silently
# ignored; negative indexing states the intent directly (same result).
meteo.cont.rain <- meteo.cont.rain[-ncol(meteo.cont.rain)]
head(meteo.cont.rain)
nrow(meteo.cont.rain)
# Build the training dataset with both
# rainy and non-rainy days
# 'cont' in the name means ~continuous (rain is no longer a factor)
meteo.cont <- meteo.slim[-ncol(meteo.slim)]
head(meteo.cont)
nrow(meteo.cont)
# Grow an unrestricted regression tree
# using the complete training dataset (rain and no-rain days)
# NOTE(review): 'method = "cubist"' is not a supported tree() method
# (only "recursive.partition"/"model.frame") and is likely ignored — confirm
full.tree = tree(formula = y ~ .,
data = meteo.cont,
subset = indtrain,
method = "cubist",
control = tree.control(length(indtrain),
mincut = 1,
minsize = 2,
mindev = 0))
# Plot the tree
plot(full.tree)
text(full.tree, pretty = F)
# Count the terminal leaves
print(paste("El arbol completo tiene",
length(full.tree$frame$var[full.tree$frame$var == '<leaf>']), "hojas"))
# Now use cross-validation to prune the full tree,
# making it more generalisable / more useful for prediction
full.tree.cv <- cv.tree(object = full.tree,
FUN = prune.tree,
K = 10)
# Plot the deviance as a function of tree size
plot(full.tree.cv$size, full.tree.cv$dev / length(indtrain), type = "b",
xlab = "Tree Size", ylab = "CV-RMSE",
xlim = c(0,30))
# Prune the full tree according to the cross-validation results
opt.tree.full <- prune.tree(full.tree, best = 10)
# Plot the optimal tree
plot(opt.tree.full)
text(opt.tree.full, pretty = F)
summary(opt.tree.full)
print(opt.tree.full)
# Prepare the train and test datasets
meteo.cont.full.train <- meteo.cont[indtrain,]
meteo.cont.full.test <- meteo.cont[-indtrain,]
# Store the predictions of the optimal tree trained on all events (full)
pred.cont.full.train = predict(opt.tree.full, newdata = meteo.cont.full.train)
pred.cont.full.test = predict(opt.tree.full, newdata = meteo.cont.full.test)
# Grow an unrestricted regression tree
# using the training dataset with rainy days only
rain.tree = tree(formula = y ~ .,
data = meteo.cont.rain,
subset = indtrain,
method = "cubist", # NOTE(review): not a supported tree() method; likely ignored
control = tree.control(length(indtrain),
mincut = 1,
minsize = 2,
mindev = 0))
# Plot the tree
plot(rain.tree)
text(rain.tree, pretty = F)
# Count the terminal leaves
print(paste("El arbol completo entrenado solo con días de lluvia tiene",
length(rain.tree$frame$var[rain.tree$frame$var == '<leaf>']), "hojas"))
# Now use cross-validation to prune the full tree,
# making it more generalisable / more useful for prediction
rain.tree.cv <- cv.tree(object = rain.tree,
FUN = prune.tree,
K = 10)
# Plot the deviance as a function of tree size
plot(rain.tree.cv$size, rain.tree.cv$dev / length(indtrain), type = "b",
xlab = "Tree Size", ylab = "CV-RMSE",
xlim = c(0,10))
# Prune the full tree according to the cross-validation results.
# The optimal tree should have 5 terminal leaves.
opt.tree.rain <- prune.tree(rain.tree, best = 5)
# Plot the optimal tree
plot(opt.tree.rain)
text(opt.tree.rain, pretty = F)
print(opt.tree.rain)
# One might worry that using only 5 leaves limits the tree's ability
# to describe the variability of the rain amount. Try 10 leaves as well.
opt.tree.rain.10 <- prune.tree(rain.tree, best = 10)
# Plot the optimal tree
plot(opt.tree.rain.10)
text(opt.tree.rain.10, pretty = F)
print(opt.tree.rain.10)
# Prepare the train and test datasets.
# na.omit is needed because indtrain indexes past the end of the
# smaller rain-only frame, producing NA rows.
meteo.cont.rain.train <- na.omit(meteo.cont.rain[indtrain,])
meteo.cont.rain.test <- na.omit(meteo.cont.rain[-indtrain,])
# Store the optimal tree's predictions
# NOTE(review): both calls below predict on the same newdata
# (meteo.cont.full.test); presumably one was meant to use
# meteo.cont.rain.test — confirm against the author's intent
pred.cont.rain.test.all = predict(opt.tree.rain, newdata = meteo.cont.full.test)
pred.cont.rain.test = predict(opt.tree.rain, newdata = meteo.cont.full.test)
# Store the 10-leaf tree's predictions (same duplication as above)
pred.cont.rain.test.all.10 = predict(opt.tree.rain.10, newdata = meteo.cont.full.test)
pred.cont.rain.test.10 = predict(opt.tree.rain.10, newdata = meteo.cont.full.test)
# Measure the accuracy of the binary rain/no-rain classification
print("Accuracy de la clasificación lluvia/no-lluvia")
100*sum(diag(table(meteo.rain.test[,"rain"], pred.test))) / length(pred.test)
# Also eyeball the first 20 predictions against the original test labels
meteo.rain.test[1:20,"rain"]
pred.test[1:20]
# Confusion matrix, to assess false positives/negatives
table(pred.test, meteo.rain.test$rain)
table(pred.test)
table(meteo.rain.test$rain)
# Build the complete prediction:
# occurrence class (factor -> 0/1) times predicted amount from the full-data tree
pred.test.num <- as.numeric(pred.test) - 1
pred.cont.full.test.complete <- pred.test.num*pred.cont.full.test
# Plot the observed values against the prediction
plot(pred.cont.full.test.complete, meteo.cont.full.test[,'y'])
abline(0,1)
# RMSE - lower is better
rmse.full <- sqrt(mean((meteo.cont.full.test[,'y'] - pred.cont.full.test.complete)^2))
rmse.full
# Correlation - higher is better
corr.full <- cor(meteo.cont.full.test[,'y'], pred.cont.full.test.complete, method = "spearman")
corr.full
# Variance ratio - better if close to 1
variance.ratio.full <- var(pred.cont.full.test.complete) / var(meteo.cont.full.test[,'y'])
variance.ratio.full
# Build the complete prediction, this time with the rain-only tree amounts
pred.test.num <- as.numeric(pred.test) - 1
pred.cont.rain.test.complete <- pred.test.num*pred.cont.rain.test
# Plot the observed values against the prediction
plot(pred.cont.rain.test.complete, meteo.cont.full.test[,'y'])
abline(0,1)
# RMSE - lower is better
rmse.rain <- sqrt(mean((meteo.cont.full.test[,'y'] - pred.cont.rain.test.complete)^2))
rmse.rain
# Correlation - higher is better
corr.rain <- cor(meteo.cont.full.test[,'y'], pred.cont.rain.test.complete, method = "spearman")
corr.rain
# Variance ratio - better if close to 1
variance.ratio.rain <- var(pred.cont.rain.test.complete) / var(meteo.cont.full.test[,'y'])
variance.ratio.rain
# Same evaluation, but with the 10-leaf tree
# Build the complete prediction
pred.test.num <- as.numeric(pred.test) - 1
pred.cont.rain.test.complete.10 <- pred.test.num*pred.cont.rain.test.10
# Plot the observed values against the prediction
plot(pred.cont.rain.test.complete.10, meteo.cont.full.test[,'y'])
abline(0,1)
# RMSE - lower is better
# NOTE(review): this overwrites the 5-leaf rmse.rain computed just above
rmse.rain <- sqrt(mean((meteo.cont.full.test[,'y'] - pred.cont.rain.test.complete.10)^2))
rmse.rain
# Correlation - higher is better
corr.rain <- cor(meteo.cont.full.test[,'y'], pred.cont.rain.test.complete.10, method = "spearman")
corr.rain
# Variance ratio - better if close to 1
# FIX: use the 10-leaf prediction vector here; the original reused the 5-leaf
# vector pred.cont.rain.test.complete, so the printed ratio did not describe
# the 10-leaf tree at all
variance.ratio.rain <- var(pred.cont.rain.test.complete.10) / var(meteo.cont.full.test[,'y'])
variance.ratio.rain
library(randomForest)
# Train/test datasets already prepared:
# Classification:
# - All days (rain and no-rain):
# - training: meteo.rain.train
# - test: meteo.rain.test
# Continuous prediction:
# - All days (rain and no-rain):
# - training: meteo.cont.full.train
# - test: meteo.cont.full.test
# - Rainy days only:
# - training: meteo.cont.rain.train
# - test: meteo.cont.rain.test
# Train a random forest to predict rain occurrence
rf <- randomForest(rain ~ .,
data = meteo.rain.train,
ntree = 1000,
importance=TRUE,
proximity=TRUE)
rf
# Plot the OOB error as a function of the number of trees
# (err.rate[, 1] is the overall OOB error column)
plot(rf$err.rate[, 1], type = "l", xlab = "no. trees", ylab = "OOBerror",ylim = c(0.13,0.16))
grid()
# Retrain with 180 trees (the OOB curve flattens there) and predict on test
rf.opt <- randomForest(rain ~ .,
data = meteo.rain.train,
ntree = 180,
importance=TRUE,
proximity=TRUE)
rf.opt
pred.rf.test = predict(rf.opt, meteo.rain.test)
# Train a random forest for the rain amount (continuous y)
# using all events (rain and no-rain days)
rf.cont.full <- randomForest(y ~ .,
data = meteo.cont.full.train,
ntree = 1000,
importance=TRUE,
proximity=TRUE)
rf.cont.full
# Plot the OOB error (MSE for regression forests) vs. number of trees
plot(rf.cont.full$mse, type = "l", xlab = "no. trees", ylab = "OOBerror", ylim = c(25,30))
grid()
# Retrain with 200 trees and predict on the full test set
rf.cont.full.opt <- randomForest(y ~ .,
data = meteo.cont.full.train,
ntree = 200,
importance=TRUE,
proximity=TRUE)
rf.cont.full.opt
pred.rf.full.opt.test = predict(rf.cont.full.opt, meteo.cont.full.test)
# Train a random forest for the rain amount (continuous y)
# using rainy days only
rf.cont.rain <- randomForest(y ~ .,
data = meteo.cont.rain.train,
ntree = 1000,
importance=TRUE,
proximity=TRUE)
rf.cont.rain
# Plot the OOB error (MSE) vs. number of trees
plot(rf.cont.rain$mse, type = "l", xlab = "no. trees", ylab = "OOBerror", ylim = c(110,120))
grid()
# Retrain with 200 trees and predict; note the prediction is made on the
# full test set (rain and no-rain days), to be masked by the classifier later
rf.cont.rain.opt <- randomForest(y ~ .,
data = meteo.cont.rain.train,
ntree = 200,
importance=TRUE,
proximity=TRUE)
rf.cont.rain.opt
pred.rf.cont.rain.opt = predict(rf.cont.rain.opt, meteo.cont.full.test)
# Measure the accuracy of the binary rain/no-rain classification
print("Accuracy de la clasificación lluvia/no-lluvia")
100*sum(diag(table(meteo.rain.test[,"rain"], pred.rf.test))) / length(pred.rf.test)
# Confusion matrix, to assess false positives/negatives
table(pred.rf.test, meteo.rain.test$rain)
# Also eyeball the first 20 predictions against the original test labels
meteo.rain.test[1:20,"rain"]
pred.rf.test[1:20]
table(pred.rf.test)
table(meteo.rain.test$rain)
# Complete prediction: RF amount (full model) times RF occurrence (0/1)
pred.complete.full <- pred.rf.full.opt.test * (as.numeric(pred.rf.test) - 1)
# Plot the observed values against the prediction
plot(pred.complete.full, meteo.cont.full.test[,'y'])
abline(0,1)
# RMSE - lower is better
rmse.rf.full <- sqrt(mean((meteo.cont.full.test[,'y'] - pred.complete.full)^2))
rmse.rf.full
# Correlation - higher is better
corr.rf.full <- cor(meteo.cont.full.test[,'y'], pred.complete.full, method = "spearman")
corr.rf.full
# Variance ratio - better if close to 1
variance.ratio.rf.full <- var(pred.complete.full) / var(meteo.cont.full.test[,'y'])
variance.ratio.rf.full
# Complete prediction: RF amount (rain-only model) times RF occurrence (0/1)
pred.complete.rain <- pred.rf.cont.rain.opt * (as.numeric(pred.rf.test) - 1)
# Plot the observed values against the prediction
plot(pred.complete.rain, meteo.cont.full.test[,'y'])
abline(0,1)
# RMSE - lower is better
rmse.rf.rain <- sqrt(mean((meteo.cont.full.test[,'y'] - pred.complete.rain)^2))
# FIX: print the random-forest RMSE just computed; the original printed the
# stale tree-based 'rmse.rain' left over from the decision-tree section
rmse.rf.rain
# Correlation - higher is better
corr.rf.rain <- cor(meteo.cont.full.test[,'y'], pred.complete.rain, method = "spearman")
corr.rf.rain
# Variance ratio - better if close to 1
variance.ratio.rf.rain <- var(pred.complete.rain) / var(meteo.cont.full.test[,'y'])
variance.ratio.rf.rain
# Train/test datasets already prepared:
# Classification:
# - All days (rain and no-rain):
# - training: meteo.rain.train
# - test: meteo.rain.test
# Continuous prediction:
# - All days (rain and no-rain):
# - training: meteo.cont.full.train
# - test: meteo.cont.full.test
# - Rainy days only:
# - training: meteo.cont.rain.train
# - test: meteo.cont.rain.test
# Fit a logistic-regression model for rain occurrence
lin.model <- glm(rain ~ .,
data = meteo.rain.train,
family = binomial(link = "logit"))
lin.model
# Turn the continuous output into a categorical one (rain/no-rain)
# using a 0.5 threshold (fitted.values are on the probability scale)
out.lin.model <- lin.model$fitted.values
out.bin.lin.model <- as.double(out.lin.model > 0.5)
# Histogram of the model output
hist(out.lin.model, main = "Histograma de salida del modelo")
# Histogram of the model output (as a categorical variable)
hist(out.bin.lin.model, main = "Histograma de salida del modelo (con variable categorica)")
# Rain/no-rain classification on the test set
out.test.rain <- predict(object = lin.model, newdata = meteo.rain.test);
# NOTE(review): predict() is called without type = "response", so
# out.test.rain is on the logit (link) scale; thresholding at 0.5 here
# corresponds to a probability of ~0.62, not 0.5. A tuned link-scale
# threshold (-0.03) is applied later in the script, so this is left as-is.
out.bin.test.rain <- as.double(out.test.rain > 0.5)
# Hit rate (test)
print("Accuracy sobre el datset de test:")
100*sum(diag(table(meteo.rain.test$rain, out.bin.test.rain))) / length(out.bin.test.rain)
print(paste("Días con lluvia (1) y sin lluvia (0) en el dataset de test:"))
table(meteo.rain.test$rain)
# Test confusion matrix
print("Matriz de confusión de test:")
table(meteo.rain.test$rain, out.bin.test.rain)
# Fit the model for the rain amount.
# Only rainy days are used because the Gamma family
# requires strictly positive responses
lin.model.cont <- glm(y ~ .,
data = meteo.cont.rain.train,
family = Gamma(link = "inverse"))
lin.model.cont
# Inspect the model output
out.lin.model.cont <- lin.model.cont$fitted.values
# Histogram of the model output
hist(out.lin.model.cont,
breaks = 100,
xlim = c(0,100),
main = "Histograma de salida del modelo")
# How the model predicts on the test set
# with rainy days only
out.test.rain.cont <- predict(object = lin.model.cont,
newdata = meteo.cont.rain.test,
type = "response")
out.test.rain.cont[1:20]
# How the model predicts on the test set
# with both rainy and non-rainy days
out.train.rain.cont.all <- predict(object = lin.model.cont,
newdata = meteo.cont.full.test,
type = "response")
out.train.rain.cont.all[1:20]
# Build the complete predictions:
# multiply the rain/no-rain classification by the predicted amount
glm.complete.rain <- out.train.rain.cont.all * (as.numeric(out.bin.test.rain))
# Print the first 20 predictions next to the observed values
for (i in 1:20){
print(paste(
format(round(glm.complete.rain[i],2), nsmall = 2), " ",
format(round(meteo.cont.full.test[i,1],2), nsmall = 2))
)
}
# Plot the observed values against the prediction
plot(glm.complete.rain, meteo.cont.full.test[,'y'])
abline(0,1)
# RMSE - lower is better
rmse.lm.full <- sqrt(mean((meteo.cont.full.test[,'y'] - glm.complete.rain)^2))
rmse.lm.full
# Correlation - higher is better
corr.lm.full <- cor(meteo.cont.full.test[,'y'], glm.complete.rain, method = "spearman")
corr.lm.full
# Variance ratio - better if close to 1
variance.ratio.lm.rain <- var(glm.complete.rain) / var(meteo.cont.full.test[,'y'])
variance.ratio.lm.rain
# Training and test datasets now ready:
# Classification:
# - All days (rain and no-rain):
#   - training: meteo.rain.train
#   - test: meteo.rain.test
# Continuous prediction:
# - All days (rain and no-rain):
#   - training: meteo.cont.full.train
#   - test: meteo.cont.full.test
#
# - Rainy days only:
#   - training: meteo.cont.rain.train
#   - test: meteo.cont.rain.test
# Look at the range (mean) of each of the 38 variables
plot(1:38,apply(meteo.cont.full.train,2,mean),
     xlab = "variable",
     ylab = "range [a.u.]",
     main = "Mean values of the dataset variables")
# Standardise so all variables have the same order of magnitude.
# Training set (for knn this is really the calibration set)
meteo.cont.full.train.scale <- scale(meteo.cont.full.train, center = TRUE, scale = TRUE)
# BUG FIX: the test set must be standardised with the TRAINING set's centre
# and scale. Previously it was scaled with its own statistics, which puts
# the two sets in slightly different feature spaces (leakage/inconsistency).
meteo.cont.full.test.scale <- scale(meteo.cont.full.test,
                                    center = attr(meteo.cont.full.train.scale, "scaled:center"),
                                    scale = attr(meteo.cont.full.train.scale, "scaled:scale"))
# Look at the variable ranges after scaling
plot(1:38,apply(meteo.cont.full.train.scale,2,mean),
     xlab = "variable",
     ylab = "range [a.u.]",
     main = "Mean values of the dataset variables")
# Load the required libraries
library(FNN)
library(caret)
# 10-fold cross-validation to tune the number of neighbours
ctrl <- trainControl(method = "cv",
                     number = 10)
# Vary the number of nearest neighbours from 1 to 50
knn <- train(y ~ .,
             data = meteo.cont.full.train,
             method = "knn",
             preProcess = c("center", "scale"),
             trControl = ctrl,
             tuneGrid = expand.grid(k = 1:50))
plot(knn)
# knn.reg(train, test = NULL, y, k = 3, algorithm=c("kd_tree", "cover_tree", "brute"))
# BUG FIX: the test predictors were previously standardised with their own
# mean/sd; they must be standardised with the TRAINING statistics so both
# sets live in the same feature space (as caret's preProcess does).
train.scaled <- scale(meteo.cont.full.train[,-1])
test.scaled <- scale(meteo.cont.full.test[,-1],
                     center = attr(train.scaled, "scaled:center"),
                     scale = attr(train.scaled, "scaled:scale"))
# Predict on the test set using k = 20 nearest neighbours
k = 20
pred.knn.opt <- knn.reg(train = train.scaled,
                        test = test.scaled,
                        y = meteo.cont.full.train$y,
                        k = k)
# Same again with a single neighbour (k = 1)
k = 1
pred.knn.1 <- knn.reg(train = train.scaled,
                      test = test.scaled,
                      y = meteo.cont.full.train$y,
                      k = k)
# Plot the observed values against the prediction - k = 20 case
plot(pred.knn.opt$pred, meteo.cont.full.test[,'y'])
abline(0,1)
# RMSE - lower is better
rmse.knn.full <- sqrt(mean((meteo.cont.full.test[,'y'] - pred.knn.opt$pred)^2))
rmse.knn.full
# Correlation - higher is better
corr.knn.full <- cor(meteo.cont.full.test[,'y'], pred.knn.opt$pred, method = "spearman")
corr.knn.full
# Variance ratio - better when close to 1
variance.ratio.knn.full <- var(pred.knn.opt$pred) / var(meteo.cont.full.test[,'y'])
variance.ratio.knn.full
# Plot the observed values against the prediction - k = 1 case
plot(pred.knn.1$pred, meteo.cont.full.test[,'y'])
abline(0,1)
# RMSE - lower is better
rmse.knn.full.1 <- sqrt(mean((meteo.cont.full.test[,'y'] - pred.knn.1$pred)^2))
rmse.knn.full.1
# Correlation - higher is better
corr.knn.full.1 <- cor(meteo.cont.full.test[,'y'], pred.knn.1$pred, method = "spearman")
corr.knn.full.1
# Variance ratio - better when close to 1
variance.ratio.knn.full.1 <- var(pred.knn.1$pred) / var(meteo.cont.full.test[,'y'])
variance.ratio.knn.full.1
# Alternative classification cutoff: out.test.rain was produced by predict()
# without type = "response", so it is on the log-odds scale; a -0.03 log-odds
# threshold corresponds to a probability of roughly 0.49.
out.bin.test.rain.003 <- as.double(out.test.rain > -0.03)
# Confusion matrix on the test set
print("Matriz de confusión de test:")
table(meteo.rain.test$rain, out.bin.test.rain.003)
# GLM classification + regression tree
# NOTE(review): pred.cont.rain.test is presumably the regression-tree
# prediction created elsewhere in the full script - confirm against caller.
pred.glm.tree <- out.bin.test.rain.003*pred.cont.rain.test
# Plot the observed values against the prediction
plot(pred.glm.tree, meteo.cont.full.test[,'y'])
abline(0,1)
# RMSE - lower is better
rmse.glm.tree <- sqrt(mean((meteo.cont.full.test[,'y'] - pred.glm.tree)^2))
rmse.glm.tree
# Correlation - higher is better
corr.glm.tree <- cor(meteo.cont.full.test[,'y'], pred.glm.tree, method = "spearman")
corr.glm.tree
# Variance ratio - better when close to 1
variance.glm.tree <- var(pred.glm.tree) / var(meteo.cont.full.test[,'y'])
variance.glm.tree
# GLM classification + random forest
# NOTE(review): pred.rf.cont.rain.opt is defined elsewhere in the script.
pred.glm.rf <- out.bin.test.rain.003*pred.rf.cont.rain.opt
# Plot the observed values against the prediction
plot(pred.glm.rf, meteo.cont.full.test[,'y'])
abline(0,1)
# RMSE - lower is better
rmse.glm.rf <- sqrt(mean((meteo.cont.full.test[,'y'] - pred.glm.rf)^2))
rmse.glm.rf
# Correlation - higher is better
corr.glm.rf <- cor(meteo.cont.full.test[,'y'], pred.glm.rf, method = "spearman")
corr.glm.rf
# Variance ratio - better when close to 1
variance.glm.rf <- var(pred.glm.rf) / var(meteo.cont.full.test[,'y'])
variance.glm.rf
# GLM classification + Gamma GLM amount model
pred.glm.glm <- out.bin.test.rain.003*out.train.rain.cont.all
# Plot the observed values against the prediction
plot(pred.glm.glm, meteo.cont.full.test[,'y'])
abline(0,1)
# RMSE - lower is better
rmse.glm.glm <- sqrt(mean((meteo.cont.full.test[,'y'] - pred.glm.glm)^2))
rmse.glm.glm
# Correlation - higher is better
corr.glm.glm <- cor(meteo.cont.full.test[,'y'], pred.glm.glm, method = "spearman")
corr.glm.glm
# Variance ratio - better when close to 1
variance.glm.glm <- var(pred.glm.glm) / var(meteo.cont.full.test[,'y'])
variance.glm.glm
# GLM classification + KNN (k = 20)
pred.glm.knn.20 <- out.bin.test.rain.003*pred.knn.opt$pred
# Plot the observed values against the prediction
plot(pred.glm.knn.20, meteo.cont.full.test[,'y'])
abline(0,1)
# RMSE - lower is better
rmse.glm.knn.20 <- sqrt(mean((meteo.cont.full.test[,'y'] - pred.glm.knn.20)^2))
rmse.glm.knn.20
# Correlation - higher is better
corr.glm.knn.20 <- cor(meteo.cont.full.test[,'y'], pred.glm.knn.20, method = "spearman")
corr.glm.knn.20
# Variance ratio - better when close to 1
variance.glm.knn.20 <- var(pred.glm.knn.20) / var(meteo.cont.full.test[,'y'])
variance.glm.knn.20
# GLM classification + KNN (k = 1)
pred.glm.knn.1 <- out.bin.test.rain.003*pred.knn.1$pred
# Plot the observed values against the prediction
plot(pred.glm.knn.1, meteo.cont.full.test[,'y'])
abline(0,1)
# RMSE - lower is better
rmse.glm.knn.1 <- sqrt(mean((meteo.cont.full.test[,'y'] - pred.glm.knn.1)^2))
rmse.glm.knn.1
# Correlation - higher is better
corr.glm.knn.1 <- cor(meteo.cont.full.test[,'y'], pred.glm.knn.1, method = "spearman")
corr.glm.knn.1
# Variance ratio - better when close to 1
variance.glm.knn.1 <- var(pred.glm.knn.1) / var(meteo.cont.full.test[,'y'])
variance.glm.knn.1
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pca.R
\name{pca_load_plot}
\alias{pca_load_plot}
\title{Plot Loading Score from PCA}
\usage{
pca_load_plot(
pca_recipe,
component = "all",
fill = c("#b6dfe2", "#0A537D"),
nrow = NULL,
ncol = NULL,
...
)
}
\arguments{
\item{pca_recipe}{A recipe object that already contains a \code{step_pca} step}
\item{component}{Integer vector indicating which PCs to show; the default "all" shows every component.}
\item{fill}{(character) Fill of positive & negative values}
\item{nrow}{(numeric) Number of rows in facet}
\item{ncol}{(numeric) Number of columns in facet}
\item{...}{passed to \code{geom_col}}
}
\value{
plot
}
\description{
Plot Loading Score from PCA
}
\examples{
library(magrittr)
library(lbmod)
iris \%>\%
prep_pca() \%>\%
pca_load_plot()
}
| /man/pca_load_plot.Rd | permissive | Lightbridge-KS/lbmod | R | false | true | 833 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pca.R
\name{pca_load_plot}
\alias{pca_load_plot}
\title{Plot Loading Score from PCA}
\usage{
pca_load_plot(
pca_recipe,
component = "all",
fill = c("#b6dfe2", "#0A537D"),
nrow = NULL,
ncol = NULL,
...
)
}
\arguments{
\item{pca_recipe}{A recipe object that already contains a \code{step_pca} step}
\item{component}{Integer vector indicating which PCs to show; the default "all" shows every component.}
\item{fill}{(character) Fill of positive & negative values}
\item{nrow}{(numeric) Number of rows in facet}
\item{ncol}{(numeric) Number of columns in facet}
\item{...}{passed to \code{geom_col}}
}
\value{
plot
}
\description{
Plot Loading Score from PCA
}
\examples{
library(magrittr)
library(lbmod)
iris \%>\%
prep_pca() \%>\%
pca_load_plot()
}
|
library(forecast)
library(tseries)
library(rugarch)
library(xts)

# Exploratory look at the EuStockMarkets data set: plot the four European
# stock indices as zoo/xts series, then fit a multivariate ARIMA model on
# the DAX with the remaining indices as external regressors.
Start <- c(1991, 130)
End <- c(1998, 169)
Frequency <- 260

# Coerce the ts matrix to a zoo object and plot it in several layouts
eu <- zoo(EuStockMarkets)
autoplot(eu, facets = FALSE)
autoplot(eu, facets = TRUE)
autoplot(eu)
index(eu)

# Attach calendar dates and convert to xts for date-aware plotting
trading_dates <- as.Date("1991-05-10") + 1:1860
eux <- xts(eu, order.by = trading_dates)
plot(eux[, "CAC"], main = "CAC Index")
plot(eux, main = "European Index")
plot(eux, multi.panel = TRUE, main = "European Index")

# ARIMA(0,0,5) on the DAX, using columns 2:4 (SMI/CAC/FTSE) as regressors
model1 <- Arima(eu[, "DAX"], order = c(0,0,5), xreg = eu[, c(2:4)])
| /timeSeries/dynamic_forecast.R | no_license | jocoder22/R_DataScience | R | false | false | 924 | r | library(forecast)
library(tseries)
library(rugarch)
library(xts)
# Explore the EuStockMarkets data set
# and plot the series
Start = c(1991, 130)
End = c(1998, 169)
Frequency = 260
# (commented-out date experiments kept for reference)
# dates <- seq(as.Date("2016-01-01"), length = 5, by = "days")
# bday <- as.POSIXct("1899-05-08")
# dates <- as.Date("2016-01-01") + 0:4
# Coerce the ts matrix to a zoo object
eu <- zoo(EuStockMarkets)
autoplot(eu, facets = FALSE)
autoplot(eu, facets = TRUE)
autoplot(eu)
index(eu)
# Convert to xts: attach 1860 daily calendar dates (one per row)
# dates <- seq(as.Date("1991-05-10"), length = 1860, by = "days")
dates <- as.Date("1991-05-10") + 1:1860
eux <- xts(eu, order.by = dates)
plot(eux[, "CAC"], main = "CAC Index")
plot(eux, main = "European Index")
plot(eux, multi.panel = TRUE, main = "European Index")
# Fit ARIMA(0,0,5) on the DAX with the other indices (cols 2:4) as xreg
# auto.arima(eu[, "DAX"], order = c(0,0,5), xreg = eu[, c(2:4)])
model1 <- Arima(eu[, "DAX"], order = c(0,0,5), xreg = eu[, c(2:4)])
|
###############################################################################
###############################################################################
#### ####
#### 完成日期: 2018-07-19 ####
#### 作者:Roddy Hung ####
#### 版本:V4.10 ####
#### ####
#### 第4章範例程式: ####
#### 1.基礎繪圖(圖形)文法的概念 ####
#### 2.美學映射的概念 ####
#### i.基本變數映射設定 ####
#### ii.變數映射到fill、color或形狀等的概念 ####
#### iii.表格轉換成聚集的形式 ####
#### iv.複式長條圖 ####
#### 3.長條圖 ####
#### i.堆疊長條圖(引數position的使用) ####
#### ii.顏色設定 ####
#### iii.外框大小和長條圖寬度的設定 ####
#### 4.直方圖 ####
#### i.統計轉換的概念 ####
#### 5.線圖 ####
#### 6.散佈圖 ####
#### 7.Facets ####
#### 8.Scale ####
#### 9.文字輔助標示與版面配置 ####
#### 10.座標軸設定 ####
#### ####
###############################################################################
###############################################################################
source("common/check_package.R")# check for packages that are not installed yet
source("common/function.R",encoding="utf-8") # load the shared user-defined functions
###############################################################################
####                                                                       ####
####  Functions used from the loaded packages:                             ####
####    readr:     read_csv                                                ####
####    dplyr:     filter,select,rename,mutate                             ####
####    ggplot2:   (used throughout)                                       ####
####    tidyr:     gather,spread                                           ####
####    lubridate: make_date                                               ####
####                                                                       ####
###############################################################################
library(readr)
library(dplyr)
library(ggplot2)
library(tidyr)
library(lubridate)
############################ file loading and setup ###########################
ch4sample.exp1_path="ch4/sample_data/最近一年內曾因家庭緣故影響工作之情形-按無法加班或無法延長工時分(年齡).csv"
ch4sample.exp1_1_path="ch4/sample_data/最近一年內曾因家庭緣故影響工作之情形-按中斷工作或上班時臨時趕回家分(年齡).csv"
ch4sample.exp2_path="ch4/sample_data/成績單.csv"
ch4sample.exp3_path="ch4/sample_data/消費者物價基本分類暨項目群指數.csv"
ch4sample.exp4_path="ch4/sample_data/台灣太陽光電發電量統計表.csv"
# Read each sample file; the first row provides the column names.
ch4sample.exp1<-read_csv(ch4sample.exp1_path,col_names = TRUE)
ch4sample.exp1_1<-read_csv(ch4sample.exp1_1_path,col_names=TRUE)
ch4sample.exp2<-read_csv(ch4sample.exp2_path,col_names=TRUE)
ch4sample.exp3<-read_csv(ch4sample.exp3_path,col_names=TRUE)
ch4sample.exp4<-read_csv(ch4sample.exp4_path,col_names=TRUE)
############################ aesthetic mapping ################################
# Basic mapping: category on x, count on y; stat="identity" uses y as given
ggplot(ch4sample.exp1,aes(x=項目別,y=經常)) + geom_bar(stat="identity")
# Mapping fill to a discrete variable gives one colour per category
ggplot(ch4sample.exp1,aes(x=項目別,y=有時,fill=項目別)) + geom_bar(stat="identity")
# Mapping fill to a continuous variable gives a colour gradient
ggplot(ch4sample.exp1,aes(x=項目別,y=有時,fill=有時)) + geom_bar(stat="identity")
############################## bar graphic ####################################
freq_tag<-c("經常","有時","極少","從不")
ch4sample.exp1_gatable<-gather(ch4sample.exp1,freq_tag,key="頻率",value="人數") # reshape the table into long (gathered) form
ch4sample.exp1_gatable<-rename(ch4sample.exp1_gatable, "年齡區間"="項目別")
ggplot(ch4sample.exp1_gatable,aes(x=年齡區間,y=人數,fill=頻率)) +
geom_bar(position="dodge",stat="identity")
ggplot(ch4sample.exp1_gatable,aes(x=頻率,y=人數,fill=年齡區間)) +
geom_bar(position="dodge",stat="identity")
ggplot(ch4sample.exp1_gatable,aes(x=年齡區間,y=人數,fill=頻率)) +
geom_bar(position="dodge",stat="identity") +
guides(fill=guide_legend(reverse = TRUE)) # reverse the legend (frequency) order
#ch4sample.exp1_gatable$頻率<-factor(ch4sample.exp1_gatable$頻率,levels=freq_tag)# alternative way to change the legend (frequency) order
############################# fill & colour ###################################
exp1.fill<-ggplot(ch4sample.exp1_gatable,aes(x=年齡區間,y=人數,fill=頻率))
exp1.colour<-ggplot(ch4sample.exp1_gatable,aes(x=年齡區間,y=人數,colour=頻率))
exp1.fill + geom_bar(stat="identity",position="stack")# explicit position="stack"
exp1.fill + geom_bar(stat="identity")# default position is "stack"
exp1.fill + geom_bar(stat="identity",position="fill")# position="fill" normalises each bar to 1
ggplot(ch4sample.exp1,aes(x=項目別,y=有時,colour=項目別)) +
geom_bar(stat="identity",fill="white")
ggplot(ch4sample.exp1,aes(x=項目別,y=有時,colour=有時)) +
geom_bar(stat="identity",fill="white")
exp1.colour + geom_bar(stat="identity", position="dodge", fill="white")
exp1.colour + geom_bar(stat="identity", position="dodge", fill="white", size=2)
ggplot(ch4sample.exp1,aes(x=項目別,y=有時,colour=有時)) +
geom_bar(stat="identity",fill="white",width=2)
########################### Histogram #########################################
subject_tag<-c("國文","數學","歷史","地理")
ch4sample.exp2_gatable<-gather(ch4sample.exp2,subject_tag,key="科目",value="分數") # reshape the table into long (gathered) form
scope.his<-ggplot(ch4sample.exp2,aes(x=國文))
stat_trans_1<-ggplot(ch4sample.exp2,aes(x=國文))
stat_trans_2<-ggplot(ch4sample.exp2_gatable,aes(x=分數,fill=科目))
scope.his + geom_histogram(fill="white", colour="black")
scope.his + geom_histogram(fill="white", colour="black", binwidth=10)
ggplot(ch4sample.exp2_gatable,aes(x=分數,fill=科目)) +
geom_histogram(position="identity", binwidth=15, alpha=0.3)
# A histogram is a bar geom with a "bin" statistical transformation;
# each of the following pairs of calls is therefore equivalent.
stat_trans_1 + geom_bar(stat="bin", bins=5, fill="white", colour="black")
stat_trans_1 + stat_bin(geom="bar", bins=5, fill="white", colour="black")
stat_trans_2 + geom_bar(stat="bin", bins=8, position="identity", alpha=0.3)
stat_trans_2 + stat_bin(geom="bar", bins=8, position="identity", alpha=0.3)
######################### Line Graphs #########################################
item.gather<-c("年月","總指數","米類及其製品","肉類","蔬菜")
item.seq<-c("總指數","米類及其製品","肉類","蔬菜")
# lubridate::make_date() combines the year and month columns into a Date column
ch4sample.exp3<-mutate(ch4sample.exp3, "年月" = make_date(年,月))
ggplot(ch4sample.exp3,aes(x=年月,y=總指數))+geom_line()
ch4sample.exp3_gatable<-select(ch4sample.exp3, item.gather)
ch4sample.exp3_gatable<-gather(ch4sample.exp3_gatable,item.seq,key="品項",value="年增率") # reshape the table into long (gathered) form
ggplot(ch4sample.exp3_gatable,aes(x=年月,y=年增率,colour=品項)) +
geom_line() +
guides(colour=guide_legend(reverse = TRUE)) # reverse the legend (item) order
#ch4sample.exp3_gatable$品項<-factor(ch4sample.exp3_gatable$品項,levels=item.seq)# alternative way to change the legend (item) order
ggplot(ch4sample.exp3_gatable,aes(x=年月,y=年增率,colour=品項,linetype=品項)) +
geom_line() +
guides(colour=guide_legend(reverse = TRUE), linetype=guide_legend(reverse = TRUE))
ggplot(ch4sample.exp3_gatable,aes(x=年月,y=年增率,group=品項)) +
geom_line(colour="blue",linetype="dashed")
############################# Scatter Graphs ######################################
ch4sample.exp4<-mutate(ch4sample.exp4, "年月" = make_date(年度,月份))
# Spread the table so each solar station becomes a column of "平均單位裝置容量每日發電量"
ch4sample.exp4_sprtable.perday<-spread(select(ch4sample.exp4, "年月", "光電站名稱", "平均單位裝置容量每日發電量"),key="光電站名稱", value="平均單位裝置容量每日發電量")
# Spread the table so each solar station becomes a column of "發電量(度)"
ch4sample.exp4_sprtable.total<-spread(select(ch4sample.exp4, "年月", "光電站名稱", "發電量(度)"),key="光電站名稱", value="發電量(度)")
ch4sample.exp4_ponhu<-filter(ch4sample.exp4, 光電站名稱== "澎湖光電")
ggplot(ch4sample.exp4_ponhu,aes(x=年月, y=平均單位裝置容量每日發電量)) +
geom_point()# version using filter() to extract the station's rows
ggplot(ch4sample.exp4_sprtable.perday,aes(x=年月, y=澎湖光電)) +
geom_point()# version using the spread table; the aesthetic mapping reads more clearly
ggplot(ch4sample.exp4_ponhu,aes(x=年月, y=`發電量(度)`)) +
geom_point()# version using filter() to extract the station's rows
ggplot(ch4sample.exp4_sprtable.total,aes(x=年月, y=澎湖光電)) +
geom_point()# version using the spread table
###################################################################################
ggplot(ch4sample.exp4,aes(x=年月, y=平均單位裝置容量每日發電量, colour=光電站名稱)) +
geom_point()
ch4sample.exp4$年月<-factor(ch4sample.exp4$年月)
ggplot(ch4sample.exp4,aes(x=年月, y=平均單位裝置容量每日發電量, colour=光電站名稱,group=1)) +
geom_point()
ggplot(ch4sample.exp4,aes(x=年月, y=平均單位裝置容量每日發電量, colour=光電站名稱, shape=光電站名稱)) +
geom_point()
###################################################################################
ggplot(ch4sample.exp4_sprtable.perday, aes(x=年月, y=澎湖光電)) +
geom_line() +
geom_point(size=4,shape=22,fill="pink")
ggplot(ch4sample.exp4_sprtable.perday) +
geom_line(aes(x=年月, y=澎湖光電))+
geom_line(aes(x=年月, y=七美光電)) +
geom_point(aes(x=年月, y=澎湖光電)) +
geom_point(aes(x=年月, y=七美光電),size=4,shape=22,fill="pink")
ggplot(ch4sample.exp4_sprtable.perday, aes(x=年月, y=澎湖光電)) +
geom_line()+
geom_line(aes(x=年月, y=七美光電)) +
geom_point() +
geom_point(aes(x=年月, y=七美光電),size=4,shape=22,fill="pink")
################################## Facets #####################################
# Facet the subjects (科目) along rows
ggplot(ch4sample.exp2_gatable,aes(x=學號,y=分數,colour=科目)) +
geom_point() +
facet_grid(科目~.)
# Facet the subjects along columns
ggplot(ch4sample.exp2_gatable,aes(x=學號,y=分數,colour=科目)) +
geom_point() +
facet_grid(.~科目)
# Facet on a variable (性別) that is not in the aesthetic mapping
ggplot(ch4sample.exp2_gatable,aes(x=學號,y=分數,colour=科目)) +
geom_point() +
facet_grid(.~性別)
# Two faceting variables
ggplot(ch4sample.exp2_gatable,aes(x=學號,y=分數,colour=科目)) +
geom_point() +
facet_grid(班級~性別)
# More than two faceting variables
ggplot(ch4sample.exp2_gatable,aes(x=學號,y=分數,colour=科目)) +
geom_point() +
facet_grid(班級+性別~科目)
# Comparison of facet_grid and facet_wrap
ggplot(ch4sample.exp4,aes(x=光電站名稱, y=平均單位裝置容量每日發電量, colour=光電站名稱)) +
geom_point() +
geom_line() +
facet_grid(.~月份) +
scale_x_discrete(labels=NULL, breaks=NULL) +
scale_y_continuous(breaks=seq(0.5,5.2,0.5))
ggplot(ch4sample.exp4,aes(x=光電站名稱, y=平均單位裝置容量每日發電量, colour=光電站名稱)) +
geom_point() +
geom_line() +
facet_wrap(~月份) +
scale_x_discrete(labels=NULL, breaks=NULL) +
scale_y_continuous(breaks=seq(0.5,5.5,0.5))
# Use facet_wrap's nrow/ncol to control the panel layout
ggplot(ch4sample.exp4,aes(x=光電站名稱, y=平均單位裝置容量每日發電量, colour=光電站名稱)) +
geom_point() +
geom_line() +
facet_wrap(~月份,nrow=4) +
scale_x_discrete(labels=NULL, breaks=NULL) +
scale_y_continuous(breaks=seq(0.5,5.5,0.5))
ggplot(ch4sample.exp4,aes(x=光電站名稱, y=平均單位裝置容量每日發電量, colour=光電站名稱)) +
geom_point() +
geom_line() +
facet_wrap(~月份,ncol=5,nrow=3) +
scale_x_discrete(labels=NULL, breaks=NULL) +
scale_y_continuous(breaks=seq(0.5,5.5,0.5))
ch4sample.exp2_matrix<-m2gg.scope(ch4sample.exp2) # reshape the table for a scatterplot-matrix layout
# Reorder the factor levels to 國文, 數學, 歷史, 地理
ch4sample.exp2_matrix$科目1<-factor(ch4sample.exp2_matrix$科目1,levels=c("國文","數學","歷史","地理"))
ch4sample.exp2_matrix$科目2<-factor(ch4sample.exp2_matrix$科目2,levels=c("國文","數學","歷史","地理"))
ggplot(ch4sample.exp2_matrix,aes(x=分數1,y=分數2)) +
geom_point() +
facet_grid(科目1~科目2)
ggplot(ch4sample.exp2_matrix,aes(x=分數1,y=性別,colour=班級)) +
geom_point() +
facet_grid(科目1~科目2)
ggplot(ch4sample.exp2_matrix,aes(x=分數1,y=性別,colour=班級)) +
geom_point() +
facet_grid(科目1~科目2+性別)
ggplot(ch4sample.exp2_matrix,aes(x=分數1,y=分數2,colour=性別)) +
geom_point() +
facet_wrap(科目1~科目2)
ggplot(ch4sample.exp2_matrix,aes(x=分數1,y=分數2,colour=性別)) +
geom_point() +
facet_wrap(性別~科目2)
################################## Scales #####################################
ggplot(ch4sample.exp1,aes(x=項目別,y=有時,fill=項目別)) +
geom_bar(stat="identity") # discrete fill variable
ggplot(ch4sample.exp1,aes(x=項目別,y=有時,fill=有時)) +
geom_bar(stat="identity") # continuous fill variable
ggplot(ch4sample.exp4,aes(x=年月, y=平均單位裝置容量每日發電量, colour=光電站名稱)) +
geom_point() +
scale_colour_brewer(palette = "Set3")
# scale_fill_gradient() is for continuous variables, so applying it to a
# discrete variable (next call) raises an error.
ggplot(ch4sample.exp1,aes(x=項目別,y=有時,fill=項目別)) +
geom_bar(stat="identity") +
scale_fill_gradient(low="#00ff00", high="#ff99cc")
ggplot(ch4sample.exp1,aes(x=項目別,y=有時,fill=有時)) +
geom_bar(stat="identity") +
scale_fill_gradient(low="#00ff00", high="#ff99cc")
ggplot(ch4sample.exp1,aes(x=項目別,y=有時,fill=項目別,colour=項目別)) +
geom_bar(stat="identity") +
scale_fill_hue(h=c(0,360),c=50,l=80)
ggplot(ch4sample.exp1,aes(x=項目別,y=有時,fill=項目別)) +
geom_bar(stat="identity") +
scale_fill_grey(start=0.2,end=0.8)
# Limit the legend/axis to three specific stations
ggplot(ch4sample.exp4,aes(x=光電站名稱, y=平均單位裝置容量每日發電量, colour=光電站名稱)) +
geom_point() +
scale_colour_hue(breaks=c("七美光電","澎湖光電","金門光電")) +
scale_y_continuous(limits=c(2,2.5),breaks=seq(2,2.5,0.01))+
scale_x_discrete(limits=c("七美光電","澎湖光電","金門光電"))
ggplot(ch4sample.exp4,aes(x=光電站名稱, y=平均單位裝置容量每日發電量, colour=光電站名稱)) +
geom_point() +
scale_colour_hue(h=c(0,360),c=50,l=80,breaks=c("七美光電","澎湖光電","金門光電")) +
scale_x_discrete(limits=c("七美光電","澎湖光電","金門光電"), breaks=c("七美光電","澎湖光電"))
# Reverse the y axis
ggplot(ch4sample.exp3_gatable,aes(x=年月,y=年增率,colour=品項)) +
geom_line() +
scale_y_reverse()
# Use a log10 scale on the y axis
ggplot(ch4sample.exp1_gatable,aes(x=年齡區間,y=人數,fill=頻率)) +
geom_bar(position="dodge",stat="identity") +
guides(colour=guide_legend(reverse = TRUE)) +
scale_y_log10()
ggplot(ch4sample.exp3_gatable,aes(x=年月,y=年增率,colour=品項,linetype=品項)) +
geom_line() +
guides(colour=guide_legend(reverse = TRUE),
linetype=guide_legend(reverse = TRUE)) +
scale_linetype_manual(values=c(5:8))
ggplot(ch4sample.exp1_gatable,aes(x=年齡區間,y=人數,fill=頻率)) +
geom_bar(position="dodge",stat="identity") +
scale_fill_manual(values=c("magenta2","#7e9f68","chocolate4","cyan2"))
ggplot(ch4sample.exp4,aes(x=年月, y=平均單位裝置容量每日發電量, colour=光電站名稱, shape=光電站名稱)) +
geom_point() +
scale_shape_manual(values=c(0:15))
ggplot(ch4sample.exp1,aes(x=項目別,y=有時,fill=有時)) +
geom_bar(stat="identity") +
scale_fill_gradient(name="統計人數",labels=seq(0,350,30),
breaks=seq(0,350,30),low="#00ff00", high="#ff99cc")
# Handling missing values
item.gather.traffic<-c("年月","總指數","交通工具","油料費","汽車","機車")
item.seq.traffic<-c("總指數","交通工具","油料費","汽車","機車")
ch4sample.exp3_gatable.traffic<-select(ch4sample.exp3, item.gather.traffic)
ch4sample.exp3_gatable.traffic<-gather(ch4sample.exp3_gatable.traffic,item.seq.traffic,key="品項",value="年增率") # reshape the table into long (gathered) form
ch4sample.exp3_gatable.traffic$品項<-factor(ch4sample.exp3_gatable.traffic$品項,levels=item.seq.traffic)
ggplot(ch4sample.exp3_gatable.traffic,aes(x=年月,y=年增率,colour=品項)) +
geom_line() +
scale_x_date(limits=c(as.Date("2010-01-01","%Y-%m-%d"),as.Date("2018-01-01","%Y-%m-%d"))) +
scale_y_continuous(na.value = 0,limits=c(-10,10))
############################# Annotation & Layout #############################
# Title, subtitle, axis label and caption via labs()
ggplot(ch4sample.exp3,aes(x=年月,y=總指數)) +
geom_line(colour="#ec8092") +
labs(
title = "消費者物價基本分類暨項目群指數",
subtitle = "總指數",
y="年增率",
caption = "主計處"
)
# Keep only the three stations of interest for labelling
data_we_want<-filter(group_by(ch4sample.exp4,光電站名稱),
(光電站名稱=="七美光電")|(光電站名稱=="澎湖光電")|(光電站名稱=="金門光電"))
ggplot(ch4sample.exp4,aes(x=年月, y=平均單位裝置容量每日發電量)) +
geom_point(aes(colour=光電站名稱),data=data_we_want) +
geom_text(aes(label=光電站名稱,colour=光電站名稱), data=data_we_want, show.legend=F)
# Label only a subset of points but plot all of them; nudge the text upward
ggplot(ch4sample.exp4,aes(x=年月, y=平均單位裝置容量每日發電量)) +
geom_point(aes(colour=光電站名稱)) +
geom_text(aes(label=光電站名稱,colour=光電站名稱),
data=data_we_want, show.legend=F, nudge_y=0.1) +
scale_colour_hue(limits=c("七美光電","澎湖光電","金門光電"),
breaks=c("七美光電","澎湖光電","金門光電"))
# geom_label draws the text on a rounded background rectangle
ggplot(ch4sample.exp4,aes(x=年月, y=平均單位裝置容量每日發電量)) +
geom_point(aes(colour=光電站名稱),data=data_we_want) +
geom_label(aes(label=光電站名稱,colour=光電站名稱),
data=data_we_want,show.legend=F,nudge_y=0.1,alpha=0.5)
# annotate() adds one-off shapes: a highlighted rectangle and an arrow
ggplot(ch4sample.exp3_gatable,aes(x=年月,y=年增率,colour=品項)) +
geom_line() +
guides(colour=guide_legend(reverse = TRUE)) +
annotate(
"rect", xmin=as.Date("2010-01-01","%Y-%m-%d"),
xmax=as.Date("2016-12-01","%Y-%m-%d"),
ymin=-50, ymax=85, fill="#cc00ff", alpha=0.3
) +
annotate(
"segment", x=as.Date("2008-05-01","%Y-%m-%d"),
xend=as.Date("2016-02-01","%Y-%m-%d"),
y=90, yend=80, color="#006600",
alpha=0.8, size=1.2, arrow=arrow()
)
# theme() controls legend placement and grid-line styling
ggplot(ch4sample.exp1_gatable,aes(x=年齡區間,y=人數,fill=頻率)) +
geom_bar(position="dodge",stat="identity") +
guides(colour=guide_legend(reverse = TRUE)) +
theme(
legend.position = "top",
legend.background = element_rect(colour="green",size=1.2),
legend.text = element_text(colour="blue"),
panel.grid.major = element_line(colour="red"),
panel.grid.minor = element_line(colour="red",
linetype="dashed", size=0.3)
)
# theme() also styles facet strips and the plot title
ggplot(ch4sample.exp2_matrix,aes(x=分數1,y=分數2)) +
geom_point() +
facet_grid(科目1~科目2) +
labs(title="三班成績散佈矩陣圖") +
theme(
strip.background = element_rect(fill="pink2"),
strip.text = element_text(size=14,face="bold"),
strip.text.y = element_text(angle=0),
plot.title = element_text(color="#cc3300",
size=22, hjust=0.5)
)
############################# Coordinate System ###############################
# Flip the x and y axes
ggplot(ch4sample.exp1,aes(x=項目別,y=有時,fill=項目別)) +
geom_bar(stat="identity") +
coord_flip()
ggplot(ch4sample.exp4,aes(x=光電站名稱, y=平均單位裝置容量每日發電量, fill=光電站名稱)) +
geom_boxplot() +
guides(fill=guide_legend(reverse = TRUE)) +
coord_flip()
ggplot(ch4sample.exp3_gatable,aes(x=品項,y=年增率,colour=品項)) +
geom_boxplot() +
guides(colour=guide_legend(reverse = TRUE)) +
coord_flip()
# Switch to polar coordinates
ggplot(ch4sample.exp1,aes(x=項目別,y=有時,fill=項目別)) +
geom_bar(stat="identity",width=1) +
coord_polar(theta = "x")
# Turn the stacked bar into a pie chart with percentage labels
y.breaks <- cumsum(rev(ch4sample.exp1$有時)) - rev(ch4sample.exp1$有時)/2
y.labels <- paste(scales::percent(rev(ch4sample.exp1$有時)/sum(rev(ch4sample.exp1$有時))))
ggplot(ch4sample.exp1,aes(x=1,y=有時,fill=項目別)) +
geom_bar(stat="identity") +
coord_polar(theta = "y") +
scale_y_continuous(breaks=y.breaks,labels=y.labels)
# Cardioid curve drawn in polar coordinates
x<-seq(0,2*pi,2*pi/1000)
heart_line<-tibble(
theta = x,
cardioid = 2*(0.25-cos(theta))
)
ggplot(heart_line,aes(x=theta,y=cardioid)) +
geom_line() +
coord_polar()
# Rose curve drawn in polar coordinates
rose_line<-tibble(
theta = x,
rose = cos(9*theta)
)
ggplot(rose_line,aes(x=theta,y=rose)) +
geom_line() +
coord_polar()
| /ch4/ch4.R | no_license | ifferent/r_programming | R | false | false | 23,493 | r | ###############################################################################
###############################################################################
#### ####
#### 完成日期: 2018-07-19 ####
#### 作者:Roddy Hung ####
#### 版本:V4.10 ####
#### ####
#### 第4章範例程式: ####
#### 1.基礎繪圖(圖形)文法的概念 ####
#### 2.美學映射的概念 ####
#### i.基本變數映射設定 ####
#### ii.變數映射到fill、color或形狀等的概念 ####
#### iii.表格轉換成聚集的形式 ####
#### iv.複式長條圖 ####
#### 3.長條圖 ####
#### i.堆疊長條圖(引數position的使用) ####
#### ii.顏色設定 ####
#### iii.外框大小和長條圖寬度的設定 ####
#### 4.直方圖 ####
#### i.統計轉換的概念 ####
#### 5.線圖 ####
#### 6.散佈圖 ####
#### 7.Facets ####
#### 8.Scale ####
#### 9.文字輔助標示與版面配置 ####
#### 10.座標軸設定 ####
#### ####
###############################################################################
###############################################################################
source("common/check_package.R")# check for packages that are not installed yet
source("common/function.R",encoding="utf-8") # load the shared user-defined functions
###############################################################################
####                                                                       ####
####  Functions used from the loaded packages:                             ####
####    readr:     read_csv                                                ####
####    dplyr:     filter,select,rename,mutate                             ####
####    ggplot2:   (used throughout)                                       ####
####    tidyr:     gather,spread                                           ####
####    lubridate: make_date                                               ####
####                                                                       ####
###############################################################################
library(readr)
library(dplyr)
library(ggplot2)
library(tidyr)
library(lubridate)
############################ file loading and setup ###########################
ch4sample.exp1_path="ch4/sample_data/最近一年內曾因家庭緣故影響工作之情形-按無法加班或無法延長工時分(年齡).csv"
ch4sample.exp1_1_path="ch4/sample_data/最近一年內曾因家庭緣故影響工作之情形-按中斷工作或上班時臨時趕回家分(年齡).csv"
ch4sample.exp2_path="ch4/sample_data/成績單.csv"
ch4sample.exp3_path="ch4/sample_data/消費者物價基本分類暨項目群指數.csv"
ch4sample.exp4_path="ch4/sample_data/台灣太陽光電發電量統計表.csv"
# Read each sample file; the first row provides the column names.
ch4sample.exp1<-read_csv(ch4sample.exp1_path,col_names = TRUE)
ch4sample.exp1_1<-read_csv(ch4sample.exp1_1_path,col_names=TRUE)
ch4sample.exp2<-read_csv(ch4sample.exp2_path,col_names=TRUE)
ch4sample.exp3<-read_csv(ch4sample.exp3_path,col_names=TRUE)
ch4sample.exp4<-read_csv(ch4sample.exp4_path,col_names=TRUE)
############################ aesthetic mapping ################################
# Basic mapping: category on x, count on y; stat="identity" uses y as given
ggplot(ch4sample.exp1,aes(x=項目別,y=經常)) + geom_bar(stat="identity")
# Mapping fill to a discrete variable gives one colour per category
ggplot(ch4sample.exp1,aes(x=項目別,y=有時,fill=項目別)) + geom_bar(stat="identity")
# Mapping fill to a continuous variable gives a colour gradient
ggplot(ch4sample.exp1,aes(x=項目別,y=有時,fill=有時)) + geom_bar(stat="identity")
############################## bar graphic ####################################
freq_tag<-c("經常","有時","極少","從不")
ch4sample.exp1_gatable<-gather(ch4sample.exp1,freq_tag,key="頻率",value="人數") # reshape the table into long (gathered) form
ch4sample.exp1_gatable<-rename(ch4sample.exp1_gatable, "年齡區間"="項目別")
ggplot(ch4sample.exp1_gatable,aes(x=年齡區間,y=人數,fill=頻率)) +
geom_bar(position="dodge",stat="identity")
ggplot(ch4sample.exp1_gatable,aes(x=頻率,y=人數,fill=年齡區間)) +
geom_bar(position="dodge",stat="identity")
ggplot(ch4sample.exp1_gatable,aes(x=年齡區間,y=人數,fill=頻率)) +
geom_bar(position="dodge",stat="identity") +
guides(fill=guide_legend(reverse = TRUE)) # reverse the legend (frequency) order
#ch4sample.exp1_gatable$頻率<-factor(ch4sample.exp1_gatable$頻率,levels=freq_tag)# alternative way to change the legend (frequency) order
############################# fill & colour ###################################
exp1.fill<-ggplot(ch4sample.exp1_gatable,aes(x=年齡區間,y=人數,fill=頻率))
exp1.colour<-ggplot(ch4sample.exp1_gatable,aes(x=年齡區間,y=人數,colour=頻率))
exp1.fill + geom_bar(stat="identity",position="stack")# explicit position="stack"
exp1.fill + geom_bar(stat="identity")# default position is "stack"
exp1.fill + geom_bar(stat="identity",position="fill")# position="fill" normalises each bar to 1
ggplot(ch4sample.exp1,aes(x=項目別,y=有時,colour=項目別)) +
geom_bar(stat="identity",fill="white")
ggplot(ch4sample.exp1,aes(x=項目別,y=有時,colour=有時)) +
geom_bar(stat="identity",fill="white")
exp1.colour + geom_bar(stat="identity", position="dodge", fill="white")
exp1.colour + geom_bar(stat="identity", position="dodge", fill="white", size=2)
ggplot(ch4sample.exp1,aes(x=項目別,y=有時,colour=有時)) +
geom_bar(stat="identity",fill="white",width=2)
########################### Histogram #########################################
subject_tag<-c("國文","數學","歷史","地理")
ch4sample.exp2_gatable<-gather(ch4sample.exp2,subject_tag,key="科目",value="分數") # reshape the table into long (gathered) form
scope.his<-ggplot(ch4sample.exp2,aes(x=國文))
stat_trans_1<-ggplot(ch4sample.exp2,aes(x=國文))
stat_trans_2<-ggplot(ch4sample.exp2_gatable,aes(x=分數,fill=科目))
scope.his + geom_histogram(fill="white", colour="black")
scope.his + geom_histogram(fill="white", colour="black", binwidth=10)
ggplot(ch4sample.exp2_gatable,aes(x=分數,fill=科目)) +
geom_histogram(position="identity", binwidth=15, alpha=0.3)
# A histogram is a bar geom with a "bin" statistical transformation;
# each of the following pairs of calls is therefore equivalent.
stat_trans_1 + geom_bar(stat="bin", bins=5, fill="white", colour="black")
stat_trans_1 + stat_bin(geom="bar", bins=5, fill="white", colour="black")
stat_trans_2 + geom_bar(stat="bin", bins=8, position="identity", alpha=0.3)
stat_trans_2 + stat_bin(geom="bar", bins=8, position="identity", alpha=0.3)
######################### Line Graphs #########################################
item.gather<-c("年月","總指數","米類及其製品","肉類","蔬菜")
item.seq<-c("總指數","米類及其製品","肉類","蔬菜")
# lubridate::make_date() combines the year and month columns into a Date column
ch4sample.exp3<-mutate(ch4sample.exp3, "年月" = make_date(年,月))
ggplot(ch4sample.exp3,aes(x=年月,y=總指數))+geom_line()
ch4sample.exp3_gatable<-select(ch4sample.exp3, item.gather)
ch4sample.exp3_gatable<-gather(ch4sample.exp3_gatable,item.seq,key="品項",value="年增率") # reshape the table into long (gathered) form
ggplot(ch4sample.exp3_gatable,aes(x=年月,y=年增率,colour=品項)) +
geom_line() +
guides(colour=guide_legend(reverse = TRUE)) # reverse the legend (item) order
#ch4sample.exp3_gatable$品項<-factor(ch4sample.exp3_gatable$品項,levels=item.seq)# alternative way to change the legend (item) order
ggplot(ch4sample.exp3_gatable,aes(x=年月,y=年增率,colour=品項,linetype=品項)) +
geom_line() +
guides(colour=guide_legend(reverse = TRUE), linetype=guide_legend(reverse = TRUE))
ggplot(ch4sample.exp3_gatable,aes(x=年月,y=年增率,group=品項)) +
geom_line(colour="blue",linetype="dashed")
############################# Scatter Graphs ######################################
ch4sample.exp4<-mutate(ch4sample.exp4, "年月" = make_date(年度,月份))
#將表格轉成分散形式來表示"平均單位裝置容量每日發電量"與"各光電站的關係"
ch4sample.exp4_sprtable.perday<-spread(select(ch4sample.exp4, "年月", "光電站名稱", "平均單位裝置容量每日發電量"),key="光電站名稱", value="平均單位裝置容量每日發電量")
#將表格轉成分散形式來表示"發電量(度)"與"各光電站的關係"
ch4sample.exp4_sprtable.total<-spread(select(ch4sample.exp4, "年月", "光電站名稱", "發電量(度)"),key="光電站名稱", value="發電量(度)")
ch4sample.exp4_ponhu<-filter(ch4sample.exp4, 光電站名稱== "澎湖光電")
ggplot(ch4sample.exp4_ponhu,aes(x=年月, y=平均單位裝置容量每日發電量)) +
geom_point()#使用filter來將表格抽離
ggplot(ch4sample.exp4_sprtable.perday,aes(x=年月, y=澎湖光電)) +
geom_point()#使用分散表示法,這個的優點是可以在美學映射中較好知道程式要講甚麼
ggplot(ch4sample.exp4_ponhu,aes(x=年月, y=`發電量(度)`)) +
geom_point()#使用filter來將表格抽離
ggplot(ch4sample.exp4_sprtable.total,aes(x=年月, y=澎湖光電)) +
geom_point()#使用分散表示法
###################################################################################
ggplot(ch4sample.exp4,aes(x=年月, y=平均單位裝置容量每日發電量, colour=光電站名稱)) +
geom_point()
ch4sample.exp4$年月<-factor(ch4sample.exp4$年月)
ggplot(ch4sample.exp4,aes(x=年月, y=平均單位裝置容量每日發電量, colour=光電站名稱,group=1)) +
geom_point()
ggplot(ch4sample.exp4,aes(x=年月, y=平均單位裝置容量每日發電量, colour=光電站名稱, shape=光電站名稱)) +
geom_point()
###################################################################################
ggplot(ch4sample.exp4_sprtable.perday, aes(x=年月, y=澎湖光電)) +
geom_line() +
geom_point(size=4,shape=22,fill="pink")
ggplot(ch4sample.exp4_sprtable.perday) +
geom_line(aes(x=年月, y=澎湖光電))+
geom_line(aes(x=年月, y=七美光電)) +
geom_point(aes(x=年月, y=澎湖光電)) +
geom_point(aes(x=年月, y=七美光電),size=4,shape=22,fill="pink")
ggplot(ch4sample.exp4_sprtable.perday, aes(x=年月, y=澎湖光電)) +
geom_line()+
geom_line(aes(x=年月, y=七美光電)) +
geom_point() +
geom_point(aes(x=年月, y=七美光電),size=4,shape=22,fill="pink")
################################## Facets #####################################
#橫軸顯示
ggplot(ch4sample.exp2_gatable,aes(x=學號,y=分數,colour=科目)) +
geom_point() +
facet_grid(科目~.)
#縱軸顯示
ggplot(ch4sample.exp2_gatable,aes(x=學號,y=分數,colour=科目)) +
geom_point() +
facet_grid(.~科目)
#非aesthetic映射的變數
ggplot(ch4sample.exp2_gatable,aes(x=學號,y=分數,colour=科目)) +
geom_point() +
facet_grid(.~性別)
#雙變數
ggplot(ch4sample.exp2_gatable,aes(x=學號,y=分數,colour=科目)) +
geom_point() +
facet_grid(班級~性別)
#雙變數以上
ggplot(ch4sample.exp2_gatable,aes(x=學號,y=分數,colour=科目)) +
geom_point() +
facet_grid(班級+性別~科目)
#grid & wrap的比較
ggplot(ch4sample.exp4,aes(x=光電站名稱, y=平均單位裝置容量每日發電量, colour=光電站名稱)) +
geom_point() +
geom_line() +
facet_grid(.~月份) +
scale_x_discrete(labels=NULL, breaks=NULL) +
scale_y_continuous(breaks=seq(0.5,5.2,0.5))
ggplot(ch4sample.exp4,aes(x=光電站名稱, y=平均單位裝置容量每日發電量, colour=光電站名稱)) +
geom_point() +
geom_line() +
facet_wrap(~月份) +
scale_x_discrete(labels=NULL, breaks=NULL) +
scale_y_continuous(breaks=seq(0.5,5.5,0.5))
#使用wrap更改row和column
ggplot(ch4sample.exp4,aes(x=光電站名稱, y=平均單位裝置容量每日發電量, colour=光電站名稱)) +
geom_point() +
geom_line() +
facet_wrap(~月份,nrow=4) +
scale_x_discrete(labels=NULL, breaks=NULL) +
scale_y_continuous(breaks=seq(0.5,5.5,0.5))
ggplot(ch4sample.exp4,aes(x=光電站名稱, y=平均單位裝置容量每日發電量, colour=光電站名稱)) +
geom_point() +
geom_line() +
facet_wrap(~月份,ncol=5,nrow=3) +
scale_x_discrete(labels=NULL, breaks=NULL) +
scale_y_continuous(breaks=seq(0.5,5.5,0.5))
ch4sample.exp2_matrix<-m2gg.scope(ch4sample.exp2) #轉換表格符合散佈矩矩陣圖
#改變排列順序為"國文","數學","歷史","地理"
ch4sample.exp2_matrix$科目1<-factor(ch4sample.exp2_matrix$科目1,levels=c("國文","數學","歷史","地理"))
ch4sample.exp2_matrix$科目2<-factor(ch4sample.exp2_matrix$科目2,levels=c("國文","數學","歷史","地理"))
ggplot(ch4sample.exp2_matrix,aes(x=分數1,y=分數2)) +
geom_point() +
facet_grid(科目1~科目2)
ggplot(ch4sample.exp2_matrix,aes(x=分數1,y=性別,colour=班級)) +
geom_point() +
facet_grid(科目1~科目2)
ggplot(ch4sample.exp2_matrix,aes(x=分數1,y=性別,colour=班級)) +
geom_point() +
facet_grid(科目1~科目2+性別)
ggplot(ch4sample.exp2_matrix,aes(x=分數1,y=分數2,colour=性別)) +
geom_point() +
facet_wrap(科目1~科目2)
ggplot(ch4sample.exp2_matrix,aes(x=分數1,y=分數2,colour=性別)) +
geom_point() +
facet_wrap(性別~科目2)
################################## Scales #####################################
ggplot(ch4sample.exp1,aes(x=項目別,y=有時,fill=項目別)) +
geom_bar(stat="identity") #離散變數
ggplot(ch4sample.exp1,aes(x=項目別,y=有時,fill=有時)) +
geom_bar(stat="identity") #連續變數
ggplot(ch4sample.exp4,aes(x=年月, y=平均單位裝置容量每日發電量, colour=光電站名稱)) +
geom_point() +
scale_colour_brewer(palette = "Set3")
#gradient是連續變數,所以使用在離散變數上會出現錯誤
ggplot(ch4sample.exp1,aes(x=項目別,y=有時,fill=項目別)) +
geom_bar(stat="identity") +
scale_fill_gradient(low="#00ff00", high="#ff99cc")
ggplot(ch4sample.exp1,aes(x=項目別,y=有時,fill=有時)) +
geom_bar(stat="identity") +
scale_fill_gradient(low="#00ff00", high="#ff99cc")
ggplot(ch4sample.exp1,aes(x=項目別,y=有時,fill=項目別,colour=項目別)) +
geom_bar(stat="identity") +
scale_fill_hue(h=c(0,360),c=50,l=80)
ggplot(ch4sample.exp1,aes(x=項目別,y=有時,fill=項目別)) +
geom_bar(stat="identity") +
scale_fill_grey(start=0.2,end=0.8)
ggplot(ch4sample.exp4,aes(x=光電站名稱, y=平均單位裝置容量每日發電量, colour=光電站名稱)) +
geom_point() +
scale_colour_hue(breaks=c("七美光電","澎湖光電","金門光電")) +
scale_y_continuous(limits=c(2,2.5),breaks=seq(2,2.5,0.01))+
scale_x_discrete(limits=c("七美光電","澎湖光電","金門光電"))
ggplot(ch4sample.exp4,aes(x=光電站名稱, y=平均單位裝置容量每日發電量, colour=光電站名稱)) +
geom_point() +
scale_colour_hue(h=c(0,360),c=50,l=80,breaks=c("七美光電","澎湖光電","金門光電")) +
scale_x_discrete(limits=c("七美光電","澎湖光電","金門光電"), breaks=c("七美光電","澎湖光電"))
#反轉y軸
ggplot(ch4sample.exp3_gatable,aes(x=年月,y=年增率,colour=品項)) +
geom_line() +
scale_y_reverse()
#y軸以log10為刻度
ggplot(ch4sample.exp1_gatable,aes(x=年齡區間,y=人數,fill=頻率)) +
geom_bar(position="dodge",stat="identity") +
guides(colour=guide_legend(reverse = TRUE)) +
scale_y_log10()
ggplot(ch4sample.exp3_gatable,aes(x=年月,y=年增率,colour=品項,linetype=品項)) +
geom_line() +
guides(colour=guide_legend(reverse = TRUE),
linetype=guide_legend(reverse = TRUE)) +
scale_linetype_manual(values=c(5:8))
ggplot(ch4sample.exp1_gatable,aes(x=年齡區間,y=人數,fill=頻率)) +
geom_bar(position="dodge",stat="identity") +
scale_fill_manual(values=c("magenta2","#7e9f68","chocolate4","cyan2"))
ggplot(ch4sample.exp4,aes(x=年月, y=平均單位裝置容量每日發電量, colour=光電站名稱, shape=光電站名稱)) +
geom_point() +
scale_shape_manual(values=c(0:15))
ggplot(ch4sample.exp1,aes(x=項目別,y=有時,fill=有時)) +
geom_bar(stat="identity") +
scale_fill_gradient(name="統計人數",labels=seq(0,350,30),
breaks=seq(0,350,30),low="#00ff00", high="#ff99cc")
#缺失值設定
item.gather.traffic<-c("年月","總指數","交通工具","油料費","汽車","機車")
item.seq.traffic<-c("總指數","交通工具","油料費","汽車","機車")
ch4sample.exp3_gatable.traffic<-select(ch4sample.exp3, item.gather.traffic)
ch4sample.exp3_gatable.traffic<-gather(ch4sample.exp3_gatable.traffic,item.seq.traffic,key="品項",value="年增率") #將表格轉成聚集形式
ch4sample.exp3_gatable.traffic$品項<-factor(ch4sample.exp3_gatable.traffic$品項,levels=item.seq.traffic)
ggplot(ch4sample.exp3_gatable.traffic,aes(x=年月,y=年增率,colour=品項)) +
geom_line() +
scale_x_date(limits=c(as.Date("2010-01-01","%Y-%m-%d"),as.Date("2018-01-01","%Y-%m-%d"))) +
scale_y_continuous(na.value = 0,limits=c(-10,10))
############################# Annotation & Layout #############################
ggplot(ch4sample.exp3,aes(x=年月,y=總指數)) +
geom_line(colour="#ec8092") +
labs(
title = "消費者物價基本分類暨項目群指數",
subtitle = "總指數",
y="年增率",
caption = "主計處"
)
data_we_want<-filter(group_by(ch4sample.exp4,光電站名稱),
(光電站名稱=="七美光電")|(光電站名稱=="澎湖光電")|(光電站名稱=="金門光電"))
ggplot(ch4sample.exp4,aes(x=年月, y=平均單位裝置容量每日發電量)) +
geom_point(aes(colour=光電站名稱),data=data_we_want) +
geom_text(aes(label=光電站名稱,colour=光電站名稱), data=data_we_want, show.legend=F)
ggplot(ch4sample.exp4,aes(x=年月, y=平均單位裝置容量每日發電量)) +
geom_point(aes(colour=光電站名稱)) +
geom_text(aes(label=光電站名稱,colour=光電站名稱),
data=data_we_want, show.legend=F, nudge_y=0.1) +
scale_colour_hue(limits=c("七美光電","澎湖光電","金門光電"),
breaks=c("七美光電","澎湖光電","金門光電"))
ggplot(ch4sample.exp4,aes(x=年月, y=平均單位裝置容量每日發電量)) +
geom_point(aes(colour=光電站名稱),data=data_we_want) +
geom_label(aes(label=光電站名稱,colour=光電站名稱),
data=data_we_want,show.legend=F,nudge_y=0.1,alpha=0.5)
ggplot(ch4sample.exp3_gatable,aes(x=年月,y=年增率,colour=品項)) +
geom_line() +
guides(colour=guide_legend(reverse = TRUE)) +
annotate(
"rect", xmin=as.Date("2010-01-01","%Y-%m-%d"),
xmax=as.Date("2016-12-01","%Y-%m-%d"),
ymin=-50, ymax=85, fill="#cc00ff", alpha=0.3
) +
annotate(
"segment", x=as.Date("2008-05-01","%Y-%m-%d"),
xend=as.Date("2016-02-01","%Y-%m-%d"),
y=90, yend=80, color="#006600",
alpha=0.8, size=1.2, arrow=arrow()
)
ggplot(ch4sample.exp1_gatable,aes(x=年齡區間,y=人數,fill=頻率)) +
geom_bar(position="dodge",stat="identity") +
guides(colour=guide_legend(reverse = TRUE)) +
theme(
legend.position = "top",
legend.background = element_rect(colour="green",size=1.2),
legend.text = element_text(colour="blue"),
panel.grid.major = element_line(colour="red"),
panel.grid.minor = element_line(colour="red",
linetype="dashed", size=0.3)
)
ggplot(ch4sample.exp2_matrix,aes(x=分數1,y=分數2)) +
geom_point() +
facet_grid(科目1~科目2) +
labs(title="三班成績散佈矩陣圖") +
theme(
strip.background = element_rect(fill="pink2"),
strip.text = element_text(size=14,face="bold"),
strip.text.y = element_text(angle=0),
plot.title = element_text(color="#cc3300",
size=22, hjust=0.5)
)
############################# Coordinate System ###############################
ggplot(ch4sample.exp1,aes(x=項目別,y=有時,fill=項目別)) +
geom_bar(stat="identity") +
coord_flip()
ggplot(ch4sample.exp4,aes(x=光電站名稱, y=平均單位裝置容量每日發電量, fill=光電站名稱)) +
geom_boxplot() +
guides(fill=guide_legend(reverse = TRUE)) +
coord_flip()
ggplot(ch4sample.exp3_gatable,aes(x=品項,y=年增率,colour=品項)) +
geom_boxplot() +
guides(colour=guide_legend(reverse = TRUE)) +
coord_flip()
#改為極座標
ggplot(ch4sample.exp1,aes(x=項目別,y=有時,fill=項目別)) +
geom_bar(stat="identity",width=1) +
coord_polar(theta = "x")
#改為圓餅圖
y.breaks <- cumsum(rev(ch4sample.exp1$有時)) - rev(ch4sample.exp1$有時)/2
y.labels <- paste(scales::percent(rev(ch4sample.exp1$有時)/sum(rev(ch4sample.exp1$有時))))
ggplot(ch4sample.exp1,aes(x=1,y=有時,fill=項目別)) +
geom_bar(stat="identity") +
coord_polar(theta = "y") +
scale_y_continuous(breaks=y.breaks,labels=y.labels)
#心臟線
x<-seq(0,2*pi,2*pi/1000)
heart_line<-tibble(
theta = x,
cardioid = 2*(0.25-cos(theta))
)
ggplot(heart_line,aes(x=theta,y=cardioid)) +
geom_line() +
coord_polar()
#玫瑰線
rose_line<-tibble(
theta = x,
rose = cos(9*theta)
)
ggplot(rose_line,aes(x=theta,y=rose)) +
geom_line() +
coord_polar()
|
# Load trainee assessment data from the pharmacy training course and pivot
# it into one row per trainee/measure with per-assessment totals in columns.
# NOTE(review): relies on reshape2's melt()/dcast() already being attached
# by the calling session -- there is no library() call here; confirm.
file ="training pharmacy1.csv"
f <- read.csv(file, header=T)
# Melt to long form; melt() guesses the id columns, presumably Trainee and
# Assess -- TODO confirm against the CSV layout.
fme <- melt (f)
# Cross-tabulate: rows = Trainee x measured variable, columns = Assess
# levels (e.g. pre/post), cells = sum of the melted values.
dfme <- dcast(fme, Trainee + variable ~ Assess,fun.aggregate = sum)
| /R files From OneDrive/New folder (2)/prepost.R | no_license | AbidAliShaikh/R-Space | R | false | false | 152 | r | file ="training pharmacy1.csv"
f <- read.csv(file, header=T)
fme <- melt (f)
dfme <- dcast(fme, Trainee + variable ~ Assess,fun.aggregate = sum)
|
# Package test entry point: run by R CMD check to execute every test file
# under tests/testthat/ against the installed agesim package.
library(testthat)
library(agesim)
test_check("agesim")
| /tests/testthat.R | permissive | johnrbryant/agesim | R | false | false | 56 | r | library(testthat)
library(agesim)
test_check("agesim")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parseargs.R
\name{parseargs}
\alias{parseargs}
\title{Function to parse arguments from command line}
\usage{
parseargs(required = character(0), ..., silent = F, include.others = TRUE,
test = NULL)
}
\keyword{internal}
| /man/parseargs.Rd | permissive | choishingwan/lassosum | R | false | true | 298 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parseargs.R
\name{parseargs}
\alias{parseargs}
\title{Function to parse arguments from command line}
\usage{
parseargs(required = character(0), ..., silent = F, include.others = TRUE,
test = NULL)
}
\keyword{internal}
|
# Return the intercept and slope of the straight line through the two
# points (x1, y1) and (x2, y2), as a named list.
# Note: a vertical line (x1 == x2) yields Inf/NaN components.
line_equation <- function(x1, y1, x2, y2) {
  rise_over_run <- (y2 - y1) / (x2 - x1)
  list(intercept = y1 - rise_over_run * x1, slope = rise_over_run)
}
# Bin x with cut() and average y within each bin.
# Returns a list with the numeric bin midpoints (x) and the per-bin means
# of y (y); bins that receive no observations produce NA means.
# `breaks` is forwarded to cut(), so it may be a bin count or an explicit
# vector of cut points.
averages <- function(x, y, breaks = 5) {
  bins <- cut(x, breaks = breaks)
  bin_means <- as.vector(tapply(y, bins, mean))
  # Recover the numeric edges from the factor labels, which look like
  # "(lo,hi]": strip the bracketing characters, then split on the comma.
  edge_text <- strsplit(gsub("\\(|\\]", "", levels(bins)), ",")
  lo <- vapply(edge_text, function(e) as.numeric(e[1]), numeric(1))
  hi <- vapply(edge_text, function(e) as.numeric(e[2]), numeric(1))
  list(x = lo + (hi - lo) / 2, y = bin_means)
}
| /regression-effect/helpers.R | permissive | gastonstat/shiny-introstats | R | false | false | 620 | r | line_equation <- function(x1, y1, x2, y2) {
slope <- (y2 - y1) / (x2 - x1)
intercept <- y1 - slope*x1
list(intercept = intercept, slope = slope)
}
averages <- function(x, y, breaks = 5) {
x_cut<- cut(x, breaks = breaks)
y_averages <- as.vector(tapply(y, x_cut, mean))
x_boundaries <- gsub('\\(', '', levels(x_cut))
x_boundaries <- gsub('\\]', '', x_boundaries)
x_boundaries <- strsplit(x_boundaries, ',')
x1 <- as.numeric(sapply(x_boundaries, function(u) u[1]))
x2 <- as.numeric(sapply(x_boundaries, function(u) u[2]))
x_midpoints <- x1 + (x2 - x1) / 2
list(x = x_midpoints, y = y_averages)
}
|
# Scrape the League of Legends champion-statistics table from op.gg
# (NA region) and print the text of the rank/champion/value cells.
library("rvest")
page.resource <- read_html("http://na.op.gg/champion/statistics")
# CSS selectors (SelectorGadget-style) for the champion-index table cells.
heros.rank <- html_nodes(page.resource, ".champion-index-table__cell--value , .champion-index-table__header , .champion-index-table__cell--rank , .champion-index-table__cell--champion")
heros.rank %>% html_text()
| /week_2/hw_2/爬蟲練習/Rvest_Practice_1.R | no_license | ShuHsiangLiang/CSX4001 | R | false | false | 297 | r | library("rvest")
page.resource <- read_html("http://na.op.gg/champion/statistics")
heros.rank <- html_nodes(page.resource, ".champion-index-table__cell--value , .champion-index-table__header , .champion-index-table__cell--rank , .champion-index-table__cell--champion")
heros.rank %>% html_text()
|
# eBay iPad listings: fit rpart trees predicting whether a listing sold and
# evaluate on a held-out test set (confusion matrix, TPR/FPR, AUC).
# NOTE(review): relative, user-specific working directory -- confirm the
# script is launched from the expected location.
setwd("users/krinalmanakiwala/desktop/RProjects")
data = read.csv("eBayClean.csv")
library(caTools)
# Reproduce the pre-R-3.6.0 sampling algorithm so set.seed(100) matches
# previously published results.
RNGkind(sample.kind = 'Rounding')
set.seed(100)
# 70/30 train/test split, stratified on the outcome.
split = sample.split(data$sold, SplitRatio = 0.7)
train = data[split,]
test = data[!split,]
nrow(train)
nrow(test)
# One-time installation; harmless but slow to repeat on every run.
install.packages("rpart")
install.packages("rpart.plot")
library(rpart)
library(rpart.plot)
## Regression tree on start price alone.
tree1 = rpart(sold~startprice, data = train)
rpart.plot(tree1)
summary(tree1)
## complexity of 0.2244469
## Classification tree: a very small cp lets this tree grow far deeper
## than the default-cp regression tree above.
tree2 = rpart(sold~startprice, data = train, method = 'class', cp = 0.00001)
rpart.plot(tree2)
summary(tree2)
## Count training listings priced in [152, 172).
## FIX: the original used `&&`, which looks only at the first element of
## each vector (and errors on vectors in R >= 4.3); elementwise masks need
## the vectorized `&` -- this is why the count "should be 39" but wasn't.
sum(train$startprice>=152 & train$startprice<172)
## Tree3: anova ("regression") tree on a categorical predictor -- rpart
## cannot produce a useful split here.
tree3 = rpart(sold~storage, data = train, method = 'anova')
## FIX: the original plotted tree2 here (copy-paste slip); plot tree3.
rpart.plot(tree3)
summary(tree3)
## Tree4: classification tree on the same categorical predictor -- again
## no useful split is produced.
tree4 = rpart(sold~storage, data = train, method = 'class')
rpart.plot(tree4)
summary(tree4)
## Tree5: all predictors except the row identifier.
tree5 = rpart(sold~.-UniqueID, data = train, method = 'class')
rpart.plot(tree5)
summary(tree5)
## Per summary(): most important predictors are biddable, startprice,
## productline (primary splits; surrogates mimic the primary split).
pred = predict(tree5, newdata = test, type = 'class')
ct = table(test$sold, pred); ct
fpr =ct[1,2]/(ct[1,1] + ct[1,2])
fpr
tpr =ct[2,2]/(ct[2,2]+ct[2,1])
tpr
## The logistic baseline had a slightly higher TPR than this tree.
library(ROCR)
## FIX: ROCR::prediction() needs numeric scores, not the factor of class
## labels -- that is why the original call raised "Format of predictions
## is invalid". Use the predicted probability of the positive class
## (second column of the probability matrix from predict()).
ROCRpred = prediction(predict(tree5, newdata = test)[, 2], test$sold)
as.numeric(performance(ROCRpred, 'auc')@y.values)
accuracy = sum(ct[1,1],ct[2,2])/nrow(test)
accuracy
##complexity of 0.4285714 | /eBayDecisionTree.R | no_license | aashvi22/eBay | R | false | false | 2,306 | r | setwd("users/krinalmanakiwala/desktop/RProjects")
data = read.csv("eBayClean.csv")
library(caTools)
RNGkind(sample.kind = 'Rounding')
set.seed(100)
split = sample.split(data$sold, SplitRatio = 0.7)
train = data[split,]
test = data[!split,]
nrow(train)
nrow(test)
install.packages("rpart")
install.packages("rpart.plot")
library(rpart)
library(rpart.plot)
##Regression Tree
tree1 = rpart(sold~startprice, data = train)
rpart.plot(tree1)
summary(tree1)
##complexity of 0.2244469
##Classification tree
##the second one has less leaves than the regression tree because it has a lower complexity number and the regression tree has a higher densiy
##this one starts witha higher cp number (complexity) so it
tree2 = rpart(sold~startprice, data = train, method = 'class', cp = 0.00001)
rpart.plot(tree2)
summary(tree2)
##why isn't this working? it should be 39
sum(train$startprice>=152 && train$startprice<172)
##Tree3 (based on storage) regression tree
tree3 = rpart(sold~storage, data = train, method = 'anova')
rpart.plot(tree2)
##There's no split because you can't run regression on catagorical variable
summary(tree3)
##Tree4 (based on storage) classification tree
tree4 = rpart(sold~storage, data = train, method = 'class')
rpart.plot(tree4)
##There's no split because you can't run regression/classification on catagorical variable. you can't split categorical variables!
summary(tree4)
tree5 = rpart(sold~.-UniqueID, data = train, method = 'class')
rpart.plot(tree5)
summary(tree5)
##based on the tree, the most important are biddable, startprice, productline in that order
##you can see this in summary
##primary split is a split in the data and surrogate split is a split caused by the primary split
##type = class for logistic regression
##question: what is minbucket?
pred = predict(tree5, newdata = test, type = 'class')
ct = table(test$sold, pred); ct
fpr =ct[1,2]/(ct[1,1] + ct[1,2])
fpr
tpr =ct[2,2]/(ct[2,2]+ct[2,1])
tpr
##the logistic had a higher TPR than this decision tree. This means decision tree is slightly worse
library(ROCR)
##ERROR: Error in prediction(pred, test$sold) : Format of predictions is invalid.
ROCRpred = prediction(pred, test$sold)
as.numeric(performance(ROCRpred, 'auc')@y.values)
accuracy = sum(ct[1,1],ct[2,2])/nrow(test)
accuracy
##complexity of 0.4285714 |
#' @export
#diet script and function
diet.analysis<-function(path=getwd(),dsn = 'ptran', uid = oracle.stomach.user,pwd = oracle.stomach.password) {
if(!require(RODBC)) { install.packages('RODBC',repos="http://cran.r-project.org")}
if(!require(vegan)) { install.packages('vegan',repos="http://cran.r-project.org")}
if(!require(reshape)) { install.packages('reshape',repos="http://cran.r-project.org")}
if(!require(splancs)) { install.packages('splancs',repos="http://cran.r-project.org")}
if(!require(plyr)) { install.packages('plyr',repos="http://cran.r-project.org")}
if(!require(nlstools)) { install.packages('nlstools',repos="http://cran.r-project.org")}
if(!require(doBy)) { install.packages('doBy',repos="http://cran.r-project.org")}
require(RODBC)
require(vegan)
require(reshape)
require(splancs)
require(plyr)
require(nlstools)
require(doBy)
channel<<-odbcConnect(dsn,uid, pwd) #called globally
options(stringsAsFactors=F)
options(warn=-1)
#source(file.path(path,"diet.functions.R"))
diet.version <- "0.1"
cat("---------------------------------------------------------- \n")
cat(paste("Welcome to Diet Analysis in R; version",
diet.version, "\n"))
cat("Author: Adam Cook, PED\n")
cat("The diet data used here is described in Cook and Bundy 2010\n")
cat("Please report bugs to: amcook127@gmail.com\n")
cat("---------------------------------------------------------- \n")
cat("\n")
cat("Press Esc to exit at any time. \n \n")
diet.data <- list()
diet.data$EXIT <-FALSE
while (diet.data$EXIT == FALSE) {
choices <- c("Specify diet data **need to to this before option 2,3,4 or 5**","View the Data","Species Accumulation Plot","Mean diet","Stratified Diet Analysis","Stratifed Numbers per tow","a's and b's","LVB growth curve",
"Predators", "Save all data compiled during this session to your R workspace?","Save all results to a txt file?","Exit")
title <- "The available options are:"
choose <- menu(choices, title = title)
if (choose == 1) { #get diet data
diet.data <- list()
diet.data$EXIT <-FALSE
src<-data.src()
datasource <- menu(src,title='Choose a DATASOURCE (if for stratified analysis only *GS* works:')
ds<-src[datasource]
yrs<-data.year(ds)
year<- select.list(yrs,title='Choose one or multiple YEARS:',multiple=T,graphics=T)
seas<-data.seasons(ds,year)
seasons <- menu(seas,title='Choose a SEASON:')
season<-seas[seasons]
areas<- menu(c("Strata","NAFO"),title='Choose Area based on:')
region<-data.region(dat=ds,strat.nafo=areas,seas=season,yrs=year)
regions<- select.list(region,title='Choose one or multiple AREAS:',multiple=T,graphics=F)
spec<-data.species(dat=ds, strat.nafo=areas,seas=season,yrs=year,reg=regions)
species<-menu(spec,title='Choose a SPECIES (- n of stoms w/ prey):')
spec1<-spec[species]
raw <- get.diet.data(dat=ds, strat.nafo=areas,seas=season,yrs=year,reg=regions,specs=spec1)
diet.data$raw.diet.data <- raw
cat("Do you want to save the data to a .csv? (y/n) \n")
plotans <- scan(what = "", nlines = 1, quiet = TRUE)
if (plotans == "yes" || plotans == "y")
save.to.csv(raw)
if(ds=='GS') {
gs.dat <- gs.rv.data (year=year,area=regions,species=spec1,season=season)
diet.data$strata.weights <-gs.dat$strata.weights
diet.data$gs.survey.data <- gs.dat$survey.data
diet.data$a.b<-a.b(specs=spec1,year=year,area=regions,season=season,plot=F,diet.analysis=T)
}
}
if (choose == 2) { #print diet data
print(diet.data)
}
if (choose ==3) { #do SAC's
cat("Do you want SAC by Length? (y/n) \n")
lens <- scan(what = "", nlines = 1, quiet = TRUE)
if (lens == "no" || lens == "n") {
species.accumulation(diet.data$raw.diet.data)
}
if(lens=='yes' || lens=='y') {
ll<-menu(c('5','10','20','Cutpoint'),title='Choose a Length grouping:')
if(ll==4) {
cat("What is your cutpoint length <= ? \n")
l1<-scan(what = "", nlines = 1, quiet = TRUE)
species.accumulation.by.length(data=diet.data$raw.diet.data,cut.p=T,lengths=as.numeric(l1))
}
else {
species.accumulation.by.length(data=diet.data$raw.diet.data,cut.p=F,lengths=as.numeric(ll))
}
}
}
if(choose ==4) { #mean diets
preys <- c('PREYSPECCD','FAM')
a3 <- menu(preys,title='How do you want prey items grouped?')
pp <- preys[a3]
cat("Do you want to remove influential observations? (y/n) \n")
ac<- scan(what = "", nlines = 1, quiet = TRUE)
ag<-0
if(ac=='y' | ac=='yes') {
ag<-1
cat("Removing influential observations is done by comparing the percent differences\n")
cat("of the mean diets with and without each observation\n")
cat("\n")
cat("Please provide the percent difference cut off point\n")
cat("(i.e. 30, 40, 50 with no percent sign)\n")
af<- scan(what = "", nlines = 1, quiet = TRUE)
}
cat("Do you want to remove singletons? (y/n) \n")
ad<- scan(what = "", nlines = 1, quiet = TRUE)
if(ad=='y' || ad=='yes') ah<-1
else ah<-0
cat("Do you want mean diets by length class? (y/n) \n")
ab<- scan(what = "", nlines = 1, quiet = TRUE)
if(ab=='no' || ab =='n') {
diet.data$mean.diets<-mean.diet(diet.data=diet.data$raw.diet.data,prey.grouping=pp,remove.singletons=ah,remove.influentials=ag,percent.diff=as.numeric(af))
}
if (ab == "yes" || ab == "y") {
ll<-menu(c('5','10','20','Cutpoint'),title='Choose a Length grouping:')
if(ll==4) {
cat("What is your cutpoint length <= ? \n")
l1<-scan(what = "", nlines = 1, quiet = TRUE)
diet.data$mean.diets<-mean.diet.by.length(data=diet.data$raw.diet,prey.grouping=pp,remove.singletons=ah,remove.influentials=ag,percent.diff=as.numeric(af),cut.p=T,lengths=as.numeric(l1))
}
else {
diet.data$mean.diets<-mean.diet.by.length(data=diet.data$raw.diet,prey.grouping=pp,remove.singletons=ah,remove.influentials=ag,percent.diff=as.numeric(af),cut.p=F,lengths=as.numeric(ll))
}
}
cat("Do you want to save this mean diet data? (y/n)\n")
at1<- scan(what = "", nlines = 1, quiet = TRUE)
if(at1=='yes' || at1=='y') {
save.to.csv(diet.data$mean.diets)
}
cat("Do you want to view this mean diet data? (y/n)\n")
at1<- scan(what = "", nlines = 1, quiet = TRUE)
if(at1=='yes' || at1=='y') {
print(diet.data$mean.diets)
}
}
if(choose ==5) {
#only for fam right now
# preys <- c('PREYSPECCD','FAM','CAT1','CAT2','CAT3')
#a3 <- menu(preys,title='How do you want prey items grouped?')
#pp <- preys[a3]
cat("Do you want to remove singletons? (y/n) \n")
ad<- scan(what = "", nlines = 1, quiet = TRUE)
if(ad=='y' || ad=='yes') ah<-1
else ah<-0
cat("Do you want stratified diets by length class? (y/n) \n")
ab<- scan(what = "", nlines = 1, quiet = TRUE)
if (ab == "no" || ab == "n") {
diet.data$stratified.diet<-stratified.diet(diet.data=diet.data$raw.diet,survey.data=diet.data$gs.survey.data,strata.data=diet.data$strata.weights,prey.grouping='FAM',
remove.singletons=ad, a.b=diet.data$a.b)
cat("Do you want to view your diet data? (y/n) \n")
ab1<- scan(what = "", nlines = 1, quiet = TRUE)
if(ab1 == 'y') {
print(diet.data$stratified.diet)
}
}
if (ab == "yes" || ab == "y") {
ll<-menu(c('5','10','20','Cutpoint'),title='Choose a Length grouping:')
if(ll==4) {
cat("What is your cutpoint length <= ? \n")
l1<-scan(what = "", nlines = 1, quiet = TRUE)
diet.data$stratified.diet<-stratified.diet(diet.data=diet.data$raw.diet,survey.data=diet.data$gs.survey.data,strata.data=diet.data$strata.weights,prey.grouping='FAM',
remove.singletons=ad, a.b=diet.data$a.b,by.lengths=T,cut.p=T,lengths=l1)
}
else {
diet.data$stratified.diet<-stratified.diet(diet.data=diet.data$raw.diet,survey.data=diet.data$gs.survey.data,strata.data=diet.data$strata.weights,prey.grouping='FAM',
remove.singletons=ad, a.b=diet.data$a.b,by.lengths=T,cut.p=F,lengths=ll)
}
cat("Do you want to view your diet data? (y/n) \n")
ab1<- scan(what = "", nlines = 1, quiet = TRUE)
if(ab1 == 'y') {
print(diet.data$stratified.diet)
}
}
cat("Do you want to save the stratified mean diet data? (y/n)\n")
at1<- scan(what = "", nlines = 1, quiet = TRUE)
if(at1=='yes' || at1=='y') {
save.to.csv(diet.data$stratified.diet)
}
}
if(choose==6) {
aw<-c('Winter','Spring','Summer','Autumn')
d<-menu(aw,title='Choose a season:')
dd<-aw[d]
d1<- paste(c(rv.year(dd))[[1]],sep=",")
ddd<-select.list(d1,multiple=T, graphics=T, title='Choose one or more years:')
areas<- menu(c("Strata","NAFO"),title='Choose Area based on:')
reg <- rv.data.region(strat.nafo=areas,seas=dd,yrs=ddd)
reg<-select.list(reg,multiple=T,graphics=F,title='Choose one or more areas:')
dddd<-c(rv.species(ddd,season=dd, area=reg))[[1]]
ddddd<-menu(dddd,title='Select a species:',graphics=F)
ddddd<-dddd[ddddd]
diet.data$rv.data.by.length<-rv.data(year=ddd,area=reg,specs=ddddd,season=dd)
cat("Do you want stratified estimates by length class? (y/n) \n")
ab<- scan(what = "", nlines = 1, quiet = TRUE)
if (ab == "no" || ab == "n") {
diet.data$strat.ests<-stratified.estimates(survey.dat=diet.data$rv.data.by.length$survey.data,strata.dat=diet.data$rv.data.by.length$strata.weights,by.lengths=F,cut.p=F,lengths)
}
if (ab == "yes" || ab == "y") {
ll<-menu(c('3','5','10','20','Cutpoint'),title='Choose a Length grouping:')
if(ll==5) {
cat("What is your cutpoint length <= ? \n")
l1<-scan(what = "", nlines = 1, quiet = TRUE)
diet.data$strat.ests<-stratified.estimates(survey.dat=diet.data$rv.data.by.length$survey.data,strata.dat=diet.data$rv.data.by.length$strata.weights,by.lengths=T,cut.p=T,lengths=as.numeric(l1))
}
else {
diet.data$strat.ests<-stratified.estimates(survey.dat=diet.data$rv.data.by.length$survey.data,strata.dat=diet.data$rv.data.by.length$strata.weights,by.lengths=T,cut.p=F,lengths=as.numeric(ll))
}
}
cat("Do you want to save to workspace? (y/n) \n")
ab<- scan(what = "", nlines = 1, quiet = TRUE)
if(ab=='yes' || ab=='y') {
strat.ests<<-diet.data$strat.ests
cat("------------------------------------------------------------ \n")
cat("Your object is named 'strat.ests'\n\n")
cat("------------------------------------------------------------ \n")
}
cat("Plot stratified catch estimates? (y/n) \n")
ab<- scan(what = "", nlines = 1, quiet = TRUE)
if(ab=='yes' || ab=='y') {
plot.strat.ests(diet.data$strat.ests)
cat("Title for plot? (in quotes)\n")
ab<- scan(what = "", nlines = 1, quiet = TRUE)
title(ab)
}
cat("Calculate rate of decline/increase? (y/n) \n")
ab<- scan(what = "", nlines = 1, quiet = TRUE)
if(ab=='yes' || ab=='y') {
aa<-unique(diet.data$strat.ests$Year)
ab<- select.list(aa,multiple=T,graphics=F,title="Choose both a starting year and ending year for calculating rates:")
diet.data$Rate.of.Change<-decline.estimates(diet.data$strat.ests,syear=ab[1],eyear=ab[2])
cat("Title for plot? (in quotes)\n")
ab<- scan(what = "", nlines = 1, quiet = TRUE)
title(ab)
}
}
if(choose==7) {
aw<-c('Winter','Spring','Summer','Autumn')
d<-menu(aw,title='Choose a season:')
dd<-aw[d]
d1<- paste(c(rv.year(dd))[[1]],sep=",")
ddd<-select.list(d1,multiple=T, graphics=T, title='Choose one or more years:')
areas<- menu(c("Strata","NAFO"),title='Choose Area based on:')
reg <- rv.data.region(strat.nafo=areas,seas=dd,yrs=ddd)
reg<-select.list(reg,multiple=T,graphics=F,title='Choose one or more areas:')
dddd<-c(rv.species(ddd,season=dd, area=reg))[[1]]
ddddd<-menu(dddd,title='Select a species:',graphics=F)
ddddd<-dddd[ddddd]
cat("What is your alpha level for confidence intervals? (e.g. 1, 5, 10) \n")
ab<- scan(what = "", nlines = 1, quiet = TRUE)
cat("Do you want a plot? (y/n) \n")
ac<- scan(what = "", nlines = 1, quiet = TRUE)
if(ac=='yes' || ac=='y') {
diet.data$a.b<-a.b(specs=ddddd,year=ddd,area=reg,stat.num=as.numeric(ab),plots=T,season=dd)
}
else {
diet.data$a.b<-a.b(specs=ddddd,year=ddd,area=reg,stat.num=as.numeric(ab),plots=F,season=dd)
}
print(diet.data$a.b)
}
if(choose ==8) {
a1<-vb.spps()
d<-menu(a1,title='Choose a species:')
dd<-a1[d]
a2<-vb.season(dd)
d1<-menu(a2,title='Choose a season:')
dd2<-a2[d1]
cat('Do you want to track a cohort? (y/n) \n')
aaa<-scan(what = "", nlines = 1, quiet = TRUE)
aaa<-ifelse(aaa=='y',TRUE,FALSE)
a3<-vb.year(species=dd,season=dd2)
d1<-select.list(a3,title='Choose a start year and end year:',graphics=F,multiple=T)
if(length(d1)==1) d1[2]<-d1[1]
areas<- menu(c("Strata","NAFO"),title='Choose Area based on:')
a4<-vb.region(sp=dd,seas=dd2,strat.nafo=areas,yrs=d1)
d2<-select.list(a4,title='Select one or multiple areas:', multiple=T,graphics=F)
cat("Enter your starting values for Linf, K and t0 (separated by a space) :\n ---------------------------------------------------------------\n if left blank starting values will be estimated by Walford lines\n Everhart, et al. 1975. Principles of Fishery Science \n---------------------------------------------------------------\n")
asw<-scan(what = "", nlines = 1, quiet = TRUE)
if(is.na(asw[1])) { sts<-list(hinf=0,K=0,t0=0) }
else {sts<-list(hinf=asw[1],K=asw[2],t0=asw[3])}
meth<-c('Nonlinear least squares regression','Maxmium liklihood estimation')
cat('Do you want to use means at age for VB curve? (y/n)\n')
ask1<-scan(what = "", nlines = 1, quiet = TRUE)
a91<-ifelse(ask1=='y',T,F)
if(ask1=='y') {
cat('\n Do you want to weight the means by the length specific variances\n as per Kimura (1970) method-c? (y/n)\n')
ask2<-scan(what = "", nlines = 1, quiet = TRUE)
a92<-ifelse(ask2=='y',T,F)
}
a6<-menu(meth,title='Choose fitting method')
if(a6==1) {
a44<-LVB(species=dd,area=d2,syear=d1[1],season=dd2,eyear=d1[2],plot=T,add.plot=F,line.col='blue',init.pars=sts, NLS=T,MLE=F, method=c('BFGS','CG','SANN'),
compare=F,species2,area2,syear2,eyear2,control = list(maxiter = 10000, minFactor = 1/2024, tol = 1e-05), cohort=aaa,means=a91,error.kimura.c=a92)
}
if(a6==2) {
metho<-c('BFGS a quasi-Newton method uses function values and gradients to build up a picture of the surface to be optimized','CG conjugate gradients method more fragile than the BFGS method may be successful in much larger probles',
'SANN simulated annealing uses the Metropolis function for the acceptance probability')
a7<-menu(metho,title='Select a fitting method: \n for more details on these methods see ?optim and references therein')
a8<-ifelse(a7==1,'BFGS',ifelse(a7==2,'CG','SANN'))
a44<-LVB(species=dd,area=d2,syear=d1[1],eyear=d1[2],season=dd2,plot=T,add.plot=F,line.col='blue',init.pars=sts, NLS=F,MLE=T, method=a8,
compare=F,species2,area2,syear2,eyear2,control = list(maxiter = 10000, minFactor = 1/2024, tol = 1e-05),cohort=aaa,means=a91)
}
print(a44)
cat('\n Do you want to compare this VB Growth Curve with another? (y/n) \n\n\t from Kimura 1980. U.S.Fish.Bull.77(4):765-776.\nThis method is currently programmed for least squares regression only\n')
comps<-scan(what = "", nlines = 1, quiet = TRUE)
if(comps=='y'){
d<-menu(a1,title='Choose a species:')
dd1<-a1[d]
a21<-vb.season(dd1)
d11<-menu(a2,title='Choose a season:')
dd21<-a2[d11]
cat('Do you want to track a cohort? (y/n) \n')
aaa1<-scan(what = "", nlines = 1, quiet = TRUE)
aaa1<-ifelse(aaa1=='y',TRUE,FALSE)
a31<-vb.year(species=dd1,season=dd21)
d11<-select.list(a31,title='Choose a start year and end year:',graphics=F,multiple=T)
if(length(d11)==1) d11[2]<-d11[1]
areas<- menu(c("Strata","NAFO"),title='Choose Area based on:')
a41<-vb.region(sp=dd1,seas=dd21,strat.nafo=areas,yrs=d11)
d21<-select.list(a41,title='Select one or multiple areas:', multiple=T,graphics=F)
cat("Enter your starting values for Linf, K and t0 (separated by a space) :\n ---------------------------------------------------------------\n if left blank starting values will be estimated by Walford lines\n Everhart, et al. 1975. Principles of Fishery Science \n---------------------------------------------------------------\n")
asw1<-scan(what = "", nlines = 1, quiet = TRUE)
if(is.na(asw1[1])) { sts1<-list(hinf=0,K=0,t0=0) }
else {sts1<-list(hinf=asw1[1],K=asw1[2],t0=asw1[3])}
cat('Do you want to use means at age for VB curve? (y/n)\n')
ask1<-scan(what = "", nlines = 1, quiet = TRUE)
a93<-ifelse(ask1=='y',T,F)
if(ask1=='y') {
cat('\n Do you want to weight the means by the length specific variances\n as per Kimura (1970) method-c? (y/n)\n')
ask2<-scan(what = "", nlines = 1, quiet = TRUE)
a94<-ifelse(ask2=='y',T,F)
}
a45<-LVB(species=dd,area=d2,syear=d1[1],eyear=d1[2],season=dd2,plot=T,add.plot=F,line.col='blue',init.pars=sts, NLS=T,MLE=F, method=c('BFGS','CG','SANN'),cohort=aaa,
compare=T,species2=dd1,area2=d21,season2=dd21,init.pars2=sts1,syear2=d11[1],eyear2=d11[2],control = list(maxiter = 10000, minFactor = 1/2024, tol = 1e-05), cohort2=aaa1,means=a91,
error.kimura.c=a92,means2=a93,error.kimura.c2=a94 )
print(a45)
}
}
if(choose ==9) {
cat("Who eats me??\n")
cat("----------------------------------------------- \n")
grps<-c('Fin Fish','Shrimps','Crabs','Cephalopods','Amphipods','All - caution this is a very long list')
abc<-menu(grps,title='Choose a broad classification:')
aaa3<-grps[abc]
next1<-prey.species(aaa3)
next2 <- select.list(next1,multiple=F,title='Choose a species',graphics=F)
diet.data$predation<-predators(next2)
aa<-c('Predation by Species','Predation by Species and Year','Predation by Species and Area','Predation by Species,Area and Year')
dc<-menu(aa,title='How do you want to see the predation data?')
print(diet.data$predation[[dc]])
cat("Do you want to save this predation data? (y/n)\n")
at1<- scan(what = "", nlines = 1, quiet = TRUE)
if(at1=='yes' || at1=='y') {
save.to.csv(diet.data$predation[[dc]])
}
}
if(choose==10) {
diet.data<<-diet.data
cat("------------------------------------------------------------ \n")
cat("Your object is named 'diet.data'\n\n")
cat("------------------------------------------------------------ \n")
}
if(choose==11) {
a<-getwd()
b<-Sys.time()
b<-format(b,format="%m-%d %H%M",tz='America/Halifax')
f<-paste("Diet analysis ", b,".txt",sep="")
capture.output(print(diet.data),file=f)
cat("\n")
cat('Your file is in:\n')
cat(paste(a,"/",f,sep=""))
cat("\n")
cat("------------------------------- \n")
cat("\n")
}
if (choose == 12) {
cat("Enjoy your diet data...... \n")
odbcCloseAll()
gc(reset=T)
diet.data$EXIT = TRUE
}
}
}
| /R/diet.analysis.r | no_license | AMCOOK/FoodHabits | R | false | false | 21,875 | r | #' @export
# diet.analysis: interactive, menu-driven console tool for exploring the
# PED stomach-content ("food habits") database over an RODBC connection.
#
# path    - directory for sourcing helper scripts (currently unused; the
#           source() call below is commented out).
# dsn     - ODBC data source name handed to odbcConnect().
# uid/pwd - Oracle credentials; the defaults are evaluated lazily, so
#           oracle.stomach.user / oracle.stomach.password must exist in the
#           calling environment when they are not supplied explicitly.
#
# Side effects: opens the global ODBC connection 'channel' (<<-), changes
# session options, prints and plots to the console, and can write .csv/.txt
# files.  The menu loop runs until option 12 (Exit); results computed during
# a session accumulate in the local list 'diet.data' (option 10 copies it to
# the global workspace, option 11 dumps it to a text file).
diet.analysis<-function(path=getwd(),dsn = 'ptran', uid = oracle.stomach.user,pwd = oracle.stomach.password) {
# Install any missing dependencies from CRAN, then attach them all.
if(!require(RODBC)) { install.packages('RODBC',repos="http://cran.r-project.org")}
if(!require(vegan)) { install.packages('vegan',repos="http://cran.r-project.org")}
if(!require(reshape)) { install.packages('reshape',repos="http://cran.r-project.org")}
if(!require(splancs)) { install.packages('splancs',repos="http://cran.r-project.org")}
if(!require(plyr)) { install.packages('plyr',repos="http://cran.r-project.org")}
if(!require(nlstools)) { install.packages('nlstools',repos="http://cran.r-project.org")}
if(!require(doBy)) { install.packages('doBy',repos="http://cran.r-project.org")}
require(RODBC)
require(vegan)
require(reshape)
require(splancs)
require(plyr)
require(nlstools)
require(doBy)
channel<<-odbcConnect(dsn,uid, pwd) #called globally; the database helper functions below read it
options(stringsAsFactors=F)
options(warn=-1)
#source(file.path(path,"diet.functions.R"))
diet.version <- "0.1"
cat("---------------------------------------------------------- \n")
cat(paste("Welcome to Diet Analysis in R; version",
diet.version, "\n"))
cat("Author: Adam Cook, PED\n")
cat("The diet data used here is described in Cook and Bundy 2010\n")
cat("Please report bugs to: amcook127@gmail.com\n")
cat("---------------------------------------------------------- \n")
cat("\n")
cat("Press Esc to exit at any time. \n \n")
diet.data <- list()
diet.data$EXIT <-FALSE
# Main menu loop: each pass presents the menu and runs exactly one option.
while (diet.data$EXIT == FALSE) {
choices <- c("Specify diet data **need to to this before option 2,3,4 or 5**","View the Data","Species Accumulation Plot","Mean diet","Stratified Diet Analysis","Stratifed Numbers per tow","a's and b's","LVB growth curve",
"Predators", "Save all data compiled during this session to your R workspace?","Save all results to a txt file?","Exit")
title <- "The available options are:"
choose <- menu(choices, title = title)
# Option 1: choose source/years/season/area/species and pull the raw records.
if (choose == 1) { #get diet data
diet.data <- list()
diet.data$EXIT <-FALSE
src<-data.src()
datasource <- menu(src,title='Choose a DATASOURCE (if for stratified analysis only *GS* works:')
ds<-src[datasource]
yrs<-data.year(ds)
year<- select.list(yrs,title='Choose one or multiple YEARS:',multiple=T,graphics=T)
seas<-data.seasons(ds,year)
seasons <- menu(seas,title='Choose a SEASON:')
season<-seas[seasons]
areas<- menu(c("Strata","NAFO"),title='Choose Area based on:')
region<-data.region(dat=ds,strat.nafo=areas,seas=season,yrs=year)
regions<- select.list(region,title='Choose one or multiple AREAS:',multiple=T,graphics=F)
spec<-data.species(dat=ds, strat.nafo=areas,seas=season,yrs=year,reg=regions)
species<-menu(spec,title='Choose a SPECIES (- n of stoms w/ prey):')
spec1<-spec[species]
raw <- get.diet.data(dat=ds, strat.nafo=areas,seas=season,yrs=year,reg=regions,specs=spec1)
diet.data$raw.diet.data <- raw
cat("Do you want to save the data to a .csv? (y/n) \n")
plotans <- scan(what = "", nlines = 1, quiet = TRUE)
if (plotans == "yes" || plotans == "y")
save.to.csv(raw)
# Only the GS (groundfish survey) source supports stratified analyses, so
# also fetch survey records, strata weights and length-weight coefficients.
if(ds=='GS') {
gs.dat <- gs.rv.data (year=year,area=regions,species=spec1,season=season)
diet.data$strata.weights <-gs.dat$strata.weights
diet.data$gs.survey.data <- gs.dat$survey.data
diet.data$a.b<-a.b(specs=spec1,year=year,area=regions,season=season,plot=F,diet.analysis=T)
}
}
# Option 2: show everything gathered so far.
if (choose == 2) { #print diet data
print(diet.data)
}
# Option 3: species accumulation curves, optionally split by length bin.
if (choose ==3) { #do SAC's
cat("Do you want SAC by Length? (y/n) \n")
lens <- scan(what = "", nlines = 1, quiet = TRUE)
if (lens == "no" || lens == "n") {
species.accumulation(diet.data$raw.diet.data)
}
if(lens=='yes' || lens=='y') {
ll<-menu(c('5','10','20','Cutpoint'),title='Choose a Length grouping:')
if(ll==4) {
cat("What is your cutpoint length <= ? \n")
l1<-scan(what = "", nlines = 1, quiet = TRUE)
species.accumulation.by.length(data=diet.data$raw.diet.data,cut.p=T,lengths=as.numeric(l1))
}
else {
species.accumulation.by.length(data=diet.data$raw.diet.data,cut.p=F,lengths=as.numeric(ll))
}
}
}
# Option 4: mean diets, optionally by length class, with optional removal of
# influential observations and singletons.
if(choose ==4) { #mean diets
preys <- c('PREYSPECCD','FAM')
a3 <- menu(preys,title='How do you want prey items grouped?')
pp <- preys[a3]
cat("Do you want to remove influential observations? (y/n) \n")
ac<- scan(what = "", nlines = 1, quiet = TRUE)
ag<-0
# FIX: give percent.diff a default; 'af' was previously unset when the user
# answered 'n' above, making as.numeric(af) fail in the mean.diet calls.
af<-0
if(ac=='y' | ac=='yes') {
ag<-1
cat("Removing influential observations is done by comparing the percent differences\n")
cat("of the mean diets with and without each observation\n")
cat("\n")
cat("Please provide the percent difference cut off point\n")
cat("(i.e. 30, 40, 50 with no percent sign)\n")
af<- scan(what = "", nlines = 1, quiet = TRUE)
}
cat("Do you want to remove singletons? (y/n) \n")
ad<- scan(what = "", nlines = 1, quiet = TRUE)
if(ad=='y' || ad=='yes') ah<-1
else ah<-0
cat("Do you want mean diets by length class? (y/n) \n")
ab<- scan(what = "", nlines = 1, quiet = TRUE)
if(ab=='no' || ab =='n') {
diet.data$mean.diets<-mean.diet(diet.data=diet.data$raw.diet.data,prey.grouping=pp,remove.singletons=ah,remove.influentials=ag,percent.diff=as.numeric(af))
}
if (ab == "yes" || ab == "y") {
ll<-menu(c('5','10','20','Cutpoint'),title='Choose a Length grouping:')
if(ll==4) {
cat("What is your cutpoint length <= ? \n")
l1<-scan(what = "", nlines = 1, quiet = TRUE)
# FIX: use the full element name (was diet.data$raw.diet, which only worked
# through $ partial matching).
diet.data$mean.diets<-mean.diet.by.length(data=diet.data$raw.diet.data,prey.grouping=pp,remove.singletons=ah,remove.influentials=ag,percent.diff=as.numeric(af),cut.p=T,lengths=as.numeric(l1))
}
else {
diet.data$mean.diets<-mean.diet.by.length(data=diet.data$raw.diet.data,prey.grouping=pp,remove.singletons=ah,remove.influentials=ag,percent.diff=as.numeric(af),cut.p=F,lengths=as.numeric(ll))
}
}
cat("Do you want to save this mean diet data? (y/n)\n")
at1<- scan(what = "", nlines = 1, quiet = TRUE)
if(at1=='yes' || at1=='y') {
save.to.csv(diet.data$mean.diets)
}
cat("Do you want to view this mean diet data? (y/n)\n")
at1<- scan(what = "", nlines = 1, quiet = TRUE)
if(at1=='yes' || at1=='y') {
print(diet.data$mean.diets)
}
}
# Option 5: stratified diet analysis (GS data only; requires option 1 first).
if(choose ==5) {
#only for fam right now
# preys <- c('PREYSPECCD','FAM','CAT1','CAT2','CAT3')
#a3 <- menu(preys,title='How do you want prey items grouped?')
#pp <- preys[a3]
cat("Do you want to remove singletons? (y/n) \n")
ad<- scan(what = "", nlines = 1, quiet = TRUE)
if(ad=='y' || ad=='yes') ah<-1
else ah<-0
cat("Do you want stratified diets by length class? (y/n) \n")
ab<- scan(what = "", nlines = 1, quiet = TRUE)
if (ab == "no" || ab == "n") {
# NOTE(review): remove.singletons is passed the raw 'y'/'n' answer (ad),
# not the 0/1 flag (ah) used by the mean.diet calls above — confirm which
# form stratified.diet() expects.
diet.data$stratified.diet<-stratified.diet(diet.data=diet.data$raw.diet.data,survey.data=diet.data$gs.survey.data,strata.data=diet.data$strata.weights,prey.grouping='FAM',
remove.singletons=ad, a.b=diet.data$a.b)
cat("Do you want to view your diet data? (y/n) \n")
ab1<- scan(what = "", nlines = 1, quiet = TRUE)
if(ab1 == 'y') {
print(diet.data$stratified.diet)
}
}
if (ab == "yes" || ab == "y") {
ll<-menu(c('5','10','20','Cutpoint'),title='Choose a Length grouping:')
if(ll==4) {
cat("What is your cutpoint length <= ? \n")
l1<-scan(what = "", nlines = 1, quiet = TRUE)
diet.data$stratified.diet<-stratified.diet(diet.data=diet.data$raw.diet.data,survey.data=diet.data$gs.survey.data,strata.data=diet.data$strata.weights,prey.grouping='FAM',
remove.singletons=ad, a.b=diet.data$a.b,by.lengths=T,cut.p=T,lengths=l1)
}
else {
diet.data$stratified.diet<-stratified.diet(diet.data=diet.data$raw.diet.data,survey.data=diet.data$gs.survey.data,strata.data=diet.data$strata.weights,prey.grouping='FAM',
remove.singletons=ad, a.b=diet.data$a.b,by.lengths=T,cut.p=F,lengths=ll)
}
cat("Do you want to view your diet data? (y/n) \n")
ab1<- scan(what = "", nlines = 1, quiet = TRUE)
if(ab1 == 'y') {
print(diet.data$stratified.diet)
}
}
cat("Do you want to save the stratified mean diet data? (y/n)\n")
at1<- scan(what = "", nlines = 1, quiet = TRUE)
if(at1=='yes' || at1=='y') {
save.to.csv(diet.data$stratified.diet)
}
}
# Option 6: stratified numbers per tow from the RV survey, with optional
# plotting and rate-of-change estimation.
if(choose==6) {
aw<-c('Winter','Spring','Summer','Autumn')
d<-menu(aw,title='Choose a season:')
dd<-aw[d]
d1<- paste(c(rv.year(dd))[[1]],sep=",")
ddd<-select.list(d1,multiple=T, graphics=T, title='Choose one or more years:')
areas<- menu(c("Strata","NAFO"),title='Choose Area based on:')
reg <- rv.data.region(strat.nafo=areas,seas=dd,yrs=ddd)
reg<-select.list(reg,multiple=T,graphics=F,title='Choose one or more areas:')
dddd<-c(rv.species(ddd,season=dd, area=reg))[[1]]
ddddd<-menu(dddd,title='Select a species:',graphics=F)
ddddd<-dddd[ddddd]
diet.data$rv.data.by.length<-rv.data(year=ddd,area=reg,specs=ddddd,season=dd)
cat("Do you want stratified estimates by length class? (y/n) \n")
ab<- scan(what = "", nlines = 1, quiet = TRUE)
if (ab == "no" || ab == "n") {
# NOTE(review): 'lengths' is passed unnamed here with no local value (it
# resolves to base::lengths); presumably ignored when by.lengths=F — confirm.
diet.data$strat.ests<-stratified.estimates(survey.dat=diet.data$rv.data.by.length$survey.data,strata.dat=diet.data$rv.data.by.length$strata.weights,by.lengths=F,cut.p=F,lengths)
}
if (ab == "yes" || ab == "y") {
ll<-menu(c('3','5','10','20','Cutpoint'),title='Choose a Length grouping:')
if(ll==5) {
cat("What is your cutpoint length <= ? \n")
l1<-scan(what = "", nlines = 1, quiet = TRUE)
diet.data$strat.ests<-stratified.estimates(survey.dat=diet.data$rv.data.by.length$survey.data,strata.dat=diet.data$rv.data.by.length$strata.weights,by.lengths=T,cut.p=T,lengths=as.numeric(l1))
}
else {
diet.data$strat.ests<-stratified.estimates(survey.dat=diet.data$rv.data.by.length$survey.data,strata.dat=diet.data$rv.data.by.length$strata.weights,by.lengths=T,cut.p=F,lengths=as.numeric(ll))
}
}
cat("Do you want to save to workspace? (y/n) \n")
ab<- scan(what = "", nlines = 1, quiet = TRUE)
if(ab=='yes' || ab=='y') {
strat.ests<<-diet.data$strat.ests
cat("------------------------------------------------------------ \n")
cat("Your object is named 'strat.ests'\n\n")
cat("------------------------------------------------------------ \n")
}
cat("Plot stratified catch estimates? (y/n) \n")
ab<- scan(what = "", nlines = 1, quiet = TRUE)
if(ab=='yes' || ab=='y') {
plot.strat.ests(diet.data$strat.ests)
cat("Title for plot? (in quotes)\n")
ab<- scan(what = "", nlines = 1, quiet = TRUE)
title(ab)
}
cat("Calculate rate of decline/increase? (y/n) \n")
ab<- scan(what = "", nlines = 1, quiet = TRUE)
if(ab=='yes' || ab=='y') {
aa<-unique(diet.data$strat.ests$Year)
ab<- select.list(aa,multiple=T,graphics=F,title="Choose both a starting year and ending year for calculating rates:")
diet.data$Rate.of.Change<-decline.estimates(diet.data$strat.ests,syear=ab[1],eyear=ab[2])
cat("Title for plot? (in quotes)\n")
ab<- scan(what = "", nlines = 1, quiet = TRUE)
title(ab)
}
}
# Option 7: length-weight coefficients (a's and b's) with confidence bounds.
if(choose==7) {
aw<-c('Winter','Spring','Summer','Autumn')
d<-menu(aw,title='Choose a season:')
dd<-aw[d]
d1<- paste(c(rv.year(dd))[[1]],sep=",")
ddd<-select.list(d1,multiple=T, graphics=T, title='Choose one or more years:')
areas<- menu(c("Strata","NAFO"),title='Choose Area based on:')
reg <- rv.data.region(strat.nafo=areas,seas=dd,yrs=ddd)
reg<-select.list(reg,multiple=T,graphics=F,title='Choose one or more areas:')
dddd<-c(rv.species(ddd,season=dd, area=reg))[[1]]
ddddd<-menu(dddd,title='Select a species:',graphics=F)
ddddd<-dddd[ddddd]
cat("What is your alpha level for confidence intervals? (e.g. 1, 5, 10) \n")
ab<- scan(what = "", nlines = 1, quiet = TRUE)
cat("Do you want a plot? (y/n) \n")
ac<- scan(what = "", nlines = 1, quiet = TRUE)
if(ac=='yes' || ac=='y') {
diet.data$a.b<-a.b(specs=ddddd,year=ddd,area=reg,stat.num=as.numeric(ab),plots=T,season=dd)
}
else {
diet.data$a.b<-a.b(specs=ddddd,year=ddd,area=reg,stat.num=as.numeric(ab),plots=F,season=dd)
}
print(diet.data$a.b)
}
# Option 8: fit a von Bertalanffy (LVB) growth curve, optionally comparing
# two curves via Kimura's (1980) likelihood-ratio approach.
if(choose ==8) {
a1<-vb.spps()
d<-menu(a1,title='Choose a species:')
dd<-a1[d]
a2<-vb.season(dd)
d1<-menu(a2,title='Choose a season:')
dd2<-a2[d1]
cat('Do you want to track a cohort? (y/n) \n')
aaa<-scan(what = "", nlines = 1, quiet = TRUE)
aaa<-ifelse(aaa=='y',TRUE,FALSE)
a3<-vb.year(species=dd,season=dd2)
d1<-select.list(a3,title='Choose a start year and end year:',graphics=F,multiple=T)
if(length(d1)==1) d1[2]<-d1[1]
areas<- menu(c("Strata","NAFO"),title='Choose Area based on:')
a4<-vb.region(sp=dd,seas=dd2,strat.nafo=areas,yrs=d1)
d2<-select.list(a4,title='Select one or multiple areas:', multiple=T,graphics=F)
cat("Enter your starting values for Linf, K and t0 (separated by a space) :\n ---------------------------------------------------------------\n if left blank starting values will be estimated by Walford lines\n Everhart, et al. 1975. Principles of Fishery Science \n---------------------------------------------------------------\n")
asw<-scan(what = "", nlines = 1, quiet = TRUE)
# Blank input -> scan() returns character(0), so asw[1] is NA and Walford
# starting values (all zero) are requested from LVB().
# NOTE(review): when values are supplied they are kept as character strings;
# confirm LVB() coerces them to numeric.
if(is.na(asw[1])) { sts<-list(hinf=0,K=0,t0=0) }
else {sts<-list(hinf=asw[1],K=asw[2],t0=asw[3])}
meth<-c('Nonlinear least squares regression','Maxmium liklihood estimation')
cat('Do you want to use means at age for VB curve? (y/n)\n')
ask1<-scan(what = "", nlines = 1, quiet = TRUE)
a91<-ifelse(ask1=='y',T,F)
# FIX: default the Kimura method-c weighting flag; 'a92' was previously only
# assigned inside the branch below but is always passed to LVB().
a92<-F
if(ask1=='y') {
cat('\n Do you want to weight the means by the length specific variances\n as per Kimura (1970) method-c? (y/n)\n')
ask2<-scan(what = "", nlines = 1, quiet = TRUE)
a92<-ifelse(ask2=='y',T,F)
}
a6<-menu(meth,title='Choose fitting method')
if(a6==1) {
a44<-LVB(species=dd,area=d2,syear=d1[1],season=dd2,eyear=d1[2],plot=T,add.plot=F,line.col='blue',init.pars=sts, NLS=T,MLE=F, method=c('BFGS','CG','SANN'),
compare=F,species2,area2,syear2,eyear2,control = list(maxiter = 10000, minFactor = 1/2024, tol = 1e-05), cohort=aaa,means=a91,error.kimura.c=a92)
}
if(a6==2) {
metho<-c('BFGS a quasi-Newton method uses function values and gradients to build up a picture of the surface to be optimized','CG conjugate gradients method more fragile than the BFGS method may be successful in much larger probles',
'SANN simulated annealing uses the Metropolis function for the acceptance probability')
a7<-menu(metho,title='Select a fitting method: \n for more details on these methods see ?optim and references therein')
a8<-ifelse(a7==1,'BFGS',ifelse(a7==2,'CG','SANN'))
a44<-LVB(species=dd,area=d2,syear=d1[1],eyear=d1[2],season=dd2,plot=T,add.plot=F,line.col='blue',init.pars=sts, NLS=F,MLE=T, method=a8,
compare=F,species2,area2,syear2,eyear2,control = list(maxiter = 10000, minFactor = 1/2024, tol = 1e-05),cohort=aaa,means=a91)
}
print(a44)
cat('\n Do you want to compare this VB Growth Curve with another? (y/n) \n\n\t from Kimura 1980. U.S.Fish.Bull.77(4):765-776.\nThis method is currently programmed for least squares regression only\n')
comps<-scan(what = "", nlines = 1, quiet = TRUE)
if(comps=='y'){
# Second curve for the Kimura comparison: repeat the whole selection flow.
d<-menu(a1,title='Choose a species:')
dd1<-a1[d]
a21<-vb.season(dd1)
# FIX: use the second species' season list (a21); this previously indexed
# the first species' list (a2), offering/selecting the wrong seasons.
d11<-menu(a21,title='Choose a season:')
dd21<-a21[d11]
cat('Do you want to track a cohort? (y/n) \n')
aaa1<-scan(what = "", nlines = 1, quiet = TRUE)
aaa1<-ifelse(aaa1=='y',TRUE,FALSE)
a31<-vb.year(species=dd1,season=dd21)
d11<-select.list(a31,title='Choose a start year and end year:',graphics=F,multiple=T)
if(length(d11)==1) d11[2]<-d11[1]
areas<- menu(c("Strata","NAFO"),title='Choose Area based on:')
a41<-vb.region(sp=dd1,seas=dd21,strat.nafo=areas,yrs=d11)
d21<-select.list(a41,title='Select one or multiple areas:', multiple=T,graphics=F)
cat("Enter your starting values for Linf, K and t0 (separated by a space) :\n ---------------------------------------------------------------\n if left blank starting values will be estimated by Walford lines\n Everhart, et al. 1975. Principles of Fishery Science \n---------------------------------------------------------------\n")
asw1<-scan(what = "", nlines = 1, quiet = TRUE)
if(is.na(asw1[1])) { sts1<-list(hinf=0,K=0,t0=0) }
else {sts1<-list(hinf=asw1[1],K=asw1[2],t0=asw1[3])}
cat('Do you want to use means at age for VB curve? (y/n)\n')
ask1<-scan(what = "", nlines = 1, quiet = TRUE)
a93<-ifelse(ask1=='y',T,F)
# FIX: default for the second curve's weighting flag (same issue as a92).
a94<-F
if(ask1=='y') {
cat('\n Do you want to weight the means by the length specific variances\n as per Kimura (1970) method-c? (y/n)\n')
ask2<-scan(what = "", nlines = 1, quiet = TRUE)
a94<-ifelse(ask2=='y',T,F)
}
a45<-LVB(species=dd,area=d2,syear=d1[1],eyear=d1[2],season=dd2,plot=T,add.plot=F,line.col='blue',init.pars=sts, NLS=T,MLE=F, method=c('BFGS','CG','SANN'),cohort=aaa,
compare=T,species2=dd1,area2=d21,season2=dd21,init.pars2=sts1,syear2=d11[1],eyear2=d11[2],control = list(maxiter = 10000, minFactor = 1/2024, tol = 1e-05), cohort2=aaa1,means=a91,
error.kimura.c=a92,means2=a93,error.kimura.c2=a94 )
print(a45)
}
}
# Option 9: who-eats-me — predators of a chosen prey species.
if(choose ==9) {
cat("Who eats me??\n")
cat("----------------------------------------------- \n")
grps<-c('Fin Fish','Shrimps','Crabs','Cephalopods','Amphipods','All - caution this is a very long list')
abc<-menu(grps,title='Choose a broad classification:')
aaa3<-grps[abc]
next1<-prey.species(aaa3)
next2 <- select.list(next1,multiple=F,title='Choose a species',graphics=F)
diet.data$predation<-predators(next2)
aa<-c('Predation by Species','Predation by Species and Year','Predation by Species and Area','Predation by Species,Area and Year')
dc<-menu(aa,title='How do you want to see the predation data?')
print(diet.data$predation[[dc]])
cat("Do you want to save this predation data? (y/n)\n")
at1<- scan(what = "", nlines = 1, quiet = TRUE)
if(at1=='yes' || at1=='y') {
save.to.csv(diet.data$predation[[dc]])
}
}
# Option 10: copy the accumulated session results to the global workspace.
if(choose==10) {
diet.data<<-diet.data
cat("------------------------------------------------------------ \n")
cat("Your object is named 'diet.data'\n\n")
cat("------------------------------------------------------------ \n")
}
# Option 11: dump all session results to a timestamped text file.
if(choose==11) {
a<-getwd()
b<-Sys.time()
b<-format(b,format="%m-%d %H%M",tz='America/Halifax')
f<-paste("Diet analysis ", b,".txt",sep="")
capture.output(print(diet.data),file=f)
cat("\n")
cat('Your file is in:\n')
cat(paste(a,"/",f,sep=""))
cat("\n")
cat("------------------------------- \n")
cat("\n")
}
# Option 12: close all ODBC connections and leave the menu loop.
if (choose == 12) {
cat("Enjoy your diet data...... \n")
odbcCloseAll()
gc(reset=T)
diet.data$EXIT <- TRUE
}
}
}
|
# intervalPlot: one-row-per-measurement timeline ("Gantt"-style) plot of the
# intervals in listaIntervalos, faceted by calendar day.
#
# listaIntervalos          - named list of data frames; each element holds
#                            from/to timestamp columns for one measurement,
#                            and the element name identifies the measurement.
# horario                  - schedule object passed straight through to
#                            intervalIntersectv2() (helper defined elsewhere).
# listaDeVariablesElegidas - named list: family name -> character vector of
#                            measurement names belonging to that family.
# paleta                   - named list: family name -> colour vector, indexed
#                            by a measurement's position within its family.
#
# Returns a ggplot object: one horizontal segment per interval, one row per
# measurement, hours on the x axis, one facet per day (when day labels exist).
#
# Note: dead locals from the original (variable2Color, Familia2Orden, ultimo,
# dias) were computed but never used and have been removed.
intervalPlot=function(listaIntervalos,horario,listaDeVariablesElegidas,paleta){
# Flat vector of every chosen measurement name, in selection order.
lasVariables=listaDeVariablesElegidas %>% unlist(use.names=F)
# Vertical slot (row height) for each measurement.
variable2Altura=seq_along(lasVariables); names(variable2Altura)=lasVariables
# Map each measurement back to its family: unlist() names the entries
# "Familia1", "Familia2", ... so stripping the trailing digit recovers them.
variable2Familia=names(listaDeVariablesElegidas %>% unlist()) %>% str_replace("[0-9]$","")
names(variable2Familia)=listaDeVariablesElegidas %>% unlist()
# Position of each measurement *within* its family (indexes into paleta).
variable2Orden=listaDeVariablesElegidas %>%
map( function(x) {resultado=seq_along(x);names(resultado)=x;resultado}) %>%
unname() %>%
unlist()
# Stack all interval tables into one frame, tagging each row with its
# measurement name, family, palette index, row height and a label that sorts
# in row order ("01-name", "02-name", ...).
dfInt=listaIntervalos %>% map2(names(listaIntervalos),function(x,y)x %>%
mutate(content=y,
Familia=variable2Familia[content],
variable=variable2Orden[content],
altura=variable2Altura[content],
medicion=sprintf("%02d-%s",as.integer(altura),content)
)
) %>%
reduce(rbind) %>%
filter(!is.na(Familia)) # drop measurements not in listaDeVariablesElegidas
# One colour per measurement, ordered like the sorted medicion labels so the
# manual scale lines up with ggplot's legend order.
miPaleta=dfInt %>% group_by(Familia,medicion) %>% summarise(variable=first(variable)) %>% arrange(medicion) %>%
mutate(elColor=map2_chr(Familia,variable,function(x,y)paleta[[x]][y])) %>% .[["elColor"]]
zona=tz(dfInt$from[1])
primero=as_date(min(dfInt$from))
# Clip intervals at day boundaries, then shift each day's timestamps back to
# the first day so every facet shares a single 24 h x axis.
dfIntDias=intervalIntersectv2(dfInt,horario) %>%
transmute(from=fromNew,to=toNew,content=content,Familia=Familia,variable=variable,altura=altura,medicion=medicion,day=day) %>%
mutate(
from_b= from-(day-primero),
to_b= to-(day-primero),
dia= str_c(str_sub(as.character(as_date(day)),3,10),"\n",weekdays(day,abbreviate=TRUE))) %>%
arrange(medicion,from_b)
grafico=ggplot(dfIntDias,aes(x=from_b,y=altura))+
geom_segment(aes(xend=to_b,yend=altura,color=medicion),size=1)+
scale_x_datetime(labels=date_format("%H",tz=zona),date_minor_breaks="30 mins",date_breaks="1 hours",position = "top")+
theme_stata() +
scale_y_continuous(breaks=NULL)+
xlab("Hora")+
scale_color_manual(values=miPaleta)+
theme(
axis.text.y=element_blank(),
axis.ticks.y=element_blank(),
axis.text.x=element_text(size=8),
panel.grid.major.x=element_line(colour = 'lightblue'),
axis.title.y=element_blank(),
legend.position="top",
legend.title = element_blank(),
strip.text.y = element_text(size = 7) )
# Facet by day only when intervalIntersectv2() produced any day labels.
if (sum(!is.na(dfIntDias$dia))>0) grafico=grafico+facet_wrap(~dia,ncol = 1,strip.position="left")
grafico
}
| /R/intervalPlot.R | no_license | fjbaron/accelerator | R | false | false | 3,092 | r | intervalPlot=function(listaIntervalos,horario,listaDeVariablesElegidas,paleta){
lasVariables=listaDeVariablesElegidas %>% unlist(use.names=F)
variable2Color=map2(listaDeVariablesElegidas,names(listaDeVariablesElegidas), ~ paleta[[.y]][1:length(.x)]) %>% unlist(use.names=F)
names(variable2Color)=lasVariables
variable2Altura=1:length(lasVariables); names(variable2Altura)=lasVariables
variable2Familia=names(listaDeVariablesElegidas %>% unlist()) %>% str_replace("[0-9]$","")
names(variable2Familia)=listaDeVariablesElegidas %>% unlist()
Familia2Orden=seq_along(listaDeVariablesElegidas)
names(Familia2Orden)=names(listaDeVariablesElegidas)
ordenLocalVariables=listaDeVariablesElegidas %>% map( function(x) {resultado=seq_along(x);names(resultado)=x;resultado})
names(ordenLocalVariables)=NULL
variable2Orden=ordenLocalVariables %>% unlist()
dfInt=listaIntervalos %>% map2(names(listaIntervalos),function(x,y)x %>%
mutate(content=y,
Familia=variable2Familia[content],
variable=variable2Orden[content],
#altura=Familia2Orden[Familia]*5+variable2Orden[content],
altura=variable2Altura[content],
medicion=sprintf("%02d-%s",as.integer(altura),content)
)
) %>%
reduce(rbind) %>%
filter(!is.na(Familia))
miPaleta=dfInt %>% group_by(Familia,medicion) %>% summarise(variable=first(variable)) %>% arrange(medicion) %>%
mutate(elColor=map2_chr(Familia,variable,function(x,y)paleta[[x]][y])) %>% .[["elColor"]]
zona=tz(dfInt$from[1])
primero=as_date(min(dfInt$from))
ultimo=as_date(max(dfInt$to))
dias=primero+(0:(1+ultimo-primero))
dfIntDias=intervalIntersectv2(dfInt,horario) %>% #left_join(dfInt) %>%
transmute(from=fromNew,to=toNew,content=content,Familia=Familia,variable=variable,altura=altura,medicion=medicion,day=day) %>%
mutate(
from_b= from-(day-primero),
to_b= to-(day-primero),
dia= str_c(str_sub(as.character(as_date(day)),3,10),"\n",weekdays(day,abbreviate=TRUE))) %>%
arrange(medicion,from_b)
grafico=ggplot(dfIntDias,aes(x=from_b,y=altura))+
geom_segment(aes(xend=to_b,yend=altura,color=medicion),size=1)+
scale_x_datetime(labels=date_format("%H",tz=zona),date_minor_breaks="30 mins",date_breaks="1 hours",position = "top")+
theme_stata() +
scale_y_continuous(breaks=NULL)+
xlab("Hora")+
scale_color_manual(values=miPaleta)+
theme(
axis.text.y=element_blank(),
axis.ticks.y=element_blank(),
axis.text.x=element_text(size=8),
panel.grid.major.x=element_line(colour = 'lightblue'),
axis.title.y=element_blank(),
legend.position="top",
legend.title = element_blank(),
strip.text.y = element_text(size = 7) )
if (sum(!is.na(dfIntDias$dia))>0) grafico=grafico+facet_wrap(~dia,ncol = 1,strip.position="left")
grafico
}
|
library(memisc)
### Name: withSE
### Title: Add Alternative Variance Estimates to Models Estimates
### Aliases: withSE withVCov withVCov.lm vcov.withVCov summary.withVCov
### summary.withVCov.lm
### ** Examples
### (Auto-extracted example code from the memisc withVCov/withSE help page.)
## Generate poisson regression relationship
x <- sin(1:100)
y <- rpois(100, exp(1 + x))
## compute usual covariance matrix of coefficient estimates
fm <- glm(y ~ x, family = poisson)
library(sandwich)
# Attach an alternative (OPG sandwich) covariance estimate to the fitted
# model without refitting it.
fmo <- withVCov(fm,vcovOPG)
# Compare the default vcov against the attached alternative.
vcov(fm)
vcov(fmo)
summary(fm)
summary(fmo)
# Side-by-side model table: default SEs vs. OPG SEs requested by name.
mtable(Default=fm,
OPG=withSE(fm,"OPG"),
summary.stats=c("Deviance","N")
)
# withSE also accepts a precomputed covariance matrix directly.
vo <- vcovOPG(fm)
mtable(Default=fm,
OPG=withSE(fm,vo),
summary.stats=c("Deviance","N")
)
| /data/genthat_extracted_code/memisc/examples/withVCov.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 705 | r | library(memisc)
### Name: withSE
### Title: Add Alternative Variance Estimates to Models Estimates
### Aliases: withSE withVCov withVCov.lm vcov.withVCov summary.withVCov
### summary.withVCov.lm
### ** Examples
## Generate poisson regression relationship
x <- sin(1:100)
y <- rpois(100, exp(1 + x))
## compute usual covariance matrix of coefficient estimates
fm <- glm(y ~ x, family = poisson)
library(sandwich)
fmo <- withVCov(fm,vcovOPG)
vcov(fm)
vcov(fmo)
summary(fm)
summary(fmo)
mtable(Default=fm,
OPG=withSE(fm,"OPG"),
summary.stats=c("Deviance","N")
)
vo <- vcovOPG(fm)
mtable(Default=fm,
OPG=withSE(fm,vo),
summary.stats=c("Deviance","N")
)
|
# Plot 1: histogram of household Global Active Power for the two days
# 2007-02-01 and 2007-02-02, saved as plot1.png (480x480).
library('dplyr')
power <- read.table("household_power_consumption.txt", sep = ';', header = T)
# The Date column holds d/m/yyyy strings; select each target day.
feb1 <- subset(power, grepl('^1/2/2007', power$Date))
feb2 <- subset(power, grepl('^2/2/2007', power$Date))
two_days <- rbind(feb1, feb2)
# Values were read as factors, so convert via character before plotting.
hist(as.numeric(as.character(two_days$Global_active_power)), col = "red",
     main = "Global Active Power", xlab = "Global Active Power (Kilowatts)")
dev.copy(png, file = "plot1.png", width = 480, height = 480, units = "px")
dev.off()
| /plot1.R | no_license | ikagiampis/GitHub | R | false | false | 422 | r | library('dplyr')
w1<-read.table("household_power_consumption.txt", sep= ';', header = T)
m1<-subset(w1,grepl('^1/2/2007',w1$Date))
m2<-subset(w1,grepl('^2/2/2007',w1$Date))
total <- rbind(m1, m2)
hist(as.numeric(as.character(total$Global_active_power)), col = "red", main = "Global Active Power", xlab = "Global Active Power (Kilowatts)" )
dev.copy(png, file = "plot1.png", width=480,height=480,units="px")
dev.off()
|
# Cluster phylogenetic-tree topologies: read a pairwise topological distance
# matrix, embed it in two dimensions with classical MDS, and save the
# coordinates for the clustering steps below.
library(cluster)
library(ape)
library(doParallel)
dat_raw <- as.matrix(read.table('topo_dist_mat.txt', head = T, as.is = T))
# Zero out the upper triangle and diagonal; as.dist() uses the lower half.
dat_raw[upper.tri(dat_raw, diag = T)] <- 0
# Coerce the (character) matrix to numeric in place.
# NOTE(review): the index order is 1:ncol for rows and 1:nrow for columns;
# this only works because the distance matrix is square -- confirm.
dat_raw[1:ncol(dat_raw), 1:nrow(dat_raw)] <- as.numeric(dat_raw)
dat_dist <- as.dist(dat_raw)
# Two-dimensional classical multidimensional scaling of the distances.
mds_dat <- cmdscale(dat_dist, k = 2)
rm(list = c('dat_raw', 'dat_dist'))
write.table(mds_dat, file = 'mds_topo.txt', row.names = T)
### functions
# Average silhouette width of a clara() clustering of x, for every cluster
# count k from 2 through kmax; returns one numeric value per k.
clus_fun <- function(x, kmax) {
  vapply(2:kmax, function(k) clara(x, k = k)$silinfo$avg.width, numeric(1))
}
# Parallel counterpart of clus_fun(): evaluates the clara() average
# silhouette width for k = 2..kmax across the registered doParallel workers.
# NOTE(review): .combine = cbind yields a 1-row matrix rather than the plain
# vector clus_fun() returns; downstream as.numeric() calls flatten it.
clus_fun_par <- function(x, kmax) foreach(i = 2:(kmax), .combine = cbind) %dopar% cluster::clara(x, k = i)$silinfo$avg.width
# Draw one null-reference replicate for x: a matrix with nrow(x) rows whose
# two columns are uniform draws spanning the observed range of x's first and
# second columns respectively. Used to build a bootstrap baseline for the
# silhouette-width curves.
# Returns the matrix directly; the original stored it in a local variable,
# which made the function's return value invisible at top level.
get_boot_rep <- function(x) {
  cbind(runif(nrow(x), min(x[, 1]), max(x[, 1])),
        runif(nrow(x), min(x[, 2]), max(x[, 2])))
}
### Main computation: silhouette-width profiles for the observed MDS layout
### and for 50 uniform-null bootstrap layouts, k = 2..50, on 10 workers.
if(T){
# Observed-data silhouette profile.
cl <- makeCluster(10)
registerDoParallel(cl)
true_dat_clus <- clus_fun_par(mds_dat, 50)
stopCluster(cl)
boot_dat_clus <- list()
# A second worker pool is reused across all 50 bootstrap replicates and
# stopped once afterwards.
cl <- makeCluster(10)
registerDoParallel(cl)
for(i in 1:50){
print(paste('boot_rep' , i))
boot_dat_temp <- get_boot_rep(mds_dat)
boot_dat_clus[[i]] <- clus_fun_par(boot_dat_temp, 50)
}
stopCluster(cl)
}
# Figure: observed silhouette curve (red line) over the bootstrap-null
# points (blue), plus the k = 10 clustering of the MDS coordinates.
pdf('topo_clusters.pdf')
par(mfrow = c(2, 1))
plot(2:50, as.numeric(true_dat_clus), type = 'l', lwd = 2, col = 'red', ylim = c(0.3, 0.6), ylab = 'Average silhouette width', xlab = 'Number of topology clusters')
for(i in 1:length(boot_dat_clus)){
points(jitter(2:50), as.numeric(boot_dat_clus[[i]]), pch = 20, col = 'blue')
}
# NOTE(review): pam() is used for the final k = 10 partition although the
# silhouette search above used clara() -- confirm this is intentional.
clu1 <- pam(mds_dat, k = 10)
plot(mds_dat, pch = 20, col = rainbow(10)[clu1$clustering], xlab = 'MDS 1', ylab = 'MDS 2')
dev.off()
# Persist cluster assignments (one row per tree) and cluster summaries.
write.table(t(t(clu1$clustering)), file = 'clust_k_10.txt', row.names = T)
write.table(clu1$clusinfo, file = 'clust_info_k_10.txt', row.names = F)
| /get_mds_topo.R | no_license | sebastianduchene/tree_length | R | false | false | 1,760 | r | library(cluster)
library(ape)
library(doParallel)
dat_raw <- as.matrix(read.table('topo_dist_mat.txt', head = T, as.is = T))
dat_raw[upper.tri(dat_raw, diag = T)] <- 0
dat_raw[1:ncol(dat_raw), 1:nrow(dat_raw)] <- as.numeric(dat_raw)
dat_dist <- as.dist(dat_raw)
mds_dat <- cmdscale(dat_dist, k = 2)
rm(list = c('dat_raw', 'dat_dist'))
write.table(mds_dat, file = 'mds_topo.txt', row.names = T)
### functions
clus_fun <- function(x, kmax) sapply(2:(kmax), function(i) clara(x, k = i)$silinfo$avg.width )
clus_fun_par <- function(x, kmax) foreach(i = 2:(kmax), .combine = cbind) %dopar% cluster::clara(x, k = i)$silinfo$avg.width
get_boot_rep <- function(x){
boot_mat <- cbind(runif(nrow(x), min(x[, 1]), max(x[, 1])), runif(nrow(x), min(x[, 2]), max(x[, 2])))
}
###
if(T){
cl <- makeCluster(10)
registerDoParallel(cl)
true_dat_clus <- clus_fun_par(mds_dat, 50)
stopCluster(cl)
boot_dat_clus <- list()
cl <- makeCluster(10)
registerDoParallel(cl)
for(i in 1:50){
print(paste('boot_rep' , i))
boot_dat_temp <- get_boot_rep(mds_dat)
boot_dat_clus[[i]] <- clus_fun_par(boot_dat_temp, 50)
}
stopCluster(cl)
}
pdf('topo_clusters.pdf')
par(mfrow = c(2, 1))
plot(2:50, as.numeric(true_dat_clus), type = 'l', lwd = 2, col = 'red', ylim = c(0.3, 0.6), ylab = 'Average silhouette width', xlab = 'Number of topology clusters')
for(i in 1:length(boot_dat_clus)){
points(jitter(2:50), as.numeric(boot_dat_clus[[i]]), pch = 20, col = 'blue')
}
clu1 <- pam(mds_dat, k = 10)
plot(mds_dat, pch = 20, col = rainbow(10)[clu1$clustering], xlab = 'MDS 1', ylab = 'MDS 2')
dev.off()
write.table(t(t(clu1$clustering)), file = 'clust_k_10.txt', row.names = T)
write.table(clu1$clusinfo, file = 'clust_info_k_10.txt', row.names = F)
|
#' @title Identify significant genes through SAM
#' @description Implement SAM and compute significant genes given delta. Output
#'   will consist of all significant genes ordered by increasing q-value and
#'   decreasing d-score.
#' @usage SigGenesSAM(background.subtraction.obj, class.compare.cols,
#'   class.compare.name, fdr.cutoff=0.1, response, delta)
#' @param background.subtraction.obj Object returned from call to
#'   BackgroundSubtraction
#' @param class.compare.cols Vector of column indices indicating which subset
#'   of arrays are to be compared for this comparison
#' @param class.compare.name String title given to the name of the comparison
#' @param fdr.cutoff Max FDR for SAM, will use delta value which results in max
#'   FDR below this cutoff
#' @param response For two class unpaired: vector of 1, 2 values that indicate
#'   group membership. For two class paired: vector of -1, 1, -2, 2, etc.
#'   values that indicate pairings.
#' @param delta Optional SAM delta threshold. When missing, the smallest delta
#'   in the SAM delta table whose FDR is at or below \code{fdr.cutoff} is
#'   chosen automatically (relaxing the cutoff in 0.05 steps if necessary).
#' @return A list with components
#'   \item{siggenes.table}{Combined data frame of genes having significant
#'     positive and negative correlation}
#'   \item{data.col}{Vector of column indices containing array data}
#'   \item{ntext}{Number of leading text columns}
#'   \item{response}{Vector of array group membership, 1=control, 2=experimental}
#'   \item{pipeline.name}{Name of pipeline generated from input file name sans
#'     extension}
#'   \item{data}{Data frame of chosen normalization method data}
#'   \item{class.compare.cols}{Value entered through class.compare.cols parameter}
#'   \item{class.compare.name}{Value entered through class.compare.name parameter}
#'   \item{symbol.index}{Column index that contains gene symbol}
#' @export
SigGenesSAM <- function(background.subtraction.obj, class.compare.cols,
                        class.compare.name, fdr.cutoff=0.1, response, delta) {
  # class.compare.cols and class.compare.name must be supplied together or
  # not at all.
  if ((missing(class.compare.cols) & !missing(class.compare.name)) |
      (missing(class.compare.name) & !missing(class.compare.cols))) {
    stop("Cannot have class.compare.cols set without class.compare.name
         and vice-versa")
  }
  # Select either all data columns or the requested subset of arrays.
  if (missing(class.compare.cols)) {
    data.SAM <-
      background.subtraction.obj$normalized[, background.subtraction.obj$data.col]
  }
  else {
    data.SAM <- background.subtraction.obj$normalized[, class.compare.cols]
  }
  # SAM is run on log2 intensities (hence logged2 = TRUE below).
  log.data.SAM <- log2(data.SAM)
  genenames <- as.data.frame(background.subtraction.obj$symbol)
  geneid <- as.data.frame(background.subtraction.obj$id)
  symbol.index <- background.subtraction.obj$symbol.index
  geneid.index <- background.subtraction.obj$id.index
  pipeline.name <- background.subtraction.obj$pipeline.name
  if (length(response) != ncol(data.SAM)) {
    stop("Number of responses does not match number of samples.")
  }
  list.SAM <- list(x=log.data.SAM, y=response, genenames=genenames,
                   geneid=geneid, logged2=TRUE)
  cat("Beginning SAM processing\n")
  # Negative response values encode pairings, so any negative value selects
  # the paired analysis.
  if (sum(response < 0) == 0) {
    capture.output(samr.obj <- samr::samr(list.SAM,
                                          resp.type="Two class unpaired",
                                          s0.perc=50, testStatistic="standard",
                                          nperms=200))
  } else {
    capture.output(samr.obj <- samr::samr(list.SAM,
                                          resp.type="Two class paired",
                                          s0.perc=50, testStatistic="standard",
                                          nperms=200))
  }
  # The delta table is required below by samr.compute.siggenes.table()
  # regardless of whether the caller supplied delta. The original computed it
  # only inside the missing(delta) branch, so passing delta explicitly raised
  # "object 'delta.table' not found".
  cat("Calculating delta table\n")
  capture.output(delta.table <- samr::samr.compute.delta.table(samr.obj,
                                                               nvals=1000))
  if (missing(delta)) {
    # Smallest delta whose FDR (column 5 of the delta table) meets the
    # cutoff; relax the cutoff in 0.05 steps if none qualifies.
    delta <- delta.table[which(delta.table[, 5] <= fdr.cutoff)[1], 1]
    while (is.na(delta)) {
      fdr.cutoff <- fdr.cutoff + 0.05
      # >= rather than == guards against floating-point drift in the
      # accumulated 0.05 increments stepping over exactly 1.00.
      if (fdr.cutoff >= 1.00) {
        stop("Have reached cutoff of 1.00 and no delta found.")
      }
      cat("Cutoff is too stringent, no delta available. Increasing FDR cutoff to ",
          fdr.cutoff, "\n")
      delta <- delta.table[which(delta.table[, 5] <= fdr.cutoff)[1], 1]
    }
  }
  desc.data.SAM <- data.frame(background.subtraction.obj$desc.stats, data.SAM)
  # samr.compute.siggenes.table flips genename and geneid
  symbol.id.columns <- c(symbol.index, geneid.index)
  colnames(desc.data.SAM)[symbol.id.columns] <- c("geneid", "genenames")
  siggenes.table <- samr::samr.compute.siggenes.table(samr.obj, delta,
                                                      desc.data.SAM,
                                                      delta.table,
                                                      compute.localfdr=TRUE)
  cat("\nSAM Results:\nOptimal delta:", delta,
      "\nNo. of significantly down regulated genes:",
      siggenes.table$ngenes.lo, "\nNo. of significantly up regulated genes:",
      siggenes.table$ngenes.up, "\n\n")
  if (sum(nrow(siggenes.table$genes.up), nrow(siggenes.table$genes.lo)) < 1) {
    stop("No significant genes at provided delta")
  }
  # Output CSV path, optionally tagged with the comparison name.
  if (!missing(class.compare.name)) {
    siggenes.file=paste0(pipeline.name, "_pipeline/",
                         pipeline.name, "_siggenes_",
                         class.compare.name, ".csv")
  }
  else {
    siggenes.file=paste0(pipeline.name, "_pipeline/",
                         pipeline.name, "_siggenes.csv")
  }
  all.siggenes <- as.matrix(rbind(siggenes.table$genes.up, siggenes.table$genes.lo))
  # Order by increasing q-value (column 8), ties broken by decreasing |d|
  # (column 4). drop = FALSE keeps the result a matrix when only one gene is
  # significant; a plain [ would collapse it to a vector and break the
  # data-frame construction below.
  ordered.all.siggenes <- all.siggenes[order(-(as.numeric(all.siggenes[, 8])),
                                             abs(as.numeric(all.siggenes[, 4])),
                                             decreasing=TRUE), , drop=FALSE]
  write.siggenes <- as.data.frame(ordered.all.siggenes)
  # Record the chosen delta / cutoff once on the first row, padding the rest.
  write.siggenes$optimalDelta <- as.character(
    c(delta, rep(" ", nrow(ordered.all.siggenes)-1)))
  write.siggenes$fdr.cutoff <- as.character(
    c(fdr.cutoff, rep(" ", nrow(ordered.all.siggenes)-1)))
  write.csv(write.siggenes[,-1], file=siggenes.file, row.names=FALSE)
  cat("Significant gene list available at ./", siggenes.file, "\n", sep="")
  colnames(ordered.all.siggenes) <- make.names(colnames(all.siggenes))
  # Assemble the return list and snapshot both objects to a timestamped
  # .RData file inside the pipeline directory.
  if (missing(class.compare.cols)) {
    sam.return.list <- list(siggenes.table=ordered.all.siggenes,
                            normalized=desc.data.SAM,
                            ntext=background.subtraction.obj$ntext,
                            pipeline.name=pipeline.name,
                            response=response,
                            data.col=background.subtraction.obj$data.col,
                            id.index=geneid.index,
                            symbol.index=symbol.index,
                            fdr.cutoff=fdr.cutoff)
    save(background.subtraction.obj, sam.return.list,
         file=paste0(pipeline.name, "_pipeline/", "PIMENTo-", pipeline.name, "_",
                     format(Sys.time(), "%Y-%m-%d_%H%M%S"), ".RData"))
  }
  else {
    # Column indices of the chosen subset, offset past the leading text
    # columns of the combined descriptive/data frame.
    ntext <- background.subtraction.obj$ntext
    subsetclass.compare.cols <- c((ntext+1):(ntext+length(class.compare.cols)))
    sam.return.list <- list(siggenes.table=ordered.all.siggenes,
                            normalized=desc.data.SAM,
                            ntext=background.subtraction.obj$ntext,
                            pipeline.name=pipeline.name,
                            response=response,
                            data.col=background.subtraction.obj$data.col,
                            class.compare.cols=subsetclass.compare.cols,
                            class.compare.name=class.compare.name,
                            id.index=geneid.index,
                            symbol.index=symbol.index,
                            fdr.cutoff=fdr.cutoff)
    save(background.subtraction.obj, sam.return.list,
         file=paste0(pipeline.name, "_pipeline/", "PIMENTo-", pipeline.name,
                     "_", class.compare.name, "_",
                     format(Sys.time(), "%Y-%m-%d_%H%M%S"), ".RData"))
  }
  SampleSimilarity(sam.return.list)
  return(sam.return.list)
}
| /R/sig_genes_SAM.R | no_license | MUSC-CGM/PIMENTo | R | false | false | 8,075 | r | #' @title Identify significant genes through SAM
#' @description Implement SAM and compute significant genes given delta. Output
#' will consist of all significant genes ordered by increasing q-value and
#' decreasing d-score.
#' @usage SigGenesSAM(background.subtraction.obj, class.compare.cols,
#' class.compare.name, fdr.cutoff=0.1, response)
#' @param background.subtraction.obj Object returned from call to
#' BackgroundSubtraction
#' @param class.compare.cols Vector of column indices indicating which subset
#' of arrays are to be compared for this comparison
#' @param class.compare.name String title given to the name of the comparison
#' @param fdr.cutoff Max FDR for SAM, will use delta value which results in max
#' FDR below this cutoff
#' @param response For two class unpaired: vector of 1, 2 values that indicate
#' group membership. For two class paired: vector of -1, 1, -2, 2, etc.
#' values that indicate pairings.
#' @return A list with components
#' \item{siggenes.table}{Combined data frame of genes having significant
#' positive and negative correlation}
#' \item{data.col}{Vector of column indices containing array data}
#' \item{ntext}{Number of leading text columns}
#' \item{response}{Vector of array group membership, 1=control, 2=experimental}
#' \item{pipeline.name}{Name of pipeline generated from input file name sans
#' extension}
#' \item{data}{Data frame of chosen normalization method data}
#' \item{class.compare.cols}{Value entered through class.compare.cols parameter}
#' \item{class.compare.name}{Value entered through class.compare.name parameter}
#' \item{symbol.index}{Column index that contains gene symbol}
#' @export
SigGenesSAM <- function(background.subtraction.obj, class.compare.cols,
class.compare.name, fdr.cutoff=0.1, response, delta) {
if ((missing(class.compare.cols) & !missing(class.compare.name)) |
(missing(class.compare.name) & !missing(class.compare.cols))) {
stop("Cannot have class.compare.cols set without class.compare.name
and vice-versa")
}
if (missing(class.compare.cols)) {
data.SAM <-
background.subtraction.obj$normalized[, background.subtraction.obj$data.col]
}
else {
data.SAM <- background.subtraction.obj$normalized[, class.compare.cols]
}
log.data.SAM <- log2(data.SAM)
genenames <- as.data.frame(background.subtraction.obj$symbol)
geneid <- as.data.frame(background.subtraction.obj$id)
symbol.index <- background.subtraction.obj$symbol.index
geneid.index <- background.subtraction.obj$id.index
pipeline.name <- background.subtraction.obj$pipeline.name
if(length(response) != ncol(data.SAM)) {
stop("Number of responses does not match number of samples.")
}
list.SAM = list(x=log.data.SAM, y=response, genenames=genenames,
geneid=geneid, logged2=T)
cat("Beginning SAM processing\n")
if (sum(response < 0) == 0) {
capture.output(samr.obj <- samr::samr(list.SAM,
resp.type="Two class unpaired",
s0.perc=50, testStatistic="standard",
nperms=200))
} else {
capture.output(samr.obj <- samr::samr(list.SAM,
resp.type="Two class paired",
s0.perc=50, testStatistic="standard",
nperms=200))
}
if(missing(delta)) {
cat("Calculating delta table\n")
capture.output(delta.table <- samr::samr.compute.delta.table(samr.obj,
nvals=1000))
delta <- delta.table[which(delta.table[, 5] <= fdr.cutoff)[1], 1]
while (is.na(delta)) {
fdr.cutoff <- fdr.cutoff + 0.05
if (fdr.cutoff == 1.00) {
stop("Have reached cutoff of 1.00 and no delta found.")
}
cat("Cutoff is too stringent, no delta available. Increasing FDR cutoff to ",
fdr.cutoff, "\n")
delta <- delta.table[which(delta.table[, 5] <= fdr.cutoff)[1], 1]
}
}
desc.data.SAM <- data.frame(background.subtraction.obj$desc.stats, data.SAM)
# samr.compute.siggenes.table flips genename and geneid
symbol.id.columns <- c(symbol.index, geneid.index)
colnames(desc.data.SAM)[symbol.id.columns] <- c("geneid", "genenames")
siggenes.table <- samr::samr.compute.siggenes.table(samr.obj, delta,
desc.data.SAM,
delta.table,
compute.localfdr=T)
cat("\nSAM Results:\nOptimal delta:", delta,
"\nNo. of significantly down regulated genes:",
siggenes.table$ngenes.lo, "\nNo. of significantly up regulated genes:",
siggenes.table$ngenes.up, "\n\n")
if (sum(nrow(siggenes.table$genes.up), nrow(siggenes.table$genes.lo)) < 1) {
stop("No significant genes at provided delta")
}
if (!missing(class.compare.name)) {
siggenes.file=paste0(pipeline.name, "_pipeline/",
pipeline.name, "_siggenes_",
class.compare.name, ".csv")
}
else {
siggenes.file=paste0(pipeline.name, "_pipeline/",
pipeline.name, "_siggenes.csv")
}
all.siggenes <- as.matrix(rbind(siggenes.table$genes.up, siggenes.table$genes.lo))
ordered.all.siggenes <- all.siggenes[order(-(as.numeric(all.siggenes[, 8])),
abs(as.numeric(all.siggenes[, 4])),
decreasing=T), ]
write.siggenes <- as.data.frame(ordered.all.siggenes)
write.siggenes$optimalDelta <- as.character(
c(delta, rep(" ", nrow(ordered.all.siggenes)-1)))
write.siggenes$fdr.cutoff <- as.character(
c(fdr.cutoff, rep(" ", nrow(ordered.all.siggenes)-1)))
write.csv(write.siggenes[,-1], file=siggenes.file, row.names=F)
cat("Significant gene list available at ./", siggenes.file, "\n", sep="")
colnames(ordered.all.siggenes) <- make.names(colnames(all.siggenes))
if (missing(class.compare.cols)) {
sam.return.list <- list(siggenes.table=ordered.all.siggenes,
normalized=desc.data.SAM,
ntext=background.subtraction.obj$ntext,
pipeline.name=pipeline.name,
response=response,
data.col=background.subtraction.obj$data.col,
id.index=geneid.index,
symbol.index=symbol.index,
fdr.cutoff=fdr.cutoff)
save(background.subtraction.obj, sam.return.list,
file=paste0(pipeline.name, "_pipeline/", "PIMENTo-", pipeline.name, "_",
format(Sys.time(), "%Y-%m-%d_%H%M%S"), ".RData"))
}
else {
ntext <- background.subtraction.obj$ntext
subsetclass.compare.cols <- c((ntext+1):(ntext+length(class.compare.cols)))
sam.return.list <- list(siggenes.table=ordered.all.siggenes,
normalized=desc.data.SAM,
ntext=background.subtraction.obj$ntext,
pipeline.name=pipeline.name,
response=response,
data.col=background.subtraction.obj$data.col,
class.compare.cols=subsetclass.compare.cols,
class.compare.name=class.compare.name,
id.index=geneid.index,
symbol.index=symbol.index,
fdr.cutoff=fdr.cutoff)
save(background.subtraction.obj, sam.return.list,
file=paste0(pipeline.name, "_pipeline/", "PIMENTo-", pipeline.name,
"_", class.compare.name, "_",
format(Sys.time(), "%Y-%m-%d_%H%M%S"), ".RData"))
}
SampleSimilarity(sam.return.list)
return(sam.return.list)
}
|
## Compute the residuals
# NOTE(review): inFrame / predicted are created earlier in the Azure ML
# tutorial this snippet was taken from; they are not defined in this file.
library(dplyr)
inFrame <- mutate(inFrame, resids = predicted - cnt)
#####
# Example of group_by to calculate Quantile from Azure ML tutorial
# Inside Azure ML the dataset arrives through the module input port, and the
# epoch-seconds date column is converted back to POSIXct.
if(Azure){
## Read in the dataset.
BikeShare <- maml.mapInputPort(1)
BikeShare$dteday <- as.POSIXct(as.integer(BikeShare$dteday),
origin = "1970-01-01")
}
## Build a dataframe with the quantile by month and
## hour. Parameter Quantile determines the trim point.
Quantile <- 0.10
require(dplyr)
quantByPer <- (
BikeShare %>%
group_by(workTime, monthCount) %>%
summarise(Quant = quantile(cnt,
probs = Quantile,
na.rm = TRUE))
)
## Join the quantile information with the
## matching rows of the data frame. This
## join uses the names of the common columns
## as the keys.
BikeShare2 <- inner_join(BikeShare, quantByPer)
## Filter for the rows we want and remove the
## no longer needed column.
# Keep only counts above the per-(workTime, monthCount) quantile, then drop
# the helper column.
BikeShare2 <- BikeShare2 %>%
filter(cnt > Quant)
BikeShare2[, "Quant"] <- NULL
#####
##### | /stats-qr.R | no_license | alathrop/R-Quick-Reference | R | false | false | 1,076 | r | ## Compute the residuals
library(dplyr)
inFrame <- mutate(inFrame, resids = predicted - cnt)
#####
# Example of group_by to calculate Quantile from Azure ML tutorial
if(Azure){
## Read in the dataset.
BikeShare <- maml.mapInputPort(1)
BikeShare$dteday <- as.POSIXct(as.integer(BikeShare$dteday),
origin = "1970-01-01")
}
## Build a dataframe with the quantile by month and
## hour. Parameter Quantile determines the trim point.
Quantile <- 0.10
require(dplyr)
quantByPer <- (
BikeShare %>%
group_by(workTime, monthCount) %>%
summarise(Quant = quantile(cnt,
probs = Quantile,
na.rm = TRUE))
)
## Join the quantile informaiton with the
## matching rows of the data frame. This is
## join uses the names with common columns
## as the keys.
BikeShare2 <- inner_join(BikeShare, quantByPer)
## Filter for the rows we want and remove the
## no longer needed column.
BikeShare2 <- BikeShare2 %>%
filter(cnt > Quant)
BikeShare2[, "Quant"] <- NULL
##### |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/supportfunctions.R
\name{extractparam}
\alias{extractparam}
\title{Extract parameters for posterior simulations}
\usage{
extractparam(model, x)
}
\arguments{
\item{model}{Object of class GCMM containing output from GCMM function}
\item{x}{Name of parameter to be extracted}
}
\value{
Returns posterior samples of the parameter
}
\description{
Support function that extracts parameter estimates for creating posterior simulations of activity curves
}
| /man/extractparam.Rd | no_license | cran/activityGCMM | R | false | true | 529 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/supportfunctions.R
\name{extractparam}
\alias{extractparam}
\title{Extract parameters for posterior simulations}
\usage{
extractparam(model, x)
}
\arguments{
\item{model}{Object of class GCMM containing output from GCMM function}
\item{x}{Name of parameter to be extracted}
}
\value{
Returns posterior samples of the parameter
}
\description{
Support function that extracts parameter estimates for creating posterior simulations of activity curves
}
|
#!/usr/bin/env Rscript
library(ggplot2)
library(plotly)
library(dplyr)
library(maps)
library(ggrepel)
library(scatterpie)
# Load the COVIDiSTRESS survey export; the first two rows are Qualtrics
# header rows and are dropped.
data = read.csv("../covid_06042020_choice_values.csv", header=T, stringsAsFactors=F)
data = data[3:nrow(data),]
# Base world polygons from the maps package.
world_map <- map_data("world")
# Collapse map regions onto the sovereign country label used by the survey:
# islands, overseas territories and multi-island states are re-labelled so
# their polygons aggregate with the parent country.
world_map$country = world_map$region
world_map[world_map$region == "Grenadines","country"] <- "Saint Vincent and the Grenadines"
world_map[world_map$region == "Saint Vincent","country"] <- "Saint Vincent and the Grenadines"
world_map[world_map$region == "Antigua","country"] <- "Antigua and Barbuda"
world_map[world_map$region == "Barbuda","country"] <- "Antigua and Barbuda"
# Dutch Caribbean territories.
world_map[world_map$region == "Aruba","country"] <- "Netherlands"
world_map[world_map$region == "Curacao","country"] <- "Netherlands"
world_map[world_map$region == "Bonaire","country"] <- "Netherlands"
world_map[world_map$region == "Sint Eustatius","country"] <- "Netherlands"
world_map[world_map$region == "Saba","country"] <- "Netherlands"
world_map[world_map$region == "Sint Maarten","country"] <- "Netherlands"
# British overseas territories.
world_map[world_map$region == "Anguilla","country"] <- "UK"
world_map[world_map$region == "Bermuda","country"] <- "UK"
world_map[world_map$region == "Falkland Islands","country"] <- "UK"
world_map[world_map$region == "Chagos Archipelago","country"] <- "UK"
world_map[world_map$region == "Pitcairn Islands","country"] <- "UK"
world_map[world_map$region == "South Sandwich Islands","country"] <- "UK"
world_map[world_map$region == "Saint Helena","country"] <- "UK"
world_map[world_map$region == "Ascension Island","country"] <- "UK"
world_map[world_map$region == "Turks and Caicos Islands","country"] <- "UK"
# French overseas departments and territories.
world_map[world_map$region == "French Southern and Antarctic Lands","country"] <- "France"
world_map[world_map$region == "Saint Barthelemy","country"] <- "France"
world_map[world_map$region == "Reunion","country"] <- "France"
world_map[world_map$region == "Mayotte","country"] <- "France"
world_map[world_map$region == "French Guiana","country"] <- "France"
world_map[world_map$region == "Martinique","country"] <- "France"
world_map[world_map$region == "Guadeloupe","country"] <- "France"
world_map[world_map$region == "Saint Martin","country"] <- "France"
world_map[world_map$region == "New Caledonia","country"] <- "France"
world_map[world_map$region == "French Polynesia","country"] <- "France"
world_map[world_map$region == "Saint Pierre and Miquelon","country"] <- "France"
world_map[world_map$region == "Wallis and Futuna","country"] <- "France"
# Remaining territories.
# NOTE(review): Montserrat is a British territory but is mapped to Spain
# here -- confirm against the original authors' intent.
world_map[world_map$region == "Canary Islands","country"] <- "Spain"
world_map[world_map$region == "Montserrat","country"] <- "Spain"
world_map[world_map$region == "Azores","country"] <- "Portugal"
world_map[world_map$region == "Guam","country"] <- "USA"
world_map[world_map$region == "Puerto Rico","country"] <- "USA"
world_map[world_map$region == "Heard Island","country"] <- "Australia"
world_map[world_map$region == "Cocos Islands","country"] <- "Australia"
world_map[world_map$region == "Christmas Island","country"] <- "Australia"
world_map[world_map$region == "Norfolk Island","country"] <- "Australia"
world_map[world_map$region == "Siachen Glacier","country"] <- "India"
world_map[world_map$region == "Trinidad","country"] <- "Trinidad and Tobago"
world_map[world_map$region == "Tobago","country"] <- "Trinidad and Tobago"
# Map survey country labels onto the names used by maps::map_data("world").
# NOTE: the original used replace(x, x %in% set, values), which assigns the
# replacement values by the *position of each match in the data* (recycled),
# not by which label matched -- so the mapping was scrambled whenever the
# answers did not happen to appear in the same order as the lookup vector.
# A named lookup pairs each label with its intended replacement.
country_lookup <- c(
  "- other" = NA,
  "Cabo Verde" = "Cape Verde",
  "Congo, Democratic Republic of the" = "Democratic Republic of the Congo",
  "Congo, Republic of the" = "Republic of Congo",
  "Côte d’Ivoire" = "Ivory Coast",
  "East Timor (Timor-Leste)" = "Timor-Leste",
  "Korea, North" = "North Korea",
  "Korea, South" = "South Korea",
  "Micronesia, Federated States of" = "Micronesia",
  "North Macedonia" = "Macedonia",
  "Sudan, South" = "South Sudan",
  "The Bahamas" = "Bahamas",
  "United Kingdom" = "UK",
  "United States" = "USA"
)
needs_fix <- data$Country %in% names(country_lookup)
data$Country[needs_fix] <- unname(country_lookup[data$Country[needs_fix]])
# Number of survey answers per country (drop the empty-country label).
nb_rep_by_country <- table(data$Country)
nb_rep_by_country <- nb_rep_by_country[names(nb_rep_by_country) != ""]
# NOTE(review): this 0 default is immediately overwritten by the match()
# lookup below, so countries without any answers end up NA, not 0.
world_map$nb_answers <- 0
world_map$nb_answers <- as.vector(nb_rep_by_country[match(world_map$country,names(nb_rep_by_country))])
# Tooltip text shown by ggplotly for each polygon.
world_map <- world_map %>%
mutate(country_text = paste0(
"Country: ", country, "\n",
"Region: ", region, "\n",
"# of answers: ", nb_answers))
# NOTE(review): data() loads world.cities into the workspace and returns the
# dataset *name* as a string, so world_cities holds "world.cities" and is
# never used; the loaded world.cities object is what is referenced below.
world_cities <- data(world.cities)
# Pies are only drawn for countries with at least 50 answers, positioned at
# their capital city.
enough_answers_countries <- unique(world_map[world_map$nb_answers>=50,"country"])
coord_capital <- world.cities[world.cities$capital==1 & world.cities$country.etc %in% enough_answers_countries,]
# One count column per non-empty gender answer, initialised to zero.
for(a in unique(data[,"Dem_gender"])){
if(a!=""){
coord_capital[, a] <- 0
}
}
# Fill the per-country gender counts from the survey answers.
for(country in enough_answers_countries){
answers = table(data[data$Country == country,"Dem_gender"])
for(a in names(answers)){
if(a!=""){
coord_capital[coord_capital$country.etc == country, a] <- answers[a]
}
}
}
# Total answers per capital, used to scale the pie radius.
coord_capital$nb_answers <- as.vector(nb_rep_by_country[match(coord_capital$country.etc,names(nb_rep_by_country))])
# Final figure: world polygons with per-country gender pie charts (countries
# with >= 50 answers), centred on each capital.
# NOTE: the original built this exact ggplot twice back to back, with the
# second assignment overwriting the first; the redundant dead copy has been
# removed.
scale=5
p <- ggplot() +
  geom_polygon(data=world_map,aes( x = long, y = lat, group = group, text = country_text),fill="grey90", color="grey60", size = 0.1)+
  geom_point(data=coord_capital,aes( x = long, y = lat),size=0.1)+
  # Pie radius sqrt(n)/(pi*scale) makes pie *area* proportional to n.
  geom_scatterpie(data=coord_capital,
                  aes(x=long, y=lat, r=sqrt(nb_answers)/(pi*scale)), cols=setdiff(unique(data[,"Dem_gender"]),""),
                  color=NA,
                  alpha=0.5,
                  legend_name="Gender")+
  geom_text_repel(data=coord_capital,aes(x=long, y=lat, label=country.etc),size=2)+
  # The legend labeller inverts the radius formula back into answer counts.
  geom_scatterpie_legend(sqrt(coord_capital$nb_answers)/(pi*scale), -150,-50, n = 5,labeller = function(x){round((x*pi*scale)^2)})+
  #geom_scatterpie_legend(sqrt(coord_capital$nb_answers)/(pi*scale), -10,72, n = 5,labeller = function(x){round((x*pi*scale)^2)})+
  #scale_x_continuous(limits=c(-20,50))+
  #scale_y_continuous(limits=c(25,75))+
  theme_void()
# Interactive rendering; hover text comes from the country_text aesthetic.
ggplotly(p, tooltip="text")
| /geoviz/covid19_geoviz_sex_ratio.R | no_license | ggautreau/COVIDiSTRESS_Scripts | R | false | false | 7,134 | r | #!/usr/bin/env Rscript
library(ggplot2)
library(plotly)
library(dplyr)
library(maps)
library(ggrepel)
library(scatterpie)
data = read.csv("../covid_06042020_choice_values.csv", header=T, stringsAsFactors=F)
data = data[3:nrow(data),]
world_map <- map_data("world")
world_map$country = world_map$region
world_map[world_map$region == "Grenadines","country"] <- "Saint Vincent and the Grenadines"
world_map[world_map$region == "Saint Vincent","country"] <- "Saint Vincent and the Grenadines"
world_map[world_map$region == "Antigua","country"] <- "Antigua and Barbuda"
world_map[world_map$region == "Barbuda","country"] <- "Antigua and Barbuda"
world_map[world_map$region == "Aruba","country"] <- "Netherlands"
world_map[world_map$region == "Curacao","country"] <- "Netherlands"
world_map[world_map$region == "Bonaire","country"] <- "Netherlands"
world_map[world_map$region == "Sint Eustatius","country"] <- "Netherlands"
world_map[world_map$region == "Saba","country"] <- "Netherlands"
world_map[world_map$region == "Sint Maarten","country"] <- "Netherlands"
world_map[world_map$region == "Anguilla","country"] <- "UK"
world_map[world_map$region == "Bermuda","country"] <- "UK"
world_map[world_map$region == "Falkland Islands","country"] <- "UK"
world_map[world_map$region == "Chagos Archipelago","country"] <- "UK"
world_map[world_map$region == "Pitcairn Islands","country"] <- "UK"
world_map[world_map$region == "South Sandwich Islands","country"] <- "UK"
world_map[world_map$region == "Saint Helena","country"] <- "UK"
world_map[world_map$region == "Ascension Island","country"] <- "UK"
world_map[world_map$region == "Turks and Caicos Islands","country"] <- "UK"
world_map[world_map$region == "French Southern and Antarctic Lands","country"] <- "France"
world_map[world_map$region == "Saint Barthelemy","country"] <- "France"
world_map[world_map$region == "Reunion","country"] <- "France"
world_map[world_map$region == "Mayotte","country"] <- "France"
world_map[world_map$region == "French Guiana","country"] <- "France"
world_map[world_map$region == "Martinique","country"] <- "France"
world_map[world_map$region == "Guadeloupe","country"] <- "France"
world_map[world_map$region == "Saint Martin","country"] <- "France"
world_map[world_map$region == "New Caledonia","country"] <- "France"
world_map[world_map$region == "French Polynesia","country"] <- "France"
world_map[world_map$region == "Saint Pierre and Miquelon","country"] <- "France"
world_map[world_map$region == "Wallis and Futuna","country"] <- "France"
world_map[world_map$region == "Canary Islands","country"] <- "Spain"
world_map[world_map$region == "Montserrat","country"] <- "Spain"
world_map[world_map$region == "Azores","country"] <- "Portugal"
world_map[world_map$region == "Guam","country"] <- "USA"
world_map[world_map$region == "Puerto Rico","country"] <- "USA"
world_map[world_map$region == "Heard Island","country"] <- "Australia"
world_map[world_map$region == "Cocos Islands","country"] <- "Australia"
world_map[world_map$region == "Christmas Island","country"] <- "Australia"
world_map[world_map$region == "Norfolk Island","country"] <- "Australia"
world_map[world_map$region == "Siachen Glacier","country"] <- "India"
world_map[world_map$region == "Trinidad","country"] <- "Trinidad and Tobago"
world_map[world_map$region == "Tobago","country"] <- "Trinidad and Tobago"
# Harmonise survey country labels with the region names used by
# maps::map_data("world").
# BUG FIX: the original used
#   replace(data$Country, data$Country %in% c(<labels>), c(<substitutes>))
# but replace() with a logical index fills the TRUE positions in row order,
# recycling the replacement vector -- it does NOT pair each label with its
# intended substitute, so countries were scrambled whenever respondents'
# labels appeared in any other order. A named lookup does the intended
# one-to-one recoding.
country_recode <- c(
  "- other"                           = NA,
  "Cabo Verde"                        = "Cape Verde",
  "Congo, Democratic Republic of the" = "Democratic Republic of the Congo",
  "Congo, Republic of the"            = "Republic of Congo",
  "Côte d’Ivoire"                     = "Ivory Coast",
  "East Timor (Timor-Leste)"          = "Timor-Leste",
  "Korea, North"                      = "North Korea",
  "Korea, South"                      = "South Korea",
  "Micronesia, Federated States of"   = "Micronesia",
  "North Macedonia"                   = "Macedonia",
  "Sudan, South"                      = "South Sudan",
  "The Bahamas"                       = "Bahamas",
  "United Kingdom"                    = "UK",
  "United States"                     = "USA"
)
needs_recode <- data$Country %in% names(country_recode)
data$Country[needs_recode] <- unname(country_recode[data$Country[needs_recode]])
nb_rep_by_country <- table(data$Country)
nb_rep_by_country <- nb_rep_by_country[names(nb_rep_by_country) != ""]
world_map$nb_answers <- 0
world_map$nb_answers <- as.vector(nb_rep_by_country[match(world_map$country,names(nb_rep_by_country))])
world_map <- world_map %>%
mutate(country_text = paste0(
"Country: ", country, "\n",
"Region: ", region, "\n",
"# of answers: ", nb_answers))
world_cities <- data(world.cities)
enough_answers_countries <- unique(world_map[world_map$nb_answers>=50,"country"])
coord_capital <- world.cities[world.cities$capital==1 & world.cities$country.etc %in% enough_answers_countries,]
for(a in unique(data[,"Dem_gender"])){
if(a!=""){
coord_capital[, a] <- 0
}
}
for(country in enough_answers_countries){
answers = table(data[data$Country == country,"Dem_gender"])
for(a in names(answers)){
if(a!=""){
coord_capital[coord_capital$country.etc == country, a] <- answers[a]
}
}
}
coord_capital$nb_answers <- as.vector(nb_rep_by_country[match(coord_capital$country.etc,names(nb_rep_by_country))])
# Pie-chart world map of gender ratios per country (radius ~ sqrt of answers).
# NOTE: the original built the identical ggplot object twice back to back and
# then called ggplotly(); the second assignment merely overwrote the first with
# the same value, so building it once is behaviour-preserving.
scale <- 5  # shrink factor for the pie radii / legend back-conversion
p <- ggplot() +
  geom_polygon(data=world_map,aes( x = long, y = lat, group = group, text = country_text),fill="grey90", color="grey60", size = 0.1)+
  geom_point(data=coord_capital,aes( x = long, y = lat),size=0.1)+
  geom_scatterpie(data=coord_capital,
                  aes(x=long, y=lat, r=sqrt(nb_answers)/(pi*scale)), cols=setdiff(unique(data[,"Dem_gender"]),""),
                  color=NA,
                  alpha=0.5,
                  legend_name="Gender")+
  geom_text_repel(data=coord_capital,aes(x=long, y=lat, label=country.etc),size=2)+
  # Legend radii are converted back to answer counts via the inverse of the
  # r = sqrt(n)/(pi*scale) mapping used above.
  geom_scatterpie_legend(sqrt(coord_capital$nb_answers)/(pi*scale), -150,-50, n = 5,labeller = function(x){round((x*pi*scale)^2)})+
  #geom_scatterpie_legend(sqrt(coord_capital$nb_answers)/(pi*scale), -10,72, n = 5,labeller = function(x){round((x*pi*scale)^2)})+
  #scale_x_continuous(limits=c(-20,50))+
  #scale_y_continuous(limits=c(25,75))+
  theme_void()
ggplotly(p, tooltip="text")
|
## Getting started
library(dplyr)
library(ggplot2)
animals <- read.csv("~/Handouts/data/animals.csv", na.strings = "" ) %>%
filter(!is.na(species_id), !is.na(sex), !is.na(weight))
## Constructing layered graphics in ggplot
ggplot(data = animals,
aes(x = species_id,weight)) +
geom_point()
ggplot(data = animals,
aes(x = species_id, y = weight)) +
geom_boxplot()
ggplot(data = animals,
aes(x = species_id, y = weight, color = species_id)) +
geom_boxplot() +
geom_point(stat = "summary",
fun.y = "mean",
color = "blue")
ggplot(data = animals,
aes(x = species_id, y = weight, ...)) +
geom_boxplot() +
geom_point(stat = 'summary',
fun.y = 'mean')
## Exercise 1
animals_dm <- filter(animals, species_id == "DM")
ggplot(data = animals_dm, aes(x = weight, y = year, color = sex)) +
geom_point(stat = 'summary',
fun.y = 'mean')
## Adding a regression line
animals_dm <- filter(animals, species_id == 'DM')
ggplot(data = animals_dm,
aes(x = year, y = weight)) +
geom_point(aes(shape = sex),
size = 3,
stat = 'summary',
fun.y = 'mean') +
geom_smooth(method = 'lm')
...
ggplot(data = animals_dm,
aes(x = year, y = weight)) +
geom_point(aes(shape = sex),
size = 3,
stat = 'summary',
fun.y = 'mean') +
geom_smooth(aes(group=sex), method = 'lm')
ggplot(data = animals_dm,
aes(x=year, y= weight, color = sex)) +
geom_point(aes(shape = sex),
size = 3,
stat = 'summary',
fun.y = 'mean') +
geom_smooth(method = 'lm')
# Storing and re-plotting
year_wgt <- ggplot(data = animals_dm,
aes(x = year,
y = weight,
color = sex)) +
geom_point(aes(shape = sex),
size = 3,
stat = 'summary',
fun.y = 'mean') +
geom_smooth(method = 'lm')
year_wgt +
scale_color_manual(values=c("green", "orange"))
year_wgt <- year_wgt +
scale_color_manual(values=c("green", "orange"))
year_wgt
## Exercise 2
ggplot(data = animals_dm,
aes(x = weight, fill = sex)) +
geom_histogram(binwidth = 0.75)
## Axes, labels and themes
histo <- ggplot(data = animals_dm,
aes(x = weight, fill = sex)) +
geom_histogram (binwidth = 3, color="white")
histo
histo <- histo +
labs(title = 'Dipodomys merriami weight distribution',
x = 'Weight (g)',
y = 'Count') +
scale_x_continuous(limits = c(20, 60),
breaks = c(20, 30, 40, 50, 60))
histo
histo <- histo +
theme_bw() +
theme(legend.position = c(0.2, 0.5),
plot.title = element_text(face='bold', vjust=2),
axis.title.x = element_text(size=13, vjust=1),
axis.title.y = element_text(size = 13, vjust = 0))
histo
## Facets
animals_common <- filter(animals, species_id %in% c('DM', "PP", "DO"))
ggplot(data = animals_common,
aes(x=weight)) +
geom_histogram() +
facet_wrap('species_id') +
labs(title = "Weight of most common species",
x = "Count",
y = "Weight (g)")
ggplot(data = animals_common,
aes(x = weight)) +
geom_histogram(data=select(animals_common, -species_id),
alpha=0.2) +
geom_histogram() +
facet_wrap( ~ species_id) +
labs(title = "Weight of most common species",
x = "Count",
y = "Weight (g)")
ggplot(data = animals_common,
aes(x = weight, fill=species_id)) +
geom_histogram(aes(y=..density..)) +
facet_wrap( ~ species_id) +
labs(title = "Weight of most common species",
x = "Count",
y = "Weight (g)") +
guides(fill = FALSE)
## Exercise 3
ggplot(data = animals_common,
aes(x = weight, fill=species_id:sex)) +
geom_histogram(aes(y=..density..)) +
facet_grid( sex ~ species_id) +
labs(title = "Weight of most common species",
x = "Count",
y = "Weight (g)") +
guides(fill = FALSE)
| /worksheet-5.R | no_license | aishwarya611/Handouts | R | false | false | 4,022 | r | ## Getting started
library(dplyr)
library(ggplot2)
animals <- read.csv("~/Handouts/data/animals.csv", na.strings = "" ) %>%
filter(!is.na(species_id), !is.na(sex), !is.na(weight))
## Constructing layered graphics in ggplot
ggplot(data = animals,
aes(x = species_id,weight)) +
geom_point()
ggplot(data = animals,
aes(x = species_id, y = weight)) +
geom_boxplot()
ggplot(data = animals,
aes(x = species_id, y = weight, color = species_id)) +
geom_boxplot() +
geom_point(stat = "summary",
fun.y = "mean",
color = "blue")
ggplot(data = animals,
aes(x = species_id, y = weight, ...)) +
geom_boxplot() +
geom_point(stat = 'summary',
fun.y = 'mean')
## Exercise 1
animals_dm <- filter(animals, species_id == "DM")
ggplot(data = animals_dm, aes(x = weight, y = year, color = sex)) +
geom_point(stat = 'summary',
fun.y = 'mean')
## Adding a regression line
animals_dm <- filter(animals, species_id == 'DM')
ggplot(data = animals_dm,
aes(x = year, y = weight)) +
geom_point(aes(shape = sex),
size = 3,
stat = 'summary',
fun.y = 'mean') +
geom_smooth(method = 'lm')
...
ggplot(data = animals_dm,
aes(x = year, y = weight)) +
geom_point(aes(shape = sex),
size = 3,
stat = 'summary',
fun.y = 'mean') +
geom_smooth(aes(group=sex), method = 'lm')
ggplot(data = animals_dm,
aes(x=year, y= weight, color = sex)) +
geom_point(aes(shape = sex),
size = 3,
stat = 'summary',
fun.y = 'mean') +
geom_smooth(method = 'lm')
# Storing and re-plotting
year_wgt <- ggplot(data = animals_dm,
aes(x = year,
y = weight,
color = sex)) +
geom_point(aes(shape = sex),
size = 3,
stat = 'summary',
fun.y = 'mean') +
geom_smooth(method = 'lm')
year_wgt +
scale_color_manual(values=c("green", "orange"))
year_wgt <- year_wgt +
scale_color_manual(values=c("green", "orange"))
year_wgt
## Exercise 2
ggplot(data = animals_dm,
aes(x = weight, fill = sex)) +
geom_histogram(binwidth = 0.75)
## Axes, labels and themes
histo <- ggplot(data = animals_dm,
aes(x = weight, fill = sex)) +
geom_histogram (binwidth = 3, color="white")
histo
histo <- histo +
labs(title = 'Dipodomys merriami weight distribution',
x = 'Weight (g)',
y = 'Count') +
scale_x_continuous(limits = c(20, 60),
breaks = c(20, 30, 40, 50, 60))
histo
histo <- histo +
theme_bw() +
theme(legend.position = c(0.2, 0.5),
plot.title = element_text(face='bold', vjust=2),
axis.title.x = element_text(size=13, vjust=1),
axis.title.y = element_text(size = 13, vjust = 0))
histo
## Facets
animals_common <- filter(animals, species_id %in% c('DM', "PP", "DO"))
ggplot(data = animals_common,
aes(x=weight)) +
geom_histogram() +
facet_wrap('species_id') +
labs(title = "Weight of most common species",
x = "Count",
y = "Weight (g)")
ggplot(data = animals_common,
aes(x = weight)) +
geom_histogram(data=select(animals_common, -species_id),
alpha=0.2) +
geom_histogram() +
facet_wrap( ~ species_id) +
labs(title = "Weight of most common species",
x = "Count",
y = "Weight (g)")
ggplot(data = animals_common,
aes(x = weight, fill=species_id)) +
geom_histogram(aes(y=..density..)) +
facet_wrap( ~ species_id) +
labs(title = "Weight of most common species",
x = "Count",
y = "Weight (g)") +
guides(fill = FALSE)
## Exercise 3
ggplot(data = animals_common,
aes(x = weight, fill=species_id:sex)) +
geom_histogram(aes(y=..density..)) +
facet_grid( sex ~ species_id) +
labs(title = "Weight of most common species",
x = "Count",
y = "Weight (g)") +
guides(fill = FALSE)
|
#Assign taxonomy using dada2 in parallel.
#This script assumesyou have a taxonomy table where:
#1. the row names are sample names.
#2. the column names are the actual unique sequences.
#clear environment, source paths.
rm(list=ls())
source('paths.r')
source('NEFI_functions/tic_toc.r')
library(doParallel)
#specify output path for taxonomic table here.
tax_output_path <- bahram_dada2_tax_table.path
#Here i load an OTU table with column names as unique sequences to assign.
otu <- readRDS(bahram_dada2_SV_table.path)
#Rarefy OTU table.----
# set.seed(5) # so that rarefaction is repeatable.
# otu_rare <- otu[rowSums(otu) >= 5000,]
# otu_rare <- vegan::rrarefy(otu_rare, 5000)
# # save rarefied otu table
# saveRDS(otu_rare, bahram_dada2_SV_table_rare_not_subset.path)
to_assign <- colnames(otu) #grab sequences to assign.
#Everything from here below *should* just run and save where you told it to.
# Path to the greengenes 97% reference training set; download once and reuse.
greengenes.path <- paste0(data.dir,'gg_13_8_train_set_97.fa')
if(file.exists(greengenes.path)) {
  cat('using previously downloaded green genes database.')
} else {
  # Download the greengenes reference database.
  # Using download.file() instead of system("curl ...") is portable and fails
  # loudly on a bad URL, whereas the shell's non-zero exit status was ignored.
  cat('downloading green genes...\n')
  gg.download.link <- 'https://zenodo.org/record/158955/files/gg_13_8_train_set_97.fa.gz?download=1'
  # NOTE(review): the link serves a .fa.gz; as in the original it is saved
  # under the .fa name -- dada2 readers handle gzip, but confirm downstream.
  download.file(gg.download.link, destfile = greengenes.path, mode = 'wb')
  cat('greengenes download complete.\n')
}
#assign taxonomy.
tic()
cat('Assigning taxonomy using the RDP Classifier...\n')
out <- dada2::assignTaxonomy(to_assign,greengenes.path,multithread = T, tryRC=T)
cat('Taxonomy assignment complete. ')
toc()
#how many cores to run on and therefore how many subsets to break taxonomy string into.
#n <- detectCores()
#registerDoParallel(cores=n)
#set breakpoints for subsetting taxonomy list.
#to_assign <- colnames(otu)
#brk <- round(length(to_assign) / n)
#use a foreach loop to do this in parallel on subsets.
#tic()
#cat('assigning taxonomy with the RDP classifier and greengenes training set...\n')
#output.list <-
# foreach(i = 1:n) %dopar% {
# #tell loop where i of n taxonomy subset starts and ends.
# start <- (brk*i - brk) + 1
# end <- brk*i
#if you on the last subset go to end.
# if(i == n){end = length(to_assign)}
#assign taxa
# tax.out <- dada2::assignTaxonomy(to_assign[start:end],greengenes.path)
#return output to list
# return(tax.out)
# }
#cat('Taxonomy assignment complete! yeahhhh.\n')
#toc()
#merge together output of parallel assignment.
#out <- data.frame(do.call('rbind',output.list))
#save output as your taxonomy file.
saveRDS(out, tax_output_path)
| /16S/data_construction/prior_16S_bahram/5._assign_taxonomy.r | permissive | saracg-forks/NEFI_microbe | R | false | false | 2,583 | r | #Assign taxonomy using dada2 in parallel.
#This script assumesyou have a taxonomy table where:
#1. the row names are sample names.
#2. the column names are the actual unique sequences.
#clear environment, source paths.
rm(list=ls())
source('paths.r')
source('NEFI_functions/tic_toc.r')
library(doParallel)
#specify output path for taxonomic table here.
tax_output_path <- bahram_dada2_tax_table.path
#Here i load an OTU table with column names as unique sequences to assign.
otu <- readRDS(bahram_dada2_SV_table.path)
#Rarefy OTU table.----
# set.seed(5) # so that rarefaction is repeatable.
# otu_rare <- otu[rowSums(otu) >= 5000,]
# otu_rare <- vegan::rrarefy(otu_rare, 5000)
# # save rarefied otu table
# saveRDS(otu_rare, bahram_dada2_SV_table_rare_not_subset.path)
to_assign <- colnames(otu) #grab sequences to assign.
#Everything from here below *should* just run and save where you told it to.
greengenes.path <- paste0(data.dir,'gg_13_8_train_set_97.fa')
if(file.exists(greengenes.path)) {
cat('using previously downloaded green genes database.')
} else {
#download greengenes reference database.
cat('downloading green genes...\n')
gg.download.link <- 'https://zenodo.org/record/158955/files/gg_13_8_train_set_97.fa.gz?download=1'
cmd <- paste0('curl ',gg.download.link,' > ',greengenes.path)
system(cmd)
cat('greengenes download complete.\n')
}
#assign taxonomy.
tic()
cat('Assigning taxonomy using the RDP Classifier...\n')
out <- dada2::assignTaxonomy(to_assign,greengenes.path,multithread = T, tryRC=T)
cat('Taxonomy assignment complete. ')
toc()
#how many cores to run on and therefore how many subsets to break taxonomy string into.
#n <- detectCores()
#registerDoParallel(cores=n)
#set breakpoints for subsetting taxonomy list.
#to_assign <- colnames(otu)
#brk <- round(length(to_assign) / n)
#use a foreach loop to do this in parallel on subsets.
#tic()
#cat('assigning taxonomy with the RDP classifier and greengenes training set...\n')
#output.list <-
# foreach(i = 1:n) %dopar% {
# #tell loop where i of n taxonomy subset starts and ends.
# start <- (brk*i - brk) + 1
# end <- brk*i
#if you on the last subset go to end.
# if(i == n){end = length(to_assign)}
#assign taxa
# tax.out <- dada2::assignTaxonomy(to_assign[start:end],greengenes.path)
#return output to list
# return(tax.out)
# }
#cat('Taxonomy assignment complete! yeahhhh.\n')
#toc()
#merge together output of parallel assignment.
#out <- data.frame(do.call('rbind',output.list))
#save output as your taxonomy file.
saveRDS(out, tax_output_path)
|
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(dplyr)
library(data.table)
library(tidytext)
library(stringr)
source("last_n_words.R")
source("first_n_words.R")
source("find_last_word.R")
#setkey(train2_bigrams_start_dt, start)
#setkey(train2_trigrams_start_dt, start)
#setkey(train2_quadgrams_start_dt, start)
# Define server logic required to predict the next word given a phrase.
# Server logic: every reactive below is gated on the Ready button, so the
# prediction is only recomputed when the user explicitly asks for it.
shinyServer(function(input, output) {
# Predicted next word for the text currently in the input box; relies on
# predict_last_word() sourced from find_last_word.R above.
pred<-eventReactive(input$readyButton, {
instring<-input$initial_string
predict_last_word(instring)
})
# Static retry hint, re-emitted on every button press.
tryagain<-eventReactive(input$readyButton, {
return("To try again type new text in the box above and push Ready.")
})
# Render the prediction and the hint into the matching UI output slots
# (output ids "pred" and "tryagain" must match the ui definition).
output$pred <- renderText({
pred() })
output$tryagain <- renderText({
tryagain()
})
})
| /server.R | no_license | Waterman3/Predict_Word | R | false | false | 999 | r |
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(dplyr)
library(data.table)
library(tidytext)
library(stringr)
source("last_n_words.R")
source("first_n_words.R")
source("find_last_word.R")
#setkey(train2_bigrams_start_dt, start)
#setkey(train2_trigrams_start_dt, start)
#setkey(train2_quadgrams_start_dt, start)
# Define server logic required to predict the next word given a phrase.
shinyServer(function(input, output) {
pred<-eventReactive(input$readyButton, {
instring<-input$initial_string
predict_last_word(instring)
})
tryagain<-eventReactive(input$readyButton, {
return("To try again type new text in the box above and push Ready.")
})
output$pred <- renderText({
pred() })
output$tryagain <- renderText({
tryagain()
})
})
|
#' Plot pair-approximation trajectories and equilibria
#'
#' Simulates (or, given an "attractor" object, reuses) trajectories of the
#' pair-approximation model, draws mortality/growth curves or the local
#' cover along each trajectory, and marks the computed equilibria.
#'
#' @param model A model object (e.g. \code{livestock}) or an "attractor"
#'   object from a previous call, whose trajectories/equilibria are reused.
#' @param parms Model parameters; defaults to \code{model$parms}.
#' @param side Panel to draw: \code{"rho"} (cover vs mortality/growth),
#'   \code{"q"} (local cover vs mortality/growth) or \code{"plain"}
#'   (cover vs local cover).
#' @param rho_1_ini Initial total-cover values for the simulated trajectories.
#' @param rho_11_ini Initial pair-density values. NOTE(review): not used by
#'   the function body -- confirm whether it should be forwarded to
#'   \code{sim_trajectories()}.
#' @param times Time span handed to the ODE solver.
#' @param method ODE solver method.
#' @param rho Grid of cover values. NOTE(review): the parameter is shadowed
#'   inside the plotting loop and never read.
#' @param colors Curve colours. NOTE(review): only referenced in commented-out
#'   code; the drawn colours are currently hard-coded.
#' @param new If TRUE (or no device is open), start a new base plot.
#' @param ... Further arguments passed on to \code{plot_base()}.
#'
#' @import foreach
#'
#' @return An object of class "attractor": a list with elements
#'   \code{trajectories} and \code{eq}.
#' @export
#' @examples
#'
#' p <- set_parms(livestock$parms, set = list(b = 0.1, f = 0.5, p = 0.9, L = 1.5))
#' par(mfrow = c(1,2))
#' plot_pairapproximation(livestock, parms = p) -> out
#' plot_pairapproximation(out, parms = p, side = "plain")
plot_pairapproximation <- function(
model,
parms = model$parms,
side = "rho",
rho_1_ini = seq(0,1, length = 11),
rho_11_ini = seq(0,1, length = 11),
times = c(0,1000),
method = "ode45",
rho = seq(0,1,length = 100),
colors = c("#000000","#009933"),
#fog = TRUE,
new = TRUE,
...
) {
#par(mfrow = c(1,3))
# open new base plot if none exists
if(dev.cur() == 1 | new == TRUE) plot_base(ylim = switch(side, plain = c(0,1), c(0,0.25) ),
ylab = switch(side, plain = "local cover", "plant mortality/growth" ),
xlab = switch(side, q = "local cover","vegetation cover"), ...)
# draw trajectories of mortality and growth
if(class(model) == "attractor") {
trajectories <- model$trajectories
} else {
trajectories <- sim_trajectories(model = model, parms = parms, rho_1_ini = rho_1_ini, times = times, method = method)
}
# visualize trajectories to the attractor
sapply(trajectories, function(x){
rho <- ini_rho(x$rho_1, x$rho_11)
mort <- limit(mortality(rho, parms))
grow <- limit(growth(rho, parms))
q_11_vec <- q_11(rho)
#fog_m <- highlight(q_11_vec, colrange = c(paste0(colors[1],"88"),colors[1]))
#fog_g <- highlight(q_11_vec, colrange = c(paste0(colors[2],"88"),colors[2]))
switch(side,
rho = {
lines(rho$rho_1, mort)
#arrows(tail(rho$rho_1,2)[1],tail(mort,2)[1],tail(rho$rho_1,1),tail(mort,1), length = 0.1 )
lines(rho$rho_1, grow, col = "#009933")
#arrows(tail(rho$rho_1,2)[1],tail(grow,2)[1],tail(rho$rho_1,1),tail(grow,1), length = 0.1 , col = "#009933")
},
q = {
lines(q_11_vec, mort)
#arrows(tail(q_11_vec,2)[1],tail(mort,2)[1],tail(q_11_vec,1),tail(mort,1), length = 0.1 )
lines(q_11_vec, grow, col = "#009933")
#arrows(tail(q_11_vec,2)[1],tail(grow,2)[1],tail(q_11_vec,1),tail(grow,1), length = 0.1 , col = "#009933")
},
plain = {
lines(rho$rho_1, q_11_vec)
#arrows(tail(rho$rho_1,2)[1],tail(q_11_vec,2)[1],tail(rho$rho_1,1),tail(q_11_vec,1), length = 0.1 )
lines(rho$rho_1+0.002, q_11_vec+0.002, col = "#009933")
#arrows(tail(rho$rho_1,2)[1],tail(q_11_vec,2)[1],tail(rho$rho_1,1),tail(q_11_vec,1), length = 0.1 , col = "#009933")
}
)
}
)
if(class(model) == "attractor") {
eq <- model$eq
} else {
eq <- get_equilibria(y = model$template, func = model$pair, parms = parms, method = method, t_max = 130)
}
rho_steady <- ini_rho(c(eq$lo[1],eq$hi[1]),c(eq$lo[2],eq$hi[2]))
q_steady <- q_11(rho_steady)
switch(side,
rho = {
points(rho_steady$rho_1, mortality(rho_steady, parms = parms), xpd = TRUE, pch = 20, cex = 2)
},
q = {
points(q_steady, mortality(rho_steady, parms = parms), xpd = TRUE, pch = 20, cex = 2)
},
plain = {
points(rho_steady$rho_1, q_steady, xpd = TRUE, pch = 20, cex = 2)
}
)
output <- list(trajectories = trajectories,
eq = eq
)
class(output) <- "attractor"
return(output)
}
| /R/plot_pairapproximation.r | permissive | fdschneider/livestock | R | false | false | 3,633 | r | #' Draw meanfield model attractor
#'
#' @param model
#' @param times
#' @param parms
#' @param method
#' @param rho
#' @param colors
#'
#' @import foreach
#'
#' @return
#' @export
#' @examples
#'
#' p <- set_parms(livestock$parms, set = list(b = 0.1, f = 0.5, p = 0.9, L = 1.5))
#' par(mfrow = c(1,2))
#' plot_pairapproximation(livestock, parms = p) -> out
#' plot_pairapproximation(out, parms = p, side = "plain")
plot_pairapproximation <- function(
model,
parms = model$parms,
side = "rho",
rho_1_ini = seq(0,1, length = 11),
rho_11_ini = seq(0,1, length = 11),
times = c(0,1000),
method = "ode45",
rho = seq(0,1,length = 100),
colors = c("#000000","#009933"),
#fog = TRUE,
new = TRUE,
...
) {
#par(mfrow = c(1,3))
# open new base plot if none exists
if(dev.cur() == 1 | new == TRUE) plot_base(ylim = switch(side, plain = c(0,1), c(0,0.25) ),
ylab = switch(side, plain = "local cover", "plant mortality/growth" ),
xlab = switch(side, q = "local cover","vegetation cover"), ...)
# draw trajectories of mortality and growth
if(class(model) == "attractor") {
trajectories <- model$trajectories
} else {
trajectories <- sim_trajectories(model = model, parms = parms, rho_1_ini = rho_1_ini, times = times, method = method)
}
# visualize trajectories to the attractor
sapply(trajectories, function(x){
rho <- ini_rho(x$rho_1, x$rho_11)
mort <- limit(mortality(rho, parms))
grow <- limit(growth(rho, parms))
q_11_vec <- q_11(rho)
#fog_m <- highlight(q_11_vec, colrange = c(paste0(colors[1],"88"),colors[1]))
#fog_g <- highlight(q_11_vec, colrange = c(paste0(colors[2],"88"),colors[2]))
switch(side,
rho = {
lines(rho$rho_1, mort)
#arrows(tail(rho$rho_1,2)[1],tail(mort,2)[1],tail(rho$rho_1,1),tail(mort,1), length = 0.1 )
lines(rho$rho_1, grow, col = "#009933")
#arrows(tail(rho$rho_1,2)[1],tail(grow,2)[1],tail(rho$rho_1,1),tail(grow,1), length = 0.1 , col = "#009933")
},
q = {
lines(q_11_vec, mort)
#arrows(tail(q_11_vec,2)[1],tail(mort,2)[1],tail(q_11_vec,1),tail(mort,1), length = 0.1 )
lines(q_11_vec, grow, col = "#009933")
#arrows(tail(q_11_vec,2)[1],tail(grow,2)[1],tail(q_11_vec,1),tail(grow,1), length = 0.1 , col = "#009933")
},
plain = {
lines(rho$rho_1, q_11_vec)
#arrows(tail(rho$rho_1,2)[1],tail(q_11_vec,2)[1],tail(rho$rho_1,1),tail(q_11_vec,1), length = 0.1 )
lines(rho$rho_1+0.002, q_11_vec+0.002, col = "#009933")
#arrows(tail(rho$rho_1,2)[1],tail(q_11_vec,2)[1],tail(rho$rho_1,1),tail(q_11_vec,1), length = 0.1 , col = "#009933")
}
)
}
)
if(class(model) == "attractor") {
eq <- model$eq
} else {
eq <- get_equilibria(y = model$template, func = model$pair, parms = parms, method = method, t_max = 130)
}
rho_steady <- ini_rho(c(eq$lo[1],eq$hi[1]),c(eq$lo[2],eq$hi[2]))
q_steady <- q_11(rho_steady)
switch(side,
rho = {
points(rho_steady$rho_1, mortality(rho_steady, parms = parms), xpd = TRUE, pch = 20, cex = 2)
},
q = {
points(q_steady, mortality(rho_steady, parms = parms), xpd = TRUE, pch = 20, cex = 2)
},
plain = {
points(rho_steady$rho_1, q_steady, xpd = TRUE, pch = 20, cex = 2)
}
)
output <- list(trajectories = trajectories,
eq = eq
)
class(output) <- "attractor"
return(output)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clustervol.R
\name{num_clusters,ClusteredNeuroVol-method}
\alias{num_clusters,ClusteredNeuroVol-method}
\title{Number of Clusters}
\usage{
\S4method{num_clusters}{ClusteredNeuroVol}(x)
}
\arguments{
\item{x}{A ClusteredNeuroVol object.}
}
\value{
An integer representing the number of clusters in the input object.
}
\description{
This function returns the number of clusters in a ClusteredNeuroVol object.
}
| /man/num_clusters-methods.Rd | permissive | bbuchsbaum/neuroim2 | R | false | true | 487 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clustervol.R
\name{num_clusters,ClusteredNeuroVol-method}
\alias{num_clusters,ClusteredNeuroVol-method}
\title{Number of Clusters}
\usage{
\S4method{num_clusters}{ClusteredNeuroVol}(x)
}
\arguments{
\item{x}{A ClusteredNeuroVol object.}
}
\value{
An integer representing the number of clusters in the input object.
}
\description{
This function returns the number of clusters in a ClusteredNeuroVol object.
}
|
# Plot 3: energy sub-metering over 1-2 Feb 2007 (household power data).
# Load the full semicolon-separated dataset into a data frame.
dat <- read.table('household_power_consumption.txt', sep = ';', header = TRUE, stringsAsFactors = FALSE)
# Keep only the two days of interest (dates stored as d/m/Y strings).
dat <- dat[dat$Date == '1/2/2007' | dat$Date == '2/2/2007', ]
# BUG FIX: the original line was missing its closing parenthesis
# (dat$Date<-as.Date(dat$Date,format='%d/%m/%Y'), making the script unparseable.
dat$Date <- as.Date(dat$Date, format = '%d/%m/%Y')
# Combine date and time into POSIX timestamps for the x axis.
# (Renamed from `t`, which shadowed base::t().)
timestamp <- strptime(paste(dat$Date, dat$Time), '%Y-%m-%d %H:%M:%S')
# Plot 3: the three sub-metering series on one set of axes.
x11()
plot(timestamp, as.numeric(dat$Sub_metering_1), col = 'black', type = 'l', ylab = 'Energy sub metering', xlab = '')
lines(timestamp, as.numeric(dat$Sub_metering_2), col = 'red')
lines(timestamp, as.numeric(dat$Sub_metering_3), col = 'blue')
legend('topright', lty = 1, col = c('black', 'red', 'blue'), legend = names(dat)[7:9])
# Copy the screen device to a PNG file; dev.off() follows on the next line.
dev.copy(png, file = 'figure/plot3.png')
dev.off() | /plot3.R | no_license | mikejhuang/ExData_Plotting1 | R | false | false | 705 | r | #Load data into table
# Plot 3: energy sub-metering over 1-2 Feb 2007 (household power data).
# Load the full semicolon-separated dataset into a data frame.
dat <- read.table('household_power_consumption.txt', sep = ';', header = TRUE, stringsAsFactors = FALSE)
# Keep only the two days of interest (dates stored as d/m/Y strings).
dat <- dat[dat$Date == '1/2/2007' | dat$Date == '2/2/2007', ]
# BUG FIX: the original line was missing its closing parenthesis
# (dat$Date<-as.Date(dat$Date,format='%d/%m/%Y'), making the script unparseable.
dat$Date <- as.Date(dat$Date, format = '%d/%m/%Y')
# Combine date and time into POSIX timestamps for the x axis.
# (Renamed from `t`, which shadowed base::t().)
timestamp <- strptime(paste(dat$Date, dat$Time), '%Y-%m-%d %H:%M:%S')
# Plot 3: the three sub-metering series on one set of axes.
x11()
plot(timestamp, as.numeric(dat$Sub_metering_1), col = 'black', type = 'l', ylab = 'Energy sub metering', xlab = '')
lines(timestamp, as.numeric(dat$Sub_metering_2), col = 'red')
lines(timestamp, as.numeric(dat$Sub_metering_3), col = 'blue')
legend('topright', lty = 1, col = c('black', 'red', 'blue'), legend = names(dat)[7:9])
# Copy the screen device to a PNG file; dev.off() follows on the next line.
dev.copy(png, file = 'figure/plot3.png')
dev.off() |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/codestar_operations.R
\name{codestar_delete_project}
\alias{codestar_delete_project}
\title{Deletes a project, including project resources}
\usage{
codestar_delete_project(id, clientRequestToken = NULL, deleteStack = NULL)
}
\arguments{
\item{id}{[required] The ID of the project to be deleted in AWS CodeStar.}
\item{clientRequestToken}{A user- or system-generated token that identifies the entity that
requested project deletion. This token can be used to repeat the
request.}
\item{deleteStack}{Whether to send a delete request for the primary stack in AWS
CloudFormation originally used to generate the project and its
resources. This option will delete all AWS resources for the project
(except for any buckets in Amazon S3) as well as deleting the project
itself. Recommended for most use cases.}
}
\description{
Deletes a project, including project resources. Does not delete users associated with the project, but does delete the IAM roles that allowed access to the project.
See \url{https://www.paws-r-sdk.com/docs/codestar_delete_project/} for full documentation.
}
\keyword{internal}
| /cran/paws.developer.tools/man/codestar_delete_project.Rd | permissive | paws-r/paws | R | false | true | 1,177 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/codestar_operations.R
\name{codestar_delete_project}
\alias{codestar_delete_project}
\title{Deletes a project, including project resources}
\usage{
codestar_delete_project(id, clientRequestToken = NULL, deleteStack = NULL)
}
\arguments{
\item{id}{[required] The ID of the project to be deleted in AWS CodeStar.}
\item{clientRequestToken}{A user- or system-generated token that identifies the entity that
requested project deletion. This token can be used to repeat the
request.}
\item{deleteStack}{Whether to send a delete request for the primary stack in AWS
CloudFormation originally used to generate the project and its
resources. This option will delete all AWS resources for the project
(except for any buckets in Amazon S3) as well as deleting the project
itself. Recommended for most use cases.}
}
\description{
Deletes a project, including project resources. Does not delete users associated with the project, but does delete the IAM roles that allowed access to the project.
See \url{https://www.paws-r-sdk.com/docs/codestar_delete_project/} for full documentation.
}
\keyword{internal}
|
## Averaging transform for frequencies-of-frequencies (see the Good-Turing
## paper, Gale & Sampson, for the derivation).
# Averaging transform for frequencies-of-frequencies: each nc is divided by
# half the distance between the neighbouring observed counts, smoothing over
# the zero-gaps between sparse high counts. The last count keeps its single
# preceding gap, mirroring the original edge handling.
getNcZest <- function(c, nc) {
  gap <- c(1, diff(c))
  halfwidth <- c(0.5 * (gap[-1] + gap[-length(gap)]), gap[length(gap)])
  nc / halfwidth
}
# Linear Good-Turing smoothed count: c* = c * (1 + 1/c)^(1 + slope), where
# coef[2] is the slope of the least-squares fit of log(NcZest) vs log(c).
getcStar <- function(c, coef) {
  slope <- coef[2]
  c * (1 + 1 / c)^(1 + slope)
}
# Simple Good-Turing frequency smoothing (Gale & Sampson). Expects
# textMatrix to contain a numeric `counts` column, one row per observed
# object. Returns a data frame with one row per distinct count (plus a
# leading row for unseen objects) holding raw and smoothed counts and
# probabilities. Depends on dplyr and on getNcZest()/getcStar() above.
GoodTuring <- function(textMatrix)
{
# Order objects by decreasing count, then group identical counts together.
# NOTE(review): prefer TRUE over T; order() would be the idiomatic way to
# obtain the permutation instead of sort(index.return = TRUE).
textMatrix <- textMatrix[sort(textMatrix$counts, decreasing = T, index.return = T)$ix,]
textMatrix <- group_by(textMatrix, counts)
# One row per distinct count: Nc = number of objects with that count.
# NOTE(review): summarise() with no expressions yields only the grouping
# column; how data.frame() names the resulting column (counts vs C.counts)
# determines whether goodTuring$counts below resolves -- verify, and
# consider count()/bind_cols() to make the column name explicit.
goodTuring <- data.frame(Nc = group_size(textMatrix),
C = summarise(textMatrix))
Counts <- goodTuring$counts # c counts
Nc <- goodTuring$Nc # Nc frequency
N <- sum(Counts*Nc)  # total number of observations
goodTuring <- mutate(goodTuring, p = counts*Nc/N)
## make averaging transform
NcZest <- getNcZest(Counts,Nc)
## get Linear Good-Turing estimate
# Fit log(NcZest) ~ log(Counts); the slope feeds the LGT smoothing.
fitLGT <- lsfit(log(Counts),log(NcZest))
coef <- fitLGT$coef
cStar <- getcStar(Counts,coef)
cStarLGT <- cStar/Counts
#get Turing estimate
# cTE flags counts whose successor count c+1 was also observed, where the
# raw Turing estimate (c+1) * N_{c+1} / (c * N_c) is defined.
cTE <- Counts == c(Counts[-1]-1,0)
cStarTE <- rep(0,length(Counts))
cStarTE[cTE] <- (Counts[cTE]+1)/Counts[cTE]*c(Nc[-1],0)[cTE]/Nc[cTE]
#make switch from Turing to LGT estimates
# Approximate standard deviation of the Turing estimate, used for the
# significance test that decides when to switch permanently to LGT.
turingSD <- rep(1,length(Counts)) # standard deviation
# NOTE(review): 1:length(Counts) misbehaves if Counts is empty; seq_along
# would be safer.
for(i in 1:length(Counts))
if(cTE[i])
turingSD[i] <- (i+1)/Nc[i]*sqrt(Nc[i+1]*(1 + Nc[i+1]/Nc[i]))
cStarcmbrel <- rep(0,length(Counts))
useTuring <- TRUE
# Use Turing estimates while they differ significantly (> 1.65 sd) from the
# LGT estimates; after the first non-significant difference, use LGT only.
for(r in 1:length(Counts))
{
if(!useTuring)
cStarcmbrel[r] <- cStarLGT[r]
else
if(abs(cStarLGT-cStarTE)[r]*r/turingSD[r] > 1.65)
cStarcmbrel[r] <- cStarTE[r]
else{
useTuring <- FALSE
cStarcmbrel[r] <- cStarLGT[r]}
}
## renormalize the probabilities for observed objects
# Reserve total mass Nc[1]/N (the Turing estimate of unseen mass) and scale
# the observed-object probabilities to the remainder.
sumProbRaw <- sum(cStarcmbrel*Counts*Nc/N)
cStarcmbrel <- cStarcmbrel*(1 - Nc[1]/N)/sumProbRaw
## output
# Prepend a row for unseen objects (count 0); its cStar becomes Nc of the
# lowest observed count divided by N.
goodTuring <- rbind(c(Nc = N, C = 0, p = 0), goodTuring)
goodTuring <- goodTuring %>%
mutate(cStar = c(Nc[2]/N, Counts*cStarcmbrel),
pStar = cStar*Nc/N,
NcZest = c(Nc[1],NcZest),
pAve = cStar/N)
return(goodTuring)
}
| /FunGoodTuring.R | no_license | 7cats/DataScienceCapstone | R | false | false | 2,247 | r | ## get an average tranform???
## read the paper again
# Averaging transform for Good-Turing smoothing (Church & Gale): smooth the
# frequency-of-frequencies nc by dividing each N_c by the width of the gap
# between neighbouring observed counts.
#   c  sorted distinct counts; nc  their frequencies N_c
# Returns nc scaled by the local gap widths (Z-estimate of N_c).
getNcZest <- function(c, nc)
{
  gaps <- c(1, diff(c))
  n_gaps <- length(gaps)
  # Interior widths average the gaps on either side; the last count keeps
  # its single preceding gap.
  half_widths <- 0.5 * (gaps[-1] + gaps[-n_gaps])
  widths <- c(half_widths, gaps[n_gaps])
  nc / widths
}
# Linear Good-Turing adjusted count: c* = c * (1 + 1/c)^(1 + slope), where
# coef[2] is the slope of the least-squares fit of log(Nc) on log(c).
getcStar <- function(c, coef)
{
  slope <- coef[2]
  c * (1 + 1 / c)^(1 + slope)
}
# Simple Good-Turing frequency estimation: smooth the observed
# frequency-of-frequencies, fit the Linear Good-Turing (LGT) estimate in
# log-log space, switch from the Turing estimate to LGT once they stop
# differing significantly, renormalize, and prepend a row for unseen
# objects (count 0). Requires dplyr to be attached
# (group_by/group_size/summarise/mutate).
GoodTuring <- function(textMatrix)
{
# Order rows by descending count, then group rows sharing the same count.
textMatrix <- textMatrix[sort(textMatrix$counts, decreasing = T, index.return = T)$ix,]
textMatrix <- group_by(textMatrix, counts)
# Nc = rows per distinct count; C = the distinct counts themselves.
# NOTE(review): summarise(textMatrix) with no expressions returns only the
# grouping column, and data.frame() will prefix its name (e.g. "C.counts"),
# so the goodTuring$counts lookups below depend on that exact naming --
# verify against the dplyr version in use.
goodTuring <- data.frame(Nc = group_size(textMatrix),
C = summarise(textMatrix))
Counts <- goodTuring$counts # c counts
Nc <- goodTuring$Nc # Nc frequency
N <- sum(Counts*Nc) # total number of observed tokens
goodTuring <- mutate(goodTuring, p = counts*Nc/N)
## make averaging transform
NcZest <- getNcZest(Counts,Nc)
## get Linear Good-Turing estimate
fitLGT <- lsfit(log(Counts),log(NcZest))
coef <- fitLGT$coef
cStar <- getcStar(Counts,coef)
cStarLGT <- cStar/Counts # LGT relative adjustment c*/c
#get Turing estimate
# Turing estimate is only defined where the neighbouring distinct count is
# exactly c+1 -- presumably Counts is in ascending group order here (dplyr
# sorts groups ascending); TODO confirm.
cTE <- Counts == c(Counts[-1]-1,0)
cStarTE <- rep(0,length(Counts))
cStarTE[cTE] <- (Counts[cTE]+1)/Counts[cTE]*c(Nc[-1],0)[cTE]/Nc[cTE]
#make switch from Turing to LGT estimates
turingSD <- rep(1,length(Counts)) # standard deviation
for(i in 1:length(Counts))
if(cTE[i])
turingSD[i] <- (i+1)/Nc[i]*sqrt(Nc[i+1]*(1 + Nc[i+1]/Nc[i]))
cStarcmbrel <- rep(0,length(Counts))
useTuring <- TRUE
# Use the Turing estimate while it differs from LGT by more than ~1.65 SD;
# once the two agree, switch to LGT permanently.
for(r in 1:length(Counts))
{
if(!useTuring)
cStarcmbrel[r] <- cStarLGT[r]
else
if(abs(cStarLGT-cStarTE)[r]*r/turingSD[r] > 1.65)
cStarcmbrel[r] <- cStarTE[r]
else{
useTuring <- FALSE
cStarcmbrel[r] <- cStarLGT[r]}
}
## renormalize the probabilities for observed objects
# Reserve total probability mass Nc[1]/N for unseen objects.
sumProbRaw <- sum(cStarcmbrel*Counts*Nc/N)
cStarcmbrel <- cStarcmbrel*(1 - Nc[1]/N)/sumProbRaw
## output
# Prepend the unseen-object row (count 0) before attaching the adjusted
# counts and probabilities.
goodTuring <- rbind(c(Nc = N, C = 0, p = 0), goodTuring)
goodTuring <- goodTuring %>%
mutate(cStar = c(Nc[2]/N, Counts*cStarcmbrel),
pStar = cStar*Nc/N,
NcZest = c(Nc[1],NcZest),
pAve = cStar/N)
return(goodTuring)
}
|
# Read the reason code CSV and change them in the rma table.
# Replace raw reason codes in `rma` with descriptions from the lookup CSV.
# Joins rma$Code against codes$reason (inner merge: rows with unknown codes
# are dropped), then keeps columns 2:17 of the merged result, discarding
# the join key in column 1.
addReasonCodes <- function(rma) {
  reason_lookup <- read.csv("H:/Code/supportFiles/Reasoncode.csv", header = TRUE)
  merged <- merge(rma, reason_lookup, by.x = "Code", by.y = "reason")
  merged[2:17]
}
# Split into groups for drivers, light engines, and other parts, then
# change the product families.
# Attach a Product_Family to each RMA row based on its problem part number.
# Light engines ("LEM-...") and drivers ("SP-###-####-...") are looked up
# in their CSV mapping files via a fixed-length part-family prefix; all
# other rows keep their existing Product_Family. Rows whose prefix has no
# entry in the lookup CSV are dropped (inner merge), as before.
addProductFamilies <- function(rma) {
  # BUG FIX: the original built "others" with rma[-grep(...), ]; when the
  # pattern matched nothing, -integer(0) selected ZERO rows instead of all
  # rows. Logical masks via grepl() handle the empty-match case correctly.
  is_engine <- grepl("LEM-", rma$Prob_Part)
  is_driver <- grepl("SP-[0-9]{3}-[0-9]{4}-", rma$Prob_Part)
  engines <- rma[is_engine,]
  drivers <- rma[is_driver,]
  others <- rma[!(is_engine | is_driver),]
  engineToPartFam <- read.csv("H:/Code/supportFiles/LightEngineToPartFam.csv")
  names(engineToPartFam) <- c("Item.ID", "Product.Family")
  driverToPartFam <- read.csv("H:/Code/supportFiles/driverToPartFam.csv")
  names(driverToPartFam) <- c("Item.ID", "Product.Family")
  # Part-family key = fixed-length prefix of the part number.
  engines <- cbind(engines, PartFam = substring(engines$Prob_Part, 1, 7))
  drivers <- cbind(drivers, PartFam = substring(drivers$Prob_Part, 1, 6))
  engines <- merge(engines, engineToPartFam, by.x = c("PartFam"), by.y = c("Item.ID"))
  drivers <- merge(drivers, driverToPartFam, by.x = c("PartFam"), by.y = c("Item.ID"))
  engines$Product_Family <- engines$Product.Family
  drivers$Product_Family <- drivers$Product.Family
  # Drop the PartFam key (column 1 after merge), keeping the 16 RMA columns.
  engines <- engines[c(2:17)]
  drivers <- drivers[c(2:17)]
  rmaOutput <- rbind(engines, drivers) %>%
    rbind(others) %>%
    as.data.table()
  rmaOutput <- rmaOutput[order(-rmaOutput$RMA),]
  rmaOutput
}
# Fill the Specifier and Job_Name columns from the sales-order user-defined
# fields, joined in from the SO master CSV on orig_SO; then drop the joined
# helper columns by keeping only the first 16 columns.
addUserDefs <- function(rma) {
  so_master <- read.csv("H:/Code/supportFiles/so_User_Defs.csv")
  joined <- left_join(rma, so_master, by = c("orig_SO" = "Sales.Order.ID"))
  joined$Specifier <- joined$Specifier.User.Def.3
  joined$Job_Name <- joined$Job.Name.User.Def.5
  joined[1:16]
}
| /Code/Scripts/updatedTable.R | no_license | Vinladar/USAI | R | false | false | 1,748 | r | # Read the reason code CSV and change them in the rma table.
# Replace raw reason codes in `rma` with descriptions from the lookup CSV.
# Joins rma$Code against codes$reason (inner merge: rows with unknown codes
# are dropped), then keeps columns 2:17 of the merged result, discarding
# the join key in column 1.
addReasonCodes <- function(rma) {
  reason_lookup <- read.csv("H:/Code/supportFiles/Reasoncode.csv", header = TRUE)
  merged <- merge(rma, reason_lookup, by.x = "Code", by.y = "reason")
  merged[2:17]
}
# Split into groups for drivers, light engines, and other parts, then
# change the product families.
# Attach a Product_Family to each RMA row based on its problem part number.
# Light engines ("LEM-...") and drivers ("SP-###-####-...") are looked up
# in their CSV mapping files via a fixed-length part-family prefix; all
# other rows keep their existing Product_Family. Rows whose prefix has no
# entry in the lookup CSV are dropped (inner merge), as before.
addProductFamilies <- function(rma) {
  # BUG FIX: the original built "others" with rma[-grep(...), ]; when the
  # pattern matched nothing, -integer(0) selected ZERO rows instead of all
  # rows. Logical masks via grepl() handle the empty-match case correctly.
  is_engine <- grepl("LEM-", rma$Prob_Part)
  is_driver <- grepl("SP-[0-9]{3}-[0-9]{4}-", rma$Prob_Part)
  engines <- rma[is_engine,]
  drivers <- rma[is_driver,]
  others <- rma[!(is_engine | is_driver),]
  engineToPartFam <- read.csv("H:/Code/supportFiles/LightEngineToPartFam.csv")
  names(engineToPartFam) <- c("Item.ID", "Product.Family")
  driverToPartFam <- read.csv("H:/Code/supportFiles/driverToPartFam.csv")
  names(driverToPartFam) <- c("Item.ID", "Product.Family")
  # Part-family key = fixed-length prefix of the part number.
  engines <- cbind(engines, PartFam = substring(engines$Prob_Part, 1, 7))
  drivers <- cbind(drivers, PartFam = substring(drivers$Prob_Part, 1, 6))
  engines <- merge(engines, engineToPartFam, by.x = c("PartFam"), by.y = c("Item.ID"))
  drivers <- merge(drivers, driverToPartFam, by.x = c("PartFam"), by.y = c("Item.ID"))
  engines$Product_Family <- engines$Product.Family
  drivers$Product_Family <- drivers$Product.Family
  # Drop the PartFam key (column 1 after merge), keeping the 16 RMA columns.
  engines <- engines[c(2:17)]
  drivers <- drivers[c(2:17)]
  rmaOutput <- rbind(engines, drivers) %>%
    rbind(others) %>%
    as.data.table()
  rmaOutput <- rmaOutput[order(-rmaOutput$RMA),]
  rmaOutput
}
# Fill the Specifier and Job_Name columns from the sales-order user-defined
# fields, joined in from the SO master CSV on orig_SO; then drop the joined
# helper columns by keeping only the first 16 columns.
addUserDefs <- function(rma) {
  so_master <- read.csv("H:/Code/supportFiles/so_User_Defs.csv")
  joined <- left_join(rma, so_master, by = c("orig_SO" = "Sales.Order.ID"))
  joined$Specifier <- joined$Specifier.User.Def.3
  joined$Job_Name <- joined$Job.Name.User.Def.5
  joined[1:16]
}
|
library(nimble) #, lib.loc='/tmp/nim063'
library(splines)
library(maps)
library(plyr)
library(oce)
library(RCurl)
load("~/babystepps/Data/calibration.data.Rdata") #from get.data.R
load("~/babystepps/Data/prediction.data.Rdata") #from get.data.R
load(file = '~/babystepps/Data/nimble.betas_1_22016-12-02.Rdata')
source("~/babySTEPPS/Workflow Code/utils/bs_nimble.R")
Z.knots = bs(biomass,intercept=TRUE,df=5)
beta1.est.real = matrix(colMeans(samples.mixed[100:nrow(samples.mixed),1:105]),ncol(Z.knots),ncol(Y))
beta2.est.real = matrix(colMeans(samples.mixed[100:nrow(samples.mixed),106:210]),ncol(Z.knots),ncol(Y))
#plots a confidence interval around an x-y plot (e.g. a timeseries)
# Shade a confidence envelope around a curve on the current plot: trace the
# lower bound left-to-right, the upper bound right-to-left, and close the
# polygon back at the first lower point. Extra arguments (col, density,
# ...) are forwarded to polygon(); the border is suppressed.
ciEnvelope <- function(x, ylo, yhi, ...) {
  xs <- c(x, rev(x), x[1])
  ys <- c(ylo, rev(yhi), ylo[1])
  polygon(cbind(xs, ys), border = NA, ...)
}
# Beta-binomial density as a NIMBLE user-defined distribution: x successes
# in `size` trials where the success probability is Beta(alpha, beta)
# distributed. Used for the pollen-count likelihood in pred_code.
dbetabin <- nimbleFunction(
run = function(x = double(0), alpha = double(0), beta = double(0), size = double(0),
log = integer(0, default = 0)) {
returnType(double(0))
# log[ choose(size, x) * B(x + alpha, size - x + beta) / B(alpha, beta) ],
# written entirely with lgamma for numerical stability.
logProb <- lgamma(size+1) - lgamma(x+1) - lgamma(size - x + 1) +
lgamma(alpha + beta) - lgamma(alpha) - lgamma(beta) +
lgamma(x + alpha) + lgamma(size - x + beta) - lgamma(size + alpha + beta)
if(log) return(logProb)
else return(exp(logProb))
})
# Beta-binomial random generation for NIMBLE: draw p ~ Beta(alpha, beta),
# then x ~ Binomial(size, p). Only n = 1 is supported, per NIMBLE's
# convention for user-defined "r" functions.
rbetabin <- nimbleFunction(
run = function(n = integer(0), alpha = double(0), beta = double(0), size = double(0)) {
returnType(double(0))
if(n != 1) print("rbetabin only allows n = 1; using n = 1.")
p <- rbeta(1, alpha, beta)
return(rbinom(1, size = size, prob = p))
})
order1 <- TRUE # for first order model; set to FALSE for 2nd order
# NIMBLE model: predict biomass b[1:T] through time from pollen counts Y,
# with calibration spline coefficients beta1/beta2 supplied as constants.
# `order1` (an R flag evaluated at model-build time) selects a first- or
# second-order random walk on biomass.
pred_code <- nimbleCode({
sigma ~ dunif(0,50) #GELMAN PAPER #5
if(order1) {
# First-order random walk, truncated to the plausible biomass range.
b[1] ~ dunif(0,145)
for(t in 2:T){
b[t] ~ T(dnorm(b[t-1],1/sigma^2),0,145)
}
} else {
# Second-order (locally linear) random walk.
# NOTE(review): this branch is not truncated to (0,145) like the
# first-order one -- confirm that is intentional.
b[1] ~ dunif(0,145)
b[2] ~ dunif(0,145)
for(t in 3:T){
b[t] ~ dnorm(2*b[t-1] - b[t-2],1/sigma^2)
}
}
# B-spline basis evaluated at each biomass value.
for(t in 1:T){
Zb[t,1:5] <- bs_nimble(b[t], u[1:3], N0[1:2], N1[1:3], N2[1:4], N3[1:5])
}
# Linear predictors for the two beta-binomial shape parameters per taxon.
for(i in 1:I){
for(t in 1:T){
phi.first[t,i] <- sum(Zb[t,1:5] %*% beta1[1:5,i])
phi.first1[t,i] <- sum(Zb[t,1:5] %*% beta2[1:5,i])
}
}
for(t in 1:T){
for(i in 1:I){
shape1[t,i] <- exp(phi.first[t,i])
shape2[t,i] <- exp(phi.first1[t,i])
}
}
# Sequential beta-binomial likelihood over taxa: each taxon's count is
# drawn from the pollen grains remaining after the preceding taxa.
for(j in 1:J){
Y[j, 1] ~ dbetabin(shape1[age_index[j], 1], shape2[age_index[j], 1], n[j])
for(i in 2:(I-1)){
Y[j, i] ~ dbetabin(shape1[age_index[j], i], shape2[age_index[j], i], n[j] - sum(Y[j,1:(i-1)]))
}
}
})
#Here's where you pick which lake you want to run
site_number = unique(x.meta[x.meta$site.name=='Cub Lake',1])
ten.count.use = ten.count[which(x.meta$site.id==site_number),]
Y = as.matrix(ten.count.use)
sample.ages <- x.meta[x.meta[,1]==site_number,]$age_bacon
age.bins <- seq(0,10000,100)
age.index <- as.matrix(as.numeric(cut(sample.ages,breaks = age.bins,labels=seq(1:(length(age.bins)-1)))))
tmp <- data.frame(cbind(age.index, Y))
names(tmp)[1] <- 'age.index'
Y2 <- aggregate(tmp, by = list(tmp$age.index), FUN = sum)
Y <- as.matrix(Y2[ , -c(1,2)])
age.index = as.matrix(Y2[,1])
Z.knots = Z
T = length(age.bins)-1
I = ncol(Y)
K = ncol(Z.knots)
J = length(age.index)
n = rowSums(Y)
Zb = matrix(NA,T,K)
phi.first = matrix(NA,T,I); exp.phi = phi.first
#beta.est = matrix(colMeans(samples1[100:nrow(samples1),]),K,I)
new.biomass = seq(1,200,1)
Z.new = matrix(0,nrow=length(new.biomass),ncol=K)
u<-c(rep(attr(Z.knots,"Boundary.knots")[1],1),attr(Z.knots,"knots"),rep(attr(Z.knots,"Boundary.knots")[2],1))
data.pred = list(Y = Y)
constants.pred = list(beta1 = beta1.est.real, beta2 = beta2.est.real, I = I, J = J,
T = T, n = n, u = u, N0 = rep(0, (length(u)-1)),
N1 = rep(0, (length(u))), N2 = rep(0, (length(u)+1)),
N3 = rep(0, (length(u)+2)), age_index = age_index)
inits.pred = list(b = rep(10, T),sigma = 4.5)#logb = matrix(log(10),1,T) #b = matrix(10,1,T),
dimensions.pred = list(exp.phi = c(T,I), exp.phi1 = c(T,I), phi.first = c(T,I),
phi.first1 = c(T,I), Zb = dim(Zb), Y = dim(Y))
set.seed(0)
source('~/babySTEPPS/Workflow Code/samplers/samplers.R')
model_pred <- nimbleModel(pred_code, inits = inits.pred, constants = constants.pred,
data = data.pred, dimensions = dimensions.pred)
spec.pred <- configureMCMC(model_pred, thin = 10)#,control = list(log=TRUE) , print = FALSE,control = list(log=TRUE)
smp <- spec.pred$getSamplers()
for(i in 1:length(smp)) {
if(smp[[i]]$name == 'RW sampler' && smp[[i]]$target != 'sigma') {
spec.pred$removeSamplers(smp[[i]]$target)
spec.pred$addSampler(smp[[i]]$target, type = 'RWt_trunc', control = list(log=TRUE, range = c(0,145)))
spec.pred$addSampler(smp[[i]]$target, type = 'jointb', control = list(log = TRUE, range = c(0,145), weights = c(.7,.2))) # this seems to help avoid getting stuck at low-lik values early in chain and leads to higher ESS, but sampling does take longer...
}
}
spec.pred$addMonitors(c("b"))
Rmcmc.pred <- buildMCMC(spec.pred)
cm <- compileNimble(model_pred, Rmcmc.pred)
# don't initialize all b's at same value as that can lead to samples for sigma being driven to be very small, at least for a while
b1 <- rnorm(T, 25, 10)
b2 <- rnorm(T, 75, 10)
b3 <- rnorm(T, 125, 10)
b1[b1 < 0] <- 2
b3[b3 > 145] <- 144
samplesList <- runMCMC(mcmc = cm$Rmcmc.pred, niter = 10000, nchains = 1,
inits = list(list(b = b1, sigma = 4.5)))
#,
#list(b = b2, sigma = 4.5),
#list(b = b3, sigma = 4.5))
stop()
samplesList <- runMCMC(mcmc = cm$Rmcmc.pred, niter = 10000, nchains = 1,
inits = list(b = b3, sigma = 4.5))
sl = samplesList[[3]]
par(mfrow=c(2,2))
ts.plot(sl[,88])
ts.plot(sl[,91])
ts.plot(sl[,94])
bvals <- sl[500:1000,1:100]
bs <- colMeans(bvals)
ts.plot(bs)
lines(1:100, apply(bvals, 2, quantile, .025), col = 'red')
lines(1:100, apply(bvals, 2, quantile, .975), col = 'red')
# basic likelihood plot (for biomass at time 'tt'
if(F) {
tt = 86
dps = cm$model_pred$getDependencies(paste0('b[', tt, ']'))
ind = which(age.index == tt)
bs = seq(10, 145, by = 1)
i=1; out = rep(0, length(bs))
for( b in bs ) {
cm$model_pred$b[tt] = b
cm$model_pred$calculate(dps)
out[i] = cm$model_pred$calculate(paste0('Y[', ind, ', 1:20]'))
i = i + 1
}
plot(bs, exp(out), type = 'l')
}
| /Workflow_Code/older_code/prediction.model.R | permissive | PalEON-Project/ReFAB | R | false | false | 6,558 | r | library(nimble) #, lib.loc='/tmp/nim063'
library(splines)
library(maps)
library(plyr)
library(oce)
library(RCurl)
load("~/babystepps/Data/calibration.data.Rdata") #from get.data.R
load("~/babystepps/Data/prediction.data.Rdata") #from get.data.R
load(file = '~/babystepps/Data/nimble.betas_1_22016-12-02.Rdata')
source("~/babySTEPPS/Workflow Code/utils/bs_nimble.R")
Z.knots = bs(biomass,intercept=TRUE,df=5)
beta1.est.real = matrix(colMeans(samples.mixed[100:nrow(samples.mixed),1:105]),ncol(Z.knots),ncol(Y))
beta2.est.real = matrix(colMeans(samples.mixed[100:nrow(samples.mixed),106:210]),ncol(Z.knots),ncol(Y))
#plots a confidence interval around an x-y plot (e.g. a timeseries)
# Shade a confidence envelope around a curve on the current plot: trace the
# lower bound left-to-right, the upper bound right-to-left, and close the
# polygon back at the first lower point. Extra arguments (col, density,
# ...) are forwarded to polygon(); the border is suppressed.
ciEnvelope <- function(x, ylo, yhi, ...) {
  xs <- c(x, rev(x), x[1])
  ys <- c(ylo, rev(yhi), ylo[1])
  polygon(cbind(xs, ys), border = NA, ...)
}
# Beta-binomial density as a NIMBLE user-defined distribution: x successes
# in `size` trials where the success probability is Beta(alpha, beta)
# distributed. Used for the pollen-count likelihood in pred_code.
dbetabin <- nimbleFunction(
run = function(x = double(0), alpha = double(0), beta = double(0), size = double(0),
log = integer(0, default = 0)) {
returnType(double(0))
# log[ choose(size, x) * B(x + alpha, size - x + beta) / B(alpha, beta) ],
# written entirely with lgamma for numerical stability.
logProb <- lgamma(size+1) - lgamma(x+1) - lgamma(size - x + 1) +
lgamma(alpha + beta) - lgamma(alpha) - lgamma(beta) +
lgamma(x + alpha) + lgamma(size - x + beta) - lgamma(size + alpha + beta)
if(log) return(logProb)
else return(exp(logProb))
})
# Beta-binomial random generation for NIMBLE: draw p ~ Beta(alpha, beta),
# then x ~ Binomial(size, p). Only n = 1 is supported, per NIMBLE's
# convention for user-defined "r" functions.
rbetabin <- nimbleFunction(
run = function(n = integer(0), alpha = double(0), beta = double(0), size = double(0)) {
returnType(double(0))
if(n != 1) print("rbetabin only allows n = 1; using n = 1.")
p <- rbeta(1, alpha, beta)
return(rbinom(1, size = size, prob = p))
})
order1 <- TRUE # for first order model; set to FALSE for 2nd order
# NIMBLE model: predict biomass b[1:T] through time from pollen counts Y,
# with calibration spline coefficients beta1/beta2 supplied as constants.
# `order1` (an R flag evaluated at model-build time) selects a first- or
# second-order random walk on biomass.
pred_code <- nimbleCode({
sigma ~ dunif(0,50) #GELMAN PAPER #5
if(order1) {
# First-order random walk, truncated to the plausible biomass range.
b[1] ~ dunif(0,145)
for(t in 2:T){
b[t] ~ T(dnorm(b[t-1],1/sigma^2),0,145)
}
} else {
# Second-order (locally linear) random walk.
# NOTE(review): this branch is not truncated to (0,145) like the
# first-order one -- confirm that is intentional.
b[1] ~ dunif(0,145)
b[2] ~ dunif(0,145)
for(t in 3:T){
b[t] ~ dnorm(2*b[t-1] - b[t-2],1/sigma^2)
}
}
# B-spline basis evaluated at each biomass value.
for(t in 1:T){
Zb[t,1:5] <- bs_nimble(b[t], u[1:3], N0[1:2], N1[1:3], N2[1:4], N3[1:5])
}
# Linear predictors for the two beta-binomial shape parameters per taxon.
for(i in 1:I){
for(t in 1:T){
phi.first[t,i] <- sum(Zb[t,1:5] %*% beta1[1:5,i])
phi.first1[t,i] <- sum(Zb[t,1:5] %*% beta2[1:5,i])
}
}
for(t in 1:T){
for(i in 1:I){
shape1[t,i] <- exp(phi.first[t,i])
shape2[t,i] <- exp(phi.first1[t,i])
}
}
# Sequential beta-binomial likelihood over taxa: each taxon's count is
# drawn from the pollen grains remaining after the preceding taxa.
for(j in 1:J){
Y[j, 1] ~ dbetabin(shape1[age_index[j], 1], shape2[age_index[j], 1], n[j])
for(i in 2:(I-1)){
Y[j, i] ~ dbetabin(shape1[age_index[j], i], shape2[age_index[j], i], n[j] - sum(Y[j,1:(i-1)]))
}
}
})
#Here's where you pick which lake you want to run
site_number = unique(x.meta[x.meta$site.name=='Cub Lake',1])
ten.count.use = ten.count[which(x.meta$site.id==site_number),]
Y = as.matrix(ten.count.use)
sample.ages <- x.meta[x.meta[,1]==site_number,]$age_bacon
age.bins <- seq(0,10000,100)
age.index <- as.matrix(as.numeric(cut(sample.ages,breaks = age.bins,labels=seq(1:(length(age.bins)-1)))))
tmp <- data.frame(cbind(age.index, Y))
names(tmp)[1] <- 'age.index'
Y2 <- aggregate(tmp, by = list(tmp$age.index), FUN = sum)
Y <- as.matrix(Y2[ , -c(1,2)])
age.index = as.matrix(Y2[,1])
Z.knots = Z
T = length(age.bins)-1
I = ncol(Y)
K = ncol(Z.knots)
J = length(age.index)
n = rowSums(Y)
Zb = matrix(NA,T,K)
phi.first = matrix(NA,T,I); exp.phi = phi.first
#beta.est = matrix(colMeans(samples1[100:nrow(samples1),]),K,I)
new.biomass = seq(1,200,1)
Z.new = matrix(0,nrow=length(new.biomass),ncol=K)
u<-c(rep(attr(Z.knots,"Boundary.knots")[1],1),attr(Z.knots,"knots"),rep(attr(Z.knots,"Boundary.knots")[2],1))
data.pred = list(Y = Y)
constants.pred = list(beta1 = beta1.est.real, beta2 = beta2.est.real, I = I, J = J,
T = T, n = n, u = u, N0 = rep(0, (length(u)-1)),
N1 = rep(0, (length(u))), N2 = rep(0, (length(u)+1)),
N3 = rep(0, (length(u)+2)), age_index = age_index)
inits.pred = list(b = rep(10, T),sigma = 4.5)#logb = matrix(log(10),1,T) #b = matrix(10,1,T),
dimensions.pred = list(exp.phi = c(T,I), exp.phi1 = c(T,I), phi.first = c(T,I),
phi.first1 = c(T,I), Zb = dim(Zb), Y = dim(Y))
set.seed(0)
source('~/babySTEPPS/Workflow Code/samplers/samplers.R')
model_pred <- nimbleModel(pred_code, inits = inits.pred, constants = constants.pred,
data = data.pred, dimensions = dimensions.pred)
spec.pred <- configureMCMC(model_pred, thin = 10)#,control = list(log=TRUE) , print = FALSE,control = list(log=TRUE)
smp <- spec.pred$getSamplers()
for(i in 1:length(smp)) {
if(smp[[i]]$name == 'RW sampler' && smp[[i]]$target != 'sigma') {
spec.pred$removeSamplers(smp[[i]]$target)
spec.pred$addSampler(smp[[i]]$target, type = 'RWt_trunc', control = list(log=TRUE, range = c(0,145)))
spec.pred$addSampler(smp[[i]]$target, type = 'jointb', control = list(log = TRUE, range = c(0,145), weights = c(.7,.2))) # this seems to help avoid getting stuck at low-lik values early in chain and leads to higher ESS, but sampling does take longer...
}
}
spec.pred$addMonitors(c("b"))
Rmcmc.pred <- buildMCMC(spec.pred)
cm <- compileNimble(model_pred, Rmcmc.pred)
# don't initialize all b's at same value as that can lead to samples for sigma being driven to be very small, at least for a while
b1 <- rnorm(T, 25, 10)
b2 <- rnorm(T, 75, 10)
b3 <- rnorm(T, 125, 10)
b1[b1 < 0] <- 2
b3[b3 > 145] <- 144
samplesList <- runMCMC(mcmc = cm$Rmcmc.pred, niter = 10000, nchains = 1,
inits = list(list(b = b1, sigma = 4.5)))
#,
#list(b = b2, sigma = 4.5),
#list(b = b3, sigma = 4.5))
stop()
samplesList <- runMCMC(mcmc = cm$Rmcmc.pred, niter = 10000, nchains = 1,
inits = list(b = b3, sigma = 4.5))
sl = samplesList[[3]]
par(mfrow=c(2,2))
ts.plot(sl[,88])
ts.plot(sl[,91])
ts.plot(sl[,94])
bvals <- sl[500:1000,1:100]
bs <- colMeans(bvals)
ts.plot(bs)
lines(1:100, apply(bvals, 2, quantile, .025), col = 'red')
lines(1:100, apply(bvals, 2, quantile, .975), col = 'red')
# basic likelihood plot (for biomass at time 'tt'
if(F) {
tt = 86
dps = cm$model_pred$getDependencies(paste0('b[', tt, ']'))
ind = which(age.index == tt)
bs = seq(10, 145, by = 1)
i=1; out = rep(0, length(bs))
for( b in bs ) {
cm$model_pred$b[tt] = b
cm$model_pred$calculate(dps)
out[i] = cm$model_pred$calculate(paste0('Y[', ind, ', 1:20]'))
i = i + 1
}
plot(bs, exp(out), type = 'l')
}
|
#' Suma de dos numeros
#'
#' Esta funcion devuelve la suma entre dos numeros.
#'
#' @param a Primer numero
#' @param b Segundo numero
#'
#' @return La suma entre a y b
#' @export
#'
#' @examples
#' suma(5,3)
#'
#' @seealso \link{resta}
suma<- function(a,b){
return(a+b)
} | /R/suma.R | permissive | serlomudata/SIQ025-paqueteR | R | false | false | 276 | r | #' Suma de dos numeros
#'
#' Esta funcion devuelve la suma entre dos numeros.
#'
#' @param a Primer numero
#' @param b Segundo numero
#'
#' @return La suma entre a y b
#' @export
#'
#' @examples
#' suma(5,3)
#'
#' @seealso \link{resta}
suma <- function(a, b) {
  # Sum of the two arguments; implicit return of the last expression.
  a + b
}
#' annotate the celltype for cluster in the seurat.obj
#'
#' @param seurat_object annotation of the seurat.obj
#' @param cluster_markers marker genes of celltypes
#' @param filename the name of the generated file
#' @param width width of the figure
#' @param height height of the figure
#' @param n_col ncol for the vlnplot
#'
#' @importFrom Seurat RenameIdents VlnPlot
#' @importFrom SeuratObject Idents
#' @return a seurat.obj with annotation
#' @export
#'
#' @examples
#' \dontrun{
#' data(common_cluster,package="LIANLAB")
#' input.file <- system.file('extdata','pbmc_1k.RDS',package = 'LIANLABDATA')
#' pbmc_1k <- readRDS(input.file)
#' pbmc_1k <- cluster_annotate( seurat_object = pbmc_1k, cluster_markers = common_cluster)
#' }
cluster_annotate = function(seurat_object,cluster_markers = NULL,filename = "",width = 4,height = 2.5,n_col = NULL){
if(is.null(cluster_markers)){
stop("There is no cluster markers supply !!")
}
# One candidate cell-type label per marker gene set.
annotation_target <- names(cluster_markers)
clustermean <- NULL
seurat_violin <- seurat_object
# For each marker set: average its genes' normalized expression per cell,
# then average those per-cell scores within each cluster. Rows of
# `clustermean` = candidate cell types, columns = clusters.
for (i in 1:length(cluster_markers)) {
choose <- subset(seurat_object,features=cluster_markers[[i]])
df <- as.data.frame(choose@assays$RNA@data)
score <- apply(df, 2, mean)
clustermean <- rbind(clustermean,tapply(score,Idents(choose),mean))
row.names(clustermean)[nrow(clustermean)] <- annotation_target[i]
# Stash the per-cell score as a metadata column named after the cell
# type so the violin plot below can display it.
seurat_violin$score <- as.numeric(score)
colnames(seurat_violin@meta.data)[ncol(seurat_violin@meta.data)] <- annotation_target[i]
}
# Each cluster gets the cell type with the highest mean score ...
annotation <- row.names(clustermean)[apply(clustermean, 2, which.max)]
# ... unless that best score is below 0.5, in which case it is "Unknown".
for (i in 1:length(annotation)) {
j <- as.numeric(apply(clustermean, 2, which.max)[i])
if(clustermean[j,i] < 0.5){
annotation[i] <- "Unknown"
}
}
# Default layout: split roughly 30 panel-widths across the clusters.
if(is.null(n_col)){
n_col <- ceiling(30/length(levels(seurat_object)))
}
n_row <- ceiling(length(annotation_target)/n_col)
# Rename cluster identities to the chosen cell types.
new.cluster.ids <- annotation
names(new.cluster.ids) <- levels(seurat_object)
seurat_object <- RenameIdents(seurat_object,new.cluster.ids)
# Side effect: write per-cell-type score violin plots to a PDF for QC.
pdf(paste0("annotation_violin_",filename,".pdf"),width = width*n_col,height = height*n_row)
p <- VlnPlot(seurat_violin,features = annotation_target,pt.size = 0,ncol = n_col)
print(p)
dev.off()
return(seurat_object)
}
| /R/cluster_annotate.R | permissive | Lian-Lab/LIANLAB | R | false | false | 2,262 | r | #' annotate the celltype for cluster in the seurat.obj
#'
#' @param seurat_object annotation of the seurat.obj
#' @param cluster_markers marker genes of celltypes
#' @param filename the name of the generated file
#' @param width width of the figure
#' @param height height of the figure
#' @param n_col ncol for the vlnplot
#'
#' @importFrom Seurat RenameIdents VlnPlot
#' @importFrom SeuratObject Idents
#' @return a seurat.obj with annotation
#' @export
#'
#' @examples
#' \dontrun{
#' data(common_cluster,package="LIANLAB")
#' input.file <- system.file('extdata','pbmc_1k.RDS',package = 'LIANLABDATA')
#' pbmc_1k <- readRDS(input.file)
#' pbmc_1k <- cluster_annotate( seurat_object = pbmc_1k, cluster_markers = common_cluster)
#' }
cluster_annotate = function(seurat_object,cluster_markers = NULL,filename = "",width = 4,height = 2.5,n_col = NULL){
if(is.null(cluster_markers)){
stop("There is no cluster markers supply !!")
}
# One candidate cell-type label per marker gene set.
annotation_target <- names(cluster_markers)
clustermean <- NULL
seurat_violin <- seurat_object
# For each marker set: average its genes' normalized expression per cell,
# then average those per-cell scores within each cluster. Rows of
# `clustermean` = candidate cell types, columns = clusters.
for (i in 1:length(cluster_markers)) {
choose <- subset(seurat_object,features=cluster_markers[[i]])
df <- as.data.frame(choose@assays$RNA@data)
score <- apply(df, 2, mean)
clustermean <- rbind(clustermean,tapply(score,Idents(choose),mean))
row.names(clustermean)[nrow(clustermean)] <- annotation_target[i]
# Stash the per-cell score as a metadata column named after the cell
# type so the violin plot below can display it.
seurat_violin$score <- as.numeric(score)
colnames(seurat_violin@meta.data)[ncol(seurat_violin@meta.data)] <- annotation_target[i]
}
# Each cluster gets the cell type with the highest mean score ...
annotation <- row.names(clustermean)[apply(clustermean, 2, which.max)]
# ... unless that best score is below 0.5, in which case it is "Unknown".
for (i in 1:length(annotation)) {
j <- as.numeric(apply(clustermean, 2, which.max)[i])
if(clustermean[j,i] < 0.5){
annotation[i] <- "Unknown"
}
}
# Default layout: split roughly 30 panel-widths across the clusters.
if(is.null(n_col)){
n_col <- ceiling(30/length(levels(seurat_object)))
}
n_row <- ceiling(length(annotation_target)/n_col)
# Rename cluster identities to the chosen cell types.
new.cluster.ids <- annotation
names(new.cluster.ids) <- levels(seurat_object)
seurat_object <- RenameIdents(seurat_object,new.cluster.ids)
# Side effect: write per-cell-type score violin plots to a PDF for QC.
pdf(paste0("annotation_violin_",filename,".pdf"),width = width*n_col,height = height*n_row)
p <- VlnPlot(seurat_violin,features = annotation_target,pt.size = 0,ncol = n_col)
print(p)
dev.off()
return(seurat_object)
}
|
# R Libraries for Equivalence Test #
# Diletti E, Hauscheke D, Steinjans VW. Sample size determination for bioequivalence assessment by means of confidence intervals. Int J Clin Pharmacology, Therapy and Toxicology. 1992;29:S51-58
# Chow SC, Liu JP. Design and Analysis of Bioavailability and Bioequivalence Study. 2nd ed. p158. 2000, Marcel Dekker Inc
##################################
# Sample Size for BE : Difference
# Power of the two one-sided tests (TOST) bioequivalence procedure for the
# *difference* of means, expressed in percent of the reference mean.
#   i       subjects per sequence in a 2x2 crossover (total N = 2*i)
#   cv      intra-subject coefficient of variation, in percent
#   delta   true difference in percent of the reference mean
#   alpha   overall level; each one-sided test runs at alpha/2
#   beta    unused here; kept so the signature parallels the bss.* functions
#   thetaL, thetaU  equivalence limits in percent
# Returns the power, floored at 0; 0 when delta is outside the limits.
bpow.d.cv <- function(i, cv, delta=0, alpha=0.1, beta=0.2, thetaL=-20, thetaU=20)
{
  if (delta <= thetaL || delta >= thetaU) {
    return(0)
  }
  df <- 2 * i - 2
  crit <- qt(1 - alpha / 2, df)
  se_scale <- cv / sqrt(i)
  # Noncentral-t tail probabilities at the two equivalence bounds.
  lower_tail <- pt(-crit, df, ncp = (thetaL - delta) / se_scale)
  upper_tail <- pt(crit, df, ncp = (thetaU - delta) / se_scale)
  max(lower_tail - upper_tail, 0)
}
# TOST power for the difference criterion, parameterized by the ANOVA error
# mean square `mse` and the reference mean `mu.r` on the raw scale; both
# are converted to percent-of-reference units and passed to bpow.d.cv().
bpow.d.mse <- function(i, mu.r, mse, true.d=0, alpha=0.1, beta=0.2, thetaL=-20, thetaU=20)
{
  cv_pct <- 100 * sqrt(mse) / mu.r
  delta_pct <- 100 * true.d / mu.r
  bpow.d.cv(i, cv_pct, delta_pct, alpha, beta, thetaL, thetaU)
}
# Smallest per-sequence sample size (searched over 2..1000) whose TOST
# power exceeds 1 - beta for the difference criterion. Returns Inf when the
# true difference lies outside the equivalence range, and the string
# ">1000" when no n up to 1000 suffices (kept for backward compatibility).
bss.d.cv <- function(cv, delta=0, alpha=0.1, beta=0.2, thetaL=-20, thetaU=20)
{
  if (delta <= thetaL || delta >= thetaU) {
    return(Inf)
  }
  for (n_per_seq in 2:1000) {
    if (bpow.d.cv(n_per_seq, cv, delta, alpha, beta, thetaL, thetaU) > 1 - beta) {
      return(n_per_seq)
    }
  }
  ">1000"
}
# Sample size for the difference criterion, parameterized by the raw-scale
# MSE and reference mean; converts to percent-of-reference units and
# delegates to bss.d.cv().
bss.d.mse <- function(mu.r, mse, true.d=0, alpha=0.1, beta=0.2, thetaL=-20, thetaU=20)
{
  cv_pct <- 100 * sqrt(mse) / mu.r
  delta_pct <- 100 * true.d / mu.r
  bss.d.cv(cv_pct, delta_pct, alpha, beta, thetaL, thetaU)
}
# LL, UL like 85% - 115%
# N: total N, 2 * n per group
# Back-calculate the CV from a reported 90% CI for the percent difference
# (e.g. LL = 85, UL = 115) observed with total N subjects, then tabulate
# the 80%-power sample size and the power attained at N, under a true
# percent of 100 and under the CI midpoint.
bss.d.ci <- function(N, LL, UL)
{
  pe <- (LL + UL) / 2
  crit <- qt(0.95, N - 2)
  # CI half-width = t * cv / sqrt(N/2), inverted for cv.
  cv <- sqrt(N / 2) * (UL - LL) / (2 * crit)
  shift <- pe - 100
  sampsize <- cbind(bss.d.cv(cv), bss.d.cv(cv, delta = shift))
  power <- cbind(round(100 * bpow.d.cv(N / 2, cv)),
                 round(100 * bpow.d.cv(N / 2, cv, delta = shift)))
  result <- rbind(sampsize, power)
  dimnames(result) <- list(c("80% Power Sample Size", paste("Power at N =", N)),
                           c("True Percent=100", sprintf("True Percent=%.2f", pe)))
  result
}
############################
# Sample Size for BE : Ratio
# TOST power for the *ratio* of means on the log scale: `mse` is the ANOVA
# error mean square of the log-transformed data, `true.r` the true T/R
# ratio, and (thetaL, thetaU) the equivalence limits on the ratio scale.
# Returns the power, floored at 0; 0 when true.r is outside the limits.
bpow.r.mse <- function(i, mse, true.r=1, alpha=0.1, beta=0.2, thetaL=0.8, thetaU=1.25)
{
  if (true.r <= thetaL || true.r >= thetaU) {
    return(0)
  }
  df <- 2 * i - 2
  crit <- qt(1 - alpha / 2, df)
  se <- sqrt(mse / i)
  lower_tail <- pt(-crit, df, ncp = log(thetaL / true.r) / se)
  upper_tail <- pt(crit, df, ncp = log(thetaU / true.r) / se)
  max(lower_tail - upper_tail, 0)
}
# Ratio-criterion power from the CV% on the original scale, using the
# lognormal relation mse = log(1 + CV^2); delegates to bpow.r.mse().
bpow.r.cv <- function(i, cv, true.r=1, alpha=0.1, beta=0.2, thetaL=0.8, thetaU=1.25)
{
  mse_log <- log(1 + (cv / 100)^2)
  bpow.r.mse(i, mse_log, true.r, alpha, beta, thetaL, thetaU)
}
# Smallest per-sequence sample size (searched over 2..1000) whose TOST
# power exceeds 1 - beta for the ratio criterion. Returns Inf when the true
# ratio lies outside the equivalence range, and the string ">1000" when no
# n up to 1000 suffices (kept for backward compatibility).
bss.r.mse <- function(mse, true.r=1, alpha=0.1, beta=0.2, thetaL=0.8, thetaU=1.25)
{
  if (true.r <= thetaL || true.r >= thetaU) {
    return(Inf)
  }
  for (n_per_seq in 2:1000) {
    if (bpow.r.mse(n_per_seq, mse, true.r, alpha, beta, thetaL, thetaU) > 1 - beta) {
      return(n_per_seq)
    }
  }
  ">1000"
}
# Ratio-criterion sample size from the CV% on the original scale, using the
# lognormal relation mse = log(1 + CV^2); delegates to bss.r.mse().
bss.r.cv <- function(cv, true.r=1, alpha=0.1, beta=0.2, thetaL=0.8, thetaU=1.25)
{
  mse_log <- log(1 + (cv / 100)^2)
  bss.r.mse(mse_log, true.r, alpha, beta, thetaL, thetaU)
}
# LL, UL like 0.85 ~ 1.15
# N: toal N, 2 * n per group
# Back-calculate the log-scale MSE from a reported 90% CI for the ratio
# (e.g. LL = 0.85, UL = 1.15) observed with total N subjects, then tabulate
# the 80%-power sample size and the power attained at N, under a true ratio
# of 1 and under the geometric CI midpoint.
bss.r.ci <- function(N, LL, UL)
{
  pe <- exp((log(UL) + log(LL)) / 2)
  crit <- qt(0.95, N - 2)
  # CI half-width on the log scale = t * sqrt(mse * 2 / N), inverted.
  sd_log <- (log(UL) - log(LL)) / (2 * crit)
  mse <- sd_log^2 * N / 2
  sampsize <- cbind(bss.r.mse(mse), bss.r.mse(mse, true.r = pe))
  power <- cbind(round(100 * bpow.r.mse(N / 2, mse)),
                 round(100 * bpow.r.mse(N / 2, mse, true.r = pe)))
  result <- rbind(sampsize, power)
  dimnames(result) <- list(c("80% Power Sample Size", paste("Power at N =", N)),
                           c("True Ratio=1", sprintf("True Ratio=%.4f", pe)))
  result
}
#########################
# 2x2 BE Test
# Validate the 2x2 crossover layout: within each sequence group ("RT",
# "TR") the subject vectors of period 1 and period 2 must match exactly
# (same subjects, same order), because downstream code pairs observations
# by position. Returns TRUE/FALSE.
assert <- function(bedata)
{
  subjects_in <- function(grp, prd) {
    bedata[bedata$GRP == grp & bedata$PRD == prd, "SUBJ"]
  }
  rt_ok <- identical(subjects_in("RT", 1), subjects_in("RT", 2))
  tr_ok <- identical(subjects_in("TR", 1), subjects_in("TR", 2))
  rt_ok && tr_ok
}
# Standard 2x2 crossover bioequivalence analysis (after Chow & Liu): builds
# the crossover ANOVA table, least-squares means, the 90% CI for the
# treatment difference (or for the ratio when `logtransformed` is TRUE and
# `var` holds log-transformed data), and 80%-power sample sizes.
#   bedata  data frame with SUBJ, GRP ("RT"/"TR"), PRD (1/2) and `var`;
#           must pass assert(), since periods are paired by row position.
#   var     name of the response column.
# Returns NULL (with a message) on bad input, otherwise a named list:
# "Analysis of Variance", "Least Square Means", "90% Confidence Interval",
# "Sample Size".
betest <- function(bedata, var, logtransformed)
{
if(!assert(bedata)) {
cat("\n Bad Data Format !\n");
return(NULL);
}
# Y_{i, period, group}: group "RT" gets Reference in period 1 then Test;
# group "TR" the reverse.
Yijk <- bedata[, var]
Yi11 <- bedata[bedata$GRP=="RT" & bedata$PRD==1, var]
Yi21 <- bedata[bedata$GRP=="RT" & bedata$PRD==2, var]
Yi12 <- bedata[bedata$GRP=="TR" & bedata$PRD==1, var]
Yi22 <- bedata[bedata$GRP=="TR" & bedata$PRD==2, var]
n1 <- length(Yi11)
n2 <- length(Yi12)
Y... <- mean(Yijk)
SStotal <- sum((Yijk - Y...)^2)
# Cell means per group x period, subject means, group means.
Y.11 <- mean(Yi11)
Y.21 <- mean(Yi21)
Y.12 <- mean(Yi12)
Y.22 <- mean(Yi22)
Yi.1 <- (Yi11 + Yi21) / 2
Yi.2 <- (Yi12 + Yi22) / 2
Y..1 <- mean(Yi.1);
Y..2 <- mean(Yi.2);
# Least-squares means of Reference and Test across both sequences.
mu.r <- (Y.11 + Y.22) / 2
mu.t <- (Y.21 + Y.12) / 2
# Sum-of-squares decomposition: carryover (= sequence/GROUP effect),
# inter-subject within group, period, drug, and intra-subject error.
SScarry <- 2*n1*n2/(n1+n2)*(Y.12 + Y.22 - Y.11 - Y.21)^2 / 4
# SSinter <- (sum(Yi.1^2) + sum(Yi.2^2) - Y..1^2 * n1 - Y..2^2 * n2) * 2
SSinter <- (sum((Yi.1-Y..1)^2) + sum((Yi.2-Y..2)^2)) * 2
SSbetween <- SScarry + SSinter
SSperiod <- 2*n1*n2/(n1+n2)*(Y.21 + Y.22 - Y.11 - Y.12)^2 / 4
SSdrug <- 2*n1*n2/(n1+n2)*(Y.21 + Y.12 - Y.11 - Y.22)^2 / 4
SSintra <- SStotal - SScarry - SSinter - SSdrug - SSperiod
Source <- c("SUBJECT", "GROUP", "SUBJECT(GROUP)", "PERIOD", "DRUG", "ERROR", "TOTAL");
SS <- c(SSbetween, SScarry, SSinter, SSperiod, SSdrug, SSintra, SStotal);
DF <- c(n1+n2-1, 1, n1+n2-2, 1, 1, n1+n2-2, 2*n1+2*n2-1);
MS <- SS / DF
mse <- SSintra / (n1+n2-2);
# GROUP (carryover) is tested against the SUBJECT(GROUP) mean square
# (MS[3]); every other effect is tested against the intra-subject MSE.
F <- MS / c(mse, MS[3], mse, mse, mse, mse, mse);
p1 <- 1 - pf(F[1], n1+n2-1, n1+n2-2)
p2 <- 1 - pf(F[2], 1, n1+n2-2);
p3 <- 1 - pf(F[3], n1+n2-2, n1+n2-2);
p4 <- 1 - pf(F[4], 1, n1+n2-2);
p5 <- 1 - pf(F[5], 1, n1+n2-2);
p <- c(p1, p2, p3, p4, p5, NA, NA)
# F and MS are meaningless for the ERROR and TOTAL rows.
F[6] <- F[7] <- MS[7] <- NA
ANOVA <- cbind(SS, DF, MS, F, p)
dimnames(ANOVA) <- list(Source,c("SS", "DF", "MS", "F", "p"))
# 90% CI for the treatment difference (t-based, shortest-interval form).
pe <- mu.t - mu.r
sd <- sqrt(mse / 2 * (1/n1 + 1/n2)) # See pp 62-63
t0 <- qt(0.95, n1+n2-2);
ci0 <- cbind(pe - t0 * sd, pe, pe + t0 * sd)
if (logtransformed == TRUE) {
# Log-transformed data: report geometric means, back-transform the CI
# to the ratio scale, and size the study on the ratio criterion.
lsm <- cbind(exp(mu.r), exp(mu.t))
dimnames(lsm) <- list("Geometric Means", cbind("Reference Drug", "Test Drug"))
ci <- exp(ci0);
dimnames(ci) <- list("90% CI for Ratio", c("Lower Limit", "Point Estimate", "Upper Limit"));
sampsize1 <- bss.r.mse(mse);
sampsize2 <- bss.r.mse(mse, true.r=exp(pe));
ss <- cbind(sampsize1, sampsize2)
dimnames(ss) <- list("80% Power Sample Size", c("True Ratio=1", "True Ratio=Point Estimate"));
} else {
# Raw-scale data: arithmetic means, CI both absolute and as percent of
# the reference mean, sizing on the difference criterion.
lsm <- cbind(mu.r, mu.t)
dimnames(lsm) <- list("Arithmetic Means", cbind("Reference Drug", "Test Drug"))
ci1 <- (1 + ci0 / mu.r) * 100
ci <- rbind(ci0, ci1)
dimnames(ci) <- list(c("90% CI for Difference", "90% CI for Difference(%)"), c("Lower Limit", "Point Estimate", "Upper Limit"));
sampsize1 <- bss.d.mse(mu.r, mse);
sampsize2 <- bss.d.mse(mu.r, mse, true.d=pe);
ss <- cbind(sampsize1, sampsize2)
dimnames(ss) <- list("80% Power Sample Size", c("True Difference=0", "True Difference=Point Estimate"));
}
result <- list(ANOVA, lsm, ci, ss);
names(result) <- c("Analysis of Variance", "Least Square Means", "90% Confidence Interval", "Sample Size")
return(result);
}
# Nonparametric 2x2 crossover analysis: a Wilcoxon-type test on the period
# differences plus the Hodges-Lehmann distribution-free 90% CI for the
# treatment difference (also reported as percent of the reference mean).
# Returns NULL (with a message) when the layout fails assert() or when
# n1*n2 < 12, i.e. too few pairwise differences to bracket a 90% CI.
hodges <- function(bedata, var)
{
if(!assert(bedata)) {
cat("\n Bad Data Format !\n");
return(NULL);
}
Yi11 <- bedata[bedata$GRP=="RT" & bedata$PRD==1, var]
Yi21 <- bedata[bedata$GRP=="RT" & bedata$PRD==2, var]
Yi12 <- bedata[bedata$GRP=="TR" & bedata$PRD==1, var]
Yi22 <- bedata[bedata$GRP=="TR" & bedata$PRD==2, var]
n1 <- length(Yi11)
n2 <- length(Yi12)
if(n1 * n2 < 12) {
cat("\n Too Small Sample Size for 90% Confidence Interval !\n");
return(NULL);
}
mu.r <- (mean(Yi11) + mean(Yi22)) / 2;
# Half period differences per sequence; their pairwise differences D
# estimate the treatment effect free of the period effect.
G1D <- (Yi21 - Yi11) / 2
G2D <- (Yi22 - Yi12) / 2
D <- sort(outer(G1D, G2D, "-"));
# p-value from the smaller of the positive/negative difference counts
# under the Mann-Whitney distribution.
# NOTE(review): pwilcox is the rank-sum (Mann-Whitney) distribution, but
# the result label says "Signed-Rank"; also, zero differences are excluded
# from both counts -- confirm both against the intended method.
pval <- pwilcox(min(length(D[D>0]), length(D[D<0])), n1, n2)
w05 <- qwilcox(0.05, n1, n2)
w95 <- qwilcox(0.95, n1, n2)
names(pval) <- list(c("p-value"));
# Hodges-Lehmann: median of D as point estimate, order statistics of D at
# the Wilcoxon quantiles as 90% CI limits.
est1 <- cbind(D[w05-1], median(D), D[w95])
est2 <- (1 + est1 / mu.r ) * 100
est.a <- rbind(est1, est2)
dimnames(est.a) <- list(c("90% Confidence Interval", "90% Confidence Interval(%)"), c("Lower Limit", "Point Estimate", "Upper Limit"));
# est3 <- cbind(D[w05], median(D), D[w95+1])
# est4 <- (1 + est3 / mu.r ) * 100
# est.b <- rbind(est3, est4)
# dimnames(est.b) <- list(c("90% Confidence Interval", "90% Confidence Interval(%)"), c("Lower Limit", "Point Estimate", "Upper Limit"));
# result <- list(pval, est.a, est.b);
# names(result) <- c("Wilcoxon Signed-Rank Test", "Hodges-Lehmann Estimate", "Hodges-Lehmann Estimate Old")
result <- list(pval, est.a);
names(result) <- c("Wilcoxon Signed-Rank Test", "Hodges-Lehmann Estimate")
return(result);
}
########################################
# BE Plot
# Draw one jittered line per subject between the two x positions on the
# current plot: group 1 in solid red (subject labels on the left),
# group 2 in dashed blue (subject labels on the right).
#
# g1l/g1r: group-1 left/right y values;  g2l/g2r: same for group 2
# g1s/g2s: subject labels for each group
drawind <- function(g1l, g1r, g2l, g2r, g1s, g2s)
{
# one subject's connecting line plus its label on the requested side
draw_one <- function(left, right, subj, side, lty, col) {
x <- jitter(c(1, 2), factor=0.3)
y <- c(left, right)
lines(x, y, type="l", lty=lty, col=col)
if (side == 1)
text(x[1]-0.05, y[1], paste(subj), cex=0.6, col=col)
else
text(x[2]+0.05, y[2], paste(subj), cex=0.6, col=col)
}
for (i in 1:length(g1l)) draw_one(g1l[i], g1r[i], g1s[i], 1, 1, "red")
for (i in 1:length(g2l)) draw_one(g2l[i], g2r[i], g2s[i], 2, 2, "blue")
}
# Draw the two sequence mean profiles with +/- SD error bars on the
# current plot: RT in solid red (shifted left), TR in dashed blue
# (shifted right).
#
# ma/sa: mean/SD at x=1 on the red line;  mc/sc: mean/SD at x=2 (red)
# mb/sb: mean/SD at x=1 on the blue line; md/sd: mean/SD at x=2 (blue)
# y.max: y-axis maximum, used to offset the "RT"/"TR" labels
drawmeansd <- function(ma, sa, mb, sb, mc, sc, md, sd, y.max)
{
sft <- 0.03  # horizontal shift so the two lines do not overlap
# BUG FIX: mean(ma, mc) passed mc as mean()'s `trim` argument, so the
# second value was silently ignored; mean(c(...)) averages both ends.
delta <- mean(c(ma, mc)) - mean(c(mb, md))
# place each sequence label above/below its line depending on which is higher
y.RT <- mean(c(ma, mc)) + sign(delta) * y.max * 0.05
y.TR <- mean(c(mb, md)) - sign(delta) * y.max * 0.05
lines(c(1-sft, 2-sft), c(ma, mc), type="l", lty=1, col="red")
text(1.5-sft, y.RT, "RT", col="red")
# error bars drawn only for strictly positive SDs
if (sa > 0) arrows(1-sft, ma-sa, 1-sft, ma+sa, length=0.1, code=3, angle=90, col="red")
if (sc > 0) arrows(2-sft, mc-sc, 2-sft, mc+sc, length=0.1, code=3, angle=90, col="red")
lines(c(1+sft, 2+sft), c(mb, md), type="l", lty=2, col="blue")
text(1.5+sft, y.TR, "TR", col="blue")
# BUG FIX: upper end of this bar used the other point's SD (mb+sd);
# it must mirror the lower end, i.e. mb+sb
if (sb > 0) arrows(1+sft, mb-sb, 1+sft, mb+sb, length=0.1, code=3, angle=90, col="blue")
if (sd > 0) arrows(2+sft, md-sd, 2+sft, md+sd, length=0.1, code=3, angle=90, col="blue")
}
# Diagnostic plots for a 2x2 crossover bioequivalence endpoint.
# Opens two device windows: (1) individual and mean/SD profiles by
# period and by treatment, (2) box plots by sequence/period/treatment.
#
# bedata: data frame with SUBJ, GRP ("RT"/"TR"), PRD (1/2) and the
#         response column named by `var`; must pass assert().
# var:    name of the response column to plot (e.g. "Cmax").
# Relies on file-local helpers assert(), drawind() and drawmeansd().
# NOTE(review): windows() exists only on MS Windows builds of R;
# dev.new() would be the portable equivalent -- confirm target platform.
beplot <- function(bedata, var)
{
if(!assert(bedata)) {
cat("\n Bad Data Format !\n");
return(NULL);
}
# subject IDs and responses split by sequence (RT/TR) and period (1/2)
Si11 <- bedata[bedata$GRP=="RT" & bedata$PRD==1, "SUBJ"]
Si21 <- bedata[bedata$GRP=="RT" & bedata$PRD==2, "SUBJ"]
Si12 <- bedata[bedata$GRP=="TR" & bedata$PRD==1, "SUBJ"]
Si22 <- bedata[bedata$GRP=="TR" & bedata$PRD==2, "SUBJ"]
Yi11 <- bedata[bedata$GRP=="RT" & bedata$PRD==1, var]
Yi21 <- bedata[bedata$GRP=="RT" & bedata$PRD==2, var]
Yi12 <- bedata[bedata$GRP=="TR" & bedata$PRD==1, var]
Yi22 <- bedata[bedata$GRP=="TR" & bedata$PRD==2, var]
n1 <- length(Yi11)
n2 <- length(Yi12)
# cell means and SDs feed the mean/SD panels (c) and (d)
Y.11 <- mean(Yi11)
Y.21 <- mean(Yi21)
Y.12 <- mean(Yi12)
Y.22 <- mean(Yi22)
sY.11 <- sd(Yi11)
sY.21 <- sd(Yi21)
sY.12 <- sd(Yi12)
sY.22 <- sd(Yi22)
# common y-limit: 20% headroom above the largest mean+SD or raw value
y.max <- max(Y.11 + sY.11, Y.21 + sY.21, Y.12 + sY.12, Y.22 + sY.22, max(bedata[,var])) * 1.2
windows()
par(oma=c(1,1,3,1), mfrow=c(2,2))
# (a) each subject's trajectory across periods
plot(0, 0, type="n", ylim=c(0, y.max), xlim=c(0.5, 2.5), axes=FALSE, xlab="Period", ylab=var, main="(a) Individual Plot for Period")
axis(2)
axis(1, at=c(1,2))
drawind(Yi11, Yi21, Yi12, Yi22, Si11, Si12)
# (b) same subjects re-ordered so x is Test vs Reference
plot(0, 0, type="n", ylim=c(0, y.max), xlim=c(0.5, 2.5), axes=FALSE, xlab="Treatment", ylab=var, main="(b) Individual Plot for Treatment")
axis(2)
axis(1, at=c(1,2), labels=c("Test", "Reference"))
drawind(Yi21, Yi11, Yi12, Yi22, Si11, Si12)
# (c) group means with SD bars by period
plot(0, 0, type="n", ylim=c(0, y.max), xlim=c(0.5, 2.5), axes=FALSE, xlab="Period", ylab=var, main="(c) Mean and SD by Period")
axis(2)
axis(1, at=c(1,2))
drawmeansd(Y.11, sY.11, Y.12, sY.12, Y.21, sY.21, Y.22, sY.22, y.max)
# (d) group means with SD bars by treatment
plot(0, 0, type="n", ylim=c(0, y.max), xlim=c(0.5, 2.5), axes=FALSE, xlab="Treatment", ylab=var, main="(d) Mean and SD by Treatment")
axis(2)
axis(1, at=c(1,2), labels=c("Test", "Reference"))
drawmeansd(Y.21, sY.21, Y.12, sY.12, Y.11, sY.11, Y.22, sY.22, y.max)
mtext(outer=T, side=3, paste("Equivalence Plot for", var), cex=1.5)
# second window: four box-plot summaries of the same data
windows()
par(oma=c(1,1,3,1), mfrow=c(2,2))
boxplot(Yi11, Yi21, Yi12, Yi22, names=c("PRD=1", "PRD=2", "PRD=1", "PRD=2"), main="(a) By Sequence and Period", sub="SEQ=RT SEQ=TR")
boxplot(c(Yi11, Yi21), c(Yi12, Yi22), names=c("Sequence=RT", "Sequence=TR"), main="(b) By Sequence")
boxplot(c(Yi11, Yi12), c(Yi21, Yi22), names=c("Period=1", "Period=2"), main="(c) By Period")
boxplot(c(Yi12, Yi21), c(Yi11, Yi22), names=c("Treatment=T", "Treatment=R"), main="(d) By Treatment")
mtext(outer=T, side=3, paste("Box Plots for", var), cex=1.5)
}
# bedata <- read.csv("d:/csv/propofolbe.csv")
# windows()
# par(mfrow=c(2,2),oma=c(1,1,3,1))
# boxplot(Cmax ~ GRP + PRD, data=bedata)
# boxplot(Cmax ~ GRP, data=bedata)
# boxplot(Cmax ~ PRD, data=bedata)
# boxplot(Cmax ~ TRT, data=bedata)
# options(digits=3)
# Full bioequivalence analysis driver: reads a crossover CSV, draws the
# diagnostic plots, then prints betest() results for AUClast, AUCinf and
# Cmax on the log scale and the Hodges-Lehmann analysis for Tmax.
# Returns NULL (invisibly, via the final print) or NULL early on bad data.
be <- function(filename)
{
bedata <- read.csv(filename);
# File should have the following columns
# SUBJ : Subject ID, any data type
# GRP: "RT" or "TR"
# PRD: 1 or 2
# TRT: "R" or "T"
# AUClast: numeric data type
# AUCinf: numeric data type
# Cmax: numeric data type
# Tmax: numeric data type
# Other columns as you wish
# canonical ordering so assert()'s identical() comparisons line up
bedata <- bedata[order(bedata$GRP, bedata$PRD, bedata$SUBJ),];
if(!assert(bedata)) {
cat("\n Bad Data Format !\n");
return(NULL);
}
beplot(bedata, "AUClast")
beplot(bedata, "AUCinf")
beplot(bedata, "Cmax")
beplot(bedata, "Tmax")
# AUC and Cmax are analyzed on the log scale, per BE convention
bedata$lnAUClast <- log(bedata$AUClast);
bedata$lnAUCinf <- log(bedata$AUCinf);
bedata$lnCmax <- log(bedata$Cmax);
cat("\n\n[AUClast]\n\n");
print(betest(bedata, "lnAUClast", logtransformed=T));
cat("\n\n[AUCinf]\n\n");
print(betest(bedata, "lnAUCinf", logtransformed=T));
cat("\n\n[Cmax]\n\n");
print(betest(bedata, "lnCmax", logtransformed=T));
# Tmax is not log-normal: use the nonparametric Hodges-Lehmann analysis
cat("\n\n[Tmax]\n\n");
print(hodges(bedata, "Tmax"));
}
# Bioequivalence analysis without AUCinf: reads a crossover CSV, draws
# the diagnostic plots, then prints betest() for AUClast and Cmax on the
# log scale and the Hodges-Lehmann analysis for Tmax.
kbe <- function(filename)
{
bedata <- read.csv(filename);
# File should have the following columns
# SUBJ : Subject ID, any data type
# GRP: "RT" or "TR"
# PRD: 1 or 2
# TRT: "R" or "T"
# AUClast: numeric data type
# Cmax: numeric data type
# Tmax: numeric data type
# Other columns as you wish
# canonical ordering so assert()'s identical() comparisons line up
bedata <- bedata[order(bedata$GRP, bedata$PRD, bedata$SUBJ),];
if(!assert(bedata)) {
cat("\n Bad Data Format !\n");
return(NULL);
}
# diagnostic plots for every reported endpoint
for (v in c("AUClast", "Cmax", "Tmax")) beplot(bedata, v)
# AUClast and Cmax are analyzed on the log scale, per BE convention
bedata$lnAUClast <- log(bedata$AUClast);
bedata$lnCmax <- log(bedata$Cmax);
# print a section header followed by the analysis result
report <- function(label, res) {
cat(sprintf("\n\n[%s]\n\n", label))
print(res)
}
report("AUClast", betest(bedata, "lnAUClast", logtransformed=T))
report("Cmax", betest(bedata, "lnCmax", logtransformed=T))
report("Tmax", hodges(bedata, "Tmax"))
}
| /BELIB.R | no_license | shanmdphd/UsefulR | R | false | false | 14,863 | r | # R Libraries for Equivalence Test #
# Diletti E, Hauscheke D, Steinjans VW. Sample size determination for bioequivalence assessment by means of confidence intervals. Int J Clin Pharmacology, Therapy and Toxicology. 1992;29:S51-58
# Chow SC, Liu JP. Design and Analysis of Bioavailability and Bioequivalence Study. 2nd ed. p158. 2000, Marcel Dekker Inc
##################################
# Sample Size for BE : Difference
# Power of the two one-sided tests (TOST) procedure for average
# bioequivalence on the additive (difference) scale, following
# Diletti/Hauschke/Steinijans.
#
# i:      number of subjects per sequence
# cv:     intra-subject coefficient of variation, in percent
# delta:  true treatment difference as percent of the reference mean
# alpha, thetaL, thetaU: TOST level and equivalence limits (percent)
# (`beta` is accepted for signature symmetry with the sample-size
# functions but is not used in the power calculation itself.)
bpow.d.cv <- function(i, cv, delta=0, alpha=0.1, beta=0.2, thetaL=-20, thetaU=20)
{
# a true difference outside the equivalence range has zero power
if (delta <= thetaL || delta >= thetaU) return(0)
df <- 2*i - 2
se <- cv / sqrt(i)
crit <- qt(1 - alpha/2, df)
# difference of two noncentral-t tail probabilities
pw <- pt(-crit, df, ncp=(thetaL-delta)/se) - pt(crit, df, ncp=(thetaU-delta)/se)
# numerical guard: the difference can come out slightly negative
max(pw, 0)
}
# TOST power on the difference scale, parameterized by the reference
# mean and the ANOVA mean squared error instead of a percent CV.
# Simply converts to percent units and delegates to bpow.d.cv().
bpow.d.mse <- function(i, mu.r, mse, true.d=0, alpha=0.1, beta=0.2, thetaL=-20, thetaU=20)
{
# express variability and the true difference as percent of the reference mean
bpow.d.cv(i,
cv = 100 * sqrt(mse) / mu.r,
delta = 100 * true.d / mu.r,
alpha = alpha, beta = beta, thetaL = thetaL, thetaU = thetaU)
}
# Smallest per-sequence sample size giving power > 1-beta for the TOST
# procedure on the difference scale (percent units).
# Returns Inf when the true difference lies outside the equivalence
# range (no sample size can succeed).
# NOTE(review): on failure this returns the string ">1000" while success
# returns an integer, so the return type is mixed -- callers such as
# betest() only display the value, but comparisons would need care.
bss.d.cv <- function(cv, delta=0, alpha=0.1, beta=0.2, thetaL=-20, thetaU=20)
{
if (delta <= thetaL | delta >= thetaU) return(Inf);
# search upward from the minimum feasible size (2 per sequence)
for(i in 2:1000) {
power <- bpow.d.cv(i, cv, delta, alpha, beta, thetaL, thetaU);
if (power > 1 - beta) return(i);
}
return(">1000");
}
# Sample size on the difference scale, parameterized by the reference
# mean and ANOVA MSE; converts to percent units and delegates to
# bss.d.cv().
bss.d.mse <- function(mu.r, mse, true.d=0, alpha=0.1, beta=0.2, thetaL=-20, thetaU=20)
{
bss.d.cv(cv = 100 * sqrt(mse) / mu.r,
delta = 100 * true.d / mu.r,
alpha = alpha, beta = beta, thetaL = thetaL, thetaU = thetaU)
}
# Reconstruct design information from a reported 90% CI on the
# difference scale.
# LL, UL: reported CI limits in percent, e.g. 85 and 115
# N: total N, 2 * n per group
# Back-calculates the CV from the CI width, then returns a 2x2 table:
# row 1 = 80%-power sample sizes, row 2 = power at the reported N,
# columns for true percent = 100 and true percent = the point estimate.
bss.d.ci <- function(N, LL, UL)
{
# point estimate is the CI midpoint on the additive scale
pe <- (LL + UL)/2
t0 <- qt(0.95, N-2);
# invert the CI half-width formula to recover the percent CV
cv <- sqrt(N/2)*(UL - LL)/(2*t0);
s1 <- bss.d.cv(cv)
s2 <- bss.d.cv(cv, delta=(pe-100));
sampsize <- cbind(s1, s2);
# achieved power (percent) at the study's own size, N/2 per sequence
p1 <- round(100 * bpow.d.cv(N/2, cv));
p2 <- round(100 * bpow.d.cv(N/2, cv, delta=(pe-100)));
power <- cbind(p1, p2);
result <- rbind(sampsize, power);
dimnames(result) <- list(c("80% Power Sample Size", paste("Power at N =",N)), c("True Percent=100", sprintf("True Percent=%.2f", pe)));
return(result);
}
############################
# Sample Size for BE : Ratio
# Power of the TOST procedure for average bioequivalence on the
# multiplicative (ratio) scale, using the log-transformed model.
#
# i:      number of subjects per sequence
# mse:    ANOVA mean squared error of the log-scale model
# true.r: true test/reference ratio
# alpha, thetaL, thetaU: TOST level and equivalence limits (ratios)
# (`beta` is accepted for signature symmetry but not used here.)
bpow.r.mse <- function(i, mse, true.r=1, alpha=0.1, beta=0.2, thetaL=0.8, thetaU=1.25)
{
# a true ratio outside the equivalence range has zero power
if (true.r <= thetaL || true.r >= thetaU) return(0)
df <- 2*i - 2
se <- sqrt(mse / i)
crit <- qt(1 - alpha/2, df)
# difference of two noncentral-t tail probabilities on the log scale
pw <- pt(-crit, df, ncp=log(thetaL/true.r)/se) - pt(crit, df, ncp=log(thetaU/true.r)/se)
# numerical guard: clamp tiny negative values to zero
max(pw, 0)
}
# Ratio-scale TOST power parameterized by a percent CV; converts to the
# log-scale MSE via mse = log(1 + CV^2) and delegates to bpow.r.mse().
bpow.r.cv <- function(i, cv, true.r=1, alpha=0.1, beta=0.2, thetaL=0.8, thetaU=1.25)
{
bpow.r.mse(i,
mse = log(1 + (cv/100)^2),
true.r = true.r,
alpha = alpha, beta = beta, thetaL = thetaL, thetaU = thetaU)
}
# Smallest per-sequence sample size giving power > 1-beta for the TOST
# procedure on the ratio scale.
# Returns Inf when the true ratio lies outside the equivalence range.
# NOTE(review): on failure this returns the string ">1000" while success
# returns an integer, so the return type is mixed (matches bss.d.cv).
bss.r.mse <- function(mse, true.r=1, alpha=0.1, beta=0.2, thetaL=0.8, thetaU=1.25)
{
if (true.r <= thetaL | true.r >= thetaU) return(Inf);
# search upward from the minimum feasible size (2 per sequence)
for (i in 2:1000) {
power <- bpow.r.mse(i, mse, true.r, alpha, beta, thetaL, thetaU);
if (power > 1 - beta) return(i);
}
return(">1000");
}
# Ratio-scale sample size parameterized by a percent CV; converts to the
# log-scale MSE via mse = log(1 + CV^2) and delegates to bss.r.mse().
bss.r.cv <- function(cv, true.r=1, alpha=0.1, beta=0.2, thetaL=0.8, thetaU=1.25)
{
bss.r.mse(mse = log(1 + (cv/100)^2),
true.r = true.r,
alpha = alpha, beta = beta, thetaL = thetaL, thetaU = thetaU)
}
# Reconstruct design information from a reported 90% CI on the ratio
# scale.
# LL, UL: reported CI limits as ratios, e.g. 0.85 ~ 1.15
# N: toal N, 2 * n per group
# Back-calculates the log-scale MSE from the CI width, then returns a
# 2x2 table: row 1 = 80%-power sample sizes, row 2 = power at the
# reported N, for true ratio = 1 and true ratio = the point estimate.
bss.r.ci <- function(N, LL, UL)
{
# point estimate is the geometric midpoint of the CI
pe <- exp((log(UL)+log(LL))/2);
t0 <- qt(0.95, N-2);
# invert the log-scale CI half-width to recover the standard error,
# then the MSE
sd <- (log(UL)-log(LL))/(2*t0);
mse <- sd^2 * N / 2;
s1 <- bss.r.mse(mse)
s2 <- bss.r.mse(mse, true.r=pe);
sampsize <- cbind(s1, s2);
# achieved power (percent) at the study's own size, N/2 per sequence
p1 <- round(100 * bpow.r.mse(N/2, mse));
p2 <- round(100 * bpow.r.mse(N/2, mse, true.r=pe));
power <- cbind(p1, p2);
result <- rbind(sampsize, power);
dimnames(result) <- list(c("80% Power Sample Size", paste("Power at N =",N)), c("True Ratio=1", sprintf("True Ratio=%.4f", pe)));
return(result);
}
#########################
# 2x2 BE Test
# Sanity check for 2x2 crossover data: within each sequence ("RT" and
# "TR") the subjects observed in period 1 must be exactly the subjects
# observed in period 2, in the same order. Returns TRUE when the
# layout is consistent, FALSE otherwise.
assert <- function(bedata)
{
subj.in <- function(grp, prd) bedata[bedata$GRP == grp & bedata$PRD == prd, "SUBJ"]
identical(subj.in("RT", 1), subj.in("RT", 2)) &&
identical(subj.in("TR", 1), subj.in("TR", 2))
}
# Average bioequivalence test for a 2x2 crossover: closed-form ANOVA
# (carryover/group, inter-subject, period, drug, intra-subject error),
# least-square means, a 90% CI for the treatment effect, and sample
# sizes for 80% power (Chow & Liu, ch. 3).
#
# bedata: data frame with SUBJ, GRP ("RT"/"TR"), PRD (1/2) and the
#         response column named by `var`; must pass assert().
# var:    response column, typically already log-transformed.
# logtransformed: TRUE -> report geometric means and a ratio-scale CI;
#         FALSE -> arithmetic means and a difference-scale CI.
# Returns a named list(ANOVA, LS means, 90% CI, sample size) or NULL.
betest <- function(bedata, var, logtransformed)
{
if(!assert(bedata)) {
cat("\n Bad Data Format !\n");
return(NULL);
}
Yijk <- bedata[, var]
# Yi<period><sequence>: sequence RT is group 1, TR is group 2
Yi11 <- bedata[bedata$GRP=="RT" & bedata$PRD==1, var]
Yi21 <- bedata[bedata$GRP=="RT" & bedata$PRD==2, var]
Yi12 <- bedata[bedata$GRP=="TR" & bedata$PRD==1, var]
Yi22 <- bedata[bedata$GRP=="TR" & bedata$PRD==2, var]
n1 <- length(Yi11)
n2 <- length(Yi12)
# grand mean and total sum of squares
Y... <- mean(Yijk)
SStotal <- sum((Yijk - Y...)^2)
# cell means by period x sequence
Y.11 <- mean(Yi11)
Y.21 <- mean(Yi21)
Y.12 <- mean(Yi12)
Y.22 <- mean(Yi22)
# per-subject averages over the two periods (inter-subject component)
Yi.1 <- (Yi11 + Yi21) / 2
Yi.2 <- (Yi12 + Yi22) / 2
Y..1 <- mean(Yi.1);
Y..2 <- mean(Yi.2);
# reference receives period 1 in RT and period 2 in TR; test the reverse
mu.r <- (Y.11 + Y.22) / 2
mu.t <- (Y.21 + Y.12) / 2
# closed-form sums of squares for the 2x2 crossover ANOVA
SScarry <- 2*n1*n2/(n1+n2)*(Y.12 + Y.22 - Y.11 - Y.21)^2 / 4
# SSinter <- (sum(Yi.1^2) + sum(Yi.2^2) - Y..1^2 * n1 - Y..2^2 * n2) * 2
SSinter <- (sum((Yi.1-Y..1)^2) + sum((Yi.2-Y..2)^2)) * 2
SSbetween <- SScarry + SSinter
SSperiod <- 2*n1*n2/(n1+n2)*(Y.21 + Y.22 - Y.11 - Y.12)^2 / 4
SSdrug <- 2*n1*n2/(n1+n2)*(Y.21 + Y.12 - Y.11 - Y.22)^2 / 4
# intra-subject (error) SS by subtraction from the total
SSintra <- SStotal - SScarry - SSinter - SSdrug - SSperiod
Source <- c("SUBJECT", "GROUP", "SUBJECT(GROUP)", "PERIOD", "DRUG", "ERROR", "TOTAL");
SS <- c(SSbetween, SScarry, SSinter, SSperiod, SSdrug, SSintra, SStotal);
DF <- c(n1+n2-1, 1, n1+n2-2, 1, 1, n1+n2-2, 2*n1+2*n2-1);
MS <- SS / DF
mse <- SSintra / (n1+n2-2);
# GROUP is tested against SUBJECT(GROUP) [MS[3]]; everything else
# against the intra-subject error
F <- MS / c(mse, MS[3], mse, mse, mse, mse, mse);
p1 <- 1 - pf(F[1], n1+n2-1, n1+n2-2)
p2 <- 1 - pf(F[2], 1, n1+n2-2);
p3 <- 1 - pf(F[3], n1+n2-2, n1+n2-2);
p4 <- 1 - pf(F[4], 1, n1+n2-2);
p5 <- 1 - pf(F[5], 1, n1+n2-2);
p <- c(p1, p2, p3, p4, p5, NA, NA)
# ERROR and TOTAL rows carry no F/MS entries
F[6] <- F[7] <- MS[7] <- NA
ANOVA <- cbind(SS, DF, MS, F, p)
dimnames(ANOVA) <- list(Source,c("SS", "DF", "MS", "F", "p"))
# point estimate and classical 90% CI for the treatment effect
pe <- mu.t - mu.r
sd <- sqrt(mse / 2 * (1/n1 + 1/n2)) # See pp 62-63
t0 <- qt(0.95, n1+n2-2);
ci0 <- cbind(pe - t0 * sd, pe, pe + t0 * sd)
if (logtransformed == TRUE) {
# back-transform to the ratio scale (geometric means)
lsm <- cbind(exp(mu.r), exp(mu.t))
dimnames(lsm) <- list("Geometric Means", cbind("Reference Drug", "Test Drug"))
ci <- exp(ci0);
dimnames(ci) <- list("90% CI for Ratio", c("Lower Limit", "Point Estimate", "Upper Limit"));
sampsize1 <- bss.r.mse(mse);
sampsize2 <- bss.r.mse(mse, true.r=exp(pe));
ss <- cbind(sampsize1, sampsize2)
dimnames(ss) <- list("80% Power Sample Size", c("True Ratio=1", "True Ratio=Point Estimate"));
} else {
# additive scale: report the difference and its percent of reference
lsm <- cbind(mu.r, mu.t)
dimnames(lsm) <- list("Arithmetic Means", cbind("Reference Drug", "Test Drug"))
ci1 <- (1 + ci0 / mu.r) * 100
ci <- rbind(ci0, ci1)
dimnames(ci) <- list(c("90% CI for Difference", "90% CI for Difference(%)"), c("Lower Limit", "Point Estimate", "Upper Limit"));
sampsize1 <- bss.d.mse(mu.r, mse);
sampsize2 <- bss.d.mse(mu.r, mse, true.d=pe);
ss <- cbind(sampsize1, sampsize2)
dimnames(ss) <- list("80% Power Sample Size", c("True Difference=0", "True Difference=Point Estimate"));
}
result <- list(ANOVA, lsm, ci, ss);
names(result) <- c("Analysis of Variance", "Least Square Means", "90% Confidence Interval", "Sample Size")
return(result);
}
hodges <- function(bedata, var)
{
if(!assert(bedata)) {
cat("\n Bad Data Format !\n");
return(NULL);
}
Yi11 <- bedata[bedata$GRP=="RT" & bedata$PRD==1, var]
Yi21 <- bedata[bedata$GRP=="RT" & bedata$PRD==2, var]
Yi12 <- bedata[bedata$GRP=="TR" & bedata$PRD==1, var]
Yi22 <- bedata[bedata$GRP=="TR" & bedata$PRD==2, var]
n1 <- length(Yi11)
n2 <- length(Yi12)
if(n1 * n2 < 12) {
cat("\n Too Small Sample Size for 90% Confidence Interval !\n");
return(NULL);
}
mu.r <- (mean(Yi11) + mean(Yi22)) / 2;
G1D <- (Yi21 - Yi11) / 2
G2D <- (Yi22 - Yi12) / 2
D <- sort(outer(G1D, G2D, "-"));
pval <- pwilcox(min(length(D[D>0]), length(D[D<0])), n1, n2)
w05 <- qwilcox(0.05, n1, n2)
w95 <- qwilcox(0.95, n1, n2)
names(pval) <- list(c("p-value"));
est1 <- cbind(D[w05-1], median(D), D[w95])
est2 <- (1 + est1 / mu.r ) * 100
est.a <- rbind(est1, est2)
dimnames(est.a) <- list(c("90% Confidence Interval", "90% Confidence Interval(%)"), c("Lower Limit", "Point Estimate", "Upper Limit"));
# est3 <- cbind(D[w05], median(D), D[w95+1])
# est4 <- (1 + est3 / mu.r ) * 100
# est.b <- rbind(est3, est4)
# dimnames(est.b) <- list(c("90% Confidence Interval", "90% Confidence Interval(%)"), c("Lower Limit", "Point Estimate", "Upper Limit"));
# result <- list(pval, est.a, est.b);
# names(result) <- c("Wilcoxon Signed-Rank Test", "Hodges-Lehmann Estimate", "Hodges-Lehmann Estimate Old")
result <- list(pval, est.a);
names(result) <- c("Wilcoxon Signed-Rank Test", "Hodges-Lehmann Estimate")
return(result);
}
########################################
# BE Plot
drawind <- function(g1l, g1r, g2l, g2r, g1s, g2s)
{
for (i in 1:length(g1l)) {
x <- jitter(c(1, 2), factor=0.3)
y <- c(g1l[i], g1r[i])
lines(x, y, type="l", lty=1, col="red")
text(x[1]-0.05, y[1], paste(g1s[i]), cex=0.6, col="red")
}
for (i in 1:length(g2l)) {
x <- jitter(c(1, 2), factor=0.3)
y <- c(g2l[i], g2r[i])
lines(x, y, type="l", lty=2, col="blue")
text(x[2]+0.05, y[2], paste(g2s[i]), cex=0.6, col="blue")
}
}
drawmeansd <- function(ma, sa, mb, sb, mc, sc, md, sd, y.max)
{
sft <- 0.03
delta <- mean(ma, mc) - mean(mb, md)
y.RT <- mean(ma, mc) + sign(delta) * y.max * 0.05
y.TR <- mean(mb, md) - sign(delta) * y.max * 0.05
lines(c(1-sft, 2-sft), c(ma, mc), type="l", lty=1, col="red")
text(1.5-sft, y.RT, "RT", col="red")
if (sa > 0) arrows(1-sft, ma-sa, 1-sft, ma+sa, length=0.1, code=3, angle=90, col="red")
if (sc > 0) arrows(2-sft, mc-sc, 2-sft, mc+sc, length=0.1, code=3, angle=90, col="red")
lines(c(1+sft, 2+sft), c(mb, md), type="l", lty=2, col="blue")
text(1.5+sft, y.TR, "TR", col="blue")
if (sb > 0) arrows(1+sft, mb-sb, 1+sft, mb+sd, length=0.1, code=3, angle=90, col="blue")
if (sd > 0) arrows(2+sft, md-sd, 2+sft, md+sd, length=0.1, code=3, angle=90, col="blue")
}
beplot <- function(bedata, var)
{
if(!assert(bedata)) {
cat("\n Bad Data Format !\n");
return(NULL);
}
Si11 <- bedata[bedata$GRP=="RT" & bedata$PRD==1, "SUBJ"]
Si21 <- bedata[bedata$GRP=="RT" & bedata$PRD==2, "SUBJ"]
Si12 <- bedata[bedata$GRP=="TR" & bedata$PRD==1, "SUBJ"]
Si22 <- bedata[bedata$GRP=="TR" & bedata$PRD==2, "SUBJ"]
Yi11 <- bedata[bedata$GRP=="RT" & bedata$PRD==1, var]
Yi21 <- bedata[bedata$GRP=="RT" & bedata$PRD==2, var]
Yi12 <- bedata[bedata$GRP=="TR" & bedata$PRD==1, var]
Yi22 <- bedata[bedata$GRP=="TR" & bedata$PRD==2, var]
n1 <- length(Yi11)
n2 <- length(Yi12)
Y.11 <- mean(Yi11)
Y.21 <- mean(Yi21)
Y.12 <- mean(Yi12)
Y.22 <- mean(Yi22)
sY.11 <- sd(Yi11)
sY.21 <- sd(Yi21)
sY.12 <- sd(Yi12)
sY.22 <- sd(Yi22)
y.max <- max(Y.11 + sY.11, Y.21 + sY.21, Y.12 + sY.12, Y.22 + sY.22, max(bedata[,var])) * 1.2
windows()
par(oma=c(1,1,3,1), mfrow=c(2,2))
plot(0, 0, type="n", ylim=c(0, y.max), xlim=c(0.5, 2.5), axes=FALSE, xlab="Period", ylab=var, main="(a) Individual Plot for Period")
axis(2)
axis(1, at=c(1,2))
drawind(Yi11, Yi21, Yi12, Yi22, Si11, Si12)
plot(0, 0, type="n", ylim=c(0, y.max), xlim=c(0.5, 2.5), axes=FALSE, xlab="Treatment", ylab=var, main="(b) Individual Plot for Treatment")
axis(2)
axis(1, at=c(1,2), labels=c("Test", "Reference"))
drawind(Yi21, Yi11, Yi12, Yi22, Si11, Si12)
plot(0, 0, type="n", ylim=c(0, y.max), xlim=c(0.5, 2.5), axes=FALSE, xlab="Period", ylab=var, main="(c) Mean and SD by Period")
axis(2)
axis(1, at=c(1,2))
drawmeansd(Y.11, sY.11, Y.12, sY.12, Y.21, sY.21, Y.22, sY.22, y.max)
plot(0, 0, type="n", ylim=c(0, y.max), xlim=c(0.5, 2.5), axes=FALSE, xlab="Treatment", ylab=var, main="(d) Mean and SD by Treatment")
axis(2)
axis(1, at=c(1,2), labels=c("Test", "Reference"))
drawmeansd(Y.21, sY.21, Y.12, sY.12, Y.11, sY.11, Y.22, sY.22, y.max)
mtext(outer=T, side=3, paste("Equivalence Plot for", var), cex=1.5)
windows()
par(oma=c(1,1,3,1), mfrow=c(2,2))
boxplot(Yi11, Yi21, Yi12, Yi22, names=c("PRD=1", "PRD=2", "PRD=1", "PRD=2"), main="(a) By Sequence and Period", sub="SEQ=RT SEQ=TR")
boxplot(c(Yi11, Yi21), c(Yi12, Yi22), names=c("Sequence=RT", "Sequence=TR"), main="(b) By Sequence")
boxplot(c(Yi11, Yi12), c(Yi21, Yi22), names=c("Period=1", "Period=2"), main="(c) By Period")
boxplot(c(Yi12, Yi21), c(Yi11, Yi22), names=c("Treatment=T", "Treatment=R"), main="(d) By Treatment")
mtext(outer=T, side=3, paste("Box Plots for", var), cex=1.5)
}
# bedata <- read.csv("d:/csv/propofolbe.csv")
# windows()
# par(mfrow=c(2,2),oma=c(1,1,3,1))
# boxplot(Cmax ~ GRP + PRD, data=bedata)
# boxplot(Cmax ~ GRP, data=bedata)
# boxplot(Cmax ~ PRD, data=bedata)
# boxplot(Cmax ~ TRT, data=bedata)
# options(digits=3)
be <- function(filename)
{
bedata <- read.csv(filename);
# File should have the following columns
# SUBJ : Subject ID, any data type
# GRP: "RT" or "TR"
# PRD: 1 or 2
# TRT: "R" or "T"
# AUClast: numeric data type
# AUCinf: numeric data type
# Cmax: numeric data type
# Tmax: numeric data type
# Other columns as you wish
bedata <- bedata[order(bedata$GRP, bedata$PRD, bedata$SUBJ),];
if(!assert(bedata)) {
cat("\n Bad Data Format !\n");
return(NULL);
}
beplot(bedata, "AUClast")
beplot(bedata, "AUCinf")
beplot(bedata, "Cmax")
beplot(bedata, "Tmax")
bedata$lnAUClast <- log(bedata$AUClast);
bedata$lnAUCinf <- log(bedata$AUCinf);
bedata$lnCmax <- log(bedata$Cmax);
cat("\n\n[AUClast]\n\n");
print(betest(bedata, "lnAUClast", logtransformed=T));
cat("\n\n[AUCinf]\n\n");
print(betest(bedata, "lnAUCinf", logtransformed=T));
cat("\n\n[Cmax]\n\n");
print(betest(bedata, "lnCmax", logtransformed=T));
cat("\n\n[Tmax]\n\n");
print(hodges(bedata, "Tmax"));
}
kbe <- function(filename)
{
bedata <- read.csv(filename);
# File should have the following columns
# SUBJ : Subject ID, any data type
# GRP: "RT" or "TR"
# PRD: 1 or 2
# TRT: "R" or "T"
# AUClast: numeric data type
# Cmax: numeric data type
# Tmax: numeric data type
# Other columns as you wish
bedata <- bedata[order(bedata$GRP, bedata$PRD, bedata$SUBJ),];
if(!assert(bedata)) {
cat("\n Bad Data Format !\n");
return(NULL);
}
beplot(bedata, "AUClast")
beplot(bedata, "Cmax")
beplot(bedata, "Tmax")
bedata$lnAUClast <- log(bedata$AUClast);
bedata$lnCmax <- log(bedata$Cmax);
cat("\n\n[AUClast]\n\n");
print(betest(bedata, "lnAUClast", logtransformed=T));
cat("\n\n[Cmax]\n\n");
print(betest(bedata, "lnCmax", logtransformed=T));
cat("\n\n[Tmax]\n\n");
print(hodges(bedata, "Tmax"));
}
|
# Simple linear regression of Salary on YearsExperience with a
# train/test split and fitted-line plots for both sets.
dataset = read.csv('../data/Salary_Data.csv')
# Reproducible 2/3 train, 1/3 test split
library(caTools)
set.seed(123)
split = sample.split(dataset$Salary, SplitRatio = 2/3)
training_set = subset(dataset, split == TRUE)
test_set = subset(dataset, split == FALSE)
# Fit the regressor on the training set only
regressor = lm(formula = Salary ~ YearsExperience,
               data = training_set)
# Predict salaries for the held-out test set
y_pred = predict(regressor, newdata = test_set)
y_pred
# FIX: install ggplot2 only when missing -- an unconditional
# install.packages() re-downloads the package on every run
if (!requireNamespace("ggplot2", quietly = TRUE)) install.packages('ggplot2')
library(ggplot2)
# Training-set points against the fitted regression line
ggplot() +
  geom_point(aes(x = training_set$YearsExperience,
                 y = training_set$Salary),
             color = 'red') +
  geom_line(aes(x = training_set$YearsExperience,
                y = predict(regressor, newdata = training_set)),
            color = 'blue') +
  ggtitle('Salary vs Experience (training set)') +
  xlab('years of experience') +
  ylab('Salary')
# Test-set points against the same (training-fitted) line.
# FIX: the title said "(training set)" although this plot shows the
# test set; also corrected the "Experiance" spelling in both titles.
ggplot() +
  geom_point(aes(x = test_set$YearsExperience,
                 y = test_set$Salary),
             color = 'red') +
  geom_line(aes(x = training_set$YearsExperience,
                y = predict(regressor, newdata = training_set)),
            color = 'blue') +
  ggtitle('Salary vs Experience (test set)') +
  xlab('years of experience') +
  ylab('Salary')
| /rproj/linear.R | no_license | ducksfrogs/A_Zmachine_learning | R | false | false | 1,193 | r | dataset = read.csv('../data/Salary_Data.csv')
library(caTools)
set.seed(123)
split = sample.split(dataset$Salary, SplitRatio = 2/3)
training_set = subset(dataset, split== TRUE)
test_set = subset(dataset, split==FALSE)
regressor = lm(formula = Salary~YearsExperience,
data = training_set)
y_pred = predict(regressor,newdata = test_set)
y_pred
install.packages('ggplot2')
library(ggplot2)
ggplot() +
geom_point(aes(x= training_set$YearsExperience,
y=training_set$Salary),
color = 'red') +
geom_line(aes(x = training_set$YearsExperience,
y = predict(regressor, newdata = training_set)
),
color = 'blue') +
ggtitle('Salary vs Experiance (training set)') +
xlab('years of experience') +
ylab('Salary')
ggplot() +
geom_point(aes(x= test_set$YearsExperience,
y=test_set$Salary),
color = 'red') +
geom_line(aes(x = training_set$YearsExperience,
y = predict(regressor, newdata = training_set)
),
color = 'blue') +
ggtitle('Salary vs Experiance (training set)') +
xlab('years of experience') +
ylab('Salary')
|
# Slope (bump) chart comparing category frequencies between two data
# vectors: categories are ranked by frequency in the first vector and
# connected to their rank/position in the second.
# Exported under two names (Slopechart and slopeChart).
#
# FirstDatavector/SecondDatavector: raw data whose category frequencies
#   are compared
# Names, Labels, MaxNumberOfSlices: forwarded to internpiechart(), which
#   computes the per-category percentages (defined elsewhere in this
#   package -- not visible here)
# TopLabels: column headings drawn above the two sides of the chart
# main: chart title
# Depends on the plotrix and DataVisualizations packages.
Slopechart=slopeChart=function(FirstDatavector,SecondDatavector,Names,Labels,MaxNumberOfSlices,TopLabels=c('FirstDatavector','SecondDatavector'),main='Comparision of Descending Frequency'){
requireNamespace('plotrix')
# per-category percentages for each vector
x1=internpiechart(FirstDatavector,Names,Labels,MaxNumberOfSlices=MaxNumberOfSlices)
x2=internpiechart(SecondDatavector,Names,Labels,MaxNumberOfSlices = MaxNumberOfSlices)
Percents1=x1$Percents
Percents2=x2$Percents
names1=names(Percents1)
names2=names(Percents2)
# align the second vector's categories to the first's ordering
ind=match(table = names2,x = names1)
n=length(Percents1)
xy=cbind(Percents1,Percents2[ind])
# rank by descending frequency in the first vector
xy=xy[order(xy[,1],decreasing = T),]
plotrix::bumpchart(xy,lwd = 3,names(x1$Percents),col=DataVisualizations::DefaultColorSequence[1:n],top.labels = NA,rank=T,arrows=F)
# user coordinates of the plot region, used to place the labels
xmin <- par("usr")[1]
xmax <- par("usr")[2]
ymin <- par("usr")[3]
ymax <- par("usr")[4]
# column headings over the left and right sides, and the title below
plotrix::boxed.labels(x = mean(c(xmax,xmin))-xmin/xmax,y = mean(c(ymax,ymin))+mean(c(ymax,ymin)),labels = TopLabels[1],border = F)
plotrix::boxed.labels(x = mean(c(xmax,xmin))+xmin/xmax,y = 2*mean(c(ymax,ymin)),labels = TopLabels[2],border = F)
plotrix::boxed.labels(x = mean(c(xmax,xmin)),y = ymin-0.4*ymin,labels = main,border = F,cex = 1.5)
}
| /R/Slopechart.R | no_license | Mthrun/DataVisualizations | R | false | false | 1,251 | r | Slopechart=slopeChart=function(FirstDatavector,SecondDatavector,Names,Labels,MaxNumberOfSlices,TopLabels=c('FirstDatavector','SecondDatavector'),main='Comparision of Descending Frequency'){
requireNamespace('plotrix')
x1=internpiechart(FirstDatavector,Names,Labels,MaxNumberOfSlices=MaxNumberOfSlices)
x2=internpiechart(SecondDatavector,Names,Labels,MaxNumberOfSlices = MaxNumberOfSlices)
Percents1=x1$Percents
Percents2=x2$Percents
names1=names(Percents1)
names2=names(Percents2)
ind=match(table = names2,x = names1)
n=length(Percents1)
xy=cbind(Percents1,Percents2[ind])
xy=xy[order(xy[,1],decreasing = T),]
plotrix::bumpchart(xy,lwd = 3,names(x1$Percents),col=DataVisualizations::DefaultColorSequence[1:n],top.labels = NA,rank=T,arrows=F)
xmin <- par("usr")[1]
xmax <- par("usr")[2]
ymin <- par("usr")[3]
ymax <- par("usr")[4]
plotrix::boxed.labels(x = mean(c(xmax,xmin))-xmin/xmax,y = mean(c(ymax,ymin))+mean(c(ymax,ymin)),labels = TopLabels[1],border = F)
plotrix::boxed.labels(x = mean(c(xmax,xmin))+xmin/xmax,y = 2*mean(c(ymax,ymin)),labels = TopLabels[2],border = F)
plotrix::boxed.labels(x = mean(c(xmax,xmin)),y = ymin-0.4*ymin,labels = main,border = F,cex = 1.5)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ICLinearDiscriminantClassifier.R
\name{ICLinearDiscriminantClassifier}
\alias{ICLinearDiscriminantClassifier}
\title{Implicitly Constrained Semi-supervised Linear Discriminant Classifier}
\usage{
ICLinearDiscriminantClassifier(X, y, X_u, prior = NULL, scale = FALSE,
init = NULL, sup_prior = FALSE, x_center = FALSE, ...)
}
\arguments{
\item{X}{design matrix of the labeled objects}
\item{y}{vector with labels}
\item{X_u}{design matrix of the unlabeled objects}
\item{prior}{set a fixed class prior}
\item{scale}{logical; Should the features be normalized? (default: FALSE)}
\item{init}{not currently used}
\item{sup_prior}{logical; use the prior estimates based only on the labeled data, not the imputed labels (default: FALSE)}
\item{x_center}{logical; Whether the data should be centered}
\item{...}{Additional Parameters, Not used}
}
\description{
Semi-supervised version of Linear Discriminant Analysis using implicit constraints as described in (Krijthe & Loog 2014). This method finds the soft labeling of the unlabeled objects, whose resulting LDA solution gives the highest log-likelihood when evaluated on the labeled objects only. See also \code{\link{ICLeastSquaresClassifier}}.
}
\references{
Krijthe, J.H. & Loog, M., 2014. Implicitly Constrained Semi-Supervised Linear Discriminant Analysis. In International Conference on Pattern Recognition. Stockholm, pp. 3762-3767.
}
\seealso{
Other RSSL classifiers: \code{\link{EMLinearDiscriminantClassifier}},
\code{\link{GRFClassifier}},
\code{\link{ICLeastSquaresClassifier}},
\code{\link{KernelLeastSquaresClassifier}},
\code{\link{LaplacianKernelLeastSquaresClassifier}},
\code{\link{LaplacianSVM}},
\code{\link{LeastSquaresClassifier}},
\code{\link{LinearDiscriminantClassifier}},
\code{\link{LinearSVM}}, \code{\link{LinearTSVM}},
\code{\link{LogisticLossClassifier}},
\code{\link{LogisticRegression}},
\code{\link{MCLinearDiscriminantClassifier}},
\code{\link{MCNearestMeanClassifier}},
\code{\link{MCPLDA}},
\code{\link{MajorityClassClassifier}},
\code{\link{NearestMeanClassifier}},
\code{\link{QuadraticDiscriminantClassifier}},
\code{\link{S4VM}}, \code{\link{SVM}},
\code{\link{SelfLearning}}, \code{\link{TSVM}},
\code{\link{USMLeastSquaresClassifier}},
\code{\link{WellSVM}}, \code{\link{svmlin}}
}
| /man/ICLinearDiscriminantClassifier.Rd | no_license | kevinwkc/RSSL | R | false | true | 2,397 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ICLinearDiscriminantClassifier.R
\name{ICLinearDiscriminantClassifier}
\alias{ICLinearDiscriminantClassifier}
\title{Implicitly Constrained Semi-supervised Linear Discriminant Classifier}
\usage{
ICLinearDiscriminantClassifier(X, y, X_u, prior = NULL, scale = FALSE,
init = NULL, sup_prior = FALSE, x_center = FALSE, ...)
}
\arguments{
\item{X}{design matrix of the labeled objects}
\item{y}{vector with labels}
\item{X_u}{design matrix of the labeled objects}
\item{prior}{set a fixed class prior}
\item{scale}{logical; Should the features be normalized? (default: FALSE)}
\item{init}{not currently used}
\item{sup_prior}{logical; use the prior estimates based only on the labeled data, not the imputed labels (default: FALSE)}
\item{x_center}{logical; Whether the data should be centered}
\item{...}{Additional Parameters, Not used}
}
\description{
Semi-supervised version of Linear Discriminant Analysis using implicit constraints as described in (Krijthe & Loog 2014). This method finds the soft labeling of the unlabeled objects, whose resulting LDA solution gives the highest log-likelihood when evaluated on the labeled objects only. See also \code{\link{ICLeastSquaresClassifier}}.
}
\references{
Krijthe, J.H. & Loog, M., 2014. Implicitly Constrained Semi-Supervised Linear Discriminant Analysis. In International Conference on Pattern Recognition. Stockholm, pp. 3762-3767.
}
\seealso{
Other RSSL classifiers: \code{\link{EMLinearDiscriminantClassifier}},
\code{\link{GRFClassifier}},
\code{\link{ICLeastSquaresClassifier}},
\code{\link{KernelLeastSquaresClassifier}},
\code{\link{LaplacianKernelLeastSquaresClassifier}},
\code{\link{LaplacianSVM}},
\code{\link{LeastSquaresClassifier}},
\code{\link{LinearDiscriminantClassifier}},
\code{\link{LinearSVM}}, \code{\link{LinearTSVM}},
\code{\link{LogisticLossClassifier}},
\code{\link{LogisticRegression}},
\code{\link{MCLinearDiscriminantClassifier}},
\code{\link{MCNearestMeanClassifier}},
\code{\link{MCPLDA}},
\code{\link{MajorityClassClassifier}},
\code{\link{NearestMeanClassifier}},
\code{\link{QuadraticDiscriminantClassifier}},
\code{\link{S4VM}}, \code{\link{SVM}},
\code{\link{SelfLearning}}, \code{\link{TSVM}},
\code{\link{USMLeastSquaresClassifier}},
\code{\link{WellSVM}}, \code{\link{svmlin}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{createTestGraph}
\alias{createTestGraph}
\title{createTestGraph}
\usage{
createTestGraph(nodeCount, edgeCount)
}
\arguments{
\item{nodeCount}{1 or more}
\item{edgeCount}{0 or more}
}
\value{
a graphNEL with \code{nodeCount} nodes and \code{edgeCount} edges
}
\description{
\code{createTestGraph} creates a graph with as many nodes and edges as you wish, but neither edge nor node attributes.
}
\examples{
g <- createTestGraph(5, 3)
}
| /man/createTestGaph.Rd | permissive | fengweijp/RCyjs | R | false | true | 514 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{createTestGraph}
\alias{createTestGraph}
\title{createTestGraph}
\usage{
createTestGraph(nodeCount, edgeCount)
}
\arguments{
\item{nodeCount}{1 or more}
\item{edgeCount}{0 or more}
}
\value{
a graphNEL with \code{nodeCount} nodes and \code{edgeCount} edges
}
\description{
\code{createTestGraph} creates a graph with as many nodes and edges as you wish, but neither edge nor node attributes.
}
\examples{
g <- createTestGraph(5, 3)
}
|
# Script for running the Shiny app without having to use RStudio
# NOTE(review): the absolute path below is machine-specific; anyone else
# running this script must edit `dir` to their own checkout location.
dir <- "/Volumes/Prosecutor/doxy-mayne/DoxyApp/" #Application's directory
setwd(dir) #Set the working directory to be the application's directory
library(shiny) #Load the shiny package for the "runApp" function
runApp()
| /DoxyApp/runApp.R | no_license | wojjy001/doxy-mayne | R | false | false | 288 | r | # Script for running the Shiny app without having to use RStudio
dir <- "/Volumes/Prosecutor/doxy-mayne/DoxyApp/" #Application's directory
setwd(dir) #Set the working directory to be the application's directory
library(shiny) #Load the shiny package for the "runApp" function
runApp()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/market.R
\name{market_ping}
\alias{market_ping}
\title{Test connectivity to the Binance REST API}
\usage{
market_ping()
}
\value{
\code{TRUE} on success or \code{FALSE} on failure.
}
\description{
Test connectivity to the Binance REST API
}
\examples{
market_ping()
}
| /man/market_ping.Rd | no_license | snapbuy/binance | R | false | true | 346 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/market.R
\name{market_ping}
\alias{market_ping}
\title{Test connectivity to the Binance REST API}
\usage{
market_ping()
}
\value{
\code{TRUE} on success or \code{FALSE} on failure.
}
\description{
Test connectivity to the Binance REST API
}
\examples{
market_ping()
}
|
###############################
### 2009, David Ellinghaus ###
###############################
## input: data.frame with genotype counts
## - columns: geno.11: counts for AA
## geno.12: counts for AB
## geno.22: counts for BB
# Draw a De Finetti diagram (heterozygote fraction vs allele frequency)
# for a set of SNPs, with the Hardy-Weinberg parabola overlaid in red.
#
# snpSummary: data.frame with genotype counts per SNP --
#             geno.11 (AA), geno.12 (AB), geno.22 (BB)
# label:      text appended to the plot title
makeDefinetti <- function(snpSummary, label)
{
# alternate flipping every other SNP's allele frequency to 1-p,
# presumably to spread points over both halves of the symmetric
# diagram -- TODO confirm intent
switchStrand <- rep(c(TRUE, FALSE), length = nrow(snpSummary))
snpSummary$n <- with(snpSummary, geno.11 + geno.12 + geno.22)
# freqsA = allele-A frequency, freqsAB = heterozygote fraction
freqs <- with(snpSummary,
data.frame(freqsA = (geno.11 + 0.5 * geno.12) / n,
freqsAB = geno.12 / (geno.11 + geno.12 + geno.22)))
freqs$freqsA[switchStrand] <- 1 - freqs$freqsA[switchStrand]
#print(freqs)
# empty unit-square canvas; axes drawn manually below
plot(1, type="n", col="black", xlim = c(0, 1), ylim = c(0,1),
main=paste("De Finetti diagram", "- ", label),
xlab="p", ylab=expression(p[12]),
xaxs = "i", yaxs = "i", las = 1, xaxt="n", yaxt="n")
axis(1, seq(0, 1, by = 0.1), tick = T, labels = seq(0,1,by = 0.1))
axis(2, seq(0, 1, by = 0.1), tick = T, labels = seq(0,1,by = 0.1))
lines(c(0,0.5,1, 0),c(0,1,0, 0), type="l", col="black", ylab=expression(p[12])) # triangle
# one point per SNP
with(freqs, points(freqs$freqsA, freqs$freqsAB,
cex = 0.5, pch = 20))
text(x=0.2, y=0.55, labels=expression(p[22]), cex=1.2)
text(x=0.8, y=0.55, labels=expression(p[11]), cex=1.2)
# horizontal lines
for ( i in seq(0,1,by = 0.1))
lines(c(0,1), c(i,i), lty=3, lwd=0.5, col="black")
# triangle grid
for ( i in seq(0,1,by = 0.1))
lines(c(i,i+0.5-i/2), c(0,1-i), lty=1, lwd=0.5, col="black")
for ( i in seq(0,1,by = 0.1))
lines(c(i,i-i/2), c(0,i), lty=1, lwd=0.5, col="black")
for ( i in seq(0.05,0.95,by = 0.1))
lines(c(i,i+0.5-i/2), c(0,1-i), lty=3, lwd=0.5, col="black")
for ( i in seq(0.05,0.95,by = 0.1))
lines(c(i,i-i/2), c(0,i), lty=3, lwd=0.5, col="black")
for ( i in seq(0.05,0.95,by = 0.1))
lines(c(i/2,1-i/2), c(i,i), lty=3, lwd=0.5, col="black")
# ticks
# tick marks and labels along the left and right triangle edges
y <- seq(0.1,0.9,by=.1)
x <- y/2
segments(x-.01, y, x, y, col="black")
text(x = x-.03, y = y, labels = as.character(rev(seq(0.1,0.9,by=.1))), cex=1.3)
y2 <- rev(seq(0.1,0.9,by=.1))
x2 <- y/2 + .5
segments(x2+.01, y2, x2, y2, col="black")
text(x = x2+.03, y = y2, labels = as.character(seq(0.1,0.9,by=.1)), cex=1.3)
## HWE curve
curve(2 * (x - x^2), from = 0, to = 1, col = "red", lwd = 2, add = TRUE)
}
# ---- Script driver -------------------------------------------------------
# Invoked non-interactively; commandArgs() positions 4 and 5 carry the two
# user-supplied arguments (earlier positions are interpreter arguments).
file.hardy <- commandArgs()[4] # hardy input file
file.out <- commandArgs()[5] # output filename quant trait
# Read the Hardy-Weinberg test table.  NOTE(review): columns TEST and GENO
# suggest PLINK --hardy output -- confirm the producing tool.  Also note
# `stringsAsFactor` relies on partial matching of `stringsAsFactors`.
hardy <-read.table(file=file.hardy,header=T,stringsAsFactor=F)
# quantitative trait
# GENO holds counts as "n11/n12/n22"; keep only the overall quantitative
# trait rows and split the counts into a 3-column numeric structure
genos <-matrix(unlist(strsplit(subset(hardy, hardy$TEST == "ALL(QT)")$GENO, "/")), ncol=3, byrow=T)
genos <-data.frame(geno.11 = as.numeric(genos[,1]),
geno.12 = as.numeric(genos[,2]),
geno.22 = as.numeric(genos[,3]))
# Render the diagram to "<file.out>.jpg"
jpeg(file=paste(file.out, ".jpg", sep=""), quality=100,width=900,height=700)
makeDefinetti(genos, label = "Quantitative trait")
dev.off()
| /bin/DeFinetti_hardy_QuantTrait.r | permissive | JPMirandaM/gwas-qc | R | false | false | 3,051 | r | ###############################
### 2009, David Ellinghaus ###
###############################
## input: data.frame with genotype counts
## - columns: geno.11: counts for AA
## geno.12: counts for AB
## geno.22: counts for BB
## makeDefinetti(snpSummary, label)
## Draw a De Finetti (ternary) diagram of per-SNP genotype frequencies and
## overlay the Hardy-Weinberg equilibrium curve p12 = 2p(1-p).
##
## Arguments:
##   snpSummary - data.frame of genotype counts with columns
##                geno.11 (AA), geno.12 (AB), geno.22 (BB); one row per SNP
##   label      - character string appended to the plot title
##
## Side effect: draws on the current graphics device; no useful return value.
makeDefinetti <- function(snpSummary, label)
{
# Alternate TRUE/FALSE by row position: every other SNP's allele frequency
# is mirrored (1 - p) below.  NOTE(review): this "strand switch" is purely
# positional, not based on real strand information -- presumably done only
# to spread points over both halves of the triangle; confirm intent.
switchStrand <- rep(c(TRUE, FALSE), length = nrow(snpSummary))
# Total genotype count per SNP
snpSummary$n <- with(snpSummary, geno.11 + geno.12 + geno.22)
# freqsA  = frequency of allele A (heterozygotes counted half)
# freqsAB = heterozygote genotype frequency (the diagram's y axis)
freqs <- with(snpSummary,
data.frame(freqsA = (geno.11 + 0.5 * geno.12) / n,
freqsAB = geno.12 / (geno.11 + geno.12 + geno.22)))
# Mirror every other point around p = 0.5 (see switchStrand above)
freqs$freqsA[switchStrand] <- 1 - freqs$freqsA[switchStrand]
#print(freqs)
# Empty unit-square canvas; axes, ticks and grid are drawn manually below
plot(1, type="n", col="black", xlim = c(0, 1), ylim = c(0,1),
main=paste("De Finetti diagram", "- ", label),
xlab="p", ylab=expression(p[12]),
xaxs = "i", yaxs = "i", las = 1, xaxt="n", yaxt="n")
axis(1, seq(0, 1, by = 0.1), tick = T, labels = seq(0,1,by = 0.1))
axis(2, seq(0, 1, by = 0.1), tick = T, labels = seq(0,1,by = 0.1))
lines(c(0,0.5,1, 0),c(0,1,0, 0), type="l", col="black", ylab=expression(p[12])) # triangle outline
# One point per SNP at (p, p12)
with(freqs, points(freqs$freqsA, freqs$freqsAB,
cex = 0.5, pch = 20))
# Labels for the homozygote corners of the triangle
text(x=0.2, y=0.55, labels=expression(p[22]), cex=1.2)
text(x=0.8, y=0.55, labels=expression(p[11]), cex=1.2)
# horizontal lines
for ( i in seq(0,1,by = 0.1))
lines(c(0,1), c(i,i), lty=3, lwd=0.5, col="black")
# triangle grid
# solid grid lines parallel to the two slanted triangle edges
for ( i in seq(0,1,by = 0.1))
lines(c(i,i+0.5-i/2), c(0,1-i), lty=1, lwd=0.5, col="black")
for ( i in seq(0,1,by = 0.1))
lines(c(i,i-i/2), c(0,i), lty=1, lwd=0.5, col="black")
# dashed intermediate grid lines, offset by 0.05 from the solid ones
for ( i in seq(0.05,0.95,by = 0.1))
lines(c(i,i+0.5-i/2), c(0,1-i), lty=3, lwd=0.5, col="black")
for ( i in seq(0.05,0.95,by = 0.1))
lines(c(i,i-i/2), c(0,i), lty=3, lwd=0.5, col="black")
for ( i in seq(0.05,0.95,by = 0.1))
lines(c(i/2,1-i/2), c(i,i), lty=3, lwd=0.5, col="black")
# ticks
# tick marks and labels along the left slanted edge ...
y <- seq(0.1,0.9,by=.1)
x <- y/2
segments(x-.01, y, x, y, col="black")
text(x = x-.03, y = y, labels = as.character(rev(seq(0.1,0.9,by=.1))), cex=1.3)
# ... and along the right slanted edge
y2 <- rev(seq(0.1,0.9,by=.1))
x2 <- y/2 + .5
segments(x2+.01, y2, x2, y2, col="black")
text(x = x2+.03, y = y2, labels = as.character(seq(0.1,0.9,by=.1)), cex=1.3)
## HWE curve
curve(2 * (x - x^2), from = 0, to = 1, col = "red", lwd = 2, add = TRUE)
}
# ---- Script driver -------------------------------------------------------
# Invoked non-interactively; commandArgs() positions 4 and 5 carry the two
# user-supplied arguments (earlier positions are interpreter arguments).
file.hardy <- commandArgs()[4] # hardy input file
file.out <- commandArgs()[5] # output filename quant trait
# Read the Hardy-Weinberg test table.  NOTE(review): columns TEST and GENO
# suggest PLINK --hardy output -- confirm the producing tool.  Also note
# `stringsAsFactor` relies on partial matching of `stringsAsFactors`.
hardy <-read.table(file=file.hardy,header=T,stringsAsFactor=F)
# quantitative trait
# GENO holds counts as "n11/n12/n22"; keep only the overall quantitative
# trait rows and split the counts into a 3-column numeric structure
genos <-matrix(unlist(strsplit(subset(hardy, hardy$TEST == "ALL(QT)")$GENO, "/")), ncol=3, byrow=T)
genos <-data.frame(geno.11 = as.numeric(genos[,1]),
geno.12 = as.numeric(genos[,2]),
geno.22 = as.numeric(genos[,3]))
# Render the diagram to "<file.out>.jpg"
jpeg(file=paste(file.out, ".jpg", sep=""), quality=100,width=900,height=700)
makeDefinetti(genos, label = "Quantitative trait")
dev.off()
|
## makeCacheMatrix(x) - create an object to store a matrix
# and its cached inverse
## cacheSolve(x, ...) - compute or retrieve the inverse of the matrix stored
# in x, an object returned by the makeCacheMatrix function
## makeCacheMatrix(x)
# Stores a matrix and it's inverse to prevent needing
# to solve the matrix repeatedly
#
# arguments:
# x - (optional) matrix() class object to store
#
# return value:
# list containing the functions:
# get() - returns the stored matrix object
# set(m) - replaces the stored matrix with new matrix `x`,
# clearing the cached inverse matrix
# getSolved() - returns the cached inverse matrix object
# setSolved(s)- replaces the cached solved matrix object
## makeCacheMatrix(x)
## Build a cache-aware matrix container: holds a matrix together with a
## memoised copy of its inverse so the inverse need only be computed once.
##
## Arguments:
##   x - (optional) matrix to store; defaults to an empty 1x1 NA matrix
##
## Returns a list of accessor functions:
##   get()        - the stored matrix
##   set(m)       - replace the stored matrix and drop any cached inverse
##   getSolved()  - the cached inverse, or NULL if not yet computed
##   setSolved(s) - store a freshly computed inverse in the cache
makeCacheMatrix <- function(x = matrix()) {
  # The cached inverse lives in this closure; NULL means "not computed yet"
  cached_inverse <- NULL
  list(
    set = function(m) {
      # Replacing the matrix invalidates the cache, so clear both together
      x <<- m
      cached_inverse <<- NULL
    },
    get = function() x,
    setSolved = function(s) cached_inverse <<- s,
    getSolved = function() cached_inverse
  )
}
## cacheSolve(x, ...)
# Solves or returns the cached solved matrix stored in x, an object
# generated by the makeCacheMatrix() function
#
# arguments:
# x - object returned by the makeCacheMatrix() function containing
# a matrix and possibly it's cached inverse
#
# additional arguments are passed through to solve()
#
# return value:
# the inverse of the matrix stored in x, whether cached or solved
# on the fly
## cacheSolve(x, ...)
## Return the inverse of the matrix held in `x` (an object produced by
## makeCacheMatrix), reusing the cached inverse when one is available.
##
## Arguments:
##   x   - cache object created by makeCacheMatrix()
##   ... - further arguments forwarded to solve()
##
## Returns the inverse matrix, whether freshly computed or cached.
cacheSolve <- function(x, ...) {
  cached <- x$getSolved()
  if (is.null(cached)) {
    # Cache miss: invert now and remember the result for next time
    cached <- solve(x$get(), ...)
    x$setSolved(cached)
  } else {
    message("Using cached value...")
  }
  cached
}
| /cachematrix.R | no_license | dstates/ProgrammingAssignment2 | R | false | false | 2,695 | r | ## makeCacheMatrix(x) - create an object to store a matrix
# and its cached inverse
## cacheSolve(x, ...) - compute or retrieve the inverse of the matrix stored
# in x, an object returned by the makeCacheMatrix function
## makeCacheMatrix(x)
# Stores a matrix and it's inverse to prevent needing
# to solve the matrix repeatedly
#
# arguments:
# x - (optional) matrix() class object to store
#
# return value:
# list containing the functions:
# get() - returns the stored matrix object
# set(m) - replaces the stored matrix with new matrix `x`,
# clearing the cached inverse matrix
# getSolved() - returns the cached inverse matrix object
# setSolved(s)- replaces the cached solved matrix object
## makeCacheMatrix(x)
## Build a cache-aware matrix container: holds a matrix together with a
## memoised copy of its inverse so the inverse need only be computed once.
##
## Arguments:
##   x - (optional) matrix to store; defaults to an empty 1x1 NA matrix
##
## Returns a list of accessor functions:
##   get()        - the stored matrix
##   set(m)       - replace the stored matrix and drop any cached inverse
##   getSolved()  - the cached inverse, or NULL if not yet computed
##   setSolved(s) - store a freshly computed inverse in the cache
makeCacheMatrix <- function(x = matrix()) {
  # The cached inverse lives in this closure; NULL means "not computed yet"
  cached_inverse <- NULL
  list(
    set = function(m) {
      # Replacing the matrix invalidates the cache, so clear both together
      x <<- m
      cached_inverse <<- NULL
    },
    get = function() x,
    setSolved = function(s) cached_inverse <<- s,
    getSolved = function() cached_inverse
  )
}
## cacheSolve(x, ...)
# Solves or returns the cached solved matrix stored in x, an object
# generated by the makeCacheMatrix() function
#
# arguments:
# x - object returned by the makeCacheMatrix() function containing
# a matrix and possibly it's cached inverse
#
# additional arguments are passed through to solve()
#
# return value:
# the inverse of the matrix stored in x, whether cached or solved
# on the fly
cacheSolve <- function(x, ...) {
# retrive the cached inverse, if it exists
solved <- x$getSolved()
# check to see if the inverse was cached, and return it's value
if (!is.null(solved)) {
message("Using cached value...")
return(solved)
}
# inverse was not cached, compute and cache, then return it's value
solved <- solve(x$get(), ...)
x$setSolved(solved)
solved
}
|
# Fit an elastic-net regression (alpha = 0.35) with 10-fold CV on the
# urinary_tract classifier training set and log the fitted glmnet path.
library(glmnet)
# Load the training table; column 1 = response, columns 4+ = predictors.
# NOTE(review): columns 2-3 are skipped -- presumably IDs/metadata; confirm
# against the CSV schema.
mydata = read.table("../../../../TrainingSet/FullSet/Classifier/urinary_tract.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
# Fix the CV fold assignment for reproducibility
set.seed(123)
# Gaussian elastic net: mean-absolute-error CV criterion, standardized inputs
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.35,family="gaussian",standardize=TRUE)
# Append the fitted path summary (df, %dev, lambda per step) to the log file
sink('./urinary_tract_047.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
| /Model/EN/Classifier/urinary_tract/urinary_tract_047.R | no_license | esbgkannan/QSMART | R | false | false | 363 | r | library(glmnet)
mydata = read.table("../../../../TrainingSet/FullSet/Classifier/urinary_tract.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.35,family="gaussian",standardize=TRUE)
sink('./urinary_tract_047.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
# Plot 1: histogram of Global Active Power over 2007-02-01 .. 2007-02-02.
# Read the full household power dataset; '?' marks missing values, and
# explicit colClasses keep the Date/Time columns as character.
rawData <- read.table("household_power_consumption.txt",header=T,sep=";", na.strings="?",colClasses=c("character","character","numeric","numeric","numeric","numeric","numeric","numeric","numeric"))
# Combine the separate Date and Time columns into a single timestamp
rawData$DateTime <- paste(rawData$Date, rawData$Time)
rawData$DateTimeActual <- strptime(rawData$DateTime,format="%d/%m/%Y %H:%M:%S")
# Keep only the two target days (inclusive of both endpoints)
tidy <- subset(rawData, rawData$DateTimeActual >= as.POSIXct("2007-02-01 00:00:00") & rawData$DateTimeActual <= as.POSIXct("2007-02-02 23:59:00"))
#First Plot
png(file="plot1.png",width = 480, height = 480)
hist(tidy$Global_active_power, col="red",main="Global Active Power",xlab="Global Active Power (kilowatts)")
dev.off() | /plot1.R | no_license | huckdn/Exploratory-Data-Analysis---Project1 | R | false | false | 660 | r | rawData <- read.table("household_power_consumption.txt",header=T,sep=";", na.strings="?",colClasses=c("character","character","numeric","numeric","numeric","numeric","numeric","numeric","numeric"))
rawData$DateTime <- paste(rawData$Date, rawData$Time)
rawData$DateTimeActual <- strptime(rawData$DateTime,format="%d/%m/%Y %H:%M:%S")
tidy <- subset(rawData, rawData$DateTimeActual >= as.POSIXct("2007-02-01 00:00:00") & rawData$DateTimeActual <= as.POSIXct("2007-02-02 23:59:00"))
#First Plot
png(file="plot1.png",width = 480, height = 480)
hist(tidy$Global_active_power, col="red",main="Global Active Power",xlab="Global Active Power (kilowatts)")
dev.off() |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotSignatures.R
\name{plotSignatures}
\alias{plotSignatures}
\title{Find enriched markers per identified cluster and visualise these as a custom corrplot.}
\usage{
plotSignatures(
indata,
assay = "scaled",
clusterAssign = metadata(indata)[["Cluster"]],
funcSummarise = function(x) mean(x, na.rm = TRUE),
col = colorRampPalette(brewer.pal(9, "RdPu"))(100),
labCex = 1,
legendPosition = "right",
legendCex = 1,
labDegree = 90,
verbose = TRUE
)
}
\arguments{
\item{indata}{A data-frame or matrix, or \code{SingleCellExperiment} object. If a
data-frame or matrix, this should relate to expression data (cells as
columns; genes as rows). If a \code{SingleCellExperiment} object, data will be
extracted from an assay component named by \code{assay}.}
\item{assay}{Name of the assay slot in \code{indata} from which data will be
taken, assuming \code{indata} is a \code{SingleCellExperiment} object.}
\item{clusterAssign}{A vector of cell-to-cluster assignments. This can be
from any source but must align with your cells / variables. There is no
check to ensure this when \code{indata} is not a \code{SingleCellExperiment} object.}
\item{funcSummarise}{A mathematical function used to summarise expression
per marker, per cluster.}
\item{col}{colorRampPalette to be used for shading low-to-high expression.}
\item{labCex}{cex (size) of the main plot labels.}
\item{legendPosition}{position of legend. Can be one of 'top', 'right', 'bottom', 'left'}
\item{legendCex}{cex (size) of the legend labels.}
\item{labDegree}{Rotation angle of the main plot labels.}
\item{verbose}{Boolean (TRUE / FALSE) to print messages to console or not.}
}
\value{
A \code{corrplot} object.
}
\description{
Find enriched markers per identified cluster and visualise these as a custom corrplot.
}
\details{
Find enriched markers per identified cluster and visualise these as a custom corrplot. \code{plotSignatures} first collapses your input data's expression profiles from the level of cells to the level of clusters based on a mathematical function specified by \code{funcSummarise}. It then centers and scales the data range to be between -1 and +1 for visualisation purposes.
}
\examples{
# create random data that follows a negative binomial
mat <- jitter(matrix(
MASS::rnegbin(rexp(1000, rate=.1), theta = 4.5),
ncol = 20))
colnames(mat) <- paste0('CD', 1:ncol(mat))
rownames(mat) <- paste0('cell', 1:nrow(mat))
u <- umap::umap(mat)$layout
colnames(u) <- c('UMAP1','UMAP2')
rownames(u) <- rownames(mat)
clus <- clusKNN(u)
plotSignatures(t(mat), clusterAssign = clus)
}
\author{
Kevin Blighe <kevin@clinicalbioinformatics.co.uk>
}
| /man/plotSignatures.Rd | no_license | Huaichao2018/scDataviz | R | false | true | 2,724 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotSignatures.R
\name{plotSignatures}
\alias{plotSignatures}
\title{Find enriched markers per identified cluster and visualise these as a custom corrplot.}
\usage{
plotSignatures(
indata,
assay = "scaled",
clusterAssign = metadata(indata)[["Cluster"]],
funcSummarise = function(x) mean(x, na.rm = TRUE),
col = colorRampPalette(brewer.pal(9, "RdPu"))(100),
labCex = 1,
legendPosition = "right",
legendCex = 1,
labDegree = 90,
verbose = TRUE
)
}
\arguments{
\item{indata}{A data-frame or matrix, or \code{SingleCellExperiment} object. If a
data-frame or matrix, this should relate to expression data (cells as
columns; genes as rows). If a \code{SingleCellExperiment} object, data will be
extracted from an assay component named by \code{assay}.}
\item{assay}{Name of the assay slot in \code{indata} from which data will be
taken, assuming \code{indata} is a \code{SingleCellExperiment} object.}
\item{clusterAssign}{A vector of cell-to-cluster assignments. This can be
from any source but must align with your cells / variables. There is no
check to ensure this when \code{indata} is not a \code{SingleCellExperiment} object.}
\item{funcSummarise}{A mathematical function used to summarise expression
per marker, per cluster.}
\item{col}{colorRampPalette to be used for shading low-to-high expression.}
\item{labCex}{cex (size) of the main plot labels.}
\item{legendPosition}{position of legend. Can be one of 'top', 'right', 'bottom', 'left'}
\item{legendCex}{cex (size) of the legend labels.}
\item{labDegree}{Rotation angle of the main plot labels.}
\item{verbose}{Boolean (TRUE / FALSE) to print messages to console or not.}
}
\value{
A \code{corrplot} object.
}
\description{
Find enriched markers per identified cluster and visualise these as a custom corrplot.
}
\details{
Find enriched markers per identified cluster and visualise these as a custom corrplot. \code{plotSignatures} first collapses your input data's expression profiles from the level of cells to the level of clusters based on a mathematical function specified by \code{funcSummarise}. It then centers and scales the data range to be between -1 and +1 for visualisation purposes.
}
\examples{
# create random data that follows a negative binomial
mat <- jitter(matrix(
MASS::rnegbin(rexp(1000, rate=.1), theta = 4.5),
ncol = 20))
colnames(mat) <- paste0('CD', 1:ncol(mat))
rownames(mat) <- paste0('cell', 1:nrow(mat))
u <- umap::umap(mat)$layout
colnames(u) <- c('UMAP1','UMAP2')
rownames(u) <- rownames(mat)
clus <- clusKNN(u)
plotSignatures(t(mat), clusterAssign = clus)
}
\author{
Kevin Blighe <kevin@clinicalbioinformatics.co.uk>
}
|
library(magrittr)
library(xml2)
library(lubridate)
library(data.table)
library(dplyr)
library(ggplot2)
library(RPostgreSQL)
library(utils)
## insert_report_data(con, missing_dates)
## Fetch the JHU CSSE COVID-19 daily-report CSVs for the given dates from
## GitHub, normalise their (historically inconsistent) column names and
## date formats, and append the rows to covid_data.report_data.
##
## Arguments:
##   con           - open RPostgreSQL connection to the COVID database
##   missing_dates - character vector of dates ("MM-DD-YYYY") whose report
##                   files have not yet been loaded
##
## Side effects: downloads from GitHub; writes to covid_data.report_data.
insert_report_data <- function(con, missing_dates){
  # Keep only the leading date token of a "date time TZ" style string
  trim <- function(x) return(tstrsplit(x, "\\s|[A-Z]", keep=1) %>% unlist)
  raw.path <- 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/'
  missing_dates <- paste0(missing_dates, '.csv')
  data.dt <- lapply(missing_dates, function(x, raw.path) {
    tmp.dt <- read.csv(url(paste0(raw.path, x)))
    # Strip the UTF-8 BOM artifact ("ï..") and unify header separators
    colnames(tmp.dt) <- colnames(tmp.dt) %>% gsub('ï..', '', .) %>% gsub('[.|\\/]', '_', .)
    tryCatch({
      tmp.dt$Last_Update <- tmp.dt$Last_Update %>% paste
      tmp.dt$Last_Update <- tmp.dt$Last_Update %>%
        trim %>%
        as.Date(tryFormats=c('%m/%d/%y','%m/%d/%Y','%Y-%m-%d'))
      # The CSVs are named by report date; trust the file name over the
      # (sometimes mislabeled) Last_Update column
      date.csv.name <- tstrsplit(x, '.csv', keep=1) %>% unlist %>%
        as.Date(tryFormats=c('%m/%d/%y','%m-%d-%Y','%Y-%m-%d'))
      if (any(tmp.dt$Last_Update != date.csv.name))
        tmp.dt$Last_Update <- date.csv.name
      return(tmp.dt)},
      warning = function(w) {
        message(paste('Warning! Check file:', x))
      },
      error = function(e) {
        message(paste('Error! Check file', x))
      }
    )
  }, raw.path) %>% rbindlist(fill=TRUE)
  data.dt$Last_Update <- data.dt$Last_Update %>% as.Date()
  # Older report vintages used "Lat"/"Long_" instead of
  # "Latitude"/"Longitude"; copy those into the canonical columns
  if(grep('^lat$',names(data.dt),ignore.case=TRUE,value=FALSE) %>% length > 0){
    data.dt$Latitude <- data.dt[[grep('^lat$',names(data.dt),ignore.case=TRUE,value=TRUE)]]
  }
  if(grep('^long_$',names(data.dt),ignore.case=TRUE,value=FALSE) %>% length > 0){
    data.dt$Longitude <- data.dt[[grep('^long_$',names(data.dt),ignore.case=TRUE,value=TRUE)]]
  }
  colnames(data.dt) <- colnames(data.dt) %>% tolower
  # Empty template carrying the remote table's schema (id is auto-generated)
  table.template <- dbGetQuery(con, 'SELECT * FROM covid_data.report_data') %>% data.table %>% .[0,]
  table.template <- table.template[,-'id']
  # BUG FIX: keep only the columns the remote table knows about.  The old
  # code used `-!(...)`, which turns the logical vector into 0/-1 numeric
  # indices and does not select the intended columns.
  data.dt <- data.dt[, colnames(data.dt) %in% colnames(table.template), with=FALSE]
  # BUG FIX: the rbind result was previously discarded; assign it so that
  # columns missing from this batch are added (as NA) in schema order.
  data.dt <- rbind(table.template, data.dt, fill=TRUE)
  tryCatch({
    dbWriteTable(con, c('covid_data','report_data'), data.dt, append=TRUE, row.names=FALSE)
  },
  error = function(e) {
    print('ERROR: Table not written')
    # BUG FIX: was `printe(e)`, an undefined function that itself errored
    # inside the error handler.
    print(e)
  })
}
#
# xml.path <- 'https://github.com/CSSEGISandData/COVID-19/tree/master/csse_covid_19_data/csse_covid_19_daily_reports'
# report.file <- download_html(xml.path)
# html.read <- read_html(report.file)
# csv.list <- xml_text(html.read) %>% strsplit(split='\\n') %>% unlist
#
# dates.list <- lapply(csv.list, function(x) {
# if (grepl(pattern='.csv', x, fixed=TRUE)) {
# regex <- regexpr('\\d{2}-\\d{2}-\\d{4}.csv', x)
# return(substr(x, start=regex[[1]], stop=regex[[1]]+attr(regex, 'match.length')))
# }
# }) %>% unlist
# dates.list <- dates.list %>% lapply(., function(x){gsub('*.csv$', '', x)}) %>% unlist
#
# # -------- Access the COVID-19 Database --------- #
# source(file.path(git.path,'Code/config_files/db_config.R'))
# con <- db_connect.fn()
# # ----------------------------------------------- #
# report_data <- dbGetQuery(con, 'SELECT last_update FROM covid_data.report_data')
# dbDisconnect(con)
#
# missing_dates <- setdiff(dates.list %>% paste, format(report_data$last_update, "%m-%d-%Y") %>% paste %>% unique)
# missing_dates <- dates.list[1:10]
#
# if(length(missing_dates) > 0){
# insert_report_data(con, missing_dates)
# }
| /scripts/R/session2/insert_report_data.R | no_license | H4estu/COVID19-Data-Exploration | R | false | false | 3,960 | r | library(magrittr)
library(xml2)
library(lubridate)
library(data.table)
library(dplyr)
library(ggplot2)
library(RPostgreSQL)
library(utils)
## insert_report_data(con, missing_dates)
## Fetch the JHU CSSE COVID-19 daily-report CSVs for the given dates from
## GitHub, normalise their (historically inconsistent) column names and
## date formats, and append the rows to covid_data.report_data.
##
## Arguments:
##   con           - open RPostgreSQL connection to the COVID database
##   missing_dates - character vector of dates ("MM-DD-YYYY") whose report
##                   files have not yet been loaded
##
## Side effects: downloads from GitHub; writes to covid_data.report_data.
insert_report_data <- function(con, missing_dates){
  # Keep only the leading date token of a "date time TZ" style string
  trim <- function(x) return(tstrsplit(x, "\\s|[A-Z]", keep=1) %>% unlist)
  raw.path <- 'https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_daily_reports/'
  missing_dates <- paste0(missing_dates, '.csv')
  data.dt <- lapply(missing_dates, function(x, raw.path) {
    tmp.dt <- read.csv(url(paste0(raw.path, x)))
    # Strip the UTF-8 BOM artifact ("ï..") and unify header separators
    colnames(tmp.dt) <- colnames(tmp.dt) %>% gsub('ï..', '', .) %>% gsub('[.|\\/]', '_', .)
    tryCatch({
      tmp.dt$Last_Update <- tmp.dt$Last_Update %>% paste
      tmp.dt$Last_Update <- tmp.dt$Last_Update %>%
        trim %>%
        as.Date(tryFormats=c('%m/%d/%y','%m/%d/%Y','%Y-%m-%d'))
      # The CSVs are named by report date; trust the file name over the
      # (sometimes mislabeled) Last_Update column
      date.csv.name <- tstrsplit(x, '.csv', keep=1) %>% unlist %>%
        as.Date(tryFormats=c('%m/%d/%y','%m-%d-%Y','%Y-%m-%d'))
      if (any(tmp.dt$Last_Update != date.csv.name))
        tmp.dt$Last_Update <- date.csv.name
      return(tmp.dt)},
      warning = function(w) {
        message(paste('Warning! Check file:', x))
      },
      error = function(e) {
        message(paste('Error! Check file', x))
      }
    )
  }, raw.path) %>% rbindlist(fill=TRUE)
  data.dt$Last_Update <- data.dt$Last_Update %>% as.Date()
  # Older report vintages used "Lat"/"Long_" instead of
  # "Latitude"/"Longitude"; copy those into the canonical columns
  if(grep('^lat$',names(data.dt),ignore.case=TRUE,value=FALSE) %>% length > 0){
    data.dt$Latitude <- data.dt[[grep('^lat$',names(data.dt),ignore.case=TRUE,value=TRUE)]]
  }
  if(grep('^long_$',names(data.dt),ignore.case=TRUE,value=FALSE) %>% length > 0){
    data.dt$Longitude <- data.dt[[grep('^long_$',names(data.dt),ignore.case=TRUE,value=TRUE)]]
  }
  colnames(data.dt) <- colnames(data.dt) %>% tolower
  # Empty template carrying the remote table's schema (id is auto-generated)
  table.template <- dbGetQuery(con, 'SELECT * FROM covid_data.report_data') %>% data.table %>% .[0,]
  table.template <- table.template[,-'id']
  # BUG FIX: keep only the columns the remote table knows about.  The old
  # code used `-!(...)`, which turns the logical vector into 0/-1 numeric
  # indices and does not select the intended columns.
  data.dt <- data.dt[, colnames(data.dt) %in% colnames(table.template), with=FALSE]
  # BUG FIX: the rbind result was previously discarded; assign it so that
  # columns missing from this batch are added (as NA) in schema order.
  data.dt <- rbind(table.template, data.dt, fill=TRUE)
  tryCatch({
    dbWriteTable(con, c('covid_data','report_data'), data.dt, append=TRUE, row.names=FALSE)
  },
  error = function(e) {
    print('ERROR: Table not written')
    # BUG FIX: was `printe(e)`, an undefined function that itself errored
    # inside the error handler.
    print(e)
  })
}
}
#
# xml.path <- 'https://github.com/CSSEGISandData/COVID-19/tree/master/csse_covid_19_data/csse_covid_19_daily_reports'
# report.file <- download_html(xml.path)
# html.read <- read_html(report.file)
# csv.list <- xml_text(html.read) %>% strsplit(split='\\n') %>% unlist
#
# dates.list <- lapply(csv.list, function(x) {
# if (grepl(pattern='.csv', x, fixed=TRUE)) {
# regex <- regexpr('\\d{2}-\\d{2}-\\d{4}.csv', x)
# return(substr(x, start=regex[[1]], stop=regex[[1]]+attr(regex, 'match.length')))
# }
# }) %>% unlist
# dates.list <- dates.list %>% lapply(., function(x){gsub('*.csv$', '', x)}) %>% unlist
#
# # -------- Access the COVID-19 Database --------- #
# source(file.path(git.path,'Code/config_files/db_config.R'))
# con <- db_connect.fn()
# # ----------------------------------------------- #
# report_data <- dbGetQuery(con, 'SELECT last_update FROM covid_data.report_data')
# dbDisconnect(con)
#
# missing_dates <- setdiff(dates.list %>% paste, format(report_data$last_update, "%m-%d-%Y") %>% paste %>% unique)
# missing_dates <- dates.list[1:10]
#
# if(length(missing_dates) > 0){
# insert_report_data(con, missing_dates)
# }
|
#' Retrieve the raw R code from a tidytext tutorial page
#'
#'
#' @name tidytext
#'
#' @usage tidytext(url)
#'
#' @param url Link to tidytext tutorial
#'
#' @return A character vector of length 1 containing the R code from the target url.
#'
#' @import dplyr jsonlite xml2
#' @importFrom rvest html_nodes html_text html_attr
#'
#' @export
#'
#' @examples
#' library(dplyr)
#' tidytext("https://www.tidytextmining.com/sentiment.html")
#'
#' # Same as above but provided to cat for easy viewing
#' tidytext("https://www.tidytextmining.com/sentiment.html") %>%
#' cat
#'
#'
# Scrape a rendered tidytext tutorial page and return every R source block
# (nodes matching "code.sourceCode.r") concatenated into one string,
# with blocks separated by newlines.
tidytext <- function(url) {
  page <- xml2::read_html(url)
  code_nodes <- html_nodes(page, "code.sourceCode.r")
  paste0(html_text(code_nodes), collapse = "\n")
}
#' @rdname tidytext
#' @export
tidytextmining <- tidytext
| /R/tidytext.R | no_license | krishnapsrinivasan/rawr | R | false | false | 772 | r | #' Retrieve raw R code tidytext tutorial
#'
#'
#' @name tidytext
#'
#' @usage tidytext(url)
#'
#' @param url Link to tidytext tutorial
#'
#' @return A character vector of length 1 containing the R code from the target url.
#'
#' @import dplyr jsonlite xml2
#' @importFrom rvest html_nodes html_text html_attr
#'
#' @export
#'
#' @examples
#' library(dplyr)
#' tidytext("https://www.tidytextmining.com/sentiment.html")
#'
#' # Same as above but provided to cat for easy viewing
#' tidytext("https://www.tidytextmining.com/sentiment.html") %>%
#' cat
#'
#'
tidytext <- function(url) {
url %>% xml2::read_html(.) %>%
html_nodes("code.sourceCode.r") %>%
html_text %>%
paste0(., collapse="\n")
}
#' @rdname tidytext
#' @export
tidytextmining <- tidytext
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BLUPGA.R
\name{blupga}
\alias{blupga}
\title{Basic BLUP|GA}
\usage{
blupga(G, Smat, phenodata, valset, verbose = T)
}
\arguments{
\item{G}{GRM constructed using all available SNPs and all samples Defaults to NULL. Use \code{\link{make_GRM}} to get this.}
\item{phenodata}{data frame with 2 or 3 columns. One col must be named 'ID' and contain sample IDs. Another col must be named 'y' and contain the phenotypes. If fixed effects are included then a 3rd col called 'FE' should contain the categorical effects.}
\item{valset}{numeric vector of indices that defines which rows in \code{phenodata} will be set to NA and used for cross-validation Defaults to NULL.}
\item{Smat}{Weighted G-matrix constructed using only selected SNPs and all samples. Defaults to NULL. Use \code{\link{make_weighted_GRM}} to get this.}
}
\value{
A data frame containing the correlation between the genetic value (GEBV) and the fixed-effects adjusted phenotype of the individuals in the \code{valset}.
Since BLUP|GA is run for each value of omega (W) from 0.0 to 1.0 in increments of 0.10, each row of the returned data frame
contains the cross-validation correlation at one value of omega (W). This allows the user to find the value of W
at which the predictive ability (COR) is maximised.
Columns:
\describe{
\item{W}{omega weighting for selected SNPS in candidate genes (0.0--1.0)}
\item{COR}{cross validation predictive ability (0.0--1.0)}
}
}
\description{
This function runs the BLUP|GA method where certain SNPs are weighted in the a special GRM (S) prior to GBLUP.
}
\examples{
# get example genotype matrix and phenotype data
data(M)
data(pheno)
# select some 'special' SNPs from M to be weighted
GAsnps <- sample(1:ncol(M), 20)
# generate random weights for the 'special' SNPs
wt <- runif(length(GAsnps), min = 1, max = 10)
# make a weighted GRM for the 'special' SNPs
S <- make_weighted_GRM(M[,GAsnps], wt)
# make a standard GRM for all SNPs
G <- make_GRM(M)
# choose a validation set of 20 random individuals
val <- sample(1:nrow(pheno), 20)
results <- blupga(G, S, pheno, val)
}
\keyword{GBLUP,BLUP|GA,SNP}
\keyword{selection}
| /man/blupga.Rd | no_license | DPCscience/BLUPGA | R | false | true | 2,212 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/BLUPGA.R
\name{blupga}
\alias{blupga}
\title{Basic BLUP|GA}
\usage{
blupga(G, Smat, phenodata, valset, verbose = T)
}
\arguments{
\item{G}{GRM constructed using all available SNPs and all samples Defaults to NULL. Use \code{\link{make_GRM}} to get this.}
\item{phenodata}{data frame with 2 or 3 columns. One col must be named 'ID' and contain sample IDs. Another col must be named 'y' and contain the phenotypes. If fixed effects are included then a 3rd col called 'FE' should contain the categorical effects.}
\item{valset}{numeric vector of indices that defines which rows in \code{phenodata} will be set to NA and used for cross-validation Defaults to NULL.}
\item{Smat}{Weighted G-matrix constructed using only selected SNPs and all samples. Defaults to NULL. Use \code{\link{make_weighted_GRM}} to get this.}
}
\value{
A data frame containing the correlation between the genetic value (GEBV) and the fixed-effects adjusted phenotype of the individuals in the \code{valset}.
Since BLUP|GA is run for each value of omega (W) from 0.0 to 1.0 in increments of 0.10, each row of the returned data frame
contains the cross-validation correlation at one value of omega (W). This allows the user to find the value of W
at which the predictive ability (COR) is maximised.
Columns:
\describe{
\item{W}{omega weighting for selected SNPS in candidate genes (0.0--1.0)}
\item{COR}{cross validation predictive ability (0.0--1.0)}
}
}
\description{
This function runs the BLUP|GA method where certain SNPs are weighted in the a special GRM (S) prior to GBLUP.
}
\examples{
# get example genotype matrix and phenotype data
data(M)
data(pheno)
# select some 'special' SNPs from M to be weighted
GAsnps <- sample(1:ncol(M), 20)
# generate random weights for the 'special' SNPs
wt <- runif(length(GAsnps), min = 1, max = 10)
# make a weighted GRM for the 'special' SNPs
S <- make_weighted_GRM(M[,GAsnps], wt)
# make a standard GRM for all SNPs
G <- make_GRM(M)
# choose a validation set of 20 random individuals
val <- sample(1:nrow(pheno), 20)
results <- blupga(G, S, pheno, val)
}
\keyword{GBLUP,BLUP|GA,SNP}
\keyword{selection}
|
# Fit a near-ridge elastic-net regression (alpha = 0.05) with 10-fold CV on
# the ReliefF-selected liver training set and log the fitted glmnet path.
library(glmnet)
# Load the training table; column 1 = response, columns 4+ = predictors.
# NOTE(review): columns 2-3 are skipped -- presumably IDs/metadata; confirm
# against the CSV schema.
mydata = read.table("./TrainingSet/ReliefF/liver.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
# Fix the CV fold assignment for reproducibility
set.seed(123)
# Gaussian elastic net: mean-squared-error CV criterion, no standardization
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.05,family="gaussian",standardize=FALSE)
# Append the fitted path summary (df, %dev, lambda per step) to the log file
sink('./Model/EN/ReliefF/liver/liver_022.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
| /Model/EN/ReliefF/liver/liver_022.R | no_license | leon1003/QSMART | R | false | false | 350 | r | library(glmnet)
mydata = read.table("./TrainingSet/ReliefF/liver.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.05,family="gaussian",standardize=FALSE)
sink('./Model/EN/ReliefF/liver/liver_022.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/csm_utilities.R
\name{exec_csm}
\alias{exec_csm}
\title{DSSAT45 execution command}
\usage{
exec_csm(
projdir,
csmdir,
rundir,
btype = "B",
csm = "DSCSM046.EXE",
bname = "R_DSSBatch.v45"
)
}
\arguments{
\item{projdir}{Your working directory}
\item{csmdir}{Master directory for DSSAT model}
\item{rundir}{CSM where model should execute (e.g. Maize)}
\item{btype}{Batch mode code ("B" normal batch (default), "S" spatial, etc)}
\item{csm}{CSM executable name, defaults to DSCSM046.EXE}
\item{bname}{Batch file name, default is R_DSSBatch.v45}
}
\value{
File_path for DSSAT
}
\description{
DSSAT45 execution command
}
| /man/exec_csm.Rd | no_license | ldemaz/rcropmod | R | false | true | 709 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/csm_utilities.R
\name{exec_csm}
\alias{exec_csm}
\title{DSSAT45 execution command}
\usage{
exec_csm(
projdir,
csmdir,
rundir,
btype = "B",
csm = "DSCSM046.EXE",
bname = "R_DSSBatch.v45"
)
}
\arguments{
\item{projdir}{Your working directory}
\item{csmdir}{Master directory for DSSAT model}
\item{rundir}{CSM where model should execute (e.g. Maize)}
\item{btype}{Batch mode code ("B" normal batch (default), "S" spatial, etc)}
\item{csm}{CSM executable name, defaults to DSCSM046.EXE}
\item{bname}{Batch file name, default is R_DSSBatch.v45}
}
\value{
File_path for DSSAT
}
\description{
DSSAT45 execution command
}
|
################library(rentrez)
## Aim: given a search topic, fetch all matching PubMed records in batches
## and save the parsed results (pmid/pmcid, title, abstract, journal, year,
## author list) for later tabulation.
ewrefxn::require0('rentrez')

## PubMed query restricted to title/abstract.
keyword <- '"epithelial mesenchymal transition"[title/abstract]'
db <- 'pubmed'

## Use NCBI's history server so the hit list can be paged through.
web_env_search <- entrez_search(db=db, term = keyword, use_history=TRUE)
##on 07/06/2017, there are 9990 results...
nHits <- web_env_search$count
cat('# of total paper found: ',nHits,"\n")
wh <- web_env_search$web_history

nfetch <- 1000 ## each time we fetch 1000 records
## Number of batches needed to cover all hits (ceiling division).  The
## original modulo-based branching gave a last-round batch size of 0 when
## nHits was an exact multiple of nfetch, silently dropping the final batch.
nround <- ceiling(nHits / nfetch)

res <- vector(mode='list')
## seq_len() (not 1:nround) correctly yields zero iterations when nround == 0.
for(i in seq_len(nround)) {
  cat(i,"\n")
  start <- (i-1)*nfetch ##note that retstart starts with ZERO
  ## Batch size: full nfetch except possibly for the final partial batch.
  rtmax <- min(nfetch, nHits - start)
  cat('fetch the resulti','\n')
  resi <- entrez_fetch(db=db, web_history=wh,
                       retstart=start, retmax=rtmax,rettype='xml')
  cat('parsing resulti','\n')
  resi_parsed <- parse_pubmed_xml(resi)
  res <- c(res, resi_parsed)
  Sys.sleep(5) ##sleep for a while -- be polite to the NCBI servers
}
save(res, file='EMT_paper_pubmed_07062017.rda')
##we got the results we want, but with one problem: there is no keyword information available in ncbi pubmed results....
##use abstract instead.
| /ncbiPubmed.r | no_license | htc502/whois | R | false | false | 1,445 | r | ################library(rentrez)
##aim: give a topic, output a table of articles relate with this topic, table form
## pmid/pmcid, title abstract, journal, year, author list
ewrefxn::require0('rentrez')
keyword <- '"epithelial mesenchymal transition"[title/abstract]'
db <- 'pubmed'
web_env_search <- entrez_search(db=db, term = keyword, use_history=TRUE)
##on 07/06/2017, there are 9990 results...
nHits = web_env_search$count
cat('# of total paper found: ',nHits,"\n")
wh <- web_env_search$web_history
nfetch=1000 ## each time we fetch 1000 records
if(nHits %% nfetch == 0) {
nround= nHits / nfetch } else {
nround = nHits %/% nfetch +1
}
res <- vector(mode='list')
for(i in 1:nround ) {
cat(i,"\n")
start <- (i-1)*nfetch + 1 - 1 ##note that retstart starts with ZERO
if(i == nround) {
rtmax <- nHits %% nfetch } else {
rtmax <- nfetch
}
cat('fetch the resulti','\n')
resi <- entrez_fetch(db=db, web_history=wh,
retstart=start, retmax=rtmax,rettype='xml')
cat('parsing resulti','\n')
resi_parsed <- parse_pubmed_xml(resi)
res <- c(res, resi_parsed)
Sys.sleep(5) ##sleep for a while
}
save(res, file='EMT_paper_pubmed_07062017.rda')
##we got the results we want, but with one problem: there is no keyword information available in ncbi pubmed results....
##use abstract instead.
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/render_graph_from_graph_series.R
\name{render_graph_from_graph_series}
\alias{render_graph_from_graph_series}
\title{Render a graph available in a series}
\usage{
render_graph_from_graph_series(
graph_series,
graph_no,
output = "graph",
width = NULL,
height = NULL
)
}
\arguments{
\item{graph_series}{A graph series object of type \code{dgr_graph_1D}.}
\item{graph_no}{The index of the graph in the graph series.}
\item{output}{A string specifying the output type; \code{graph} (the default)
renders the graph using the \code{\link[=grViz]{grViz()}} function and \code{visNetwork} renders the
graph using the \code{\link[=visnetwork]{visnetwork()}} function.}
\item{width}{An optional parameter for specifying the width of the resulting
graphic in pixels.}
\item{height}{An optional parameter for specifying the height of the
resulting graphic in pixels.}
}
\description{
Using a graph series object of type \code{dgr_graph_1D}, either render graph in
the Viewer or output in various formats.
}
\examples{
\dontrun{
# Create three graphs
graph_1 <-
create_graph() \%>\%
add_path(n = 4)
graph_2 <-
create_graph() \%>\%
add_cycle(n = 5)
graph_3 <-
create_graph() \%>\%
add_star(n = 6)
# Create an empty graph series
# and add the graphs
series <-
create_graph_series() \%>\%
add_graph_to_graph_series(
graph = graph_1) \%>\%
add_graph_to_graph_series(
graph = graph_2) \%>\%
add_graph_to_graph_series(
graph = graph_3)
# View the second graph in
# the series in the Viewer
render_graph_from_graph_series(
graph_series = series,
graph_no = 2)
}
}
\seealso{
Other Display and Save:
\code{\link{export_graph}()},
\code{\link{render_graph}()},
\code{\link{save_graph}()}
}
\concept{Display and Save}
| /man/render_graph_from_graph_series.Rd | permissive | rich-iannone/DiagrammeR | R | false | true | 1,828 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/render_graph_from_graph_series.R
\name{render_graph_from_graph_series}
\alias{render_graph_from_graph_series}
\title{Render a graph available in a series}
\usage{
render_graph_from_graph_series(
graph_series,
graph_no,
output = "graph",
width = NULL,
height = NULL
)
}
\arguments{
\item{graph_series}{A graph series object of type \code{dgr_graph_1D}.}
\item{graph_no}{The index of the graph in the graph series.}
\item{output}{A string specifying the output type; \code{graph} (the default)
renders the graph using the \code{\link[=grViz]{grViz()}} function and \code{visNetwork} renders the
graph using the \code{\link[=visnetwork]{visnetwork()}} function.}
\item{width}{An optional parameter for specifying the width of the resulting
graphic in pixels.}
\item{height}{An optional parameter for specifying the height of the
resulting graphic in pixels.}
}
\description{
Using a graph series object of type \code{dgr_graph_1D}, either render graph in
the Viewer or output in various formats.
}
\examples{
\dontrun{
# Create three graphs
graph_1 <-
create_graph() \%>\%
add_path(n = 4)
graph_2 <-
create_graph() \%>\%
add_cycle(n = 5)
graph_3 <-
create_graph() \%>\%
add_star(n = 6)
# Create an empty graph series
# and add the graphs
series <-
create_graph_series() \%>\%
add_graph_to_graph_series(
graph = graph_1) \%>\%
add_graph_to_graph_series(
graph = graph_2) \%>\%
add_graph_to_graph_series(
graph = graph_3)
# View the second graph in
# the series in the Viewer
render_graph_from_graph_series(
graph_series = series,
graph_no = 2)
}
}
\seealso{
Other Display and Save:
\code{\link{export_graph}()},
\code{\link{render_graph}()},
\code{\link{save_graph}()}
}
\concept{Display and Save}
|
# Demonstration of several equivalent (and one deliberately different) ways
# of building numeric sequences in R.
# NOTE(review): the name 'vector' shadows base::vector(); fine for a demo,
# but avoid it in real code.
vector <- -5:5                          # colon operator: integers -5 .. 5
vector
vector2 <- c(-5, 5)                     # c() concatenates: only TWO elements
vector2
vector3 <- c(-5:5)                      # c() wrapping a sequence; same as -5:5
vector3
vector4 <- seq(from=-5, to=5, by=1)     # seq() with unit step: same 11 integers
vector4
vector5 <- seq(from=-5, to=5, by=0.5)   # fractional step: 21 values
vector5
vector6 <- seq(15)                      # seq(n) shorthand for 1:15
vector6 | /work/r데이터분석_예제파일/예제/2_14.R | no_license | bass4th/R | R | false | false | 212 | r | vector <- -5:5
vector
vector2 <- c(-5, 5)
vector2
vector3 <- c(-5:5)
vector3
vector4 <- seq(from=-5, to=5, by=1)
vector4
vector5 <- seq(from=-5, to=5, by=0.5)
vector5
vector6 <- seq(15)
vector6 |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_4d_point_sd.R
\name{plot_4d_point_sd}
\alias{plot_4d_point_sd}
\title{Plot mean & error bars for 2-way ANOVAs with or without a blocking factor.}
\usage{
plot_4d_point_sd(
data,
xcol,
ycol,
points,
shapes,
facet,
ErrorType = "SD",
symsize = 3.5,
s_alpha = 1,
symshape = 22,
all_alpha = 0.3,
all_size = 2.5,
all_shape = 0,
all_jitter = 0,
ewid = 0.2,
group_wid = 0.8,
TextXAngle = 0,
LogYTrans,
LogYBreaks = waiver(),
LogYLabels = waiver(),
LogYLimits = NULL,
facet_scales = "fixed",
fontsize = 20,
symthick,
ethick,
ColPal = c("okabe_ito", "all_grafify", "bright", "contrast", "dark", "fishy", "kelly",
"light", "muted", "pale", "r4", "safe", "vibrant"),
ColSeq = TRUE,
ColRev = FALSE,
...
)
}
\arguments{
\item{data}{a data table, e.g. data.frame or tibble.}
\item{xcol}{name of the column with the variable to plot on X axis (will be converted to a factor/categorical variable).}
\item{ycol}{name of the column to plot on quantitative variable on the Y axis.}
\item{points}{name of the column with grouping within the factor plotted on X-axis (will be converted to a factor/categorical variable).}
\item{shapes}{name of the column that contains matched observations (e.g. subject IDs, experiment number) or another variable to pass on to symbol shapes (will be converted to a factor/categorical variable). If not provided, the shapes for all groups is the same, and can be changed with \code{all_shapes}, \code{all_alpha}, \code{all_size} etc.}
\item{facet}{add another variable from the data table to create faceted graphs using \code{ggplot2}\link{facet_wrap}.}
\item{ErrorType}{select the type of error bars to display. Default is "SD" (standard deviation). Other options are "SEM" (standard error of the mean) and "CI95" (95\% confidence interval based on t distributions).}
\item{symsize}{size of symbols, default set to 3.5.}
\item{s_alpha}{fractional opacity of symbols, default set to 1 (i.e. fully opaque).}
\item{symshape}{The mean is shown with symbol of the shape number 21 (default, filled circle). Pick a number between 0-25 to pick a different type of symbol from ggplot2.}
\item{all_alpha}{fractional opacity of all data points (default = 0.3).}
\item{all_size}{size of symbols of all data points, if shown (default = 2.5).}
\item{all_shape}{all data points are shown with symbols of the shape number 0 (default, open square). Pick a number between 0-25 to pick a different type of symbol from ggplot2. This argument only has an effect if \code{shapes} argument is used.}
\item{all_jitter}{reduce overlap of all data points, if shown, by setting a value between 0-1 (default = 0).}
\item{ewid}{width of error bars, default set to 0.2.}
\item{group_wid}{space between the factors along X-axis, i.e., dodge width. Default \code{group_wid = 0.8} (range 0-1), which can be set to 0 if you'd like the two plotted as \code{position = position_identity()}.}
\item{TextXAngle}{orientation of text on X-axis; default 0 degrees. Change to 45 or 90 to remove overlapping text.}
\item{LogYTrans}{transform Y axis into "log10" or "log2"}
\item{LogYBreaks}{argument for \code{ggplot2[scale_y_continuous]} for Y axis breaks on log scales, default is \code{waiver()}, or provide a vector of desired breaks.}
\item{LogYLabels}{argument for \code{ggplot2[scale_y_continuous]} for Y axis labels on log scales, default is \code{waiver()}, or provide a vector of desired labels.}
\item{LogYLimits}{a vector of length two specifying the range (minimum and maximum) of the Y axis.}
\item{facet_scales}{whether or not to fix scales on X & Y axes for all facet graphs. Can be \code{fixed} (default), \code{free}, \code{free_y} or \code{free_x} (for Y and X axis one at a time, respectively).}
\item{fontsize}{parameter of \code{base_size} of fonts in \code{theme_classic}, default set to size 20.}
\item{symthick}{size (in 'pt' units) of outline of symbol lines (\code{stroke}), default = \code{fontsize}/22.}
\item{ethick}{thickness of error bar lines; default \code{fontsize}/22.}
\item{ColPal}{grafify colour palette to apply, default "okabe_ito"; see \code{\link{graf_palettes}} for available palettes.}
\item{ColSeq}{logical TRUE or FALSE. Default TRUE for sequential colours from chosen palette. Set to FALSE for distant colours, which will be applied using \code{scale_fill_grafify2}.}
\item{ColRev}{whether to reverse order of colour within the selected palette, default F (FALSE); can be set to T (TRUE).}
\item{...}{any additional arguments to pass to \code{ggplot2}\link{stat_summary} or \code{ggplot2}\link{geom_point}.}
}
\value{
This function returns a \code{ggplot2} object of class "gg" and "ggplot".
}
\description{
There are 4 related functions for 2-way ANOVA type plots. In addition to a categorical variable along the X-axis, a grouping factor is passed to either \code{points}, \code{bars} or \code{boxes} argument in these functions. A blocking factor (or any other categorical variable) can be optionally passed to the \code{shapes} argument.
\enumerate{
\item \code{\link{plot_4d_point_sd}} (mean & SD, SEM or CI95 error bars)
\item \code{\link{plot_4d_scatterbar}} (bar & SD, SEM or CI95 error bars)
\item \code{\link{plot_4d_scatterbox}} (box & whiskers)
\item \code{\link{plot_4d_scatterviolin}} (box & whiskers, violin)
}
}
\details{
These can be especially useful when the 4th variable \code{shapes} is a random factor or blocking factor (up to 25 levels are allowed; there will be an error with more levels). The \code{shapes} argument can be left blank to plot ordinary 2-way ANOVAs without blocking.
In \code{plot_4d_point_sd} and \code{plot_4d_scatterbar}, the default error bar is SD (can be changed to SEM or CI95). In \code{plot_4d_point_sd}, a large coloured symbol is plotted at the mean, all other data are shown as smaller symbols. Boxplot uses \code{\link[ggplot2]{geom_boxplot}} to depict median (thicker line), box (interquartile range (IQR)) and the whiskers (1.5*IQR).
Colours can be changed using \code{ColPal}, \code{ColRev} or \code{ColSeq} arguments.
\code{ColPal} can be one of the following: "okabe_ito", "dark", "light", "bright", "pale", "vibrant, "muted" or "contrast".
\code{ColRev} (logical TRUE/FALSE) decides whether colours are chosen from first-to-last or last-to-first from within the chosen palette.
\code{ColSeq} (logical TRUE/FALSE) decides whether colours are picked by respecting the order in the palette or the most distant ones using \code{\link[grDevices]{colorRampPalette}}.
The resulting \code{ggplot2} graph can take additional geometries or other layers.
}
\examples{
#4d version for 2-way data with blocking
plot_4d_point_sd(data = data_2w_Tdeath,
xcol = Genotype,
ycol = PI,
points = Time,
shapes = Experiment)
#4d version without blocking factor
#`shapes` can be left blank
plot_4d_point_sd(data = data_2w_Festing,
xcol = Strain,
ycol = GST,
points = Treatment)
}
| /man/plot_4d_point_sd.Rd | no_license | ashenoy-cmbi/grafify | R | false | true | 7,026 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_4d_point_sd.R
\name{plot_4d_point_sd}
\alias{plot_4d_point_sd}
\title{Plot mean & error bars for 2-way ANOVAs with or without a blocking factor.}
\usage{
plot_4d_point_sd(
data,
xcol,
ycol,
points,
shapes,
facet,
ErrorType = "SD",
symsize = 3.5,
s_alpha = 1,
symshape = 22,
all_alpha = 0.3,
all_size = 2.5,
all_shape = 0,
all_jitter = 0,
ewid = 0.2,
group_wid = 0.8,
TextXAngle = 0,
LogYTrans,
LogYBreaks = waiver(),
LogYLabels = waiver(),
LogYLimits = NULL,
facet_scales = "fixed",
fontsize = 20,
symthick,
ethick,
ColPal = c("okabe_ito", "all_grafify", "bright", "contrast", "dark", "fishy", "kelly",
"light", "muted", "pale", "r4", "safe", "vibrant"),
ColSeq = TRUE,
ColRev = FALSE,
...
)
}
\arguments{
\item{data}{a data table, e.g. data.frame or tibble.}
\item{xcol}{name of the column with the variable to plot on X axis (will be converted to a factor/categorical variable).}
\item{ycol}{name of the column to plot on quantitative variable on the Y axis.}
\item{points}{name of the column with grouping within the factor plotted on X-axis (will be converted to a factor/categorical variable).}
\item{shapes}{name of the column that contains matched observations (e.g. subject IDs, experiment number) or another variable to pass on to symbol shapes (will be converted to a factor/categorical variable). If not provided, the shapes for all groups is the same, and can be changed with \code{all_shapes}, \code{all_alpha}, \code{all_size} etc.}
\item{facet}{add another variable from the data table to create faceted graphs using \code{ggplot2}\link{facet_wrap}.}
\item{ErrorType}{select the type of error bars to display. Default is "SD" (standard deviation). Other options are "SEM" (standard error of the mean) and "CI95" (95\% confidence interval based on t distributions).}
\item{symsize}{size of symbols, default set to 3.5.}
\item{s_alpha}{fractional opacity of symbols, default set to 1 (i.e. fully opaque).}
\item{symshape}{The mean is shown with symbol of the shape number 21 (default, filled circle). Pick a number between 0-25 to pick a different type of symbol from ggplot2.}
\item{all_alpha}{fractional opacity of all data points (default = 0.3).}
\item{all_size}{size of symbols of all data points, if shown (default = 2.5).}
\item{all_shape}{all data points are shown with symbols of the shape number 0 (default, open square). Pick a number between 0-25 to pick a different type of symbol from ggplot2. This argument only has an effect if \code{shapes} argument is used.}
\item{all_jitter}{reduce overlap of all data points, if shown, by setting a value between 0-1 (default = 0).}
\item{ewid}{width of error bars, default set to 0.2.}
\item{group_wid}{space between the factors along X-axis, i.e., dodge width. Default \code{group_wid = 0.8} (range 0-1), which can be set to 0 if you'd like the two plotted as \code{position = position_identity()}.}
\item{TextXAngle}{orientation of text on X-axis; default 0 degrees. Change to 45 or 90 to remove overlapping text.}
\item{LogYTrans}{transform Y axis into "log10" or "log2"}
\item{LogYBreaks}{argument for \code{ggplot2[scale_y_continuous]} for Y axis breaks on log scales, default is \code{waiver()}, or provide a vector of desired breaks.}
\item{LogYLabels}{argument for \code{ggplot2[scale_y_continuous]} for Y axis labels on log scales, default is \code{waiver()}, or provide a vector of desired labels.}
\item{LogYLimits}{a vector of length two specifying the range (minimum and maximum) of the Y axis.}
\item{facet_scales}{whether or not to fix scales on X & Y axes for all facet facet graphs. Can be \code{fixed} (default), \code{free}, \code{free_y} or \code{free_x} (for Y and X axis one at a time, respectively).}
\item{fontsize}{parameter of \code{base_size} of fonts in \code{theme_classic}, default set to size 20.}
\item{symthick}{size (in 'pt' units) of outline of symbol lines (\code{stroke}), default = \code{fontsize}/22.}
\item{ethick}{thickness of error bar lines; default \code{fontsize}/22.}
\item{ColPal}{grafify colour palette to apply, default "okabe_ito"; see \code{\link{graf_palettes}} for available palettes.}
\item{ColSeq}{logical TRUE or FALSE. Default TRUE for sequential colours from chosen palette. Set to FALSE for distant colours, which will be applied using \code{scale_fill_grafify2}.}
\item{ColRev}{whether to reverse order of colour within the selected palette, default F (FALSE); can be set to T (TRUE).}
\item{...}{any additional arguments to pass to \code{ggplot2}\link{stat_summary} or \code{ggplot2}\link{geom_point}.}
}
\value{
This function returns a \code{ggplot2} object of class "gg" and "ggplot".
}
\description{
There are 4 related functions for 2-way ANOVA type plots. In addition to a categorical variable along the X-axis, a grouping factor is passed to either \code{points}, \code{bars} or \code{boxes} argument in these functions. A blocking factor (or any other categorical variable) can be optionally passed to the \code{shapes} argument.
\enumerate{
\item \code{\link{plot_4d_point_sd}} (mean & SD, SEM or CI95 error bars)
\item \code{\link{plot_4d_scatterbar}} (bar & SD, SEM or CI95 error bars)
\item \code{\link{plot_4d_scatterbox}} (box & whiskers)
\item \code{\link{plot_4d_scatterviolin}} (box & whiskers, violin)
}
}
\details{
These can be especially useful when the 4th variable \code{shapes} is a random factor or blocking factor (upto 25 levels are allowed; there will be an error with more levels). The \code{shapes} argument can be left blank to plot ordinary 2-way ANOVAs without blocking.
In \code{plot_4d_point_sd} and \code{plot_4d_scatterbar}, the default error bar is SD (can be changed to SEM or CI95). In \code{plot_4d_point_sd}, a large coloured symbol is plotted at the mean, all other data are shown as smaller symbols. Boxplot uses \code{\link[ggplot2]{geom_boxplot}} to depict median (thicker line), box (interquartile range (IQR)) and the whiskers (1.5*IQR).
Colours can be changed using \code{ColPal}, \code{ColRev} or \code{ColSeq} arguments.
\code{ColPal} can be one of the following: "okabe_ito", "dark", "light", "bright", "pale", "vibrant, "muted" or "contrast".
\code{ColRev} (logical TRUE/FALSE) decides whether colours are chosen from first-to-last or last-to-first from within the chosen palette.
\code{ColSeq} (logical TRUE/FALSE) decides whether colours are picked by respecting the order in the palette or the most distant ones using \code{\link[grDevices]{colorRampPalette}}.
The resulting \code{ggplot2} graph can take additional geometries or other layers.
}
\examples{
#4d version for 2-way data with blocking
plot_4d_point_sd(data = data_2w_Tdeath,
xcol = Genotype,
ycol = PI,
points = Time,
shapes = Experiment)
#4d version without blocking factor
#`shapes` can be left blank
plot_4d_point_sd(data = data_2w_Festing,
xcol = Strain,
ycol = GST,
points = Treatment)
}
|
# Kruskal-Wallis rank-sum test: non-parametric alternative to one-way ANOVA.
# Compares the distribution of 'Nilai' (score) across the levels of
# 'Perlakuan' (treatment) without assuming normality.
library(readxl)
data_anova <- read_excel("C:/Users/HP PC/Downloads/1. Anova/data_anova.xlsx")
View(data_anova)
# Kruskal-Wallis test on score by treatment group
kruskal.test(Nilai ~ Perlakuan, data = data_anova)
| /Anova dan Kruskal Wallish/Kruskal_Wallish.R | no_license | Statistikawan/R-Studio-Tutorial | R | false | false | 180 | r | library(readxl)
data_anova <- read_excel("C:/Users/HP PC/Downloads/1. Anova/data_anova.xlsx")
View(data_anova)
#Kruskal Wallish
kruskal.test(Nilai ~ Perlakuan, data = data_anova)
|
## Plot 3: energy sub-metering over time (UCI household power consumption,
## 1-2 Feb 2007).  The raw download is pre-filtered with shell tools:
##   head -n 1 household_power_consumption.txt > feb2007data.txt
##   cat household_power_consumption.txt | grep '^0\{0,1\}[12]/0\{0,1\}2/2007' >> feb2007data.txt
## Run from the directory containing feb2007data.txt.
datafile <- "feb2007data.txt"
# 'TRUE' spelled out (T is reassignable); '?' marks missing readings.
# Named 'power' to avoid shadowing utils::data().
power <- read.table(datafile, sep = ";", header = TRUE, na.strings = "?")
# Combine Date and Time into a single timestamp for the x-axis.
power$Datetime <- strptime(paste(power$Date, power$Time), "%d/%m/%Y %H:%M:%S")

png('plot3.png')
plot(power$Datetime, power$Sub_metering_1, type = "l",
     xlab = "", ylab = "Energy sub metering")
# lines() is the idiomatic way to add series; the original used
# points(type = "l") and passed xlab/ylab args that low-level plotting
# ignores (with warnings).
lines(power$Datetime, power$Sub_metering_2, col = "red")
lines(power$Datetime, power$Sub_metering_3, col = "blue")
legend("topright", lty = 1, col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off()
dev.off() | /Plot3.R | no_license | mshanley/Data_Exploration | R | false | false | 1,063 | r | ##Using terminal window to limit data after download
##head -n 1 household_power_consumption.txt > feb2007data.txt cat household_power_consumption.txt | grep '^0\{0,1\}[12]/0\{0,1\}2/2007' >> feb2007data.txt
##cat household_power_consumption.txt | grep '^0\{0,1\}[12]/0\{0,1\}2/2007' >> feb2007data.txt
##Set Working Directory
datafile <- "feb2007data.txt"
data <- read.table(datafile, sep = ";", header = T, na.strings = "?")
data$Datetime <- strptime(paste(data$Date, data$Time), "%d/%m/%Y %H:%M:%S")
png('plot3.png')
plot(data$Datetime, data$Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering")
points(data$Datetime, data$Sub_metering_2, type = "l", xlab = "", ylab = "Energy sub metering",
col = "red")
points(data$Datetime, data$Sub_metering_3, type = "l", xlab = "", ylab = "Energy sub metering",
col = "blue")
legend("topright", lty = 1, col = c("black", "red", "blue"), legend = c("Sub_metering_1",
"Sub_metering_2", "Sub_metering_3"))
dev.off() |
# analyzeBehaviorspace is open-source software; you can redistribute it and/or
# modify it under the terms of the MIT License as published by the Open Source
# Initiative.
#
# analyzeBehaviorspace is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the MIT License for more details.
#
# You should have received a copy of the MIT License along with this program; if
# not, see <https://opensource.org/licenses/MIT>.
# This is the server logic for a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
library(shiny)
library(dplyr)
library(readr)
library(purrr)
library(tidyr)
library(stringr)
library(ggplot2)
library(shinyalert)
library(plotly)
library(analyzeBehaviorspace)
# Maximum upload size in megabytes for the BehaviorSpace .csv; can be
# overridden with options(analyzeBehaviorspace.maxFileSize = <MB>).
shiny_file_size <- getOption("analyzeBehaviorspace.maxFileSize", default = 300)
# Shiny expects the limit in bytes.
options(shiny.maxRequestSize = shiny_file_size * 1024^2)
# options(warn = 2)
shinyServer(function(input, output, session) {
snapshotPreprocessInput("table_state", function(value) {})
cdata <- session$clientData
expt_data <- reactiveValues(
data = NULL,
ind_vars = NULL,
dep_vars = NULL,
mapping = NULL
)
experiment <- reactive({
list(data = expt_data$data,
ind_vars = expt_data$ind_vars,
dep_vars = expt_data$dep_vars,
mapping = expt_data$mapping)
})
expt_vars <- reactive({
message("expt_vars")
vars <- analyzeBehaviorspace::get_expt_vars(experiment())
message("expt_vars = (", paste(vars$name, vars$col, sep = " = ",
collapse = ", "), ")")
vars
})
expt_yvars <- reactive({
message("expt_yvars")
vars <- analyzeBehaviorspace::get_yvars(experiment(), input$x_var)
message("expt_yvars = (", paste(vars$name, vars$col, sep = " = ",
collapse = ", "), ")")
vars
})
expt_group_vars <- reactive({
message("expt_group_vars")
vars <- analyzeBehaviorspace::get_group_vars(experiment(), input$x_var,
input$y_var)
message("expt_group_ vars = (", paste(vars$name, vars$col, sep = " = ",
collapse = ", "), ")")
vars
})
expt_plot_vars <- reactive({
message("expt_plot_vars")
vars <- analyzeBehaviorspace::get_plot_vars(experiment(), input$x_var,
input$y_var)
message("expt_plot_vars = (",
paste(vars$name, vars$col, sep = " = ", collapse = ", "), ")")
vars
})
bs_data <- reactive({
# input$file1 will be NULL initially. After the user selects
# and uploads a file, it will be a data frame with 'name',
# 'size', 'type', and 'datapath' columns. The 'datapath'
# column will contain the local filenames where the data can
# be found.
validate(
need(! is.null(input$file1),
"Please select a .csv file from a BehaviorSpace experiment.")
)
inFile <- input$file1
if (is.null(inFile)) return(NULL)
message("Reading input")
text <- read_file(inFile$datapath) %>%
str_replace_all("\r\n", "\n") %>%
str_replace_all("\r", "\n")
dat <- analyzeBehaviorspace::load_bs_file(text = text, quiet = FALSE)
message("returned from load_bs_file()")
message(" success = ", dat$success)
if (is.null(dat$data))
message(" data = NULL")
else
message(" data dimensions (", str_c(dim(dat$data), collapse = ", "),
")")
if (! dat$success) {
detail <- character(0)
text <-
"You must provide a .csv file containing the output of a NetLogo BehaviorSpace experiment in table format."
if (dat$cause == "not_bs") {
text <- "The file does not seem to be a BehaviorSpace experiment."
} else if (dat$cause == "spreadsheet") {
text <-
"The file seems to be a BehaviorSpace experiment in spreadsheet format.\nYou need to choose \"table\" format for the BehaviorSpace output."
}
shinyalert(title="Bad file format", text = text, type="error")
return(NULL)
}
invisible(list_modify(dat, success = zap(), cause = zap()))
})
observeEvent(bs_data(), {
message("New BehaviorSpace Data")
expt <- bs_data()
expt_data$data <- expt$data
expt_data$ind_vars <- expt$ind_vars
expt_data$dep_vars <- expt$dep_vars
expt_data$mapping <- expt$mapping
message("Experiment initialized")
updateSelectInput(session, "ren_from", "", selected = "")
updateSelectInput(session, "x_var", choices = "", selected = "")
updateSelectInput(session, "y_var", choices = "", selected = "")
updateSelectInput(session, "group_var", choices = "", selected = "")
})
observeEvent(expt_vars(), {
message("expt_vars changed")
xv <- input$x_var
rv <- input$ren_from
vars <- expt_vars() %>% {set_names(.$col, .$name)} %>% as.list()
if (! xv %in% vars) xv <- ''
updateSelectInput(session, "x_var", choices = vars, selected = xv)
message("Set x_var choices to (",
paste(names(vars), vars, sep = " = ", collapse=", "),
"), selection = ", xv)
rvars <- expt_vars()%>% {set_names(.$col, .$name)} %>% as.list()
if (! rv %in% rvars) rv <- ''
updateSelectInput(session, "ren_from", choices = rvars, selected = rv)
message("Set rename_from choices to (",
stringr::str_c(names(rvars), rvars, sep = " = ", collapse=", "),
"), selection = ", rv)
})
observeEvent(expt_yvars(), {
message("expt_yvars changed")
yv <- input$y_var
yvars <- expt_yvars() %>% {set_names(.$col, .$name)} %>% as.list()
if (! yv %in% yvars) yv <- ''
updateSelectInput(session, "y_var", choices = yvars, selected = yv)
message("Set y_var choices to (",
stringr::str_c(names(yvars), yvars, sep = " = ", collapse=", "),
"), selection = ", yv)
})
observeEvent(expt_group_vars(), {
message("expt_group_vars changed")
gv <- input$y_var
gvars <- expt_group_vars() %>% {set_names(.$col, .$name)} %>% as.list()
if (! gv %in% gvars) gv <- ''
updateSelectInput(session, "group_var", choices = gvars, selected = gv)
message("Set group_var choices to (",
stringr::str_c(names(gvars), gvars, sep = " = ", collapse=", "),
"), selection = ", gv)
})
observeEvent(input$rename, {
message("Rename")
mapping <- expt_data$mapping
ren_from <- input$ren_from
ren_to <- input$ren_to
vars <- expt_vars()
if (nrow(mapping) == 0 || is.null(vars)) return()
validate(
need(! (ren_to %in% filter(mapping, col != ren_from)$name),
paste("Variable name \"", ren_to, "\" already in use."))
)
mapping$name[mapping$col == ren_from] <- ren_to
rvars <- expt_vars()%>% {set_names(.$col, .$name)} %>% as.list()
if (! ren_from %in% rvars) ren_from <- ''
updateSelectInput(session, "ren_from", choices = rvars, selected = ren_from)
updateTextInput(session, "ren_to", value = "")
expt_data$mapping <- mapping
})
plot_data <- reactive({
message("plot_data")
data <- analyzeBehaviorspace::get_plot_data(experiment(), input$x_var,
input$y_var, input$group_var,
input$last_tick)
data
})
plot_mapping <- reactive({
message("plot_mapping")
plt_map <- analyzeBehaviorspace::get_plot_mapping(experiment(), plot_data(),
input$x_var, input$y_var,
input$group_var,
input$error_bars)
plt_map
})
makeplot <- reactive({
message("makeplot")
p <- analyzeBehaviorspace::make_plot(experiment(), input$points, input$lines,
input$x_var, input$y_var,
input$group_var, input$error_bars,
input$last_tick)
message("Done making plot")
p
})
maketable <- reactive({
message("making table")
tab_data <- expt_data$data
if (is.null(tab_data)) return(NULL)
new_names <- expt_data$mapping %>% {set_names(.$col, .$name)}
if (input$summary_tab) {
tab_data <- plot_data()
} else {
if (input$last_tick) {
# expt_data <- expt_data %>% extract_last_tick(expt_data$ind_vars)
tab_data <- tab_data %>% extract_last_tick()
}
}
new_names <- new_names %>% keep(~.x %in% names(expt_data)) %>% syms()
if (length(new_names) > 0) {
tab_data <- tab_data %>% rename(!!!new_names)
}
message("done making table")
return(tab_data)
})
output$plot <- renderPlotly({
p <- makeplot()
if (is.null(p))
return(NULL)
ggplotly(p, width = cdata$output_plot_width, height = cdata$output_plot_height)
})
output$table <- DT::renderDataTable(
maketable(),
server = TRUE, options = list(lengthChange = FALSE, bFilter = FALSE)
)
get_filename <- reactive({
if (is.null(input$file1)) return(NULL)
fname <- input$file1$name
message("Fixing up filename ", fname)
fname <- fname %>%
str_replace(regex("\\.csv$", ignore.case = TRUE), '') %>%
str_replace_all('[ .]+', '_')
message("Returning filename ", fname)
fname
})
output$save_plot <- downloadHandler(
filename <- function() {
mapping <- expt_data$mapping
if (is.null(mapping) || is.null(plot_data())) return()
fname <- get_filename()
suffix <- paste0('_', tx_col(input$x_var, mapping),
'_', tx_col(input$y_var, mapping))
if (input$group_var != '')
suffix <- paste0(suffix, '_', tx_col(input$group_var, mapping))
message("fname = ", fname, ", suffix = ", suffix)
suffix2 <- ''
if (input$points) suffix2 <- paste0(suffix2, 'p')
if (input$lines) suffix2 <- paste0(suffix2, 'l')
if (input$error_bars == 'error bars') suffix2 <- paste0(suffix2, 'e')
if (input$error_bars == 'bands') suffix2 <- paste0(suffix2, 'b')
if (input$last_tick) suffix2 <- paste0(suffix2, 't')
message("suffix2 = ", suffix2)
if (suffix2 != '') suffix <- paste0(suffix, '_', suffix2)
message("suffix = ", suffix)
fname <- paste0(fname, suffix, '.png')
fname
},
content = function(file) {
message("Saving plot to file ", file)
ggsave(filename = file, plot = makeplot(), device = "png",
width = 800 / 72, height = 600 / 72, dpi = 72, units = "in")
}
)
  # Download handler for the table as CSV.  The suffix records whether the
  # summary or the raw data was saved.
  output$save_table <- downloadHandler(
    filename = function() {
      if (is.null(expt_data$data)) return()
      if (input$summary_tab) {
        suffix <- 'summary'
      } else {
        suffix <- 'data'
      }
      # NOTE(review): no separator before the suffix, so this yields e.g.
      # "mydatasummary.csv" -- confirm whether "mydata_summary.csv" was meant.
      fname <- get_filename() %>% paste0(suffix, '.csv')
      fname
    },
    content = function(file1) {
      message("Writing to file ", file1)
      write.csv(maketable(), file1)
    }
  )
  # Shut the Shiny app down when the user presses the quit button.
  observeEvent(input$quit_button, {
    message("Quit pressed")
    stopApp()
  })
})
| /inst/abs_app/server.R | permissive | jonathan-g/analyzeBehaviorspace | R | false | false | 11,292 | r | # analyzeBehaviorspace is open-source software; you can redistribute it and/or
# modify it under the terms of the MIT License as published by the Open Source
# Initiative.
#
# analyzeBehaviorspace is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the MIT License for more details.
#
# You should have received a copy of the MIT License along with this program; if
# not, see <https://opensource.org/licenses/MIT>.
# This is the server logic for a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
library(shiny)
library(dplyr)
library(readr)
library(purrr)
library(tidyr)
library(stringr)
library(ggplot2)
library(shinyalert)
library(plotly)
library(analyzeBehaviorspace)
shiny_file_size <- getOption("analyzeBehaviorspace.maxFileSize", default = 300)
options(shiny.maxRequestSize = shiny_file_size * 1024^2)
# options(warn = 2)
shinyServer(function(input, output, session) {
snapshotPreprocessInput("table_state", function(value) {})
cdata <- session$clientData
expt_data <- reactiveValues(
data = NULL,
ind_vars = NULL,
dep_vars = NULL,
mapping = NULL
)
experiment <- reactive({
list(data = expt_data$data,
ind_vars = expt_data$ind_vars,
dep_vars = expt_data$dep_vars,
mapping = expt_data$mapping)
})
expt_vars <- reactive({
message("expt_vars")
vars <- analyzeBehaviorspace::get_expt_vars(experiment())
message("expt_vars = (", paste(vars$name, vars$col, sep = " = ",
collapse = ", "), ")")
vars
})
expt_yvars <- reactive({
message("expt_yvars")
vars <- analyzeBehaviorspace::get_yvars(experiment(), input$x_var)
message("expt_yvars = (", paste(vars$name, vars$col, sep = " = ",
collapse = ", "), ")")
vars
})
expt_group_vars <- reactive({
message("expt_group_vars")
vars <- analyzeBehaviorspace::get_group_vars(experiment(), input$x_var,
input$y_var)
message("expt_group_ vars = (", paste(vars$name, vars$col, sep = " = ",
collapse = ", "), ")")
vars
})
expt_plot_vars <- reactive({
message("expt_plot_vars")
vars <- analyzeBehaviorspace::get_plot_vars(experiment(), input$x_var,
input$y_var)
message("expt_plot_vars = (",
paste(vars$name, vars$col, sep = " = ", collapse = ", "), ")")
vars
})
  # Parse the uploaded BehaviorSpace .csv into list(data, ind_vars, dep_vars,
  # mapping); shows an alert and returns NULL on unusable files.
  bs_data <- reactive({
    # input$file1 will be NULL initially. After the user selects
    # and uploads a file, it will be a data frame with 'name',
    # 'size', 'type', and 'datapath' columns. The 'datapath'
    # column will contain the local filenames where the data can
    # be found.
    validate(
      need(! is.null(input$file1),
           "Please select a .csv file from a BehaviorSpace experiment.")
    )
    inFile <- input$file1
    if (is.null(inFile)) return(NULL)
    message("Reading input")
    # Normalize Windows (CRLF) and old-Mac (CR) line endings to LF.
    text <- read_file(inFile$datapath) %>%
      str_replace_all("\r\n", "\n") %>%
      str_replace_all("\r", "\n")
    dat <- analyzeBehaviorspace::load_bs_file(text = text, quiet = FALSE)
    message("returned from load_bs_file()")
    message(" success = ", dat$success)
    if (is.null(dat$data))
      message(" data = NULL")
    else
      message(" data dimensions (", str_c(dim(dat$data), collapse = ", "),
              ")")
    if (! dat$success) {
      # Choose an error message matching the failure cause reported by the
      # loader, alert the user, and bail out.
      detail <- character(0)
      text <-
        "You must provide a .csv file containing the output of a NetLogo BehaviorSpace experiment in table format."
      if (dat$cause == "not_bs") {
        text <- "The file does not seem to be a BehaviorSpace experiment."
      } else if (dat$cause == "spreadsheet") {
        text <-
          "The file seems to be a BehaviorSpace experiment in spreadsheet format.\nYou need to choose \"table\" format for the BehaviorSpace output."
      }
      shinyalert(title="Bad file format", text = text, type="error")
      return(NULL)
    }
    # Drop the bookkeeping fields before handing the result to callers.
    invisible(list_modify(dat, success = zap(), cause = zap()))
  })
  # When a new file has been parsed, install it as the current experiment
  # and reset every variable selector back to "no selection".
  observeEvent(bs_data(), {
    message("New BehaviorSpace Data")
    expt <- bs_data()
    expt_data$data <- expt$data
    expt_data$ind_vars <- expt$ind_vars
    expt_data$dep_vars <- expt$dep_vars
    expt_data$mapping <- expt$mapping
    message("Experiment initialized")
    updateSelectInput(session, "ren_from", "", selected = "")
    updateSelectInput(session, "x_var", choices = "", selected = "")
    updateSelectInput(session, "y_var", choices = "", selected = "")
    updateSelectInput(session, "group_var", choices = "", selected = "")
  })
  # Keep the x-variable and rename-source selectors in sync with the set of
  # experiment variables, preserving selections that are still valid.
  observeEvent(expt_vars(), {
    message("expt_vars changed")
    xv <- input$x_var
    rv <- input$ren_from
    # Named list: display name -> column name, as selectInput expects.
    vars <- expt_vars() %>% {set_names(.$col, .$name)} %>% as.list()
    if (! xv %in% vars) xv <- ''
    updateSelectInput(session, "x_var", choices = vars, selected = xv)
    message("Set x_var choices to (",
            paste(names(vars), vars, sep = " = ", collapse=", "),
            "), selection = ", xv)
    rvars <- expt_vars()%>% {set_names(.$col, .$name)} %>% as.list()
    if (! rv %in% rvars) rv <- ''
    updateSelectInput(session, "ren_from", choices = rvars, selected = rv)
    message("Set rename_from choices to (",
            stringr::str_c(names(rvars), rvars, sep = " = ", collapse=", "),
            "), selection = ", rv)
  })
  # Keep the y-variable selector in sync with the candidate y variables,
  # preserving the current selection when it is still available.
  observeEvent(expt_yvars(), {
    message("expt_yvars changed")
    yv <- input$y_var
    yvars <- expt_yvars() %>% {set_names(.$col, .$name)} %>% as.list()
    if (! yv %in% yvars) yv <- ''
    updateSelectInput(session, "y_var", choices = yvars, selected = yv)
    message("Set y_var choices to (",
            stringr::str_c(names(yvars), yvars, sep = " = ", collapse=", "),
            "), selection = ", yv)
  })
observeEvent(expt_group_vars(), {
message("expt_group_vars changed")
gv <- input$y_var
gvars <- expt_group_vars() %>% {set_names(.$col, .$name)} %>% as.list()
if (! gv %in% gvars) gv <- ''
updateSelectInput(session, "group_var", choices = gvars, selected = gv)
message("Set group_var choices to (",
stringr::str_c(names(gvars), gvars, sep = " = ", collapse=", "),
"), selection = ", gv)
})
  # Apply a user-requested rename: update the display-name mapping after
  # checking the new name is not already taken by a different column.
  observeEvent(input$rename, {
    message("Rename")
    mapping <- expt_data$mapping
    ren_from <- input$ren_from
    ren_to <- input$ren_to
    vars <- expt_vars()
    if (nrow(mapping) == 0 || is.null(vars)) return()
    validate(
      need(! (ren_to %in% filter(mapping, col != ren_from)$name),
           paste("Variable name \"", ren_to, "\" already in use."))
    )
    mapping$name[mapping$col == ren_from] <- ren_to
    # Refresh the rename-source selector with the updated display names.
    rvars <- expt_vars()%>% {set_names(.$col, .$name)} %>% as.list()
    if (! ren_from %in% rvars) ren_from <- ''
    updateSelectInput(session, "ren_from", choices = rvars, selected = ren_from)
    updateTextInput(session, "ren_to", value = "")
    expt_data$mapping <- mapping
  })
plot_data <- reactive({
message("plot_data")
data <- analyzeBehaviorspace::get_plot_data(experiment(), input$x_var,
input$y_var, input$group_var,
input$last_tick)
data
})
plot_mapping <- reactive({
message("plot_mapping")
plt_map <- analyzeBehaviorspace::get_plot_mapping(experiment(), plot_data(),
input$x_var, input$y_var,
input$group_var,
input$error_bars)
plt_map
})
makeplot <- reactive({
message("makeplot")
p <- analyzeBehaviorspace::make_plot(experiment(), input$points, input$lines,
input$x_var, input$y_var,
input$group_var, input$error_bars,
input$last_tick)
message("Done making plot")
p
})
maketable <- reactive({
message("making table")
tab_data <- expt_data$data
if (is.null(tab_data)) return(NULL)
new_names <- expt_data$mapping %>% {set_names(.$col, .$name)}
if (input$summary_tab) {
tab_data <- plot_data()
} else {
if (input$last_tick) {
# expt_data <- expt_data %>% extract_last_tick(expt_data$ind_vars)
tab_data <- tab_data %>% extract_last_tick()
}
}
new_names <- new_names %>% keep(~.x %in% names(expt_data)) %>% syms()
if (length(new_names) > 0) {
tab_data <- tab_data %>% rename(!!!new_names)
}
message("done making table")
return(tab_data)
})
output$plot <- renderPlotly({
p <- makeplot()
if (is.null(p))
return(NULL)
ggplotly(p, width = cdata$output_plot_width, height = cdata$output_plot_height)
})
  # Server-side data table of the current (summary or raw) table; page-length
  # control and the search box are disabled.
  output$table <- DT::renderDataTable(
    maketable(),
    server = TRUE, options = list(lengthChange = FALSE, bFilter = FALSE)
  )
get_filename <- reactive({
if (is.null(input$file1)) return(NULL)
fname <- input$file1$name
message("Fixing up filename ", fname)
fname <- fname %>%
str_replace(regex("\\.csv$", ignore.case = TRUE), '') %>%
str_replace_all('[ .]+', '_')
message("Returning filename ", fname)
fname
})
output$save_plot <- downloadHandler(
filename <- function() {
mapping <- expt_data$mapping
if (is.null(mapping) || is.null(plot_data())) return()
fname <- get_filename()
suffix <- paste0('_', tx_col(input$x_var, mapping),
'_', tx_col(input$y_var, mapping))
if (input$group_var != '')
suffix <- paste0(suffix, '_', tx_col(input$group_var, mapping))
message("fname = ", fname, ", suffix = ", suffix)
suffix2 <- ''
if (input$points) suffix2 <- paste0(suffix2, 'p')
if (input$lines) suffix2 <- paste0(suffix2, 'l')
if (input$error_bars == 'error bars') suffix2 <- paste0(suffix2, 'e')
if (input$error_bars == 'bands') suffix2 <- paste0(suffix2, 'b')
if (input$last_tick) suffix2 <- paste0(suffix2, 't')
message("suffix2 = ", suffix2)
if (suffix2 != '') suffix <- paste0(suffix, '_', suffix2)
message("suffix = ", suffix)
fname <- paste0(fname, suffix, '.png')
fname
},
content = function(file) {
message("Saving plot to file ", file)
ggsave(filename = file, plot = makeplot(), device = "png",
width = 800 / 72, height = 600 / 72, dpi = 72, units = "in")
}
)
  # Download handler for the table as CSV.  The suffix records whether the
  # summary or the raw data was saved.
  output$save_table <- downloadHandler(
    filename = function() {
      if (is.null(expt_data$data)) return()
      if (input$summary_tab) {
        suffix <- 'summary'
      } else {
        suffix <- 'data'
      }
      # NOTE(review): no separator before the suffix, so this yields e.g.
      # "mydatasummary.csv" -- confirm whether "mydata_summary.csv" was meant.
      fname <- get_filename() %>% paste0(suffix, '.csv')
      fname
    },
    content = function(file1) {
      message("Writing to file ", file1)
      write.csv(maketable(), file1)
    }
  )
  # Shut the Shiny app down when the user presses the quit button.
  observeEvent(input$quit_button, {
    message("Quit pressed")
    stopApp()
  })
})
|
\name{unmaskAndGetSampleBatch}
\alias{unmaskAndGetSampleBatch}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Batch function that allows the user to unmask a number of variables as
well as to sample from the estimated joint density function.
Serves as a wrapper for the first and second functions the End-User
calls to obtain the samples from the
marginal distributions of the unmasked data.
}
\description{
Use the sample-moment-based density approximant method to estimate the
density function
of univariate distributions based on noise multiplied data.
Afterwards if the Data Provider supplies all the means, standard deviations and the correlation matrix of the original data then these can be used as arguments. Otherwise, these are calculated using the mean of noise sample and the mean of the vector created by squaring each element of the noise sample.
A sample of the chosen size is then simulated from the estimated joint density function.
}
\usage{
unmaskAndGetSampleBatch(listOfMaskedVectorsToBeUnmasked,
listOfNoisefiles,
mu, s, rho_X,
cores = 1, size,
verbose = -1,
onlyUnmasked = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{listOfMaskedVectorsToBeUnmasked}{
list of masked vectors. The masked data were generated by R Function mask, or maskBatch.
}
\item{listOfNoisefiles}{
Noise files containing a sample of the noise used to mask the vectors in listOfMaskedVectorsToBeUnmasked from R function mask, or maskBatch
}
\item{mu}{
List of means of unmasked vectors - if not supplied will be estimated
}
\item{s}{
List of standard deviations of unmasked vectors - if not supplied will be estimated
}
\item{rho_X}{
Correlation matrix of unmasked vectors - if not supplied will be estimated
}
\item{cores}{
Passed to mclapply
}
\item{size}{
Passed to actualPosition
}
\item{verbose}{
If greater than 0 output is printed to tell the user at what stage the function is in, is also passed to many internal functions and will give more detailed output from them if it is greater than 1
}
\item{onlyUnmasked}{
If true then only the output from unmask is returned. Effectively this makes the function "unmaskBatch" - so to speak. False by default.
}
}
\details{
unmask is fully described in Lin and Fielding (2015). The theory used to support unmask can
be found in Lin (2014). unmask implements the sample-moment-based density approximant method
to estimate the smoothed density function of the original data from their masked data
maskedVectorToBeUnmasked.
The output of the function unmask is a set of sample data from the estimated smoothed density
function.
Using this function is the equivalent of calling unmask once for each variable and then calling getSampleBasedOnUnmaskedData
}
\value{
If onlyUnmasked is false then returns a list with two elements, both of which are lists.
\item{unmaskedOutputs }{list containing the three (four if categorical) outputs from the function unmask namely: unmaskedVariable, meanOfNoise, meanOfSquaredNoise, prob}
\item{getSampleOutputs }{list containing the output from getSampleBasedOnUnmaskedData which is a list containing vectors corresponding to samples for each variable }
If onlyUnmasked is true then returns a list with only one element:
\item{unmaskedOutputs }{list containing the three (four if categorical) outputs from the function unmask namely: unmaskedVariable, meanOfNoise, meanOfSquaredNoise, prob}
}
\references{
Lin, Yan-Xia (2014). Density approximant based on noise multiplied data. In J. Domingo-Ferrer (Eds.), Privacy in Statistical Databases 2014,
LNCS 8744, Springer International Publishing Switzerland, 2014, pp. 89-104.
Lin, Yan-Xia and Fielding, Mark James (2015). MaskDensity14: An R Package for the Density Approximant of a Univariate
Based on Noise Multiplied Data, SoftwareX 34, 3743, doi:10.1016/j.softx.2015.11.002
}
\author{
Luke Mazur
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
##outputNL1 <- unmaskAndGetSampleBatch(listOfMaskedVectorsToBeUnmasked = ##list(ystar1, ystar2),
## listOfNoisefiles =
## (file.path(tempdir(),"noise1.bin"),file.path(tempdir(),"noise2.bin")),
## cores = 1, size = 1000,
## verbose = 2)
## not a real example because ultimately in order to demonstrate this
## function the entire package functionality must be demonstrated
## this is demonstrated in the package example
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
| /man/unmaskAndGetSampleBatch.Rd | no_license | cran/MaskJointDensity | R | false | false | 4,940 | rd | \name{unmaskAndGetSampleBatch}
\alias{unmaskAndGetSampleBatch}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Batch function that allows the user to unmask a number of variables as
well as to sample from the estimated joint density function.
Serves as a wrapper for the first and second functions the End-User
calls to obtain the samples from the
marginal distributions of the unmasked data.
}
\description{
Use the sample-moment-based density approximant method to estimate the
density function
of univariate distributions based noise multiplied data.
Afterwards if the Data Provider supplies all the means, standard deviations and the correlation matrix of the original data then these can be used as arguments. Otherwise, these are calculated using the mean of noise sample and the mean of the vector created by squaring each element of the noise sample.
A sample of the chosen size is then simulated from the estimated joint density function.
}
\usage{
unmaskAndGetSampleBatch(listOfMaskedVectorsToBeUnmasked,
listOfNoisefiles,
mu, s, rho_X,
cores = 1, size,
verbose = -1,
onlyUnmasked = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{listOfMaskedVectorsToBeUnmasked}{
list of masked vectors. The masked data were generated by R Function mask, or maskBatch.
}
\item{listOfNoisefiles}{
Noise files containing a sample of the noise used to mask the vectors in listOfMaskedVectorsToBeUnmasked from R function mask, or maskBatch
}
\item{mu}{
List of means of unmasked vectors - if not supplied will be estimated
}
\item{s}{
List of standard deviations of unmasked vectors - if not supplied will be estimated
}
\item{rho_X}{
Correlation matrix of unmasked vectors - if not supplied will be estimated
}
\item{cores}{
Passed to mclapply
}
\item{size}{
Passed to actualPosition
}
\item{verbose}{
If greater than 0 output is printed to tell the user at what stage the function is in, is also passed to many internal functions and will give more detailed output from them if it is greater than 1
}
\item{onlyUnmasked}{
If true then only the output from unmask is returned. Effectively this makes the function "unmaskBatch" - so to speak. False by default.
}
}
\details{
unmask is fully described in Lin and Fielding (2015). The theory used to support unmask can
be found in Lin (2014). unmask implements the sample-moment-based density approximant method
to estimate the smoothed density function of the original data from their masked data
maskedVectorToBeUnmasked.
The output of the function unmask is a set of sample data from the estimated smoothed density
function.
Using this function is the equivalent of calling unmask once for each variable and then calling getSampleBasedOnUnmaskedData
}
\value{
If onlyUnmasked is false then returns a list with two elements, both of which are lists.
\item{unmaskedOutputs }{list containing the three (four if categorical) outputs from the function unmask namely: unmaskedVariable, meanOfNoise, meanOfSquaredNoise, prob}
\item{getSampleOutputs }{list containing the output from getSampleBasedOnUnmaskedData which is a list containing vectors corresponding to samples for each variable }
If onlyUnmasked is true then returns a list with only one element:
\item{unmaskedOutputs }{list containing the three (four if categorical) outputs from the function unmask namely: unmaskedVariable, meanOfNoise, meanOfSquaredNoise, prob}
}
\references{
Lin, Yan-Xia (2014). Density approximant based on noise multiplied data. In J. Domingo-Ferrer (Eds.), Privacy in Statistical Databases 2014,
LNCS 8744, Springer International Publishing Switzerland, 2014, pp. 89-104.
Lin, Yan-Xia and Fielding, Mark James (2015). MaskDensity14: An R Package for the Density Approximant of a Univariate
Based on Noise Multiplied Data, SoftwareX 34, 3743, doi:10.1016/j.softx.2015.11.002
}
\author{
Luke Mazur
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
##outputNL1 <- unmaskAndGetSampleBatch(listOfMaskedVectorsToBeUnmasked = ##list(ystar1, ystar2),
## listOfNoisefiles =
## (file.path(tempdir(),"noise1.bin"),file.path(tempdir(),"noise2.bin")),
## cores = 1, size = 1000,
## verbose = 2)
## not a real example because ultimately in order to demonstrate this
## function the entire package functionality must be demonstrated
## this is demonstrated in the package example
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DMMongoDB.R
\docType{package}
\name{DMMongoDB}
\alias{DMMongoDB}
\title{DMMongoDB: A package to access DataMuster}
\description{
The DMMongoDB package provides important functions to more easily access and process DataMuster data. This package is designed to help researchers,
data analysts and DataMuster partners that want to access data and want to run their own analyses or data presentation.
}
\section{DMMongoDB functions}{
The dmaccess function helps store a username and password to enable easy database access.
The get_dailywts function enables access to daily weights for individual cattle or groups of cattle from the Automated Livestock Management Systems.
The get_weeklywts function enables access to weekly weights for individual cattle or groups of cattle from the Automated Livestock Management Systems.
The get_cattle function identifies the IDs of cattle from individual or groups of cattle properties and filters for individual paddocks.
}
\author{
Dave Swain \email{d.swain@cqu.edu.au}, Lauren O'Connor \email{l.r.oconnor@cqu.edu.au}, and Anita Chang \email{a.chang@cqu.edu.au}
}
| /man/DMMongoDB.Rd | no_license | PrecisionLivestockManagement/DMMongoDB | R | false | true | 1,157 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DMMongoDB.R
\docType{package}
\name{DMMongoDB}
\alias{DMMongoDB}
\title{DMMongoDB: A package to access DataMuster}
\description{
The DMMongoDB package provides important functions to more easily access and process DataMuster data. This package is designed to help researchers,
data analysts and DataMuster partners that want to access data and want to run their own analyses or data presentation.
}
\section{DMMongoDB functions}{
The dmaccess function helps store a username and password to enable easy database access.
The get_dailywts function enables access to daily weights for individual cattle or groups of cattle from the Automated Livestock Management Systems.
The get_weeklywts function enables access to weekly weights for individual cattle or groups of cattle from the Automated Livestock Management Systems.
The get_cattle function identifies the IDs of cattle from individual or groups of cattle properties and filters for individual paddocks.
}
\author{
Dave Swain \email{d.swain@cqu.edu.au}, Lauren O'Connor \email{l.r.oconnor@cqu.edu.au}, and Anita Chang \email{a.chang@cqu.edu.au}
}
|
#' Día 3: Polígonos
#' Canciones más escuchadas por país en América
#'
#' 30 Day map Challenge (Noviembre, 2021)
#' México
#'
#' Por:
#' Karen Santoyo
#' David Garibay
# Importar librerías ------------------------------------------------------
# install.packages("sf")
# install.packages("leaflet")
# install.packages("dplyr")
# install.packages("mapview")
# install.packages("RColorBrewer")
# webshot::install_phantomjs()
library(sf) # para leer mapas
library(leaflet) # para crear mapas
library(dplyr)
library(mapview) # para guardar leaflet en png o jpg
library(RColorBrewer)
# Importar datos ----------------------------------------------------------
## Base con nombres de canciones por país ----
#' Esta base fue creada manualmente a partir de la información de:
#' https://spotifycharts.com/regional
#' Filtramos por país, a nivel semanal, con fecha 10/28/2021
df_america <- read.csv("data/américa_final.csv")
View(df_america)
## Shapefile a nivel país, América ----
#' Este mapa fue obtenido de
#' https://tapiquen-sig.jimdofree.com/english-version/free-downloads/americas/
#' En QGIS se le removieron los países que no pertenecen a América
#' para quedarnos con un total de 35 países
shp_america <- st_read("data/Americas_35.shp") %>%
arrange(COUNTRY)
# Renombramos columna de países
shp_america$COUNTRY <- c('Antigua and Barbuda', 'Argentina', 'Bahamas',
'Barbados', 'Belice', 'Bolivia', 'Brasil', 'Canadá',
'Chile', 'Colombia', 'Costa Rica', 'Cuba', 'Dominica',
'República Dominicana', 'Ecuador', 'El Salvador',
'Granada', 'Guatemala', 'Guyana', 'Haití', 'Honduras',
'Jamaica', 'México', 'Nicaragua', 'Panamá', 'Paraguay',
'Perú', 'San Cristóbal y Nieves', 'Santa Lucía',
'San Vincente y las Granadinas', 'Surinam',
'Trinidad y Tobago', 'Estados Unidos',
'Uruguay', 'Venezuela')
# Data wrangling ----------------------------------------------------------
## Join the two data sets (left join keeps all 35 countries) ----
shp_datos <- merge(x = shp_america,
                   y = df_america,
                   by.x = "COUNTRY",
                   by.y = "País",
                   all.x = TRUE)
unique(shp_datos$Género)
sort(table(shp_datos$Género))
## Turn the genre column into a categorical variable (factor) ----
shp_datos$Género <- factor(x = shp_datos$Género,
                           levels = c("Pop", "Reguetón",
                                      "Urbano", "Hip-Hop",
                                      "Forró"))
unique(shp_datos$Género)
# Map creation -------------------------------------------------------------
## Color palette ----
# palette_rev <- rev(brewer.pal(8, "Dark2")) # To reverse the palette order
pal_fac <- colorFactor(palette = "Spectral",
                       domain = shp_datos$Género)
## Pop-up ----
# These are the messages shown when a country polygon is clicked.
popup <- paste0(
  "<b>", "País: ", "</b>", as.character(shp_datos$COUNTRY), "<br>",
  "<b>", "Canción: ", "</b>", as.character(shp_datos$Canción), "<br>",
  "<b>", "Artista: ", "</b>", as.character(shp_datos$Artista), "<br>",
  "<b>", "Género: ", "</b>", as.character(shp_datos$Género), "<br>")
## Leaflet (interactive) ----
# Interactive map; based on Juvenal Campos' tutorial:
# https://juvenalcampos.com/2020/01/13/tutorial-de-mapas-en-leaflet/
lft_genero <- leaflet(data = shp_datos,
                      options = leafletOptions(zoomControl = FALSE)) %>%
  addProviderTiles("Stamen.Watercolor") %>%
  addPolygons(color = "#444444", # Border color
              fillColor = ~pal_fac(shp_datos$Género), # Country fill
              layerId = ~shp_datos$COUNTRY,
              opacity = 1, # Border opacity
              smoothFactor = 0.5,
              weight = 1, # Border width
              fillOpacity = 0.7,
              highlightOptions = highlightOptions(color = "white",
                                                  weight = 2,
                                                  bringToFront = TRUE),
              label = ~shp_datos$Género,
              labelOptions = labelOptions(direction = "auto"),
              popup = popup) %>%
  addLegend(position = "bottomleft",
            pal = pal_fac,
            values = ~shp_datos$Género,
            title = "Géneros más oídos en Spotify <br> última semana de Octubre (2021)")
# Save the map ---------------------------------------------------
## As PNG ----
# https://r-spatial.github.io/mapview/reference/mapshot.html
mapshot(lft_genero, file = "figs/mapa_genero.png")
## As HTML ----
htmlwidgets::saveWidget(lft_genero, "figs/mapa_genero.html")
| /03_Spotify/R/02 - mapa_genero.R | no_license | du-gartre/30_Day_Map_Challenge_2021 | R | false | false | 4,891 | r | #' Día 3: Polígonos
#' Canciones más escuchadas por país en América
#'
#' 30 Day map Challenge (Noviembre, 2021)
#' México
#'
#' Por:
#' Karen Santoyo
#' David Garibay
# Importar librerías ------------------------------------------------------
# install.packages("sf")
# install.packages("leaflet")
# install.packages("dplyr")
# install.packages("mapview")
# install.packages("RColorBrewer")
# webshot::install_phantomjs()
library(sf) # para leer mapas
library(leaflet) # para crear mapas
library(dplyr)
library(mapview) # para guardar leaflet en png o jpg
library(RColorBrewer)
# Importar datos ----------------------------------------------------------
## Base con nombres de canciones por país ----
#' Esta base fue creada manualmente a partir de la información de:
#' https://spotifycharts.com/regional
#' Filtramos por país, a nivel semanal, con fecha 10/28/2021
df_america <- read.csv("data/américa_final.csv")
View(df_america)
## Shapefile a nivel país, América ----
#' Este mapa fue obtenido de
#' https://tapiquen-sig.jimdofree.com/english-version/free-downloads/americas/
#' En QGIS se le removieron los países que no pertenecen a América
#' para quedarnos con un total de 35 países
shp_america <- st_read("data/Americas_35.shp") %>%
arrange(COUNTRY)
# Renombramos columna de países
shp_america$COUNTRY <- c('Antigua and Barbuda', 'Argentina', 'Bahamas',
'Barbados', 'Belice', 'Bolivia', 'Brasil', 'Canadá',
'Chile', 'Colombia', 'Costa Rica', 'Cuba', 'Dominica',
'República Dominicana', 'Ecuador', 'El Salvador',
'Granada', 'Guatemala', 'Guyana', 'Haití', 'Honduras',
'Jamaica', 'México', 'Nicaragua', 'Panamá', 'Paraguay',
'Perú', 'San Cristóbal y Nieves', 'Santa Lucía',
'San Vincente y las Granadinas', 'Surinam',
'Trinidad y Tobago', 'Estados Unidos',
'Uruguay', 'Venezuela')
# Data wrangling ----------------------------------------------------------
## Join the two data sets (left join keeps all 35 countries) ----
shp_datos <- merge(x = shp_america,
                   y = df_america,
                   by.x = "COUNTRY",
                   by.y = "País",
                   all.x = TRUE)
unique(shp_datos$Género)
sort(table(shp_datos$Género))
## Turn the genre column into a categorical variable (factor) ----
shp_datos$Género <- factor(x = shp_datos$Género,
                           levels = c("Pop", "Reguetón",
                                      "Urbano", "Hip-Hop",
                                      "Forró"))
unique(shp_datos$Género)
# Map creation -------------------------------------------------------------
## Color palette ----
# palette_rev <- rev(brewer.pal(8, "Dark2")) # To reverse the palette order
pal_fac <- colorFactor(palette = "Spectral",
                       domain = shp_datos$Género)
## Pop-up ----
# These are the messages shown when a country polygon is clicked.
popup <- paste0(
  "<b>", "País: ", "</b>", as.character(shp_datos$COUNTRY), "<br>",
  "<b>", "Canción: ", "</b>", as.character(shp_datos$Canción), "<br>",
  "<b>", "Artista: ", "</b>", as.character(shp_datos$Artista), "<br>",
  "<b>", "Género: ", "</b>", as.character(shp_datos$Género), "<br>")
## Leaflet (interactive) ----
# Interactive map; based on Juvenal Campos' tutorial:
# https://juvenalcampos.com/2020/01/13/tutorial-de-mapas-en-leaflet/
lft_genero <- leaflet(data = shp_datos,
                      options = leafletOptions(zoomControl = FALSE)) %>%
  addProviderTiles("Stamen.Watercolor") %>%
  addPolygons(color = "#444444", # Border color
              fillColor = ~pal_fac(shp_datos$Género), # Country fill
              layerId = ~shp_datos$COUNTRY,
              opacity = 1, # Border opacity
              smoothFactor = 0.5,
              weight = 1, # Border width
              fillOpacity = 0.7,
              highlightOptions = highlightOptions(color = "white",
                                                  weight = 2,
                                                  bringToFront = TRUE),
              label = ~shp_datos$Género,
              labelOptions = labelOptions(direction = "auto"),
              popup = popup) %>%
  addLegend(position = "bottomleft",
            pal = pal_fac,
            values = ~shp_datos$Género,
            title = "Géneros más oídos en Spotify <br> última semana de Octubre (2021)")
# Save the map ---------------------------------------------------
## As PNG ----
# https://r-spatial.github.io/mapview/reference/mapshot.html
mapshot(lft_genero, file = "figs/mapa_genero.png")
## As HTML ----
htmlwidgets::saveWidget(lft_genero, "figs/mapa_genero.html")
|
library(car)
library(shiny)
library(choroplethr)
library(choroplethrZip)
library(dplyr)
## Define Manhattan's neighborhoods.
## man.nbhd[1] is the catch-all "all neighborhoods" option; zip.nbhd[[i]]
## holds the ZIP codes for man.nbhd[i + 1] (see the +1 indexing used in the
## server logic below when filtering by zip.nbhd[[as.numeric(input$nbhd)]]).
man.nbhd=c("all neighborhoods", "Central Harlem", "Chelsea and Clinton",
           "East Harlem", "Gramercy Park and Murray Hill",
           "Greenwich Village and Soho", "Lower Manhattan",
           "Lower East Side", "Upper East Side", "Upper West Side",
           "Inwood and Washington Heights")
## Sized to length(man.nbhd) (11), but only elements 1..10 are assigned
## below, so the last element keeps its integer placeholder value.
zip.nbhd=as.list(1:length(man.nbhd))
zip.nbhd[[1]]=as.character(c(10026, 10027, 10030, 10037, 10039))  # Central Harlem
zip.nbhd[[2]]=as.character(c(10001, 10011, 10018, 10019, 10020))  # Chelsea and Clinton
# NOTE(review): 10036 (Midtown West) appears in the East Harlem list;
# verify it should not be grouped with Chelsea and Clinton instead.
zip.nbhd[[3]]=as.character(c(10036, 10029, 10035))  # East Harlem
zip.nbhd[[4]]=as.character(c(10010, 10016, 10017, 10022))  # Gramercy Park and Murray Hill
zip.nbhd[[5]]=as.character(c(10012, 10013, 10014))  # Greenwich Village and Soho
zip.nbhd[[6]]=as.character(c(10004, 10005, 10006, 10007, 10038, 10280))  # Lower Manhattan
zip.nbhd[[7]]=as.character(c(10002, 10003, 10009))  # Lower East Side
zip.nbhd[[8]]=as.character(c(10021, 10028, 10044, 10065, 10075, 10128))  # Upper East Side
zip.nbhd[[9]]=as.character(c(10023, 10024, 10025))  # Upper West Side
zip.nbhd[[10]]=as.character(c(10031, 10032, 10033, 10034, 10040))  # Inwood and Washington Heights
## Load housing data (objects saved in the project's output directory).
load("../output/count.RData")
load("../output/mh2009use.RData")
# Define server logic required to draw a histogram
# Shiny server: two render targets (summary plots + ZIP choropleth), both
# filtered to the neighborhood selected via input$nbhd (0 = all).
shinyServer(function(input, output) {
## Neighborhood name shown in the UI header.
output$text = renderText({"Selected:"})
output$text1 = renderText({
# input$nbhd is 0-based ("all" = 0), man.nbhd is 1-based, hence the +1.
paste("{ ", man.nbhd[as.numeric(input$nbhd)+1], " }")
})
## Panel 1: summary plots of time trends, unit price and full price of sales.
output$distPlot <- renderPlot({
## First filter data for selected neighborhood (0 means keep everything).
mh2009.sel=mh2009.use
if(input$nbhd>0){
mh2009.sel=mh2009.use%>%
filter(region %in% zip.nbhd[[as.numeric(input$nbhd)]])
}
## Monthly sale counts.
## NOTE(review): table() drops empty categories; if sale.month is not a
## 12-level factor, month.v can be shorter than 12 and the plot against
## 1:12 below would fail -- confirm sale.month is a factor with 12 levels.
month.v=as.vector(table(mh2009.sel$sale.month))
## Price: unit (per sq. ft.) and full, summarized per building type.
## The left_join onto these four codes guarantees one row per type (with
## NAs for types absent from the selection), matching the 4 labels below.
type.price=data.frame(bldg.type=c("10", "13", "25", "28"))
type.price.sel=mh2009.sel%>%
group_by(bldg.type)%>%
summarise(
price.mean=mean(sale.price, na.rm=T),
price.median=median(sale.price, na.rm=T),
unit.mean=mean(unit.price, na.rm=T),
unit.median=median(unit.price, na.rm=T),
sale.n=n()
)
type.price=left_join(type.price, type.price.sel, by="bldg.type")
## Making the plots: panel 1 on top (full width), panels 2 and 3 below.
layout(matrix(c(1,1,1,1,2,2,3,3,2,2,3,3), 3, 4, byrow=T))
par(cex.axis=1.3, cex.lab=1.5,
font.axis=2, font.lab=2, col.axis="dark gray", bty="n")
### Sales monthly counts.
plot(1:12, month.v, xlab="Months", ylab="Total sales",
type="b", pch=21, col="black", bg="red",
cex=2, lwd=2, ylim=c(0, max(month.v,na.rm=T)*1.05))
### Price per square foot: mean (red) vs median (blue) per building type,
### connected by a segment. Labels are drawn at y = 1.5..4.5 while the
### points sit at y = 1..4 (labels appear above their row of points).
plot(c(0, max(type.price[,c(4,5)], na.rm=T)),
c(0,5),
xlab="Price per square foot", ylab="",
bty="l", type="n")
text(rep(0, 4), 1:4+0.5, paste(c("coops", "condos", "luxury hotels", "comm. condos"),
type.price$sale.n, sep=": "), adj=0, cex=1.5)
points(type.price$unit.mean, 1:nrow(type.price), pch=16, col=2, cex=2)
points(type.price$unit.median, 1:nrow(type.price), pch=16, col=4, cex=2)
segments(type.price$unit.mean, 1:nrow(type.price),
type.price$unit.median, 1:nrow(type.price),
lwd=2)
### Full sale price: same layout as the unit-price panel.
plot(c(0, max(type.price[,-1], na.rm=T)),
c(0,5),
xlab="Sales Price", ylab="",
bty="l", type="n")
text(rep(0, 4), 1:4+0.5, paste(c("coops", "condos", "luxury hotels", "comm. condos"),
type.price$sale.n, sep=": "), adj=0, cex=1.5)
points(type.price$price.mean, 1:nrow(type.price), pch=16, col=2, cex=2)
points(type.price$price.median, 1:nrow(type.price), pch=16, col=4, cex=2)
segments(type.price$price.mean, 1:nrow(type.price),
type.price$price.median, 1:nrow(type.price),
lwd=2)
})
## Panel 2: map of sales distribution.
output$distPlot1 <- renderPlot({
count.df.sel=count.df
if(input$nbhd>0){
count.df.sel=count.df%>%
filter(region %in% zip.nbhd[[as.numeric(input$nbhd)]])
}
# Make the ZIP-level choropleth for the selected neighborhoods
# (36061 is the FIPS code used here to zoom to New York County/Manhattan).
zip_choropleth(count.df.sel,
title = "2009 Manhattan housing sales",
legend = "Number of sales",
county_zoom = 36061)
})
}) | /app/server.R | no_license | TZstatsADS/Fall2016-Proj2-grp13 | R | false | false | 4,510 | r | library(car)
library(shiny)
library(choroplethr)
library(choroplethrZip)
library(dplyr)
## Define Manhattan's neighborhood
man.nbhd=c("all neighborhoods", "Central Harlem", "Chelsea and Clinton",
"East Harlem", "Gramercy Park and Murray Hill",
"Greenwich Village and Soho", "Lower Manhattan",
"Lower East Side", "Upper East Side", "Upper West Side",
"Inwood and Washington Heights")
zip.nbhd=as.list(1:length(man.nbhd))
zip.nbhd[[1]]=as.character(c(10026, 10027, 10030, 10037, 10039))
zip.nbhd[[2]]=as.character(c(10001, 10011, 10018, 10019, 10020))
zip.nbhd[[3]]=as.character(c(10036, 10029, 10035))
zip.nbhd[[4]]=as.character(c(10010, 10016, 10017, 10022))
zip.nbhd[[5]]=as.character(c(10012, 10013, 10014))
zip.nbhd[[6]]=as.character(c(10004, 10005, 10006, 10007, 10038, 10280))
zip.nbhd[[7]]=as.character(c(10002, 10003, 10009))
zip.nbhd[[8]]=as.character(c(10021, 10028, 10044, 10065, 10075, 10128))
zip.nbhd[[9]]=as.character(c(10023, 10024, 10025))
zip.nbhd[[10]]=as.character(c(10031, 10032, 10033, 10034, 10040))
## Load housing data
load("../output/count.RData")
load("../output/mh2009use.RData")
# Define server logic required to draw a histogram
shinyServer(function(input, output) {
## Neighborhood name
output$text = renderText({"Selected:"})
output$text1 = renderText({
paste("{ ", man.nbhd[as.numeric(input$nbhd)+1], " }")
})
## Panel 1: summary plots of time trends, unit price and full price of sales.
output$distPlot <- renderPlot({
## First filter data for selected neighborhood
mh2009.sel=mh2009.use
if(input$nbhd>0){
mh2009.sel=mh2009.use%>%
filter(region %in% zip.nbhd[[as.numeric(input$nbhd)]])
}
## Monthly counts
month.v=as.vector(table(mh2009.sel$sale.month))
## Price: unit (per sq. ft.) and full
type.price=data.frame(bldg.type=c("10", "13", "25", "28"))
type.price.sel=mh2009.sel%>%
group_by(bldg.type)%>%
summarise(
price.mean=mean(sale.price, na.rm=T),
price.median=median(sale.price, na.rm=T),
unit.mean=mean(unit.price, na.rm=T),
unit.median=median(unit.price, na.rm=T),
sale.n=n()
)
type.price=left_join(type.price, type.price.sel, by="bldg.type")
## Making the plots
layout(matrix(c(1,1,1,1,2,2,3,3,2,2,3,3), 3, 4, byrow=T))
par(cex.axis=1.3, cex.lab=1.5,
font.axis=2, font.lab=2, col.axis="dark gray", bty="n")
### Sales monthly counts
plot(1:12, month.v, xlab="Months", ylab="Total sales",
type="b", pch=21, col="black", bg="red",
cex=2, lwd=2, ylim=c(0, max(month.v,na.rm=T)*1.05))
### Price per square foot
plot(c(0, max(type.price[,c(4,5)], na.rm=T)),
c(0,5),
xlab="Price per square foot", ylab="",
bty="l", type="n")
text(rep(0, 4), 1:4+0.5, paste(c("coops", "condos", "luxury hotels", "comm. condos"),
type.price$sale.n, sep=": "), adj=0, cex=1.5)
points(type.price$unit.mean, 1:nrow(type.price), pch=16, col=2, cex=2)
points(type.price$unit.median, 1:nrow(type.price), pch=16, col=4, cex=2)
segments(type.price$unit.mean, 1:nrow(type.price),
type.price$unit.median, 1:nrow(type.price),
lwd=2)
### full price
plot(c(0, max(type.price[,-1], na.rm=T)),
c(0,5),
xlab="Sales Price", ylab="",
bty="l", type="n")
text(rep(0, 4), 1:4+0.5, paste(c("coops", "condos", "luxury hotels", "comm. condos"),
type.price$sale.n, sep=": "), adj=0, cex=1.5)
points(type.price$price.mean, 1:nrow(type.price), pch=16, col=2, cex=2)
points(type.price$price.median, 1:nrow(type.price), pch=16, col=4, cex=2)
segments(type.price$price.mean, 1:nrow(type.price),
type.price$price.median, 1:nrow(type.price),
lwd=2)
})
## Panel 2: map of sales distribution
output$distPlot1 <- renderPlot({
count.df.sel=count.df
if(input$nbhd>0){
count.df.sel=count.df%>%
filter(region %in% zip.nbhd[[as.numeric(input$nbhd)]])
}
# make the map for selected neighhoods
zip_choropleth(count.df.sel,
title = "2009 Manhattan housing sales",
legend = "Number of sales",
county_zoom = 36061)
})
}) |
/man/tuyauxorgue.Rd | no_license | cran/LeLogicielR | R | false | false | 1,407 | rd | ||
# Coursera "Getting and Cleaning Data" quiz 3 worksheet.
# NOTE(review): machine-specific absolute path; this script only runs as-is on
# the original author's machine.
setwd('C:\\Users\\fzhan\\Documents\\GitHub\\Data Science Coursera\\data cleaning\\class 3')
# Q1: households with >= 10 acres (ACR == 3) that sold >= $10k of
# agricultural products (AGS == 6).
if (!file.exists('housing.csv')){download.file('https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Fss06hid.csv','housing.csv')}
data=read.csv('housing.csv')
agricultureLogical<-(data$ACR==3) & (data$AGS==6)
q1 <- data[which(agricultureLogical),]
head(q1)
# Q2: quantiles of the raw pixel values of a JPEG image.
library(jpeg)
# readJPEG can only read a local file, so the image must be downloaded first;
# NOTE(review): no download.file() call for this file exists in this script.
d2 <- readJPEG('getdata%2Fjeff.jpg',native = TRUE)
quantile(d2,probs = 0.3)
quantile(d2,probs = 0.8)
# Q3: merge GDP ranking with country education data, then inspect the
# 13th-largest GDP after sorting descending.
if (!file.exists('gdp.csv')){download.file('https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FGDP.csv','gdp.csv')}
if (!file.exists('education.csv')){download.file('https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FEDSTATS_Country.csv','education.csv')}
g=read.csv('gdp.csv',na.strings='')
# Rows 5:194 skip the header junk and keep the 190 ranked countries.
g=g[5:194,]
e=read.csv('education.csv',na.strings='')
m=merge(g,e,by.x = 'X',by.y = 'CountryCode',all=FALSE)
m$Gross.domestic.product.2012=as.numeric(as.character(m$Gross.domestic.product.2012))
m_ordered=m[order(m$Gross.domestic.product.2012,decreasing = TRUE),]
m_ordered[13,4]
# Q4: average GDP ranking by income group.
library(dplyr)
m%>%
group_by(Income.Group)%>%
summarize(mean(Gross.domestic.product.2012)) %>%
print
# Q5: cut GDP ranking into quintiles and cross-tabulate with income group.
library(Hmisc)
m$group<-cut2(m$Gross.domestic.product.2012,g=5)
table(m$Income.Group,m$group)
| /data cleaning/class 3/quiz3.R | no_license | luojijiaren/Data-Science-Coursera | R | false | false | 1,324 | r | setwd('C:\\Users\\fzhan\\Documents\\GitHub\\Data Science Coursera\\data cleaning\\class 3')
#Q1.
if (!file.exists('housing.csv')){download.file('https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2Fss06hid.csv','housing.csv')}
data=read.csv('housing.csv')
agricultureLogical<-(data$ACR==3) & (data$AGS==6)
q1 <- data[which(agricultureLogical),]
head(q1)
#Q2
library(jpeg)
d2 <- readJPEG('getdata%2Fjeff.jpg',native = TRUE) #can just read system file, so download firstly!
quantile(d2,probs = 0.3)
quantile(d2,probs = 0.8)
#Q3
if (!file.exists('gdp.csv')){download.file('https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FGDP.csv','gdp.csv')}
if (!file.exists('education.csv')){download.file('https://d396qusza40orc.cloudfront.net/getdata%2Fdata%2FEDSTATS_Country.csv','education.csv')}
g=read.csv('gdp.csv',na.strings='')
g=g[5:194,]
e=read.csv('education.csv',na.strings='')
m=merge(g,e,by.x = 'X',by.y = 'CountryCode',all=FALSE)
m$Gross.domestic.product.2012=as.numeric(as.character(m$Gross.domestic.product.2012))
m_ordered=m[order(m$Gross.domestic.product.2012,decreasing = TRUE),]
m_ordered[13,4]
#Q4
library(dplyr)
m%>%
group_by(Income.Group)%>%
summarize(mean(Gross.domestic.product.2012)) %>%
print
#Q5
library(Hmisc)
m$group<-cut2(m$Gross.domestic.product.2012,g=5)
table(m$Income.Group,m$group)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/diffusivity.R
\name{hydraulic_diffusivity}
\alias{hydraulic_diffusivity}
\alias{rigid_hydraulic_diffusivity}
\title{Calculate hydraulic diffusivity of a fluid saturated medium}
\usage{
hydraulic_diffusivity(k., eta., mu., B., nu. = 1/4, nuu. = 1/3)
rigid_hydraulic_diffusivity(k., eta., phi., Beta.)
}
\arguments{
\item{k.}{numeric; the permeability}
\item{eta.}{numeric; presumably the fluid dynamic viscosity (placeholder ``XXX'' in the original --- verify against the implementation)}
\item{mu.}{numeric; the shear modulus in Pascals}
\item{B.}{numeric; presumably Skempton's pore-pressure coefficient (placeholder in the original --- verify)}
\item{nu.}{numeric; the drained Poisson's ratio (typically 1/4)}
\item{nuu.}{numeric; the 'undrained' Poisson's ratio (typically 1/3)}
\item{phi.}{numeric; presumably the porosity (placeholder in the original --- verify)}
\item{Beta.}{numeric; presumably the fluid (pore) compressibility (placeholder in the original --- verify)}
}
\description{
Calculate hydraulic diffusivity of a fluid saturated medium
}
| /man/hydraulic_diffusivity.Rd | no_license | abarbour/deform | R | false | true | 819 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/diffusivity.R
\name{hydraulic_diffusivity}
\alias{hydraulic_diffusivity}
\alias{rigid_hydraulic_diffusivity}
\title{Calculate hydraulic diffusivity of a fluid saturated medium}
\usage{
hydraulic_diffusivity(k., eta., mu., B., nu. = 1/4, nuu. = 1/3)
rigid_hydraulic_diffusivity(k., eta., phi., Beta.)
}
\arguments{
\item{k.}{numeric; the permeability}
\item{eta.}{numeric; the XXX}
\item{mu.}{numeric; the shear modulus in Pascals}
\item{B.}{numeric; the xxx}
\item{nu.}{numeric; the drained Poisson's ratio (typically 1/4)}
\item{nuu.}{numeric; the 'undrained' Poisson's ratio (typically 1/3)}
\item{phi.}{numeric; the XXX}
\item{Beta.}{numeric; the XXX}
}
\description{
Calculate hydraulic diffusivity of a fluid saturated medium
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/utils.R
\name{\%|\%}
\alias{\%|\%}
\title{Maybe do monad}
\usage{
f \%|\% g
}
\arguments{
\item{f}{left hand function}
\item{g}{right hand function}
}
\description{
Use this operator to evaluate functions from left to right, stopping early
and returning NA if any function fails or returns NA.
}
| /man/grapes-or-grapes.Rd | permissive | dslaw/r-stuff | R | false | false | 373 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/utils.R
\name{\%|\%}
\alias{\%|\%}
\title{Maybe do monad}
\usage{
f \%|\% g
}
\arguments{
\item{f}{left hand function}
\item{g}{right hand function}
}
\description{
Use operator to evaluate functions from left to right, breaking and
returning NA if any function fails or returns NA.
}
|
rawdata <- read.csv("New York State Children in Foster Care annually.csv")
devtools::install_github("UrbanInstitute/urbnmapr")
devtools::install_github("UI-Research/urbnthemes")
install.packages('devtools')
install.packages('ggplot2')
install.packages('scales')
library(scales)
library(tidyverse)
library(urbnthemes)
library(urbnmapr)
library(ggplot2)
#----------------------------------------------------------------------------------------------------
#Functions
trim <- function (x) gsub("^\\s+|\\s+$", "", x)
#----------------------------------------------------------------------------------------------------
#Test given data
test1 <- countydata
test2 <- counties
#----------------------------------------------------------------------------------------------------
#Clean data to get correct "county_name" and only consider year "2017"
q <- which(rawdata$Year == "2017")
cleandat <- rawdata[q,]
cleandat$County <- tolower(cleandat$County)
cleandat$County <- trim(cleandat$County)
colnames(cleandat)[1] <- "county_name"
q <- which(counties$state_name == "New York")
NY_counties <- counties[q,]
NY_counties$county_name <- gsub("County", "", NY_counties$county_name)
NY_counties$county_name <- tolower(NY_counties$county_name)
NY_counties$county_name <- trim(NY_counties$county_name)
#Join the data and the counties data
my_data <- left_join(cleandat, NY_counties, by = "county_name")
#----------------------------------------------------------------------------------------------------
#Create New York State Visualization
cleandat %>%
left_join(NY_counties, by = "county_name") %>%
filter(state_name =="New York") %>%
ggplot(mapping = aes(long, lat, group = group, fill = Number.of.Children.Served)) +
geom_polygon(color = "#ffffff", size = .25) +
scale_fill_gradientn(labels = scales::number,
guide = guide_colorbar(title.position = "top")) +
coord_map(projection = "albers", lat0 = 39, lat1 = 45) +
theme(legend.title = element_text(),
legend.key.width = unit(.5, "in")) +
labs(fill = "Number of Children served") +
theme_urbn_map()
#----------------------------------------------------------------------------------------------------
#WORK ON SECOND DATA SET TO GET FUNDING DETAILS
funding_data <-read.csv("New York State Child Welfare and Community Services Programs.csv")
colnames(funding_data)[6] <- "county_name"
funding_data$county_name <- tolower(funding_data$county_name)
funding_data$county_name <-trim(funding_data$county_name)
library(data.table)
dt <- data.table(funding_data)
dt2 <- dt[,list(sumamount = sum(Funding.Level), freq = .N), by = c("county_name")]
dt2 %>%
left_join(NY_counties, by = "county_name") %>%
filter(state_name =="New York") %>%
ggplot(mapping = aes(long, lat, group = group, fill = sumamount)) +
geom_polygon(color = "#ffffff", size = .25) +
scale_fill_gradientn(labels = scales::number,
guide = guide_colorbar(title.position = "top")) +
coord_map(projection = "albers", lat0 = 39, lat1 = 45) +
theme(legend.title = element_text(), legend.key.width = unit(.5, "in")) +
labs(fill = "Total Funding received (USD)") +
theme_urbn_map()
#----------------------------------------------------------------------------------------------------
#Income by County
incomedat <-read.csv("New York State Income by County.csv")
colnames(incomedat)[2] <- "county_name"
incomedat$county_name <- tolower(incomedat$county_name)
incomedat$county_name <-trim(incomedat$county_name)
incomedat$median.family.income <- as.numeric(gsub("[\\$,]", "", incomedat$median.family.income))
incomedat$per.capita.income <- as.numeric(gsub("[\\$,]", "", incomedat$per.capita.income))
# County choropleth of per-capita income for New York State.
# FIX: the legend label previously read "Median Family Income (USD)" while the
# fill aesthetic maps per.capita.income; the label now matches the plotted
# variable. (If median family income was intended, change the aes() fill
# instead of the label.)
# NOTE(review): scale_fill_gradientn() is called without a `colours` argument
# here and in the other plots in this script -- confirm this renders with the
# ggplot2 version in use.
incomedat %>%
  left_join(NY_counties, by = "county_name") %>%
  filter(state_name =="New York") %>%
  ggplot(mapping = aes(long, lat, group = group, fill = per.capita.income)) +
  geom_polygon(color = "#ffffff", size = .25) +
  scale_fill_gradientn(labels = scales::number,
                       guide = guide_colorbar(title.position = "top")) +
  coord_map(projection = "albers", lat0 = 39, lat1 = 45) +
  theme(legend.title = element_text(),
        legend.key.width = unit(.5, "in")) +
  labs(fill = "Per Capita Income (USD)") +
  theme_urbn_map()
uniqueNY$c <- unique(NY_counties$county_name)
#----------------------------------------------------------------------------------------------------
#TEST CODE
q1 <- which(cleandat$county_name == "albany")
X <- cleandat[q1,] | /project1/Saxena-project1.R | permissive | m-vrhy/cosc4931-socialethicalimp-fa18 | R | false | false | 4,598 | r | rawdata <- read.csv("New York State Children in Foster Care annually.csv")
devtools::install_github("UrbanInstitute/urbnmapr")
devtools::install_github("UI-Research/urbnthemes")
install.packages('devtools')
install.packages('ggplot2')
install.packages('scales')
library(scales)
library(tidyverse)
library(urbnthemes)
library(urbnmapr)
library(ggplot2)
#----------------------------------------------------------------------------------------------------
#Functions
trim <- function (x) gsub("^\\s+|\\s+$", "", x)
#----------------------------------------------------------------------------------------------------
#Test given data
test1 <- countydata
test2 <- counties
#----------------------------------------------------------------------------------------------------
#Clean data to get correct "county_name" and only consider year "2017"
q <- which(rawdata$Year == "2017")
cleandat <- rawdata[q,]
cleandat$County <- tolower(cleandat$County)
cleandat$County <- trim(cleandat$County)
colnames(cleandat)[1] <- "county_name"
q <- which(counties$state_name == "New York")
NY_counties <- counties[q,]
NY_counties$county_name <- gsub("County", "", NY_counties$county_name)
NY_counties$county_name <- tolower(NY_counties$county_name)
NY_counties$county_name <- trim(NY_counties$county_name)
#Join the data and the counties data
my_data <- left_join(cleandat, NY_counties, by = "county_name")
#----------------------------------------------------------------------------------------------------
#Create New York State Visualization
cleandat %>%
left_join(NY_counties, by = "county_name") %>%
filter(state_name =="New York") %>%
ggplot(mapping = aes(long, lat, group = group, fill = Number.of.Children.Served)) +
geom_polygon(color = "#ffffff", size = .25) +
scale_fill_gradientn(labels = scales::number,
guide = guide_colorbar(title.position = "top")) +
coord_map(projection = "albers", lat0 = 39, lat1 = 45) +
theme(legend.title = element_text(),
legend.key.width = unit(.5, "in")) +
labs(fill = "Number of Children served") +
theme_urbn_map()
#----------------------------------------------------------------------------------------------------
#WORK ON SECOND DATA SET TO GET FUNDING DETAILS
funding_data <-read.csv("New York State Child Welfare and Community Services Programs.csv")
colnames(funding_data)[6] <- "county_name"
funding_data$county_name <- tolower(funding_data$county_name)
funding_data$county_name <-trim(funding_data$county_name)
library(data.table)
dt <- data.table(funding_data)
dt2 <- dt[,list(sumamount = sum(Funding.Level), freq = .N), by = c("county_name")]
dt2 %>%
left_join(NY_counties, by = "county_name") %>%
filter(state_name =="New York") %>%
ggplot(mapping = aes(long, lat, group = group, fill = sumamount)) +
geom_polygon(color = "#ffffff", size = .25) +
scale_fill_gradientn(labels = scales::number,
guide = guide_colorbar(title.position = "top")) +
coord_map(projection = "albers", lat0 = 39, lat1 = 45) +
theme(legend.title = element_text(), legend.key.width = unit(.5, "in")) +
labs(fill = "Total Funding received (USD)") +
theme_urbn_map()
#----------------------------------------------------------------------------------------------------
#Income by County
incomedat <-read.csv("New York State Income by County.csv")
colnames(incomedat)[2] <- "county_name"
incomedat$county_name <- tolower(incomedat$county_name)
incomedat$county_name <-trim(incomedat$county_name)
incomedat$median.family.income <- as.numeric(gsub("[\\$,]", "", incomedat$median.family.income))
incomedat$per.capita.income <- as.numeric(gsub("[\\$,]", "", incomedat$per.capita.income))
incomedat %>%
left_join(NY_counties, by = "county_name") %>%
filter(state_name =="New York") %>%
ggplot(mapping = aes(long, lat, group = group, fill = per.capita.income)) +
geom_polygon(color = "#ffffff", size = .25) +
scale_fill_gradientn(labels = scales::number,
guide = guide_colorbar(title.position = "top")) +
coord_map(projection = "albers", lat0 = 39, lat1 = 45) +
theme(legend.title = element_text(),
legend.key.width = unit(.5, "in")) +
labs(fill = "Median Family Income (USD)") +
theme_urbn_map()
uniqueNY$c <- unique(NY_counties$county_name)
#----------------------------------------------------------------------------------------------------
#TEST CODE
q1 <- which(cleandat$county_name == "albany")
X <- cleandat[q1,] |
data <- read.table("c:/Users/Owner/household_power_consumption.txt", header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".")
subSettingData <- data[data$Date %in% c("1/2/2007","2/2/2007") ,]
datetime <- strptime(paste(subSettingData$Date, subSettingData$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
householdGlobalActivePower <- as.numeric(subSettingData$Global_active_power)
subMetering1 <- as.numeric(subSettingData$Sub_metering_1)
subMetering2 <- as.numeric(subSettingData$Sub_metering_2)
subMetering3 <- as.numeric(subSettingData$Sub_metering_3)
png("plot3.png", width=480, height=480)
plot(datetime, subMetering1, type="l", ylab="Energy Submetering", xlab="")
lines(datetime, subMetering2, type="l", col="red")
lines(datetime, subMetering3, type="l", col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2.5, col=c("black", "red", "blue"))
dev.off() | /plot3.R | no_license | SharvariPawar/ExData_Plotting1 | R | false | false | 895 | r |
data <- read.table("c:/Users/Owner/household_power_consumption.txt", header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".")
subSettingData <- data[data$Date %in% c("1/2/2007","2/2/2007") ,]
datetime <- strptime(paste(subSettingData$Date, subSettingData$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
householdGlobalActivePower <- as.numeric(subSettingData$Global_active_power)
subMetering1 <- as.numeric(subSettingData$Sub_metering_1)
subMetering2 <- as.numeric(subSettingData$Sub_metering_2)
subMetering3 <- as.numeric(subSettingData$Sub_metering_3)
png("plot3.png", width=480, height=480)
plot(datetime, subMetering1, type="l", ylab="Energy Submetering", xlab="")
lines(datetime, subMetering2, type="l", col="red")
lines(datetime, subMetering3, type="l", col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2.5, col=c("black", "red", "blue"))
dev.off() |
rm(list=ls())
library(igraph)
a<-load(file="Data/Raw Data Objects.RData")
display.contexts<-c("Gear","Weather","Location","Activity","T. Bycatch", "Tech.", "Regs.","Finance","Capt.")
short.contexts<-short.names[4:12]
short.contexts.t1<-c("turtle",short.contexts[!short.contexts=="turtle"])
display.contexts.t1<-c("T. Bycatch",display.contexts[!display.contexts=="T. Bycatch"])
res.mat<-sapply(resp.just.infos,function(a)sapply(resp.just.infos,function(b)cor(as.numeric(a),as.numeric(b))))
#create overall results matrix
short.contexts<-short.names[4:12]
rownames(res.mat)<-colnames(res.mat)<-short.contexts
res.info.type.cors<-res.mat
resp.am.a.blank<-matrix(0,nrow(resp.just.infos[[1]]),ncol(resp.just.infos[[1]]),dimnames=list(rownames(resp.just.infos[[1]]),colnames(resp.just.infos[[1]]))) #am.a is going to be all individuals
resp.am.a.any<-resp.am.a.blank #if they were simply named as a nominee or not (directed matrix)
resp.am.a.any[cbind(resp.dat$resp.id.c,resp.dat$noms.id.c)]<-1
res.info.any.cors<-sapply(resp.just.infos,function(a)cor(as.numeric(a),as.numeric(resp.am.a.any)))
names(res.info.any.cors)<-short.contexts
for.any.cor.obs<-res.info.any.cors
any.cor.obs<-for.any.cor.obs[short.contexts.t1]
###################################################
cols.d<-"black"
dev.new(height=5.5,width=8.7);par(mar=c(4,4,1,0.2))
plot(any.cor.obs~c(1:length(any.cor.obs)),xlim=c(0.5,length(any.cor.obs)+0.5),ylim=range(0.75,1),cex=2,pch=16,col=cols.d,xaxt="n",main="",xlab="",ylab="",type="n",bty="n") #,yaxt="n"
axis(side=1,at=c(1:length(any.cor.obs)),labels=display.contexts.t1,tcl=-0.4,cex.axis=1.0)
#axis(side=2,at=seq(0,0.7,by=0.1),mgp=c(1.5, 0.6, 0),las=1,tcl=-0.4)
mtext("Any Nomination Network Corr.",side=2,line=2.2,adj=0.5,outer=F) #whatever the y axis is
#Add bootstrap
res.info.any.cors.bs<-sapply(resp.just.infos,function(a)bcor(x=as.numeric(a),y=as.numeric(resp.am.a.any),1000,F))
colnames(res.info.any.cors.bs)<-short.contexts
for.any.cor.obs.bs<-res.info.any.cors.bs
any.cor.obs.bs<-for.any.cor.obs.bs[,short.contexts.t1]
uq<-apply(any.cor.obs.bs,2,quantile,probs=0.975)
lq<-apply(any.cor.obs.bs,2,quantile,probs=0.025)
segments(1:length(any.cor.obs),uq,1:length(any.cor.obs),lq,col="darkgrey")
#add points
points(any.cor.obs,pch=20,col="red",cex=1.5) | /Figure S7 - Correlation with All.R | no_license | JoshFirth/bycatch_information_flow | R | false | false | 2,281 | r |
rm(list=ls())
library(igraph)
a<-load(file="Data/Raw Data Objects.RData")
display.contexts<-c("Gear","Weather","Location","Activity","T. Bycatch", "Tech.", "Regs.","Finance","Capt.")
short.contexts<-short.names[4:12]
short.contexts.t1<-c("turtle",short.contexts[!short.contexts=="turtle"])
display.contexts.t1<-c("T. Bycatch",display.contexts[!display.contexts=="T. Bycatch"])
res.mat<-sapply(resp.just.infos,function(a)sapply(resp.just.infos,function(b)cor(as.numeric(a),as.numeric(b))))
#create overall results matrix
short.contexts<-short.names[4:12]
rownames(res.mat)<-colnames(res.mat)<-short.contexts
res.info.type.cors<-res.mat
resp.am.a.blank<-matrix(0,nrow(resp.just.infos[[1]]),ncol(resp.just.infos[[1]]),dimnames=list(rownames(resp.just.infos[[1]]),colnames(resp.just.infos[[1]]))) #am.a is going to be all individuals
resp.am.a.any<-resp.am.a.blank #if they were simply named as a nominee or not (directed matrix)
resp.am.a.any[cbind(resp.dat$resp.id.c,resp.dat$noms.id.c)]<-1
res.info.any.cors<-sapply(resp.just.infos,function(a)cor(as.numeric(a),as.numeric(resp.am.a.any)))
names(res.info.any.cors)<-short.contexts
for.any.cor.obs<-res.info.any.cors
any.cor.obs<-for.any.cor.obs[short.contexts.t1]
###################################################
cols.d<-"black"
dev.new(height=5.5,width=8.7);par(mar=c(4,4,1,0.2))
plot(any.cor.obs~c(1:length(any.cor.obs)),xlim=c(0.5,length(any.cor.obs)+0.5),ylim=range(0.75,1),cex=2,pch=16,col=cols.d,xaxt="n",main="",xlab="",ylab="",type="n",bty="n") #,yaxt="n"
axis(side=1,at=c(1:length(any.cor.obs)),labels=display.contexts.t1,tcl=-0.4,cex.axis=1.0)
#axis(side=2,at=seq(0,0.7,by=0.1),mgp=c(1.5, 0.6, 0),las=1,tcl=-0.4)
mtext("Any Nomination Network Corr.",side=2,line=2.2,adj=0.5,outer=F) #whatever the y axis is
#Add bootstrap
res.info.any.cors.bs<-sapply(resp.just.infos,function(a)bcor(x=as.numeric(a),y=as.numeric(resp.am.a.any),1000,F))
colnames(res.info.any.cors.bs)<-short.contexts
for.any.cor.obs.bs<-res.info.any.cors.bs
any.cor.obs.bs<-for.any.cor.obs.bs[,short.contexts.t1]
uq<-apply(any.cor.obs.bs,2,quantile,probs=0.975)
lq<-apply(any.cor.obs.bs,2,quantile,probs=0.025)
segments(1:length(any.cor.obs),uq,1:length(any.cor.obs),lq,col="darkgrey")
#add points
points(any.cor.obs,pch=20,col="red",cex=1.5) |
#' @title Predict GMDH Twice-Multilayered Combinatorial
#'
#' @description Calculates GMDH Twice-Multilayered Combinatorial model predictions for new data.
#'
#' @param object an object of class 'combitwice'
#' @param newdata matrix containing dependent variables in the model, wich the predictions are calculated.
#' @param ... other undocumented arguments
#'
#' @return A matrix with predictions.
#'
#' @examples
#' set.seed(123)
#' x <- matrix(data = c(rnorm(1050)), ncol = 3, nrow = 350)
#' colnames(x) <- c("a", "b", "c")
#' y <- matrix(data = c(10 + x[, "a"] + x[, "b"]^2 + x[, "c"]^3), ncol = 1)
#' colnames(y) <- "y"
#' x.test <- x[1:10, ]
#' y.test <- y[1:10]
#' x <- x[-c(1:10), ]
#' y <- y[-c(1:10)]
#'
#' mod <- gmdh.combi.twice(X = x, y = y, criteria = "PRESS")
#' pred <- predict(mod, x.test)
#' summary(sqrt((pred - y.test)^2))
#'
#' @export
#'
predict.combitwice <- function(object, newdata, ...) {
# Surface (but do not stop on) NAs in newdata; try() keeps the original
# lenient behaviour of only printing the error.
try(na.fail(newdata))
G <- object$G
# Rebind `object` to the list of fitted layers; the indexing below treats the
# last two elements as the final layer plus bookkeeping (hence length - 2).
object <- object$results
n <- length(object) - 2
# Forward-propagate newdata through each hidden layer.
# NOTE(review): `1:n` iterates c(1, 0) when n == 0 -- assumes >= 1 layer.
for (i in 1:n) {
message(paste("Estimando capa ", i, sep = ""))
datos <- vector(mode = "list", length = length(object[[i]]))
# Expand the inputs with polynomial terms (project helper fun.poly).
newdata <- fun.poly(newdata, G = G)
# One prediction column per model in this layer (project method predict.svd).
for(j in 1:length(datos)) datos[[j]] <- predict.svd(object[[i]][[j]], newdata)
# The layer's outputs become the next layer's inputs.
newdata <- matrix(data = unlist(datos), ncol = length(datos))
colnames(newdata) <- names(object[[i]])
rm(datos)
}
# NOTE: relies on the loop index `i` (== n after the loop) surviving the for.
message(paste("Estimando capa ", i + 1, sep = ""))
# Select the model in the last hidden layer with the smallest CV criterion.
CV <- which.min(unlist(lapply(object[[i]], function(x){x$CV})))
newdata <- fun.poly(newdata, G = G)
return(predict(object[[i + 1]][[CV]], newdata))
}
| /R/predict.combitwice.R | no_license | cran/GMDHreg | R | false | false | 1,612 | r | #' @title Predict GMDH Twice-Multilayered Combinatorial
#'
#' @description Calculates GMDH Twice-Multilayered Combinatorial model predictions for new data.
#'
#' @param object an object of class 'combitwice'
#' @param newdata matrix containing dependent variables in the model, wich the predictions are calculated.
#' @param ... other undocumented arguments
#'
#' @return A matrix with predictions.
#'
#' @examples
#' set.seed(123)
#' x <- matrix(data = c(rnorm(1050)), ncol = 3, nrow = 350)
#' colnames(x) <- c("a", "b", "c")
#' y <- matrix(data = c(10 + x[, "a"] + x[, "b"]^2 + x[, "c"]^3), ncol = 1)
#' colnames(y) <- "y"
#' x.test <- x[1:10, ]
#' y.test <- y[1:10]
#' x <- x[-c(1:10), ]
#' y <- y[-c(1:10)]
#'
#' mod <- gmdh.combi.twice(X = x, y = y, criteria = "PRESS")
#' pred <- predict(mod, x.test)
#' summary(sqrt((pred - y.test)^2))
#'
#' @export
#'
predict.combitwice <- function(object, newdata, ...) {
  # Surface NAs in newdata without aborting (mirrors the fitted model's
  # lenient input check).
  try(na.fail(newdata))
  G <- object$G
  layers <- object$results
  n.hidden <- length(layers) - 2
  # Forward-propagate newdata through every hidden layer: expand with
  # polynomial terms, predict with each model, and use the predictions as
  # the next layer's inputs.
  for (i in 1:n.hidden) {
    message(paste("Estimando capa ", i, sep = ""))
    newdata <- fun.poly(newdata, G = G)
    preds <- lapply(layers[[i]], function(mod) predict.svd(mod, newdata))
    newdata <- matrix(data = unlist(preds), ncol = length(preds))
    colnames(newdata) <- names(layers[[i]])
  }
  # `i` still holds the last hidden-layer index here; pick that layer's model
  # with the smallest cross-validation criterion and predict with it.
  message(paste("Estimando capa ", i + 1, sep = ""))
  CV <- which.min(unlist(lapply(layers[[i]], function(x) x$CV)))
  newdata <- fun.poly(newdata, G = G)
  predict(layers[[i + 1]][[CV]], newdata)
}
|
#' Create a WebVR widget
#'
#' This function creates a WebVR widget using \pkg{htmlwidgets}.
#' It should work out of the box with RMarkdown, Shiny, or as a standalone call.
#' Note that the rendering device must support the WebVR spec or it will fail.
#'
#' @param width The width of the viewport prior to entering vr mode
#' @param height The height of the viewport prior to entering vr mode
#'
#' @import htmlwidgets
#'
#' @export
webvrr <- function(width = NULL, height = NULL) {
  # Start with an empty payload; drawing calls are appended later by the
  # add* helpers (e.g. addToScene / addInnerTexturedSphere).
  payload <- list(calls = list())
  htmlwidgets::createWidget(
    name = "webvrr",
    x = payload,
    width = width,
    height = height
  )
}
#' Add a background to a WebVR widget.
#' For drawing a skybox sphere, use \code{\link{addInnerTexturedSphere}}
#'
#' @param vr A webvr widget. See \code{\link{webvrr}}
#' @param backgroundType one of "color", "texture", or "cube"
#' @param backgroundColor Hex or numeric color representation. Required if backgroundType is "color".
#' The function call tolerates string and hexmode representations.
#' @param texture a string representing the texture in a base64 data URI or a URL. Required if
#' backgroundType is "texture".
#' @param shouldRepeat should the texture repeat?
#' @param repeatX repeat density over X portion of mesh
#' @param repeatY repeat density over Y portion of mesh
#' @param cube a list of 6 strings representing base64 data URIs or URLs corresponding to a cube map.
#' required if backgroundType is "cube".
#' @export
addBackground <- function(
  vr,
  backgroundType = c("color", "texture", "cube"),
  backgroundColor = NULL,
  texture = NULL,
  shouldRepeat = FALSE,
  repeatX = NULL,
  repeatY = NULL,
  cube = list()
) {
  # FIX: resolve the choices vector to a single value. Previously the full
  # length-3 default reached the `==` comparisons below, which errors in
  # `if` when backgroundType is left unset. match.arg() also validates
  # user-supplied values up front.
  backgroundType <- match.arg(backgroundType)
  if (backgroundType == "color") {
    # Accept "rrggbb"-style strings by coercing through hexmode.
    if (is.character(backgroundColor)) backgroundColor <- as.hexmode(backgroundColor)
    stopifnot(is.numeric(backgroundColor))
    vr$x$background <- list(
      type = backgroundType,
      color = backgroundColor
    )
  } else if (backgroundType == "texture") {
    stopifnot(is.character(texture))
    if (shouldRepeat) {
      stopifnot(is.numeric(repeatX), is.numeric(repeatY))
      vr$x$background <- list(
        type = backgroundType,
        texture = texture,
        `repeat` = list(
          repeatX = repeatX,
          repeatY = repeatY
        )
      )
    } else {
      # `repeat` = FALSE signals "no tiling" to the JavaScript side.
      vr$x$background <- list(
        type = backgroundType,
        texture = texture,
        `repeat` = FALSE
      )
    }
  } else {
    # "cube": guaranteed by match.arg(); a cube map needs exactly 6 faces.
    stopifnot(length(cube) == 6)
    vr$x$background <- list(
      type = backgroundType,
      cube = cube
    )
  }
  vr
}
#' Add an arbitrary object to the scene
#' Internal.
#'
#' @param vr a webvrr widget
#' @param call a specially crafted call object
addToScene <- function(
  vr,
  call = list()
) {
  # Append `call` to the end of the widget's call list. Replaces the
  # original rebuild-via-lapply (which reconstructed the whole list just
  # to add one element) with the idiomatic list append; the result is
  # identical: existing calls kept in order, the new call last.
  vr$x$calls <- c(vr$x$calls, list(call))
  vr
}
#' Puts the viewer in a big sphere with a texture painted on the inside.
#' See https://threejs.org/docs/#api/geometries/SphereGeometry
#'
#' @param vr a webvrr widget. See \code{\link{webvrr}}
#' @param texture a string representing a texture in base64 data URI format or a URL.
#' if not provided, a wireframe will be used
#' @param repeat If false, do not repeat. Otherwise, should be a list in the following
#' format: \code{list( repeatX = 32, repeatY = 32)}. Replace 32 with your
#' preferred repeats.
#' @param radius sphere radius.
#' @param widthSegments how many width segments to draw
#' @param heightSegments how many height segments to draw
#' @export
addInnerTexturedSphere <- function(
  vr,
  texture = NULL,
  `repeat` = list(
    repeatX = 32,
    repeatY = 32
  ),
  radius = 5,
  widthSegments = 32,
  heightSegments = 32
) {
  # `repeat` is either FALSE (no tiling) or a list of tiling densities.
  # The original test `repeat != FALSE` performed an elementwise comparison
  # against the default 2-element list, producing a length-2 condition —
  # an error under R >= 4.2. isFALSE() is the scalar-safe equivalent.
  if (!isFALSE(`repeat`)) stopifnot(is.list(`repeat`))
  addToScene(vr, list(
    type = "innerTexturedSphere",
    texture = texture,
    `repeat` = `repeat`,
    radius = radius,
    widthSegments = widthSegments,
    heightSegments = heightSegments
  ))
}
| /R/webvrr.R | no_license | scottmmjackson/webvrr | R | false | false | 4,139 | r | #' Create a WebVR widget
#'
#' This function creates a WebVR widget using \pkg{htmlwidgets}.
#' It should work out of the box with RMarkdown, Shiny, or as a standalone call.
#' Note that the rendering device must support the WebVR spec or it will fail.
#'
#' @param width The width of the viewport prior to entering vr mode
#' @param height The height of the viewport prior to entering vr mode
#'
#' @import htmlwidgets
#'
#' @export
webvrr <- function(
width = NULL,
height = NULL
) {
htmlwidgets::createWidget(
name = "webvrr",
x = list(
calls = list()
),
width = width,
height = height
)
}
#' Add a background to a WebVR widget.
#' For drawing a skybox sphere, use \code{\link{addInnerTexturedSphere}}
#'
#' @param vr A webvr widget. See \code{\link{webvrr}}
#' @param backgroundType one of "color", "texture", or "cube"
#' @param backgroundColor Hex or numeric color representation. Required if backgroundType is "color".
#' The function call tolerates string and hexmode representations.
#' @param texture a string representing the texture in a base64 data URI or a URL. Required if
#' backgroundType is "texture".
#' @param shouldRepeat should the texture repeat?
#' @param repeatX repeat density over X portion of mesh
#' @param repeatY repeat density over Y portion of mesh
#' @param cube a list of 6 strings representing base64 data URIs or URLs corresponding to a cube map.
#' required if backgroundType is "cube".
#' @export
addBackground <- function(
vr,
backgroundType = c("color", "texture", "cube"),
backgroundColor = NULL,
texture = NULL,
shouldRepeat = FALSE,
repeatX = NULL,
repeatY = NULL,
cube = list()
) {
if (backgroundType == "color") {
if(is.character(backgroundColor)) backgroundColor <- as.hexmode(backgroundColor)
stopifnot(is.numeric(backgroundColor))
vr$x$background <- list(
type = backgroundType,
color = backgroundColor
)
} else if (backgroundType == "texture") {
stopifnot(is.character(texture))
if (shouldRepeat) {
stopifnot(is.numeric(repeatX) && is.numeric(repeatY))
vr$x$background <- list(
type = backgroundType,
texture = texture,
`repeat` = list(
repeatX = repeatX,
repeatY = repeatY
)
)
} else {
vr$x$background <- list(
type = backgroundType,
texture = texture,
`repeat` = FALSE
)
}
} else if (backgroundType == "cube") {
stopifnot(length(cube) == 6)
vr$x$background <- list(
type = backgroundType,
cube = cube
)
} else {
stop("Unknown backgroundType")
}
vr
}
#' Add an arbitrary object to the scene
#' Internal.
#'
#' @param vr a webvrr widget
#' @param call a specially crafted call object
addToScene <- function(
vr,
call = list()
) {
vr$x$calls <- lapply(1:(length(vr$x$calls)+1), function(y) {
if(y > length(vr$x$calls)) call
else vr$x$calls[[y]]
})
vr
}
#' Puts the viewer in a big sphere with a texture painted on the inside.
#' See https://threejs.org/docs/#api/geometries/SphereGeometry
#'
#' @param vr a webvrr widget. See \code{\link{webvrr}}
#' @param texture a string representing a texture in base64 data URI format or a URL.
#' if not provided, a wireframe will be used
#' @param repeat If false, do not repeat. Otherwise, should be a list in the following
#' format: \code{list( repeatX = 32, repeatY = 32)}. Replace 32 with your
#' preferred repeats.
#' @param radius sphere radius.
#' @param widthSegments how many width segments to draw
#' @param heightSegments how many height segments to draw
#' @export
addInnerTexturedSphere <- function(
vr,
texture = NULL,
`repeat`= list(
repeatX = 32,
repeatY = 32
),
radius = 5,
widthSegments = 32,
heightSegments = 32
) {
if(`repeat` != FALSE) stopifnot(is.list(`repeat`))
addToScene(vr, list(
type = "innerTexturedSphere",
texture = texture,
`repeat` = `repeat`,
radius = radius,
widthSegments = widthSegments,
heightSegments = heightSegments
))
}
|
# Shiny app launcher: attaches dependencies, runs the data-import step,
# then starts the app found in the "shiny/" directory.
# NOTE(review): paths are relative — this assumes the working directory is
# the project root; confirm before running non-interactively.
library(shiny)
source("lib/libraries.r", encoding = "UTF-8")  # attach required packages
source("uvoz/uvoz.r", encoding = "UTF-8")      # import/prepare the data
runApp("shiny")                                # blocks until the app is stopped
| /shiny.r | permissive | Anchiqua/APPR-2015-16 | R | false | false | 123 | r | library(shiny)
source("lib/libraries.r", encoding = "UTF-8")
source("uvoz/uvoz.r", encoding = "UTF-8")
runApp("shiny")
|
# Build helper for the VES.datasets package: refreshes the bundled dataset,
# regenerates the documentation, and reinstalls the package from source.
library(pacman)
p_load(devtools, roxygen2, readr)  # install-if-missing + attach build deps
# update datasets ---------------------------------------------------------
# NOTE(review): path is relative to the package directory — run from there.
VES_general = read_rds("../data/VES_general.rds")
use_data(VES_general, overwrite = T)  # writes data/VES_general.rda
# update package ----------------------------------------------------------
document()                  # roxygen2: regenerate man/ and NAMESPACE
install("../VES.datasets")  # reinstall the package from source
| /build.R | no_license | Deleetdk/VES.datasets | R | false | false | 332 | r | library(pacman)
p_load(devtools, roxygen2, readr)
# update datasets ---------------------------------------------------------
VES_general = read_rds("../data/VES_general.rds")
use_data(VES_general, overwrite = T)
# update package ----------------------------------------------------------
document()
install("../VES.datasets")
|
# multi-armed bandit simulator
#
# 3 machines, each return the same ammount, but with different chances of winning
# you get to play 100 times, try and maximise the number of wins
# assume we use a strategy of
# a) playing each machine k times,
# b) choosing a machine based on estimates of the winning probabilities,
# c) using that machine for our remaining plays.
# what is the best choice of k?
mab <- function(k) {
  # One simulated 100-play session of the explore-then-commit strategy.
  # Returns the total number of wins (an integer in [0, 100]).
  #
  # Win probabilities of the three machines, drawn uniformly at random.
  p <- runif(3)
  # Exploration: play each machine k times and record its win count.
  # vapply() replaces sapply() so the result type is guaranteed (sapply's
  # return type depends on its input, a known footgun in scripts).
  wins <- vapply(p, function(prob) rbinom(1L, k, prob), integer(1))
  # Exploitation: commit to the machine with the most observed wins for
  # the remaining 100 - 3k plays (each machine got equal trials, so the
  # max count is also the max empirical win rate).
  best <- which.max(wins)
  sum(wins) + rbinom(1L, 100 - 3 * k, p[best])
}
nrep <- 10000  # Monte-Carlo repetitions per value of k
# Sweep the exploration length k and report the mean total return: larger k
# means better machine identification but fewer plays (100 - 3k) left to
# exploit the choice.
for (k in 1:20) {
  cat("k =", k, "mean return =", mean(replicate(nrep, mab(k))), "\n")
} | /mab_sim2.r | no_license | sg-first/Multi-armed-Bandits | R | false | false | 679 | r | # multi-armed bandit simulator
#
# 3 machines, each return the same ammount, but with different chances of winning
# you get to play 100 times, try and maximise the number of wins
# assume we use a strategy of
# a) playing each machine k times,
# b) choosing a machine based on estimates of the winning probabilities,
# c) using that machine for our remaining plays.
# what is the best choice of k?
mab <- function(k) {
p <- runif(3)
phat <- sapply(p, function(x) rbinom(1, k, x))
i <- which.max(phat)
return(sum(phat) + rbinom(1, 100-3*k, p[i]))
}
nrep <- 10000
for (k in 1:20) {
cat("k =", k, "mean return =", mean(replicate(nrep, mab(k))), "\n")
} |
##################################################
## Project: SRC Generation
## Script purpose: Generate Paper figures
## Date: 01-06-2020
## Author: Mike Johnson
##################################################
library(ggplot2)
library(ggridges)
library(AOI)
library(dplyr)
library(sf)
# Helpful functions -------------------------------------------------------
mapTheme <- function() {
  # Shared ggplot theme for every map figure in this script: a blank
  # canvas (theme_void) plus the project's legend/typography settings.
  overrides <- theme(
    text = element_text(size = 7),
    plot.title = element_text(size = 14, color = "black", hjust = 0, vjust = 2, face = "bold"),
    plot.subtitle = element_text(size = 8, color = "black", hjust = 0, vjust = 0),
    axis.ticks = element_blank(),
    legend.direction = "vertical",
    legend.position = "right",
    plot.margin = margin(1, 1, 1, 1, 'cm'),
    legend.key.height = unit(1.5, "cm"), legend.key.width = unit(0.4, "cm")
  )
  theme_void() + overrides
}
rank = function(x) {
  # Bin nRMSE values (in %) into the 6 ordinal error classes used by the
  # figure legends: [0,15), [15,30), [30,60), [60,100), [100,250), [250,Inf).
  # findInterval() replaces the chain of overlapping dplyr::between() calls:
  # boundary values (15, 30, ...) fell into the upper class there because
  # the later assignment overwrote the earlier one, and findInterval's
  # left-closed bins preserve exactly that. Unlike logical-index assignment
  # with an NA mask (`y[between(NA, ...)] <- 1` errors with "NAs are not
  # allowed in subscripted assignments"), this is NA-safe: NA in, NA out.
  # It also drops the dplyr dependency for this helper.
  # NOTE(review): this shadows base::rank() for the rest of the script.
  breaks <- c(0, 15, 30, 60, 100, 250)
  bin <- findInterval(x, breaks)
  # findInterval returns 0 for x < 0; keep those unclassified (NA), as before.
  bin[which(bin == 0)] <- NA
  bin
}
prep_sf = function(input) {
  # Join per-site model results onto the USGS station metadata (global
  # `usgs_filter`), keep the columns the figures need, attach the ordinal
  # error class, and promote lon/lat into an sf point geometry (WGS84).
  joined <- merge(usgs_filter, input, by = 'siteID')
  slim <- dplyr::select(joined, siteID, lon, lat, COMID = COMID.x, nrmse = nrmse, order = order)
  classed <- mutate(slim, error = rank(nrmse))
  st_as_sf(classed, coords = c("lon", 'lat'), crs = '+proj=longlat +datum=WGS84')
}
# External Data (not checked in) ------------------------------------------
# Loads the station metadata plus every per-method result file, then builds
# one sf layer per SRC method. Absolute, machine-specific paths: this
# section only runs on the author's machine.
load('/Users/mikejohnson/Documents/GitHub/hand_improvements/data/usgs_filter.rda')
files = list.files('/Users/mikejohnson/Documents/GitHub/hand_improvements/output/', full.names = T)
tmp = list()
# NOTE(review): 1:length(files) misbehaves if `files` is empty (yields
# c(1, 0)); seq_along(files) would be the safe idiom.
for(i in 1:length(files)){
  # Each .rda is assumed to define an object named `o` — TODO confirm.
  load(files[i])
  tmp[[i]] = o
}
raw = bind_rows(tmp)
# One sf point layer per SRC method, keyed by the `type` column.
usgs_table = filter(raw, type == "table") %>% prep_sf()
usgs_lc = filter(raw, type == "nlcd") %>% prep_sf()
usgs_so = filter(raw, type == "lm") %>% prep_sf()
usgs_composite = filter(raw, type == "combo") %>% prep_sf()
usgs_catchment = filter(raw, type == "catchment") %>% prep_sf()
usgs_base = filter(raw, type == "base") %>% prep_sf()
conus = AOI::aoi_get(state = "conus") %>% st_transform(5070)  # CONUS Albers
outline = sf::st_union(conus)
# Named list used by the Figure 3 panel loop (names become plot titles).
# NOTE(review): `all` shadows base::all() for the rest of the script.
all = list(
  Composite = usgs_composite,
  `land cover` = usgs_lc,
  `Stream Order Composite` = usgs_table,
  Default = usgs_base,
  Catchment = usgs_catchment
)
# Figure 2 ----------------------------------------------------------------
# Base-graphics boxplot comparing nRMSE across the five methods, with the
# count of off-scale stations (> 250%) annotated above each box.
to_plot = raw %>%
  filter(type %in% c("base", "nlcd", "catchment", 'combo', "table")) %>%
  # Reorder factor levels so the x-axis reads base, combo-group, etc.
  mutate(type = factor(type,levels=levels(as.factor(type))[c(1,4,2,3,5)]))
# Stations above the plotted y-range, per method (annotated below).
out = to_plot %>% group_by(type) %>% summarize(count = sum(nrmse > 250, na.rm = T))
png(file="/Users/mikejohnson/Documents/GitHub/SRCgeneration/imgs/methods_boxplot.png",height=12, width = 12, units = 'in', res=300)
par(mar = c(5,6,3,3))
# NOTE(review): "Comparision" in the title string is a typo ("Comparison").
boxplot(nrmse~type, data = to_plot,
        names = c("Default", "Land\nCover", "Catchment", "Composite", "Stream Order\nComposite"),frame.plot = FALSE,
        cex.axis = .7,
        outline = F,
        ylim = c(0,500),
        xlab = "Method",
        pch =16,
        cex =.2,
        ylab = "Normalized Root Mean Square Error \n(nRMSE)",
        main = paste0("SRC Comparision by Method: " , length(unique(to_plot$COMID)), " Stations"),
        notch = T,
        lwd = .4,
        horizontal = F,
        las = 1,
        col = c("#95D1C5", '#95D1C5',"#E3D5B8", '#E3D5B8', '#95D1C5'),  # green = extendable, tan = not
        border = 'gray20')
abline(h = 250)  # cut-off line; counts above it are printed as text
for(i in 1:5){ text(paste0('\u2191 ', out$count[i]), x = i, y = 260) }  # up-arrow + count
legend("topright", inset=.02, title="Method Type",
       c("Extendable","Non-extendable"), fill=c('#95D1C5',"#E3D5B8"), horiz=F, cex=1.2)
dev.off()
# Figure 3 ----------------------------------------------------------------
## PANEL A
# One CONUS map per method: stations coloured by the 6-level nRMSE class
# (the `error` column produced by rank() inside prep_sf()).
for(i in 1:5){
  # Project to CONUS Albers and drop stations with no error class.
  p = st_transform(all[[i]], 5070) %>% filter(!is.na(error))
  # Legend labels: class range plus station count in that class.
  lab = paste0(c("0-15", "15-30", "30-60", "60-100", "100-250", ">250"), " (", table(p$error),")" )
  g = ggplot() +
    # Three state layers: grey halo, black fill, thin light outline.
    geom_sf(data = conus, fill = 'black', color = "gray70", lwd = 3) +
    geom_sf(data = conus, fill = "black", color = "black") +
    geom_sf(data = conus, fill = 'black', color = "gray80", lwd = .1) +
    scale_colour_brewer("nRMSE (%)", palette = "RdYlGn",
                        labels = lab, direction = -1) +
    scale_fill_brewer("nRMSE (%)", palette = "RdYlGn",
                      labels = lab, direction = -1) +
    geom_sf(data = p, aes(fill = as.factor(error), color = as.factor(error)),
            size = .07) +
    labs(title = names(all)[i]) +
    mapTheme()
  ggsave(g, filename = paste0("/Users/mikejohnson/Documents/GitHub/SRCgeneration/imgs/map_", names(all)[i], "_method.png"), width = 9, height = 6, dpi = 300)
}
## PANEL B
# NOTE(review): `composite` is only assigned in the Figure 4 section below,
# so this statement fails if the script runs top-to-bottom; its result is
# also discarded (useful interactively only). Verify intended run order.
composite$nrmse[composite$nrmse < 250] %>% median(na.rm = T)
# Per-state median nRMSE for the composite method...
tab = usgs_composite %>% st_transform(5070)
mer = st_intersection(tab, conus) %>%
  group_by(state_abbr) %>%
  summarise(m = median(nrmse, na.rm = TRUE), n= n()) %>%
  ungroup() %>%
  st_drop_geometry()
# ...and for the stream-order-composite method.
tab2 = usgs_table %>% st_transform(5070)
mer2 = st_intersection(tab2, conus) %>%
  group_by(state_abbr) %>%
  summarise(m = median(nrmse, na.rm = TRUE), n= n()) %>%
  ungroup() %>%
  st_drop_geometry()
tab_sp2 = merge(conus, mer2)
# NOTE(review): `tab_sp` is never defined in this script — presumably
# `tab_sp = merge(conus, mer)` was intended (mirroring tab_sp2/mer2);
# without it, everything from here to PART 2 fails. Verify.
sorted = tab_sp$state_abbr[order(tab_sp$m)]
vals = tab_sp$m[order(tab_sp$m)]
# Best / middle / worst performing states for the PART 2 close-ups.
sta = list(sorted[1], sorted[25], sorted[48])
val = list(vals[1], vals[25], vals[48])
cond = paste(c("Best", "Average", "Worst"), "Performing:\n ")
subs = aoi_get(state = unlist(sta))
### PART 1
# Side-by-side state choropleths: composite (g1) vs SO-composite (g2).
g1 = ggplot() +
  geom_sf(data = tab_sp, aes(fill = m/100), color = NA, lwd = .25) +
  geom_sf(data = st_union(conus),
          fill = NA, color = 'black', lwd = 1.1) +
  #geom_sf(data = subs, fill = NA,color = 'gray70', lwd = 3) +
  #geom_sf(data = subs, fill = NA,color = 'black', lwd = 1) +
  scale_fill_gradient2(position="bottom" , low = "blue", mid = scales::muted("blue"), high = "darkred",
                       midpoint = .3) +
  mapTheme() +
  theme(legend.position = 'none')
g2 = ggplot() +
  geom_sf(data = tab_sp2, aes(fill = m/100), color = NA, lwd = .25) +
  geom_sf(data = st_union(conus),
          fill = NA, color = 'black', lwd = 1.1) +
  #geom_sf(data = subs, fill = NA,color = 'gray70', lwd = 3) +
  #geom_sf(data = subs, fill = NA,color = 'black', lwd = 1) +
  scale_fill_gradient2(position="bottom" , low = "blue", mid = scales::muted("blue"), high = "darkred",
                       midpoint = .3) +
  mapTheme()
library(patchwork)
g1+g2  # patchwork composition; ggsave() below saves the last plot
ggsave(file = paste0("/Users/mikejohnson/Documents/GitHub/SRCgeneration/imgs//state_map_soc.png"), height = 12, width = 18, dpi = 300)
### PART 2
# Close-up station maps for the best / average / worst state.
for(i in 1:length(sta)){
  fl = AOI::aoi_get(state = sta[[i]]) %>% st_transform(5070)
  flc = AOI::aoi_get(state = sta[[i]], county = 'all') %>% st_transform(5070)
  yy = tab[fl,]  # composite-method stations inside this state
  lab = paste0(c("0-15", "15-30", "30-60", "60-100", "100-250", ">250"), " (", table(yy$error),")" )
  ggplot() +
    geom_sf(data = fl, fill = 'black', color = "gray70", lwd = 5) +
    geom_sf(data = fl, fill = 'black', color = "black", lwd = 1) +
    geom_sf(data = flc, fill = 'transparent', color = "gray80", lwd = .05) +
    scale_colour_brewer("nRMSE (%)", palette = "RdYlGn",
                        labels = lab, direction = -1) +
    scale_fill_brewer("nRMSE (%)", palette = "RdYlGn", labels = lab, direction = -1) +
    geom_sf(data = yy, aes(fill = as.factor(error),
                           color = as.factor(error)),
            size = 1.8) +
    mapTheme() +
    labs(caption= paste0(cond[i], round(val[[i]], 2), "% mean nRMSE (excluding outliers)")) +
    theme(plot.caption = element_text(hjust=0.5, size=rel(3))) +
    theme(legend.position = 'none')
  ggsave(file = paste0("/Users/mikejohnson/Documents/GitHub/SRCgeneration/imgs/state_",sta[i], "_table_look.png"), height = 12, width = 12, dpi = 300)
}
# Figure 4 ----------------------------------------------------------------
## PART A
# Per stream order: overlaid density plots of the in-bank (inN, red) and
# out-of-bank (outN, blue) statistics, with dashed mean lines.
composite = filter(raw, type == "combo")
plots = list()
for(i in 1:10){
  # NOTE(review): `test` here shadows any test() in attached packages.
  test = composite[composite$order == i,]
  plots[[i]] = ggplot(data = test) +
    geom_density(aes(x=inN), fill = 'red', alpha=0.4) +
    geom_density(aes(x=outN), fill = 'blue', alpha=0.4) +
    geom_vline(xintercept = mean(test$inN), col = 'red', lty = 2, lwd = 1) +
    geom_vline(xintercept = mean(test$outN), col = 'blue', lty = 2, lwd = 1) +
    labs(title = paste0("Stream Order ", i),
         x = "",
         y = ""
    ) + theme_light() +
    theme(plot.title = element_text(size=rel(2))) +
    theme(axis.text.x = element_text(size=16),
          axis.text.y = element_text(size=16))
}
# Arrange the ten per-order panels into a 2 x 5 grid and save.
fin = gridExtra::grid.arrange(plots[[1]],
                              plots[[2]],
                              plots[[3]],
                              plots[[4]],
                              plots[[5]],
                              plots[[6]],
                              plots[[7]],
                              plots[[8]],
                              plots[[9]],
                              plots[[10]],
                              nrow = 2)
ggsave(plot = fin, file = "/Users/mikejohnson/Documents/GitHub/SRCgeneration/imgs/so_distribution_plots.png", height = 9, width = 20, dpi = 300)
## PART B
ridge_plot = function(d, col, method, bw = 4) {
  # Ridgeline plot of the nRMSE distribution per stream order for one SRC
  # method. `d`: data with nrmse/order columns; `col`: ridge fill colour;
  # `method`: label for the title; `bw`: density bandwidth.
  n_offscale <- sum(d$nrmse > 250, na.rm = TRUE)
  ridges <- ggplot(d, aes(x = nrmse, y = as.factor(order))) +
    geom_density_ridges(size = .25, fill = col, bandwidth = bw) +
    xlim(-10, 250)
  labelled <- ridges +
    labs(title = paste0("nRMSE by Stream Order: \n", method, " method"),
         subtitle = paste(n_offscale, "sites with nRMSE > 250%"),
         x = "nRMSE",
         y = "Stream Order") +
    ggridges::theme_ridges()
  # Reference lines at the 30 / 60 / 100 % error-class boundaries.
  labelled +
    geom_vline(xintercept = 30) +
    geom_vline(xintercept = 60) +
    geom_vline(xintercept = 100)
}
# Ridge plots for the three headline methods, arranged side by side.
g1 = ridge_plot(usgs_base, "green", "Default")
g2 = ridge_plot(usgs_composite, "red", "Composite")
g3 = ridge_plot(usgs_table, "lightblue", "SO-composite")
g4 = gridExtra::grid.arrange(g1,g2, g3, nrow = 1)
ggsave(plot = g4, filename = "/Users/mikejohnson/Documents/GitHub/SRCgeneration/imgs/ggridges.png", width = 12, height = 5, dpi = 300)
# Map of poorly performing (nRMSE > 100%) stream-order-method stations
# against cities of > 25k population, to eyeball an urban pattern.
# NOTE(review): `tmp` reuses the name of the load-loop accumulator above.
tmp = usgs_so %>% st_transform(5070) %>% filter(nrmse > 100)
cities = read_sf('/Users/mikejohnson/Downloads/tufts-uscitiestowns1mil14-shapefile/GISPORTAL_GISOWNER01_USCITIESTOWNS1MIL14.shp') %>% st_transform(5070)
city = cities[conus,] %>% filter(POP_2010 >25000)  # spatial subset to CONUS, then by population
ggplot() +
  geom_sf(data = st_union(conus), fill = NA, color = 'gray', lwd = 1.1) +
  geom_sf(data = city, color = 'red', size= .5) +
  geom_sf(data = tmp, color = 'black', size= .5) +
  mapTheme()
| /data-raw/image_creation.R | permissive | mikejohnson51/SRCgeneration | R | false | false | 10,693 | r | ##################################################
## Project: SRC Generation
## Script purpose: Generate Paper figures
## Date: 01-06-2020
## Author: Mike Johnson
##################################################
library(ggplot2)
library(ggridges)
library(AOI)
library(dplyr)
library(sf)
# Helpful functions -------------------------------------------------------
mapTheme <- function() {
theme_void() +
theme(
text = element_text(size = 7),
plot.title = element_text(size = 14, color = "black", hjust = 0, vjust = 2, face = "bold"),
plot.subtitle = element_text(size = 8, color = "black", hjust = 0, vjust = 0),
axis.ticks = element_blank(),
legend.direction = "vertical",
legend.position = "right",
plot.margin = margin(1, 1, 1, 1, 'cm'),
legend.key.height = unit(1.5, "cm"), legend.key.width = unit(0.4, "cm")
)
}
rank = function(x){
y = rep(NA, length(x))
y[between(x, 0, 15)] = 1
y[between(x, 15, 30)] = 2
y[between(x, 30, 60)] = 3
y[between(x, 60, 100)] = 4
y[between(x, 100, 250)] = 5
y[between(x, 250, 25e100000)] = 6
y
}
prep_sf = function(input){
usgs_filter %>%
merge(input, by = 'siteID') %>%
dplyr::select(siteID, lon, lat,COMID = COMID.x, nrmse = nrmse, order = order ) %>%
mutate(error = rank(nrmse)) %>%
st_as_sf(coords = c("lon", 'lat'), crs = '+proj=longlat +datum=WGS84')
}
# External Data (not checked in) ------------------------------------------
load('/Users/mikejohnson/Documents/GitHub/hand_improvements/data/usgs_filter.rda')
files = list.files('/Users/mikejohnson/Documents/GitHub/hand_improvements/output/', full.names = T)
tmp = list()
for(i in 1:length(files)){
load(files[i])
tmp[[i]] = o
}
raw = bind_rows(tmp)
usgs_table = filter(raw, type == "table") %>% prep_sf()
usgs_lc = filter(raw, type == "nlcd") %>% prep_sf()
usgs_so = filter(raw, type == "lm") %>% prep_sf()
usgs_composite = filter(raw, type == "combo") %>% prep_sf()
usgs_catchment = filter(raw, type == "catchment") %>% prep_sf()
usgs_base = filter(raw, type == "base") %>% prep_sf()
conus = AOI::aoi_get(state = "conus") %>% st_transform(5070)
outline = sf::st_union(conus)
all = list(
Composite = usgs_composite,
`land cover` = usgs_lc,
`Stream Order Composite` = usgs_table,
Default = usgs_base,
Catchment = usgs_catchment
)
# Figure 2 ----------------------------------------------------------------
to_plot = raw %>%
filter(type %in% c("base", "nlcd", "catchment", 'combo', "table")) %>%
mutate(type = factor(type,levels=levels(as.factor(type))[c(1,4,2,3,5)]))
out = to_plot %>% group_by(type) %>% summarize(count = sum(nrmse > 250, na.rm = T))
png(file="/Users/mikejohnson/Documents/GitHub/SRCgeneration/imgs/methods_boxplot.png",height=12, width = 12, units = 'in', res=300)
par(mar = c(5,6,3,3))
boxplot(nrmse~type, data = to_plot,
names = c("Default", "Land\nCover", "Catchment", "Composite", "Stream Order\nComposite"),frame.plot = FALSE,
cex.axis = .7,
outline = F,
ylim = c(0,500),
xlab = "Method",
pch =16,
cex =.2,
ylab = "Normalized Root Mean Square Error \n(nRMSE)",
main = paste0("SRC Comparision by Method: " , length(unique(to_plot$COMID)), " Stations"),
notch = T,
lwd = .4,
horizontal = F,
las = 1,
col = c("#95D1C5", '#95D1C5',"#E3D5B8", '#E3D5B8', '#95D1C5'),
border = 'gray20')
abline(h = 250)
for(i in 1:5){ text(paste0('\u2191 ', out$count[i]), x = i, y = 260) }
legend("topright", inset=.02, title="Method Type",
c("Extendable","Non-extendable"), fill=c('#95D1C5',"#E3D5B8"), horiz=F, cex=1.2)
dev.off()
# Figure 3 ----------------------------------------------------------------
## PANEL A
for(i in 1:5){
p = st_transform(all[[i]], 5070) %>% filter(!is.na(error))
lab = paste0(c("0-15", "15-30", "30-60", "60-100", "100-250", ">250"), " (", table(p$error),")" )
g = ggplot() +
geom_sf(data = conus, fill = 'black', color = "gray70", lwd = 3) +
geom_sf(data = conus, fill = "black", color = "black") +
geom_sf(data = conus, fill = 'black', color = "gray80", lwd = .1) +
scale_colour_brewer("nRMSE (%)", palette = "RdYlGn",
labels = lab, direction = -1) +
scale_fill_brewer("nRMSE (%)", palette = "RdYlGn",
labels = lab, direction = -1) +
geom_sf(data = p, aes(fill = as.factor(error), color = as.factor(error)),
size = .07) +
labs(title = names(all)[i]) +
mapTheme()
ggsave(g, filename = paste0("/Users/mikejohnson/Documents/GitHub/SRCgeneration/imgs/map_", names(all)[i], "_method.png"), width = 9, height = 6, dpi = 300)
}
## PANEL B
composite$nrmse[composite$nrmse < 250] %>% median(na.rm = T)
tab = usgs_composite %>% st_transform(5070)
mer = st_intersection(tab, conus) %>%
group_by(state_abbr) %>%
summarise(m = median(nrmse, na.rm = TRUE), n= n()) %>%
ungroup() %>%
st_drop_geometry()
tab2 = usgs_table %>% st_transform(5070)
mer2 = st_intersection(tab2, conus) %>%
group_by(state_abbr) %>%
summarise(m = median(nrmse, na.rm = TRUE), n= n()) %>%
ungroup() %>%
st_drop_geometry()
tab_sp2 = merge(conus, mer2)
sorted = tab_sp$state_abbr[order(tab_sp$m)]
vals = tab_sp$m[order(tab_sp$m)]
sta = list(sorted[1], sorted[25], sorted[48])
val = list(vals[1], vals[25], vals[48])
cond = paste(c("Best", "Average", "Worst"), "Performing:\n ")
subs = aoi_get(state = unlist(sta))
### PART 1
g1 = ggplot() +
geom_sf(data = tab_sp, aes(fill = m/100), color = NA, lwd = .25) +
geom_sf(data = st_union(conus),
fill = NA, color = 'black', lwd = 1.1) +
#geom_sf(data = subs, fill = NA,color = 'gray70', lwd = 3) +
#geom_sf(data = subs, fill = NA,color = 'black', lwd = 1) +
scale_fill_gradient2(position="bottom" , low = "blue", mid = scales::muted("blue"), high = "darkred",
midpoint = .3) +
mapTheme() +
theme(legend.position = 'none')
g2 = ggplot() +
geom_sf(data = tab_sp2, aes(fill = m/100), color = NA, lwd = .25) +
geom_sf(data = st_union(conus),
fill = NA, color = 'black', lwd = 1.1) +
#geom_sf(data = subs, fill = NA,color = 'gray70', lwd = 3) +
#geom_sf(data = subs, fill = NA,color = 'black', lwd = 1) +
scale_fill_gradient2(position="bottom" , low = "blue", mid = scales::muted("blue"), high = "darkred",
midpoint = .3) +
mapTheme()
library(patchwork)
g1+g2
ggsave(file = paste0("/Users/mikejohnson/Documents/GitHub/SRCgeneration/imgs//state_map_soc.png"), height = 12, width = 18, dpi = 300)
### PART 2
for(i in 1:length(sta)){
fl = AOI::aoi_get(state = sta[[i]]) %>% st_transform(5070)
flc = AOI::aoi_get(state = sta[[i]], county = 'all') %>% st_transform(5070)
yy = tab[fl,]
lab = paste0(c("0-15", "15-30", "30-60", "60-100", "100-250", ">250"), " (", table(yy$error),")" )
ggplot() +
geom_sf(data = fl, fill = 'black', color = "gray70", lwd = 5) +
geom_sf(data = fl, fill = 'black', color = "black", lwd = 1) +
geom_sf(data = flc, fill = 'transparent', color = "gray80", lwd = .05) +
scale_colour_brewer("nRMSE (%)", palette = "RdYlGn",
labels = lab, direction = -1) +
scale_fill_brewer("nRMSE (%)", palette = "RdYlGn", labels = lab, direction = -1) +
geom_sf(data = yy, aes(fill = as.factor(error),
color = as.factor(error)),
size = 1.8) +
mapTheme() +
labs(caption= paste0(cond[i], round(val[[i]], 2), "% mean nRMSE (excluding outliers)")) +
theme(plot.caption = element_text(hjust=0.5, size=rel(3))) +
theme(legend.position = 'none')
ggsave(file = paste0("/Users/mikejohnson/Documents/GitHub/SRCgeneration/imgs/state_",sta[i], "_table_look.png"), height = 12, width = 12, dpi = 300)
}
# Figure 4 ----------------------------------------------------------------
## PART A
composite = filter(raw, type == "combo")
plots = list()
for(i in 1:10){
test = composite[composite$order == i,]
plots[[i]] = ggplot(data = test) +
geom_density(aes(x=inN), fill = 'red', alpha=0.4) +
geom_density(aes(x=outN), fill = 'blue', alpha=0.4) +
geom_vline(xintercept = mean(test$inN), col = 'red', lty = 2, lwd = 1) +
geom_vline(xintercept = mean(test$outN), col = 'blue', lty = 2, lwd = 1) +
labs(title = paste0("Stream Order ", i),
x = "",
y = ""
) + theme_light() +
theme(plot.title = element_text(size=rel(2))) +
theme(axis.text.x = element_text(size=16),
axis.text.y = element_text(size=16))
}
fin = gridExtra::grid.arrange(plots[[1]],
plots[[2]],
plots[[3]],
plots[[4]],
plots[[5]],
plots[[6]],
plots[[7]],
plots[[8]],
plots[[9]],
plots[[10]],
nrow = 2)
ggsave(plot = fin, file = "/Users/mikejohnson/Documents/GitHub/SRCgeneration/imgs/so_distribution_plots.png", height = 9, width = 20, dpi = 300)
## PART B
ridge_plot = function(d, col,method,bw = 4){
ggplot(d, aes(x = nrmse, y = as.factor(order))) +
geom_density_ridges(size = .25, fill = col, bandwidth = bw) +
xlim(-10,250) +
labs(title = paste0("nRMSE by Stream Order: \n", method, " method"),
subtitle = (paste(sum(d$nrmse > 250, na.rm = T), "sites with nRMSE > 250%")),
x = "nRMSE",
y = "Stream Order") + ggridges::theme_ridges() +
geom_vline(xintercept=30) +
geom_vline(xintercept=60) +
geom_vline(xintercept=100)
}
g1 = ridge_plot(usgs_base, "green", "Default")
g2 = ridge_plot(usgs_composite, "red", "Composite")
g3 = ridge_plot(usgs_table, "lightblue", "SO-composite")
g4 = gridExtra::grid.arrange(g1,g2, g3, nrow = 1)
ggsave(plot = g4, filename = "/Users/mikejohnson/Documents/GitHub/SRCgeneration/imgs/ggridges.png", width = 12, height = 5, dpi = 300)
tmp = usgs_so %>% st_transform(5070) %>% filter(nrmse > 100)
cities = read_sf('/Users/mikejohnson/Downloads/tufts-uscitiestowns1mil14-shapefile/GISPORTAL_GISOWNER01_USCITIESTOWNS1MIL14.shp') %>% st_transform(5070)
city = cities[conus,] %>% filter(POP_2010 >25000)
ggplot() +
geom_sf(data = st_union(conus), fill = NA, color = 'gray', lwd = 1.1) +
geom_sf(data = city, color = 'red', size= .5) +
geom_sf(data = tmp, color = 'black', size= .5) +
mapTheme()
|
# Auto-extracted runnable example for sdmpredictors::get_future_layers():
# maps current-climate layer codes to their future-scenario equivalents.
library(sdmpredictors)
### Name: get_future_layers
### Title: Get the name of future climate layer(s) based on the current
###   climate layer(s)
### Aliases: get_future_layers
### ** Examples
# Look up the B1-scenario, year-2100 counterparts of two Bio-ORACLE layers.
future_layers <- get_future_layers(c("BO_salinity", "BO_sstmean"),
                                   scenario = "B1", year = 2100)
future_layers$layer_code  # character vector of future layer codes
| /data/genthat_extracted_code/sdmpredictors/examples/get_future_layers.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 358 | r | library(sdmpredictors)
### Name: get_future_layers
### Title: Get the name of future climate layer(s) based on the current
### climate layer(s)
### Aliases: get_future_layers
### ** Examples
future_layers <- get_future_layers(c("BO_salinity", "BO_sstmean"),
scenario = "B1", year = 2100)
future_layers$layer_code
|
# Plot 3 of the UCI household power consumption assignment: energy
# sub-metering traces for 1-2 Feb 2007, written to plot3.png.
power <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?")
power$Date <- as.Date(power$Date, "%d/%m/%Y")
# Restrict to the two days of interest.
feb <- subset(power, Date >= as.Date("2007-02-01") & Date <= as.Date("2007-02-02"))
# Combine date and time into a single POSIXct timestamp for the x-axis.
stamp <- as.POSIXct(paste(feb$Date, feb$Time))
png(filename = "plot3.png", width = 480, height = 480, units = "px")
plot(stamp, feb$Sub_metering_1, xlab = "", ylab = "Energy Sub metering", type = "l")
lines(stamp, feb$Sub_metering_2, type = "l", col = "red")
lines(stamp, feb$Sub_metering_3, type = "l", col = "blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), col = c("black", "red", "blue"), lwd = c(1, 1, 1))
dev.off()
readUCIdata$Date=as.Date(readUCIdata$Date,"%d/%m/%Y")
finaldt=subset(readUCIdata,Date>=as.Date("2007-02-01") & Date <=as.Date("2007-02-02"))
datet=as.POSIXct(paste(finaldt$Date,finaldt$Time))
png(filename="plot3.png",width=480,height=480, units="px")
plot(datet,finaldt$Sub_metering_1,xlab="",ylab="Energy Sub metering",type="l")
lines(datet,finaldt$Sub_metering_2,type="l",col="red")
lines(datet,finaldt$Sub_metering_3,type="l",col="blue")
legend("topright",c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),col=c("black","red","blue"),lwd=c(1,1,1))
dev.off() |
#!/usr/bin/env Rscript
# Experiment 2 analysis: completion-time boxplot statistics per task
# (1A/1B = residue scale, 2A/2B = atom scale, for trp-zipper / trp-cage)
# exported as CSV for LaTeX, plus Kruskal-Wallis tests per molecule.
# Relies on "Rinit" for dat.exp2, get.outname(), levene.test(),
# kruskal2tex() and evolution2tex().
# NOTE(review): setwd() in a script is fragile — it assumes the script is
# launched from its parent directory.
setwd("./files/")
source("Rinit")
out.file.boxplot = get.outname(commandArgs())
# Split the raw data by molecule and by interaction scale.
dat.exp2.trpzipper = dat.exp2[dat.exp2$molecule == "trp-zipper", ]
dat.exp2.trpzipper.residue = dat.exp2.trpzipper[dat.exp2.trpzipper$molecule.scale == "residue", ]
dat.exp2.trpzipper.atom = dat.exp2.trpzipper[dat.exp2.trpzipper$molecule.scale == "atom", ]
dat.exp2.trpcage = dat.exp2[dat.exp2$molecule == "trp-cage", ]
dat.exp2.trpcage.residue = dat.exp2.trpcage[dat.exp2.trpcage$molecule.scale == "residue", ]
dat.exp2.trpcage.atom = dat.exp2.trpcage[dat.exp2.trpcage$molecule.scale == "atom", ]
# Re-assemble each condition as (time, group size, task label) rows.
dat.time.trpzipper.residue = data.frame(
  time.completion = dat.exp2.trpzipper.residue$time.completion,
  group.number = dat.exp2.trpzipper.residue$group.number,
  task = rep("1A",length(dat.exp2.trpzipper.residue$group.number))
)
dat.time.trpcage.residue = data.frame(
  time.completion = dat.exp2.trpcage.residue$time.completion,
  group.number = dat.exp2.trpcage.residue$group.number,
  task = rep("1B",length(dat.exp2.trpcage.residue$group.number))
)
dat.time.trpzipper.atom = data.frame(
  time.completion = dat.exp2.trpzipper.atom$time.completion,
  group.number = dat.exp2.trpzipper.atom$group.number,
  task = rep("2A",length(dat.exp2.trpzipper.atom$group.number))
)
dat.time.trpcage.atom = data.frame(
  time.completion = dat.exp2.trpcage.atom$time.completion,
  group.number = dat.exp2.trpcage.atom$group.number,
  task = rep("2B",length(dat.exp2.trpcage.atom$group.number))
)
dat.time = rbind(dat.time.trpzipper.residue, dat.time.trpcage.residue, dat.time.trpzipper.atom, dat.time.trpcage.atom)
# Single-user ("monome") participants: boxplot five-number summaries per
# task, exported with LaTeX \myscenario{...} column headers.
dat.time.monome = dat.time[dat.time$group.number == 1, ]
bp.monome = boxplot(dat.time.monome$time.completion~dat.time.monome$task, plot=FALSE)
colnames(bp.monome$stats) = c("\\myscenario{1a}","\\myscenario{1b}","\\myscenario{2a}","\\myscenario{2b}")
write.table(bp.monome$stats,
            file = out.file.boxplot,
            quote = FALSE,
            sep = ",",
            eol = "\n",
            dec = ".",
            row.names = FALSE,
            col.names = TRUE)
# Two-user ("binome") stats are appended below the monome rows (no header).
dat.time.binome = dat.time[dat.time$group.number == 2, ]
bp.binome = boxplot(dat.time.binome$time.completion~dat.time.binome$task, plot=FALSE)
write.table(bp.binome$stats,
            file = out.file.boxplot,
            quote = FALSE,
            sep = ",",
            eol = "\n",
            dec = ".",
            row.names = FALSE,
            col.names = FALSE,
            append = TRUE)
# Group effect on completion time, trp-zipper.
# NOTE(review): `shapiro` and `levene` are computed but never written out —
# presumably interactive normality/variance checks; confirm they can go.
dat.time.trpzipper = dat.exp2.trpzipper
shapiro = shapiro.test(dat.time.trpzipper$time.completion)
levene = levene.test(dat.time.trpzipper$time.completion, dat.time.trpzipper$group.number)
kruskal = kruskal.test(dat.time.trpzipper$time.completion~dat.time.trpzipper$group.number)
out.anova = kruskal2tex(kruskal)
# NOTE(review): gsub(".csv", ...) treats "." as a regex wildcard; fixed =
# TRUE would be the literal match — verify no filename can false-match.
out.file.anova = gsub(".csv", "-anova-trpzipper.tex", out.file.boxplot)
write(
  out.anova,
  file = out.file.anova
)
# Same test for trp-cage.
dat.time.trpcage = dat.exp2.trpcage
shapiro = shapiro.test(dat.time.trpcage$time.completion)
levene = levene.test(dat.time.trpcage$time.completion, dat.time.trpcage$group.number)
kruskal = kruskal.test(dat.time.trpcage$time.completion~dat.time.trpcage$group.number)
out.anova = kruskal2tex(kruskal)
out.file.anova = gsub(".csv", "-anova-trpcage.tex", out.file.boxplot)
write(
  out.anova,
  file = out.file.anova
)
# Single- vs two-user ratio for trp-cage, exported as TeX.
dat = dat.time.trpcage
pop.a = dat[ dat$group.number == 1, ]$time.completion
pop.b = dat[ dat$group.number == 2, ]$time.completion
out.ratio = evolution2tex(pop.a, pop.b)
out.file.ratio = gsub(".csv", "-ratio-trpcage.tex", out.file.boxplot)
write(
  out.ratio,
  file = out.file.ratio
)
| /memoir/files/exp2-time-task-group.r | no_license | woshilapin/thesis | R | false | false | 3,628 | r | #!/usr/bin/env Rscript
# exp2-time-task-group: build task-completion-time tables per experimental
# condition, write box-plot statistics to a CSV, and export Kruskal-Wallis
# and ratio summaries as LaTeX fragments.
# Assumes the sourced "Rinit" provides dat.exp2 plus the helpers
# get.outname(), levene.test(), kruskal2tex() and evolution2tex() -- confirm.
setwd("./files/")
source("Rinit")
out.file.boxplot = get.outname(commandArgs())
# Split experiment-2 data by molecule and by representation scale.
dat.exp2.trpzipper = dat.exp2[dat.exp2$molecule == "trp-zipper", ]
dat.exp2.trpzipper.residue = dat.exp2.trpzipper[dat.exp2.trpzipper$molecule.scale == "residue", ]
dat.exp2.trpzipper.atom = dat.exp2.trpzipper[dat.exp2.trpzipper$molecule.scale == "atom", ]
dat.exp2.trpcage = dat.exp2[dat.exp2$molecule == "trp-cage", ]
dat.exp2.trpcage.residue = dat.exp2.trpcage[dat.exp2.trpcage$molecule.scale == "residue", ]
dat.exp2.trpcage.atom = dat.exp2.trpcage[dat.exp2.trpcage$molecule.scale == "atom", ]
# One completion-time table per condition, tagged with its task code
# (1A/1B = residue scale, 2A/2B = atom scale).
dat.time.trpzipper.residue = data.frame(
  time.completion = dat.exp2.trpzipper.residue$time.completion,
  group.number = dat.exp2.trpzipper.residue$group.number,
  task = rep("1A", length(dat.exp2.trpzipper.residue$group.number))
)
dat.time.trpcage.residue = data.frame(
  time.completion = dat.exp2.trpcage.residue$time.completion,
  group.number = dat.exp2.trpcage.residue$group.number,
  task = rep("1B", length(dat.exp2.trpcage.residue$group.number))
)
dat.time.trpzipper.atom = data.frame(
  time.completion = dat.exp2.trpzipper.atom$time.completion,
  group.number = dat.exp2.trpzipper.atom$group.number,
  task = rep("2A", length(dat.exp2.trpzipper.atom$group.number))
)
dat.time.trpcage.atom = data.frame(
  time.completion = dat.exp2.trpcage.atom$time.completion,
  group.number = dat.exp2.trpcage.atom$group.number,
  task = rep("2B", length(dat.exp2.trpcage.atom$group.number))
)
dat.time = rbind(dat.time.trpzipper.residue, dat.time.trpcage.residue, dat.time.trpzipper.atom, dat.time.trpcage.atom)
# Box-plot statistics per task for group 1, with LaTeX column headers,
# written as the first rows of the CSV.
dat.time.monome = dat.time[dat.time$group.number == 1, ]
bp.monome = boxplot(dat.time.monome$time.completion~dat.time.monome$task, plot=FALSE)
colnames(bp.monome$stats) = c("\\myscenario{1a}","\\myscenario{1b}","\\myscenario{2a}","\\myscenario{2b}")
write.table(bp.monome$stats,
            file = out.file.boxplot,
            quote = FALSE,
            sep = ",",
            eol = "\n",
            dec = ".",
            row.names = FALSE,
            col.names = TRUE)
# Group 2 statistics are appended below, without a header row.
dat.time.binome = dat.time[dat.time$group.number == 2, ]
bp.binome = boxplot(dat.time.binome$time.completion~dat.time.binome$task, plot=FALSE)
write.table(bp.binome$stats,
            file = out.file.boxplot,
            quote = FALSE,
            sep = ",",
            eol = "\n",
            dec = ".",
            row.names = FALSE,
            col.names = FALSE,
            append = TRUE)
# Per-molecule statistics: Shapiro-Wilk and Levene results are computed but
# not exported (kept for interactive inspection); the Kruskal-Wallis summary
# is exported as a LaTeX fragment.
# BUG FIX below: "." in the gsub() patterns was an unescaped regex wildcard;
# fixed = TRUE makes the ".csv" replacement literal.
dat.time.trpzipper = dat.exp2.trpzipper
shapiro = shapiro.test(dat.time.trpzipper$time.completion)
levene = levene.test(dat.time.trpzipper$time.completion, dat.time.trpzipper$group.number)
kruskal = kruskal.test(dat.time.trpzipper$time.completion~dat.time.trpzipper$group.number)
out.anova = kruskal2tex(kruskal)
out.file.anova = gsub(".csv", "-anova-trpzipper.tex", out.file.boxplot, fixed = TRUE)
write(
  out.anova,
  file = out.file.anova
)
dat.time.trpcage = dat.exp2.trpcage
shapiro = shapiro.test(dat.time.trpcage$time.completion)
levene = levene.test(dat.time.trpcage$time.completion, dat.time.trpcage$group.number)
kruskal = kruskal.test(dat.time.trpcage$time.completion~dat.time.trpcage$group.number)
out.anova = kruskal2tex(kruskal)
out.file.anova = gsub(".csv", "-anova-trpcage.tex", out.file.boxplot, fixed = TRUE)
write(
  out.anova,
  file = out.file.anova
)
# Between-group ratio summary for trp-cage, exported as LaTeX.
dat = dat.time.trpcage
pop.a = dat[dat$group.number == 1, ]$time.completion
pop.b = dat[dat$group.number == 2, ]$time.completion
out.ratio = evolution2tex(pop.a, pop.b)
out.file.ratio = gsub(".csv", "-ratio-trpcage.tex", out.file.boxplot, fixed = TRUE)
write(
  out.ratio,
  file = out.file.ratio
)
|
library("DeliveryMan")
library("ggplot2")
## Croc-tracking agent for DeliveryMan::runWheresCroc(): a hidden-Markov-model
## forward filter over the waterholes. The belief state is carried in
## moveInfo$mem$obs and the (cached) transition matrix in moveInfo$mem$tmax;
## the agent then walks the shortest path towards the most likely waterhole.
smartWC = function(moveInfo, readings, positions, edges, probs) {
## Build and cache the transition matrix on the first call;
## afterwards reuse the stored one.
if(length(moveInfo$mem$tmax) == 0) {
transitionMatrix = makeTransitionMatrix(edges)
moveInfo$mem$tmax = transitionMatrix
} else {
transitionMatrix = moveInfo$mem$tmax
}
## Likelihood of the current sensor readings at every waterhole.
currentObservation = get.observations(readings, probs)
## positions[1:2] are the backpackers: a negative entry means that player
## just died at waterhole -positions[i] (croc certainly there -> set that
## hole's likelihood to 1); a positive entry means the player is alive at
## that hole, so the croc cannot be there (likelihood 0).
for(i in 1:length(positions[1:2])) {
if(is.na(positions[i]) == FALSE) {
if(positions[i] < 0) {
currentObservation[-positions[i]] = 1
}
else {
currentObservation[positions[i]] = 0
}
}
}
## First turn: store the raw likelihoods. Later turns: HMM forward step -
## propagate the previous belief through the transition matrix, weight by
## the current likelihoods, and renormalize.
if(length(moveInfo$mem$obs) == 0) {
moveInfo$mem$obs = currentObservation
} else {
pastObservation = moveInfo$mem$obs
currentObservation = normalize((pastObservation %*% transitionMatrix) * currentObservation)
moveInfo$mem$obs = currentObservation
}
## Shortest path from our position (positions[3]) to the most probable hole.
path = shortest.path(positions[3], which.max(currentObservation), edges)
## Emit the next two moves; 0 stands for searching the current waterhole
## (per the runWheresCroc move convention -- confirm against DeliveryMan docs).
if(length(path) == 1) {
moveInfo$moves = c(0, 0)
} else if(length(path) == 2) {
moveInfo$moves = c(path[2], 0)
} else {
moveInfo$moves = path[2:3]
}
return(moveInfo)
}
## Shortest path
## Shortest path between two waterholes via breadth-first search.
## Return type mirrors the historical contract of this helper:
##  - start == goal          -> a single number
##  - start adjacent to goal -> numeric vector c(start, goal)
##  - otherwise              -> a list of node numbers from start to goal
shortest.path = function(start, goal, edges){
  if(start == goal){
    return(start)
  }
  ## A direct edge in either orientation ends the search immediately.
  edge.matches = function(target) {
    any(apply(edges, 1, function(row) isTRUE(all.equal(row, target))))
  }
  if(edge.matches(c(start, goal)) | edge.matches(c(goal, start))) {
    return(c(start, goal))
  }
  ## Adjacency list: neighbours[[k]] holds every node sharing an edge with
  ## node k, in the order the edges appear in the edge matrix.
  neighbours = vector("list", max(edges))
  for(k in 1:nrow(edges)) {
    a = edges[k, 1]
    b = edges[k, 2]
    neighbours[[a]] = c(neighbours[[a]], b)
    neighbours[[b]] = c(neighbours[[b]], a)
  }
  ## BFS frontier; parent[v] records the node from which v was first reached
  ## (NA, including out-of-range indices, means "not visited yet").
  frontier = list(start)
  parent = NA
  while(frontier[[1]] != goal) {
    node = frontier[[1]]
    for(nb in neighbours[[node]]) {
      if(is.na(parent[nb])) {
        frontier = append(frontier, nb)
        parent[[nb]] = node
      }
    }
    frontier = frontier[-1]
  }
  ## Walk the parent chain back from the goal, then reverse it.
  current = frontier[[1]]
  route = list(current)
  while(!(current == start)) {
    current = parent[[current]]
    route = append(route, current)
  }
  rev(route)
}
## Returns the probability distribution
## Likelihood of the three sensor readings at every waterhole: the product of
## independent normal densities for salinity, phosphate and nitrogen, using
## the per-hole mean (column 1) and standard deviation (column 2) in probs.
get.observations = function(reads, probs){
  n = nrow(probs$salinity)
  density.of = function(value, params) {
    dnorm(value, mean = params[1:n, 1], sd = params[1:n, 2])
  }
  density.of(reads[1], probs$salinity) *
    density.of(reads[2], probs$phosphate) *
    density.of(reads[3], probs$nitrogen)
}
## Simple normalizing function
## Rescale a weight vector so that its elements sum to one.
normalize = function(x){
  total = sum(x)
  x / total
}
## Creates a transition matrix with edges
## Movement transition matrix implied by the edge list: from each waterhole
## the croc moves uniformly at random to an adjacent hole or stays put.
makeTransitionMatrix = function(edges) {
  n = max(edges)
  transitions = matrix(0, nrow = n, ncol = n)
  for (node in 1:n) {
    ## Neighbours in either edge orientation, plus the node itself (staying).
    reachable = c(edges[edges[, 1] == node, 2],
                  edges[edges[, 2] == node, 1],
                  node)
    transitions[node, reachable] = 1 / length(reachable)
  }
  transitions
}
# Benchmark: play 1000 games and track the running mean number of turns.
test.run <- replicate(1000,runWheresCroc(smartWC, pause=0))
cumulative.average = cumsum(test.run) / seq_along(test.run)
## qplot(1:1000, cumulative.average, geom=c("line","smooth"), ylab="Turns, Cumulatice Average",
## xlab="Test runs", main="Performance (smartWC)")
# Final cumulative average over all runs.
print(tail(cumulative.average, n=1))
| /assignment2/smartWC.R | no_license | normano64/ai | R | false | false | 4,798 | r | library("DeliveryMan")
library("ggplot2")
## Croc-tracking agent for DeliveryMan::runWheresCroc(): a hidden-Markov-model
## forward filter over the waterholes. The belief state is carried in
## moveInfo$mem$obs and the (cached) transition matrix in moveInfo$mem$tmax;
## the agent then walks the shortest path towards the most likely waterhole.
smartWC = function(moveInfo, readings, positions, edges, probs) {
## Build and cache the transition matrix on the first call;
## afterwards reuse the stored one.
if(length(moveInfo$mem$tmax) == 0) {
transitionMatrix = makeTransitionMatrix(edges)
moveInfo$mem$tmax = transitionMatrix
} else {
transitionMatrix = moveInfo$mem$tmax
}
## Likelihood of the current sensor readings at every waterhole.
currentObservation = get.observations(readings, probs)
## positions[1:2] are the backpackers: a negative entry means that player
## just died at waterhole -positions[i] (croc certainly there -> set that
## hole's likelihood to 1); a positive entry means the player is alive at
## that hole, so the croc cannot be there (likelihood 0).
for(i in 1:length(positions[1:2])) {
if(is.na(positions[i]) == FALSE) {
if(positions[i] < 0) {
currentObservation[-positions[i]] = 1
}
else {
currentObservation[positions[i]] = 0
}
}
}
## First turn: store the raw likelihoods. Later turns: HMM forward step -
## propagate the previous belief through the transition matrix, weight by
## the current likelihoods, and renormalize.
if(length(moveInfo$mem$obs) == 0) {
moveInfo$mem$obs = currentObservation
} else {
pastObservation = moveInfo$mem$obs
currentObservation = normalize((pastObservation %*% transitionMatrix) * currentObservation)
moveInfo$mem$obs = currentObservation
}
## Shortest path from our position (positions[3]) to the most probable hole.
path = shortest.path(positions[3], which.max(currentObservation), edges)
## Emit the next two moves; 0 stands for searching the current waterhole
## (per the runWheresCroc move convention -- confirm against DeliveryMan docs).
if(length(path) == 1) {
moveInfo$moves = c(0, 0)
} else if(length(path) == 2) {
moveInfo$moves = c(path[2], 0)
} else {
moveInfo$moves = path[2:3]
}
return(moveInfo)
}
## Shortest path
## Shortest path between two waterholes via breadth-first search.
## Return type mirrors the historical contract of this helper:
##  - start == goal          -> a single number
##  - start adjacent to goal -> numeric vector c(start, goal)
##  - otherwise              -> a list of node numbers from start to goal
shortest.path = function(start, goal, edges){
  if(start == goal){
    return(start)
  }
  ## A direct edge in either orientation ends the search immediately.
  edge.matches = function(target) {
    any(apply(edges, 1, function(row) isTRUE(all.equal(row, target))))
  }
  if(edge.matches(c(start, goal)) | edge.matches(c(goal, start))) {
    return(c(start, goal))
  }
  ## Adjacency list: neighbours[[k]] holds every node sharing an edge with
  ## node k, in the order the edges appear in the edge matrix.
  neighbours = vector("list", max(edges))
  for(k in 1:nrow(edges)) {
    a = edges[k, 1]
    b = edges[k, 2]
    neighbours[[a]] = c(neighbours[[a]], b)
    neighbours[[b]] = c(neighbours[[b]], a)
  }
  ## BFS frontier; parent[v] records the node from which v was first reached
  ## (NA, including out-of-range indices, means "not visited yet").
  frontier = list(start)
  parent = NA
  while(frontier[[1]] != goal) {
    node = frontier[[1]]
    for(nb in neighbours[[node]]) {
      if(is.na(parent[nb])) {
        frontier = append(frontier, nb)
        parent[[nb]] = node
      }
    }
    frontier = frontier[-1]
  }
  ## Walk the parent chain back from the goal, then reverse it.
  current = frontier[[1]]
  route = list(current)
  while(!(current == start)) {
    current = parent[[current]]
    route = append(route, current)
  }
  rev(route)
}
## Returns the probability distribution
## Likelihood of the three sensor readings at every waterhole: the product of
## independent normal densities for salinity, phosphate and nitrogen, using
## the per-hole mean (column 1) and standard deviation (column 2) in probs.
get.observations = function(reads, probs){
  n = nrow(probs$salinity)
  density.of = function(value, params) {
    dnorm(value, mean = params[1:n, 1], sd = params[1:n, 2])
  }
  density.of(reads[1], probs$salinity) *
    density.of(reads[2], probs$phosphate) *
    density.of(reads[3], probs$nitrogen)
}
## Simple normalizing function
## Rescale a weight vector so that its elements sum to one.
normalize = function(x){
  total = sum(x)
  x / total
}
## Creates a transition matrix with edges
## Movement transition matrix implied by the edge list: from each waterhole
## the croc moves uniformly at random to an adjacent hole or stays put.
makeTransitionMatrix = function(edges) {
  n = max(edges)
  transitions = matrix(0, nrow = n, ncol = n)
  for (node in 1:n) {
    ## Neighbours in either edge orientation, plus the node itself (staying).
    reachable = c(edges[edges[, 1] == node, 2],
                  edges[edges[, 2] == node, 1],
                  node)
    transitions[node, reachable] = 1 / length(reachable)
  }
  transitions
}
# Benchmark: play 1000 games and track the running mean number of turns.
test.run <- replicate(1000,runWheresCroc(smartWC, pause=0))
cumulative.average = cumsum(test.run) / seq_along(test.run)
## qplot(1:1000, cumulative.average, geom=c("line","smooth"), ylab="Turns, Cumulatice Average",
## xlab="Test runs", main="Performance (smartWC)")
# Final cumulative average over all runs.
print(tail(cumulative.average, n=1))
|
#' Access files in the current app
#'
#' Thin wrapper around system.file() pinned to this package ("svm2").
#'
#' @param ... Character vector specifying directory and or file to
#' point to inside the current package.
#'
#' @return Absolute path(s) from system.file(); "" when not found.
#' @noRd
app_sys <- function(...){
  path_parts <- list(...)
  do.call(system.file, c(path_parts, list(package = "svm2")))
}
#' Read App Config
#'
#' Fetch one value from the golem-config.yml shipped inside the package.
#'
#' @param value Value to retrieve from the config file.
#' @param config R_CONFIG_ACTIVE value.
#' @param use_parent Logical, scan the parent directory for config file.
#'
#' @importFrom config get
#'
#' @noRd
get_golem_config <- function(value,
                             config = Sys.getenv("R_CONFIG_ACTIVE", "default"),
                             use_parent = TRUE) {
  # Modify this if your config file is somewhere else:
  config_file <- app_sys("golem-config.yml")
  config::get(
    value = value,
    config = config,
    file = config_file,
    use_parent = use_parent
  )
}
| /R/app_config.R | no_license | hbaldwin07/GK_shiny_app | R | false | false | 778 | r | #' Access files in the current app
#' Access files in the current app
#'
#' Thin wrapper around system.file() pinned to this package ("svm2").
#'
#' @param ... Character vector specifying directory and or file to
#' point to inside the current package.
#'
#' @return Absolute path(s) from system.file(); "" when not found.
#' @noRd
app_sys <- function(...){
system.file(..., package = "svm2")
}
#' Read App Config
#'
#' Fetch one value from the golem-config.yml shipped inside the package.
#'
#' @param value Value to retrieve from the config file.
#' @param config R_CONFIG_ACTIVE value.
#' @param use_parent Logical, scan the parent directory for config file.
#'
#' @importFrom config get
#'
#' @noRd
get_golem_config <- function(
value,
config = Sys.getenv("R_CONFIG_ACTIVE", "default"),
use_parent = TRUE
){
config::get(
value = value,
config = config,
# Modify this if your config file is somewhere else:
file = app_sys("golem-config.yml"),
use_parent = use_parent
)
}
|
# Build the consolidated AS-store table from three scraper outputs.
# NOTE(review): absolute, machine-specific path -- a relative path or a
# project-root helper would make this portable.
as<-"D:/Eduardo Trujillo/Downloads/TESIS/PROGRAMA/TIENDAS/1.AS/"
# Each sourced script presumably defines one of as_list / as_sequence /
# as_vector (and brings %>% into scope) -- confirm against those files.
source(paste0(as,"1.LIST_AS.R"))
source(paste0(as,"2.SEQUENCE_AS.R"))
source(paste0(as,"3.VECTOR_AS.R"))
# Stack the three tables and tag every row with the store identifier.
AS<-as_list %>% rbind(as_sequence,as_vector)
TIPO_NIELSEN<-"AS"
AS <-AS %>% cbind(TIPO_NIELSEN)
| /TIENDAS/1.AS.R | no_license | 1Edtrujillo1/Websraping | R | false | false | 270 | r | as<-"D:/Eduardo Trujillo/Downloads/TESIS/PROGRAMA/TIENDAS/1.AS/"
# Each sourced script presumably defines one of as_list / as_sequence /
# as_vector (and brings %>% into scope); `as` holds their folder -- confirm.
source(paste0(as,"1.LIST_AS.R"))
source(paste0(as,"2.SEQUENCE_AS.R"))
source(paste0(as,"3.VECTOR_AS.R"))
# Stack the three tables and tag every row with the store identifier.
AS<-as_list %>% rbind(as_sequence,as_vector)
TIPO_NIELSEN<-"AS"
AS <-AS %>% cbind(TIPO_NIELSEN)
|
# This is the user-interface definition of a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
library(shinydashboard)
library(mvtnorm)
library(scatterplot3d)
library(ellipse)
library(plot3D)
# Plot half-width: axes in both tabs span [-BOUND2, BOUND2].
# NOTE(review): BOUND1 appears unused in this file -- confirm before removal.
BOUND1<-5
BOUND2<-5
# UI: dashboard with a shared sidebar (sample size N, 3D viewing angles
# tdt/tdp, grid steps dx/dy) and two tabs, one per demonstration.
ui <- dashboardPage(
dashboardHeader(title="InfoF422: Conditional probability", titleWidth = 500),
dashboardSidebar(
sidebarMenu(
# Number of points simulated for the scatter plots.
sliderInput("N",
"Number of samples:",
min = 1,
max = 1000,
value = 500,step=2),
# Viewing angles forwarded to persp3D() in the server.
sliderInput("tdt",
"3D theta:",
min = -60,
max = 60,
value = 0,step=5),
sliderInput("tdp",
"3D phi:",
min = 0,
max = 90,
value = 75,step=1),
# Grid resolution of the regression-tab density surface.
sliderInput("dx",
"X density:",
min = 0.1,
max = 0.3,
value = 0.15,step=0.01),
sliderInput("dy",
"Y density:",
min = 0.1,
max = 0.3,
value = 0.15,step=0.01),
menuItem("Conditional gaussian distribution", tabName = "Bivariatemixture", icon = icon("th")),
menuItem("Regression function", tabName = "Regression", icon = icon("th"))
)
),
dashboardBody(
tabItems(
# First tab content
# Second tab content
# Tab 1: bivariate Gaussian; rotation + axis lengths define the covariance.
tabItem(tabName = "Bivariatemixture",
fluidRow(
box(width=4,collapsible = TRUE,sliderInput("rot1","Rotation 1:", min = -3/2,max = 3/2,
value = -0.75),
sliderInput("ax11","Axis1 1:",min = 0.01,max = BOUND2,value = 3,step=0.05),
sliderInput("ax21","Axis2 1:", min = 0.01, max = BOUND2, value = 0.15,step=0.05),
sliderInput("x","x:", min = -BOUND2, max = BOUND2, value = 0.15,step=0.05),
textOutput("textB")),
box(width=8,title = "Distribution",collapsible = TRUE,plotOutput("biPlotP"))),
fluidRow( box(width=6,collapsible = TRUE,title = "Data",plotOutput("biPlotD")),
box(width=6,collapsible = TRUE,title = "Conditional distribution",plotOutput("biCond")))
), ## tabItem
# Tab 2: regression function choice (ord), noise level (sdw), query point (rx).
tabItem(tabName = "Regression",
fluidRow(box(width=4,collapsible = TRUE,
sliderInput("ord","Functions:", min = -3,max = 3,
value = 1,step=1),
sliderInput("sdw","Cond sdev:", min = 0.5,max = 2.5,
value = 1,step=0.1),
sliderInput("rx","x:", min = -BOUND2, max = BOUND2, value = 0.15,step=0.05)),
box(width=6,title = "Distribution",collapsible = TRUE,plotOutput("regrPlotP"))),## fluidRow
fluidRow( box(width=6,collapsible = TRUE,title = "Data",plotOutput("regrPlotD")),
box(width=6,collapsible = TRUE,title = "Conditional distribution",plotOutput("regrCond")))
) ## tabItem
)
)
) # ui
# Cross-render globals, written with <<- inside the render blocks:
D<-NULL ## most recent simulated dataset (written by biPlotD / regrPlotD)
E<-NULL ## eigen decomposition of the last covariance (biPlotP -> textB)
# Server: renders both tabs. Side effects: D (<<-) holds the most recent
# simulated dataset and E (<<-) the eigen decomposition of the most recent
# covariance matrix (displayed by output$textB).
server<-function(input, output,session) {
  set.seed(122)

  # Regression function selected by the integer code `ord`:
  # -1/-2/-3 -> sin(x), cos(2x), cos(4x); 1/2/3 -> x, x^2-2, -x^2+1; else 0.
  f <- function(x, ord) {
    f <- numeric(length(x))
    if (ord == -1)
      f <- sin(x)
    if (ord == -2)
      f <- cos(2 * x)
    if (ord == -3)
      f <- cos(4 * x)
    if (ord == 1)
      f <- x
    if (ord == 2)
      f <- x^2 - 2
    if (ord == 3)
      f <- -x^2 + 1
    f
  }

  # 3D bivariate normal density; red line marks the conditioning slice x = input$x.
  output$biPlotP <- renderPlot({
    x <- seq(-BOUND2, BOUND2, by = .2)
    y <- x
    z <- array(0, dim = c(length(x), length(y)))
    # th: rotation of the first principal axis; ax1/ax2: axis lengths.
    ax1 <- input$ax11
    th <- input$rot1
    ax2 <- input$ax21
    Rot <- array(c(cos(th), -sin(th), sin(th), cos(th)), dim = c(2, 2)) # rotation matrix
    A <- array(c(ax1, 0, 0, ax2), dim = c(2, 2))
    Sigma <- (Rot %*% A) %*% t(Rot) # covariance with the chosen principal axes
    E <<- eigen(Sigma) # consumed by output$textB
    for (i in 1:length(x)) {
      for (j in 1:length(y)) {
        z[i, j] <- dmvnorm(c(x[i], y[j]), sigma = Sigma)
      }
    }
    z[is.na(z)] <- 1
    op <- par(bg = "white")
    prob.z <- z
    surface <- persp3D(x, y, prob.z, theta = input$tdt, phi = input$tdp,
                       expand = 0.5, col = "blue", facets = FALSE)
    lines(trans3d(x = input$x, y = seq(-BOUND2, BOUND2, by = .2), z = 0,
                  pmat = surface), col = "red", lwd = 3)
  })

  # Scatter of N draws from the current bivariate normal, with its
  # covariance ellipse and the conditioning line.
  output$biPlotD <- renderPlot({
    th <- input$rot1
    Rot <- array(c(cos(th), -sin(th), sin(th), cos(th)), dim = c(2, 2)) # rotation matrix
    A <- array(c(input$ax11, 0, 0, input$ax21), dim = c(2, 2))
    Sigma <- (Rot %*% A) %*% t(Rot)
    D1 <- rmvnorm(input$N, sigma = Sigma)
    D <<- D1
    plot(D[, 1], D[, 2], xlim = c(-BOUND2, BOUND2), ylim = c(-BOUND2, BOUND2))
    lines(ellipse(Sigma))
    abline(v = input$x, col = "red", lwd = 3)
  })

  # Conditional density of y | x = input$x (red) versus the marginal of y.
  output$biCond <- renderPlot({
    th <- input$rot1
    Rot <- array(c(cos(th), -sin(th), sin(th), cos(th)), dim = c(2, 2)) # rotation matrix
    A <- array(c(input$ax11, 0, 0, input$ax21), dim = c(2, 2))
    Sigma <- (Rot %*% A) %*% t(Rot)
    sigma2 <- sqrt(Sigma[2, 2])
    sigma1 <- sqrt(Sigma[1, 1])
    rho <- Sigma[1, 2] / (sigma1 * sigma2)
    x <- seq(-1.5 * BOUND2, 1.5 * BOUND2, by = .02)
    # BUG FIX: dnorm() takes a standard deviation. The conditional variance is
    # sigma2^2*(1-rho^2), so pass its square root; the marginal sd is sigma2.
    plot(x, dnorm(x, rho * sigma2 * (input$x) / sigma1, sd = sigma2 * sqrt(1 - rho^2)),
         type = "l", col = "red",
         lwd = 2, ylab = "Conditional density")
    lines(x, dnorm(x, 0, sd = sigma2))
    legend(x = BOUND2, y = 1, legend = c("Conditional", "Marginal"),
           lty = 1, col = c("red", "black"))
  })

  # Eigenvalues of the current covariance. The bare input references only
  # register reactive dependencies so the text refreshes with the sliders
  # (E itself is written non-reactively by biPlotP).
  # BUG FIX: the original referenced non-existent ids rot/ax1/ax2, so the
  # text never refreshed; the actual slider ids are rot1/ax11/ax21.
  output$textB <- renderText({
    input$rot1
    input$ax11
    input$ax21
    paste("Eigen1=", E$values[1], "\n Eigen2=", E$values[2])
  })

  # Conditional density surface p(y | x) for the selected regression function.
  output$regrPlotP <- renderPlot({
    x <- seq(-BOUND2, BOUND2, by = input$dx)
    y <- seq(-BOUND2, BOUND2, by = input$dy)
    z <- array(0, dim = c(length(x), length(y)))
    muy <- f(x, ord = input$ord)
    for (i in 1:length(x)) {
      for (j in 1:length(y)) {
        z[i, j] <- dnorm(y[j], mean = muy[i], sd = input$sdw)
      }
    }
    z[is.na(z)] <- 1
    op <- par(bg = "white")
    prob.z <- z
    surface <- persp3D(x, y, prob.z, theta = input$tdt, phi = input$tdp,
                       expand = 0.5, col = "blue", facets = FALSE)
    lines(trans3D(x = input$rx, y = seq(-BOUND2, BOUND2, by = .2), z = 0,
                  pmat = surface), col = "red", lwd = 3)
  })

  # N noisy samples from y = f(x) + Normal(0, sdw), with the true function.
  output$regrPlotD <- renderPlot({
    X <- seq(-BOUND2, BOUND2, length.out = input$N)
    muy <- f(X, ord = input$ord)
    Y <- muy + rnorm(input$N, sd = input$sdw)
    D <<- cbind(X, Y)
    plot(D[, 1], D[, 2], xlim = c(-BOUND2, BOUND2), ylim = c(-BOUND2, BOUND2))
    lines(D[, 1], muy)
    abline(v = input$rx, col = "red", lwd = 3)
  })

  # Conditional density of y at the query point x = input$rx.
  # (Dropped the leftover "th = input$rot1", which only created a spurious
  # reactive dependency on the other tab's slider.)
  output$regrCond <- renderPlot({
    muy <- f(input$rx, input$ord)
    x <- seq(-1.5 * BOUND2, 1.5 * BOUND2, by = .02)
    plot(x, dnorm(x, mean = muy, sd = input$sdw), type = "l", col = "red",
         lwd = 2, ylab = "Conditional density")
  })
}
shinyApp(ui, server)
| /inst/shiny/condpro.R | no_license | niuneo/gbcode | R | false | false | 7,222 | r |
# This is the user-interface definition of a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
library(shinydashboard)
library(mvtnorm)
library(scatterplot3d)
library(ellipse)
library(plot3D)
# Plot half-width: axes in both tabs span [-BOUND2, BOUND2].
# NOTE(review): BOUND1 appears unused in this file -- confirm before removal.
BOUND1<-5
BOUND2<-5
# UI: dashboard with a shared sidebar (sample size N, 3D viewing angles
# tdt/tdp, grid steps dx/dy) and two tabs, one per demonstration.
ui <- dashboardPage(
dashboardHeader(title="InfoF422: Conditional probability", titleWidth = 500),
dashboardSidebar(
sidebarMenu(
# Number of points simulated for the scatter plots.
sliderInput("N",
"Number of samples:",
min = 1,
max = 1000,
value = 500,step=2),
# Viewing angles forwarded to persp3D() in the server.
sliderInput("tdt",
"3D theta:",
min = -60,
max = 60,
value = 0,step=5),
sliderInput("tdp",
"3D phi:",
min = 0,
max = 90,
value = 75,step=1),
# Grid resolution of the regression-tab density surface.
sliderInput("dx",
"X density:",
min = 0.1,
max = 0.3,
value = 0.15,step=0.01),
sliderInput("dy",
"Y density:",
min = 0.1,
max = 0.3,
value = 0.15,step=0.01),
menuItem("Conditional gaussian distribution", tabName = "Bivariatemixture", icon = icon("th")),
menuItem("Regression function", tabName = "Regression", icon = icon("th"))
)
),
dashboardBody(
tabItems(
# First tab content
# Second tab content
# Tab 1: bivariate Gaussian; rotation + axis lengths define the covariance.
tabItem(tabName = "Bivariatemixture",
fluidRow(
box(width=4,collapsible = TRUE,sliderInput("rot1","Rotation 1:", min = -3/2,max = 3/2,
value = -0.75),
sliderInput("ax11","Axis1 1:",min = 0.01,max = BOUND2,value = 3,step=0.05),
sliderInput("ax21","Axis2 1:", min = 0.01, max = BOUND2, value = 0.15,step=0.05),
sliderInput("x","x:", min = -BOUND2, max = BOUND2, value = 0.15,step=0.05),
textOutput("textB")),
box(width=8,title = "Distribution",collapsible = TRUE,plotOutput("biPlotP"))),
fluidRow( box(width=6,collapsible = TRUE,title = "Data",plotOutput("biPlotD")),
box(width=6,collapsible = TRUE,title = "Conditional distribution",plotOutput("biCond")))
), ## tabItem
# Tab 2: regression function choice (ord), noise level (sdw), query point (rx).
tabItem(tabName = "Regression",
fluidRow(box(width=4,collapsible = TRUE,
sliderInput("ord","Functions:", min = -3,max = 3,
value = 1,step=1),
sliderInput("sdw","Cond sdev:", min = 0.5,max = 2.5,
value = 1,step=0.1),
sliderInput("rx","x:", min = -BOUND2, max = BOUND2, value = 0.15,step=0.05)),
box(width=6,title = "Distribution",collapsible = TRUE,plotOutput("regrPlotP"))),## fluidRow
fluidRow( box(width=6,collapsible = TRUE,title = "Data",plotOutput("regrPlotD")),
box(width=6,collapsible = TRUE,title = "Conditional distribution",plotOutput("regrCond")))
) ## tabItem
)
)
) # ui
# Cross-render globals, written with <<- inside the render blocks:
D<-NULL ## most recent simulated dataset (written by biPlotD / regrPlotD)
E<-NULL ## eigen decomposition of the last covariance (biPlotP -> textB)
# Server: renders both tabs. Side effects: D (<<-) holds the most recent
# simulated dataset and E (<<-) the eigen decomposition of the most recent
# covariance matrix (displayed by output$textB).
server<-function(input, output,session) {
  set.seed(122)

  # Regression function selected by the integer code `ord`:
  # -1/-2/-3 -> sin(x), cos(2x), cos(4x); 1/2/3 -> x, x^2-2, -x^2+1; else 0.
  f <- function(x, ord) {
    f <- numeric(length(x))
    if (ord == -1)
      f <- sin(x)
    if (ord == -2)
      f <- cos(2 * x)
    if (ord == -3)
      f <- cos(4 * x)
    if (ord == 1)
      f <- x
    if (ord == 2)
      f <- x^2 - 2
    if (ord == 3)
      f <- -x^2 + 1
    f
  }

  # 3D bivariate normal density; red line marks the conditioning slice x = input$x.
  output$biPlotP <- renderPlot({
    x <- seq(-BOUND2, BOUND2, by = .2)
    y <- x
    z <- array(0, dim = c(length(x), length(y)))
    # th: rotation of the first principal axis; ax1/ax2: axis lengths.
    ax1 <- input$ax11
    th <- input$rot1
    ax2 <- input$ax21
    Rot <- array(c(cos(th), -sin(th), sin(th), cos(th)), dim = c(2, 2)) # rotation matrix
    A <- array(c(ax1, 0, 0, ax2), dim = c(2, 2))
    Sigma <- (Rot %*% A) %*% t(Rot) # covariance with the chosen principal axes
    E <<- eigen(Sigma) # consumed by output$textB
    for (i in 1:length(x)) {
      for (j in 1:length(y)) {
        z[i, j] <- dmvnorm(c(x[i], y[j]), sigma = Sigma)
      }
    }
    z[is.na(z)] <- 1
    op <- par(bg = "white")
    prob.z <- z
    surface <- persp3D(x, y, prob.z, theta = input$tdt, phi = input$tdp,
                       expand = 0.5, col = "blue", facets = FALSE)
    lines(trans3d(x = input$x, y = seq(-BOUND2, BOUND2, by = .2), z = 0,
                  pmat = surface), col = "red", lwd = 3)
  })

  # Scatter of N draws from the current bivariate normal, with its
  # covariance ellipse and the conditioning line.
  output$biPlotD <- renderPlot({
    th <- input$rot1
    Rot <- array(c(cos(th), -sin(th), sin(th), cos(th)), dim = c(2, 2)) # rotation matrix
    A <- array(c(input$ax11, 0, 0, input$ax21), dim = c(2, 2))
    Sigma <- (Rot %*% A) %*% t(Rot)
    D1 <- rmvnorm(input$N, sigma = Sigma)
    D <<- D1
    plot(D[, 1], D[, 2], xlim = c(-BOUND2, BOUND2), ylim = c(-BOUND2, BOUND2))
    lines(ellipse(Sigma))
    abline(v = input$x, col = "red", lwd = 3)
  })

  # Conditional density of y | x = input$x (red) versus the marginal of y.
  output$biCond <- renderPlot({
    th <- input$rot1
    Rot <- array(c(cos(th), -sin(th), sin(th), cos(th)), dim = c(2, 2)) # rotation matrix
    A <- array(c(input$ax11, 0, 0, input$ax21), dim = c(2, 2))
    Sigma <- (Rot %*% A) %*% t(Rot)
    sigma2 <- sqrt(Sigma[2, 2])
    sigma1 <- sqrt(Sigma[1, 1])
    rho <- Sigma[1, 2] / (sigma1 * sigma2)
    x <- seq(-1.5 * BOUND2, 1.5 * BOUND2, by = .02)
    # BUG FIX: dnorm() takes a standard deviation. The conditional variance is
    # sigma2^2*(1-rho^2), so pass its square root; the marginal sd is sigma2.
    plot(x, dnorm(x, rho * sigma2 * (input$x) / sigma1, sd = sigma2 * sqrt(1 - rho^2)),
         type = "l", col = "red",
         lwd = 2, ylab = "Conditional density")
    lines(x, dnorm(x, 0, sd = sigma2))
    legend(x = BOUND2, y = 1, legend = c("Conditional", "Marginal"),
           lty = 1, col = c("red", "black"))
  })

  # Eigenvalues of the current covariance. The bare input references only
  # register reactive dependencies so the text refreshes with the sliders
  # (E itself is written non-reactively by biPlotP).
  # BUG FIX: the original referenced non-existent ids rot/ax1/ax2, so the
  # text never refreshed; the actual slider ids are rot1/ax11/ax21.
  output$textB <- renderText({
    input$rot1
    input$ax11
    input$ax21
    paste("Eigen1=", E$values[1], "\n Eigen2=", E$values[2])
  })

  # Conditional density surface p(y | x) for the selected regression function.
  output$regrPlotP <- renderPlot({
    x <- seq(-BOUND2, BOUND2, by = input$dx)
    y <- seq(-BOUND2, BOUND2, by = input$dy)
    z <- array(0, dim = c(length(x), length(y)))
    muy <- f(x, ord = input$ord)
    for (i in 1:length(x)) {
      for (j in 1:length(y)) {
        z[i, j] <- dnorm(y[j], mean = muy[i], sd = input$sdw)
      }
    }
    z[is.na(z)] <- 1
    op <- par(bg = "white")
    prob.z <- z
    surface <- persp3D(x, y, prob.z, theta = input$tdt, phi = input$tdp,
                       expand = 0.5, col = "blue", facets = FALSE)
    lines(trans3D(x = input$rx, y = seq(-BOUND2, BOUND2, by = .2), z = 0,
                  pmat = surface), col = "red", lwd = 3)
  })

  # N noisy samples from y = f(x) + Normal(0, sdw), with the true function.
  output$regrPlotD <- renderPlot({
    X <- seq(-BOUND2, BOUND2, length.out = input$N)
    muy <- f(X, ord = input$ord)
    Y <- muy + rnorm(input$N, sd = input$sdw)
    D <<- cbind(X, Y)
    plot(D[, 1], D[, 2], xlim = c(-BOUND2, BOUND2), ylim = c(-BOUND2, BOUND2))
    lines(D[, 1], muy)
    abline(v = input$rx, col = "red", lwd = 3)
  })

  # Conditional density of y at the query point x = input$rx.
  # (Dropped the leftover "th = input$rot1", which only created a spurious
  # reactive dependency on the other tab's slider.)
  output$regrCond <- renderPlot({
    muy <- f(input$rx, input$ord)
    x <- seq(-1.5 * BOUND2, 1.5 * BOUND2, by = .02)
    plot(x, dnorm(x, mean = muy, sd = input$sdw), type = "l", col = "red",
         lwd = 2, ylab = "Conditional density")
  })
}
shinyApp(ui, server)
|
# Package test entry point (run by R CMD check): execute every test under
# tests/testthat/ against the installed onlineFDR package.
library(testthat)
library(onlineFDR)
test_check("onlineFDR")
| /tests/testthat.R | no_license | dsrobertson/onlineFDR | R | false | false | 62 | r | library(testthat)
# Load the package under test, then run its full testthat suite.
library(onlineFDR)
test_check("onlineFDR")
|
# @file Plots.R
#
# Copyright 2020 Observational Health Data Sciences and Informatics
#
# This file is part of VAP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' Generate and save the treatment-pathway plots
#'
#' @details
#' Builds the antibiotic-frequency bar plot, the daily prescription plots
#' (per drug and per drug class) and the sunburst/sankey pathway plots, and
#' saves each one as an HTML widget under `<outputFolder>/plots`.
#' NOTE(review): the previous title ("Plot the outcome incidence over time")
#' and return doc ("TRUE if it ran") were copy-pasted from a survival-plot
#' helper and did not match this function.
#'
#' @param cohortDatabaseSchema Schema name where intermediate data can be stored. You will need to have
#' write privileges in this schema. Note that for SQL Server, this should
#' include both the database and schema name, for example 'cdm_data.dbo'.
#' @param cohortTable The name of the table that holds the exposure and
#' outcome cohorts used in this study.
#' @param cohortId The number of cohort id
#' @param conceptSets Concept set json files created from ATLAS
#' @param drugExposureData The covariate data of drug exposure table obtained by the FeatureExtraction package
#' @param sequenceData The data reformatted by drug exposure date for visualization
#' @param outputFolder Name of local folder to place results; make sure to use forward slashes (/)
#' @param StartDays First day from index date (cohort start date); currently
#' only used by the commented-out longitudinal plot
#' @param EndDays Last day from index date (cohort start date); currently
#' only used by the commented-out longitudinal plot
#' @param pathLevel Level of pathway depth
#'
#' @import dplyr
#' @import plotly
#' @return
#' The value of the final htmlwidgets::saveWidget() call; this function is
#' used for its side effect of writing HTML plot files.
#'
#' @export
PlotTxPathway <- function(cohortDatabaseSchema,
cohortTable,
cohortId,
conceptSets,
drugExposureData,
sequenceData,
outputFolder,
StartDays = 0,
EndDays = 365,
pathLevel = 2){
saveFolder <- file.path(outputFolder, "plots")
if (!file.exists(saveFolder))
dir.create(saveFolder, recursive = TRUE)
# Each plot below is built by an internal helper and saved as an HTML widget.
freqBarPlot <- freqBarPlot(cohortDatabaseSchema = cohortDatabaseSchema,
cohortTable = cohortTable,
cohortId = cohortId,
conceptSets = conceptSets,
drugExposureData = drugExposureData)
htmlwidgets::saveWidget(freqBarPlot, file = file.path(saveFolder, "freqBarPlot.html"))
# Not use in VAP cohort
# longitudinalPlot <- longitudinalPlot(cohortDatabaseSchema,
# cohortTable,
# cohortId,
# conceptSets,
# drugExposureData,
# StartDays = 0,
# EndDays = 365)
# export(longitudinalPlot, file = file.path(saveFolder, "longitudinalPlot.png"))
dailyPlot <- dailyPlot(cohortDatabaseSchema = cohortDatabaseSchema,
cohortTable = cohortTable,
cohortId = cohortId,
conceptSets = conceptSets,
drugExposureData = drugExposureData)
htmlwidgets::saveWidget(dailyPlot, file = file.path(saveFolder, "dailyPlot.html"))
dailyGroupPlot <- dailyGroupPlot(cohortDatabaseSchema = cohortDatabaseSchema,
cohortTable = cohortTable,
cohortId = cohortId,
conceptSets = conceptSets,
drugExposureData = drugExposureData)
htmlwidgets::saveWidget(dailyGroupPlot, file = file.path(saveFolder, "dailyGroupPlot.html"))
sunburstPlot <- sunburstPlot(sequenceData = sequenceData,
pathLevel = pathLevel)
htmlwidgets::saveWidget(sunburstPlot, file = file.path(saveFolder, "sunburst.html"))
sankeyPlot <- sankeyPlot(sequenceData = sequenceData,
cohortDatabaseSchema = cohortDatabaseSchema,
cohortTable = cohortTable,
cohortId = cohortId,
pathLevel = pathLevel)
htmlwidgets::saveWidget(sankeyPlot, file = file.path(saveFolder, "sankey.html"))
}
# Bar plot: for each antibiotic (CONCEPT_NAME), the percentage of cohort
# members with at least one prescription record.
# NOTE(review): `connectionDetails` is not a parameter of this function - it
# is resolved from the calling environment; consider passing it explicitly.
freqBarPlot <- function(cohortDatabaseSchema,
cohortTable,
cohortId,
conceptSets,
drugExposureData){
# Cohort size (denominator for the percentages).
N <- totalN(connectionDetails = connectionDetails,
cohortDatabaseSchema = cohortDatabaseSchema,
cohortTable = cohortTable,
cohortId = cohortId)
# Per-drug record count, distinct-person count and percentage of cohort;
# the trailing argument-less group_by() drops the grouping (ungroup).
drugTable1 <- drugExposureData %>%
group_by(CONCEPT_NAME) %>%
summarise(records = n(),
person = n_distinct(SUBJECT_ID),
percentile = round(person/N*100,2)) %>% group_by()
drugTable1 <- as.data.frame(drugTable1)
xtitle <- list(title = "Antibiotics")
ytitle <- list(title = "Proportion of patient having prescription of antibiotics (%)")
Plot <- plotly::plot_ly(drugTable1, x = ~CONCEPT_NAME, y = ~percentile,
hoverinfo = 'y', type = "bar", name = 'percentile', text = drugTable1$percentile, textposition = 'outside') %>%
plotly::layout(xaxis = xtitle, yaxis = ytitle)
return(Plot)
}
# Line plot of the yearly proportion of patients prescribed each concept set,
# over the follow-up window [StartDays, EndDays].
# NOTE(review): this helper is currently unused (its call site in
# PlotTxPathway is commented out). It calls totalN() WITHOUT
# connectionDetails, unlike dailyPlot - confirm which signature is correct.
# NOTE(review): `layout` is unqualified here (plotly::layout elsewhere) and
# may collide with graphics::layout.
longitudinalPlot <- function(cohortDatabaseSchema,
cohortTable,
cohortId,
conceptSets,
drugExposureData,
StartDays = 0,
EndDays = 365){
# Bucket exposures into 365-day periods relative to the index date.
drugExposureDataT <- transform(drugExposureData,
timePeriod = cut(time, breaks = c(seq(StartDays,EndDays, by = 365)),
right = T,
labels = c(1:(length(seq(StartDays, EndDays, by = 365))-1))-0.5))
# Distinct persons observed in each period (per-period denominator).
periodN <- drugExposureDataT %>% group_by(timePeriod) %>% summarise(n = n_distinct(SUBJECT_ID)) %>% group_by()
# Keep only exposures whose concept id belongs to one of the concept sets.
drugExposureFilteredT <- drugExposureDataT %>% filter(conceptId %in% unlist(conceptSets))
# Long lookup table: concept id -> concept set name.
conceptList <- data.frame(setNum = NULL, conceptSetName = NULL, conceptId = NULL)
for(i in 1:length(conceptSets)){
conceptList <- rbind(conceptList,
data.frame(setNum = i,
conceptSetName = names(conceptSets[i]),
conceptId = conceptSets[[i]]))
}
drugExposureT <- merge(drugExposureFilteredT,
conceptList,
by = "conceptId",
all.x = T)
N <- totalN(cohortDatabaseSchema, cohortTable, cohortId)
# Per (concept set, period): counts and percentage of the whole cohort.
drugTable1 <- drugExposureT %>%
group_by(conceptSetName, timePeriod) %>%
summarise(records = n(),
person = n_distinct(SUBJECT_ID),
percentile = round(person/N*100,2)) %>% group_by()
drugTable1 <- as.data.frame(drugTable1)
# Re-express counts as a percentage of the per-period population.
drugTable1 <- merge(drugTable1, periodN, by = "timePeriod", all.x = T) %>%
mutate(period_percentile = round(person/n*100,2))
Plot <- plotly::plot_ly(drugTable1, x = ~timePeriod, y = ~period_percentile,
color = ~conceptSetName, type = 'scatter', mode = 'lines+markers') %>%
layout(xaxis = list(title = "Follow-up time",
ticktext = seq(0, (EndDays-StartDays)/365)),
yaxis = list(title = "Percentage",
ticktext = seq(0, 100, by = 20)))
return(Plot)
}
# Daily line plot: for each drug (CONCEPT_NAME), the percentage of the
# per-day population (timeFirstId) with a prescription record.
# NOTE(review): `connectionDetails` is a free variable resolved from the
# calling environment, and `conceptSets` is accepted but unused here.
dailyPlot <-function(cohortDatabaseSchema,
cohortTable,
cohortId,
conceptSets,
drugExposureData){
#
# conceptList <- data.frame(setNum = NULL, conceptSetName = NULL, conceptId = NULL)
#
# for(i in 1:length(conceptSets)){
#
# conceptList <- rbind(conceptList,
# data.frame(setNum = i,
# conceptSetName = names(conceptSets[i]),
# conceptId = conceptSets[[i]]))
# }
drugExposure <- drugExposureData
# Distinct persons per day (per-day denominator).
periodN <- drugExposure %>% group_by(timeFirstId) %>% summarise(n = n_distinct(SUBJECT_ID)) %>% group_by()
N <- totalN(connectionDetails, cohortDatabaseSchema, cohortTable, cohortId)
# Per (drug, day): counts and percentage of the whole cohort; the trailing
# argument-less group_by() drops the grouping (ungroup).
drugTable1 <- drugExposure %>%
group_by(CONCEPT_NAME, timeFirstId) %>%
summarise(records = n(),
person = n_distinct(SUBJECT_ID),
percentile = round(person/N*100,2)) %>% group_by()
drugTable1 <- as.data.frame(drugTable1)
# Re-express counts as a percentage of the per-day population.
drugTable1 <- merge(drugTable1, periodN, by= "timeFirstId", all.x = T) %>%
mutate(period_percentile = round(person/n*100,2))
Plot <- plotly::plot_ly(drugTable1, x = ~timeFirstId, y = ~period_percentile,
color = ~CONCEPT_NAME, type = 'scatter', mode = 'lines+markers') %>%
plotly::layout(xaxis = list(title = "Follow-up time"),
yaxis = list(title = "Percentage",
ticktext = seq(0, 100, by = 20)))
return(Plot)
}
# Same as dailyPlot but aggregated by drug class, using the drug -> class
# mapping shipped in the package's settings/drugClass.csv.
# NOTE(review): `connectionDetails` is a free variable resolved from the
# calling environment, and `conceptSets` is accepted but unused here.
dailyGroupPlot <-function(cohortDatabaseSchema,
cohortTable,
cohortId,
conceptSets,
drugExposureData){
# Drug-class lookup bundled with the VAP package.
pathToCsv <- system.file("settings", "drugClass.csv", package = "VAP")
drugClass <- read.csv(pathToCsv)
drugExposure <- merge(drugExposureData, drugClass, by.x = "CONCEPT_NAME", by.y = "conceptSetName")
# Distinct persons per day (per-day denominator).
periodN <- drugExposure %>% group_by(timeFirstId) %>% summarise(n = n_distinct(SUBJECT_ID)) %>% group_by()
N <- totalN(connectionDetails, cohortDatabaseSchema, cohortTable, cohortId)
# Per (drug class, day): counts and percentage of the whole cohort.
drugTable1 <- drugExposure %>%
group_by(drugClass, timeFirstId) %>%
summarise(records = n(),
person = n_distinct(SUBJECT_ID),
percentile = round(person/N*100,2)) %>% group_by()
drugTable1 <- as.data.frame(drugTable1)
# Re-express counts as a percentage of the per-day population.
drugTable1 <- merge(drugTable1, periodN, by= "timeFirstId", all.x = T) %>%
mutate(period_percentile = round(person/n*100,2))
Plot <- plotly::plot_ly(drugTable1, x = ~timeFirstId, y = ~period_percentile,
color = ~drugClass, type = 'scatter', mode = 'lines+markers') %>%
plotly::layout(xaxis = list(title = "Follow-up time"),
yaxis = list(title = "Percentage",
ticktext = seq(0, 100, by = 20)))
return(Plot)
}
# Sunburst plot of treatment sequences: the first `pathLevel` drug columns of
# sequenceData are collapsed into "drug1-drug2-...-end" strings with their
# person counts, in the format expected by sunburstR.
sunburstPlot <- function(sequenceData, pathLevel){
# Sum person counts across index years (drop INDEX_YEAR from the grouping).
sequenceData <- as.data.frame(sequenceData %>%
group_by_at(vars(c(-INDEX_YEAR, -NUM_PERSONS))) %>%
summarise(NUM_PERSONS = sum(NUM_PERSONS)) %>% group_by())
# Join the first pathLevel treatment columns into one "-"-separated pathway.
sequenceCollapse <- do.call(paste, c(sequenceData[,1:(1+pathLevel-1)], sep = "-"))
sequenceCollapse <- data.frame(pathway = sequenceCollapse, NUM_PERSONS = sequenceData$NUM_PERSONS)
sequenceCollapse <- sequenceCollapse %>%
group_by(pathway) %>%
summarise(sum = sum(NUM_PERSONS)) %>%
group_by()
sequenceCollapse$pathway <- as.character(sequenceCollapse$pathway)
# Truncate each pathway at its first missing step ("-NA...") and close it
# with the "-end" terminator used by the sunburst widget.
sequenceCollapse$pathway <- paste0(stringr::str_split(sequenceCollapse$pathway,
pattern = "-NA", simplify = T)[,1], "-end")
Plot <- sunburstR::sund2b(sequenceCollapse)
#Plot <- sunburstR::sunburst(sequenceCollapse)
return(Plot)
}
# Build a plotly sankey diagram of treatment sequences: one node per
# (concept, level) pair, one link per observed transition between
# consecutive levels, weighted by person counts.
sankeyPlot <- function(sequenceData,
cohortDatabaseSchema,
cohortTable,
cohortId,
pathLevel){
# Collapse over index year so counts are summed across all years.
sequenceData <- as.data.frame(sequenceData %>%
group_by_at(vars(c(-INDEX_YEAR, -NUM_PERSONS))) %>%
summarise(NUM_PERSONS = sum(NUM_PERSONS)) %>% group_by())
#nodeLink data for sankeyPlot
# NOTE(review): this only prints a message and keeps going; an out-of-range
# pathLevel is not actually rejected.
if(pathLevel > 20 | pathLevel < 1) cat("pathLevel must be between 1 and 20")
# Collect node labels ("concept_i", unique per level) and display names.
label <- vector()
name <- vector()
for (i in 1:pathLevel){
if(length(as.factor(sequenceData[,i][!is.na(sequenceData[,i])]))!= 0){
label <- c(label, paste0(levels(as.factor(sequenceData[,i][!is.na(sequenceData[,i])])), "_", i))
name <- c(name,levels(as.factor(sequenceData[,i][!is.na(sequenceData[,i])])))
}
}
# Cohort size used as the percentage denominator. `connectionDetails` is
# resolved from the calling environment — TODO confirm callers define it.
n <- totalN(connectionDetails = connectionDetails,
cohortDatabaseSchema = cohortDatabaseSchema,
cohortTable = cohortTable,
cohortId = cohortId)
node <- data.frame(name = name, label = label)
node$label <- as.character(node$label)
# Accumulate per-node person counts/percentages level by level.
# NOTE(review): column 21 is assumed to be NUM_PERSONS, and the
# `"pctTable" %in% ls()` test distinguishes the first iteration — both are
# fragile but preserved here.
for (i in 1:pathLevel){
if(length(as.factor(sequenceData[,i][!is.na(sequenceData[,i])]))!=0){
pct <- data.frame(concept_name = paste0(sequenceData[,as.integer(i)], "_", i), NUM_PERSONS=sequenceData[,21])
if(!"pctTable" %in% ls()){
pctTable<-as.data.frame(pct %>%
group_by(concept_name) %>%
summarise(personCount = sum(NUM_PERSONS),
percent = round(sum(NUM_PERSONS)/n*100,2)) %>% group_by() )
}else{pctTable <- rbind(pctTable,
as.data.frame(pct %>%
group_by(concept_name) %>%
summarise(personCount = sum(NUM_PERSONS),
percent = round(sum(NUM_PERSONS)/n*100,2)) %>% group_by()))}
}
}
node <- merge(node, pctTable, by.x = "label", by.y = "concept_name", all.x = T)
node$label_2 <- as.factor(paste0(node$name, " (n=", node$personCount, ",", node$percent, "%)"))
# NOTE(review): levels() returns NULL when node$name is character (the
# data.frame default since R 4.0), leaving the colour table empty — confirm
# against the R version this package targets.
color <- data.frame(name = levels(node$name), color = rainbow(length(levels(node$name))))
node <- merge(node, color, by = "name", all.x = T)
# Links between consecutive levels; the last iteration targets level
# pathLevel + 1, whose labels match no node and resolve to NA below.
for (i in 1:pathLevel){
if(i == 1){
link <- data.frame(source = paste0(sequenceData[,as.integer(i)], "_", i),
target = paste0(sequenceData[,as.integer(i+1)], "_", i+1), NUM_PERSONS=sequenceData[,21])
link <- as.data.frame(link %>%
group_by(source, target) %>%
summarise(value = sum(NUM_PERSONS)) %>% group_by())
}else{
link2 <- as.data.frame(data.frame(source = paste0(sequenceData[,as.integer(i)], "_", i),
target = paste0(sequenceData[,as.integer(i+1)], "_", i+1), NUM_PERSONS=sequenceData[,21]) %>%
group_by(source, target) %>%
summarise(value = sum(NUM_PERSONS)) %>% group_by())
link <- rbind(link, link2)
}
}
# plotly sankey indices are zero-based.
link$source <- match(link$source, node$label) -1
link$target <- match(link$target, node$label) -1
Plot <- plotly::plot_ly(type = "sankey",
orientation = "c",
alpha = 0.5,
node = list(label = node$label_2,
pad = 15,
thickness = 15,
x = rep(0.2, length(node$label_2)),
color = node$color),
link = list(source = link$source, target = link$target, value = link$value)
)
return(Plot)
}
| /VAP/R/Plots.R | no_license | ABMI/PneumoniaTxPath | R | false | false | 16,169 | r | # @file Plots.R
#
# Copyright 2020 Observational Health Data Sciences and Informatics
#
# This file is part of VAP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' Plot the outcome incidence over time
#'
#' @details
#' This creates a survival plot that can be used to pick a suitable time-at-risk period
#'
#' @param cohortDatabaseSchema Schema name where intermediate data can be stored. You will need to have
#' write priviliges in this schema. Note that for SQL Server, this should
#' include both the database and schema name, for example 'cdm_data.dbo'.
#' @param cohortTable The name of the table that will be created in the work database schema.
#' This table will hold the exposure and outcome cohorts used in this
#' study.
#' @param cohortId The number of cohort id
#' @param conceptSets Concept set json files created from ATLAS
#' @param drugExposureData The covariate data of drug exposure table obtaining by FeatureExtraction package
#' @param sequenceData The data reformatted by drug exposure date for visualization
#' @param outputFolder Name of local folder to place results; make sure to use forward slashes
#' (/)
#' @param StartDays The first date from index date (cohort start date) for investigating drug sequence
#' @param EndDays The last date from index date (cohort start date) for investigating drug sequence
#' @param pathLevel Level of pathway depth
#'
#' @import dplyr
#' @import plotly
#' @return
#' TRUE if it ran
#'
#' @export
# Entry point: build every treatment-pathway visualisation and save each one
# as a standalone HTML widget under <outputFolder>/plots.
# NOTE: each local deliberately shadows the plotting function of the same
# name; this works because the RHS call is evaluated before the assignment.
PlotTxPathway <- function(cohortDatabaseSchema,
cohortTable,
cohortId,
conceptSets,
drugExposureData,
sequenceData,
outputFolder,
StartDays = 0,
EndDays = 365,
pathLevel = 2){
# Ensure the output directory exists.
saveFolder <- file.path(outputFolder, "plots")
if (!file.exists(saveFolder))
dir.create(saveFolder, recursive = TRUE)
# Overall frequency bar chart per antibiotic.
freqBarPlot <- freqBarPlot(cohortDatabaseSchema = cohortDatabaseSchema,
cohortTable = cohortTable,
cohortId = cohortId,
conceptSets = conceptSets,
drugExposureData = drugExposureData)
htmlwidgets::saveWidget(freqBarPlot, file = file.path(saveFolder, "freqBarPlot.html"))
# Not use in VAP cohort
# longitudinalPlot <- longitudinalPlot(cohortDatabaseSchema,
# cohortTable,
# cohortId,
# conceptSets,
# drugExposureData,
# StartDays = 0,
# EndDays = 365)
# export(longitudinalPlot, file = file.path(saveFolder, "longitudinalPlot.png"))
# Daily exposure percentage per individual drug concept.
dailyPlot <- dailyPlot(cohortDatabaseSchema = cohortDatabaseSchema,
cohortTable = cohortTable,
cohortId = cohortId,
conceptSets = conceptSets,
drugExposureData = drugExposureData)
htmlwidgets::saveWidget(dailyPlot, file = file.path(saveFolder, "dailyPlot.html"))
# Daily exposure percentage per drug class.
dailyGroupPlot <- dailyGroupPlot(cohortDatabaseSchema = cohortDatabaseSchema,
cohortTable = cohortTable,
cohortId = cohortId,
conceptSets = conceptSets,
drugExposureData = drugExposureData)
htmlwidgets::saveWidget(dailyGroupPlot, file = file.path(saveFolder, "dailyGroupPlot.html"))
# Sunburst of collapsed treatment pathways.
sunburstPlot <- sunburstPlot(sequenceData = sequenceData,
pathLevel = pathLevel)
htmlwidgets::saveWidget(sunburstPlot, file = file.path(saveFolder, "sunburst.html"))
# Sankey diagram of level-to-level treatment transitions.
sankeyPlot <- sankeyPlot(sequenceData = sequenceData,
cohortDatabaseSchema = cohortDatabaseSchema,
cohortTable = cohortTable,
cohortId = cohortId,
pathLevel = pathLevel)
htmlwidgets::saveWidget(sankeyPlot, file = file.path(saveFolder, "sankey.html"))
}
# Bar chart: for each drug concept, the percentage of cohort patients with at
# least one prescription. Returns a plotly bar chart.
# NOTE(review): `connectionDetails` is resolved from the calling environment,
# not a parameter — confirm callers always define it.
freqBarPlot <- function(cohortDatabaseSchema,
cohortTable,
cohortId,
conceptSets,
drugExposureData){
# Whole-cohort denominator.
N <- totalN(connectionDetails = connectionDetails,
cohortDatabaseSchema = cohortDatabaseSchema,
cohortTable = cohortTable,
cohortId = cohortId)
# Records / distinct persons / cohort percentage per concept.
drugTable1 <- drugExposureData %>%
group_by(CONCEPT_NAME) %>%
summarise(records = n(),
person = n_distinct(SUBJECT_ID),
percentile = round(person/N*100,2)) %>% group_by()
drugTable1 <- as.data.frame(drugTable1)
xtitle <- list(title = "Antibiotics")
ytitle <- list(title = "Proportion of patient having prescription of antibiotics (%)")
Plot <- plotly::plot_ly(drugTable1, x = ~CONCEPT_NAME, y = ~percentile,
hoverinfo = 'y', type = "bar", name = 'percentile', text = drugTable1$percentile, textposition = 'outside') %>%
plotly::layout(xaxis = xtitle, yaxis = ytitle)
return(Plot)
}
# Plot, per yearly follow-up period, the percentage of patients exposed to
# each concept set. Exposure days are binned into 365-day periods between
# StartDays and EndDays; percentages are relative to each period's exposed
# population. Returns a plotly line chart.
# NOTE(review): `connectionDetails` is resolved from the calling environment,
# not a parameter — confirm callers always define it.
longitudinalPlot <- function(cohortDatabaseSchema,
                             cohortTable,
                             cohortId,
                             conceptSets,
                             drugExposureData,
                             StartDays = 0,
                             EndDays = 365){
  # Bin exposure days into yearly periods; labels are period midpoints
  # (0.5, 1.5, ...) so the x axis reads in years.
  drugExposureDataT <- transform(drugExposureData,
                                 timePeriod = cut(time, breaks = c(seq(StartDays, EndDays, by = 365)),
                                                  right = TRUE,
                                                  labels = c(1:(length(seq(StartDays, EndDays, by = 365)) - 1)) - 0.5))
  # Per-period denominator: distinct subjects exposed in that period.
  periodN <- drugExposureDataT %>%
    group_by(timePeriod) %>%
    summarise(n = n_distinct(SUBJECT_ID)) %>%
    group_by()
  # Keep only exposures belonging to the requested concept sets.
  drugExposureFilteredT <- drugExposureDataT %>% filter(conceptId %in% unlist(conceptSets))
  # Lookup table conceptId -> concept set name (no rbind-in-loop).
  conceptList <- do.call(rbind, lapply(seq_along(conceptSets), function(i) {
    data.frame(setNum = i,
               conceptSetName = names(conceptSets[i]),
               conceptId = conceptSets[[i]])
  }))
  drugExposureT <- merge(drugExposureFilteredT,
                         conceptList,
                         by = "conceptId",
                         all.x = TRUE)
  # BUG FIX: totalN() was called without connectionDetails, shifting every
  # positional argument by one; use named arguments as at every other call
  # site in this file.
  N <- totalN(connectionDetails = connectionDetails,
              cohortDatabaseSchema = cohortDatabaseSchema,
              cohortTable = cohortTable,
              cohortId = cohortId)
  drugTable1 <- drugExposureT %>%
    group_by(conceptSetName, timePeriod) %>%
    summarise(records = n(),
              person = n_distinct(SUBJECT_ID),
              percentile = round(person/N*100, 2)) %>% group_by()
  drugTable1 <- as.data.frame(drugTable1)
  # period_percentile: share of that period's exposed subjects on this set.
  drugTable1 <- merge(drugTable1, periodN, by = "timePeriod", all.x = TRUE) %>%
    mutate(period_percentile = round(person/n*100, 2))
  # plotly::layout qualified explicitly (as in the other helpers) so that
  # graphics::layout is never picked up when plotly is not attached.
  Plot <- plotly::plot_ly(drugTable1, x = ~timePeriod, y = ~period_percentile,
                          color = ~conceptSetName, type = 'scatter', mode = 'lines+markers') %>%
    plotly::layout(xaxis = list(title = "Follow-up time",
                                ticktext = seq(0, (EndDays-StartDays)/365)),
                   yaxis = list(title = "Percentage",
                                ticktext = seq(0, 100, by = 20)))
  return(Plot)
}
# Plot, per follow-up day (timeFirstId), the percentage of patients exposed
# to each individual drug concept. Returns a plotly line chart.
# NOTE(review): `connectionDetails` is resolved from the calling environment,
# not a parameter — confirm callers always define it. `conceptSets` is
# currently unused (see the retained commented-out lookup below).
dailyPlot <-function(cohortDatabaseSchema,
cohortTable,
cohortId,
conceptSets,
drugExposureData){
#
# conceptList <- data.frame(setNum = NULL, conceptSetName = NULL, conceptId = NULL)
#
# for(i in 1:length(conceptSets)){
#
# conceptList <- rbind(conceptList,
# data.frame(setNum = i,
# conceptSetName = names(conceptSets[i]),
# conceptId = conceptSets[[i]]))
# }
drugExposure <- drugExposureData
# Per-day denominator: distinct subjects with any exposure on that day.
periodN <- drugExposure %>% group_by(timeFirstId) %>% summarise(n = n_distinct(SUBJECT_ID)) %>% group_by()
# Whole-cohort denominator.
N <- totalN(connectionDetails, cohortDatabaseSchema, cohortTable, cohortId)
# Records / distinct persons / cohort percentage per (concept, day).
drugTable1 <- drugExposure %>%
group_by(CONCEPT_NAME, timeFirstId) %>%
summarise(records = n(),
person = n_distinct(SUBJECT_ID),
percentile = round(person/N*100,2)) %>% group_by()
drugTable1 <- as.data.frame(drugTable1)
# period_percentile: share of that day's exposed subjects on this concept.
drugTable1 <- merge(drugTable1, periodN, by= "timeFirstId", all.x = T) %>%
mutate(period_percentile = round(person/n*100,2))
Plot <- plotly::plot_ly(drugTable1, x = ~timeFirstId, y = ~period_percentile,
color = ~CONCEPT_NAME, type = 'scatter', mode = 'lines+markers') %>%
plotly::layout(xaxis = list(title = "Follow-up time"),
yaxis = list(title = "Percentage",
ticktext = seq(0, 100, by = 20)))
return(Plot)
}
# Plot, per follow-up day, the percentage of cohort patients exposed to each
# drug CLASS. Concepts are mapped to classes via the packaged
# settings/drugClass.csv lookup. Returns a plotly line chart.
# NOTE: `connectionDetails` is resolved from the calling environment when
# totalN() runs, exactly as in the sibling helpers.
dailyGroupPlot <-function(cohortDatabaseSchema,
                          cohortTable,
                          cohortId,
                          conceptSets,
                          drugExposureData){
  # Attach a drug class to every exposure record.
  classMapPath <- system.file("settings", "drugClass.csv", package = "VAP")
  classMap <- read.csv(classMapPath)
  exposures <- merge(drugExposureData, classMap,
                     by.x = "CONCEPT_NAME", by.y = "conceptSetName")
  # Per-day denominator: distinct subjects exposed on that day.
  subjectsPerDay <- group_by(exposures, timeFirstId)
  subjectsPerDay <- summarise(subjectsPerDay, n = n_distinct(SUBJECT_ID))
  subjectsPerDay <- group_by(subjectsPerDay)
  # Whole-cohort denominator.
  cohortSize <- totalN(connectionDetails, cohortDatabaseSchema, cohortTable, cohortId)
  # Records / distinct persons / cohort percentage per (class, day).
  classByDay <- group_by(exposures, drugClass, timeFirstId)
  classByDay <- summarise(classByDay,
                          records = n(),
                          person = n_distinct(SUBJECT_ID),
                          percentile = round(person / cohortSize * 100, 2))
  classByDay <- group_by(classByDay)
  classByDay <- as.data.frame(classByDay)
  # period_percentile: share of that day's exposed subjects on this class.
  classByDay <- mutate(merge(classByDay, subjectsPerDay, by = "timeFirstId", all.x = TRUE),
                       period_percentile = round(person / n * 100, 2))
  fig <- plotly::plot_ly(classByDay, x = ~timeFirstId, y = ~period_percentile,
                         color = ~drugClass, type = 'scatter', mode = 'lines+markers')
  fig <- plotly::layout(fig,
                        xaxis = list(title = "Follow-up time"),
                        yaxis = list(title = "Percentage",
                                     ticktext = seq(0, 100, by = 20)))
  return(fig)
}
# Interactive sunburst of treatment pathways: the first `pathLevel` columns
# are collapsed into one "a-b-c" path string, counts are summed per path,
# trailing "-NA" steps are trimmed, and each path gets an "-end" terminator.
sunburstPlot <- function(sequenceData, pathLevel){
# Collapse over index year so counts are summed across all years.
sequenceData <- as.data.frame(sequenceData %>%
group_by_at(vars(c(-INDEX_YEAR, -NUM_PERSONS))) %>%
summarise(NUM_PERSONS = sum(NUM_PERSONS)) %>% group_by())
# Join columns 1..pathLevel into a dash-separated pathway per row.
sequenceCollapse <- do.call(paste, c(sequenceData[,1:(1+pathLevel-1)], sep = "-"))
sequenceCollapse <- data.frame(pathway = sequenceCollapse, NUM_PERSONS = sequenceData$NUM_PERSONS)
# Sum person counts per distinct pathway.
sequenceCollapse <- sequenceCollapse %>%
group_by(pathway) %>%
summarise(sum = sum(NUM_PERSONS)) %>%
group_by()
# Cut everything from the first "-NA" (unused levels) and terminate the path.
sequenceCollapse$pathway <- as.character(sequenceCollapse$pathway)
sequenceCollapse$pathway <- paste0(stringr::str_split(sequenceCollapse$pathway,
pattern = "-NA", simplify = T)[,1], "-end")
# sund2b renders an interactive sunburst from (path, count) rows.
Plot <- sunburstR::sund2b(sequenceCollapse)
#Plot <- sunburstR::sunburst(sequenceCollapse)
return(Plot)
}
# Build a plotly sankey diagram of treatment sequences: one node per
# (concept, level) pair, one link per observed transition between
# consecutive levels, weighted by person counts.
# NOTE(review): `connectionDetails` is resolved from the calling
# environment, not a parameter — confirm callers always define it.
sankeyPlot <- function(sequenceData,
                       cohortDatabaseSchema,
                       cohortTable,
                       cohortId,
                       pathLevel){
  # Collapse over index year so counts are summed across all years.
  sequenceData <- as.data.frame(sequenceData %>%
                                  group_by_at(vars(c(-INDEX_YEAR, -NUM_PERSONS))) %>%
                                  summarise(NUM_PERSONS = sum(NUM_PERSONS)) %>% group_by())
  #nodeLink data for sankeyPlot
  # BUG FIX: the range check previously only cat()'d a message and carried
  # on with an invalid pathLevel; it must abort.
  if (pathLevel > 20 | pathLevel < 1) stop("pathLevel must be between 1 and 20")
  # Collect node labels ("concept_i", unique per level) and display names.
  label <- vector()
  name <- vector()
  for (i in seq_len(pathLevel)) {
    lvl <- sequenceData[, i][!is.na(sequenceData[, i])]
    if (length(lvl) != 0) {
      lvlNames <- levels(as.factor(lvl))
      label <- c(label, paste0(lvlNames, "_", i))
      name <- c(name, lvlNames)
    }
  }
  # Cohort size: percentage denominator.
  n <- totalN(connectionDetails = connectionDetails,
              cohortDatabaseSchema = cohortDatabaseSchema,
              cohortTable = cohortTable,
              cohortId = cohortId)
  node <- data.frame(name = name, label = label)
  node$label <- as.character(node$label)
  # Per-node person counts and percentages, one slice per level, collected
  # into a list instead of the previous fragile `"pctTable" %in% ls()` test.
  pctList <- list()
  for (i in seq_len(pathLevel)) {
    lvl <- sequenceData[, i][!is.na(sequenceData[, i])]
    if (length(lvl) != 0) {
      pct <- data.frame(concept_name = paste0(sequenceData[, i], "_", i),
                        NUM_PERSONS = sequenceData$NUM_PERSONS)
      pctList[[length(pctList) + 1]] <- as.data.frame(pct %>%
                                                        group_by(concept_name) %>%
                                                        summarise(personCount = sum(NUM_PERSONS),
                                                                  percent = round(sum(NUM_PERSONS)/n*100, 2)) %>%
                                                        group_by())
    }
  }
  pctTable <- do.call(rbind, pctList)
  node <- merge(node, pctTable, by.x = "label", by.y = "concept_name", all.x = TRUE)
  node$label_2 <- as.factor(paste0(node$name, " (n=", node$personCount, ",", node$percent, "%)"))
  # BUG FIX: levels() on a character column returns NULL under R >= 4.0
  # (stringsAsFactors = FALSE by default), which produced an empty colour
  # table; unique() works for both character and factor columns.
  uniqueNames <- unique(as.character(node$name))
  color <- data.frame(name = uniqueNames, color = rainbow(length(uniqueNames)))
  node <- merge(node, color, by = "name", all.x = TRUE)
  # Links between consecutive levels. NOTE: the final iteration targets
  # level pathLevel + 1; those labels match no node and resolve to NA below
  # (behaviour kept from the original implementation).
  linkList <- list()
  for (i in seq_len(pathLevel)) {
    linkList[[i]] <- as.data.frame(data.frame(source = paste0(sequenceData[, i], "_", i),
                                              target = paste0(sequenceData[, i + 1], "_", i + 1),
                                              NUM_PERSONS = sequenceData$NUM_PERSONS) %>%
                                     group_by(source, target) %>%
                                     summarise(value = sum(NUM_PERSONS)) %>% group_by())
  }
  link <- do.call(rbind, linkList)
  # plotly sankey indices are zero-based.
  link$source <- match(link$source, node$label) - 1
  link$target <- match(link$target, node$label) - 1
  Plot <- plotly::plot_ly(type = "sankey",
                          orientation = "c",
                          alpha = 0.5,
                          node = list(label = node$label_2,
                                      pad = 15,
                                      thickness = 15,
                                      x = rep(0.2, length(node$label_2)),
                                      color = node$color),
                          link = list(source = link$source, target = link$target, value = link$value)
  )
  return(Plot)
}
|
# Package hooks
#
# Author: renaud
# Creation: 26 Jun 2012
###############################################################################
#' @include utils.R
#' @include devutils.R
#' @import stats
#' @import methods
NULL
#' Default Load/Unload Functions
#'
#' @inheritParams base::.onLoad
#' @inheritParams base::library.dynam
#'
#' @export
#' @rdname load
#'
#' @examples
#'
#' # in a package namespace:
#' .onLoad <- function(libname=NULL, pkgname){
#'
#' pkgmaker::onLoad(libname, pkgname)
#'
#' }
# Default .onLoad implementation for packages using pkgmaker.
# Installed package (libname given): dynamically load each compiled library
# listed in `chname` if the package ships a libs/ directory.
# Devmode (libname NULL): compile the source files in place and load them.
onLoad <- function(libname=NULL, pkgname, chname=packageName()){
# load compiled library normally or in devmode
if( !is.null(libname) ){
if( file.exists(packagePath('libs')) ){
sapply(chname, library.dynam, package=pkgname, lib.loc=libname)
}
}else{
compile_src() # compile source files and load
}
}
#' @inheritParams base::.onUnload
#' @export
#' @rdname load
#'
#' @examples
#'
#' # in a package namespace:
#' .onUnload <- function(libpath){
#'
#' pkgmaker::onUnload(libpath)
#'
#' }
# Default .onUnload implementation: unload this package's DLL.
# Installed package (libpath given): unload via library.dynam.unload.
# Devmode (libpath missing): the DLL was dyn.load'ed directly, so unload it
# by its recorded path.
onUnload <- function(libpath) {
# unload compiled library normally or in devmode
dlls <- base::getLoadedDLLs()
pname <- packageName()
# Only act if a DLL with this package's name is actually loaded.
if ( pname %in% names(dlls) ){
if( !missing(libpath) ) library.dynam.unload(pname, libpath)
else dyn.unload(dlls[[pname]][['path']])
}
}
#' Postponing Actions
#'
#' This function implement a mechanism to postpone actions, which can be executed
#' at a later stage.
#' This is useful when developing packages, where actions that need to be run in the
#' \code{link{.onLoad}} function but can be defined close to their context.
#'
#' @param expr expression that define the action to postpone.
#' Currently only functions are supported.
#' @param key identifier for this specific action.
#' It should be unique across the postponed actions from the same group.
#' @param group optional parent action group.
#' This enables to define meaningful sets of actions that can be run all at once.
#' @param envir environment in which the action should be executed.
#' Currently not used.
#' @param verbose logical that toggles verbose messages.
#'
#' @import digest
#' @export
#'
#' @examples
#' opt <- options(verbose=2)
#'
#' # define actions
#' postponeAction(function(){print(10)}, "print")
#' postponeAction(function(){print(1:10)}, "more")
#' postponeAction()
#' # execute actions
#' runPostponedAction()
#' runPostponedAction()
#'
#' # restore options
#' options(opt)
#'
# Register (or list) actions to run later via runPostponedAction().
# With `expr`: stores the function under `key` (prefixed "group::" when a
# group is given) in the '.__delayedTasks__' registry of the top namespace.
# Without `expr`: returns the keys currently registered.
postponeAction <- function(expr, key=digest(tempfile()), group=NULL, envir=topns(strict=FALSE), verbose=getOption('verbose')){
# do not do anything if already running delayed actions
if( isRunningPostponedAction() ) return()
ns <- topns(strict=FALSE)
taskObj <- simpleRegistry('.__delayedTasks__', envir=ns)
if( !missing(expr) ){
# NOTE(review): this rejects any registration that does not supply `key`
# explicitly, despite `key` having a default — confirm this is intended.
if( missing(key) ){
stop("Missing required argument `key` for registering/cancelling delayed action.")
}
# add group prefix
if( !is.null(group) )
key <- str_c(group, '::', key)
#qe <- if( !is.language(expr) ) substitute(expr) else expr
qe <- expr
if( verbose ){
if( !is.null(qe) ) message("# Postponing action '", key, "'")
else{
message("# Cancelling postponed action '", key, "'")
}
}
# Store the action together with its target environment.
taskObj$set(key, list(action=qe, envir=envir))
}else{
taskObj$names()
}
}
#' @rdname postponeAction
#' @export
# Execute (and unregister) all postponed actions, optionally only those whose
# key starts with "group". Returns, invisibly, the number of actions run.
runPostponedAction <- function(group=NULL, verbose=getOption('verbose')){
ns <- topns(strict=FALSE)
taskObj <- simpleRegistry('.__delayedTasks__', envir=ns)
if( verbose ){
message("# Executing postponed "
, if( !is.null(group) ) paste("'", group, "' ", sep='')
, "action(s) in package '"
, packageName(ns, .Global=TRUE), "' ... "
, appendLF = FALSE)
}
# set up running flag so postponeAction() becomes a no-op while we run;
# on.exit guarantees the flag is reset even if an action errors
isRunningPostponedAction(TRUE)
on.exit(isRunningPostponedAction(FALSE))
#
# execute actions
t <- taskObj$names()
# keep only the keys carrying the requested "group::" prefix
if( !is.null(group) ) t <- grep(str_c("^", group), t, value=TRUE)
if( verbose > 1 && length(t) ) message()
sapply(t, function(x){
act <- taskObj$get(x)
if( verbose > 1 ){
message("** Action '", x, "' [", packageName(act$envir, .Global=TRUE), ']')
}
# run the action, then remove it from the registry
act$action()
taskObj$set(x, NULL)
#eval(x$expr, x$envir)
})
if( verbose ) message('OK [', length(t), ']')
invisible(length(t))
}
# Tells if one is executing deferred tasks via \code{onLoad}.
# Used as a getter/setter pair: runPostponedAction() sets it TRUE for the
# duration of the run so that postponeAction() becomes a no-op.
isRunningPostponedAction <- sVariable(FALSE)
#' Simple Package Registry
#'
#' @param name name of the registry object, with which it will
#' be assigned in \code{envir}.
#' @param envir environment where to store the registry object.
#' Defaults to the caller's top environment.
#' @param verbose logical that toggle a verbose message when
#' the object is first created.
#'
#' @export
# Create (or fetch) a simple closure-based key/value registry stored under
# `name` in `envir`. Returns, invisibly, a list of accessor functions:
# get/set/has/names/length and cleanup (which removes the registry itself).
simpleRegistry <- function(name, envir=topenv(parent.frame()), verbose=FALSE){
  # Return the stored instance if a registry with this name already exists,
  # so repeated calls share one store.
  if( exists(name, envir=envir) ){
    return( invisible(get(name, envir=envir)) )
  }
  if( verbose ) message("# Setup simple registry '", name, "' in ", packageName(envir, .Global=TRUE))
  .name <- name
  .envir <- envir
  .data <- list() # key/value store captured by the closures below
  # Fetch the value stored under key `x`, or NULL when absent.
  .get <- function(x){
    if( .has(x) ){
      .data[[x]]
    }
  }
  # Store `value` under key `x`; a NULL value removes the entry.
  .set <- function(x, value){
    if( is.null(value) ){
      if( .has(x) ){
        .data[[x]] <<- NULL
      }
    }else{
      .data[[x]] <<- value
    }
  }
  .has <- function(x){
    x %in% names(.data)
  }
  # Remove the registry object itself from its host environment.
  # BUG FIX: rm(.name, envir=.envir) tried to remove a variable literally
  # called ".name" (which does not exist there); `list=` removes the object
  # whose name is stored in .name.
  .cleanup <- function(){
    rm(list=.name, envir=.envir)
  }
  .names <- function(){
    names(.data)
  }
  .length <- function(){
    length(.data)
  }
  .obj <- list(get=.get, set=.set, has=.has
               , cleanup=.cleanup, names=.names
               , length = .length)
  # assign container object so later calls return the same registry
  assign(.name, .obj, envir=.envir)
  #
  invisible(.obj)
}
#' Defunct Functions in pkgmaker
#'
#' These functions have been defunct or superseded by other
#' functions.
#'
#' @param ... extra arguments
#'
#' @rdname pkgmaker-defunct
#' @name pkgmaker-defunct
NULL | /pkgmaker/R/package.R | no_license | ingted/R-Examples | R | false | false | 5,856 | r | # Package hooks
#
# Author: renaud
# Creation: 26 Jun 2012
###############################################################################
#' @include utils.R
#' @include devutils.R
#' @import stats
#' @import methods
NULL
#' Default Load/Unload Functions
#'
#' @inheritParams base::.onLoad
#' @inheritParams base::library.dynam
#'
#' @export
#' @rdname load
#'
#' @examples
#'
#' # in a package namespace:
#' .onLoad <- function(libname=NULL, pkgname){
#'
#' pkgmaker::onLoad(libname, pkgname)
#'
#' }
# Default .onLoad implementation for packages using pkgmaker.
# Devmode (libname NULL): compile the source files in place and load them.
# Installed package: dynamically load each compiled library in `chname`,
# but only when the package actually ships a libs/ directory.
onLoad <- function(libname=NULL, pkgname, chname=packageName()){
  if( is.null(libname) ){
    compile_src() # compile source files and load
  }else if( file.exists(packagePath('libs')) ){
    sapply(chname, library.dynam, package=pkgname, lib.loc=libname)
  }
}
#' @inheritParams base::.onUnload
#' @export
#' @rdname load
#'
#' @examples
#'
#' # in a package namespace:
#' .onUnload <- function(libpath){
#'
#' pkgmaker::onUnload(libpath)
#'
#' }
# Default .onUnload implementation: unload this package's DLL, whether it
# was loaded from an installed location (libpath given) or via devmode
# (dyn.load'ed directly, libpath missing).
onUnload <- function(libpath) {
  loadedDlls <- base::getLoadedDLLs()
  pkg <- packageName()
  # Only act if a DLL with this package's name is actually loaded.
  if ( pkg %in% names(loadedDlls) ){
    if( missing(libpath) ){
      # devmode: unload by the DLL's recorded path
      dyn.unload(loadedDlls[[pkg]][['path']])
    }else{
      # installed package: go through the regular unload mechanism
      library.dynam.unload(pkg, libpath)
    }
  }
}
#' Postponing Actions
#'
#' This function implement a mechanism to postpone actions, which can be executed
#' at a later stage.
#' This is useful when developing packages, where actions that need to be run in the
#' \code{link{.onLoad}} function but can be defined close to their context.
#'
#' @param expr expression that define the action to postpone.
#' Currently only functions are supported.
#' @param key identifier for this specific action.
#' It should be unique across the postponed actions from the same group.
#' @param group optional parent action group.
#' This enables to define meaningful sets of actions that can be run all at once.
#' @param envir environment in which the action should be executed.
#' Currently not used.
#' @param verbose logical that toggles verbose messages.
#'
#' @import digest
#' @export
#'
#' @examples
#' opt <- options(verbose=2)
#'
#' # define actions
#' postponeAction(function(){print(10)}, "print")
#' postponeAction(function(){print(1:10)}, "more")
#' postponeAction()
#' # execute actions
#' runPostponedAction()
#' runPostponedAction()
#'
#' # restore options
#' options(opt)
#'
# Register (or list) actions to run later via runPostponedAction().
# With `expr`: stores the function under `key` (prefixed "group::" when a
# group is given) in the '.__delayedTasks__' registry of the top namespace.
# Without `expr`: returns the keys currently registered.
postponeAction <- function(expr, key=digest(tempfile()), group=NULL, envir=topns(strict=FALSE), verbose=getOption('verbose')){
  # No-op while postponed actions are being executed (prevents re-registration).
  if( isRunningPostponedAction() ) return()
  registry <- simpleRegistry('.__delayedTasks__', envir=topns(strict=FALSE))
  # Called without an action: just list the registered keys.
  if( missing(expr) ){
    return( registry$names() )
  }
  if( missing(key) ){
    stop("Missing required argument `key` for registering/cancelling delayed action.")
  }
  # Namespace the key by its group, if any.
  if( !is.null(group) ){
    key <- str_c(group, '::', key)
  }
  #action <- if( !is.language(expr) ) substitute(expr) else expr
  action <- expr
  if( verbose ){
    if( is.null(action) ){
      # NOTE(review): a NULL action is reported as a cancellation, yet set()
      # below still stores a stub entry (list() drops NULL members) — confirm intent.
      message("# Cancelling postponed action '", key, "'")
    }else{
      message("# Postponing action '", key, "'")
    }
  }
  registry$set(key, list(action=action, envir=envir))
}
#' @rdname postponeAction
#' @export
# Execute (and unregister) all postponed actions, optionally only those whose
# key starts with "group". Returns, invisibly, the number of actions run.
runPostponedAction <- function(group=NULL, verbose=getOption('verbose')){
ns <- topns(strict=FALSE)
taskObj <- simpleRegistry('.__delayedTasks__', envir=ns)
if( verbose ){
message("# Executing postponed "
, if( !is.null(group) ) paste("'", group, "' ", sep='')
, "action(s) in package '"
, packageName(ns, .Global=TRUE), "' ... "
, appendLF = FALSE)
}
# set up running flag so postponeAction() becomes a no-op while we run;
# on.exit guarantees the flag is reset even if an action errors
isRunningPostponedAction(TRUE)
on.exit(isRunningPostponedAction(FALSE))
#
# execute actions
t <- taskObj$names()
# keep only the keys carrying the requested "group::" prefix
if( !is.null(group) ) t <- grep(str_c("^", group), t, value=TRUE)
if( verbose > 1 && length(t) ) message()
sapply(t, function(x){
act <- taskObj$get(x)
if( verbose > 1 ){
message("** Action '", x, "' [", packageName(act$envir, .Global=TRUE), ']')
}
# run the action, then remove it from the registry
act$action()
taskObj$set(x, NULL)
#eval(x$expr, x$envir)
})
if( verbose ) message('OK [', length(t), ']')
invisible(length(t))
}
# Tells if one is executing deferred tasks via \code{onLoad}.
# Used as a getter/setter pair: runPostponedAction() sets it TRUE for the
# duration of the run so that postponeAction() becomes a no-op.
isRunningPostponedAction <- sVariable(FALSE)
#' Simple Package Registry
#'
#' @param name name of the registry object, with which it will
#' be assigned in \code{envir}.
#' @param envir environment where to store the registry object.
#' Defaults to the caller's top environment.
#' @param verbose logical that toggle a verbose message when
#' the object is first created.
#'
#' @export
# Create (or fetch) a simple closure-based key/value registry stored under
# `name` in `envir`. Returns, invisibly, a list of accessor functions:
# get/set/has/names/length and cleanup (which removes the registry itself).
simpleRegistry <- function(name, envir=topenv(parent.frame()), verbose=FALSE){
  # Return the stored instance if a registry with this name already exists,
  # so repeated calls share one store.
  if( exists(name, envir=envir) ){
    return( invisible(get(name, envir=envir)) )
  }
  if( verbose ) message("# Setup simple registry '", name, "' in ", packageName(envir, .Global=TRUE))
  .name <- name
  .envir <- envir
  .data <- list() # key/value store captured by the closures below
  # Fetch the value stored under key `x`, or NULL when absent.
  .get <- function(x){
    if( .has(x) ){
      .data[[x]]
    }
  }
  # Store `value` under key `x`; a NULL value removes the entry.
  .set <- function(x, value){
    if( is.null(value) ){
      if( .has(x) ){
        .data[[x]] <<- NULL
      }
    }else{
      .data[[x]] <<- value
    }
  }
  .has <- function(x){
    x %in% names(.data)
  }
  # Remove the registry object itself from its host environment.
  # BUG FIX: rm(.name, envir=.envir) tried to remove a variable literally
  # called ".name" (which does not exist there); `list=` removes the object
  # whose name is stored in .name.
  .cleanup <- function(){
    rm(list=.name, envir=.envir)
  }
  .names <- function(){
    names(.data)
  }
  .length <- function(){
    length(.data)
  }
  .obj <- list(get=.get, set=.set, has=.has
               , cleanup=.cleanup, names=.names
               , length = .length)
  # assign container object so later calls return the same registry
  assign(.name, .obj, envir=.envir)
  #
  invisible(.obj)
}
#' Defunct Functions in pkgmaker
#'
#' These functions have been defunct or superseded by other
#' functions.
#'
#' @param ... extra arguments
#'
#' @rdname pkgmaker-defunct
#' @name pkgmaker-defunct
NULL |
# Plot 4
## Load data for 2 days (1-2 Feb 2007, minute-level readings: 2 * 1440 = 2880 rows)
# NOTE(review): grep() returns EVERY matching row index here (one per minute
# of 1-2 Feb), so `skip` receives a whole vector; presumably only the first
# match minus one is intended — confirm read.table's handling.
powConsumption <- read.table(file = "~/household_power_consumption.txt",
sep = ";",
skip = grep("^[1-2]/2/2007", readLines("~/household_power_consumption.txt"))-1,
nrows = 2880)
## Naming columns (re-read just the header row of the raw file)
colnames(powConsumption) <- colnames(read.table(file = "~/household_power_consumption.txt", sep = ";", nrows = 1, header = T))
## Converting rows to appropriate format
powConsumption$Date <- as.Date(powConsumption$Date, "%d/%m/%Y")
library(dplyr)
## Creating new column to contain both date and time
powConsumption <- powConsumption %>% mutate(datetime = paste(Date, Time))
powConsumption$datetime <- strptime(powConsumption$datetime, "%Y-%m-%d %H:%M:%S")
## Creating grid for 4 graphs (2x2, row-wise)
par(mfrow = c(2,2), mar = c(5,4,2,1))
## Adding graph 1: global active power over time
with(powConsumption, plot(datetime, Global_active_power, type = "l", xlab = "", ylab = "Global Active Power"))
## Adding graph 2: voltage over time
with(powConsumption, plot(datetime, Voltage, type = "l"))
## Adding graph 3: the three sub-metering series overlaid on one panel
with(powConsumption, plot(datetime, Sub_metering_1, type = "n", xlab = "", ylab = "Energy sub metering"))
with(powConsumption, lines(datetime, Sub_metering_1))
with(powConsumption, lines(datetime, Sub_metering_2, col = "red"))
with(powConsumption, lines(datetime, Sub_metering_3, col = "blue"))
legend("topright", lty = 1, col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), bty = "n")
## Adding graph 4: global reactive power over time
with(powConsumption, plot(datetime, Global_reactive_power, type = "l"))
## Copying created plot to png file (480x480, then close the device below)
dev.copy(device = png, filename = 'Plot4.png', width = 480, height = 480)
dev.off() | /plot4.R | no_license | akshayamrit/ExData_Plotting1 | R | false | false | 1,719 | r | # Plot 4
## Load data for 2 days (1-2 Feb 2007, minute-level readings: 2 * 1440 = 2880 rows)
# NOTE(review): grep() returns EVERY matching row index here (one per minute
# of 1-2 Feb), so `skip` receives a whole vector; presumably only the first
# match minus one is intended — confirm read.table's handling.
powConsumption <- read.table(file = "~/household_power_consumption.txt",
sep = ";",
skip = grep("^[1-2]/2/2007", readLines("~/household_power_consumption.txt"))-1,
nrows = 2880)
## Naming columns (re-read just the header row of the raw file)
colnames(powConsumption) <- colnames(read.table(file = "~/household_power_consumption.txt", sep = ";", nrows = 1, header = T))
## Converting rows to appropriate format
powConsumption$Date <- as.Date(powConsumption$Date, "%d/%m/%Y")
library(dplyr)
## Creating new column to contain both date and time
powConsumption <- powConsumption %>% mutate(datetime = paste(Date, Time))
powConsumption$datetime <- strptime(powConsumption$datetime, "%Y-%m-%d %H:%M:%S")
## Creating grid for 4 graphs (2x2, row-wise)
par(mfrow = c(2,2), mar = c(5,4,2,1))
## Adding graph 1: global active power over time
with(powConsumption, plot(datetime, Global_active_power, type = "l", xlab = "", ylab = "Global Active Power"))
## Adding graph 2: voltage over time
with(powConsumption, plot(datetime, Voltage, type = "l"))
## Adding graph 3: the three sub-metering series overlaid on one panel
with(powConsumption, plot(datetime, Sub_metering_1, type = "n", xlab = "", ylab = "Energy sub metering"))
with(powConsumption, lines(datetime, Sub_metering_1))
with(powConsumption, lines(datetime, Sub_metering_2, col = "red"))
with(powConsumption, lines(datetime, Sub_metering_3, col = "blue"))
legend("topright", lty = 1, col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), bty = "n")
## Adding graph 4: global reactive power over time
with(powConsumption, plot(datetime, Global_reactive_power, type = "l"))
## Copying created plot to png file (480x480, then close the device below)
dev.copy(device = png, filename = 'Plot4.png', width = 480, height = 480)
dev.off() |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generateThreshVsPerf.R
\name{plotROCCurves}
\alias{plotROCCurves}
\title{Plots a ROC curve using ggplot2}
\usage{
plotROCCurves(obj, measures = obj$measures[1:2], diagonal = TRUE,
pretty.names = TRUE)
}
\arguments{
\item{obj}{[(list of) \code{\link{Prediction}} | (list of) \code{\link{ResampleResult}} | \code{\link{BenchmarkResult}}]\cr
Single prediction object, list of them, single resample result, list of them, or a benchmark result.
In case of a list probably produced by different learners you want to compare, then
name the list with the names you want to see in the plots, probably
learner shortnames or ids.}
\item{measures}{[\code{\link{Measure}} | list of \code{\link{Measure}}]\cr
Performance measure(s) to evaluate.
Default is the default measure for the task, see here \code{\link{getDefaultMeasure}}.}
\item{diagonal}{[\code{logical(1)}]\cr
Whether to plot a dashed diagonal line.
Default is \code{TRUE}.}
\item{pretty.names}{[\code{logical(1)}]\cr
Whether to use the \code{\link{Measure}} name instead of the id in the plot.
Default is \code{TRUE}.}
}
\value{
a ggplot2 plot object.
}
\description{
Plots a ROC curve from predictions.
}
\examples{
\donttest{
lrn = makeLearner("classif.rpart", predict.type = "prob")
fit = train(lrn, sonar.task)
pred = predict(fit, task = sonar.task)
roc = generateThreshVsPerfData(pred, list(fpr, tpr))
plotROCCurves(roc)
r = bootstrapB632plus(lrn, sonar.task, iters = 3)
roc_r = generateThreshVsPerfData(r, list(fpr, tpr), aggregate = FALSE)
plotROCCurves(roc_r)
r2 = crossval(lrn, sonar.task, iters = 3)
roc_l = generateThreshVsPerfData(list(boot = r, cv = r2), list(fpr, tpr), aggregate = FALSE)
plotROCCurves(roc_l)
}
}
\seealso{
Other plot: \code{\link{plotBMRBoxplots}},
\code{\link{plotBMRRanksAsBarChart}},
\code{\link{plotBMRSummary}},
\code{\link{plotCalibration}},
\code{\link{plotCritDifferences}},
\code{\link{plotFilterValuesGGVIS}},
\code{\link{plotFilterValues}},
\code{\link{plotLearningCurveGGVIS}},
\code{\link{plotLearningCurve}},
\code{\link{plotPartialPredictionGGVIS}},
\code{\link{plotPartialPrediction}},
\code{\link{plotROCRCurvesGGVIS}},
\code{\link{plotROCRCurves}},
\code{\link{plotThreshVsPerfGGVIS}},
\code{\link{plotThreshVsPerf}}
Other thresh_vs_perf: \code{\link{generateThreshVsPerfData}},
\code{\link{plotThreshVsPerfGGVIS}},
\code{\link{plotThreshVsPerf}}
}
| /man/plotROCCurves.Rd | no_license | ageek/mlr | R | false | true | 2,465 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generateThreshVsPerf.R
\name{plotROCCurves}
\alias{plotROCCurves}
\title{Plots a ROC curve using ggplot2}
\usage{
plotROCCurves(obj, measures = obj$measures[1:2], diagonal = TRUE,
pretty.names = TRUE)
}
\arguments{
\item{obj}{[(list of) \code{\link{Prediction}} | (list of) \code{\link{ResampleResult}} | \code{\link{BenchmarkResult}}]\cr
Single prediction object, list of them, single resample result, list of them, or a benchmark result.
In case of a list probably produced by different learners you want to compare, then
name the list with the names you want to see in the plots, probably
learner shortnames or ids.}
\item{measures}{[\code{\link{Measure}} | list of \code{\link{Measure}}]\cr
Performance measure(s) to evaluate.
Default is the default measure for the task, see here \code{\link{getDefaultMeasure}}.}
\item{diagonal}{[\code{logical(1)}]\cr
Whether to plot a dashed diagonal line.
Default is \code{TRUE}.}
\item{pretty.names}{[\code{logical(1)}]\cr
Whether to use the \code{\link{Measure}} name instead of the id in the plot.
Default is \code{TRUE}.}
}
\value{
A ggplot2 plot object.
}
\description{
Plots a ROC curve from predictions.
}
\examples{
\donttest{
lrn = makeLearner("classif.rpart", predict.type = "prob")
fit = train(lrn, sonar.task)
pred = predict(fit, task = sonar.task)
roc = generateThreshVsPerfData(pred, list(fpr, tpr))
plotROCCurves(roc)
r = bootstrapB632plus(lrn, sonar.task, iters = 3)
roc_r = generateThreshVsPerfData(r, list(fpr, tpr), aggregate = FALSE)
plotROCCurves(roc_r)
r2 = crossval(lrn, sonar.task, iters = 3)
roc_l = generateThreshVsPerfData(list(boot = r, cv = r2), list(fpr, tpr), aggregate = FALSE)
plotROCCurves(roc_l)
}
}
\seealso{
Other plot: \code{\link{plotBMRBoxplots}},
\code{\link{plotBMRRanksAsBarChart}},
\code{\link{plotBMRSummary}},
\code{\link{plotCalibration}},
\code{\link{plotCritDifferences}},
\code{\link{plotFilterValuesGGVIS}},
\code{\link{plotFilterValues}},
\code{\link{plotLearningCurveGGVIS}},
\code{\link{plotLearningCurve}},
\code{\link{plotPartialPredictionGGVIS}},
\code{\link{plotPartialPrediction}},
\code{\link{plotROCRCurvesGGVIS}},
\code{\link{plotROCRCurves}},
\code{\link{plotThreshVsPerfGGVIS}},
\code{\link{plotThreshVsPerf}}
Other thresh_vs_perf: \code{\link{generateThreshVsPerfData}},
\code{\link{plotThreshVsPerfGGVIS}},
\code{\link{plotThreshVsPerf}}
}
|
\name{humidity}
\alias{hr}
\alias{pw}
\alias{pw.ai}
\alias{pw.aw}
\alias{Td}
\alias{Td.aw}
\alias{Tf.ai}
\title{
Converting Humidity Measures into Each Other
}
\description{
Converting dew point (of water in air) into vapour pressure or relative humidity and vice versa.
}
\usage{
hr(T, Td, warn = TRUE)
pw(Td, warn = TRUE)
pw.ai(Td, warn = TRUE)
pw.aw(Td, warn = TRUE)
Td(pw = NULL, T = NULL, hr = NULL, warn = TRUE)
Td.aw(pw = NULL, T = NULL, hr = NULL, warn = TRUE)
Tf.ai(pw = NULL, T = NULL, hr = NULL, warn = TRUE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{hr}{
Relative humidity in \eqn{[\textnormal{\%}]}.
}
\item{T}{
Temperature of air in \eqn{[^\circ \textnormal{C}]}.
}
\item{Td}{
Dew point of air in \eqn{[^\circ \textnormal{C}]}.
}
\item{pw}{
Saturation vapour pressure in \eqn{[\textnormal{hPa}]}.
}
\item{warn}{
When \code{TRUE} and arguments are out of range, a warning is thrown.
}
}
\details{
All vapour pressures correspond to pure water vapour and are not
adjusted to water vapour in air.
}
\value{
\code{hr} transforms dew point above ice / water at a certain temperature into
relative humidity (\eqn{-65^\circ\textnormal{C} <= T <=
60^\circ\textnormal{C}}).\cr
\code{pw} transforms dew point into saturation vapour pressure above ice
(\eqn{-65^\circ\textnormal{C} <= T <=
0^\circ\textnormal{C}}) and above water (\eqn{0^\circ\textnormal{C} < T <=
60^\circ\textnormal{C}}) respectively.\cr
\code{pw.ai} transforms dew point into saturation vapour
pressure above ice (\eqn{-65^\circ\textnormal{C} <= T <=
0^\circ\textnormal{C}}).\cr
\code{pw.aw} transforms dew point into saturation vapour
pressure above liquid water (\eqn{-45^\circ\textnormal{C} <= T <=
60^\circ\textnormal{C}}).\cr
\code{Td} transforms vapour pressure or relative humidity and
temperature into frost point above ice (\eqn{-65^\circ\textnormal{C} <= T <=
0^\circ\textnormal{C}}) and dew point above water (\eqn{0^\circ\textnormal{C} < T <=
60^\circ\textnormal{C}}) respectively.\cr
\code{Td.aw} transforms vapour pressure or relative humidity and
temperature into dew point above water (\eqn{-45^\circ\textnormal{C} <= T <=
60^\circ\textnormal{C}}).\cr
\code{Tf.ai} transforms vapour pressure or relative humidity and
temperature into frost point above ice (\eqn{-65^\circ\textnormal{C} <= T <=
0^\circ\textnormal{C}}).
}
\references{
Guide to Meteorological Instruments and Methods of Observation, WMO,
WMO-No. 8, Seventh edition, 2008, updated 2010, 1.4-29 Annex 4.B
}
\author{
Rene Locher
}
\examples{
pw(Td = c(-65, -45, 0, 30, 60))
Td(pw = pw(c(-20, 0, 20)))
hr(T = c(20, 30, 40), Td = c(0, 20, 30))
## [1] 26.20257 55.09561 57.46519
hr(T = seq(0, 60, 5), Td = 0)
}
\keyword{ arith }% use one of RShowDoc("KEYWORDS")
| /man/humidity.Rd | no_license | pmur002/IDPmisc | R | false | false | 2,960 | rd | \name{humidity}
\alias{hr}
\alias{pw}
\alias{pw.ai}
\alias{pw.aw}
\alias{Td}
\alias{Td.aw}
\alias{Tf.ai}
\title{
Converting Humidity Measures into Each Other
}
\description{
Converting dew point (of water in air) into vapour pressure or relative humidity and vice versa.
}
\usage{
hr(T, Td, warn = TRUE)
pw(Td, warn = TRUE)
pw.ai(Td, warn = TRUE)
pw.aw(Td, warn = TRUE)
Td(pw = NULL, T = NULL, hr = NULL, warn = TRUE)
Td.aw(pw = NULL, T = NULL, hr = NULL, warn = TRUE)
Tf.ai(pw = NULL, T = NULL, hr = NULL, warn = TRUE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{hr}{
Relative humidity in \eqn{[\textnormal{\%}]}.
}
\item{T}{
Temperature of air in \eqn{[^\circ \textnormal{C}]}.
}
\item{Td}{
Dew point of air in \eqn{[^\circ \textnormal{C}]}.
}
\item{pw}{
Saturation vapour pressure in \eqn{[\textnormal{hPa}]}.
}
\item{warn}{
When \code{TRUE} and arguments are out of range, a warning is thrown.
}
}
\details{
All vapour pressures correspond to pure water vapour and are not
adjusted to water vapour in air.
}
\value{
\code{hr} transforms dew point above ice / water at a certain temperature into
relative humidity (\eqn{-65^\circ\textnormal{C} <= T <=
60^\circ\textnormal{C}}).\cr
\code{pw} transforms dew point into saturation vapour pressure above ice
(\eqn{-65^\circ\textnormal{C} <= T <=
0^\circ\textnormal{C}}) and above water (\eqn{0^\circ\textnormal{C} < T <=
60^\circ\textnormal{C}}) respectively.\cr
\code{pw.ai} transforms dew point into saturation vapour
pressure above ice (\eqn{-65^\circ\textnormal{C} <= T <=
0^\circ\textnormal{C}}).\cr
\code{pw.aw} transforms dew point into saturation vapour
pressure above liquid water (\eqn{-45^\circ\textnormal{C} <= T <=
60^\circ\textnormal{C}}).\cr
\code{Td} transforms vapour pressure or relative humidity and
temperature into frost point above ice (\eqn{-65^\circ\textnormal{C} <= T <=
0^\circ\textnormal{C}}) and dew point above water (\eqn{0^\circ\textnormal{C} < T <=
60^\circ\textnormal{C}}) respectively.\cr
\code{Td.aw} transforms vapour pressure or relative humidity and
temperature into dew point above water (\eqn{-45^\circ\textnormal{C} <= T <=
60^\circ\textnormal{C}}).\cr
\code{Tf.ai} transforms vapour pressure or relative humidity and
temperature into frost point above ice (\eqn{-65^\circ\textnormal{C} <= T <=
0^\circ\textnormal{C}}).
}
\references{
Guide to Meteorological Instruments and Methods of Observation, WMO,
WMO-No. 8, Seventh edition, 2008, updated 2010, 1.4-29 Annex 4.B
}
\author{
Rene Locher
}
\examples{
pw(Td = c(-65, -45, 0, 30, 60))
Td(pw = pw(c(-20, 0, 20)))
hr(T = c(20, 30, 40), Td = c(0, 20, 30))
## [1] 26.20257 55.09561 57.46519
hr(T = seq(0, 60, 5), Td = 0)
}
\keyword{ arith }% use one of RShowDoc("KEYWORDS")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kpAddLabels.R
\name{kpAddLabels}
\alias{kpAddLabels}
\title{kpAddLabels}
\usage{
kpAddLabels(karyoplot, labels, label.margin=0.01, side="left", pos=NULL, offset=0, r0=NULL, r1=NULL, data.panel=1, ...)
}
\arguments{
\item{karyoplot}{a \code{karyoplot} object returned by a call to \code{plotKaryotype}}
\item{labels}{(character) the text on the labels}
\item{label.margin}{(numeric) the additional margin between the labels and the first base of the chromosome. In plot coordinates. Usual value might be 0.05. Can be negative. (defaults to 0.01)}
\item{side}{("left" or "right") The side of the plot where to plot the labels. (defaults to "left")}
\item{pos}{(numeric) The standard graphical parameter. See \code{\link[graphics]{text}}. If NULL, pos will be selected automatically based on "side" (Defaults to NULL)}
\item{offset}{(numeric) The standard graphical parameter. See \code{\link[graphics]{text}}. (Defaults to 0)}
\item{r0}{(numeric) r0 and r1 define the vertical range of the data panel to be used to position the label. They can be used to split the data panel in different vertical ranges (similar to tracks in a genome browser) to plot different data. If NULL, they are set to the min and max of the data panel, that is, to use all the available space. (defaults to NULL)}
\item{r1}{(numeric) r0 and r1 define the vertical range of the data panel to be used to position the label. They can be used to split the data panel in different vertical ranges (similar to tracks in a genome browser) to plot different data. If NULL, they are set to the min and max of the data panel, that is, to use all the available space. (defaults to NULL)}
\item{data.panel}{(numeric) The identifier of the data panel where the labels are to be added. The available data panels depend on the plot type selected in the call to \code{\link{plotKaryotype}}. (defaults to 1)}
\item{...}{any additional parameter to be passed to the text plotting. All R base graphics params are passed along.}
}
\value{
invisibly returns the given karyoplot object
}
\description{
Add labels to identify the data in the plot
}
\details{
Given a KaryoPlot object, plot labels on the side of the data panels to help identify the different types of data plotted
}
\examples{
plot.params <- getDefaultPlotParams(plot.type=2)
plot.params$leftmargin <- 0.2
plot.params$rightmargin <- 0.2
#In standard whole karyotypes, labels are drawn for all chromosomes
kp <- plotKaryotype("hg19", chromosomes=c("chr1", "chr2"), plot.type=2, plot.params = plot.params)
#data panel 1
kpDataBackground(kp, r0=0, r1=0.5, col="#FFDDDD")
kpDataBackground(kp, r0=0.5, r1=1, col="#DDFFDD")
kpAddLabels(kp, "Everything", label.margin = 0.12, srt=90, pos=3, cex=0.8)
kpAddLabels(kp, "Red", r0=0, r1=0.5, cex=0.6)
kpAddLabels(kp, "Green", r0=0.5, r1=1, cex=0.6)
#data panel 2
kpDataBackground(kp, col="#DDDDFF", data.panel = 2)
kpAddLabels(kp, "BLUE", data.panel=2)
#Plot on the right
#data panel 1
kpAddLabels(kp, "Everything", label.margin = 0.12, srt=90, pos=1, cex=0.8, side="right")
kpAddLabels(kp, "Red", r0=0, r1=0.5, cex=0.6, side="right")
kpAddLabels(kp, "Green", r0=0.5, r1=1, cex=0.6, side="right")
#In karyotypes with all chromosomes in a single line,
#labels are added on the first (side="left") or last (side="right") chromosome
kp <- plotKaryotype("hg19", chromosomes=c("chr1", "chr2", "chr3"), plot.type=3, plot.params = plot.params)
#data panel 1
kpDataBackground(kp, r0=0, r1=0.5, col="#FFDDDD")
kpDataBackground(kp, r0=0.5, r1=1, col="#DDFFDD")
kpAddLabels(kp, "Everything", label.margin = 0.12, srt=90, pos=3, cex=0.8)
kpAddLabels(kp, "Red", r0=0, r1=0.5, cex=0.6)
kpAddLabels(kp, "Green", r0=0.5, r1=1, cex=0.6)
#data panel 2
kpDataBackground(kp, col="#DDDDFF", data.panel = 2)
kpAddLabels(kp, "BLUE", data.panel=2)
#Plot on the right
#data panel 1
kpAddLabels(kp, "Everything", label.margin = 0.12, srt=90, pos=1, cex=0.8, side="right")
kpAddLabels(kp, "Red", r0=0, r1=0.5, cex=0.6, side="right")
kpAddLabels(kp, "Green", r0=0.5, r1=1, cex=0.6, side="right")
#In Zoomed regions, they are placed at the correct position too
kp <- plotKaryotype("hg19", zoom="chr1:20000000-40000000", plot.type=2, plot.params = plot.params)
kpAddBaseNumbers(kp, tick.dist=5000000, add.units=TRUE)
#data panel 1
kpDataBackground(kp, r0=0, r1=0.5, col="#FFDDDD")
kpDataBackground(kp, r0=0.5, r1=1, col="#DDFFDD")
kpAddLabels(kp, "Everything", label.margin = 0.12, srt=90, pos=3, cex=0.8)
kpAddLabels(kp, "Red", r0=0, r1=0.5, cex=0.6)
kpAddLabels(kp, "Green", r0=0.5, r1=1, cex=0.6)
#data panel 2
kpDataBackground(kp, col="#DDDDFF", data.panel = 2)
kpAddLabels(kp, "BLUE", data.panel=2)
#Plot on the right
#data panel 1
kpAddLabels(kp, "Everything", label.margin = 0.12, srt=90, pos=1, cex=0.8, side="right")
kpAddLabels(kp, "Red", r0=0, r1=0.5, cex=0.6, side="right")
kpAddLabels(kp, "Green", r0=0.5, r1=1, cex=0.6, side="right")
}
\seealso{
\code{\link{plotKaryotype}}
}
| /man/kpAddLabels.Rd | no_license | bernatgel/karyoploteR | R | false | true | 5,023 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kpAddLabels.R
\name{kpAddLabels}
\alias{kpAddLabels}
\title{kpAddLabels}
\usage{
kpAddLabels(karyoplot, labels, label.margin=0.01, side="left", pos=NULL, offset=0, r0=NULL, r1=NULL, data.panel=1, ...)
}
\arguments{
\item{karyoplot}{a \code{karyoplot} object returned by a call to \code{plotKaryotype}}
\item{labels}{(character) the text on the labels}
\item{label.margin}{(numeric) the additional margin between the labels and the first base of the chromosome. In plot coordinates. Usual value might be 0.05. Can be negative. (defaults to 0.01)}
\item{side}{("left" or "right") The side of the plot where to plot the labels. (defaults to "left")}
\item{pos}{(numeric) The standard graphical parameter. See \code{\link[graphics]{text}}. If NULL, pos will be selected automatically based on "side" (Defaults to NULL)}
\item{offset}{(numeric) The standard graphical parameter. See \code{\link[graphics]{text}}. (Defaults to 0)}
\item{r0}{(numeric) r0 and r1 define the vertical range of the data panel to be used to position the label. They can be used to split the data panel in different vertical ranges (similar to tracks in a genome browser) to plot different data. If NULL, they are set to the min and max of the data panel, that is, to use all the available space. (defaults to NULL)}
\item{r1}{(numeric) r0 and r1 define the vertical range of the data panel to be used to position the label. They can be used to split the data panel in different vertical ranges (similar to tracks in a genome browser) to plot different data. If NULL, they are set to the min and max of the data panel, that is, to use all the available space. (defaults to NULL)}
\item{data.panel}{(numeric) The identifier of the data panel where the labels are to be added. The available data panels depend on the plot type selected in the call to \code{\link{plotKaryotype}}. (defaults to 1)}
\item{...}{any additional parameter to be passed to the text plotting. All R base graphics params are passed along.}
}
\value{
invisibly returns the given karyoplot object
}
\description{
Add labels to identify the data in the plot
}
\details{
Given a KaryoPlot object, plot labels on the side of the data panels to help identify the different types of data plotted
}
\examples{
plot.params <- getDefaultPlotParams(plot.type=2)
plot.params$leftmargin <- 0.2
plot.params$rightmargin <- 0.2
#In standard whole karyotypes, labels are drawn for all chromosomes
kp <- plotKaryotype("hg19", chromosomes=c("chr1", "chr2"), plot.type=2, plot.params = plot.params)
#data panel 1
kpDataBackground(kp, r0=0, r1=0.5, col="#FFDDDD")
kpDataBackground(kp, r0=0.5, r1=1, col="#DDFFDD")
kpAddLabels(kp, "Everything", label.margin = 0.12, srt=90, pos=3, cex=0.8)
kpAddLabels(kp, "Red", r0=0, r1=0.5, cex=0.6)
kpAddLabels(kp, "Green", r0=0.5, r1=1, cex=0.6)
#data panel 2
kpDataBackground(kp, col="#DDDDFF", data.panel = 2)
kpAddLabels(kp, "BLUE", data.panel=2)
#Plot on the right
#data panel 1
kpAddLabels(kp, "Everything", label.margin = 0.12, srt=90, pos=1, cex=0.8, side="right")
kpAddLabels(kp, "Red", r0=0, r1=0.5, cex=0.6, side="right")
kpAddLabels(kp, "Green", r0=0.5, r1=1, cex=0.6, side="right")
#In karyotypes with all chromosomes in a single line,
#labels are added on the first (side="left") or last (side="right") chromosome
kp <- plotKaryotype("hg19", chromosomes=c("chr1", "chr2", "chr3"), plot.type=3, plot.params = plot.params)
#data panel 1
kpDataBackground(kp, r0=0, r1=0.5, col="#FFDDDD")
kpDataBackground(kp, r0=0.5, r1=1, col="#DDFFDD")
kpAddLabels(kp, "Everything", label.margin = 0.12, srt=90, pos=3, cex=0.8)
kpAddLabels(kp, "Red", r0=0, r1=0.5, cex=0.6)
kpAddLabels(kp, "Green", r0=0.5, r1=1, cex=0.6)
#data panel 2
kpDataBackground(kp, col="#DDDDFF", data.panel = 2)
kpAddLabels(kp, "BLUE", data.panel=2)
#Plot on the right
#data panel 1
kpAddLabels(kp, "Everything", label.margin = 0.12, srt=90, pos=1, cex=0.8, side="right")
kpAddLabels(kp, "Red", r0=0, r1=0.5, cex=0.6, side="right")
kpAddLabels(kp, "Green", r0=0.5, r1=1, cex=0.6, side="right")
#In Zoomed regions, they are placed at the correct position too
kp <- plotKaryotype("hg19", zoom="chr1:20000000-40000000", plot.type=2, plot.params = plot.params)
kpAddBaseNumbers(kp, tick.dist=5000000, add.units=TRUE)
#data panel 1
kpDataBackground(kp, r0=0, r1=0.5, col="#FFDDDD")
kpDataBackground(kp, r0=0.5, r1=1, col="#DDFFDD")
kpAddLabels(kp, "Everything", label.margin = 0.12, srt=90, pos=3, cex=0.8)
kpAddLabels(kp, "Red", r0=0, r1=0.5, cex=0.6)
kpAddLabels(kp, "Green", r0=0.5, r1=1, cex=0.6)
#data panel 2
kpDataBackground(kp, col="#DDDDFF", data.panel = 2)
kpAddLabels(kp, "BLUE", data.panel=2)
#Plot on the right
#data panel 1
kpAddLabels(kp, "Everything", label.margin = 0.12, srt=90, pos=1, cex=0.8, side="right")
kpAddLabels(kp, "Red", r0=0, r1=0.5, cex=0.6, side="right")
kpAddLabels(kp, "Green", r0=0.5, r1=1, cex=0.6, side="right")
}
\seealso{
\code{\link{plotKaryotype}}
}
|
#------------------------------------------------------------
# BNP - Paribas - Claims Projections - 2016-02-15
#------------------------------------------------------------
# NOTE(review): setwd() to an absolute, machine-specific path makes the
# script non-portable; consider running it from the project root instead.
setwd("/Volumes/TOSHIBA EXT/Verbatin64/R-cosas/2016-01 - Kaggle/02_BNP_Paribas")
#Library loading
library(data.table)
library(caret)
library(stringr)
library(lubridate)
library(readr)
library(h2o)
# Load the training data and move the target column (originally column 2)
# to the front, so that column 1 = target and column 2 = ID.
datIn <- read_csv("train.csv")
datIn <- datIn[, c(2,1, 3:ncol(datIn))]
# Impute all missing values with the sentinel -999.
datIn[is.na(datIn)] <- -999
datIn$target <- as.factor(datIn$target)
datIn <- as.data.frame(datIn)
# Test set gets the same NA-sentinel treatment (no target column here).
datTest <- read_csv("test.csv")
datTest[is.na(datTest)] <- -999
datTest <- as.data.frame(datTest)
# Encode every character column as numeric factor codes so the frames are
# purely numeric before handing them to H2O.
# (Rewrite of the original index loops: they grew `col_char` one element at
# a time and left `col_char`/`j` behind as unused globals. The loop over
# datIn deliberately starts at column 2, skipping the target column.)
for (i in 2:ncol(datIn)) {
  if (is.character(datIn[[i]])) {
    datIn[[i]] <- as.numeric(as.factor(datIn[[i]]))
  }
}
for (i in seq_len(ncol(datTest))) {
  if (is.character(datTest[[i]])) {
    datTest[[i]] <- as.numeric(as.factor(datTest[[i]]))
  }
}
# Centre and scale the predictor columns (3:ncol; columns 1:2 are target/ID).
datIn_mx <- as.matrix(datIn[,3:ncol(datIn)])
datIn_ce <- scale(datIn_mx, center= TRUE, scale=TRUE)
datIn <- cbind.data.frame(datIn[,1:2], as.data.frame(datIn_ce))
#--------------------------------------------------------
#-------------- READY TO MODEL
#--------------------------------------------------------
library(h2o)
# Start a local H2O cluster using all cores and up to 8 GB of heap.
h2o.init(nthreads=-1,max_mem_size = '8G')
### load both files in using H2O's parallel import
#train<-h2o.uploadFile("train.csv",destination_frame = "train.hex")
#test<-h2o.uploadFile("test.csv",destination_frame = "test.hex")
train <- as.h2o(datIn, destination_frame = 'train.hex')
test<- as.h2o(datTest,destination_frame = "test.hex")
train$target<-as.factor(train$target)
# 90/10 train/validation split with a fixed seed for reproducibility.
splits<-h2o.splitFrame(train,0.9,destination_frames = c("trainSplit","validSplit"),seed=111111111)
#---------------------------------
#---------------------- DEEPLEARNING
#---------------------------------
a <- Sys.time();a
# Hyper-parameter grid. Only the hidden-layer sizes and the activation
# function actually vary; the commented-out seq() lines record ranges that
# were considered but fixed to a single value.
dlGrid <- expand.grid(
hi_dd_A = seq(50,300,50),
hi_dd_B = seq(50,300,50),
hi_dd_C = seq(2,300,50),
ac_ti = c('Tanh', 'RectifierWithDropout'),
#ac_ti = c('Tanh'),
#mx_w2 = seq(10,50, length.out = 2),
mx_w2 = 10,
#l1_va = seq(1e-5, 1e-3, length.out = 2),
l1_va = 1e-5,
#l2_va = seq(1e-5, 1e-3, length.out = 2),
l2_va = 1e-5,
#ep_si = seq(1e-4, 1e-10, length.out = 2),
ep_si = 1e-10,
#rh_oo = seq(0.9, 0.99, length.out = 2),
rh_oo = 0.90,
#ra_te = seq(1e-4, 1e-2, length.out = 2),
ra_te = 1e-2,
#ra_de = seq(0.5, 1, length.out = 2),
ra_de = 0.5,
#ra_an = seq(1e-5, 1e-9, length.out = 2),
ra_an = 1e-9,
#mo_st = seq(0.5, 0.9, length.out = 2),
mo_st = 0.7,
mo_rp = 1/0.7
)
# Visit the grid rows in random order.
row_sam <- sample(1:nrow(dlGrid), nrow(dlGrid))
dlGrid <- dlGrid[ row_sam , ]
# Results accumulator: one row per fitted model -- validation logloss
# (dlAcc), the hyper-parameters used, and the elapsed fit time (ex_t).
res_df <- data.frame(
dlAcc=0, hi_dd_A=0, hi_dd_B=0, hi_dd_C=0,
ac_ti=0, mx_w2=0, l1_va=0, l2_va=0,
ep_si=0, rh_oo=0, ra_te=0, ra_de=0,
ra_an=0, mo_st=0, mo_rp=0, ex_t=0
)
j <- 0
# Grid search over the deep-learning hyper-parameter combinations.
# For each row of dlGrid: fit a model, record the validation logloss and
# hyper-parameters in res_df, and write a submission file whenever the model
# beats the 0.49 logloss threshold.
for (i in seq_len(nrow(dlGrid))) {
  print(i)
  ex_a <- Sys.time()
  res_dl <- h2o.deeplearning(
    x = 3:133,
    y = 1,
    training_frame = splits[[1]],
    validation_frame = splits[[2]],
    stopping_rounds = 1,
    stopping_tolerance = 0,
    seed = 222222222,
    model_id = "baseDL",
    stopping_metric = "logloss",
    nesterov_accelerated_gradient = TRUE,
    epochs = 500,
    momentum_stable = 0.99,
    input_dropout_ratio = 0.2,
    initial_weight_distribution = 'Normal',
    initial_weight_scale = 0.01,
    loss = 'CrossEntropy',
    fast_mode = TRUE,
    diagnostics = TRUE,
    ignore_const_cols = TRUE,
    force_load_balance = FALSE,
    # Grid-dependent hyper-parameters: row i of dlGrid.
    hidden = c(dlGrid$hi_dd_A[i], dlGrid$hi_dd_B[i], dlGrid$hi_dd_C[i]),
    activation = as.vector(dlGrid$ac_ti[i]),
    max_w2 = dlGrid$mx_w2[i],
    l1 = dlGrid$l1_va[i],
    l2 = dlGrid$l2_va[i],
    epsilon = dlGrid$ep_si[i],
    rho = dlGrid$rh_oo[i],
    rate = dlGrid$ra_te[i],
    rate_decay = dlGrid$ra_de[i],
    rate_annealing = dlGrid$ra_an[i],
    momentum_start = dlGrid$mo_st[i],
    momentum_ramp = dlGrid$mo_rp[i]
  )
  ex_b <- Sys.time()
  # BUG FIX: the original used as.numeric(as.character(ex_b - ex_a)), whose
  # units depend on the elapsed magnitude (seconds vs minutes). Always
  # record seconds.
  ex_t <- as.numeric(difftime(ex_b, ex_a, units = "secs"))
  ### look at some information about the model
  Accdl <- h2o.logloss(res_dl, valid = TRUE)
  print(Accdl)
  j <- j + 1
  res_df[j, 1]  <- Accdl
  res_df[j, 2]  <- dlGrid$hi_dd_A[i]
  res_df[j, 3]  <- dlGrid$hi_dd_B[i]
  res_df[j, 4]  <- dlGrid$hi_dd_C[i]
  res_df[j, 5]  <- as.vector(dlGrid$ac_ti[i])
  res_df[j, 6]  <- dlGrid$mx_w2[i]
  res_df[j, 7]  <- dlGrid$l1_va[i]
  res_df[j, 8]  <- dlGrid$l2_va[i]
  res_df[j, 9]  <- dlGrid$ep_si[i]
  res_df[j, 10] <- dlGrid$rh_oo[i]
  res_df[j, 11] <- dlGrid$ra_te[i]
  res_df[j, 12] <- dlGrid$ra_de[i]
  res_df[j, 13] <- dlGrid$ra_an[i]
  res_df[j, 14] <- dlGrid$mo_st[i]
  res_df[j, 15] <- dlGrid$mo_rp[i]
  res_df[j, 16] <- ex_t
  print(res_df)
  # BUG FIX: the original printed max(res_df$Accdl); no such column exists
  # (it is named dlAcc), so max(NULL) returned -Inf with a warning on every
  # iteration. Report the best (lowest) logloss seen so far instead.
  print(min(res_df$dlAcc))
  if (Accdl < 0.49) {
    #--------------------------------------------------------
    #-------------- PREDICTION
    #--------------------------------------------------------
    ### get predictions against the test set and create submission file
    p <- as.data.frame(h2o.predict(res_dl, test))
    testIds <- as.data.frame(test$ID)
    submission <- data.frame(cbind(testIds, p$p1))
    colnames(submission) <- c("ID", "PredictedProb")
    #--------------------------------------------------------
    #-------------- FILE UPLOAD
    #--------------------------------------------------------
    file_tmp <- paste0("Res_xxxxx_H2O_dl_", round(Accdl, 5), "_.csv")
    write.csv(submission, file_tmp, row.names = FALSE)
    # Also write the data.frame with the intermediate results.
    write_csv(res_df, "res_df_dl_.csv")
  }
} # for (i in seq_len(nrow(dlGrid)))
# Persist the full grid-search results and report total elapsed time.
write_csv(res_df, "res_df_dl_.csv")
b <- Sys.time();b; b-a
# names(res_df) <- c('rfAcc', 'max_depth','samp_rate','mtries','ex_time')
#
# library(lattice)
# xy_gr <- xyplot(
# rfAcc ~ samp_rate
# ,data = res_df
# ,type = "b"
# ,strip = strip.custom(strip.names = TRUE, strip.levels = TRUE)
# )
# print(xy_gr)
#--------------------------------------------------------
#-------------- CLOSE H20
#--------------------------------------------------------
### All done, shutdown H2O
h2o.shutdown(prompt=FALSE)
#--------------------------------------------------------
#-------------- Results
| /bnp_H2o_DeepLearning.R | no_license | coforfe/R_Kaggle_BNP | R | false | false | 6,468 | r | #------------------------------------------------------------
# BNP - Paribas - Claims Projections - 2016-02-15
#------------------------------------------------------------
# NOTE(review): setwd() to an absolute, machine-specific path makes the
# script non-portable; consider running it from the project root instead.
setwd("/Volumes/TOSHIBA EXT/Verbatin64/R-cosas/2016-01 - Kaggle/02_BNP_Paribas")
#Library loading
library(data.table)
library(caret)
library(stringr)
library(lubridate)
library(readr)
library(h2o)
# Load the training data; move the target column (originally column 2) to
# the front (column 1 = target, column 2 = ID) and impute NAs with -999.
datIn <- read_csv("train.csv")
datIn <- datIn[, c(2,1, 3:ncol(datIn))]
datIn[is.na(datIn)] <- -999
datIn$target <- as.factor(datIn$target)
datIn <- as.data.frame(datIn)
# Test set gets the same NA-sentinel treatment.
datTest <- read_csv("test.csv")
datTest[is.na(datTest)] <- -999
datTest <- as.data.frame(datTest)
# Encode character columns of datIn (skipping column 1 = target) as numeric
# factor codes. `col_char`/`j` track which columns were converted but are
# not used afterwards.
col_char <- 0
j <- 0
for( i in 2:ncol(datIn)) {
cltmp <- class(datIn[, i])
if(cltmp == "character") {
j <- j + 1
col_char[j] <- i
datIn[,i] <- as.numeric( as.factor(datIn[,i]) )
} else next
}
# Same conversion for the test set (all columns).
col_char <- 0
j <- 0
for( i in 1:ncol(datTest)) {
cltmp <- class(datTest[, i])
if(cltmp == "character") {
j <- j + 1
col_char[j] <- i
datTest[,i] <- as.numeric( as.factor(datTest[,i]) )
} else next
}
# Centre and scale the predictor columns (3:ncol; columns 1:2 are target/ID).
datIn_mx <- as.matrix(datIn[,3:ncol(datIn)])
datIn_ce <- scale(datIn_mx, center= TRUE, scale=TRUE)
datIn <- cbind.data.frame(datIn[,1:2], as.data.frame(datIn_ce))
#--------------------------------------------------------
#-------------- READY TO MODEL
#--------------------------------------------------------
library(h2o)
# Start a local H2O cluster using all cores and up to 8 GB of heap.
h2o.init(nthreads=-1,max_mem_size = '8G')
### load both files in using H2O's parallel import
#train<-h2o.uploadFile("train.csv",destination_frame = "train.hex")
#test<-h2o.uploadFile("test.csv",destination_frame = "test.hex")
train <- as.h2o(datIn, destination_frame = 'train.hex')
test<- as.h2o(datTest,destination_frame = "test.hex")
train$target<-as.factor(train$target)
# 90/10 train/validation split with a fixed seed for reproducibility.
splits<-h2o.splitFrame(train,0.9,destination_frames = c("trainSplit","validSplit"),seed=111111111)
#---------------------------------
#---------------------- DEEPLEARNING
#---------------------------------
a <- Sys.time();a
# Hyper-parameter grid. Only hidden-layer sizes and activation vary; the
# commented-out seq() lines record ranges considered but fixed.
dlGrid <- expand.grid(
hi_dd_A = seq(50,300,50),
hi_dd_B = seq(50,300,50),
hi_dd_C = seq(2,300,50),
ac_ti = c('Tanh', 'RectifierWithDropout'),
#ac_ti = c('Tanh'),
#mx_w2 = seq(10,50, length.out = 2),
mx_w2 = 10,
#l1_va = seq(1e-5, 1e-3, length.out = 2),
l1_va = 1e-5,
#l2_va = seq(1e-5, 1e-3, length.out = 2),
l2_va = 1e-5,
#ep_si = seq(1e-4, 1e-10, length.out = 2),
ep_si = 1e-10,
#rh_oo = seq(0.9, 0.99, length.out = 2),
rh_oo = 0.90,
#ra_te = seq(1e-4, 1e-2, length.out = 2),
ra_te = 1e-2,
#ra_de = seq(0.5, 1, length.out = 2),
ra_de = 0.5,
#ra_an = seq(1e-5, 1e-9, length.out = 2),
ra_an = 1e-9,
#mo_st = seq(0.5, 0.9, length.out = 2),
mo_st = 0.7,
mo_rp = 1/0.7
)
# Visit the grid rows in random order.
row_sam <- sample(1:nrow(dlGrid), nrow(dlGrid))
dlGrid <- dlGrid[ row_sam , ]
# Results accumulator: one row per fitted model -- validation logloss
# (dlAcc), the hyper-parameters used, and the elapsed fit time (ex_t).
res_df <- data.frame(
dlAcc=0, hi_dd_A=0, hi_dd_B=0, hi_dd_C=0,
ac_ti=0, mx_w2=0, l1_va=0, l2_va=0,
ep_si=0, rh_oo=0, ra_te=0, ra_de=0,
ra_an=0, mo_st=0, mo_rp=0, ex_t=0
)
j <- 0
# Grid search over the deep-learning hyper-parameter combinations.
# For each row of dlGrid: fit a model, record the validation logloss and
# hyper-parameters in res_df, and write a submission file whenever the model
# beats the 0.49 logloss threshold.
for (i in seq_len(nrow(dlGrid))) {
  print(i)
  ex_a <- Sys.time()
  res_dl <- h2o.deeplearning(
    x = 3:133,
    y = 1,
    training_frame = splits[[1]],
    validation_frame = splits[[2]],
    stopping_rounds = 1,
    stopping_tolerance = 0,
    seed = 222222222,
    model_id = "baseDL",
    stopping_metric = "logloss",
    nesterov_accelerated_gradient = TRUE,
    epochs = 500,
    momentum_stable = 0.99,
    input_dropout_ratio = 0.2,
    initial_weight_distribution = 'Normal',
    initial_weight_scale = 0.01,
    loss = 'CrossEntropy',
    fast_mode = TRUE,
    diagnostics = TRUE,
    ignore_const_cols = TRUE,
    force_load_balance = FALSE,
    # Grid-dependent hyper-parameters: row i of dlGrid.
    hidden = c(dlGrid$hi_dd_A[i], dlGrid$hi_dd_B[i], dlGrid$hi_dd_C[i]),
    activation = as.vector(dlGrid$ac_ti[i]),
    max_w2 = dlGrid$mx_w2[i],
    l1 = dlGrid$l1_va[i],
    l2 = dlGrid$l2_va[i],
    epsilon = dlGrid$ep_si[i],
    rho = dlGrid$rh_oo[i],
    rate = dlGrid$ra_te[i],
    rate_decay = dlGrid$ra_de[i],
    rate_annealing = dlGrid$ra_an[i],
    momentum_start = dlGrid$mo_st[i],
    momentum_ramp = dlGrid$mo_rp[i]
  )
  ex_b <- Sys.time()
  # BUG FIX: the original used as.numeric(as.character(ex_b - ex_a)), whose
  # units depend on the elapsed magnitude (seconds vs minutes). Always
  # record seconds.
  ex_t <- as.numeric(difftime(ex_b, ex_a, units = "secs"))
  ### look at some information about the model
  Accdl <- h2o.logloss(res_dl, valid = TRUE)
  print(Accdl)
  j <- j + 1
  res_df[j, 1]  <- Accdl
  res_df[j, 2]  <- dlGrid$hi_dd_A[i]
  res_df[j, 3]  <- dlGrid$hi_dd_B[i]
  res_df[j, 4]  <- dlGrid$hi_dd_C[i]
  res_df[j, 5]  <- as.vector(dlGrid$ac_ti[i])
  res_df[j, 6]  <- dlGrid$mx_w2[i]
  res_df[j, 7]  <- dlGrid$l1_va[i]
  res_df[j, 8]  <- dlGrid$l2_va[i]
  res_df[j, 9]  <- dlGrid$ep_si[i]
  res_df[j, 10] <- dlGrid$rh_oo[i]
  res_df[j, 11] <- dlGrid$ra_te[i]
  res_df[j, 12] <- dlGrid$ra_de[i]
  res_df[j, 13] <- dlGrid$ra_an[i]
  res_df[j, 14] <- dlGrid$mo_st[i]
  res_df[j, 15] <- dlGrid$mo_rp[i]
  res_df[j, 16] <- ex_t
  print(res_df)
  # BUG FIX: the original printed max(res_df$Accdl); no such column exists
  # (it is named dlAcc), so max(NULL) returned -Inf with a warning on every
  # iteration. Report the best (lowest) logloss seen so far instead.
  print(min(res_df$dlAcc))
  if (Accdl < 0.49) {
    #--------------------------------------------------------
    #-------------- PREDICTION
    #--------------------------------------------------------
    ### get predictions against the test set and create submission file
    p <- as.data.frame(h2o.predict(res_dl, test))
    testIds <- as.data.frame(test$ID)
    submission <- data.frame(cbind(testIds, p$p1))
    colnames(submission) <- c("ID", "PredictedProb")
    #--------------------------------------------------------
    #-------------- FILE UPLOAD
    #--------------------------------------------------------
    file_tmp <- paste0("Res_xxxxx_H2O_dl_", round(Accdl, 5), "_.csv")
    write.csv(submission, file_tmp, row.names = FALSE)
    # Also write the data.frame with the intermediate results.
    write_csv(res_df, "res_df_dl_.csv")
  }
} # for (i in seq_len(nrow(dlGrid)))
# Persist the full grid-search results and report total elapsed time.
write_csv(res_df, "res_df_dl_.csv")
b <- Sys.time();b; b-a
# names(res_df) <- c('rfAcc', 'max_depth','samp_rate','mtries','ex_time')
#
# library(lattice)
# xy_gr <- xyplot(
# rfAcc ~ samp_rate
# ,data = res_df
# ,type = "b"
# ,strip = strip.custom(strip.names = TRUE, strip.levels = TRUE)
# )
# print(xy_gr)
#--------------------------------------------------------
#-------------- CLOSE H20
#--------------------------------------------------------
### All done, shutdown H2O
h2o.shutdown(prompt=FALSE)
#--------------------------------------------------------
#-------------- Results
|
# Author: Robert J. Hijmans
# Date : July 2010
# Version 0.9
# Licence GPL v3
# Plot a RasterLayer that carries a color table (x@legend@colortable).
# Internal helper: samples the raster down to at most `maxpixels` cells,
# maps cell values through the color table and draws the result with
# rasterImage().
#
# Args mirror plot(): `ext` crops, `interpolate` smooths the image,
# `zlim`/`zlimcol` clamp or drop out-of-range values, `addfun` is called
# after drawing (e.g. to overlay vectors), `add=TRUE` draws onto an
# existing plot.
.plotCT <- function(x, maxpixels=500000, ext=NULL, interpolate=FALSE, axes, main, xlab='', ylab='', asp, add=FALSE, addfun=NULL, zlim=NULL, zlimcol=NULL, ...) {
  # plotting with a color table
  if (missing(main)) {
    main <- ''
  }
  sethook <- FALSE
  if (!add) {
    plot.new()
    if (missing(axes)) {
      axes <- FALSE
    }
    if (!axes) {
      # No axes: use the whole device region. Remember the old par() so a
      # plot.new() hook can restore it for the next plot (see bottom).
      old.par <- par(no.readonly = TRUE)
      par(plt=c(0,1,0,1))
      sethook <- TRUE
    }
    if (missing(asp)) {
      if (couldBeLonLat(x)) {
        # Approximate an isometric aspect ratio for lon/lat data at the
        # raster's mid-latitude.
        ym <- mean(c(x@extent@ymax, x@extent@ymin))
        asp <- 1/cos((ym * pi)/180)
      } else {
        asp <- 1
      }
    }
  }
  coltab <- x@legend@colortable
  x <- sampleRegular(x, maxpixels, ext=ext, asRaster=TRUE, useGDAL=TRUE)
  z <- getValues(x)
  if (!is.null(zlim)) { # not that relevant here, but for consistency....
    if (is.null(zlimcol)) {
      # clamp values into [zlim[1], zlim[2]]
      z[ z<zlim[1] ] <- zlim[1]
      z[ z>zlim[2] ] <- zlim[2]
    } else { #if (is.na(zlimcol)) {
      # drop out-of-range values instead of clamping
      z[z<zlim[1] | z>zlim[2]] <- NA
    }
  }
  if (NCOL(coltab) == 2) {
    # two-column table: first column holds class breaks, second the colors
    z <- as.numeric(cut(z, coltab[,1]))
    coltab <- as.vector(coltab[,2])
  }
  # Shift to 1-based indexing into the color table; NA cells map to entry 1.
  z <- z + 1
  z[is.na(z)] <- 1
  if (! is.null(coltab) ) {
    # FIX: was byrow=T; use TRUE (T is a reassignable variable).
    z <- matrix(coltab[z], nrow=nrow(x), ncol=ncol(x), byrow=TRUE)
    z <- as.raster(z)
  } else {
    z <- matrix(z, nrow=nrow(x), ncol=ncol(x), byrow=TRUE)
    z <- as.raster(z, max=max(z)) #, na.rm=TRUE))
  }
  requireNamespace("grDevices")
  bb <- as.vector(t(bbox(x)))
  if (! add) {
    plot(c(bb[1], bb[2]), c(bb[3], bb[4]), type = "n", xlab=xlab, ylab=ylab, asp=asp, axes=axes, main=main, ...)
  }
  rasterImage(z, bb[1], bb[3], bb[2], bb[4], interpolate=interpolate, ...)
  if (!is.null(addfun)) {
    if (is.function(addfun)) {
      addfun()
    }
  }
  if (sethook) {
    # One-shot hook: restore the saved par() settings (with warnings
    # silenced) the next time plot.new() is called, then remove the hook.
    setHook("plot.new", function(...) {
      w <- getOption('warn')
      # BUG FIX: the second on.exit() originally replaced the first, so the
      # 'warn' option was never restored. add = TRUE keeps both handlers.
      on.exit(options('warn' = w))
      options('warn'=-1)
      on.exit(par(old.par), add = TRUE)
    }, action="replace")
    setHook("plot.new", function(...) setHook("plot.new", NULL, "replace"))
  }
}
| /R/plotCT.R | no_license | kenahoo/raster | R | false | false | 2,155 | r | # Author: Robert J. Hijmans
# Date : July 2010
# Version 0.9
# Licence GPL v3
# Plot a RasterLayer that carries a color table (x@legend@colortable).
# Internal helper: samples the raster down to at most `maxpixels` cells,
# maps cell values through the color table and draws the result with
# rasterImage().
#
# Args mirror plot(): `ext` crops, `interpolate` smooths the image,
# `zlim`/`zlimcol` clamp or drop out-of-range values, `addfun` is called
# after drawing (e.g. to overlay vectors), `add=TRUE` draws onto an
# existing plot.
.plotCT <- function(x, maxpixels=500000, ext=NULL, interpolate=FALSE, axes, main, xlab='', ylab='', asp, add=FALSE, addfun=NULL, zlim=NULL, zlimcol=NULL, ...) {
  # plotting with a color table
  if (missing(main)) {
    main <- ''
  }
  sethook <- FALSE
  if (!add) {
    plot.new()
    if (missing(axes)) {
      axes <- FALSE
    }
    if (!axes) {
      # No axes: use the whole device region. Remember the old par() so a
      # plot.new() hook can restore it for the next plot (see bottom).
      old.par <- par(no.readonly = TRUE)
      par(plt=c(0,1,0,1))
      sethook <- TRUE
    }
    if (missing(asp)) {
      if (couldBeLonLat(x)) {
        # Approximate an isometric aspect ratio for lon/lat data at the
        # raster's mid-latitude.
        ym <- mean(c(x@extent@ymax, x@extent@ymin))
        asp <- 1/cos((ym * pi)/180)
      } else {
        asp <- 1
      }
    }
  }
  coltab <- x@legend@colortable
  x <- sampleRegular(x, maxpixels, ext=ext, asRaster=TRUE, useGDAL=TRUE)
  z <- getValues(x)
  if (!is.null(zlim)) { # not that relevant here, but for consistency....
    if (is.null(zlimcol)) {
      # clamp values into [zlim[1], zlim[2]]
      z[ z<zlim[1] ] <- zlim[1]
      z[ z>zlim[2] ] <- zlim[2]
    } else { #if (is.na(zlimcol)) {
      # drop out-of-range values instead of clamping
      z[z<zlim[1] | z>zlim[2]] <- NA
    }
  }
  if (NCOL(coltab) == 2) {
    # two-column table: first column holds class breaks, second the colors
    z <- as.numeric(cut(z, coltab[,1]))
    coltab <- as.vector(coltab[,2])
  }
  # Shift to 1-based indexing into the color table; NA cells map to entry 1.
  z <- z + 1
  z[is.na(z)] <- 1
  if (! is.null(coltab) ) {
    # FIX: was byrow=T; use TRUE (T is a reassignable variable).
    z <- matrix(coltab[z], nrow=nrow(x), ncol=ncol(x), byrow=TRUE)
    z <- as.raster(z)
  } else {
    z <- matrix(z, nrow=nrow(x), ncol=ncol(x), byrow=TRUE)
    z <- as.raster(z, max=max(z)) #, na.rm=TRUE))
  }
  requireNamespace("grDevices")
  bb <- as.vector(t(bbox(x)))
  if (! add) {
    plot(c(bb[1], bb[2]), c(bb[3], bb[4]), type = "n", xlab=xlab, ylab=ylab, asp=asp, axes=axes, main=main, ...)
  }
  rasterImage(z, bb[1], bb[3], bb[2], bb[4], interpolate=interpolate, ...)
  if (!is.null(addfun)) {
    if (is.function(addfun)) {
      addfun()
    }
  }
  if (sethook) {
    # One-shot hook: restore the saved par() settings (with warnings
    # silenced) the next time plot.new() is called, then remove the hook.
    setHook("plot.new", function(...) {
      w <- getOption('warn')
      # BUG FIX: the second on.exit() originally replaced the first, so the
      # 'warn' option was never restored. add = TRUE keeps both handlers.
      on.exit(options('warn' = w))
      options('warn'=-1)
      on.exit(par(old.par), add = TRUE)
    }, action="replace")
    setHook("plot.new", function(...) setHook("plot.new", NULL, "replace"))
  }
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/viewSim.R
\name{viewSim}
\alias{viewSim}
\title{visualize simulated scenario}
\usage{
viewSim(obj, layout = "rectangular", zoomScale = 1/20,
legend.theme = list(legend.position = c(0.15, 0.6), legend.background =
element_rect(fill = "transparent")), tip.label = FALSE,
legend.title = "Abundance", ...)
}
\arguments{
\item{obj}{The output from \code{simData}}
\item{layout}{The tree layout. Select one from 'rectangular', 'slanted',
'fan', 'circular', 'radial', 'equal_angle' and 'daylight'. The default is
"rectangular".}
\item{zoomScale}{A positive numeric value. If it is above one, branches with
fold change equal to one (non-signal branch) will be zoomed in; If below
one, they will be shrinked. Default is 0.05}
\item{legend.theme}{A list of arguments used for the theme in ggplot2
package (see \code{\link[ggplot2]{theme}} ) and starting with "legend."}
\item{tip.label}{TRUE or FALSE. Default is FALSE. If TRUE, the leaves with
fold change above or below 1 will be labelled.}
\item{legend.title}{The title of the legend. The default is "Abundance"}
}
\value{
a figure
}
\description{
\code{viewSim} is to visualize the output from the function \code{simData}.
}
\examples{
set.seed(1)
y <- matrix(rnbinom(100,size=1,mu=10),nrow=10)
colnames(y) <- paste("S", 1:10, sep = "")
rownames(y) <- tinyTree$tip.label
toy_lse <- leafSummarizedExperiment(tree = tinyTree,
assays = list(y))
res <- parEstimate(data = toy_lse)
set.seed(1122)
dat1 <- simData(obj = res)
viewSim(obj = dat1 )
}
| /man/viewSim.Rd | no_license | markrobinsonuzh/treeAGG | R | false | true | 1,615 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/viewSim.R
\name{viewSim}
\alias{viewSim}
\title{visualize simulated scenario}
\usage{
viewSim(obj, layout = "rectangular", zoomScale = 1/20,
legend.theme = list(legend.position = c(0.15, 0.6), legend.background =
element_rect(fill = "transparent")), tip.label = FALSE,
legend.title = "Abundance", ...)
}
\arguments{
\item{obj}{The output from \code{simData}}
\item{layout}{The tree layout. Select one from 'rectangular', 'slanted',
'fan', 'circular', 'radial', 'equal_angle' and 'daylight'. The default is
"rectangular".}
\item{zoomScale}{A positive numeric value. If it is above one, branches with
fold change equal to one (non-signal branch) will be zoomed in; If below
one, they will be shrunk. Default is 0.05}
\item{legend.theme}{A list of arguments used for the theme in ggplot2
package (see \code{\link[ggplot2]{theme}} ) and starting with "legend."}
\item{tip.label}{TRUE or FALSE. Default is FALSE. If TRUE, the leaves with
fold change above or below 1 will be labelled.}
\item{legend.title}{The title of the legend. The default is "Abundance"}
}
\value{
a figure
}
\description{
\code{viewSim} is to visualize the output from the function \code{simData}.
}
\examples{
set.seed(1)
y <- matrix(rnbinom(100,size=1,mu=10),nrow=10)
colnames(y) <- paste("S", 1:10, sep = "")
rownames(y) <- tinyTree$tip.label
toy_lse <- leafSummarizedExperiment(tree = tinyTree,
assays = list(y))
res <- parEstimate(data = toy_lse)
set.seed(1122)
dat1 <- simData(obj = res)
viewSim(obj = dat1 )
}
|
\name{plot.rank_stability}
\alias{plot.rank_stability}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Plot outputs of PARSEC function \code{\link{rank_stability}}.
}
\description{
The function generates four plots, to reproduce the sequence of the average ranks and of the positions of the elements, in the rankings associated to the alpha-cut posets.
Rankings and average ranks have to be evaluated with the function \code{\link{rank_stability}}.
First and third plots show the sequence of average ranks, second and fourth show the sequence of rankings. Sequences in first and second plots are shown against the sequence of alpha-cuts, in third and fourth plots as a function of alpha values.
}
\usage{
\method{plot}{rank_stability}(x,
which = 1:4, legend = TRUE, legend.x = "bottomleft",
legend.y = NULL, legend.bg = "white", grid = TRUE,
grid.lty = 2, grid.col = rgb(0, 0, 0, 1/7),
grid.lwd = 1, y_axis = "reversed", ask = dev.interactive(),
type = "l", col = gray(1:ncol(x$ranking)/ncol(x$ranking)/1.3),
lwd = 3, lty = 1, ...
)}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
object of class \code{rank_stability} generated by function \code{\link{rank_stability}}.
}
\item{which}{select a subset of the numbers 1:4, to specify the desired plots. See caption below (and the 'Details').
}
\item{legend}{
boolean argument to choose whether to show the legend in the plots.
}
\item{legend.x, legend.y, legend.bg}{
arguments \code{x}, \code{y} and \code{bg} of the function \code{\link{legend}} defining the coordinates and the background color of the legend.
}
\item{grid}{
boolean argument to choose whether to show the grid in the plots.
}
\item{grid.lty, grid.col, grid.lwd}{
arguments defining the line type, color and width of the grid.
}
\item{y_axis}{if it is set equal to "reversed" plots show the y axis reversed.}
\item{ask}{
boolean argument indicating whether the system has to ask users before changing plots.
}
\item{type}{
1-character string giving the desired type of plot. See \code{\link{plot.default}} for details.
}
\item{col}{
vector of colors. See \code{\link{matplot}} for details.
}
\item{lwd}{
vector of line widths. See \code{\link{matplot}} for details.
}
\item{lty}{
vector of line types. See \code{\link{matplot}} for details.
}
\item{\dots}{
other arguments of function \code{\link{matplot}}.
}
}
\seealso{
\code{\link{rank_stability}}, \code{\link{legend}}, \code{\link{plot.default}}, \code{\link{matplot}}
}
\examples{
v1 <- as.ordered(c("a", "b", "c", "d"))
v2 <- 1:3
prof <- var2prof(varmod = list(v1 = as.ordered(c("a", "b", "c", "d")), v2 = 1:3))
np <- nrow(prof$profiles)
k <- 10 # number of populations
set.seed(0)
populations <- as.data.frame(lapply(1:k, function(x) round(runif(np)*100)))
rownames(populations) <- rownames(prof$profiles)
names(populations) <- paste0("P", 1:k)
x <- FFOD(profiles = prof, distributions = populations)
res <- rank_stability(x)
plot(res)
} | /man/plot.rank_stability.Rd | no_license | cran/parsec | R | false | false | 3,104 | rd | \name{plot.rank_stability}
\alias{plot.rank_stability}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Plot outputs of PARSEC function \code{\link{rank_stability}}.
}
\description{
The function generates four plots, to reproduce the sequence of the average ranks and of the positions of the elements, in the rankings associated to the alpha-cut posets.
Rankings and average ranks have to be evaluated with the function \code{\link{rank_stability}}.
First and third plots show the sequence of average ranks, second and fourth show the sequence of rankings. Sequences in first and second plots are shown against the sequence of alpha-cuts, in third and fourth plots as a function of alpha values.
}
\usage{
\method{plot}{rank_stability}(x,
which = 1:4, legend = TRUE, legend.x = "bottomleft",
legend.y = NULL, legend.bg = "white", grid = TRUE,
grid.lty = 2, grid.col = rgb(0, 0, 0, 1/7),
grid.lwd = 1, y_axis = "reversed", ask = dev.interactive(),
type = "l", col = gray(1:ncol(x$ranking)/ncol(x$ranking)/1.3),
lwd = 3, lty = 1, ...
)}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
object of class \code{rank_stability} generated by function \code{\link{rank_stability}}.
}
\item{which}{select a subset of the numbers 1:4, to specify the desired plots. See caption below (and the 'Details').
}
\item{legend}{
boolean argument to choose whether to show the legend in the plots.
}
\item{legend.x, legend.y, legend.bg}{
arguments \code{x}, \code{y} and \code{bg} of the function \code{\link{legend}} defining the coordinates and the background color of the legend.
}
\item{grid}{
boolean argument to choose whether to show the grid in the plots.
}
\item{grid.lty, grid.col, grid.lwd}{
arguments defining the line type, color and width of the grid.
}
\item{y_axis}{if it is set equal to "reversed" plots show the y axis reversed.}
\item{ask}{
boolean argument indicating whether the system has to ask users before changing plots.
}
\item{type}{
1-character string giving the desired type of plot. See \code{\link{plot.default}} for details.
}
\item{col}{
vector of colors. See \code{\link{matplot}} for details.
}
\item{lwd}{
vector of line widths. See \code{\link{matplot}} for details.
}
\item{lty}{
vector of line types. See \code{\link{matplot}} for details.
}
\item{\dots}{
other arguments of function \code{\link{matplot}}.
}
}
\seealso{
\code{\link{rank_stability}}, \code{\link{legend}}, \code{\link{plot.default}}, \code{\link{matplot}}
}
\examples{
v1 <- as.ordered(c("a", "b", "c", "d"))
v2 <- 1:3
prof <- var2prof(varmod = list(v1 = as.ordered(c("a", "b", "c", "d")), v2 = 1:3))
np <- nrow(prof$profiles)
k <- 10 # number of populations
set.seed(0)
populations <- as.data.frame(lapply(1:k, function(x) round(runif(np)*100)))
rownames(populations) <- rownames(prof$profiles)
names(populations) <- paste0("P", 1:k)
x <- FFOD(profiles = prof, distributions = populations)
res <- rank_stability(x)
plot(res)
} |
#------------------------------------------------------------------------------------------#
# Function eps #
# Developed by Marcos Longo - EPS/Harvard University #
# #
# This function simply gives the machine epsilon for single precision #
#------------------------------------------------------------------------------------------#
eps <<- function(){
   # Machine epsilon for IEEE-754 single precision: 2^-24, i.e. the unit roundoff
   # (half the spacing between 1 and the next representable single).
   single.epsilon = 2^(-24)
   return(single.epsilon)
}#end function eps
#------------------------------------------------------------------------------------------#
| /R-utils/eps.r | no_license | EDmodel/ED2 | R | false | false | 707 | r | #------------------------------------------------------------------------------------------#
# Function eps #
# Developed by Marcos Longo - EPS/Harvard University #
# #
# This function simply gives the machine epsilon for single precision #
#------------------------------------------------------------------------------------------#
eps <<- function(){
   # 2^-24 is the unit roundoff of IEEE-754 single precision (half the spacing
   # between 1 and the next representable single-precision number).
   ans = 2^(-24)
   return(ans)
}
#------------------------------------------------------------------------------------------#
|
## Single Plate Analysis
##########################################################################################################################
# Run date, used to tag every output file and folder of this analysis.
date <- "160128_bis"

# Candidate base directories (personal machine, Windows share, Linux workstation);
# used_DR selects the one active for this run.
mine         <- "/home/marie/Documents/"
work_windows <- "//home.files.med.harvard.edu/home/"
work_linux   <- "/home/marie/Documents/"
used_DR      <- work_linux

# Dataset identifier and count type analysed in this run.
dataset <- "M12"
type    <- "total"

# Output directories, all of the form <used_DR>R_output<date>/<dataset>[/<sub>].
control_output <- paste0(used_DR, "R_output", date, "/", dataset, "/control")
output         <- paste0(used_DR, "R_output", date, "/", dataset)
output_chdir   <- paste0(used_DR, "R_output", date, "/", dataset, "/", type)
##########################################################################################################################
# Upload functions files
# Helper definitions are sourced into the global environment.  setwd() is used so the
# relative source() paths resolve; NOTE(review): sourcing with absolute paths would
# avoid changing the working directory.
setwd(paste(used_DR,"R_transcriptomics",sep=""))
source('functions_systematic_comparison.R')
source('functions_data_format.R')
source('functions_data_processing.R')
source('functions_plot_data_description.R')
# The characteristic-direction (chDir) implementation lives in its own subfolder.
setwd(paste(used_DR,'R_transcriptomics/chdir_R',sep=""))
source('chdir.R')
source('nipals.R')
##########################################################################################################################
# Upload files
# Plate design files (well annotations, plate IDs, ...) for the 20150707 run.
raw_design_files = Upload_design_files(paste(used_DR,"Stage 2016/RAWDATA/Cellcount_20150707",sep=""))
# NOTE(review): the bare "," below deliberately leaves Rawdata_files_name()'s third
# argument missing (its default is presumably used) -- confirm against the function
# definition in functions_data_format.R before changing it.
rawdata_name = Rawdata_files_name(raw_design_files$plateID,
                                  raw_design_files[[3]], # Make sure it is the good design file
                                  ,
                                  24)
print("Files names")
# List the available raw-data files of this count type, so the file index below
# can be checked by eye against the printed names.
Show_rawdata_names(paste(used_DR,"Stage 2016/RAWDATA/DGE_20150707",sep=""),type)
rawdata_files_total = Upload_rawdata_files(paste(used_DR,"Stage 2016/RAWDATA/DGE_20150707",sep=""),
                                           rawdata_name,
                                           type,
                                           c(6)) # Make sure it is the right file number
##########################################################################################################################
# Data pre-processing
# Keep only wells with at least 150,000 total reads (Minimum_reads also draws the QC
# plot that is saved just below).
good_quality_raw_data = Minimum_reads(rawdata_files_total[[1]],150000)
setwd(output)
dev.print(device = png, file = paste(date,
                                     paste(dataset,
                                           paste(type,"quality_wells.png",sep="_"),sep="_"),sep="_"), width = 600)
dev.off()
# coordinate wells/columns name
raw_total_count_name = Adjust_Well_Name(colnames(good_quality_raw_data ),T)
quality_wells = synchronize_quality_wells(raw_total_count_name,raw_design_files[[3]])
# normalize count data
normalized_total = list()                                   # placeholder; overwritten two lines below
replicate = length(rawdata_files_total)-1                   # NOTE(review): last list element presumably not a count table -- verify
normalized_total = Total_counts_Norm(good_quality_raw_data) # Choose the right normalization method
# log transform count data (second argument 4: see Log_transform in functions_data_processing.R)
log_total = Log_transform(normalized_total,4)
# final name adjustment
processed_total = log_total
colnames(processed_total)=raw_total_count_name
##########################################################################################################################
# Data pre-processing -> Expressed genes selection
# Use the vehicle-control wells to decide which genes count as "expressed": keep genes
# whose mean (log) count in controls exceeds the summary value at position 7.
# NOTE(review): position 7 is presumably a fixed quantile (~40%, per the
# "expression_40" file name) -- confirm against Expression_summary().
processed_total=list(processed_total)
Design = quality_wells
control_wells = Design[Design$pert_type == "ctl_vehicle",1]
control_total_counts = Select_DataFrame(processed_total[[1]],control_wells)
control_total_expression_values = Expression_summary(control_total_counts)
print(control_total_expression_values)
expressed_control_total = Select_DataFrame_ValueThreshold_mean(control_total_counts,
                                                               as.numeric(control_total_expression_values[7]))
setwd(output)
dev.print(device = png, file = paste(date,
                                     paste(dataset,
                                           paste(type,"expression_40.png",sep="_"),sep="_"),sep="_"), width = 600)
dev.off()
Expression_summary(expressed_control_total)
# Restrict the all-wells table to the expressed genes; stored as list element "rep1".
total = list(rep1 = Select_raws_other_DF(expressed_control_total,processed_total[[1]]))
##########################################################################################################################
# Dataset control splitting
# Split the vehicle-control wells into one count table per (replicate, cell line),
# named "rep<j>_<CellLine>".
CellLine_level = names(table(Design$CellLine))
control_total = list()
name_control = c()
for (j in 1:replicate){
  # NOTE(review): total was built above with a single element (rep1); this loop
  # indexes total[[j]] for j in 1:replicate -- confirm replicate == 1 for this plate.
  for (i in 1:length(CellLine_level)){
    raw <- paste(paste("rep", j, sep = ""), CellLine_level[i],sep="_")   # e.g. "rep1_MCF7"
    name_control=c(name_control,raw)
    wells = Design[Design$pert_type == "ctl_vehicle" &
                     Design$CellLine == CellLine_level[i],1]
    temp = Select_DataFrame(total[[j]],wells)
    control_total = c(control_total,
                      list(temp))
  }
}
names(control_total) = name_control
##########################################################################################################################
# Dataset control quality checking
# For every cell line, compute per-gene log differences (LFC) between the first control
# well (column 2 of the table) and every control well of that line, then show their
# distributions as histograms and decile tables.
LFC_control = list()
for (i in 1:length(levels(factor(Design$CellLine)))){
  cell_lines = levels(factor(Design$CellLine))   # recomputed each pass; same value every time
  cell_line_data = Design[Design$CellLine == cell_lines[i],]
  cell_control = cell_line_data[cell_line_data$pert_type == "ctl_vehicle",]
  cell_control_data = Select_DataFrame(total[[1]],cell_control[,1])
  cell_control_name = colnames(cell_control_data)
  # LFC: gene identifier in column 1, one contrast per remaining column.  If the
  # identifiers are character, this assignment coerces the whole matrix to character
  # (hence the as.numeric() calls when plotting below).
  LFC = matrix(0,nrow(cell_control_data),ncol(cell_control_data))
  LFC[,1] = cell_control_data[,1]
  names_LFC = c("X")
  for (j in 2:ncol(cell_control_data)){
    # Reference is always column 2 (first control well); the j == 2 contrast is all zero.
    LFC[,j] = as.numeric(cell_control_data[,2]) - as.numeric(cell_control_data[,j])
    names_LFC = c(names_LFC,paste(cell_control_name[2],cell_control_name[j],sep = "/"))
  }
  colnames(LFC)=names_LFC
  LFC_control = c(LFC_control,list(LFC))
}
names(LFC_control) = cell_lines
# Diagnostic plots: one 2x3 panel per cell line, one histogram per control contrast.
setwd(control_output)
for (i in 1:length(LFC_control)){
  par(mfrow=c(2,3))
  for (j in 2:ncol(LFC_control[[i]])){
    temp = LFC_control[[i]]
    temp_names = colnames(temp)
    hist(as.numeric(temp[,j]),main = temp_names[j],col="dodgerblue4",xlab=quantile(as.numeric(temp[,j])))
    # boxplot(as.numeric(temp[,j]),main = temp_names[j],col="dodgerblue4")
    print(temp_names[j])
    print(quantile(as.numeric(temp[,j]),probs=seq(0,1,.1)))
  }
  # dev.print(device = png, file = paste(date,paste(cell_lines[i],"_control.png",sep=""),sep="_"), width = 600)
  # dev.off()
}
par(mfrow=c(1,1))
##########################################################################################################################
# Dataset conditions splitting
# Build, for every (cell line, drug1, conc1, drug2, conc2) combination present in the
# design, the vector of its replicate well IDs.  The list names encode the combination
# as "<CellLine>_<Drug>_<Conc>_<Drug2>_<Conc2>" and are parsed back later.
mean_total_control = list_expression_mean(control_total)
mean_cell_line = as.data.frame(mean_total_control)
colnames(mean_cell_line)=CellLine_level
names(control_total)=CellLine_level
names_wells = c()
list_wells = list()
# FOR EACH CELL LINE
for(i in 1:ncol(mean_cell_line)){
  treatment = Design[Design$pert_type != "ctl_vehicle" &
                       Design$CellLine == CellLine_level[i],]
  drugs = levels(factor(treatment$DrugName))
  conc = vector("list", length(drugs))
  # FOR EACH FIRST DRUG TREATMENT
  for (d1 in 1:length(drugs)){
    temp_d1 = treatment[treatment$DrugName == drugs[d1],]
    conc[[d1]] = sort(levels(factor(temp_d1$Conc)))
    # FOR EACH ASSOCIATED CONCENTRATION
    for (c1 in 1:length(conc[[d1]])){
      temp_d2 = temp_d1[temp_d1$Conc == conc[[d1]][c1],]
      drugs_2 = levels(factor(temp_d2$DrugName2))
      conc_2 = vector("list", length(drugs_2))
      # FOR EACH ASSOCIATED SECOND DRUG TREATMENT
      for (d2 in 1:length(drugs_2)){
        temp = temp_d2[temp_d2$DrugName2 == drugs_2[d2],]
        conc_2[[d2]] = sort(levels(factor(temp$Conc2)))
        # FOR EACH ASSOCIATED SECOND CONCENTRATION
        for(c2 in 1:length(conc_2[[d2]])){
          # Condition name, e.g. "MCF7_BEZ235_1_-_0".
          name_temp = paste(CellLine_level[i],
                            paste(drugs[d1],
                                  paste(conc[[d1]][c1],
                                        paste(drugs_2[d2],conc_2[[d2]][c2],sep="_"),sep="_"),sep="_"),sep="_")
          names_wells = c(names_wells,name_temp)
          # Column 1 of Design holds the well IDs (same convention as above).
          temp_wells = temp[temp$Conc2 == conc_2[[d2]][c2],1]
          list_wells = c(list_wells,list(temp_wells))
        }
      }
    }
  }
}
names(list_wells)=names_wells
# View(list_wells)
print(length(list_wells))
# nrow(unique(Design[,2:9]))
##########################################################################################################################
# Dataset conditions testing
#######################################################
# Fold-Change Estimation #
#######################################################
#
# setwd(output)
#
# for (el in 1:length(list_wells)){
# print(names_wells[el])
# CL = unlist(strsplit(names_wells[el],"_"))
# control = mean_cell_line[,CL[1]]
# temp_count = Select_DataFrame(total[[1]],list_wells[[el]])
# LFC_temp = matrix(0,nrow(temp_count),ncol(temp_count))
# LFC_temp[,1]=temp_count[,1]
# for (rep in 1:length(list_wells[[el]])){
# LFC_temp[,rep+1] = as.numeric(temp_count[,rep+1]) - control
# }
#
# genes = list()
# par(mfrow=c(2,4))
# if (length(list_wells[[el]]) !=1){
# for (rep in 1:length(list_wells[[el]])){
# genes = c(genes,list(LFC_temp[abs(as.numeric(LFC_temp[,rep+1]))>2,1]))
#
# if(rep == length(list_wells[[el]])){
# nb_commun = length(intersect(genes[[1]],genes[[2]]))
# final = paste(nb_commun ,paste(" over ",paste(length(genes[[1]]),paste("_",length(genes[[2]])))))
# print(final)
# hist(as.numeric(LFC_temp[,rep+1]),col="dodgerblue4",
# xlab = paste("LFC replicate", paste(rep,list_wells[[el]][rep],sep="_"),sep=' '),
# main= final)
#
# }else{
# hist(as.numeric(LFC_temp[,rep+1]),col="dodgerblue4",
# xlab = paste("LFC replicate", paste(rep,list_wells[[el]][rep],sep="_"),sep=' '),main=names_wells[el])
# }
#
# # mapplot
# rbPal <- colorRampPalette(c('darkgreen','palegreen4','grey10','firebrick','darkred'))
# color<- rbPal(5)[cut(as.numeric(LFC_temp[,rep+1]),breaks = 5)]
# ma.plot(mean_total[[1]],as.numeric(LFC_temp[,rep+1]),cex=1,col=color,
# main=paste("LFC replicate", rep,sep=' '))
#
# }
# }else{
# for (rep in 1:length(list_wells[[el]])){
# genes = c(genes,list(LFC_temp[abs(as.numeric(LFC_temp[,rep+1]))>2,1]))
#
# if(rep == length(list_wells[[el]])){
# #nb_commun = length(intersect(genes[[1]],genes[[2]]))
# final = paste(names_wells[el],"low quality replicate")
# print(final)
# hist(as.numeric(LFC_temp[,rep+1]),col="dodgerblue4",
# xlab = paste("LFC replicate", paste(rep,list_wells[[el]][rep],sep="_"),sep=' '),
# main= final)
#
# }else{
# hist(as.numeric(LFC_temp[,rep+1]),col="dodgerblue4",
# xlab = paste("LFC replicate", paste(rep,list_wells[[el]][rep],sep="_"),sep=' '),main=names_wells[el])
# }
#
# # mapplot
# rbPal <- colorRampPalette(c('darkgreen','palegreen4','grey10','firebrick','darkred'))
# color<- rbPal(5)[cut(as.numeric(LFC_temp[,rep+1]),breaks = 5)]
# ma.plot(mean_total[[1]],as.numeric(LFC_temp[,rep+1]),cex=1,col=color,
# main=paste("LFC replicate", rep,sep=' '))
#
# }
# }
# dev.print(device = png, file = paste(date,paste(names_wells[el],".png",sep=""),sep="_"), width = 1000)
# dev.off()
# }
#######################################################
# Characteristic Direction #
#######################################################
# Move to the per-dataset output folder before writing the chDir result tables below.
setwd(output)
##################################################################################################################
# Specific conditions testing: restrict the analysis to a hand-picked subset of conditions.
print(names_wells)
#
# specific_wells = c("BT20_AZD8330_3.3333_-_0",
# "BT20_BEZ235_1.1111_-_0",
# "BT20_BYL719_10_-_0",
# "BT20_BYL719_3.3333_-_0",
# "BT20_Dasatinib_1.1111_-_0",
# "BT20_GSK1059615_3.3333_-_0",
# "BT20_Lapatinib_3.3333_-_0",
# "BT20_Neratinib_3.3333_-_0",
# "BT20_NVP-TAE684_3.3333_-_0",
# "BT20_Palbociclib_3.3333_-_0",
# "BT20_Rapamycin_3.3333_-_0",
# "BT20_Saracatinib_3.3333_-_0",
# "BT20_Trametinib_3.3333_-_0",
# "HCC1806_AZ20_3.1623_-_0",
# "HCC1806_AZD8055_3.1623_-_0",
# "HCC1806_BEZ235_1_-_0",
# "HCC1806_BYL719_10_-_0",
# "HCC1806_Dasatinib_2_-_0",
# "HCC1806_GSK1059615_10_-_0",
# "HCC1806_KU60019_3.1623_-_0",
# "HCC1806_Lapatinib_20_-_0",
# "HCC1806_Linsitinib_20_-_0",
# "HCC1806_Saracatinib_10_-_0",
# "HCC1806_NVP-TAE684_10_-_0",
# "HCC1806_Torin2_0.31623_-_0",
# "HCC1806_VE821_3.1623_-_0"
# )
# Hand-picked MCF10A / MCF7 single-drug conditions.  Names follow
# "<CellLine>_<Drug>_<Conc>_<Drug2>_<Conc2>" ("-_0" presumably means no second drug).
specific_wells= c("MCF10A_BEZ235_1_-_0",
                  "MCF10A_BYL719_3.1623_-_0",
                  "MCF10A_Dasatinib_2_-_0",
                  "MCF10A_Lapatinib_3.1623_-_0",
                  "MCF10A_Linsitinib_20_-_0",
                  "MCF10A_NVP-TAE684_10_-_0",
                  "MCF10A_Palbociclib_3.1623_-_0",
                  "MCF10A_Rapamycin_1_-_0",
                  "MCF10A_Saracatinib_10_-_0",
                  "MCF10A_Torin2_0.31623_-_0",
                  "MCF10A_Trametinib_3.1623_-_0",
                  "MCF7_BEZ235_1_-_0" ,
                  "MCF7_BYL719_3.1623_-_0",
                  "MCF7_Dasatinib_2_-_0",
                  "MCF7_Lapatinib_10_-_0",
                  "MCF7_Linsitinib_3.1623_-_0",
                  "MCF7_NVP-TAE684_10_-_0",
                  "MCF7_Palbociclib_3.1623_-_0",
                  "MCF7_Saracatinib_10_-_0" ,
                  "MCF7_Trametinib_3.1623_-_0")
# Keep the full mapping aside, then restrict list_wells to the selected conditions.
list_wells_save = list_wells
list_wells = list_wells_save[names(list_wells_save) %in% specific_wells]
names_wells = names(list_wells)
##################################################################################################################
#
# gene_intersect = matrix(0,56,length(list_wells))
# rownames(gene_intersect)= c("angle","top_100","top_500","top_1000","top_2500","top_5000",seq(1,50))
# colnames(gene_intersect)=names(list_wells)
# nb_gene=c(100,500,1000,2500,5000)
# rep_gene_intersect = matrix(0,56,length(list_wells)*2)
# rownames(rep_gene_intersect)= c("angle","top_100","top_500","top_1000","top_2500","top_5000",seq(1,50))
# names_replicate = c()
# indexes = seq(1,length(list_wells)*2,2)
#
#
#
# for (el in 1:length(list_wells)){
# print(names_wells[el])
# CL = unlist(strsplit(names_wells[el],"_"))
# control = control_total[[CL[1]]]
# temp_count = Select_DataFrame(total[[1]],list_wells[[el]])
# print(list_wells[[el]])
#
# # remove equals row
# equal_threshold = 1e-5;
# mat_ctl = as.matrix(control[,2:ncol(control)])
# ctl = control[diag(var(t(mat_ctl))) > equal_threshold,]
#
# if (length(list_wells[[el]]) != 1){
# mat_count = as.matrix(temp_count[,2:ncol(temp_count)])
# exp = temp_count[diag(var(t(mat_count))) > equal_threshold,]
# }else{
# exp = temp_count
# }
#
# # estimate chDir with replicates
# real_ctl = Select_raws_other_DF(exp,ctl)
# real_exp = Select_raws_other_DF(real_ctl,exp)
#
# chdir_result = chdir(as.matrix(real_ctl[,2:ncol(real_ctl)]),as.matrix(real_exp[,2:ncol(real_exp)]),real_ctl[,1])
# names_replicate = c(names_replicate,names_wells[el],names_wells[el])
#
# chdir_rep=list()
#
# #estimate chDir without replicates
# for (rep in 1:length(list_wells[[el]])){
# chdir_temp = chdir(as.matrix(real_ctl[,2:ncol(real_ctl)]),as.matrix(real_exp[,rep+1]),real_ctl[,1])
# chdir_rep=c(chdir_rep,list(chdir_temp))
# }
#
# if (length(list_wells[[el]]) == 1){
# tmp_0 = rownames(chdir_result)
# tmp_1 = rownames(chdir_rep[[1]])
#
# rep_gene_intersect[1,indexes[el]] = cos(angle(as.vector(chdir_rep[[1]]),as.vector(chdir_result)))
#
# for (g in 1:length(nb_gene)+1){
# rep_gene_intersect[g,indexes[el]] = length(intersect(tmp_1[1:nb_gene[g-1]],tmp_0[1:nb_gene[g-1]]))
# }
# for (g in 7:56){
# gene_intersect[g,el]=tmp_1[g-5]
# rep_gene_intersect[g,indexes[el]] = tmp_0[g-5]
# rep_gene_intersect[g,indexes[el]+1] = chdir_result[g-5,1]
# }
# }else{
# tmp_0 = rownames(chdir_result)
# tmp_1 = rownames(chdir_rep[[1]])
# tmp_2 = rownames(chdir_rep[[2]])
#
# # as.vector(chdir_rep[[1]])%*%as.vector(chdir_rep[[2]])
# # cos(angle(as.vector(chdir_rep[[1]]),as.vector(chdir_rep[[2]])))
# gene_intersect[1,el] = cos(angle(as.vector(chdir_rep[[1]]),as.vector(chdir_rep[[2]])))
# rep_gene_intersect[1,indexes[el]] = cos(angle(as.vector(chdir_rep[[1]]),as.vector(chdir_result)))
# rep_gene_intersect[1,indexes[el]+1] = cos(angle(as.vector(chdir_rep[[2]]) ,as.vector(chdir_result)))
#
# for (g in 1:length(nb_gene)+1){
# gene_intersect[g,el]=length(intersect(tmp_1[1:nb_gene[g-1]],tmp_2[1:nb_gene[g-1]]))
# rep_gene_intersect[g,indexes[el]] = length(intersect(tmp_1[1:nb_gene[g-1]],tmp_0[1:nb_gene[g-1]]))
# rep_gene_intersect[g,indexes[el]+1] = length(intersect(tmp_2[1:nb_gene[g-1]],tmp_0[1:nb_gene[g-1]]))
# }
# for (g in 7:51){
# gene_intersect[g,el]=paste(tmp_1[g-5],tmp_2[g-5],sep = " _ ")
# rep_gene_intersect[g,indexes[el]] = tmp_0[g-5]
# }
# }
#
# }
# colnames(rep_gene_intersect) = names_replicate
#
# write.table(gene_intersect,file=paste(dataset,paste(type,"exp_40.txt",sep="_"),sep="_"),sep = "\t")
# write.table(rep_gene_intersect,file=paste(dataset,paste(type,"exp_40_rep.txt",sep="_"),sep="_"),sep = "\t")
##################################################################################################################
# Top-N cutoffs used when comparing gene rankings between single-replicate and
# pooled-replicate characteristic-direction (chDir) results.
nb_gene=c(100,500,1000,2500,5000)
# One result matrix with 4 columns per condition (pooled name column + one column per
# replicate).  Rows: cosine of the angle to the pooled chDir, the 5 top-N overlap
# counts, then up to 50 top gene names.
# NOTE(review): the gene-name loops below fill rows 7:51 only, leaving the last 5 of
# the 50 name rows at their initial value -- confirm whether 7:56 was intended.
rep_gene_intersect = matrix(0,56,length(list_wells)*4)
rownames(rep_gene_intersect)= c("angle","top_100","top_500","top_1000","top_2500","top_5000",seq(1,50))
names_replicate = matrix(0,1,length(list_wells)*4)   # only every 4th entry gets a condition name; the rest stay 0
indexes = seq(1,length(list_wells)*4,4)              # first column index of each condition's 4-column group
for (el in 1:length(list_wells)){
  print(names_wells[el])
  # Condition name is "<CellLine>_<Drug>_<Conc>_<Drug2>_<Conc2>"; CL[1] is the cell line.
  CL = unlist(strsplit(names_wells[el],"_"))
  control = control_total[[CL[1]]]
  temp_count = Select_DataFrame(total[[1]],list_wells[[el]])
  print(list_wells[[el]])
  # remove equals row: drop genes with ~zero variance across wells before running chdir
  equal_threshold = 1e-5;
  mat_ctl = as.matrix(control[,2:ncol(control)])
  ctl = control[diag(var(t(mat_ctl))) > equal_threshold,]
  mat_count = as.matrix(temp_count[,2:ncol(temp_count)])
  exp = temp_count[diag(var(t(mat_count))) > equal_threshold,]
  # estimate chDir with replicates (first restrict both tables to their common gene rows)
  real_ctl = Select_raws_other_DF(exp,ctl)
  real_exp = Select_raws_other_DF(real_ctl,exp)
  chdir_result = chdir(as.matrix(real_ctl[,2:ncol(real_ctl)]),as.matrix(real_exp[,2:ncol(real_exp)]),real_ctl[,1])
  names_replicate[indexes[el]] = names_wells[el]
  chdir_rep=list()
  # estimate chDir without replicates: one chDir per individual treated well
  for (rep in 1:length(list_wells[[el]])){
    chdir_temp = chdir(as.matrix(real_ctl[,2:ncol(real_ctl)]),as.matrix(real_exp[,rep+1]),real_ctl[,1])
    chdir_rep=c(chdir_rep,list(chdir_temp))
  }
  # Branches assume exactly 3 or exactly 4 replicates: the else branch indexes
  # chdir_rep[[4]] unconditionally and would fail on any other replicate count.
  if (length(list_wells[[el]]) == 3){
    tmp_0 = rownames(chdir_result)
    tmp_1 = rownames(chdir_rep[[1]])
    tmp_2 = rownames(chdir_rep[[2]])
    tmp_3 = rownames(chdir_rep[[3]])
    # Row 1: cosine similarity between each single-replicate chDir and the pooled chDir.
    rep_gene_intersect[1,indexes[el]] = cos(angle(as.vector(chdir_rep[[1]]),as.vector(chdir_result)))
    rep_gene_intersect[1,indexes[el]+1] = cos(angle(as.vector(chdir_rep[[2]]) ,as.vector(chdir_result)))
    rep_gene_intersect[1,indexes[el]+2] = cos(angle(as.vector(chdir_rep[[3]]) ,as.vector(chdir_result)))
    # Precedence subtlety: 1:length(nb_gene)+1 parses as (1:5)+1 = 2:6, i.e. exactly
    # the "top_*" rows, and nb_gene[g-1] walks the cutoffs 100..5000.
    for (g in 1:length(nb_gene)+1){
      rep_gene_intersect[g,indexes[el]] = length(intersect(tmp_1[1:nb_gene[g-1]],tmp_0[1:nb_gene[g-1]]))
      rep_gene_intersect[g,indexes[el]+1] = length(intersect(tmp_2[1:nb_gene[g-1]],tmp_0[1:nb_gene[g-1]]))
      rep_gene_intersect[g,indexes[el]+2] = length(intersect(tmp_3[1:nb_gene[g-1]],tmp_0[1:nb_gene[g-1]]))
    }
    # Store pooled-chDir gene names tmp_0[2:46] in rows 7:51 (string assignment coerces
    # the matrix to character).  NOTE(review): starts at rank 2, skipping the top gene
    # -- confirm the g-5 offset is intended.
    for (g in 7:51){
      rep_gene_intersect[g,indexes[el]] = tmp_0[g-5]
    }
  }else{
    tmp_0 = rownames(chdir_result)
    tmp_1 = rownames(chdir_rep[[1]])
    tmp_2 = rownames(chdir_rep[[2]])
    tmp_3 = rownames(chdir_rep[[3]])
    tmp_4 = rownames(chdir_rep[[4]])
    rep_gene_intersect[1,indexes[el]] = cos(angle(as.vector(chdir_rep[[1]]),as.vector(chdir_result)))
    rep_gene_intersect[1,indexes[el]+1] = cos(angle(as.vector(chdir_rep[[2]]) ,as.vector(chdir_result)))
    rep_gene_intersect[1,indexes[el]+2] = cos(angle(as.vector(chdir_rep[[3]]) ,as.vector(chdir_result)))
    rep_gene_intersect[1,indexes[el]+3] = cos(angle(as.vector(chdir_rep[[4]]) ,as.vector(chdir_result)))
    for (g in 1:length(nb_gene)+1){   # same precedence note as above: g runs 2:6
      rep_gene_intersect[g,indexes[el]] = length(intersect(tmp_1[1:nb_gene[g-1]],tmp_0[1:nb_gene[g-1]]))
      rep_gene_intersect[g,indexes[el]+1] = length(intersect(tmp_2[1:nb_gene[g-1]],tmp_0[1:nb_gene[g-1]]))
      rep_gene_intersect[g,indexes[el]+2] = length(intersect(tmp_3[1:nb_gene[g-1]],tmp_0[1:nb_gene[g-1]]))
      rep_gene_intersect[g,indexes[el]+3] = length(intersect(tmp_4[1:nb_gene[g-1]],tmp_0[1:nb_gene[g-1]]))
    }
    for (g in 7:51){
      rep_gene_intersect[g,indexes[el]] = tmp_0[g-5]
    }
  }
}
colnames(rep_gene_intersect) = names_replicate
# Tab-separated summary, written into the current working directory (set via setwd(output) above).
write.table(rep_gene_intersect,file=paste(dataset,paste(type,"exp_40_rep.txt",sep="_"),sep="_"),sep = "\t")
| /main_single_dataset_pre_analysis.R | no_license | gepellet/R_transcriptomics | R | false | false | 22,156 | r | ## Single Plate Analysis
##########################################################################################################################
# Run configuration: analysis date tag, machine-specific directory roots, and
# derived output paths for this dataset ("M12") and count type ("total").
# NOTE(review): `date` masks base::date() and `type` is a very generic name;
# harmless here, but worth renaming in a refactor.
# today's date
date = "160128_bis"
# today's directories
mine = "/home/marie/Documents/"
work_windows = "//home.files.med.harvard.edu/home/"
work_linux = "/home/marie/Documents/"
# Active directory root for this run (switch to work_windows/mine as needed).
used_DR = work_linux
# today's output directories
dataset = "M12"
type = "total"
# Output path for control-well QC plots: <used_DR>R_output/<date>/<dataset>/control
control_output = paste(used_DR,
paste("R_output",
paste(date,
paste(dataset,"control",sep="/"),sep = "/"),sep = ""),sep="")
# Main output path: <used_DR>R_output/<date>/<dataset>
output = paste(used_DR,
paste("R_output",
paste(date,dataset,sep="/"),sep = "/"),sep = "")
# Output path for characteristic-direction results, split by count type.
output_chdir = paste(used_DR,
paste("R_output",
paste(date,
paste(dataset,type,sep="/"),sep = "/"),sep = ""),sep="")
##########################################################################################################################
# Upload functions files
# Source the project's helper-function files; all Upload_*/Select_*/chdir
# functions used below come from these scripts.
setwd(paste(used_DR,"R_transcriptomics",sep=""))
source('functions_systematic_comparison.R')
source('functions_data_format.R')
source('functions_data_processing.R')
source('functions_plot_data_description.R')
setwd(paste(used_DR,'R_transcriptomics/chdir_R',sep=""))
source('chdir.R')
source('nipals.R')
##########################################################################################################################
# Upload files
# Load plate design files, then locate and read the raw DGE count files.
raw_design_files = Upload_design_files(paste(used_DR,"Stage 2016/RAWDATA/Cellcount_20150707",sep=""))
# NOTE(review): the bare `,` below passes a *missing* argument in third
# position; this only works if Rawdata_files_name() guards that formal with
# missing() or never evaluates it -- confirm this is intentional.
rawdata_name = Rawdata_files_name(raw_design_files$plateID,
raw_design_files[[3]], # Make sure it is the good design file
,
24)
print("Files names")
Show_rawdata_names(paste(used_DR,"Stage 2016/RAWDATA/DGE_20150707",sep=""),type)
rawdata_files_total = Upload_rawdata_files(paste(used_DR,"Stage 2016/RAWDATA/DGE_20150707",sep=""),
rawdata_name,
type,
c(6)) # Make sure it is the right file number
##########################################################################################################################
# Data pre-processing
# Keep wells with at least 150k reads, then normalize, log-transform, and
# restore well names on the count matrix.
good_quality_raw_data = Minimum_reads(rawdata_files_total[[1]],150000)
setwd(output)
# Save the QC plot drawn by Minimum_reads() to <date>_<dataset>_<type>_quality_wells.png.
dev.print(device = png, file = paste(date,
paste(dataset,
paste(type,"quality_wells.png",sep="_"),sep="_"),sep="_"), width = 600)
dev.off()
# coordinate wells/columns name
# NOTE(review): `T` is used instead of `TRUE` (T is reassignable); prefer TRUE.
raw_total_count_name = Adjust_Well_Name(colnames(good_quality_raw_data ),T)
quality_wells = synchronize_quality_wells(raw_total_count_name,raw_design_files[[3]])
# normalize count data
# NOTE(review): this empty-list assignment is dead code -- it is overwritten
# two lines below by Total_counts_Norm().
normalized_total = list()
# `replicate` = number of raw data files minus one; used later when splitting controls.
replicate = length(rawdata_files_total)-1
normalized_total = Total_counts_Norm(good_quality_raw_data) # Choose the right normalization method
# log transform count data
log_total = Log_transform(normalized_total,4)
# final name adjustement
processed_total = log_total
colnames(processed_total)=raw_total_count_name
##########################################################################################################################
# Data pre-processing -> Expressed genes selection
# Select genes whose mean expression in vehicle-control wells exceeds the
# threshold reported by Expression_summary() (element 7 of its output).
processed_total=list(processed_total)
Design = quality_wells
# Column 1 of the design table holds the well identifiers.
control_wells = Design[Design$pert_type == "ctl_vehicle",1]
control_total_counts = Select_DataFrame(processed_total[[1]],control_wells)
control_total_expression_values = Expression_summary(control_total_counts)
print(control_total_expression_values)
expressed_control_total = Select_DataFrame_ValueThreshold_mean(control_total_counts,
as.numeric(control_total_expression_values[7]))
setwd(output)
# Save the expression-threshold plot for the record.
dev.print(device = png, file = paste(date,
paste(dataset,
paste(type,"expression_40.png",sep="_"),sep="_"),sep="_"), width = 600)
dev.off()
Expression_summary(expressed_control_total)
# `total` holds the processed counts restricted to the expressed-gene set.
total = list(rep1 = Select_raws_other_DF(expressed_control_total,processed_total[[1]]))
##########################################################################################################################
# Dataset control splitting
# Split vehicle-control wells per replicate x cell line into `control_total`,
# a named list of count data frames ("rep<j>_<CellLine>").
# NOTE(review): both `control_total` and `name_control` are grown inside the
# loop with c(); fine at this scale, but preallocation would be cleaner.
CellLine_level = names(table(Design$CellLine))
control_total = list()
name_control = c()
for (j in 1:replicate){
for (i in 1:length(CellLine_level)){
raw <- paste(paste("rep", j, sep = ""), CellLine_level[i],sep="_")
name_control=c(name_control,raw)
# Well IDs (design column 1) for vehicle controls of this cell line.
wells = Design[Design$pert_type == "ctl_vehicle" &
Design$CellLine == CellLine_level[i],1]
temp = Select_DataFrame(total[[j]],wells)
control_total = c(control_total,
list(temp))
}
}
names(control_total) = name_control
##########################################################################################################################
# Dataset control quality checking
# For each cell line, compute log fold-changes of every control well against
# the first control well (column 2; column 1 holds gene IDs), then plot the
# LFC distributions as a QC of control-well consistency.
LFC_control = list()
for (i in 1:length(levels(factor(Design$CellLine)))){
cell_lines = levels(factor(Design$CellLine))
cell_line_data = Design[Design$CellLine == cell_lines[i],]
cell_control = cell_line_data[cell_line_data$pert_type == "ctl_vehicle",]
cell_control_data = Select_DataFrame(total[[1]],cell_control[,1])
cell_control_name = colnames(cell_control_data)
LFC = matrix(0,nrow(cell_control_data),ncol(cell_control_data))
# First column keeps the gene identifiers from the count table.
LFC[,1] = cell_control_data[,1]
names_LFC = c("X")
for (j in 2:ncol(cell_control_data)){
# Data are already log-transformed, so a difference is a log fold-change.
# The reference is always column 2 (the first control well), so at j == 2
# the LFC column is identically zero.
LFC[,j] = as.numeric(cell_control_data[,2]) - as.numeric(cell_control_data[,j])
names_LFC = c(names_LFC,paste(cell_control_name[2],cell_control_name[j],sep = "/"))
}
colnames(LFC)=names_LFC
LFC_control = c(LFC_control,list(LFC))
}
names(LFC_control) = cell_lines
setwd(control_output)
# Plot LFC histograms (2x3 panels) and print deciles for each comparison.
for (i in 1:length(LFC_control)){
par(mfrow=c(2,3))
for (j in 2:ncol(LFC_control[[i]])){
temp = LFC_control[[i]]
temp_names = colnames(temp)
hist(as.numeric(temp[,j]),main = temp_names[j],col="dodgerblue4",xlab=quantile(as.numeric(temp[,j])))
# boxplot(as.numeric(temp[,j]),main = temp_names[j],col="dodgerblue4")
print(temp_names[j])
print(quantile(as.numeric(temp[,j]),probs=seq(0,1,.1)))
}
# dev.print(device = png, file = paste(date,paste(cell_lines[i],"_control.png",sep=""),sep="_"), width = 600)
# dev.off()
}
# Restore the default single-panel plotting layout.
par(mfrow=c(1,1))
##########################################################################################################################
# Dataset conditions splitting
# Enumerate every treated condition as the combination
# CellLine x Drug1 x Conc1 x Drug2 x Conc2, and collect the well IDs for each
# combination into `list_wells`, named "<CL>_<drug1>_<c1>_<drug2>_<c2>".
mean_total_control = list_expression_mean(control_total)
mean_cell_line = as.data.frame(mean_total_control)
colnames(mean_cell_line)=CellLine_level
names(control_total)=CellLine_level
names_wells = c()
list_wells = list()
# FOR EACH CELL LINE
for(i in 1:ncol(mean_cell_line)){
treatment = Design[Design$pert_type != "ctl_vehicle" &
Design$CellLine == CellLine_level[i],]
drugs = levels(factor(treatment$DrugName))
conc = vector("list", length(drugs))
# FOR EACH FIRST DRUG TREATMENT
for (d1 in 1:length(drugs)){
temp_d1 = treatment[treatment$DrugName == drugs[d1],]
# NOTE(review): factor levels are character, so sort() here orders
# concentrations lexicographically, not numerically.
conc[[d1]] = sort(levels(factor(temp_d1$Conc)))
# FOR EACH ASSOCIATED CONCENTRATION
for (c1 in 1:length(conc[[d1]])){
temp_d2 = temp_d1[temp_d1$Conc == conc[[d1]][c1],]
drugs_2 = levels(factor(temp_d2$DrugName2))
conc_2 = vector("list", length(drugs_2))
# FOR EACH ASSOCIATED SECOND DRUG TREATMENT
for (d2 in 1:length(drugs_2)){
temp = temp_d2[temp_d2$DrugName2 == drugs_2[d2],]
conc_2[[d2]] = sort(levels(factor(temp$Conc2)))
# FOR EACH ASSOCIATED SECOND CONCENTRATION
for(c2 in 1:length(conc_2[[d2]])){
# Condition label: underscore-joined cell line, drugs and concentrations.
name_temp = paste(CellLine_level[i],
paste(drugs[d1],
paste(conc[[d1]][c1],
paste(drugs_2[d2],conc_2[[d2]][c2],sep="_"),sep="_"),sep="_"),sep="_")
names_wells = c(names_wells,name_temp)
# Well IDs (design column 1) of the replicate wells for this condition.
temp_wells = temp[temp$Conc2 == conc_2[[d2]][c2],1]
list_wells = c(list_wells,list(temp_wells))
}
}
}
}
}
names(list_wells)=names_wells
# View(list_wells)
print(length(list_wells))
# nrow(unique(Design[,2:9]))
##########################################################################################################################
# Dataset conditions testing
#######################################################
# Fold-Change Estimation #
#######################################################
#
# setwd(output)
#
# for (el in 1:length(list_wells)){
# print(names_wells[el])
# CL = unlist(strsplit(names_wells[el],"_"))
# control = mean_cell_line[,CL[1]]
# temp_count = Select_DataFrame(total[[1]],list_wells[[el]])
# LFC_temp = matrix(0,nrow(temp_count),ncol(temp_count))
# LFC_temp[,1]=temp_count[,1]
# for (rep in 1:length(list_wells[[el]])){
# LFC_temp[,rep+1] = as.numeric(temp_count[,rep+1]) - control
# }
#
# genes = list()
# par(mfrow=c(2,4))
# if (length(list_wells[[el]]) !=1){
# for (rep in 1:length(list_wells[[el]])){
# genes = c(genes,list(LFC_temp[abs(as.numeric(LFC_temp[,rep+1]))>2,1]))
#
# if(rep == length(list_wells[[el]])){
# nb_commun = length(intersect(genes[[1]],genes[[2]]))
# final = paste(nb_commun ,paste(" over ",paste(length(genes[[1]]),paste("_",length(genes[[2]])))))
# print(final)
# hist(as.numeric(LFC_temp[,rep+1]),col="dodgerblue4",
# xlab = paste("LFC replicate", paste(rep,list_wells[[el]][rep],sep="_"),sep=' '),
# main= final)
#
# }else{
# hist(as.numeric(LFC_temp[,rep+1]),col="dodgerblue4",
# xlab = paste("LFC replicate", paste(rep,list_wells[[el]][rep],sep="_"),sep=' '),main=names_wells[el])
# }
#
# # mapplot
# rbPal <- colorRampPalette(c('darkgreen','palegreen4','grey10','firebrick','darkred'))
# color<- rbPal(5)[cut(as.numeric(LFC_temp[,rep+1]),breaks = 5)]
# ma.plot(mean_total[[1]],as.numeric(LFC_temp[,rep+1]),cex=1,col=color,
# main=paste("LFC replicate", rep,sep=' '))
#
# }
# }else{
# for (rep in 1:length(list_wells[[el]])){
# genes = c(genes,list(LFC_temp[abs(as.numeric(LFC_temp[,rep+1]))>2,1]))
#
# if(rep == length(list_wells[[el]])){
# #nb_commun = length(intersect(genes[[1]],genes[[2]]))
# final = paste(names_wells[el],"low quality replicate")
# print(final)
# hist(as.numeric(LFC_temp[,rep+1]),col="dodgerblue4",
# xlab = paste("LFC replicate", paste(rep,list_wells[[el]][rep],sep="_"),sep=' '),
# main= final)
#
# }else{
# hist(as.numeric(LFC_temp[,rep+1]),col="dodgerblue4",
# xlab = paste("LFC replicate", paste(rep,list_wells[[el]][rep],sep="_"),sep=' '),main=names_wells[el])
# }
#
# # mapplot
# rbPal <- colorRampPalette(c('darkgreen','palegreen4','grey10','firebrick','darkred'))
# color<- rbPal(5)[cut(as.numeric(LFC_temp[,rep+1]),breaks = 5)]
# ma.plot(mean_total[[1]],as.numeric(LFC_temp[,rep+1]),cex=1,col=color,
# main=paste("LFC replicate", rep,sep=' '))
#
# }
# }
# dev.print(device = png, file = paste(date,paste(names_wells[el],".png",sep=""),sep="_"), width = 1000)
# dev.off()
# }
#######################################################
# Characteristic Direction #
#######################################################
setwd(output)
##################################################################################################################
# specific conditions testing #
# Restrict the analysis to a hand-picked subset of conditions. Earlier runs
# used the BT20/HCC1806 list (kept commented below); the active list targets
# MCF10A and MCF7 conditions.
print(names_wells)
#
# specific_wells = c("BT20_AZD8330_3.3333_-_0",
# "BT20_BEZ235_1.1111_-_0",
# "BT20_BYL719_10_-_0",
# "BT20_BYL719_3.3333_-_0",
# "BT20_Dasatinib_1.1111_-_0",
# "BT20_GSK1059615_3.3333_-_0",
# "BT20_Lapatinib_3.3333_-_0",
# "BT20_Neratinib_3.3333_-_0",
# "BT20_NVP-TAE684_3.3333_-_0",
# "BT20_Palbociclib_3.3333_-_0",
# "BT20_Rapamycin_3.3333_-_0",
# "BT20_Saracatinib_3.3333_-_0",
# "BT20_Trametinib_3.3333_-_0",
# "HCC1806_AZ20_3.1623_-_0",
# "HCC1806_AZD8055_3.1623_-_0",
# "HCC1806_BEZ235_1_-_0",
# "HCC1806_BYL719_10_-_0",
# "HCC1806_Dasatinib_2_-_0",
# "HCC1806_GSK1059615_10_-_0",
# "HCC1806_KU60019_3.1623_-_0",
# "HCC1806_Lapatinib_20_-_0",
# "HCC1806_Linsitinib_20_-_0",
# "HCC1806_Saracatinib_10_-_0",
# "HCC1806_NVP-TAE684_10_-_0",
# "HCC1806_Torin2_0.31623_-_0",
# "HCC1806_VE821_3.1623_-_0"
# )
specific_wells= c("MCF10A_BEZ235_1_-_0",
"MCF10A_BYL719_3.1623_-_0",
"MCF10A_Dasatinib_2_-_0",
"MCF10A_Lapatinib_3.1623_-_0",
"MCF10A_Linsitinib_20_-_0",
"MCF10A_NVP-TAE684_10_-_0",
"MCF10A_Palbociclib_3.1623_-_0",
"MCF10A_Rapamycin_1_-_0",
"MCF10A_Saracatinib_10_-_0",
"MCF10A_Torin2_0.31623_-_0",
"MCF10A_Trametinib_3.1623_-_0",
"MCF7_BEZ235_1_-_0" ,
"MCF7_BYL719_3.1623_-_0",
"MCF7_Dasatinib_2_-_0",
"MCF7_Lapatinib_10_-_0",
"MCF7_Linsitinib_3.1623_-_0",
"MCF7_NVP-TAE684_10_-_0",
"MCF7_Palbociclib_3.1623_-_0",
"MCF7_Saracatinib_10_-_0" ,
"MCF7_Trametinib_3.1623_-_0")
# Keep the full condition list in `list_wells_save`, then filter `list_wells`
# and `names_wells` down to the selected conditions.
list_wells_save = list_wells
list_wells = list_wells_save[names(list_wells_save) %in% specific_wells]
names_wells = names(list_wells)
##################################################################################################################
#
# gene_intersect = matrix(0,56,length(list_wells))
# rownames(gene_intersect)= c("angle","top_100","top_500","top_1000","top_2500","top_5000",seq(1,50))
# colnames(gene_intersect)=names(list_wells)
# nb_gene=c(100,500,1000,2500,5000)
# rep_gene_intersect = matrix(0,56,length(list_wells)*2)
# rownames(rep_gene_intersect)= c("angle","top_100","top_500","top_1000","top_2500","top_5000",seq(1,50))
# names_replicate = c()
# indexes = seq(1,length(list_wells)*2,2)
#
#
#
# for (el in 1:length(list_wells)){
# print(names_wells[el])
# CL = unlist(strsplit(names_wells[el],"_"))
# control = control_total[[CL[1]]]
# temp_count = Select_DataFrame(total[[1]],list_wells[[el]])
# print(list_wells[[el]])
#
# # remove equals row
# equal_threshold = 1e-5;
# mat_ctl = as.matrix(control[,2:ncol(control)])
# ctl = control[diag(var(t(mat_ctl))) > equal_threshold,]
#
# if (length(list_wells[[el]]) != 1){
# mat_count = as.matrix(temp_count[,2:ncol(temp_count)])
# exp = temp_count[diag(var(t(mat_count))) > equal_threshold,]
# }else{
# exp = temp_count
# }
#
# # estimate chDir with replicates
# real_ctl = Select_raws_other_DF(exp,ctl)
# real_exp = Select_raws_other_DF(real_ctl,exp)
#
# chdir_result = chdir(as.matrix(real_ctl[,2:ncol(real_ctl)]),as.matrix(real_exp[,2:ncol(real_exp)]),real_ctl[,1])
# names_replicate = c(names_replicate,names_wells[el],names_wells[el])
#
# chdir_rep=list()
#
# #estimate chDir without replicates
# for (rep in 1:length(list_wells[[el]])){
# chdir_temp = chdir(as.matrix(real_ctl[,2:ncol(real_ctl)]),as.matrix(real_exp[,rep+1]),real_ctl[,1])
# chdir_rep=c(chdir_rep,list(chdir_temp))
# }
#
# if (length(list_wells[[el]]) == 1){
# tmp_0 = rownames(chdir_result)
# tmp_1 = rownames(chdir_rep[[1]])
#
# rep_gene_intersect[1,indexes[el]] = cos(angle(as.vector(chdir_rep[[1]]),as.vector(chdir_result)))
#
# for (g in 1:length(nb_gene)+1){
# rep_gene_intersect[g,indexes[el]] = length(intersect(tmp_1[1:nb_gene[g-1]],tmp_0[1:nb_gene[g-1]]))
# }
# for (g in 7:56){
# gene_intersect[g,el]=tmp_1[g-5]
# rep_gene_intersect[g,indexes[el]] = tmp_0[g-5]
# rep_gene_intersect[g,indexes[el]+1] = chdir_result[g-5,1]
# }
# }else{
# tmp_0 = rownames(chdir_result)
# tmp_1 = rownames(chdir_rep[[1]])
# tmp_2 = rownames(chdir_rep[[2]])
#
# # as.vector(chdir_rep[[1]])%*%as.vector(chdir_rep[[2]])
# # cos(angle(as.vector(chdir_rep[[1]]),as.vector(chdir_rep[[2]])))
# gene_intersect[1,el] = cos(angle(as.vector(chdir_rep[[1]]),as.vector(chdir_rep[[2]])))
# rep_gene_intersect[1,indexes[el]] = cos(angle(as.vector(chdir_rep[[1]]),as.vector(chdir_result)))
# rep_gene_intersect[1,indexes[el]+1] = cos(angle(as.vector(chdir_rep[[2]]) ,as.vector(chdir_result)))
#
# for (g in 1:length(nb_gene)+1){
# gene_intersect[g,el]=length(intersect(tmp_1[1:nb_gene[g-1]],tmp_2[1:nb_gene[g-1]]))
# rep_gene_intersect[g,indexes[el]] = length(intersect(tmp_1[1:nb_gene[g-1]],tmp_0[1:nb_gene[g-1]]))
# rep_gene_intersect[g,indexes[el]+1] = length(intersect(tmp_2[1:nb_gene[g-1]],tmp_0[1:nb_gene[g-1]]))
# }
# for (g in 7:51){
# gene_intersect[g,el]=paste(tmp_1[g-5],tmp_2[g-5],sep = " _ ")
# rep_gene_intersect[g,indexes[el]] = tmp_0[g-5]
# }
# }
#
# }
# colnames(rep_gene_intersect) = names_replicate
#
# write.table(gene_intersect,file=paste(dataset,paste(type,"exp_40.txt",sep="_"),sep="_"),sep = "\t")
# write.table(rep_gene_intersect,file=paste(dataset,paste(type,"exp_40_rep.txt",sep="_"),sep="_"),sep = "\t")
##################################################################################################################
# For each selected condition, compute the characteristic direction (chDir)
# using all replicate wells together, then per single replicate, and record:
# row 1 the cosine between each single-replicate chDir and the combined chDir,
# rows 2-6 the overlap of the top-N gene lists (N in nb_gene), and rows 7+ the
# top gene names of the combined chDir. Results go to 4 columns per condition.
nb_gene=c(100,500,1000,2500,5000)
# 56 rows = 1 angle + 5 top-N overlaps + 50 top-gene slots.
rep_gene_intersect = matrix(0,56,length(list_wells)*4)
rownames(rep_gene_intersect)= c("angle","top_100","top_500","top_1000","top_2500","top_5000",seq(1,50))
# NOTE(review): names_replicate starts as a numeric matrix of 0s; assigning a
# string into it coerces it to character, so unset columns end up named "0"
# when used as colnames below -- confirm this is acceptable.
names_replicate = matrix(0,1,length(list_wells)*4)
# First output column index for each condition (blocks of 4 columns).
indexes = seq(1,length(list_wells)*4,4)
for (el in 1:length(list_wells)){
print(names_wells[el])
# Cell line is the first underscore-separated token of the condition name.
CL = unlist(strsplit(names_wells[el],"_"))
control = control_total[[CL[1]]]
temp_count = Select_DataFrame(total[[1]],list_wells[[el]])
print(list_wells[[el]])
# remove equals row
# Drop genes with (near-)zero variance across wells, which chdir cannot use.
equal_threshold = 1e-5;
mat_ctl = as.matrix(control[,2:ncol(control)])
ctl = control[diag(var(t(mat_ctl))) > equal_threshold,]
mat_count = as.matrix(temp_count[,2:ncol(temp_count)])
exp = temp_count[diag(var(t(mat_count))) > equal_threshold,]
# estimate chDir with replicates
# Keep only genes present in both the filtered control and treated tables.
# NOTE(review): `exp` masks base::exp() within this script.
real_ctl = Select_raws_other_DF(exp,ctl)
real_exp = Select_raws_other_DF(real_ctl,exp)
chdir_result = chdir(as.matrix(real_ctl[,2:ncol(real_ctl)]),as.matrix(real_exp[,2:ncol(real_exp)]),real_ctl[,1])
names_replicate[indexes[el]] = names_wells[el]
chdir_rep=list()
#estimate chDir without replicates
# NOTE(review): the loop variable `rep` masks base::rep() here.
for (rep in 1:length(list_wells[[el]])){
chdir_temp = chdir(as.matrix(real_ctl[,2:ncol(real_ctl)]),as.matrix(real_exp[,rep+1]),real_ctl[,1])
chdir_rep=c(chdir_rep,list(chdir_temp))
}
# Conditions have either 3 or 4 replicate wells; the two branches differ only
# in how many single-replicate chDirs are compared.
if (length(list_wells[[el]]) == 3){
tmp_0 = rownames(chdir_result)
tmp_1 = rownames(chdir_rep[[1]])
tmp_2 = rownames(chdir_rep[[2]])
tmp_3 = rownames(chdir_rep[[3]])
# Cosine similarity between each single-replicate chDir and the combined one.
rep_gene_intersect[1,indexes[el]] = cos(angle(as.vector(chdir_rep[[1]]),as.vector(chdir_result)))
rep_gene_intersect[1,indexes[el]+1] = cos(angle(as.vector(chdir_rep[[2]]) ,as.vector(chdir_result)))
rep_gene_intersect[1,indexes[el]+2] = cos(angle(as.vector(chdir_rep[[3]]) ,as.vector(chdir_result)))
# NOTE(review): `1:length(nb_gene)+1` parses as (1:5)+1 == 2:6 because `:`
# binds tighter than `+`; rows 2-6 ("top_100".."top_5000") with
# nb_gene[g-1] is therefore the intended indexing, but the expression is
# fragile -- prefer seq_along(nb_gene) + 1.
for (g in 1:length(nb_gene)+1){
rep_gene_intersect[g,indexes[el]] = length(intersect(tmp_1[1:nb_gene[g-1]],tmp_0[1:nb_gene[g-1]]))
rep_gene_intersect[g,indexes[el]+1] = length(intersect(tmp_2[1:nb_gene[g-1]],tmp_0[1:nb_gene[g-1]]))
rep_gene_intersect[g,indexes[el]+2] = length(intersect(tmp_3[1:nb_gene[g-1]],tmp_0[1:nb_gene[g-1]]))
}
# NOTE(review): suspected double off-by-one -- the matrix has gene rows 7:56
# but only 7:51 are filled (rows 52-56 stay 0), and tmp_0[g-5] starts at the
# 2nd gene (g-6 would start at the 1st). An earlier commented-out version
# used 7:56. Confirm against the intended output format.
for (g in 7:51){
rep_gene_intersect[g,indexes[el]] = tmp_0[g-5]
}
}else{
tmp_0 = rownames(chdir_result)
tmp_1 = rownames(chdir_rep[[1]])
tmp_2 = rownames(chdir_rep[[2]])
tmp_3 = rownames(chdir_rep[[3]])
tmp_4 = rownames(chdir_rep[[4]])
rep_gene_intersect[1,indexes[el]] = cos(angle(as.vector(chdir_rep[[1]]),as.vector(chdir_result)))
rep_gene_intersect[1,indexes[el]+1] = cos(angle(as.vector(chdir_rep[[2]]) ,as.vector(chdir_result)))
rep_gene_intersect[1,indexes[el]+2] = cos(angle(as.vector(chdir_rep[[3]]) ,as.vector(chdir_result)))
rep_gene_intersect[1,indexes[el]+3] = cos(angle(as.vector(chdir_rep[[4]]) ,as.vector(chdir_result)))
# Same precedence caveat as above: this iterates g over 2:6.
for (g in 1:length(nb_gene)+1){
rep_gene_intersect[g,indexes[el]] = length(intersect(tmp_1[1:nb_gene[g-1]],tmp_0[1:nb_gene[g-1]]))
rep_gene_intersect[g,indexes[el]+1] = length(intersect(tmp_2[1:nb_gene[g-1]],tmp_0[1:nb_gene[g-1]]))
rep_gene_intersect[g,indexes[el]+2] = length(intersect(tmp_3[1:nb_gene[g-1]],tmp_0[1:nb_gene[g-1]]))
rep_gene_intersect[g,indexes[el]+3] = length(intersect(tmp_4[1:nb_gene[g-1]],tmp_0[1:nb_gene[g-1]]))
}
# Same suspected 7:51 / g-5 off-by-one as in the 3-replicate branch.
for (g in 7:51){
rep_gene_intersect[g,indexes[el]] = tmp_0[g-5]
}
}
}
colnames(rep_gene_intersect) = names_replicate
# Persist the replicate-concordance table as a tab-separated file in `output`.
write.table(rep_gene_intersect,file=paste(dataset,paste(type,"exp_40_rep.txt",sep="_"),sep="_"),sep = "\t")
|
# Conversion tools for gamut
#' Create a polygon sf object from a raster
#'
#' Create a polygon sf object from a raster, stack, or brick.
#'
#' @param raster A raster, stack, or brick object
#' @param na.rm boolean. If TRUE will polygonize only non-NA cells. Default is FALSE.
#' @return An \code{sf} polygon object with one feature per (non-NA) raster cell.
#' @importFrom spex qm_rasterToPolygons
#' @author Chris R. Vernon (chris.vernon@pnnl.gov)
#' @export
raster_to_polygon <- function(raster, na.rm = FALSE) {
  # Pass na.rm by name: qm_rasterToPolygons(x, na.rm = FALSE) takes it as a
  # named argument, so this stays correct if its signature gains parameters.
  qm_rasterToPolygons(raster, na.rm = na.rm)
}
| /R/conversion.R | permissive | jsta/gamut | R | false | false | 484 | r | # Conversion tools for gamut
#' Create a polygon sf object from a raster
#'
#' Create a polygon sf object from a raster, stack, or brick.
#'
#' @param raster A raster, stack, or brick object
#' @param na.rm boolean. If TRUE will polygonize only non-NA cells. Default is FALSE.
#' @return An \code{sf} polygon object with one feature per (non-NA) raster cell.
#' @importFrom spex qm_rasterToPolygons
#' @author Chris R. Vernon (chris.vernon@pnnl.gov)
#' @export
raster_to_polygon <- function(raster, na.rm = FALSE) {
  # Pass na.rm by name: qm_rasterToPolygons(x, na.rm = FALSE) takes it as a
  # named argument, so this stays correct if its signature gains parameters.
  qm_rasterToPolygons(raster, na.rm = na.rm)
}
|
# Load CITE-seq hashtag-oligo (HTO) count matrices for experiments CML_NK_1
# and CML_NK_3, match their cell barcodes to the expression object
# `cml_seurat` (defined elsewhere), relabel HTO rows from the `hto_id` lookup
# table, and attach the matrices as an "HTO" assay on per-experiment subsets.
# NOTE(review): `dense_umis` is never used below, and `gene.column` is not a
# data.table::fread() argument (likely copy-pasted from Read10X) -- verify.
dense_umis <- fread("data/functional/citeseq/CML_NK_1_HTO/uncorrected_cells/dense_umis.tsv", gene.column = 1)
cml_nk_1_hto_seurat <- Read10X(data.dir = "data/functional/citeseq/CML_NK_1_HTO/umi_count/", gene.column = 1)
cml_nk_1_hto_seurat <- CreateSeuratObject(cml_nk_1_hto_seurat, project = "CML_NK_1")
cml_nk_3_hto_seurat <- Read10X(data.dir = "data/functional/citeseq/CML_NK_3_HTO/umi_count/", gene.column = 1)
cml_nk_3_hto_seurat <- CreateSeuratObject(cml_nk_3_hto_seurat, project = "CML_NK_3")
# Prefix barcodes with the experiment name and 10x "-1" suffix to match
# the naming convention of cml_seurat.
colnames(cml_nk_1_hto_seurat) <- paste0("CML_NK_1_", colnames(cml_nk_1_hto_seurat), "-1")
colnames(cml_nk_3_hto_seurat) <- paste0("CML_NK_3_", colnames(cml_nk_3_hto_seurat), "-1")
# NOTE(review): the colnames were already prefixed above, yet the same
# prefix/suffix is applied AGAIN here and in the %in% filters below, which
# would produce "CML_NK_1_CML_NK_1_<bc>-1-1" -- confirm this double prefixing
# is intended and that joint.bcs is non-empty at run time.
hto_names <- c(paste0("CML_NK_1_", colnames(cml_nk_1_hto_seurat), "-1"),
paste0("CML_NK_3_", colnames(cml_nk_3_hto_seurat), "-1"))
joint.bcs <- intersect(colnames(cml_seurat), hto_names)
cml_nk_1_hto_seurat_new <- cml_nk_1_hto_seurat[, c(paste0("CML_NK_1_", colnames(cml_nk_1_hto_seurat), "-1") %in% joint.bcs)]
# NOTE(review): cml_nk_2_hto_seurat is never created in this script; this
# line will error unless it exists in the calling environment -- likely a
# leftover from a three-experiment version of the pipeline.
cml_nk_2_hto_seurat_new <- cml_nk_2_hto_seurat[, c(paste0("CML_NK_2_", colnames(cml_nk_2_hto_seurat), "-1") %in% joint.bcs)]
cml_nk_3_hto_seurat_new <- cml_nk_3_hto_seurat[, c(paste0("CML_NK_3_", colnames(cml_nk_3_hto_seurat), "-1") %in% joint.bcs)]
# Extract the raw HTO count matrices and relabel rows with human-readable
# hashtag names from the hto_id table, keeping the "unmapped" row last.
cml_nk_1_hto_mtx <- cml_nk_1_hto_seurat_new@assays$RNA@counts
colnames(cml_nk_1_hto_mtx) <- c(paste0("CML_NK_1_", colnames(cml_nk_1_hto_mtx), "-1"))
rownames(cml_nk_1_hto_mtx) <- c(hto_id %>% filter(experiment == "CML_NK_1") %>% pull(toname), "unmapped")
cml_nk_3_hto_mtx <- cml_nk_3_hto_seurat_new@assays$RNA@counts
# Keep hashtags 1-10 plus row 15 (the "unmapped" row) for CML_NK_3.
cml_nk_3_hto_mtx <- cml_nk_3_hto_mtx[c(1:10,15), ]
colnames(cml_nk_3_hto_mtx) <- c(paste0("CML_NK_3_", colnames(cml_nk_3_hto_mtx), "-1"))
rownames(cml_nk_3_hto_mtx) <- c(hto_id %>% filter(experiment == "CML_NK_3") %>% pull(toname), "unmapped")
# Split the expression object per experiment and attach the HTO assays.
cml_seurat_new1 <- subset(cml_seurat_new, orig.ident == "CML_NK_1")
cml_seurat_new3 <- subset(cml_seurat_new, orig.ident == "CML_NK_3")
cml_seurat_new1[["HTO"]] <- CreateAssayObject(counts = cml_nk_1_hto_mtx)
cml_seurat_new3[["HTO"]] <- CreateAssayObject(counts = cml_nk_3_hto_mtx)
#### HTO 1
# Demultiplex CML_NK_1 by hashtag: CLR-normalize the HTO assay, run Seurat's
# HTODemux, embed with PCA/tSNE on the HTO space, apply the project's custom
# classifier (getDemuxEssi), keep singlets, and compare the classifications.
cml_seurat_new1 <- cml_seurat_new1 %>% NormalizeData(assay = "HTO", normalization.method = "CLR") %>% HTODemux(assay = "HTO", positive.quantile = 0.99)
DefaultAssay(cml_seurat_new1) <- "HTO"
cml_seurat_new1 <- cml_seurat_new1 %>% ScaleData(features = rownames(cml_seurat_new1)) %>% RunPCA(features = rownames(cml_seurat_new1), approx = F) %>% RunTSNE(dims = 1:8, perplexity = 100)
# Custom demux call; the magrittr pipe allows a bare function name here.
cml_seurat_new1 <- cml_seurat_new1 %>% getDemuxEssi
# Keep only cells classified as singlets by the custom method (my.demux2).
cml_seurat_new_filt1 <- subset(cml_seurat_new1, my.demux2 == "Singlet")
cml_seurat_new_filt1 <- cml_seurat_new_filt1 %>% ScaleData(features = rownames(cml_seurat_new_filt1)) %>% RunPCA(features = rownames(cml_seurat_new_filt1), approx = F) %>% RunTSNE(dims = 1:8, perplexity = 100)
DimPlot(cml_seurat_new_filt1, label = T, repel = T, group.by = "HTO_classification.global")
DimPlot(cml_seurat_new_filt1, label = T, repel = T, group.by = "my.demux")
DimPlot(cml_seurat_new_filt1, label = T, repel = T, group.by = "my.demux2")
HTOHeatmap(cml_seurat_new_filt1, assay = "HTO", ncells = 5000)
# NOTE(review): this overwrites the object built above with a subset of
# cml_seurat_hto, which is only created/loaded at the END of this script --
# the script is not linearly runnable; confirm the intended execution order.
cml_seurat_new_filt1 <- subset(cml_seurat_hto, orig.ident == "CML_NK_1")
# Re-analyze the singlets on the RNA assay: cell-cycle-aware preprocessing,
# clustering, and comparison of demux calls on the RNA embedding.
DefaultAssay(cml_seurat_new_filt1) <- "RNA"
cml_seurat_new_filt1 <- cml_seurat_new_filt1 %>% preprocessSeuratCellCycle(cells.to.use = colnames(cml_seurat_new_filt1))
cml_seurat_new_filt1 <- cml_seurat_new_filt1 %>% getClustering()
a <- DimPlot(cml_seurat_new_filt1, label = F, repel = T, cols = getPalette(14)) + labs(title = "Seurat")
b <- DimPlot(cml_seurat_new_filt1, label = F, repel = T, cols = getPalette(14), group.by = "my.demux") + labs(title = "Essi")
c <- DimPlot(cml_seurat_new_filt1, label = F, repel = T, cols = getPalette(14), group.by = "HTO_classification.global") + labs(title = "Seurat")
d <- DimPlot(cml_seurat_new_filt1, label = F, repel = T, cols = getPalette(14), group.by = "my.demux2") + labs(title = "Seurat")
e <- a + b + c + d
ggsave(plot = e, "results/functional/demux/umap_hto1_rna.png", width = 12, height = 8)
a <- DimPlot(cml_seurat_new_filt1, label = T, repel = T, cols = getPalette(14), group.by = "RNA_snn_res.0.1")
b <- DimPlot(cml_seurat_new_filt1, label = T, repel = T, cols = getPalette(14), group.by = "singler_blueprint_pred")
# c <- DimPlot(cml_seurat_new_filt1, label = T, repel = T, cols = getPalette(14), group.by = "singler_hpca_pred")
# Label cluster 2 as NK cells, everything else as CML, then find cluster markers.
cml_seurat_new_filt1$is_nk <- ifelse(cml_seurat_new_filt1$RNA_snn_res.0.1 == 2, "NK", "CML")
Idents(cml_seurat_new_filt1) <- cml_seurat_new_filt1$RNA_snn_res.0.1
# NOTE(review): the avg_log2FC > 0 filter is applied twice (here and on the
# next line); the second application is redundant.
cml_seurat_new_markers <- FindAllMarkers(cml_seurat_new_filt1, test.use = "t", max.cells.per.ident = 1e3) %>% filter(avg_log2FC > 0)
cml_seurat_new_markers <- cml_seurat_new_markers %>% filter(avg_log2FC > 0)
FeaturePlot(cml_seurat_new_filt1, features = big_markers)
ggsave("results/functional/demux/feature_hto1.png", width = 12, height = 18)
# Re-embed on the HTO assay and plot the demux classifications side by side.
DefaultAssay(cml_seurat_new_filt1) <- "HTO"
cml_seurat_new_filt1 <- ScaleData(cml_seurat_new_filt1, features = rownames(cml_seurat_new_filt1), verbose = FALSE)
cml_seurat_new_filt1 <- RunPCA(cml_seurat_new_filt1, features = rownames(cml_seurat_new_filt1), approx = FALSE)
cml_seurat_new_filt1 <- RunTSNE(cml_seurat_new_filt1, dims = 1:8, perplexity = 100)
table(cml_seurat_new_filt1$HTO_classification.global)
a <- DimPlot(cml_seurat_new_filt1, label = F, repel = T, cols = getPalette(14)) + labs(title = "Seurat")
b <- DimPlot(cml_seurat_new_filt1, label = F, repel = T, cols = getPalette(14), group.by = "my.demux") + labs(title = "Essi")
c <- DimPlot(cml_seurat_new_filt1, label = F, repel = T, cols = getPalette(14), group.by = "HTO_classification.global") + labs(title = "Seurat")
d <- DimPlot(cml_seurat_new_filt1, label = F, repel = T, cols = getPalette(14), group.by = "my.demux2") + labs(title = "Seurat")
e <- DimPlot(cml_seurat_new_filt1, label = F, repel = T, cols = getPalette(2), group.by = "is_nk") + labs(title = "RNA")
f <- a + b + c + d + e
ggsave(plot = f, "results/functional/demux/umap_hto1_hto.png", width = 12, height = 6)
###### HTO 3
# Same demultiplexing workflow as HTO 1, applied to CML_NK_3.
cml_seurat_new3 <- cml_seurat_new3 %>% NormalizeData(assay = "HTO", normalization.method = "CLR") %>% HTODemux(assay = "HTO", positive.quantile = 0.99)
DefaultAssay(cml_seurat_new3) <- "HTO"
cml_seurat_new3 <- cml_seurat_new3 %>% ScaleData(features = rownames(cml_seurat_new3)) %>% RunPCA(features = rownames(cml_seurat_new3), approx = F) %>% RunTSNE(dims = 1:8, perplexity = 100)
cml_seurat_new3 <- cml_seurat_new3 %>% getDemuxEssi
cml_seurat_new_filt3 <- subset(cml_seurat_new3, my.demux2 == "Singlet")
cml_seurat_new_filt3 <- cml_seurat_new_filt3 %>% ScaleData(features = rownames(cml_seurat_new_filt3)) %>% RunPCA(features = rownames(cml_seurat_new_filt3), approx = F) %>% RunTSNE(dims = 1:8, perplexity = 100)
DimPlot(cml_seurat_new_filt3, label = T, repel = T, group.by = "HTO_classification.global")
DimPlot(cml_seurat_new_filt3, label = T, repel = T, group.by = "my.demux")
# NOTE(review): "my.demux3" is likely a typo for "my.demux2" (the metadata
# column used everywhere else in this script); DimPlot would fail or plot
# nothing useful for a nonexistent column -- verify.
DimPlot(cml_seurat_new_filt3, label = T, repel = T, group.by = "my.demux3")
HTOHeatmap(cml_seurat_new_filt3, assay = "HTO", ncells = 5000)
# Merge the per-experiment singlet objects and persist the combined object.
cml_seurat_new_filt <- merge(cml_seurat_new_filt1, list(cml_seurat_new_filt3))
cml_seurat_hto <- cml_seurat_new_filt
saveRDS(cml_seurat_hto, "results/functional/cml_seurat_hto.rds")
cml_seurat_hto <- readRDS("results/functional/cml_seurat_hto.rds")
| /R/run_createHTO.R | no_license | janihuuh/cml_stop_manu | R | false | false | 7,373 | r |
# Load and barcode-match HTO count matrices for CML_NK_1 / CML_NK_3, then
# attach them as an "HTO" assay (this section is a verbatim copy of the
# loading section earlier in the file -- NOTE(review): deduplicate).
# NOTE(review): `dense_umis` is unused and `gene.column` is not an fread() arg.
dense_umis <- fread("data/functional/citeseq/CML_NK_1_HTO/uncorrected_cells/dense_umis.tsv", gene.column = 1)
cml_nk_1_hto_seurat <- Read10X(data.dir = "data/functional/citeseq/CML_NK_1_HTO/umi_count/", gene.column = 1)
cml_nk_1_hto_seurat <- CreateSeuratObject(cml_nk_1_hto_seurat, project = "CML_NK_1")
cml_nk_3_hto_seurat <- Read10X(data.dir = "data/functional/citeseq/CML_NK_3_HTO/umi_count/", gene.column = 1)
cml_nk_3_hto_seurat <- CreateSeuratObject(cml_nk_3_hto_seurat, project = "CML_NK_3")
colnames(cml_nk_1_hto_seurat) <- paste0("CML_NK_1_", colnames(cml_nk_1_hto_seurat), "-1")
colnames(cml_nk_3_hto_seurat) <- paste0("CML_NK_3_", colnames(cml_nk_3_hto_seurat), "-1")
# NOTE(review): barcodes prefixed above are prefixed AGAIN here and in the
# %in% filters below -- verify joint.bcs is non-empty at run time.
hto_names <- c(paste0("CML_NK_1_", colnames(cml_nk_1_hto_seurat), "-1"),
paste0("CML_NK_3_", colnames(cml_nk_3_hto_seurat), "-1"))
joint.bcs <- intersect(colnames(cml_seurat), hto_names)
cml_nk_1_hto_seurat_new <- cml_nk_1_hto_seurat[, c(paste0("CML_NK_1_", colnames(cml_nk_1_hto_seurat), "-1") %in% joint.bcs)]
# NOTE(review): cml_nk_2_hto_seurat is never created in this script.
cml_nk_2_hto_seurat_new <- cml_nk_2_hto_seurat[, c(paste0("CML_NK_2_", colnames(cml_nk_2_hto_seurat), "-1") %in% joint.bcs)]
cml_nk_3_hto_seurat_new <- cml_nk_3_hto_seurat[, c(paste0("CML_NK_3_", colnames(cml_nk_3_hto_seurat), "-1") %in% joint.bcs)]
cml_nk_1_hto_mtx <- cml_nk_1_hto_seurat_new@assays$RNA@counts
colnames(cml_nk_1_hto_mtx) <- c(paste0("CML_NK_1_", colnames(cml_nk_1_hto_mtx), "-1"))
rownames(cml_nk_1_hto_mtx) <- c(hto_id %>% filter(experiment == "CML_NK_1") %>% pull(toname), "unmapped")
cml_nk_3_hto_mtx <- cml_nk_3_hto_seurat_new@assays$RNA@counts
# Keep hashtags 1-10 plus row 15 (the "unmapped" row) for CML_NK_3.
cml_nk_3_hto_mtx <- cml_nk_3_hto_mtx[c(1:10,15), ]
colnames(cml_nk_3_hto_mtx) <- c(paste0("CML_NK_3_", colnames(cml_nk_3_hto_mtx), "-1"))
rownames(cml_nk_3_hto_mtx) <- c(hto_id %>% filter(experiment == "CML_NK_3") %>% pull(toname), "unmapped")
cml_seurat_new1 <- subset(cml_seurat_new, orig.ident == "CML_NK_1")
cml_seurat_new3 <- subset(cml_seurat_new, orig.ident == "CML_NK_3")
cml_seurat_new1[["HTO"]] <- CreateAssayObject(counts = cml_nk_1_hto_mtx)
cml_seurat_new3[["HTO"]] <- CreateAssayObject(counts = cml_nk_3_hto_mtx)
#### HTO 1
cml_seurat_new1 <- cml_seurat_new1 %>% NormalizeData(assay = "HTO", normalization.method = "CLR") %>% HTODemux(assay = "HTO", positive.quantile = 0.99)
DefaultAssay(cml_seurat_new1) <- "HTO"
cml_seurat_new1 <- cml_seurat_new1 %>% ScaleData(features = rownames(cml_seurat_new1)) %>% RunPCA(features = rownames(cml_seurat_new1), approx = F) %>% RunTSNE(dims = 1:8, perplexity = 100)
cml_seurat_new1 <- cml_seurat_new1 %>% getDemuxEssi
cml_seurat_new_filt1 <- subset(cml_seurat_new1, my.demux2 == "Singlet")
cml_seurat_new_filt1 <- cml_seurat_new_filt1 %>% ScaleData(features = rownames(cml_seurat_new_filt1)) %>% RunPCA(features = rownames(cml_seurat_new_filt1), approx = F) %>% RunTSNE(dims = 1:8, perplexity = 100)
DimPlot(cml_seurat_new_filt1, label = T, repel = T, group.by = "HTO_classification.global")
DimPlot(cml_seurat_new_filt1, label = T, repel = T, group.by = "my.demux")
DimPlot(cml_seurat_new_filt1, label = T, repel = T, group.by = "my.demux2")
HTOHeatmap(cml_seurat_new_filt1, assay = "HTO", ncells = 5000)
cml_seurat_new_filt1 <- subset(cml_seurat_hto, orig.ident == "CML_NK_1")
DefaultAssay(cml_seurat_new_filt1) <- "RNA"
cml_seurat_new_filt1 <- cml_seurat_new_filt1 %>% preprocessSeuratCellCycle(cells.to.use = colnames(cml_seurat_new_filt1))
cml_seurat_new_filt1 <- cml_seurat_new_filt1 %>% getClustering()
a <- DimPlot(cml_seurat_new_filt1, label = F, repel = T, cols = getPalette(14)) + labs(title = "Seurat")
b <- DimPlot(cml_seurat_new_filt1, label = F, repel = T, cols = getPalette(14), group.by = "my.demux") + labs(title = "Essi")
c <- DimPlot(cml_seurat_new_filt1, label = F, repel = T, cols = getPalette(14), group.by = "HTO_classification.global") + labs(title = "Seurat")
d <- DimPlot(cml_seurat_new_filt1, label = F, repel = T, cols = getPalette(14), group.by = "my.demux2") + labs(title = "Seurat")
e <- a + b + c + d
ggsave(plot = e, "results/functional/demux/umap_hto1_rna.png", width = 12, height = 8)
a <- DimPlot(cml_seurat_new_filt1, label = T, repel = T, cols = getPalette(14), group.by = "RNA_snn_res.0.1")
b <- DimPlot(cml_seurat_new_filt1, label = T, repel = T, cols = getPalette(14), group.by = "singler_blueprint_pred")
# c <- DimPlot(cml_seurat_new_filt1, label = T, repel = T, cols = getPalette(14), group.by = "singler_hpca_pred")
cml_seurat_new_filt1$is_nk <- ifelse(cml_seurat_new_filt1$RNA_snn_res.0.1 == 2, "NK", "CML")
Idents(cml_seurat_new_filt1) <- cml_seurat_new_filt1$RNA_snn_res.0.1
cml_seurat_new_markers <- FindAllMarkers(cml_seurat_new_filt1, test.use = "t", max.cells.per.ident = 1e3) %>% filter(avg_log2FC > 0)
cml_seurat_new_markers <- cml_seurat_new_markers %>% filter(avg_log2FC > 0)
# Expression overview of the marker panel; big_markers is defined
# elsewhere in the script — TODO confirm it is in scope here.
FeaturePlot(cml_seurat_new_filt1, features = big_markers)
ggsave("results/functional/demux/feature_hto1.png", width = 12, height = 18)
# ---- Re-embed the same cells on the hashtag (HTO) assay ----
# Scale all HTO features, PCA without truncated approximation (approx =
# FALSE is needed for the small HTO feature space), then t-SNE on the
# first 8 PCs with a high perplexity.
DefaultAssay(cml_seurat_new_filt1) <- "HTO"
cml_seurat_new_filt1 <- ScaleData(cml_seurat_new_filt1, features = rownames(cml_seurat_new_filt1), verbose = FALSE)
cml_seurat_new_filt1 <- RunPCA(cml_seurat_new_filt1, features = rownames(cml_seurat_new_filt1), approx = FALSE)
cml_seurat_new_filt1 <- RunTSNE(cml_seurat_new_filt1, dims = 1:8, perplexity = 100)
# Singlet/doublet/negative counts from Seurat's HTODemux call.
table(cml_seurat_new_filt1$HTO_classification.global)
# Same demux comparison as on the RNA embedding, plus the NK/CML label
# derived from RNA clustering, now on the HTO embedding.
a <- DimPlot(cml_seurat_new_filt1, label = F, repel = T, cols = getPalette(14)) + labs(title = "Seurat")
b <- DimPlot(cml_seurat_new_filt1, label = F, repel = T, cols = getPalette(14), group.by = "my.demux") + labs(title = "Essi")
c <- DimPlot(cml_seurat_new_filt1, label = F, repel = T, cols = getPalette(14), group.by = "HTO_classification.global") + labs(title = "Seurat")
d <- DimPlot(cml_seurat_new_filt1, label = F, repel = T, cols = getPalette(14), group.by = "my.demux2") + labs(title = "Seurat")
e <- DimPlot(cml_seurat_new_filt1, label = F, repel = T, cols = getPalette(2), group.by = "is_nk") + labs(title = "RNA")
f <- a + b + c + d + e
ggsave(plot = f, "results/functional/demux/umap_hto1_hto.png", width = 12, height = 6)
###### HTO 3
# ---- HTO lane 3: same demux pipeline as lane 1, condensed into pipes ----
# CLR-normalize the hashtag counts and run Seurat's HTODemux classifier.
cml_seurat_new3 <- cml_seurat_new3 %>% NormalizeData(assay = "HTO", normalization.method = "CLR") %>% HTODemux(assay = "HTO", positive.quantile = 0.99)
DefaultAssay(cml_seurat_new3) <- "HTO"
# HTO-space embedding (scale -> full PCA -> t-SNE on 8 PCs).
cml_seurat_new3 <- cml_seurat_new3 %>% ScaleData(features = rownames(cml_seurat_new3)) %>% RunPCA(features = rownames(cml_seurat_new3), approx = F) %>% RunTSNE(dims = 1:8, perplexity = 100)
# Project helper producing the custom demux columns (my.demux*).
cml_seurat_new3 <- cml_seurat_new3 %>% getDemuxEssi
# Keep only cells the custom call labels as singlets, then re-embed
# the filtered object for the QC plots below.
cml_seurat_new_filt3 <- subset(cml_seurat_new3, my.demux2 == "Singlet")
cml_seurat_new_filt3 <- cml_seurat_new_filt3 %>% ScaleData(features = rownames(cml_seurat_new_filt3)) %>% RunPCA(features = rownames(cml_seurat_new_filt3), approx = F) %>% RunTSNE(dims = 1:8, perplexity = 100)
DimPlot(cml_seurat_new_filt3, label = T, repel = T, group.by = "HTO_classification.global")
DimPlot(cml_seurat_new_filt3, label = T, repel = T, group.by = "my.demux")
# NOTE(review): groups by "my.demux3" (not "my.demux2" as in lane 1) —
# presumably a lane-3-specific column from getDemuxEssi; confirm it exists.
DimPlot(cml_seurat_new_filt3, label = T, repel = T, group.by = "my.demux3")
HTOHeatmap(cml_seurat_new_filt3, assay = "HTO", ncells = 5000)
# ---- Combine lanes and checkpoint ----
# Merge the singlet-filtered lane-1 and lane-3 objects into one object.
cml_seurat_new_filt <- merge(cml_seurat_new_filt1, list(cml_seurat_new_filt3))
cml_seurat_hto <- cml_seurat_new_filt
# Persist and immediately reload — a checkpoint for interactive sessions.
# NOTE(review): this overwrites the cml_seurat_hto that the lane-1 section
# above subsets from; the script is not safely re-runnable top to bottom.
saveRDS(cml_seurat_hto, "results/functional/cml_seurat_hto.rds")
cml_seurat_hto <- readRDS("results/functional/cml_seurat_hto.rds")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/phreeqc.R
\docType{data}
\name{ex4}
\alias{ex4}
\title{Example 4--Evaporation and Homogeneous Redox Reactions}
\source{
\url{http://wwwbrr.cr.usgs.gov/projects/GWC_coupled/phreeqc}
}
\description{
Evaporation is accomplished by removing water from the chemical
system. Water can be removed by several methods: (1) water can be specified
as an irreversible reactant with a negative reaction coefficient in the
REACTION keyword input, (2) the solution can be mixed with pure water which
is given a negative mixing fraction in MIX, or (3) "H2O" can be specified as
the alternative reaction in EQUILIBRIUM_PHASES keyword input, in which case
water is removed or added to the aqueous phase to attain equilibrium with a
specified phase. This example uses the first method; the REACTION data block
is used to simulate concentration of rainwater by approximately 20-fold by
removing 95 percent of the water. The resulting solution contains only about
0.05 kg of water. In a subsequent simulation, the MIX keyword is used to
generate a solution that has the same concentrations as the evaporated
solution, but has a total mass of water of approximately 1 kg. The example
can be run using the \code{\link{phrRunString}} routine.
}
\examples{
phrLoadDatabaseString(phreeqc.dat)
phrSetOutputStringsOn(TRUE)
phrRunString(ex4)
phrGetOutputStrings()
}
\references{
\url{http://pubs.usgs.gov/tm/06/a43/pdf/tm6-A43.pdf}
}
\seealso{
Other Examples:
\code{\link{ex10}},
\code{\link{ex11}},
\code{\link{ex12}},
\code{\link{ex13a}},
\code{\link{ex14}},
\code{\link{ex15}},
\code{\link{ex16}},
\code{\link{ex17}},
\code{\link{ex18}},
\code{\link{ex19}},
\code{\link{ex1}},
\code{\link{ex20a}},
\code{\link{ex21}},
\code{\link{ex22}},
\code{\link{ex2}},
\code{\link{ex3}},
\code{\link{ex5}},
\code{\link{ex6}},
\code{\link{ex7}},
\code{\link{ex8}},
\code{\link{ex9}}
}
\concept{Examples}
\keyword{dataset}
| /man/ex4.Rd | no_license | cran/phreeqc | R | false | true | 1,964 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/phreeqc.R
\docType{data}
\name{ex4}
\alias{ex4}
\title{Example 4--Evaporation and Homogeneous Redox Reactions}
\source{
\url{http://wwwbrr.cr.usgs.gov/projects/GWC_coupled/phreeqc}
}
\description{
Evaporation is accomplished by removing water from the chemical
system. Water can be removed by several methods: (1) water can be specified
as an irreversible reactant with a negative reaction coefficient in the
REACTION keyword input, (2) the solution can be mixed with pure water which
is given a negative mixing fraction in MIX, or (3) "H2O" can be specified as
the alternative reaction in EQUILIBRIUM_PHASES keyword input, in which case
water is removed or added to the aqueous phase to attain equilibrium with a
specified phase. This example uses the first method; the REACTION data block
is used to simulate concentration of rainwater by approximately 20-fold by
removing 95 percent of the water. The resulting solution contains only about
0.05 kg of water. In a subsequent simulation, the MIX keyword is used to
generate a solution that has the same concentrations as the evaporated
solution, but has a total mass of water of approximately 1 kg. The example
can be run using the \code{\link{phrRunString}} routine.
}
\examples{
phrLoadDatabaseString(phreeqc.dat)
phrSetOutputStringsOn(TRUE)
phrRunString(ex4)
phrGetOutputStrings()
}
\references{
\url{http://pubs.usgs.gov/tm/06/a43/pdf/tm6-A43.pdf}
}
\seealso{
Other Examples:
\code{\link{ex10}},
\code{\link{ex11}},
\code{\link{ex12}},
\code{\link{ex13a}},
\code{\link{ex14}},
\code{\link{ex15}},
\code{\link{ex16}},
\code{\link{ex17}},
\code{\link{ex18}},
\code{\link{ex19}},
\code{\link{ex1}},
\code{\link{ex20a}},
\code{\link{ex21}},
\code{\link{ex22}},
\code{\link{ex2}},
\code{\link{ex3}},
\code{\link{ex5}},
\code{\link{ex6}},
\code{\link{ex7}},
\code{\link{ex8}},
\code{\link{ex9}}
}
\concept{Examples}
\keyword{dataset}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.