content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lab5_17_Package.R
\docType{package}
\name{swelection}
\alias{swelection}
\alias{swelection-package}
\title{Sweden election 2014}
\description{
This package is used to get the 2014 election results in Sweden for the nine biggest parties. The package contains two functions which are "votes_spec" and "votes_sum".
}
\author{
Ahmet Akdeve \email{ahmak554@student.liu.se}
Zhixuan Duan \email{darinstu999@gmail.com}
}
| /swelection/man/swelection.Rd | no_license | ahmetakdeve/Lab5_17 | R | false | true | 491 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lab5_17_Package.R
\docType{package}
\name{swelection}
\alias{swelection}
\alias{swelection-package}
\title{Sweden election 2014}
\description{
This package is used to get the 2014 election results in Sweden for the nine biggest parties. The package contains two functions which are "votes_spec" and "votes_sum".
}
\author{
Ahmet Akdeve \email{ahmak554@student.liu.se}
Zhixuan Duan \email{darinstu999@gmail.com}
}
|
# Household Air Conditioning Analysis, Part 7
# Written By: Vineeth CR, Renee Obringer
# Last Ran: 30 April 2021
# Purpose: percent-change, efficiency, household-days, and actual-vs-predicted
# analyses and figures for projected residential AC electricity use across
# 27 US domains and 5 climate models, using outputs of scripts 03, 05.1, 06.
# NOTE(review): clears the entire workspace -- run in a fresh session only.
rm(list = ls())
# Directory paths are intentionally blank; fill in before running.
rdatadir <- "" # set directory for rdata files
outputdir <- "" # set directory for non-rdata output files
datadir <- "" # set directory for input data (efficiency/HHD analysis)
datadir2 <- "" # set directory for RECS dataset
datadir3 <- "" # set directory for census data
# libraries
library(ggplot2)
library(gridExtra)
library(cowplot)
# load rdata files
# NOTE(review): these load() calls use relative paths and never setwd(rdatadir);
# the working directory must already contain the .RDATA files -- confirm intended.
# Expected to provide (based on later use): finaldata, actdata, modperf,
# list_avgHHMembers.
load("06_allvars_All_ProjectedFractions.RDATA")
load("03_ProcessedInputData.RDATA")
load("05.1_GeneratedFractionalDistributions.RDATA")
########################### PERCENT CHANGE #####################################
# percent change after 1.5 degrees (pos == % increase) [new-old/old]
# indices[[m]] = (first, last) month positions of the 15-yr window in which
# climate model m reaches the +1.5 degC threshold; positions 1:60 = baseline.
indices <- list(c(125,184),c(61,120),c(65,124),c(65,124),c(105,164))
# indices based on years each model reaches threshold, to check: allmonths[c(1,60)] == 200506, 201909 (baseline)
# models: GDFL (2036-2050), HADGEM (2020-2034), IPSL (2021-2035), MIROC (2021-2035), NORESM (2031-2045)
# perc15[[d]]: the 5 per-model percent changes for domain d
perc15 <- list()
for (d in 1:27) {
change <- c()
for (m in 1:5) {
# baseline mean over the first 60 months of the series
base <- mean(finaldata[[d]][[m]][1:60])
change[m] <- ((mean(finaldata[[d]][[m]][indices[[m]][1]:indices[[m]][2]])-base)/base)*100
}
perc15[[d]] <- change
}
# percent change after 2.0 degrees (pos == % increase) [new-old/old]
# NOTE: 'indices' is reused (overwritten) here with the +2.0 degC windows.
indices <- list(c(193,252),c(105,164),c(113,172),c(109,168),c(165,224))
# models: GDFL (2053-2067), HADGEM (2031-2045), IPSL (2033-2047), MIROC (2032-2046), NORESM (2046-2060)
perc20 <- list()
for (d in 1:27) {
change <- c()
for (m in 1:5) {
base <- mean(finaldata[[d]][[m]][1:60])
change[m] <- ((mean(finaldata[[d]][[m]][indices[[m]][1]:indices[[m]][2]])-base)/base)*100
}
perc20[[d]] <- change
}
# plots
# percent change per seasons (all models combined)
# Per-domain summary statistics across the 5 models, for both thresholds.
avgs15 <- c(); meds15 <- c(); min15 <- c(); max15 <- c(); sd15 <- c()
avgs20 <- c(); meds20 <- c(); min20 <- c(); max20 <- c(); sd20 <- c()
for (i in 1:27) {
avgs15[i] <- mean(perc15[[i]])
meds15[i] <- median(perc15[[i]])
min15[i] <- min(perc15[[i]])
max15[i] <- max(perc15[[i]])
sd15[i] <- sd(perc15[[i]])
avgs20[i] <- mean(perc20[[i]])
meds20[i] <- median(perc20[[i]])
min20[i] <- min(perc20[[i]])
max20[i] <- max(perc20[[i]])
sd20[i] <- sd(perc20[[i]])
}
# Long-format vectors: first 27 entries = 1.5 degC, last 27 = 2.0 degC
pchange <- c(meds15,meds20)
mins <- c(min15,min20)
maxs <- c(max15,max20)
sds <- c(sd15,sd20)
Threshold <- c(rep("1.5 degC",27),rep("2.0 degC",27))
domains <- c(rep(1:27, 2))
# Human-readable labels for the 27 analysis domains (state groupings)
domainnames <- c('CT, ME, NH, RI, VT','MA','NY','NJ','PA','IL','IN, OH', 'MI','WI','IA, MN, ND, SD',
'KS, NE','MO','VA','DE, DC, MD, WV','GA','NC, SC','FL','AL, KY, MS','TN','AR, LA, OK',
'TX','CO','ID, MT, UT, WY','AZ','NV, NM','CA','OR, WA')
dnames <- rep(domainnames,2)
pcdata <- data.frame(pchange,Threshold,mins,maxs,dnames, sds)
# Grouped bar chart of median percent change with +/- 1 SD error bars
ggplot(pcdata, aes(x=dnames, y=pchange, fill=Threshold)) + geom_bar(position="dodge", stat="identity") +
geom_errorbar(aes(x=dnames, ymin=pchange-sds, ymax=pchange+sds), width=0.5, position=position_dodge(.9)) +
xlab('Domain') + ylab('Percent Change (%)') +
theme_light(base_size=20) + theme(plot.margin=unit(c(1,1,1,2),"cm")) +
theme(axis.text.x = element_text(angle=45,hjust=1))
########################### EFFICIENCY/HOUSEHOLD DAYS ANALYSIS ########################
# EFFICIENCY
# base[d]: baseline monthly consumption for domain d, averaged over the 5 models
base <- c()
for (d in 1:27) {
basem <- c()
for (m in 1:5) {
basem[m] <- mean(finaldata[[d]][[m]][1:60])
}
base[d] <- mean(basem)
}
# +2.0 degC windows (same values as in the percent-change section above)
indices <- list(c(193,252),c(105,164),c(113,172),c(109,168),c(165,224))
# models: GDFL (2053-2067), HADGEM (2031-2045), IPSL (2033-2047), MIROC (2032-2046), NORESM (2046-2060)
# kwh20[d]: projected consumption under +2.0 degC, averaged over the 5 models
kwh20 <- c()
for (d in 1:27) {
mods <- c()
for (m in 1:5) {
mods[m] <- mean(finaldata[[d]][[m]][indices[[m]][1]:indices[[m]][2]])
}
kwh20[d] <- mean(mods)
}
# change in consumption between the +2.0 degC window and the baseline
delt_base20 <- kwh20 - base
# Equations from: McNeil & Letschert (2008) Future air conditioning energy consumption...
# load data
setwd(datadir)
gspdata <- read.csv('gsp_percap2009.csv') # from Bureau of Economic Analysis
setwd(datadir2)
recs2009 <- read.csv(file = "05_recs2009_public.csv")
# mean cooling degree days (base 65F) per RECS reportable domain
CDDdata <- cbind.data.frame(recs2009$REPORTABLE_DOMAIN, recs2009$CDD65)
cdd <- aggregate(CDDdata[,2], by=list(CDDdata[,1]), FUN = mean)
# convert from states to domains
# domains[i] = analysis domain of the i-th state row; assumes gspdata's rows
# are ordered to match this vector -- TODO(review) confirm against the CSV
domains <- c(18, 20, 24, 26, 22, 1, 14, 14, 17, 15, 10, 23, 6, 7, 11, 18, 20, 2, 14, 1, 8, 10, 12, 18, 23, 16, 10, 11, 1, 4, 25, 25, 3, 7, 20, 27, 5, 1, 16, 10, 19, 21, 23, 13, 1, 27, 9, 14, 23)
data <- cbind.data.frame(domains, gspdata)
# NOTE(review): column 4 is assumed to be the per-capita GSP column -- verify CSV layout
incomedata <- aggregate(data[,4], by=list(domains), FUN = mean)
# adjust annual per capita GSP for Purchase Power Parity (cost of living)
income_adj <- 20.9 * incomedata[,2]^0.7088
# convert annual per capita GSP to monthly per household GSP
Domain_avgHHMembers <- unlist(list_avgHHMembers)
income <- income_adj/12 * unname(Domain_avgHHMembers)
# annual baseline unit energy consumption (kwh)
# regression coefficients from McNeil & Letschert (2008)
UEC <- 0.345*income + 1.44*cdd[,2] - 823
# monthly UEC
monUEC <- UEC/12
# baseline summer efficiency
eff_baseline <- base/monUEC*100
# future summer efficiency
eff_future <- kwh20/monUEC*100 # 2.0 degrees
# difference
# required increase in efficiency to offset the projected consumption growth
eff_difference <- eff_future - eff_baseline
# figure
domainnames <- c('CT, ME, NH, RI, VT','MA','NY','NJ','PA','IL','IN, OH', 'MI','WI','IA, MN, ND, SD',
'KS, NE','MO','VA','DE, DC, MD, WV','GA','NC, SC','FL','AL, KY, MS','TN','AR, LA, OK',
'TX','CO','ID, MT, UT, WY','AZ','NV, NM','CA','OR, WA')
plotdata <- data.frame(domainnames, eff_difference)
# bar chart of required efficiency increase; x labels suppressed (shown in p2/p3)
p1 <- ggplot(plotdata, aes(x=domainnames, y=eff_difference)) + geom_bar(stat = 'identity' , position = 'dodge') +
xlab('') + ylab('Required Increase\nin Efficiency (%)') + theme_light() +
#theme(axis.text.x = element_text(angle = 45, hjust = 1))
theme(axis.text.x = element_blank())
# calculating sd for efficiency
# Recompute baseline (mod1..mod5) and future (Fmod1..Fmod5) means per
# individual climate model so a per-domain SD across models can be formed.
mod1 <- c(); mod2 <- c(); mod3 <- c(); mod4 <- c(); mod5 <- c()
for (d in 1:27) {
basem <- c()
for (m in 1:5) {
basem[m] <- mean(finaldata[[d]][[m]][1:60])
}
# mean() of a single element is the element itself; kept for symmetry
mod1[d] <- mean(basem[1]); mod2[d] <- mean(basem[2])
mod3[d] <- mean(basem[3]); mod4[d] <- mean(basem[4])
mod5[d] <- mean(basem[5])
}
Fmod1 <- c(); Fmod2 <- c(); Fmod3 <- c(); Fmod4 <- c(); Fmod5 <- c()
for (d in 1:27) {
mods <- c()
for (m in 1:5) {
mods[m] <- mean(finaldata[[d]][[m]][indices[[m]][1]:indices[[m]][2]])
}
Fmod1[d] <- mean(mods[1]); Fmod2[d] <- mean(mods[2])
Fmod3[d] <- mean(mods[3]); Fmod4[d] <- mean(mods[4])
Fmod5[d] <- mean(mods[5])
}
# 27 x 5 matrices: rows = domains, columns = climate models
mods <- cbind(mod1, mod2, mod3, mod4, mod5)
Fmods <- cbind(Fmod1, Fmod2, Fmod3, Fmod4, Fmod5)
# per-model efficiency differences, then SD across models for each domain
eff_d <- matrix(ncol = 5, nrow = 27)
for (i in 1:5) {
eff_b <- mods[,i]/monUEC*100
eff_f <- Fmods[,i]/monUEC*100 # 2.0 degrees
eff_d[,i] <- eff_f - eff_b
}
eff_sd <- apply(eff_d,1, sd, na.rm = TRUE)
# print SD for the domain with the largest mean efficiency difference
eff_sd[which(rowMeans(eff_d) == max(rowMeans(eff_d)))]
# HOUSEHOLD DAYS
# number of days without air conditioning per summer per household
# = consumption increase / (monthly baseline / 30 days), times 4
# (presumably 4 summer months -- TODO(review) confirm)
hhd <- delt_base20/(base/30)*4
setwd(datadir3)
# NOTE(review): stringsAsFactors = F uses the reassignable F; prefer FALSE
pop2019 <- read.csv(file = "nst-est2019-alldata.csv", header=TRUE, stringsAsFactors = F)
# keep state-level rows only (STATE >= 1 drops national/regional aggregates)
pop2019 <- subset(pop2019, STATE >= 1, select=c(NAME,POPESTIMATE2019))
pop2019 <- pop2019[-c(2,12,52),] #Remove: Alaska, Hawaii, Puerto Rico
# state abbreviations in the same row order as pop2019 after the removals above
stateids <- c('AL','AZ','AR','CA','CO','CT','DE','DC','FL','GA','ID','IL','IN','IA','KS','KY',
'LA','ME','MD','MA','MI','MN','MS','MO','MT','NE','NV','NH','NJ','NM','NY','NC','ND',
'OH','OK','OR','PA','RI','SC','SD','TN','TX','UT','VT','VA','WA','WV','WI','WY')
data <- cbind.data.frame(stateids,pop2019)
# sort alphabetically by abbreviation so rows align with 'domains' below
data <- data[order(data$stateids),]
domains <- c(18, 20, 24, 26, 22, 1, 14, 14, 17, 15, 10, 23, 6, 7, 11, 18, 20, 2, 14, 1, 8, 10, 12, 18, 23, 16, 10, 11, 1, 4, 25, 25, 3, 7, 20, 27, 5, 1, 16, 10, 19, 21, 23, 13, 1, 27, 9, 14, 23)
popdata <- cbind.data.frame(domains,data)
# total 2019 population per domain
domainpop <- aggregate(popdata[,4], by=list(domains), FUN = sum)
# approximate household count per domain (population / avg household size)
hhnum <- round(domainpop[,2]/unlist(list_avgHHMembers))
# total household-days without AC per domain
hhd_total <- hhd*hhnum
setwd(datadir)
povdata <- read.csv('povrate_data.csv')
povrate <- cbind.data.frame(domains,povdata)
# NOTE(review): columns 3:4 assumed to be 'Under.200.' and 'Total' counts -- verify CSV
domainpov <- aggregate(povdata[,c(3,4)], by=list(domains), FUN=sum)
# share of households below 200% of the poverty line, per domain
dompovrate <- domainpov$Under.200./domainpov$Total
# split total household-days between the two income classes
hhd_pov <- hhd_total*dompovrate
hhd_other <- hhd_total*(1-dompovrate)
# figure
plotdata <- data.frame(domainnames, hhd)
p2 <- ggplot(plotdata, aes(x=domainnames, y=hhd, label = round(hhd))) + #geom_bar(stat = 'identity' , position = 'dodge') +
geom_col() + geom_text(nudge_y = 1, color = 'black') +
xlab('') + ylab('Days without Air Conditioning per\nSummer per Household') + theme_light() +
theme(text = element_text(size = 18), axis.text.x = element_text(angle = 45, hjust = 1))
# stacked bars in millions of household-days; the 'z' prefix presumably
# forces the stacking/legend order of the factor levels -- confirm
hhd2 <- c(hhd_pov, hhd_other)/1000000
class <- c(rep('zImpoverished',27),rep('Other',27))
plotdata2 <- data.frame(domainnames, hhd2, class)
# avoid scientific notation on the y axis
options(scipen=10000)
p3 <- ggplot(plotdata2, aes(x=domainnames, y=hhd2, fill=class)) + geom_bar(stat = 'identity' , position = 'stack') +
xlab('') + ylab('Millions of Household-Days\nwithout Air Conditioning') + theme_light() +
theme(axis.text.x = element_text(angle = 45, hjust = 1), text = element_text(size = 18)) +
scale_fill_manual(name = 'Household Income', labels = c('Above 200% of the PL','Below 200% of the PL'), values = c('#a6cee3','#1f78b4'))+
theme(legend.position = 'bottom')
# COMBINE FIGURES
plot_grid(p1,p2,p3,align = 'v',nrow = 3, rel_heights = c(2/7,2/7,3/7))
########################### ACTUAL VS PREDICTED DATA ########################
# Compare modeled baseline consumption ('base') with observed values
# ('actdata', from the loaded RDATA) and plot model performance metrics.
actual <- c()
for (i in 1:27) {
actual[i] <- mean(actdata[[i]])
}
domainnames <- c('CT, ME, NH, RI, VT','MA','NY','NJ','PA','IL','IN, OH', 'MI','WI','IA, MN, ND, SD',
'KS, NE','MO','VA','DE, DC, MD, WV','GA','NC, SC','FL','AL, KY, MS','TN','AR, LA, OK',
'TX','CO','ID, MT, UT, WY','AZ','NV, NM','CA','OR, WA')
# positive anomaly = prediction exceeds observation
anomalies <- base - actual
anom_perc <- (base-actual)/actual*100
# NOTE(review): columns 9 and 11 of modperf assumed to hold RMSE and MAE -- confirm upstream
errors <- c(modperf[,9],modperf[,11])
Measure <- rep(c('RMSE','MAE'),each = 27)
domain2 <- rep(domainnames, 2)
kwhdata <- data.frame(domainnames, actual)
preddata <- data.frame(domainnames,base)
diffdata <- data.frame(domainnames, anomalies)
diffdata2 <- data.frame(domainnames, anom_perc)
errdata <- data.frame(errors, Measure, domain2)
# p1: observed; p2: predicted; p3/p3b: absolute / percent difference; p4: errors
p1 <- ggplot(kwhdata, aes(x=domainnames, y=actual)) + geom_bar(stat='identity') +
theme_light() + xlab('') + ylab('Actual Values\n(kwh/household)') + theme(axis.text.x=element_blank())
p2 <- ggplot(preddata, aes(x=domainnames, y=base)) + geom_bar(stat='identity') +
theme_light() + xlab('') + ylab('Predicted Values\n(kwh/household)') + theme(axis.text.x=element_blank())
p3 <- ggplot(diffdata, aes(x=domainnames, y=anomalies, fill = anomalies > 0)) + geom_bar(stat='identity') +
theme_light() + xlab('') + ylab('Difference\n(kwh/household)') + theme(axis.text.x=element_blank()) +
theme(legend.position = "none")
p3b <- ggplot(diffdata2, aes(x=domainnames, y=anom_perc, fill = anomalies > 0)) + geom_bar(stat='identity') +
theme_light() + xlab('') + ylab('Difference (%)') + theme(axis.text.x=element_blank()) +
theme(legend.position = "none")
p4 <- ggplot(errdata, aes(x=domain2, y=errors, fill = Measure)) + geom_bar(stat='identity', position = 'dodge') +
theme_light() + xlab('') + ylab('Errors\n(kwh/household)') + #theme(axis.text.x=element_blank()) +
theme(legend.position = "bottom") +
theme(axis.text.x = element_text(angle = 45, hjust = 1))
# final 4-panel figure; note the p3 absolute-difference panel is built but not used
plot_grid(p1,p2,p3b,p4,align = 'v',nrow = 4, rel_heights = c(1/5,1/5,1/5,2/5))
| /rcode/07_Miscellaneous.R | permissive | robringer/HouseholdAirConditioningUse | R | false | false | 11,563 | r | # Household Air Conditioning Analysis, Part 7
# Written By: Vineeth CR, Renee Obringer
# Last Ran: 30 April 2021
rm(list = ls())
rdatadir <- "" # set directory for rdata files
outputdir <- "" # set directory for non-rdata output files
datadir <- "" # set directory for input data (efficiency/HHD analysis)
datadir2 <- "" # set directory for RECS dataset
datadir3 <- "" # set directory for census data
# libraries
library(ggplot2)
library(gridExtra)
library(cowplot)
# load rdata files
load("06_allvars_All_ProjectedFractions.RDATA")
load("03_ProcessedInputData.RDATA")
load("05.1_GeneratedFractionalDistributions.RDATA")
########################### PERCENT CHANGE #####################################
# percent change after 1.5 degrees (pos == % increase) [new-old/old]
indices <- list(c(125,184),c(61,120),c(65,124),c(65,124),c(105,164))
# indices based on years each model reaches threshold, to check: allmonths[c(1,60)] == 200506, 201909 (baseline)
# models: GDFL (2036-2050), HADGEM (2020-2034), IPSL (2021-2035), MIROC (2021-2035), NORESM (2031-2045)
perc15 <- list()
for (d in 1:27) {
change <- c()
for (m in 1:5) {
base <- mean(finaldata[[d]][[m]][1:60])
change[m] <- ((mean(finaldata[[d]][[m]][indices[[m]][1]:indices[[m]][2]])-base)/base)*100
}
perc15[[d]] <- change
}
# percent change after 2.0 degrees (pos == % increase) [new-old/old]
indices <- list(c(193,252),c(105,164),c(113,172),c(109,168),c(165,224))
# models: GDFL (2053-2067), HADGEM (2031-2045), IPSL (2033-2047), MIROC (2032-2046), NORESM (2046-2060)
perc20 <- list()
for (d in 1:27) {
change <- c()
for (m in 1:5) {
base <- mean(finaldata[[d]][[m]][1:60])
change[m] <- ((mean(finaldata[[d]][[m]][indices[[m]][1]:indices[[m]][2]])-base)/base)*100
}
perc20[[d]] <- change
}
# plots
# percent change per seasons (all models combined)
avgs15 <- c(); meds15 <- c(); min15 <- c(); max15 <- c(); sd15 <- c()
avgs20 <- c(); meds20 <- c(); min20 <- c(); max20 <- c(); sd20 <- c()
for (i in 1:27) {
avgs15[i] <- mean(perc15[[i]])
meds15[i] <- median(perc15[[i]])
min15[i] <- min(perc15[[i]])
max15[i] <- max(perc15[[i]])
sd15[i] <- sd(perc15[[i]])
avgs20[i] <- mean(perc20[[i]])
meds20[i] <- median(perc20[[i]])
min20[i] <- min(perc20[[i]])
max20[i] <- max(perc20[[i]])
sd20[i] <- sd(perc20[[i]])
}
pchange <- c(meds15,meds20)
mins <- c(min15,min20)
maxs <- c(max15,max20)
sds <- c(sd15,sd20)
Threshold <- c(rep("1.5 degC",27),rep("2.0 degC",27))
domains <- c(rep(1:27, 2))
domainnames <- c('CT, ME, NH, RI, VT','MA','NY','NJ','PA','IL','IN, OH', 'MI','WI','IA, MN, ND, SD',
'KS, NE','MO','VA','DE, DC, MD, WV','GA','NC, SC','FL','AL, KY, MS','TN','AR, LA, OK',
'TX','CO','ID, MT, UT, WY','AZ','NV, NM','CA','OR, WA')
dnames <- rep(domainnames,2)
pcdata <- data.frame(pchange,Threshold,mins,maxs,dnames, sds)
ggplot(pcdata, aes(x=dnames, y=pchange, fill=Threshold)) + geom_bar(position="dodge", stat="identity") +
geom_errorbar(aes(x=dnames, ymin=pchange-sds, ymax=pchange+sds), width=0.5, position=position_dodge(.9)) +
xlab('Domain') + ylab('Percent Change (%)') +
theme_light(base_size=20) + theme(plot.margin=unit(c(1,1,1,2),"cm")) +
theme(axis.text.x = element_text(angle=45,hjust=1))
########################### EFFICIENCY/HOUSEHOLD DAYS ANALYSIS ########################
# EFFICIENCY
base <- c()
for (d in 1:27) {
basem <- c()
for (m in 1:5) {
basem[m] <- mean(finaldata[[d]][[m]][1:60])
}
base[d] <- mean(basem)
}
indices <- list(c(193,252),c(105,164),c(113,172),c(109,168),c(165,224))
# models: GDFL (2053-2067), HADGEM (2031-2045), IPSL (2033-2047), MIROC (2032-2046), NORESM (2046-2060)
kwh20 <- c()
for (d in 1:27) {
mods <- c()
for (m in 1:5) {
mods[m] <- mean(finaldata[[d]][[m]][indices[[m]][1]:indices[[m]][2]])
}
kwh20[d] <- mean(mods)
}
delt_base20 <- kwh20 - base
# Equations from: McNeil & Letschert (2008) Future air conditioning energy consumption...
# load data
setwd(datadir)
gspdata <- read.csv('gsp_percap2009.csv') # from Bureau of Economic Analysis
setwd(datadir2)
recs2009 <- read.csv(file = "05_recs2009_public.csv")
CDDdata <- cbind.data.frame(recs2009$REPORTABLE_DOMAIN, recs2009$CDD65)
cdd <- aggregate(CDDdata[,2], by=list(CDDdata[,1]), FUN = mean)
# convert from states to domains
domains <- c(18, 20, 24, 26, 22, 1, 14, 14, 17, 15, 10, 23, 6, 7, 11, 18, 20, 2, 14, 1, 8, 10, 12, 18, 23, 16, 10, 11, 1, 4, 25, 25, 3, 7, 20, 27, 5, 1, 16, 10, 19, 21, 23, 13, 1, 27, 9, 14, 23)
data <- cbind.data.frame(domains, gspdata)
incomedata <- aggregate(data[,4], by=list(domains), FUN = mean)
# adjust annual per capita GSP for Purchase Power Parity (cost of living)
income_adj <- 20.9 * incomedata[,2]^0.7088
# convert annual per capita GSP to monthly per household GSP
Domain_avgHHMembers <- unlist(list_avgHHMembers)
income <- income_adj/12 * unname(Domain_avgHHMembers)
# annual baseline unit energy consumption (kwh)
UEC <- 0.345*income + 1.44*cdd[,2] - 823
# monthly UEC
monUEC <- UEC/12
# baseline summer efficiency
eff_baseline <- base/monUEC*100
# future summer efficiency
eff_future <- kwh20/monUEC*100 # 2.0 degrees
# difference
eff_difference <- eff_future - eff_baseline
# figure
domainnames <- c('CT, ME, NH, RI, VT','MA','NY','NJ','PA','IL','IN, OH', 'MI','WI','IA, MN, ND, SD',
'KS, NE','MO','VA','DE, DC, MD, WV','GA','NC, SC','FL','AL, KY, MS','TN','AR, LA, OK',
'TX','CO','ID, MT, UT, WY','AZ','NV, NM','CA','OR, WA')
plotdata <- data.frame(domainnames, eff_difference)
p1 <- ggplot(plotdata, aes(x=domainnames, y=eff_difference)) + geom_bar(stat = 'identity' , position = 'dodge') +
xlab('') + ylab('Required Increase\nin Efficiency (%)') + theme_light() +
#theme(axis.text.x = element_text(angle = 45, hjust = 1))
theme(axis.text.x = element_blank())
# calculating sd for efficiency
mod1 <- c(); mod2 <- c(); mod3 <- c(); mod4 <- c(); mod5 <- c()
for (d in 1:27) {
basem <- c()
for (m in 1:5) {
basem[m] <- mean(finaldata[[d]][[m]][1:60])
}
mod1[d] <- mean(basem[1]); mod2[d] <- mean(basem[2])
mod3[d] <- mean(basem[3]); mod4[d] <- mean(basem[4])
mod5[d] <- mean(basem[5])
}
Fmod1 <- c(); Fmod2 <- c(); Fmod3 <- c(); Fmod4 <- c(); Fmod5 <- c()
for (d in 1:27) {
mods <- c()
for (m in 1:5) {
mods[m] <- mean(finaldata[[d]][[m]][indices[[m]][1]:indices[[m]][2]])
}
Fmod1[d] <- mean(mods[1]); Fmod2[d] <- mean(mods[2])
Fmod3[d] <- mean(mods[3]); Fmod4[d] <- mean(mods[4])
Fmod5[d] <- mean(mods[5])
}
mods <- cbind(mod1, mod2, mod3, mod4, mod5)
Fmods <- cbind(Fmod1, Fmod2, Fmod3, Fmod4, Fmod5)
eff_d <- matrix(ncol = 5, nrow = 27)
for (i in 1:5) {
eff_b <- mods[,i]/monUEC*100
eff_f <- Fmods[,i]/monUEC*100 # 2.0 degrees
eff_d[,i] <- eff_f - eff_b
}
eff_sd <- apply(eff_d,1, sd, na.rm = TRUE)
eff_sd[which(rowMeans(eff_d) == max(rowMeans(eff_d)))]
# HOUSEHOLD DAYS
# number of days without air conditioning per summer per household
hhd <- delt_base20/(base/30)*4
setwd(datadir3)
pop2019 <- read.csv(file = "nst-est2019-alldata.csv", header=TRUE, stringsAsFactors = F)
pop2019 <- subset(pop2019, STATE >= 1, select=c(NAME,POPESTIMATE2019))
pop2019 <- pop2019[-c(2,12,52),] #Remove: Alaska, Hawaii, Puerto Rico
stateids <- c('AL','AZ','AR','CA','CO','CT','DE','DC','FL','GA','ID','IL','IN','IA','KS','KY',
'LA','ME','MD','MA','MI','MN','MS','MO','MT','NE','NV','NH','NJ','NM','NY','NC','ND',
'OH','OK','OR','PA','RI','SC','SD','TN','TX','UT','VT','VA','WA','WV','WI','WY')
data <- cbind.data.frame(stateids,pop2019)
data <- data[order(data$stateids),]
domains <- c(18, 20, 24, 26, 22, 1, 14, 14, 17, 15, 10, 23, 6, 7, 11, 18, 20, 2, 14, 1, 8, 10, 12, 18, 23, 16, 10, 11, 1, 4, 25, 25, 3, 7, 20, 27, 5, 1, 16, 10, 19, 21, 23, 13, 1, 27, 9, 14, 23)
popdata <- cbind.data.frame(domains,data)
domainpop <- aggregate(popdata[,4], by=list(domains), FUN = sum)
hhnum <- round(domainpop[,2]/unlist(list_avgHHMembers))
hhd_total <- hhd*hhnum
setwd(datadir)
povdata <- read.csv('povrate_data.csv')
povrate <- cbind.data.frame(domains,povdata)
domainpov <- aggregate(povdata[,c(3,4)], by=list(domains), FUN=sum)
dompovrate <- domainpov$Under.200./domainpov$Total
hhd_pov <- hhd_total*dompovrate
hhd_other <- hhd_total*(1-dompovrate)
# figure
plotdata <- data.frame(domainnames, hhd)
p2 <- ggplot(plotdata, aes(x=domainnames, y=hhd, label = round(hhd))) + #geom_bar(stat = 'identity' , position = 'dodge') +
geom_col() + geom_text(nudge_y = 1, color = 'black') +
xlab('') + ylab('Days without Air Conditioning per\nSummer per Household') + theme_light() +
theme(text = element_text(size = 18), axis.text.x = element_text(angle = 45, hjust = 1))
hhd2 <- c(hhd_pov, hhd_other)/1000000
class <- c(rep('zImpoverished',27),rep('Other',27))
plotdata2 <- data.frame(domainnames, hhd2, class)
options(scipen=10000)
p3 <- ggplot(plotdata2, aes(x=domainnames, y=hhd2, fill=class)) + geom_bar(stat = 'identity' , position = 'stack') +
xlab('') + ylab('Millions of Household-Days\nwithout Air Conditioning') + theme_light() +
theme(axis.text.x = element_text(angle = 45, hjust = 1), text = element_text(size = 18)) +
scale_fill_manual(name = 'Household Income', labels = c('Above 200% of the PL','Below 200% of the PL'), values = c('#a6cee3','#1f78b4'))+
theme(legend.position = 'bottom')
# COMBINE FIGURES
plot_grid(p1,p2,p3,align = 'v',nrow = 3, rel_heights = c(2/7,2/7,3/7))
########################### ACTUAL VS PREDICTED DATA ########################
actual <- c()
for (i in 1:27) {
actual[i] <- mean(actdata[[i]])
}
domainnames <- c('CT, ME, NH, RI, VT','MA','NY','NJ','PA','IL','IN, OH', 'MI','WI','IA, MN, ND, SD',
'KS, NE','MO','VA','DE, DC, MD, WV','GA','NC, SC','FL','AL, KY, MS','TN','AR, LA, OK',
'TX','CO','ID, MT, UT, WY','AZ','NV, NM','CA','OR, WA')
anomalies <- base - actual
anom_perc <- (base-actual)/actual*100
errors <- c(modperf[,9],modperf[,11])
Measure <- rep(c('RMSE','MAE'),each = 27)
domain2 <- rep(domainnames, 2)
kwhdata <- data.frame(domainnames, actual)
preddata <- data.frame(domainnames,base)
diffdata <- data.frame(domainnames, anomalies)
diffdata2 <- data.frame(domainnames, anom_perc)
errdata <- data.frame(errors, Measure, domain2)
p1 <- ggplot(kwhdata, aes(x=domainnames, y=actual)) + geom_bar(stat='identity') +
theme_light() + xlab('') + ylab('Actual Values\n(kwh/household)') + theme(axis.text.x=element_blank())
p2 <- ggplot(preddata, aes(x=domainnames, y=base)) + geom_bar(stat='identity') +
theme_light() + xlab('') + ylab('Predicted Values\n(kwh/household)') + theme(axis.text.x=element_blank())
p3 <- ggplot(diffdata, aes(x=domainnames, y=anomalies, fill = anomalies > 0)) + geom_bar(stat='identity') +
theme_light() + xlab('') + ylab('Difference\n(kwh/household)') + theme(axis.text.x=element_blank()) +
theme(legend.position = "none")
p3b <- ggplot(diffdata2, aes(x=domainnames, y=anom_perc, fill = anomalies > 0)) + geom_bar(stat='identity') +
theme_light() + xlab('') + ylab('Difference (%)') + theme(axis.text.x=element_blank()) +
theme(legend.position = "none")
p4 <- ggplot(errdata, aes(x=domain2, y=errors, fill = Measure)) + geom_bar(stat='identity', position = 'dodge') +
theme_light() + xlab('') + ylab('Errors\n(kwh/household)') + #theme(axis.text.x=element_blank()) +
theme(legend.position = "bottom") +
theme(axis.text.x = element_text(angle = 45, hjust = 1))
plot_grid(p1,p2,p3b,p4,align = 'v',nrow = 4, rel_heights = c(1/5,1/5,1/5,2/5))
|
# Regression tests for robustbase::lmrob() and helpers.
library(robustbase)
# Matrix's test tools supply assertError(), assertWarning(), etc.
source(system.file("test-tools-1.R", package="Matrix", mustWork=TRUE))
##-> assertError(), etc
set.seed(1) # since now .Random.seed is used by default!
## EX 1
data(coleman)
## "empty model" (not really a lot of sense)
(m0 <- lmrob(Y ~ 0, data = coleman))
summary(m0)
# with no coefficients the residuals must equal the raw response
stopifnot(is.numeric(coef(m0)), length(coef(m0)) == 0,
residuals(m0) == coleman[,"Y"])
## "Intercept" only: robust mean
(m1 <- lmrob(Y ~ 1, data = coleman))
summary(m1)
# reference value: robust location estimate of Y
stopifnot(all.equal(coef(m1),
c("(Intercept)" = 35.56048875388), tol = 1e-11))
# full model, with tightened refinement / relative convergence tolerances
(mC <- lmrob(Y ~ ., data = coleman,
control = lmrob.control(refine.tol = 1e-8, rel.tol = 1e-9)))
summary(mC)
## Values will change once we use R's random number generator !
stopifnot(
all.equal(unname(coef(mC)),
c(30.50232, -1.666147, 0.08425381, 0.6677366, 1.167777, -4.136569),
tolerance = 2e-7)# 6.112 e-8 (32-b)
)
dput(signif(unname(coef(mC)), 7))
## 64b(0.2-0): c(30.50232, -1.666147, 0.08425381, 0.6677366, 1.167777, -4.136569)
## 32b(0.2-0): "exactly" same !
## Full precision:
dput(unname(coef(mC)))
## 2012-06-04:
## 32-bit:c(30.5023184450149, -1.66614687548007, 0.0842538074792178, 0.667736590070332, 1.16777744029117, -4.13656885405815)
## 64-bit:c(30.5023184450148, -1.66614687548008, 0.0842538074792178, 0.667736590070332, 1.16777744029117, -4.13656885405814)
##
## 32-bit:c(30.5023183940104, -1.66614687550933, 0.0842538074635567, 0.667736589938547, 1.16777744089398, -4.13656884777543)
## 64-bit:c(30.5023184150851, -1.66614687537736, 0.0842538074722959, 0.667736589980183, 1.16777744061092, -4.1365688503035)
str(mC)
## EX 2
## Generate a regression sample with a cluster of outliers.
##
## n    : number of observations (>= 1)
## p    : number of predictors   (>= 1)
## n0   : number of outlying observations (0 <= n0 <= n)
## y0,x0: approximate location of the outlier cluster in (x1, y)
## beta : true coefficient vector (default: all ones)
##
## Returns list(x = n x p design matrix, y = response vector) where the
## first n0 rows are replaced by outliers near (x1, y) = (x0, y0).
gen <- function(n,p, n0, y0, x0, beta = rep(1, p))
{
stopifnot(n >= 1, p >= 1, n0 >= 0, n0 <= n, length(beta) == p)
x <- matrix(rnorm(n*p),n,p) # iid x's
y <- x %*% beta + rnorm(n)
xc <- matrix(0,n0,p)
xc[,1] <- x0
xc <- xc + 0.1*matrix(rnorm(n0*p),n0,p)
## seq_len(n0), not 1:n0: with n0 = 0 (allowed above), 1:n0 is c(1, 0)
## and the assignment errors instead of leaving the sample untouched
x[seq_len(n0),] <- xc
y[seq_len(n0)] <- y0 + .001*rnorm(n0)
list(x=x, y=y)
}
## generate --a sample of n observations with p variables
## and 10% of outliers near (x1,y) = (10,10)
n <- 500 ; n0 <- n %/% 10
p <- 7 ## p = 20 is more impressive but too slow for "standard test"
set.seed(17)
a <- gen(n=n, p=p, n0= n0, y0=10, x0=10)
# outliers (first n0 points) in red, clean data in black
plot(a$x[,1], a$y, col = c(rep(2, n0), rep(1, n-n0)))
system.time( m1 <- lmrob(y~x, data = a,
control = lmrob.control(compute.rd = TRUE, trace.lev=4)))
plot(m1, ask=FALSE)
##-> currently 5 plots; MM:I don't like #3 (Response vs fitted)
# compare the initial S-estimator's stored residuals with recomputed ones
S1 <- m1$init.S
resS1 <- drop(a$y - model.matrix(m1, data=a) %*% coef(S1))
all.equal(S1$residuals, resS1)## hmm, but still close
## "Mean relative difference: 2.655326e-07"
ctr.t3 <- lmrob.control(trace.lev = 3)
# scale-only S estimation on the stored vs. recomputed residuals
(mS <- lmrob.S(x=a$x, y=residuals(S1), only.scale=TRUE, control = ctr.t3))
all.equal(S1$scale, mS)
## "Mean relative difference: 0.003015849" -- too different, why?
(mS. <- lmrob.S(x=a$x, y=resS1, only.scale=TRUE, control = ctr.t3))
all.equal(mS, mS., tol=0)# 2.401 e -10 -- ok/perfect
stopifnot(all.equal(mS, mS.),
all.equal(mS, S1$scale, tol = 0.008)) # at least that
## don't compute robust distances --> faster by factor of two:
system.time(m2 <- lmrob(y~x, data = a,
control = lmrob.control(compute.rd = FALSE)))
## ==> half of the CPU time is spent in covMcd()!
(sm2 <- summary(m2))
# side-by-side comparison with ordinary least squares
l1 <- lm(y~x, data = a)
cbind(robust = coef(sm2)[,1:2],
lm = coef(summary(l1))[,1:2])
# direct lmrob.S() calls with explicit (non-default) control settings
m2.S1 <- with(a, lmrob.S(cbind(1,x), y, trace.lev = 2,
## trace.lev = 2 : quite a bit of output
control= lmrob.control(seed = .Random.seed,
nRes = 80, k.max = 20, refine.tol = 1e-4)))
S.ctrl <- lmrob.control(seed = .Random.seed,## << keeps .Random.seed unchanged
nResample = 1000, best.r.s = 15, refine.tol = 1e-9)
m2.S <- with(a, lmrob.S(cbind(1,x), y, control = S.ctrl, trace.lev = 1))
str(m2.S)
##--- Now use n > 2000 --> so we use C internal fast_s_large_n(...)
n <- 2500 ; n0 <- n %/% 10
a2 <- gen(n=n, p = 3, n0= n0, y0=10, x0=10)
plot(a2$x[,1], a2$y, col = c(rep(2, n0), rep(1, n-n0)))
rs <- .Random.seed
system.time( m3 <- lmrob(y~x, data = a2) )
m3
nrs <- .Random.seed # <-- to check that using 'seed' keeps .Random.seed
system.time( m4 <- lmrob(y~x, data = a2, seed = rs, compute.rd = FALSE))
(sm4 <- summary(m4))
## random seed must be the same because we used 'seed = *' :
stopifnot(nrs == .Random.seed, identical(coef(m3), coef(m4)))
dput(signif(cf <- unname(coef(m3)), 7))
## 2012-06-04:c(-0.05108914, 1.005971, 1.003201, 0.9833263) - 32 AND 64 bit
##
## 0.2-0: c(0.007446546, 1.000712, 1.027921, 0.9896527)
## 0.2-1: c(0.03148659, 0.9980933, 1.016364, 1.03243)
## both for 32 and 64 bit
dput(signif(100 * (sd <- unname(coef(sm4)[, "Std. Error"])), 7))
## 2012-06-04:c(2.213815, 0.2864678, 2.202318, 2.180886) - 32 AND 64 bit
##
## 0.2-0: c(2.219388, 0.274644, 2.196982, 2.26253)
## 0.2-1: c(2.194914, 0.2737579, 2.371728, 2.206261)
## both for 32 and 64 bit
stopifnot(
all.equal(cf, c(-0.05108914, 1.00597115, 1.00320052, 0.98332632), tolerance= 7e-7)
, # ... e-7 needed on 64b
all.equal(100*sd,c(2.2138147, 0.2864678, 2.2023182, 2.1808862),tolerance= 7e-7)
) # 1.334 e-7 needed on 64b
cat('Time elapsed: ', proc.time(),'\n') # "stats"
## rm(a,m1, m2, m3, m4, sm2, l1)
## Small examples from R-SIG-robust
## First example from René Locher :
dat1 <- data.frame(lconc= log(c(21.8, 23.7, 12.2, 38.5, 21, 38.9)),
dist = c( 100, 180, 280, 30, 220, 6))
# with the default refine.tol the initial S estimator does not converge here
m5 <- lmrob(lconc ~ dist, data = dat1)
## Warning messages:
## ... S refinements did not converge (to tol=1e-07) in 200 iterations
## " " "
m5$init.S$converged # FALSE
# a looser refinement tolerance lets the S step converge
m5. <- lmrob(lconc ~ dist, data = dat1,
control = lmrob.control(refine.tol = 1e-5))
m5.$init.S$converged # TRUE
## gives TRUE as the IRWLS iterations after the lmrob.S() have converged.
## 2nd example from René Locher , 6 Jun 2007
dat2 <- data.frame(lconc=log(c(29.5,40.1,21.1,25.3,27.3,25.2,26.9,19.1,16.4)),
dist = c(520, 1480,1780, 740, 540,1050,1100,1640,1860))
res2 <- lmrob(lconc~dist, data = dat2)
## Used to give Warning messages:
## 1: rwls(): not converged in 1000 lambda iterations
## ...
## 4: rwls(): ............
# refit with verbose tracing
res2 <- lmrob(lconc~dist, data = dat2, trace.lev = 3)
## -------------
summary(res2)
# model matrix: 9 observations x (intercept + dist)
stopifnot(dim(model.matrix(res2)) == c(9,2))
## Check predict():
dd <- seq(300, 2000, by = 50)
with(dat2, plot(dist, lconc, pch=20, cex=2, xlim = range(dd)))
new.d <- data.frame(dist=dd)
fit.dd <- predict(res2, new.d)
lines(dd, fit.dd, col=2, type="o")
# standard errors and confidence bands from predict.lmrob
predict(res2, new.d, se=TRUE)$se.fit
matlines(dd, predict(res2, new.d, interval="confidence")[, 2:3], col=3)
## Check handling of X of not full rank
## Build an n x 4 design whose 4th column is an exact linear combination of
## columns 2 and 3 (rank-deficient model matrix) and whose 1st column is a
## factor; lmrob() must still return a fitted "lmrob" object, not fail.
test <- function(n, ...) {
  dsgn <- matrix(c(rep(1:3, length.out = n), rnorm(2 * n)), n, 4)
  resp <- rnorm(n)
  dsgn[, 4] <- dsgn[, 2] + dsgn[, 3]
  dsgn <- data.frame(dsgn)
  dsgn$X1 <- factor(dsgn$X1)
  fit <- suppressWarnings(try(lmrob(resp ~ ., dsgn, ...), silent = TRUE))
  stopifnot(is(fit, "lmrob"))
}
set.seed(0)
# exercise both internal S-algorithm code paths (n <= 2000 and n > 2000)
test(12) ## fast_S()
test(2500) ## fast_S_large_n()
test(200, trace.lev = TRUE)
## Check a case, where cov() matrix needs "posdefify":
# drop 4 observations so the coefficient covariance needs posdefify()
coleman16 <- coleman[ -c(2, 7, 16, 19),]
(m16 <- lmrob(Y ~ ., data = coleman16, tuning.psi = 3.44, trace.lev = TRUE))
## failed in 0.9_0
# giving both 'setting' and an explicit 'control' must raise a warning
assertWarning(
lmrob(Y ~ ., data = coleman, setting = "KS2011", control = lmrob.control())
)
cat('Time elapsed: ', proc.time(),'\n') # "stats"
| /packrat/lib/x86_64-apple-darwin18.2.0/3.5.2/robustbase/tests/lmrob-ex12.R | no_license | teyden/asthma-research | R | false | false | 7,399 | r |
library(robustbase)
source(system.file("test-tools-1.R", package="Matrix", mustWork=TRUE))
##-> assertError(), etc
set.seed(1) # since now .Random.seed is used by default!
## EX 1
data(coleman)
## "empty model" (not really a lot of sense)
(m0 <- lmrob(Y ~ 0, data = coleman))
summary(m0)
stopifnot(is.numeric(coef(m0)), length(coef(m0)) == 0,
residuals(m0) == coleman[,"Y"])
## "Intercept" only: robust mean
(m1 <- lmrob(Y ~ 1, data = coleman))
summary(m1)
stopifnot(all.equal(coef(m1),
c("(Intercept)" = 35.56048875388), tol = 1e-11))
(mC <- lmrob(Y ~ ., data = coleman,
control = lmrob.control(refine.tol = 1e-8, rel.tol = 1e-9)))
summary(mC)
## Values will change once we use R's random number generator !
stopifnot(
all.equal(unname(coef(mC)),
c(30.50232, -1.666147, 0.08425381, 0.6677366, 1.167777, -4.136569),
tolerance = 2e-7)# 6.112 e-8 (32-b)
)
dput(signif(unname(coef(mC)), 7))
## 64b(0.2-0): c(30.50232, -1.666147, 0.08425381, 0.6677366, 1.167777, -4.136569)
## 32b(0.2-0): "exactly" same !
## Full precision:
dput(unname(coef(mC)))
## 2012-06-04:
## 32-bit:c(30.5023184450149, -1.66614687548007, 0.0842538074792178, 0.667736590070332, 1.16777744029117, -4.13656885405815)
## 64-bit:c(30.5023184450148, -1.66614687548008, 0.0842538074792178, 0.667736590070332, 1.16777744029117, -4.13656885405814)
##
## 32-bit:c(30.5023183940104, -1.66614687550933, 0.0842538074635567, 0.667736589938547, 1.16777744089398, -4.13656884777543)
## 64-bit:c(30.5023184150851, -1.66614687537736, 0.0842538074722959, 0.667736589980183, 1.16777744061092, -4.1365688503035)
str(mC)
## EX 2
## Simulate a regression data set of n observations on p covariates in which
## the first n0 rows are replaced by outliers clustered near
## (x1, y) = (x0, y0).  Returns list(x = design matrix, y = response vector).
gen <- function(n, p, n0, y0, x0, beta = rep(1, p))
{
    stopifnot(n >= 1, p >= 1, n0 >= 0, length(beta) == p)
    x <- matrix(rnorm(n*p), n, p)       # iid Gaussian design
    y <- x %*% beta + rnorm(n)          # clean responses
    xc <- matrix(0, n0, p)
    xc[,1] <- x0                        # outlier cluster centred at x1 = x0 ...
    xc <- xc + 0.1*matrix(rnorm(n0*p), n0, p)  # ... with small jitter
    ## seq_len() rather than 1:n0, so that n0 == 0 -- explicitly allowed by
    ## the stopifnot() above -- gives an empty index instead of c(1, 0),
    ## which previously made the replacement below fail.
    x[seq_len(n0),] <- xc
    y[seq_len(n0)] <- y0 + .001*rnorm(n0)
    list(x=x, y=y)
}
## generate --a sample of n observations with p variables
## and 10% of outliers near (x1,y) = (10,10)
n <- 500 ; n0 <- n %/% 10
p <- 7 ## p = 20 is more impressive but too slow for "standard test"
set.seed(17)
a <- gen(n=n, p=p, n0= n0, y0=10, x0=10)
plot(a$x[,1], a$y, col = c(rep(2, n0), rep(1, n-n0)))
system.time( m1 <- lmrob(y~x, data = a,
control = lmrob.control(compute.rd = TRUE, trace.lev=4)))
plot(m1, ask=FALSE)
##-> currently 5 plots; MM:I don't like #3 (Response vs fitted)
S1 <- m1$init.S
resS1 <- drop(a$y - model.matrix(m1, data=a) %*% coef(S1))
all.equal(S1$residuals, resS1)## hmm, but still close
## "Mean relative difference: 2.655326e-07"
ctr.t3 <- lmrob.control(trace.lev = 3)
(mS <- lmrob.S(x=a$x, y=residuals(S1), only.scale=TRUE, control = ctr.t3))
all.equal(S1$scale, mS)
## "Mean relative difference: 0.003015849" -- too different, why?
(mS. <- lmrob.S(x=a$x, y=resS1, only.scale=TRUE, control = ctr.t3))
all.equal(mS, mS., tol=0)# 2.401 e -10 -- ok/perfect
stopifnot(all.equal(mS, mS.),
all.equal(mS, S1$scale, tol = 0.008)) # at least that
## don't compute robust distances --> faster by factor of two:
system.time(m2 <- lmrob(y~x, data = a,
control = lmrob.control(compute.rd = FALSE)))
## ==> half of the CPU time is spent in covMcd()!
(sm2 <- summary(m2))
l1 <- lm(y~x, data = a)
cbind(robust = coef(sm2)[,1:2],
lm = coef(summary(l1))[,1:2])
m2.S1 <- with(a, lmrob.S(cbind(1,x), y, trace.lev = 2,
## trace.lev = 2 : quite a bit of output
control= lmrob.control(seed = .Random.seed,
nRes = 80, k.max = 20, refine.tol = 1e-4)))
S.ctrl <- lmrob.control(seed = .Random.seed,## << keeps .Random.seed unchanged
nResample = 1000, best.r.s = 15, refine.tol = 1e-9)
m2.S <- with(a, lmrob.S(cbind(1,x), y, control = S.ctrl, trace.lev = 1))
str(m2.S)
##--- Now use n > 2000 --> so we use C internal fast_s_large_n(...)
n <- 2500 ; n0 <- n %/% 10
a2 <- gen(n=n, p = 3, n0= n0, y0=10, x0=10)
plot(a2$x[,1], a2$y, col = c(rep(2, n0), rep(1, n-n0)))
rs <- .Random.seed
system.time( m3 <- lmrob(y~x, data = a2) )
m3
nrs <- .Random.seed # <-- to check that using 'seed' keeps .Random.seed
system.time( m4 <- lmrob(y~x, data = a2, seed = rs, compute.rd = FALSE))
(sm4 <- summary(m4))
## random seed must be the same because we used 'seed = *' :
stopifnot(nrs == .Random.seed, identical(coef(m3), coef(m4)))
dput(signif(cf <- unname(coef(m3)), 7))
## 2012-06-04:c(-0.05108914, 1.005971, 1.003201, 0.9833263) - 32 AND 64 bit
##
## 0.2-0: c(0.007446546, 1.000712, 1.027921, 0.9896527)
## 0.2-1: c(0.03148659, 0.9980933, 1.016364, 1.03243)
## both for 32 and 64 bit
dput(signif(100 * (sd <- unname(coef(sm4)[, "Std. Error"])), 7))
## 2012-06-04:c(2.213815, 0.2864678, 2.202318, 2.180886) - 32 AND 64 bit
##
## 0.2-0: c(2.219388, 0.274644, 2.196982, 2.26253)
## 0.2-1: c(2.194914, 0.2737579, 2.371728, 2.206261)
## both for 32 and 64 bit
stopifnot(
all.equal(cf, c(-0.05108914, 1.00597115, 1.00320052, 0.98332632), tolerance= 7e-7)
, # ... e-7 needed on 64b
all.equal(100*sd,c(2.2138147, 0.2864678, 2.2023182, 2.1808862),tolerance= 7e-7)
) # 1.334 e-7 needed on 64b
cat('Time elapsed: ', proc.time(),'\n') # "stats"
## rm(a,m1, m2, m3, m4, sm2, l1)
## Small examples from R-SIG-robust
## First example from René Locher :
dat1 <- data.frame(lconc= log(c(21.8, 23.7, 12.2, 38.5, 21, 38.9)),
dist = c( 100, 180, 280, 30, 220, 6))
m5 <- lmrob(lconc ~ dist, data = dat1)
## Warning messages:
## ... S refinements did not converge (to tol=1e-07) in 200 iterations
## " " "
m5$init.S$converged # FALSE
m5. <- lmrob(lconc ~ dist, data = dat1,
control = lmrob.control(refine.tol = 1e-5))
m5.$init.S$converged # TRUE
## gives TRUE as the IRWLS iterations after the lmrob.S() have converged.
## 2nd example from René Locher , 6 Jun 2007
dat2 <- data.frame(lconc=log(c(29.5,40.1,21.1,25.3,27.3,25.2,26.9,19.1,16.4)),
dist = c(520, 1480,1780, 740, 540,1050,1100,1640,1860))
res2 <- lmrob(lconc~dist, data = dat2)
## Used to give Warning messages:
## 1: rwls(): not converged in 1000 lambda iterations
## ...
## 4: rwls(): ............
res2 <- lmrob(lconc~dist, data = dat2, trace.lev = 3)
## -------------
summary(res2)
stopifnot(dim(model.matrix(res2)) == c(9,2))
## Check predict():
dd <- seq(300, 2000, by = 50)
with(dat2, plot(dist, lconc, pch=20, cex=2, xlim = range(dd)))
new.d <- data.frame(dist=dd)
fit.dd <- predict(res2, new.d)
lines(dd, fit.dd, col=2, type="o")
predict(res2, new.d, se=TRUE)$se.fit
matlines(dd, predict(res2, new.d, interval="confidence")[, 2:3], col=3)
## Check handling of X of not full rank
## Fit lmrob() on a deliberately rank-deficient design -- column 4 is the
## exact sum of columns 2 and 3, and column 1 is a factor -- and assert that
## the fit nevertheless returns a valid "lmrob" object.
test <- function(n, ...) {
    des <- matrix(c(rep(1:3, length.out = n), rnorm(2 * n)), n, 4)
    resp <- rnorm(n)
    des[, 4] <- des[, 2] + des[, 3]   # introduce exact linear dependence
    des <- data.frame(des)
    des$X1 <- factor(des$X1)
    fit <- suppressWarnings(try(lmrob(resp ~ ., des, ...), silent = TRUE))
    stopifnot(is(fit, "lmrob"))
}
set.seed(0)
test(12) ## fast_S()
test(2500) ## fast_S_large_n()
test(200, trace.lev = TRUE)
## Check a case, where cov() matrix needs "posdefify":
coleman16 <- coleman[ -c(2, 7, 16, 19),]
(m16 <- lmrob(Y ~ ., data = coleman16, tuning.psi = 3.44, trace.lev = TRUE))
## failed in 0.9_0
assertWarning(
lmrob(Y ~ ., data = coleman, setting = "KS2011", control = lmrob.control())
)
cat('Time elapsed: ', proc.time(),'\n') # "stats"
|
# This script allows to use the facebook100 dataset available at https://archive.org/details/oxford-2005-facebook-matrix
# It extracts the largest strongly connected component of the Caltech36
# friendship graph and saves it as an undirected igraph object.
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
# Load required packages
source('setup.R')
# Read directly from MatLab
data <- readMat("../data/facebook100/facebook100/Caltech36.mat")
# str(data) # check structure of dataset
# Adjacency matrix -> directed igraph graph
net <- graph_from_adjacency_matrix(data$A)
# Check for connected components
# NOTE(review): this assignment shadows igraph::components() for the rest of
# the script -- rename the variable if the function is needed again later.
components <- components(net, mode = "strong")
# Select only largest connected component
vert_ids <- V(net)[components$membership == which.max(components$csize)]
g <- as.undirected(induced_subgraph(net, vert_ids))
save(g, file = "../data/netCaltech.RData")
#### Check size of networks ####
# One row per school: file name and node count (rows of the adjacency matrix).
# Entries 22, 23 and 58 are excluded -- presumably non-network files in the
# directory; TODO confirm against the actual directory listing.
size_networks = rbindlist(lapply(list.files("../data/facebook100/facebook100")[-c(22, 23, 58)], function(f){
  data.frame(name = f, size = nrow(readMat(paste0("../data/facebook100/facebook100/", f))$A))
}))
| /src/simulations_final/facebook100_parser.R | no_license | pepaaran/diagnostic-gate-estimation | R | false | false | 970 | r | # This script allows to use the facebook100 dataset available at https://archive.org/details/oxford-2005-facebook-matrix
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
# Load required packages
source('setup.R')
# Read directly from MatLab
data <- readMat("../data/facebook100/facebook100/Caltech36.mat")
# str(data) # check structure of dataset
net <- graph_from_adjacency_matrix(data$A)
# Check for connected components
components <- components(net, mode = "strong")
# Select only largest connected component
vert_ids <- V(net)[components$membership == which.max(components$csize)]
g <- as.undirected(induced_subgraph(net, vert_ids))
save(g, file = "../data/netCaltech.RData")
#### Check size of networks ####
size_networks = rbindlist(lapply(list.files("../data/facebook100/facebook100")[-c(22, 23, 58)], function(f){
data.frame(name = f, size = nrow(readMat(paste0("../data/facebook100/facebook100/", f))$A))
}))
|
weights_for_country <- function(country, weights, models_this_week, models_prev_week) {
  ## Compute ensemble weights for the models covering `country` this week.
  ##
  ## `models_this_week` / `models_prev_week` are named lists whose names are
  ## model combinations such as "M1-M2-M3" and whose elements are the country
  ## vectors each combination covers.  `weights` is a data.frame with columns
  ## `model` and `weight` holding last week's model weights.  Models that are
  ## new this week each get 1/n_models; carried-over models share the
  ## remaining mass in proportion to their renormalised old weights.
  ## Returns a named list: model name -> weight.
  message(country)
  ## Name of the model combination covering this country this week.
  ## (base Filter() replaces purrr::keep(); behaviour is identical.)
  idx_curr <- names(Filter(function(x) country %in% x, models_this_week))
  models_curr <- strsplit(x = idx_curr, split = "-")[[1]]
  message("Current models ", idx_curr)
  if (idx_curr %in% names(models_prev_week)) {
    models_old <- models_curr
  } else {
    ## For our simple case this will suffice.
    ## We have M1, M2, M3 for some countries
    ## And for some countries we have M1, M2, M3, M4.
    ## We want to use the model weights from last week for M1, M2, M3
    ## and for M4 we want to assign 1/4.
    ## BUG FIX: the previous week's combination name must be split into its
    ## component models (exactly as idx_curr is above).  Previously the
    ## unsplit name (e.g. "M1-M2-M3") was used, so `model %in% models_old`
    ## never matched and every model was treated as new (flat 1/n weights).
    models_old <- strsplit(names(models_prev_week)[1], split = "-")[[1]]
  }
  message("Old models ", models_old)
  n_models <- length(models_curr)
  new_models <- length(setdiff(models_curr, models_old))
  names(models_curr) <- models_curr
  out <- lapply(
    models_curr,
    function(model) {
      if (!model %in% models_old) {
        ## Brand-new model: flat 1/n share of the total mass.
        1 / n_models
      } else {
        ## Carried-over model: old weight renormalised over the current
        ## models, times the mass left after new models took their shares.
        unassigned_wt <- (n_models - new_models) / n_models
        normalised_wt <- weights$weight[weights$model == model] /
          sum(weights$weight[weights$model %in% models_curr])
        normalised_wt * unassigned_wt
      }
    }
  )
  out
}
normalise_weights <- function(weights, models_this_week, models_prev_week) {
  ## Compute normalised per-model weights for every country covered this
  ## week.  Returns a named list mapping each country to the named list
  ## produced by weights_for_country().
  ## (The unused `this_week`/`prev_week` locals of the original were dead
  ## code and have been removed.)
  countries <- unname(unlist(models_this_week))
  names(countries) <- countries
  normalised_wts <- purrr::map(
    countries, ~ weights_for_country(., weights, models_this_week, models_prev_week)
  )
  normalised_wts
}
| /src/produce_ensemble_outputs/R/normalise_weights.R | no_license | rcleoni/covid19-forecasts-orderly | R | false | false | 1,688 | r | weights_for_country <- function(country, weights, models_this_week, models_prev_week) {
message(country)
idx_curr <- names(purrr::keep(models_this_week, ~ country %in% .))
models_curr <- strsplit(x = idx_curr, split = "-")[[1]]
message("Current models ", idx_curr)
if (idx_curr %in% names(models_prev_week)) {
models_old <- models_curr
} else {
## For our simple case this will suffice.
## We have M1, M2, M3 for some countries
## And for some countries we have M1, M2, M3, M4.
## We want to use the model weights from last weekf or M1, M2, M3
## and for M4 we want to assign 1/4.
## This will get complicated when we start having diff countries
## for diff models.
models_old <- names(models_prev_week)[1]
}
message("Old models ", models_old)
n_models <- length(models_curr)
new_models <- length(setdiff(models_curr, models_old))
names(models_curr) <- models_curr
out <- purrr::map(
models_curr,
function(model) {
if (!model %in% models_old) {
1 / n_models
} else {
unassigned_wt <- (n_models - new_models) / n_models
normalised_wt <- weights$weight[weights$model == model] /
sum(weights$weight[weights$model %in% models_curr])
normalised_wt * unassigned_wt
}
}
)
out
}
normalise_weights <- function(weights, models_this_week, models_prev_week) {
this_week <- names(models_this_week)
prev_week <- names(models_prev_week)
countries <- unname(unlist(models_this_week))
names(countries) <- countries
normalised_wts <- purrr::map(
countries, ~ weights_for_country(., weights, models_this_week, models_prev_week)
)
normalised_wts
}
|
\name{delete.repository.from.team}
\alias{delete.repository.from.team}
\title{remove repository from team}
\usage{
delete.repository.from.team(id, owner, repo,
ctx = get.github.context())
}
\arguments{
\item{id}{team id}
\item{owner}{the repo owner}
\item{repo}{the repo name}
\item{ctx}{the github context object}
}
\value{
none
}
\description{
Remove a repository from a team.
}
| /man/delete.repository.from.team.Rd | permissive | prateek05/rgithub | R | false | false | 398 | rd | \name{delete.repository.from.team}
\alias{delete.repository.from.team}
\title{remove repository from team}
\usage{
delete.repository.from.team(id, owner, repo,
ctx = get.github.context())
}
\arguments{
\item{id}{team id}
\item{owner}{the repo owner}
\item{repo}{the repo name}
\item{ctx}{the github context object}
}
\value{
none
}
\description{
remove repository from team
}
|
# Shiny dashboard UI: a sidebar with three server-rendered data selectors and
# navigation to three result tabs (faceted plot, leaflet map, data table),
# plus contact links.
dashboardPage(
  dashboardHeader(title = "Eurostat"),
  dashboardSidebar(
    sidebarMenu(
      # Selectors populated server-side via renderUI()
      uiOutput("sel_Item"),
      uiOutput("sel_Sector"),
      uiOutput("sel_Unit"),
      menuItem("Faceted Plot", tabName = "facet"),
      menuItem("Map", tabName = "map"),
      menuItem("Table", tabName = "table"),
      # Contact links (icon-only menu entries)
      menuItem("", icon = icon("twitter-square"),
               href = "https://twitter.com/pssGuy"),
      menuItem("", icon = icon("envelope"),
               href = "mailto:agcur@rogers.com")
    )
  ),
  dashboardBody(
    tabItems(
      # Faceted ggplot of the selected series
      tabItem("facet",
              fluidRow(column(width=10,offset=1,
                              box(
                                width=12,status = "info", solidHeader = FALSE,
                                plotOutput("gg")
                              )
              )
              )),
      # Interactive leaflet map
      tabItem("map",
              fluidRow(column(width=10,offset=1,
                              box(
                                width=12,status = "info", solidHeader = FALSE,
                                leafletOutput("map")
                              )
              )
              )),
      # Interactive data table (DT)
      tabItem("table",
              fluidRow(column(width=10,offset=1,
                              box(
                                width=12,status = "info", solidHeader = FALSE,
                                DT::dataTableOutput("table")
                              )
              )
              ))
    ) # tabItems
  ) # body
) # page
| /ui.R | no_license | pssguy/eurostatOpen | R | false | false | 1,625 | r |
dashboardPage(
dashboardHeader(title = "Eurostat"),
dashboardSidebar(
sidebarMenu(
uiOutput("sel_Item"),
uiOutput("sel_Sector"),
uiOutput("sel_Unit"),
menuItem("Faceted Plot", tabName = "facet"),
menuItem("Map", tabName = "map"),
menuItem("Table", tabName = "table"),
menuItem("", icon = icon("twitter-square"),
href = "https://twitter.com/pssGuy"),
menuItem("", icon = icon("envelope"),
href = "mailto:agcur@rogers.com")
)
),
dashboardBody(
tabItems(
tabItem("facet",
fluidRow(column(width=10,offset=1,
box(
width=12,status = "info", solidHeader = FALSE,
plotOutput("gg")
)
)
)),
tabItem("map",
fluidRow(column(width=10,offset=1,
box(
width=12,status = "info", solidHeader = FALSE,
leafletOutput("map")
)
)
)),
tabItem("table",
fluidRow(column(width=10,offset=1,
box(
width=12,status = "info", solidHeader = FALSE,
DT::dataTableOutput("table")
)
)
))
) # tabItems
) # body
) # page
|
# Input data describing a small road network.
#
# `ceste` ("roads"): one row per directed road segment.  The data.frame is
# built column by column on an empty frame; the row assignments via c(...)
# coerce everything to character, hence the as.logical()/as.numeric()
# conversions further below.
ceste = data.frame()
ceste[,1] = character()
ceste[,2] = character()
ceste[,3] = character()
ceste[,4] = logical()
ceste[,5] = numeric()
ceste[,6] = numeric()
ceste[,7] = numeric()
ceste["a_b",] = c("a", "b", 'blue', TRUE, 1050, 70, 0.25)
ceste["a_d",] = c("a", "d", 'gold3', TRUE, 900, 110, 0.3)
ceste["b_c",] = c("b", "c", 'red', FALSE, 700, 70, 0)
ceste["b_d",] = c("b", "d", 'green4', FALSE, 800, 70, 0)
ceste["d_c",] = c("d", "c", 'purple', FALSE, 700, 110, 0)
# Restore the intended column types after the character coercion above.
ceste[,4] = as.logical(ceste[,4])
ceste[,5] = as.numeric(ceste[,5])
ceste[,6] = as.numeric(ceste[,6])
ceste[,7] = as.numeric(ceste[,7])
# ceste[c,] == c(from, to, colour, starting/entry road, length, initial speed limit, initial arrival intensity)
# `povezave` ("connections"): which road segments can follow each segment.
povezave = NULL
povezave$"a_b" = c("b_c","b_d")
povezave$"b_d" = c("d_c")
povezave$"a_d" = c("d_c")
# `verjetnosti` ("probabilities"): uniform transition probability over the
# successors of each road segment.
verjetnosti = list()
for(i in names(povezave)){
  n = length(povezave[[i]])
  verjetnosti[[i]] = rep(1/n, n)
}
# `koor`: (x, y) coordinates of the junctions a-d (used for drawing).
koor = data.frame()
koor[,1] = numeric()
koor[,2] = numeric()
koor["a",] = c(0,500)
koor["b",] = c(600,1000)
koor["c",] = c(1000,600)
koor["d",] = c(650,50)
# `semaforji` ("traffic lights"): at junction "d" two light phases alternate
# which incoming movement (a_d -> d_c vs b_d -> d_c) is green; each row of
# the data.frame is one phase.
semaforji = list()
semaforji$"d" = data.frame(matrix(nrow = 2, ncol = 2))
colnames(semaforji$"d") = c("a_d__d_c", "b_d__d_c")
semaforji$d[1,] = c(TRUE, FALSE)
semaforji$d[2,] = c(FALSE, TRUE)
| /podatki_ceste.R | no_license | stifler9/Dinamika-cestnega-omrezja | R | false | false | 1,271 | r | ceste = data.frame()
ceste[,1] = character()
ceste[,2] = character()
ceste[,3] = character()
ceste[,4] = logical()
ceste[,5] = numeric()
ceste[,6] = numeric()
ceste[,7] = numeric()
ceste["a_b",] = c("a", "b", 'blue', TRUE, 1050, 70, 0.25)
ceste["a_d",] = c("a", "d", 'gold3', TRUE, 900, 110, 0.3)
ceste["b_c",] = c("b", "c", 'red', FALSE, 700, 70, 0)
ceste["b_d",] = c("b", "d", 'green4', FALSE, 800, 70, 0)
ceste["d_c",] = c("d", "c", 'purple', FALSE, 700, 110, 0)
ceste[,4] = as.logical(ceste[,4])
ceste[,5] = as.numeric(ceste[,5])
ceste[,6] = as.numeric(ceste[,6])
ceste[,7] = as.numeric(ceste[,7])
# ceste[c,] == c(Od, Do, barva, zacetna cesta, dolzina, zacetna omejitev, zacetna intenzivnost prihodov)
povezave = NULL
povezave$"a_b" = c("b_c","b_d")
povezave$"b_d" = c("d_c")
povezave$"a_d" = c("d_c")
verjetnosti = list()
for(i in names(povezave)){
n = length(povezave[[i]])
verjetnosti[[i]] = rep(1/n, n)
}
koor = data.frame()
koor[,1] = numeric()
koor[,2] = numeric()
koor["a",] = c(0,500)
koor["b",] = c(600,1000)
koor["c",] = c(1000,600)
koor["d",] = c(650,50)
semaforji = list()
semaforji$"d" = data.frame(matrix(nrow = 2, ncol = 2))
colnames(semaforji$"d") = c("a_d__d_c", "b_d__d_c")
semaforji$d[1,] = c(TRUE, FALSE)
semaforji$d[2,] = c(FALSE, TRUE)
|
# Auto-generated fuzz/regression test case: feeds a 1x2 matrix of extreme
# double values (A) and a 5x7 mostly-zero matrix (B) to the internal
# multivariance:::match_rows() and prints the structure of the result.
testlist <- list(A = structure(c(9.18596488373068e+205, 9.53818252179844e+295 ), .Dim = 1:2), B = structure(c(2.19477802979261e+294, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result)
result <- do.call(multivariance:::match_rows,testlist)
str(result) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/refset-data.R
\docType{data}
\name{refset}
\alias{refset}
\title{refset - A set of potential reference genes}
\format{
A data frame with 3484 rows and 7 variables:
\describe{
\item{subject}{participant id}
\item{time}{time-point, w0, week 0 (baseline); w2pre, week 2 before acute exercise; w2post, week 2 after acute exercise; w12, week 12}
\item{condition}{volume condition used in exercises single and multiple set exercises}
\item{target}{gene identifier}
\item{cq}{quantification cycle}
\item{efficiency}{amplification efficiency, averaged over target}
\item{type}{type of target, potential reference genes are "ref", genes of interest are "goi"}
}
}
\usage{
data(refset)
}
\description{
Raw abundance data of 11 potential reference genes and 2 target genes
from a study with data nested within participants.
}
\examples{
}
\references{
Hammarström et al. (2020) J Physiol, 598: 543-565. https://doi.org/10.1113/JP278455
}
\keyword{datasets}
| /man/refset.Rd | permissive | dhammarstrom/generefer2 | R | false | true | 1,026 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/refset-data.R
\docType{data}
\name{refset}
\alias{refset}
\title{refset - A set of potential reference genes}
\format{
A data frame with 3484 rows and 7 variables:
\describe{
\item{subject}{participant id}
\item{time}{time-point, w0, week 0 (baseline); w2pre, week 2 before acute exercise; w2post, week 2 after acute exercise; w12, week 12}
\item{condition}{volume condition used in exercises single and multiple set exercises}
\item{target}{gene identifier}
\item{cq}{quantification cycle}
\item{efficiency}{amplification efficiency, averaged over target}
\item{type}{type of target, potential reference genes are "ref", genes of interest are "goi"}
}
}
\usage{
data(refset)
}
\description{
Raw abundance data of 11 potential reference genes and 2 target genes
from a study with data nested within participants.
}
\examples{
}
\references{
Hammarström et al. (2020) J Physiol, 598: 543-565. https://doi.org/10.1113/JP278455
}
\keyword{datasets}
|
# Function that builds comparison tables (group medians/means plus p-values)
# for pairwise group comparisons, either independent or paired.
#' @param data - a data.frame
#' @param vars - character. Selection of variables to analyze from the data.frame
#' @param id.var - character. Identifier (used to match observations when paired=TRUE)
#' @param group.var - character. Grouping variable
#' @param paired - logical. Type of analysis paired or independent
#' @param my_comparisons - list. List of 2-dimensional vectors with the comparisons to make; only honoured in the independent analysis. (default:NULL)
#' @param method - character. Method of analysis to select between 'wilcox.test' and 't.test'
#' @param save - logical. Save the table to an excel file; otherwise return it
#' @param path.output - character. Path to save the results.
#' @param filename - character. Base name of the output file
#' @param sep - character. NOTE(review): currently unused (export is excel); kept for interface compatibility
#' @param date - logical. Forwarded to uniexport(); whether to append the date to the file name
compare_groups_table = function(data,
                                vars,
                                id.var,
                                group.var,
                                paired=FALSE,
                                method = 'wilcox.test',
                                my_comparisons = NULL,
                                save=TRUE,
                                path.output = './',
                                filename = 'compare_groups_table',
                                sep=';',
                                date = FALSE){

  # Map p-values onto the conventional significance symbols.
  pToSign = function(x){
    ELSE=TRUE
    case_when(
      (x>0.05) ~ 'ns',
      (x<0.0001) ~ '****',
      (x<0.001) ~ '***',
      (x<0.01)~ '**',
      ELSE ~ '*'
    )
  }

  # TRUE when a rank-based test is requested.  (Bug fix: the original
  # compared the method string against 'Wilcoxon', which can never equal
  # 'wilcox.test'/'t.test', so non.parametric was always FALSE.)
  non.param = (method == 'wilcox.test')

  # Results table for Independent group comparison:
  if(!paired){
    tab = lapply(1:length(vars),function(i){
      mydata = data %>% data.frame
      # Formula vars[i] ~ group.var, reused for the test and the aggregation
      # (the original built the identical formula twice as form/form1).
      form = reformulate(termlabels = paste0(group.var), response = vars[i])
      # test statistics table_
      tab = compare_means(form,data=mydata %>% arrange_(group.var),
                          method=method,paired=paired)
      if(!is.null(my_comparisons)) tab = tab %>% dplyr::filter(group1%in%unlist(my_comparisons) & group2%in%unlist(my_comparisons))
      # Median/mean value of each group_
      if(method=='wilcox.test'){
        med1 = aggregate(form,data=mydata %>% arrange_(group.var), median) %>%
          mutate_if(is.factor,as.character)
        colnames(med1) =c('group1','median1')
        med2 = aggregate(form,data=mydata %>% arrange_(group.var), median) %>%
          mutate_if(is.factor,as.character)
        colnames(med2) =c('group2','median2')
        # Create output table_
        tab = left_join(tab,med1) %>% left_join(.,med2)
        tab = tab %>% mutate(Param = vars[i],
                             Comp=paste0(group1,'-',group2),
                             p.adj.signif = pToSign(p.adj),
                             test = method,
                             non.parametric = non.param,
                             paired = paired) %>%
          select(Param,Comp,median1,median2,p.format,
                 p.signif,Holm.p.adj=p.adj,p.adj.signif,test,non.parametric,paired) %>%
          data.frame
        return(tab)
      } else{
        med1 = aggregate(form,data=mydata %>% arrange_(group.var), mean) %>% mutate_if(is.factor,as.character)
        colnames(med1) =c('group1','mean1')
        med2 = aggregate(form,data=mydata %>% arrange_(group.var), mean) %>% mutate_if(is.factor,as.character)
        colnames(med2) =c('group2','mean2')
        # Create output table_
        tab = left_join(tab,med1) %>% left_join(.,med2)
        tab = tab %>% mutate(Param = vars[i],
                             Comp=paste0(group1,'-',group2),
                             p.adj.signif = pToSign(p.adj),
                             test = method,
                             non.parametric = non.param,
                             paired = paired) %>%
          select(Param,Comp,mean1,mean2,p.format,
                 p.signif,Holm.p.adj=p.adj,p.adj.signif,test,non.parametric,paired) %>%
          data.frame
        return(tab)
      }
    })
    tab = tab %>% bind_rows()
    if(save){
      p = 'Independent'
      uniexport(tab,type = 'excel',
                path = path.output,
                filename = paste0(filename,'_',method,'_',p),
                date=date)
    } else{
      return(tab)
    }
  }

  # Results table for Paired group comparison:
  if(paired){
    # Keep only individuals with observations at both conditions
    mydata = data %>% group_by_(id.var) %>%
      mutate(n=n()) %>% dplyr::filter(n==2) %>% select(-n) %>%
      ungroup %>% data.frame
    tab = lapply(1:length(vars),function(i){
      # Formula vars[i] ~ group.var, reused for the test and the aggregation
      form = reformulate(termlabels = paste0(group.var), response = vars[i])
      # test statistics table_
      tab = compare_means(form,data=mydata %>% arrange_(group.var),
                          method=method,paired=TRUE)
      # Median/mean value of each group_
      if(method=='wilcox.test'){
        med1 = aggregate(form,data=mydata %>% arrange_(group.var), median) %>%
          mutate_if(is.factor,as.character)
        colnames(med1) =c('group1','median1')
        med2 = aggregate(form,data=mydata %>% arrange_(group.var), median) %>%
          mutate_if(is.factor,as.character)
        colnames(med2) =c('group2','median2')
        # Create output table_
        tab = left_join(tab,med1) %>% left_join(.,med2)
        tab = tab %>% mutate(Param = vars[i],
                             Comp=paste0(group1,'-',group2),
                             p.adj.signif = pToSign(p.adj),
                             test = method,
                             non.parametric = non.param,
                             paired=paired) %>%
          select(Param,Comp,median1,median2,p.format,
                 p.signif,Holm.p.adj=p.adj,p.adj.signif,test,non.parametric,paired) %>%
          data.frame
      } else{
        med1 = aggregate(form,data=mydata %>% arrange_(group.var), mean) %>%
          mutate_if(is.factor,as.character)
        colnames(med1) =c('group1','mean1')
        med2 = aggregate(form,data=mydata %>% arrange_(group.var), mean) %>%
          mutate_if(is.factor,as.character)
        colnames(med2) =c('group2','mean2')
        # Create output table_
        tab = left_join(tab,med1) %>% left_join(.,med2)
        tab = tab %>% mutate(Param = vars[i],
                             Comp=paste0(group1,'-',group2),
                             p.adj.signif = pToSign(p.adj),
                             test = method,
                             non.parametric = non.param,
                             paired=paired) %>%
          select(Param,Comp,mean1,mean2,p.format,
                 p.signif,Holm.p.adj=p.adj,p.adj.signif,test,non.parametric,paired) %>%
          data.frame
      }
      return(tab)
    })
    tab = tab %>% bind_rows()
    if(save){
      p = 'Paired'
      uniexport(tab,type = 'excel',
                path = path.output,
                filename = paste0(filename,'_',method,'_',p),
                date=date)
    } else{
      return(tab)
    }
  }
}
| /R/compare_groups_table.R | no_license | douve/viRievac | R | false | false | 7,316 | r |
# Function that creates boxplots with pairwise comparisons
#' @param data - a data.frame
#' @param vars - character. Selection of variables to analyze from the data.frame
#' @param id.var - character. Identifier
#' @param group.var - character. Grouping variable
#' @param paired - logical. Type of analysis paired or independent
#' @param my_comparisons - list. List of 2-dimensional vectors with the comparisons to make. (default:NULL)
#' @param method - character. Method of analysis to select between 'wilcox.test' and 't.test'
#' @param pval - character. Allowed values include "p.signif" (shows the significance levels), "p.format" (shows the formatted p value).
#' @param save - logical. Save the plot in pdf
#' @param path.output - character. Path to save the results.
#' @param filename - character. Name
compare_groups_table = function(data,
vars,
id.var,
group.var,
paired=FALSE,
method = 'wilcox.test',
my_comparisons = NULL,
save=TRUE,
path.output = './',
filename = 'compare_groups_table',
sep=';',
date = FALSE){
pToSign = function(x){
ELSE=TRUE
case_when(
(x>0.05) ~ 'ns',
(x<0.0001) ~ '****',
(x<0.001) ~ '***',
(x<0.01)~ '**',
ELSE ~ '*'
)
}
# Results table for Independent group comparison:
if(!paired){
tab = lapply(1:length(vars),function(i){
mydata = data %>% data.frame
# test statistics table_
form = reformulate(termlabels = paste0(group.var), response = vars[i])
tab = compare_means(form,data=mydata %>% arrange_(group.var),
method=method,paired=paired)
if(!is.null(my_comparisons)) tab = tab %>% dplyr::filter(group1%in%unlist(my_comparisons) & group2%in%unlist(my_comparisons))
# Median/mean value of each group_
form1 = reformulate(termlabels = paste0(group.var), response = vars[i])
if(method=='wilcox.test'){
med1 = aggregate(form1,data=mydata %>% arrange_(group.var), median) %>%
mutate_if(is.factor,as.character)
colnames(med1) =c('group1','median1')
med2 = aggregate(form1,data=mydata %>% arrange_(group.var), median) %>%
mutate_if(is.factor,as.character)
colnames(med2) =c('group2','median2')
# Create output table_
tab = left_join(tab,med1) %>% left_join(.,med2)
tab = tab %>% mutate(Param = vars[i],
Comp=paste0(group1,'-',group2),
p.adj.signif = pToSign(p.adj),
test = method,
non.parametric = ifelse(test=='Wilcoxon',TRUE,FALSE),
paired = paired) %>%
select(Param,Comp,median1,median2,p.format,
p.signif,Holm.p.adj=p.adj,p.adj.signif,test,non.parametric,paired) %>%
data.frame
return(tab)
} else{
med1 = aggregate(form1,data=mydata %>% arrange_(group.var), mean) %>% mutate_if(is.factor,as.character)
colnames(med1) =c('group1','mean1')
med2 = aggregate(form1,data=mydata %>% arrange_(group.var), mean) %>% mutate_if(is.factor,as.character)
colnames(med2) =c('group2','mean2')
# Create output table_
tab = left_join(tab,med1) %>% left_join(.,med2)
tab = tab %>% mutate(Param = vars[i],
Comp=paste0(group1,'-',group2),
p.adj.signif = pToSign(p.adj),
test = method,
non.parametric = ifelse(test=='Wilcoxon',TRUE,FALSE),
paired = paired) %>%
select(Param,Comp,mean1,mean2,p.format,
p.signif,Holm.p.adj=p.adj,p.adj.signif,test,non.parametric,paired) %>%
data.frame
return(tab)
}
})
tab = tab %>% bind_rows()
if(save){
p = 'Independent'
uniexport(tab,type = 'excel',
path = path.output,
filename = paste0(filename,'_',method,'_',p),
date=date)
} else{
return(tab)
}
}
# Results table for Paired group comparison:
if(paired){
# Make sure we use samples
mydata = data %>% group_by_(id.var) %>%
mutate(n=n()) %>% dplyr::filter(n==2) %>% select(-n) %>%
ungroup %>% data.frame # only individuals with data at both condition
tab = lapply(1:length(vars),function(i){
# test statistics table_
form = reformulate(termlabels = paste0(group.var), response = vars[i])
tab = compare_means(form,data=mydata %>% arrange_(group.var),
method=method,paired=TRUE)
# Median/mean value of each group_
form1 = reformulate(termlabels = paste0(group.var), response = vars[i])
if(method=='wilcox.test'){
med1 = aggregate(form1,data=mydata %>% arrange_(group.var), median) %>%
mutate_if(is.factor,as.character)
colnames(med1) =c('group1','median1')
med2 = aggregate(form1,data=mydata %>% arrange_(group.var), median) %>%
mutate_if(is.factor,as.character)
colnames(med2) =c('group2','median2')
# Create output table_
tab = left_join(tab,med1) %>% left_join(.,med2)
tab = tab %>% mutate(Param = vars[i],
Comp=paste0(group1,'-',group2),
p.adj.signif = pToSign(p.adj),
test = method,
non.parametric = ifelse(test=='Wilcoxon',TRUE,FALSE),
paired=paired) %>%
select(Param,Comp,median1,median2,p.format,
p.signif,Holm.p.adj=p.adj,p.adj.signif,test,non.parametric,paired) %>%
data.frame
} else{
med1 = aggregate(form1,data=mydata %>% arrange_(group.var), mean) %>%
mutate_if(is.factor,as.character)
colnames(med1) =c('group1','mean1')
med2 = aggregate(form1,data=mydata %>% arrange_(group.var), mean) %>%
mutate_if(is.factor,as.character)
colnames(med2) =c('group2','mean2')
# Create output table_
tab = left_join(tab,med1) %>% left_join(.,med2)
tab = tab %>% mutate(Param = vars[i],
Comp=paste0(group1,'-',group2),
p.adj.signif = pToSign(p.adj),
test = method,
non.parametric = ifelse(test=='Wilcoxon',TRUE,FALSE),
paired=paired) %>%
select(Param,Comp,mean1,mean2,p.format,
p.signif,Holm.p.adj=p.adj,p.adj.signif,test,non.parametric,paired) %>%
data.frame
}
return(tab)
})
tab = tab %>% bind_rows()
if(save){
p = 'Paired'
uniexport(tab,type = 'excel',
path = path.output,
filename = paste0(filename,'_',method,'_',p),
date=date)
} else{
return(tab)
}
}
}
|
#' @title Contraceptive Method Choice
#'
#' @description
#' This dataset is a subset of the 1987 National Indonesia
#' Contraceptive Prevalence Survey.
#'
#' @details
#' The samples are married women
#' who were either not pregnant or do not know if they were at the
#' time of interview. The problem is to predict the current
#' contraceptive method choice (no use, long-term methods,
#' or short-term methods) of a woman based on her demographic and
#' socio-economic characteristics.
#'
#' @format A data frame with 1473 rows and 10 variables:
#' \describe{
#' \item{\code{wife_age}}{integer.}
#' \item{\code{wife_education}}{integer. \code{1=low, 2, 3, 4=high}.}
#' \item{\code{number_of_children_ever_born}}{integer. }
#' \item{\code{wife_religion}}{factor. \code{Non-Islam, Islam}.}
#' \item{\code{standard_of_living}}{integer. \code{1=low, 2, 3, 4=high}.}
#' \item{\code{media_exposure}}{factor. \code{Good, Not Good}.}
#' \item{\code{contraceptive}}{factor. \code{No-use, Long-term, Short-term}.}
#' \item{\code{wife_now_working}}{factor. \code{Yes, No}.}
#' \item{\code{husband_education}}{integer. \code{1=low, 2, 3, 4=high.}}
#' \item{\code{husband_occupation}}{integer. \code{1,2,3,4}.}
#' }
#' @examples
#' summary(contraception)
#' @source
#' Dua, D. and Graff, C. (2019).
#' UCI Machine Learning Repository [http://archive.ics.uci.edu/ml].
#' Irvine, CA: University of California, School of Information and Computer Science
"contraception" | /R/contraception.R | permissive | Rkabacoff/qacData | R | false | false | 1,486 | r | #' @title Contraceptive Method Choice
#'
#' @description
#' This dataset is a subset of the 1987 National Indonesia
#' Contraceptive Prevalence Survey.
#'
#' @details
#' The samples are married women
#' who were either not pregnant or do not know if they were at the
#' time of interview. The problem is to predict the current
#' contraceptive method choice (no use, long-term methods,
#' or short-term methods) of a woman based on her demographic and
#' socio-economic characteristics.
#'
#' @format A data frame with 1473 rows and 10 variables:
#' \describe{
#' \item{\code{wife_age}}{integer.}
#' \item{\code{wife_education}}{integer. \code{1=low, 2, 3, 4=high}.}
#' \item{\code{number_of_children_ever_born}}{integer. }
#' \item{\code{wife_religion}}{factor. \code{Non-Islam, Islam}.}
#' \item{\code{standard_of_living}}{integer. \code{1=low, 2, 3, 4=high}.}
#' \item{\code{media_exposure}}{factor. \code{Good, Not Good}.}
#' \item{\code{contraceptive}}{factor. \code{No-use, Long-term, Short-term}.}
#' \item{\code{wife_now_working}}{factor. \code{Yes, No}.}
#' \item{\code{husband_education}}{integer. \code{1=low, 2, 3, 4=high.}}
#' \item{\code{husband_occupation}}{integer. \code{1,2,3,4}.}
#' }
#' @examples
#' summary(contraception)
#' @source
#' Dua, D. and Graff, C. (2019).
#' UCI Machine Learning Repository [http://archive.ics.uci.edu/ml].
#' Irvine, CA: University of California, School of Information and Computer Science
"contraception" |
#load libs
library( knitr )
library(ANTsR)
library(visreg)
library(robustbase)
library(groupdata2)
library(ggplot2)
library(caret)
require(broom)
#Manganese Enhanced MRI predicts cognitive performnace Alex Badea and Natalie Delpratt
#reuses results of sparsedecom2 for best performing fold Alex Badea 7 dept 2018
#uses RMSE as prediction error, rather than goodness of fit error 30 August 2018
#legacy from Natalie to rememebr paths
#setwd( '/Users/omega/alex/adforesight/' )
#output.path <- '/Users/omega/alex/adforesight/mydata/outdata/sd2_projall_noscale/' #ND
mypath<-'/Volumes/CivmUsers/omega/alex/GitHub/adforesight/'
mypath <- '/Users/alex/GitHub/adforesight/' #flavors of serifos
mypath <- '/Users/alex/Documents/GitHub/adforesight/' #flavors of ithaka
setwd(mypath)
source(paste(mypath, '/R/myr2score.R',sep=''))
mysp <- 0.05 #0.05 # 0.01 # 0.05 #0.2 #0.05 # was 0.005 sparseness
#if mynvecs is set to one eig1 neetds to be transposed
mynvecs <- 2 # 5 vecs is better # 10 shows ventral thalamic nuclei 10 # 50 #put back to 50 alex #tested with 10
myell1 <- 1 # make this smaller 0.5, Brian says it is not what i think just switch between l0 and l1
myits<-15 #15 #5
mysmooth<-0.1 #0.1 # 0.1 #0.01 #0 # was 0.01
myclus<-250 #was 250
# Load in Behavior and Imaging Data
behavior <- read.csv('./mydata/All_Behavior.csv')
labled.set <-read.csv('./mydata/legendsCHASS2symmetric.csv')
labeled.brain.img <- antsImageRead('./mydata/MDT_labels_chass_symmetric.nii.gz')
mask <- antsImageRead('./mydata/MDT_mask_e3.nii')
mask <- thresholdImage( mask, 0.1, Inf )
#read all 3 contrast files
mang_files <- list.files(path = "./mydata/imdata/", pattern = "T2_to_MDT",full.names = T,recursive = T)
jac_files <- list.files(path = "./mydata/imdata/", pattern = "jac_to_MDT",full.names = T,recursive = T)
chi_files <- list.files(path = "./mydata/imdata/", pattern = "X_to_MDT",full.names = T,recursive = T)
########################################
#build a place to save results
extension<-paste('sd2SINGLEJAC', 'sp', toString(mysp), 'vecs', toString(mynvecs), 's', toString(mysmooth),'clus', toString(myclus), sep='') # 'JACsp0p005s0'
output.path <- paste(mypath,'/mydata/outdata_sd2/',extension, '/', sep='') #sd2_projall_noscale/'
if (dir.exists(output.path)){ 1} else {dir.create(output.path, recursive=TRUE)}
#pick yourcontrast
mang_mat <- imagesToMatrix(jac_files,mask)
#######################################
#let things flow from here
mygroup <- behavior$genotype[1:24]
myindex <- c(1:24)
mydfb <- data.frame("mysubject_index" = factor(as.integer(myindex)),"mygenotype"=mygroup)
kable(mydfb, align = 'c')
set.seed(1)
k<-4
performances <- c()
myBICs <- c()
myR2score<-c()
myps<-c()
gfit<-c()
###build k models and retain the best performing one in terms of RMSE2
#considet using LOOCV to replace folds, but results may be unstable
#k<-length(rows.train)-1
k<-4
set.seed(1)
res_train<-createFolds(behavior$genotype,k, list = TRUE, returnTrain = TRUE)
set.seed(1)
res_test<-createFolds(behavior$genotype,k)
for (myfold in 1:k){
# for (myfold in 3){
gc(verbose = TRUE, reset = FALSE)
print('myfold:',myfold)
print(myfold)
rows.train<-as.integer(unlist(res_train[myfold]))
rows.test<-as.integer(unlist(res_test[myfold]))
mang.train <- mang_mat[rows.train, ]
mang.test <- mang_mat[rows.test, ]
behav.train <- behavior[rows.train, ]
behav.test <- behavior[rows.test, ]
dist4.train <- behav.train[,'d4']
dist4.test <- behav.test[,'d4']
start_time <- Sys.time()
#negative sparseness is what? allows for negative weights!
myeig2_mang<-sparseDecom2(inmatrix = list(mang.train,as.matrix(behav.train$d4)),its = myits, cthresh=c(myclus,0), smooth = mysmooth, mycoption = 0, sparseness = c(mysp,1), nvecs = mynvecs, verbose=1, statdir=paste(output.path))
#myeig2_mang<-sparseDecom(inmatrix = mang.train,its = myits, cthresh=c(myclus), smooth = mysmooth, mycoption = 0, sparseness = c(mysp), nvecs = mynvecs, verbose=1, statdir=paste(output.path2))
end_time <- Sys.time()
t1time<-end_time - start_time
print(t1time)
imgpredtrain_mang<-mang.train %*% (myeig2_mang$eig1)
imgpredtest_mang<-mang.test %*% (myeig2_mang$eig1)
####start do single alex
ncolcombo<-ncol( imgpredtrain_mang)
projs.train <- data.frame(dist4.train, imgpredtrain_mang) # column combind the behavior wth the projections
colnames(projs.train) <- c('Dist_4', paste0('Proj', c(1:ncolcombo)))
projs.test <- data.frame(dist4.test, imgpredtest_mang ) # column combind the behavior wth the projections
colnames(projs.test) <- c('Dist_4', paste0('Proj', c(1:ncolcombo)))
###end do single alex
mylm <- lm('Dist_4 ~ .', data=projs.train) # behavior correlation with projections
summylm<-summary(mylm)
summanovalm<-anova(mylm)
rSquared <- summary(mylm)$r.squared
pVal <- anova(mylm)$'Pr(>F)'[1]
mylmsummary<-glance(mylm)
pval1<-mylmsummary$p.value
e2i_mang<-matrixToImages((t(myeig2_mang$eig1)),mask = mask)
for (i in 1:mynvecs){
antsImageWrite(e2i_mang[[i]],paste(output.path,extension,'sd2eig_' ,as.character(i), 'fold', toString(myfold), '_Mn.nii.gz',sep=''))
# antsImageWrite(e2i_jac[[i]],paste(output.path,extension,'sd2eig_' ,as.character(i), 'fold', toString(myfold), '_jac.nii.gz',sep=''))
# antsImageWrite(e2i_chi[[i]],paste(output.path,extension,'sd2eig_' ,as.character(i), 'fold', toString(myfold), '_chi.nii.gz',sep=''))
}
distpred4 <- predict.lm(mylm, newdata=projs.test) # based on the linear model predict the distances for the same day
glance(cor.test(projs.test$Dist_4,(distpred4)))
glance(cor.test(distpred4,dist4.test))
#remove next lines for LOOCV
mymodel<-lm(distpred4~dist4.test)
modsum <-summary(mymodel)
r2 <- modsum$r.squared #modsum$adj.r.squared
# my.p <- modsum$coefficients[2,4]
RMSE2<-sqrt(mean((distpred4 - dist4.test)^2))
performances[myfold]<-RMSE2
myR2score[myfold]<-myr2score(distpred4,dist4.test)
myps[myfold]<-pval1<-mylmsummary$p.value #my.p
myBICs[myfold] <- BIC(mylm)
###
mytheme <- theme(panel.grid.minor = element_blank(), panel.grid.major = element_blank(),
panel.background = element_rect(fill = "white"))
myplot<- visreg(mymodel, gg=TRUE)
myplot + theme(panel.grid.minor = element_blank(), panel.grid.major = element_blank(),
panel.background = element_rect(fill = "transparent", colour = NA),
#xaxs="i", yaxs="i",
axis.line.x = element_line(colour = 'black', size=0.5, linetype='solid'),
axis.line.y = element_line(colour = 'black', size=0.5, linetype='solid')) +
ggtitle(paste("RMSE=",formatC(RMSE2,digits=2, format="f"), "p=",formatC(myps[myfold],digits=4, format="f"), " BIC=", formatC(BIC(mymodel),digits=2, format="f")))
ggsave(paste(output.path,extension,'Mnfold',toString(myfold),'.pdf',sep=''), plot = last_plot(), device = 'pdf',
scale = 1, width = 4, height = 4, units = c("in"),dpi = 300)
save(mylm, file=paste(output.path , "model2", toString(myfold), ".Rdata", sep=''))
save(mymodel, file=paste(output.path , "behavmodelsd2", toString(myfold), ".Rdata", sep=''))
myperf<-data.frame(rbind(distpred4,dist4.test),row.names=c("d_predicted","d_valid"))
write.csv(myperf, file = paste(output.path ,extension,'distances4_pv_fold' , toString(myfold), '.csv',sep=''))
myperf<-data.frame(c(RMSE2,myR2score[myfold],myps[myfold],myBICs[myfold], r2),row.names=c("RMSE2","R2score","p","BIC", "R2"))
write.csv(myperf, file = paste(output.path ,extension,'distances4_stats_fold' , toString(myfold), '.csv',sep=''))
gc(verbose = TRUE, reset = FALSE)
}
###################################
##### for validation now ####
###################################
myminfold<-which(performances == min(performances), arr.ind = TRUE)
myfold<-myminfold
load(file=paste(output.path , "model2", toString(myfold), ".Rdata", sep='')) # loads mylm
ncolcombo<-mynvecs
rows.valid <- c(1:24)
mang.valid <- mang_mat[rows.valid, ]
#read eigenregions for best myfold
#paste(output.path,extension,'sd2eig' ,as.character(i), 'fold', toString(myfold), '.nii.gz',sep='')
eig_files_Mn <- list.files(path = paste(output.path,sep=''), pattern=paste('*', 'fold', toString(myfold), '_Mn.nii.gz', sep=''),full.names = T,recursive = T)
eig_mat_Mn <- imagesToMatrix(eig_files_Mn,mask)
imgmat_mang_valid <- mang.valid %*% t(eig_mat_Mn) # [24,numvox] [nvecsx3,numvox]
dist4.valid <- behavior[rows.valid, 'd4']
projs.valid <- data.frame(cbind(dist4.valid,imgmat_mang_valid))
colnames(projs.valid) <- c('Dist_4', paste0('Proj', c(1:ncolcombo)))
distpred <- predict.lm(mylm, newdata=projs.valid)
mymodel<-lm(distpred~dist4.valid)
RSS <- c(crossprod(mymodel$residuals))
MSE <- RSS / length(mymodel$residuals)
RMSE <- sqrt(MSE)
RMSE2<-sqrt(mean((distpred - dist4.valid)^2))
mysummary <-summary(mymodel)
r2pred <- mysummary$adj.r.squared
ppred <- mysummary$coefficients[2,4]
max(behavior$d4[1:24])
RMSE_valid<-RMSE2
BIC_valid <- BIC(mymodel)
R2score_valid<-myr2score(distpred,dist4.valid)
res_cor<-cor.test(dist4.valid,distpred)
myplot<- visreg(mymodel, gg=TRUE, scale='linear', plot=TRUE, xlim=c(0,max(dist4.valid)),ylim=c(0,max(dist4.valid)))
myplot2<-plot(myplot,xlim=c(0,max(dist4.valid)),ylim=c(0,max(dist4.valid)))
ggsave(paste(output.path,extension,'MnValidationSet',toString(myfold),'sd2plainjane.pdf',sep=''), plot = last_plot(), device = 'pdf',
scale = 1, width = 4, height = 4, units = c("in"),dpi = 300)
myplot + theme(panel.grid.minor = element_blank(), panel.grid.major = element_blank(),
panel.background = element_rect(fill = "transparent", colour = NA),
axis.line.x = element_line(colour = 'black', size=0.5, linetype='solid'),
axis.line.y = element_line(colour = 'black', size=0.5, linetype='solid'))+
#xlim(0,1200)+ylim(0,1200)+coord_cartesian(xlim = c(1200, 1200),ylim = c(1200, 1200)) + coord_equal()+
ggtitle(paste("RMSE=",formatC(RMSE_valid,digits=2, format="f"),
# "R2score=",formatC(R2score_valid,digits=2, format="f"),
# " R2=", formatC(r2pred,digits=2, format="f"),
" p= ", formatC(ppred,digits=4, format="f"),
" BIC=", formatC(BIC_valid,digits=2, format="f")))
ggsave(paste(output.path,extension,'MnValidationSet',toString(myfold),'sd2.pdf',sep=''), plot = last_plot(), device = 'pdf',
scale = 1, width = 4, height = 4, units = c("in"),dpi = 300)
numcols<-dim(projs.valid)[2]
rd4 <- t(t(dist4.valid)[rep(1,c(3*mynvecs)),])
pcor<-c(numcols)
corval<-c(numcols)
for (i in 1:numcols) {
mypcor<-cor.test(t(dist4.valid),t(projs.valid[,i]))
pcor[i]<-mypcor$p.value
corval[i]<-mypcor$estimate
}
rt<-glance(cor.test(dist4.valid,distpred))
corval[1]<-rt$estimate
pcor[1]<-rt$p.value
mycorsdf_eig2d4<-data.frame(rbind(pcor,corval),row.names=c("pcor","cor"))
colnames(mycorsdf_eig2d4)<-c('total', paste0('Proj', c(1:ncolcombo)))
write.csv(mycorsdf_eig2d4, file = paste(output.path ,extension,'fold', toString(myfold), 'd4corsvalidsd2.csv',sep=''))
myperf<-data.frame(rbind(distpred,dist4.valid),row.names=c("d_predicted","d_valid"))
write.csv(myperf, file = paste(output.path ,extension,'fold', toString(myfold), 'distances4_validsd2.csv',sep=''))
myeig2_mang_valid<-sparseDecom2(inmatrix = list(mang.valid,as.matrix(dist4.valid)),its = myits, cthresh=c(myclus,0), smooth = mysmooth, mycoption = 0, sparseness = c(mysp,1), nvecs = mynvecs, verbose=1, statdir=paste(output.path))
#myeig2_mang<-sparseDecom2(inmatrix = list(mang.train,as.matrix(behav.train$d4)),its = myits, cthresh=c(myclus,0), smooth = mysmooth, mycoption = 0, sparseness = c(mysp,1), nvecs = mynvecs, verbose=1, statdir=paste(output.path))
e2i_mang_valid<-matrixToImages((t(myeig2_mang_valid$eig1)),mask = mask)
#imgpredtrain_mang<-mang.train %*% (myeig2_mang$eig1)
for (i in 1:mynvecs){
antsImageWrite(e2i_mang_valid[[i]],paste(output.path,extension,'full_eig' ,as.character(i), '_Mn.nii.gz',sep=''))
}
#redo fold min or save models
imgmat_mang_valid <- mang.valid %*% t(e2i_mang_valid) # [24,numvox] [nvecsx3,numvox]
projs.valid <- data.frame(cbind(dist4.valid,imgmat_mang_valid))
colnames(projs.valid) <- c('Dist_4', paste0('Proj', c(1:ncolcombo)))
distpred <- predict.lm(mylm, newdata=projs.valid)
mymodel<-lm(distpred~dist4.valid)
RSS <- c(crossprod(mymodel$residuals))
MSE <- RSS / length(mymodel$residuals)
RMSE <- sqrt(MSE)
RMSE2<-sqrt(mean((distpred - dist4.valid)^2))
mysummary <-summary(mymodel)
r2pred <- mysummary$adj.r.squared
ppred <- mysummary$coefficients[2,4]
max(behavior$d4[1:24])
RMSE_valid<-RMSE2
BIC_valid <- BIC(mymodel)
R2score_valid<-myr2score(distpred,dist4.valid)
res_cor<-cor.test(dist4.valid,distpred)
myplot<- visreg(mymodel, gg=TRUE, scale='linear', plot=TRUE, xlim=c(0,max(dist4.valid)),ylim=c(0,max(dist4.valid)))
myplot2<-plot(myplot,xlim=c(0,max(dist4.valid)),ylim=c(0,max(dist4.valid)))
ggsave(paste(output.path,extension,'newMnValidationSetFULL',toString(myfold),'sd2plainjane.pdf',sep=''), plot = last_plot(), device = 'pdf',
scale = 1, width = 4, height = 4, units = c("in"),dpi = 300)
myplot + theme(panel.grid.minor = element_blank(), panel.grid.major = element_blank(),
panel.background = element_rect(fill = "transparent", colour = NA),
axis.line.x = element_line(colour = 'black', size=0.5, linetype='solid'),
axis.line.y = element_line(colour = 'black', size=0.5, linetype='solid'))+
#xlim(0,1200)+ylim(0,1200)+coord_cartesian(xlim = c(1200, 1200),ylim = c(1200, 1200)) + coord_equal()+
ggtitle(paste("RMSE=",formatC(RMSE_valid,digits=2, format="f"),
# "R2score=",formatC(R2score_valid,digits=2, format="f"),
# " R2=", formatC(r2pred,digits=2, format="f"),
" p= ", formatC(ppred,digits=4, format="f"),
" BIC=", formatC(BIC_valid,digits=2, format="f")))
ggsave(paste(output.path,extension,'newMnValidationSetFULL',toString(myfold),'sd2.pdf',sep=''), plot = last_plot(), device = 'pdf',
scale = 1, width = 4, height = 4, units = c("in"),dpi = 300)
numcols<-dim(projs.valid)[2]
rd4 <- t(t(dist4.valid)[rep(1,c(3*mynvecs)),])
pcor<-c(numcols)
corval<-c(numcols)
for (i in 1:numcols) {
mypcor<-cor.test(t(dist4.valid),t(projs.valid[,i]))
pcor[i]<-mypcor$p.value
corval[i]<-mypcor$estimate
}
rt<-glance(cor.test(dist4.valid,distpred))
corval[1]<-rt$estimate
pcor[1]<-rt$p.value
mycorsdf_eig2d4<-data.frame(rbind(pcor,corval),row.names=c("pcor","cor"))
colnames(mycorsdf_eig2d4)<-c('total', paste0('Proj', c(1:ncolcombo)))
write.csv(mycorsdf_eig2d4, file = paste(output.path ,extension,'FULL', 'd4corsvalidsd2.csv',sep=''))
myperf<-data.frame(rbind(distpred,dist4.valid),row.names=c("d_predicted","d_valid"))
write.csv(myperf, file = paste(output.path ,extension,'FULL', 'distances4_validsd2.csv',sep=''))
| /R/mysd2_250918single.R | no_license | portokalh/adforesight | R | false | false | 14,945 | r | #load libs
library( knitr )
library(ANTsR)
library(visreg)
library(robustbase)
library(groupdata2)
library(ggplot2)
library(caret)
require(broom)
#Manganese Enhanced MRI predicts cognitive performnace Alex Badea and Natalie Delpratt
#reuses results of sparsedecom2 for best performing fold Alex Badea 7 dept 2018
#uses RMSE as prediction error, rather than goodness of fit error 30 August 2018
#legacy from Natalie to rememebr paths
#setwd( '/Users/omega/alex/adforesight/' )
#output.path <- '/Users/omega/alex/adforesight/mydata/outdata/sd2_projall_noscale/' #ND
mypath<-'/Volumes/CivmUsers/omega/alex/GitHub/adforesight/'
mypath <- '/Users/alex/GitHub/adforesight/' #flavors of serifos
mypath <- '/Users/alex/Documents/GitHub/adforesight/' #flavors of ithaka
setwd(mypath)
source(paste(mypath, '/R/myr2score.R',sep=''))
mysp <- 0.05 #0.05 # 0.01 # 0.05 #0.2 #0.05 # was 0.005 sparseness
#if mynvecs is set to one eig1 neetds to be transposed
mynvecs <- 2 # 5 vecs is better # 10 shows ventral thalamic nuclei 10 # 50 #put back to 50 alex #tested with 10
myell1 <- 1 # make this smaller 0.5, Brian says it is not what i think just switch between l0 and l1
myits<-15 #15 #5
mysmooth<-0.1 #0.1 # 0.1 #0.01 #0 # was 0.01
myclus<-250 #was 250
# Load in Behavior and Imaging Data
behavior <- read.csv('./mydata/All_Behavior.csv')
labled.set <-read.csv('./mydata/legendsCHASS2symmetric.csv')
labeled.brain.img <- antsImageRead('./mydata/MDT_labels_chass_symmetric.nii.gz')
mask <- antsImageRead('./mydata/MDT_mask_e3.nii')
mask <- thresholdImage( mask, 0.1, Inf )
#read all 3 contrast files
mang_files <- list.files(path = "./mydata/imdata/", pattern = "T2_to_MDT",full.names = T,recursive = T)
jac_files <- list.files(path = "./mydata/imdata/", pattern = "jac_to_MDT",full.names = T,recursive = T)
chi_files <- list.files(path = "./mydata/imdata/", pattern = "X_to_MDT",full.names = T,recursive = T)
########################################
#build a place to save results
extension<-paste('sd2SINGLEJAC', 'sp', toString(mysp), 'vecs', toString(mynvecs), 's', toString(mysmooth),'clus', toString(myclus), sep='') # 'JACsp0p005s0'
output.path <- paste(mypath,'/mydata/outdata_sd2/',extension, '/', sep='') #sd2_projall_noscale/'
if (dir.exists(output.path)){ 1} else {dir.create(output.path, recursive=TRUE)}
#pick yourcontrast
mang_mat <- imagesToMatrix(jac_files,mask)
#######################################
#let things flow from here
mygroup <- behavior$genotype[1:24]
myindex <- c(1:24)
mydfb <- data.frame("mysubject_index" = factor(as.integer(myindex)),"mygenotype"=mygroup)
kable(mydfb, align = 'c')
set.seed(1)
k<-4
performances <- c()
myBICs <- c()
myR2score<-c()
myps<-c()
gfit<-c()
###build k models and retain the best performing one in terms of RMSE2
#considet using LOOCV to replace folds, but results may be unstable
#k<-length(rows.train)-1
k<-4
set.seed(1)
res_train<-createFolds(behavior$genotype,k, list = TRUE, returnTrain = TRUE)
set.seed(1)
res_test<-createFolds(behavior$genotype,k)
for (myfold in 1:k){
# for (myfold in 3){
gc(verbose = TRUE, reset = FALSE)
print('myfold:',myfold)
print(myfold)
rows.train<-as.integer(unlist(res_train[myfold]))
rows.test<-as.integer(unlist(res_test[myfold]))
mang.train <- mang_mat[rows.train, ]
mang.test <- mang_mat[rows.test, ]
behav.train <- behavior[rows.train, ]
behav.test <- behavior[rows.test, ]
dist4.train <- behav.train[,'d4']
dist4.test <- behav.test[,'d4']
start_time <- Sys.time()
#negative sparseness is what? allows for negative weights!
myeig2_mang<-sparseDecom2(inmatrix = list(mang.train,as.matrix(behav.train$d4)),its = myits, cthresh=c(myclus,0), smooth = mysmooth, mycoption = 0, sparseness = c(mysp,1), nvecs = mynvecs, verbose=1, statdir=paste(output.path))
#myeig2_mang<-sparseDecom(inmatrix = mang.train,its = myits, cthresh=c(myclus), smooth = mysmooth, mycoption = 0, sparseness = c(mysp), nvecs = mynvecs, verbose=1, statdir=paste(output.path2))
end_time <- Sys.time()
t1time<-end_time - start_time
print(t1time)
imgpredtrain_mang<-mang.train %*% (myeig2_mang$eig1)
imgpredtest_mang<-mang.test %*% (myeig2_mang$eig1)
####start do single alex
ncolcombo<-ncol( imgpredtrain_mang)
projs.train <- data.frame(dist4.train, imgpredtrain_mang) # column combind the behavior wth the projections
colnames(projs.train) <- c('Dist_4', paste0('Proj', c(1:ncolcombo)))
projs.test <- data.frame(dist4.test, imgpredtest_mang ) # column combind the behavior wth the projections
colnames(projs.test) <- c('Dist_4', paste0('Proj', c(1:ncolcombo)))
###end do single alex
mylm <- lm('Dist_4 ~ .', data=projs.train) # behavior correlation with projections
summylm<-summary(mylm)
summanovalm<-anova(mylm)
rSquared <- summary(mylm)$r.squared
pVal <- anova(mylm)$'Pr(>F)'[1]
mylmsummary<-glance(mylm)
pval1<-mylmsummary$p.value
e2i_mang<-matrixToImages((t(myeig2_mang$eig1)),mask = mask)
for (i in 1:mynvecs){
antsImageWrite(e2i_mang[[i]],paste(output.path,extension,'sd2eig_' ,as.character(i), 'fold', toString(myfold), '_Mn.nii.gz',sep=''))
# antsImageWrite(e2i_jac[[i]],paste(output.path,extension,'sd2eig_' ,as.character(i), 'fold', toString(myfold), '_jac.nii.gz',sep=''))
# antsImageWrite(e2i_chi[[i]],paste(output.path,extension,'sd2eig_' ,as.character(i), 'fold', toString(myfold), '_chi.nii.gz',sep=''))
}
distpred4 <- predict.lm(mylm, newdata=projs.test) # based on the linear model predict the distances for the same day
glance(cor.test(projs.test$Dist_4,(distpred4)))
glance(cor.test(distpred4,dist4.test))
#remove next lines for LOOCV
mymodel<-lm(distpred4~dist4.test)
modsum <-summary(mymodel)
r2 <- modsum$r.squared #modsum$adj.r.squared
# my.p <- modsum$coefficients[2,4]
RMSE2<-sqrt(mean((distpred4 - dist4.test)^2))
performances[myfold]<-RMSE2
myR2score[myfold]<-myr2score(distpred4,dist4.test)
myps[myfold]<-pval1<-mylmsummary$p.value #my.p
myBICs[myfold] <- BIC(mylm)
###
mytheme <- theme(panel.grid.minor = element_blank(), panel.grid.major = element_blank(),
panel.background = element_rect(fill = "white"))
myplot<- visreg(mymodel, gg=TRUE)
myplot + theme(panel.grid.minor = element_blank(), panel.grid.major = element_blank(),
panel.background = element_rect(fill = "transparent", colour = NA),
#xaxs="i", yaxs="i",
axis.line.x = element_line(colour = 'black', size=0.5, linetype='solid'),
axis.line.y = element_line(colour = 'black', size=0.5, linetype='solid')) +
ggtitle(paste("RMSE=",formatC(RMSE2,digits=2, format="f"), "p=",formatC(myps[myfold],digits=4, format="f"), " BIC=", formatC(BIC(mymodel),digits=2, format="f")))
ggsave(paste(output.path,extension,'Mnfold',toString(myfold),'.pdf',sep=''), plot = last_plot(), device = 'pdf',
scale = 1, width = 4, height = 4, units = c("in"),dpi = 300)
save(mylm, file=paste(output.path , "model2", toString(myfold), ".Rdata", sep=''))
save(mymodel, file=paste(output.path , "behavmodelsd2", toString(myfold), ".Rdata", sep=''))
myperf<-data.frame(rbind(distpred4,dist4.test),row.names=c("d_predicted","d_valid"))
write.csv(myperf, file = paste(output.path ,extension,'distances4_pv_fold' , toString(myfold), '.csv',sep=''))
myperf<-data.frame(c(RMSE2,myR2score[myfold],myps[myfold],myBICs[myfold], r2),row.names=c("RMSE2","R2score","p","BIC", "R2"))
write.csv(myperf, file = paste(output.path ,extension,'distances4_stats_fold' , toString(myfold), '.csv',sep=''))
gc(verbose = TRUE, reset = FALSE)
}
###################################
##### for validation now ####
###################################
myminfold<-which(performances == min(performances), arr.ind = TRUE)
myfold<-myminfold
load(file=paste(output.path , "model2", toString(myfold), ".Rdata", sep='')) # loads mylm
ncolcombo<-mynvecs
rows.valid <- c(1:24)
mang.valid <- mang_mat[rows.valid, ]
#read eigenregions for best myfold
#paste(output.path,extension,'sd2eig' ,as.character(i), 'fold', toString(myfold), '.nii.gz',sep='')
eig_files_Mn <- list.files(path = paste(output.path,sep=''), pattern=paste('*', 'fold', toString(myfold), '_Mn.nii.gz', sep=''),full.names = T,recursive = T)
eig_mat_Mn <- imagesToMatrix(eig_files_Mn,mask)
imgmat_mang_valid <- mang.valid %*% t(eig_mat_Mn) # [24,numvox] [nvecsx3,numvox]
dist4.valid <- behavior[rows.valid, 'd4']
projs.valid <- data.frame(cbind(dist4.valid,imgmat_mang_valid))
colnames(projs.valid) <- c('Dist_4', paste0('Proj', c(1:ncolcombo)))
distpred <- predict.lm(mylm, newdata=projs.valid)
mymodel<-lm(distpred~dist4.valid)
RSS <- c(crossprod(mymodel$residuals))
MSE <- RSS / length(mymodel$residuals)
RMSE <- sqrt(MSE)
RMSE2<-sqrt(mean((distpred - dist4.valid)^2))
mysummary <-summary(mymodel)
r2pred <- mysummary$adj.r.squared
ppred <- mysummary$coefficients[2,4]
max(behavior$d4[1:24])
RMSE_valid<-RMSE2
BIC_valid <- BIC(mymodel)
R2score_valid<-myr2score(distpred,dist4.valid)
res_cor<-cor.test(dist4.valid,distpred)
myplot<- visreg(mymodel, gg=TRUE, scale='linear', plot=TRUE, xlim=c(0,max(dist4.valid)),ylim=c(0,max(dist4.valid)))
myplot2<-plot(myplot,xlim=c(0,max(dist4.valid)),ylim=c(0,max(dist4.valid)))
ggsave(paste(output.path,extension,'MnValidationSet',toString(myfold),'sd2plainjane.pdf',sep=''), plot = last_plot(), device = 'pdf',
scale = 1, width = 4, height = 4, units = c("in"),dpi = 300)
myplot + theme(panel.grid.minor = element_blank(), panel.grid.major = element_blank(),
panel.background = element_rect(fill = "transparent", colour = NA),
axis.line.x = element_line(colour = 'black', size=0.5, linetype='solid'),
axis.line.y = element_line(colour = 'black', size=0.5, linetype='solid'))+
#xlim(0,1200)+ylim(0,1200)+coord_cartesian(xlim = c(1200, 1200),ylim = c(1200, 1200)) + coord_equal()+
ggtitle(paste("RMSE=",formatC(RMSE_valid,digits=2, format="f"),
# "R2score=",formatC(R2score_valid,digits=2, format="f"),
# " R2=", formatC(r2pred,digits=2, format="f"),
" p= ", formatC(ppred,digits=4, format="f"),
" BIC=", formatC(BIC_valid,digits=2, format="f")))
ggsave(paste(output.path,extension,'MnValidationSet',toString(myfold),'sd2.pdf',sep=''), plot = last_plot(), device = 'pdf',
scale = 1, width = 4, height = 4, units = c("in"),dpi = 300)
numcols<-dim(projs.valid)[2]
rd4 <- t(t(dist4.valid)[rep(1,c(3*mynvecs)),])
pcor<-c(numcols)
corval<-c(numcols)
for (i in 1:numcols) {
mypcor<-cor.test(t(dist4.valid),t(projs.valid[,i]))
pcor[i]<-mypcor$p.value
corval[i]<-mypcor$estimate
}
rt<-glance(cor.test(dist4.valid,distpred))
corval[1]<-rt$estimate
pcor[1]<-rt$p.value
mycorsdf_eig2d4<-data.frame(rbind(pcor,corval),row.names=c("pcor","cor"))
colnames(mycorsdf_eig2d4)<-c('total', paste0('Proj', c(1:ncolcombo)))
write.csv(mycorsdf_eig2d4, file = paste(output.path ,extension,'fold', toString(myfold), 'd4corsvalidsd2.csv',sep=''))
myperf<-data.frame(rbind(distpred,dist4.valid),row.names=c("d_predicted","d_valid"))
write.csv(myperf, file = paste(output.path ,extension,'fold', toString(myfold), 'distances4_validsd2.csv',sep=''))
# --- Validation of the mangrove (Mn) sparse-decomposition model -------------
# NOTE(review): this chunk depends on objects defined earlier in the script
# (mang.valid, dist4.valid, myits, myclus, mysmooth, mysp, mynvecs, mask,
# mylm, ncolcombo, output.path, extension, myfold, behavior, myr2score) --
# it is not runnable in isolation.
# Sparse CCA between the validation image matrix and the validation distances.
myeig2_mang_valid<-sparseDecom2(inmatrix = list(mang.valid,as.matrix(dist4.valid)),its = myits, cthresh=c(myclus,0), smooth = mysmooth, mycoption = 0, sparseness = c(mysp,1), nvecs = mynvecs, verbose=1, statdir=paste(output.path))
#myeig2_mang<-sparseDecom2(inmatrix = list(mang.train,as.matrix(behav.train$d4)),its = myits, cthresh=c(myclus,0), smooth = mysmooth, mycoption = 0, sparseness = c(mysp,1), nvecs = mynvecs, verbose=1, statdir=paste(output.path))
# Convert each eigenvector back into image space using the brain mask.
e2i_mang_valid<-matrixToImages((t(myeig2_mang_valid$eig1)),mask = mask)
#imgpredtrain_mang<-mang.train %*% (myeig2_mang$eig1)
# Write one NIfTI image per eigenvector to the output directory.
for (i in 1:mynvecs){
antsImageWrite(e2i_mang_valid[[i]],paste(output.path,extension,'full_eig' ,as.character(i), '_Mn.nii.gz',sep=''))
}
#redo fold min or save models
# Project validation images onto the eigenvectors.
# NOTE(review): t() is applied to e2i_mang_valid, which matrixToImages
# returned above (a list of antsImage objects) -- verify this transpose is
# operating on the object actually intended here.
imgmat_mang_valid <- mang.valid %*% t(e2i_mang_valid) # [24,numvox] [nvecsx3,numvox]
# Assemble projections next to the true distances and predict with the
# previously fit linear model (mylm, fit earlier in the script).
projs.valid <- data.frame(cbind(dist4.valid,imgmat_mang_valid))
colnames(projs.valid) <- c('Dist_4', paste0('Proj', c(1:ncolcombo)))
distpred <- predict.lm(mylm, newdata=projs.valid)
# Regress predicted on observed to assess out-of-sample fit.
mymodel<-lm(distpred~dist4.valid)
RSS <- c(crossprod(mymodel$residuals))
MSE <- RSS / length(mymodel$residuals)
RMSE <- sqrt(MSE)
# RMSE2 is computed directly from prediction error (this is the one reported).
RMSE2<-sqrt(mean((distpred - dist4.valid)^2))
mysummary <-summary(mymodel)
r2pred <- mysummary$adj.r.squared
ppred <- mysummary$coefficients[2,4]  # p-value of the slope term
max(behavior$d4[1:24])
RMSE_valid<-RMSE2
BIC_valid <- BIC(mymodel)
R2score_valid<-myr2score(distpred,dist4.valid)
res_cor<-cor.test(dist4.valid,distpred)
# Plot predicted-vs-observed with visreg; save an unannotated ("plainjane")
# version first, then an annotated version with fit statistics in the title.
myplot<- visreg(mymodel, gg=TRUE, scale='linear', plot=TRUE, xlim=c(0,max(dist4.valid)),ylim=c(0,max(dist4.valid)))
myplot2<-plot(myplot,xlim=c(0,max(dist4.valid)),ylim=c(0,max(dist4.valid)))
ggsave(paste(output.path,extension,'newMnValidationSetFULL',toString(myfold),'sd2plainjane.pdf',sep=''), plot = last_plot(), device = 'pdf',
scale = 1, width = 4, height = 4, units = c("in"),dpi = 300)
myplot + theme(panel.grid.minor = element_blank(), panel.grid.major = element_blank(),
panel.background = element_rect(fill = "transparent", colour = NA),
axis.line.x = element_line(colour = 'black', size=0.5, linetype='solid'),
axis.line.y = element_line(colour = 'black', size=0.5, linetype='solid'))+
#xlim(0,1200)+ylim(0,1200)+coord_cartesian(xlim = c(1200, 1200),ylim = c(1200, 1200)) + coord_equal()+
ggtitle(paste("RMSE=",formatC(RMSE_valid,digits=2, format="f"),
# "R2score=",formatC(R2score_valid,digits=2, format="f"),
# " R2=", formatC(r2pred,digits=2, format="f"),
" p= ", formatC(ppred,digits=4, format="f"),
" BIC=", formatC(BIC_valid,digits=2, format="f")))
ggsave(paste(output.path,extension,'newMnValidationSetFULL',toString(myfold),'sd2.pdf',sep=''), plot = last_plot(), device = 'pdf',
scale = 1, width = 4, height = 4, units = c("in"),dpi = 300)
# Correlate the true distances with each projection column (column 1 is the
# distance itself; it is overwritten below with the prediction correlation).
numcols<-dim(projs.valid)[2]
rd4 <- t(t(dist4.valid)[rep(1,c(3*mynvecs)),])
pcor<-c(numcols)
corval<-c(numcols)
for (i in 1:numcols) {
mypcor<-cor.test(t(dist4.valid),t(projs.valid[,i]))
pcor[i]<-mypcor$p.value
corval[i]<-mypcor$estimate
}
# First column reports observed-vs-predicted correlation instead.
rt<-glance(cor.test(dist4.valid,distpred))
corval[1]<-rt$estimate
pcor[1]<-rt$p.value
# Persist the correlation table and the prediction/observation pairs as CSVs.
mycorsdf_eig2d4<-data.frame(rbind(pcor,corval),row.names=c("pcor","cor"))
colnames(mycorsdf_eig2d4)<-c('total', paste0('Proj', c(1:ncolcombo)))
write.csv(mycorsdf_eig2d4, file = paste(output.path ,extension,'FULL', 'd4corsvalidsd2.csv',sep=''))
myperf<-data.frame(rbind(distpred,dist4.valid),row.names=c("d_predicted","d_valid"))
write.csv(myperf, file = paste(output.path ,extension,'FULL', 'distances4_validsd2.csv',sep=''))
|
############################
# Great Sea Reef Data Set-up
############################
# Sets the working directory, loads all Great Sea Reef (GSR) spatial layers,
# rasterizes the geomorphic/ecological layers to 25 m plotting data frames,
# prepares bathymetry/turbidity/survey-site data, and draws a quick overview
# map (map1, printed by the line that follows this script section).

# Clean environment
rm(list = ls())

# Setup
###########################
# Preparing packages
if (!require("pacman")) install.packages("pacman")
# get the most updated version of ggrepel
# devtools::install_github("slowkow/ggrepel") # add library(ggrepel) if version is not 0.9.0 [can check using sessionInfo()]

# Load packages (may have to use library(metR) to load that package)
pacman::p_load(dplyr, fasterize, ggplot2, ggrepel, ggsn, ggspatial, metR, raster,
               RColorBrewer, rgdal, rgeos, scales, sf, sp, tidyr)

# Setting data directory
# BUG FIX: setwd() invisibly returns the *previous* working directory, so the
# original `gis_dir <- setwd(path)` stored whatever directory R happened to be
# in, not the GIS directory. Store the path first, then change into it.
gis_dir <- "C:\\Users\\free\\Dropbox (MPAMystery)\\RICOTTA_GIS\\oceans_program\\dom\\fiji_report\\gis_r"
setwd(gis_dir)
gis_dir # check that correct directory has been set

# Verify that all associated files are in directory
list.files(gis_dir)

# Setting output map directories
gsr_map_dir <- "C:\\Users\\free\\Dropbox (MPAMystery)\\RICOTTA_GIS\\oceans_program\\dom\\fiji_report\\maps\\gsr"
province_map_dir <- "C:\\Users\\free\\Dropbox (MPAMystery)\\RICOTTA_GIS\\oceans_program\\dom\\fiji_report\\maps\\province"
qoliqoli_map_dir <- "C:\\Users\\free\\Dropbox (MPAMystery)\\RICOTTA_GIS\\oceans_program\\dom\\fiji_report\\maps\\qoliqoli"

# Helper: rasterize an sf polygon layer onto a 25 m template and return both
# the raster and a long-format data frame ready for ggplot (columns:
# longitude, latitude, <value_name> filled with the constant <value_label>).
# Factors out the template/fasterize/as.data.frame pattern repeated for every
# geomorphic and ecological layer below.
rasterize_layer <- function(layer_sf, value_name, value_label, res = 25) {
  template <- raster(extent(layer_sf), res = res, crs = layer_sf) # template raster with the layer's extent
  rast <- fasterize(layer_sf, template)                           # burn polygons into the template
  map_df <- raster::as.data.frame(rast, xy = TRUE)
  map_df <- dplyr::filter(map_df, !is.na(layer))                  # keep only cells covered by the layer
  map_df <- setNames(map_df, c("longitude", "latitude", value_name))
  map_df[[value_name]] <- value_label                             # constant label used for the legend
  list(rast = rast, map = map_df)
}

# Read data
###########################
# Administrative boundary data
fiji <- st_read(dsn = gis_dir, layer = "fiji") # Fiji land administrative boundary
fji_eez <- st_read(dsn = gis_dir, layer = "fiji_eez") # Fijian exclusive economic zone
qoliqoli <- st_read(dsn = gis_dir, layer = "qoliqoli") # Qoliqoli (district) boundaries where survey sites occurred
provinces <- st_read(dsn = gis_dir, layer = "province") # Fiji provinces where survey sites occurred
gsr <- st_read(dsn = gis_dir, layer = "gsr") # Great Sea Reef boundary

# Ecological data
fji_coral <- st_read(dsn = gis_dir, layer = "fiji_coral") # Coral data extent in Great Sea Reef
fji_seagrass <- st_read(dsn = gis_dir, layer = "fiji_seagrass") %>% # Seagrass extent in Great Sea Reef
  st_buffer(100) %>% # add a 100-meter buffer around the data
  st_cast("MULTIPOLYGON") # make as multipolygon again
fji_mangrove96 <- st_read(dsn = gis_dir, layer = "fji_mang96") # Mangrove data in Great Sea Reef for 1996
fji_mangrove16 <- st_read(dsn = gis_dir, layer = "fji_mang16") # Mangrove data in Great Sea Reef for 2016
fji_mang_gain <- st_read(dsn = gis_dir, layer = "fji_mang_gain") %>% # Mangrove extent gained between 1996 and 2016
  st_buffer(100) %>% # add a 100-meter buffer around the data
  st_cast("MULTIPOLYGON") # make as multipolygon again
fji_mang_loss <- st_read(dsn = gis_dir, layer = "fji_mang_loss") %>% # Mangrove extent lost between 1996 and 2016
  st_buffer(100) %>% # add a 100-meter buffer around the data
  st_cast("MULTIPOLYGON") # make as multipolygon again

# Geomorphic data
fji_geo <- st_read(dsn = gis_dir, layer = "fiji_geo") # loads the geomorphic data
levels(fji_geo$Geo_Zone) # get list of unique geomorphic zones
object.size(fji_geo) # 0.16 GB

# Separate geomorphic zones
fji_irf <- filter(fji_geo, Geo_Zone == "Inner Reef Flat")
fji_orf <- filter(fji_geo, Geo_Zone == "Outer Reef Flat")
fji_plat <- filter(fji_geo, Geo_Zone == "Plateau")
fji_rc <- filter(fji_geo, Geo_Zone == "Reef Crest")
fji_rs <- filter(fji_geo, Geo_Zone == "Reef Slope")
fji_sl <- filter(fji_geo, Geo_Zone == "Shallow Lagoon")
fji_srs <- filter(fji_geo, Geo_Zone == "Sheltered Reef Slope")
fji_trf <- filter(fji_geo, Geo_Zone == "Terrestrial Reef Flat")
fji_unk <- filter(fji_geo, Geo_Zone == "Unknown")

# Rasterize geomorphic data
# All zones together (keeps the raw "layer" column; no relabelling)
geo_temp <- raster(extent(fji_geo), res = 25, crs = fji_geo)
geo_rast <- fasterize(fji_geo, geo_temp)
geo_map <- raster::as.data.frame(geo_rast, xy = TRUE) %>%
  dplyr::filter(!is.na(layer))
object.size(geo_map) # check how large data are --> 0.18 GB

# One raster + plotting data frame per geomorphic zone
tmp <- rasterize_layer(fji_irf, "irf", "Inner Reef Flat")
irf_rast <- tmp$rast
irf_map <- tmp$map
plot(irf_rast)

tmp <- rasterize_layer(fji_orf, "orf", "Outer Reef Flat")
orf_rast <- tmp$rast
orf_map <- tmp$map
plot(orf_rast)

tmp <- rasterize_layer(fji_plat, "plat", "Plateau")
plat_rast <- tmp$rast
plat_map <- tmp$map
plot(plat_rast)

tmp <- rasterize_layer(fji_rc, "rc", "Reef Crest")
rc_rast <- tmp$rast
rc_map <- tmp$map
plot(rc_rast)

tmp <- rasterize_layer(fji_rs, "rs", "Reef Slope")
rs_rast <- tmp$rast
rs_map <- tmp$map
plot(rs_rast)

tmp <- rasterize_layer(fji_sl, "sl", "Shallow Lagoon")
sl_rast <- tmp$rast
sl_map <- tmp$map
plot(sl_rast)

tmp <- rasterize_layer(fji_srs, "srs", "Sheltered Reef Slope")
srs_rast <- tmp$rast
srs_map <- tmp$map
plot(srs_rast)

tmp <- rasterize_layer(fji_trf, "trf", "Terrestrial Reef Flat")
trf_rast <- tmp$rast
trf_map <- tmp$map
plot(trf_rast)

tmp <- rasterize_layer(fji_unk, "unk", "Unknown")
unk_rast <- tmp$rast
unk_map <- tmp$map
plot(unk_rast)

# Rasterize ecological data
# Coral data
tmp <- rasterize_layer(fji_coral, "coral", "Coral")
coral_rast <- tmp$rast
coral_map <- tmp$map
plot(coral_rast) # check to make sure that the data appears correctly
object.size(coral_map) # size = 0.03 GB

# Mangrove 2016
tmp <- rasterize_layer(fji_mangrove16, "mangrove", "Mangrove")
mangrove16_rast <- tmp$rast
mangrove16_map <- tmp$map
plot(mangrove16_rast)

# Mangrove 1996
tmp <- rasterize_layer(fji_mangrove96, "mangrove", "Mangrove")
mangrove96_rast <- tmp$rast
mangrove96_map <- tmp$map
plot(mangrove96_rast)

# Mangrove gain -- 100 meter buffer
tmp <- rasterize_layer(fji_mang_gain, "gain", "Gain")
mangrove_gain_rast <- tmp$rast
mangrove_gain_map <- tmp$map
plot(mangrove_gain_rast)

# Mangrove loss -- 100 meter buffer
tmp <- rasterize_layer(fji_mang_loss, "loss", "Loss")
mangrove_loss_rast <- tmp$rast
mangrove_loss_map <- tmp$map
plot(mangrove_loss_rast)

# Seagrass data -- 100 meter buffer
tmp <- rasterize_layer(fji_seagrass, "seagrass", "Seagrass")
seagrass_rast <- tmp$rast
seagrass_map <- tmp$map
plot(seagrass_rast) # check to make sure that the data appears correctly
rm(tmp)

# Geophysical
# Bathymetry
fji_bath <- raster("fji_bath.tif")
object.size(fji_bath)
# To make size smaller, we can focus on an area slightly larger than the
# extent of the Great Sea Reef
extent(gsr)

# Create mask on bathymetry data: rectangular area of interest around the GSR
gsr_aoi <- data.frame(list(rbind(c(470000, 7910000), # southwest corner
                                 c(855000, 7910000), # southeast corner
                                 c(855000, 8255000), # northeast corner
                                 c(470000, 8255000)))) %>% # northwest corner
  # Rename columns
  rename(Long = X1,
         Lat = X2) %>%
  # Change to be a simple feature (same coordinate reference system as all the other data)
  st_as_sf(coords = c("Long", "Lat"), crs = 32760) %>%
  # Change to be a multipoint
  # NOTE(review): the "geomtery" misspelling is kept from the original on
  # purpose -- renaming it may change how sf resolves the geometry column in
  # summarise(); confirm before correcting.
  summarise(geomtery = st_combine(geometry)) %>%
  # Change to be a polygon
  st_cast("POLYGON")
class(gsr_aoi) # verify that gsr_aoi is a simple feature / data frame

# Extract the bathymetry data for the smaller extent
bath_aoi <- mask(x = fji_bath, mask = gsr_aoi)
bath_map <- raster::as.data.frame(bath_aoi, xy = TRUE)
bath_map <- setNames(bath_map, c("longitude", "latitude", "depth"))
bath_map <- dplyr::filter(bath_map, depth >= 1)
plot(bath_aoi)

# Turbidity
fji_sed <- raster("Fiji_turbidity.tif")
sediment_map <- raster::as.data.frame(fji_sed, xy = TRUE) %>%
  dplyr::filter(Fiji_turbidity > 1, Fiji_turbidity < 11) %>%
  setNames(c("longitude", "latitude", "relative"))
plot(fji_sed)

# Historic survey site data
# (The original selected 14 columns and then immediately dropped 7 of them;
# collapsed here into one select producing the same columns in the same
# order: Site, District, Historic_f, Latitude, Longitude, Province_1, geometry.)
surv_site <- st_read(dsn = gis_dir, layer = "gsr_survey_sites") %>%
  dplyr::select(Site, District, Historic_f, Latitude, Longitude, Province_1, geometry) %>%
  # Rename columns
  rename(site = Site,
         district = District,
         surveyor = Historic_f,
         latitude = Latitude,
         longitude = Longitude,
         province = Province_1)
levels(surv_site$surveyor) # get order of the surveyors
levels(surv_site$province) # survey sites were not conducted in Ra Province

# Add Ra province to the provinces field to assist with the map for loops
levels(surv_site$province) <- c(levels(surv_site$province), "Ra")
levels(surv_site$province) # see that Ra now appears for the field

# Subset out the survey sites by surveyor
wwf_site <- surv_site %>%
  dplyr::filter(surveyor == "WWF") # only historic WWF sites
eia_site <- surv_site %>%
  dplyr::filter(surveyor == "Ba EIA") # only historic EIA sites in Ba province
rfc_site <- surv_site %>%
  dplyr::filter(surveyor == "Reef Check") # only historic Reef Check sites
new_site <- surv_site %>%
  dplyr::filter(surveyor == "New Site") # only new site locations

# Quick map of the area
map1 <- ggplot() +
  geom_sf(data = fji_eez, fill = NA, size = 0.05) +
  geom_sf(data = fiji, fill = "red") +
  geom_sf(data = gsr) +
  geom_sf(data = qoliqoli) +
  geom_sf(data = provinces, fill = "green") +
  geom_sf(data = surv_site) +
  scale_x_longitude(breaks = seq(-178, 180, 2)) +
  xlab("Longitude") +
  ylab("Latitude") +
  ggtitle("Quick map") +
  theme_bw()
map1 | /wwf_fiji_maps/gsr_maps_r_setup.R | no_license | bpfree/work_sample | R | false | false | 14,612 | r | ############################
# Great Sea Reef Data Set-up
############################
# Sets the working directory, loads all Great Sea Reef (GSR) spatial layers,
# rasterizes the geomorphic/ecological layers to 25 m plotting data frames,
# prepares bathymetry/turbidity/survey-site data, and draws a quick overview
# map (map1, printed by the line that follows this script section).

# Clean environment
rm(list = ls())

# Setup
###########################
# Preparing packages
if (!require("pacman")) install.packages("pacman")
# get the most updated version of ggrepel
# devtools::install_github("slowkow/ggrepel") # add library(ggrepel) if version is not 0.9.0 [can check using sessionInfo()]

# Load packages (may have to use library(metR) to load that package)
pacman::p_load(dplyr, fasterize, ggplot2, ggrepel, ggsn, ggspatial, metR, raster,
               RColorBrewer, rgdal, rgeos, scales, sf, sp, tidyr)

# Setting data directory
# BUG FIX: setwd() invisibly returns the *previous* working directory, so the
# original `gis_dir <- setwd(path)` stored whatever directory R happened to be
# in, not the GIS directory. Store the path first, then change into it.
gis_dir <- "C:\\Users\\free\\Dropbox (MPAMystery)\\RICOTTA_GIS\\oceans_program\\dom\\fiji_report\\gis_r"
setwd(gis_dir)
gis_dir # check that correct directory has been set

# Verify that all associated files are in directory
list.files(gis_dir)

# Setting output map directories
gsr_map_dir <- "C:\\Users\\free\\Dropbox (MPAMystery)\\RICOTTA_GIS\\oceans_program\\dom\\fiji_report\\maps\\gsr"
province_map_dir <- "C:\\Users\\free\\Dropbox (MPAMystery)\\RICOTTA_GIS\\oceans_program\\dom\\fiji_report\\maps\\province"
qoliqoli_map_dir <- "C:\\Users\\free\\Dropbox (MPAMystery)\\RICOTTA_GIS\\oceans_program\\dom\\fiji_report\\maps\\qoliqoli"

# Helper: rasterize an sf polygon layer onto a 25 m template and return both
# the raster and a long-format data frame ready for ggplot (columns:
# longitude, latitude, <value_name> filled with the constant <value_label>).
# Factors out the template/fasterize/as.data.frame pattern repeated for every
# geomorphic and ecological layer below.
rasterize_layer <- function(layer_sf, value_name, value_label, res = 25) {
  template <- raster(extent(layer_sf), res = res, crs = layer_sf) # template raster with the layer's extent
  rast <- fasterize(layer_sf, template)                           # burn polygons into the template
  map_df <- raster::as.data.frame(rast, xy = TRUE)
  map_df <- dplyr::filter(map_df, !is.na(layer))                  # keep only cells covered by the layer
  map_df <- setNames(map_df, c("longitude", "latitude", value_name))
  map_df[[value_name]] <- value_label                             # constant label used for the legend
  list(rast = rast, map = map_df)
}

# Read data
###########################
# Administrative boundary data
fiji <- st_read(dsn = gis_dir, layer = "fiji") # Fiji land administrative boundary
fji_eez <- st_read(dsn = gis_dir, layer = "fiji_eez") # Fijian exclusive economic zone
qoliqoli <- st_read(dsn = gis_dir, layer = "qoliqoli") # Qoliqoli (district) boundaries where survey sites occurred
provinces <- st_read(dsn = gis_dir, layer = "province") # Fiji provinces where survey sites occurred
gsr <- st_read(dsn = gis_dir, layer = "gsr") # Great Sea Reef boundary

# Ecological data
fji_coral <- st_read(dsn = gis_dir, layer = "fiji_coral") # Coral data extent in Great Sea Reef
fji_seagrass <- st_read(dsn = gis_dir, layer = "fiji_seagrass") %>% # Seagrass extent in Great Sea Reef
  st_buffer(100) %>% # add a 100-meter buffer around the data
  st_cast("MULTIPOLYGON") # make as multipolygon again
fji_mangrove96 <- st_read(dsn = gis_dir, layer = "fji_mang96") # Mangrove data in Great Sea Reef for 1996
fji_mangrove16 <- st_read(dsn = gis_dir, layer = "fji_mang16") # Mangrove data in Great Sea Reef for 2016
fji_mang_gain <- st_read(dsn = gis_dir, layer = "fji_mang_gain") %>% # Mangrove extent gained between 1996 and 2016
  st_buffer(100) %>% # add a 100-meter buffer around the data
  st_cast("MULTIPOLYGON") # make as multipolygon again
fji_mang_loss <- st_read(dsn = gis_dir, layer = "fji_mang_loss") %>% # Mangrove extent lost between 1996 and 2016
  st_buffer(100) %>% # add a 100-meter buffer around the data
  st_cast("MULTIPOLYGON") # make as multipolygon again

# Geomorphic data
fji_geo <- st_read(dsn = gis_dir, layer = "fiji_geo") # loads the geomorphic data
levels(fji_geo$Geo_Zone) # get list of unique geomorphic zones
object.size(fji_geo) # 0.16 GB

# Separate geomorphic zones
fji_irf <- filter(fji_geo, Geo_Zone == "Inner Reef Flat")
fji_orf <- filter(fji_geo, Geo_Zone == "Outer Reef Flat")
fji_plat <- filter(fji_geo, Geo_Zone == "Plateau")
fji_rc <- filter(fji_geo, Geo_Zone == "Reef Crest")
fji_rs <- filter(fji_geo, Geo_Zone == "Reef Slope")
fji_sl <- filter(fji_geo, Geo_Zone == "Shallow Lagoon")
fji_srs <- filter(fji_geo, Geo_Zone == "Sheltered Reef Slope")
fji_trf <- filter(fji_geo, Geo_Zone == "Terrestrial Reef Flat")
fji_unk <- filter(fji_geo, Geo_Zone == "Unknown")

# Rasterize geomorphic data
# All zones together (keeps the raw "layer" column; no relabelling)
geo_temp <- raster(extent(fji_geo), res = 25, crs = fji_geo)
geo_rast <- fasterize(fji_geo, geo_temp)
geo_map <- raster::as.data.frame(geo_rast, xy = TRUE) %>%
  dplyr::filter(!is.na(layer))
object.size(geo_map) # check how large data are --> 0.18 GB

# One raster + plotting data frame per geomorphic zone
tmp <- rasterize_layer(fji_irf, "irf", "Inner Reef Flat")
irf_rast <- tmp$rast
irf_map <- tmp$map
plot(irf_rast)

tmp <- rasterize_layer(fji_orf, "orf", "Outer Reef Flat")
orf_rast <- tmp$rast
orf_map <- tmp$map
plot(orf_rast)

tmp <- rasterize_layer(fji_plat, "plat", "Plateau")
plat_rast <- tmp$rast
plat_map <- tmp$map
plot(plat_rast)

tmp <- rasterize_layer(fji_rc, "rc", "Reef Crest")
rc_rast <- tmp$rast
rc_map <- tmp$map
plot(rc_rast)

tmp <- rasterize_layer(fji_rs, "rs", "Reef Slope")
rs_rast <- tmp$rast
rs_map <- tmp$map
plot(rs_rast)

tmp <- rasterize_layer(fji_sl, "sl", "Shallow Lagoon")
sl_rast <- tmp$rast
sl_map <- tmp$map
plot(sl_rast)

tmp <- rasterize_layer(fji_srs, "srs", "Sheltered Reef Slope")
srs_rast <- tmp$rast
srs_map <- tmp$map
plot(srs_rast)

tmp <- rasterize_layer(fji_trf, "trf", "Terrestrial Reef Flat")
trf_rast <- tmp$rast
trf_map <- tmp$map
plot(trf_rast)

tmp <- rasterize_layer(fji_unk, "unk", "Unknown")
unk_rast <- tmp$rast
unk_map <- tmp$map
plot(unk_rast)

# Rasterize ecological data
# Coral data
tmp <- rasterize_layer(fji_coral, "coral", "Coral")
coral_rast <- tmp$rast
coral_map <- tmp$map
plot(coral_rast) # check to make sure that the data appears correctly
object.size(coral_map) # size = 0.03 GB

# Mangrove 2016
tmp <- rasterize_layer(fji_mangrove16, "mangrove", "Mangrove")
mangrove16_rast <- tmp$rast
mangrove16_map <- tmp$map
plot(mangrove16_rast)

# Mangrove 1996
tmp <- rasterize_layer(fji_mangrove96, "mangrove", "Mangrove")
mangrove96_rast <- tmp$rast
mangrove96_map <- tmp$map
plot(mangrove96_rast)

# Mangrove gain -- 100 meter buffer
tmp <- rasterize_layer(fji_mang_gain, "gain", "Gain")
mangrove_gain_rast <- tmp$rast
mangrove_gain_map <- tmp$map
plot(mangrove_gain_rast)

# Mangrove loss -- 100 meter buffer
tmp <- rasterize_layer(fji_mang_loss, "loss", "Loss")
mangrove_loss_rast <- tmp$rast
mangrove_loss_map <- tmp$map
plot(mangrove_loss_rast)

# Seagrass data -- 100 meter buffer
tmp <- rasterize_layer(fji_seagrass, "seagrass", "Seagrass")
seagrass_rast <- tmp$rast
seagrass_map <- tmp$map
plot(seagrass_rast) # check to make sure that the data appears correctly
rm(tmp)

# Geophysical
# Bathymetry
fji_bath <- raster("fji_bath.tif")
object.size(fji_bath)
# To make size smaller, we can focus on an area slightly larger than the
# extent of the Great Sea Reef
extent(gsr)

# Create mask on bathymetry data: rectangular area of interest around the GSR
gsr_aoi <- data.frame(list(rbind(c(470000, 7910000), # southwest corner
                                 c(855000, 7910000), # southeast corner
                                 c(855000, 8255000), # northeast corner
                                 c(470000, 8255000)))) %>% # northwest corner
  # Rename columns
  rename(Long = X1,
         Lat = X2) %>%
  # Change to be a simple feature (same coordinate reference system as all the other data)
  st_as_sf(coords = c("Long", "Lat"), crs = 32760) %>%
  # Change to be a multipoint
  # NOTE(review): the "geomtery" misspelling is kept from the original on
  # purpose -- renaming it may change how sf resolves the geometry column in
  # summarise(); confirm before correcting.
  summarise(geomtery = st_combine(geometry)) %>%
  # Change to be a polygon
  st_cast("POLYGON")
class(gsr_aoi) # verify that gsr_aoi is a simple feature / data frame

# Extract the bathymetry data for the smaller extent
bath_aoi <- mask(x = fji_bath, mask = gsr_aoi)
bath_map <- raster::as.data.frame(bath_aoi, xy = TRUE)
bath_map <- setNames(bath_map, c("longitude", "latitude", "depth"))
bath_map <- dplyr::filter(bath_map, depth >= 1)
plot(bath_aoi)

# Turbidity
fji_sed <- raster("Fiji_turbidity.tif")
sediment_map <- raster::as.data.frame(fji_sed, xy = TRUE) %>%
  dplyr::filter(Fiji_turbidity > 1, Fiji_turbidity < 11) %>%
  setNames(c("longitude", "latitude", "relative"))
plot(fji_sed)

# Historic survey site data
# (The original selected 14 columns and then immediately dropped 7 of them;
# collapsed here into one select producing the same columns in the same
# order: Site, District, Historic_f, Latitude, Longitude, Province_1, geometry.)
surv_site <- st_read(dsn = gis_dir, layer = "gsr_survey_sites") %>%
  dplyr::select(Site, District, Historic_f, Latitude, Longitude, Province_1, geometry) %>%
  # Rename columns
  rename(site = Site,
         district = District,
         surveyor = Historic_f,
         latitude = Latitude,
         longitude = Longitude,
         province = Province_1)
levels(surv_site$surveyor) # get order of the surveyors
levels(surv_site$province) # survey sites were not conducted in Ra Province

# Add Ra province to the provinces field to assist with the map for loops
levels(surv_site$province) <- c(levels(surv_site$province), "Ra")
levels(surv_site$province) # see that Ra now appears for the field

# Subset out the survey sites by surveyor
wwf_site <- surv_site %>%
  dplyr::filter(surveyor == "WWF") # only historic WWF sites
eia_site <- surv_site %>%
  dplyr::filter(surveyor == "Ba EIA") # only historic EIA sites in Ba province
rfc_site <- surv_site %>%
  dplyr::filter(surveyor == "Reef Check") # only historic Reef Check sites
new_site <- surv_site %>%
  dplyr::filter(surveyor == "New Site") # only new site locations

# Quick map of the area
map1 <- ggplot() +
  geom_sf(data = fji_eez, fill = NA, size = 0.05) +
  geom_sf(data = fiji, fill = "red") +
  geom_sf(data = gsr) +
  geom_sf(data = qoliqoli) +
  geom_sf(data = provinces, fill = "green") +
  geom_sf(data = surv_site) +
  scale_x_longitude(breaks = seq(-178, 180, 2)) +
  xlab("Longitude") +
  ylab("Latitude") +
  ggtitle("Quick map") +
  theme_bw()
map1 |
# Regression test: h2o.glm() must raise an error when every beta constraint is
# inverted (lower bound 100 above upper bound 0) for all predictors.
test.betaConstraints.illegalBounds <- function() {
  Log.info("Importing prostate dataset...")
  prostate_h2o <- h2o.importFile(locate("smalldata/prostate/prostate.csv"))
  predictors <- c("AGE", "RACE", "DPROS", "DCAPS", "PSA", "VOL", "GLEASON")
  Log.info("Create beta constraints frame...")
  # Deliberately illegal bounds: lower (100) exceeds upper (0) everywhere.
  n_pred <- length(predictors)
  constraints <- data.frame(names = predictors,
                            lower_bounds = rep(100, times = n_pred),
                            upper_bounds = rep(0, times = n_pred))
  Log.info("Run a Linear Regression with CAPSULE ~ . with illegal bounds beta->[100,0] in H2O...")
  # The fit must error out; the test passes only when it does.
  expect_error(h2o.glm(x = predictors, y = "CAPSULE", training_frame = prostate_h2o,
                       family = "gaussian", alpha = 0, solver = "L_BFGS",
                       beta_constraints = constraints))
}
doTest("GLM Test: Beta Constraints Illegal Bounds", test.betaConstraints.illegalBounds)
| /h2o-r/tests/testdir_jira/runit_NOPASS_pubdev_1739_invalid_bc.R | permissive | StephRoark/h2o-3 | R | false | false | 840 | r |
# Regression test (duplicate copy): h2o.glm() must raise an error when the
# beta constraints are inverted (lower bound 100 above upper bound 0).
test.betaConstraints.illegalBounds <- function(){
Log.info("Importing prostate dataset...")
prostate_h2o <- h2o.importFile(locate("smalldata/prostate/prostate.csv"))
# Predictor columns used in the GLM.
myX <- c("AGE","RACE", "DPROS", "DCAPS", "PSA", "VOL", "GLEASON")
Log.info("Create beta constraints frame...")
# Deliberately illegal: every lower bound (100) exceeds its upper bound (0).
lowerbound <- rep(100, times = length(myX))
upperbound <- rep(0, times = length(myX))
betaConstraints <- data.frame(names = myX, lower_bounds = lowerbound, upper_bounds = upperbound)
Log.info("Run a Linear Regression with CAPSULE ~ . with illegal bounds beta->[100,0] in H2O...")
# The fit must error out; the test passes only when it does.
expect_error(h2o.glm(x = myX, y = "CAPSULE", training_frame = prostate_h2o, family = "gaussian", alpha = 0, solver="L_BFGS", beta_constraints = betaConstraints))
}
doTest("GLM Test: Beta Constraints Illegal Bounds", test.betaConstraints.illegalBounds)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_tools.R
\name{wbt_clean_vector}
\alias{wbt_clean_vector}
\title{Clean vector}
\usage{
wbt_clean_vector(input, output, wd = NULL, verbose_mode = FALSE)
}
\arguments{
\item{input}{Input vector file.}
\item{output}{Output vector file.}
\item{wd}{Changes the working directory.}
\item{verbose_mode}{Sets verbose mode. If verbose mode is False, tools will not print output messages.}
}
\value{
Returns the tool text outputs.
}
\description{
Removes null features and lines/polygons with fewer than the required number of vertices.
}
| /man/wbt_clean_vector.Rd | permissive | gitWayneZhang/whiteboxR | R | false | true | 614 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_tools.R
\name{wbt_clean_vector}
\alias{wbt_clean_vector}
\title{Clean vector}
\usage{
wbt_clean_vector(input, output, wd = NULL, verbose_mode = FALSE)
}
\arguments{
\item{input}{Input vector file.}
\item{output}{Output vector file.}
\item{wd}{Changes the working directory.}
\item{verbose_mode}{Sets verbose mode. If verbose mode is False, tools will not print output messages.}
}
\value{
Returns the tool text outputs.
}
\description{
Removes null features and lines/polygons with fewer than the required number of vertices.
}
|
#####################################
## Script to trigger all analyses. ##
#####################################
# Builds a simulation grid (state x intervention type x model template) and
# emits one submission command per grid row (qsub on the HPC, Rscript locally).
library(digest)
library(dplyr)
library(openxlsx)
## Declare simulation type and scope ##
# Declare argon opts (extra arguments passed to qsub on the Argon cluster)
argon_opts <- "-q UI,all.q"
# Running on the Argon HPC? (TRUE -> commands are qsub submissions)
is.argon <- TRUE
# Is this a debug run?
isDebug <- FALSE
# Determine the simulation grid
states_to_run <- c(16)
intervention_types_to_run <- c(1,2,3,4,5)
templates_to_use <- c("default.template.mortality.R")
analysis_date <- Sys.Date()
# Create a dated results directory if it does not already exist.
if (!dir.exists(paste0("../Results/", analysis_date))){
dir.create(paste0("../Results/", analysis_date))
}
## Update the datasets ##
system("Rscript UpdateData.R")
## Get intervention data ##
stateData <- read.csv("../Data/covid-19-data/us-states.csv", stringsAsFactors = FALSE)
censusData <- read.xlsx("../Data/nst-est2019-01.xlsx")
uqStates <- sort(unique(intersect(gsub(".", "", censusData$State, fixed = TRUE), stateData$state)))
states_to_run <- data.frame(state_idx=states_to_run,state=uqStates[states_to_run])
interv <- read.csv("../Data/intervention_info.csv") %>%
inner_join(states_to_run, by="state")
grid <- expand.grid(template=templates_to_use,
intervention=intervention_types_to_run,
state=states_to_run$state, stringsAsFactors = FALSE) %>% full_join(interv, by = "state") %>%
select(-fips, -curated)
# short unique identifiers for templates
grid$template_short <- vapply(as.character(grid$template), function(x){
substr(digest::digest(x, algo = "md5"),1,6)},"STRING")
cleanDt <- function(x){
  # Replace every "/" with "." so dates are safe to embed in file names.
  chartr("/", ".", x)
}
grid$outputfile <- apply(grid, 1, function(x){
paste0(paste0("../Results/", analysis_date, "/"),
paste0(paste0(c(x[["state_idx"]],
x[["intervention"]],
cleanDt(x[["intervDate"]]),
cleanDt(x[["reopenDate"]]),
x[["template_short"]]),
collapse = "_"), ".rda"))
})
submission_strings <- vapply(1:nrow(grid), FUN.VALUE = "string", FUN = function(i){
args <- list(s = as.character(grid$state_idx[i]),
m = as.character(grid$template[i]),
d = as.character(grid$intervDate[i]),
r = as.character(grid$reopenDate[i]),
t = as.character(grid$intervention[i]),
b = 1*isDebug,
o = as.character(grid$outputfile[i]))
if (is.argon){
return(paste0("qsub -pe smp 16 -cwd ", argon_opts, " submitJob.sh ", paste0(args, collapse = " ")))
} else {
return(
paste0("Rscript AnalyzeNYT.R ",
paste0("-", paste(names(args), unlist(args), sep =" "), collapse = " "))
)
}
})
for (i in 1:length(submission_strings)){
print(submission_strings[i])
system(submission_strings[i])
}
| /Scripts/RunAllAnalyses.R | permissive | grantbrown/COVID19-US | R | false | false | 2,803 | r | #####################################
## Script to trigger all analyses. ##
#####################################
library(digest)
library(dplyr)
library(openxlsx)
## Declare simulation type and scope ##
# Declare argon opts
argon_opts <- "-q UI,all.q"
# Running gon HPC?
is.argon <- TRUE
# Is this a debug run?
isDebug <- FALSE
# Determine the simulation grid
states_to_run <- c(16)
intervention_types_to_run <- c(1,2,3,4,5)
templates_to_use <- c("default.template.mortality.R")
analysis_date <- Sys.Date()
if (!dir.exists(paste0("../Results/", analysis_date))){
dir.create(paste0("../Results/", analysis_date))
}
## Update the datasets ##
system("Rscript UpdateData.R")
## Get intervention data ##
stateData <- read.csv("../Data/covid-19-data/us-states.csv", stringsAsFactors = FALSE)
censusData <- read.xlsx("../Data/nst-est2019-01.xlsx")
uqStates <- sort(unique(intersect(gsub(".", "", censusData$State, fixed = TRUE), stateData$state)))
states_to_run <- data.frame(state_idx=states_to_run,state=uqStates[states_to_run])
interv <- read.csv("../Data/intervention_info.csv") %>%
inner_join(states_to_run, by="state")
grid <- expand.grid(template=templates_to_use,
intervention=intervention_types_to_run,
state=states_to_run$state, stringsAsFactors = FALSE) %>% full_join(interv, by = "state") %>%
select(-fips, -curated)
# short unique identifiers for templates
grid$template_short <- vapply(as.character(grid$template), function(x){
substr(digest::digest(x, algo = "md5"),1,6)},"STRING")
cleanDt <- function(x){
gsub("/", ".", x, fixed = TRUE)
}
grid$outputfile <- apply(grid, 1, function(x){
paste0(paste0("../Results/", analysis_date, "/"),
paste0(paste0(c(x[["state_idx"]],
x[["intervention"]],
cleanDt(x[["intervDate"]]),
cleanDt(x[["reopenDate"]]),
x[["template_short"]]),
collapse = "_"), ".rda"))
})
submission_strings <- vapply(1:nrow(grid), FUN.VALUE = "string", FUN = function(i){
args <- list(s = as.character(grid$state_idx[i]),
m = as.character(grid$template[i]),
d = as.character(grid$intervDate[i]),
r = as.character(grid$reopenDate[i]),
t = as.character(grid$intervention[i]),
b = 1*isDebug,
o = as.character(grid$outputfile[i]))
if (is.argon){
return(paste0("qsub -pe smp 16 -cwd ", argon_opts, " submitJob.sh ", paste0(args, collapse = " ")))
} else {
return(
paste0("Rscript AnalyzeNYT.R ",
paste0("-", paste(names(args), unlist(args), sep =" "), collapse = " "))
)
}
})
for (i in 1:length(submission_strings)){
print(submission_strings[i])
system(submission_strings[i])
}
|
install.packages("tidyverse")
library(tidyverse)
install.packages("ggthemes")
library(ggthemes)
iris
#pacotes e conjunto de dados carregados
#filtrando os conjuntos
setosa<-iris %>% filter(Species=="setosa")
summary(setosa)
virginica<-iris %>% filter(Species=="virginica")
summary(virginica)
versicolor<-iris %>% filter(Species=="versicolor")
summary(versicolor)
#teste t nas petalas
t.test(setosa$Petal.Length)
t.test(virginica$Petal.Length)
t.test(versicolor$Petal.Length)
t.test(setosa$Petal.Width)
t.test(virginica$Petal.Width)
t.test(versicolor$Petal.Width)
#TESTE NAS SEPALAS
t.test(setosa$Sepal.Length)
t.test(virginica$Sepal.Length)
t.test(versicolor$Sepal.Length)
t.test(setosa$Sepal.Width)
t.test(virginica$Sepal.Width)
t.test(versicolor$Sepal.Width)
#testes de correlacao
cor.test(setosa$Petal.Length,setosa$Petal.Width)
cor.test(versicolor$Petal.Length,versicolor$Petal.Width)
cor.test(virginica$Petal.Length,virginica$Petal.Width)
cor.test(setosa$Sepal.Length,setosa$Sepal.Width)
cor.test(versicolor$Sepal.Length,versicolor$Sepal.Width)
cor.test(virginica$Sepal.Length,virginica$Sepal.Width)
# Plots: scatter of sepal and petal dimensions with a fitted regression line.
# NOTE(review): the original referenced a `dado` object that is never defined
# anywhere in this script; `iris` (the data set loaded above) is the intended
# data, and aes() should use bare column names rather than `dado$...`.
plot1 = ggplot(iris, aes(x = Sepal.Length, y = Sepal.Width, color = Species, fill = Species)) +
  geom_point() +
  stat_smooth(se = FALSE, method = "lm", color = "red") +
  theme_few() +
  labs(x = "Comprimento da sépala", y = "Largura da sépala", color = "Espécies")
plot2 = ggplot(iris, aes(x = Petal.Length, y = Petal.Width, color = Species, fill = Species)) +
  geom_point() +
  stat_smooth(se = FALSE, method = "lm", color = "red") +
  theme_few() +
  labs(x = "Comprimento da pétala", y = "Largura da pétala", color = "Espécies")
library(gridExtra)  # arrange several plots in one device
grid.arrange(plot1, plot2, ncol = 2)
| /Iris(portuguese).R | no_license | marreapato/Data_analysis | R | false | false | 1,665 | r | install.packages("tidyverse")
library(tidyverse)
install.packages("ggthemes")
library(ggthemes)
iris
#pacotes e conjunto de dados carregados
#filtrando os conjuntos
setosa<-iris %>% filter(Species=="setosa")
summary(setosa)
virginica<-iris %>% filter(Species=="virginica")
summary(virginica)
versicolor<-iris %>% filter(Species=="versicolor")
summary(versicolor)
#teste t nas petalas
t.test(setosa$Petal.Length)
t.test(virginica$Petal.Length)
t.test(versicolor$Petal.Length)
t.test(setosa$Petal.Width)
t.test(virginica$Petal.Width)
t.test(versicolor$Petal.Width)
#TESTE NAS SEPALAS
t.test(setosa$Sepal.Length)
t.test(virginica$Sepal.Length)
t.test(versicolor$Sepal.Length)
t.test(setosa$Sepal.Width)
t.test(virginica$Sepal.Width)
t.test(versicolor$Sepal.Width)
#testes de correlacao
cor.test(setosa$Petal.Length,setosa$Petal.Width)
cor.test(versicolor$Petal.Length,versicolor$Petal.Width)
cor.test(virginica$Petal.Length,virginica$Petal.Width)
cor.test(setosa$Sepal.Length,setosa$Sepal.Width)
cor.test(versicolor$Sepal.Length,versicolor$Sepal.Width)
cor.test(virginica$Sepal.Length,virginica$Sepal.Width)
#graficos
plot1=ggplot(dado,aes(x=dado$Sepal.Length,y=dado$Sepal.Width,color=Species,fill=Species))+geom_point()+stat_smooth(se=F,method = "lm",color="red")+
theme_few()+labs(x="Comprimento da sépala",y="Largura da sépala",color="Espécies")
plot2=ggplot(dado,aes(x=Petal.Length,y=Petal.Width,color=Species,fill=Species))+geom_point()+stat_smooth(se=F,method = "lm",color="red")+
theme_few()+labs(x="Comprimento da pétala",y="Largura da pétala",color="Espécies")
require(gridExtra)#varios graficos em um plot
grid.arrange(plot1,plot2,ncol=2)
|
library(flexdashboard)
library(Quandl)
library(readr)
library(dygraphs)
library(stats)
library(ggplot2)
url<-"http://www.richardtwatson.com/data/manheim.csv"
manheim<-read_csv(url)
# creating table
tab<-xtabs(~model + sale, data=manheim)
ftable(tab)
#creating boxplot
ggplot(manheim,aes(model,price)) +
geom_boxplot(outlier.colour='red') +
xlab("Model") + ylab("Price $")
#creating scatterplot - miles by price for each model
ggplot(manheim,aes(miles,price,color=model)) +
geom_point() +
xlab("Miles") + ylab("Price $")
#scatterplot - miles by price for each sale
ggplot(manheim,aes(miles,price,color=sale)) +
geom_point() +
xlab("Miles") + ylab("Price $") | /A15 - FlexDashboard/A15.R | no_license | DataDiamond/Rboot | R | false | false | 672 | r | library(flexdashboard)
library(Quandl)
library(readr)
library(dygraphs)
library(stats)
library(ggplot2)
url<-"http://www.richardtwatson.com/data/manheim.csv"
manheim<-read_csv(url)
# creating table
tab<-xtabs(~model + sale, data=manheim)
ftable(tab)
#creating boxplot
ggplot(manheim,aes(model,price)) +
geom_boxplot(outlier.colour='red') +
xlab("Model") + ylab("Price $")
#creating scatterplot - miles by price for each model
ggplot(manheim,aes(miles,price,color=model)) +
geom_point() +
xlab("Miles") + ylab("Price $")
#scatterplot - miles by price for each sale
ggplot(manheim,aes(miles,price,color=sale)) +
geom_point() +
xlab("Miles") + ylab("Price $") |
\name{ShortenLongNames}
\alias{ShortenLongNames}
\title{Shorten long names for physical constants}
\usage{ShortenLongNames(s)}
\description{\code{ShortenLongNames} shortens long names of physical constants
to avoid errors caused by file paths longer than 100 characters
}
\arguments{
\item{s}{ string with the physical constant's name }
}
\value{short string}
\author{Jose Gama}
\examples{
ShortenLongNames('degree Fahrenheit hour square foot per British thermal unitth inch')
}
\keyword{programming}
| /man/ShortenLongNames.Rd | no_license | cran/NISTunits | R | false | false | 507 | rd | \name{ShortenLongNames}
\alias{ShortenLongNames}
\title{Shorten long names for physical constants}
\usage{ShortenLongNames(s)}
\description{\code{ShortenLongNames} shortens long names for physical constants
because of the possible error with paths over 100 characters long
}
\arguments{
\item{s}{ string with the physical constants names' }
}
\value{short string}
\author{Jose Gama}
\examples{
ShortenLongNames('degree Fahrenheit hour square foot per British thermal unitth inch')
}
\keyword{programming}
|
##########################################################################################
# Designed and developed by Tinniam V Ganesh
# Date : 22 Jun 2016
# Function: CNOT2_01
# This function performs CNOT operation on 2 qubits(Control qubit 0, Target qubit 1)
#
###########################################################################################
#' @title
#' 2 qubit CNOT gate (control-0,target-1)
#'
#' @description
#' This function applies a CNOT gate to 2 qubits. The qubits start from 0,1,2,3,4. Here
#' control is qubit 0 and target is qubit 1
#'
#' @usage
#' CNOT2_01(a)
#'
#' @param a
#' The input
#'
#' @return result
#' The result of applying the CNOT2_01 gate
#'
#' @references
#' \url{https://quantumexperience.ng.bluemix.net/}\cr
#' \url{https://gigadom.wordpress.com/2016/06/23/introducing-qcsimulator-a-5-qubit-quantum-computing-simulator-in-r/}\cr
#'
#' @author
#' Tinniam V Ganesh
#' @note
#' Maintainer: Tinniam V Ganesh \email{tvganesh.85@gmail.com}
#'
#' @examples
#' # Initialze global variables
#' init()
#' CNOT2_01(q11_)
#' CNOT2_01(I4)
#'
#' @seealso
#' \code{\link{CNOT2_10}}\cr
#' \code{\link{PauliX}}\cr
#' \code{\link{measurement}}\cr
#' \code{\link{plotMeasurement}}\cr
#' \code{\link{CNOT5_03}}\cr
#' \code{\link{CNOT3_12}}\cr
#'
#' @export
#'
CNOT2_01 <- function(a){
  # The 4x4 CNOT matrix with control qubit 0 and target qubit 1: it swaps
  # the |10> and |11> basis states and leaves |00> and |01> unchanged.
  gate <- matrix(c(1, 0, 0, 0,
                   0, 1, 0, 0,
                   0, 0, 0, 1,
                   0, 0, 1, 0), nrow = 4, ncol = 4)
  # Apply the gate to the state vector (or operator) `a`.
  gate %*% a
}
| /R/CNOT2_01.R | no_license | cran/QCSimulator | R | false | false | 1,470 | r | ##########################################################################################
# Designed and developed by Tinniam V Ganesh
# Date : 22 Jun 2016
# Function: CNOT2_01
# This function performs CNOT operation on 2 qubits(Control qubit 0, Target qubit 1)
#
###########################################################################################
#' @title
#' 2 qubit CNOT gate (control-0,target-1)
#'
#' @description
#' This function applies a CNOT gate to 2 qubits. The qubits start from 0,1,2,3,4. Here
#' control is qubit 0 and target is qubit 1
#'
#' @usage
#' CNOT2_01(a)
#'
#' @param a
#' The input
#'
#' @return result
#' The result of applying the CNOT2_01 gate
#'
#' @references
#' \url{https://quantumexperience.ng.bluemix.net/}\cr
#' \url{https://gigadom.wordpress.com/2016/06/23/introducing-qcsimulator-a-5-qubit-quantum-computing-simulator-in-r/}\cr
#'
#' @author
#' Tinniam V Ganesh
#' @note
#' Maintainer: Tinniam V Ganesh \email{tvganesh.85@gmail.com}
#'
#' @examples
#' # Initialze global variables
#' init()
#' CNOT2_01(q11_)
#' CNOT2_01(I4)
#'
#' @seealso
#' \code{\link{CNOT2_10}}\cr
#' \code{\link{PauliX}}\cr
#' \code{\link{measurement}}\cr
#' \code{\link{plotMeasurement}}\cr
#' \code{\link{CNOT5_03}}\cr
#' \code{\link{CNOT3_12}}\cr
#'
#' @export
#'
CNOT2_01 <- function(a){
cnot= matrix(c(1,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0),nrow=4,ncol=4)
result <-cnot%*%a
result
}
|
## Final Project - R code
##
## This file stores the R code for monte carlo study in Final Project
## Author(s): Zhihao Xu, xuzhihao@umich.edu
## Updated: December 4, 2020
# 79: -------------------------------------------------------------------------
# libraries: ------------------------------------------------------------------
library(parallel)
library(MASS)
# monte carlo study
compute_fdr = function(rej, sig) {
  # Estimate the false discovery proportion (FDP) of a rejection set.
  #
  # Inputs:
  #   rej - indices of rejected hypotheses
  #   sig - indices of the true signals
  # Output: proportion of rejections that are not true signals;
  #   defined as 0 when nothing is rejected.
  if (length(rej) == 0) {
    return(0)
  }
  # A rejection not contained in `sig` is a false discovery; mean() of the
  # logical mask gives the proportion directly.
  mean(!(rej %in% sig))
}
compute_power = function(rej, sig) {
  # Estimate the power (true positive rate) of a rejection set.
  #
  # Inputs:
  #   rej - indices of rejected hypotheses
  #   sig - indices of the true signals
  # Output: fraction of true signals that were rejected;
  #   defined as 0 when nothing is rejected.
  if (length(rej) == 0) {
    return(0)
  }
  # Count the rejections that hit a true signal, relative to the number
  # of signals.
  sum(rej %in% sig) / length(sig)
}
## Independent
ind_sim = function(idx, mu, method="BH", q=0.05){
  # simulation of independent case
  # Inputs:
  #   idx - replication index (unused; present so the function can be
  #         mapped over 1:nsim by mclapply)
  #   mu - mean of simulation
  #   method - method for p-value correction (passed to p.adjust)
  #   q - significant level
  # Output: rejection set (indices of the rejected hypotheses)
  # Draw 100 independent features, each with 200 iid N(mu, 1) observations
  # (one feature per row of d).
  d = matrix(rnorm(200*100, mean=mu,sd=1), nrow=100)
  # Two-sided z-test per feature: under H0 the row mean is N(0, 1/200).
  p = 2*(1-pnorm(abs(apply(d, 1, mean)), mean=0, sd=1/sqrt(200)))
  # Reject the features whose adjusted p-value falls below level q.
  rej = which(p.adjust(p, method)<q)
  return(rej)
}
result_ind_null_bh = mclapply(1:1000, ind_sim,mu=0, method="BH",
q=0.05, mc.cores = 4)
result_ind_null_by = mclapply(1:1000, ind_sim,mu=0, method="BY",
q=0.05, mc.cores = 4)
bh_ind_null = mean(unlist(lapply(result_ind_null_bh, compute_fdr, sig=0)))
by_ind_null = mean(unlist(lapply(result_ind_null_by, compute_fdr, sig=0)))
result_ind_null = rbind(bh_ind_null, by_ind_null)
result_ind_sig_bh = mclapply(1:1000, ind_sim,
mu=c(rep(seq(0.2,1,0.4), each=2), rep(0,94)),
method="BH", q=0.05, mc.cores = 4)
result_ind_sig_by = mclapply(1:1000, ind_sim,
mu=c(rep(seq(0.2,1,0.4), each=2), rep(0,94)),
method="BY", q=0.05, mc.cores = 4)
bh_ind_sig = c(mean(unlist(lapply(result_ind_sig_bh, compute_fdr, sig=1:6))),
mean(unlist(lapply(result_ind_sig_bh, compute_power, sig=1:6))))
by_ind_sig = c(mean(unlist(lapply(result_ind_sig_by, compute_fdr, sig=1:6))),
mean(unlist(lapply(result_ind_sig_by, compute_power, sig=1:6))))
result_ind_sig = rbind(bh_ind_sig, by_ind_sig)
## Positive Correlation
Sigma = matrix(rep(0.8,100*100),100)
post_cor_sim = function(idx, mu, method="BH", q=0.05){
  # simulation of positive correlation case
  # Inputs:
  #   idx - replication index (unused; present so the function can be
  #         mapped over 1:nsim by mclapply)
  #   mu - mean of simulation (length-100 vector)
  #   method - method for p-value correction (passed to p.adjust)
  #   q - significant level
  # Output: rejection set (indices of the rejected hypotheses)
  # NOTE(review): `Sigma` is read from the global environment (the 100x100
  # matrix of 0.8 defined just above); its diagonal is also 0.8, not 1 --
  # confirm this covariance is intended.
  d = mvrnorm(n=200, mu=mu, Sigma=Sigma)
  # Two-sided z-test per column mean, nominal null sd 1/sqrt(200).
  p = 2*(1-pnorm(abs(apply(d, 2, mean)), mean=0, sd=1/sqrt(200)))
  rej = which(p.adjust(p, method)<q)
  return(rej)
}
result_post_cor_null_bh = mclapply(1:1000, post_cor_sim,mu=rep(0,100),
method="BH", q=0.05, mc.cores = 4)
result_post_cor_null_by = mclapply(1:1000, post_cor_sim,mu=rep(0,100),
method="BY", q=0.05, mc.cores = 4)
bh_post_cor_null = mean(unlist(lapply(result_post_cor_null_bh,
compute_fdr, sig=0)))
by_post_cor_null = mean(unlist(lapply(result_post_cor_null_by,
compute_fdr, sig=0)))
result_post_cor_null = rbind(bh_post_cor_null, by_post_cor_null)
result_post_cor_sig_bh = mclapply(1:1000, post_cor_sim,
mu=c(rep(seq(0.2,1,0.4), each=2), rep(0,94)),
method="BH",q=0.05, mc.cores = 4)
result_post_cor_sig_by = mclapply(1:1000, post_cor_sim,
mu=c(rep(seq(0.2,1,0.4), each=2), rep(0,94)),
method="BY",q=0.05, mc.cores = 4)
bh_post_cor_sig = c(mean(unlist(lapply(result_post_cor_sig_bh,
compute_fdr, sig=1:6))),
mean(unlist(lapply(result_post_cor_sig_bh,
compute_power, sig=1:6))))
by_post_cor_sig = c(mean(unlist(lapply(result_post_cor_sig_by,
compute_fdr, sig=1:6))),
mean(unlist(lapply(result_post_cor_sig_by,
compute_power, sig=1:6))))
result_post_cor_sig = rbind(bh_post_cor_sig, by_post_cor_sig)
## Negative Correlation
Sigma = matrix(rep(0.8,50*50),50)
neg_cor_sim = function(idx, mu, method="BH", q=0.05){
  # simulation of negative correlation case
  # Inputs:
  #   idx - replication index (unused; present so the function can be
  #         mapped over 1:nsim by mclapply)
  #   mu - mean of simulation (length-50 vector for the first block)
  #   method - method for p-value correction (passed to p.adjust)
  #   q - significant level
  # Output: rejection set (indices of the rejected hypotheses)
  # NOTE(review): `Sigma` is read from the global environment (the 50x50
  # matrix of 0.8 defined just above); confirm the constant diagonal is
  # intended.
  d = mvrnorm(n=200, mu=mu, Sigma=Sigma)
  # Append the negated draws: columns 51-100 are perfectly negatively
  # correlated with columns 1-50.
  d = cbind(d,-d)
  p = 2*(1-pnorm(abs(apply(d, 2, mean)), mean=0, sd=1/sqrt(200)))
  rej = which(p.adjust(p,method)<q)
  return(rej)
}
result_neg_cor_null_bh = mclapply(1:1000, neg_cor_sim, mu=rep(0,50),
method="BH", q=0.05, mc.cores = 4)
result_neg_cor_null_by = mclapply(1:1000, neg_cor_sim, mu=rep(0,50),
method="BY", q=0.05, mc.cores = 4)
bh_neg_cor_null = mean(unlist(lapply(result_neg_cor_null_bh,
compute_fdr, sig=0)))
by_neg_cor_null = mean(unlist(lapply(result_neg_cor_null_by,
compute_fdr, sig=0)))
result_neg_cor_null = rbind(bh_neg_cor_null, by_neg_cor_null)
result_neg_cor_sig_bh = mclapply(1:1000, neg_cor_sim,
mu=c(rep(seq(0.2,1,0.4),each=2), rep(0,44)),
method="BH",q=0.05, mc.cores = 4)
result_neg_cor_sig_by = mclapply(1:1000, neg_cor_sim,
mu=c(rep(seq(0.2,1,0.4),each=2), rep(0,44)),
method="BY",q=0.05, mc.cores = 4)
bh_neg_cor_sig = c(mean(unlist(lapply(result_neg_cor_sig_bh,
compute_fdr, sig=c(1:6,51:56)))),
mean(unlist(lapply(result_neg_cor_sig_bh,
compute_power, sig=c(1:6,51:56)))))
by_neg_cor_sig = c(mean(unlist(lapply(result_neg_cor_sig_by,
compute_fdr, sig=c(1:6,51:56)))),
mean(unlist(lapply(result_neg_cor_sig_by,
compute_power, sig=c(1:6,51:56)))))
result_neg_cor_sig = rbind(bh_neg_cor_sig, by_neg_cor_sig)
result_null = rbind(result_ind_null, result_post_cor_null, result_neg_cor_null)
result_sig = rbind(result_ind_sig, result_post_cor_sig, result_neg_cor_sig)
| /final_proj_code.R | no_license | ZhihaoXu/Stats506_finalproject | R | false | false | 6,807 | r | ## Final Project - R code
##
## This file stores the R code for monte carlo study in Final Project
## Author(s): Zhihao Xu, xuzhihao@umich.edu
## Updated: December 4, 2020
# 79: -------------------------------------------------------------------------
# libraries: ------------------------------------------------------------------
library(parallel)
library(MASS)
# monte carlo study
compute_fdr = function(rej, sig){
# compute the fdr of rejection set
# Inputs:
# rej - rejection set
# sig - index of signal
# Output: estimation of fdr
if (length(rej)==0){
fdr = 0
} else{
fdr = sum(rej %in% sig == FALSE)/length(rej)
}
return(fdr)
}
compute_power = function(rej, sig){
# compute the power of rejection set
# Inputs:
# rej - rejection set
# sig - index of signal
# Output: estimation of power
if (length(rej)==0){
power = 0
} else{
power = sum(rej %in% sig)/length(sig)
}
return(power)
}
## Independent
ind_sim = function(idx, mu, method="BH", q=0.05){
# simulation of independent case
# Inputs:
# mu - mean of simulation
# method - method for p-value correction
# q - significant level
# Output: rejection set
d = matrix(rnorm(200*100, mean=mu,sd=1), nrow=100)
p = 2*(1-pnorm(abs(apply(d, 1, mean)), mean=0, sd=1/sqrt(200)))
rej = which(p.adjust(p, method)<q)
return(rej)
}
result_ind_null_bh = mclapply(1:1000, ind_sim,mu=0, method="BH",
q=0.05, mc.cores = 4)
result_ind_null_by = mclapply(1:1000, ind_sim,mu=0, method="BY",
q=0.05, mc.cores = 4)
bh_ind_null = mean(unlist(lapply(result_ind_null_bh, compute_fdr, sig=0)))
by_ind_null = mean(unlist(lapply(result_ind_null_by, compute_fdr, sig=0)))
result_ind_null = rbind(bh_ind_null, by_ind_null)
result_ind_sig_bh = mclapply(1:1000, ind_sim,
mu=c(rep(seq(0.2,1,0.4), each=2), rep(0,94)),
method="BH", q=0.05, mc.cores = 4)
result_ind_sig_by = mclapply(1:1000, ind_sim,
mu=c(rep(seq(0.2,1,0.4), each=2), rep(0,94)),
method="BY", q=0.05, mc.cores = 4)
bh_ind_sig = c(mean(unlist(lapply(result_ind_sig_bh, compute_fdr, sig=1:6))),
mean(unlist(lapply(result_ind_sig_bh, compute_power, sig=1:6))))
by_ind_sig = c(mean(unlist(lapply(result_ind_sig_by, compute_fdr, sig=1:6))),
mean(unlist(lapply(result_ind_sig_by, compute_power, sig=1:6))))
result_ind_sig = rbind(bh_ind_sig, by_ind_sig)
## Positive Correlation
Sigma = matrix(rep(0.8,100*100),100)
post_cor_sim = function(idx, mu, method="BH", q=0.05){
# simulation of positive correlation case
# Inputs:
# mu - mean of simulation
# method - method for p-value correction
# q - significant level
# Output: rejection set
d = mvrnorm(n=200, mu=mu, Sigma=Sigma)
p = 2*(1-pnorm(abs(apply(d, 2, mean)), mean=0, sd=1/sqrt(200)))
rej = which(p.adjust(p, method)<q)
return(rej)
}
result_post_cor_null_bh = mclapply(1:1000, post_cor_sim,mu=rep(0,100),
method="BH", q=0.05, mc.cores = 4)
result_post_cor_null_by = mclapply(1:1000, post_cor_sim,mu=rep(0,100),
method="BY", q=0.05, mc.cores = 4)
bh_post_cor_null = mean(unlist(lapply(result_post_cor_null_bh,
compute_fdr, sig=0)))
by_post_cor_null = mean(unlist(lapply(result_post_cor_null_by,
compute_fdr, sig=0)))
result_post_cor_null = rbind(bh_post_cor_null, by_post_cor_null)
result_post_cor_sig_bh = mclapply(1:1000, post_cor_sim,
mu=c(rep(seq(0.2,1,0.4), each=2), rep(0,94)),
method="BH",q=0.05, mc.cores = 4)
result_post_cor_sig_by = mclapply(1:1000, post_cor_sim,
mu=c(rep(seq(0.2,1,0.4), each=2), rep(0,94)),
method="BY",q=0.05, mc.cores = 4)
bh_post_cor_sig = c(mean(unlist(lapply(result_post_cor_sig_bh,
compute_fdr, sig=1:6))),
mean(unlist(lapply(result_post_cor_sig_bh,
compute_power, sig=1:6))))
by_post_cor_sig = c(mean(unlist(lapply(result_post_cor_sig_by,
compute_fdr, sig=1:6))),
mean(unlist(lapply(result_post_cor_sig_by,
compute_power, sig=1:6))))
result_post_cor_sig = rbind(bh_post_cor_sig, by_post_cor_sig)
## Negative Correlation
Sigma = matrix(rep(0.8,50*50),50)
neg_cor_sim = function(idx, mu, method="BH", q=0.05){
# simulation of negative correlation case
# Inputs:
# mu - mean of simulation
# method - method for p-value correction
# q - significant level
# Output: rejection set
d = mvrnorm(n=200, mu=mu, Sigma=Sigma)
d = cbind(d,-d)
p = 2*(1-pnorm(abs(apply(d, 2, mean)), mean=0, sd=1/sqrt(200)))
rej = which(p.adjust(p,method)<q)
return(rej)
}
result_neg_cor_null_bh = mclapply(1:1000, neg_cor_sim, mu=rep(0,50),
method="BH", q=0.05, mc.cores = 4)
result_neg_cor_null_by = mclapply(1:1000, neg_cor_sim, mu=rep(0,50),
method="BY", q=0.05, mc.cores = 4)
bh_neg_cor_null = mean(unlist(lapply(result_neg_cor_null_bh,
compute_fdr, sig=0)))
by_neg_cor_null = mean(unlist(lapply(result_neg_cor_null_by,
compute_fdr, sig=0)))
result_neg_cor_null = rbind(bh_neg_cor_null, by_neg_cor_null)
result_neg_cor_sig_bh = mclapply(1:1000, neg_cor_sim,
mu=c(rep(seq(0.2,1,0.4),each=2), rep(0,44)),
method="BH",q=0.05, mc.cores = 4)
result_neg_cor_sig_by = mclapply(1:1000, neg_cor_sim,
mu=c(rep(seq(0.2,1,0.4),each=2), rep(0,44)),
method="BY",q=0.05, mc.cores = 4)
bh_neg_cor_sig = c(mean(unlist(lapply(result_neg_cor_sig_bh,
compute_fdr, sig=c(1:6,51:56)))),
mean(unlist(lapply(result_neg_cor_sig_bh,
compute_power, sig=c(1:6,51:56)))))
by_neg_cor_sig = c(mean(unlist(lapply(result_neg_cor_sig_by,
compute_fdr, sig=c(1:6,51:56)))),
mean(unlist(lapply(result_neg_cor_sig_by,
compute_power, sig=c(1:6,51:56)))))
result_neg_cor_sig = rbind(bh_neg_cor_sig, by_neg_cor_sig)
result_null = rbind(result_ind_null, result_post_cor_null, result_neg_cor_null)
result_sig = rbind(result_ind_sig, result_post_cor_sig, result_neg_cor_sig)
|
#' Cross-validation to find the optimum number of features
#' (variables) in LDA
#'
#' This function provides an illustration of the process of finding
#' out the optimum number of variables using k-fold cross-validation
#' in a linear discriminant analysis (LDA).
#'
#' For a classification problem, usually we wish to use as less
#' variables as possible because of difficulties brought by the high
#' dimension.
#'
#' The selection procedure is like this:
#'
#' \itemize{
#' \item Split the whole data randomly into \eqn{k} folds:
#' \itemize{
#' \item For the number of features \eqn{g = 1, 2, \cdots, g_{max}}{g = 1, 2,
#' ..., gmax}, choose \eqn{g} features that have the largest discriminatory
#' power (measured by the F-statistic in ANOVA):
#' \itemize{
#' \item For the fold \eqn{i} (\eqn{i = 1, 2, \cdots, k}{i = 1, 2, ..., k}):
#' \itemize{
#' \item
#' Train a LDA model without the \eqn{i}-th fold data, and predict with the
#' \eqn{i}-th fold for a proportion of correct predictions
#' \eqn{p_{gi}}{p[gi]};
#' }
#' }
#' \item Average the \eqn{k} proportions to get the correct rate \eqn{p_g}{p[g]};
#' }
#' \item Determine the optimum number of features with the largest \eqn{p}.
#' }
#'
#' Note that \eqn{g_{max}} is set by \code{ani.options("nmax")}
#' (i.e. the maximum number of features we want to choose).
#'
#' @param data a data matrix containing the predictors in columns
#' @param cl a factor indicating the classification of the rows of
#' \code{data}
#' @param k the number of folds
#' @param cex.rg the range of the magnification to be used to the
#' points in the plot
#' @param col.av the two colors used to respectively denote rates of
#' correct predictions in the i-th fold and the average rates for all
#' k folds
#' @param ... arguments passed to \code{\link[graphics]{points}} to
#' draw the points which denote the correct rate
#' @return A list containing \item{accuracy }{a matrix in which the
#' element in the i-th row and j-th column is the rate of correct
#' predictions based on LDA, i.e. build a LDA model with j variables
#' and predict with data in the i-th fold (the test set) }
#' \item{optimum }{the optimum number of features based on the
#' cross-validation}
#' @author Yihui Xie <\url{http://yihui.name}>
#' @seealso \code{\link{kfcv}}, \code{\link{cv.ani}}, \code{\link[MASS]{lda}}
#' @references Maindonald J, Braun J (2007). \emph{Data Analysis and
#' Graphics Using R - An Example-Based Approach}. Cambridge
#' University Press, 2nd edition. pp. 400
#'
#' \url{http://animation.yihui.name/da:biostat:select_features_via_cv}
#' @export
#' @example inst/examples/cv.nfeaturesLDA-ex.R
cv.nfeaturesLDA = function(data = matrix(rnorm(600),
  60), cl = gl(3, 20), k = 5, cex.rg = c(0.5, 3), col.av = c("blue",
  "red"), ...) {
  # Cap the candidate feature count by both the data width and the
  # animation option 'nmax'.
  nmax = min(ncol(data), ani.options("nmax"))
  cl = as.factor(cl)
  dat = data.frame(data, cl)
  N = nrow(dat)
  # Shuffle the rows once so the k folds are random.
  n = sample(N)
  dat = dat[n, ]
  # kf[i]:(kf[i+1]-1) indexes the rows belonging to fold i.
  kf = cumsum(c(1, kfcv(k, N)))
  # One-way ANOVA F statistic for every column of x against the grouping
  # cl; used to rank features by discriminatory power.
  aovF = function(x, cl) {
    qr.obj <- qr(model.matrix(~cl))
    qty.obj <- qr.qty(qr.obj, x)
    tab <- table(factor(cl))
    dfb <- length(tab) - 1
    dfw <- sum(tab) - dfb - 1
    ms.between <- apply(qty.obj[2:(dfb + 1), , drop = FALSE]^2,
      2, sum)/dfb
    ms.within <- apply(qty.obj[-(1:(dfb + 1)), , drop = FALSE]^2,
      2, sum)/dfw
    Fstat <- ms.between/ms.within
  }
  # acc[i, j]: rate of correct predictions when fold i is held out and the
  # j highest-ranked features are used.
  acc = matrix(nrow = k, ncol = nmax)
  loc = cbind(rep(1:nmax, each = k), rep(1:k, nmax))
  op = par(mfrow = c(1, 2))
  for (j in 1:nmax) {
    for (i in 2:(k + 1)) {
      dev.hold()
      idx = kf[i - 1]:(kf[i] - 1)  # rows of the held-out fold
      trdat = dat[-idx, ]
      # Rank features by F statistic on the training folds only, and keep
      # the top j.
      slct = order(aovF(as.matrix(trdat[, -ncol(trdat)]),
        trdat[, ncol(trdat)]), decreasing = TRUE) <=
        j
      # NOTE(review): the LDA model is fit on the full `dat` (including
      # the held-out fold) rather than on `trdat` -- confirm whether this
      # data leakage is intended for the illustration.
      fit = MASS::lda(as.formula(paste(colnames(dat)[ncol(dat)],
        "~", paste(colnames(dat)[-ncol(dat)][slct], collapse = "+"))),
        data = dat)
      # Dispatch through the S3 generic instead of reaching into MASS's
      # namespace with ':::' (predict.lda is a registered method for
      # objects of class "lda").
      pred = predict(fit, dat[idx, ], dimen = 2)
      acc[i - 1, j] = mean(dat[idx, ncol(dat)] == pred$class)
      # Left panel: grid of (feature count, fold) cells.
      plot(1, xlim = c(1, nmax), ylim = c(0, k), type = "n",
        xlab = "Number of Features", ylab = "Fold", yaxt = "n",
        panel.first = grid())
      axis(2, 1:k)
      axis(2, 0, expression(bar(p)))
      # Cells not yet computed are shown as question marks.
      if ((j - 1) * k + i - 1 < nmax * k)
        text(matrix(loc[-(1:((j - 1) * k + i - 1)), ],
          ncol = 2), "?")
      # Point size encodes the accuracy of each finished cell; the bottom
      # row (y = 0) shows the column-wise average accuracy.
      points(matrix(loc[1:((j - 1) * k + i - 1), ], ncol = 2),
        cex = c(acc)^2 * diff(cex.rg) + min(cex.rg), col = col.av[1], ...)
      points(1:nmax, rep(0, nmax), cex = apply(acc, 2,
        mean, na.rm = TRUE) * diff(cex.rg) + min(cex.rg),
        col = col.av[2], ...)
      # Right panel: predicted discriminant scores for the held-out fold;
      # color marks wrong predictions, plotting symbol marks the class.
      styl.pch = as.integer(dat[idx, ncol(dat)])
      styl.col = 2 - as.integer(dat[idx, ncol(dat)] ==
        pred$class)
      plot(pred$x, pch = styl.pch, col = styl.col)
      legend("topright", legend = c("correct", "wrong"),
        fill = 1:2, bty = "n", cex = 0.8)
      legend("bottomleft", legend = levels(dat[idx, ncol(dat)])[unique(styl.pch)],
        pch = unique(styl.pch), bty = "n", cex = 0.8)
      ani.pause()
    }
  }
  par(op)
  rownames(acc) = paste("Fold", 1:k, sep = "")
  colnames(acc) = 1:nmax
  # The optimum is the feature count with the highest mean accuracy.
  nf = which.max(apply(acc, 2, mean))
  names(nf) = NULL
  invisible(list(accuracy = acc, optimum = nf))
}
| /R/cv.nfeaturesLDA.R | no_license | snowdj/animation | R | false | false | 5,632 | r | #' Cross-validation to find the optimum number of features
#' (variables) in LDA
#'
#' This function provids an illustration of the process of finding
#' out the optimum number of variables using k-fold cross-validation
#' in a linear discriminant analysis (LDA).
#'
#' For a classification problem, usually we wish to use as less
#' variables as possible because of difficulties brought by the high
#' dimension.
#'
#' The selection procedure is like this:
#'
#' \itemize{
#' \item Split the whole data randomly into \eqn{k} folds:
#' \itemize{
#' \item For the number of features \eqn{g = 1, 2, \cdots, g_{max}}{g = 1, 2,
#' ..., gmax}, choose \eqn{g} features that have the largest discriminatory
#' power (measured by the F-statistic in ANOVA):
#' \itemize{
#' \item For the fold \eqn{i} (\eqn{i = 1, 2, \cdots, k}{i = 1, 2, ..., k}):
#' \itemize{
#' \item
#' Train a LDA model without the \eqn{i}-th fold data, and predict with the
#' \eqn{i}-th fold for a proportion of correct predictions
#' \eqn{p_{gi}}{p[gi]};
#' }
#' }
#' \item Average the \eqn{k} proportions to get the correct rate \eqn{p_g}{p[g]};
#' }
#' \item Determine the optimum number of features with the largest \eqn{p}.
#' }
#'
#' Note that \eqn{g_{max}} is set by \code{ani.options("nmax")}
#' (i.e. the maximum number of features we want to choose).
#'
#' @param data a data matrix containg the predictors in columns
#' @param cl a factor indicating the classification of the rows of
#' \code{data}
#' @param k the number of folds
#' @param cex.rg the range of the magnification to be used to the
#' points in the plot
#' @param col.av the two colors used to respectively denote rates of
#' correct predictions in the i-th fold and the average rates for all
#' k folds
#' @param ... arguments passed to \code{\link[graphics]{points}} to
#' draw the points which denote the correct rate
#' @return A list containing \item{accuracy }{a matrix in which the
#' element in the i-th row and j-th column is the rate of correct
#' predictions based on LDA, i.e. build a LDA model with j variables
#' and predict with data in the i-th fold (the test set) }
#' \item{optimum }{the optimum number of features based on the
#' cross-validation}
#' @author Yihui Xie <\url{http://yihui.name}>
#' @seealso \code{\link{kfcv}}, \code{\link{cv.ani}}, \code{\link[MASS]{lda}}
#' @references Maindonald J, Braun J (2007). \emph{Data Analysis and
#' Graphics Using R - An Example-Based Approach}. Cambridge
#' University Press, 2nd edition. pp. 400
#'
#' \url{http://animation.yihui.name/da:biostat:select_features_via_cv}
#' @export
#' @example inst/examples/cv.nfeaturesLDA-ex.R
cv.nfeaturesLDA <- function(data = matrix(rnorm(600), 60), cl = gl(3, 20),
    k = 5, cex.rg = c(0.5, 3), col.av = c("blue", "red"), ...) {
    ## Upper bound on the number of candidate features to try
    nmax <- min(ncol(data), ani.options("nmax"))
    cl <- as.factor(cl)
    dat <- data.frame(data, cl)
    N <- nrow(dat)
    ## Shuffle the rows once so the k folds are random
    n <- sample(N)
    dat <- dat[n, ]
    ## Fold boundaries: rows kf[i]:(kf[i + 1] - 1) form fold i
    kf <- cumsum(c(1, kfcv(k, N)))
    ## One-way ANOVA F statistic for each column of x against cl,
    ## computed via the QR decomposition of the design matrix
    aovF <- function(x, cl) {
        qr.obj <- qr(model.matrix(~cl))
        qty.obj <- qr.qty(qr.obj, x)
        tab <- table(factor(cl))
        dfb <- length(tab) - 1
        dfw <- sum(tab) - dfb - 1
        ms.between <- apply(qty.obj[2:(dfb + 1), , drop = FALSE]^2,
            2, sum)/dfb
        ms.within <- apply(qty.obj[-(1:(dfb + 1)), , drop = FALSE]^2,
            2, sum)/dfw
        ms.between/ms.within
    }
    acc <- matrix(nrow = k, ncol = nmax)
    loc <- cbind(rep(1:nmax, each = k), rep(1:k, nmax))
    op <- par(mfrow = c(1, 2))
    for (j in 1:nmax) {
        for (i in 2:(k + 1)) {
            dev.hold()
            idx <- kf[i - 1]:(kf[i] - 1)   # rows of the i-th (test) fold
            trdat <- dat[-idx, ]           # training data: all other folds
            ## Select the j features with the largest F statistics,
            ## using the training folds only
            slct <- order(aovF(as.matrix(trdat[, -ncol(trdat)]),
                trdat[, ncol(trdat)]), decreasing = TRUE) <= j
            ## BUG FIX: fit the LDA model on `trdat`, not the full `dat`;
            ## fitting on all rows leaked the test fold into the model,
            ## contradicting the documented cross-validation procedure
            fit <- MASS::lda(as.formula(paste(colnames(dat)[ncol(dat)],
                "~", paste(colnames(dat)[-ncol(dat)][slct], collapse = "+"))),
                data = trdat)
            ## S3 dispatch instead of reaching into MASS:::predict.lda
            pred <- predict(fit, dat[idx, ], dimen = 2)
            acc[i - 1, j] <- mean(dat[idx, ncol(dat)] == pred$class)
            ## Left panel: grid of (feature count, fold) correct rates
            plot(1, xlim = c(1, nmax), ylim = c(0, k), type = "n",
                xlab = "Number of Features", ylab = "Fold", yaxt = "n",
                panel.first = grid())
            axis(2, 1:k)
            axis(2, 0, expression(bar(p)))
            ## Placeholders for combinations not yet evaluated
            if ((j - 1) * k + i - 1 < nmax * k)
                text(matrix(loc[-(1:((j - 1) * k + i - 1)), ],
                    ncol = 2), "?")
            points(matrix(loc[1:((j - 1) * k + i - 1), ], ncol = 2),
                cex = c(acc)^2 * diff(cex.rg) + min(cex.rg), col = col.av[1], ...)
            ## Bottom row: average correct rate over the folds so far
            points(1:nmax, rep(0, nmax), cex = apply(acc, 2,
                mean, na.rm = TRUE) * diff(cex.rg) + min(cex.rg),
                col = col.av[2], ...)
            ## Right panel: discriminant scores; red marks wrong predictions
            styl.pch <- as.integer(dat[idx, ncol(dat)])
            styl.col <- 2 - as.integer(dat[idx, ncol(dat)] ==
                pred$class)
            plot(pred$x, pch = styl.pch, col = styl.col)
            legend("topright", legend = c("correct", "wrong"),
                fill = 1:2, bty = "n", cex = 0.8)
            legend("bottomleft", legend = levels(dat[idx, ncol(dat)])[unique(styl.pch)],
                pch = unique(styl.pch), bty = "n", cex = 0.8)
            ani.pause()
        }
    }
    par(op)
    rownames(acc) <- paste("Fold", 1:k, sep = "")
    colnames(acc) <- 1:nmax
    ## Optimum = feature count with the highest mean correct rate
    nf <- which.max(apply(acc, 2, mean))
    names(nf) <- NULL
    invisible(list(accuracy = acc, optimum = nf))
}
|
## Scatter plots of 2016 EPI score against national electricity
## consumption, by income group, each with a least-squares trend line.
## Extreme consumers (value_kWh >= 1e+11) are dropped before plotting
## for every group except low income.
epi_consumption_plot <- function(df, plot_title) {
  ggplot(df, aes(x = value_kWh, y = EPI_new)) +
    labs(title = plot_title, x = "kWh", y = "EPI score") +
    geom_point(aes(colour = region.x)) +
    geom_smooth(method = "lm", se = FALSE, colour = "red")
}

## Middle income
epi_consumption_plot(filter(FM_model, value_kWh < 1e+11),
                     "EPI Vs Electricity consumption - middle income - 2016")
## statistics
cor.test(FM_model$value_kWh, FM_model$EPI_new)

## Low income (no consumption filter, unlike the other groups)
epi_consumption_plot(FL_model,
                     "EPI Vs Electricity consumption - Low income - 2016")
## statistics: normality checks motivate the rank-based correlation
shapiro.test(FL_model$EPI_new)
shapiro.test(FL_model$value_kWh)
cor.test(FL_model$value_kWh, FL_model$EPI_new, method = "spearman")

## High income
epi_consumption_plot(filter(FH_model, value_kWh < 1e+11),
                     "EPI Vs Electricity consumption - High income - 2016")
## statistics
cor.test(FH_model$value_kWh, FH_model$EPI_new)

## World
epi_consumption_plot(filter(FW_model, value_kWh < 1e+11),
                     "EPI Vs Electricity consumption - World - 2016")
## statistics
cor.test(FW_model$value_kWh, FW_model$EPI_new)
| /energy_con.r | no_license | asafvanunu/project_data_science | R | false | false | 1,457 | r |
## Scatter plots of 2016 EPI score against national electricity
## consumption, by income group, each with a least-squares trend line.
## Assumes FM_model / FL_model / FH_model / FW_model data frames exist
## in the workspace -- presumably built earlier in the analysis; verify
## against the calling script.
##Middle
FM_model%>%
  filter(value_kWh<1e+11)%>%
  ggplot(aes(x = value_kWh , y = EPI_new)) +
  labs(title = "EPI Vs Electricity consumption - middle income - 2016", x = "kWh", y = "EPI score") +
  geom_point(aes(colour = region.x)) + geom_smooth(method = "lm", se = FALSE, colour = "red")
## statistics
## Pearson correlation between consumption and EPI for the middle group
cor.test(FM_model$value_kWh, FM_model$EPI_new)
##Low
## Note: no consumption filter here, unlike the other income groups
FL_model%>%
  ggplot(aes(x = value_kWh , y = EPI_new)) +
  labs(title = "EPI Vs Electricity consumption - Low income - 2016", x = "kWh", y = "EPI score") +
  geom_point(aes(colour = region.x)) + geom_smooth(method = "lm", se = FALSE, colour = "red")
## statistics
## Shapiro-Wilk normality checks motivate the rank-based (Spearman) test
shapiro.test(FL_model$EPI_new)
shapiro.test(FL_model$value_kWh)
cor.test(FL_model$value_kWh, FL_model$EPI_new, method = "spearman")
##High
FH_model%>%
  filter(value_kWh<1e+11)%>%
  ggplot(aes(x = value_kWh , y = EPI_new)) +
  labs(title = "EPI Vs Electricity consumption - High income - 2016", x = "kWh", y = "EPI score") +
  geom_point(aes(colour = region.x)) + geom_smooth(method = "lm", se = FALSE, colour = "red")
## statistics
cor.test(FH_model$value_kWh, FH_model$EPI_new)
## World
FW_model%>%
  filter(value_kWh<1e+11)%>%
  ggplot(aes(x = value_kWh , y = EPI_new)) +
  labs(title = "EPI Vs Electricity consumption - World - 2016", x = "kWh", y = "EPI score") +
  geom_point(aes(colour = region.x)) + geom_smooth(method = "lm", se = FALSE, colour = "red")
## statistics
cor.test(FW_model$value_kWh, FW_model$EPI_new)
|
## Assignment problem: maximise the total value of a 6x6 worker/task
## assignment with lp.assign(). The matrix entries are entered
## interactively through scan(); the numbers below are the intended
## input, with -99 marking forbidden pairings.
library(lpSolve)
z <- scan()
20 15 16 5 4 7
17 15 33 12 8 6
9 12 18 16 30 13
12 8 11 27 19 14
-99 7 10 21 10 32
-99 -99 -99 6 11 13
cost <- matrix(z, nrow = 6, byrow = TRUE)  # spell out nrow (was partial-matched `nr`)
assign.sol <- lp.assign(cost.mat = cost, direction = "max")
assign.sol
assign.sol$solution
| /chap03/exam0318.R | no_license | KSDeng/Mathematical-Modeling-with-R | R | false | false | 346 | r | library(lpSolve)
## Assignment problem: maximise the total value of a 6x6 assignment.
## The matrix entries are entered interactively through scan(); the
## numbers below are the intended input, with -99 marking forbidden
## pairings.
z <- scan()
20 15 16 5 4 7
17 15 33 12 8 6
9 12 18 16 30 13
12 8 11 27 19 14
-99 7 10 21 10 32
-99 -99 -99 6 11 13
cost <- matrix(z, nrow = 6, byrow = TRUE)  # spell out nrow (was partial-matched `nr`)
assign.sol <- lp.assign(cost.mat = cost, direction = "max")
assign.sol
assign.sol$solution
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/prep_bardtags.R
\name{prep_bardtags}
\alias{prep_bardtags}
\title{format TagIDs for BARD query}
\usage{
prep_bardtags(
tagids,
df = alltags,
tagVar = "TagID",
codeVar = "CodeSpace",
freq = "A69"
)
}
\arguments{
\item{tagids}{a vector of numeric TagIDs}
\item{df}{a data frame that contains (at minimum) columns for TagID and Codespace}
\item{tagVar}{The name of the TagID column in df}
\item{codeVar}{The name of the CodeSpace column in df}
\item{freq}{Frequency of tagids; defaults to "A69"}
}
\value{
A vector of tagids formatted for a query of the BARD database
}
\description{
format TagIDs for BARD query
}
\examples{
prep_bardtags(tagids = c(2841, 2842))
}
| /man/prep_bardtags.Rd | no_license | Myfanwy/tagtales | R | false | true | 752 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/prep_bardtags.R
\name{prep_bardtags}
\alias{prep_bardtags}
\title{format TagIDs for BARD query}
\usage{
prep_bardtags(
tagids,
df = alltags,
tagVar = "TagID",
codeVar = "CodeSpace",
freq = "A69"
)
}
\arguments{
\item{tagids}{a vector of numeric TagIDs}
\item{df}{a data frame that contains (at minimum) columns for TagID and Codespace}
\item{tagVar}{The name of the TagID column in df}
\item{codeVar}{The name of the CodeSpace column in df}
\item{freq}{Frequency of tagids; defaults to "A69"}
}
\value{
A vector of tagids formatted for a query of the BARD database
}
\description{
format TagIDs for BARD query
}
\examples{
prep_bardtags(tagids = c(2841, 2842))
}
|
# CHECKS ----
# Annotate a model table with class information for the objects held in
# the list-column `col`.
#
# Adds three columns to `data`:
#   * `last_class`  - last element of each object's class vector
#   * `first_class` - first element of each object's class vector
#   * `fail_check`  - TRUE when the object does not inherit from any
#                     class in `accept_classes`
#
# @param data A data frame containing a list-column of model objects.
# @param col Unquoted name of the list-column to inspect.
# @param accept_classes Character vector of acceptable classes.
# @return `data` with the three audit columns appended.
check_classes <- function(data, col, accept_classes = c("model_fit", "workflow")) {
    .col <- rlang::enquo(col)  # capture the column name for tidy evaluation
    # Class Check
    ret_1 <- data %>%
        dplyr::mutate(last_class = purrr::map_chr(!! .col, .f = function(obj) {
            class(obj)[length(class(obj))]
        })) %>%
        dplyr::mutate(first_class = purrr::map_chr(!! .col, .f = function(obj) {
            class(obj)[1]
        })) %>%
        dplyr::mutate(fail_check = purrr::map_lgl(!! .col, .f = function(obj) {
            !inherits(obj, accept_classes)
        }))
    return(ret_1)
}
check_ncols <- function(data, col, accept_ncol = 3) {
.col <- rlang::enquo(col)
# Class Number of Columns
ret_1 <- data %>%
dplyr::mutate(ncol = purrr::map_dbl(!! .col, .f = function(obj) {
ncol(obj)
})) %>%
dplyr::mutate(fail_check = ifelse(ncol != accept_ncol, TRUE, FALSE))
return(ret_1)
}
check_models_are_trained <- function(data) {
# Class Check
ret_1 <- data %>%
dplyr::mutate(fail_check = purrr::map_lgl(.model, .f = function(obj) {
!is_trained(obj)
}))
return(ret_1)
}
check_models_are_not_null <- function(data) {
# Class Check
ret_1 <- data %>%
dplyr::mutate(fail_check = purrr::map_lgl(.model, .f = is.null))
return(ret_1)
}
check_non_bad_class_data <- function(data, bad_classes = c("character")) {
# Bad Class Check
ret_1 <- data %>%
purrr::map_dfr(~ inherits(., bad_classes)) %>%
tidyr::gather(key = "key", value = "bad_class", dplyr::everything()) %>%
dplyr::mutate(fail_check = ifelse(bad_class == 1, TRUE, FALSE))
# Class Description
ret_2 <- data %>%
purrr::map_dfr(~ class(.) %>% stringr::str_c(collapse = ", ")) %>%
tidyr::gather(key = "key", value = "class_desc", dplyr::everything())
return(dplyr::left_join(ret_1, ret_2, by = "key"))
}
# Flag columns of `data` that hold a single unique value (no contrast).
#
# Returns a long tibble with one row per column: `key` (column name),
# `unique_count`, and `fail_check`, TRUE when the column has exactly one
# distinct value.
check_non_unique_contrasts <- function(data) {
    data %>%
        purrr::map_dfr(~ length(unique(.))) %>%
        tidyr::gather(key = "key", value = "unique_count", dplyr::everything()) %>%
        dplyr::mutate(fail_check = (unique_count == 1))
}
check_unused_factor_levels <- function(data) {
ret_factor_count <- data %>%
purrr::map_dfr(.f = function(x) {
if (is.factor(x)) {
length(levels(x))
} else {
0
}
}) %>%
tidyr::gather(key = "key", value = "factor_count", dplyr::everything())
ret_unique_count <- check_non_unique_contrasts(data) %>%
dplyr::select(-fail_check)
ret <- dplyr::left_join(ret_factor_count, ret_unique_count, by = "key") %>%
dplyr::mutate(fail_check = ifelse(factor_count > unique_count, TRUE, FALSE))
ret
}
# VALIDATIONS ----
validate_model_classes <- function(data, accept_classes = c("model_fit", "workflow")) {
result_tbl <- check_classes(data, .model, accept_classes) %>%
dplyr::filter(fail_check)
if (nrow(result_tbl) > 0) {
bad_models <- result_tbl$.model_id
bad_values <- glue::single_quote(result_tbl$first_class)
bad_msg <- glue::glue("- Model {bad_models}: Is class {bad_values}")
bad_msg <- glue::glue_collapse(bad_msg, sep = "\n")
rlang::abort(glue::glue(
"All objects must be fitted workflow or parsnip models inheriting class 'workflow' or 'model_fit'. The following are not:",
"\n",
"{bad_msg}")
)
}
}
validate_modeltime_table_classes <- function(data, accept_classes = c("mdl_time_tbl")) {
result_tbl <- check_classes(data, .model_table, accept_classes) %>%
dplyr::filter(fail_check)
if (nrow(result_tbl) > 0) {
bad_tables <- result_tbl$.id
bad_values <- glue::single_quote(result_tbl$first_class)
bad_msg <- glue::glue("- Model Table {bad_tables}: Is class {bad_values}")
bad_msg <- glue::glue_collapse(bad_msg, sep = "\n")
rlang::abort(glue::glue(
"All objects must be Modeltime Tables inheriting class 'mdl_time_tbl'. The following are not:",
"\n",
"{bad_msg}")
)
}
}
validate_ncols <- function(data, accept_ncol = 3) {
result_tbl <- check_ncols(data, .model_table, accept_ncol) %>%
dplyr::filter(fail_check)
if (nrow(result_tbl) > 0) {
bad_tables <- result_tbl$.id
bad_values <- glue::single_quote(result_tbl$ncol)
bad_msg <- glue::glue("- Model Table {bad_tables}: Has {bad_values} and should have {accept_ncol}")
bad_msg <- glue::glue_collapse(bad_msg, sep = "\n")
rlang::abort(glue::glue(
"All objects must be Modeltime Tables with 3 columns. The following are not:",
"\n",
"{bad_msg}",
"\n",
"This problem may have occurred if some tables are calibrated.")
)
}
}
validate_models_are_trained <- function(data) {
result_tbl <- check_models_are_trained(data) %>%
dplyr::filter(fail_check)
if (nrow(result_tbl) > 0) {
bad_models <- result_tbl$.model_id
bad_msg <- glue::glue("- Model {bad_models}: Is not trained. Try using `fit()` to train the model.")
bad_msg <- glue::glue_collapse(bad_msg, sep = "\n")
glubort(
"All objects must be fitted workflow or parsnip models. The following are not:",
"\n",
"{bad_msg}"
)
}
}
validate_models_are_not_null <- function(data) {
result_tbl <- check_models_are_not_null(data) %>%
dplyr::filter(fail_check)
if (nrow(result_tbl) > 0) {
bad_models <- result_tbl$.model_id
bad_msg <- glue::glue("- Model {bad_models}: Is NULL.")
bad_msg <- glue::glue_collapse(bad_msg, sep = "\n")
message("\nModel Failure Report: ")
print(data)
glubort(
"The following models had NULL errors <NULL>:",
"\n",
"{bad_msg}",
"\n",
" Potential Solution: Make sure required modeling packages are loaded.\n"
)
}
}
validate_non_bad_class_data <- function(data, bad_classes = c("character")) {
result_tbl <- check_non_bad_class_data(data, bad_classes) %>%
dplyr::filter(fail_check)
if (nrow(result_tbl) > 0) {
bad_cols <- glue::single_quote(result_tbl$key)
bad_values <- glue::single_quote(result_tbl$class_desc)
bad_msg <- glue::glue("{bad_cols}: Is class {bad_values}")
bad_msg <- glue::glue_collapse(bad_msg, sep = "\n")
rlang::abort(glue::glue(
"All variables must be categorical (factor) or date-like, but the following are not:",
"\n",
"{bad_msg}")
)
}
}
validate_non_unique_contrasts <- function(data) {
result_tbl <- check_non_unique_contrasts(data) %>%
dplyr::filter(fail_check)
if (nrow(result_tbl) > 0) {
bad_cols <- glue::single_quote(result_tbl$key)
bad_values <- purrr::map(result_tbl$unique_count, glue_quote_collapse)
bad_msg <- glue::glue("{bad_cols}: {bad_values} unique value")
bad_msg <- glue::glue_collapse(bad_msg, sep = "\n")
rlang::abort(glue::glue(
"All variables must have more than one unique value, but the following do not:",
"\n",
"{bad_msg}")
)
}
}
validate_unused_factor_levels <- function(data) {
result_tbl <- check_unused_factor_levels(data) %>%
dplyr::filter(fail_check)
if (nrow(result_tbl) > 0) {
bad_cols <- glue::single_quote(result_tbl$key)
bad_values_1 <- purrr::map(result_tbl$factor_count, glue_quote_collapse)
bad_values_2 <- purrr::map(result_tbl$unique_count, glue_quote_collapse)
bad_msg <- glue::glue("{bad_cols}: levels {bad_values_1} > levels used {bad_values_2}")
bad_msg <- glue::glue_collapse(bad_msg, sep = "\n")
rlang::abort(glue::glue(
"All factor variables must use all levels, but the following do not:",
"\n",
"{bad_msg}")
)
}
}
# HELPERS ----
# Determine whether a model object has been fitted.
#
# Recognises parsnip fits ('model_fit'), workflows ('workflow'), and
# modeltime ensembles ('mdl_time_ensemble'); any other class is reported
# as untrained.
#
# @param x A model object.
# @return A single logical: TRUE if `x` is trained, FALSE otherwise.
is_trained <- function(x) {
    trained <- FALSE
    if (inherits(x, "model_fit")) {
        trained <- TRUE
    }
    if (inherits(x, "workflow")) {
        # isTRUE() guards against workflows lacking a `trained` element,
        # where `x$trained` is NULL and would leak out of this function
        trained <- isTRUE(x$trained)
    }
    if (inherits(x, "mdl_time_ensemble")) {
        trained <- TRUE
    }
    return(trained)
}
# Single-quote each element of `x` and collapse the results into one
# comma-separated string (used to format values in error messages).
glue_quote_collapse <- function(x) {
    glue::glue_collapse(glue::single_quote(x), sep = ", ")
}
# Glue-then-abort: interpolate `...` with glue() and signal the result
# as an error. `.envir` controls where interpolation is evaluated.
glubort <- function(..., .sep = "", .envir = parent.frame()) {
    rlang::abort(glue::glue(..., .sep = .sep, .envir = .envir))
}
| /R/utils-checks-validations.R | permissive | topepo/modeltime | R | false | false | 8,778 | r |
# CHECKS ----
check_classes <- function(data, col, accept_classes = c("model_fit", "workflow")) {
.col <- rlang::enquo(col)
# Class Check
ret_1 <- data %>%
dplyr::mutate(last_class = purrr::map_chr(!! .col, .f = function(obj) {
class(obj)[length(class(obj))]
})) %>%
dplyr::mutate(first_class = purrr::map_chr(!! .col, .f = function(obj) {
class(obj)[1]
})) %>%
dplyr::mutate(fail_check = purrr::map_lgl(!! .col, .f = function(obj) {
!inherits(obj, accept_classes)
}))
return(ret_1)
}
check_ncols <- function(data, col, accept_ncol = 3) {
.col <- rlang::enquo(col)
# Class Number of Columns
ret_1 <- data %>%
dplyr::mutate(ncol = purrr::map_dbl(!! .col, .f = function(obj) {
ncol(obj)
})) %>%
dplyr::mutate(fail_check = ifelse(ncol != accept_ncol, TRUE, FALSE))
return(ret_1)
}
check_models_are_trained <- function(data) {
# Class Check
ret_1 <- data %>%
dplyr::mutate(fail_check = purrr::map_lgl(.model, .f = function(obj) {
!is_trained(obj)
}))
return(ret_1)
}
check_models_are_not_null <- function(data) {
# Class Check
ret_1 <- data %>%
dplyr::mutate(fail_check = purrr::map_lgl(.model, .f = is.null))
return(ret_1)
}
check_non_bad_class_data <- function(data, bad_classes = c("character")) {
# Bad Class Check
ret_1 <- data %>%
purrr::map_dfr(~ inherits(., bad_classes)) %>%
tidyr::gather(key = "key", value = "bad_class", dplyr::everything()) %>%
dplyr::mutate(fail_check = ifelse(bad_class == 1, TRUE, FALSE))
# Class Description
ret_2 <- data %>%
purrr::map_dfr(~ class(.) %>% stringr::str_c(collapse = ", ")) %>%
tidyr::gather(key = "key", value = "class_desc", dplyr::everything())
return(dplyr::left_join(ret_1, ret_2, by = "key"))
}
check_non_unique_contrasts <- function(data) {
ret <- data %>%
purrr::map_dfr(~ length(unique(.))) %>%
tidyr::gather(key = "key", value = "unique_count", dplyr::everything()) %>%
dplyr::mutate(fail_check = ifelse(unique_count == 1, TRUE, FALSE))
ret
}
check_unused_factor_levels <- function(data) {
ret_factor_count <- data %>%
purrr::map_dfr(.f = function(x) {
if (is.factor(x)) {
length(levels(x))
} else {
0
}
}) %>%
tidyr::gather(key = "key", value = "factor_count", dplyr::everything())
ret_unique_count <- check_non_unique_contrasts(data) %>%
dplyr::select(-fail_check)
ret <- dplyr::left_join(ret_factor_count, ret_unique_count, by = "key") %>%
dplyr::mutate(fail_check = ifelse(factor_count > unique_count, TRUE, FALSE))
ret
}
# VALIDATIONS ----
validate_model_classes <- function(data, accept_classes = c("model_fit", "workflow")) {
result_tbl <- check_classes(data, .model, accept_classes) %>%
dplyr::filter(fail_check)
if (nrow(result_tbl) > 0) {
bad_models <- result_tbl$.model_id
bad_values <- glue::single_quote(result_tbl$first_class)
bad_msg <- glue::glue("- Model {bad_models}: Is class {bad_values}")
bad_msg <- glue::glue_collapse(bad_msg, sep = "\n")
rlang::abort(glue::glue(
"All objects must be fitted workflow or parsnip models inheriting class 'workflow' or 'model_fit'. The following are not:",
"\n",
"{bad_msg}")
)
}
}
validate_modeltime_table_classes <- function(data, accept_classes = c("mdl_time_tbl")) {
result_tbl <- check_classes(data, .model_table, accept_classes) %>%
dplyr::filter(fail_check)
if (nrow(result_tbl) > 0) {
bad_tables <- result_tbl$.id
bad_values <- glue::single_quote(result_tbl$first_class)
bad_msg <- glue::glue("- Model Table {bad_tables}: Is class {bad_values}")
bad_msg <- glue::glue_collapse(bad_msg, sep = "\n")
rlang::abort(glue::glue(
"All objects must be Modeltime Tables inheriting class 'mdl_time_tbl'. The following are not:",
"\n",
"{bad_msg}")
)
}
}
validate_ncols <- function(data, accept_ncol = 3) {
result_tbl <- check_ncols(data, .model_table, accept_ncol) %>%
dplyr::filter(fail_check)
if (nrow(result_tbl) > 0) {
bad_tables <- result_tbl$.id
bad_values <- glue::single_quote(result_tbl$ncol)
bad_msg <- glue::glue("- Model Table {bad_tables}: Has {bad_values} and should have {accept_ncol}")
bad_msg <- glue::glue_collapse(bad_msg, sep = "\n")
rlang::abort(glue::glue(
"All objects must be Modeltime Tables with 3 columns. The following are not:",
"\n",
"{bad_msg}",
"\n",
"This problem may have occurred if some tables are calibrated.")
)
}
}
validate_models_are_trained <- function(data) {
result_tbl <- check_models_are_trained(data) %>%
dplyr::filter(fail_check)
if (nrow(result_tbl) > 0) {
bad_models <- result_tbl$.model_id
bad_msg <- glue::glue("- Model {bad_models}: Is not trained. Try using `fit()` to train the model.")
bad_msg <- glue::glue_collapse(bad_msg, sep = "\n")
glubort(
"All objects must be fitted workflow or parsnip models. The following are not:",
"\n",
"{bad_msg}"
)
}
}
validate_models_are_not_null <- function(data) {
result_tbl <- check_models_are_not_null(data) %>%
dplyr::filter(fail_check)
if (nrow(result_tbl) > 0) {
bad_models <- result_tbl$.model_id
bad_msg <- glue::glue("- Model {bad_models}: Is NULL.")
bad_msg <- glue::glue_collapse(bad_msg, sep = "\n")
message("\nModel Failure Report: ")
print(data)
glubort(
"The following models had NULL errors <NULL>:",
"\n",
"{bad_msg}",
"\n",
" Potential Solution: Make sure required modeling packages are loaded.\n"
)
}
}
validate_non_bad_class_data <- function(data, bad_classes = c("character")) {
result_tbl <- check_non_bad_class_data(data, bad_classes) %>%
dplyr::filter(fail_check)
if (nrow(result_tbl) > 0) {
bad_cols <- glue::single_quote(result_tbl$key)
bad_values <- glue::single_quote(result_tbl$class_desc)
bad_msg <- glue::glue("{bad_cols}: Is class {bad_values}")
bad_msg <- glue::glue_collapse(bad_msg, sep = "\n")
rlang::abort(glue::glue(
"All variables must be categorical (factor) or date-like, but the following are not:",
"\n",
"{bad_msg}")
)
}
}
validate_non_unique_contrasts <- function(data) {
result_tbl <- check_non_unique_contrasts(data) %>%
dplyr::filter(fail_check)
if (nrow(result_tbl) > 0) {
bad_cols <- glue::single_quote(result_tbl$key)
bad_values <- purrr::map(result_tbl$unique_count, glue_quote_collapse)
bad_msg <- glue::glue("{bad_cols}: {bad_values} unique value")
bad_msg <- glue::glue_collapse(bad_msg, sep = "\n")
rlang::abort(glue::glue(
"All variables must have more than one unique value, but the following do not:",
"\n",
"{bad_msg}")
)
}
}
validate_unused_factor_levels <- function(data) {
result_tbl <- check_unused_factor_levels(data) %>%
dplyr::filter(fail_check)
if (nrow(result_tbl) > 0) {
bad_cols <- glue::single_quote(result_tbl$key)
bad_values_1 <- purrr::map(result_tbl$factor_count, glue_quote_collapse)
bad_values_2 <- purrr::map(result_tbl$unique_count, glue_quote_collapse)
bad_msg <- glue::glue("{bad_cols}: levels {bad_values_1} > levels used {bad_values_2}")
bad_msg <- glue::glue_collapse(bad_msg, sep = "\n")
rlang::abort(glue::glue(
"All factor variables must use all levels, but the following do not:",
"\n",
"{bad_msg}")
)
}
}
# HELPERS ----
# Determine whether a model object has been fitted.
#
# Recognises parsnip fits ('model_fit'), workflows ('workflow'), and
# modeltime ensembles ('mdl_time_ensemble'); any other class is reported
# as untrained.
#
# @param x A model object.
# @return A single logical: TRUE if `x` is trained, FALSE otherwise.
is_trained <- function(x) {
    trained <- FALSE
    if (inherits(x, "model_fit")) {
        trained <- TRUE
    }
    if (inherits(x, "workflow")) {
        # isTRUE() guards against workflows lacking a `trained` element,
        # where `x$trained` is NULL and would leak out of this function
        trained <- isTRUE(x$trained)
    }
    if (inherits(x, "mdl_time_ensemble")) {
        trained <- TRUE
    }
    return(trained)
}
glue_quote_collapse <- function(x) {
glue::glue_collapse(glue::single_quote(x), sep = ", ")
}
glubort <- function(..., .sep = "", .envir = parent.frame()) {
rlang::abort(glue::glue(..., .sep = .sep, .envir = .envir))
}
|
## File Name: tam_mml_3pl_create_E.R
## File Version: 0.06
####################################################
# create E matrix
# Build the design array E that maps item-category-dimension cells to
# gammaslope parameters for the 3PL MML model.
#
# If `E` is supplied it is returned unchanged; otherwise it is derived
# from the Q matrix according to the slope design:
#   * "2PL"    - one gamma parameter per nonzero Q entry; category k
#                receives loading Q[ii,dd]*k
#   * "2PLcat" - one gamma parameter per item-category-dimension cell,
#                each with loading Q[ii,dd]
#
# @param resp Item response matrix (items in columns).
# @param E Optional prespecified design array; if non-NULL it is passed
#   through untouched.
# @param Q Item-by-dimension loading matrix; defaults to a single
#   dimension of ones when NULL.
# @param gammaslope.des Slope design, "2PL" or "2PLcat".
# @param Q.fixed Optional matrix of fixed loadings (NA = free); non-NA
#   entries produce rows of `gammaslope.fixed`.
# @return A list with the design array `E` (items x categories x
#   dimensions x parameters), the long-format design table `Qdes`, and
#   `gammaslope.fixed` (parameter index / fixed value pairs, or NULL).
tam_mml_3pl_create_E <- function( resp, E, Q, gammaslope.des,
    Q.fixed=NULL )
{
    Qdes <- NULL
    gammaslope.fixed <- NULL
    if ( is.null(E) ){
        # highest observed category per item
        maxKi <- apply( resp, 2, max, na.rm=TRUE )
        I <- ncol(resp)
        if ( is.null(Q) ){ Q <- matrix( 1, nrow=I, ncol=1 ) }
        D <- ncol(Q)
        maxK <- max( maxKi ) + 1    # number of categories including zero
        # number of gamma parameters implied by the design
        if ( gammaslope.des=="2PL" ){
            Ngam <- sum( abs(Q) > 0 )
        }
        if ( gammaslope.des=="2PLcat" ){
            Ngam <- sum( rowSums(( abs(Q) > 0 ) )*maxKi )
        }
        # ng: running gamma parameter index; vv: next free row of Qdes
        ng <- 1
        kk <- 1
        vv <- 1
        # over-allocated long-format design table; trimmed to vv-1 rows below
        Qdes <- matrix( 0, nrow=maxK*I*D, ncol=5 )
        colnames(Qdes) <- c("gammapar", "item", "dim", "category", "Qval")
        for (ii in 1:I){
            for (dd in 1:D){
                if ( Q[ii,dd] !=0 ){
                    for (kk in 1:maxKi[ii]){
                        Qdes[vv,1] <- ng
                        Qdes[vv,2:3] <- c(ii,dd)
                        Qdes[vv,4] <- kk
                        # "2PL": loading grows linearly with category;
                        # "2PLcat": constant loading per cell
                        if ( gammaslope.des=="2PL" ){
                            Qdes[vv,5] <- Q[ii,dd]*kk
                        }
                        if ( gammaslope.des=="2PLcat" ){
                            Qdes[vv,5] <- Q[ii,dd]
                        }
                        vv <- vv + 1
                        # advance the parameter index: after the last
                        # category for "2PL", after every category for "2PLcat"
                        if ( ( kk==maxKi[ii] ) & ( gammaslope.des=="2PL") ){
                            ng <- ng + 1
                        }
                        if ( gammaslope.des=="2PLcat" ){
                            ng <- ng + 1
                        }
                    }
                }
            } # end dd
        } # end ii
        Qdes <- as.data.frame( Qdes[ 1:(vv-1), ] )
        Ngam <- max( Qdes$gammapar )
        gammaslope.fixed <- NULL
        # fixed gammaslope parameters
        Qdes$gamma.fixed <- NA
        if ( ! is.null(Q.fixed) ){
            for (dd in 1:D){
                # dd <- 1
                Q1 <- Q.fixed[, dd ]
                ind.dd <- which( ! is.na( Q1) )
                if ( length(ind.dd) > 0 ){
                    I1 <- length(ind.dd)
                    for (ii in 1:I1){
                        # rows of Qdes belonging to this fixed item/dimension
                        i2 <- which( ( Qdes$item==ind.dd[ii] ) &
                                        ( Qdes$dim==dd ) )
                        Qdes[i2,"gamma.fixed"] <- Q1[ ind.dd[ii] ]
                    }
                } # end if len(ind.dd) > 0
            } # end dd
            # one fixed value per gamma parameter (mean over its cells)
            gam1 <- stats::aggregate( Qdes$gamma.fixed, list(Qdes$gammapar), mean )
            gam1 <- stats::na.omit(gam1)
            gammaslope.fixed <- gam1[, c(1,2) ]
            colnames(gammaslope.fixed) <- NULL
        } # end ! is.null(Q.fixed)
        #****
        # expand the long-format table into the 4-dimensional array
        E <- array( 0, dim=c(I,maxK, D, Ngam ) )
        for (ee in 1:(nrow(Qdes)) ){
            # ee <- 1
            Qdes.ii <- Qdes[ee,]
            E[ Qdes.ii$item, Qdes.ii$category + 1, Qdes.ii$dim, Qdes.ii$gammapar ] <- Qdes.ii$Qval
        }
    }
    #--- OUTPUT
    res <- list(E=E, Qdes=Qdes, gammaslope.fixed=gammaslope.fixed )
    return(res)
}
####################################################################
.mml.3pl.create.E <- tam_mml_3pl_create_E
| /R/tam_mml_3pl_create_E.R | no_license | jeppebundsgaard/TAM | R | false | false | 3,282 | r | ## File Name: tam_mml_3pl_create_E.R
## File Version: 0.06
####################################################
# create E matrix
tam_mml_3pl_create_E <- function( resp, E, Q, gammaslope.des,
Q.fixed=NULL )
{
Qdes <- NULL
gammaslope.fixed <- NULL
if ( is.null(E) ){
maxKi <- apply( resp, 2, max, na.rm=TRUE )
I <- ncol(resp)
if ( is.null(Q) ){ Q <- matrix( 1, nrow=I, ncol=1 ) }
D <- ncol(Q)
maxK <- max( maxKi ) + 1
if ( gammaslope.des=="2PL" ){
Ngam <- sum( abs(Q) > 0 )
}
if ( gammaslope.des=="2PLcat" ){
Ngam <- sum( rowSums(( abs(Q) > 0 ) )*maxKi )
}
ng <- 1
kk <- 1
vv <- 1
Qdes <- matrix( 0, nrow=maxK*I*D, ncol=5 )
colnames(Qdes) <- c("gammapar", "item", "dim", "category", "Qval")
for (ii in 1:I){
for (dd in 1:D){
if ( Q[ii,dd] !=0 ){
for (kk in 1:maxKi[ii]){
Qdes[vv,1] <- ng
Qdes[vv,2:3] <- c(ii,dd)
Qdes[vv,4] <- kk
if ( gammaslope.des=="2PL" ){
Qdes[vv,5] <- Q[ii,dd]*kk
}
if ( gammaslope.des=="2PLcat" ){
Qdes[vv,5] <- Q[ii,dd]
}
vv <- vv + 1
if ( ( kk==maxKi[ii] ) & ( gammaslope.des=="2PL") ){
ng <- ng + 1
}
if ( gammaslope.des=="2PLcat" ){
ng <- ng + 1
}
}
}
} # end dd
} # end ii
Qdes <- as.data.frame( Qdes[ 1:(vv-1), ] )
Ngam <- max( Qdes$gammapar )
gammaslope.fixed <- NULL
# fixed gammaslope parameters
Qdes$gamma.fixed <- NA
if ( ! is.null(Q.fixed) ){
for (dd in 1:D){
# dd <- 1
Q1 <- Q.fixed[, dd ]
ind.dd <- which( ! is.na( Q1) )
if ( length(ind.dd) > 0 ){
I1 <- length(ind.dd)
for (ii in 1:I1){
i2 <- which( ( Qdes$item==ind.dd[ii] ) &
( Qdes$dim==dd ) )
Qdes[i2,"gamma.fixed"] <- Q1[ ind.dd[ii] ]
}
} # end if len(ind.dd) > 0
} # end dd
gam1 <- stats::aggregate( Qdes$gamma.fixed, list(Qdes$gammapar), mean )
gam1 <- stats::na.omit(gam1)
gammaslope.fixed <- gam1[, c(1,2) ]
colnames(gammaslope.fixed) <- NULL
} # end ! is.null(Q.fixed)
#****
E <- array( 0, dim=c(I,maxK, D, Ngam ) )
for (ee in 1:(nrow(Qdes)) ){
# ee <- 1
Qdes.ii <- Qdes[ee,]
E[ Qdes.ii$item, Qdes.ii$category + 1, Qdes.ii$dim, Qdes.ii$gammapar ] <- Qdes.ii$Qval
}
}
#--- OUTPUT
res <- list(E=E, Qdes=Qdes, gammaslope.fixed=gammaslope.fixed )
return(res)
}
####################################################################
.mml.3pl.create.E <- tam_mml_3pl_create_E
|
\name{gabor}
\alias{gabor}
\title{
Generate Gabor function
}
\description{
Generates a Gabor function for a given location and frequency.
}
\usage{
gabor(sigsize, location, frequency, scale)
}
\arguments{
\item{sigsize}{
length of the Gabor function.
}
\item{location}{
position of the Gabor function.
}
\item{frequency}{
frequency of the Gabor function.
}
\item{scale}{
size parameter for the Gabor function. See details.
}}
\value{
complex 1D array of size sigsize.
}
\details{The size parameter here corresponds to the
standard deviation for a gaussian. In the Carmona (1998, eBook ISBN:978008053942) book, equation 3.23 has a different scale factor. }
\references{
See discussions in the text of \dQuote{Practical Time-Frequency Analysis}.
}
\seealso{
\code{\link{morlet}}.
}
\examples{
m1 = gabor(1024, 512, 2 * pi, 20 )
plot.ts(Re(m1) )
}
\keyword{ts}
| /man/gabor.Rd | no_license | cran/Rwave | R | false | false | 854 | rd | \name{gabor}
\alias{gabor}
\title{
Generate Gabor function
}
\description{
Generates a Gabor function for a given location and frequency.
}
\usage{
gabor(sigsize, location, frequency, scale)
}
\arguments{
\item{sigsize}{
length of the Gabor function.
}
\item{location}{
position of the Gabor function.
}
\item{frequency}{
frequency of the Gabor function.
}
\item{scale}{
size parameter for the Gabor function. See details.
}}
\value{
complex 1D array of size sigsize.
}
\details{The size parameter here corresponds to the
standard deviation for a gaussian. In the Carmona (1998, eBook ISBN:978008053942) book, equation 3.23 has a different scale factor. }
\references{
See discussions in the text of \dQuote{Practical Time-Frequency Analysis}.
}
\seealso{
\code{\link{morlet}}.
}
\examples{
m1 = gabor(1024, 512, 2 * pi, 20 )
plot.ts(Re(m1) )
}
\keyword{ts}
|
d <- matrix(c(10, 22, 24, 24, 29, 37, 27, 22, 30, 26, 25, 42, 33, 32, 21, 32, 41, 20, 30, 21,
11.5, 3.9, 5.5, 3.9, 4.1, 7.1, 4.4, 3.7, 4.8, 5.5, 4.3, 2.83, 7.52, 4.1, 4.7, 3, 19.8, 18.2, 2.9, 2.9,
419, 149.99, 159.99, 299,349,449,249,229,269,279,239,2649,2299,419,189,399,489,369,319,219),nrow=20,ncol=3)
w <- c(1/3,1/3,1/3)
cb <- c('max', 'min', 'min')
v <- 0.3
VIKOR(d,w,cb,v) | /script2.r | no_license | Leharis/SMPD-cw-2 | R | false | false | 415 | r | d <- matrix(c(10, 22, 24, 24, 29, 37, 27, 22, 30, 26, 25, 42, 33, 32, 21, 32, 41, 20, 30, 21,
11.5, 3.9, 5.5, 3.9, 4.1, 7.1, 4.4, 3.7, 4.8, 5.5, 4.3, 2.83, 7.52, 4.1, 4.7, 3, 19.8, 18.2, 2.9, 2.9,
419, 149.99, 159.99, 299,349,449,249,229,269,279,239,2649,2299,419,189,399,489,369,319,219),nrow=20,ncol=3)
w <- c(1/3,1/3,1/3)
cb <- c('max', 'min', 'min')
v <- 0.3
VIKOR(d,w,cb,v) |
#' Confidence interval function
#'
#' This function generates confidence intervals around point estimate(s) or around difference of two point estimates.
#' @param study_sample The study sample list. No default.
#' @param model_names Character vector. Names model names. No default.
#' @param the_func Function that generates key statistic. For example, model.review.AUROCC that generates AUROCC of given model(s), or model.review.reclassification that generates reclassification elements of given model(s). No default.
#' @param samples Samples prepared as the study_sample. List of study_sample lists. No default.
#' @param diffci_or_ci String. Whether to return confidence interval on difference of model_or_pointestimates, or return confidence intervals on model_or_pointestimates separately or no confidence intervals. Must be one of c("diff", "ci", "none")
#' @param outcome_name Name of outcome variable. No default.
#' @param digits Integer. Number of decimals when rounded. Defaults to 2.
#' @export
generate.confidence.intervals.v2 <- function(
study_sample,
model_names,
the_func,
samples,
diffci_or_ci,
outcome_name,
digits = 2,
...
)
{
## Get function name
review_function_name <- as.character(substitute(the_func))[[3]]
## Error handling
if (length(diffci_or_ci) != 1) stop("Argument diffci_or_ci > length 1")
if (!diffci_or_ci %in% c("diff","ci","none")) stop("Accepted strings are diff, ci, or none")
if (review_function_name == "model.review.reclassification" && diffci_or_ci == "diff") stop ("Diff ci not useful for reclassification.")
if (!(length(model_names) == 2) && diffci_or_ci == "diff") stop ("Input two models for diff ci.")
performance_point_estimates <- the_func(study_sample = study_sample,
model_names = model_names,
outcome_name = outcome_name,
...)
if (diffci_or_ci == "diff" && !all(lengths(performance_point_estimates) == length(performance_point_estimates[[1]]))) stop("Measure is not useful for diff, rocr estimates not of same length")
## Return point estimates untouched if argument diffci_or_ci equals is "none"
if (diffci_or_ci == "none") return_object <- list(performance_point_estimates = performance_point_estimates)
## Return confidence intervals around difference of point estimates
if (diffci_or_ci == "diff"){
## Calculate difference of point estimates
diff <- performance_point_estimates[[1]] - performance_point_estimates[[2]]
## Generate statistic on every bootstrap samples
generate_statistics_bssamples <- lapply(samples, function (sample)
the_func(study_sample = sample,
model_names = model_names,
outcome_name = outcome_name,
...))
## Matrixify samples, i.e. generate matrix with point estimate names
## as rows and samples as cols. Model estimates are binded rowwise.
matrixify <- sapply(generate_statistics_bssamples, unlist)
## Calculate difference between AUROCCs in every sample
diff_samples <- matrixify[1,] - matrixify[2,]
## Calculate deltastar, i.e difference between sample estimates
## and study_sample estimates.
deltastar <- diff_samples - diff
## Get 2.5% and 97.5% percentiles from difference of samples
quantiles <- quantile(deltastar, c(.025, 0.975))
## Generate confidence intervals
confidence_intervals <- diff - quantiles
## Format confidence intervals
confidence_intervals <- c(lb = round(min(confidence_intervals), digits),
ub = round(max(confidence_intervals), digits))
## Return confidence intervals with study_sample point_estimates
return_object <- list(diff_point_estimate = round(diff, digits),
CI_diff = confidence_intervals,
performance_point_estimates = performance_point_estimates)
}
## Get confidence_intervals around point estimates
if (diffci_or_ci == "ci"){
## Merge point estimates from each model into numeric vector
performance_point_estimates <- lapply(performance_point_estimates,
function (p_ests)
unlist(p_ests))
## Generate statistic on every bootstrap samples
generate_statistics_bssamples <- lapply(samples, function (sample) {
func_sample <- the_func(study_sample = sample,
model_names = model_names,
outcome_name = outcome_name,
...)
}
)
## Matrixify. NRI estimates are merged into one vector.
## Vector for each sample as columns
matrixify <- sapply(generate_statistics_bssamples, unlist)
## Adjust for model_names input of length 1
if (length(model_names) == 1 && !is.matrix(matrixify)) {
## Make point estimate matrix
pe_matrix <- rep(performance_point_estimates[[model_names]],
length(matrixify))
## Calculate deltastar, i.e difference between sample estimates
## and study_sample estimates.
deltastar <- data.frame(t(matrixify - pe_matrix))
## Get 2.5% and 97.5% percentiles from difference of samples
quantiles <- t(apply(deltastar, 1,
quantile, probs = c(.025,0.975)))
## Generate confidence_intevals
confidence_intervals <- performance_point_estimates[[model_names]] - quantiles
} else {
## Point estimate matrix for each model. Each sample as column.
pe_matrices <- lapply(performance_point_estimates,
function(nri_estimates){
matrix(rep(nri_estimates,
ncol(matrixify)),
ncol = ncol(matrixify))})
## Merge point estimate matrices of each model to one matrix
pe_matrix <- do.call(rbind, pe_matrices)
## Calculate deltastar, i.e difference between sample estimates
## and study_sample estimates.
deltastar <- data.frame(t(matrixify - pe_matrix))
## Get percentiles for each model estimate, i.e. percentiles column wise.
## Then bind list elements into data frame
quantiles <- do.call(rbind,
lapply(deltastar, quantile, probs = c(0.025,0.975)))
## Subtract point estimates from quantiles to get confidence intervals
confidence_intervals <- pe_matrix[, 1:ncol(quantiles)] - quantiles
}
## Format confidence intervals
fmt_confidence_intervals <- t(apply(confidence_intervals,
1,
function(row) c(lb = round(min(row), digits),
ub = round(max(row), digits))))
## Return confidence intervals with study_sample point_estimates
cis <- as.data.frame(cbind(fmt_confidence_intervals,
round(unlist(performance_point_estimates),
digits)),
row.names = rownames(matrixify))
## Split confidence_intervals data frame in list for each model
cis <- lapply(setNames(model_names,model_names),
function(name){
model_cis <- cis[grep(name,
rownames(cis)),]
return (model_cis)
})
for (iter in 1:length(cis)){
## Set more appropriate colnames
colnames(cis[[iter]]) <- c("lb", "ub", "point_estimate")
## Remove model names from row names if in rownames
if (any(grepl(model_names[iter], rownames(cis[[iter]])))) {
rownames(cis[[iter]]) <- gsub(pattern = ".*\\.",
replacement = "",
rownames(cis[[iter]]))
} else {
rownames(cis[[iter]]) <- "perf"
}
}
## Set return object
return_object <- cis
}
return(return_object)
}
| /R/generate.confidence.intervals.v2.r | no_license | warnbergg/superlearnerr | R | false | false | 9,191 | r | #' Confidence interval function
#'
#' This function generates bootstrap confidence intervals around point
#' estimate(s) or around the difference of two point estimates. Intervals are
#' built by subtracting the 2.5% and 97.5% quantiles of the bootstrap
#' deviations (sample estimate minus study-sample estimate) from the
#' study-sample point estimate.
#' @param study_sample The study sample list. No default.
#' @param model_names Character vector of model names. No default.
#' @param the_func Function that generates the key statistic. For example, model.review.AUROCC that generates AUROCC of given model(s), or model.review.reclassification that generates reclassification elements of given model(s). No default.
#' @param samples Bootstrap samples prepared as the study_sample. List of study_sample lists. No default.
#' @param diffci_or_ci String. Whether to return a confidence interval on the difference of two point estimates ("diff"), confidence intervals on each point estimate separately ("ci"), or no confidence intervals ("none"). Must be one of c("diff", "ci", "none")
#' @param outcome_name Name of outcome variable. No default.
#' @param digits Integer. Number of decimals when rounded. Defaults to 2.
#' @param ... Additional arguments passed on to the_func.
#' @export
generate.confidence.intervals.v2 <- function(
    study_sample,
    model_names,
    the_func,
    samples,
    diffci_or_ci,
    outcome_name,
    digits = 2,
    ...
    )
{
    ## Get function name. NOTE(review): indexing [[3]] assumes the_func is
    ## supplied as a `pkg::fun`-style expression, whose third element is the
    ## bare function name -- confirm against callers.
    review_function_name <- as.character(substitute(the_func))[[3]]
    ## Error handling: validate diffci_or_ci and the measure/model combination
    if (length(diffci_or_ci) != 1) stop("Argument diffci_or_ci > length 1")
    if (!diffci_or_ci %in% c("diff","ci","none")) stop("Accepted strings are diff, ci, or none")
    if (review_function_name == "model.review.reclassification" && diffci_or_ci == "diff") stop ("Diff ci not useful for reclassification.")
    if (!(length(model_names) == 2) && diffci_or_ci == "diff") stop ("Input two models for diff ci.")
    ## Point estimates on the full study sample (one element per model)
    performance_point_estimates <- the_func(study_sample = study_sample,
                                            model_names = model_names,
                                            outcome_name = outcome_name,
                                            ...)
    if (diffci_or_ci == "diff" && !all(lengths(performance_point_estimates) == length(performance_point_estimates[[1]]))) stop("Measure is not useful for diff, rocr estimates not of same length")
    ## Return point estimates untouched if argument diffci_or_ci is "none"
    if (diffci_or_ci == "none") return_object <- list(performance_point_estimates = performance_point_estimates)
    ## Return confidence intervals around difference of point estimates
    if (diffci_or_ci == "diff"){
        ## Calculate difference of point estimates
        diff <- performance_point_estimates[[1]] - performance_point_estimates[[2]]
        ## Generate statistic on every bootstrap sample
        generate_statistics_bssamples <- lapply(samples, function (sample)
            the_func(study_sample = sample,
                     model_names = model_names,
                     outcome_name = outcome_name,
                     ...))
        ## Matrixify samples, i.e. generate matrix with point estimate names
        ## as rows and samples as cols. Model estimates are binded rowwise.
        matrixify <- sapply(generate_statistics_bssamples, unlist)
        ## Calculate difference between the two models' estimates in every sample
        diff_samples <- matrixify[1,] - matrixify[2,]
        ## Calculate deltastar, i.e difference between sample estimates
        ## and study_sample estimates.
        deltastar <- diff_samples - diff
        ## Get 2.5% and 97.5% percentiles from difference of samples
        quantiles <- quantile(deltastar, c(.025, 0.975))
        ## Generate confidence intervals (point estimate minus deviation
        ## quantiles, i.e. the empirical bootstrap construction)
        confidence_intervals <- diff - quantiles
        ## Format confidence intervals (min becomes lower bound, max upper)
        confidence_intervals <- c(lb = round(min(confidence_intervals), digits),
                                  ub = round(max(confidence_intervals), digits))
        ## Return confidence intervals with study_sample point_estimates
        return_object <- list(diff_point_estimate = round(diff, digits),
                              CI_diff = confidence_intervals,
                              performance_point_estimates = performance_point_estimates)
    }
    ## Get confidence_intervals around point estimates
    if (diffci_or_ci == "ci"){
        ## Merge point estimates from each model into numeric vector
        performance_point_estimates <- lapply(performance_point_estimates,
                                              function (p_ests)
                                                  unlist(p_ests))
        ## Generate statistic on every bootstrap sample
        generate_statistics_bssamples <- lapply(samples, function (sample) {
            func_sample <- the_func(study_sample = sample,
                                    model_names = model_names,
                                    outcome_name = outcome_name,
                                    ...)
        }
        )
        ## Matrixify. NRI estimates are merged into one vector.
        ## Vector for each sample as columns
        matrixify <- sapply(generate_statistics_bssamples, unlist)
        ## Adjust for model_names input of length 1 (sapply then returns a
        ## plain vector rather than a matrix)
        if (length(model_names) == 1 && !is.matrix(matrixify)) {
            ## Make point estimate matrix (point estimate repeated per sample)
            pe_matrix <- rep(performance_point_estimates[[model_names]],
                             length(matrixify))
            ## Calculate deltastar, i.e difference between sample estimates
            ## and study_sample estimates.
            deltastar <- data.frame(t(matrixify - pe_matrix))
            ## Get 2.5% and 97.5% percentiles from difference of samples
            quantiles <- t(apply(deltastar, 1,
                                 quantile, probs = c(.025,0.975)))
            ## Generate confidence_intervals
            confidence_intervals <- performance_point_estimates[[model_names]] - quantiles
        } else {
            ## Point estimate matrix for each model. Each sample as column.
            pe_matrices <- lapply(performance_point_estimates,
                                  function(nri_estimates){
                                      matrix(rep(nri_estimates,
                                                 ncol(matrixify)),
                                             ncol = ncol(matrixify))})
            ## Merge point estimate matrices of each model to one matrix
            pe_matrix <- do.call(rbind, pe_matrices)
            ## Calculate deltastar, i.e difference between sample estimates
            ## and study_sample estimates.
            deltastar <- data.frame(t(matrixify - pe_matrix))
            ## Get percentiles for each model estimate, i.e. percentiles column wise.
            ## Then bind list elements into data frame
            quantiles <- do.call(rbind,
                                 lapply(deltastar, quantile, probs = c(0.025,0.975)))
            ## Subtract point estimates from quantiles to get confidence intervals
            confidence_intervals <- pe_matrix[, 1:ncol(quantiles)] - quantiles
        }
        ## Format confidence intervals: per row, min is lower and max is upper bound
        fmt_confidence_intervals <- t(apply(confidence_intervals,
                                            1,
                                            function(row) c(lb = round(min(row), digits),
                                                            ub = round(max(row), digits))))
        ## Return confidence intervals with study_sample point_estimates
        cis <- as.data.frame(cbind(fmt_confidence_intervals,
                                   round(unlist(performance_point_estimates),
                                         digits)),
                             row.names = rownames(matrixify))
        ## Split confidence_intervals data frame in list for each model
        ## (rows are matched to models by grep on the row names)
        cis <- lapply(setNames(model_names,model_names),
                      function(name){
                          model_cis <- cis[grep(name,
                                                rownames(cis)),]
                          return (model_cis)
                      })
        for (iter in 1:length(cis)){
            ## Set more appropriate colnames
            colnames(cis[[iter]]) <- c("lb", "ub", "point_estimate")
            ## Remove model names from row names if in rownames
            if (any(grepl(model_names[iter], rownames(cis[[iter]])))) {
                rownames(cis[[iter]]) <- gsub(pattern = ".*\\.",
                                              replacement = "",
                                              rownames(cis[[iter]]))
            } else {
                rownames(cis[[iter]]) <- "perf"
            }
        }
        ## Set return object
        return_object <- cis
    }
    return(return_object)
}
|
# Plot2: Global Active Power over time for 2007-02-01/02, saved to Plot2.png.
library(lubridate)
library(dplyr)

# Download the raw data archive once, into the working directory.
zipFile <- 'household_power_consumption.zip'
zipFilePath <- file.path(getwd(), zipFile)
if (!file.exists(zipFilePath)) {
  zipURL <- 'https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip'
  download.file(zipURL, destfile = zipFilePath, method = 'curl')
}

# Extract the semicolon-separated text file if not already present.
dataFile <- 'household_power_consumption.txt'
if (!file.exists(dataFile)) {
  unzip(zipfile = zipFile)
}

# Read all observations: Date and Time as character, the 7 measurement
# columns as numeric. "?" marks missing values in this data set.
power <- read.csv2(dataFile, sep = ";", na.strings = "?", dec = ".",
                   colClasses = c(rep('character', 2), rep("numeric", 7)),
                   stringsAsFactors = FALSE)

# Build POSIXct date-time columns from the character Date/Time fields.
power$Date_Time <- with(power, paste(Date, Time))
power$Date <- with(power, as.POSIXct(Date, format = "%d/%m/%Y"))
power$Date_Time <- with(power, as.POSIXct(Date_Time, format = "%d/%m/%Y %H:%M:%S"))

# Keep only the two days of interest and release the full table.
power_sample <- filter(power, Date >= '2007-02-01', Date <= '2007-02-02')
rm('power')

# Draw an empty frame first, then add the line.
# (The stray trailing `x` argument passed to with() in the original was
# removed: it referenced a nonexistent object and was only harmless
# because unused arguments are never evaluated.)
png("./Plot2.png", width = 480, height = 480)
with(power_sample,
     plot(Date_Time, Global_active_power,
          xlab = '', ylab = "Global Active Power (kilowatts)", type = "n"))
with(power_sample, lines(Global_active_power ~ Date_Time))
dev.off()
| /Plot2.R | no_license | brntxr/ExData_Plotting1 | R | false | false | 1,340 | r | # Get data
# Plot2: Global Active Power over time for 2007-02-01/02, saved to Plot2.png.
library(lubridate)
library(dplyr)

# Download the raw data archive once, into the working directory.
zipFile <- 'household_power_consumption.zip'
zipFilePath <- file.path(getwd(), zipFile)
if (!file.exists(zipFilePath)) {
  zipURL <- 'https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip'
  download.file(zipURL, destfile = zipFilePath, method = 'curl')
}

# Extract the semicolon-separated text file if not already present.
dataFile <- 'household_power_consumption.txt'
if (!file.exists(dataFile)) {
  unzip(zipfile = zipFile)
}

# Read all observations: Date and Time as character, the 7 measurement
# columns as numeric. "?" marks missing values in this data set.
power <- read.csv2(dataFile, sep = ";", na.strings = "?", dec = ".",
                   colClasses = c(rep('character', 2), rep("numeric", 7)),
                   stringsAsFactors = FALSE)

# Build POSIXct date-time columns from the character Date/Time fields.
power$Date_Time <- with(power, paste(Date, Time))
power$Date <- with(power, as.POSIXct(Date, format = "%d/%m/%Y"))
power$Date_Time <- with(power, as.POSIXct(Date_Time, format = "%d/%m/%Y %H:%M:%S"))

# Keep only the two days of interest and release the full table.
power_sample <- filter(power, Date >= '2007-02-01', Date <= '2007-02-02')
rm('power')

# Draw an empty frame first, then add the line.
# (The stray trailing `x` argument passed to with() in the original was
# removed: it referenced a nonexistent object and was only harmless
# because unused arguments are never evaluated.)
png("./Plot2.png", width = 480, height = 480)
with(power_sample,
     plot(Date_Time, Global_active_power,
          xlab = '', ylab = "Global Active Power (kilowatts)", type = "n"))
with(power_sample, lines(Global_active_power ~ Date_Time))
dev.off()
|
# Classify loops by their relation to TADs and plot per-cluster proportions.
# reshape2 (melt) and ggplot2 (ggplot) were used but never loaded in the
# original script; both are attached here so the script runs stand-alone.
library(gplots)
library(reshape2)
library(ggplot2)

setwd("../../analysis/customLoops")

# Loop clusters plus the two loop/TAD overlap call sets (.pgl files carry no header).
a = read.delim("loops/loops.cpb.logFC.edger.final.cluster.txt")
b = read.delim("loop_to_tad/loop_over_tad.pgl", header = F)
d = read.delim("loop_to_tad/loop_inter_tad.pgl", header = F)

# Key each loop by three coordinate columns so the sets can be matched.
b$name = paste(b$V1, b$V2, b$V5)
d$name = paste(d$V1, d$V2, d$V5)

# Default every loop to intra-TAD, then overwrite from the overlap sets.
a$tad = "intra"
a$tad[which(a$name %in% b$name)] = "boundary"
a$tad[which(a$name %in% d$name)] = "inter"

# Counts per (tad type, cluster), converted to column-wise proportions.
t = table(a$tad, a$cluster)
p = sweep(t, 2, colSums(t), '/')
#heatmap.2(as.matrix(p),Colv=FALSE,Rowv=FALSE,
#dendrogram="none",cexRow=1,cexCol=1,notecol='black',margins=c(5,5),tracecol=F)
# table() orders rows alphabetically: boundary, inter, intra.
rownames(p) = c("TAD-boundary","inter-TAD","intra-TAD")
melted = melt(p)
melted$Var1 = factor(melted$Var1, levels = c("inter-TAD","TAD-boundary","intra-TAD"))

# print() is required so the plot is rendered even when this file is source()d.
pdf("figures/loop_type_to_TAD.pdf", height = 5, width = 5)
print(
  ggplot(melted, aes(x = Var2, fill = Var1, y = value)) +
    geom_bar(stat = "identity", position = "stack") +
    scale_fill_brewer(palette = "Blues") +
    theme_bw()
)
dev.off()

write.table(a[, c(1, 25)], "loop_to_tad/loops.tad.txt", row.names = F, sep = '\t', quote = F)
| /customLoops/compare_loop_tad.r | no_license | bioinfx/cvdc_scripts | R | false | false | 1,036 | r | setwd("../../analysis/customLoops")
# Classify loops by their relation to TADs and plot per-cluster proportions.
# reshape2 (melt) and ggplot2 (ggplot) were used but never loaded in the
# original script; both are attached here so the script runs stand-alone.
library(gplots)
library(reshape2)
library(ggplot2)

# Loop clusters plus the two loop/TAD overlap call sets (.pgl files carry no header).
a = read.delim("loops/loops.cpb.logFC.edger.final.cluster.txt")
b = read.delim("loop_to_tad/loop_over_tad.pgl", header = F)
d = read.delim("loop_to_tad/loop_inter_tad.pgl", header = F)

# Key each loop by three coordinate columns so the sets can be matched.
b$name = paste(b$V1, b$V2, b$V5)
d$name = paste(d$V1, d$V2, d$V5)

# Default every loop to intra-TAD, then overwrite from the overlap sets.
a$tad = "intra"
a$tad[which(a$name %in% b$name)] = "boundary"
a$tad[which(a$name %in% d$name)] = "inter"

# Counts per (tad type, cluster), converted to column-wise proportions.
t = table(a$tad, a$cluster)
p = sweep(t, 2, colSums(t), '/')
#heatmap.2(as.matrix(p),Colv=FALSE,Rowv=FALSE,
#dendrogram="none",cexRow=1,cexCol=1,notecol='black',margins=c(5,5),tracecol=F)
# table() orders rows alphabetically: boundary, inter, intra.
rownames(p) = c("TAD-boundary","inter-TAD","intra-TAD")
melted = melt(p)
melted$Var1 = factor(melted$Var1, levels = c("inter-TAD","TAD-boundary","intra-TAD"))

# print() is required so the plot is rendered even when this file is source()d.
pdf("figures/loop_type_to_TAD.pdf", height = 5, width = 5)
print(
  ggplot(melted, aes(x = Var2, fill = Var1, y = value)) +
    geom_bar(stat = "identity", position = "stack") +
    scale_fill_brewer(palette = "Blues") +
    theme_bw()
)
dev.off()

write.table(a[, c(1, 25)], "loop_to_tad/loops.tad.txt", row.names = F, sep = '\t', quote = F)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plottingfunctions.R
\name{plotDistCountAnalysis}
\alias{plotDistCountAnalysis}
\title{Creates a boxplot to see the distribution of read counts in type-specific and
shared enhancers}
\usage{
plotDistCountAnalysis(analysisresults, counts)
}
\arguments{
\item{analysisresults}{output generated from countanalysis() then categAltrePeaks()}
\item{counts}{output generated from getCounts()}
}
\value{
a ggplot
}
\description{
Takes the rlog transformation of the RPKM (Reads Per Kilobase of transcript
per Million) of the read counts of type-specific and shared regulatory regions
and plots the distribution of those read counts in all sample types analyzed
in the workflow.
}
\examples{
\dontrun{
dir <- system.file('extdata', package='ALTRE', mustWork=TRUE)
csvfile <- file.path(dir, 'lung.csv')
sampleinfo <- loadCSVFile(csvfile)
samplePeaks <- loadBedFiles(sampleinfo)
consPeaks <- getConsensusPeaks(samplepeaks=samplePeaks,minreps=2)
plotConsensusPeaks(samplepeaks=consPeaks)
TSSannot<- getTSS()
consPeaksAnnotated <- combineAnnotatePeaks(conspeaks = consPeaks,
TSS = TSSannot,
merge = TRUE,
regionspecific = TRUE,
mergedistenh = 1500,
mergedistprom = 1000)
counts_consPeaks <- getCounts(annotpeaks = consPeaksAnnotated,
sampleinfo = sampleinfo,
reference = 'SAEC',
chrom = 'chr21')
altre_peaks <- countanalysis(counts = counts_consPeaks,
pval = 0.01,
lfcvalue = 1)
categaltre_peaks <- categAltrePeaks(altre_peaks,
lfctypespecific = 1.5,
lfcshared = 1.2,
pvaltypespecific = 0.01,
pvalshared = 0.05)
plotDistCountAnalysis(categaltre_peaks, counts_consPeaks)
}
}
| /man/plotDistCountAnalysis.Rd | no_license | ewymathe/testALTREinstall | R | false | true | 2,142 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plottingfunctions.R
\name{plotDistCountAnalysis}
\alias{plotDistCountAnalysis}
\title{Creates a boxplot to see the distribution of read counts in type-specific and
shared enhancers}
\usage{
plotDistCountAnalysis(analysisresults, counts)
}
\arguments{
\item{analysisresults}{output generated from countanalysis() then categAltrePeaks()}
\item{counts}{output generated from getCounts()}
}
\value{
a ggplot
}
\description{
Takes the rlog transformation of the RPKM (Reads Per Kilobase of transcript
per Million) of the read counts of type-specific and shared regulatory regions
and plots the distribution of those read counts in all sample types analyzed
in the workflow.
}
\examples{
\dontrun{
dir <- system.file('extdata', package='ALTRE', mustWork=TRUE)
csvfile <- file.path(dir, 'lung.csv')
sampleinfo <- loadCSVFile(csvfile)
samplePeaks <- loadBedFiles(sampleinfo)
consPeaks <- getConsensusPeaks(samplepeaks=samplePeaks,minreps=2)
plotConsensusPeaks(samplepeaks=consPeaks)
TSSannot<- getTSS()
consPeaksAnnotated <- combineAnnotatePeaks(conspeaks = consPeaks,
TSS = TSSannot,
merge = TRUE,
regionspecific = TRUE,
mergedistenh = 1500,
mergedistprom = 1000)
counts_consPeaks <- getCounts(annotpeaks = consPeaksAnnotated,
sampleinfo = sampleinfo,
reference = 'SAEC',
chrom = 'chr21')
altre_peaks <- countanalysis(counts = counts_consPeaks,
pval = 0.01,
lfcvalue = 1)
categaltre_peaks <- categAltrePeaks(altre_peaks,
lfctypespecific = 1.5,
lfcshared = 1.2,
pvaltypespecific = 0.01,
pvalshared = 0.05)
plotDistCountAnalysis(categaltre_peaks, counts_consPeaks)
}
}
|
# Question 1: load genotype data (provides objects x0 and y) and split
# 80/20 into training and test sets with a fixed seed.
data <- load(file = '~/Desktop/Midterm/geno.R', envir = parent.frame(), verbose = FALSE)
set.seed(182006660)
x0 <- t(x0)
index <- sort(sample(nrow(x0), nrow(x0) * .8))  # 80% of rows for training
train_x0 <- x0[index, ]
test_x0 <- x0[-index, ]
train_y <- y[index]
test_y <- y[-index]

# Question 2: indicator (one-hot) design matrix with one 0/1 column per
# genotype value (0, 1, 2) for every marker.
design_matrix <- cbind(x0 == 0, x0 == 1, x0 == 2) * 1
train_design_x0 <- design_matrix[index, ]
test_design_x0 <- design_matrix[-index, ]

# Question 3
# SVM
library(e1071)
#svmfit <- svm(x=train_design_x0, y=as.factor(train_y), kernel='linear')
svm_model <- svm(x = train_design_x0, y = as.factor(train_y))
print(svm_model)
summary(svm_model)
svm_predict <- predict(svm_model, test_design_x0)
table(svm_predict, as.factor(test_y))  # confusion matrix

# Random Forest
library(randomForest)
rf_model <- randomForest(x = train_design_x0, y = as.factor(train_y))
rf_predict <- predict(rf_model, test_design_x0)
table(rf_predict, as.factor(test_y))

# Naive Bayes
nb_model <- naiveBayes(x = train_design_x0, y = as.factor(train_y))
nb_predict <- predict(nb_model, test_design_x0)
table(nb_predict, as.factor(test_y))

# Question 4
# GLMNET: multinomial model; predicted class is the column with the
# highest response probability.
library(glmnet)
glm_model <- glmnet(x = train_design_x0, y = as.factor(train_y), family = 'multinomial')
glm_predict <- predict(glm_model, test_design_x0, type = "response")[,,1]
glm_labels <- colnames(glm_predict)[apply(glm_predict, 1, which.max)]
glm_factors <- factor(c(glm_labels), levels = colnames(glm_predict))
table(glm_factors, as.factor(test_y))

# PenalizedSVM using elastic net
#install.packages("penalizedSVM", version='1.0')
#library(penalizedSVM)
#y_0_train = sign(train_y - 0.5)
#y_0_test = sign(test_y - 0.5)
#svmfs_model = svmfs(x = train_design_x0, y=y_0_train, fs.method = c("DrHSVM"),maxIter = 10,verbose = FALSE)
#svmfs_predict = predict.penSVM(svmfs_model, test_design_x0, as.factor(y_0_test))
#print(pl_predict$tab)
library(sparseSVM)

# Binary 0/1 labels marking `positive_class` as 1 (vectorized; replaces the
# original element-wise recoding loops, producing identical values).
one_vs_rest <- function(labels, positive_class) {
  as.numeric(labels == positive_class)
}

# 0 as class 1; 1, 2 as class 0
y_0_train <- one_vs_rest(train_y, 0)
y_0_test <- one_vs_rest(test_y, 0)
pen_svm_model <- sparseSVM(X = train_design_x0, y = y_0_train, alpha = 0.5)
pen_svm_pred <- predict(pen_svm_model, test_design_x0)
table(pen_svm_pred[, 1], y_0_test)

# 1 as class 1; 0, 2 as class 0
y_1_train <- one_vs_rest(train_y, 1)
y_1_test <- one_vs_rest(test_y, 1)
pen_svm_model <- sparseSVM(X = train_design_x0, y = y_1_train, alpha = 0.5)
pen_svm_pred <- predict(pen_svm_model, test_design_x0)
table(pen_svm_pred[, 1], y_1_test)

# 2 as class 1; 0, 1 as class 0
y_2_train <- one_vs_rest(train_y, 2)
y_2_test <- one_vs_rest(test_y, 2)
pen_svm_model <- sparseSVM(X = train_design_x0, y = y_2_train, alpha = 0.5)
pen_svm_pred <- predict(pen_svm_model, test_design_x0)
table(pen_svm_pred[, 1], y_2_test)
| /Midterm/Xin_Yang_xy213.R | no_license | NeilYXIN/RU_STAT588_DataMining | R | false | false | 3,082 | r | #Question 1
# Question 1: load genotype data (provides objects x0 and y) and split
# 80/20 into training and test sets with a fixed seed.
data <- load(file = '~/Desktop/Midterm/geno.R', envir = parent.frame(), verbose = FALSE)
set.seed(182006660)
x0 <- t(x0)
index <- sort(sample(nrow(x0), nrow(x0) * .8))  # 80% of rows for training
train_x0 <- x0[index, ]
test_x0 <- x0[-index, ]
train_y <- y[index]
test_y <- y[-index]

# Question 2: indicator (one-hot) design matrix with one 0/1 column per
# genotype value (0, 1, 2) for every marker.
design_matrix <- cbind(x0 == 0, x0 == 1, x0 == 2) * 1
train_design_x0 <- design_matrix[index, ]
test_design_x0 <- design_matrix[-index, ]

# Question 3
# SVM
library(e1071)
#svmfit <- svm(x=train_design_x0, y=as.factor(train_y), kernel='linear')
svm_model <- svm(x = train_design_x0, y = as.factor(train_y))
print(svm_model)
summary(svm_model)
svm_predict <- predict(svm_model, test_design_x0)
table(svm_predict, as.factor(test_y))  # confusion matrix

# Random Forest
library(randomForest)
rf_model <- randomForest(x = train_design_x0, y = as.factor(train_y))
rf_predict <- predict(rf_model, test_design_x0)
table(rf_predict, as.factor(test_y))

# Naive Bayes
nb_model <- naiveBayes(x = train_design_x0, y = as.factor(train_y))
nb_predict <- predict(nb_model, test_design_x0)
table(nb_predict, as.factor(test_y))

# Question 4
# GLMNET: multinomial model; predicted class is the column with the
# highest response probability.
library(glmnet)
glm_model <- glmnet(x = train_design_x0, y = as.factor(train_y), family = 'multinomial')
glm_predict <- predict(glm_model, test_design_x0, type = "response")[,,1]
glm_labels <- colnames(glm_predict)[apply(glm_predict, 1, which.max)]
glm_factors <- factor(c(glm_labels), levels = colnames(glm_predict))
table(glm_factors, as.factor(test_y))

# PenalizedSVM using elastic net
#install.packages("penalizedSVM", version='1.0')
#library(penalizedSVM)
#y_0_train = sign(train_y - 0.5)
#y_0_test = sign(test_y - 0.5)
#svmfs_model = svmfs(x = train_design_x0, y=y_0_train, fs.method = c("DrHSVM"),maxIter = 10,verbose = FALSE)
#svmfs_predict = predict.penSVM(svmfs_model, test_design_x0, as.factor(y_0_test))
#print(pl_predict$tab)
library(sparseSVM)

# Binary 0/1 labels marking `positive_class` as 1 (vectorized; replaces the
# original element-wise recoding loops, producing identical values).
one_vs_rest <- function(labels, positive_class) {
  as.numeric(labels == positive_class)
}

# 0 as class 1; 1, 2 as class 0
y_0_train <- one_vs_rest(train_y, 0)
y_0_test <- one_vs_rest(test_y, 0)
pen_svm_model <- sparseSVM(X = train_design_x0, y = y_0_train, alpha = 0.5)
pen_svm_pred <- predict(pen_svm_model, test_design_x0)
table(pen_svm_pred[, 1], y_0_test)

# 1 as class 1; 0, 2 as class 0
y_1_train <- one_vs_rest(train_y, 1)
y_1_test <- one_vs_rest(test_y, 1)
pen_svm_model <- sparseSVM(X = train_design_x0, y = y_1_train, alpha = 0.5)
pen_svm_pred <- predict(pen_svm_model, test_design_x0)
table(pen_svm_pred[, 1], y_1_test)

# 2 as class 1; 0, 1 as class 0
y_2_train <- one_vs_rest(train_y, 2)
y_2_test <- one_vs_rest(test_y, 2)
pen_svm_model <- sparseSVM(X = train_design_x0, y = y_2_train, alpha = 0.5)
pen_svm_pred <- predict(pen_svm_model, test_design_x0)
table(pen_svm_pred[, 1], y_2_test)
|
# Fit a cross-validated elastic-net (alpha = 0.35) gaussian model to the
# kidney correlation training set and append the fitted path to a log file.
library(glmnet)

# First column is the response, columns 4..end are the predictors.
# NOTE: the original used the partially-matched argument `head=T`;
# `header = TRUE` is the full, safe spelling and behaves identically.
mydata <- read.table("./TrainingSet/Correlation/kidney.csv", header = TRUE, sep = ",")
x <- as.matrix(mydata[, 4:ncol(mydata)])
y <- as.matrix(mydata[, 1])
set.seed(123)  # reproducible CV fold assignment
glm <- cv.glmnet(x, y, nfolds = 10, type.measure = "mse", alpha = 0.35,
                 family = "gaussian", standardize = FALSE)

# Append the glmnet fit summary to the model log, then restore console output.
sink('./Model/EN/Correlation/kidney/kidney_046.txt', append = TRUE)
print(glm$glmnet.fit)
sink()
| /Model/EN/Correlation/kidney/kidney_046.R | no_license | leon1003/QSMART | R | false | false | 361 | r | library(glmnet)
# Fit a cross-validated elastic-net (alpha = 0.35) gaussian model to the
# kidney correlation training set and append the fitted path to a log file.
# First column is the response, columns 4..end are the predictors.
# NOTE: the original used the partially-matched argument `head=T`;
# `header = TRUE` is the full, safe spelling and behaves identically.
mydata <- read.table("./TrainingSet/Correlation/kidney.csv", header = TRUE, sep = ",")
x <- as.matrix(mydata[, 4:ncol(mydata)])
y <- as.matrix(mydata[, 1])
set.seed(123)  # reproducible CV fold assignment
glm <- cv.glmnet(x, y, nfolds = 10, type.measure = "mse", alpha = 0.35,
                 family = "gaussian", standardize = FALSE)

# Append the glmnet fit summary to the model log, then restore console output.
sink('./Model/EN/Correlation/kidney/kidney_046.txt', append = TRUE)
print(glm$glmnet.fit)
sink()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bigquery_objects.R
\name{JobStatus}
\alias{JobStatus}
\title{JobStatus Object}
\usage{
JobStatus(errorResult = NULL, errors = NULL, state = NULL)
}
\arguments{
\item{errorResult}{[Output-only] Final error result of the job}
\item{errors}{[Output-only] All errors encountered during the running of the job}
\item{state}{[Output-only] Running state of the job}
}
\value{
JobStatus object
}
\description{
JobStatus Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
No description
}
| /googlebigqueryv2.auto/man/JobStatus.Rd | permissive | Phippsy/autoGoogleAPI | R | false | true | 597 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bigquery_objects.R
\name{JobStatus}
\alias{JobStatus}
\title{JobStatus Object}
\usage{
JobStatus(errorResult = NULL, errors = NULL, state = NULL)
}
\arguments{
\item{errorResult}{[Output-only] Final error result of the job}
\item{errors}{[Output-only] All errors encountered during the running of the job}
\item{state}{[Output-only] Running state of the job}
}
\value{
JobStatus object
}
\description{
JobStatus Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
No description
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generics.R, R/RapResults.R, R/RapSolved.R
\name{score}
\alias{score}
\alias{score.RapResults}
\alias{score.RapSolved}
\title{Solution score}
\usage{
score(x, y)
\method{score}{RapResults}(x, y = 0)
\method{score}{RapSolved}(x, y = 0)
}
\arguments{
\item{x}{\code{\link[=RapResults]{RapResults()}} or \code{\link[=RapSolved]{RapSolved()}} object.}
\item{y}{Available inputs include: \code{NULL} to return all scores,
\code{integer} number specifying the solution for which the score should
be returned, and \code{0} to return score for the best solution.}
}
\value{
\code{matrix} or \code{numeric} vector with solution score(s)
depending on arguments.
}
\description{
Extract solution score from \code{\link[=RapResults]{RapResults()}} or
\code{\link[=RapSolved]{RapSolved()}} object.
}
\examples{
\dontrun{
# load data
data(sim_rs)
# score for the best solution
score(sim_rs, 0)
# score for the second solution
score(sim_rs, 2)
# score for all solutions
score(sim_rs, NULL)
}
}
\seealso{
\code{\link[=RapResults]{RapResults()}}, \code{\link[=RapSolved]{RapSolved()}}.
}
| /man/score.Rd | no_license | jeffreyhanson/raptr | R | false | true | 1,154 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generics.R, R/RapResults.R, R/RapSolved.R
\name{score}
\alias{score}
\alias{score.RapResults}
\alias{score.RapSolved}
\title{Solution score}
\usage{
score(x, y)
\method{score}{RapResults}(x, y = 0)
\method{score}{RapSolved}(x, y = 0)
}
\arguments{
\item{x}{\code{\link[=RapResults]{RapResults()}} or \code{\link[=RapSolved]{RapSolved()}} object.}
\item{y}{Available inputs include: \code{NULL} to return all scores,
\code{integer} number specifying the solution for which the score should
be returned, and \code{0} to return score for the best solution.}
}
\value{
\code{matrix} or \code{numeric} vector with solution score(s)
depending on arguments.
}
\description{
Extract solution score from \code{\link[=RapResults]{RapResults()}} or
\code{\link[=RapSolved]{RapSolved()}} object.
}
\examples{
\dontrun{
# load data
data(sim_rs)
# score for the best solution
score(sim_rs, 0)
# score for the second solution
score(sim_rs, 2)
# score for all solutions
score(sim_rs, NULL)
}
}
\seealso{
\code{\link[=RapResults]{RapResults()}}, \code{\link[=RapSolved]{RapSolved()}}.
}
|
#' @title Make List of GenomicRanges Object
#'
#' @description
#' Creates a GenomicRanges object for each methclone output file
#'
#' @param files A vector of input files containing methclone output files,
#' the suffix of files should be methClone_out.gz
#' @param ids A vector of sample ids for the files
#' @param cores The number of cores to be used for parallel execution
#' (default: 5)
#' @param sve A boolean to save the GenomicRanges object (default: FALSE)
#' @return A list, each element is a data frame of GenomicRanges objects
#' containing pdr, epipolymorphism, and Shannon entropy values for each
#' input file. Saves as an epi.gr.rda extension
#' @examples
#' path <- system.file('extdata', package = 'epihet')
#' files <- dir(path = path, pattern = 'methClone_out.gz',
#' recursive = TRUE, full.names = TRUE)
#' ids <- basename(dirname(files))
#' GR.List <- epihet::makeGR(files = files, ids = ids,
#' cores = 1, sve = FALSE)
#' @export
makeGR <- function(files, ids, cores = 5, sve = FALSE) {
    # Validate inputs up front: every file must carry the documented
    # methclone suffix. endsWith() checks the suffix directly, which is
    # simpler and more robust than splitting the path on dots.
    for (f in files) {
        if (!endsWith(f, "methClone_out.gz")) {
            message <- paste(f,
                "file type is not supported, only supports files generated from methclone",
                sep = " ")
            stop(message)
        }
    }
    doParallel::registerDoParallel(cores = cores)
    n <- NULL  # declare the foreach iteration variable (avoids R CMD check note)
    epi.gr <- foreach(n = seq_len(length(files))) %dopar% {
        f <- files[n]
        # Stream-decompress the methclone table; fread messages are suppressed.
        x <- suppressMessages(data.table::fread(paste("gzip -dc", f), sep = "\t"))
        # Drop the per-sample "s1:" columns and trailing V27 column.
        myVar = c(grep("s1:", colnames(x), value = TRUE), "V27")
        x[, (myVar) := NULL]
        # Per-locus heterogeneity measures derived from the epiallele
        # percentage columns:
        #   pdr      - sum of columns 12:25, rescaled from percent
        #   epipoly  - 1 - sum of squared proportions over columns 11:26
        #   shannon  - entropy via the package-internal shannon() helper
        x$pdr = rowSums(x[, c(12:25), with = FALSE])/100
        x$epipoly = 1 - rowSums((x[, c(11:26), with = FALSE]/100)^2)
        x$shannon = apply(x[, c(11:26), with = FALSE], 1, shannon)
        x <- as.data.frame(x)
        # Wrap coordinates plus the retained value columns in a GRanges object.
        x.gr <- GRanges(Rle(x$chr), IRanges(start = x$start, end = x$end),
                        strand = x$strand, values = x[, c(7, 8, 9, 27:29)])
        x.gr
    }
    names(epi.gr) <- ids
    if (sve) {
        save(epi.gr, file = "epi.gr.rda")
    } else {
        return(epi.gr)
    }
}
| /R/makeGR.R | no_license | wisekh6/epihet | R | false | false | 2,180 | r | #' @title Make List of GenomicRanges Object
#'
#' @description
#' Creates a GenomicRanges object for each methclone output file
#'
#' @param files A vector of input files containing methclone output files,
#' the suffix of files should be methClone_out.gz
#' @param ids A vector of sample ids for the files
#' @param cores The number of cores to be used for parallel execution
#' (default: 5)
#' @param sve A boolean to save the GenomicRanges object (default: FALSE)
#' @return A list, each element is a data frame of GenomicRanges objects
#' containing pdr, epipolymorphism, and Shannon entropy values for each
#' input file. Saves as an epi.gr.rda extension
#' @examples
#' path <- system.file('extdata', package = 'epihet')
#' files <- dir(path = path, pattern = 'methClone_out.gz',
#' recursive = TRUE, full.names = TRUE)
#' ids <- basename(dirname(files))
#' GR.List <- epihet::makeGR(files = files, ids = ids,
#' cores = 1, sve = FALSE)
#' @export
makeGR <- function(files, ids, cores = 5, sve = FALSE) {
    # Validate inputs up front: every file must carry the documented
    # methclone suffix. endsWith() checks the suffix directly, which is
    # simpler and more robust than splitting the path on dots.
    for (f in files) {
        if (!endsWith(f, "methClone_out.gz")) {
            message <- paste(f,
                "file type is not supported, only supports files generated from methclone",
                sep = " ")
            stop(message)
        }
    }
    doParallel::registerDoParallel(cores = cores)
    n <- NULL  # declare the foreach iteration variable (avoids R CMD check note)
    epi.gr <- foreach(n = seq_len(length(files))) %dopar% {
        f <- files[n]
        # Stream-decompress the methclone table; fread messages are suppressed.
        x <- suppressMessages(data.table::fread(paste("gzip -dc", f), sep = "\t"))
        # Drop the per-sample "s1:" columns and trailing V27 column.
        myVar = c(grep("s1:", colnames(x), value = TRUE), "V27")
        x[, (myVar) := NULL]
        # Per-locus heterogeneity measures derived from the epiallele
        # percentage columns:
        #   pdr      - sum of columns 12:25, rescaled from percent
        #   epipoly  - 1 - sum of squared proportions over columns 11:26
        #   shannon  - entropy via the package-internal shannon() helper
        x$pdr = rowSums(x[, c(12:25), with = FALSE])/100
        x$epipoly = 1 - rowSums((x[, c(11:26), with = FALSE]/100)^2)
        x$shannon = apply(x[, c(11:26), with = FALSE], 1, shannon)
        x <- as.data.frame(x)
        # Wrap coordinates plus the retained value columns in a GRanges object.
        x.gr <- GRanges(Rle(x$chr), IRanges(start = x$start, end = x$end),
                        strand = x$strand, values = x[, c(7, 8, 9, 27:29)])
        x.gr
    }
    names(epi.gr) <- ids
    if (sve) {
        save(epi.gr, file = "epi.gr.rda")
    } else {
        return(epi.gr)
    }
}
|
#' Send a push notification through the Join API.
#'
#' Issues a POST request to the Join messaging endpoint for the given
#' device and stops with an error if the response is not JSON, reports an
#' HTTP error, or flags failure in its payload. Called for its side effect.
sendPush <- function(apiKey, deviceId, ...) {
  # Assemble the request URL: API key and target device go in as query params.
  request_url <- modify_url(
    'https://joinjoaomgcd.appspot.com',
    path = '/_ah/api/messaging/v1/sendPush',
    query = list(apikey = apiKey, deviceId = deviceId)
  )
  response <- POST(request_url)

  # The API is expected to answer with JSON; anything else is a hard error.
  if (http_type(response) != "application/json") {
    stop("API did not return json", call. = FALSE)
  }
  parsed <- fromJSON(content(response, "text"), simplifyVector = TRUE)

  # HTTP-level failure: surface the status code plus the API's own message.
  if (http_error(response)) {
    failure_msg <- sprintf(
      "Join API request failed [%s]\n%s\n<%s>",
      status_code(response),
      parsed$message,
      parsed$documentation_url
    )
    stop(failure_msg, call. = FALSE)
  }

  # Application-level failure: HTTP succeeded but the payload flags an error.
  if (!parsed$success) {
    stop(
      sprintf("Join API request failed \n %s", parsed$errorMessage),
      call. = FALSE
    )
  }
}
| /R/sendPush.R | permissive | 0tertra/JoinR | R | false | false | 862 | r | sendPush <- function(apiKey, deviceId, ...) {
  # Base endpoint and path of the Join push-message API.
  joinUrl <- 'https://joinjoaomgcd.appspot.com'
  path <- '/_ah/api/messaging/v1/sendPush'
  # API key and target device are sent as query parameters.
  query <- list(apikey = apiKey,
                deviceId = deviceId)
  url <- modify_url(joinUrl, path = path, query = query)
  response <- POST(url)
  # The API is expected to answer with JSON; anything else is a hard error.
  if (http_type(response) != "application/json") {
    stop("API did not return json", call. = FALSE)
  }
  parsed <- fromJSON(content(response, "text"), simplifyVector = TRUE)
  # HTTP-level failure: surface the status code plus the API's own message.
  if (http_error(response)) {
    stop(
      sprintf(
        "Join API request failed [%s]\n%s\n<%s>",
        status_code(response),
        parsed$message,
        parsed$documentation_url
      ),
      call. = FALSE
    )
  }
  # Application-level failure: HTTP succeeded but the payload flags an error.
  if (!parsed$success) {
    stop(
      sprintf(
        "Join API request failed \n %s",
        parsed$errorMessage
      ),
      call. = FALSE
    )
  }
}
|
testlist <- list(id = NULL, id = NULL, booklet_id = c(0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), person_id = integer(0))
result <- do.call(dexterMST:::is_person_booklet_sorted,testlist)
str(result) | /dexterMST/inst/testfiles/is_person_booklet_sorted/libFuzzer_is_person_booklet_sorted/is_person_booklet_sorted_valgrind_files/1612726229-test.R | no_license | akhikolla/updatedatatype-list1 | R | false | false | 350 | r | testlist <- list(id = NULL, id = NULL, booklet_id = c(0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L), person_id = integer(0))
result <- do.call(dexterMST:::is_person_booklet_sorted,testlist)
str(result) |
context("check-scrapeURLs-output")
library(testthat)
library(webmockr)
library(archiveRetriever)
#Several tests are skipped on CRAN ("skip_on_cran") because they require an internet connection and the Internet Archive may be temporarily unavailable. These tests run successfully on our machines and were originally run on CRAN using mock files created with the vcr package. Since the vcr-based testing environment is currently broken due to problems with that package, we are working on a new solution for running our tests with mock files and will update the testing environment as soon as possible!
#Check whether function output is data frame
test_that("scrape_urls() returns a data frame", {
  skip_on_cran()
  # Scrape one archived taz.de article and check the return type.
  taz_snapshot <- "http://web.archive.org/web/20190502052859/http://www.taz.de/Praesident-Trong-scheut-Oeffentlichkeit/!5588752/"
  taz_paths <- c(
    title = "//article//h1",
    content = "//article//p[contains(@class, 'article')]//text()"
  )
  output <- scrape_urls(taz_snapshot, Paths = taz_paths, encoding = "bytes")
  expect_is(output, "data.frame")
})
#Check whether function only takes Archive links
test_that("scrape_urls() only takes Internet Archive URLs as input", {
  # A live (non-archived) URL must be rejected up front.
  live_url <- "https://labour.org.uk/about/labours-legacy/"
  expect_error(
    scrape_urls(live_url, Paths = c(title = "//h1", content = "//p")),
    "Urls do not originate"
  )
})
#Check whether Paths is character vector
test_that("scrape_urls() only takes character vectors as Paths", {
  # A numeric Paths vector must trigger the type-check error.
  archived_page <- "http://web.archive.org/web/20201009174440/https://www.uni-mannheim.de/universitaet/profil/geschichte/"
  numeric_paths <- c(title = 1)
  expect_error(
    scrape_urls(archived_page, numeric_paths),
    "Paths is not a character vector"
  )
})
#Check whether XPath vector is named
test_that("scrape_urls() only takes named XPath/CSS vector as Paths", {
  # Path names become output columns, so an unnamed XPath is rejected.
  archived_page <- "http://web.archive.org/web/20201009174440/https://www.uni-mannheim.de/universitaet/profil/geschichte/"
  expect_error(
    scrape_urls(archived_page, "//header//h1"),
    "Paths is not a named vector"
  )
})
#Check whether Archive date is taken from the URL
test_that("scrape_urls() option archiveDate stores archiving date", {
  skip_on_cran()
  # archiveDate = TRUE should append an 'archiveDate' column as the fourth
  # column (after Urls, title and content), holding the snapshot date
  # parsed from the Internet Archive Url.
  # Fixed: use TRUE instead of T, and drop a second scrape_urls() call
  # whose result was discarded (a dead extra network request).
  output <-
    scrape_urls(
      "http://web.archive.org/web/20170125090337/http://www.ilsole24ore.com/art/motori/2017-01-23/toyota-yaris-205049.shtml?uuid=AEAqSFG&nmll=2707",
      Paths = c(title = "(//div[contains(@class,'title art11_title')]//h1 | //header/h1 | //h1[@class='atitle'] | //h1[@class='atitle '] | //article//article/header/h2[@class = 'title'] | //h2[@class = 'title'])", content = "(//*[@class='grid-8 top art11_body body']//p//text() | //article/div[@class='article-content ']/div/div/div//p//text() | //div[@class='aentry aentry--lined']//p//text())"),
      archiveDate = TRUE,
      encoding = "bytes"
    )
  expect_equal(names(output)[4], "archiveDate")
})
#Check whether function takes CSS instead of XPath
test_that("scrape_urls() takes CSS instead of XPath", {
skip_on_cran()
output <-
scrape_urls(
"http://web.archive.org/web/20190528072311/https://www.taz.de/Fusionsangebot-in-der-Autobranche/!5598075/",
Paths = c(title = "article h1"),
CSS = TRUE
)
expect_is(output, "data.frame")
})
#Check whether startnum is numeric
test_that("scrape_urls() needs numeric startnum", {
  # Passing startnum as a string must trigger the numeric type check.
  archived_page <- "http://web.archive.org/web/20201009174440/https://www.uni-mannheim.de/universitaet/profil/geschichte/"
  expect_error(
    scrape_urls(
      c(archived_page, archived_page),
      c(title = "//header//h1"),
      startnum = "2"
    ),
    "startnum is not numeric"
  )
})
#Check whether startnum exceeds number of Urls
test_that("scrape_urls() needs startnum smaller than input vector", {
expect_error(scrape_urls(
c(
"http://web.archive.org/web/20201009174440/https://www.uni-mannheim.de/universitaet/profil/geschichte/",
"http://web.archive.org/web/20201009174440/https://www.uni-mannheim.de/universitaet/profil/geschichte/"
),
c(title = "//header//h1"),
startnum = 3
),
"startnum value exceeds number of Urls given")
})
#Check whether startnum is single value
test_that("scrape_urls() needs startnum to be a single value", {
expect_error(scrape_urls(
c(
"http://web.archive.org/web/20201009174440/https://www.uni-mannheim.de/universitaet/profil/geschichte/",
"http://web.archive.org/web/20201009174440/https://www.uni-mannheim.de/universitaet/profil/geschichte/"
),
c(title = "//header//h1"),
startnum = c(1, 3)
),
"startnum is not a single value")
})
#Check whether CSS is a logical value
test_that("scrape_urls() needs CSS to be a logical value", {
expect_error(
scrape_urls(
"http://web.archive.org/web/20190528072311/https://www.taz.de/Fusionsangebot-in-der-Autobranche/!5598075/",
Paths = c(title = "article h1"),
CSS = "T"
),
"CSS is not a logical value"
)
})
#Check whether CSS is single value
test_that("scrape_urls() needs CSS to be a single logical value", {
expect_error(
scrape_urls(
"http://web.archive.org/web/20190528072311/https://www.taz.de/Fusionsangebot-in-der-Autobranche/!5598075/",
Paths = c(title = "article h1"),
CSS = c(TRUE, TRUE)
),
"CSS is not a single value"
)
})
#Check whether archiveDate is a logical value
test_that("scrape_urls() needs archiveDate to be a logical value", {
expect_error(
scrape_urls(
"http://web.archive.org/web/20190528072311/https://www.taz.de/Fusionsangebot-in-der-Autobranche/!5598075/",
Paths = c(title = "article h1"),
CSS = TRUE,
archiveDate = "T"
),
"archiveDate is not a logical value"
)
})
#Check whether archiveDate is single value
test_that("scrape_urls() needs archiveDate to be a single logical value", {
expect_error(
scrape_urls(
"http://web.archive.org/web/20190528072311/https://www.taz.de/Fusionsangebot-in-der-Autobranche/!5598075/",
Paths = c(title = "article h1"),
CSS = TRUE,
archiveDate = c(TRUE, TRUE)
),
"archiveDate is not a single value"
)
})
#Check whether ignoreErrors is a logical value
test_that("scrape_urls() needs ignoreErrors to be a logical value", {
expect_error(
scrape_urls(
"http://web.archive.org/web/20190528072311/https://www.taz.de/Fusionsangebot-in-der-Autobranche/!5598075/",
Paths = c(title = "article h1"),
CSS = TRUE,
archiveDate = TRUE,
ignoreErrors = "T"
),
"ignoreErrors is not a logical value"
)
})
#Check whether ignoreErrors is single value
test_that("scrape_urls() needs ignoreErrors to be a single logical value", {
expect_error(
scrape_urls(
"http://web.archive.org/web/20190528072311/https://www.taz.de/Fusionsangebot-in-der-Autobranche/!5598075/",
Paths = c(title = "article h1"),
CSS = TRUE,
archiveDate = TRUE,
ignoreErrors = c(TRUE, TRUE)
),
"ignoreErrors is not a single value"
)
})
#Check whether stopatempty is a logical value
test_that("scrape_urls() needs stopatempty to be a logical value", {
expect_error(
scrape_urls(
"http://web.archive.org/web/20190528072311/https://www.taz.de/Fusionsangebot-in-der-Autobranche/!5598075/",
Paths = c(title = "article h1"),
CSS = TRUE,
archiveDate = TRUE,
ignoreErrors = TRUE,
stopatempty = "T"
),
"stopatempty is not a logical value"
)
})
#Check whether stopatempty is single value
test_that("scrape_urls() needs stopatempty to be a single logical value", {
expect_error(
scrape_urls(
"http://web.archive.org/web/20190528072311/https://www.taz.de/Fusionsangebot-in-der-Autobranche/!5598075/",
Paths = c(title = "article h1"),
CSS = TRUE,
archiveDate = TRUE,
ignoreErrors = TRUE,
stopatempty = c(TRUE, TRUE)
),
"stopatempty is not a single value"
)
})
#Check whether emptylim is a numeric value
test_that("scrape_urls() needs emptylim to be a numeric value", {
expect_error(
scrape_urls(
"http://web.archive.org/web/20190528072311/https://www.taz.de/Fusionsangebot-in-der-Autobranche/!5598075/",
Paths = c(title = "article h1"),
CSS = TRUE,
archiveDate = TRUE,
ignoreErrors = TRUE,
stopatempty = TRUE,
emptylim = "5"
),
"emptylim is not numeric"
)
})
#Check whether emptylim is single value
# Fixed: the description duplicated the previous test ("numeric value");
# this test actually checks that emptylim must be a single value.
test_that("scrape_urls() needs emptylim to be a single value", {
  expect_error(
    scrape_urls(
      "http://web.archive.org/web/20190528072311/https://www.taz.de/Fusionsangebot-in-der-Autobranche/!5598075/",
      Paths = c(title = "article h1"),
      CSS = TRUE,
      archiveDate = TRUE,
      ignoreErrors = TRUE,
      stopatempty = TRUE,
      emptylim = c(5, 6)
    ),
    "emptylim is not a single value"
  )
})
#Check whether encoding is a character value
test_that("scrape_urls() needs encoding to be a character value", {
expect_error(
scrape_urls(
"http://web.archive.org/web/20190528072311/https://www.taz.de/Fusionsangebot-in-der-Autobranche/!5598075/",
Paths = c(title = "article h1"),
CSS = TRUE,
archiveDate = TRUE,
ignoreErrors = TRUE,
stopatempty = TRUE,
emptylim = 5,
encoding = 1991
),
"encoding is not a character value"
)
})
#Check whether encoding is single value
# Fixed: the description duplicated the previous test ("character value");
# this test actually checks that encoding must be a single value.
test_that("scrape_urls() needs encoding to be a single value", {
  expect_error(
    scrape_urls(
      "http://web.archive.org/web/20190528072311/https://www.taz.de/Fusionsangebot-in-der-Autobranche/!5598075/",
      Paths = c(title = "article h1"),
      CSS = TRUE,
      archiveDate = TRUE,
      ignoreErrors = TRUE,
      stopatempty = TRUE,
      emptylim = 5,
      encoding = c("UTF-8", "bytes")
    ),
    "encoding is not a single value"
  )
})
#Check whether data is being correctly attached to existing data set
test_that("scrape_urls() needs to start with second row when startnum is 2", {
skip_on_cran()
output <-
scrape_urls(
c(
"http://web.archive.org/web/20190310015353/https://www.uni-mannheim.de/universitaet/profil/geschichte/",
"http://web.archive.org/web/20201009174440/https://www.uni-mannheim.de/universitaet/profil/geschichte/"
),
c(title = "//header//h1"),
startnum = 2
)
expect_equal(output$Urls[1], "http://web.archive.org/web/20201009174440/https://www.uni-mannheim.de/universitaet/profil/geschichte/")
})
#Check whether only some XPaths could be scraped
test_that("scrape_urls() needs to warn if only some XPaths can be scraped", {
skip_on_cran()
expect_warning(
scrape_urls(
"http://web.archive.org/web/20190502052859/http://www.taz.de/Praesident-Trong-scheut-Oeffentlichkeit/!5588752/",
Paths = c(title = "/blablabla", content = "//article//p[contains(@class, 'article')]//text()"),
ignoreErrors = FALSE,
encoding = "bytes"
),
"Only some of your Paths"
)
})
#Check whether data is being correctly processed
test_that("scrape_urls() needs to set NA if page cannot be scraped", {
skip_on_cran()
output <-
scrape_urls(
c(
"http://web.archive.org/web/20190502052859/http://www.taz.de/Praesident-Trong-scheut-Oeffentlichkeit/!5588752/",
"http://web.archive.org/web/20190502052859/http://blogs.taz.de/",
"http://web.archive.org/web/20190502052859/http://www.taz.de/Galerie/Die-Revolution-im-Sudan/!g5591075/"
),
Paths = c(title = "//article//h1", content = "//article//p[contains(@class, 'article')]//text()")
)
expect_equal(is.na(output$title[3]), TRUE)
})
#Check whether process stop if too many rows are empty
test_that("scrape_urls() needs to stop if too many row are empty", {
skip_on_cran()
expect_warning(
scrape_urls(
c(
"http://web.archive.org/web/20190502052859/http://www.taz.de/Praesident-Trong-scheut-Oeffentlichkeit/!5588752/",
"http://web.archive.org/web/20190502052859/http://blogs.taz.de/",
"http://web.archive.org/web/20190502052859/http://blogs.taz.de/lostineurope",
"http://web.archive.org/web/20190502052859/http://blogs.taz.de/lostineurope"
),
Paths = c(title = "//article//h1", content = "//article//p[contains(@class, 'article')]//text()"),
stopatempty = TRUE,
emptylim = 2
),
"Too many empty outputs in a row"
)
})
#Check if re-start after break and attachto works
test_that("scrape_urls() needs to take up process if it breaks", {
skip_on_cran()
output <-
scrape_urls(
c(
"http://web.archive.org/web/20190502052859/http://www.taz.de/Praesident-Trong-scheut-Oeffentlichkeit/!5588752/",
"http://web.archive.org/web/20190502052859/http://blogs.taz.de/",
"http://web.archive.org/web/20190502052859/http://blogs.taz.de/lostineurope",
"http://web.archive.org/web/20190502052859/http://blogs.taz.de/lostineurope/blogfeed/"
),
Paths = c(title = "//article//h1", content = "//article//p[contains(@class, 'article')]//text()"),
stopatempty = FALSE,
attachto = tibble::tibble(
Urls = c(
"http://web.archive.org/web/20190502052859/http://www.taz.de/Praesident-Trong-scheut-Oeffentlichkeit/!5588752/",
"http://web.archive.org/web/20190502052859/http://blogs.taz.de/",
"http://web.archive.org/web/20190502052859/http://blogs.taz.de/lostineurope"
),
title = c("Vietnamesen rätseln um Staatschef",
"",
""),
content = c(
"Wer regiert Vietnam? Offenbar ist Partei- und Staatschef Nguyen Phu Trong dazu nicht mehr fähig:",
"",
""
),
stoppedat = 4
)
)
expect_equal(ncol(output), 3)
})
#Check if re-start after break and attachto works
test_that("scrape_urls() should not take up process if it stems from other process",
{
expect_error(
scrape_urls(
c(
"http://web.archive.org/web/20190502052859/http://www.taz.de/Praesident-Trong-scheut-Oeffentlichkeit/!5588752/",
"http://web.archive.org/web/20190502052859/http://blogs.taz.de/",
"http://web.archive.org/web/20190502052859/http://blogs.taz.de/lostineurope",
"http://web.archive.org/web/20190502052859/http://blogs.taz.de/lostineurope/blogfeed/"
),
Paths = c(title = "//article//h1", content = "//article//p[contains(@class, 'article')]//text()"),
stopatempty = FALSE,
attachto = tibble::tibble(
Urls = c(
"http://web.archive.org/web/20190502052859/http://www.taz.de/Praesident-Trong-scheut-Oeffentlichkeit/!5588752/",
"http://web.archive.org/web/20190502052859/http://blogs.taz.de/",
"http://web.archive.org/web/20190502052859/http://blogs.taz.de/lostineurope"
),
title = c("Vietnamesen rätseln um Staatschef",
"",
""),
inhalt = c(
"Wer regiert Vietnam? Offenbar ist Partei- und Staatschef Nguyen Phu Trong dazu nicht mehr fähig:",
"",
""
),
progress = c(1, 0, 0)
)
),
"attachto must be a failed output of this function"
)
})
#Check whether sleeper is activated after 20 Urls
test_that("scrape_urls() needs to sleep every 20 Urls", {
skip_on_cran()
output <-
scrape_urls(
c(
"http://web.archive.org/web/20201009174440/https://www.uni-mannheim.de/universitaet/profil/geschichte/",
"http://web.archive.org/web/20201009174440/https://www.uni-mannheim.de/universitaet/profil/geschichte/",
"http://web.archive.org/web/20201009174440/https://www.uni-mannheim.de/universitaet/profil/geschichte/",
"http://web.archive.org/web/20201009174440/https://www.uni-mannheim.de/universitaet/profil/geschichte/",
"http://web.archive.org/web/20201009174440/https://www.uni-mannheim.de/universitaet/profil/geschichte/",
"http://web.archive.org/web/20201009174440/https://www.uni-mannheim.de/universitaet/profil/geschichte/",
"http://web.archive.org/web/20201009174440/https://www.uni-mannheim.de/universitaet/profil/geschichte/",
"http://web.archive.org/web/20201009174440/https://www.uni-mannheim.de/universitaet/profil/geschichte/",
"http://web.archive.org/web/20201009174440/https://www.uni-mannheim.de/universitaet/profil/geschichte/",
"http://web.archive.org/web/20201009174440/https://www.uni-mannheim.de/universitaet/profil/geschichte/",
"http://web.archive.org/web/20201009174440/https://www.uni-mannheim.de/universitaet/profil/geschichte/",
"http://web.archive.org/web/20201009174440/https://www.uni-mannheim.de/universitaet/profil/geschichte/",
"http://web.archive.org/web/20201009174440/https://www.uni-mannheim.de/universitaet/profil/geschichte/",
"http://web.archive.org/web/20201009174440/https://www.uni-mannheim.de/universitaet/profil/geschichte/",
"http://web.archive.org/web/20201009174440/https://www.uni-mannheim.de/universitaet/profil/geschichte/",
"http://web.archive.org/web/20201009174440/https://www.uni-mannheim.de/universitaet/profil/geschichte/",
"http://web.archive.org/web/20201009174440/https://www.uni-mannheim.de/universitaet/profil/geschichte/",
"http://web.archive.org/web/20201009174440/https://www.uni-mannheim.de/universitaet/profil/geschichte/",
"http://web.archive.org/web/20201009174440/https://www.uni-mannheim.de/universitaet/profil/geschichte/",
"http://web.archive.org/web/20201009174440/https://www.uni-mannheim.de/universitaet/profil/geschichte/",
"http://web.archive.org/web/20201009174440/https://www.uni-mannheim.de/universitaet/profil/geschichte/"
),
c(title = "//header//h1")
)
expect_equal(nrow(output), 21)
})
#Check whether script runs without problems in case of timeout of website
test_that("scrape_urls() should not fail if website has timeout", {
# Stub the archived Url with webmockr so the request times out instead of
# hitting the network.
webmockr::enable()
webmockr::to_timeout(
webmockr::stub_request(
"get", "http://web.archive.org/web/20190502052859/http://www.taz.de/Praesident-Trong-scheut-Oeffentlichkeit/!5588752/")
)
# scrape_urls() should tolerate the timeout and still return a data frame.
output <- scrape_urls(
"http://web.archive.org/web/20190502052859/http://www.taz.de/Praesident-Trong-scheut-Oeffentlichkeit/!5588752/",
Paths = c(title = "//article//h1", content = "//article//p[contains(@class, 'article')]//text()"),
encoding = "bytes"
)
expect_is(output, "data.frame")
# NOTE(review): if scrape_urls() or the expectation errors above,
# webmockr::disable() is never reached and mocking leaks into later tests;
# consider registering the cleanup with withr::defer() instead.
webmockr::disable()
})
#Check whether script runs without problems when collapse is FALSE
test_that("scrape_urls() needs to output 5 rows", {
skip_on_cran()
output <-
scrape_urls(Urls = "http://web.archive.org/web/20201216060059/https://www.reddit.com/r/de/",
Paths = c(title = "//div/h3",
type = "//div[@class='rpBJOHq2PR60pnwJlUyP0']//a//div[contains(@class,'2X6EB3ZhEeXCh1eIVA64XM')]/span"),
collapse = FALSE,
ignoreErrors = TRUE)
expect_equal(nrow(output), 5)
})
#Check whether new content is being correctly attached to existing object
test_that("scrape_urls() needs to output 4 rows", {
skip_on_cran()
input <-
data.frame(Urls = c("http://web.archive.org/web/20171112174048/http://reddit.com:80/r/de", "http://web.archive.org/web/20171115220704/https://reddit.com/r/de"),
title = c("Der Frauen höchstes Glück ist das stillen des Hungers", "Am besten mit Frankfurter Kranz."),
author = c("Wilhelm_Blumberg", "NebuKadneZaar"),
stoppedat = 3)
output <-
scrape_urls(
c(
"http://web.archive.org/web/20171112174048/http://reddit.com:80/r/de",
"http://web.archive.org/web/20171115220704/https://reddit.com/r/de",
"http://web.archive.org/web/20171120193529/http://reddit.com/r/de",
"http://web.archive.org/web/20171123081007/https://www.reddit.com/r/de/",
"http://web.archive.org/web/20171129231144/https://reddit.com/r/de"
),
Paths = c(title = "(//p[@class='title']/a | //div//a/h2 | //div//h3)",
author = "(//p[contains(@class,'tagline')]/a | //div[contains(@class,'scrollerItem')]//a[starts-with(.,'u/')]/text() | //div[contains(@class,'NAURX0ARMmhJ5eqxQrlQW')]//span)"),
startnum = 4,
attachto = input)
expect_equal(nrow(output), 4)
})
#Check whether script runs without problems when collapse is TRUE
test_that("scrape_urls() needs to output 1 row", {
skip_on_cran()
output <-
scrape_urls(Urls = "http://web.archive.org/web/20201216060059/https://www.reddit.com/r/de/",
Paths = c(title = "//div/h3",
type = "//div[@class='rpBJOHq2PR60pnwJlUyP0']//a//div[contains(@class,'2X6EB3ZhEeXCh1eIVA64XM')]/span"),
collapse = TRUE)
expect_equal(nrow(output), 1)
})
#Check whether number of elements for paths differs
test_that("scrape_urls() needs the number of elements for paths to be equal", {
skip_on_cran()
expect_warning(
output <- scrape_urls(Urls = "http://web.archive.org/web/20201216060059/https://www.reddit.com/r/de/",
Paths = c(title = "//div/h3",
type = "//div[@class='rpBJOHq2PR60pnwJlUyP0']//a//div[contains(@class,'2X6EB3ZhEeXCh1eIVA64XM')]/span"),
collapse = FALSE,
ignoreErrors = FALSE
),
"Number of elements for paths differs"
)
expect_is(output, "data.frame")
})
#Check whether script runs without problems when collapse & ignoreErrors is TRUE
test_that("scrape_urls() needs to output 1 row", {
skip_on_cran()
output <-
scrape_urls(Urls = "http://web.archive.org/web/20201216060059/https://www.reddit.com/r/de/",
Paths = c(title = "//div/h3",
type = "//div[@class='rpBJOHq2PR60pnwJlUyP0']//a//div[contains(@class,'2X6EB3ZhEeXCh1eIVA64XM')]/span"),
collapse = TRUE,
ignoreErrors = TRUE)
expect_equal(nrow(output), 1)
})
#Check whether script runs without problems when collapse & ignoreErrors is FALSE
test_that("scrape_urls() needs to output 5 rows", {
skip_on_cran()
output <-
scrape_urls(Urls = "http://web.archive.org/web/20201230202327/https://www.reddit.com/r/de/",
Paths = c(title = "(//p[@class='title']/a | //div//a/h2 | //div//h3)",
type = "//div[@class='rpBJOHq2PR60pnwJlUyP0']//a//div[contains(@class,'2X6EB3ZhEeXCh1eIVA64XM')]/span"),
collapse = FALSE,
ignoreErrors = FALSE)
expect_equal(nrow(output), 5)
})
| /tests/testthat/test_scrape_urls.R | permissive | KostaGav/archiveRetriever | R | false | false | 23,453 | r | context("check-scrapeURLs-output")
library(testthat)
library(webmockr)
library(archiveRetriever)
#Several tests are skipped_on_cran as they require an internet connection and it is always possible that the Internet Archive might be inaccessible at times. The tests do run successfully on our machines and were originally run on Cran using mock files with the vcr package. As the testing environment with vcr is not working at the moment due to problems with the package vcr, we are working towards a new solution to run our tests with mock files and will update our testing environment as soon as possible!
#Check whether function output is data frame
test_that("scrape_urls() returns a data frame", {
skip_on_cran()
output <-
scrape_urls(
"http://web.archive.org/web/20190502052859/http://www.taz.de/Praesident-Trong-scheut-Oeffentlichkeit/!5588752/",
Paths = c(title = "//article//h1", content = "//article//p[contains(@class, 'article')]//text()"),
encoding = "bytes"
)
expect_is(output, "data.frame")
})
#Check whether function only takes Archive links
test_that("scrape_urls() only takes Internet Archive URLs as input", {
expect_error(
scrape_urls(
"https://labour.org.uk/about/labours-legacy/",
Paths = c(title = "//h1", content = "//p")
),
"Urls do not originate"
)
})
#Check whether Paths is character vector
test_that("scrape_urls() only takes character vectors as Paths", {
expect_error(
scrape_urls(
"http://web.archive.org/web/20201009174440/https://www.uni-mannheim.de/universitaet/profil/geschichte/",
c(title = 1)
),
"Paths is not a character vector"
)
})
#Check whether XPath vector is named
test_that("scrape_urls() only takes named XPath/CSS vector as Paths", {
expect_error(
scrape_urls(
"http://web.archive.org/web/20201009174440/https://www.uni-mannheim.de/universitaet/profil/geschichte/",
"//header//h1"
),
"Paths is not a named vector"
)
})
#Check whether Archive date is taken from the URL
test_that("scrape_urls() option archiveDate stores archiving date", {
  skip_on_cran()
  # archiveDate = TRUE should append an 'archiveDate' column as the fourth
  # column (after Urls, title and content), holding the snapshot date
  # parsed from the Internet Archive Url.
  # Fixed: use TRUE instead of T, and drop a second scrape_urls() call
  # whose result was discarded (a dead extra network request).
  output <-
    scrape_urls(
      "http://web.archive.org/web/20170125090337/http://www.ilsole24ore.com/art/motori/2017-01-23/toyota-yaris-205049.shtml?uuid=AEAqSFG&nmll=2707",
      Paths = c(title = "(//div[contains(@class,'title art11_title')]//h1 | //header/h1 | //h1[@class='atitle'] | //h1[@class='atitle '] | //article//article/header/h2[@class = 'title'] | //h2[@class = 'title'])", content = "(//*[@class='grid-8 top art11_body body']//p//text() | //article/div[@class='article-content ']/div/div/div//p//text() | //div[@class='aentry aentry--lined']//p//text())"),
      archiveDate = TRUE,
      encoding = "bytes"
    )
  expect_equal(names(output)[4], "archiveDate")
})
#Check whether function takes CSS instead of XPath
test_that("scrape_urls() takes CSS instead of XPath", {
skip_on_cran()
output <-
scrape_urls(
"http://web.archive.org/web/20190528072311/https://www.taz.de/Fusionsangebot-in-der-Autobranche/!5598075/",
Paths = c(title = "article h1"),
CSS = TRUE
)
expect_is(output, "data.frame")
})
#Check whether startnum is numeric
test_that("scrape_urls() needs numeric startnum", {
expect_error(scrape_urls(
c(
"http://web.archive.org/web/20201009174440/https://www.uni-mannheim.de/universitaet/profil/geschichte/",
"http://web.archive.org/web/20201009174440/https://www.uni-mannheim.de/universitaet/profil/geschichte/"
),
c(title = "//header//h1"),
startnum = "2"
), "startnum is not numeric")
})
#Check whether startnum exceeds number of Urls
test_that("scrape_urls() needs startnum smaller than input vector", {
expect_error(scrape_urls(
c(
"http://web.archive.org/web/20201009174440/https://www.uni-mannheim.de/universitaet/profil/geschichte/",
"http://web.archive.org/web/20201009174440/https://www.uni-mannheim.de/universitaet/profil/geschichte/"
),
c(title = "//header//h1"),
startnum = 3
),
"startnum value exceeds number of Urls given")
})
#Check whether startnum is single value
test_that("scrape_urls() needs startnum to be a single value", {
expect_error(scrape_urls(
c(
"http://web.archive.org/web/20201009174440/https://www.uni-mannheim.de/universitaet/profil/geschichte/",
"http://web.archive.org/web/20201009174440/https://www.uni-mannheim.de/universitaet/profil/geschichte/"
),
c(title = "//header//h1"),
startnum = c(1, 3)
),
"startnum is not a single value")
})
#Check whether CSS is a logical value
test_that("scrape_urls() needs CSS to be a logical value", {
expect_error(
scrape_urls(
"http://web.archive.org/web/20190528072311/https://www.taz.de/Fusionsangebot-in-der-Autobranche/!5598075/",
Paths = c(title = "article h1"),
CSS = "T"
),
"CSS is not a logical value"
)
})
#Check whether CSS is single value
test_that("scrape_urls() needs CSS to be a single logical value", {
expect_error(
scrape_urls(
"http://web.archive.org/web/20190528072311/https://www.taz.de/Fusionsangebot-in-der-Autobranche/!5598075/",
Paths = c(title = "article h1"),
CSS = c(TRUE, TRUE)
),
"CSS is not a single value"
)
})
#Check whether archiveDate is a logical value
test_that("scrape_urls() needs archiveDate to be a logical value", {
expect_error(
scrape_urls(
"http://web.archive.org/web/20190528072311/https://www.taz.de/Fusionsangebot-in-der-Autobranche/!5598075/",
Paths = c(title = "article h1"),
CSS = TRUE,
archiveDate = "T"
),
"archiveDate is not a logical value"
)
})
#Check whether archiveDate is single value
test_that("scrape_urls() needs archiveDate to be a single logical value", {
expect_error(
scrape_urls(
"http://web.archive.org/web/20190528072311/https://www.taz.de/Fusionsangebot-in-der-Autobranche/!5598075/",
Paths = c(title = "article h1"),
CSS = TRUE,
archiveDate = c(TRUE, TRUE)
),
"archiveDate is not a single value"
)
})
#Check whether ignoreErrors is a logical value
test_that("scrape_urls() needs ignoreErrors to be a logical value", {
expect_error(
scrape_urls(
"http://web.archive.org/web/20190528072311/https://www.taz.de/Fusionsangebot-in-der-Autobranche/!5598075/",
Paths = c(title = "article h1"),
CSS = TRUE,
archiveDate = TRUE,
ignoreErrors = "T"
),
"ignoreErrors is not a logical value"
)
})
#Check whether ignoreErrors is single value
test_that("scrape_urls() needs ignoreErrors to be a single logical value", {
expect_error(
scrape_urls(
"http://web.archive.org/web/20190528072311/https://www.taz.de/Fusionsangebot-in-der-Autobranche/!5598075/",
Paths = c(title = "article h1"),
CSS = TRUE,
archiveDate = TRUE,
ignoreErrors = c(TRUE, TRUE)
),
"ignoreErrors is not a single value"
)
})
#Check whether stopatempty is a logical value
test_that("scrape_urls() needs stopatempty to be a logical value", {
expect_error(
scrape_urls(
"http://web.archive.org/web/20190528072311/https://www.taz.de/Fusionsangebot-in-der-Autobranche/!5598075/",
Paths = c(title = "article h1"),
CSS = TRUE,
archiveDate = TRUE,
ignoreErrors = TRUE,
stopatempty = "T"
),
"stopatempty is not a logical value"
)
})
#Check whether stopatempty is single value
test_that("scrape_urls() needs stopatempty to be a single logical value", {
expect_error(
scrape_urls(
"http://web.archive.org/web/20190528072311/https://www.taz.de/Fusionsangebot-in-der-Autobranche/!5598075/",
Paths = c(title = "article h1"),
CSS = TRUE,
archiveDate = TRUE,
ignoreErrors = TRUE,
stopatempty = c(TRUE, TRUE)
),
"stopatempty is not a single value"
)
})
#Check whether emptylim is a numeric value
test_that("scrape_urls() needs emptylim to be a numeric value", {
expect_error(
scrape_urls(
"http://web.archive.org/web/20190528072311/https://www.taz.de/Fusionsangebot-in-der-Autobranche/!5598075/",
Paths = c(title = "article h1"),
CSS = TRUE,
archiveDate = TRUE,
ignoreErrors = TRUE,
stopatempty = TRUE,
emptylim = "5"
),
"emptylim is not numeric"
)
})
#Check whether emptylim is single value
# Fixed: the description duplicated the previous test ("numeric value");
# this test actually checks that emptylim must be a single value.
test_that("scrape_urls() needs emptylim to be a single value", {
  expect_error(
    scrape_urls(
      "http://web.archive.org/web/20190528072311/https://www.taz.de/Fusionsangebot-in-der-Autobranche/!5598075/",
      Paths = c(title = "article h1"),
      CSS = TRUE,
      archiveDate = TRUE,
      ignoreErrors = TRUE,
      stopatempty = TRUE,
      emptylim = c(5, 6)
    ),
    "emptylim is not a single value"
  )
})
#Check whether encoding is a character value
test_that("scrape_urls() needs encoding to be a character value", {
expect_error(
scrape_urls(
"http://web.archive.org/web/20190528072311/https://www.taz.de/Fusionsangebot-in-der-Autobranche/!5598075/",
Paths = c(title = "article h1"),
CSS = TRUE,
archiveDate = TRUE,
ignoreErrors = TRUE,
stopatempty = TRUE,
emptylim = 5,
encoding = 1991
),
"encoding is not a character value"
)
})
#Check whether encoding is single value
# Fixed: the description duplicated the previous test ("character value");
# this test actually checks that encoding must be a single value.
test_that("scrape_urls() needs encoding to be a single value", {
  expect_error(
    scrape_urls(
      "http://web.archive.org/web/20190528072311/https://www.taz.de/Fusionsangebot-in-der-Autobranche/!5598075/",
      Paths = c(title = "article h1"),
      CSS = TRUE,
      archiveDate = TRUE,
      ignoreErrors = TRUE,
      stopatempty = TRUE,
      emptylim = 5,
      encoding = c("UTF-8", "bytes")
    ),
    "encoding is not a single value"
  )
})
#Check whether data is being correctly attached to existing data set
test_that("scrape_urls() needs to start with second row when startnum is 2", {
skip_on_cran()
output <-
scrape_urls(
c(
"http://web.archive.org/web/20190310015353/https://www.uni-mannheim.de/universitaet/profil/geschichte/",
"http://web.archive.org/web/20201009174440/https://www.uni-mannheim.de/universitaet/profil/geschichte/"
),
c(title = "//header//h1"),
startnum = 2
)
expect_equal(output$Urls[1], "http://web.archive.org/web/20201009174440/https://www.uni-mannheim.de/universitaet/profil/geschichte/")
})
#Check whether only some XPaths could be scraped
test_that("scrape_urls() needs to warn if only some XPaths can be scraped", {
skip_on_cran()
expect_warning(
scrape_urls(
"http://web.archive.org/web/20190502052859/http://www.taz.de/Praesident-Trong-scheut-Oeffentlichkeit/!5588752/",
Paths = c(title = "/blablabla", content = "//article//p[contains(@class, 'article')]//text()"),
ignoreErrors = FALSE,
encoding = "bytes"
),
"Only some of your Paths"
)
})
#Check whether data is being correctly processed
test_that("scrape_urls() needs to set NA if page cannot be scraped", {
skip_on_cran()
output <-
scrape_urls(
c(
"http://web.archive.org/web/20190502052859/http://www.taz.de/Praesident-Trong-scheut-Oeffentlichkeit/!5588752/",
"http://web.archive.org/web/20190502052859/http://blogs.taz.de/",
"http://web.archive.org/web/20190502052859/http://www.taz.de/Galerie/Die-Revolution-im-Sudan/!g5591075/"
),
Paths = c(title = "//article//h1", content = "//article//p[contains(@class, 'article')]//text()")
)
expect_equal(is.na(output$title[3]), TRUE)
})
#Check whether process stop if too many rows are empty
test_that("scrape_urls() needs to stop if too many row are empty", {
skip_on_cran()
expect_warning(
scrape_urls(
c(
"http://web.archive.org/web/20190502052859/http://www.taz.de/Praesident-Trong-scheut-Oeffentlichkeit/!5588752/",
"http://web.archive.org/web/20190502052859/http://blogs.taz.de/",
"http://web.archive.org/web/20190502052859/http://blogs.taz.de/lostineurope",
"http://web.archive.org/web/20190502052859/http://blogs.taz.de/lostineurope"
),
Paths = c(title = "//article//h1", content = "//article//p[contains(@class, 'article')]//text()"),
stopatempty = TRUE,
emptylim = 2
),
"Too many empty outputs in a row"
)
})
#Check if re-start after break and attachto works
test_that("scrape_urls() needs to take up process if it breaks", {
  skip_on_cran()
  # attachto carries the partial result of an earlier, interrupted run
  # (columns Urls/title/content plus stoppedat = 4); scraping should resume
  # from URL 4 and return a combined result.
  output <-
    scrape_urls(
      c(
        "http://web.archive.org/web/20190502052859/http://www.taz.de/Praesident-Trong-scheut-Oeffentlichkeit/!5588752/",
        "http://web.archive.org/web/20190502052859/http://blogs.taz.de/",
        "http://web.archive.org/web/20190502052859/http://blogs.taz.de/lostineurope",
        "http://web.archive.org/web/20190502052859/http://blogs.taz.de/lostineurope/blogfeed/"
      ),
      Paths = c(title = "//article//h1", content = "//article//p[contains(@class, 'article')]//text()"),
      stopatempty = FALSE,
      attachto = tibble::tibble(
        Urls = c(
          "http://web.archive.org/web/20190502052859/http://www.taz.de/Praesident-Trong-scheut-Oeffentlichkeit/!5588752/",
          "http://web.archive.org/web/20190502052859/http://blogs.taz.de/",
          "http://web.archive.org/web/20190502052859/http://blogs.taz.de/lostineurope"
        ),
        title = c("Vietnamesen rätseln um Staatschef",
                  "",
                  ""),
        content = c(
          "Wer regiert Vietnam? Offenbar ist Partei- und Staatschef Nguyen Phu Trong dazu nicht mehr fähig:",
          "",
          ""
        ),
        stoppedat = 4
      )
    )
  # Urls, title and content -> exactly 3 columns expected in the merged output.
  expect_equal(ncol(output), 3)
})
#Check if re-start after break and attachto works
test_that("scrape_urls() should not take up process if it stems from other process",
          {
            # Here attachto has mismatching columns ('inhalt'/'progress' instead
            # of 'content'/'stoppedat'), so it cannot be a failed output of this
            # configuration and scrape_urls() must reject it with an error.
            # NOTE(review): no skip_on_cran() — presumably the error is raised
            # during input validation before any network request; confirm.
            expect_error(
              scrape_urls(
                c(
                  "http://web.archive.org/web/20190502052859/http://www.taz.de/Praesident-Trong-scheut-Oeffentlichkeit/!5588752/",
                  "http://web.archive.org/web/20190502052859/http://blogs.taz.de/",
                  "http://web.archive.org/web/20190502052859/http://blogs.taz.de/lostineurope",
                  "http://web.archive.org/web/20190502052859/http://blogs.taz.de/lostineurope/blogfeed/"
                ),
                Paths = c(title = "//article//h1", content = "//article//p[contains(@class, 'article')]//text()"),
                stopatempty = FALSE,
                attachto = tibble::tibble(
                  Urls = c(
                    "http://web.archive.org/web/20190502052859/http://www.taz.de/Praesident-Trong-scheut-Oeffentlichkeit/!5588752/",
                    "http://web.archive.org/web/20190502052859/http://blogs.taz.de/",
                    "http://web.archive.org/web/20190502052859/http://blogs.taz.de/lostineurope"
                  ),
                  title = c("Vietnamesen rätseln um Staatschef",
                            "",
                            ""),
                  inhalt = c(
                    "Wer regiert Vietnam? Offenbar ist Partei- und Staatschef Nguyen Phu Trong dazu nicht mehr fähig:",
                    "",
                    ""
                  ),
                  progress = c(1, 0, 0)
                )
              ),
              "attachto must be a failed output of this function"
            )
          })
# The scraper pauses after every 20 URLs; feeding it 21 copies of the same
# snapshot URL pushes it past that threshold and all rows must still be
# returned afterwards.
test_that("scrape_urls() needs to sleep every 20 Urls", {
  skip_on_cran()
  # rep() builds a character vector identical to spelling out 21 literals.
  snapshot_url <- "http://web.archive.org/web/20201009174440/https://www.uni-mannheim.de/universitaet/profil/geschichte/"
  output <- scrape_urls(rep(snapshot_url, 21), c(title = "//header//h1"))
  # One result row per input URL is expected back.
  expect_equal(nrow(output), 21)
})
#Check whether script runs without problems in case of timeout of website
test_that("scrape_urls() should not fail if website has timeout", {
  # webmockr intercepts the HTTP request and simulates a timeout; the scraper
  # should degrade gracefully and still hand back a data frame.
  webmockr::enable()
  webmockr::to_timeout(
    webmockr::stub_request(
      "get", "http://web.archive.org/web/20190502052859/http://www.taz.de/Praesident-Trong-scheut-Oeffentlichkeit/!5588752/")
  )
  output <- scrape_urls(
    "http://web.archive.org/web/20190502052859/http://www.taz.de/Praesident-Trong-scheut-Oeffentlichkeit/!5588752/",
    Paths = c(title = "//article//h1", content = "//article//p[contains(@class, 'article')]//text()"),
    encoding = "bytes"
  )
  expect_is(output, "data.frame")
  # Restore real HTTP for the remaining tests.
  webmockr::disable()
})
#Check whether script runs without problems when collapse is FALSE
test_that("scrape_urls() needs to output 5 rows", {
  skip_on_cran()
  # collapse = FALSE keeps each matched node as its own row; this snapshot is
  # expected to yield 5 rows.
  output <-
    scrape_urls(Urls = "http://web.archive.org/web/20201216060059/https://www.reddit.com/r/de/",
                Paths = c(title = "//div/h3",
                          type = "//div[@class='rpBJOHq2PR60pnwJlUyP0']//a//div[contains(@class,'2X6EB3ZhEeXCh1eIVA64XM')]/span"),
                collapse = FALSE,
                ignoreErrors = TRUE)
  expect_equal(nrow(output), 5)
})
#Check whether new content is being correctly attached to existing object
test_that("scrape_urls() needs to output 4 rows", {
  skip_on_cran()
  # `input` mimics a previous partial run (2 scraped rows, stoppedat = 3);
  # scraping resumes at startnum = 4, so old and new rows together are
  # expected to give 4 rows.
  input <-
    data.frame(Urls = c("http://web.archive.org/web/20171112174048/http://reddit.com:80/r/de", "http://web.archive.org/web/20171115220704/https://reddit.com/r/de"),
               title = c("Der Frauen höchstes Glück ist das stillen des Hungers", "Am besten mit Frankfurter Kranz."),
               author = c("Wilhelm_Blumberg", "NebuKadneZaar"),
               stoppedat = 3)
  output <-
    scrape_urls(
      c(
        "http://web.archive.org/web/20171112174048/http://reddit.com:80/r/de",
        "http://web.archive.org/web/20171115220704/https://reddit.com/r/de",
        "http://web.archive.org/web/20171120193529/http://reddit.com/r/de",
        "http://web.archive.org/web/20171123081007/https://www.reddit.com/r/de/",
        "http://web.archive.org/web/20171129231144/https://reddit.com/r/de"
      ),
      Paths = c(title = "(//p[@class='title']/a | //div//a/h2 | //div//h3)",
                author = "(//p[contains(@class,'tagline')]/a | //div[contains(@class,'scrollerItem')]//a[starts-with(.,'u/')]/text() | //div[contains(@class,'NAURX0ARMmhJ5eqxQrlQW')]//span)"),
      startnum = 4,
      attachto = input)
  expect_equal(nrow(output), 4)
})
#Check whether script runs without problems when collapse is TRUE
test_that("scrape_urls() needs to output 1 row", {
  skip_on_cran()
  # collapse = TRUE concatenates all matches per URL into a single row.
  output <-
    scrape_urls(Urls = "http://web.archive.org/web/20201216060059/https://www.reddit.com/r/de/",
                Paths = c(title = "//div/h3",
                          type = "//div[@class='rpBJOHq2PR60pnwJlUyP0']//a//div[contains(@class,'2X6EB3ZhEeXCh1eIVA64XM')]/span"),
                collapse = TRUE)
  expect_equal(nrow(output), 1)
})
#Check whether number of elements for paths differs
test_that("scrape_urls() needs the number of elements for paths to be equal", {
  skip_on_cran()
  # The two XPaths match different numbers of nodes; with collapse = FALSE and
  # ignoreErrors = FALSE this should warn but still return a data frame.
  expect_warning(
    output <- scrape_urls(Urls = "http://web.archive.org/web/20201216060059/https://www.reddit.com/r/de/",
               Paths = c(title = "//div/h3",
               type = "//div[@class='rpBJOHq2PR60pnwJlUyP0']//a//div[contains(@class,'2X6EB3ZhEeXCh1eIVA64XM')]/span"),
               collapse = FALSE,
               ignoreErrors = FALSE
  ),
  "Number of elements for paths differs"
  )
  expect_is(output, "data.frame")
})
#Check whether script runs without problems when collapse & ignoreErrors is TRUE
test_that("scrape_urls() needs to output 1 row", {
  skip_on_cran()
  # With both collapse and ignoreErrors TRUE the mismatching node counts are
  # tolerated and everything is folded into a single row.
  output <-
    scrape_urls(Urls = "http://web.archive.org/web/20201216060059/https://www.reddit.com/r/de/",
                Paths = c(title = "//div/h3",
                          type = "//div[@class='rpBJOHq2PR60pnwJlUyP0']//a//div[contains(@class,'2X6EB3ZhEeXCh1eIVA64XM')]/span"),
                collapse = TRUE,
                ignoreErrors = TRUE)
  expect_equal(nrow(output), 1)
})
#Check whether script runs without problems when collapse & ignoreErrors is FALSE
test_that("scrape_urls() needs to output 5 rows", {
  skip_on_cran()
  # Different snapshot whose two paths presumably match equal node counts, so
  # no warning is needed and 5 separate rows come back.
  output <-
    scrape_urls(Urls = "http://web.archive.org/web/20201230202327/https://www.reddit.com/r/de/",
                Paths = c(title = "(//p[@class='title']/a | //div//a/h2 | //div//h3)",
                          type = "//div[@class='rpBJOHq2PR60pnwJlUyP0']//a//div[contains(@class,'2X6EB3ZhEeXCh1eIVA64XM')]/span"),
                collapse = FALSE,
                ignoreErrors = FALSE)
  expect_equal(nrow(output), 5)
})
|
# CompareAbacusOutputFiles.R
# Verify that several ABACUS output files contain identical data, then reshape
# Steven's NSAF values into the same layout as Kaitlyn's file for comparison.
# Guard the install: an unconditional install.packages() re-installs the
# package on every run of the script.
if (!requireNamespace("arsenal", quietly = TRUE)) install.packages("arsenal")
library(arsenal)
library(gtools)
#Compare 02/14/2017 data with Sean's march 1 data
data_SR <- read.csv("~/Documents/GitHub/OysterSeedProject/raw_data/ABACUS_output021417.tsv", sep = "\t", header = TRUE, stringsAsFactors = FALSE)
data_SB <- read.csv("~/Documents/GitHub/OysterSeedProject/raw_data/ABACUS_outputMar1.tsv", sep = "\t", header = TRUE, stringsAsFactors = FALSE)
compare(data_SR, data_SB)
###shows no difference in files
#Compare Object
#Function Call:
# compare.data.frame(x = data_SR, y = data_SB)
#Shared: 457 variables and 8443 observations.
#Not shared: 0 variables and 0 observations.
#Differences found in 0/456 variables compared.
#0 variables compared have non-identical attributes.
#confirmed by command line diff
#D-10-18-212-233:Desktop Shelly$ diff ~/Documents/GitHub/OysterSeedProject/raw_data/ABACUS_outputMar1.tsv ~/Documents/GitHub/OysterSeedProject/raw_data/ABACUS_output021417.tsv
#D-10-18-212-233:Desktop Shelly$
# determine what said 'NSAF' values are
data_SR_NSAF <- read.csv("~/Documents/GitHub/OysterSeedProject/raw_data/ABACUS_output021417NSAF.tsv", sep = "\t", header = TRUE, stringsAsFactors = FALSE)
# keep protein ID plus the adjusted spectral-count columns, renamed to match
data_SB_NUMSPECADJ <- data_SB[, c(1, grep("NUMSPECSADJ", colnames(data_SB)))]
colnames(data_SB_NUMSPECADJ) <- gsub("NUMSPECSADJ", "ADJNSAF", colnames(data_SB_NUMSPECADJ))
compare(data_SR_NSAF, data_SB_NUMSPECADJ)
#Compare Object
#Function Call:
#compare.data.frame(x = data_SR_NSAF, y = data_SB_NUMSPECADJ)
#Shared: 46 variables and 8443 observations.
#Not shared: 0 variables and 0 observations.
#Differences found in 0/45 variables compared.
#0 variables compared have non-identical attributes.
#What is Kaitlyn's data?
data_KM <- read.csv("~/Documents/GitHub/OysterSeedProject/raw_data/ABACUSdata_only.csv", header = TRUE, stringsAsFactors = FALSE)
#convert steven's column names
data_SR_NSAF_avg <- data_SR_NSAF[, c(1, grep("NSAF", colnames(data_SR_NSAF)))]
colnames(data_SR_NSAF_avg) <- gsub(pattern = "X20161205_SAMPLE_", "", colnames(data_SR_NSAF_avg))
colnames(data_SR_NSAF_avg) <- gsub(pattern = "_ADJNSAF", "", colnames(data_SR_NSAF_avg))
data_SR_NSAF_avg <- data_SR_NSAF_avg[, c("PROTID", mixedsort(colnames(data_SR_NSAF_avg[, -1])))]
#find avg of each technical rep (https://stackoverflow.com/questions/13739243/average-pairs-of-columns-in-r)
# average every pair of adjacent technical-replicate columns
data_SR_NSAFavg <- data.frame(sapply(seq(2, ncol(data_SR_NSAF_avg), 2), function(i) {
  rowMeans(data_SR_NSAF_avg[, c(i, i + 1)], na.rm = TRUE)
}))
#create column names similar to Kaitlyn's file
colnames(data_SR_NSAFavg) <- mixedsort(colnames(data_SR_NSAF_avg[, c(-1, -grep("A", colnames(data_SR_NSAF_avg)))]))
meta_data <- read.csv("~/Documents/GitHub/OysterSeedProject/analysis/nmds_R/Rhonda_new_sample_names.csv", header = TRUE, stringsAsFactors = FALSE)
meta_data$silo <- substr(meta_data$Contents, 5, 5)
meta_data$day <- substr(meta_data$SampleName, 5, 6)
# Map each sample ID to "<silo>_<day>" via the metadata table; vapply returns
# a character vector directly instead of growing a list in a loop.
new_colnames <- vapply(colnames(data_SR_NSAFavg), function(sample) {
  paste(meta_data[meta_data$SampleID == sample, "silo"],
        meta_data[meta_data$SampleID == sample, "day"], sep = "_")
}, character(1), USE.NAMES = FALSE)
colnames(data_SR_NSAFavg) <- new_colnames
data_SR_NSAF_avg <- cbind(data.frame(data_SR_NSAF_avg[, "PROTID"], stringsAsFactors = FALSE), data_SR_NSAFavg)
colnames(data_KM)
#[1] "Protein.ID"        "CompetentLarvae_1" "X2_3"              "X3_3"              "X9_3"
#[6] "X2_5"              "X3_5"              "X9_5"              "X2_7"              "X3_7"
#[11] "X9_7"              "X2_9"              "X3_9"              "X9_9"              "X2_11"
#[16] "X3_11"             "X9_11"             "X2_13"             "X3_13"             "X9_13"
#[21] "X2_15"             "X3_15"             "X9_15"
colnames(data_SR_NSAF_avg)
#[1] "data_SR_NSAF_avg$PROTID" "e_0"                     "2_3"                     "3_3"
#[5] "9_3"                     "2_5"                     "3_5"                     "9_5"
#[9] "2_7"                     "3_7"                     "9_7"                     "2_9"
#[13] "3_9"                     "9_9"                     "2_11"                    "3_11"
#[17] "9_11"                    "2_13"                    "3_13"                    "9_13"
#[21] "2_15"                    "3_15"                    "9_15"
# align column names with Kaitlyn's file before comparing
colnames(data_SR_NSAF_avg) <- colnames(data_KM)
str(data_KM)
str(data_SR_NSAF_avg)
#sort files so they are in the same order
data_KM <- data_KM[order(data_KM$Protein.ID), ]
data_SR_NSAF_avg <- data_SR_NSAF_avg[order(data_SR_NSAF_avg$Protein.ID), ]
compare(data_KM, data_SR_NSAF_avg)
| /analysis/nmds_R/CompareAbacusOutputFiles.R | no_license | shellywanamaker/OysterSeedProject | R | false | false | 4,763 | r | install.packages("arsenal")
library(arsenal)
library(gtools)
#Compare 02/14/2017 data with Sean's march 1 data
data_SR <- read.csv("~/Documents/GitHub/OysterSeedProject/raw_data/ABACUS_output021417.tsv", sep = "\t" , header=TRUE, stringsAsFactors = FALSE)
data_SB <- read.csv("~/Documents/GitHub/OysterSeedProject/raw_data/ABACUS_outputMar1.tsv", sep = "\t" , header=TRUE, stringsAsFactors = FALSE)
compare(data_SR,data_SB)
###shows no difference in files
#Compare Object
#Function Call:
# compare.data.frame(x = data_SR, y = data_SB)
#Shared: 457 variables and 8443 observations.
#Not shared: 0 variables and 0 observations.
#Differences found in 0/456 variables compared.
#0 variables compared have non-identical attributes.
#confirmed by command line diff
#D-10-18-212-233:Desktop Shelly$ diff ~/Documents/GitHub/OysterSeedProject/raw_data/ABACUS_outputMar1.tsv ~/Documents/GitHub/OysterSeedProject/raw_data/ABACUS_output021417.tsv
#D-10-18-212-233:Desktop Shelly$
# determine what said 'NSAF' values are
data_SR_NSAF <- read.csv("~/Documents/GitHub/OysterSeedProject/raw_data/ABACUS_output021417NSAF.tsv", sep = "\t", header = TRUE, stringsAsFactors = FALSE)
data_SB_NUMSPECADJ <- data_SB[,c(1,grep("NUMSPECSADJ", colnames(data_SB)))]
colnames(data_SB_NUMSPECADJ) <- gsub("NUMSPECSADJ","ADJNSAF", colnames(data_SB_NUMSPECADJ))
compare(data_SR_NSAF,data_SB_NUMSPECADJ)
#Compare Object
#Function Call:
#compare.data.frame(x = data_SR_NSAF, y = data_SB_NUMSPECADJ)
#Shared: 46 variables and 8443 observations.
#Not shared: 0 variables and 0 observations.
#Differences found in 0/45 variables compared.
#0 variables compared have non-identical attributes.
#What is Kaitlyn's data?
data_KM <- read.csv("~/Documents/GitHub/OysterSeedProject/raw_data/ABACUSdata_only.csv", header = TRUE, stringsAsFactors = FALSE)
#convert steven's column names
data_SR_NSAF_avg <- data_SR_NSAF[,c(1,grep("NSAF", colnames(data_SR_NSAF)))]
colnames(data_SR_NSAF_avg) <- gsub(pattern = "X20161205_SAMPLE_", "", colnames(data_SR_NSAF_avg))
colnames(data_SR_NSAF_avg) <- gsub(pattern = "_ADJNSAF", "", colnames(data_SR_NSAF_avg))
data_SR_NSAF_avg <- data_SR_NSAF_avg[,c("PROTID",mixedsort(colnames(data_SR_NSAF_avg[,-1])))]
#find avg of each technical rep (https://stackoverflow.com/questions/13739243/average-pairs-of-columns-in-r)
data_SR_NSAFavg <- data.frame(sapply(seq(2,ncol(data_SR_NSAF_avg),2), function(i) {
rowMeans(data_SR_NSAF_avg[,c(i, i+1)], na.rm=T)
}))
#create column names similar to Kaitlyn's file
colnames(data_SR_NSAFavg) <- mixedsort(colnames(data_SR_NSAF_avg[,c(-1,-grep("A",colnames(data_SR_NSAF_avg)))]))
meta_data <- read.csv("~/Documents/GitHub/OysterSeedProject/analysis/nmds_R/Rhonda_new_sample_names.csv", header = TRUE, stringsAsFactors = FALSE)
meta_data$silo <- substr(meta_data$Contents,5,5)
meta_data$day <- substr(meta_data$SampleName,5,6)
new_colnames <- list()
for (i in 1:length(colnames(data_SR_NSAFavg))){
sample <- colnames(data_SR_NSAFavg)[i]
new_colnames[[i]] <- paste(meta_data[meta_data$SampleID == sample,"silo"],meta_data[meta_data$SampleID == sample, "day"], sep = "_")
}
colnames(data_SR_NSAFavg) <- new_colnames
data_SR_NSAF_avg <- cbind(data.frame(data_SR_NSAF_avg[,"PROTID"],stringsAsFactors = FALSE), data_SR_NSAFavg)
colnames(data_KM)
#[1] "Protein.ID" "CompetentLarvae_1" "X2_3" "X3_3" "X9_3"
#[6] "X2_5" "X3_5" "X9_5" "X2_7" "X3_7"
#[11] "X9_7" "X2_9" "X3_9" "X9_9" "X2_11"
#[16] "X3_11" "X9_11" "X2_13" "X3_13" "X9_13"
#[21] "X2_15" "X3_15" "X9_15"
colnames(data_SR_NSAF_avg)
#[1] "data_SR_NSAF_avg$PROTID" "e_0" "2_3" "3_3"
#[5] "9_3" "2_5" "3_5" "9_5"
#[9] "2_7" "3_7" "9_7" "2_9"
#[13] "3_9" "9_9" "2_11" "3_11"
#[17] "9_11" "2_13" "3_13" "9_13"
#[21] "2_15" "3_15" "9_15"
colnames(data_SR_NSAF_avg) <- colnames(data_KM)
str(data_KM)
str(data_SR_NSAF_avg)
#sort files so they are in the same order
data_KM <- data_KM[order(data_KM$Protein.ID),]
data_SR_NSAF_avg <- data_SR_NSAF_avg[order(data_SR_NSAF_avg$Protein.ID),]
compare(data_KM,data_SR_NSAF_avg)
|
# ---- Loading necessary packages
library(gtrendsR)
library(lubridate)
library(ggplot2)
# The section below runs the first Google Trends query.
# ---- Google trends query
# Query Google Trends for the keyword "lose weight" in Greece (geo "GR")
# over 2015-01-01 to 2018-12-31 and keep the full result object in df.
df <- gtrends(keyword = c("lose weight"), geo = c("GR"), time = "2015-01-01 2018-12-31")
# Extract the interest-over-time table from the result.
df_iot <- df$interest_over_time
# Winter subset: rows whose date falls in December, January or February.
winter <- df_iot[month(df_iot$date) == 12 | month(df_iot$date) == 1 | month(df_iot$date) == 2, ]
# Summer subset: rows whose date falls in June, July or August.
summer <- df_iot[month(df_iot$date) == 6 | month(df_iot$date) == 7 | month(df_iot$date) == 8, ]
# Run the same query again (namespaced call) and store the result in trends.
trends <- gtrendsR::gtrends(keyword = c("lose weight"), geo = c("GR"), time = "2015-01-01 2018-12-31")
# The section below extracts interest over time from the second query.
# ---- Getting interest over time
# Keep only the interest-over-time component.
interest_over_time <- trends$interest_over_time
# The section below saves the data to disk.
# ---- Saving data
# Run the identical 2015-01-01 to 2018-12-31 query once more and save the
# result in my_data.
my_data <- gtrends(keyword = c("lose weight"), geo = c("GR"), time = "2015-01-01 2018-12-31")
# Write the interest-over-time table to interestovertime.csv in the current
# working directory.
write.csv(my_data$interest_over_time, "interestovertime.csv")
# ---- Analysing data
group_a <- summer$hits
group_b <- winter$hits
# ---- Generating data
df_wide <- data.frame(group_a, group_b)
# ---- Showing data in wide format
knitr::kable(df_wide)
# ---- Running the analyses
# Kolmogorov-Smirnov tests against a normal distribution fitted to each group
ks.test(group_a, pnorm, mean(group_a), sd(group_a))
ks.test(group_b, pnorm, mean(group_b), sd(group_b))
# Two-sample t-test comparing summer and winter search interest
t.test(group_a, group_b)
# ---- Creating df_final variable
winter$season <- "winter"
summer$season <- "summer"
df_final <- rbind(summer, winter)
# ---- Data visualization
# Box plot of search interest by season
ggplot(data = df_final, aes(x = season, y = hits)) +
  geom_boxplot()
# ---- Generate code
a <- group_a
b <- group_b
result_1 <- ks.test(group_a, pnorm, mean(group_a), sd(group_a))
result_2 <- ks.test(group_b, pnorm, mean(group_b), sd(group_b))
result_3 <- t.test(x = a, y = b)
# ---- Results of ks-test
result_1
str(result_1)
names(result_1)
result_2
str(result_2)
names(result_2)
# ---- Results of t-test
result_3
str(result_3)
names(result_3)
| /data_collection.R | no_license | metallinoskonstantinos/MetallinosKon | R | false | false | 2,949 | r | # ---- Loading necessary packages
library(gtrendsR)
library(lubridate)
library(ggplot2)
# this is a comment indicating a google trends query is about to happen
# ---- Google trends query
# search google trends for the keyword "lose weight",
# in Greece between 01.01.2015 and 31.12.2018 and save
# the result in a variable called df
df <- gtrends(keyword = c("lose weight"),geo=c("GR"),time="2015-01-01 2018-12-31")
# save interest over time from df in a variable called df_iot
df_iot <- df$interest_over_time
# save all lines of the variable df_iot with a date in december,
# january or february in a variable called winter
winter <- df_iot[month(df_iot$date) == 12 |month(df_iot$date) == 1 |month(df_iot$date) == 2, ]
# save all lines of the variable df_iot with a date in june,
# july and august in a variable called summer
summer <- df_iot[month(df_iot$date) == 6 |month(df_iot$date) == 7 |month(df_iot$date) == 8, ]
# search google trends for the keyword "lose weight",
# in Greece between 01.01.2015 and 31.12.2018 an save
# the result in a variable called trends
trends <- gtrendsR::gtrends(keyword = c("lose weight"),geo = c("GR"),time = "2015-01-01 2018-12-31")
# this is a comment indicating to get interest over time
# ---- Getting interest over time
# save interest over time from the variable trends
# in a variable called interest_over_time
interest_over_time <- trends$interest_over_time
# this is a comment indicating to save data
# ---- Saving data
# search google trends for the keyword "lose weight",
# in Greece between 01.12.2016 and 31.12.2018 - again
# and save the result in a variable called my_data
my_data <- gtrends(keyword = c("lose weight"), geo = c("GR"), time = "2015-01-01 2018-12-31")
# save interest over time of the last search in a file
# called interestovertime.csv in the current directory
write.csv(my_data$interest_over_time,"interestovertime.csv")
# ---- Analysing data
group_a<-summer$hits
group_b<-winter$hits
# ---- Generating data
df_wide <- data.frame(group_a,group_b)
# ---- Showing data in wide format
knitr::kable((df_wide))
# ---- Running the analyses
# data from Kolmogorov-Smirnov analysis
ks.test(group_a, pnorm, mean(group_a), sd(group_a) )
ks.test(group_b, pnorm, mean(group_b), sd(group_b) )
# data from T-test analysis
t.test(group_a,group_b)
# ---- Creating df_final variable
winter$season <- "winter"
summer$season <- "summer"
df_final <- rbind(summer, winter)
# ---- Data visualization
#produce boxplot
ggplot(data = df_final, aes(x = season, y = hits)) +
geom_boxplot()
# ---- Generate code
a <- group_a; b <- group_b
result_1 <- ks.test(group_a, pnorm, mean(group_a), sd(group_a) )
result_2 <- ks.test(group_b, pnorm, mean(group_b), sd(group_b) )
result_3 <- t.test(x = a, y = b)
# ---- Results of ks-test
result_1
str(result_1)
names(result_1)
result_2
str(result_2)
names(result_2)
# ---- Results of t-test
result_3
str(result_3)
names(result_3)
|
/shumi/zemi/dp/Dynamic_Pattern_Alignment.R | no_license | jecht1014/book | R | false | false | 636 | r | ||
# plot2.R — plot Global Active Power for 2007-02-01/02 from the UCI
# "Individual household electric power consumption" dataset.
# Read data; the dataset marks missing values as "?", so declare that via
# na.strings instead of letting "?" force the column to a non-numeric type.
data <- read.table("household_power_consumption.txt", header = TRUE, sep = ";",
                   na.strings = "?")
# ensure the Date column is character (not a factor) before string comparison
data[, 1] <- as.character(data[, 1]) # Date
# keep only 2007-02-01 and 2007-02-02 (dates are dd/mm/yyyy in the raw file)
subData <- subset(data, Date == "1/2/2007" | Date == "2/2/2007")
# remove the full dataset to save memory
rm(data)
# coerce Global_active_power to numeric; as.character() first guards against
# the column having been read as a factor
subData[, 3] <- as.numeric(as.character(subData[, 3])) # Global_active_power
# draw graph: one reading per minute, so each day spans 1440 rows
png("plot2.png", width = 480, height = 480)
plot(subData[, 3], type = "l", xlab = "", ylab = "Global Active Power (kilowatts)", xaxt = "n")
axis(1, at = c(1, 1440, 2880), labels = c("Thu", "Fri", "Sat"))
dev.off()
| /plot2.R | no_license | zeningamrosa/ExData_Plotting1 | R | false | false | 655 | r | # Read data
data<-read.table("household_power_consumption.txt",header=T,sep=";")
# change first row as character
data[,1]<-as.character(data[,1]) # Date
# find 2007-2-1 and 2007-2-2 and make subset
subData<-subset(data,Date=="1/2/2007" | Date=="2/2/2007")
# remove data to save memory
rm(data)
# third low is "factor". transform twice to change factor to numeric
subData[,3]<-as.numeric(as.character(subData[,3])) #Global_active_power
# draw graph
png("plot2.png",width=480,height=480)
plot(subData[,3],type="l",xlab="",ylab="Global Active Power (killowatts)",xaxt='n')
axis(1,at=c(1,1440,2880),labels=c("Thu","Fri","Sat"))
dev.off()
|
### Classifying izakaya (Japanese pub) menu items with k-means ###
居酒屋 <- read.csv("居酒屋.csv", header = TRUE, row.names = 1) # load the data
居酒屋 # inspect the data
# kmeans() picks random starting centers, so fix the RNG seed to make the
# resulting clusters reproducible across runs.
set.seed(1)
居酒屋クラスター <- kmeans(居酒屋, centers = 4) # run the cluster analysis
居酒屋クラスター # inspect the result
# list which menu items ended up in each of the four clusters
tapply(names(居酒屋クラスター$cluster), 居酒屋クラスター$cluster, unique)
| /chapter6_3.R | no_license | mk102/datamining_nyumon | R | false | false | 536 | r | ###居酒屋メニューの分類###
居酒屋 <- read.csv("居酒屋.csv",header=T,row.names=1) #データの読み込み
居酒屋 #データの確認
居酒屋クラスター <-kmeans(居酒屋,centers=4) #クラスター分析の実行
居酒屋クラスター #結果の確認
tapply(names(居酒屋クラスター$cluster), 居酒屋クラスター$cluster,unique) #データの整理
|
#' @rdname weighted_sd
#' @export
weighted_ttest <- function(data, ...) {
  # S3 generic: dispatch on class(data). Data frames are handled by
  # weighted_ttest.default, formulas by weighted_ttest.formula (both defined
  # in this file).
  UseMethod("weighted_ttest")
}
#' @rdname weighted_sd
#' @export
weighted_ttest.default <- function(data, x, y = NULL, weights, mu = 0, paired = FALSE, ci.lvl = 0.95, alternative = c("two.sided", "less", "greater"), ...) {
  # Validate the confidence level only when the caller supplied one.
  # NOTE(review): scalar context — `&&` would be the idiomatic operator here.
  if (!missing(ci.lvl) & (length(ci.lvl) != 1 || !is.finite(ci.lvl) || ci.lvl < 0 || ci.lvl > 1))
    stop("'ci.lvl' must be a single number between 0 and 1")
  alternative <- match.arg(alternative)
  # Capture the unevaluated argument names (NSE): x, y and weights are passed
  # as bare column names and resolved against `data` below.
  x.name <- deparse(substitute(x))
  y.name <- deparse(substitute(y))
  w.name <- deparse(substitute(weights))
  if (y.name == "NULL") y.name <- NULL
  # No weights supplied -> fall back to unit weights (unweighted test).
  if (w.name == "NULL") {
    w.name <- "weights"
    data$weights <- 1
  }
  # create string with variable names
  vars <- c(x.name, y.name, w.name)
  # get data
  dat <- suppressMessages(dplyr::select(data, !! vars))
  dat <- na.omit(dat)
  # A t-test needs at least two complete rows; bail out with a warning.
  # NOTE(review): message grammar — "Too little data" would read better.
  if (sjmisc::is_empty(dat) || nrow(dat) == 1) {
    warning("Too less data to compute t-test.")
    return(NULL)
  }
  xv <- dat[[x.name]]
  # The same weight column applies to both variables (rows are aligned).
  wx <- wy <- dat[[w.name]]
  if (!is.null(y.name))
    yv <- dat[[y.name]]
  else
    yv <- NULL
  nx <- ny <- nrow(dat)
  # Delegate the actual computation; group labels are NULL for this method.
  weighted_ttest_helper(xv, yv, wx, wy, nx, ny, mu, paired, alternative, ci.lvl, x.name, y.name, NULL)
}
#' @rdname weighted_sd
#' @export
weighted_ttest.formula <- function(formula, data, mu = 0, paired = FALSE, ci.lvl = 0.95, alternative = c("two.sided", "less", "greater"), ...) {
  # Validate the confidence level only when the caller supplied one.
  # NOTE(review): scalar context — `&&` would be the idiomatic operator here.
  if (!missing(ci.lvl) & (length(ci.lvl) != 1 || !is.finite(ci.lvl) || ci.lvl < 0 || ci.lvl > 1))
    stop("'ci.lvl' must be a single number between 0 and 1")
  alternative <- match.arg(alternative)
  # The formula is expected as outcome ~ group (+ weights); all.vars()
  # returns the variable names in that order.
  vars <- all.vars(formula)
  g <- data[[vars[2]]]
  # Determine group levels: factor levels, or sorted unique values otherwise.
  if (is.factor(g))
    grps <- levels(g)
  else
    grps <- na.omit(sort(unique(g)))
  if (length(grps) > 2)
    stop("Grouping factor has more than two levels.")
  # No weighting variable in the formula -> unit weights.
  if (length(vars) < 3) {
    vars <- c(vars, "weights")
    data$weights <- 1
  }
  x <- data[[vars[1]]]
  y <- data[[vars[2]]]
  w <- data[[vars[3]]]
  # Split the outcome and its weights by group membership.
  xv <- x[y == grps[1]]
  yv <- x[y == grps[2]]
  wx <- w[y == grps[1]]
  wy <- w[y == grps[2]]
  # Drop missing outcome values together with their weights, per group.
  mxv <- is.na(xv)
  xv <- xv[!mxv]
  wx <- wx[!mxv]
  myv <- is.na(yv)
  yv <- yv[!myv]
  wy <- wy[!myv]
  nx <- length(xv)
  ny <- length(yv)
  # Value labels of the grouping variable — presumably consumed by the print
  # method of the returned object; confirm against print.sj_ttest.
  labs <- sjlabelled::get_labels(
    data[[vars[2]]],
    attr.only = FALSE,
    values = "p",
    drop.na = TRUE,
    drop.unused = TRUE
  )
  weighted_ttest_helper(xv, yv, wx, wy, nx, ny, mu, paired, alternative, ci.lvl, vars[1], vars[2], labs)
}
# Core computation shared by both weighted_ttest methods: builds an
# "sj_ttest" result from pre-split samples and their weights.
weighted_ttest_helper <- function(xv, yv, wx, wy, nx, ny, mu, paired, alternative, ci.lvl, x.name, y.name, group.name) {
  # A paired test reduces to a one-sample test on the differences.
  if (paired) {
    xv <- xv - yv
    yv <- NULL
  }

  # Weighted mean and standard error of the first sample.
  wmean_x <- stats::weighted.mean(xv, wx)
  se.x <- sqrt(weighted_sd(xv, wx)^2 / nx)

  if (is.null(yv)) {
    # One-sample (or paired) case.
    se <- se.x
    df <- nx - 1
    tstat <- (wmean_x - mu) / se
    estimate <- stats::setNames(wmean_x, if (paired) "mean of the differences" else "mean of x")
    method <- if (paired) "Paired t-test" else "One Sample t-test"
  } else {
    # Two-sample case with Welch-Satterthwaite degrees of freedom.
    wmean_y <- stats::weighted.mean(yv, wy)
    se.y <- sqrt(weighted_sd(yv, wy)^2 / ny)
    se <- sqrt(se.x^2 + se.y^2)
    df <- se^4 / (se.x^4 / (nx - 1) + se.y^4 / (ny - 1))
    tstat <- (wmean_x - wmean_y - mu) / se
    estimate <- c("mean of x" = wmean_x, "mean of y" = wmean_y)
    method <- "Two-Sample t-test"
  }

  # p-value and confidence bounds depend on the alternative hypothesis.
  if (alternative == "less") {
    pval <- stats::pt(tstat, df)
    cint <- c(-Inf, tstat + stats::qt(ci.lvl, df))
  } else if (alternative == "greater") {
    pval <- stats::pt(tstat, df, lower.tail = FALSE)
    cint <- c(tstat - stats::qt(ci.lvl, df), Inf)
  } else {
    pval <- 2 * stats::pt(-abs(tstat), df)
    half_width <- stats::qt(1 - (1 - ci.lvl) / 2, df)
    cint <- tstat + c(-half_width, half_width)
  }
  # Rescale the interval from t-units back to the scale of the data.
  cint <- mu + cint * se

  names(tstat) <- "t"
  names(df) <- "df"
  names(mu) <- if (paired || !is.null(yv)) "difference in means" else "mean"

  tt <- structure(
    class = "sj_ttest",
    list(
      estimate = estimate,
      statistic = tstat,
      df = df,
      p.value = pval,
      ci = cint,
      alternative = alternative,
      method = method
    )
  )

  attr(tt, "x.name") <- x.name
  attr(tt, "y.name") <- y.name
  attr(tt, "group.name") <- group.name

  tt
}
| /R/wtd_ttest.R | no_license | cran/sjstats | R | false | false | 4,514 | r | #' @rdname weighted_sd
#' @export
weighted_ttest <- function(data, ...) {
UseMethod("weighted_ttest")
}
#' @rdname weighted_sd
#' @export
weighted_ttest.default <- function(data, x, y = NULL, weights, mu = 0, paired = FALSE, ci.lvl = 0.95, alternative = c("two.sided", "less", "greater"), ...) {
if (!missing(ci.lvl) & (length(ci.lvl) != 1 || !is.finite(ci.lvl) || ci.lvl < 0 || ci.lvl > 1))
stop("'ci.lvl' must be a single number between 0 and 1")
alternative <- match.arg(alternative)
x.name <- deparse(substitute(x))
y.name <- deparse(substitute(y))
w.name <- deparse(substitute(weights))
if (y.name == "NULL") y.name <- NULL
if (w.name == "NULL") {
w.name <- "weights"
data$weights <- 1
}
# create string with variable names
vars <- c(x.name, y.name, w.name)
# get data
dat <- suppressMessages(dplyr::select(data, !! vars))
dat <- na.omit(dat)
if (sjmisc::is_empty(dat) || nrow(dat) == 1) {
warning("Too less data to compute t-test.")
return(NULL)
}
xv <- dat[[x.name]]
wx <- wy <- dat[[w.name]]
if (!is.null(y.name))
yv <- dat[[y.name]]
else
yv <- NULL
nx <- ny <- nrow(dat)
weighted_ttest_helper(xv, yv, wx, wy, nx, ny, mu, paired, alternative, ci.lvl, x.name, y.name, NULL)
}
#' @rdname weighted_sd
#' @export
weighted_ttest.formula <- function(formula, data, mu = 0, paired = FALSE, ci.lvl = 0.95, alternative = c("two.sided", "less", "greater"), ...) {
if (!missing(ci.lvl) & (length(ci.lvl) != 1 || !is.finite(ci.lvl) || ci.lvl < 0 || ci.lvl > 1))
stop("'ci.lvl' must be a single number between 0 and 1")
alternative <- match.arg(alternative)
vars <- all.vars(formula)
g <- data[[vars[2]]]
if (is.factor(g))
grps <- levels(g)
else
grps <- na.omit(sort(unique(g)))
if (length(grps) > 2)
stop("Grouping factor has more than two levels.")
if (length(vars) < 3) {
vars <- c(vars, "weights")
data$weights <- 1
}
x <- data[[vars[1]]]
y <- data[[vars[2]]]
w <- data[[vars[3]]]
xv <- x[y == grps[1]]
yv <- x[y == grps[2]]
wx <- w[y == grps[1]]
wy <- w[y == grps[2]]
mxv <- is.na(xv)
xv <- xv[!mxv]
wx <- wx[!mxv]
myv <- is.na(yv)
yv <- yv[!myv]
wy <- wy[!myv]
nx <- length(xv)
ny <- length(yv)
labs <- sjlabelled::get_labels(
data[[vars[2]]],
attr.only = FALSE,
values = "p",
drop.na = TRUE,
drop.unused = TRUE
)
weighted_ttest_helper(xv, yv, wx, wy, nx, ny, mu, paired, alternative, ci.lvl, vars[1], vars[2], labs)
}
# Shared computational core for sjstats-style weighted t-tests.
#
# xv, yv : NA-free observation vectors for group 1 / group 2 (yv is absent
#          in the one-sample case); wx, wy are the matching observation
#          weights and nx, ny the group sizes.
# mu     : hypothesised mean (or difference in means) under H0.
# ci.lvl : confidence level of the returned interval.
# x.name, y.name, group.name : labels attached as attributes for printing.
#
# Returns an object of class "sj_ttest" with estimate, t statistic,
# degrees of freedom, p-value and confidence interval.
weighted_ttest_helper <- function(xv, yv, wx, wy, nx, ny, mu, paired, alternative, ci.lvl, x.name, y.name, group.name) {
  # A paired test reduces to a one-sample test on the pairwise differences.
  # NOTE(review): only wx is carried forward as the weights of the
  # differences, and xv/yv are assumed aligned and equal-length -- confirm
  # this is guaranteed by the caller.
  if (paired) {
    xv <- xv - yv
    yv <- NULL
  }
  # Weighted mean, weighted variance and standard error of group x.
  mu.x.w <- stats::weighted.mean(xv, wx)
  var.x.w <- weighted_sd(xv, wx)^2
  se.x <- sqrt(var.x.w / nx)
  if (!is.null(yv)) {
    # Two-sample case: unpooled (Welch) variances with the
    # Welch-Satterthwaite approximation for the degrees of freedom.
    mu.y.w <- stats::weighted.mean(yv, wy)
    var.y.w <- weighted_sd(yv, wy)^2
    se.y <- sqrt(var.y.w / ny)
    se <- sqrt(se.x^2 + se.y^2)
    df <- se^4 / (se.x^4 / (nx - 1) + se.y^4 / (ny - 1))
    tstat <- (mu.x.w - mu.y.w - mu) / se
    estimate <- c(mu.x.w, mu.y.w)
    names(estimate) <- c("mean of x", "mean of y")
    method <- "Two-Sample t-test"
  } else {
    # One-sample (or paired-differences) case.
    se <- se.x
    df <- nx - 1
    tstat <- (mu.x.w - mu) / se
    estimate <- stats::setNames(mu.x.w, if (paired) "mean of the differences" else "mean of x")
    method <- if (paired) "Paired t-test" else "One Sample t-test"
  }
  # p-value and confidence bounds on the t scale, per alternative hypothesis.
  if (alternative == "less") {
    pval <- stats::pt(tstat, df)
    cint <- c(-Inf, tstat + stats::qt(ci.lvl, df))
  } else if (alternative == "greater") {
    pval <- stats::pt(tstat, df, lower.tail = FALSE)
    cint <- c(tstat - stats::qt(ci.lvl, df), Inf)
  } else {
    pval <- 2 * stats::pt(-abs(tstat), df)
    alpha <- 1 - ci.lvl
    cint <- stats::qt(1 - alpha / 2, df)
    cint <- tstat + c(-cint, cint)
  }
  # Map the interval from the t scale back to the data scale.
  cint <- mu + cint * se
  names(tstat) <- "t"
  names(df) <- "df"
  names(mu) <- if (paired || !is.null(yv)) "difference in means" else "mean"
  # Assemble the classed result consumed by the sj_ttest print method.
  tt <- structure(
    class = "sj_ttest",
    list(
      estimate = estimate,
      statistic = tstat,
      df = df,
      p.value = pval,
      ci = cint,
      alternative = alternative,
      method = method
    )
  )
  attr(tt, "x.name") <- x.name
  attr(tt, "y.name") <- y.name
  attr(tt, "group.name") <- group.name
  tt
}
|
\name{relative.effect}
\alias{relative.effect}
\title{Relative effects of covariates}
\description{
Estimate the extent to which a covariate is confounding the treatment
effect
}
\usage{
relative.effect(formula=NULL, data, sel=NULL, resp=NULL, treat=NULL, ...)
}
\arguments{
\item{formula}{an object of class 'formula' (or one that can be
coerced to that class): a symbolic description of a model to be
fitted.}
\item{data}{a data frame containing outcome, treatment and
covariates.}
\item{sel}{a vector of integers or strings indicating the covariates.}
\item{resp}{an integer or a string indicating the outcome variable.}
\item{treat}{an integer or a string indicating the treatment
variable.}
\item{...}{further arguments passed to or from other methods.}
}
\details{
The decision about the inclusion of covariates in the propensity score
model is often difficult. A measure describing the extent to which a
covariate is confounding the treatment effect on outcome can help to
decide on it. Covariates with a large impact are potential candidates
for the propensity score model.
The relative effect is defined as the difference between the adjusted
and unadjusted treatment effects, relative to the unadjusted effect (in
per cent). Therefore, treatment effects on outcome, unadjusted and
adjusted for covariates, are estimated using internally \code{glm}.
Two options are available to fit appropriate regression models. Either
a formula is specified, typically as 'resp ~ treat + cov'
(\code{formula}), or \code{resp}, \code{treat} and \code{sel} are
given to specify the outcome and treatment variable and the
covariates.
}
\value{ \code{relative.effect} returns a list containing the following
components:
\item{unadj.treat}{the estimated unadjusted treatment effect on
outcome.}
\item{adj.treat.cov}{a vector containing the estimated treatment
effects on outcome, individually adjusted for the selected
covariates.}
\item{rel.treat}{a vector containing the relative effect for each
covariate.}
\item{name.treat}{a string indicating the name of the treatment.}
\item{name.resp}{a string indicating the name of the outcome.}
\item{name.sel}{a vector of strings indicating the names of the
selected covariates.}
\item{family}{the error distribution and link function used in the
model (see \code{glm}).}
}
\author{Susanne Stampf \email{susanne.stampf@usb.ch}
}
\seealso{
\code{\link{glm}}, \link{formula}
}
\examples{
## STU1
data(stu1)
stu1.effect <-
relative.effect(data = stu1,
formula = pst~therapie+tgr+age)
## PRIDE
data(pride)
pride.effect <-
relative.effect(data = pride,
sel = c(2:14),
resp = 15,
treat = 1)
}
\keyword{
models
}
| /man/relative.effect.Rd | no_license | cran/nonrandom | R | false | false | 2,842 | rd | \name{relative.effect}
\alias{relative.effect}
\title{Relative effects of covariates}
\description{
Estimate the extent to which a covariate is confounding the treatment
effect
}
\usage{
relative.effect(formula=NULL, data, sel=NULL, resp=NULL, treat=NULL, ...)
}
\arguments{
\item{formula}{an object of class 'formula' (or one that can be
coerced to that class): a symbolic description of a model to be
fitted.}
\item{data}{a data frame containing outcome, treatment and
covariates.}
\item{sel}{a vector of integers or strings indicating the covariates.}
\item{resp}{an integer or a string indicating the outcome variable.}
\item{treat}{an integer or a string indicating the treatment
variable.}
\item{...}{further arguments passed to or from other methods.}
}
\details{
The decision about the inclusion of covariates in the propensity score
model is often difficult. A measure describing the extent to which a
covariate is confounding the treatment effect on outcome can help to
decide on it. Covariates with a large impact are potential candidates
for the propensity score model.
The relative effect is defined as the difference between the adjusted
and unadjusted treatment effects, relative to the unadjusted effect (in
per cent). Therefore, treatment effects on outcome, unadjusted and
adjusted for covariates, are estimated using internally \code{glm}.
Two options are available to fit appropriate regression models. Either
a formula is specified, typically as 'resp ~ treat + cov'
(\code{formula}), or \code{resp}, \code{treat} and \code{sel} are
given to specify the outcome and treatment variable and the
covariates.
}
\value{ \code{relative.effect} returns a list containing the following
components:
\item{unadj.treat}{the estimated unadjusted treatment effect on
outcome.}
\item{adj.treat.cov}{a vector containing the estimated treatment
effects on outcome, individually adjusted for the selected
covariates.}
\item{rel.treat}{a vector containing the relative effect for each
covariate.}
\item{name.treat}{a string indicating the name of the treatment.}
\item{name.resp}{a string indicating the name of the outcome.}
\item{name.sel}{a vector of strings indicating the names of the
selected covariates.}
\item{family}{the error distribution and link function used in the
model (see \code{glm}).}
}
\author{Susanne Stampf \email{susanne.stampf@usb.ch}
}
\seealso{
\code{\link{glm}}, \link{formula}
}
\examples{
## STU1
data(stu1)
stu1.effect <-
relative.effect(data = stu1,
formula = pst~therapie+tgr+age)
## PRIDE
data(pride)
pride.effect <-
relative.effect(data = pride,
sel = c(2:14),
resp = 15,
treat = 1)
}
\keyword{
models
}
|
# Page Number : 417
# Number of electrons transferred when a current of 2.5 mA flows for 30 ms.
I <- 2.5e-3     # current in amperes (2.5 mA); idiomatic `<-` and e-notation
t <- 30e-3      # duration in seconds (30 ms)
Q <- I * t      # charge transferred, Q = I * t (coulombs)
e <- 1.602e-19  # elementary charge (coulombs)
N <- Q / e      # number of electrons = total charge / charge per electron
print(N)
| /A_Textbook_Of_Electrical_Engineering_Materials_by_P._L._Kapoor/CH15/EX15.37/Ex15_37.R | permissive | FOSSEE/R_TBC_Uploads | R | false | false | 97 | r | # Page Number : 417
# Number of electrons transferred when a current of 2.5 mA flows for 30 ms.
I <- 2.5e-3     # current in amperes (2.5 mA); idiomatic `<-` and e-notation
t <- 30e-3      # duration in seconds (30 ms)
Q <- I * t      # charge transferred, Q = I * t (coulombs)
e <- 1.602e-19  # elementary charge (coulombs)
N <- Q / e      # number of electrons = total charge / charge per electron
print(N)
|
# Basic repertoire statistics: one boxplot per vdjtools metric column,
# grouped by a composite sample/cell-type label.
data<-read.table("https://raw.githubusercontent.com/Shicheng-Guo/ASCs/master/extdata/vdjtools.basicstats.txt",head=T,sep="\t")
head(data)
# Group label part 1: first character of the sample id (column 1).
idx1<-unlist(lapply(data[,1],function(x) substr(x,1,1)))
# NOTE(review): `input` is not defined until much later in this script, so
# this line fails (or picks up a stale object) when run top-to-bottom;
# `data` was probably intended -- confirm.  Also idx2 has length
# ncol(input) while idx1 has length nrow(data), so paste() recycles.
idx2<-unlist(lapply(colnames(input),function(x) unlist(strsplit(x,"[_]"))[2]))
idx<-paste(idx1,idx2,sep="_")
dim(data)
# One panel per statistic (columns 3+).
par(mfrow=c(3,4),mar=c(2,2,3,1))
for(i in 3:ncol(data)){
boxplot(data[,i]~idx,col=2:5,main=colnames(data)[i])
}
# Same visualisation for J-segment usage fractions.
data<-read.table("vdjtools.segments.wt.J.txt",head=T,sep="\t")
head(data)
idx1<-unlist(lapply(data[,1],function(x) substr(x,1,1)))
idx2<-unlist(lapply(colnames(input),function(x) unlist(strsplit(x,"[_]"))[2]))
idx<-paste(idx1,idx2,sep="_")
dim(data)
par(mfrow=c(2,4),mar=c(2,2,3,1))
for(i in 3:ncol(data)){
boxplot(data[,i]~idx,col=2:5,main=colnames(data)[i])
}
# V-segment usage: only plot segments with enough spread (sd > 0.0055)
# so the 4x4 panel grid stays readable.
data<-read.table("vdjtools.segments.wt.V.txt",head=T,sep="\t")
head(data)
dim(data)
idx1<-unlist(lapply(data[,1],function(x) substr(x,1,1)))
idx2<-unlist(lapply(colnames(input),function(x) unlist(strsplit(x,"[_]"))[2]))
idx<-paste(idx1,idx2,sep="_")
dim(data)
par(mfrow=c(4,4),mar=c(2,2,3,1))
for(i in 3:ncol(data)){
if(sd(data[,i])>0.0055){
boxplot(data[,i]~idx,col=2:5,main=colnames(data)[i],ylim=c(0,0.12))
}
}
# Align the sample sheet (S2.txt) with the vdjtools metadata and write the
# matched table out for conversion.
s1<-read.table("S2.txt",head=T,sep="\t")
s2<-read.table("vdjtools.m.txt",head=T,sep="\t")
# Match s2 rows to s1 sample IDs after stripping the ".sed" suffix.
s<-s2[match(s1[,1],gsub(".sed","",s2[,1])),]
write.table(s,file="vdjtools.m2.txt",col.names = T,row.names = F,sep="\t",quote=F)
# The two lines below are vdjtools shell commands, not R code; left bare
# they are parse errors that stop the whole script from sourcing.  Run them
# from a shell (or via system()) instead:
# vdjtools Convert -S mixcr -m vdjtools.m2.txt metadata.txt
# vdjtools CalcSegmentUsage -m metadata.txt vdjtools
# Spectratype: mean CDR3 amino-acid length per sample, then pairwise
# comparisons between groups.
data1<-read.table("vdjtools.spectratype.aa.wt.txt",head=T,sep="\t",check.names = F)
# Mean length per row: weighted sum of the length columns (7:22), whose
# column names are the lengths themselves.  Preallocated via vapply instead
# of growing Len with c() inside a loop.
len_cols <- 7:22
len_vals <- as.numeric(colnames(data1)[len_cols])
Len <- vapply(seq_len(nrow(data1)),
              function(i) sum(data1[i, len_cols] * len_vals),
              numeric(1))
# NOTE(review): "SampeType" looks like a typo for "SampleType"; if the real
# column is spelled differently the grouping below silently degrades -- confirm.
tapply(Len,paste(data1$SampeType,data1$CellType,sep="_"),mean)
table(paste(data1$SampeType,data1$CellType,sep="_"))
pdf("Len.boxplot.pdf")
boxplot(Len~paste(data1$CellType,data1$SampeType,sep="_"),cex.axis=0.6,col=c(3,2,3,2,3,2,3,2))
dev.off()
# Re-draw to capture the boxplot statistics for testing.
x<-boxplot(Len~paste(data1$CellType,data1$SampeType,sep="_"),cex.axis=0.6,col=c(2,3,2,3,2,3,2,3))
colnames(x$stats)<-x$names
y<-x$stats
# Welch t-test between adjacent boxplot columns (pairs 1-2, 3-4, ...),
# preallocated via vapply instead of growing P with c().
P <- vapply(c(1,3,5,7), function(i) t.test(y[,i], y[,i+1])$p.value, numeric(1))
P
# Relate mean CDR3 length to IGHJ6 usage per sample.
data2<-read.table("vdjtools.celltype.segments.wt.J.txt",head=T,sep="\t",check.names = F)
data2$IGHJ6
# Combine the spectratype table, mean lengths and J usage into one frame.
input<-data.frame(data1,Len,data2)
pdf("lenJH6.pdf")
# Colour by sample type, point shape by cell type.
plot(x=input$Len,y=input$IGHJ6,col=as.numeric(input$SampeType)+1,pch=as.numeric(input$CellType),cex=1.5)
dev.off()
# Per-V-segment t-tests; column 4 holds the grouping factor.
data<-read.table("vdjtools.celltype.segments.wt.V.txt",head=T,sep="\t",check.names = F)
P<-c()
for(i in 6:ncol(data)){
p<-t.test(data[,i]~data[,4])$p.value
P<-c(P,p)
}
names(P)<-colnames(data)[6:ncol(data)]
# NOTE(review): qqplot() requires two samples (x and y), so qqplot(P)
# errors; qqnorm(P) or a uniform QQ plot of the p-values was probably
# intended -- confirm and fix.
qqplot(P)
# Same per-segment tests for J usage.
data<-read.table("vdjtools.celltype.segments.wt.J.txt",head=T,sep="\t",check.names = F)
P<-c()
for(i in 6:ncol(data)){
p<-t.test(data[,i]~data[,4])$p.value
P<-c(P,p)
}
names(P)<-colnames(data)[6:ncol(data)]
qqplot(P)
# Split the sample sheet into one metadata file per sample ID.
meta <- read.table("metadata.txt", header = TRUE, sep = "\t")
for (sample_id in unique(meta$SampleID)) {
  input <- subset(meta, SampleID == sample_id)
  write.table(input, file = paste("metadata", sample_id, "txt", sep = "."),
              col.names = TRUE, row.names = FALSE, quote = FALSE, sep = "\t")
}
# Re-read and split again, this time one file per sample type.
meta <- read.table("metadata.txt", header = TRUE, sep = "\t")
for (sample_type in unique(meta$SampleType)) {
  input <- subset(meta, SampleType == sample_type)
  write.table(input, file = paste("metadata", sample_type, "txt", sep = "."),
              col.names = TRUE, row.names = FALSE, quote = FALSE, sep = "\t")
}
library(reshape2)
library("ggplot2")
# NOTE(review): hard-coded network path; prefer passing the directory in.
setwd("//mcrfnas2/bigdata/Genetic/Projects/shg047/rheumatology/SLE/BCR/vdj")
# Overlap metric R for sample pairs involving the BND cell type, healthy (H)
# versus SLE (S) cohorts.
Hs<-read.table("vdj.H.intersect.batch.aa.txt",head=T,sep="\t")
x<-subset(Hs,X1_SampleID==X2_SampleID &(X1_CellType=="BND" |X2_CellType=="BND" ))$R
Ss<-read.table("vdj.S.intersect.batch.aa.txt",head=T,sep="\t")
y<-subset(Ss,X1_SampleID==X2_SampleID &(X1_CellType=="BND" |X2_CellType=="BND" ))$R
# The raw-scale melt below is immediately overwritten by the log10 version;
# kept only for reference.
input<-melt(data.frame(H=x,S=y))
input<-melt(data.frame(H=log(x,10),S=log(y,10)))
head(input)
wilcox.test(value~variable,input)
t.test(value~variable,input)
boxplot(value~variable,input)
# ggplot version: boxplot without outliers plus jittered points.
pplot<-ggplot(input, aes(x=variable, y=value)) +
geom_boxplot(outlier.shape=NA,colour=c("green","red"),fill=c("green","red"))+
geom_jitter(position=position_jitter(width=.1, height=0))+
scale_y_continuous(name = "Log(D,10)")+
scale_x_discrete(name = "") +
theme_bw()
# NOTE(review): add_pval() presumably comes from the ggpval package, which
# is not loaded above -- confirm a library(ggpval) is needed.
add_pval(pplot, pairs = list(c(1, 2)), test='wilcox.test')
ggsave("BND.boxplot.ggplot.pdf")
| /Figure.R | no_license | Shicheng-Guo/ASCs | R | false | false | 4,346 | r | data<-read.table("https://raw.githubusercontent.com/Shicheng-Guo/ASCs/master/extdata/vdjtools.basicstats.txt",head=T,sep="\t")
# Basic repertoire statistics (data read on the line above): one boxplot
# per vdjtools metric column, grouped by a composite sample/cell-type label.
head(data)
# Group label part 1: first character of the sample id (column 1).
idx1<-unlist(lapply(data[,1],function(x) substr(x,1,1)))
# NOTE(review): `input` is not defined until much later in this script, so
# this line fails (or picks up a stale object) when run top-to-bottom;
# `data` was probably intended -- confirm.  Also idx2 has length
# ncol(input) while idx1 has length nrow(data), so paste() recycles.
idx2<-unlist(lapply(colnames(input),function(x) unlist(strsplit(x,"[_]"))[2]))
idx<-paste(idx1,idx2,sep="_")
dim(data)
# One panel per statistic (columns 3+).
par(mfrow=c(3,4),mar=c(2,2,3,1))
for(i in 3:ncol(data)){
boxplot(data[,i]~idx,col=2:5,main=colnames(data)[i])
}
# Same visualisation for J-segment usage fractions.
data<-read.table("vdjtools.segments.wt.J.txt",head=T,sep="\t")
head(data)
idx1<-unlist(lapply(data[,1],function(x) substr(x,1,1)))
idx2<-unlist(lapply(colnames(input),function(x) unlist(strsplit(x,"[_]"))[2]))
idx<-paste(idx1,idx2,sep="_")
dim(data)
par(mfrow=c(2,4),mar=c(2,2,3,1))
for(i in 3:ncol(data)){
boxplot(data[,i]~idx,col=2:5,main=colnames(data)[i])
}
# V-segment usage: only plot segments with enough spread (sd > 0.0055)
# so the 4x4 panel grid stays readable.
data<-read.table("vdjtools.segments.wt.V.txt",head=T,sep="\t")
head(data)
dim(data)
idx1<-unlist(lapply(data[,1],function(x) substr(x,1,1)))
idx2<-unlist(lapply(colnames(input),function(x) unlist(strsplit(x,"[_]"))[2]))
idx<-paste(idx1,idx2,sep="_")
dim(data)
par(mfrow=c(4,4),mar=c(2,2,3,1))
for(i in 3:ncol(data)){
if(sd(data[,i])>0.0055){
boxplot(data[,i]~idx,col=2:5,main=colnames(data)[i],ylim=c(0,0.12))
}
}
# Align the sample sheet (S2.txt) with the vdjtools metadata and write the
# matched table out for conversion.
s1<-read.table("S2.txt",head=T,sep="\t")
s2<-read.table("vdjtools.m.txt",head=T,sep="\t")
# Match s2 rows to s1 sample IDs after stripping the ".sed" suffix.
s<-s2[match(s1[,1],gsub(".sed","",s2[,1])),]
write.table(s,file="vdjtools.m2.txt",col.names = T,row.names = F,sep="\t",quote=F)
# The two lines below are vdjtools shell commands, not R code; left bare
# they are parse errors that stop the whole script from sourcing.  Run them
# from a shell (or via system()) instead:
# vdjtools Convert -S mixcr -m vdjtools.m2.txt metadata.txt
# vdjtools CalcSegmentUsage -m metadata.txt vdjtools
# Spectratype: mean CDR3 amino-acid length per sample, then pairwise
# comparisons between groups.
data1<-read.table("vdjtools.spectratype.aa.wt.txt",head=T,sep="\t",check.names = F)
# Mean length per row: weighted sum of the length columns (7:22), whose
# column names are the lengths themselves.  Preallocated via vapply instead
# of growing Len with c() inside a loop.
len_cols <- 7:22
len_vals <- as.numeric(colnames(data1)[len_cols])
Len <- vapply(seq_len(nrow(data1)),
              function(i) sum(data1[i, len_cols] * len_vals),
              numeric(1))
# NOTE(review): "SampeType" looks like a typo for "SampleType"; if the real
# column is spelled differently the grouping below silently degrades -- confirm.
tapply(Len,paste(data1$SampeType,data1$CellType,sep="_"),mean)
table(paste(data1$SampeType,data1$CellType,sep="_"))
pdf("Len.boxplot.pdf")
boxplot(Len~paste(data1$CellType,data1$SampeType,sep="_"),cex.axis=0.6,col=c(3,2,3,2,3,2,3,2))
dev.off()
# Re-draw to capture the boxplot statistics for testing.
x<-boxplot(Len~paste(data1$CellType,data1$SampeType,sep="_"),cex.axis=0.6,col=c(2,3,2,3,2,3,2,3))
colnames(x$stats)<-x$names
y<-x$stats
# Welch t-test between adjacent boxplot columns (pairs 1-2, 3-4, ...),
# preallocated via vapply instead of growing P with c().
P <- vapply(c(1,3,5,7), function(i) t.test(y[,i], y[,i+1])$p.value, numeric(1))
P
# Relate mean CDR3 length to IGHJ6 usage per sample.
data2<-read.table("vdjtools.celltype.segments.wt.J.txt",head=T,sep="\t",check.names = F)
data2$IGHJ6
# Combine the spectratype table, mean lengths and J usage into one frame.
input<-data.frame(data1,Len,data2)
pdf("lenJH6.pdf")
# Colour by sample type, point shape by cell type.
plot(x=input$Len,y=input$IGHJ6,col=as.numeric(input$SampeType)+1,pch=as.numeric(input$CellType),cex=1.5)
dev.off()
# Per-V-segment t-tests; column 4 holds the grouping factor.
data<-read.table("vdjtools.celltype.segments.wt.V.txt",head=T,sep="\t",check.names = F)
P<-c()
for(i in 6:ncol(data)){
p<-t.test(data[,i]~data[,4])$p.value
P<-c(P,p)
}
names(P)<-colnames(data)[6:ncol(data)]
# NOTE(review): qqplot() requires two samples (x and y), so qqplot(P)
# errors; qqnorm(P) or a uniform QQ plot of the p-values was probably
# intended -- confirm and fix.
qqplot(P)
# Same per-segment tests for J usage.
data<-read.table("vdjtools.celltype.segments.wt.J.txt",head=T,sep="\t",check.names = F)
P<-c()
for(i in 6:ncol(data)){
p<-t.test(data[,i]~data[,4])$p.value
P<-c(P,p)
}
names(P)<-colnames(data)[6:ncol(data)]
qqplot(P)
# Split the sample sheet into one metadata file per sample ID.
meta <- read.table("metadata.txt", header = TRUE, sep = "\t")
for (sample_id in unique(meta$SampleID)) {
  input <- subset(meta, SampleID == sample_id)
  write.table(input, file = paste("metadata", sample_id, "txt", sep = "."),
              col.names = TRUE, row.names = FALSE, quote = FALSE, sep = "\t")
}
# Re-read and split again, this time one file per sample type.
meta <- read.table("metadata.txt", header = TRUE, sep = "\t")
for (sample_type in unique(meta$SampleType)) {
  input <- subset(meta, SampleType == sample_type)
  write.table(input, file = paste("metadata", sample_type, "txt", sep = "."),
              col.names = TRUE, row.names = FALSE, quote = FALSE, sep = "\t")
}
library(reshape2)
library("ggplot2")
# NOTE(review): hard-coded network path; prefer passing the directory in.
setwd("//mcrfnas2/bigdata/Genetic/Projects/shg047/rheumatology/SLE/BCR/vdj")
# Overlap metric R for sample pairs involving the BND cell type, healthy (H)
# versus SLE (S) cohorts.
Hs<-read.table("vdj.H.intersect.batch.aa.txt",head=T,sep="\t")
x<-subset(Hs,X1_SampleID==X2_SampleID &(X1_CellType=="BND" |X2_CellType=="BND" ))$R
Ss<-read.table("vdj.S.intersect.batch.aa.txt",head=T,sep="\t")
y<-subset(Ss,X1_SampleID==X2_SampleID &(X1_CellType=="BND" |X2_CellType=="BND" ))$R
# The raw-scale melt below is immediately overwritten by the log10 version;
# kept only for reference.
input<-melt(data.frame(H=x,S=y))
input<-melt(data.frame(H=log(x,10),S=log(y,10)))
head(input)
wilcox.test(value~variable,input)
t.test(value~variable,input)
boxplot(value~variable,input)
# ggplot version: boxplot without outliers plus jittered points.
pplot<-ggplot(input, aes(x=variable, y=value)) +
geom_boxplot(outlier.shape=NA,colour=c("green","red"),fill=c("green","red"))+
geom_jitter(position=position_jitter(width=.1, height=0))+
scale_y_continuous(name = "Log(D,10)")+
scale_x_discrete(name = "") +
theme_bw()
# NOTE(review): add_pval() presumably comes from the ggpval package, which
# is not loaded above -- confirm a library(ggpval) is needed.
add_pval(pplot, pairs = list(c(1, 2)), test='wilcox.test')
ggsave("BND.boxplot.ggplot.pdf")
|
#' Dataset for Exercise C, Chapter 03
#'
#' Dataset for Exercise C, Chapter 03
#'
#' @format A \code{data.frame} with 13 rows and 2 variables:
#' \describe{
#'   \item{x}{}
#'   \item{y}{}
#' }
#' @source Draper, N.R., Smith, H., (1998) Applied Regression Analysis, 3rd ed., New York: Wiley
#' @examples
#' dse03c
'dse03c'
| /aprean3/R/dse03c.R | no_license | ingted/R-Examples | R | false | false | 320 | r | #' Dataset for Exercise C, Chapter 03
#'
#' Dataset for Exercise C, Chapter 03
#'
#' @format A \code{data.frame} with 13 rows and 2 variables:
#' \describe{
#'   \item{x}{}
#'   \item{y}{}
#' }
#' @source Draper, N.R., Smith, H., (1998) Applied Regression Analysis, 3rd ed., New York: Wiley
#' @examples
#' dse03c
'dse03c'
|
# One-off dev setup: install a GitHub Actions workflow that builds and
# deploys the bookdown site on push.
usethis::use_github_action(url = "https://raw.githubusercontent.com/ropenscilabs/actions_sandbox/master/.github/workflows/deploy_bookdown.yml")
| /dev_history.R | permissive | itohanosa/arabesque-doc | R | false | false | 144 | r | usethis::use_github_action(url = "https://raw.githubusercontent.com/ropenscilabs/actions_sandbox/master/.github/workflows/deploy_bookdown.yml")
|
# Auto-generated fuzzing/regression input (AFL + valgrind) for the internal
# multivariance:::match_rows routine; the extreme magnitudes probe numeric
# edge cases in the row-matching code.
testlist <- list(A = structure(c(2.44588080933563e+77, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0), .Dim = c(5L, 1L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result) | /multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613115150-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 251 | r | testlist <- list(A = structure(c(2.44588080933563e+77, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0), .Dim = c(5L, 1L)), B = structure(0, .Dim = c(1L, 1L)))
# Replay the generated input against multivariance:::match_rows and print
# the structure of the result (regression check for the fuzzed case).
result <- do.call(multivariance:::match_rows,testlist)
str(result)
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/test_pos_strings.R
\name{test_pos_strings}
\alias{test_pos_strings}
\title{Test that parts of speech values within main- and sub-entry lines are well-formatted}
\usage{
test_pos_strings(wlp_lexicon)
}
\arguments{
\item{wlp_lexicon}{a Warlpiri lexicon data frame, or path to a Warlpiri dictionary file}
}
\description{
Test that parts of speech values within main- and sub-entry lines are well-formatted
}
| /man/test_pos_strings.Rd | permissive | CoEDL/yinarlingi | R | false | true | 483 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/test_pos_strings.R
\name{test_pos_strings}
\alias{test_pos_strings}
\title{Test that parts of speech values within main- and sub-entry lines are well-formatted}
\usage{
test_pos_strings(wlp_lexicon)
}
\arguments{
\item{wlp_lexicon}{a Warlpiri lexicon data frame, or path to a Warlpiri dictionary file}
}
\description{
Test that parts of speech values within main- and sub-entry lines are well-formatted
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gwscaR_popgen.R
\name{pairwise.pst}
\alias{pairwise.pst}
\title{Calculate pairwise Pst between population pairs}
\usage{
pairwise.pst(dat, pop.order)
}
\arguments{
\item{dat}{A dataframe with the trait values, first column must be the pop ID}
\item{pop.order}{A list of the order of the populations}
}
\value{
A data.frame with the pairwise Pst values
}
\description{
Calculate pairwise Pst between population pairs
}
| /man/pairwise.pst.Rd | no_license | dyerlab/gwscaR | R | false | true | 497 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gwscaR_popgen.R
\name{pairwise.pst}
\alias{pairwise.pst}
\title{Calculate pairwise Pst between population pairs}
\usage{
pairwise.pst(dat, pop.order)
}
\arguments{
\item{dat}{A dataframe with the trait values, first column must be the pop ID}
\item{pop.order}{A list of the order of the populations}
}
\value{
A data.frame with the pairwise Pst values
}
\description{
Calculate pairwise Pst between population pairs
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/path.R
\name{path}
\alias{path}
\title{Extract Path Model Equation}
\usage{
path(x, from, to, round = 3)
}
\arguments{
\item{x}{object of class "netSEM", which is the return value of function \code{netSEMp1}.}
\item{from}{character string. Name of the predictor.}
\item{to}{character string. Name of the endogenous variable.}
\item{round}{a positive integer. The coefficients are rounded to this decimal place.}
}
\value{
a list of the following items:
\itemize{
\item "model": the best fitted model.
\item "model.print": a character string of the model equation.
}
}
\description{
Extract and display an equation of a pairwise path between two variables.
}
\details{
Extract the "best" model from principle 1 between any two variables.
The model name and the model equation are printed on screen.
The model coefficients, as well as the model R object are also returned.
}
\examples{
## Load the sample acrylic data set
data(acrylic)
# Run netSEM principle one
ans <- netSEMp1(acrylic)
# Extract relations between IrradTot and IAD2
cf <- path(ans, from = "IrradTot", to = "IAD2")
print(cf)
}
\seealso{
\link[netSEM]{netSEMp1}
}
| /man/path.Rd | no_license | cran/netSEM | R | false | true | 1,219 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/path.R
\name{path}
\alias{path}
\title{Extract Path Model Equation}
\usage{
path(x, from, to, round = 3)
}
\arguments{
\item{x}{object of class "netSEM", which is the return value of function \code{netSEMp1}.}
\item{from}{character string. Name of the predictor.}
\item{to}{character string. Name of the endogenous variable.}
\item{round}{a positive integer. The coefficients are rounded to this decimal place.}
}
\value{
a list of the following items:
\itemize{
\item "model": the best fitted model.
\item "model.print": a character string of the model equation.
}
}
\description{
Extract and display an equation of a pairwise path between two variables.
}
\details{
Extract the "best" model from principle 1 between any two variables.
The model name and the model equation are printed on screen.
The model coefficients, as well as the model R object are also returned.
}
\examples{
## Load the sample acrylic data set
data(acrylic)
# Run netSEM principle one
ans <- netSEMp1(acrylic)
# Extract relations between IrradTot and IAD2
cf <- path(ans, from = "IrradTot", to = "IAD2")
print(cf)
}
\seealso{
\link[netSEM]{netSEMp1}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/module.R
\name{combViewUI}
\alias{combViewUI}
\title{shiny module, view UI}
\usage{
combViewUI(id)
}
\arguments{
\item{id}{id for namespace}
}
\value{
no value to be returned
}
\description{
Should not be called by users. shiny module, view UI
}
\keyword{internal}
| /man/combViewUI.Rd | no_license | emilliman5/proturn | R | false | true | 343 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/module.R
\name{combViewUI}
\alias{combViewUI}
\title{shiny module, view UI}
\usage{
combViewUI(id)
}
\arguments{
\item{id}{id for namespace}
}
\value{
no value to be returned
}
\description{
Should not be called by users. shiny module, view UI
}
\keyword{internal}
|
#' @name obi
#' @title Measuring Japanese readability
#' @author Jaehyun SONG
#' @description Measuring Japanese readability with a single text
#' @docType package
#'
#' @usage obi(file)
#'
#' @param file a name of a text file (including its path)
#' @return List type (Raw text, Likelihood table, Grades with maximum likelihood, Median)
#'
#' @examples
#' setwd("/Users/Username/Documents/")
#' obi("sample_text.txt")
#'
#' @references Satoshi Sato, Suguru Matsuyoshi and Yohsuke Kondoh. 2008. Automatic Assessment of Japanese Text Readability Based on a Textbook Corpus. Proceedings of the Sixth International Language Resources and Evaluation (LREC’08). European Language Resources Association (ELRA). Marrakech, Morocco
NULL
# Estimate the school-grade readability of a Japanese text by scoring its
# character frequencies against the package dictionary `dic`, whose columns
# 3:15 hold per-grade (1-13) scores -- following Sato et al. (2008).
obi <- function(file){
  # Read the whole file and collapse it into one string (whitespace is
  # discarded by scan()'s tokenisation).
  sample.text <- paste(scan(file, character(0)), collapse = "", quiet = TRUE)
  raw.text <- sample.text
  # Split into individual characters and tabulate their frequencies.
  sample.text <- substring(sample.text, 1:nchar(sample.text), 1:nchar(sample.text))
  text.table <- table(sample.text)
  text.df <- data.frame(text.table)
  colnames(text.df) <- c("word", "count")
  text.df$word <- as.character(text.df$word)
  # Per-character contribution to each grade: count * dictionary score,
  # zero for characters absent from the dictionary.
  na.vec <- rep(NA, length = nrow(text.df))
  lik.df <- data.frame(lg1 = na.vec, lg2 = na.vec, lg3 = na.vec, lg4 = na.vec, lg5 = na.vec,
                       lg6 = na.vec, lg7 = na.vec, lg8 = na.vec, lg9 = na.vec, lg10 = na.vec,
                       lg11 = na.vec, lg12 = na.vec, lg13 = na.vec)
  for(i in 1:nrow(text.df)){
    if(nrow(dic[dic$word == text.df[i, 1], 3:15]) == 0){
      lik.df[i, ] <- rep(0, length = 13)
    }else{
      lik.df[i, ] <- text.df[i, 2] * dic[dic$word == text.df[i, 1], 3:15]
    }
  }
  # Total score per grade across all characters.
  lik.vec <- c()
  for(i in 1:13){
    lik.vec[i] <- sum(lik.df[, i])
  }
  # Smooth the grade profile with quadratic (s2) and cubic (s3) fits.
  result.df <- data.frame(grade = c(1:13), ns = lik.vec, s2 = rep(NA, 13), s3 = rep(NA, 13))
  s2.coef <- lm(ns ~ poly(grade, 2, raw = TRUE), data = result.df)$coefficients
  s3.coef <- lm(ns ~ poly(grade, 3, raw = TRUE), data = result.df)$coefficients
  for(i in 1:13){
    result.df[i, 3] <- s2.coef[1] + s2.coef[2] * result.df[i, 1] + s2.coef[3] * result.df[i, 1]^2
    result.df[i, 4] <- s3.coef[1] + s3.coef[2] * result.df[i, 1] + s3.coef[3] * result.df[i, 1]^2 + s3.coef[4] * result.df[i, 1]^3
  }
  # Grade with the maximum score under each of the three profiles;
  # the final scale is their median.
  max.lik.vec <- c("ns" = result.df[result.df$ns == max(result.df$ns), 1],
                   "s2" = result.df[result.df$s2 == max(result.df$s2), 1],
                   "s3" = result.df[result.df$s3 == max(result.df$s3), 1])
  T13scale <- median(max.lik.vec)
  result <- list(raw.text,
                 result.df,
                 max.lik.vec,
                 T13scale)
  return(result)
}
| /R/obi.R | no_license | JaehyunSong/obi | R | false | false | 2,589 | r | #' @name obi
#' @title Measuring Japanese readability
#' @author Jaehyun SONG
#' @description Measuring Japanese readability with a single text
#' @docType package
#'
#' @usage obi(file)
#'
#' @param file a name of a text file (including its path)
#' @return List type (Raw text, Likelihood table, Grades with maximum likelihood, Median)
#'
#' @examples
#' setwd("/Users/Username/Documents/")
#' obi("sample_text.txt")
#'
#' @references Satoshi Sato, Suguru Matsuyoshi and Yohsuke Kondoh. 2008. Automatic Assessment of Japanese Text Readability Based on a Textbook Corpus. Proceedings of the Sixth International Language Resources and Evaluation (LREC’08). European Language Resources Association (ELRA). Marrakech, Morocco
NULL
# Estimate the school-grade readability of a Japanese text by scoring its
# character frequencies against the package dictionary `dic`, whose columns
# 3:15 hold per-grade (1-13) scores -- following Sato et al. (2008).
obi <- function(file){
  # Read the whole file and collapse it into one string (whitespace is
  # discarded by scan()'s tokenisation).
  sample.text <- paste(scan(file, character(0)), collapse = "", quiet = TRUE)
  raw.text <- sample.text
  # Split into individual characters and tabulate their frequencies.
  sample.text <- substring(sample.text, 1:nchar(sample.text), 1:nchar(sample.text))
  text.table <- table(sample.text)
  text.df <- data.frame(text.table)
  colnames(text.df) <- c("word", "count")
  text.df$word <- as.character(text.df$word)
  # Per-character contribution to each grade: count * dictionary score,
  # zero for characters absent from the dictionary.
  na.vec <- rep(NA, length = nrow(text.df))
  lik.df <- data.frame(lg1 = na.vec, lg2 = na.vec, lg3 = na.vec, lg4 = na.vec, lg5 = na.vec,
                       lg6 = na.vec, lg7 = na.vec, lg8 = na.vec, lg9 = na.vec, lg10 = na.vec,
                       lg11 = na.vec, lg12 = na.vec, lg13 = na.vec)
  for(i in 1:nrow(text.df)){
    if(nrow(dic[dic$word == text.df[i, 1], 3:15]) == 0){
      lik.df[i, ] <- rep(0, length = 13)
    }else{
      lik.df[i, ] <- text.df[i, 2] * dic[dic$word == text.df[i, 1], 3:15]
    }
  }
  # Total score per grade across all characters.
  lik.vec <- c()
  for(i in 1:13){
    lik.vec[i] <- sum(lik.df[, i])
  }
  # Smooth the grade profile with quadratic (s2) and cubic (s3) fits.
  result.df <- data.frame(grade = c(1:13), ns = lik.vec, s2 = rep(NA, 13), s3 = rep(NA, 13))
  s2.coef <- lm(ns ~ poly(grade, 2, raw = TRUE), data = result.df)$coefficients
  s3.coef <- lm(ns ~ poly(grade, 3, raw = TRUE), data = result.df)$coefficients
  for(i in 1:13){
    result.df[i, 3] <- s2.coef[1] + s2.coef[2] * result.df[i, 1] + s2.coef[3] * result.df[i, 1]^2
    result.df[i, 4] <- s3.coef[1] + s3.coef[2] * result.df[i, 1] + s3.coef[3] * result.df[i, 1]^2 + s3.coef[4] * result.df[i, 1]^3
  }
  # Grade with the maximum score under each of the three profiles;
  # the final scale is their median.
  max.lik.vec <- c("ns" = result.df[result.df$ns == max(result.df$ns), 1],
                   "s2" = result.df[result.df$s2 == max(result.df$s2), 1],
                   "s3" = result.df[result.df$s3 == max(result.df$s3), 1])
  T13scale <- median(max.lik.vec)
  result <- list(raw.text,
                 result.df,
                 max.lik.vec,
                 T13scale)
  return(result)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/decimal_minute_to_string.R
\name{decimal_minute_to_string}
\alias{decimal_minute_to_string}
\title{Function to transform decimal minutes into a formatted \code{"HH:MM:SS"} or
\code{"MM:SS"} string.}
\usage{
decimal_minute_to_string(x, hour = TRUE)
}
\arguments{
\item{x}{Numeric vector of decimal minutes.}
\item{hour}{Should the hour also be included in the formatted string? Default
is \code{TRUE}.}
}
\description{
Function to transform decimal minutes into a formatted \code{"HH:MM:SS"} or
\code{"MM:SS"} string.
}
\examples{
\dontrun{
decimal_minute_to_string(5)
decimal_minute_to_string(5.5)
decimal_minute_to_string(5.34)
decimal_minute_to_string(5.954226)
# Without the hour piece
decimal_minute_to_string(5.954226, hour = FALSE)
}
}
\author{
Stuart K. Grange
}
| /man/decimal_minute_to_string.Rd | no_license | MohoWu/threadr | R | false | true | 857 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/decimal_minute_to_string.R
\name{decimal_minute_to_string}
\alias{decimal_minute_to_string}
\title{Function to transform decimal minutes into a formatted \code{"HH:MM:SS"} or
\code{"MM:SS"} string.}
\usage{
decimal_minute_to_string(x, hour = TRUE)
}
\arguments{
\item{x}{Numeric vector of decimal minutes.}
\item{hour}{Should the hour also be included in the formatted string? Default
is \code{TRUE}.}
}
\description{
Function to transform decimal minutes into a formatted \code{"HH:MM:SS"} or
\code{"MM:SS"} string.
}
\examples{
\dontrun{
decimal_minute_to_string(5)
decimal_minute_to_string(5.5)
decimal_minute_to_string(5.34)
decimal_minute_to_string(5.954226)
# Without the hour piece
decimal_minute_to_string(5.954226, hour = FALSE)
}
}
\author{
Stuart K. Grange
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/complement-data.R
\name{complement_data}
\alias{complement_data}
\title{Complement data}
\usage{
complement_data(data, config)
}
\arguments{
\item{data}{A data frame containing asset, date and return columns.}
\item{config}{A list of lists containing the complement methods and arguments. They are
\code{method} (method name, one of \code{proxy}, \code{spread}, \code{regress} or \code{mix}) and
\code{arguments} (method arguments).}
}
\value{
A data frame containing the following columns:
\itemize{
\item asset
\item date
\item return
}
and arranged by asset and date.
}
\description{
Complement the data with the following functionalities:
\itemize{
\item Proxy: Add "proxied" asset that is a copy of "base" asset
\item Spread: Spread non-daily returns to daily returns of "spreaded" asset
\item Regress: Extend "regressed" asset returns as a linear function of "base"
asset returns
\item Mix: Add "mixed" asset whose returns are the average of "base" assets
returns for each day.
}
}
| /man/complement_data.Rd | permissive | GiuseppeTT/RiskParityBrazil | R | false | true | 1,068 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/complement-data.R
\name{complement_data}
\alias{complement_data}
\title{Complement data}
\usage{
complement_data(data, config)
}
\arguments{
\item{data}{A data frame containing asset, date and return columns.}
\item{config}{A list of lists containing the complement methods and arguments. They are
\code{method} (method name, one of \code{proxy}, \code{spread}, \code{regress} or \code{mix}) and
\code{arguments} (method arguments).}
}
\value{
A data frame containing the following columns:
\itemize{
\item asset
\item date
\item return
}
and arranged by asset and date.
}
\description{
Complement the data with the following functionalities:
\itemize{
\item Proxy: Add "proxied" asset that is a copy of "base" asset
\item Spread: Spread non-daily returns to daily returns of "spreaded" asset
\item Regress: Extend "regressed" asset returns as a linear function of "base"
asset returns
\item Mix: Add "mixed" asset whose returns are the average of "base" assets
returns for each day.
}
}
|
library(tidyverse)
library(plotly)
# Air-quality sensor comparison for the chess-tower deployment.
data <- read_csv("./data/datos_torreajedrez.csv")
colnames(data)
# Shift the SIATA reference PM2.5 series back 4 samples -- presumably to
# align its timestamps with the local sensors; TODO confirm the offset.
data$sta_siata.mean_pm25 <- data$sta_siata.mean_pm25 %>% lag(4)
# Values below -100 are error codes; collapse them to a -1 sentinel
# (logical NAs produced by the lag are skipped by this subassignment).
data$sta_siata.mean_pm25[data$sta_siata.mean_pm25 < -100] <- -1
data$date <- lubridate::mdy_hms(data$date)
# Long format: one row per (date, sensor) pair.  NOTE: gather() is
# superseded; pivot_longer() is the modern equivalent.
data_g <- gather(data, -date,key = "sensor", value = "valor")
gg <- ggplot(data_g %>%
               filter(date > "2019-12-01", date < "2020-04-01"),
             aes(date, valor, color=sensor)) + geom_line(stat = "identity")
ggplotly(gg)
| /analisis.R | no_license | unloquer/calibracion_aqa | R | false | false | 516 | r | library(tidyverse)
library(plotly)
# Air-quality sensor comparison for the chess-tower deployment.
data <- read_csv("./data/datos_torreajedrez.csv")
colnames(data)
# Shift the SIATA reference PM2.5 series back 4 samples -- presumably to
# align its timestamps with the local sensors; TODO confirm the offset.
data$sta_siata.mean_pm25 <- data$sta_siata.mean_pm25 %>% lag(4)
# Values below -100 are error codes; collapse them to a -1 sentinel
# (logical NAs produced by the lag are skipped by this subassignment).
data$sta_siata.mean_pm25[data$sta_siata.mean_pm25 < -100] <- -1
data$date <- lubridate::mdy_hms(data$date)
# Long format: one row per (date, sensor) pair.  NOTE: gather() is
# superseded; pivot_longer() is the modern equivalent.
data_g <- gather(data, -date,key = "sensor", value = "valor")
gg <- ggplot(data_g %>%
               filter(date > "2019-12-01", date < "2020-04-01"),
             aes(date, valor, color=sensor)) + geom_line(stat = "identity")
ggplotly(gg)
|
# Dependencies ----
library(BSgenome.Btaurus.UCSC.bosTau6)
# names(BSgenome.Btaurus.UCSC.bosTau6)
# Note the "chr" prefix (UCSC-style) to trim later
library(biomaRt)
library(bsseq)
library(xlsx)
# Set parameters ----
# Pyrosequencing assay target sequences for four immune genes; each entry
# is compared against the bosTau6 genome below.
sequences <- c(
  TNFa = "TAGAGAAGCCCACTCAGAATCCGAGCGGGCGGAGTGTAGGAAGTATCCTTGATGCCTGGGTGTCCCCAACTTTCCAAACCCCCGCCCCCGCGATGGAGAAGAAACCGAGACAGAAGGTGTAGGGCCCGCTACCGCTTCCTCCAGATGAGCTCATGGGTTTCTCCACCAAGGAAGTTTTCCGCTGG",
  IL12A = "CAACCAGAGCGCTAGGCTGGTTACTCACTGCGAAGCGGGCACATGCTGAGCGGAGCGGCGGGGACGCGGAACCGAGCCGGCAGTTGGACGCAGACCGGTGCACGCGGCAGGTGAGGGTGGTGGTTGGGAGGCCAAACCAGGGGTCACATTTTTAT",
  TLR2 = "GGGGATGCCAGCGGATCCTAATTCCTGACCGACGTACCTGGGACTTGCGCGGCCTTGCAGCGCCTTCCACAGCCTCCGGCCGGGAGCGGCCCGGGAAAAGCGCGGGAACGTGCGCACCCCCTCCTCGCGGGTGCGGGACCGCCGGTTCCGCGGAGTGCGCGTAACCCCTGTGGCCCAGCGCGCCGCCGCGCTTCCCCACGGTCTCCGGCGGGGACCGTGACCCGGGTGCTGCCCGGGTCGGAGGAGGGCGCTGGGGC",
  NFKB2 = "CCTGGTGGTGGGAGAGGTGTCGCGACCCGTCCGAGGTGGGTCCGGCCGGGAGAGAATCCTGAACCGGAGCCGCCGCCGCGGTGAGTGGCCGGGTTCAGACCCCTGGGTGGTGGGACACCGGCAAGGGTGGGAGGAGG"
)
outdir <- 'bsseq'
# Prepare sequences in Bioconductor format --------------------------------
# Keep both strands so matches on either orientation can be checked.
DNAset <- DNAStringSet(x = sequences)
DNAset <- append(x = DNAset, values = reverseComplement(DNAset))
# width(DNAset)
# https://genome.ucsc.edu/cgi-bin/hgBlat?hgsid=500819951_dRPcuiRElemGq9YK1y9QhKv89cWv&command=start
# Paste the sequences and retrieve the coordinates of each longest match
# which should have a width equal to the full length of the corresponding
# sequence.
# Looks like UCSC/BLAT gives correct coordinates
# Note that the example below (TNFa) is on the complementary strand
# Coordinates obtained from UCSC BLAT against bosTau6; the equality check
# after each getSeq() prints TRUE when the genome slice matches the assay
# sequence.  TNFa lies on the minus strand, so its slice must be
# reverse-complemented before comparing.
TNFa.blat <- GRanges(
  seqnames = "chr23",
  ranges = IRanges(start = 27536746, end = 27536930, names = "TNFa"),
  strand = "-")
TNFa.seq <- getSeq(BSgenome.Btaurus.UCSC.bosTau6, TNFa.blat)
reverseComplement(TNFa.seq) == sequences[["TNFa"]]
IL12A.blat <- GRanges(
  seqnames = "chr1",
  ranges = IRanges(start = 108291940, end = 108292094, names = "IL12A"),
  strand = "+")
IL12A.seq <- getSeq(BSgenome.Btaurus.UCSC.bosTau6, IL12A.blat)
IL12A.seq == sequences[["IL12A"]]
TLR2.blat <- GRanges(
  seqnames = "chr17",
  ranges = IRanges(start = 3962393, end = 3962649, names = "TLR2"),
  strand = "+")
TLR2.seq <- getSeq(BSgenome.Btaurus.UCSC.bosTau6, TLR2.blat)
TLR2.seq == sequences[["TLR2"]]
NFKB2.blat <- GRanges(
  seqnames = "chr26",
  ranges = IRanges(start = 22890256, end = 22890392, names = "NFKB2"),
  strand = "+")
NFKB2.seq <- getSeq(BSgenome.Btaurus.UCSC.bosTau6, NFKB2.blat)
NFKB2.seq == sequences[["NFKB2"]]
# Import methylation calls ------------------------------------------------
# Import all sites covered by at least one read in at least one sample
# (smoothed BSseq object produced by an earlier step of the pipeline).
BS.smoothed <- readRDS(file = file.path(outdir, "BS.smoothed.rds"))
# GRanges listing all regions of interest
# NOTE(review): warnings are deliberately suppressed when combining the
# four hits -- presumably seqlevel warnings from mixing chromosomes; confirm.
targets.gr <- suppressWarnings(c(TNFa.blat, IL12A.blat, TLR2.blat, NFKB2.blat))
# Edit from UCSC to Ensembl chromosome naming (drop the "chr" prefix).
# seq_along() replaces the 1:length() idiom, which misbehaves on empty input.
seqinfo(targets.gr, new2old = seq_along(targets.gr), force = FALSE) <- Seqinfo(
  seqnames = as.character(gsub("chr", "", seqnames(targets.gr))))
# Get methylation % in each region in each sample ("raw" = unsmoothed calls)
methGenes <- getMeth(
  BSseq = BS.smoothed,
  regions = targets.gr,
  type = "raw", what = "perRegion")
rownames(methGenes) <- names(targets.gr)
# Summarise (top-level value, printed when the script is run)
summary(t(methGenes))
## One NaN suggests no coverage in the region
## Another 1 suggests a single call, considering that all other values are low
# Total read coverage per region in each sample ...
covGenes <- getCoverage(
  BSseq = BS.smoothed, regions = targets.gr, type = "Cov",
  what = "perRegionTotal")
rownames(covGenes) <- names(targets.gr)
# ... and the methylated-read counts for the same regions
mGenes <- getCoverage(
  BSseq = BS.smoothed, regions = targets.gr, type = "M",
  what = "perRegionTotal")
rownames(mGenes) <- names(targets.gr)
# Write the per-region summaries to a single workbook: the first sheet is
# written without append (creating/overwriting pyroseq.xlsx) and the later
# sheets are appended to the same file.
write.xlsx(
x = methGenes,
file = file.path(outdir, "pyroseq.xlsx"),
sheetName = "Percentage")
# Total read coverage per region
write.xlsx(
x = covGenes,
file = file.path(outdir, "pyroseq.xlsx"),
sheetName = "Coverage", append = TRUE)
# Methylated read counts per region
write.xlsx(
x = mGenes,
file = file.path(outdir, "pyroseq.xlsx"),
sheetName = "Methylated", append = TRUE)
# Plot regions ------------------------------------------------------------
# One PDF per target region: smoothed methylation profile, with the region
# itself highlighted and the window extended to twice the region width.
for (geneName in names(targets.gr)){
pdf(
file = file.path(
outdir,
paste(paste("Pyroseq_validation", geneName, sep = "_"), "pdf", sep = ".")),
width = 6, height = 4)
plotRegion(
BSseq = BS.smoothed,
region = targets.gr[geneName],
extend = 2 * width(targets.gr[geneName]),
addRegions = targets.gr[geneName],
# NOTE(review): assumes 2 groups of 8 samples (first 8 blue, last 8 red)
# -- confirm against the sample layout of BS.smoothed.
col = rep(c("blue", "red"), each = 8)
)
dev.off()
}
| /022_pyroseq_validate.R | permissive | kevinrue/WGBS_UCD | R | false | false | 4,635 | r |
# Dependencies ----
library(BSgenome.Btaurus.UCSC.bosTau6)
# names(BSgenome.Btaurus.UCSC.bosTau6)
# Note the "chr" prefix (UCSC-style) to trim later
library(biomaRt)
library(bsseq)
library(xlsx)
# Set parameters ----
# Amplicon sequences for the four target genes (pyrosequencing validation
# targets); names identify the genes and are reused throughout the script.
sequences <- c(
TNFa = "TAGAGAAGCCCACTCAGAATCCGAGCGGGCGGAGTGTAGGAAGTATCCTTGATGCCTGGGTGTCCCCAACTTTCCAAACCCCCGCCCCCGCGATGGAGAAGAAACCGAGACAGAAGGTGTAGGGCCCGCTACCGCTTCCTCCAGATGAGCTCATGGGTTTCTCCACCAAGGAAGTTTTCCGCTGG",
IL12A = "CAACCAGAGCGCTAGGCTGGTTACTCACTGCGAAGCGGGCACATGCTGAGCGGAGCGGCGGGGACGCGGAACCGAGCCGGCAGTTGGACGCAGACCGGTGCACGCGGCAGGTGAGGGTGGTGGTTGGGAGGCCAAACCAGGGGTCACATTTTTAT",
TLR2 = "GGGGATGCCAGCGGATCCTAATTCCTGACCGACGTACCTGGGACTTGCGCGGCCTTGCAGCGCCTTCCACAGCCTCCGGCCGGGAGCGGCCCGGGAAAAGCGCGGGAACGTGCGCACCCCCTCCTCGCGGGTGCGGGACCGCCGGTTCCGCGGAGTGCGCGTAACCCCTGTGGCCCAGCGCGCCGCCGCGCTTCCCCACGGTCTCCGGCGGGGACCGTGACCCGGGTGCTGCCCGGGTCGGAGGAGGGCGCTGGGGC",
NFKB2 = "CCTGGTGGTGGGAGAGGTGTCGCGACCCGTCCGAGGTGGGTCCGGCCGGGAGAGAATCCTGAACCGGAGCCGCCGCCGCGGTGAGTGGCCGGGTTCAGACCCCTGGGTGGTGGGACACCGGCAAGGGTGGGAGGAGG"
)
# Output directory shared with the earlier bsseq analysis steps.
outdir <- 'bsseq'
# Prepare sequences in Bioconductor format --------------------------------
# Keep both strands so either orientation can be matched against the genome.
DNAset <- DNAStringSet(x = sequences)
DNAset <- append(x = DNAset, values = reverseComplement(DNAset))
# width(DNAset)
# UCSC/BLAT approach ------------------------------------------------------
# https://genome.ucsc.edu/cgi-bin/hgBlat?hgsid=500819951_dRPcuiRElemGq9YK1y9QhKv89cWv&command=start
# Paste the sequences and retrieve the coordinates of each longest match
# which should have a width equal to the full length of the corresponding
# sequence.
# Looks like UCSC/BLAT gives correct coordinates
# Note that the example below (TNFa) is on the complementary strand
# For each gene: store the BLAT hit as a GRanges, extract the genomic
# sequence at those coordinates with getSeq(), and compare it against the
# original amplicon sequence. Each comparison should print TRUE at every
# position (sanity check of the coordinates).
TNFa.blat <- GRanges(
seqnames = "chr23",
ranges = IRanges(start = 27536746, end = 27536930, names = "TNFa"),
strand = "-")
TNFa.seq <- getSeq(BSgenome.Btaurus.UCSC.bosTau6, TNFa.blat)
# TNFa is on the minus strand: compare the reverse complement
reverseComplement(TNFa.seq) == sequences[["TNFa"]]
IL12A.blat <- GRanges(
seqnames = "chr1",
ranges = IRanges(start = 108291940, end = 108292094, names = "IL12A"),
strand = "+")
IL12A.seq <- getSeq(BSgenome.Btaurus.UCSC.bosTau6, IL12A.blat)
IL12A.seq == sequences[["IL12A"]]
TLR2.blat <- GRanges(
seqnames = "chr17",
ranges = IRanges(start = 3962393, end = 3962649, names = "TLR2"),
strand = "+")
TLR2.seq <- getSeq(BSgenome.Btaurus.UCSC.bosTau6, TLR2.blat)
TLR2.seq == sequences[["TLR2"]]
NFKB2.blat <- GRanges(
seqnames = "chr26",
ranges = IRanges(start = 22890256, end = 22890392, names = "NFKB2"),
strand = "+")
NFKB2.seq <- getSeq(BSgenome.Btaurus.UCSC.bosTau6, NFKB2.blat)
NFKB2.seq == sequences[["NFKB2"]]
# Import methylation calls ------------------------------------------------
# Import all sites covered by at least one read in at least one sample
# (smoothed BSseq object produced by an earlier step of the pipeline).
BS.smoothed <- readRDS(file = file.path(outdir, "BS.smoothed.rds"))
# GRanges listing all regions of interest
# NOTE(review): warnings are deliberately suppressed when combining the
# four hits -- presumably seqlevel warnings from mixing chromosomes; confirm.
targets.gr <- suppressWarnings(c(TNFa.blat, IL12A.blat, TLR2.blat, NFKB2.blat))
# Edit from UCSC to Ensembl chromosome naming (drop the "chr" prefix).
# seq_along() replaces the 1:length() idiom, which misbehaves on empty input.
seqinfo(targets.gr, new2old = seq_along(targets.gr), force = FALSE) <- Seqinfo(
  seqnames = as.character(gsub("chr", "", seqnames(targets.gr))))
# Get methylation % in each region in each sample ("raw" = unsmoothed calls)
methGenes <- getMeth(
  BSseq = BS.smoothed,
  regions = targets.gr,
  type = "raw", what = "perRegion")
rownames(methGenes) <- names(targets.gr)
# Summarise (top-level value, printed when the script is run)
summary(t(methGenes))
## One NaN suggests no coverage in the region
## Another 1 suggests a single call, considering that all other values are low
# Total read coverage per region in each sample ...
covGenes <- getCoverage(
  BSseq = BS.smoothed, regions = targets.gr, type = "Cov",
  what = "perRegionTotal")
rownames(covGenes) <- names(targets.gr)
# ... and the methylated-read counts for the same regions
mGenes <- getCoverage(
  BSseq = BS.smoothed, regions = targets.gr, type = "M",
  what = "perRegionTotal")
rownames(mGenes) <- names(targets.gr)
# Write the per-region summaries to a single workbook: the first sheet is
# written without append (creating/overwriting pyroseq.xlsx) and the later
# sheets are appended to the same file.
write.xlsx(
x = methGenes,
file = file.path(outdir, "pyroseq.xlsx"),
sheetName = "Percentage")
# Total read coverage per region
write.xlsx(
x = covGenes,
file = file.path(outdir, "pyroseq.xlsx"),
sheetName = "Coverage", append = TRUE)
# Methylated read counts per region
write.xlsx(
x = mGenes,
file = file.path(outdir, "pyroseq.xlsx"),
sheetName = "Methylated", append = TRUE)
# Plot regions ------------------------------------------------------------
# One PDF per target region: smoothed methylation profile, with the region
# itself highlighted and the window extended to twice the region width.
for (geneName in names(targets.gr)){
pdf(
file = file.path(
outdir,
paste(paste("Pyroseq_validation", geneName, sep = "_"), "pdf", sep = ".")),
width = 6, height = 4)
plotRegion(
BSseq = BS.smoothed,
region = targets.gr[geneName],
extend = 2 * width(targets.gr[geneName]),
addRegions = targets.gr[geneName],
# NOTE(review): assumes 2 groups of 8 samples (first 8 blue, last 8 red)
# -- confirm against the sample layout of BS.smoothed.
col = rep(c("blue", "red"), each = 8)
)
dev.off()
}
|
\name{stats.g}
\alias{stats.g}
\alias{sd.g}
\alias{limits.g}
\title{Statistics used in computing and drawing a Shewhart g chart}
\description{
These functions are used to compute statistics required by the g chart (geometric distribution) for use with the \pkg{qcc} package.
}
\usage{
stats.g(data, sizes)
sd.g(data, sizes, \dots)
limits.g(center, std.dev, sizes, nsigmas = NULL, conf = NULL)
}
\arguments{
\item{data}{ the observed data values }
\item{center}{ sample center statistic }
\item{sizes}{ sample sizes (not used) }
\item{std.dev}{ standard deviation of geometric distribution }
\item{nsigmas}{a numeric value specifying the number of sigmas to use for computing control limits. It is ignored when the \code{conf} argument is provided.}
\item{conf}{a numeric value in \eqn{(0,1)} specifying the confidence level to use for computing control limits.}
\item{\dots}{catches further ignored arguments.}
}
\value{
The function \code{stats.g()} returns a list with components \code{statistics} and \code{center}.
The function \code{sd.g()} returns \code{std.dev}, the standard deviation
\eqn{\sqrt{1-p}/p}{sqrt(1-p)/p}.
The function \code{limits.g()} returns a matrix with lower and upper control limits.
}
\details{
The g chart plots the number of non-events between events. np charts
do not work well when the probability of an event is rare (see example
below). Instead of plotting the number of events, the g chart plots
the number of non-events between events.
}
\references{
Kaminsky, F. C. et al. (1992) Statistical Control Charts Based on a Geometric Distribution, \emph{Journal of Quality Technology}, 24, pp 63--69.
Yang, Z. et al. (2002) On the Performance of Geometric Charts with
Estimated Control Limits, \emph{Journal of Quality Technology}, 34, pp 448--458.
}
\author{ Greg Snow \email{greg.snow@ihc.com} }
\note{ The geometric distribution is quite skewed so it is best to set
conf at the required confidence interval (0 < conf < 1) rather than as
a multiplier of sigma.}
% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{\code{qcc}}
\examples{
success <- rbinom(1000, 1, 0.01)
num.noevent <- diff(which(c(1,success)==1))-1
qcc(success, type = "np", sizes = 1)
qcc(num.noevent, type = "g")
}
\keyword{ hplot }% __ONLY ONE__ keyword per line
| /man/stats.g.Rd | no_license | luca-scr/qcc | R | false | false | 2,320 | rd | \name{stats.g}
\alias{stats.g}
\alias{sd.g}
\alias{limits.g}
\title{Statistics used in computing and drawing a Shewhart g chart}
\description{
These functions are used to compute statistics required by the g chart (geometric distribution) for use with the \pkg{qcc} package.
}
\usage{
stats.g(data, sizes)
sd.g(data, sizes, \dots)
limits.g(center, std.dev, sizes, nsigmas = NULL, conf = NULL)
}
\arguments{
\item{data}{ the observed data values }
\item{center}{ sample center statistic }
\item{sizes}{ sample sizes (not used) }
\item{std.dev}{ standard deviation of geometric distribution }
\item{nsigmas}{a numeric value specifying the number of sigmas to use for computing control limits. It is ignored when the \code{conf} argument is provided.}
\item{conf}{a numeric value in \eqn{(0,1)} specifying the confidence level to use for computing control limits.}
\item{\dots}{catches further ignored arguments.}
}
\value{
The function \code{stats.g()} returns a list with components \code{statistics} and \code{center}.
The function \code{sd.g()} returns \code{std.dev}, the standard deviation
\eqn{\sqrt{1-p}/p}{sqrt(1-p)/p}.
The function \code{limits.g()} returns a matrix with lower and upper control limits.
}
\details{
The g chart plots the number of non-events between events. np charts
do not work well when the probability of an event is rare (see example
below). Instead of plotting the number of events, the g chart plots
the number of non-events between events.
}
\references{
Kaminsky, F. C. et al. (1992) Statistical Control Charts Based on a Geometric Distribution, \emph{Journal of Quality Technology}, 24, pp 63--69.
Yang, Z. et al. (2002) On the Performance of Geometric Charts with
Estimated Control Limits, \emph{Journal of Quality Technology}, 34, pp 448--458.
}
\author{ Greg Snow \email{greg.snow@ihc.com} }
\note{ The geometric distribution is quite skewed so it is best to set
conf at the required confidence interval (0 < conf < 1) rather than as
a multiplier of sigma.}
% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{\code{qcc}}
\examples{
success <- rbinom(1000, 1, 0.01)
num.noevent <- diff(which(c(1,success)==1))-1
qcc(success, type = "np", sizes = 1)
qcc(num.noevent, type = "g")
}
\keyword{ hplot }% __ONLY ONE__ keyword per line
|
# USAGE
# get help:
# Rscript sourcetracker_for_qiime.r -h
#
# run sink predictions using QIIME taxon abundance file:
# Rscript sourcetracker_for_qiime.r -t taxa.txt -m map.txt
#
# run leave-one-out source-sample predictions using QIIME taxon abundance file:
# Rscript sourcetracker_for_qiime.r -t taxa.txt -m map.txt -s
#
# run sink predictions using QIIME OTU table:
# Rscript sourcetracker_for_qiime.r -i otutable.txt -m map.txt
#
# run sink predictions using QIIME OTU table with 1000 burn-ins, 25 random restarts, and rarefaction depth of 100:
# Rscript sourcetracker_for_qiime.r -i otutable.txt -m map.txt -b 1000 -n 25 -r 100
#
# run sink predictions using QIIME taxon abundance file and an input file listing the sampleids to predict:
# Rscript sourcetracker_for_qiime.r -t taxa.txt -m map.txt -f sampleid_file.txt
#
# Note: you must add the path to your SourceTracker.r file to your path, e.g.:
# echo "" >> ~/.bash_profile; echo "export SOURCETRACKER_PATH=$HOME/path/to/your/SourceTracker.r" >> ~/.bash_profile; source ~/.bash_profile
# load SourceTracker package
# Locate and source the SourceTracker implementation: the SOURCETRACKER_PATH
# environment variable must point at the top-level SourceTracker directory,
# which is expected to contain src/SourceTracker.r.
envvars <- as.list(Sys.getenv())
if(is.element('SOURCETRACKER_PATH', names(envvars))){
sourcefile <- sprintf('%s/src/SourceTracker.r',envvars[['SOURCETRACKER_PATH']])
source(sourcefile)
} else {
stop("Please add SOURCETRACKER_PATH environment variable pointing to the SourceTracker top-level directory (containing 'sourcetracker_for_qiime.r')")
}
# Help text printed when the script is invoked with -h (one entry per option).
helpstr <- c(
"-i otu table: QIIME-formatted OTU table (first line is a comment line starting with '#', second line starts with '#OTU ID' followed by sample names). You must supply either this or the taxon table via '-t'.",
"-t taxon table: output from QIIME script summarize_taxa.py. You must supply either this or the otu table via '-i'.",
"-m mapfile: mapping file with an 'Env' column giving the source environments, and a 'SourceSink' column giving 'source' for source samples and 'sink' for sink samples.",
"-n number of restarts of Gibbs sampling (default 10)",
"-b number of burn-in iterations for Gibbs sampling (default 100)",
"-r rarefaction depth, 0 for none (default 1000)",
"--train_rarefaction training data rarefaction depth, 0 for none (default 1000)",
"-f sampleid_file, file containing list of samples to predict. Useful for parallel processing (default None).",
"-o outdir: output directory; default is '.'",
"-s predict source samples using leave-one-out predictions (default: FALSE)",
"--suppress_full_results suppress writing of full per-taxon predictions (default: FALSE)",
"--alpha1 alpha1: Dirichlet hyperparameter for taxa/genes in known environments (default: 1e-3)",
"--alpha2 alpha2: Dirichlet hyperparameter for taxa/genes in unknown environments (default: 1e-1)",
"--beta beta: Dirichlet hyperparameter for mixture of environments (default: 1e-2)",
"-R results file from previous run. If given, no predictions are made, only plotting and output files)",
"--tune_alphas: tune_ntrials Tune alpha values using cross-validation on the training set with this many trials (suggest at least 25); (default: 0, no tuning)",
"--color_ix: comma-separated list of color indices for alphabetical source environments",
"--eval_fit: fit_ntrials Evaluate quality of fit to the data using simulations. Ignored if less than or equal to --tune_alpha ntrials (default: 0)",
"-v: verbose output (default FALSE)")
# Recognised options and their default values; parse.args() below replaces
# these defaults with any values supplied on the command line.
allowed.args <- list('-i'=NULL,'-t'=NULL,'-m'=NULL,'-n'=10,'-b'=100,'-r'=1000,
'--train_rarefaction'=1000,
'-o'='.', '-v'=FALSE, '-s'=FALSE, '-f'=NULL,'-R'=NULL,
'--alpha1'=1e-3, '--alpha2'=1e-1, '--beta'=1e-2, '--tune_alphas'=0, '--eval_fit'=0,
'--color_ix'=NULL, "--suppress_full_results"=FALSE)
# Parse command-line params
# assumes that NO args are positional
# allows flags without argument
#
# allowed.args: named list of recognised flags and their default values.
# helplist: character vector printed when '-h' is present; defaults to the
#   file-level 'helpstr'. (The original declared this parameter but always
#   read the global 'helpstr' instead, leaving the parameter dead.)
# Returns allowed.args with defaults replaced by command-line values; a
# flag given without a value (last token, or followed by another flag) is
# set to TRUE.
"parse.args" <- function(allowed.args, helplist = helpstr){
  argv <- commandArgs(trailingOnly = TRUE)
  # print help string if requested
  if(!is.null(helplist) && any(argv == '-h')){
    cat('', helplist, '', sep = '\n')
    q(runLast = FALSE)
  }
  for(name in names(allowed.args)){
    argpos <- which(argv == name)
    if(length(argpos) > 0){
      # if a flag is repeated, honour its first occurrence (the original
      # compared a length > 1 vector inside if(), which warns and uses the
      # first element anyway)
      argpos <- argpos[1]
      # test for flag without argument
      # NOTE(review): a negative numeric value (e.g. "-5") would be taken
      # for a flag here, since only the leading '-' is inspected.
      if(argpos == length(argv) || substring(argv[argpos + 1], 1, 1) == '-'){
        allowed.args[[name]] <- TRUE
      } else {
        allowed.args[[name]] <- argv[argpos + 1]
      }
    }
  }
  return(allowed.args)
}
# parse arg list
arglist <- parse.args(allowed.args)
# A mapping file is always required; exactly one of -i (OTU table) or
# -t (taxon table) is required unless a previous results file (-R) is given.
if(is.null(arglist[['-m']])) stop('Please supply a mapping file.')
if(is.null(arglist[['-R']])){
if( (is.null(arglist[['-i']]) && is.null(arglist[['-t']]))
|| (!is.null(arglist[['-i']]) && !is.null(arglist[['-t']]))) stop('Please supply a QIIME OTU table or a QIIME taxon summary.')
}
# Coerce the string-valued options to their working types.
nrestarts <- as.numeric(arglist[['-n']])
burnin <- as.numeric(arglist[['-b']])
rarefaction <- as.numeric(arglist[['-r']])
train.rarefaction <- as.numeric(arglist[['--train_rarefaction']])
sourceonly <- arglist[['-s']]
outdir <- arglist[['-o']]
predictfile <- arglist[['-f']]
resultsfile <- arglist[['-R']]
alpha1 <- as.numeric(arglist[['--alpha1']])
alpha2 <- as.numeric(arglist[['--alpha2']])
beta <- as.numeric(arglist[['--beta']])
tune.alphas.ntrials <- as.numeric(arglist[['--tune_alphas']])
eval.fit.ntrials <- as.numeric(arglist[['--eval_fit']])
# A depth of 0 means "no rarefaction" downstream (NULL is the sentinel).
if(rarefaction==0) rarefaction <- NULL
# Optional custom palette: comma-separated indices into std.env.colors.
env.colors <- NULL
if(!is.null(arglist[['--color_ix']])){
env.ix <- as.numeric(strsplit(arglist[['--color_ix']],',')[[1]])
env.colors <- std.env.colors[env.ix]
}
# create output directory
if(!is.null(outdir)) {
dir.create(outdir,showWarnings=FALSE, recursive=TRUE)
} else outdir <- '.'
# save command that was run (provenance record for the output directory)
sink(sprintf('%s/command.txt',arglist[['-o']]))
cat(paste(commandArgs(),collapse=' '),'\n',sep='')
sink(NULL)
# load list of samples to predict (one sample id per line)
predictlist <- NULL
if(!is.null(predictfile)){
predictlist <- as.character(read.table(predictfile)[,1])
}
# load mapping file (all columns read as character; row names = sample ids)
map <- read.table(arglist[['-m']],sep='\t',comment='',head=T,row.names=1,check=FALSE,colClasses='character')
if(sum(colnames(map)=='Env')==0) stop("The mapping file must contain an 'Env' column naming the source environment for each sample.")
if(sourceonly){
# if no sourcesink column, use all samples for leave-one-out
if(sum(colnames(map)=='SourceSink')==0){
map$SourceSink <- factor(rep('source',nrow(map)),levels=c('source','sink','ignore'))
}
} else {
if(sum(colnames(map)=='SourceSink')==0) stop("The mapping file must contain a 'SourceSink' column indicating 'source' or 'sink' for each sample.")
map$SourceSink <- factor(map$SourceSink,levels=c('source','sink','ignore'))
}
# Either reload a saved results object (-R) or load the abundance table
# and train/predict below.
if(!is.null(resultsfile)){
load(resultsfile)
if(sourceonly){
filebase <- 'source_predictions'
} else {
filebase <- 'sink_predictions'
}
} else {
# load otus/taxa
if(!is.null(arglist[['-i']])){
otus <- read.table(arglist[['-i']],sep='\t',comment='',head=T,row.names=1,check=FALSE, skip=1)
} else {
otus <- read.table(arglist[['-t']],sep='\t',head=T,row.names=1,check=FALSE)
}
# drop "Consensus Lineage" column if present, then transpose so that rows
# are samples and columns are taxa/OTUs
otus <- as.matrix(t(otus[,!grepl('Consensus|Metadata|taxonomy',colnames(otus),ignore=TRUE)]))
# ensure map and data table contain the same samples in the same order
ix <- intersect(rownames(map), rownames(otus))
otus <- otus[ix,]
map <- map[ix,]
# ensure there are no "empty" samples
rs <- rowSums(otus)
num.empties <- sum(rs == 0)
if(num.empties > 0){
stop(sprintf("The following %d samples have zero sequences: %s",
num.empties,
paste(rownames(otus)[rs==0],collapse=', '))
)
}
# extract metadata from mapping file
if(!is.null(predictlist)){
if(sourceonly){
# there is a specific list to predict, and we're doing source only
# therefore ignore sink samples
sourcesink <- map$SourceSink
sourcesink[sourcesink=='sink'] <- NA
names(sourcesink) <- rownames(map)
sourcesink[predictlist] <- 'sink'
source.ix <- which(sourcesink=='source')
sink.ix <- which(sourcesink=='sink')
sourceonly <- FALSE
} else {
# predict only the requested sink samples
source.ix <- which(map$SourceSink=='source')
sink.ix <- which(map$SourceSink=='sink')
names(sink.ix) <- rownames(map)[sink.ix]
sink.ix <- sink.ix[predictlist]
}
} else {
source.ix <- which(map$SourceSink=='source')
sink.ix <- which(map$SourceSink=='sink')
}
envs <- map$Env
if(length(source.ix) < 1) stop("No samples are identified as sources")
if(length(sink.ix) < 1 && !sourceonly) stop("No samples are identified as sinks")
# train SourceTracker object on training data
st <- sourcetracker(otus[source.ix,,drop=F], envs[source.ix], rarefaction_depth=train.rarefaction)
# if tuning is requested, obtain alpha values by cross-validation
if(tune.alphas.ntrials > 0){
  verbosity <- 0
  if(arglist[['-v']]) verbosity <- 2
  tune.res <- tune.st(otus[source.ix,,drop=F], envs[source.ix], ntrials=tune.alphas.ntrials,
    rarefaction_depth=rarefaction, verbosity=verbosity, beta=beta)
  alpha1 <- tune.res$best.alpha1
  alpha2 <- tune.res$best.alpha2
  cat(sprintf('After tuning: alpha1 = %f, alpha2 = %f, with RMSE= %.3f\n', alpha1, alpha2, tune.res$best.rmse))
  # save alphas
  sink(sprintf('%s/tuned.alphas.txt', outdir))
  # BUG FIX: the fourth field previously repeated best.pseudo.r2.sem; it is
  # the pseudo-R2 value itself, the fifth field its s.e.m.
  # NOTE(review): assumes tune.st() returns a best.pseudo.r2 element -- confirm.
  cat(sprintf('# Final alpha1=%f, alpha2=%f, RMSE=%.5f, pseudo-R2=%.5f +/- %.5f\n',
    alpha1, alpha2, tune.res$best.rmse,
    tune.res$best.pseudo.r2, tune.res$best.pseudo.r2.sem))
  cat('alpha1\talpha2\tRMSE\tRMSE s.e.m.\n')
  cat(sprintf('%f\t%f\t%.5f\t%.5f\n',
    tune.res$alphas[,1], tune.res$alphas[,2],
    tune.res$rmse, tune.res$rmse.sem))
  sink(NULL)
}
# plot tuning results
if(tune.alphas.ntrials > 0 || eval.fit.ntrials > 0) {
verbosity <- 0
if(arglist[['-v']]) verbosity <- 2
# run a dedicated fit evaluation only when it asks for more trials than the
# tuning step already performed; otherwise reuse the best tuning result
if(eval.fit.ntrials > tune.alphas.ntrials) {
if(arglist[['-v']]) cat(sprintf('Evaluating fit at alpha1=%f, alpha2=%f with %d trials\n',
alpha1, alpha2, eval.fit.ntrials))
results.to.plot <- eval.fit(otus[source.ix,,drop=F], envs[source.ix],
ntrials=eval.fit.ntrials, rarefaction_depth=rarefaction,
alpha1=alpha1, alpha2=alpha2, beta=beta, verbosity=verbosity-1)
sink(sprintf('%s/eval.fit.txt',outdir))
cat(sprintf('alpha1\t%.5f\nalpha2\t%.5f\nrmse\t%.5f\nrmse.sem\t%.5f\n',alpha1, alpha2, results.to.plot$rmse,results.to.plot$rmse.sem))
sink(NULL)
} else {
results.to.plot <- tune.res$results[[which.min(tune.res$rmse)]]
}
plot.eval(results.to.plot,plot.type=1,
filename=sprintf('%s/confusion_scatterplot_combined.pdf', outdir))
plot.eval(results.to.plot,plot.type=2,
filename=sprintf('%s/confusion_scatterplot_pairwise.pdf', outdir))
}
if(sourceonly){
# Estimate leave-one-out source proportions in training data
results <- predict(st, rarefaction_depth=rarefaction, nrestarts=nrestarts, burnin=burnin, alpha1=alpha1, alpha2=alpha2, beta=beta, full=!arglist[['--suppress_full_results']])
filebase <- 'source_predictions'
} else {
# Estimate source proportions in test data
testdata <- otus[sink.ix,,drop=F]
# keep a one-row matrix (and its sample name) when there is a single sink
if(length(sink.ix)==1){
testdata <- matrix(testdata,nrow=1)
rownames(testdata) <- rownames(otus)[sink.ix]
}
results <- predict(st,testdata, rarefaction_depth=rarefaction, nrestarts=nrestarts, burnin=burnin, alpha1=alpha1, alpha2=alpha2, beta=beta, full=!arglist[['--suppress_full_results']])
filebase <- 'sink_predictions'
}
# save full results object
save(results,file=sprintf('%s/results.RData',outdir))
}
# save results file: mixture proportions and their standard deviations, one
# row per predicted sample; the leading 'SampleID\t' aligns the header with
# the row names emitted by write.table.
sink(sprintf('%s/%s.txt', outdir, filebase))
cat('SampleID\t')
write.table(results$proportions,quote=F,sep='\t')
sink(NULL)
sink(sprintf('%s/%s_stdev.txt', outdir, filebase))
cat('SampleID\t')
write.table(results$proportions_sd,quote=F,sep='\t')
sink(NULL)
# Optionally write the full per-taxon contribution tables, one file per
# source environment.
if(!arglist[['--suppress_full_results']]){
  # get average of full results across restarts
  res.mean <- apply(results$full.results, c(2,3,4), mean)
  # per-sample totals taken from restart 1 only
  # NOTE(review): assumes the totals are identical across restarts -- confirm.
  sample.sums <- apply(results$full.results[1,,,,drop=FALSE], 3, sum)
  # create dir
  subdir <- file.path(outdir, 'full_results')
  dir.create(subdir, showWarnings=FALSE, recursive=TRUE)
  # write each env to a separate file
  # (seq_along avoids the 1:length() trap when there are no environments)
  for(i in seq_along(results$train.envs)){
    env.name <- results$train.envs[i]
    filename <- sprintf('%s/%s_%s_contributions.txt', subdir, filebase, env.name)
    res.mean.i <- res.mean[i,,]
    # handle the case where there is only one sink sample
    if(is.null(dim(res.mean.i))) res.mean.i <- matrix(res.mean.i, ncol=1)
    # normalise each sample's contributions by its total
    env.mat <- sweep(res.mean.i, 1, sample.sums, '/')
    sink(filename)
    cat('SampleID\t')
    write.table(env.mat, quote=FALSE, sep='\t')
    sink(NULL)
  }
}
# Write the mapping file (with contamination predictions when running in
# leave-one-out source mode) next to the results.
save.mapping.file(results, map,
  filename=sprintf('%s/map.txt',outdir),
  include.contamination.predictions=sourceonly)
# make plots: the 'dist' plot type is only added when there is more than
# one draw to summarise
if(dim(results$draws)[2] > 1) {
  plot.types <- c('pie', 'bar', 'dist')
} else plot.types <- c('pie', 'bar')
envs <- as.factor(map[rownames(results$proportions),'Env'])
labels <- sprintf('%s_%s', envs, rownames(results$proportions))
# (removed a dead 'plotixs <- sort(...)$ix' assignment: the value was always
# overwritten by the which() call inside the loop before being read)
for(plot.type in plot.types){
  # plot each env separately
  for(env in unique(envs)){
    plotixs <- which(envs == env)
    pdf(sprintf('%s/%s_%s_%s.pdf',outdir,filebase,plot.type,env),width=5,height=5)
    plot(results, type=plot.type, labels=labels, include.legend=TRUE, indices=plotixs, env.colors=env.colors)
    dev.off()
  }
}
| /sourcetracker_for_qiime.r | no_license | IUEayhu/sourcetracker | R | false | false | 14,260 | r | # USAGE
# get help:
# Rscript sourcetracker_for_qiime.r -h
#
# run sink predictions using QIIME taxon abundance file:
# Rscript sourcetracker_for_qiime.r -t taxa.txt -m map.txt
#
# run leave-one-out source-sample predictions using QIIME taxon abundance file:
# Rscript sourcetracker_for_qiime.r -t taxa.txt -m map.txt -s
#
# run sink predictions using QIIME OTU table:
# Rscript sourcetracker_for_qiime.r -i otutable.txt -m map.txt
#
# run sink predictions using QIIME OTU table with 1000 burn-ins, 25 random restarts, and rarefaction depth of 100:
# Rscript sourcetracker_for_qiime.r -i otutable.txt -m map.txt -b 1000 -n 25 -r 100
#
# run sink predictions using QIIME taxon abundance file and an input file listing the sampleids to predict:
# Rscript sourcetracker_for_qiime.r -t taxa.txt -m map.txt -f sampleid_file.txt
#
# Note: you must add the path to your SourceTracker.r file to your path, e.g.:
# echo "" >> ~/.bash_profile; echo "export SOURCETRACKER_PATH=$HOME/path/to/your/SourceTracker.r" >> ~/.bash_profile; source ~/.bash_profile
# load SourceTracker package
# Locate and source the SourceTracker implementation: the SOURCETRACKER_PATH
# environment variable must point at the top-level SourceTracker directory,
# which is expected to contain src/SourceTracker.r.
envvars <- as.list(Sys.getenv())
if(is.element('SOURCETRACKER_PATH', names(envvars))){
sourcefile <- sprintf('%s/src/SourceTracker.r',envvars[['SOURCETRACKER_PATH']])
source(sourcefile)
} else {
stop("Please add SOURCETRACKER_PATH environment variable pointing to the SourceTracker top-level directory (containing 'sourcetracker_for_qiime.r')")
}
# Help text printed when the script is invoked with -h (one entry per option).
helpstr <- c(
"-i otu table: QIIME-formatted OTU table (first line is a comment line starting with '#', second line starts with '#OTU ID' followed by sample names). You must supply either this or the taxon table via '-t'.",
"-t taxon table: output from QIIME script summarize_taxa.py. You must supply either this or the otu table via '-i'.",
"-m mapfile: mapping file with an 'Env' column giving the source environments, and a 'SourceSink' column giving 'source' for source samples and 'sink' for sink samples.",
"-n number of restarts of Gibbs sampling (default 10)",
"-b number of burn-in iterations for Gibbs sampling (default 100)",
"-r rarefaction depth, 0 for none (default 1000)",
"--train_rarefaction training data rarefaction depth, 0 for none (default 1000)",
"-f sampleid_file, file containing list of samples to predict. Useful for parallel processing (default None).",
"-o outdir: output directory; default is '.'",
"-s predict source samples using leave-one-out predictions (default: FALSE)",
"--suppress_full_results suppress writing of full per-taxon predictions (default: FALSE)",
"--alpha1 alpha1: Dirichlet hyperparameter for taxa/genes in known environments (default: 1e-3)",
"--alpha2 alpha2: Dirichlet hyperparameter for taxa/genes in unknown environments (default: 1e-1)",
"--beta beta: Dirichlet hyperparameter for mixture of environments (default: 1e-2)",
"-R results file from previous run. If given, no predictions are made, only plotting and output files)",
"--tune_alphas: tune_ntrials Tune alpha values using cross-validation on the training set with this many trials (suggest at least 25); (default: 0, no tuning)",
"--color_ix: comma-separated list of color indices for alphabetical source environments",
"--eval_fit: fit_ntrials Evaluate quality of fit to the data using simulations. Ignored if less than or equal to --tune_alpha ntrials (default: 0)",
"-v: verbose output (default FALSE)")
# Recognised options and their default values; parse.args() below replaces
# these defaults with any values supplied on the command line.
allowed.args <- list('-i'=NULL,'-t'=NULL,'-m'=NULL,'-n'=10,'-b'=100,'-r'=1000,
'--train_rarefaction'=1000,
'-o'='.', '-v'=FALSE, '-s'=FALSE, '-f'=NULL,'-R'=NULL,
'--alpha1'=1e-3, '--alpha2'=1e-1, '--beta'=1e-2, '--tune_alphas'=0, '--eval_fit'=0,
'--color_ix'=NULL, "--suppress_full_results"=FALSE)
# Parse command-line params
# assumes that NO args are positional
# allows flags without argument
#
# allowed.args: named list of recognised flags and their default values.
# helplist: character vector printed when '-h' is present; defaults to the
#   file-level 'helpstr'. (The original declared this parameter but always
#   read the global 'helpstr' instead, leaving the parameter dead.)
# Returns allowed.args with defaults replaced by command-line values; a
# flag given without a value (last token, or followed by another flag) is
# set to TRUE.
"parse.args" <- function(allowed.args, helplist = helpstr){
  argv <- commandArgs(trailingOnly = TRUE)
  # print help string if requested
  if(!is.null(helplist) && any(argv == '-h')){
    cat('', helplist, '', sep = '\n')
    q(runLast = FALSE)
  }
  for(name in names(allowed.args)){
    argpos <- which(argv == name)
    if(length(argpos) > 0){
      # if a flag is repeated, honour its first occurrence (the original
      # compared a length > 1 vector inside if(), which warns and uses the
      # first element anyway)
      argpos <- argpos[1]
      # test for flag without argument
      # NOTE(review): a negative numeric value (e.g. "-5") would be taken
      # for a flag here, since only the leading '-' is inspected.
      if(argpos == length(argv) || substring(argv[argpos + 1], 1, 1) == '-'){
        allowed.args[[name]] <- TRUE
      } else {
        allowed.args[[name]] <- argv[argpos + 1]
      }
    }
  }
  return(allowed.args)
}
# parse arg list
arglist <- parse.args(allowed.args)
# A mapping file is always required; exactly one of -i (OTU table) or
# -t (taxon table) is required unless a previous results file (-R) is given.
if(is.null(arglist[['-m']])) stop('Please supply a mapping file.')
if(is.null(arglist[['-R']])){
if( (is.null(arglist[['-i']]) && is.null(arglist[['-t']]))
|| (!is.null(arglist[['-i']]) && !is.null(arglist[['-t']]))) stop('Please supply a QIIME OTU table or a QIIME taxon summary.')
}
# Coerce the string-valued options to their working types.
nrestarts <- as.numeric(arglist[['-n']])
burnin <- as.numeric(arglist[['-b']])
rarefaction <- as.numeric(arglist[['-r']])
train.rarefaction <- as.numeric(arglist[['--train_rarefaction']])
sourceonly <- arglist[['-s']]
outdir <- arglist[['-o']]
predictfile <- arglist[['-f']]
resultsfile <- arglist[['-R']]
alpha1 <- as.numeric(arglist[['--alpha1']])
alpha2 <- as.numeric(arglist[['--alpha2']])
beta <- as.numeric(arglist[['--beta']])
tune.alphas.ntrials <- as.numeric(arglist[['--tune_alphas']])
eval.fit.ntrials <- as.numeric(arglist[['--eval_fit']])
# A depth of 0 means "no rarefaction" downstream (NULL is the sentinel).
if(rarefaction==0) rarefaction <- NULL
# Optional custom palette: comma-separated indices into std.env.colors.
env.colors <- NULL
if(!is.null(arglist[['--color_ix']])){
env.ix <- as.numeric(strsplit(arglist[['--color_ix']],',')[[1]])
env.colors <- std.env.colors[env.ix]
}
# create output directory
if(!is.null(outdir)) {
dir.create(outdir,showWarnings=FALSE, recursive=TRUE)
} else outdir <- '.'
# save command that was run (provenance record for the output directory)
sink(sprintf('%s/command.txt',arglist[['-o']]))
cat(paste(commandArgs(),collapse=' '),'\n',sep='')
sink(NULL)
# load list of samples to predict (one sample id per line)
predictlist <- NULL
if(!is.null(predictfile)){
predictlist <- as.character(read.table(predictfile)[,1])
}
# load mapping file (all columns read as character; row names = sample ids)
map <- read.table(arglist[['-m']],sep='\t',comment='',head=T,row.names=1,check=FALSE,colClasses='character')
if(sum(colnames(map)=='Env')==0) stop("The mapping file must contain an 'Env' column naming the source environment for each sample.")
if(sourceonly){
# if no sourcesink column, use all samples for leave-one-out
if(sum(colnames(map)=='SourceSink')==0){
map$SourceSink <- factor(rep('source',nrow(map)),levels=c('source','sink','ignore'))
}
} else {
if(sum(colnames(map)=='SourceSink')==0) stop("The mapping file must contain a 'SourceSink' column indicating 'source' or 'sink' for each sample.")
map$SourceSink <- factor(map$SourceSink,levels=c('source','sink','ignore'))
}
# Either reload a saved results object (-R) or load the abundance table
# and train/predict below.
if(!is.null(resultsfile)){
load(resultsfile)
if(sourceonly){
filebase <- 'source_predictions'
} else {
filebase <- 'sink_predictions'
}
} else {
# load otus/taxa
if(!is.null(arglist[['-i']])){
otus <- read.table(arglist[['-i']],sep='\t',comment='',head=T,row.names=1,check=FALSE, skip=1)
} else {
otus <- read.table(arglist[['-t']],sep='\t',head=T,row.names=1,check=FALSE)
}
# drop "Consensus Lineage" column if present, then transpose so that rows
# are samples and columns are taxa/OTUs
otus <- as.matrix(t(otus[,!grepl('Consensus|Metadata|taxonomy',colnames(otus),ignore=TRUE)]))
# ensure map and data table contain the same samples in the same order
ix <- intersect(rownames(map), rownames(otus))
otus <- otus[ix,]
map <- map[ix,]
# ensure there are no "empty" samples
rs <- rowSums(otus)
num.empties <- sum(rs == 0)
if(num.empties > 0){
stop(sprintf("The following %d samples have zero sequences: %s",
num.empties,
paste(rownames(otus)[rs==0],collapse=', '))
)
}
# extract metadata from mapping file
if(!is.null(predictlist)){
if(sourceonly){
# there is a specific list to predict, and we're doing source only
# therefore ignore sink samples
sourcesink <- map$SourceSink
sourcesink[sourcesink=='sink'] <- NA
names(sourcesink) <- rownames(map)
sourcesink[predictlist] <- 'sink'
source.ix <- which(sourcesink=='source')
sink.ix <- which(sourcesink=='sink')
sourceonly <- FALSE
} else {
# predict only the requested sink samples
source.ix <- which(map$SourceSink=='source')
sink.ix <- which(map$SourceSink=='sink')
names(sink.ix) <- rownames(map)[sink.ix]
sink.ix <- sink.ix[predictlist]
}
} else {
source.ix <- which(map$SourceSink=='source')
sink.ix <- which(map$SourceSink=='sink')
}
envs <- map$Env
if(length(source.ix) < 1) stop("No samples are identified as sources")
if(length(sink.ix) < 1 && !sourceonly) stop("No samples are identified as sinks")
# train SourceTracker object on training data
st <- sourcetracker(otus[source.ix,,drop=F], envs[source.ix], rarefaction_depth=train.rarefaction)
# if tuning is requested, obtain alpha values by cross-validation
if(tune.alphas.ntrials > 0){
  verbosity <- 0
  if(arglist[['-v']]) verbosity <- 2
  tune.res <- tune.st(otus[source.ix,,drop=F], envs[source.ix], ntrials=tune.alphas.ntrials,
    rarefaction_depth=rarefaction, verbosity=verbosity, beta=beta)
  alpha1 <- tune.res$best.alpha1
  alpha2 <- tune.res$best.alpha2
  cat(sprintf('After tuning: alpha1 = %f, alpha2 = %f, with RMSE= %.3f\n', alpha1, alpha2, tune.res$best.rmse))
  # save alphas
  sink(sprintf('%s/tuned.alphas.txt', outdir))
  # BUG FIX: the fourth field previously repeated best.pseudo.r2.sem; it is
  # the pseudo-R2 value itself, the fifth field its s.e.m.
  # NOTE(review): assumes tune.st() returns a best.pseudo.r2 element -- confirm.
  cat(sprintf('# Final alpha1=%f, alpha2=%f, RMSE=%.5f, pseudo-R2=%.5f +/- %.5f\n',
    alpha1, alpha2, tune.res$best.rmse,
    tune.res$best.pseudo.r2, tune.res$best.pseudo.r2.sem))
  cat('alpha1\talpha2\tRMSE\tRMSE s.e.m.\n')
  cat(sprintf('%f\t%f\t%.5f\t%.5f\n',
    tune.res$alphas[,1], tune.res$alphas[,2],
    tune.res$rmse, tune.res$rmse.sem))
  sink(NULL)
}
# plot tuning results
if(tune.alphas.ntrials > 0 || eval.fit.ntrials > 0) {
verbosity <- 0
if(arglist[['-v']]) verbosity <- 2
if(eval.fit.ntrials > tune.alphas.ntrials) {
if(arglist[['-v']]) cat(sprintf('Evaluating fit at alpha1=%f, alpha2=%f with %d trials\n',
alpha1, alpha2, eval.fit.ntrials))
results.to.plot <- eval.fit(otus[source.ix,,drop=F], envs[source.ix],
ntrials=eval.fit.ntrials, rarefaction_depth=rarefaction,
alpha1=alpha1, alpha2=alpha2, beta=beta, verbosity=verbosity-1)
sink(sprintf('%s/eval.fit.txt',outdir))
cat(sprintf('alpha1\t%.5f\nalpha2\t%.5f\nrmse\t%.5f\nrmse.sem\t%.5f\n',alpha1, alpha2, results.to.plot$rmse,results.to.plot$rmse.sem))
sink(NULL)
} else {
results.to.plot <- tune.res$results[[which.min(tune.res$rmse)]]
}
plot.eval(results.to.plot,plot.type=1,
filename=sprintf('%s/confusion_scatterplot_combined.pdf', outdir))
plot.eval(results.to.plot,plot.type=2,
filename=sprintf('%s/confusion_scatterplot_pairwise.pdf', outdir))
}
if(sourceonly){
# Estimate leave-one-out source proportions in training data
results <- predict(st, rarefaction_depth=rarefaction, nrestarts=nrestarts, burnin=burnin, alpha1=alpha1, alpha2=alpha2, beta=beta, full=!arglist[['--suppress_full_results']])
filebase <- 'source_predictions'
} else {
# Estimate source proportions in test data
testdata <- otus[sink.ix,,drop=F]
if(length(sink.ix)==1){
testdata <- matrix(testdata,nrow=1)
rownames(testdata) <- rownames(otus)[sink.ix]
}
results <- predict(st,testdata, rarefaction_depth=rarefaction, nrestarts=nrestarts, burnin=burnin, alpha1=alpha1, alpha2=alpha2, beta=beta, full=!arglist[['--suppress_full_results']])
filebase <- 'sink_predictions'
}
# save full results object
save(results,file=sprintf('%s/results.RData',outdir))
}
# ---- Write SourceTracker outputs: proportion tables, mapping file, plots ----
# NOTE(review): this top-level script code assumes `results`, `outdir`,
# `filebase`, `map`, `sourceonly`, `arglist` and `env.colors` were defined
# earlier in the script (outside this excerpt) -- confirm against full file.
# save results file
# mixing proportions: one row per sample, one column per source environment
sink(sprintf('%s/%s.txt', outdir, filebase))
cat('SampleID\t')
write.table(results$proportions,quote=F,sep='\t')
sink(NULL)
# standard deviations of the proportions (across draws/restarts)
sink(sprintf('%s/%s_stdev.txt', outdir, filebase))
cat('SampleID\t')
write.table(results$proportions_sd,quote=F,sep='\t')
sink(NULL)
if(!arglist[['--suppress_full_results']]){
# get average of full results across restarts
res.mean <- apply(results$full.results,c(2,3,4),mean)
sample.sums <- apply(results$full.results[1,,,,drop=F],3,sum)
# create dir
subdir <- paste(outdir,'full_results',sep='/')
dir.create(subdir,showWarnings=FALSE, recursive=TRUE)
# write each env separate file
# one per-taxon contribution table per source environment
for(i in 1:length(results$train.envs)){
env.name <- results$train.envs[i]
filename <- sprintf('%s/%s_%s_contributions.txt', subdir, filebase, env.name)
res.mean.i <- res.mean[i,,]
# handle the case where there is only one sink sample
if(is.null(dim(res.mean.i))) res.mean.i <- matrix(res.mean.i,ncol=1)
# normalize by per-sample totals so contributions are fractions of a sample
env.mat <- sweep(res.mean.i,1,sample.sums,'/')
sink(filename)
cat('SampleID\t')
write.table(env.mat,quote=F,sep='\t')
sink(NULL)
}
}
save.mapping.file(results, map,
filename=sprintf('%s/map.txt',outdir),
include.contamination.predictions=sourceonly)
# make plots
# distribution ('dist') plots need more than one draw to be meaningful
if(dim(results$draws)[2] > 1) {
plot.types <- c('pie', 'bar', 'dist')
} else plot.types <- c('pie', 'bar')
envs <- as.factor(map[rownames(results$proportions),'Env'])
labels = sprintf('%s_%s',envs, rownames(results$proportions))
# NOTE(review): this sorted index is immediately overwritten inside the loop
# below, so this assignment has no effect.
plotixs <- sort(as.numeric(envs),index=TRUE)$ix
for(plot.type in plot.types){
# plot each env separately
for(env in unique(envs)){
plotixs <- which(envs == env)
pdf(sprintf('%s/%s_%s_%s.pdf',outdir,filebase,plot.type,env),width=5,height=5)
plot(results, type=plot.type, labels=labels, include.legend=TRUE, indices=plotixs, env.colors=env.colors)
dev.off()
}
}
|
#' Trim a dfm using frequency threshold-based feature selection
#'
#' @description Returns a document by feature matrix reduced in size based on
#' document and term frequency, usually in terms of a minimum frequency, but
#' may also be in terms of maximum frequencies. Setting a combination of
#' minimum and maximum frequencies will select features based on a range.
#'
#' @description Feature selection is implemented by considering features across
#' all documents, by summing them for term frequency, or counting the
#' documents in which they occur for document frequency. Rank and quantile
#' versions of these are also implemented, for taking the first \eqn{n}
#' features in terms of descending order of overall global counts or document
#' frequencies, or as a quantile of all frequencies.
#' @param x a \link{dfm} object
#' @param min_termfreq,max_termfreq minimum/maximum values of feature frequencies
#' across all documents, below/above which features will
#' be removed
#' @param termfreq_type how \code{min_termfreq} and \code{max_termfreq} are
#'   interpreted.  \code{"count"} sums the frequencies; \code{"prop"} divides the
#'   term frequencies by the total sum of term frequencies; \code{"rank"} is
#'   matched against the inverted ranking of features in terms of overall
#'   frequency, so that 1, 2, ... are the highest and second highest frequency
#'   features, and so on; \code{"quantile"} sets the cutoffs according to the
#'   quantiles (see \code{\link{quantile}}) of term frequencies.
#' @param min_docfreq,max_docfreq minimum/maximum values of a feature's document
#' frequency, below/above which features will be removed
#' @param docfreq_type specify how \code{min_docfreq} and \code{max_docfreq} are
#'   interpreted.  \code{"count"} is the same as \code{\link{docfreq}(x, scheme
#'   = "count")}; \code{"prop"} divides the document frequencies by the total
#'   number of documents; \code{"rank"} is matched against the inverted ranking
#'   of document frequency, so that 1, 2, ... are the features with the highest
#'   and second highest document frequencies, and so on; \code{"quantile"} sets
#'   the cutoffs according to the quantiles (see \code{\link{quantile}}) of
#'   document frequencies.
#' @param sparsity equivalent to \code{1 - min_docfreq}, included for comparison
#' with \pkg{tm}
#' @param verbose print messages
#' @param ... not used
#' @return A \link{dfm} reduced in features (with the same number of documents)
#' @export
#' @note Trimming a \link{dfm} object is an operation based on the \emph{values}
#' in the document-feature matrix. To select subsets of a dfm based on the
#' features themselves (meaning the feature labels from
#' \code{\link{featnames}}) -- such as those matching a regular expression, or
#' removing features matching a stopword list, use \code{\link{dfm_select}}.
#' @seealso \code{\link{dfm_select}}, \code{\link{dfm_sample}}
#' @examples
#' (mydfm <- dfm(data_corpus_inaugural[1:5]))
#'
#' # keep only words occurring >= 10 times and in >= 2 documents
#' dfm_trim(mydfm, min_termfreq = 10, min_docfreq = 2)
#'
#' # keep only words occurring >= 10 times and in at least 0.4 of the documents
#' dfm_trim(mydfm, min_termfreq = 10, min_docfreq = 0.4)
#'
#' # keep only words occurring <= 10 times and in <=2 documents
#' dfm_trim(mydfm, max_termfreq = 10, max_docfreq = 2)
#'
#' # keep only words occurring <= 10 times and in at most 3/4 of the documents
#' dfm_trim(mydfm, max_termfreq = 10, max_docfreq = 0.75)
#'
#' # keep only words occurring 5 times in 1000, and in 2 of 5 of documents
#' dfm_trim(mydfm, min_docfreq = 0.4, min_termfreq = 0.005, termfreq_type = "prop")
#'
#' # keep only words occurring frequently (top 20%) and in <=2 documents
#' dfm_trim(mydfm, min_termfreq = 0.2, max_docfreq = 2, termfreq_type = "quantile")
#'
#' \dontrun{
#' # compare to removeSparseTerms from the tm package
#' (mydfm_tm <- convert(mydfm, "tm"))
#' tm::removeSparseTerms(mydfm_tm, 0.7)
#' dfm_trim(mydfm, min_docfreq = 0.3)
#' dfm_trim(mydfm, sparsity = 0.7)
#' }
#'
#' @export
dfm_trim <- function(x,
                     min_termfreq = NULL, max_termfreq = NULL, termfreq_type = c("count", "prop", "rank", "quantile"),
                     min_docfreq = NULL, max_docfreq = NULL, docfreq_type = c("count", "prop", "rank", "quantile"),
                     sparsity = NULL,
                     verbose = quanteda_options("verbose"),
                     ...) {
    # Generic: dispatch on the class of x.  The *_type choices now include
    # "prop", matching the default and dfm methods (the generic previously
    # omitted it even though the methods and the examples use it).
    UseMethod("dfm_trim")
}
#' @export
dfm_trim.default <- function(x,
                             min_termfreq = NULL, max_termfreq = NULL,
                             termfreq_type = c("count", "prop", "rank", "quantile"),
                             min_docfreq = NULL, max_docfreq = NULL,
                             docfreq_type = c("count", "prop", "rank", "quantile"),
                             sparsity = NULL,
                             verbose = quanteda_options("verbose"),
                             ...) {
    # Catch-all method: dfm_trim() is only meaningful for dfm objects, so any
    # other class gets the package's standard "method undefined" error.
    stop(friendly_class_undefined_message(class(x), "dfm_trim"))
}
#' @export
dfm_trim.dfm <- function(x,
                     min_termfreq = NULL, max_termfreq = NULL, termfreq_type = c("count", "prop", "rank", "quantile"),
                     min_docfreq = NULL, max_docfreq = NULL, docfreq_type = c("count", "prop", "rank", "quantile"),
                     sparsity = NULL,
                     verbose = quanteda_options("verbose"),
                     ...) {
    x <- as.dfm(x)
    # an empty dfm needs no trimming
    if (!nfeat(x) || !ndoc(x)) return(x)
    # honour the deprecated argument names min_count / max_count
    dots <- list(...)
    if ('min_count' %in% names(dots)) {
        warning('min_count is deprecated, use min_termfreq')
        min_termfreq <- dots[['min_count']]
    }
    if ('max_count' %in% names(dots)) {
        warning('max_count is deprecated, use max_termfreq')
        max_termfreq <- dots[['max_count']]
    }
    termfreq_type <- match.arg(termfreq_type)
    docfreq_type <- match.arg(docfreq_type)
    # warn if the dfm no longer holds raw counts, because the thresholds below
    # are computed from the cell values.
    # FIX: the original condition parsed as (A && B) || C ("&&" binds tighter
    # than "||"), so the warning fired whenever the docfreq scheme was not
    # "unary", even when no frequency thresholds were supplied; it also
    # required *both* term thresholds instead of either.
    if ((!is.null(min_termfreq) || !is.null(max_termfreq)) &&
        (x@weightTf$scheme != "count" || x@weightDf$scheme != "unary")) {
        warning("dfm has been previously weighted")
    }
    freq <- unname(colSums(x))      # overall term frequencies
    freq_doc <- unname(docfreq(x))  # document frequencies
    # translate tm-style sparsity into a minimum document proportion
    if (!is.null(sparsity)) {
        # FIX: also reject a user-supplied min_docfreq (the original checked
        # only max_docfreq and silently overwrote min_docfreq)
        if (!is.null(min_docfreq) || !is.null(max_docfreq))
            stop("min/max_docfreq and sparsity both refer to a document ",
                 "threshold, both should not be specified")
        min_docfreq <- 1.0 - sparsity
        docfreq_type <- "prop"
        # FIX: message moved after the assignment so that the computed value
        # is actually printed (min_docfreq was still NULL here originally)
        if (verbose)
            catm("Note: converting sparsity into min_docfreq = 1 -",
                 sparsity, "=", format(min_docfreq, big.mark=","), ".\n")
    }
    # resolve the term-frequency window into absolute frequency bounds
    s <- sum(freq)
    if (termfreq_type == "count") {
        if (is.null(min_termfreq))
            min_termfreq <- 1
        if (is.null(max_termfreq))
            max_termfreq <- max(freq)
    } else if (termfreq_type == "prop") {
        if (is.null(min_termfreq))
            min_termfreq <- 0
        if (is.null(max_termfreq))
            max_termfreq <- 1
        min_termfreq <- min_termfreq * s
        max_termfreq <- max_termfreq * s
    } else if (termfreq_type == "quantile") {
        if (is.null(min_termfreq))
            min_termfreq <- 0
        if (is.null(max_termfreq))
            max_termfreq <- 1
        min_termfreq <- quantile(freq, min_termfreq, names = FALSE, type = 1)
        max_termfreq <- quantile(freq, max_termfreq, names = FALSE, type = 1)
    } else if (termfreq_type == "rank") {
        # ranks are "inverted": 1 is the most frequent feature
        if (is.null(min_termfreq))
            min_termfreq <- nfeat(x)
        if (is.null(max_termfreq))
            max_termfreq <- 1
        r <- rank(freq * -1, ties.method = 'min')
        min_termfreq <- min(freq[r <= min_termfreq])
        max_termfreq <- max(freq[r >= max_termfreq])
    }
    # resolve the document-frequency window into absolute frequency bounds
    n <- ndoc(x)
    if (docfreq_type == "count") {
        if (is.null(min_docfreq))
            min_docfreq <- 1
        if (is.null(max_docfreq))
            max_docfreq <- max(freq_doc)
    } else if (docfreq_type == "prop") {
        if (is.null(min_docfreq))
            min_docfreq <- 0
        if (is.null(max_docfreq))
            max_docfreq <- 1
        min_docfreq <- min_docfreq * n
        max_docfreq <- max_docfreq * n
    } else if (docfreq_type == "quantile") {
        if (is.null(min_docfreq))
            min_docfreq <- 0
        if (is.null(max_docfreq))
            max_docfreq <- 1
        min_docfreq <- quantile(freq_doc, min_docfreq, names = FALSE, type = 1)
        max_docfreq <- quantile(freq_doc, max_docfreq, names = FALSE, type = 1)
    } else if (docfreq_type == "rank") {
        if (is.null(min_docfreq))
            min_docfreq <- nfeat(x)
        if (is.null(max_docfreq))
            max_docfreq <- 1
        r <- rank(freq_doc * -1, ties.method = 'min')
        min_docfreq <- min(freq_doc[r <= min_docfreq])
        max_docfreq <- max(freq_doc[r >= max_docfreq])
    }
    # flag features outside the requested windows
    flag_min_term <- freq < min_termfreq
    flag_max_term <- freq > max_termfreq
    flag_min_doc <- freq_doc < min_docfreq
    flag_max_doc <- freq_doc > max_docfreq
    flag_all <- flag_min_term | flag_max_term | flag_min_doc | flag_max_doc
    # in case no features were removed as a result of filtering conditions
    if (!sum(flag_all)) {
        if (verbose) catm("No features removed.", appendLF = TRUE)
        return(x)
    }
    if (verbose) catm("Removing features occurring: ", appendLF = TRUE)
    # print messages about frequency count removal
    if (verbose && (sum(flag_min_term) || sum(flag_max_term))) {
        if (sum(flag_min_term)) {
            catm(" - fewer than ", min_termfreq, " time",
                 if (min_termfreq != 1L) "s" else "", ": ",
                 format(sum(flag_min_term), big.mark = ","),
                 sep = "", appendLF = TRUE)
        }
        if (sum(flag_max_term)) {
            catm(" - more than ", max_termfreq, " time",
                 if (max_termfreq != 1L) "s" else "", ": ",
                 format(sum(flag_max_term), big.mark = ","),
                 sep = "", appendLF = TRUE)
        }
    }
    # print messages about docfreq removal
    if (verbose && (sum(flag_min_doc) || sum(flag_max_doc))) {
        if (sum(flag_min_doc)) {
            catm(" - in fewer than ", min_docfreq, " document",
                 ifelse(min_docfreq != 1, "s", ""), ": ",
                 format(sum(flag_min_doc), big.mark = ","),
                 sep = "", appendLF = TRUE)
        }
        if (sum(flag_max_doc)) {
            catm(" - in more than ", max_docfreq, " document",
                 ifelse(max_docfreq != 1, "s", ""), ": ",
                 format(sum(flag_max_doc), big.mark = ","),
                 sep = "", appendLF = TRUE)
        }
    }
    if (verbose) {
        catm(" Total features removed: ", format(sum(flag_all), big.mark=","),
             " (",
             format(sum(flag_all) / nfeat(x) * 100, digits = 3, nsmall = 1),
             "%).",
             sep = "", appendLF = TRUE)
    }
    # drop the flagged features; documents are unchanged
    x[, !flag_all]
}
| /R/dfm_trim.R | no_license | lizl90/quanteda | R | false | false | 11,159 | r | #' Trim a dfm using frequency threshold-based feature selection
#'
#' @description Returns a document by feature matrix reduced in size based on
#' document and term frequency, usually in terms of a minimum frequency, but
#' may also be in terms of maximum frequencies. Setting a combination of
#' minimum and maximum frequencies will select features based on a range.
#'
#' @description Feature selection is implemented by considering features across
#' all documents, by summing them for term frequency, or counting the
#' documents in which they occur for document frequency. Rank and quantile
#' versions of these are also implemented, for taking the first \eqn{n}
#' features in terms of descending order of overall global counts or document
#' frequencies, or as a quantile of all frequencies.
#' @param x a \link{dfm} object
#' @param min_termfreq,max_termfreq minimum/maximum values of feature frequencies
#' across all documents, below/above which features will
#' be removed
#' @param termfreq_type how \code{min_termfreq} and \code{max_termfreq} are
#'   interpreted.  \code{"count"} sums the frequencies; \code{"prop"} divides the
#'   term frequencies by the total sum of term frequencies; \code{"rank"} is
#'   matched against the inverted ranking of features in terms of overall
#'   frequency, so that 1, 2, ... are the highest and second highest frequency
#'   features, and so on; \code{"quantile"} sets the cutoffs according to the
#'   quantiles (see \code{\link{quantile}}) of term frequencies.
#' @param min_docfreq,max_docfreq minimum/maximum values of a feature's document
#' frequency, below/above which features will be removed
#' @param docfreq_type specify how \code{min_docfreq} and \code{max_docfreq} are
#'   interpreted.  \code{"count"} is the same as \code{\link{docfreq}(x, scheme
#'   = "count")}; \code{"prop"} divides the document frequencies by the total
#'   number of documents; \code{"rank"} is matched against the inverted ranking
#'   of document frequency, so that 1, 2, ... are the features with the highest
#'   and second highest document frequencies, and so on; \code{"quantile"} sets
#'   the cutoffs according to the quantiles (see \code{\link{quantile}}) of
#'   document frequencies.
#' @param sparsity equivalent to \code{1 - min_docfreq}, included for comparison
#' with \pkg{tm}
#' @param verbose print messages
#' @param ... not used
#' @return A \link{dfm} reduced in features (with the same number of documents)
#' @export
#' @note Trimming a \link{dfm} object is an operation based on the \emph{values}
#' in the document-feature matrix. To select subsets of a dfm based on the
#' features themselves (meaning the feature labels from
#' \code{\link{featnames}}) -- such as those matching a regular expression, or
#' removing features matching a stopword list, use \code{\link{dfm_select}}.
#' @seealso \code{\link{dfm_select}}, \code{\link{dfm_sample}}
#' @examples
#' (mydfm <- dfm(data_corpus_inaugural[1:5]))
#'
#' # keep only words occurring >= 10 times and in >= 2 documents
#' dfm_trim(mydfm, min_termfreq = 10, min_docfreq = 2)
#'
#' # keep only words occurring >= 10 times and in at least 0.4 of the documents
#' dfm_trim(mydfm, min_termfreq = 10, min_docfreq = 0.4)
#'
#' # keep only words occurring <= 10 times and in <=2 documents
#' dfm_trim(mydfm, max_termfreq = 10, max_docfreq = 2)
#'
#' # keep only words occurring <= 10 times and in at most 3/4 of the documents
#' dfm_trim(mydfm, max_termfreq = 10, max_docfreq = 0.75)
#'
#' # keep only words occurring 5 times in 1000, and in 2 of 5 of documents
#' dfm_trim(mydfm, min_docfreq = 0.4, min_termfreq = 0.005, termfreq_type = "prop")
#'
#' # keep only words occurring frequently (top 20%) and in <=2 documents
#' dfm_trim(mydfm, min_termfreq = 0.2, max_docfreq = 2, termfreq_type = "quantile")
#'
#' \dontrun{
#' # compare to removeSparseTerms from the tm package
#' (mydfm_tm <- convert(mydfm, "tm"))
#' tm::removeSparseTerms(mydfm_tm, 0.7)
#' dfm_trim(mydfm, min_docfreq = 0.3)
#' dfm_trim(mydfm, sparsity = 0.7)
#' }
#'
#' @export
dfm_trim <- function(x,
                     min_termfreq = NULL, max_termfreq = NULL, termfreq_type = c("count", "prop", "rank", "quantile"),
                     min_docfreq = NULL, max_docfreq = NULL, docfreq_type = c("count", "prop", "rank", "quantile"),
                     sparsity = NULL,
                     verbose = quanteda_options("verbose"),
                     ...) {
    # Generic: dispatch on the class of x.  The *_type choices now include
    # "prop", matching the default and dfm methods (the generic previously
    # omitted it even though the methods and the examples use it).
    UseMethod("dfm_trim")
}
#' @export
dfm_trim.default <- function(x,
                             min_termfreq = NULL, max_termfreq = NULL,
                             termfreq_type = c("count", "prop", "rank", "quantile"),
                             min_docfreq = NULL, max_docfreq = NULL,
                             docfreq_type = c("count", "prop", "rank", "quantile"),
                             sparsity = NULL,
                             verbose = quanteda_options("verbose"),
                             ...) {
    # Catch-all method: dfm_trim() is only meaningful for dfm objects, so any
    # other class gets the package's standard "method undefined" error.
    stop(friendly_class_undefined_message(class(x), "dfm_trim"))
}
#' @export
dfm_trim.dfm <- function(x,
                     min_termfreq = NULL, max_termfreq = NULL, termfreq_type = c("count", "prop", "rank", "quantile"),
                     min_docfreq = NULL, max_docfreq = NULL, docfreq_type = c("count", "prop", "rank", "quantile"),
                     sparsity = NULL,
                     verbose = quanteda_options("verbose"),
                     ...) {
    x <- as.dfm(x)
    # an empty dfm needs no trimming
    if (!nfeat(x) || !ndoc(x)) return(x)
    # honour the deprecated argument names min_count / max_count
    dots <- list(...)
    if ('min_count' %in% names(dots)) {
        warning('min_count is deprecated, use min_termfreq')
        min_termfreq <- dots[['min_count']]
    }
    if ('max_count' %in% names(dots)) {
        warning('max_count is deprecated, use max_termfreq')
        max_termfreq <- dots[['max_count']]
    }
    termfreq_type <- match.arg(termfreq_type)
    docfreq_type <- match.arg(docfreq_type)
    # warn if the dfm no longer holds raw counts, because the thresholds below
    # are computed from the cell values.
    # FIX: the original condition parsed as (A && B) || C ("&&" binds tighter
    # than "||"), so the warning fired whenever the docfreq scheme was not
    # "unary", even when no frequency thresholds were supplied; it also
    # required *both* term thresholds instead of either.
    if ((!is.null(min_termfreq) || !is.null(max_termfreq)) &&
        (x@weightTf$scheme != "count" || x@weightDf$scheme != "unary")) {
        warning("dfm has been previously weighted")
    }
    freq <- unname(colSums(x))      # overall term frequencies
    freq_doc <- unname(docfreq(x))  # document frequencies
    # translate tm-style sparsity into a minimum document proportion
    if (!is.null(sparsity)) {
        # FIX: also reject a user-supplied min_docfreq (the original checked
        # only max_docfreq and silently overwrote min_docfreq)
        if (!is.null(min_docfreq) || !is.null(max_docfreq))
            stop("min/max_docfreq and sparsity both refer to a document ",
                 "threshold, both should not be specified")
        min_docfreq <- 1.0 - sparsity
        docfreq_type <- "prop"
        # FIX: message moved after the assignment so that the computed value
        # is actually printed (min_docfreq was still NULL here originally)
        if (verbose)
            catm("Note: converting sparsity into min_docfreq = 1 -",
                 sparsity, "=", format(min_docfreq, big.mark=","), ".\n")
    }
    # resolve the term-frequency window into absolute frequency bounds
    s <- sum(freq)
    if (termfreq_type == "count") {
        if (is.null(min_termfreq))
            min_termfreq <- 1
        if (is.null(max_termfreq))
            max_termfreq <- max(freq)
    } else if (termfreq_type == "prop") {
        if (is.null(min_termfreq))
            min_termfreq <- 0
        if (is.null(max_termfreq))
            max_termfreq <- 1
        min_termfreq <- min_termfreq * s
        max_termfreq <- max_termfreq * s
    } else if (termfreq_type == "quantile") {
        if (is.null(min_termfreq))
            min_termfreq <- 0
        if (is.null(max_termfreq))
            max_termfreq <- 1
        min_termfreq <- quantile(freq, min_termfreq, names = FALSE, type = 1)
        max_termfreq <- quantile(freq, max_termfreq, names = FALSE, type = 1)
    } else if (termfreq_type == "rank") {
        # ranks are "inverted": 1 is the most frequent feature
        if (is.null(min_termfreq))
            min_termfreq <- nfeat(x)
        if (is.null(max_termfreq))
            max_termfreq <- 1
        r <- rank(freq * -1, ties.method = 'min')
        min_termfreq <- min(freq[r <= min_termfreq])
        max_termfreq <- max(freq[r >= max_termfreq])
    }
    # resolve the document-frequency window into absolute frequency bounds
    n <- ndoc(x)
    if (docfreq_type == "count") {
        if (is.null(min_docfreq))
            min_docfreq <- 1
        if (is.null(max_docfreq))
            max_docfreq <- max(freq_doc)
    } else if (docfreq_type == "prop") {
        if (is.null(min_docfreq))
            min_docfreq <- 0
        if (is.null(max_docfreq))
            max_docfreq <- 1
        min_docfreq <- min_docfreq * n
        max_docfreq <- max_docfreq * n
    } else if (docfreq_type == "quantile") {
        if (is.null(min_docfreq))
            min_docfreq <- 0
        if (is.null(max_docfreq))
            max_docfreq <- 1
        min_docfreq <- quantile(freq_doc, min_docfreq, names = FALSE, type = 1)
        max_docfreq <- quantile(freq_doc, max_docfreq, names = FALSE, type = 1)
    } else if (docfreq_type == "rank") {
        if (is.null(min_docfreq))
            min_docfreq <- nfeat(x)
        if (is.null(max_docfreq))
            max_docfreq <- 1
        r <- rank(freq_doc * -1, ties.method = 'min')
        min_docfreq <- min(freq_doc[r <= min_docfreq])
        max_docfreq <- max(freq_doc[r >= max_docfreq])
    }
    # flag features outside the requested windows
    flag_min_term <- freq < min_termfreq
    flag_max_term <- freq > max_termfreq
    flag_min_doc <- freq_doc < min_docfreq
    flag_max_doc <- freq_doc > max_docfreq
    flag_all <- flag_min_term | flag_max_term | flag_min_doc | flag_max_doc
    # in case no features were removed as a result of filtering conditions
    if (!sum(flag_all)) {
        if (verbose) catm("No features removed.", appendLF = TRUE)
        return(x)
    }
    if (verbose) catm("Removing features occurring: ", appendLF = TRUE)
    # print messages about frequency count removal
    if (verbose && (sum(flag_min_term) || sum(flag_max_term))) {
        if (sum(flag_min_term)) {
            catm(" - fewer than ", min_termfreq, " time",
                 if (min_termfreq != 1L) "s" else "", ": ",
                 format(sum(flag_min_term), big.mark = ","),
                 sep = "", appendLF = TRUE)
        }
        if (sum(flag_max_term)) {
            catm(" - more than ", max_termfreq, " time",
                 if (max_termfreq != 1L) "s" else "", ": ",
                 format(sum(flag_max_term), big.mark = ","),
                 sep = "", appendLF = TRUE)
        }
    }
    # print messages about docfreq removal
    if (verbose && (sum(flag_min_doc) || sum(flag_max_doc))) {
        if (sum(flag_min_doc)) {
            catm(" - in fewer than ", min_docfreq, " document",
                 ifelse(min_docfreq != 1, "s", ""), ": ",
                 format(sum(flag_min_doc), big.mark = ","),
                 sep = "", appendLF = TRUE)
        }
        if (sum(flag_max_doc)) {
            catm(" - in more than ", max_docfreq, " document",
                 ifelse(max_docfreq != 1, "s", ""), ": ",
                 format(sum(flag_max_doc), big.mark = ","),
                 sep = "", appendLF = TRUE)
        }
    }
    if (verbose) {
        catm(" Total features removed: ", format(sum(flag_all), big.mark=","),
             " (",
             format(sum(flag_all) / nfeat(x) * 100, digits = 3, nsmall = 1),
             "%).",
             sep = "", appendLF = TRUE)
    }
    # drop the flagged features; documents are unchanged
    x[, !flag_all]
}
|
# bring in our custom functions
source('./functions.R')
# Static UI for the "Home" tab: a vertical stack of Bootstrap cards, each one
# describing a tab of the dashboard.  The tagList below is the value returned
# when this file is source()d by the main UI.
# FIX: removed the trailing commas that left empty trailing arguments in the
# div()/tagList() calls (only tolerated because rlang drops trailing empties),
# and corrected the "poisition"/"poisiton" typos in the displayed text.
tagList(
  div(class='d-flex flex-column mb-3',
      div(class='card px-0 d-flex flex-column',
          div(class='card-header',
              h4('Description')
          ),
          div(class='card-body justify-content-between px-3',
              p('This viz aims to utilize raw GPS sensor data from football matches & training sessions
                to provide insightful information with valuable findings for end users who are Physical
                Performance Coaches, Sports Scientist and Head Coaches. It also offers the user to
                interactively select a session by using the filters in the toggle options section')
          )
      )
  ),
  div(class='d-flex flex-column mb-3',
      div(class='card px-0 d-flex flex-column',
          div(class='card-header',
              h4('Player Stats')
          ),
          div(class='card-body justify-content-between px-3',
              p('This tab shows details of players distance covered, max speed, accelerations,
                decelerations and max heart rate during the selected session. Also shows plots of distance,
                heart rate and speed plotted over time')
          )
      )
  ),
  div(class='d-flex flex-column mb-3',
      div(class='card px-0 d-flex flex-column',
          div(class='card-header',
              h4('Positioning')
          ),
          div(class='card-body justify-content-between px-3',
              p('This tab displays details of the selected players position in terms of a
                heatmap plotted on a pitch during the session')
          )
      )
  ),
  div(class='d-flex flex-column mb-3',
      div(class='card px-0 d-flex flex-column',
          div(class='card-header',
              h4('Team Comparison')
          ),
          div(class='card-body justify-content-between px-3',
              p('This tab provides visibility for a team view in terms of distance travelled,
                average position, max speed and average heart rate for the selected session')
          )
      )
  )
)
| /Statsports_Dashboard/tab-home.R | no_license | fowlerthefox/statsports3 | R | false | false | 2,019 | r |
# bring in our custom functions
source('./functions.R')
# Static UI for the "Home" tab: a vertical stack of Bootstrap cards, each one
# describing a tab of the dashboard.  The tagList below is the value returned
# when this file is source()d by the main UI.
# FIX: removed the trailing commas that left empty trailing arguments in the
# div()/tagList() calls (only tolerated because rlang drops trailing empties),
# and corrected the "poisition"/"poisiton" typos in the displayed text.
tagList(
  div(class='d-flex flex-column mb-3',
      div(class='card px-0 d-flex flex-column',
          div(class='card-header',
              h4('Description')
          ),
          div(class='card-body justify-content-between px-3',
              p('This viz aims to utilize raw GPS sensor data from football matches & training sessions
                to provide insightful information with valuable findings for end users who are Physical
                Performance Coaches, Sports Scientist and Head Coaches. It also offers the user to
                interactively select a session by using the filters in the toggle options section')
          )
      )
  ),
  div(class='d-flex flex-column mb-3',
      div(class='card px-0 d-flex flex-column',
          div(class='card-header',
              h4('Player Stats')
          ),
          div(class='card-body justify-content-between px-3',
              p('This tab shows details of players distance covered, max speed, accelerations,
                decelerations and max heart rate during the selected session. Also shows plots of distance,
                heart rate and speed plotted over time')
          )
      )
  ),
  div(class='d-flex flex-column mb-3',
      div(class='card px-0 d-flex flex-column',
          div(class='card-header',
              h4('Positioning')
          ),
          div(class='card-body justify-content-between px-3',
              p('This tab displays details of the selected players position in terms of a
                heatmap plotted on a pitch during the session')
          )
      )
  ),
  div(class='d-flex flex-column mb-3',
      div(class='card px-0 d-flex flex-column',
          div(class='card-header',
              h4('Team Comparison')
          ),
          div(class='card-body justify-content-between px-3',
              p('This tab provides visibility for a team view in terms of distance travelled,
                average position, max speed and average heart rate for the selected session')
          )
      )
  )
)
|
lop.login = function(failed.fun=lop.failed.login, create.user.fun=NULL,...) {
  # Assemble the configuration list describing the login part: user-visible
  # labels, widget/button ids used by lop.login.ui, and the callback invoked
  # on a failed login attempt.  (create.user.fun and ... are accepted for
  # interface compatibility but are not stored here.)
  labels = list(
    login.title = "",
    userid.label = "user",
    password.label = "password",
    login.btn.label = "log in",
    signup.btn.label = "sign up",
    reset.btn.label = "forgot password",
    login.help = ""
  )
  ids = list(
    userid.inp = "loginUser",
    password.inp = "loginPassword",
    login.btn = "loginBtn",
    signup.btn = "loginSignupBtn",
    reset.btn = "loginResetBtn",
    alert = "loginAlert"
  )
  c(labels, ids, list(failed.fun = failed.fun))
}
lop.login.ui = function(lop,...) {
  # Build the shiny UI for the login part of `lop` and register the button
  # click handlers.  Returns the UI object (a wellPanel).
  # copy.into.env() injects the fields of lop$login (userid.inp, login.btn,
  # the *.label entries, ...) as local variables, which is why they are used
  # as bare names below.
  copy.into.env(source = lop$login)
  # selector over the two input widgets whose values are posted along with a
  # login button click
  sel = ids2sel(c(cid(userid.inp,lop),cid(password.inp,lop)))
  widgets = list(
    HTML(lop$login$login.title),
    textInput(cid(userid.inp,lop), userid.label, value = lop$init.userid),
    passwordInput(cid(password.inp,lop), password.label, value = lop$init.password),
    actionButton(cid(login.btn,lop), login.btn.label, "data-form-selector"=sel),
    actionButton(cid(signup.btn,lop), signup.btn.label),
    actionButton(cid(reset.btn,lop), reset.btn.label),
    uiOutput(cid(alert,lop)),
    HTML(lop$login$login.help)
  )
  ui = wellPanel(widgets)
  # clear any alert message left over from a previous attempt
  setUI(gid(alert,lop),"")
  # handlers must also work for users that are not yet logged in, hence
  # no.authentication.required = TRUE
  partButtonHandler(login.btn,pa=lop,lop.login.btn.click,lop=lop,no.authentication.required = TRUE)
  partButtonHandler(signup.btn,pa=lop,lop.signup.btn.click,lop=lop,no.authentication.required = TRUE)
  partButtonHandler(reset.btn,pa=lop,lop.reset.btn.click,lop=lop,no.authentication.required = TRUE)
  ui
}
lop.signup.btn.click = function(app=getApp(),lop,...) {
  # Handler for the "sign up" button: delegate to the user-supplied signup
  # callback, if one was configured on the login part; otherwise do nothing.
  handler = lop$signup.fun
  if (is.null(handler)) return(invisible(NULL))
  handler(lop=lop,...)
}
lop.reset.btn.click = function(app=getApp(),lop,...) {
  # Handler for the "forgot password" button: delegate to the user-supplied
  # reset callback, if one was configured on the login part; otherwise no-op.
  handler = lop$reset.fun
  if (is.null(handler)) return(invisible(NULL))
  handler(lop=lop,...)
}
lop.login.btn.click = function(app=getApp(),lop,formValues,...) {
  # Handler for the "log in" button: read the submitted credentials, check
  # them, and either mark the app as authenticated and call the success
  # callback, or invoke the configured failure callback with the reason.
  login = lop$login
  userid = tolower(formValues[[gid(login$userid.inp,lop)]])
  password = formValues[[gid(login$password.inp,lop)]]
  # FIX: never write the password to the server log; the original cat()
  # printed the plaintext password on every login attempt
  cat("\nlogin attempt by userid =", userid)
  res = lop.check.login(userid=userid,password = password, lop=lop)
  restore.point("lop.login.btn.click")
  if (res$ok==TRUE) {
    app$is.authenticated = TRUE
    lop$login.fun(userid=userid, password=password, lop=lop)
  } else {
    app$is.authenticated = FALSE
    login$failed.fun(userid=userid, password=password, msg=res$msg, lop=lop)
  }
}
lop.failed.login = function(app=getApp(),lop=get.lop(),msg,...) {
  # Default handler for a failed login attempt: render `msg` in the login
  # panel's alert area and note the failure on the server console.
  alert.id = gid(lop$login$alert, lop)
  show.html.warning(alert.id, msg)
  cat("\nlog-in failed: ",msg)
}
lop.check.login = function(userid, password, lop=get.lop()) {
  # Validate a login attempt.  Returns list(ok=, msg=): ok is TRUE only when
  # the user exists and the password matches the stored salted hash; msg
  # explains the reason for a failed check.
  restore.point("lop.check.login")
  if (nchar(userid)==0) {
    return(list(ok=FALSE, msg="No user name entered."))
  }
  user = lop.get.user(userid=userid, lop=lop)
  if (NROW(user)==0) {
    return(list(ok=FALSE, msg="User does not exist."))
  }
  # compare the supplied password against the stored salt + hash
  if (check.password(password=password, salt=user$salt, hash=user$hash)) {
    return(list(ok=TRUE, msg=""))
  }
  list(ok=FALSE, msg="Wrong password.")
}
check.email.domain = function(email, domain) {
  # Check that `email` ends with the required `domain`.
  # Returns list(ok=, msg=) where msg explains a failed check.
  # Uses base R endsWith() (R >= 3.3) instead of the external str.ends.with
  # helper; same semantics, one dependency fewer.
  ok = endsWith(email, domain)
  if (!ok) {
    return(list(ok=ok, msg=paste0("You can only create an account with an email that ends with ", domain)))
  }
  list(ok=ok, msg="")
}
| /R/login.r | no_license | skranz/loginPart | R | false | false | 3,414 | r |
lop.login = function(failed.fun=lop.failed.login, create.user.fun=NULL,...) {
  # Assemble the configuration list describing the login part: user-visible
  # labels, widget/button ids used by lop.login.ui, and the callback invoked
  # on a failed login attempt.  (create.user.fun and ... are accepted for
  # interface compatibility but are not stored here.)
  labels = list(
    login.title = "",
    userid.label = "user",
    password.label = "password",
    login.btn.label = "log in",
    signup.btn.label = "sign up",
    reset.btn.label = "forgot password",
    login.help = ""
  )
  ids = list(
    userid.inp = "loginUser",
    password.inp = "loginPassword",
    login.btn = "loginBtn",
    signup.btn = "loginSignupBtn",
    reset.btn = "loginResetBtn",
    alert = "loginAlert"
  )
  c(labels, ids, list(failed.fun = failed.fun))
}
lop.login.ui = function(lop,...) {
  # Build the shiny UI for the login part of `lop` and register the button
  # click handlers.  Returns the UI object (a wellPanel).
  # copy.into.env() injects the fields of lop$login (userid.inp, login.btn,
  # the *.label entries, ...) as local variables, which is why they are used
  # as bare names below.
  copy.into.env(source = lop$login)
  # selector over the two input widgets whose values are posted along with a
  # login button click
  sel = ids2sel(c(cid(userid.inp,lop),cid(password.inp,lop)))
  widgets = list(
    HTML(lop$login$login.title),
    textInput(cid(userid.inp,lop), userid.label, value = lop$init.userid),
    passwordInput(cid(password.inp,lop), password.label, value = lop$init.password),
    actionButton(cid(login.btn,lop), login.btn.label, "data-form-selector"=sel),
    actionButton(cid(signup.btn,lop), signup.btn.label),
    actionButton(cid(reset.btn,lop), reset.btn.label),
    uiOutput(cid(alert,lop)),
    HTML(lop$login$login.help)
  )
  ui = wellPanel(widgets)
  # clear any alert message left over from a previous attempt
  setUI(gid(alert,lop),"")
  # handlers must also work for users that are not yet logged in, hence
  # no.authentication.required = TRUE
  partButtonHandler(login.btn,pa=lop,lop.login.btn.click,lop=lop,no.authentication.required = TRUE)
  partButtonHandler(signup.btn,pa=lop,lop.signup.btn.click,lop=lop,no.authentication.required = TRUE)
  partButtonHandler(reset.btn,pa=lop,lop.reset.btn.click,lop=lop,no.authentication.required = TRUE)
  ui
}
lop.signup.btn.click = function(app=getApp(),lop,...) {
  # Handler for the "sign up" button: delegate to the user-supplied signup
  # callback, if one was configured on the login part; otherwise do nothing.
  handler = lop$signup.fun
  if (is.null(handler)) return(invisible(NULL))
  handler(lop=lop,...)
}
lop.reset.btn.click = function(app=getApp(),lop,...) {
  # Handler for the "forgot password" button: delegate to the user-supplied
  # reset callback, if one was configured on the login part; otherwise no-op.
  handler = lop$reset.fun
  if (is.null(handler)) return(invisible(NULL))
  handler(lop=lop,...)
}
lop.login.btn.click = function(app=getApp(),lop,formValues,...) {
  # Handler for the "log in" button: read the submitted credentials, check
  # them, and either mark the app as authenticated and call the success
  # callback, or invoke the configured failure callback with the reason.
  login = lop$login
  userid = tolower(formValues[[gid(login$userid.inp,lop)]])
  password = formValues[[gid(login$password.inp,lop)]]
  # FIX: never write the password to the server log; the original cat()
  # printed the plaintext password on every login attempt
  cat("\nlogin attempt by userid =", userid)
  res = lop.check.login(userid=userid,password = password, lop=lop)
  restore.point("lop.login.btn.click")
  if (res$ok==TRUE) {
    app$is.authenticated = TRUE
    lop$login.fun(userid=userid, password=password, lop=lop)
  } else {
    app$is.authenticated = FALSE
    login$failed.fun(userid=userid, password=password, msg=res$msg, lop=lop)
  }
}
lop.failed.login = function(app=getApp(),lop=get.lop(),msg,...) {
  # Default handler for a failed login attempt: render `msg` in the login
  # panel's alert area and note the failure on the server console.
  alert.id = gid(lop$login$alert, lop)
  show.html.warning(alert.id, msg)
  cat("\nlog-in failed: ",msg)
}
lop.check.login = function(userid, password, lop=get.lop()) {
restore.point("lop.check.login")
if (nchar(userid)==0)
return(list(ok=FALSE,msg="No user name entered."))
user = lop.get.user(userid=userid, lop=lop)
if (NROW(user)==0) {
return(list(ok=FALSE,msg="User does not exist."))
}
ok = check.password(password = password, salt=user$salt,hash=user$hash)
if (ok) {
return(list(ok=TRUE,msg=""))
}
return(list(ok=FALSE,msg="Wrong password."))
}
check.email.domain = function(email, domain) {
ok = str.ends.with(email, domain)
if (!ok) {
return(list(ok=ok, msg=paste0("You can only create an account with an email that ends with ", domain)))
}
return(list(ok=ok, msg=""))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Future-class.R
\name{getExpression}
\alias{getExpression}
\alias{getExpression.Future}
\title{Inject code for the next type of future to use for nested futures}
\usage{
getExpression(future, ...)
}
\arguments{
\item{future}{Current future.}
\item{\dots}{Not used.}
}
\value{
A future expression with code injected to set what
type of future to use for nested futures, iff any.
}
\description{
Inject code for the next type of future to use for nested futures
}
\details{
If no next future strategy is specified, the default is to
use \link{sequential} futures. This conservative approach protects
against spawning off recursive futures by mistake, especially
\link{multicore} and \link{multisession} ones.
The default will also set \code{options(mc.cores = 1L)} (*) so that
no parallel \R processes are spawned off by functions such as
\code{parallel::mclapply()} and friends.
Currently it is not possible to specify what type of nested
futures to be used, meaning the above default will always be
used.
See \href{https://github.com/HenrikBengtsson/future/issues/37}{Issue #37}
for plans on adding support for custom nested future types.
(*) Ideally we would set \code{mc.cores = 0} but that will unfortunately
cause \code{mclapply()} and friends to generate an error saying
"'mc.cores' must be >= 1". Ideally those functions should
fall back to using the non-multicore alternative in this
case, e.g. \code{mclapply(...)} => \code{lapply(...)}.
See \url{https://github.com/HenrikBengtsson/Wishlist-for-R/issues/7}
for a discussion on this.
}
\keyword{internal}
| /man/getExpression.Rd | no_license | HenrikBengtsson/future | R | false | true | 1,644 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Future-class.R
\name{getExpression}
\alias{getExpression}
\alias{getExpression.Future}
\title{Inject code for the next type of future to use for nested futures}
\usage{
getExpression(future, ...)
}
\arguments{
\item{future}{Current future.}
\item{\dots}{Not used.}
}
\value{
A future expression with code injected to set what
type of future to use for nested futures, iff any.
}
\description{
Inject code for the next type of future to use for nested futures
}
\details{
If no next future strategy is specified, the default is to
use \link{sequential} futures. This conservative approach protects
against spawning off recursive futures by mistake, especially
\link{multicore} and \link{multisession} ones.
The default will also set \code{options(mc.cores = 1L)} (*) so that
no parallel \R processes are spawned off by functions such as
\code{parallel::mclapply()} and friends.
Currently it is not possible to specify what type of nested
futures to be used, meaning the above default will always be
used.
See \href{https://github.com/HenrikBengtsson/future/issues/37}{Issue #37}
for plans on adding support for custom nested future types.
(*) Ideally we would set \code{mc.cores = 0} but that will unfortunately
cause \code{mclapply()} and friends to generate an error saying
"'mc.cores' must be >= 1". Ideally those functions should
fall back to using the non-multicore alternative in this
case, e.g. \code{mclapply(...)} => \code{lapply(...)}.
See \url{https://github.com/HenrikBengtsson/Wishlist-for-R/issues/7}
for a discussion on this.
}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.r
\name{basename_sans_ext}
\alias{basename_sans_ext}
\title{Basename without extension}
\usage{
basename_sans_ext(path)
}
\arguments{
\item{path}{a filepath}
}
\value{
basename without extension
}
\description{
Combination of [base::basename()] and [tools::file_path_sans_ext()].
}
\examples{
beter:::basename_sans_ext("folder/file.extension") # returns "file"
}
\keyword{internal}
| /man/basename_sans_ext.Rd | permissive | bioDS/beter | R | false | true | 467 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.r
\name{basename_sans_ext}
\alias{basename_sans_ext}
\title{Basename without extension}
\usage{
basename_sans_ext(path)
}
\arguments{
\item{path}{a filepath}
}
\value{
basename without extension
}
\description{
Combination of [base::basename()] and [tools::file_path_sans_ext()].
}
\examples{
beter:::basename_sans_ext("folder/file.extension") # returns "file"
}
\keyword{internal}
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
##makeCacheMatrix function stores a matrix
makeCacheMatrix <- function(x = matrix()) {
m<-NULL
set<-function(y){
x<<-y
m<<-NULL
}
get<-function() x
setmatrix<-function(solve) m<<- solve
getmatrix<-function() m
list(set=set, get=get,
setmatrix=setmatrix,
getmatrix=getmatrix)
}
## Write a short comment describing this function
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m<-x$getmatrix()
if(!is.null(m)){
message("getting cached data")
return(m)
}
matrix<-x$get()
m<-solve(matrix, ...)
x$setmatrix(m)
m
}
| /cachematrix.R | no_license | onecbautista/ProgrammingAssignment2 | R | false | false | 746 | r | ## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
##makeCacheMatrix function stores a matrix
makeCacheMatrix <- function(x = matrix()) {
m<-NULL
set<-function(y){
x<<-y
m<<-NULL
}
get<-function() x
setmatrix<-function(solve) m<<- solve
getmatrix<-function() m
list(set=set, get=get,
setmatrix=setmatrix,
getmatrix=getmatrix)
}
## Write a short comment describing this function
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
m<-x$getmatrix()
if(!is.null(m)){
message("getting cached data")
return(m)
}
matrix<-x$get()
m<-solve(matrix, ...)
x$setmatrix(m)
m
}
|
\name{getDefaultSArgs}
\alias{getDefaultSArgs}
\alias{getSArgs}
\title{Code/Variable generators for parameters in generated S functions}
\description{
These functions relate to generating the parameters
in S functions that interface to the C routines in the
.defs objects.
\code{getSArgs} returns the names of the S parameters.
\code{getDefaultSArgs} returns the code for
default values for each of the arguments (in the form \code{= value}).
}
\usage{
getDefaultSArgs(params, defs)
getSArgs(params)
}
\arguments{
\item{params}{the parameters from the description of the C routine}
\item{defs}{the top-level description of all of elements described in
the .defs file, i.e. classes, enums, flags, etc.
These are used to determine the type of the parameter
and the appropriate default values and
code to coerce S values to the appropriate type.}
}
\value{
A character vector whose length is equal to the
number of parameters in the C routine (given by \code{params}).
For \code{getSArgs}, this elements of the vector are the
names of the parameters.
For \code{getDefaultSArgs}, the elements are the S code
defining the default value or simply the empty string ("").
The names of the elements are the names of the parameters.
}
\references{\url{http://www.omegahat.net/RGtk/},
\url{http://www.omegahat.net/RGtkBindingGenerator},
\url{http://www.gtk.org}
\url{http://www.pygtk.org}(?)
}
\author{Duncan Temple Lang <duncan@research.bell-labs.com>}
\seealso{
\code{\link{collapseConstructors}}
\code{\link{genRCode}}
}
\examples{
}
\keyword{programming}
| /RGtkGen/man/getDefaultSArgs.Rd | no_license | statTarget/RGtk2 | R | false | false | 1,610 | rd | \name{getDefaultSArgs}
\alias{getDefaultSArgs}
\alias{getSArgs}
\title{Code/Variable generators for parameters in generated S functions}
\description{
These functions relate to generating the parameters
in S functions that interface to the C routines in the
.defs objects.
\code{getSArgs} returns the names of the S parameters.
\code{getDefaultSArgs} returns the code for
default values for each of the arguments (in the form \code{= value}).
}
\usage{
getDefaultSArgs(params, defs)
getSArgs(params)
}
\arguments{
\item{params}{the parameters from the description of the C routine}
\item{defs}{the top-level description of all of elements described in
the .defs file, i.e. classes, enums, flags, etc.
These are used to determine the type of the parameter
and the appropriate default values and
code to coerce S values to the appropriate type.}
}
\value{
A character vector whose length is equal to the
number of parameters in the C routine (given by \code{params}).
For \code{getSArgs}, this elements of the vector are the
names of the parameters.
For \code{getDefaultSArgs}, the elements are the S code
defining the default value or simply the empty string ("").
The names of the elements are the names of the parameters.
}
\references{\url{http://www.omegahat.net/RGtk/},
\url{http://www.omegahat.net/RGtkBindingGenerator},
\url{http://www.gtk.org}
\url{http://www.pygtk.org}(?)
}
\author{Duncan Temple Lang <duncan@research.bell-labs.com>}
\seealso{
\code{\link{collapseConstructors}}
\code{\link{genRCode}}
}
\examples{
}
\keyword{programming}
|
sampleIMAGES <- function() {
# sampleIMAGES
# Returns 10000 patches for training
#load IMAGES; # load images from disk
IMAGES <- readMat("IMAGES.mat")
IMAGES <- IMAGES$IMAGES #512x512x10 matrix
patchsize = 8; # we'll use 8x8 patches
numpatches = 10000;
# Initialize patches with zeros. Your code will fill in this matrix--one
# column per patch, 10000 columns.
# patches = zeros(patchsize*patchsize, numpatches);
patches = matrix(0,patchsize*patchsize, numpatches);
## ---------- YOUR CODE HERE --------------------------------------
# Instructions: Fill in the variable called "patches" using data
# from IMAGES.
#
# IMAGES is a 3D array containing 10 images
# For instance, IMAGES(:,:,6) is a 512x512 array containing the 6th image,
# and you can type "imagesc(IMAGES(:,:,6)), colormap gray;" to visualize
# it. (The contrast on these images look a bit off because they have
# been preprocessed using using "whitening." See the lecture notes for
# more details.) As a second example, IMAGES(21:30,21:30,1) is an image
# patch corresponding to the pixels in the block (21,21) to (30,30) of
# Image 1
set.seed(123)
patches <- apply(patches, 2, function(x){
nimage <- sample(1:10,1)
nx <- sample(1:505,1)
ny <- sample(1:505,1)
img <- IMAGES[nx:(nx+patchsize-1), ny:(ny+patchsize-1), nimage]
return(as.vector(img))
})
## ---------------------------------------------------------------
# For the autoencoder to work well we need to normalize the data
# Specifically, since the output of the network is bounded between [0,1]
# (due to the sigmoid activation function), we have to make sure
# the range of pixel values is also bounded between [0,1]
return(normalizeData(patches));
}
## ---------------------------------------------------------------
normalizeData <- function(patches) {
# Squash data to [0.1, 0.9] since we use sigmoid as the activation
# function in the output layer
# Remove DC (mean of images).
patches <- patches - mean(patches);
# Truncate to +/-3 standard deviations and scale to -1 to 1
pstd = 3 * sd(patches);
patches <- apply(patches, c(1,2), function(x){
max(min(x, pstd), -pstd) / pstd;
})
# Rescale from [-1,1] to [0.1,0.9]
return((patches + 1) * 0.4 + 0.1);
}
| /1_Encoder/sampleIMAGES.R | no_license | Sandy4321/Deep2 | R | false | false | 2,478 | r | sampleIMAGES <- function() {
# sampleIMAGES
# Returns 10000 patches for training
#load IMAGES; # load images from disk
IMAGES <- readMat("IMAGES.mat")
IMAGES <- IMAGES$IMAGES #512x512x10 matrix
patchsize = 8; # we'll use 8x8 patches
numpatches = 10000;
# Initialize patches with zeros. Your code will fill in this matrix--one
# column per patch, 10000 columns.
# patches = zeros(patchsize*patchsize, numpatches);
patches = matrix(0,patchsize*patchsize, numpatches);
## ---------- YOUR CODE HERE --------------------------------------
# Instructions: Fill in the variable called "patches" using data
# from IMAGES.
#
# IMAGES is a 3D array containing 10 images
# For instance, IMAGES(:,:,6) is a 512x512 array containing the 6th image,
# and you can type "imagesc(IMAGES(:,:,6)), colormap gray;" to visualize
# it. (The contrast on these images look a bit off because they have
# been preprocessed using using "whitening." See the lecture notes for
# more details.) As a second example, IMAGES(21:30,21:30,1) is an image
# patch corresponding to the pixels in the block (21,21) to (30,30) of
# Image 1
set.seed(123)
patches <- apply(patches, 2, function(x){
nimage <- sample(1:10,1)
nx <- sample(1:505,1)
ny <- sample(1:505,1)
img <- IMAGES[nx:(nx+patchsize-1), ny:(ny+patchsize-1), nimage]
return(as.vector(img))
})
## ---------------------------------------------------------------
# For the autoencoder to work well we need to normalize the data
# Specifically, since the output of the network is bounded between [0,1]
# (due to the sigmoid activation function), we have to make sure
# the range of pixel values is also bounded between [0,1]
return(normalizeData(patches));
}
## ---------------------------------------------------------------
normalizeData <- function(patches) {
# Squash data to [0.1, 0.9] since we use sigmoid as the activation
# function in the output layer
# Remove DC (mean of images).
patches <- patches - mean(patches);
# Truncate to +/-3 standard deviations and scale to -1 to 1
pstd = 3 * sd(patches);
patches <- apply(patches, c(1,2), function(x){
max(min(x, pstd), -pstd) / pstd;
})
# Rescale from [-1,1] to [0.1,0.9]
return((patches + 1) * 0.4 + 0.1);
}
|
library(shotGroups)
### Name: combineData
### Title: Combine list of data frames into one
### Aliases: combineData
### ** Examples
## combine list of data frames to one single data frame
data(DFlistCm)
DFcm <- combineData(DFlistCm)
str(DFcm)
head(DFcm)
| /data/genthat_extracted_code/shotGroups/examples/combineData.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 260 | r | library(shotGroups)
### Name: combineData
### Title: Combine list of data frames into one
### Aliases: combineData
### ** Examples
## combine list of data frames to one single data frame
data(DFlistCm)
DFcm <- combineData(DFlistCm)
str(DFcm)
head(DFcm)
|
# You'll have to test some of these parameters to get the best smooth
# for your data :)
# Some data - "cars" is a built in dataset
x <- cars$speed
y <- cars$dist
# jagged line:
plot(x,y, type='l', lwd=2, col='red')
# smooth - "span" is the degree of smoothing - you'll need to test this out
# on your data.
sm <- loess(y~x, span=0.35)
# Use this to interpolate a bunch more points if you don't get a smooth enough line
xl <- seq(min(x),max(x), (max(x) - min(x))/1000)
# OR
smoothSpline <- smooth.spline(x,y,spar=0.40)
#pr <- predict(sm, se=T)
# plot w/ points first
plot(x,y)
# loess smooth (generally works better with more data than I have here - try it out)
lines(predict(sm), col="red", lwd=2)
# smoother loess
lines(xl, predict(sm, xl), col="green", lwd=2)
# smoothSpline
lines(smoothSpline, col="blue", lwd=2)
#####
# Now with ggplot
library(ggplot2)
# The "se=F" in geom_smooth means to NOT plot the confidence interval of the smoothed fit.
# You can simply remove it if you want to see the confidence interval.
p <- ggplot(cars, aes(x=speed, y=dist)) + geom_point() + geom_smooth(se=F)
p
| /R/SomeLineSmoothingCode.R | no_license | tacormier/general | R | false | false | 1,107 | r | # You'll have to test some of these parameters to get the best smooth
# for your data :)
# Some data - "cars" is a built in dataset
x <- cars$speed
y <- cars$dist
# jagged line:
plot(x,y, type='l', lwd=2, col='red')
# smooth - "span" is the degree of smoothing - you'll need to test this out
# on your data.
sm <- loess(y~x, span=0.35)
# Use this to interpolate a bunch more points if you don't get a smooth enough line
xl <- seq(min(x),max(x), (max(x) - min(x))/1000)
# OR
smoothSpline <- smooth.spline(x,y,spar=0.40)
#pr <- predict(sm, se=T)
# plot w/ points first
plot(x,y)
# loess smooth (generally works better with more data than I have here - try it out)
lines(predict(sm), col="red", lwd=2)
# smoother loess
lines(xl, predict(sm, xl), col="green", lwd=2)
# smoothSpline
lines(smoothSpline, col="blue", lwd=2)
#####
# Now with ggplot
library(ggplot2)
# The "se=F" in geom_smooth means to NOT plot the confidence interval of the smoothed fit.
# You can simply remove it if you want to see the confidence interval.
p <- ggplot(cars, aes(x=speed, y=dist)) + geom_point() + geom_smooth(se=F)
p
|
library(ggplot2)
library(tidyr)
library(dplR)
library(forecast)
library(imputeTS)
library(dlm)
library(itsmr)
library(MASS)
#*******************************************************************************
#*******************************************************************************
# Preparing the time series
#*******************************************************************************
#*******************************************************************************
raw_rwl <- readRDS("data/rwl_900+.Rds")
depth_function <- function(x){
sum(!is.na(x))
}
rwl_mean <- apply(raw_rwl,1,mean,na.rm=T)
rwl_sd <- apply(raw_rwl,1,sd,na.rm=T)
rwl_depth <- apply(rwl_df, 1, depth_function)
rwl_ts <- ts(rwl_mean,end=2017)
rwl_mean_ts <- ts(rwl_mean,end=2017)
rwl_sd_ts <- ts(rwl_sd,end=2017)
rwl_depth_ts <- ts(rwl_depth,end=2017)
year <- seq(start(rwl_ts)[1],end(rwl_ts)[1])
seriesDepth <- as.data.frame(cbind(year,rwl_depth))
seriesMean <- as.data.frame(cbind(year,rwl_mean))
# Exploratory plots
seriesDepthPlot <- ggplot(seriesDepth, aes(x=year,y=rwl_depth)) +
geom_line() +
ylab("Sample depth [#]") +
xlab("Time") +
theme(aspect.ratio = 0.618)
seriesMeanPlot <- ggplot(seriesMean, aes(x=year,y=rwl_mean)) +
geom_line() +
ylab("Mean ringwidth [1/100 mm]") +
xlab("Time") +
theme(aspect.ratio = 0.618)
# Showing heteroskedasticity
heterosk <- lm(seriesMean$rwl_mean~seriesMean$year)
seriesHeterosk <- as.data.frame(cbind(year,heterosk$residuals))
seriesHeteroskedasticityPlot <- ggplot(seriesHeterosk, aes(x=year,y=V2)) +
geom_line() +
ylab("Residuals [1/100 mm]") +
xlab("Time") +
theme(aspect.ratio = 0.618)
#*******************************************************************************
# Restrict series to 1400 - 1800
spruce_window <- window(rwl_ts,start=1400, end=1800)
rwl_depth_window <- window(rwl_depth_ts,start=1400, end=1800)
rwl_mean_window <- window(rwl_mean_ts,start=1400, end=1800)
rwl_sd_window <- window(rwl_sd_ts,start=1400, end=1800)
t <- 1400:1800
# Literature
# New Zealand Journal of Ecology (1990) 13: 9-15 (https://newzealandecology.org/nzje/1872)
#*******************************************************************************
#*******************************************************************************
# Best approach: Scaling, log-transform, linear trend removal
#*******************************************************************************
#*******************************************************************************
spruce_window_stab <- spruce_window/rwl_sd_window
spruce_window_log <- log(spruce_window_stab)
order1_model <- lm(spruce_window_log ~ t)
summary(order1_model)
order1_exog_model <- lm(log(spruce_window_stab)~t+rwl_depth_window)
order1_exog_model
data_log_order1_varstab <- as.data.frame(cbind(Time=t, y=order1_model$residuals,model=rep("Log trans., order 1",times=length(t))))
data_log_order1_varstab$Time <- as.numeric(levels(data_log_order1_varstab$Time)[data_log_order1_varstab$Time])
data_log_order1_varstab$y <- as.numeric(levels(data_log_order1_varstab$y)[data_log_order1_varstab$y])
#*******************************************************************************
#*******************************************************************************
# Woollons approach: Discarded
#*******************************************************************************
#*******************************************************************************
# Method 1: Proposed in Woollons and Norton
#-------------------------------------------------------------------------------
# Warren (1980) (cited in Woollons and Norton, 1990) propose
# to estimate a trend of the form:
# Y = alpha*t^(beta)*exp(delta*t)
# This approach is nice because, if we
# take the ln on both sides:
# ln(Y) = ln(alpha) + beta*ln(t) + delta*t
# we get something linear in ln(t)
log_spruce_window <- log(spruce_window)
log_t <- log(t)
d1 <- cbind(log_spruce_window, log_t, t)
m1 <- lm(log_spruce_window~log_t+t, data=d1)
summary(m1)
# Fitted values
app_1_trend_1_fit<- exp(m1$fitted.values)
# ggplot
d_ggplot_1 <- cbind(Time=t, y1=spruce_window, y2=app_1_trend_1_fit)
d_ggplot_1 <- as.data.frame(d_ggplot_1)
pplot <- ggplot(d_ggplot_1, aes(x=Time))
pplot + geom_line(aes(y=y1), color="blue") +
geom_line(aes(y=y2), color="black") +
ggtitle("Estimated mean by the method of Warren (1980)") +
theme(plot.title = element_text(hjust=0.5)) +
xlab("Time") + ylab("Tree ring width (1/100 mm)")
# Method 2: Polynomial of order 2
#*******************************************************************************
t2 <- t^2
d2 <- cbind(d1,t2)
d2 <- cbind(spruce_window, d2)
colnames(d2) <- c("y", "ln_y", "ln_t", "t", "t2")
head(d2)
m2 <- lm(y ~ t+t2,data=d2)
summary(m2)
app_1_trend_2_fit <- m2$fitted.values
# ggplot
d_ggplot_2 <- cbind(Time=t, y1=spruce_window, y2=app_1_trend_1_fit, y3=app_1_trend_2_fit)
d_ggplot_2 <- as.data.frame(d_ggplot_2)
data_p_order2 <- as.data.frame(cbind(Time=t, y=app_1_trend_2_fit,model=rep("Trend Polynomial order 2",times=length(t))))
d_ggplot_2_gathered <- d_ggplot_2 %>% gather(y, values, y2:y3)
colnames(d_ggplot_2_gathered) <- c("Time", "Width", "Est_Mean_Method", "Values")
str(d_ggplot_2_gathered)
pplot <- ggplot(d_ggplot_2_gathered, aes(x=Time))
pplot + geom_line(aes(y=Values, group=Est_Mean_Method, color=factor(Est_Mean_Method, labels=c("Warren (1980)", "Polynomial of order 2")),
linetype=factor(Est_Mean_Method, labels=c("Warren (1980)", "Polynomial of order 2")))) +
geom_line(aes(y=Width), color="blue") +
scale_color_manual(values=c("black", "green")) +
ggtitle("Estimated means by the method of Warren (1980) \n and by a polynomial of order 2") +
theme(plot.title = element_text(hjust=0.5), legend.position = "top") +
xlab("Time") + ylab("Tree ring width (1/100 mm)") +
labs(color = "Methods") +
labs(linetype= "Methods")
# Variance Stabilisation
#*******************************************************************************
# Residual time series
#-------------------------------------------------------------------------------
m1_res <- spruce_window - app_1_trend_1_fit
m2_res <- spruce_window - app_1_trend_2_fit
# ggplot
d_ggplot_3 <- cbind(Time=t, y1=m1_res, y2=m2_res)
d_ggplot_3 <- as.data.frame(d_ggplot_3)
str(d_ggplot_3)
d_ggplot_3_gathered <- d_ggplot_3 %>% gather(y, values, y1:y2)
str(d_ggplot_3_gathered)
# Legend manipulations
# https://rpubs.com/hughes/10012
pplot <- ggplot(d_ggplot_3_gathered, aes(x=Time))
pplot + geom_line(aes(y=values, group=y, color=factor(y, labels=c("Warren (1980)", "Polynomial of order 2")),
linetype=factor(y, labels=c("Warren (1980)", "Polynomial of order 2")))) +
scale_color_manual(values=c("black", "green")) +
ggtitle('Residual time series') +
theme(plot.title = element_text(hjust=0.5), legend.position = "top") +
xlab("Time") + ylab("Tree ring width (1/100 mm)") +
labs(color = "Methods") +
labs(linetype="Methods")
# Residual transformations
#-------------------------------------------------------------------------------
# Method 1: Proposed transformation in Wollons and Norton (1990) for both methods
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
m1_i <- m1_res/app_1_trend_1_fit # Warren (1980)
m2_i <- m2$residuals/m2$fitted.values # Polynomial of order 2
temp <- cbind(Time=t, y=m1_i,model=rep("Woollons",times=length(t)))
data_woollons <- as.data.frame(cbind(Time=t, y=m1_i,model=rep("Woollons",times=length(t))))
data_p_order2 <- as.data.frame(cbind(Time=t, y=m2_i,model=rep("Polynomial order 2",times=length(t))))
# data for ggplot
d_ggplot_4 <- cbind(Time=t, y1=m1_i, y2=m2_i)
d_ggplot_4 <- as.data.frame(d_ggplot_4)
d_ggplot_4_gathered <- d_ggplot_4 %>% gather(y, values, y1:y2)
# Acf and Pacf
# For the acf and pacf plots the transformed residuals calculated with the method of Warren (1980)
# are used.
ggAcf(m1_i, main="ACF plot of the transformed residuals after having detrended the series with the method \n of Warren (1980)") # bold font does not work
ggPacf(m1_i, main="PACF plot of the transformed residuals after having detrended the series with the method \n of Warren (1980)")
# Method 2: Stabilisation by using SD of yearly observations (raw data)
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Stabilisation: (sample mean - \mu)/sd(sample mean)
# \mu was estimated using Woollons and Norton
stab_sd_ts <- (rwl_mean_window - app_1_trend_1_fit)/(sqrt(rwl_depth_window)^(-1)*rwl_sd_window)
plot(stab_sd_ts)
d_ggplot_5 <- cbind(Time=t, y=stab_sd_ts)
d_ggplot_5 <- as.data.frame(d_ggplot_5)
data_woollons_sd_stab <- as.data.frame(cbind(Time=t, y=stab_sd_ts,model=rep("Woollons SD stab.",times=length(t))))
data_woollons_sd_stab$Time <- as.numeric(levels(data_woollons_sd_stab$Time)[data_woollons_sd_stab$Time])
data_woollons_sd_stab$y <- as.numeric(levels(data_woollons_sd_stab$y)[data_woollons_sd_stab$y])
#*******************************************************************************
#*******************************************************************************
# Approach: Power transformation of the residuals (by hand) -> DISCARDED
#*******************************************************************************
#*******************************************************************************
eps <- 1/10000 # To prevent log from getting -inf
R <- spruce_window
local_mean <- (R[1:(length(R)-1)] + R[2:length(R)])/2
local_sd <- abs(diff(R)) + eps
log_local_mean <- log(local_mean)
log_local_sd <- log(local_sd)
powt_df <- as.data.frame(cbind(log_S=log_local_sd, log_M=log_local_mean))
str(powt_df)
# linear model
m <- lm(log_S ~ log_M, data=powt_df)
summary(m)
b <- m$coef[2]
R_transformed <- R^(1-b)
data_powertrans <- as.data.frame(cbind(Time=t, y=R_transformed,method=rep("Power transformed",times=length(t))))
plot(y=R_transformed, x=1:length(R_transformed), type="l")
# Time series still has a trend
R_transformed_df <- data.frame(Width_trans=R_transformed, Time=t)
m <- lm(Width_trans~., data=R_transformed_df)
summary(m)
R_transformed_centered <- m$residuals
data_powertrans_centered <- as.data.frame(cbind(Time=t, y=R_transformed_centered,method=rep("Power trans. centered",times=length(t))))
acf(R_transformed_centered)
pacf(R_transformed_centered) # => AR(2)
arima(x=R_transformed_centered, c(2,0,0), order=c(0,0,0),method="ML")
# ggplot
# transformed time series
R_transformed
d_ggplot_6<- cbind(Time=t, y1=R_transformed, y2=R_transformed_centered)
d_ggplot_6<- as.data.frame(d_ggplot_6)
str(d_ggplot_5)
d_ggplot_5_gathered <- d_ggplot_6%>% gather(y, Values, y1:y2)
pplot <- ggplot(d_ggplot_5_gathered, aes(x=Time))
pplot + geom_line(aes(y=Values, group=y,
color=factor(y, labels=c("After power transformation", "After power transformation and linear trend")))) +
scale_color_manual(values=c("blue", "green")) +
ggtitle('Residual time series') +
theme(plot.title = element_text(hjust=0.5), legend.position = "top") +
xlab("Time") + ylab("Tree ring width (1/100 mm)") +
labs(color = "Methods") +
labs(linetype="Methods")
ggAcf(R_transformed_centered, main="ACF plot after a power transformation and subtracting a linear trend")
ggPacf(R_transformed_centered, main="PACF plot after a power transformation and subtracting a linear trend")
#*******************************************************************************
#*******************************************************************************
# Approach: Box-Cox transformation -> DISCARDED
#*******************************************************************************
#*******************************************************************************
# Box-Cox-Transform:
lambdas <- boxcox(spruce_window~t)
l <- lambdas$x[which.max(lambdas$y)] # this is the MLE lambda to transform data
spruce_window_boxcox <- (spruce_window^l-1)/l-mean((spruce_window^l-1)/l) # Box-Cox transformation
plotc(spruce_window_boxcox)
data_boxcox <- as.data.frame(cbind(Time=t, y=spruce_window_boxcox,method=rep("Box-Cox transform",times=length(t))))
acf(spruce_window_boxcox)
pacf(spruce_window_boxcox)
#*******************************************************************************
#*******************************************************************************
# Overview plots -> report
#*******************************************************************************
#*******************************************************************************
# woollons
stationarity_woollons_plot <- ggplot(data_woollons_sd_stab,aes(x=Time,y=y)) +
geom_line(size=0.3) +
ylab("")
# power transformation & box-cox
data_powerbox <- rbind(data_powertrans_centered,
data_boxcox)
data_powerbox$Time <- as.numeric(levels(data_powerbox$Time)[data_powerbox$Time])
data_powerbox$y <- as.numeric(levels(data_powerbox$y)[data_powerbox$y])
stationarity_powerbox_plot <- ggplot(data_powerbox,aes(x=Time,y=y,colour=method)) +
geom_line(size=0.3) +
ylab("") +
theme(legend.justification=c(1,1), legend.position=c(1,1))
stationaritylog_order1_plot <- ggplot(data_log_order1_varstab,aes(x=Time,y=y)) +
geom_hline(yintercept = 0,size=0.3) +
geom_line(size=0.3) +
ylab("Scaled, log tranformed values [-]")
stationarity_qq_plot <- ggplot(data_log_order1_varstab, aes(sample = y)) +
geom_qq(alpha=0.5) + stat_qq_line(size=0.3)
| /analysis/make_series_stationary.R | no_license | datarian/time-series-final-project | R | false | false | 13,459 | r | library(ggplot2)
library(tidyr)
library(dplR)
library(forecast)
library(imputeTS)
library(dlm)
library(itsmr)
library(MASS)
#*******************************************************************************
#*******************************************************************************
# Preparing the time series
#*******************************************************************************
#*******************************************************************************
raw_rwl <- readRDS("data/rwl_900+.Rds")
depth_function <- function(x){
sum(!is.na(x))
}
rwl_mean <- apply(raw_rwl,1,mean,na.rm=T)
rwl_sd <- apply(raw_rwl,1,sd,na.rm=T)
rwl_depth <- apply(rwl_df, 1, depth_function)
rwl_ts <- ts(rwl_mean,end=2017)
rwl_mean_ts <- ts(rwl_mean,end=2017)
rwl_sd_ts <- ts(rwl_sd,end=2017)
rwl_depth_ts <- ts(rwl_depth,end=2017)
year <- seq(start(rwl_ts)[1],end(rwl_ts)[1])
seriesDepth <- as.data.frame(cbind(year,rwl_depth))
seriesMean <- as.data.frame(cbind(year,rwl_mean))
# Exploratory plots: sample depth and mean ring width over the full record.
seriesDepthPlot <- ggplot(seriesDepth, aes(x=year,y=rwl_depth)) +
geom_line() +
ylab("Sample depth [#]") +
xlab("Time") +
theme(aspect.ratio = 0.618)
seriesMeanPlot <- ggplot(seriesMean, aes(x=year,y=rwl_mean)) +
geom_line() +
ylab("Mean ringwidth [1/100 mm]") +
xlab("Time") +
theme(aspect.ratio = 0.618)
# Showing heteroskedasticity: regress the mean ring width on the year and
# plot the residuals; their changing spread over time motivates the variance
# stabilisation applied later in this script.
heterosk <- lm(seriesMean$rwl_mean~seriesMean$year)
# The unnamed residual column becomes "V2" after as.data.frame(), hence aes(y=V2).
seriesHeterosk <- as.data.frame(cbind(year,heterosk$residuals))
seriesHeteroskedasticityPlot <- ggplot(seriesHeterosk, aes(x=year,y=V2)) +
geom_line() +
ylab("Residuals [1/100 mm]") +
xlab("Time") +
theme(aspect.ratio = 0.618)
#*******************************************************************************
# Restrict series to 1400 - 1800; all further analysis works on this window.
spruce_window <- window(rwl_ts,start=1400, end=1800)
rwl_depth_window <- window(rwl_depth_ts,start=1400, end=1800)
rwl_mean_window <- window(rwl_mean_ts,start=1400, end=1800)
rwl_sd_window <- window(rwl_sd_ts,start=1400, end=1800)
# Calendar years of the window. Note this shadows base::t() when used as a
# variable (function calls t(...) still resolve to the transpose function).
t <- 1400:1800
# Literature
# New Zealand Journal of Ecology (1990) 13: 9-15 (https://newzealandecology.org/nzje/1872)
#*******************************************************************************
#*******************************************************************************
# Best approach: Scaling, log-transform, linear trend removal
#*******************************************************************************
#*******************************************************************************
# Stabilise the variance by scaling with the per-year SD, take logs, then
# remove a linear (order-1) time trend; the lm residuals are the candidate
# stationary series used for the rest of the analysis.
spruce_window_stab <- spruce_window/rwl_sd_window
spruce_window_log <- log(spruce_window_stab)
order1_model <- lm(spruce_window_log ~ t)
summary(order1_model)
# Variant with sample depth as an additional exogenous regressor (inspection only).
order1_exog_model <- lm(log(spruce_window_stab)~t+rwl_depth_window)
order1_exog_model
# cbind() with the character "model" column coerces everything to character;
# the two lines below then recover numeric Time/y via levels(), which assumes
# the columns came back as factors (pre-R 4.0 stringsAsFactors behaviour --
# NOTE(review): confirm this still works under R >= 4.0).
data_log_order1_varstab <- as.data.frame(cbind(Time=t, y=order1_model$residuals,model=rep("Log trans., order 1",times=length(t))))
data_log_order1_varstab$Time <- as.numeric(levels(data_log_order1_varstab$Time)[data_log_order1_varstab$Time])
data_log_order1_varstab$y <- as.numeric(levels(data_log_order1_varstab$y)[data_log_order1_varstab$y])
#*******************************************************************************
#*******************************************************************************
# Woollons approach: Discarded
#*******************************************************************************
#*******************************************************************************
# Method 1: Proposed in Woollons and Norton
#-------------------------------------------------------------------------------
# Warren (1980) (cited in Woollons and Norton, 1990) propose
# to estimate a trend of the form:
# Y = alpha*t^(beta)*exp(delta*t)
# This approach is nice because, if we
# take the ln on both sides:
# ln(Y) = ln(alpha) + beta*ln(t) + delta*t
# we get something linear in ln(t)
# Fit the Warren (1980) trend on the log scale: ln(Y) ~ ln(t) + t.
log_spruce_window <- log(spruce_window)
log_t <- log(t)
d1 <- cbind(log_spruce_window, log_t, t)
# NOTE(review): d1 is a matrix, not a data.frame; lm() may reject it as
# `data` -- the same variables are also visible in the global environment.
# Confirm this runs as intended.
m1 <- lm(log_spruce_window~log_t+t, data=d1)
summary(m1)
# Fitted values, transformed back from the log scale
app_1_trend_1_fit<- exp(m1$fitted.values)
# ggplot: observed series (blue) vs. the fitted Warren trend (black)
d_ggplot_1 <- cbind(Time=t, y1=spruce_window, y2=app_1_trend_1_fit)
d_ggplot_1 <- as.data.frame(d_ggplot_1)
pplot <- ggplot(d_ggplot_1, aes(x=Time))
pplot + geom_line(aes(y=y1), color="blue") +
geom_line(aes(y=y2), color="black") +
ggtitle("Estimated mean by the method of Warren (1980)") +
theme(plot.title = element_text(hjust=0.5)) +
xlab("Time") + ylab("Tree ring width (1/100 mm)")
# Method 2: Polynomial of order 2 in time as the trend estimate.
#*******************************************************************************
t2 <- t^2
d2 <- cbind(d1,t2)
d2 <- cbind(spruce_window, d2)
colnames(d2) <- c("y", "ln_y", "ln_t", "t", "t2")
head(d2)
# Quadratic trend in time. NOTE(review): d2 is a matrix; see the note on m1
# about passing a matrix as lm()'s `data` argument.
m2 <- lm(y ~ t+t2,data=d2)
summary(m2)
app_1_trend_2_fit <- m2$fitted.values
# ggplot: compare both estimated trends against the observed series.
d_ggplot_2 <- cbind(Time=t, y1=spruce_window, y2=app_1_trend_1_fit, y3=app_1_trend_2_fit)
d_ggplot_2 <- as.data.frame(d_ggplot_2)
# NOTE(review): this data_p_order2 is re-assigned in the Woollons section
# below, so the value built here is never used.
data_p_order2 <- as.data.frame(cbind(Time=t, y=app_1_trend_2_fit,model=rep("Trend Polynomial order 2",times=length(t))))
d_ggplot_2_gathered <- d_ggplot_2 %>% gather(y, values, y2:y3)
colnames(d_ggplot_2_gathered) <- c("Time", "Width", "Est_Mean_Method", "Values")
str(d_ggplot_2_gathered)
pplot <- ggplot(d_ggplot_2_gathered, aes(x=Time))
pplot + geom_line(aes(y=Values, group=Est_Mean_Method, color=factor(Est_Mean_Method, labels=c("Warren (1980)", "Polynomial of order 2")),
linetype=factor(Est_Mean_Method, labels=c("Warren (1980)", "Polynomial of order 2")))) +
geom_line(aes(y=Width), color="blue") +
scale_color_manual(values=c("black", "green")) +
ggtitle("Estimated means by the method of Warren (1980) \n and by a polynomial of order 2") +
theme(plot.title = element_text(hjust=0.5), legend.position = "top") +
xlab("Time") + ylab("Tree ring width (1/100 mm)") +
labs(color = "Methods") +
labs(linetype= "Methods")
# Variance Stabilisation
#*******************************************************************************
# Residual time series: observations minus each of the two fitted trends.
#-------------------------------------------------------------------------------
m1_res <- spruce_window - app_1_trend_1_fit
m2_res <- spruce_window - app_1_trend_2_fit
# ggplot: both residual series overlaid, coloured by detrending method.
d_ggplot_3 <- cbind(Time=t, y1=m1_res, y2=m2_res)
d_ggplot_3 <- as.data.frame(d_ggplot_3)
str(d_ggplot_3)
d_ggplot_3_gathered <- d_ggplot_3 %>% gather(y, values, y1:y2)
str(d_ggplot_3_gathered)
# Legend manipulations
# https://rpubs.com/hughes/10012
pplot <- ggplot(d_ggplot_3_gathered, aes(x=Time))
pplot + geom_line(aes(y=values, group=y, color=factor(y, labels=c("Warren (1980)", "Polynomial of order 2")),
linetype=factor(y, labels=c("Warren (1980)", "Polynomial of order 2")))) +
scale_color_manual(values=c("black", "green")) +
ggtitle('Residual time series') +
theme(plot.title = element_text(hjust=0.5), legend.position = "top") +
xlab("Time") + ylab("Tree ring width (1/100 mm)") +
labs(color = "Methods") +
labs(linetype="Methods")
# Residual transformations
#-------------------------------------------------------------------------------
# Method 1: Proposed transformation in Wollons and Norton (1990) for both methods
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Relative residuals: residual divided by the fitted trend value.
m1_i <- m1_res/app_1_trend_1_fit # Warren (1980)
m2_i <- m2$residuals/m2$fitted.values # Polynomial of order 2
# NOTE(review): `temp` appears unused in the remainder of this script.
temp <- cbind(Time=t, y=m1_i,model=rep("Woollons",times=length(t)))
data_woollons <- as.data.frame(cbind(Time=t, y=m1_i,model=rep("Woollons",times=length(t))))
data_p_order2 <- as.data.frame(cbind(Time=t, y=m2_i,model=rep("Polynomial order 2",times=length(t))))
# data for ggplot
d_ggplot_4 <- cbind(Time=t, y1=m1_i, y2=m2_i)
d_ggplot_4 <- as.data.frame(d_ggplot_4)
d_ggplot_4_gathered <- d_ggplot_4 %>% gather(y, values, y1:y2)
# Acf and Pacf
# For the acf and pacf plots the transformed residuals calculated with the method of Warren (1980)
# are used.
ggAcf(m1_i, main="ACF plot of the transformed residuals after having detrended the series with the method \n of Warren (1980)") # bold font does not work
ggPacf(m1_i, main="PACF plot of the transformed residuals after having detrended the series with the method \n of Warren (1980)")
# Method 2: Stabilisation by using SD of yearly observations (raw data)
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Stabilisation: (sample mean - \mu)/sd(sample mean)
# \mu was estimated using Woollons and Norton
# sd(sample mean) = sd/sqrt(n), which is the sqrt(depth)^(-1) * sd factor below.
stab_sd_ts <- (rwl_mean_window - app_1_trend_1_fit)/(sqrt(rwl_depth_window)^(-1)*rwl_sd_window)
plot(stab_sd_ts)
d_ggplot_5 <- cbind(Time=t, y=stab_sd_ts)
d_ggplot_5 <- as.data.frame(d_ggplot_5)
data_woollons_sd_stab <- as.data.frame(cbind(Time=t, y=stab_sd_ts,model=rep("Woollons SD stab.",times=length(t))))
# cbind() with the character "model" column makes Time/y factor-coded;
# convert them back to numeric via their levels.
data_woollons_sd_stab$Time <- as.numeric(levels(data_woollons_sd_stab$Time)[data_woollons_sd_stab$Time])
data_woollons_sd_stab$y <- as.numeric(levels(data_woollons_sd_stab$y)[data_woollons_sd_stab$y])
#*******************************************************************************
#*******************************************************************************
# Approach: Power transformation of the residuals (by hand) -> DISCARDED
#*******************************************************************************
#*******************************************************************************
# Power transformation "by hand": estimate the slope b of log(local SD) on
# log(local mean) and transform the series as R^(1 - b), so that the local
# variability no longer scales with the local level.
eps <- 1/10000 # To prevent log from getting -inf when neighbouring values are equal
R <- spruce_window
# Local level (mean of neighbouring observations) and local spread.
local_mean <- (R[1:(length(R) - 1)] + R[2:length(R)])/2
local_sd <- abs(diff(R)) + eps
log_local_mean <- log(local_mean)
log_local_sd <- log(local_sd)
powt_df <- as.data.frame(cbind(log_S = log_local_sd, log_M = log_local_mean))
str(powt_df)
# linear model: the slope of log spread vs. log level gives the power exponent
m <- lm(log_S ~ log_M, data = powt_df)
summary(m)
b <- m$coef[2]
R_transformed <- R^(1 - b)
data_powertrans <- as.data.frame(cbind(Time = t, y = R_transformed, method = rep("Power transformed", times = length(t))))
plot(y = R_transformed, x = 1:length(R_transformed), type = "l")
# Time series still has a trend -> remove a linear trend in time.
R_transformed_df <- data.frame(Width_trans = R_transformed, Time = t)
m <- lm(Width_trans ~ ., data = R_transformed_df)
summary(m)
R_transformed_centered <- m$residuals
data_powertrans_centered <- as.data.frame(cbind(Time = t, y = R_transformed_centered, method = rep("Power trans. centered", times = length(t))))
acf(R_transformed_centered)
pacf(R_transformed_centered) # => AR(2)
# BUG FIX: the original call arima(x, c(2,0,0), order=c(0,0,0), method="ML")
# supplied `order` twice (positionally and by name), which is an error.
# The PACF above suggests an AR(2), so fit exactly that model.
arima(x = R_transformed_centered, order = c(2, 0, 0), method = "ML")
# ggplot
# transformed time series, before and after removing the linear trend
R_transformed
d_ggplot_6<- cbind(Time=t, y1=R_transformed, y2=R_transformed_centered)
d_ggplot_6<- as.data.frame(d_ggplot_6)
# NOTE(review): str(d_ggplot_5) probably meant d_ggplot_6, and the
# "d_ggplot_5_gathered" name below is in fact built from d_ggplot_6.
str(d_ggplot_5)
d_ggplot_5_gathered <- d_ggplot_6%>% gather(y, Values, y1:y2)
pplot <- ggplot(d_ggplot_5_gathered, aes(x=Time))
pplot + geom_line(aes(y=Values, group=y,
color=factor(y, labels=c("After power transformation", "After power transformation and linear trend")))) +
scale_color_manual(values=c("blue", "green")) +
ggtitle('Residual time series') +
theme(plot.title = element_text(hjust=0.5), legend.position = "top") +
xlab("Time") + ylab("Tree ring width (1/100 mm)") +
labs(color = "Methods") +
labs(linetype="Methods")
ggAcf(R_transformed_centered, main="ACF plot after a power transformation and subtracting a linear trend")
ggPacf(R_transformed_centered, main="PACF plot after a power transformation and subtracting a linear trend")
#*******************************************************************************
#*******************************************************************************
# Approach: Box-Cox transformation -> DISCARDED
#*******************************************************************************
#*******************************************************************************
# Box-Cox-Transform: choose lambda by maximum likelihood (MASS::boxcox).
lambdas <- boxcox(spruce_window~t)
l <- lambdas$x[which.max(lambdas$y)] # this is the MLE lambda to transform data
# Apply the Box-Cox transform with the selected lambda and centre the result.
spruce_window_boxcox <- (spruce_window^l-1)/l-mean((spruce_window^l-1)/l) # Box-Cox transformation
plotc(spruce_window_boxcox)
data_boxcox <- as.data.frame(cbind(Time=t, y=spruce_window_boxcox,method=rep("Box-Cox transform",times=length(t))))
acf(spruce_window_boxcox)
pacf(spruce_window_boxcox)
#*******************************************************************************
#*******************************************************************************
# Overview plots -> report
#*******************************************************************************
#*******************************************************************************
# Woollons SD-stabilised series.
stationarity_woollons_plot <- ggplot(data_woollons_sd_stab, aes(x = Time, y = y)) +
  geom_line(size = 0.3) +
  ylab("")
# Power transformation & Box-Cox, overlaid and coloured by method.
data_powerbox <- rbind(data_powertrans_centered,
                       data_boxcox)
# Recover numeric Time/y from the factor-coded columns via their levels.
data_powerbox$Time <- as.numeric(levels(data_powerbox$Time)[data_powerbox$Time])
data_powerbox$y <- as.numeric(levels(data_powerbox$y)[data_powerbox$y])
stationarity_powerbox_plot <- ggplot(data_powerbox, aes(x = Time, y = y, colour = method)) +
  geom_line(size = 0.3) +
  ylab("") +
  theme(legend.justification = c(1, 1), legend.position = c(1, 1))
# Selected transformation (scaled, log, linear trend removed) plus its Q-Q plot.
stationaritylog_order1_plot <- ggplot(data_log_order1_varstab, aes(x = Time, y = y)) +
  geom_hline(yintercept = 0, size = 0.3) +
  geom_line(size = 0.3) +
  ylab("Scaled, log transformed values [-]")  # fixed label typo: "tranformed"
stationarity_qq_plot <- ggplot(data_log_order1_varstab, aes(sample = y)) +
  geom_qq(alpha = 0.5) +
  stat_qq_line(size = 0.3)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/multBayesQR.R
\name{multBayesQR}
\alias{multBayesQR}
\title{Multiple-output Bayesian quantile regression model}
\usage{
multBayesQR(response, formulaPred, directionPoint, tau = 0.5, dataFile,
itNum = 2000, burnin, thin = 1, betaValue = NULL, sigmaValue = 1,
vSampleInit = NULL, priorVar = 100, hyperSigma = c(0.1, 0.1),
refresh = 100, bayesx = TRUE, sigmaSampling = TRUE, quiet = T,
tobit = FALSE, numCores = 1, recordLat = FALSE, blocksV = 0,
stopOrdering = FALSE, numOrdered = itNum/2, outfile = NULL, ...)
}
\arguments{
\item{response}{Names of response variables}
\item{formulaPred}{a formula object, with . on the left side of a ~ operator,
and the predictor terms, separated by + operators, on the right side.}
\item{directionPoint}{Either a vector with the same number of dimensions of
response variable, indicating a direction, or a integer indicating the
number of directions equally spaced in the unit circle one should
estimate.}
\item{tau}{Quantiles of interest. Default is the median, \code{tau = 0.5}.}
\item{dataFile}{A data.frame from which to find the variables defined in the
formula.}
\item{itNum}{Number of iterations.}
\item{burnin}{Size of the initial part of the chain to be discarded.}
\item{thin}{Thinning parameter. Default value is 1.}
\item{betaValue}{Initial values for the parameter beta for the continuous
part.}
\item{sigmaValue}{Initial value for the scale parameter.}
\item{vSampleInit}{Initial value for the latent variables.}
\item{priorVar}{Value that multiplies an identity matrix in the elicitation
process of the prior variance of the regression parameters.}
\item{hyperSigma}{Vector of size 2 containing the hyperparameters of the
inverse gamma distribution for the sigma parameter of the asymmetric
Laplace distribution. Default is c(0.1, 0.1), which gives a noninformative
prior for sigma.}
\item{refresh}{Interval between printing a message during the iteration
process. Default is set to 100.}
\item{bayesx}{If TRUE, the default, it uses bayesX software to estimate
the quantile regression parameters, which can be faster. If FALSE, it
uses an Rcpp implementation of the MCMC sampler.}
\item{sigmaSampling}{If TRUE, the default, it will sample from the posterior
distribution of the scale parameter. If FALSE, all values will be fixed to
1.}
\item{quiet}{If TRUE, the default, it does not print messages to check if
the MCMC is actually updating. If FALSE, it will use the value of refresh
to print messages to control the iteration process.}
\item{tobit}{If TRUE, it will impute the censored value for all observations
with y = 0, according to the model. If FALSE, the default, it will estimate
the parameter without this imputation process.}
\item{numCores}{The number of cores that could be used for estimating
parallel models when more than one direction is considered.}
\item{recordLat}{If TRUE, it will keep the Markov chain samples for the
latent variable. Default is FALSE.}
\item{blocksV}{Number of blocks used to sample in the posterior distribution
of the latent variable. If 0, then blocking is not used and all latent
observations are sampled from. Default value is 0.}
\item{stopOrdering}{If TRUE, it will stop ordering the weighted residuals
in order to update the states of the latent variables, and will consider
the ordering of some particular state of the chain; if FALSE, for every
iteration of the MCMC procedure, it will keep reordering these residual
terms. Default is FALSE.}
\item{numOrdered}{The number of iterations that will be used to order
the weighted residuals needed for the update of the posterior
distribution of the latent variables. Default is half the size of
the MCMC chain.}
\item{outfile}{argument to be passed to \code{bayesx.control}, in order
to define a directory where all output files should be saved.}
\item{...}{arguments passed to \code{bayesx.control}.}
}
\value{
A list with the chains of all parameters of interest.
}
\description{
This function estimates a multiple-output Bayesian quantile regression model
}
| /man/multBayesQR.Rd | no_license | why94nb/baquantreg | R | false | true | 4,095 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/multBayesQR.R
\name{multBayesQR}
\alias{multBayesQR}
\title{Multiple-output Bayesian quantile regression model}
\usage{
multBayesQR(response, formulaPred, directionPoint, tau = 0.5, dataFile,
itNum = 2000, burnin, thin = 1, betaValue = NULL, sigmaValue = 1,
vSampleInit = NULL, priorVar = 100, hyperSigma = c(0.1, 0.1),
refresh = 100, bayesx = TRUE, sigmaSampling = TRUE, quiet = T,
tobit = FALSE, numCores = 1, recordLat = FALSE, blocksV = 0,
stopOrdering = FALSE, numOrdered = itNum/2, outfile = NULL, ...)
}
\arguments{
\item{response}{Names of response variables}
\item{formulaPred}{a formula object, with . on the left side of a ~ operator,
and the predictor terms, separated by + operators, on the right side.}
\item{directionPoint}{Either a vector with the same number of dimensions of
response variable, indicating a direction, or a integer indicating the
number of directions equally spaced in the unit circle one should
estimate.}
\item{tau}{Quantiles of interest. Default is the median, \code{tau = 0.5}.}
\item{dataFile}{A data.frame from which to find the variables defined in the
formula.}
\item{itNum}{Number of iterations.}
\item{burnin}{Size of the initial part of the chain to be discarded.}
\item{thin}{Thinning parameter. Default value is 1.}
\item{betaValue}{Initial values for the parameter beta for the continuous
part.}
\item{sigmaValue}{Initial value for the scale parameter.}
\item{vSampleInit}{Initial value for the latent variables.}
\item{priorVar}{Value that multiplies an identity matrix in the elicitation
process of the prior variance of the regression parameters.}
\item{hyperSigma}{Vector of size 2 containing the hyperparameters of the
inverse gamma distribution for the sigma parameter of the asymmetric
Laplace distribution. Default is c(0.1, 0.1), which gives a noninformative
prior for sigma.}
\item{refresh}{Interval between printing a message during the iteration
process. Default is set to 100.}
\item{bayesx}{If TRUE, the default, it uses bayesX software to estimate
the quantile regression parameters, which can be faster. If FALSE, it
uses an Rcpp implementation of the MCMC sampler.}
\item{sigmaSampling}{If TRUE, the default, it will sample from the posterior
distribution of the scale parameter. If FALSE, all values will be fixed to
1.}
\item{quiet}{If TRUE, the default, it does not print messages to check if
the MCMC is actually updating. If FALSE, it will use the value of refresh
to print messages to control the iteration process.}
\item{tobit}{If TRUE, it will impute the censored value for all observations
with y = 0, according to the model. If FALSE, the default, it will estimate
the parameter without this imputation process.}
\item{numCores}{The number of cores that could be used for estimating
parallel models when more than one direction is considered.}
\item{recordLat}{If TRUE, it will keep the Markov chain samples for the
latent variable. Default is FALSE.}
\item{blocksV}{Number of blocks used to sample in the posterior distribution
of the latent variable. If 0, then blocking is not used and all latent
observations are sampled from. Default value is 0.}
\item{stopOrdering}{If TRUE, it will stop ordering the weighted residuals
in order to update the states of the latent variables, and will consider
the ordering of some particular state of the chain; if FALSE, for every
iteration of the MCMC procedure, it will keep reordering these residual
terms. Default is FALSE.}
\item{numOrdered}{The number of iterations that will be used to order
the weighted residuals needed for the update of the posterior
distribution of the latent variables. Default is half the size of
the MCMC chain.}
\item{outfile}{argument to be passed to \code{bayesx.control}, in order
to define a directory where all output files should be saved.}
\item{...}{arguments passed to \code{bayesx.control}.}
}
\value{
A list with the chains of all parameters of interest.
}
\description{
This function estimates a multiple-output Bayesian quantile regression model
}
|
## Useful Commands and Tips
## Dates represented by Date class, stored as number of days since 1970-01-01
## Time is represented by POSIXct and POSIXlt, stored as number of seconds since
## 1970-01-01 for POSIXct and as a list of seconds, minutes, hours, etc for
## POSIXlt
## To see internals of a variable (d1 is assumed to hold a Date object)
unclass(d1)
## create date variables
as.Date("1969-01-01")
## get system date
Sys.Date()
## get system time, returns class POSIXct
Sys.time()
## convert to POSIXlt, time must be in format (YYYY-MM-DD)
as.POSIXlt(Sys.time())
## condense unclass output
str(unclass())
## get day of the week
weekdays()
## get month
months()
## get quarter
quarters()
## convert character string into time (POSIXlt), note need to know code use help
strptime()
## can perform addition and subtraction and comparisons on times and dates
## difftime() lets you specify 'units' parameter
## BUG FIX: Sys.time must be called with parentheses; bare Sys.time passes
## the function object itself and difftime() would fail
difftime(Sys.time(), t1, units = 'days')
## if you use dates and times a lot look into lubridate package
| /data_science/R_programming/week02/swirl/dates_times.R | no_license | pbolfing/Coursera_Old | R | false | false | 982 | r | ## Useful Commands and Tips
## Dates represented by Date class, stored as number of days since 1970-01-01
## Time is represented by POSIXct and POSIXlt, stored as number of seconds since
## 1970-01-01 for POSIXct and as a list of seconds, minutes, hours, etc for
## POSIXlt
## To see internals of a variable (d1 is assumed to hold a Date object)
unclass(d1)
## create date variables
as.Date("1969-01-01")
## get system date
Sys.Date()
## get system time, returns class POSIXct
Sys.time()
## convert to POSIXlt, time must be in format (YYYY-MM-DD)
as.POSIXlt(Sys.time())
## condense unclass output
str(unclass())
## get day of the week
weekdays()
## get month
months()
## get quarter
quarters()
## convert character string into time (POSIXlt), note need to know code use help
strptime()
## can perform addition and subtraction and comparisons on times and dates
## difftime() lets you specify 'units' parameter
## BUG FIX: Sys.time must be called with parentheses; bare Sys.time passes
## the function object itself and difftime() would fail
difftime(Sys.time(), t1, units = 'days')
## if you use dates and times a lot look into lubridate package
|
#New code
3+3 | /sample.R | no_license | Vinaysg693/MyRprogram | R | false | false | 14 | r | #New code
3+3 |
# Automatically fit a variogram model to the data in `input_data`.
#
# Arguments
#   formula        formula defining the response and possible trend, as used
#                  by gstat::variogram().
#   input_data     a Spatial* (sp) or sf object containing the observations.
#   model          variogram-model families to try (see gstat::vgm()).
#   kappa          smoothing parameters tried for the "Mat" and "Ste" models.
#   fix.values     c(nugget, range, sill); any non-NA entry is held fixed
#                  instead of being fitted.
#   verbose        if TRUE, print the tested models and the selected fit.
#   GLS.model      if a variogramModel object is passed, a generalised
#                  least-squares sample variogram is computed.
#   start_vals     c(nugget, range, sill) starting values; NA entries are
#                  chosen automatically from the data.
#   miscFitOptions list of extra options: merge.small.bins (logical) and
#                  min.np.bin (minimum number of point pairs per bin).
#   ...            passed on to gstat::variogram().
#
# Value: a list of class c("autofitVariogram", "list") with elements
#   exp_var (experimental variogram), var_model (best fitted model) and
#   sserr (its sum of squared errors).
autofitVariogram <- function(formula, input_data, model = c("Sph", "Exp", "Gau", "Ste"),
                             kappa = c(0.05, seq(0.2, 2, 0.1), 5, 10), fix.values = c(NA, NA, NA),
                             verbose = FALSE, GLS.model = NA, start_vals = c(NA, NA, NA),
                             miscFitOptions = list(), ...)
{
    # Anisotropic fitting is not supported; warn when 'alpha' is passed on.
    if ('alpha' %in% names(list(...))) warning('Anisotropic variogram model fitting not supported, see the documentation of autofitVariogram for more details.')

    # Overwrite the default misc fit options with the user-specified ones.
    miscFitOptionsDefaults <- list(merge.small.bins = TRUE, min.np.bin = 5)
    miscFitOptions <- modifyList(miscFitOptionsDefaults, miscFitOptions)

    # Length of the diagonal of the bounding box, for sp and sf inputs alike.
    if (is(input_data, "Spatial")) {
        longlat <- !is.projected(input_data)
        if (is.na(longlat)) longlat <- FALSE
        diagonal <- spDists(t(bbox(input_data)), longlat = longlat)[1, 2]
    } else {
        longlat <- st_is_longlat(input_data)
        if (is.na(longlat)) longlat <- FALSE
        bb <- st_bbox(input_data)
        diagonal <- sqrt(((bb$xmax - bb$xmin)^2) + ((bb$ymax - bb$ymin)^2))
    }
    # Boundaries for the bins, scaled to 0.35 times the diagonal of the area.
    boundaries <- c(2, 4, 6, 9, 12, 15, 25, 35, 50, 65, 80, 100) * diagonal * 0.35/100

    # If a variogram model is given in GLS.model, the generalised least
    # squares sample variogram is constructed instead of the ordinary one.
    if (!is(GLS.model, "variogramModel")) {
        experimental_variogram <- variogram(formula, input_data, boundaries = boundaries, ...)
    } else {
        if (verbose) cat("Calculating GLS sample variogram\n")
        g <- gstat(NULL, "bla", formula, input_data, model = GLS.model, set = list(gls = 1))
        experimental_variogram <- variogram(g, boundaries = boundaries, ...)
    }

    # Merge bins with fewer than min.np.bin point pairs by dropping the
    # smallest boundary and re-binning (request by Jon Skoien).
    if (miscFitOptions[["merge.small.bins"]]) {
        if (verbose) cat("Checking if any bins have less than 5 points, merging bins when necessary...\n\n")
        while (TRUE) {
            if (length(experimental_variogram$np[experimental_variogram$np < miscFitOptions[["min.np.bin"]]]) == 0 | length(boundaries) == 1) break
            boundaries <- boundaries[2:length(boundaries)]
            if (!is(GLS.model, "variogramModel")) {
                experimental_variogram <- variogram(formula, input_data, boundaries = boundaries, ...)
            } else {
                experimental_variogram <- variogram(g, boundaries = boundaries, ...)
            }
        }
    }

    # Initial values: data-driven unless supplied through start_vals.
    if (is.na(start_vals[1])) {  # Nugget
        initial_nugget <- min(experimental_variogram$gamma)
    } else {
        initial_nugget <- start_vals[1]
    }
    if (is.na(start_vals[2])) {  # Range: 0.10 times the diagonal of the area
        initial_range <- 0.1 * diagonal
    } else {
        initial_range <- start_vals[2]
    }
    if (is.na(start_vals[3])) {  # Sill
        initial_sill <- mean(c(max(experimental_variogram$gamma), median(experimental_variogram$gamma)))
    } else {
        initial_sill <- start_vals[3]
    }

    # Determine which parameters are fitted and which are held fixed.
    if (!is.na(fix.values[1])) {  # Nugget
        fit_nugget <- FALSE
        initial_nugget <- fix.values[1]
    } else fit_nugget <- TRUE
    if (!is.na(fix.values[2])) {  # Range
        fit_range <- FALSE
        initial_range <- fix.values[2]
    } else fit_range <- TRUE
    if (!is.na(fix.values[3])) {  # Partial sill
        fit_sill <- FALSE
        initial_sill <- fix.values[3]
    } else fit_sill <- TRUE

    # Fit one candidate model; returns NULL when gstat fails, so the caller
    # can skip that candidate. (An unused `debug.level` local derived from
    # `verbose` was removed; fit.variogram is always called with
    # debug.level = 0.)
    getModel <- function(psill, model, range, kappa, nugget, fit_range, fit_sill, fit_nugget, verbose)
    {
        if (model == "Pow") {
            warning("Using the power model is at your own risk, read the docs of autofitVariogram for more details.")
            if (is.na(start_vals[1])) nugget <- 0
            if (is.na(start_vals[2])) range <- 1  # For a power model, range == 1 is a better start value
            # BUG FIX: this used to assign an unused local `sill`; the start
            # value must overwrite `psill`, which is what vgm() receives.
            if (is.na(start_vals[3])) psill <- 1
        }
        obj <- try(fit.variogram(experimental_variogram,
                                 model = vgm(psill = psill, model = model, range = range,
                                             nugget = nugget, kappa = kappa),
                                 fit.ranges = c(fit_range), fit.sills = c(fit_nugget, fit_sill),
                                 debug.level = 0),
                   TRUE)
        if ("try-error" %in% class(obj)) {
            warning("An error has occured during variogram fitting. Used:\n",
                    "\tnugget:\t", nugget,
                    "\n\tmodel:\t", model,
                    "\n\tpsill:\t", psill,
                    "\n\trange:\t", range,
                    "\n\tkappa:\t", ifelse(kappa == 0, NA, kappa),
                    "\n  as initial guess. This particular variogram fit is not taken into account. \nGstat error:\n", obj)
            return(NULL)
        } else return(obj)
    }

    # Try every candidate family (and each kappa for Matern/Stein) and keep
    # the fit with the smallest sum of squared errors.
    # NOTE(review): when a fit fails, vgm_list gets a NULL hole (counter is
    # still incremented) while SSerr_list does not -- confirm the two stay
    # aligned for the strange_entries filtering below.
    test_models <- model
    SSerr_list <- c()
    vgm_list <- list()
    counter <- 1
    for (m in test_models) {
        if (m != "Mat" && m != "Ste") {  # Not a Matern-type model
            model_fit <- getModel(initial_sill - initial_nugget, m, initial_range, kappa = 0, initial_nugget, fit_range, fit_sill, fit_nugget, verbose = verbose)
            if (!is.null(model_fit)) {  # skip models that failed
                vgm_list[[counter]] <- model_fit
                SSerr_list <- c(SSerr_list, attr(model_fit, "SSErr"))
            }
            counter <- counter + 1
        } else {  # Matern-type: also loop over the kappa values
            for (k in kappa) {
                model_fit <- getModel(initial_sill - initial_nugget, m, initial_range, k, initial_nugget, fit_range, fit_sill, fit_nugget, verbose = verbose)
                if (!is.null(model_fit)) {
                    vgm_list[[counter]] <- model_fit
                    SSerr_list <- c(SSerr_list, attr(model_fit, "SSErr"))
                }
                counter <- counter + 1
            }
        }
    }

    # Drop NULL fits and fits with a negative sill or range coming from
    # fit.variogram, warning the user that models were removed.
    strange_entries <- sapply(vgm_list, function(v) any(c(v$psill, v$range) < 0) | is.null(v))
    if (any(strange_entries)) {
        if (verbose) {
            print(vgm_list[strange_entries])
            cat("^^^ ABOVE MODELS WERE REMOVED ^^^\n\n")
        }
        warning("Some models where removed for being either NULL or having a negative sill/range/nugget, \n\tset verbose == TRUE for more information")
        SSerr_list <- SSerr_list[!strange_entries]
        vgm_list <- vgm_list[!strange_entries]
    }

    if (verbose) {
        cat("Selected:\n")
        print(vgm_list[[which.min(SSerr_list)]])
        cat("\nTested models, best first:\n")
        tested <- data.frame("Tested models" = sapply(vgm_list, function(x) as.character(x[2, 1])),
                             kappa = sapply(vgm_list, function(x) as.character(x[2, 4])),
                             "SSerror" = SSerr_list)
        tested <- tested[order(tested$SSerror), ]
        print(tested)
    }

    result <- list(exp_var = experimental_variogram, var_model = vgm_list[[which.min(SSerr_list)]], sserr = min(SSerr_list))
    class(result) <- c("autofitVariogram", "list")
    return(result)
}
| /automap/R/autofitVariogram.r | no_license | jskoien/jskoien | R | false | false | 7,211 | r | autofitVariogram = function(formula, input_data, model = c("Sph", "Exp", "Gau", "Ste"),
kappa = c(0.05, seq(0.2, 2, 0.1), 5, 10), fix.values = c(NA,NA,NA),
verbose = FALSE, GLS.model = NA, start_vals = c(NA,NA,NA),
miscFitOptions = list(),...)
# This function automatically fits a variogram to input_data
{
# Check for anisotropy parameters
if('alpha' %in% names(list(...))) warning('Anisotropic variogram model fitting not supported, see the documentation of autofitVariogram for more details.')
# Take the misc fit options and overwrite the defaults by the user specified ones
miscFitOptionsDefaults = list(merge.small.bins = TRUE, min.np.bin = 5)
miscFitOptions = modifyList(miscFitOptionsDefaults, miscFitOptions)
# Create boundaries
if (is(input_data, "Spatial")) {
longlat = !is.projected(input_data)
if(is.na(longlat)) longlat = FALSE
diagonal = spDists(t(bbox(input_data)), longlat = longlat)[1,2] # 0.35 times the length of the central axis through the area
} else {
longlat = st_is_longlat(input_data)
if (is.na(longlat)) longlat = FALSE
bb = st_bbox(input_data)
diagonal = sqrt(((bb$xmax-bb$xmin)^2)+((bb$ymax-bb$ymin)^2))
}
boundaries = c(2,4,6,9,12,15,25,35,50,65,80,100) * diagonal * 0.35/100 # Boundaries for the bins in km
# If you specifiy a variogram model in GLS.model the Generelised least squares sample variogram is constructed
if(!is(GLS.model, "variogramModel")) {
experimental_variogram = variogram(formula, input_data,boundaries = boundaries, ...)
} else {
if(verbose) cat("Calculating GLS sample variogram\n")
g = gstat(NULL, "bla", formula, input_data, model = GLS.model, set = list(gls=1))
experimental_variogram = variogram(g, boundaries = boundaries, ...)
}
# request by Jon Skoien
if(miscFitOptions[["merge.small.bins"]]) {
if(verbose) cat("Checking if any bins have less than 5 points, merging bins when necessary...\n\n")
while(TRUE) {
if(length(experimental_variogram$np[experimental_variogram$np < miscFitOptions[["min.np.bin"]]]) == 0 | length(boundaries) == 1) break
boundaries = boundaries[2:length(boundaries)]
if(!is(GLS.model, "variogramModel")) {
experimental_variogram = variogram(formula, input_data,boundaries = boundaries, ...)
} else {
experimental_variogram = variogram(g, boundaries = boundaries, ...)
}
}
}
# set initial values
if(is.na(start_vals[1])) { # Nugget
initial_nugget = min(experimental_variogram$gamma)
} else {
initial_nugget = start_vals[1]
}
if(is.na(start_vals[2])) { # Range
initial_range = 0.1 * diagonal # 0.10 times the length of the central axis through the area
} else {
initial_range = start_vals[2]
}
if(is.na(start_vals[3])) { # Sill
initial_sill = mean(c(max(experimental_variogram$gamma), median(experimental_variogram$gamma)))
} else {
initial_sill = start_vals[3]
}
# Determine what should be automatically fitted and what should be fixed
# Nugget
if(!is.na(fix.values[1]))
{
fit_nugget = FALSE
initial_nugget = fix.values[1]
} else
fit_nugget = TRUE
# Range
if(!is.na(fix.values[2]))
{
fit_range = FALSE
initial_range = fix.values[2]
} else
fit_range = TRUE
# Partial sill
if(!is.na(fix.values[3]))
{
fit_sill = FALSE
initial_sill = fix.values[3]
} else
fit_sill = TRUE
getModel = function(psill, model, range, kappa, nugget, fit_range, fit_sill, fit_nugget, verbose)
{
if(verbose) debug.level = 1 else debug.level = 0
if(model == "Pow") {
warning("Using the power model is at your own risk, read the docs of autofitVariogram for more details.")
if(is.na(start_vals[1])) nugget = 0
if(is.na(start_vals[2])) range = 1 # If a power mode, range == 1 is a better start value
if(is.na(start_vals[3])) sill = 1
}
obj = try(fit.variogram(experimental_variogram,
model = vgm(psill=psill, model=model, range=range,
nugget=nugget,kappa = kappa),
fit.ranges = c(fit_range), fit.sills = c(fit_nugget, fit_sill),
debug.level = 0),
TRUE)
if("try-error" %in% class(obj)) {
#print(traceback())
warning("An error has occured during variogram fitting. Used:\n",
"\tnugget:\t", nugget,
"\n\tmodel:\t", model,
"\n\tpsill:\t", psill,
"\n\trange:\t", range,
"\n\tkappa:\t",ifelse(kappa == 0, NA, kappa),
"\n as initial guess. This particular variogram fit is not taken into account. \nGstat error:\n", obj)
return(NULL)
} else return(obj)
}
# Automatically testing different models, the one with the smallest sums-of-squares is chosen
test_models = model
SSerr_list = c()
vgm_list = list()
counter = 1
for(m in test_models) {
if(m != "Mat" && m != "Ste") { # If not Matern and not Stein
model_fit = getModel(initial_sill - initial_nugget, m, initial_range, kappa = 0, initial_nugget, fit_range, fit_sill, fit_nugget, verbose = verbose)
if(!is.null(model_fit)) { # skip models that failed
vgm_list[[counter]] = model_fit
SSerr_list = c(SSerr_list, attr(model_fit, "SSErr"))}
counter = counter + 1
} else { # Else loop also over kappa values
for(k in kappa) {
model_fit = getModel(initial_sill - initial_nugget, m, initial_range, k, initial_nugget, fit_range, fit_sill, fit_nugget, verbose = verbose)
if(!is.null(model_fit)) {
vgm_list[[counter]] = model_fit
SSerr_list = c(SSerr_list, attr(model_fit, "SSErr"))}
counter = counter + 1
}
}
}
# Check for negative values in sill or range coming from fit.variogram
# and NULL values in vgm_list, and remove those with a warning
strange_entries = sapply(vgm_list, function(v) any(c(v$psill, v$range) < 0) | is.null(v))
if(any(strange_entries)) {
if(verbose) {
print(vgm_list[strange_entries])
cat("^^^ ABOVE MODELS WERE REMOVED ^^^\n\n")
}
warning("Some models where removed for being either NULL or having a negative sill/range/nugget, \n\tset verbose == TRUE for more information")
SSerr_list = SSerr_list[!strange_entries]
vgm_list = vgm_list[!strange_entries]
}
if(verbose) {
cat("Selected:\n")
print(vgm_list[[which.min(SSerr_list)]])
cat("\nTested models, best first:\n")
tested = data.frame("Tested models" = sapply(vgm_list, function(x) as.character(x[2,1])),
kappa = sapply(vgm_list, function(x) as.character(x[2,4])),
"SSerror" = SSerr_list)
tested = tested[order(tested$SSerror),]
print(tested)
}
result = list(exp_var = experimental_variogram, var_model = vgm_list[[which.min(SSerr_list)]], sserr = min(SSerr_list))
class(result) = c("autofitVariogram","list")
return(result)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/defineExperiment.R
\name{get_ExpDes}
\alias{get_ExpDes}
\title{get_ExpDes}
\usage{
get_ExpDes(csv.in)
}
\arguments{
\item{csv.in}{Experimental Design read from csv}
}
\value{
list containing design and instrument
}
\description{
get Experimental Design
}
| /man/get_ExpDes.Rd | no_license | cbroeckl/RAMClustR | R | false | true | 333 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/defineExperiment.R
\name{get_ExpDes}
\alias{get_ExpDes}
\title{get_ExpDes}
\usage{
get_ExpDes(csv.in)
}
\arguments{
\item{csv.in}{Experimental Design read from csv}
}
\value{
list containing design and instrument
}
\description{
get Experimental Design
}
|
% File src/library/bigsplines/man/grid-internal.Rd
% Part of the R bigsplines package,
% Nathaniel E. Helwig <helwig@umn.edu>
\name{bigsplines-internal}
%% List of Internal Functions Called
\alias{gcvcss}
\alias{gcvgss}
\alias{gcvoss}
\alias{gcvssa}
\alias{gcvssg}
\alias{gcvssp}
\alias{getRandom}
\alias{lamcoef}
\alias{lamcoefg}
\alias{lamloop}
\alias{lamloopg}
\alias{makerkm}
\alias{makeZtX}
\alias{makeZtZ}
\alias{MPinv}
\alias{nbmle}
\alias{num2col}
\alias{pdsXty}
\alias{pinvsm}
\alias{postvar}
\alias{rkron}
\alias{remlri}
\alias{remlvc}
\alias{smartssa}
\alias{smartssg}
\alias{smartssp}
\alias{ssawork}
\alias{ssadpm}
\alias{ssblup}
\alias{ssgwork}
\alias{sspwork}
\alias{sspdpm}
\alias{tcprod}
\alias{unifqsum}
\alias{unifqsumg}
\alias{cubker}
\alias{cubkersym}
\alias{cubkerz}
\alias{cubkerzsym}
\alias{linker}
\alias{linkersym}
\alias{nomker}
\alias{nomkersym}
\alias{ordker}
\alias{ordkermon}
\alias{ordkersym}
\alias{perker}
\alias{perkersym}
\alias{sumfreq}
\alias{tpsker}
\alias{tpskersym}
\title{Internal functions for big splines package}
\description{
Internal functions for big splines package.
}
\details{
These functions are not to be called by the user.
}
\keyword{ internal }
| /man/bigsplines-internal.Rd | no_license | cran/bigsplines | R | false | false | 1,212 | rd | % File src/library/bigsplines/man/grid-internal.Rd
% Part of the R bigsplines package,
% Nathaniel E. Helwig <helwig@umn.edu>
\name{bigsplines-internal}
%% List of Internal Functions Called
\alias{gcvcss}
\alias{gcvgss}
\alias{gcvoss}
\alias{gcvssa}
\alias{gcvssg}
\alias{gcvssp}
\alias{getRandom}
\alias{lamcoef}
\alias{lamcoefg}
\alias{lamloop}
\alias{lamloopg}
\alias{makerkm}
\alias{makeZtX}
\alias{makeZtZ}
\alias{MPinv}
\alias{nbmle}
\alias{num2col}
\alias{pdsXty}
\alias{pinvsm}
\alias{postvar}
\alias{rkron}
\alias{remlri}
\alias{remlvc}
\alias{smartssa}
\alias{smartssg}
\alias{smartssp}
\alias{ssawork}
\alias{ssadpm}
\alias{ssblup}
\alias{ssgwork}
\alias{sspwork}
\alias{sspdpm}
\alias{tcprod}
\alias{unifqsum}
\alias{unifqsumg}
\alias{cubker}
\alias{cubkersym}
\alias{cubkerz}
\alias{cubkerzsym}
\alias{linker}
\alias{linkersym}
\alias{nomker}
\alias{nomkersym}
\alias{ordker}
\alias{ordkermon}
\alias{ordkersym}
\alias{perker}
\alias{perkersym}
\alias{sumfreq}
\alias{tpsker}
\alias{tpskersym}
\title{Internal functions for big splines package}
\description{
Internal functions for big splines package.
}
\details{
These functions are not to be called by the user.
}
\keyword{ internal }
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_rem.R
\name{plot_rem}
\alias{plot_rem}
\title{A function to plot the Random Effect Model (REM) MetaVolcano}
\usage{
plot_rem(meta_diffexp, jobname, outputfolder, genecol, metathr)
}
\arguments{
\item{meta_diffexp}{data.frame/data.table containing the REM results from
rem_mv() <data.table/data.frame>}
\item{jobname}{name of the running job <string>}
\item{outputfolder}{/path where to write the results/ <string>}
\item{genecol}{column name of the variable to label genes in the .html file
<string>}
\item{metathr}{top percentage of perturbed genes to be highlighted <double>}
}
\value{
\code{ggplot2} object
}
\description{
This function plots the REM MetaVolcano using ggplot2
}
\examples{
data(diffexplist)
diffexplist <- lapply(diffexplist, function(del) {
dplyr::filter(del, grepl("MP", Symbol))
})
mv <- rem_mv(diffexplist, metathr = 0.1)
gg <- plot_rem(mv@metaresult, "MV", ".", "Symbol", 0.01)
plot(gg)
}
\keyword{REM}
\keyword{metavolcano}
\keyword{write}
| /man/plot_rem.Rd | no_license | csbl-usp/MetaVolcanoR | R | false | true | 1,058 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_rem.R
\name{plot_rem}
\alias{plot_rem}
\title{A function to plot the Random Effect Model (REM) MetaVolcano}
\usage{
plot_rem(meta_diffexp, jobname, outputfolder, genecol, metathr)
}
\arguments{
\item{meta_diffexp}{data.frame/data.table containing the REM results from
rem_mv() <data.table/data.frame>}
\item{jobname}{name of the running job <string>}
\item{outputfolder}{/path where to write the results/ <string>}
\item{genecol}{column name of the variable to label genes in the .html file
<string>}
\item{metathr}{top percentage of perturbed genes to be highlighted <double>}
}
\value{
\code{ggplot2} object
}
\description{
This function plots the REM MetaVolcano using ggplot2
}
\examples{
data(diffexplist)
diffexplist <- lapply(diffexplist, function(del) {
dplyr::filter(del, grepl("MP", Symbol))
})
mv <- rem_mv(diffexplist, metathr = 0.1)
gg <- plot_rem(mv@metaresult, "MV", ".", "Symbol", 0.01)
plot(gg)
}
\keyword{REM}
\keyword{metavolcano}
\keyword{write}
|
# Copyright 2018 Observational Health Data Sciences and Informatics
#
# This file is part of EvaluatingCaseControl
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
computeSampleSize <- function(outputFolder) {
  # Compute the minimum detectable relative risk (MDRR) / statistical power
  # for the two replicated case-control studies and write the combined
  # result to <outputFolder>/SampleSize.csv.
  #
  # outputFolder: folder holding the ccIbd / ccAp analysis output and
  #               receiving SampleSize.csv. Called for its side effect
  #               (the CSV file).
  OhdsiRTools::logInfo("Computing sample size and power")

  # Load one caseControlData object, compute its MDRR, and annotate the
  # resulting row with the study it replicates.
  computeRow <- function(ccdFile, study, outcome, exposure) {
    ccd <- readRDS(ccdFile)
    row <- CaseControl::computeMdrr(ccd)
    row$Study <- study
    row$Outcome <- outcome
    # lower-case column name kept for backward compatibility of the CSV
    row$exposure <- exposure
    row
  }

  row1 <- computeRow(file.path(outputFolder, "ccIbd", "ccd_cd1_cc1_o3_ed1_e5_ccd1.rds"),
                     study = "Crockett et al.",
                     outcome = "Ulcerative colitis",
                     exposure = "Isotretinoin")
  row2 <- computeRow(file.path(outputFolder, "ccAp", "ccd_cd1_n1_cc1_o2_ed1_e4_ccd1.rds"),
                     study = "Chou et al.",
                     outcome = "Acute pancreatitis",
                     exposure = "DPP-4 inhibitors")

  # 'results' instead of 'table' to avoid shadowing base::table
  results <- rbind(row1, row2)
  write.csv(results, file.path(outputFolder, "SampleSize.csv"), row.names = FALSE)
}
| /EvaluatingCaseControl/R/SampleSize.R | no_license | NEONKID/StudyProtocolSandbox | R | false | false | 1,386 | r | # Copyright 2018 Observational Health Data Sciences and Informatics
#
# This file is part of EvaluatingCaseControl
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
computeSampleSize <- function(outputFolder) {
  # Compute the minimum detectable relative risk (MDRR) / statistical power
  # for the two replicated case-control studies and write the combined
  # result to <outputFolder>/SampleSize.csv.
  #
  # outputFolder: folder holding the ccIbd / ccAp analysis output and
  #               receiving SampleSize.csv. Called for its side effect
  #               (the CSV file).
  OhdsiRTools::logInfo("Computing sample size and power")

  # Load one caseControlData object, compute its MDRR, and annotate the
  # resulting row with the study it replicates.
  computeRow <- function(ccdFile, study, outcome, exposure) {
    ccd <- readRDS(ccdFile)
    row <- CaseControl::computeMdrr(ccd)
    row$Study <- study
    row$Outcome <- outcome
    # lower-case column name kept for backward compatibility of the CSV
    row$exposure <- exposure
    row
  }

  row1 <- computeRow(file.path(outputFolder, "ccIbd", "ccd_cd1_cc1_o3_ed1_e5_ccd1.rds"),
                     study = "Crockett et al.",
                     outcome = "Ulcerative colitis",
                     exposure = "Isotretinoin")
  row2 <- computeRow(file.path(outputFolder, "ccAp", "ccd_cd1_n1_cc1_o2_ed1_e4_ccd1.rds"),
                     study = "Chou et al.",
                     outcome = "Acute pancreatitis",
                     exposure = "DPP-4 inhibitors")

  # 'results' instead of 'table' to avoid shadowing base::table
  results <- rbind(row1, row2)
  write.csv(results, file.path(outputFolder, "SampleSize.csv"), row.names = FALSE)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_mux.R
\name{read_mux2_data_alternative}
\alias{read_mux2_data_alternative}
\title{read a mux2 data file}
\usage{
read_mux2_data_alternative(mux2file, inds = NULL)
}
\arguments{
\item{mux2file}{mux2 filename}
\item{inds}{indices to extract, see \code{read_mux2_data} for details}
}
\value{
see \code{read_mux2_data} for details
}
\description{
This function does basically the same thing as the previous read_mux2_data
function (for the GAR15 file_subtype only). It is arguably more elegant, but
not significantly faster. It might still be useful for debugging, so it is
kept here but not exported into the package namespace
}
| /R/rptha/man/read_mux2_data_alternative.Rd | permissive | GeoscienceAustralia/ptha | R | false | true | 702 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_mux.R
\name{read_mux2_data_alternative}
\alias{read_mux2_data_alternative}
\title{read a mux2 data file}
\usage{
read_mux2_data_alternative(mux2file, inds = NULL)
}
\arguments{
\item{mux2file}{mux2 filename}
\item{inds}{indices to extract, see \code{read_mux2_data} for details}
}
\value{
see \code{read_mux2_data} for details
}
\description{
This function does basically the same thing as the previous read_mux2_data
function (for the GAR15 file_subtype only). It is arguably more elegant, but
not significantly faster. It might still be useful for debugging, so it is
kept here but not exported into the package namespace
}
|
# Mcosts.R: relate yearly marketing spend per acquisition source to the
# number of customers acquired from that source, and plot the comparison.
# Expects `customers`, `marketingc` and `sources` inside agil1.Rdata.
rm(list=ls())
load("agil1.Rdata")
require(dplyr)
require(ggplot2)
# Customers acquired per source and join year.
# NOTE(review): the extra parentheses around (custacq=...) prevent dplyr
# from naming the summary column "custacq"; the names are repaired
# manually right below, so the result is still correct.
Sourcewise<- customers %>%
  group_by(SourceID,format(customers$JoinDate,'%Y')) %>%
  summarize((custacq=length(CustomerID)))
names(Sourcewise)<- c("SourceID", "Year", "CustomerCount")
# Join in marketing cost per source/year and the readable source name,
# then drop the no-longer-needed ID.
Sourcewise<- merge(Sourcewise, marketingc, by=c("SourceID", "Year"))
Sourcewise<- merge(Sourcewise, sources, by=c("SourceID"))
Sourcewise$SourceID<-NULL
# Earlier exploratory plots, kept for reference:
# Sourcewise<-cbind(Sourcewise[,c(1,4)],scale(Sourcewise[,2:3],scale = F))
# str(Sourcewise)
# ggplot(Sourcewise) + geom_line(aes(x=Year,y=MarketingCost,group=1,color="MarketingCost")) + geom_line(aes(x=Year,y=CustomerCount,group=1,color="CC")) +facet_wrap(~SourceName,scales="free") +
#   scale_color_manual(name = "combined legend",
#                      values=c(MarketingCost = "red", CC = "blue"))
#
# ggplot(Sourcewise) + geom_line(aes(y=CustomerCount,x=MarketingCost,group=1,color="MarketingCost")) + #+ geom_line(aes(x=Year,y=CustomerCount,group=1,color="CC"))
#   facet_wrap(~SourceName,scales="free") +
#   scale_color_manual(name = "combined legend",
#                      values=c(MarketingCost = "red", CC = "blue"))
# Scale cost and customer count within each source so both series can be
# compared on a common axis. (Leaves Sourcewise grouped by SourceName.)
Sourcewise <- Sourcewise %>% group_by(SourceName) %>% mutate(scaledcc=scale(CustomerCount),scaledmc=scale(MarketingCost))
# Quick look at how many customer records each source has.
qplot(customers$SourceID)
# Marketing spend per source, faceted by year.
ggplot(Sourcewise) + geom_bar(stat = "identity",aes(x= SourceName,y = MarketingCost,fill=as.factor(SourceName)),show.legend = FALSE)+ facet_wrap(~Year,scales="free") + theme(axis.text.x = element_text(angle=45, vjust=1, size=8, hjust=1)) + scale_x_discrete(label=function(x) abbreviate(x, minlength=9))+labs(x=NULL,y="Marketing Spend",title= "Marketing Spend each year")+ scale_fill_brewer(palette="PuOr")
# Customers acquired per source, faceted by year.
ggplot(Sourcewise) + geom_bar(stat = "identity",aes(x= SourceName,y = CustomerCount,fill=as.factor(SourceName)),show.legend = FALSE) + facet_wrap(~Year,scales = "free")+ theme(axis.text.x = element_text(angle=45, vjust=1, size=8, hjust=1)) + scale_x_discrete(label=function(x) abbreviate(x, minlength=9))+labs(x=NULL,y="Marketing Spend",title= "Customers acquired each year") + scale_fill_brewer(palette="PuOr")
# Cheapest source to acquire customers: spend divided by customers acquired.
Sourcewise$mcpercust<- Sourcewise$MarketingCost/Sourcewise$CustomerCount
head(Sourcewise)
# Marketing spend per acquired customer, faceted by year.
ggplot(Sourcewise) + geom_bar(stat = "identity",aes(x= SourceName,y = mcpercust,fill=as.factor(SourceName)),show.legend = F)+ facet_wrap(~Year,scales="free_x") +ggtitle("Marketing cost per Customers acquired") +xlab("Source Name") + ylab("Marketing Cost") + guides(fill=guide_legend(title="Sources"))+ theme(axis.text.x = element_text(angle=45, vjust=1, size=8, hjust=1)) + scale_x_discrete(label=function(x) abbreviate(x, minlength=9))+labs(x=NULL,y="Marketing Spend\nper customer acquired",title= "Marketing spend to acquire a customer") + scale_fill_brewer(palette="PuOr")
# No diminishing returns visible yet, so keep investing!
ggplot(Sourcewise) + geom_line(aes(Year,scaledmc,color="Marketing Cost"),group=1) + geom_line(aes(Year,scaledcc,color="Customer Count"),group=1) +facet_wrap(~SourceName,scales = "free_x")+labs(y="Scaled Values",title="Scaled comparision of Marketing cost & Customers acquired",x=NULL) + scale_colour_manual("", values = c("Marketing Cost"="cadetblue", "Customer Count"="brown"))
| /Mcosts.R | no_license | Nashavi/Kampong_ecom | R | false | false | 3,249 | r | rm(list=ls())
# Mcosts.R: relate yearly marketing spend per acquisition source to the
# number of customers acquired from that source, and plot the comparison.
# Expects `customers`, `marketingc` and `sources` inside agil1.Rdata.
load("agil1.Rdata")
require(dplyr)
require(ggplot2)
# Customers acquired per source and join year.
# NOTE(review): the extra parentheses around (custacq=...) prevent dplyr
# from naming the summary column "custacq"; the names are repaired
# manually right below, so the result is still correct.
Sourcewise<- customers %>%
  group_by(SourceID,format(customers$JoinDate,'%Y')) %>%
  summarize((custacq=length(CustomerID)))
names(Sourcewise)<- c("SourceID", "Year", "CustomerCount")
# Join in marketing cost per source/year and the readable source name,
# then drop the no-longer-needed ID.
Sourcewise<- merge(Sourcewise, marketingc, by=c("SourceID", "Year"))
Sourcewise<- merge(Sourcewise, sources, by=c("SourceID"))
Sourcewise$SourceID<-NULL
# Earlier exploratory plots, kept for reference:
# Sourcewise<-cbind(Sourcewise[,c(1,4)],scale(Sourcewise[,2:3],scale = F))
# str(Sourcewise)
# ggplot(Sourcewise) + geom_line(aes(x=Year,y=MarketingCost,group=1,color="MarketingCost")) + geom_line(aes(x=Year,y=CustomerCount,group=1,color="CC")) +facet_wrap(~SourceName,scales="free") +
#   scale_color_manual(name = "combined legend",
#                      values=c(MarketingCost = "red", CC = "blue"))
#
# ggplot(Sourcewise) + geom_line(aes(y=CustomerCount,x=MarketingCost,group=1,color="MarketingCost")) + #+ geom_line(aes(x=Year,y=CustomerCount,group=1,color="CC"))
#   facet_wrap(~SourceName,scales="free") +
#   scale_color_manual(name = "combined legend",
#                      values=c(MarketingCost = "red", CC = "blue"))
# Scale cost and customer count within each source so both series can be
# compared on a common axis. (Leaves Sourcewise grouped by SourceName.)
Sourcewise <- Sourcewise %>% group_by(SourceName) %>% mutate(scaledcc=scale(CustomerCount),scaledmc=scale(MarketingCost))
# Quick look at how many customer records each source has.
qplot(customers$SourceID)
# Marketing spend per source, faceted by year.
ggplot(Sourcewise) + geom_bar(stat = "identity",aes(x= SourceName,y = MarketingCost,fill=as.factor(SourceName)),show.legend = FALSE)+ facet_wrap(~Year,scales="free") + theme(axis.text.x = element_text(angle=45, vjust=1, size=8, hjust=1)) + scale_x_discrete(label=function(x) abbreviate(x, minlength=9))+labs(x=NULL,y="Marketing Spend",title= "Marketing Spend each year")+ scale_fill_brewer(palette="PuOr")
# Customers acquired per source, faceted by year.
ggplot(Sourcewise) + geom_bar(stat = "identity",aes(x= SourceName,y = CustomerCount,fill=as.factor(SourceName)),show.legend = FALSE) + facet_wrap(~Year,scales = "free")+ theme(axis.text.x = element_text(angle=45, vjust=1, size=8, hjust=1)) + scale_x_discrete(label=function(x) abbreviate(x, minlength=9))+labs(x=NULL,y="Marketing Spend",title= "Customers acquired each year") + scale_fill_brewer(palette="PuOr")
# Cheapest source to acquire customers: spend divided by customers acquired.
Sourcewise$mcpercust<- Sourcewise$MarketingCost/Sourcewise$CustomerCount
head(Sourcewise)
# Marketing spend per acquired customer, faceted by year.
ggplot(Sourcewise) + geom_bar(stat = "identity",aes(x= SourceName,y = mcpercust,fill=as.factor(SourceName)),show.legend = F)+ facet_wrap(~Year,scales="free_x") +ggtitle("Marketing cost per Customers acquired") +xlab("Source Name") + ylab("Marketing Cost") + guides(fill=guide_legend(title="Sources"))+ theme(axis.text.x = element_text(angle=45, vjust=1, size=8, hjust=1)) + scale_x_discrete(label=function(x) abbreviate(x, minlength=9))+labs(x=NULL,y="Marketing Spend\nper customer acquired",title= "Marketing spend to acquire a customer") + scale_fill_brewer(palette="PuOr")
# No diminishing returns visible yet, so keep investing!
ggplot(Sourcewise) + geom_line(aes(Year,scaledmc,color="Marketing Cost"),group=1) + geom_line(aes(Year,scaledcc,color="Customer Count"),group=1) +facet_wrap(~SourceName,scales = "free_x")+labs(y="Scaled Values",title="Scaled comparision of Marketing cost & Customers acquired",x=NULL) + scale_colour_manual("", values = c("Marketing Cost"="cadetblue", "Customer Count"="brown"))
|
#' Displays the content of variables.
#'
#' R implementation of the SPSS \code{LIST} function.
#'
#' LIST displays the content of selected variables. It is possible to display a
#' subsequence of cases with the \code{cases} argument: \code{from} determines
#' the beginning of the sequence, \code{to} determines the end of the sequence
#' and \code{by} determines the increment of the sequence.
#'
#' @usage xpssList(x, variables = colnames(x), cases = list(from = 1, to = nrow(x), by = 1))
#'
#' @param x a (non-empty) data.frame or input data of class \code{xpssFrame}.
#' @param variables atomic character or character vector with the names of the variables.
#' @param cases list containing the arguments from, to, by. All parameters are atomic numerics. See Details for more.
#' @return A data.frame with case values for the specified variables in the
#' dataset. If cases and variables are not specified, xpssList returns the
#' complete dataset. If cases are specified, the output is the user-defined
#' sequence of rows.
#' @author Bastian Wiessner
#' @examples
#' data(fromXPSS)
#'
#' xpssList(x=fromXPSS)
#'
#' xpssList(x=fromXPSS,
#'    variables = "V1")
#'
#' xpssList(x=fromXPSS,
#'    variables = c("V1","V2"))
#'
#'
#' xpssList(x=fromXPSS,
#'    variables = span(fromXPSS,
#'                from="V1",
#'                to="V4"),
#'    cases =list(from=2,
#'              to=18,
#'              by=2))
#'
#' @export
xpssList <- function(x,
                     variables = colnames(x),
                     cases = list(from = 1,
                                  to = nrow(x),
                                  by = 1))
{
  # used by the package's meta-data bookkeeping (applyMetaCheck reads it
  # from this frame) -- do not remove; TODO(review): confirm
  functiontype <- "SB"
  x <- applyMetaCheck(x)

  # ---- input validation -----------------------------------------------
  if (!is.numeric(cases$from) || !is.numeric(cases$to) || !is.numeric(cases$by)) {
    stop("the arguments from, to and by have to be numeric")
  }
  if (cases$from < 1) {
    stop("argument from has to be at least 1")
  }
  if (cases$to > nrow(x)) {
    stop("argument to is bigger than the dataset")
  }
  if (is.character(variables)) {
    missingVars <- variables[!(variables %in% colnames(x))]
    if (length(missingVars) > 0) {
      stop("unknown variables: ", paste(missingVars, collapse = ", "))
    }
  }

  # row positions of the requested case sequence
  pos <- seq(cases$from, cases$to, cases$by)

  # Display names of the selected variables (variables may also be given
  # as numeric column positions).
  varNames <- if (is.character(variables)) variables else colnames(x)[variables]

  # One plain column per requested variable, restricted to the selected
  # cases. (The previous implementation named the columns after the first
  # columns of x instead of the selected variables and, for more than one
  # variable, nested data.frames inside the result.)
  erg <- as.data.frame(lapply(variables, function(v) x[, v][pos]),
                       stringsAsFactors = FALSE)
  names(erg) <- varNames

  return(erg)
}
| /R/xpssList.R | no_license | cran/translateSPSS2R | R | false | false | 2,389 | r | #' Displays the content of variables.
#'
#' R implementation of the SPSS \code{LIST} function.
#'
#' LIST displays the content of selected variables. It is possible to display a
#' subsequence of cases with the \code{cases} argument: \code{from} determines
#' the beginning of the sequence, \code{to} determines the end of the sequence
#' and \code{by} determines the increment of the sequence.
#'
#' @usage xpssList(x, variables = colnames(x), cases = list(from = 1, to = nrow(x), by = 1))
#'
#' @param x a (non-empty) data.frame or input data of class \code{xpssFrame}.
#' @param variables atomic character or character vector with the names of the variables.
#' @param cases list containing the arguments from, to, by. All parameters are atomic numerics. See Details for more.
#' @return A data.frame with case values for the specified variables in the
#' dataset. If cases and variables are not specified, xpssList returns the
#' complete dataset. If cases are specified, the output is the user-defined
#' sequence of rows.
#' @author Bastian Wiessner
#' @examples
#' data(fromXPSS)
#'
#' xpssList(x=fromXPSS)
#'
#' xpssList(x=fromXPSS,
#'    variables = "V1")
#'
#' xpssList(x=fromXPSS,
#'    variables = c("V1","V2"))
#'
#'
#' xpssList(x=fromXPSS,
#'    variables = span(fromXPSS,
#'                from="V1",
#'                to="V4"),
#'    cases =list(from=2,
#'              to=18,
#'              by=2))
#'
#' @export
xpssList <- function(x,
                     variables = colnames(x),
                     cases = list(from = 1,
                                  to = nrow(x),
                                  by = 1))
{
  # used by the package's meta-data bookkeeping (applyMetaCheck reads it
  # from this frame) -- do not remove; TODO(review): confirm
  functiontype <- "SB"
  x <- applyMetaCheck(x)

  # ---- input validation -----------------------------------------------
  if (!is.numeric(cases$from) || !is.numeric(cases$to) || !is.numeric(cases$by)) {
    stop("the arguments from, to and by have to be numeric")
  }
  if (cases$from < 1) {
    stop("argument from has to be at least 1")
  }
  if (cases$to > nrow(x)) {
    stop("argument to is bigger than the dataset")
  }
  if (is.character(variables)) {
    missingVars <- variables[!(variables %in% colnames(x))]
    if (length(missingVars) > 0) {
      stop("unknown variables: ", paste(missingVars, collapse = ", "))
    }
  }

  # row positions of the requested case sequence
  pos <- seq(cases$from, cases$to, cases$by)

  # Display names of the selected variables (variables may also be given
  # as numeric column positions).
  varNames <- if (is.character(variables)) variables else colnames(x)[variables]

  # One plain column per requested variable, restricted to the selected
  # cases. (The previous implementation named the columns after the first
  # columns of x instead of the selected variables and, for more than one
  # variable, nested data.frames inside the result.)
  erg <- as.data.frame(lapply(variables, function(v) x[, v][pos]),
                       stringsAsFactors = FALSE)
  names(erg) <- varNames

  return(erg)
}
|
# Install and load packages
# NOTE(review): this is a generated/template script; tokens such as
# '.year.', '.FYC.' and the '.yy.' suffixes are placeholders that are
# substituted with concrete year values when the script is rendered.
package_names <- c("survey","dplyr","foreign","devtools")
# Install any missing packages, then attach them all.
lapply(package_names, function(x) if(!x %in% installed.packages()) install.packages(x))
lapply(package_names, require, character.only=T)
install_github("e-mitchell/meps_r_pkg/MEPS")
library(MEPS)
# Survey option: handle strata with a single PSU by centering at the
# grand mean instead of raising an error.
options(survey.lonely.psu="adjust")
# Load FYC file (MEPS full-year consolidated file, SAS format)
FYC <- read_sas('C:/MEPS/.FYC..sas7bdat');
year <- .year.
# Harmonize variable names across MEPS panel years.
if(year <= 2001) FYC <- FYC %>% mutate(VARPSU = VARPSU.yy., VARSTR=VARSTR.yy.)
if(year <= 1998) FYC <- FYC %>% rename(PERWT.yy.F = WTDPER.yy.)
if(year == 1996) FYC <- FYC %>% mutate(AGE42X = AGE2X, AGE31X = AGE1X)
# Replace negative (missing-coded) ages with NA, then take the last
# available age in the priority order AGE.yy.X, AGE42X, AGE31X.
FYC <- FYC %>%
  mutate_at(vars(starts_with("AGE")),funs(replace(., .< 0, NA))) %>%
  mutate(AGELAST = coalesce(AGE.yy.X, AGE42X, AGE31X))
FYC$ind = 1
# Add aggregate sources of payment
if(year <= 1999)
  FYC <- FYC %>% mutate(TOTTRI.yy. = TOTCHM.yy.)
# OTH aggregates the TOTOFD/TOTSTL/TOTOPR/TOTOPU/TOTOSR components;
# OTZ adds workers' compensation and VA on top; PTR = private + TRICARE.
FYC <- FYC %>% mutate(
  TOTOTH.yy. = TOTOFD.yy. + TOTSTL.yy. + TOTOPR.yy. + TOTOPU.yy. + TOTOSR.yy.,
  TOTOTZ.yy. = TOTOTH.yy. + TOTWCP.yy. + TOTVA.yy.,
  TOTPTR.yy. = TOTPRV.yy. + TOTTRI.yy.)
# Age groups
# To compute for all age groups, replace 'agegrps' in the 'svyby' function with 'agegrps_v2X' or 'agegrps_v3X'
FYC <- FYC %>%
  mutate(agegrps = cut(AGELAST,
    breaks = c(-1, 4.5, 17.5, 44.5, 64.5, Inf),
    labels = c("Under 5","5-17","18-44","45-64","65+"))) %>%
  mutate(agegrps_v2X = cut(AGELAST,
    breaks = c(-1, 17.5 ,64.5, Inf),
    labels = c("Under 18","18-64","65+"))) %>%
  mutate(agegrps_v3X = cut(AGELAST,
    breaks = c(-1, 4.5, 6.5, 12.5, 17.5, 18.5, 24.5, 29.5, 34.5, 44.5, 54.5, 64.5, Inf),
    labels = c("Under 5", "5-6", "7-12", "13-17", "18", "19-24", "25-29",
               "30-34", "35-44", "45-54", "55-64", "65+")))
# Complex survey design object used for all variance estimation below.
FYCdsgn <- svydesign(
  id = ~VARPSU,
  strata = ~VARSTR,
  weights = ~PERWT.yy.F,
  data = FYC,
  nest = TRUE)
# Loop over sources of payment
# Survey-weighted expenditure totals per payment source, by age group.
sops <- c("EXP", "SLF", "PTR", "MCR", "MCD", "OTZ")
results <- list()
for(sp in sops) {
  key <- paste0("TOT", sp)
  formula <- as.formula(sprintf("~%s.yy.", key))
  results[[key]] <- svyby(formula, FUN = svytotal, by = ~agegrps, design = FYCdsgn)
}
print(results)
| /mepstrends/hc_use/json/code/r/totEXP__agegrps__sop__.r | permissive | HHS-AHRQ/MEPS-summary-tables | R | false | false | 2,212 | r | # Install and load packages
# NOTE(review): this is a generated/template script; tokens such as
# '.year.', '.FYC.' and the '.yy.' suffixes are placeholders that are
# substituted with concrete year values when the script is rendered.
package_names <- c("survey","dplyr","foreign","devtools")
# Install any missing packages, then attach them all.
lapply(package_names, function(x) if(!x %in% installed.packages()) install.packages(x))
lapply(package_names, require, character.only=T)
install_github("e-mitchell/meps_r_pkg/MEPS")
library(MEPS)
# Survey option: handle strata with a single PSU by centering at the
# grand mean instead of raising an error.
options(survey.lonely.psu="adjust")
# Load FYC file (MEPS full-year consolidated file, SAS format)
FYC <- read_sas('C:/MEPS/.FYC..sas7bdat');
year <- .year.
# Harmonize variable names across MEPS panel years.
if(year <= 2001) FYC <- FYC %>% mutate(VARPSU = VARPSU.yy., VARSTR=VARSTR.yy.)
if(year <= 1998) FYC <- FYC %>% rename(PERWT.yy.F = WTDPER.yy.)
if(year == 1996) FYC <- FYC %>% mutate(AGE42X = AGE2X, AGE31X = AGE1X)
# Replace negative (missing-coded) ages with NA, then take the last
# available age in the priority order AGE.yy.X, AGE42X, AGE31X.
FYC <- FYC %>%
  mutate_at(vars(starts_with("AGE")),funs(replace(., .< 0, NA))) %>%
  mutate(AGELAST = coalesce(AGE.yy.X, AGE42X, AGE31X))
FYC$ind = 1
# Add aggregate sources of payment
if(year <= 1999)
  FYC <- FYC %>% mutate(TOTTRI.yy. = TOTCHM.yy.)
# OTH aggregates the TOTOFD/TOTSTL/TOTOPR/TOTOPU/TOTOSR components;
# OTZ adds workers' compensation and VA on top; PTR = private + TRICARE.
FYC <- FYC %>% mutate(
  TOTOTH.yy. = TOTOFD.yy. + TOTSTL.yy. + TOTOPR.yy. + TOTOPU.yy. + TOTOSR.yy.,
  TOTOTZ.yy. = TOTOTH.yy. + TOTWCP.yy. + TOTVA.yy.,
  TOTPTR.yy. = TOTPRV.yy. + TOTTRI.yy.)
# Age groups
# To compute for all age groups, replace 'agegrps' in the 'svyby' function with 'agegrps_v2X' or 'agegrps_v3X'
FYC <- FYC %>%
  mutate(agegrps = cut(AGELAST,
    breaks = c(-1, 4.5, 17.5, 44.5, 64.5, Inf),
    labels = c("Under 5","5-17","18-44","45-64","65+"))) %>%
  mutate(agegrps_v2X = cut(AGELAST,
    breaks = c(-1, 17.5 ,64.5, Inf),
    labels = c("Under 18","18-64","65+"))) %>%
  mutate(agegrps_v3X = cut(AGELAST,
    breaks = c(-1, 4.5, 6.5, 12.5, 17.5, 18.5, 24.5, 29.5, 34.5, 44.5, 54.5, 64.5, Inf),
    labels = c("Under 5", "5-6", "7-12", "13-17", "18", "19-24", "25-29",
               "30-34", "35-44", "45-54", "55-64", "65+")))
# Complex survey design object used for all variance estimation below.
FYCdsgn <- svydesign(
  id = ~VARPSU,
  strata = ~VARSTR,
  weights = ~PERWT.yy.F,
  data = FYC,
  nest = TRUE)
# Loop over sources of payment
# Survey-weighted expenditure totals per payment source, by age group.
sops <- c("EXP", "SLF", "PTR", "MCR", "MCD", "OTZ")
results <- list()
for(sp in sops) {
  key <- paste0("TOT", sp)
  formula <- as.formula(sprintf("~%s.yy.", key))
  results[[key]] <- svyby(formula, FUN = svytotal, by = ~agegrps, design = FYCdsgn)
}
print(results)
|
#' Plot representation of contrast matrix
#'
#' Plot contrast matrix to clarify interpretation of hypothesis tests with linear contrasts
#'
#' @param L contrast matrix
#'
#' @return
#' ggplot2 object
#'
#' @examples
#'
#' # load library
#' # library(variancePartition)
#'
#' # load simulated data:
#' # geneExpr: matrix of gene expression values
#' # info: information/metadata about each sample
#' data(varPartData)
#'
#' # 1) get contrast matrix testing if the coefficient for Batch2 is zero
#' # 2) get contrast matrix testing if the coefficient for Batch2 is different from Batch3
#' form <- ~ Batch + (1|Individual) + (1|Tissue)
#' L = makeContrastsDream(form, info, contrasts=c("Batch2", Batch_3_vs_2 = "Batch3 - Batch2"))
#'
#' # plot contrasts
#' plotContrasts( L )
#'
#' @importFrom reshape2 melt
#' @import ggplot2
#' @export
plotContrasts <- function(L) {

  # Accept a single contrast supplied as a numeric vector
  if (is(L, "numeric")) {
    L <- as.matrix(L, ncol = 1)
  }

  # Default contrast names when none are given
  if (is.null(colnames(L))) {
    colnames(L) <- paste0("L", seq_len(ncol(L)))
  }

  # Contrast names must be unique
  if (length(unique(colnames(L))) != ncol(L)) {
    stop(paste("Contrast names must be unique: ", paste(colnames(L), collapse = ", ")))
  }

  # NOTE(review): a check that every contrast sums to zero used to live
  # here but was disabled in the original source (single-coefficient tests
  # do not sum to zero); the dead computation has been removed.

  # long format: one row per (contrast, variable) pair
  df <- melt(t(L))
  colnames(df)[1:2] <- c("Var1", "Var2")
  df$Var1 <- factor(df$Var1)
  # A single unnamed contrast is labelled "1" by melt; show an empty label
  # instead.
  if (identical(levels(df$Var1), "1")) {
    df$Var1 <- factor(rep("", nrow(df)))
  }

  # satisfy R CMD check for ggplot2's non-standard evaluation
  Var1 <- Var2 <- value <- NULL

  # aspect ratio = #contrasts / #variables keeps the tiles square
  h <- length(unique(df$Var1))
  w <- length(unique(df$Var2))

  ggplot(df, aes(Var2, y = Var1, fill = value)) +
    geom_tile(color = "black") +
    theme_minimal() +
    theme(aspect.ratio = h / w,
          panel.grid.major = element_blank(),
          panel.grid.minor = element_blank(),
          plot.title = element_text(hjust = 0.5),
          axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1)) +
    scale_fill_gradient2(name = "Contrast coef", limits = c(-1, 1),
                         low = alpha("blue", .8), mid = "white",
                         high = alpha("red", .8)) +
    xlab("Variable") +
    ylab("Contrasts") +
    ggtitle("Graphical representation of linear contrasts") +
    geom_text(aes(label = round(value, 2)), fontface = "bold")
}
| /R/plotContrasts.R | no_license | DarwinAwardWinner/variancePartition | R | false | false | 2,273 | r |
#'
#' Plot contrast matrix to clarify interpretation of hypothesis tests with linear contrasts
#'
#' @param L contrast matrix
#'
#' @return
#' ggplot2 object
#'
#' @examples
#'
#' # load library
#' # library(variancePartition)
#'
#' # load simulated data:
#' # geneExpr: matrix of gene expression values
#' # info: information/metadata about each sample
#' data(varPartData)
#'
#' # 1) get contrast matrix testing if the coefficient for Batch2 is zero
#' # 2) get contrast matrix testing if the coefficient for Batch2 is different from Batch3
#' form <- ~ Batch + (1|Individual) + (1|Tissue)
#' L = makeContrastsDream(form, info, contrasts=c("Batch2", Batch_3_vs_2 = "Batch3 - Batch2"))
#'
#' # plot contrasts
#' plotContrasts( L )
#'
#' @importFrom reshape2 melt
#' @import ggplot2
#' @export
plotContrasts <- function(L) {

  # Accept a single contrast supplied as a numeric vector
  if (is(L, "numeric")) {
    L <- as.matrix(L, ncol = 1)
  }

  # Default contrast names when none are given
  if (is.null(colnames(L))) {
    colnames(L) <- paste0("L", seq_len(ncol(L)))
  }

  # Contrast names must be unique
  if (length(unique(colnames(L))) != ncol(L)) {
    stop(paste("Contrast names must be unique: ", paste(colnames(L), collapse = ", ")))
  }

  # NOTE(review): a check that every contrast sums to zero used to live
  # here but was disabled in the original source (single-coefficient tests
  # do not sum to zero); the dead computation has been removed.

  # long format: one row per (contrast, variable) pair
  df <- melt(t(L))
  colnames(df)[1:2] <- c("Var1", "Var2")
  df$Var1 <- factor(df$Var1)
  # A single unnamed contrast is labelled "1" by melt; show an empty label
  # instead.
  if (identical(levels(df$Var1), "1")) {
    df$Var1 <- factor(rep("", nrow(df)))
  }

  # satisfy R CMD check for ggplot2's non-standard evaluation
  Var1 <- Var2 <- value <- NULL

  # aspect ratio = #contrasts / #variables keeps the tiles square
  h <- length(unique(df$Var1))
  w <- length(unique(df$Var2))

  ggplot(df, aes(Var2, y = Var1, fill = value)) +
    geom_tile(color = "black") +
    theme_minimal() +
    theme(aspect.ratio = h / w,
          panel.grid.major = element_blank(),
          panel.grid.minor = element_blank(),
          plot.title = element_text(hjust = 0.5),
          axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1)) +
    scale_fill_gradient2(name = "Contrast coef", limits = c(-1, 1),
                         low = alpha("blue", .8), mid = "white",
                         high = alpha("red", .8)) +
    xlab("Variable") +
    ylab("Contrasts") +
    ggtitle("Graphical representation of linear contrasts") +
    geom_text(aes(label = round(value, 2)), fontface = "bold")
}
|
test_that("predict for location shift models runs without errors", {
  # Minimal fake posterior-draws object: 30 samples, 10 observations.
  n_samples <- 30
  n_obs <- 10
  draws <- structure(list(nsamples = n_samples), class = "brmsdraws")
  draws$dpars <- list(
    mu = matrix(rnorm(n_samples * n_obs), ncol = n_obs),
    sigma = rchisq(n_samples, 3),
    nu = rgamma(n_samples, 4)
  )
  # Predict a single randomly chosen observation with each family;
  # every call must yield one value per posterior sample.
  obs <- sample(n_obs, 1)
  for (predict_fun in list(brms:::predict_gaussian, brms:::predict_student)) {
    pred <- predict_fun(obs, draws = draws)
    expect_equal(length(pred), n_samples)
  }
})
test_that("predict for various skewed models runs without errors", {
  # Fake draws carrying every parameter used by the skewed families below.
  n_samples <- 50
  n_obs <- 2
  draws <- structure(list(nsamples = n_samples), class = "brmsdraws")
  draws$dpars <- list(
    sigma = rchisq(n_samples, 3), beta = rchisq(n_samples, 3),
    mu = matrix(rnorm(n_samples * n_obs), ncol = n_obs),
    alpha = rnorm(n_samples), ndt = 1
  )
  skewed_predictors <- list(
    brms:::predict_lognormal,
    brms:::predict_shifted_lognormal,
    brms:::predict_exgaussian,
    brms:::predict_skew_normal
  )
  # Each family must return one prediction per posterior sample.
  for (predict_fun in skewed_predictors) {
    expect_equal(length(predict_fun(1, draws = draws)), n_samples)
  }
})
test_that("predict for aysm_laplace models runs without errors", {
ns <- 50
draws <- structure(list(nsamples = ns), class = "brmsdraws")
draws$dpars <- list(
sigma = rchisq(ns, 3), quantile = rbeta(ns, 2, 1),
mu = matrix(rnorm(ns*2), ncol = 2)
)
pred <- brms:::predict_asym_laplace(1, draws = draws)
expect_equal(length(pred), ns)
})
test_that("predict for multivariate linear models runs without errors", {
ns <- 10
nvars <- 3
ncols <- 4
nobs <- nvars * ncols
Sigma = array(cov(matrix(rnorm(300), ncol = 3)), dim = c(3, 3, 10))
draws <- structure(list(nsamples = ns), class = "mvbrmsdraws")
draws$mvpars <- list(
Mu = array(rnorm(ns*nobs*nvars), dim = c(ns, nobs, nvars)),
Sigma = aperm(Sigma, c(3, 1, 2))
)
draws$dpars <- list(nu = rgamma(ns, 5))
draws$data <- list(N = nobs, N_trait = ncols)
pred <- brms:::predict_gaussian_mv(1, draws = draws)
expect_equal(dim(pred), c(ns, nvars))
pred <- brms:::predict_student_mv(2, draws = draws)
expect_equal(dim(pred), c(ns, nvars))
})
test_that("predict for ARMA covariance models runs without errors", {
ns <- 20
nobs <- 15
draws <- structure(list(nsamples = ns), class = "brmsdraws")
draws$dpars <- list(
mu = matrix(rnorm(ns*nobs), ncol = nobs),
sigma = rchisq(ns, 3),
nu = rgamma(ns, 5)
)
draws$ac <- list(
ar = matrix(rbeta(ns, 0.5, 0.5), ncol = 1),
ma = matrix(rnorm(ns, 0.2, 1), ncol = 1),
begin_tg = c(1, 5, 12), nobs_tg = c(4, 7, 3)
)
draws$data <- list(se = rgamma(ns, 10))
draws$f$fun <- "gaussian_cov"
pred <- brms:::predict_gaussian_cov(1, draws = draws)
expect_equal(length(pred), ns * 4)
draws$f$fun <- "student_cov"
pred <- brms:::predict_student_cov(2, draws = draws)
expect_equal(length(pred), ns * 7)
})
test_that("loglik for SAR models runs without errors", {
ns = 3
draws <- structure(list(nsamples = ns, nobs = 10), class = "brmsdraws")
draws$dpars <- list(
mu = matrix(rnorm(30), nrow = ns),
nu = rep(2, ns),
sigma = rep(10, ns)
)
draws$ac <- list(lagsar = matrix(c(0.3, 0.5, 0.7)), W = diag(10))
pred <- brms:::predict_gaussian_lagsar(1, draws = draws)
expect_equal(dim(pred), c(3, 10))
pred <- brms:::predict_student_lagsar(1, draws = draws)
expect_equal(dim(pred), c(3, 10))
draws$ac$errorsar <- draws$ac$lagsar
draws$ac$lagsar <- NULL
pred <- brms:::predict_gaussian_errorsar(1, draws = draws)
expect_equal(dim(pred), c(3, 10))
pred <- brms:::predict_student_errorsar(1, draws = draws)
expect_equal(dim(pred), c(3, 10))
})
test_that("predict for 'cor_fixed' models runs without errors", {
ns <- 3
draws <- structure(list(nsamples = ns), class = "brmsdraws")
draws$dpars <- list(
mu = matrix(rnorm(30), nrow = ns),
nu = rep(2, ns)
)
draws$ac <- list(V = diag(10))
pred <- brms:::predict_gaussian_fixed(1, draws = draws)
expect_equal(dim(pred), c(3, 10))
pred <- brms:::predict_student_fixed(1, draws = draws)
expect_equal(dim(pred), c(3, 10))
})
test_that("predict for count and survival models runs without errors", {
ns <- 25
nobs <- 10
trials <- sample(10:30, nobs, replace = TRUE)
draws <- structure(list(nsamples = ns, nobs = nobs), class = "brmsdraws")
draws$dpars <- list(
eta = matrix(rnorm(ns*nobs), ncol = nobs),
shape = rgamma(ns, 4), xi = 0
)
draws$dpars$nu <- draws$dpars$sigma <- draws$dpars$shape + 1
draws$data <- list(trials = trials)
i <- sample(nobs, 1)
draws$dpars$mu <- brms:::inv_cloglog(draws$dpars$eta)
pred <- brms:::predict_binomial(i, draws = draws)
expect_equal(length(pred), ns)
draws$dpars$mu <- exp(draws$dpars$eta)
pred <- brms:::predict_poisson(i, draws = draws)
expect_equal(length(pred), ns)
pred <- brms:::predict_negbinomial(i, draws = draws)
expect_equal(length(pred), ns)
pred <- brms:::predict_geometric(i, draws = draws)
expect_equal(length(pred), ns)
pred <- brms:::predict_exponential(i, draws = draws)
expect_equal(length(pred), ns)
pred <- brms:::predict_gamma(i, draws = draws)
expect_equal(length(pred), ns)
pred <- brms:::predict_frechet(i, draws = draws)
expect_equal(length(pred), ns)
pred <- brms:::predict_inverse.gaussian(i, draws = draws)
expect_equal(length(pred), ns)
pred <- brms:::predict_gen_extreme_value(i, draws = draws)
expect_equal(length(pred), ns)
draws$f$link <- "log"
pred <- brms:::predict_weibull(i, draws = draws)
expect_equal(length(pred), ns)
})
test_that("predict for bernoulli and beta models works correctly", {
ns <- 17
nobs <- 10
draws <- structure(list(nsamples = ns, nobs = nobs), class = "brmsdraws")
draws$dpars <- list(
mu = brms:::inv_logit(matrix(rnorm(ns * nobs * 2), ncol = 2 * nobs)),
phi = rgamma(ns, 4)
)
i <- sample(1:nobs, 1)
pred <- brms:::predict_bernoulli(i, draws = draws)
expect_equal(length(pred), ns)
pred <- brms:::predict_beta(i, draws = draws)
expect_equal(length(pred), ns)
})
test_that("predict for circular models runs without errors", {
ns <- 15
nobs <- 10
draws <- structure(list(nsamples = ns, nobs = nobs), class = "brmsdraws")
draws$dpars <- list(
mu = 2 * atan(matrix(rnorm(ns * nobs * 2), ncol = nobs * 2)),
kappa = rgamma(ns, 4)
)
i <- sample(seq_len(nobs), 1)
pred <- brms:::predict_von_mises(i, draws = draws)
expect_equal(length(pred), ns)
})
test_that("predict for zero-inflated and hurdle models runs without erros", {
ns <- 50
nobs <- 8
trials <- sample(10:30, nobs, replace = TRUE)
draws <- structure(list(nsamples = ns, nobs = nobs), class = "brmsdraws")
draws$dpars <- list(
eta = matrix(rnorm(ns * nobs * 2), ncol = nobs * 2),
shape = rgamma(ns, 4), phi = rgamma(ns, 1),
zi = rbeta(ns, 1, 1), coi = rbeta(ns, 5, 7)
)
draws$dpars$hu <- draws$dpars$zoi <- draws$dpars$zi
draws$data <- list(trials = trials)
draws$dpars$mu <- exp(draws$dpars$eta)
pred <- brms:::predict_hurdle_poisson(1, draws = draws)
expect_equal(length(pred), ns)
pred <- brms:::predict_hurdle_negbinomial(2, draws = draws)
expect_equal(length(pred), ns)
pred <- brms:::predict_hurdle_gamma(5, draws = draws)
expect_equal(length(pred), ns)
pred <- brms:::predict_zero_inflated_poisson(3, draws = draws)
expect_equal(length(pred), ns)
pred <- brms:::predict_zero_inflated_negbinomial(6, draws = draws)
expect_equal(length(pred), ns)
draws$dpars$mu <- brms:::inv_logit(draws$dpars$eta)
pred <- brms:::predict_zero_inflated_binomial(4, draws = draws)
expect_equal(length(pred), ns)
pred <- brms:::predict_zero_inflated_beta(8, draws = draws)
expect_equal(length(pred), ns)
pred <- brms:::predict_zero_one_inflated_beta(7, draws = draws)
expect_equal(length(pred), ns)
})
test_that("predict for ordinal models runs without erros", {
ns <- 50
nobs <- 8
ncat <- 4
draws <- structure(list(nsamples = ns, nobs = nobs), class = "brmsdraws")
draws$dpars <- list(
mu = array(rnorm(ns*nobs), dim = c(ns, nobs, ncat)),
disc = rexp(ns)
)
draws$data <- list(Y = rep(1:ncat, 2), ncat = ncat)
draws$f$link <- "logit"
pred <- sapply(1:nobs, brms:::predict_cumulative, draws = draws)
expect_equal(dim(pred), c(ns, nobs))
pred <- sapply(1:nobs, brms:::predict_sratio, draws = draws)
expect_equal(dim(pred), c(ns, nobs))
pred <- sapply(1:nobs, brms:::predict_cratio, draws = draws)
expect_equal(dim(pred), c(ns, nobs))
pred <- sapply(1:nobs, brms:::predict_acat, draws = draws)
expect_equal(dim(pred), c(ns, nobs))
draws$f$link <- "probit"
pred <- sapply(1:nobs, brms:::predict_acat, draws = draws)
expect_equal(dim(pred), c(ns, nobs))
})
test_that("loglik for categorical models runs without erros", {
ns <- 50
nobs <- 8
ncat <- 3
draws <- structure(list(nsamples = ns, nobs = nobs), class = "brmsdraws")
draws$dpars <- list(
mu1 = array(rnorm(ns*nobs), dim = c(ns, nobs)),
mu2 = array(rnorm(ns*nobs), dim = c(ns, nobs))
)
draws$data <- list(Y = rep(1:ncat, 2), ncat = ncat)
draws$f$link <- "logit"
pred <- sapply(1:nobs, brms:::predict_categorical, draws = draws)
expect_equal(dim(pred), c(ns, nobs))
})
test_that("truncated predict run without errors", {
ns <- 30
nobs <- 15
draws <- structure(list(nsamples = ns, nobs = nobs), class = "brmsdraws")
draws$dpars <- list(
mu = matrix(rnorm(ns * nobs), ncol = nobs),
sigma = rchisq(ns, 3)
)
draws$data <- list(lb = sample(-(4:7), nobs, TRUE))
pred <- sapply(1:nobs, brms:::predict_gaussian, draws = draws)
expect_equal(dim(pred), c(ns, nobs))
draws$dpars$mu <- exp(draws$dpars$mu)
draws$data <- list(ub = sample(70:80, nobs, TRUE))
pred <- sapply(1:nobs, brms:::predict_poisson, draws = draws)
expect_equal(dim(pred), c(ns, nobs))
draws$data <- list(lb = rep(0, nobs), ub = sample(70:75, nobs, TRUE))
pred <- sapply(1:nobs, brms:::predict_poisson, draws = draws)
expect_equal(dim(pred), c(ns, nobs))
})
test_that("predict for the wiener diffusion model runs without errors", {
ns <- 5
nobs <- 3
draws <- structure(list(nsamples = ns, nobs = nobs), class = "brmsdraws")
draws$dpars <- list(
mu = matrix(rnorm(ns * nobs), ncol = nobs),
bs = rchisq(ns, 3), ndt = rep(0.5, ns),
bias = rbeta(ns, 1, 1)
)
draws$data <- list(Y = abs(rnorm(ns)) + 0.5, dec = c(1, 0, 1))
i <- sample(1:nobs, 1)
expect_equal(nrow(brms:::predict_wiener(i, draws)), ns)
})
| /tests/testthat/tests.predict.R | no_license | JayKimBravekjh/brms | R | false | false | 10,610 | r | test_that("predict for location shift models runs without errors", {
ns <- 30
nobs <- 10
draws <- structure(list(nsamples = ns), class = "brmsdraws")
draws$dpars <- list(
mu = matrix(rnorm(ns * nobs), ncol = nobs),
sigma = rchisq(ns, 3), nu = rgamma(ns, 4)
)
i <- sample(nobs, 1)
pred <- brms:::predict_gaussian(i, draws = draws)
expect_equal(length(pred), ns)
pred <- brms:::predict_student(i, draws = draws)
expect_equal(length(pred), ns)
})
test_that("predict for various skewed models runs without errors", {
ns <- 50
nobs <- 2
draws <- structure(list(nsamples = ns), class = "brmsdraws")
draws$dpars <- list(
sigma = rchisq(ns, 3), beta = rchisq(ns, 3),
mu = matrix(rnorm(ns * nobs), ncol = nobs),
alpha = rnorm(ns), ndt = 1
)
pred <- brms:::predict_lognormal(1, draws = draws)
expect_equal(length(pred), ns)
pred <- brms:::predict_shifted_lognormal(1, draws = draws)
expect_equal(length(pred), ns)
pred <- brms:::predict_exgaussian(1, draws = draws)
expect_equal(length(pred), ns)
pred <- brms:::predict_skew_normal(1, draws = draws)
expect_equal(length(pred), ns)
})
test_that("predict for aysm_laplace models runs without errors", {
ns <- 50
draws <- structure(list(nsamples = ns), class = "brmsdraws")
draws$dpars <- list(
sigma = rchisq(ns, 3), quantile = rbeta(ns, 2, 1),
mu = matrix(rnorm(ns*2), ncol = 2)
)
pred <- brms:::predict_asym_laplace(1, draws = draws)
expect_equal(length(pred), ns)
})
test_that("predict for multivariate linear models runs without errors", {
ns <- 10
nvars <- 3
ncols <- 4
nobs <- nvars * ncols
Sigma = array(cov(matrix(rnorm(300), ncol = 3)), dim = c(3, 3, 10))
draws <- structure(list(nsamples = ns), class = "mvbrmsdraws")
draws$mvpars <- list(
Mu = array(rnorm(ns*nobs*nvars), dim = c(ns, nobs, nvars)),
Sigma = aperm(Sigma, c(3, 1, 2))
)
draws$dpars <- list(nu = rgamma(ns, 5))
draws$data <- list(N = nobs, N_trait = ncols)
pred <- brms:::predict_gaussian_mv(1, draws = draws)
expect_equal(dim(pred), c(ns, nvars))
pred <- brms:::predict_student_mv(2, draws = draws)
expect_equal(dim(pred), c(ns, nvars))
})
test_that("predict for ARMA covariance models runs without errors", {
ns <- 20
nobs <- 15
draws <- structure(list(nsamples = ns), class = "brmsdraws")
draws$dpars <- list(
mu = matrix(rnorm(ns*nobs), ncol = nobs),
sigma = rchisq(ns, 3),
nu = rgamma(ns, 5)
)
draws$ac <- list(
ar = matrix(rbeta(ns, 0.5, 0.5), ncol = 1),
ma = matrix(rnorm(ns, 0.2, 1), ncol = 1),
begin_tg = c(1, 5, 12), nobs_tg = c(4, 7, 3)
)
draws$data <- list(se = rgamma(ns, 10))
draws$f$fun <- "gaussian_cov"
pred <- brms:::predict_gaussian_cov(1, draws = draws)
expect_equal(length(pred), ns * 4)
draws$f$fun <- "student_cov"
pred <- brms:::predict_student_cov(2, draws = draws)
expect_equal(length(pred), ns * 7)
})
test_that("loglik for SAR models runs without errors", {
ns = 3
draws <- structure(list(nsamples = ns, nobs = 10), class = "brmsdraws")
draws$dpars <- list(
mu = matrix(rnorm(30), nrow = ns),
nu = rep(2, ns),
sigma = rep(10, ns)
)
draws$ac <- list(lagsar = matrix(c(0.3, 0.5, 0.7)), W = diag(10))
pred <- brms:::predict_gaussian_lagsar(1, draws = draws)
expect_equal(dim(pred), c(3, 10))
pred <- brms:::predict_student_lagsar(1, draws = draws)
expect_equal(dim(pred), c(3, 10))
draws$ac$errorsar <- draws$ac$lagsar
draws$ac$lagsar <- NULL
pred <- brms:::predict_gaussian_errorsar(1, draws = draws)
expect_equal(dim(pred), c(3, 10))
pred <- brms:::predict_student_errorsar(1, draws = draws)
expect_equal(dim(pred), c(3, 10))
})
test_that("predict for 'cor_fixed' models runs without errors", {
ns <- 3
draws <- structure(list(nsamples = ns), class = "brmsdraws")
draws$dpars <- list(
mu = matrix(rnorm(30), nrow = ns),
nu = rep(2, ns)
)
draws$ac <- list(V = diag(10))
pred <- brms:::predict_gaussian_fixed(1, draws = draws)
expect_equal(dim(pred), c(3, 10))
pred <- brms:::predict_student_fixed(1, draws = draws)
expect_equal(dim(pred), c(3, 10))
})
test_that("predict for count and survival models runs without errors", {
ns <- 25
nobs <- 10
trials <- sample(10:30, nobs, replace = TRUE)
draws <- structure(list(nsamples = ns, nobs = nobs), class = "brmsdraws")
draws$dpars <- list(
eta = matrix(rnorm(ns*nobs), ncol = nobs),
shape = rgamma(ns, 4), xi = 0
)
draws$dpars$nu <- draws$dpars$sigma <- draws$dpars$shape + 1
draws$data <- list(trials = trials)
i <- sample(nobs, 1)
draws$dpars$mu <- brms:::inv_cloglog(draws$dpars$eta)
pred <- brms:::predict_binomial(i, draws = draws)
expect_equal(length(pred), ns)
draws$dpars$mu <- exp(draws$dpars$eta)
pred <- brms:::predict_poisson(i, draws = draws)
expect_equal(length(pred), ns)
pred <- brms:::predict_negbinomial(i, draws = draws)
expect_equal(length(pred), ns)
pred <- brms:::predict_geometric(i, draws = draws)
expect_equal(length(pred), ns)
pred <- brms:::predict_exponential(i, draws = draws)
expect_equal(length(pred), ns)
pred <- brms:::predict_gamma(i, draws = draws)
expect_equal(length(pred), ns)
pred <- brms:::predict_frechet(i, draws = draws)
expect_equal(length(pred), ns)
pred <- brms:::predict_inverse.gaussian(i, draws = draws)
expect_equal(length(pred), ns)
pred <- brms:::predict_gen_extreme_value(i, draws = draws)
expect_equal(length(pred), ns)
draws$f$link <- "log"
pred <- brms:::predict_weibull(i, draws = draws)
expect_equal(length(pred), ns)
})
test_that("predict for bernoulli and beta models works correctly", {
ns <- 17
nobs <- 10
draws <- structure(list(nsamples = ns, nobs = nobs), class = "brmsdraws")
draws$dpars <- list(
mu = brms:::inv_logit(matrix(rnorm(ns * nobs * 2), ncol = 2 * nobs)),
phi = rgamma(ns, 4)
)
i <- sample(1:nobs, 1)
pred <- brms:::predict_bernoulli(i, draws = draws)
expect_equal(length(pred), ns)
pred <- brms:::predict_beta(i, draws = draws)
expect_equal(length(pred), ns)
})
test_that("predict for circular models runs without errors", {
ns <- 15
nobs <- 10
draws <- structure(list(nsamples = ns, nobs = nobs), class = "brmsdraws")
draws$dpars <- list(
mu = 2 * atan(matrix(rnorm(ns * nobs * 2), ncol = nobs * 2)),
kappa = rgamma(ns, 4)
)
i <- sample(seq_len(nobs), 1)
pred <- brms:::predict_von_mises(i, draws = draws)
expect_equal(length(pred), ns)
})
test_that("predict for zero-inflated and hurdle models runs without erros", {
ns <- 50
nobs <- 8
trials <- sample(10:30, nobs, replace = TRUE)
draws <- structure(list(nsamples = ns, nobs = nobs), class = "brmsdraws")
draws$dpars <- list(
eta = matrix(rnorm(ns * nobs * 2), ncol = nobs * 2),
shape = rgamma(ns, 4), phi = rgamma(ns, 1),
zi = rbeta(ns, 1, 1), coi = rbeta(ns, 5, 7)
)
draws$dpars$hu <- draws$dpars$zoi <- draws$dpars$zi
draws$data <- list(trials = trials)
draws$dpars$mu <- exp(draws$dpars$eta)
pred <- brms:::predict_hurdle_poisson(1, draws = draws)
expect_equal(length(pred), ns)
pred <- brms:::predict_hurdle_negbinomial(2, draws = draws)
expect_equal(length(pred), ns)
pred <- brms:::predict_hurdle_gamma(5, draws = draws)
expect_equal(length(pred), ns)
pred <- brms:::predict_zero_inflated_poisson(3, draws = draws)
expect_equal(length(pred), ns)
pred <- brms:::predict_zero_inflated_negbinomial(6, draws = draws)
expect_equal(length(pred), ns)
draws$dpars$mu <- brms:::inv_logit(draws$dpars$eta)
pred <- brms:::predict_zero_inflated_binomial(4, draws = draws)
expect_equal(length(pred), ns)
pred <- brms:::predict_zero_inflated_beta(8, draws = draws)
expect_equal(length(pred), ns)
pred <- brms:::predict_zero_one_inflated_beta(7, draws = draws)
expect_equal(length(pred), ns)
})
test_that("predict for ordinal models runs without erros", {
ns <- 50
nobs <- 8
ncat <- 4
draws <- structure(list(nsamples = ns, nobs = nobs), class = "brmsdraws")
draws$dpars <- list(
mu = array(rnorm(ns*nobs), dim = c(ns, nobs, ncat)),
disc = rexp(ns)
)
draws$data <- list(Y = rep(1:ncat, 2), ncat = ncat)
draws$f$link <- "logit"
pred <- sapply(1:nobs, brms:::predict_cumulative, draws = draws)
expect_equal(dim(pred), c(ns, nobs))
pred <- sapply(1:nobs, brms:::predict_sratio, draws = draws)
expect_equal(dim(pred), c(ns, nobs))
pred <- sapply(1:nobs, brms:::predict_cratio, draws = draws)
expect_equal(dim(pred), c(ns, nobs))
pred <- sapply(1:nobs, brms:::predict_acat, draws = draws)
expect_equal(dim(pred), c(ns, nobs))
draws$f$link <- "probit"
pred <- sapply(1:nobs, brms:::predict_acat, draws = draws)
expect_equal(dim(pred), c(ns, nobs))
})
test_that("loglik for categorical models runs without erros", {
ns <- 50
nobs <- 8
ncat <- 3
draws <- structure(list(nsamples = ns, nobs = nobs), class = "brmsdraws")
draws$dpars <- list(
mu1 = array(rnorm(ns*nobs), dim = c(ns, nobs)),
mu2 = array(rnorm(ns*nobs), dim = c(ns, nobs))
)
draws$data <- list(Y = rep(1:ncat, 2), ncat = ncat)
draws$f$link <- "logit"
pred <- sapply(1:nobs, brms:::predict_categorical, draws = draws)
expect_equal(dim(pred), c(ns, nobs))
})
test_that("truncated predict run without errors", {
ns <- 30
nobs <- 15
draws <- structure(list(nsamples = ns, nobs = nobs), class = "brmsdraws")
draws$dpars <- list(
mu = matrix(rnorm(ns * nobs), ncol = nobs),
sigma = rchisq(ns, 3)
)
draws$data <- list(lb = sample(-(4:7), nobs, TRUE))
pred <- sapply(1:nobs, brms:::predict_gaussian, draws = draws)
expect_equal(dim(pred), c(ns, nobs))
draws$dpars$mu <- exp(draws$dpars$mu)
draws$data <- list(ub = sample(70:80, nobs, TRUE))
pred <- sapply(1:nobs, brms:::predict_poisson, draws = draws)
expect_equal(dim(pred), c(ns, nobs))
draws$data <- list(lb = rep(0, nobs), ub = sample(70:75, nobs, TRUE))
pred <- sapply(1:nobs, brms:::predict_poisson, draws = draws)
expect_equal(dim(pred), c(ns, nobs))
})
test_that("predict for the wiener diffusion model runs without errors", {
ns <- 5
nobs <- 3
draws <- structure(list(nsamples = ns, nobs = nobs), class = "brmsdraws")
draws$dpars <- list(
mu = matrix(rnorm(ns * nobs), ncol = nobs),
bs = rchisq(ns, 3), ndt = rep(0.5, ns),
bias = rbeta(ns, 1, 1)
)
draws$data <- list(Y = abs(rnorm(ns)) + 0.5, dec = c(1, 0, 1))
i <- sample(1:nobs, 1)
expect_equal(nrow(brms:::predict_wiener(i, draws)), ns)
})
|
# How CogSci authors' topic distributions shift from one year to the next,
# whether those shifts point toward their co-authorship neighbors' topics,
# and how the shift magnitudes relate to network centrality.
# NOTE(review): setwd() to an absolute user-specific path makes this script
# non-portable; consider relative paths or here::here().
setwd("/Users/loey/Desktop/Research/InfluencingCogSci/R/cogsci_analysis/influenceNeighbors")

library(tidyverse)
library(futile.matrix)
library(pbapply)

# Per-paper topic distributions; keep only the first author of each paper
# and normalize one known misspelling of an author name.
topic.df <- read_csv("fullcogsci_topics_authorAbbr.csv") %>%
  distinct() %>%
  mutate(authors=ifelse(grepl(",",authors), gsub(",.*","",authors), authors),
         authors=ifelse(authors=="J Tenenbaums", "J Tenenbaum", authors)) %>%
  dplyr::select(-X1)

# Load the per-year co-authorship neighbor matrices (entries appear to be
# network path distances between authors -- TODO confirm against the
# script that wrote neighbors/neighbors_YYYY.rds).
full_author_neighbor.df = c()
neighbor_mats_final = list()
years = 1981:2019
for(i in seq_along(years)){  # seq_along instead of 1:length(years)
  temp = readRDS(paste0("neighbors/neighbors_",years[i],".rds"))
  diag(temp) <- NA # removing self co-authorship binary info too
  neighbor_mats_final[[i]] <- temp
  neighbor.names = rownames(temp)
  full_author_neighbor.df <- c(full_author_neighbor.df, paste0(neighbor.names,"_",years[i]))
}
unique_authors <- unique(full_author_neighbor.df)
length(unique_authors)
peek(neighbor_mats_final[[1]],15)
peek(neighbor_mats_final[[20]],15)

#### FUNCTIONS ####
# Mean topic distribution over all papers in `df`, dropping the
# non-topic metadata columns. Returns a one-column data frame.
get_avg_topic_dist <- function(df) {
  topic.means = df %>%
    dplyr::select(-title, -authors, -year) %>%
    colMeans()
  topic.means = as.data.frame(topic.means)
  return(topic.means)
}

# Average topic distribution in `year.select` of `author`'s network
# neighbors at distance `neighbor.dist` (2-4 by default, i.e. excluding
# direct co-authors at distance 1).
get_neighbor_topic_dist_byYear <- function(year.select, author, neighbor.dist=2:4){
  neighbor.matr = neighbor_mats_final[[year.select-1980]][author,]
  neighbors = neighbor.matr[which(neighbor.matr %in% neighbor.dist)]
  neighbor.topics <- topic.df %>%
    # guard for authors with no neighbors; was `length(neighbors > 0)`,
    # a misplaced parenthesis (same truth value, wrong expression)
    filter(year==year.select & length(neighbors) > 0 & authors %in% names(neighbors)) %>%
    get_avg_topic_dist() %>%
    as.list()
  return(neighbor.topics)
}

# Same as get_neighbor_topic_dist_byYear, but averages the neighbors'
# papers from the FOLLOWING year (year.select + 1).
get_neighbor_topic_dist_byYear.next <- function(year.select, author, neighbor.dist=2:4){
  neighbor.matr = neighbor_mats_final[[year.select-1980]][author,]
  neighbors = neighbor.matr[which(neighbor.matr %in% neighbor.dist)]
  neighbor.topics.next <- topic.df %>%
    # same parenthesis fix as in get_neighbor_topic_dist_byYear
    filter(year==(year.select+1) & length(neighbors) > 0 & authors %in% names(neighbors)) %>%
    get_avg_topic_dist() %>%
    as.list()
  return(neighbor.topics.next)
}

# Cosine of the angle between two vectors (dot product over norms).
get_projection_angle = function(vec.a, vec.b) {
  cos.theta = sum(vec.a * vec.b) / ( sqrt(sum(vec.a * vec.a)) * sqrt(sum(vec.b * vec.b)) )
  return(cos.theta)
}

# Logit transform: maps a probability in (0, 1) to the real line.
prob_to_logit = function(p){
  log(p/(1-p))
}
####################

# Each author's topic distribution for a given year
allTopicDist <- topic.df %>%
  mutate(author_year = paste0(authors,"_",year)) %>%
  filter(author_year %in% unique_authors) %>%
  group_by(year, authors) %>%
  dplyr::select(-c(title, author_year)) %>%
  summarise_all(mean)
allAuthorTopicDist <- allTopicDist %>%
  ungroup() %>%
  gather("topic","author_mean",3:102) %>%
  dplyr::arrange(year,authors) %>%
  mutate(topic = paste0("topic",str_pad(topic, 3, pad = "0")))
#write_csv(allAuthorTopicDist, "avg_topic_dist_author_year.csv")

# Author's topic distribution for next year, shifted back one year so it
# can be joined onto the current year's rows.
allAuthorTopicDist.nextYr <- allAuthorTopicDist %>%
  filter(year > 1981) %>%
  mutate(year = year-1,
         next.year = year+1,
         next.author_mean = author_mean) %>%
  dplyr::select(-author_mean)
allAuthorTopicDist.time <- full_join(allAuthorTopicDist, allAuthorTopicDist.nextYr)
#write_csv(allAuthorTopicDist.time, "avg_topic_dist_author_this_prev.csv")
allAuthorTopicDist.diff <- allAuthorTopicDist.time %>%
  na.omit() %>%
  mutate(diff.author_mean = next.author_mean-author_mean)
nrow(allAuthorTopicDist.diff)/100 #7,740 authors with papers contributed in consecutive years

# Each author's neighbors' topic distribution for a given year
allNeighborTopicDist <- allTopicDist %>%
  dplyr::select(year, authors) %>%
  mutate(neighbor_mean = pbmapply(get_neighbor_topic_dist_byYear,year,authors)) %>%
  unnest() %>%
  ungroup() %>%
  mutate(topic = paste0("topic",str_pad(rep(1:100, nrow(allTopicDist)), 3, pad = "0")))
#write_csv(allNeighborTopicDist, "avg_topic_dist_neighbors_year.csv")

# Author's neighbors' topic distribution for next year
allNeighborTopicDist.nextYr <- allTopicDist %>%
  filter(year < 2019) %>%
  dplyr::select(year, authors) %>%
  mutate(neighbor_mean = pbmapply(get_neighbor_topic_dist_byYear.next,year,authors)) %>%
  unnest() %>%
  ungroup() %>%
  mutate(topic = paste0("topic",str_pad(rep(1:100, nrow(filter(allTopicDist, year<2019))), 3, pad = "0")),
         next.year = year+1,
         next.neighbor_mean = neighbor_mean) %>%
  dplyr::select(-neighbor_mean)
allNeighborTopicDist.time <- full_join(allNeighborTopicDist, allNeighborTopicDist.nextYr)
#write_csv(allNeighborTopicDist.time, "avg_topic_dist_neighbors_this_prev.csv")
allNeighborTopicDist.diff <- allNeighborTopicDist.time %>%
  na.omit() %>%
  mutate(diff.neighbor_mean = next.neighbor_mean-neighbor_mean)
nrow(allNeighborTopicDist.diff)/100 #8,962 authors's neighbors with papers contributed in consecutive years

# Combine author and neighbor year-over-year changes; keep only
# author/year pairs present in both.
combinedTopicDist <- full_join(allAuthorTopicDist.diff, allNeighborTopicDist.diff, by=c("year","authors","topic","next.year")) %>%
  na.omit() %>%
  mutate(diff.neighbor_author_mean = neighbor_mean - author_mean)
nrow(combinedTopicDist)/100 #2,687 (previously), 3,347 (now) distinct authors/years

# Scalar projections: the signed length of each change vector projected
# onto the author-to-neighbor difference vector (cosine times norm).
all_proj_angles <- combinedTopicDist %>%
  group_by(authors, year) %>%
  summarise(proj_scalar_author = get_projection_angle(diff.author_mean, diff.neighbor_author_mean)*sqrt(sum(diff.author_mean^2)),
            proj_scalar_neighbors = get_projection_angle(diff.neighbor_mean, diff.neighbor_author_mean)*sqrt(sum(diff.neighbor_mean^2)))
all_proj_angles
write_csv(all_proj_angles, "all_projection_angles.csv")
ggplot(all_proj_angles, aes(x=proj_scalar_author, y=proj_scalar_neighbors)) +
  geom_point() +
  geom_rug()
ggsave("authors_neighbors.png")

# Correlate with centrality
all_centrality <- data.frame()
for(i in 1981:2019){
  year_centrality <- read_csv(paste0("../networkByYear/centrality_",i,".csv")) %>%
    mutate(year=i)
  all_centrality <- bind_rows(all_centrality, year_centrality) %>%
    dplyr::select(-X1)
}
all_centrality <- all_centrality %>%
  spread(CM, measure) %>%
  mutate(authors = label) %>%
  dplyr::select(-c(id, label))
all_proj_angles_central <- all_proj_angles %>%
  left_join(all_centrality) %>%
  # clamp eigen centralities of exactly 0 or 1, whose logit is infinite
  mutate(logit.eigen = ifelse(eigen==0, -99,
                              ifelse(eigen==1, 99,
                                     prob_to_logit(eigen))))
cor.test(all_proj_angles_central$proj_scalar_author, all_proj_angles_central$proj_scalar_neighbors)
cor.test(all_proj_angles_central$logit.eigen, all_proj_angles_central$proj_scalar_neighbors)
cor.test(all_proj_angles_central$logit.eigen, all_proj_angles_central$proj_scalar_author)
cor.test(log(all_proj_angles_central$degree), all_proj_angles_central$proj_scalar_neighbors)
cor.test(log(all_proj_angles_central$degree), all_proj_angles_central$proj_scalar_author)
cor.test(log(all_proj_angles_central$close), all_proj_angles_central$proj_scalar_neighbors)
cor.test(log(all_proj_angles_central$close), all_proj_angles_central$proj_scalar_author)

# Bimodal distribution
ggplot(all_proj_angles_central, aes(x=logit.eigen)) +
  geom_density()
ggplot(all_proj_angles_central, aes(x=log(degree))) +
  geom_density()
ggplot(all_proj_angles_central, aes(x=log(close))) +
  geom_density()
ggplot(all_proj_angles_central, aes(x=log(between))) +
  geom_density()
ggplot(all_proj_angles_central, aes(x=logit.eigen, y=proj_scalar_neighbors)) +
  geom_point() +
  geom_smooth(method=lm)
ggplot(all_proj_angles_central, aes(x=log(degree), y=proj_scalar_neighbors)) +
  geom_point() +
  geom_smooth(method=lm)
ggplot(all_proj_angles_central, aes(x=log(degree), y=proj_scalar_author)) +
  geom_point() +
  geom_smooth(method=lm)
ggplot(all_proj_angles_central, aes(x=log(close), y=proj_scalar_neighbors)) +
  geom_point() +
  geom_smooth(method=lm)

#### NULL SHUFFLE PROJECTION AND CENTRALITY ####
# Pearson correlation between the (possibly shuffled) centrality and
# projection columns of a two-column data frame.
corr<- function(dat){
  with(data=dat, cor(dat$central, dat$proj_scalar))
}
# Each dat.shuffle.* builds the null: centrality kept as-is, projection
# scalars permuted across authors/years.
dat.shuffle.degree.neighbors <- function(dat){
  data.frame(central=log(dat$degree),
             proj_scalar=sample(dat$proj_scalar_neighbors, length(dat$proj_scalar_neighbors), replace=F)) %>%
    na.omit()
}
dat.shuffle.degree.author <- function(dat){
  data.frame(central=log(dat$degree),
             proj_scalar=sample(dat$proj_scalar_author, length(dat$proj_scalar_author), replace=F)) %>%
    na.omit()
}
dat.shuffle.eigen.neighbors <- function(dat){
  data.frame(central=dat$logit.eigen,
             proj_scalar=sample(dat$proj_scalar_neighbors, length(dat$proj_scalar_neighbors), replace=F)) %>%
    na.omit()
}
dat.shuffle.eigen.author <- function(dat){
  data.frame(central=dat$logit.eigen,
             proj_scalar=sample(dat$proj_scalar_author, length(dat$proj_scalar_author), replace=F)) %>%
    na.omit()
}
K = 10000
shuffle.samps.degree.neighbors <- replicate(K, corr(dat.shuffle.degree.neighbors(all_proj_angles_central)))
shuffle.samps.degree.author <- replicate(K, corr(dat.shuffle.degree.author(all_proj_angles_central)))
shuffle.samps.eigen.neighbors <- replicate(K, corr(dat.shuffle.eigen.neighbors(all_proj_angles_central)))
shuffle.samps.eigen.author <- replicate(K, corr(dat.shuffle.eigen.author(all_proj_angles_central)))
trueDegreeNeighborCorr <- cor(log(all_proj_angles_central$degree), all_proj_angles_central$proj_scalar_neighbors)
trueDegreeAuthorCorr <- cor(log(all_proj_angles_central$degree), all_proj_angles_central$proj_scalar_author)
trueEigenNeighborCorr <- cor(all_proj_angles_central$logit.eigen, all_proj_angles_central$proj_scalar_neighbors)
trueEigenAuthorCorr <- cor(all_proj_angles_central$logit.eigen, all_proj_angles_central$proj_scalar_author)
data.frame(r = shuffle.samps.degree.neighbors) %>%
  ggplot(aes(x=r)) +
  geom_histogram() +
  # BUG FIX: was `trueNeighborCorr`, an undefined object; the computed
  # value above is trueDegreeNeighborCorr
  geom_vline(xintercept=trueDegreeNeighborCorr, colour="red") +
  ggtitle("Null Shuffle Log(Degree Centrality) vs Author Influence Neighbors")
ggsave("img/null.shuffle.simple_centralVsProj_influenceNeighbors.png")
data.frame(r = shuffle.samps.degree.author) %>%
  ggplot(aes(x=r)) +
  geom_histogram() +
  # BUG FIX: was `trueAuthorCorr`, an undefined object; the computed
  # value above is trueDegreeAuthorCorr
  geom_vline(xintercept=trueDegreeAuthorCorr, colour="red") +
  ggtitle("Null Shuffle Log(Degree Centrality) vs Neighbors Influence Author")
ggsave("img/null.shuffle.simple_centralVsProj_influenceAuthor.png")
data.frame(r = shuffle.samps.eigen.neighbors) %>%
  ggplot(aes(x=r)) +
  geom_histogram() +
  geom_vline(xintercept=trueEigenNeighborCorr, colour="red") +
  ggtitle("Null Shuffle Logit(Eigen Centrality) vs Author Influence Neighbors")
ggsave("img/null.shuffle.simple_centralVsProj_influenceNeighbors_eigen.png")
data.frame(r = shuffle.samps.eigen.author) %>%
  ggplot(aes(x=r)) +
  geom_histogram() +
  geom_vline(xintercept=trueEigenAuthorCorr, colour="red") +
  ggtitle("Null Shuffle Logit(Eigen Centrality) vs Neighbors Influence Author")
ggsave("img/null.shuffle.simple_centralVsProj_influenceAuthor_eigen.png")
| /R/cogsci_analysis/influenceNeighbors/cogsci_influence_neighbors.R | no_license | Oleg-295/InfluencingCogSci | R | false | false | 10,741 | r | setwd("/Users/loey/Desktop/Research/InfluencingCogSci/R/cogsci_analysis/influenceNeighbors")
library(tidyverse)
library(futile.matrix)
library(pbapply)
topic.df <- read_csv("fullcogsci_topics_authorAbbr.csv") %>%
distinct() %>%
mutate(authors=ifelse(grepl(",",authors), gsub(",.*","",authors), authors),
authors=ifelse(authors=="J Tenenbaums", "J Tenenbaum", authors)) %>%
dplyr::select(-X1)
full_author_neighbor.df = c()
neighbor_mats_final = list()
years = 1981:2019
for(i in 1:length(years)){
temp = readRDS(paste0("neighbors/neighbors_",years[i],".rds"))
diag(temp) <- NA # removing self co-authorship binary info too
neighbor_mats_final[[i]] <- temp
neighbor.names = rownames(temp)
full_author_neighbor.df <- c(full_author_neighbor.df, paste0(neighbor.names,"_",years[i]))
}
unique_authors <- unique(full_author_neighbor.df)
length(unique_authors)
peek(neighbor_mats_final[[1]],15)
peek(neighbor_mats_final[[20]],15)
#### FUNCTIONS ####
get_avg_topic_dist <- function(df) {
topic.means = df %>%
dplyr::select(-title, -authors, -year) %>%
colMeans()
topic.means = as.data.frame(topic.means)
return(topic.means)
}
get_neighbor_topic_dist_byYear <- function(year.select, author, neighbor.dist=2:4){
neighbor.matr = neighbor_mats_final[[year.select-1980]][author,]
neighbors = neighbor.matr[which(neighbor.matr %in% neighbor.dist)]
neighbor.topics <- topic.df %>%
filter(year==year.select & length(neighbors > 0) & authors %in% names(neighbors)) %>%
get_avg_topic_dist() %>%
as.list()
return(neighbor.topics)
}
get_neighbor_topic_dist_byYear.next <- function(year.select, author, neighbor.dist=2:4){
neighbor.matr = neighbor_mats_final[[year.select-1980]][author,]
neighbors = neighbor.matr[which(neighbor.matr %in% neighbor.dist)]
neighbor.topics.next <- topic.df %>%
filter(year==(year.select+1) & length(neighbors > 0) & authors %in% names(neighbors)) %>%
get_avg_topic_dist() %>%
as.list()
return(neighbor.topics.next)
}
get_projection_angle = function(vec.a, vec.b) {
cos.theta = sum(vec.a * vec.b) / ( sqrt(sum(vec.a * vec.a)) * sqrt(sum(vec.b * vec.b)) )
return(cos.theta)
}
prob_to_logit = function(p){
log(p/(1-p))
}
####################
# Each author's topic distribution for a given year
# Average each author's per-paper topic loadings within a year, keeping only
# author-year pairs that appear in the co-authorship networks built above.
allTopicDist <- topic.df %>%
mutate(author_year = paste0(authors,"_",year)) %>%
filter(author_year %in% unique_authors) %>%
group_by(year, authors) %>%
dplyr::select(-c(title, author_year)) %>%
summarise_all(mean)
# Reshape the wide topic columns (positions 3:102, i.e. 100 topics) to long
# format with zero-padded topic labels ("topic001" ... "topic100").
allAuthorTopicDist <- allTopicDist %>%
ungroup() %>%
gather("topic","author_mean",3:102) %>%
dplyr::arrange(year,authors) %>%
mutate(topic = paste0("topic",str_pad(topic, 3, pad = "0")))
#write_csv(allAuthorTopicDist, "avg_topic_dist_author_year.csv")
# Author's topic distribution for next year
# Shift each distribution back one year so that year-t rows carry the t+1
# ("next") values, enabling a same-row comparison after the join below.
allAuthorTopicDist.nextYr <- allAuthorTopicDist %>%
filter(year > 1981) %>%
mutate(year = year-1,
next.year = year+1,
next.author_mean = author_mean) %>%
dplyr::select(-author_mean)
allAuthorTopicDist.time <- full_join(allAuthorTopicDist, allAuthorTopicDist.nextYr)
#write_csv(allAuthorTopicDist.time, "avg_topic_dist_author_this_prev.csv")
# Year-over-year change in each author's topic weight; rows without both
# years are dropped by na.omit().
allAuthorTopicDist.diff <- allAuthorTopicDist.time %>%
na.omit() %>%
mutate(diff.author_mean = next.author_mean-author_mean)
nrow(allAuthorTopicDist.diff)/100 #7,740 authors with papers contributed in consecutive years
# Each author's neighbors' topic distribution for a given year
# pbmapply applies the (global-dependent) neighbor lookup per author-year;
# unnest() expands the returned per-topic lists into rows.
allNeighborTopicDist <- allTopicDist %>%
dplyr::select(year, authors) %>%
mutate(neighbor_mean = pbmapply(get_neighbor_topic_dist_byYear,year,authors)) %>%
unnest() %>%
ungroup() %>%
mutate(topic = paste0("topic",str_pad(rep(1:100, nrow(allTopicDist)), 3, pad = "0")))
#write_csv(allNeighborTopicDist, "avg_topic_dist_neighbors_year.csv")
# Author's neighbors' topic distribution for next year
# 2019 is the last year with data, so rows for 2019 have no "next" year.
allNeighborTopicDist.nextYr <- allTopicDist %>%
filter(year < 2019) %>%
dplyr::select(year, authors) %>%
mutate(neighbor_mean = pbmapply(get_neighbor_topic_dist_byYear.next,year,authors)) %>%
unnest() %>%
ungroup() %>%
mutate(topic = paste0("topic",str_pad(rep(1:100, nrow(filter(allTopicDist, year<2019))), 3, pad = "0")),
next.year = year+1,
next.neighbor_mean = neighbor_mean) %>%
dplyr::select(-neighbor_mean)
allNeighborTopicDist.time <- full_join(allNeighborTopicDist, allNeighborTopicDist.nextYr)
#write_csv(allNeighborTopicDist.time, "avg_topic_dist_neighbors_this_prev.csv")
# Year-over-year change in the neighbors' average topic weights.
allNeighborTopicDist.diff <- allNeighborTopicDist.time %>%
na.omit() %>%
mutate(diff.neighbor_mean = next.neighbor_mean-neighbor_mean)
nrow(allNeighborTopicDist.diff)/100 #8,962 authors's neighbors with papers contributed in consecutive years
# Combine author-side and neighbor-side change vectors per author/year/topic;
# diff.neighbor_author_mean is the gap between the neighbors' and the
# author's current-year topic distributions.
combinedTopicDist <- full_join(allAuthorTopicDist.diff, allNeighborTopicDist.diff, by=c("year","authors","topic","next.year")) %>%
na.omit() %>%
mutate(diff.neighbor_author_mean = neighbor_mean - author_mean)
nrow(combinedTopicDist)/100 #2,687 (previously), 3,347 (now) distinct authors/years
# Scalar projection of each change vector onto the author-to-neighbor gap
# vector, computed per author-year across the 100 topic dimensions:
# cos(angle) * length of the change vector.
all_proj_angles <- combinedTopicDist %>%
group_by(authors, year) %>%
summarise(proj_scalar_author = get_projection_angle(diff.author_mean, diff.neighbor_author_mean)*sqrt(sum(diff.author_mean^2)),
proj_scalar_neighbors = get_projection_angle(diff.neighbor_mean, diff.neighbor_author_mean)*sqrt(sum(diff.neighbor_mean^2)))
all_proj_angles
write_csv(all_proj_angles, "all_projection_angles.csv")
# Scatter of the two influence scalars with marginal rugs.
ggplot(all_proj_angles, aes(x=proj_scalar_author, y=proj_scalar_neighbors)) +
geom_point() +
geom_rug()
ggsave("authors_neighbors.png")
# Correlate with centrality
# Stack the per-year centrality exports (1981-2019) into one long table;
# the CSV's unnamed index column X1 is dropped.
all_centrality <- data.frame()
for(i in 1981:2019){
year_centrality <- read_csv(paste0("../networkByYear/centrality_",i,".csv")) %>%
mutate(year=i)
all_centrality <- bind_rows(all_centrality, year_centrality) %>%
dplyr::select(-X1)
}
# Spread the centrality-measure column (CM) into one column per measure and
# expose the node label as `authors` for joining.
all_centrality <- all_centrality %>%
spread(CM, measure) %>%
mutate(authors = label) %>%
dplyr::select(-c(id, label))
# Logit-transform eigenvector centrality, clamping the 0/1 endpoints (where
# the logit would be infinite) to -99/+99.
all_proj_angles_central <- all_proj_angles %>%
left_join(all_centrality) %>%
mutate(logit.eigen = ifelse(eigen==0, -99,
ifelse(eigen==1, 99,
prob_to_logit(eigen))))
# Pairwise correlation tests between the projection scalars and the
# (transformed) centrality measures.
cor.test(all_proj_angles_central$proj_scalar_author, all_proj_angles_central$proj_scalar_neighbors)
cor.test(all_proj_angles_central$logit.eigen, all_proj_angles_central$proj_scalar_neighbors)
cor.test(all_proj_angles_central$logit.eigen, all_proj_angles_central$proj_scalar_author)
cor.test(log(all_proj_angles_central$degree), all_proj_angles_central$proj_scalar_neighbors)
cor.test(log(all_proj_angles_central$degree), all_proj_angles_central$proj_scalar_author)
cor.test(log(all_proj_angles_central$close), all_proj_angles_central$proj_scalar_neighbors)
cor.test(log(all_proj_angles_central$close), all_proj_angles_central$proj_scalar_author)
# Bimodal distribution
# Marginal density plots of each (transformed) centrality measure.
ggplot(all_proj_angles_central, aes(x=logit.eigen)) +
geom_density()
ggplot(all_proj_angles_central, aes(x=log(degree))) +
geom_density()
ggplot(all_proj_angles_central, aes(x=log(close))) +
geom_density()
ggplot(all_proj_angles_central, aes(x=log(between))) +
geom_density()
# Centrality vs projection scalars with linear-fit overlays.
ggplot(all_proj_angles_central, aes(x=logit.eigen, y=proj_scalar_neighbors)) +
geom_point() +
geom_smooth(method=lm)
ggplot(all_proj_angles_central, aes(x=log(degree), y=proj_scalar_neighbors)) +
geom_point() +
geom_smooth(method=lm)
ggplot(all_proj_angles_central, aes(x=log(degree), y=proj_scalar_author)) +
geom_point() +
geom_smooth(method=lm)
ggplot(all_proj_angles_central, aes(x=log(close), y=proj_scalar_neighbors)) +
geom_point() +
geom_smooth(method=lm)
#### NULL SHUFFLE PROJECTION AND CENTRALITY ####
# Pearson correlation between the `central` and `proj_scalar` columns of a
# shuffled data frame (as produced by the dat.shuffle.* helpers below).
# The original wrapped the call in `with(data = dat, ...)` while still
# indexing with `dat$`, making the `with()` a no-op; it is dropped.
corr <- function(dat){
  cor(dat$central, dat$proj_scalar)
}
# Null-model helper: pair log-degree centrality with a random permutation of
# the neighbor-side projection scalars, then drop incomplete rows.
dat.shuffle.degree.neighbors <- function(dat){
  permuted <- sample(dat$proj_scalar_neighbors, length(dat$proj_scalar_neighbors), replace=F)
  na.omit(data.frame(central = log(dat$degree), proj_scalar = permuted))
}
# Null-model helper: pair log-degree centrality with a random permutation of
# the author-side projection scalars, then drop incomplete rows.
dat.shuffle.degree.author <- function(dat){
  permuted <- sample(dat$proj_scalar_author, length(dat$proj_scalar_author), replace=F)
  na.omit(data.frame(central = log(dat$degree), proj_scalar = permuted))
}
# Null-model helper: pair logit-eigen centrality with a random permutation of
# the neighbor-side projection scalars, then drop incomplete rows.
dat.shuffle.eigen.neighbors <- function(dat){
  permuted <- sample(dat$proj_scalar_neighbors, length(dat$proj_scalar_neighbors), replace=F)
  na.omit(data.frame(central = dat$logit.eigen, proj_scalar = permuted))
}
# Null-model helper: pair logit-eigen centrality with a random permutation of
# the author-side projection scalars, then drop incomplete rows.
dat.shuffle.eigen.author <- function(dat){
  permuted <- sample(dat$proj_scalar_author, length(dat$proj_scalar_author), replace=F)
  na.omit(data.frame(central = dat$logit.eigen, proj_scalar = permuted))
}
# Null distributions: correlation under K random permutations of the
# projection scalars (degree and eigen variants, author and neighbor sides).
K = 10000
shuffle.samps.degree.neighbors <- replicate(K, corr(dat.shuffle.degree.neighbors(all_proj_angles_central)))
shuffle.samps.degree.author <- replicate(K, corr(dat.shuffle.degree.author(all_proj_angles_central)))
shuffle.samps.eigen.neighbors <- replicate(K, corr(dat.shuffle.eigen.neighbors(all_proj_angles_central)))
shuffle.samps.eigen.author <- replicate(K, corr(dat.shuffle.eigen.author(all_proj_angles_central)))
# Observed correlations to compare against the null distributions.
trueDegreeNeighborCorr <- cor(log(all_proj_angles_central$degree), all_proj_angles_central$proj_scalar_neighbors)
trueDegreeAuthorCorr <- cor(log(all_proj_angles_central$degree), all_proj_angles_central$proj_scalar_author)
trueEigenNeighborCorr <- cor(all_proj_angles_central$logit.eigen, all_proj_angles_central$proj_scalar_neighbors)
trueEigenAuthorCorr <- cor(all_proj_angles_central$logit.eigen, all_proj_angles_central$proj_scalar_author)
# Null-distribution histograms with the observed correlation marked in red.
# BUG FIX: the first two plots referenced undefined variables
# `trueNeighborCorr` and `trueAuthorCorr`; the observed values computed above
# are named `trueDegreeNeighborCorr` and `trueDegreeAuthorCorr`.
data.frame(r = shuffle.samps.degree.neighbors) %>%
ggplot(aes(x=r)) +
geom_histogram() +
geom_vline(xintercept=trueDegreeNeighborCorr, colour="red") +
ggtitle("Null Shuffle Log(Degree Centrality) vs Author Influence Neighbors")
ggsave("img/null.shuffle.simple_centralVsProj_influenceNeighbors.png")
data.frame(r = shuffle.samps.degree.author) %>%
ggplot(aes(x=r)) +
geom_histogram() +
geom_vline(xintercept=trueDegreeAuthorCorr, colour="red") +
ggtitle("Null Shuffle Log(Degree Centrality) vs Neighbors Influence Author")
ggsave("img/null.shuffle.simple_centralVsProj_influenceAuthor.png")
data.frame(r = shuffle.samps.eigen.neighbors) %>%
ggplot(aes(x=r)) +
geom_histogram() +
geom_vline(xintercept=trueEigenNeighborCorr, colour="red") +
ggtitle("Null Shuffle Logit(Eigen Centrality) vs Author Influence Neighbors")
ggsave("img/null.shuffle.simple_centralVsProj_influenceNeighbors_eigen.png")
data.frame(r = shuffle.samps.eigen.author) %>%
ggplot(aes(x=r)) +
geom_histogram() +
geom_vline(xintercept=trueEigenAuthorCorr, colour="red") +
ggtitle("Null Shuffle Logit(Eigen Centrality) vs Neighbors Influence Author")
ggsave("img/null.shuffle.simple_centralVsProj_influenceAuthor_eigen.png")
|
################################################################################
# #
# Code for Plotting Distributions and including them in Latex #
# #
################################################################################
library(tikzDevice)
library(ggplot2)
# Creating the plot
# NOTE(review): hard-coded, machine-specific working directory -- the tikz
# .tex outputs below land here.
setwd("C:/Users/Mark/Dropbox/Graduate School/04) GSI/Final Exam One Pager")
# Create tikz device (writes LaTeX picture code instead of a bitmap, so the
# $...$ labels below are typeset by LaTeX)
tikz(file = "fdist.tex", width = 3.5, height = 2.0)
dat <- data.frame(a = 0, b = 0)
# ggplot object: F densities for several (df1, df2) pairs via stat_function
ggplot(dat) +
stat_function(fun = function(x) {df(x = x, df1 = 1, df2 = 1)}, aes(color = "$F_{1,1}$"), lwd = 1.0, lty = 1, n = 400) +
# stat_function(fun = function(x) {df(x = x, df1 = 2, df2 = 1)}, aes(color = "$F_{2,1}$"), lwd = 1.5, lty = 1) +
stat_function(fun = function(x) {df(x = x, df1 = 10, df2 = 1)}, aes(color = "$F_{10,1}$"), lwd = 1.0, lty = 1) +
stat_function(fun = function(x) {df(x = x, df1 = 100, df2 = 100)}, aes(color = "$F_{100,100}$"), lwd = 1.0, lty = 1) +
scale_x_continuous(limits = c(0,2.25), expand = c(0,0)) +
scale_y_continuous(limits = c(0, 2.5), expand = c(0,0)) +
# scale_color_brewer(palette = "Set1") +
ggtitle("$F$ Distribution", subtitle = "Notice, its always skewed right") +
labs(x = "$F$", y = "Density", color = "Distribution") +
ggthemes::scale_color_gdocs() +
ggthemes::theme_tufte()
dev.off()
# Create tikz device for the chi-square figure
tikz(file = "chidist.tex", width = 3.5, height = 2.0)
dat <- data.frame(a = 0, b = 0)
# ggplot object: chi-square densities for df = 1, 2, 3, 4, 6, 9
ggplot(dat) +
stat_function(fun = function(x) {dchisq(x = x, df = 1)}, aes(color = "$\\chi^2_{1}$"), lwd = 1.0, lty = 1, n = 400) +
stat_function(fun = function(x) {dchisq(x = x, df = 2)}, aes(color = "$\\chi^2_{2}$"), lwd = 1.0, lty = 1, n = 400) +
stat_function(fun = function(x) {dchisq(x = x, df = 3)}, aes(color = "$\\chi^2_{3}$"), lwd = 1.0, lty = 1, n = 400) +
stat_function(fun = function(x) {dchisq(x = x, df = 4)}, aes(color = "$\\chi^2_{4}$"), lwd = 1.0, lty = 1, n = 400) +
stat_function(fun = function(x) {dchisq(x = x, df = 6)}, aes(color = "$\\chi^2_{6}$"), lwd = 1.0, lty = 1, n = 400) +
stat_function(fun = function(x) {dchisq(x = x, df = 9)}, aes(color = "$\\chi^2_{9}$"), lwd = 1.0, lty = 1, n = 400) +
scale_x_continuous(limits = c(0,8), expand = c(0,0)) +
scale_y_continuous(limits = c(0, 0.5), expand = c(0,0)) +
ggtitle("$\\chi^2$ Distribution") +
labs(x = "$\\chi^2$", y = "Density", color = "Distribution") +
ggthemes::scale_color_gdocs() +
ggthemes::theme_tufte()
dev.off()
| /resources/final_exam_study_guide/CodePlotter.R | no_license | MarkKurzeja/MarkKurzeja.github.io | R | false | false | 2,688 | r | ################################################################################
# #
# Code for Plotting Distributions and including them in Latex #
# #
################################################################################
library(tikzDevice)
library(ggplot2)
# Creating the plot
setwd("C:/Users/Mark/Dropbox/Graduate School/04) GSI/Final Exam One Pager")
# Creat tikz device
tikz(file = "fdist.tex", width = 3.5, height = 2.0)
dat <- data.frame(a = 0, b = 0)
# ggplot object
ggplot(dat) +
stat_function(fun = function(x) {df(x = x, df1 = 1, df2 = 1)}, aes(color = "$F_{1,1}$"), lwd = 1.0, lty = 1, n = 400) +
# stat_function(fun = function(x) {df(x = x, df1 = 2, df2 = 1)}, aes(color = "$F_{2,1}$"), lwd = 1.5, lty = 1) +
stat_function(fun = function(x) {df(x = x, df1 = 10, df2 = 1)}, aes(color = "$F_{10,1}$"), lwd = 1.0, lty = 1) +
stat_function(fun = function(x) {df(x = x, df1 = 100, df2 = 100)}, aes(color = "$F_{100,100}$"), lwd = 1.0, lty = 1) +
scale_x_continuous(limits = c(0,2.25), expand = c(0,0)) +
scale_y_continuous(limits = c(0, 2.5), expand = c(0,0)) +
# scale_color_brewer(palette = "Set1") +
ggtitle("$F$ Distribution", subtitle = "Notice, its always skewed right") +
labs(x = "$F$", y = "Density", color = "Distribution") +
ggthemes::scale_color_gdocs() +
ggthemes::theme_tufte()
dev.off()
# Creat tikz device
tikz(file = "chidist.tex", width = 3.5, height = 2.0)
dat <- data.frame(a = 0, b = 0)
# ggplot object
ggplot(dat) +
stat_function(fun = function(x) {dchisq(x = x, df = 1)}, aes(color = "$\\chi^2_{1}$"), lwd = 1.0, lty = 1, n = 400) +
stat_function(fun = function(x) {dchisq(x = x, df = 2)}, aes(color = "$\\chi^2_{2}$"), lwd = 1.0, lty = 1, n = 400) +
stat_function(fun = function(x) {dchisq(x = x, df = 3)}, aes(color = "$\\chi^2_{3}$"), lwd = 1.0, lty = 1, n = 400) +
stat_function(fun = function(x) {dchisq(x = x, df = 4)}, aes(color = "$\\chi^2_{4}$"), lwd = 1.0, lty = 1, n = 400) +
stat_function(fun = function(x) {dchisq(x = x, df = 6)}, aes(color = "$\\chi^2_{6}$"), lwd = 1.0, lty = 1, n = 400) +
stat_function(fun = function(x) {dchisq(x = x, df = 9)}, aes(color = "$\\chi^2_{9}$"), lwd = 1.0, lty = 1, n = 400) +
scale_x_continuous(limits = c(0,8), expand = c(0,0)) +
scale_y_continuous(limits = c(0, 0.5), expand = c(0,0)) +
ggtitle("$\\chi^2$ Distribution") +
labs(x = "$\\chi^2$", y = "Density", color = "Distribution") +
ggthemes::scale_color_gdocs() +
ggthemes::theme_tufte()
dev.off()
|
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(lubridate)
library(tidyverse); theme_set(theme_bw(base_size = 14))
# App data loaded once at startup: per-respondent real-time-response (RTR)
# measurements and the debate content/speaker timeline.
d_pb_app = readRDS("data/d_pb_app.rds")
# One row per respondent (match_id), used for group-size tables.
n_d = d_pb_app %>% group_by(match_id) %>% slice(1)
d_content_app = readRDS("data/d_content_app.rds")
# Fixed colour and fill scales keyed by party/candidate/group labels so
# colours stay stable across selections and facets.
party_colors = scale_color_manual(values = c("Angela Merkel" = "black", "Peer Steinbrueck" = "red", "keinen von beiden" = "orange", "CDU/CSU" = "black", "AfD" = "blue", "alle" = "black", "DIE LINKE" = "purple", "FDP" = "yellow", "GRUENE" = "green", "keine Angabe" = "darkgrey", "keine Partei" = "orange", "maennlich" = "darkblue", "noch nicht entschieden" = "orange", "PIRATEN" = "goldenrod", "SPD" = "red", "trifft nicht zu" = "darkgrey", "weiblich" = "green"))
party_fills = scale_fill_manual(values = c("Angela Merkel" = "black", "Peer Steinbrueck" = "red", "keinen von beiden" = "orange", "CDU/CSU" = "black", "AfD" = "blue", "alle" = "black", "DIE LINKE" = "purple", "FDP" = "yellow", "GRUENE" = "green", "keine Angabe" = "darkgrey", "keine Partei" = "orange", "maennlich" = "darkblue", "noch nicht entschieden" = "orange", "PIRATEN" = "goldenrod", "SPD" = "red", "trifft nicht zu" = "darkgrey", "weiblich" = "pink", "Merkel" = "black", "Merkel, Angela" = "black", "Steinbrueck" = "red", "Steinbrueck, Peer" = "red", "Moderator/in" = "turquoise", "Between-Saldo" = "black"))
shinyServer(function(input, output) {
# Dynamic checkbox list: the available groups depend on the grouping
# variable (input$group_factor) the user selected.
output$ui <- renderUI({
checkboxGroupInput("groups",
"Gruppen",
choices = unique(unlist(d_pb_app[, input$group_factor])),
selected = c("Angela Merkel", "Peer Steinbrueck"))
})
# Core reactive: filter to the selected groups, average the transformed RTR
# values per time/group/candidate/valence cell (normalised by group size n),
# then aggregate according to the item-aggregation mode (input$aggr_item).
visdata <- reactive({
d = d_pb_app %>%
filter_at(vars(one_of(input$group_factor)), any_vars(. %in% input$groups)) %>%
group_by_at(vars(one_of(input$group_factor))) %>%
mutate(n = length(unique(match_id))) %>% ungroup %>%
group_by_at(vars(one_of(input$aggr_time, input$group_factor), cand, valence)) %>%
summarise(SUM_rtr = mean(trans_rtr), n = mean(n)) %>%
ungroup %>%
mutate(SUM_rtr = SUM_rtr / n)
if (input$aggr_item == 2) {
# mode 2: collapse valence, keep per-candidate sums
d %>%
group_by_at(vars(one_of(input$aggr_time, input$group_factor), cand)) %>%
summarise(SUM_rtr = sum(SUM_rtr))
} else if (input$aggr_item == 3) {
# mode 3: between-candidate balance -- Merkel counted positive,
# everything else negative -- summed into one "Between-Saldo" series
d %>%
group_by_at(vars(one_of(input$aggr_time, input$group_factor), cand)) %>%
summarise(SUM_rtr = sum(SUM_rtr)) %>%
ungroup %>%
mutate(SUM_rtr = ifelse(cand == "Merkel", SUM_rtr, -SUM_rtr)) %>%
group_by_at(vars(one_of(input$aggr_time, input$group_factor))) %>%
summarise(SUM_rtr = sum(SUM_rtr)) %>%
mutate(cand = "Between-Saldo")
}
else {
# default mode: per-candidate/valence data unchanged
d
}
})
# Respondent counts per selected grouping variable.
n_table = reactive({
n_d %>% group_by_at(vars(one_of(input$group_factor))) %>% count()
})
output$n_print = renderTable({
n_table()
})
# Main time-series plot.
output$kurve <- renderPlot({
min_rtr = visdata() %>% .$SUM_rtr %>% min
p = visdata() %>%
ggplot(aes_string(input$aggr_time, "SUM_rtr"))
if (input$speaker == "Ja") {
# shade the background by who is speaking at each moment
p = p +
geom_rect(data = d_content_app,
aes(xmin = start_time, xmax = end_time, ymin = -Inf, ymax = Inf,
fill = sprecher),
inherit.aes = FALSE, alpha = .2)
}
if (input$aggr_item != 3) {
p = p + geom_bar(aes(fill = cand), stat = "identity", position = "dodge") +
facet_wrap(input$group_factor, ncol = 1)
} else {
p = p + geom_line(aes_string(color = input$group_factor))
}
p = p + coord_cartesian(xlim = input$time) +
labs(color = NULL, fill = NULL, x = NULL) +
geom_hline(yintercept = 0, linetype = 2) +
party_colors + party_fills
if (input$content != "Nein") {
# annotate debate content below the curves. NOTE(review): the label
# column is resolved via eval(parse(text = input$content)); this works
# only because the choices are fixed column names -- .data[[input$content]]
# would be safer.
p = p + geom_text(data = d_content_app, aes(start_time, min_rtr - 0.1 * abs(min_rtr), label = eval(parse(text = input$content))), angle = 90, vjust = 1, hjust = 0, inherit.aes = FALSE)
}
p
})
# Table of the data points nearest a click on the plot.
output$click_info <- renderPrint({
nearPoints(visdata(), input$plot1_click, addDist = FALSE, maxpoints = 3)
})
})
| /R/rtr_pb/server.R | no_license | bachl/rtr_napoko | R | false | false | 4,330 | r | #
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(lubridate)
library(tidyverse); theme_set(theme_bw(base_size = 14))
d_pb_app = readRDS("data/d_pb_app.rds")
n_d = d_pb_app %>% group_by(match_id) %>% slice(1)
d_content_app = readRDS("data/d_content_app.rds")
party_colors = scale_color_manual(values = c("Angela Merkel" = "black", "Peer Steinbrueck" = "red", "keinen von beiden" = "orange", "CDU/CSU" = "black", "AfD" = "blue", "alle" = "black", "DIE LINKE" = "purple", "FDP" = "yellow", "GRUENE" = "green", "keine Angabe" = "darkgrey", "keine Partei" = "orange", "maennlich" = "darkblue", "noch nicht entschieden" = "orange", "PIRATEN" = "goldenrod", "SPD" = "red", "trifft nicht zu" = "darkgrey", "weiblich" = "green"))
party_fills = scale_fill_manual(values = c("Angela Merkel" = "black", "Peer Steinbrueck" = "red", "keinen von beiden" = "orange", "CDU/CSU" = "black", "AfD" = "blue", "alle" = "black", "DIE LINKE" = "purple", "FDP" = "yellow", "GRUENE" = "green", "keine Angabe" = "darkgrey", "keine Partei" = "orange", "maennlich" = "darkblue", "noch nicht entschieden" = "orange", "PIRATEN" = "goldenrod", "SPD" = "red", "trifft nicht zu" = "darkgrey", "weiblich" = "pink", "Merkel" = "black", "Merkel, Angela" = "black", "Steinbrueck" = "red", "Steinbrueck, Peer" = "red", "Moderator/in" = "turquoise", "Between-Saldo" = "black"))
shinyServer(function(input, output) {
output$ui <- renderUI({
checkboxGroupInput("groups",
"Gruppen",
choices = unique(unlist(d_pb_app[, input$group_factor])),
selected = c("Angela Merkel", "Peer Steinbrueck"))
})
visdata <- reactive({
d = d_pb_app %>%
filter_at(vars(one_of(input$group_factor)), any_vars(. %in% input$groups)) %>%
group_by_at(vars(one_of(input$group_factor))) %>%
mutate(n = length(unique(match_id))) %>% ungroup %>%
group_by_at(vars(one_of(input$aggr_time, input$group_factor), cand, valence)) %>%
summarise(SUM_rtr = mean(trans_rtr), n = mean(n)) %>%
ungroup %>%
mutate(SUM_rtr = SUM_rtr / n)
if (input$aggr_item == 2) {
d %>%
group_by_at(vars(one_of(input$aggr_time, input$group_factor), cand)) %>%
summarise(SUM_rtr = sum(SUM_rtr))
} else if (input$aggr_item == 3) {
d %>%
group_by_at(vars(one_of(input$aggr_time, input$group_factor), cand)) %>%
summarise(SUM_rtr = sum(SUM_rtr)) %>%
ungroup %>%
mutate(SUM_rtr = ifelse(cand == "Merkel", SUM_rtr, -SUM_rtr)) %>%
group_by_at(vars(one_of(input$aggr_time, input$group_factor))) %>%
summarise(SUM_rtr = sum(SUM_rtr)) %>%
mutate(cand = "Between-Saldo")
}
else {
d
}
})
n_table = reactive({
n_d %>% group_by_at(vars(one_of(input$group_factor))) %>% count()
})
output$n_print = renderTable({
n_table()
})
output$kurve <- renderPlot({
min_rtr = visdata() %>% .$SUM_rtr %>% min
p = visdata() %>%
ggplot(aes_string(input$aggr_time, "SUM_rtr"))
if (input$speaker == "Ja") {
p = p +
geom_rect(data = d_content_app,
aes(xmin = start_time, xmax = end_time, ymin = -Inf, ymax = Inf,
fill = sprecher),
inherit.aes = FALSE, alpha = .2)
}
if (input$aggr_item != 3) {
p = p + geom_bar(aes(fill = cand), stat = "identity", position = "dodge") +
facet_wrap(input$group_factor, ncol = 1)
} else {
p = p + geom_line(aes_string(color = input$group_factor))
}
p = p + coord_cartesian(xlim = input$time) +
labs(color = NULL, fill = NULL, x = NULL) +
geom_hline(yintercept = 0, linetype = 2) +
party_colors + party_fills
if (input$content != "Nein") {
p = p + geom_text(data = d_content_app, aes(start_time, min_rtr - 0.1 * abs(min_rtr), label = eval(parse(text = input$content))), angle = 90, vjust = 1, hjust = 0, inherit.aes = FALSE)
}
p
})
output$click_info <- renderPrint({
nearPoints(visdata(), input$plot1_click, addDist = FALSE, maxpoints = 3)
})
})
|
# Rename the column named by `col` to `val` in the projection table A_85,
# then compare projected values against observations (A_obs).
col = "CDI"
names(A_85)[names(A_85) == col] <- 'val'
# all.x=T keeps every projected model/location row even without an observation.
df <- merge(A_85[c('model', 'lat', 'long', 'location', 'val')], A_obs[c('location', col)], all.x=T)
# Projected minus observed value per model/location.
df$val <- df$val - df[, col]
# Partition locations by the size of the change.
df_hiPosChange <- df %>% filter(val >= 0.5)
df_hiNegChange <- df %>% filter(val <= -0.1)
# "No change" band: differences within [-0.08, 0.08].
df_NoChange <- df %>% filter(val <= 0.08)
df_NoChange <- df_NoChange %>% filter(val >= -0.08)
df_mean <- aggregate(val ~ lat+long+location, data=df, mean) | /cold_hardiness/2021_for_paper/plot_drivers/find_HiChanges.R | permissive | HNoorazar/Ag | R | false | false | 430 | r | col = "CDI"
names(A_85)[names(A_85) == col] <- 'val'
df <- merge(A_85[c('model', 'lat', 'long', 'location', 'val')], A_obs[c('location', col)], all.x=T)
df$val <- df$val - df[, col]
df_hiPosChange <- df %>% filter(val >= 0.5)
df_hiNegChange <- df %>% filter(val <= -0.1)
df_NoChange <- df %>% filter(val <= 0.08)
df_NoChange <- df_NoChange %>% filter(val >= -0.08)
df_mean <- aggregate(val ~ lat+long+location, data=df, mean) |
\name{projection}
\alias{projection}
\title{
Compute the projection matrix onto a given set of variables
}
\description{
The function computes the projection matrix onto a set of columns of a given matrix.
}
\usage{
projection(X, active = NULL)
}
\arguments{
\item{X}{
a matrix containing the columns onto which the projection matrix is computed.
}
\item{active}{
an index set of the columns of X.
}
}
\value{
Returns the projection matrix onto the columns of "X" whose indices are included in "active". When active=NULL, a null matrix is returned.
}
\author{
Haeran Cho
}
| /man/projection.Rd | no_license | hjl2014/tilting | R | false | false | 613 | rd | \name{projection}
\alias{projection}
\title{
Compute the projection matrix onto a given set of variables
}
\description{
The function computes the projection matrix onto a set of columns of a given matrix.
}
\usage{
projection(X, active = NULL)
}
\arguments{
\item{X}{
a matrix containing the columns onto which the projection matrix is computed.
}
\item{active}{
an index set of the columns of X.
}
}
\value{
Returns the projection matrix onto the columns of "X" whose indices are included in "active". When active=NULL, a null matrix is returned.
}
\author{
Haeran Cho
}
|
library(onls)
#https://www.r-bloggers.com/introducing-orthogonal-nonlinear-least-squares-regression-in-r/
DNase1 <- subset(DNase, Run == 1)
DNase1$density <- sapply(DNase1$density, function(x) rnorm(1, x, 0.1 * x))
mod1 <- onls(density ~ Asym/(1 + exp((xmid - log(conc))/scal)),
data = DNase1, start = list(Asym = 3, xmid = 0, scal = 1))
print(mod1)
plot(mod1, xlim = c(0, 0.5), ylim = c(0, 0.5)) | /Orthogonal Nonlinear Least-Squares Regression.R | no_license | thomaspennerconsidine/FinFuncs | R | false | false | 414 | r |
library(onls)
#https://www.r-bloggers.com/introducing-orthogonal-nonlinear-least-squares-regression-in-r/
DNase1 <- subset(DNase, Run == 1)
DNase1$density <- sapply(DNase1$density, function(x) rnorm(1, x, 0.1 * x))
mod1 <- onls(density ~ Asym/(1 + exp((xmid - log(conc))/scal)),
data = DNase1, start = list(Asym = 3, xmid = 0, scal = 1))
print(mod1)
plot(mod1, xlim = c(0, 0.5), ylim = c(0, 0.5)) |
#' turn a bbox vector into a polygon
#'
#' @param bbox a length 4 numeric vector of xmin, ymin, xmax, ymax
#' @param bbox_crs the coordinate reference system the `bbox` is using
#' @param return_crs the coordinate reference system to convert the data to
#'
#' @return an sf polygon based on the coordinates of `bbox`
bbox_to_polygon <- function(bbox, bbox_crs = "+init=epsg:4326", return_crs = NULL) {
  # Tag the plain numeric vector so sf recognises it as a bounding box.
  names(bbox) <- c("xmin", "ymin", "xmax", "ymax")
  class(bbox) <- "bbox"
  poly <- sf::st_as_sfc(bbox)
  st_crs(poly) <- st_crs(bbox_crs)
  # Reproject only when a target CRS was requested.
  if (is.null(return_crs)) {
    poly
  } else {
    sf::st_transform(poly, return_crs)
  }
}
#' Create a polygon for the plotting view
#'
#'
#' @param view_config a list that contains the numeric vector `bbox`, `height`, `width` and `projection` (optional)
#'
#' @details if `projection` is missing, it is assumed to be "+init=epsg:4326"
#' @return an sf polygon that contains the `bbox` specified at the aspect ratio specified
as_view_polygon <- function(view_config) {
# the projected polygon specified by the user
bbox_projected <- bbox_to_polygon(bbox = unlist(view_config$bbox), return_crs = view_config$projection)
poly_bbox <- as.numeric(sf::st_bbox(bbox_projected))
# target aspect ratio of the output figure (width / height)
aspect <- view_config$width / view_config$height
# aspect ratio of the user-supplied bbox in projected coordinates
bbox_aspect <- diff(poly_bbox[c(1,3)]) / diff(poly_bbox[c(2,4)])
if (bbox_aspect > aspect){ # flatter than it should be
# pad the y range symmetrically so the box matches the figure aspect
new_y_diff <- diff(poly_bbox[c(1,3)]) / aspect
y_buffer <- new_y_diff - diff(poly_bbox[c(2,4)])
poly_bbox[2] <- poly_bbox[2] - y_buffer / 2
poly_bbox[4] <- poly_bbox[4] + y_buffer / 2
} else { # taller than it should be
# new x dimension
new_x_diff <- diff(poly_bbox[c(2,4)]) * aspect
# new x dimension - existing x dimension
x_buffer <- new_x_diff - diff(poly_bbox[c(1,3)])
# subtract half the buffer from min x, add half to max x
poly_bbox[1] <- poly_bbox[1] - x_buffer / 2
poly_bbox[3] <- poly_bbox[3] + x_buffer / 2
}
view_poly <- bbox_to_polygon(poly_bbox, bbox_crs = st_crs(bbox_projected))
return(view_poly)
}
#' create a view polygon from the configuration info and then write it to a file
#' and push it to Drive so that we can use a shared version of the polygon
#' across OSes / GDAL versions
post_view_polygon <- function(ind_file, view_config) {
  out_file <- as_data_file(ind_file)
  saveRDS(as_view_polygon(view_config), file = out_file)
  gd_put(ind_file, out_file)
}
#' download and read in the view polygon
get_view_polygon <- function(view_poly_ind) {
  poly_file <- sc_retrieve(view_poly_ind)
  readRDS(poly_file)
}
#' fetch and merge geometries from maps and mapdata packages
#'
#' @param ind_file indicator file to write once the merged data are posted
#' @param geoms_config a list that includes named arguments for maps::map(...) function calls
#' @param crs an optional st_crs object, or it is taken from `within` argument
#' @param within a `sf` polygon to check for intersections (outside are excluded)
#'
#' @return an sf object with geometries filtered according to input arguments
fetch_geoms <- function(ind_file, geoms_config, crs = sf::st_crs(within), within = NULL){
  # Fetch one feature set from the maps package and optionally reproject and
  # clip it to the `within` polygon.
  fetch_sf_geoms <- function(...){
    map_data <- sf::st_as_sf(maps::map(..., fill = TRUE, plot = FALSE))
    if (!is.na(crs)) {
      # crs will be NA if within is NULL
      map_data <- sf::st_transform(map_data, crs)
    }
    if (!is.null(within)) {
      # keep only features intersecting the view polygon
      subset_idx <- sf::st_intersects(map_data, within, sparse = F)
      map_data <- map_data[subset_idx, ]
    }
    return(map_data)
  }
  # call for all features specified
  geoms_list <- lapply(geoms_config[[1]], function(x) do.call(fetch_sf_geoms, x))
  # merge all feature sets in one shot (replaces the previous incremental
  # rbind loop, which re-grew the object on every iteration)
  geoms_out <- do.call(rbind, geoms_list)
  # save and post data, write indicator file
  saveRDS(geoms_out, as_data_file(ind_file))
  # NOTE(review): this posts `ind_file` as both arguments, unlike
  # post_view_polygon() which posts the data file (as_data_file(ind_file)) --
  # confirm against gd_put()'s expected (ind_file, data_file) signature.
  gd_put(ind_file, ind_file)
}
| /1_fetch/src/map_utils.R | no_license | lindsayplatt/vizstorm-GIF | R | false | false | 4,036 | r | #' turn a bbox vector into a polygon
#'
#' @param bbox a length 4 numeric vector of xmin, ymin, xmax, ymax
#' @param bbox_crs the coordinate reference system the `bbox` is using
#' @param return_crs the coordinate reference system to convert the data to
#'
#' @return an sf polygon that based on coordinates of `bbox`
bbox_to_polygon <- function(bbox, bbox_crs = "+init=epsg:4326", return_crs = NULL) {
names(bbox) <- c("xmin","ymin","xmax","ymax")
class(bbox) <- "bbox"
bbox_poly <- sf::st_as_sfc(bbox)
st_crs(bbox_poly) <- st_crs(bbox_crs)
if (!is.null(return_crs)) {
sf::st_transform(bbox_poly, return_crs)
}
else {
bbox_poly
}
}
#' Create a polygon for the plotting view
#'
#'
#' @param view_config a list that contains the numeric vector `bbox`, `height`, `width` and `projection` (optional)
#'
#' @details if `projection` is missing, it is assumed to be "+init=epsg:4326"
#' @return an sf polygon that contains the `bbox` specfied at the aspect ratio specified
as_view_polygon <- function(view_config) {
# the projected polygon specified by the user
bbox_projected <- bbox_to_polygon(bbox = unlist(view_config$bbox), return_crs = view_config$projection)
poly_bbox <- as.numeric(sf::st_bbox(bbox_projected))
aspect <- view_config$width / view_config$height
bbox_aspect <- diff(poly_bbox[c(1,3)]) / diff(poly_bbox[c(2,4)])
if (bbox_aspect > aspect){ # flatter than it should be
new_y_diff <- diff(poly_bbox[c(1,3)]) / aspect
y_buffer <- new_y_diff - diff(poly_bbox[c(2,4)])
poly_bbox[2] <- poly_bbox[2] - y_buffer / 2
poly_bbox[4] <- poly_bbox[4] + y_buffer / 2
} else { # taller than it should be
# new x dimension
new_x_diff <- diff(poly_bbox[c(2,4)]) * aspect
# new x dimension - existing y dimension
x_buffer <- new_x_diff - diff(poly_bbox[c(1,3)])
# subtract half the buffer from min x, add half to max x
poly_bbox[1] <- poly_bbox[1] - x_buffer / 2
poly_bbox[3] <- poly_bbox[3] + x_buffer / 2
}
view_poly <- bbox_to_polygon(poly_bbox, bbox_crs = st_crs(bbox_projected))
return(view_poly)
}
#' create a view polygon from the configuration info and then write it to a file
#' and push it to Drive so that we can use a shared version of the polygon
#' across OSes / GDAL versions
post_view_polygon <- function(ind_file, view_config) {
data_file <- as_data_file(ind_file)
view_polygon <- as_view_polygon(view_config)
saveRDS(view_polygon, file=data_file)
gd_put(ind_file, data_file)
}
#' download and read in the view polygon
get_view_polygon <- function(view_poly_ind) {
readRDS(sc_retrieve(view_poly_ind))
}
#' fetch and merge geometries from maps and mapdata packages
#'
#' @param geoms_config a list that includes named arguments for maps::map(...) function calls
#' @param crs an option st_crs object, or it is taken from `within` argument
#' @param within a `sf` polygon to check for intersections (outside are excluded)
#'
#' @return an sf object with geometries filtered according to input arguments
fetch_geoms <- function(ind_file, geoms_config, crs = sf::st_crs(within), within = NULL){
fetch_sf_geoms <- function(...){
# get data from maps package
map_data <- sf::st_as_sf(maps::map(..., fill = TRUE, plot = FALSE))
if (!is.na(crs)) {
# crs will be NA if within is NULL
map_data <- sf::st_transform(map_data, crs)
}
if (!is.null(within)) {
# filter polygons to intersections w/ `within`
subset_idx <- sf::st_intersects(map_data, within, sparse = F)
map_data <- map_data[subset_idx, ]
}
return(map_data)
}
# call for all features specified
geoms_list <- lapply(geoms_config[[1]], function(x) do.call(fetch_sf_geoms, x))
geoms_out <- geoms_list[[1]]
# merge if there are more than one
if (length(geoms_list) > 1){
for (i in 2:length(geoms_list)){
geoms_out <- rbind(geoms_out, geoms_list[[i]])
}
}
# save and post data, write indicator file
saveRDS(geoms_out, as_data_file(ind_file))
gd_put(ind_file, ind_file)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/multivariance-functions.R
\name{total.multivariance}
\alias{total.multivariance}
\title{total distance multivariance}
\usage{
total.multivariance(
x,
vec = NA,
lambda = 1,
Nscale = TRUE,
Escale = TRUE,
squared = TRUE,
...
)
}
\arguments{
\item{x}{either a data matrix or a list of doubly centered distance matrices}
\item{vec}{if x is a matrix, then this indicates which columns are treated together as one sample; if x is a list, these are the indexes for which the multivariance is calculated. The default is all columns and all indexes, respectively.}
\item{lambda}{a scaling parameter >0. Each k-tuple multivariance gets weight \code{lambda^(n-k)}.}
\item{Nscale}{if \code{TRUE} the multivariance is scaled up by the sample size (and thus it is exactly as required for the test of independence)}
\item{Escale}{if \code{TRUE} then it is scaled by the number of multivariances which are theoretically summed up (in the case of independence this yields for normalized distance matrices an estimator with expectation 1)}
\item{squared}{if \code{FALSE} it returns the actual multivariance, otherwise the squared multivariance (less computation)}
\item{...}{these are passed to \code{\link{cdms}} (which is only invoked if \code{x} is a matrix)}
}
\description{
computes the total distance multivariance
}
\details{
Total distance multivariance is by definition the scaled sum of certain distance multivariances, and it characterizes dependence.
As a rough guide to interpret the value of total distance multivariance note:
\itemize{
\item Large values indicate dependence.
\item For \code{Nscale = TRUE} values close to 1 and smaller indicate independence, larger values indicate dependence. In fact, in the case of independence the test statistic is a Gaussian quadratic form with expectation 1 and samples of it can be generated by \code{\link{resample.multivariance}}.
\item For \code{Nscale = FALSE} small values (close to 0) indicate independence, larger values indicate dependence.
}
Finally note, that due to numerical (in)precision the value of total multivariance might become negative. In these cases it is set to 0. A warning is issued, if the value is negative and further than the usual (used by \code{\link[base]{all.equal}}) tolerance away from 0.
}
\examples{
x = matrix(rnorm(100*3),ncol = 3)
total.multivariance(x) #for an independent sample
# the value coincides with
(multivariance(x[,c(1,2)],Nscale = TRUE) + multivariance(x[,c(1,3)],Nscale = TRUE)+
multivariance(x[,c(2,3)],Nscale = TRUE) + multivariance(x,Nscale = TRUE))/4
total.multivariance(coins(100)) #value for a dependent sample which is 2-independent
}
\references{
For the theoretic background see the references given on the main help page of this package: \link{multivariance-package}.
}
| /fuzzedpackages/multivariance/man/total.multivariance.Rd | no_license | akhikolla/testpackages | R | false | true | 2,875 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/multivariance-functions.R
\name{total.multivariance}
\alias{total.multivariance}
\title{total distance multivariance}
\usage{
total.multivariance(
x,
vec = NA,
lambda = 1,
Nscale = TRUE,
Escale = TRUE,
squared = TRUE,
...
)
}
\arguments{
\item{x}{either a data matrix or a list of doubly centered distance matrices}
\item{vec}{if x is a matrix, then this indicates which columns are treated together as one sample; if x is a list, these are the indexes for which the multivariance is calculated. The default is all columns and all indexes, respectively.}
\item{lambda}{a scaling parameter >0. Each k-tuple multivariance gets weight \code{lambda^(n-k)}.}
\item{Nscale}{if \code{TRUE} the multivariance is scaled up by the sample size (and thus it is exactly as required for the test of independence)}
\item{Escale}{if \code{TRUE} then it is scaled by the number of multivariances which are theoretically summed up (in the case of independence this yields for normalized distance matrices an estimator with expectation 1)}
\item{squared}{if \code{FALSE} it returns the actual multivariance, otherwise the squared multivariance (less computation)}
\item{...}{these are passed to \code{\link{cdms}} (which is only invoked if \code{x} is a matrix)}
}
\description{
computes the total distance multivariance
}
\details{
Total distance multivariance is by definition the scaled sum of certain distance multivariances, and it characterizes dependence.
As a rough guide to interpret the value of total distance multivariance note:
\itemize{
\item Large values indicate dependence.
\item For \code{Nscale = TRUE} values close to 1 and smaller indicate independence, larger values indicate dependence. In fact, in the case of independence the test statistic is a Gaussian quadratic form with expectation 1 and samples of it can be generated by \code{\link{resample.multivariance}}.
\item For \code{Nscale = FALSE} small values (close to 0) indicate independence, larger values indicate dependence.
}
Finally note, that due to numerical (in)precision the value of total multivariance might become negative. In these cases it is set to 0. A warning is issued, if the value is negative and further than the usual (used by \code{\link[base]{all.equal}}) tolerance away from 0.
}
\examples{
x = matrix(rnorm(100*3),ncol = 3)
total.multivariance(x) #for an independent sample
# the value coincides with
(multivariance(x[,c(1,2)],Nscale = TRUE) + multivariance(x[,c(1,3)],Nscale = TRUE)+
multivariance(x[,c(2,3)],Nscale = TRUE) + multivariance(x,Nscale = TRUE))/4
total.multivariance(coins(100)) #value for a dependent sample which is 2-independent
}
\references{
For the theoretic background see the references given on the main help page of this package: \link{multivariance-package}.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_occurance_rolling.R
\name{plot_occurance_rolling}
\alias{plot_occurance_rolling}
\title{A function for the plotting of occurrences in experience sampling data with lagged data}
\usage{
plot_occurance_rolling(data, title = "")
}
\arguments{
\item{data}{an object, produced with desc_occurance_rolling}
\item{title}{the title (optional)}
}
\description{
This function produces a plot of occurrences of experience sampling data
}
\examples{
plot_occurance_rolling(dat1)
}
\keyword{descriptives}
\keyword{experience}
\keyword{sampling}
| /man/plot_occurance_rolling.Rd | no_license | AlexanderNBrand/ExSurv | R | false | true | 630 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_occurance_rolling.R
\name{plot_occurance_rolling}
\alias{plot_occurance_rolling}
\title{A function for the plotting of occurrences in experience sampling data with lagged data}
\usage{
plot_occurance_rolling(data, title = "")
}
\arguments{
\item{data}{an object, produced with desc_occurance_rolling}
\item{title}{the title (optional)}
}
\description{
This function produces a plot of occurrences of experience sampling data
}
\examples{
plot_occurance_rolling(dat1)
}
\keyword{descriptives}
\keyword{experience}
\keyword{sampling}
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/ForIA.R
\name{addCustom}
\alias{addCustom}
\title{addCustom function}
\usage{
addCustom(df, column, FUN = log, name = deparse(substitute(FUN)),
end = TRUE, custom.name = NULL)
}
\arguments{
\item{data}{is a mts object}
\item{time_start}{is the starting period for trends}
\item{time_names}{is a vector of names for created trends}
}
\description{
This is a function allowing to create custom transformation for selected columns in a dataframe
}
\examples{
mtcars \%>\% addCustom(2:3, FUN = function(x) log(x), name = "lol")
transform_pmr(ts(mtcars), column = c("cyl", "disp"), method = "custom", end = TRUE, name = "r", FUN = function(x) {x * as.data.frame(ts(mtcars)[,1])},
custom.name = function(df, column, name) c(colnames(df),
sapply(column, function(x) ifelse(length(grep("n", x)) > 0, gsub("n", "r", x), paste0(x, "_r")))))
}
| /man/addCustom.Rd | no_license | Rawrqs/ForIA | R | false | false | 967 | rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/ForIA.R
\name{addCustom}
\alias{addCustom}
\title{addCustom function}
\usage{
addCustom(df, column, FUN = log, name = deparse(substitute(FUN)),
end = TRUE, custom.name = NULL)
}
\arguments{
\item{data}{is a mts object}
\item{time_start}{is the starting period for trends}
\item{time_names}{is a vector of names for created trends}
}
\description{
This is a function allowing to create custom transformation for selected columns in a dataframe
}
\examples{
mtcars \%>\% addCustom(2:3, FUN = function(x) log(x), name = "lol")
transform_pmr(ts(mtcars), column = c("cyl", "disp"), method = "custom", end = TRUE, name = "r", FUN = function(x) {x * as.data.frame(ts(mtcars)[,1])},
custom.name = function(df, column, name) c(colnames(df),
sapply(column, function(x) ifelse(length(grep("n", x)) > 0, gsub("n", "r", x), paste0(x, "_r")))))
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.