content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
# Fit a cross-validated elastic-net regression (glmnet) on the pleura
# training set and append a summary of the fitted coefficient path to a
# report file.
library(glmnet)

# Use full argument names: the original `head = T` relied on partial
# argument matching and the reassignable alias `T`.
mydata <- read.table("./TrainingSet/LassoBIC/pleura.csv", header = TRUE, sep = ",")

# Response is column 1; predictors start at column 4.
x <- as.matrix(mydata[, 4:ncol(mydata)])
y <- as.matrix(mydata[, 1])

# Fix the RNG so the 10-fold CV split is reproducible.
set.seed(123)

# alpha = 0.04: an elastic net that is mostly ridge with a small lasso
# component; cross-validation loss is mean absolute error.
# Avoid naming the result `glm`, which masks stats::glm().
cv_fit <- cv.glmnet(x, y,
                    nfolds = 10, type.measure = "mae", alpha = 0.04,
                    family = "gaussian", standardize = TRUE)

# Append the fitted glmnet path summary to the report file.
sink('./Model/EN/Lasso/pleura/pleura_019.txt', append = TRUE)
print(cv_fit$glmnet.fit)
sink()
| /Model/EN/Lasso/pleura/pleura_019.R | no_license | leon1003/QSMART | R | false | false | 351 | r | library(glmnet)
# (duplicate copy of the pleura glmnet fitting script)
# Use full argument names: the original `head = T` relied on partial
# argument matching and the reassignable alias `T`.
mydata <- read.table("./TrainingSet/LassoBIC/pleura.csv", header = TRUE, sep = ",")

# Response is column 1; predictors start at column 4.
x <- as.matrix(mydata[, 4:ncol(mydata)])
y <- as.matrix(mydata[, 1])

# Fix the RNG so the 10-fold CV split is reproducible.
set.seed(123)

# alpha = 0.04: mostly-ridge elastic net; CV loss is mean absolute error.
# Avoid naming the result `glm`, which masks stats::glm().
cv_fit <- cv.glmnet(x, y,
                    nfolds = 10, type.measure = "mae", alpha = 0.04,
                    family = "gaussian", standardize = TRUE)

# Append the fitted glmnet path summary to the report file.
sink('./Model/EN/Lasso/pleura/pleura_019.txt', append = TRUE)
print(cv_fit$glmnet.fit)
sink()
|
% Generated by roxygen2 (4.0.2.9000): do not edit by hand
% Please edit documentation in R/adapter.r
\name{fetch_adapter}
\alias{fetch_adapter}
\title{Fetch a syberia IO adapter.}
\usage{
fetch_adapter(keyword)
}
\arguments{
\item{keyword}{character. The keyword for the adapter (e.g., 'file', 's3', etc.)}
}
\value{
an \code{adapter} object (defined in this package, syberiaStages)
}
\description{
IO adapters are (reference class) objects that have a \code{read}
and \code{write} method. By wrapping things in an adapter, you do not have to
worry about whether to use, e.g., \code{read.csv} versus \code{s3read}
or \code{write.csv} versus \code{s3store}. If you are familiar with
the tundra package, think of adapters as like tundra containers for
importing and exporting data.
}
\details{
For example, we can do: \code{fetch_adapter('file')$write(iris, '/tmp/iris.csv')}
and the contents of the built-in \code{iris} data set will be stored
in the file \code{"/tmp/iris.csv"}.
}
| /man/fetch_adapter.Rd | permissive | kirillseva/syberiaStages | R | false | false | 982 | rd | % Generated by roxygen2 (4.0.2.9000): do not edit by hand
% Please edit documentation in R/adapter.r
\name{fetch_adapter}
\alias{fetch_adapter}
\title{Fetch a syberia IO adapter.}
\usage{
fetch_adapter(keyword)
}
\arguments{
\item{keyword}{character. The keyword for the adapter (e.g., 'file', 's3', etc.)}
}
\value{
an \code{adapter} object (defined in this package, syberiaStages)
}
\description{
IO adapters are (reference class) objects that have a \code{read}
and \code{write} method. By wrapping things in an adapter, you do not have to
worry about whether to use, e.g., \code{read.csv} versus \code{s3read}
or \code{write.csv} versus \code{s3store}. If you are familiar with
the tundra package, think of adapters as like tundra containers for
importing and exporting data.
}
\details{
For example, we can do: \code{fetch_adapter('file')$write(iris, '/tmp/iris.csv')}
and the contents of the built-in \code{iris} data set will be stored
in the file \code{"/tmp/iris.csv"}.
}
|
# Coalescent simulation of the unfolded site frequency spectrum (SFS) under
# exponential population growth, averaged over `nloops` random genealogies.
n = 100 # Sample size
N0 = 1000000 # Population size
mu = 1e-8 # number of mutations per generation per bp
theta = 0.04 # Scaled mutation rate
l = 10000 # number of base pairs
alph = 7/(4*N0)
#alph = 0
nloops = 200
# xsi[i] accumulates the number of sites at which exactly i samples carry
# the derived allele (the unfolded SFS), summed across replicates.
xsi = matrix(0, ncol = (n-1))
for (il in 1:nloops) {
gen = 0
# tree[[t]] holds the lineages (each a vector of sample indices) that are
# still distinct after t-1 coalescent events.
tree = list(list())
for (i in 1:n) {
tree[[1]][i] = list(i)
}
# Making a random tree
for (k in n:2) {
tn = n+1-k
# Pick two of the k current lineages uniformly at random and merge them.
rand2 = sample.int(k,2)
tree[[tn+1]] = list()
merge_set = unlist(union(tree[[tn]][rand2[1]], tree[[tn]][rand2[2]]))
rem_set = tree[[tn]]
rem_set = rem_set[-c(rand2[1],rand2[2])]
tree[[tn+1]][[1]] = (merge_set)
tree[[tn+1]] = c(tree[[tn+1]],(rem_set))
#print('tn+1: ')
#print(tn+1)
#print(merge_set)
#print(rem_set)
#print(tree[[tn+1]])
}
# Find the mutations
# NOTE(review): this allocation is immediately overwritten once the total
# mutation count is known (below); it appears to be dead code.
snp = matrix(0,nrow=n,ncol=1)
mut_nums = c()
for (k in n:2) {
k_ch_2 = k*(k-1)/2
# Population size at the current generation under exponential growth
# (alph > 0 shrinks the population going back in time).
Nt = exp(-alph*gen)*N0
# Geometric waiting time until the next coalescence among k lineages.
# NOTE(review): rgeom requires prob <= 1; assumes k_ch_2/(2*Nt) stays
# below 1 for the chosen n and N0 — confirm for other parameter values.
Tk = rgeom(1, k_ch_2/(2*Nt))
#Tk = N0/(k_ch_2)
gen = gen + Tk
# Tk = N/(k_ch_2)
#print("Tk:")
#print(Tk)
# Poisson mean number of mutations per branch: l base pairs over Tk
# generations at rate mu per bp per generation.
p_mean = l*Tk*mu
#print('p_mean:')
#print(p_mean)
rand_vec = rpois(k,p_mean)
#print('rand_vec:')
#print(rand_vec)
mut_nums = c(mut_nums,rand_vec)
}
# mut_sum is the total number of mutations
mut_sum = sum(mut_nums)
# Distinct mutation positions sampled without replacement.
# NOTE(review): fails if mut_sum > l; holds only while l*mu*T stays small.
mut_locs = sample.int(l,mut_sum)
snp = matrix(0, nrow = n, ncol = mut_sum)
icol = 1
ibranch = 1
# Drop each mutation onto every sample descending from its branch
# (one snp column per mutation).
for (k in n:2) {
tn = n+1-k
for (i in 1:k) {
if (mut_nums[ibranch] >= 1) {
for (imu in 1:mut_nums[ibranch]) {
snp[c(unlist(tree[[tn]][i])),icol] = 1
icol = icol+1
}
}
ibranch = ibranch + 1
}
# print('icol:')
# print(icol)
}
colnames(snp) = mut_locs
# Num[i] = derived-allele count at mutation site i.
Num = matrix(0, ncol = dim(snp)[2])
for (i in 1:dim(snp)[2]) {
Num[i] = sum(snp[,i])
}
for (i in 1:(n-1)) {
xsi[i] = xsi[i] + length(which(Num == i))
}
}
# Average over replicates, then scale: plot i * xsi_i per base pair.
xsi = xsi / nloops
xsi = xsi * c(1:(n-1)) / l
png("P2_alpha=7.png")
plot(1:(n-1), xsi, type="b", xlab="i", ylab="i*xsi", main='alpha=7/(4*N)')
#lines(c(1,(n-1)),c(0.04,0.04), type="l")
dev.off()
| /Homeworks/A2/R/P2.R | no_license | mldmort/CSE-280A | R | false | false | 2,189 | r |
# Coalescent simulation of the unfolded site frequency spectrum (SFS) under
# exponential population growth, averaged over `nloops` random genealogies.
n = 100 # Sample size
N0 = 1000000 # Population size
mu = 1e-8 # number of mutations per generation per bp
theta = 0.04 # Scaled mutation rate
l = 10000 # number of base pairs
alph = 7/(4*N0)
#alph = 0
nloops = 200
# xsi[i] accumulates the number of sites at which exactly i samples carry
# the derived allele (the unfolded SFS), summed across replicates.
xsi = matrix(0, ncol = (n-1))
for (il in 1:nloops) {
gen = 0
# tree[[t]] holds the lineages (each a vector of sample indices) that are
# still distinct after t-1 coalescent events.
tree = list(list())
for (i in 1:n) {
tree[[1]][i] = list(i)
}
# Making a random tree
for (k in n:2) {
tn = n+1-k
# Pick two of the k current lineages uniformly at random and merge them.
rand2 = sample.int(k,2)
tree[[tn+1]] = list()
merge_set = unlist(union(tree[[tn]][rand2[1]], tree[[tn]][rand2[2]]))
rem_set = tree[[tn]]
rem_set = rem_set[-c(rand2[1],rand2[2])]
tree[[tn+1]][[1]] = (merge_set)
tree[[tn+1]] = c(tree[[tn+1]],(rem_set))
#print('tn+1: ')
#print(tn+1)
#print(merge_set)
#print(rem_set)
#print(tree[[tn+1]])
}
# Find the mutations
# NOTE(review): this allocation is immediately overwritten once the total
# mutation count is known (below); it appears to be dead code.
snp = matrix(0,nrow=n,ncol=1)
mut_nums = c()
for (k in n:2) {
k_ch_2 = k*(k-1)/2
# Population size at the current generation under exponential growth
# (alph > 0 shrinks the population going back in time).
Nt = exp(-alph*gen)*N0
# Geometric waiting time until the next coalescence among k lineages.
# NOTE(review): rgeom requires prob <= 1; assumes k_ch_2/(2*Nt) stays
# below 1 for the chosen n and N0 — confirm for other parameter values.
Tk = rgeom(1, k_ch_2/(2*Nt))
#Tk = N0/(k_ch_2)
gen = gen + Tk
# Tk = N/(k_ch_2)
#print("Tk:")
#print(Tk)
# Poisson mean number of mutations per branch: l base pairs over Tk
# generations at rate mu per bp per generation.
p_mean = l*Tk*mu
#print('p_mean:')
#print(p_mean)
rand_vec = rpois(k,p_mean)
#print('rand_vec:')
#print(rand_vec)
mut_nums = c(mut_nums,rand_vec)
}
# mut_sum is the total number of mutations
mut_sum = sum(mut_nums)
# Distinct mutation positions sampled without replacement.
# NOTE(review): fails if mut_sum > l; holds only while l*mu*T stays small.
mut_locs = sample.int(l,mut_sum)
snp = matrix(0, nrow = n, ncol = mut_sum)
icol = 1
ibranch = 1
# Drop each mutation onto every sample descending from its branch
# (one snp column per mutation).
for (k in n:2) {
tn = n+1-k
for (i in 1:k) {
if (mut_nums[ibranch] >= 1) {
for (imu in 1:mut_nums[ibranch]) {
snp[c(unlist(tree[[tn]][i])),icol] = 1
icol = icol+1
}
}
ibranch = ibranch + 1
}
# print('icol:')
# print(icol)
}
colnames(snp) = mut_locs
# Num[i] = derived-allele count at mutation site i.
Num = matrix(0, ncol = dim(snp)[2])
for (i in 1:dim(snp)[2]) {
Num[i] = sum(snp[,i])
}
for (i in 1:(n-1)) {
xsi[i] = xsi[i] + length(which(Num == i))
}
}
# Average over replicates, then scale: plot i * xsi_i per base pair.
xsi = xsi / nloops
xsi = xsi * c(1:(n-1)) / l
png("P2_alpha=7.png")
plot(1:(n-1), xsi, type="b", xlab="i", ylab="i*xsi", main='alpha=7/(4*N)')
#lines(c(1,(n-1)),c(0.04,0.04), type="l")
dev.off()
|
# Convert a dose vector into a binary action indicator: 1 where a strictly
# positive dose was recorded, 0 for zero, negative, or missing values.
foo_action <- function(x){
  taken <- !is.na(x) & x > 0
  taken * 1
}
# Build a Q-learning-ready transition dataset from the glucose/insulin
# records in `tmp_`.
# NOTE(review): relies on a data frame `tmp_` existing in the calling
# environment and being ordered by patient id and time — confirm with the
# caller before reuse.
construct_qlearning_ready_data <- function(){
# keep nb rows
n <- nrow(tmp_)
# learning dataset (tm was for transition matrix, but surely a better name would be suited)
# Pair each row (state at time t, suffix _0) with the following row
# (state at t+1, suffix _1) by offsetting the same columns by one.
tm <- data.frame(
'daytime_0' = tmp_$daytime_disc[-n],
'daytime_1' = tmp_$daytime_disc[-1],
'glucose_0' = tmp_$value_blood_glucose_measurement_[-n],
'glucose_1' = tmp_$value_blood_glucose_measurement_[-1]
)
# Binary indicators for each insulin type administered at time t.
tm$action_nph_0 <- foo_action(tmp_$value_nph_insulin_dose_[-n])
tm$action_reg_0 <- foo_action(tmp_$value_regular_insulin_dose_[-n])
tm$action_ult_0 <- foo_action(tmp_$value_ultralente_insulin_dose_[-n])
# Encode the joint action as a 3-character string, e.g. "010" = regular only.
tm$action_0 <- apply(cbind(tm[,c('action_nph_0', 'action_reg_0', 'action_ult_0')]), 1, paste0, collapse = "")
tm$action_0 <- factor(x = tm$action_0)
### tm$action_P <- tm$action_0[]
# Keep only transitions within the same patient (consecutive-row ids match),
# then drop any rows with missing values.
is_ok <- (tmp_$id[-1] == tmp_$id[-n])
tm <- tm[is_ok,]
tm <- na.omit(tm)
return(tm)
}
| /construct_qlearning_ready_data.R | no_license | FredericLoge/diabetesPresc | R | false | false | 989 | r | foo_action <- function(x){
v <- x
v[is.na(x)] <- 0
v <- 1*(v > 0)
return(v)
}
# Build a Q-learning-ready transition dataset from the glucose/insulin
# records in `tmp_`.
# NOTE(review): relies on a data frame `tmp_` existing in the calling
# environment and being ordered by patient id and time — confirm with the
# caller before reuse.
construct_qlearning_ready_data <- function(){
# keep nb rows
n <- nrow(tmp_)
# learning dataset (tm was for transition matrix, but surely a better name would be suited)
# Pair each row (state at time t, suffix _0) with the following row
# (state at t+1, suffix _1) by offsetting the same columns by one.
tm <- data.frame(
'daytime_0' = tmp_$daytime_disc[-n],
'daytime_1' = tmp_$daytime_disc[-1],
'glucose_0' = tmp_$value_blood_glucose_measurement_[-n],
'glucose_1' = tmp_$value_blood_glucose_measurement_[-1]
)
# Binary indicators for each insulin type administered at time t.
tm$action_nph_0 <- foo_action(tmp_$value_nph_insulin_dose_[-n])
tm$action_reg_0 <- foo_action(tmp_$value_regular_insulin_dose_[-n])
tm$action_ult_0 <- foo_action(tmp_$value_ultralente_insulin_dose_[-n])
# Encode the joint action as a 3-character string, e.g. "010" = regular only.
tm$action_0 <- apply(cbind(tm[,c('action_nph_0', 'action_reg_0', 'action_ult_0')]), 1, paste0, collapse = "")
tm$action_0 <- factor(x = tm$action_0)
### tm$action_P <- tm$action_0[]
# Keep only transitions within the same patient (consecutive-row ids match),
# then drop any rows with missing values.
is_ok <- (tmp_$id[-1] == tmp_$id[-n])
tm <- tm[is_ok,]
tm <- na.omit(tm)
return(tm)
}
|
# Download the 2015 SIREM financial statements from Superintendencia de
# Sociedades, keep Bogotá companies, and flag firms with liquidity problems.
# Load libraries
library(readxl)
library(ggplot2)
library(dplyr)
#
# Download the data archive
url <- paste0("http://www.supersociedades.gov.co/asuntos-economicos-y-contables/",
"estudios-y-supervision-por-riesgos/SIREM/Documents/2015.zip")
file <- "datos.zip"
download.file(url, file)
# Unzip the archive
unzip(file)
setwd("2015")
source("source.R")
files <- c("BalanceGeneral2015SIREMV2.xls", "EstadoResultados2015SIREMV2.xls", "FlujoEfectivo2015SIREMV2.xls")
data <- lapply(files, read_excel, 1)
# Keep only companies located in Bogotá (note the trailing space present in
# the raw CIUDAD values).
balSheet <- data[[1]]; balSheet <- as.data.frame(balSheet[balSheet$CIUDAD == "BOGOTA D.C. ", ])
profLoss <- data[[2]]; profLoss <- as.data.frame(profLoss[profLoss$CIUDAD == "BOGOTA D.C. ", ])
cashFlow <- data[[3]]; cashFlow <- as.data.frame(cashFlow[cashFlow$CIUDAD == "BOGOTA D.C. ", ])
dataCat <- balSheet$SECTOR ; dataCat <- as.data.frame(dataCat) ; names(dataCat) <- "sector"
# NOTE(review): `makeGroup` ignores its `introData` argument and always
# summarises the global `dataCat`; it only works because `dataCat` is
# reassigned before each call — consider using the argument instead.
# NOTE(review): `top_n` is called without a `wt` column, so it ranks by the
# last column (`count`) implicitly — confirm that is intended.
makeGroup <- function(introData) arrange(top_n(summarise(group_by(dataCat, sector), count = n()), n = 20), desc(count))
# Bar chart of the top-20 sectors by number of companies.
ggplot(makeGroup(balSheet), aes(x = sector, y = count, fill = sector)) +
stat_summary(fun.y = sum, geom = "bar") +
theme_bw() +
theme(axis.text.x=element_blank(),axis.ticks.x=element_blank()) +
labs(y = "# de empresas", title = "Top 20 sectores", fill = "Sector")
# Acid-test ratio: (current assets - inventories) / current liabilities.
dataInic <- balSheet[,c(1:6, 52, 60, 161, 233) ]
dataInic2 <- cashFlow[, c(1:6, 67)]
dataInic$pruebaAcida <- (dataInic$`TOTAL ACTIVO CORRIENTE` - dataInic$`14 SUBTOTAL INVENTARIOS (CP)`)/dataInic$`TOTAL PASIVO CORRIENTE`
total <- merge(dataInic, dataInic2, by="NIT")
# Healthy firms: positive acid-test ratio AND positive net cash increase;
# "bad" firms (empresasMalas) fail either condition.
finalDataset <- total[(total$pruebaAcida > 0) & (total$`TOTAL - AUMENTO (DISMINUCION) DEL EFECTIVO`) > 0, ]
empresasMalas <- total[(total$pruebaAcida < 0) | (total$`TOTAL - AUMENTO (DISMINUCION) DEL EFECTIVO`) < 0, ]
dataCat <- empresasMalas$SECTOR.x ; dataCat <- as.data.frame(dataCat) ; names(dataCat) <- "sector"
# Bar chart of the top-20 sectors among the "bad" companies.
ggplot(makeGroup(dataCat), aes(x = sector, y = count, fill = sector)) +
stat_summary(fun.y = sum, geom = "bar") +
theme_bw() +
theme(axis.text.x=element_blank(),axis.ticks.x=element_blank()) +
labs(y = "# de empresas", title = "Top 20 sectores", fill = "Sector")
# Proportion of "bad" companies per sector.
# NOTE(review): `total$mala` is never created in this script — presumably
# defined in the sourced "source.R"; confirm before running standalone.
juzgar <- total[!is.na(total$mala), ]
grupo <- group_by(juzgar, SECTOR.x, mala)
propMalas <- summarise(grupo, count = n(), sum = sum(mala))
propMalas <- summarise(propMalas, empMalas = sum(sum), totalEmp = sum(count))
propMalas$prop <- propMalas$empMalas/propMalas$totalEmp
datosMalas <- arrange(top_n(propMalas, n = 20), desc(prop))
| /DB SuperFinanciera/analisis.R | permissive | eledero/RDatelligence | R | false | false | 2,492 | r | #Abrir librerías
# Download the 2015 SIREM financial statements from Superintendencia de
# Sociedades, keep Bogotá companies, and flag firms with liquidity problems.
library(readxl)
library(ggplot2)
library(dplyr)
#
# Download the data archive
url <- paste0("http://www.supersociedades.gov.co/asuntos-economicos-y-contables/",
"estudios-y-supervision-por-riesgos/SIREM/Documents/2015.zip")
file <- "datos.zip"
download.file(url, file)
# Unzip the archive
unzip(file)
setwd("2015")
source("source.R")
files <- c("BalanceGeneral2015SIREMV2.xls", "EstadoResultados2015SIREMV2.xls", "FlujoEfectivo2015SIREMV2.xls")
data <- lapply(files, read_excel, 1)
# Keep only companies located in Bogotá (note the trailing space present in
# the raw CIUDAD values).
balSheet <- data[[1]]; balSheet <- as.data.frame(balSheet[balSheet$CIUDAD == "BOGOTA D.C. ", ])
profLoss <- data[[2]]; profLoss <- as.data.frame(profLoss[profLoss$CIUDAD == "BOGOTA D.C. ", ])
cashFlow <- data[[3]]; cashFlow <- as.data.frame(cashFlow[cashFlow$CIUDAD == "BOGOTA D.C. ", ])
dataCat <- balSheet$SECTOR ; dataCat <- as.data.frame(dataCat) ; names(dataCat) <- "sector"
# NOTE(review): `makeGroup` ignores its `introData` argument and always
# summarises the global `dataCat`; it only works because `dataCat` is
# reassigned before each call — consider using the argument instead.
# NOTE(review): `top_n` is called without a `wt` column, so it ranks by the
# last column (`count`) implicitly — confirm that is intended.
makeGroup <- function(introData) arrange(top_n(summarise(group_by(dataCat, sector), count = n()), n = 20), desc(count))
# Bar chart of the top-20 sectors by number of companies.
ggplot(makeGroup(balSheet), aes(x = sector, y = count, fill = sector)) +
stat_summary(fun.y = sum, geom = "bar") +
theme_bw() +
theme(axis.text.x=element_blank(),axis.ticks.x=element_blank()) +
labs(y = "# de empresas", title = "Top 20 sectores", fill = "Sector")
# Acid-test ratio: (current assets - inventories) / current liabilities.
dataInic <- balSheet[,c(1:6, 52, 60, 161, 233) ]
dataInic2 <- cashFlow[, c(1:6, 67)]
dataInic$pruebaAcida <- (dataInic$`TOTAL ACTIVO CORRIENTE` - dataInic$`14 SUBTOTAL INVENTARIOS (CP)`)/dataInic$`TOTAL PASIVO CORRIENTE`
total <- merge(dataInic, dataInic2, by="NIT")
# Healthy firms: positive acid-test ratio AND positive net cash increase;
# "bad" firms (empresasMalas) fail either condition.
finalDataset <- total[(total$pruebaAcida > 0) & (total$`TOTAL - AUMENTO (DISMINUCION) DEL EFECTIVO`) > 0, ]
empresasMalas <- total[(total$pruebaAcida < 0) | (total$`TOTAL - AUMENTO (DISMINUCION) DEL EFECTIVO`) < 0, ]
dataCat <- empresasMalas$SECTOR.x ; dataCat <- as.data.frame(dataCat) ; names(dataCat) <- "sector"
# Bar chart of the top-20 sectors among the "bad" companies.
ggplot(makeGroup(dataCat), aes(x = sector, y = count, fill = sector)) +
stat_summary(fun.y = sum, geom = "bar") +
theme_bw() +
theme(axis.text.x=element_blank(),axis.ticks.x=element_blank()) +
labs(y = "# de empresas", title = "Top 20 sectores", fill = "Sector")
# Proportion of "bad" companies per sector.
# NOTE(review): `total$mala` is never created in this script — presumably
# defined in the sourced "source.R"; confirm before running standalone.
juzgar <- total[!is.na(total$mala), ]
grupo <- group_by(juzgar, SECTOR.x, mala)
propMalas <- summarise(grupo, count = n(), sum = sum(mala))
propMalas <- summarise(propMalas, empMalas = sum(sum), totalEmp = sum(count))
propMalas$prop <- propMalas$empMalas/propMalas$totalEmp
datosMalas <- arrange(top_n(propMalas, n = 20), desc(prop))
|
# Extracted examples from the forecast package documentation for
# rwf()/naive()/snaive(): naive and random-walk forecasts on built-in series.
library(forecast)
### Name: rwf
### Title: Naive and Random Walk Forecasts
### Aliases: rwf naive print.naive snaive
### Keywords: ts
### ** Examples
# Random-walk forecast of the first 60 observations of `gold`, 50 steps ahead.
gold.fcast <- rwf(gold[1:60], h=50)
plot(gold.fcast)
# Naive forecast; show only the last 200 observations of the series.
plot(naive(gold,h=50),include=200)
# Seasonal naive forecast for the monthly `wineind` series.
plot(snaive(wineind))
| /data/genthat_extracted_code/forecast/examples/naive.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 273 | r | library(forecast)
# Extracted examples from the forecast package documentation for
# rwf()/naive()/snaive(): naive and random-walk forecasts on built-in series.
### Name: rwf
### Title: Naive and Random Walk Forecasts
### Aliases: rwf naive print.naive snaive
### Keywords: ts
### ** Examples
# Random-walk forecast of the first 60 observations of `gold`, 50 steps ahead.
gold.fcast <- rwf(gold[1:60], h=50)
plot(gold.fcast)
# Naive forecast; show only the last 200 observations of the series.
plot(naive(gold,h=50),include=200)
# Seasonal naive forecast for the monthly `wineind` series.
plot(snaive(wineind))
|
# Analyze correlation coefficient score
# Written by Son Doan, Nov, 2010
# Input, table include data by the following format
# Year week no \t week no \t CDC \t GFT \ Total tweets \t Cullota_best \t Tweet-flu \t Syndromes \t Synd + Flu
# Read input data
# NOTE(review): 'tmp-count' and 'dat.txt' are whitespace-delimited files
# expected in the working directory — confirm their column layout matches
# the header description above.
data1 <- read.table('tmp-count')
data <- read.table('dat.txt')
# Assign to vector
CDC <- data[[1]]
# GFT <- data[[2]]
TotalTweets <- data[[9]]
# CullotaBest <- data[[7]]
# TweetFlu <- data[[3]]
# Synd <- data[[4]]
# SyndFlu <- data[[5]]
# US <- data[[8]]
# SyndFluURL <- data[[10]]
# SEM1 <- data[[11]]
# SEM1Swine <- data[[12]]
# SEM1Negation <- data[[13]]
# SyndFluClassified <- data[[14]]
# SEM1ClassifiedNegation <- data[[15]]
# SEM1Subj <- data[[16]]
# SEM1NegSub <- data[[17]]
# SEM1NegSubAsthma <- data[[18]]
# The candidate signal being evaluated (3rd column of tmp-count).
new <- data1[[3]]
# CullotaBestNorm <- CullotaBest/TotalTweets
# TweetFluNorm <- TweetFlu/TotalTweets
# SyndNorm <- Synd/TotalTweets
# SyndFluNorm <- SyndFlu/TotalTweets
# USNorm <- US/TotalTweets
# SyndFluURLNorm <- SyndFluURL/TotalTweets
# SEM1Norm <- SEM1/TotalTweets
# SEM1SwineNorm <- SEM1Swine/TotalTweets
# SEM1NegationNorm <- SEM1Negation/TotalTweets
# SyndFluClassifiedNorm <- SyndFluClassified/TotalTweets
# SEM1ClassifiedNegationNorm <- SEM1ClassifiedNegation/TotalTweets
# SEM1SubjNorm <- SEM1Subj/TotalTweets
# SEM1NegSubNorm <- SEM1NegSub/TotalTweets
# SEM1NegSubAsthmaNorm <- SEM1NegSubAsthma/TotalTweets
# Normalise the candidate signal by weekly total tweet volume.
newNorm <- new/TotalTweets
# Calculate correlation score
# print("======================")
# print("Correlation score")
# cor.test(CDC,GFT)
# cor.test(CDC,CullotaBestNorm)
# cor.test(CDC,TweetFluNorm)
# cor.test(CDC,SyndNorm)
# cor.test(CDC,SyndFluNorm)
# cor.test(CDC,USNorm)
# cor.test(CDC,SyndFluURLNorm)
# cor.test(CDC,SEM1Norm)
# cor.test(CDC,SEM1SwineNorm)
# cor.test(CDC,SEM1NegationNorm)
# cor.test(CDC,SyndFluClassifiedNorm)
# cor.test(CDC,SEM1ClassifiedNegationNorm)
# cor.test(CDC,SEM1SubjNorm)
# cor.test(CDC,SEM1NegSubNorm)
# cor.test(CDC,SEM1NegSubAsthmaNorm)
print("=============================")
print("NEW SCORE ")
# Pearson correlation (with test) between CDC rates and the new signal.
cor.test(CDC,newNorm)
| /report_score.R | no_license | sondoan/RCorellation | R | false | false | 2,059 | r | # Analyze correlation coeeficient score
# Analyze correlation coefficient score between CDC influenza data and a
# normalised Twitter-based signal.
# Written by Son Doan, Nov, 2010
# Input, table include data by the following format
# Year week no \t week no \t CDC \t GFT \ Total tweets \t Cullota_best \t Tweet-flu \t Syndromes \t Synd + Flu
# Read input data
# NOTE(review): 'tmp-count' and 'dat.txt' are whitespace-delimited files
# expected in the working directory — confirm their column layout matches
# the header description above.
data1 <- read.table('tmp-count')
data <- read.table('dat.txt')
# Assign to vector
CDC <- data[[1]]
# GFT <- data[[2]]
TotalTweets <- data[[9]]
# CullotaBest <- data[[7]]
# TweetFlu <- data[[3]]
# Synd <- data[[4]]
# SyndFlu <- data[[5]]
# US <- data[[8]]
# SyndFluURL <- data[[10]]
# SEM1 <- data[[11]]
# SEM1Swine <- data[[12]]
# SEM1Negation <- data[[13]]
# SyndFluClassified <- data[[14]]
# SEM1ClassifiedNegation <- data[[15]]
# SEM1Subj <- data[[16]]
# SEM1NegSub <- data[[17]]
# SEM1NegSubAsthma <- data[[18]]
# The candidate signal being evaluated (3rd column of tmp-count).
new <- data1[[3]]
# CullotaBestNorm <- CullotaBest/TotalTweets
# TweetFluNorm <- TweetFlu/TotalTweets
# SyndNorm <- Synd/TotalTweets
# SyndFluNorm <- SyndFlu/TotalTweets
# USNorm <- US/TotalTweets
# SyndFluURLNorm <- SyndFluURL/TotalTweets
# SEM1Norm <- SEM1/TotalTweets
# SEM1SwineNorm <- SEM1Swine/TotalTweets
# SEM1NegationNorm <- SEM1Negation/TotalTweets
# SyndFluClassifiedNorm <- SyndFluClassified/TotalTweets
# SEM1ClassifiedNegationNorm <- SEM1ClassifiedNegation/TotalTweets
# SEM1SubjNorm <- SEM1Subj/TotalTweets
# SEM1NegSubNorm <- SEM1NegSub/TotalTweets
# SEM1NegSubAsthmaNorm <- SEM1NegSubAsthma/TotalTweets
# Normalise the candidate signal by weekly total tweet volume.
newNorm <- new/TotalTweets
# Calculate correlation score
# print("======================")
# print("Correlation score")
# cor.test(CDC,GFT)
# cor.test(CDC,CullotaBestNorm)
# cor.test(CDC,TweetFluNorm)
# cor.test(CDC,SyndNorm)
# cor.test(CDC,SyndFluNorm)
# cor.test(CDC,USNorm)
# cor.test(CDC,SyndFluURLNorm)
# cor.test(CDC,SEM1Norm)
# cor.test(CDC,SEM1SwineNorm)
# cor.test(CDC,SEM1NegationNorm)
# cor.test(CDC,SyndFluClassifiedNorm)
# cor.test(CDC,SEM1ClassifiedNegationNorm)
# cor.test(CDC,SEM1SubjNorm)
# cor.test(CDC,SEM1NegSubNorm)
# cor.test(CDC,SEM1NegSubAsthmaNorm)
print("=============================")
print("NEW SCORE ")
# Pearson correlation (with test) between CDC rates and the new signal.
cor.test(CDC,newNorm)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Workout3.R
\name{bin_variable}
\alias{bin_variable}
\title{Binomial Variable}
\usage{
bin_variable(trials, prob)
}
\arguments{
\item{trials}{an integer with number of trials}
\item{prob}{the probability of the success occur}
}
\value{
a 'binvar' class object containing the number of trials and the probability of success
}
\description{
Generates a binomial random variable object with class 'binvar'.
}
\examples{
bin_variable(10, 0.5)
}
| /binomial/man/bin_variable.Rd | no_license | stat133-sp19/hw-stat133-H4O2 | R | false | true | 524 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Workout3.R
\name{bin_variable}
\alias{bin_variable}
\title{Binomial Variable}
\usage{
bin_variable(trials, prob)
}
\arguments{
\item{trials}{an integer with number of trials}
\item{prob}{the probability of the success occur}
}
\value{
a 'binvar' class object containing the number of trials and the probability of success
}
\description{
Generates a binomial random variable object with class 'binvar'.
}
\examples{
bin_variable(10, 0.5)
}
|
# Interactively choose a directory via a text menu, starting from `path`.
# Subdirectories matching `pattern` are offered as numbered options, plus
# <choose>/<back>/<quit> actions. When `history` is TRUE, previous
# selections for the same starting path (stored in
# options("textChooseDir::pathHistoryList")) are used to resume navigation.
# Returns the chosen directory path, or NULL if the user quits.
textChooseDir <- function(path=".", pattern="[^~]$", ..., history=TRUE, verbose=FALSE) {
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Validate arguments
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Argument 'path':
path <- Arguments$getReadablePath(path, mustExist=TRUE);
# Argument 'pattern':
pattern <- Arguments$getRegularExpression(pattern);
# Argument 'history':
history <- Arguments$getLogical(history);
# Argument 'verbose':
verbose <- Arguments$getVerbose(verbose);
if (verbose) {
pushState(verbose);
on.exit(popState(verbose));
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Use previously used directory given this path?
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# The per-start-path navigation history lives in a global R option.
pathHistoryList <- getOption("textChooseDir::pathHistoryList");
if (!is.list(pathHistoryList)) {
pathHistoryList <- list();
}
key <- getAbsolutePath(path);
pathHistory <- c();
if (history) {
verbose && enter(verbose, "Looking for previous menu selections");
verbose && cat(verbose, "Lookup key (based on inital path): ", key);
verbose && cat(verbose, "pathHistoryList:");
verbose && str(verbose, pathHistoryList);
verbose && cat(verbose, "Keys:");
verbose && print(verbose, names(pathHistoryList));
if (is.element(key, names(pathHistoryList))) {
pathHistory <- pathHistoryList[[key]];
# Use the first path on the history stack
nbrOfRecords <- length(pathHistory);
if (nbrOfRecords > 0) {
verbose && enter(verbose, "Found a record of ", nbrOfRecords, " menu selections");
verbose && print(verbose, pathHistory);
path <- pathHistory[nbrOfRecords];
verbose && cat(verbose, "Using the latest: ", path);
verbose && exit(verbose);
}
}
verbose && exit(verbose);
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Main menu loop: navigate directories until the user chooses or quits.
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
repeat {
# Push the current directory onto the history stack (skip duplicates).
n <- length(pathHistory);
if (n == 0 || (pathHistory[n] != path)) {
pathHistory <- c(pathHistory, path);
}
path <- Arguments$getReadablePath(path);
# List all files
paths <- list.files(pattern=pattern, path=path, full.names=TRUE);
# Expand links
paths <- sapply(paths, FUN=function(path) {
filePath(path, expandLinks="any");
});
# Keep only directories
paths <- paths[sapply(paths, FUN=isDirectory)];
if (length(paths) > 0) {
# Cleanup options
options <- gsub(".*/", "", paths);
options <- gsub(".(lnk|LNK)$", "", options);
# Append slash to directories
options <- paste(options, "/", sep="");
names(options) <- seq_along(options);
} else {
options <- NULL;
}
# Menu actions: accept the current directory, go back, or abort.
options <- c(options, "ENTER"="<choose>");
if (length(pathHistory) > 1)
options <- c(options, "-"="<back>");
options <- c(options, "q"="<quit>");
ruler <- paste(rep("*", getOption("width")-10), collapse="");
title <- sprintf("Current directory: %s", path);
title <- sprintf("\n%s\n%s", ruler, title);
ans <- textMenu(options, title=title);
choice <- options[ans];
if (choice == "<choose>") {
break;
} else if (choice == "<quit>") {
path <- NULL;
break;
} else if (choice == "<back>") {
# Pop the current directory and return to the previous one.
path <- pathHistory[length(pathHistory)-1];
pathHistory <- pathHistory[seq_len(length(pathHistory)-2)];
} else {
path <- paths[ans];
}
} # repeat
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Remember stack of paths
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
pathHistoryList[[key]] <- pathHistory;
options("textChooseDir::pathHistoryList"=pathHistoryList);
# Return NULL unless an existing directory was actually chosen.
if (!isDirectory(path))
path <- NULL;
path;
} # textChooseDir()
############################################################################
# HISTORY:
# 2009-02-21
# o Created.
############################################################################
| /R/textChooseDir.R | no_license | HenrikBengtsson/R.menu | R | false | false | 4,147 | r | textChooseDir <- function(path=".", pattern="[^~]$", ..., history=TRUE, verbose=FALSE) {
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Validate arguments
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Argument 'path':
path <- Arguments$getReadablePath(path, mustExist=TRUE);
# Argument 'pattern':
pattern <- Arguments$getRegularExpression(pattern);
# Argument 'history':
history <- Arguments$getLogical(history);
# Argument 'verbose':
verbose <- Arguments$getVerbose(verbose);
if (verbose) {
pushState(verbose);
on.exit(popState(verbose));
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Use previously used directory given this path?
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
pathHistoryList <- getOption("textChooseDir::pathHistoryList");
if (!is.list(pathHistoryList)) {
pathHistoryList <- list();
}
key <- getAbsolutePath(path);
pathHistory <- c();
if (history) {
verbose && enter(verbose, "Looking for previous menu selections");
verbose && cat(verbose, "Lookup key (based on inital path): ", key);
verbose && cat(verbose, "pathHistoryList:");
verbose && str(verbose, pathHistoryList);
verbose && cat(verbose, "Keys:");
verbose && print(verbose, names(pathHistoryList));
if (is.element(key, names(pathHistoryList))) {
pathHistory <- pathHistoryList[[key]];
# Use the first path on the history stack
nbrOfRecords <- length(pathHistory);
if (nbrOfRecords > 0) {
verbose && enter(verbose, "Found a record of ", nbrOfRecords, " menu selections");
verbose && print(verbose, pathHistory);
path <- pathHistory[nbrOfRecords];
verbose && cat(verbose, "Using the latest: ", path);
verbose && exit(verbose);
}
}
verbose && exit(verbose);
}
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
repeat {
n <- length(pathHistory);
if (n == 0 || (pathHistory[n] != path)) {
pathHistory <- c(pathHistory, path);
}
path <- Arguments$getReadablePath(path);
# List all files
paths <- list.files(pattern=pattern, path=path, full.names=TRUE);
# Expand links
paths <- sapply(paths, FUN=function(path) {
filePath(path, expandLinks="any");
});
# Keep only directories
paths <- paths[sapply(paths, FUN=isDirectory)];
if (length(paths) > 0) {
# Cleanup options
options <- gsub(".*/", "", paths);
options <- gsub(".(lnk|LNK)$", "", options);
# Append slash to directories
options <- paste(options, "/", sep="");
names(options) <- seq_along(options);
} else {
options <- NULL;
}
options <- c(options, "ENTER"="<choose>");
if (length(pathHistory) > 1)
options <- c(options, "-"="<back>");
options <- c(options, "q"="<quit>");
ruler <- paste(rep("*", getOption("width")-10), collapse="");
title <- sprintf("Current directory: %s", path);
title <- sprintf("\n%s\n%s", ruler, title);
ans <- textMenu(options, title=title);
choice <- options[ans];
if (choice == "<choose>") {
break;
} else if (choice == "<quit>") {
path <- NULL;
break;
} else if (choice == "<back>") {
path <- pathHistory[length(pathHistory)-1];
pathHistory <- pathHistory[seq_len(length(pathHistory)-2)];
} else {
path <- paths[ans];
}
} # repeat
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Remember stack of paths
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
pathHistoryList[[key]] <- pathHistory;
options("textChooseDir::pathHistoryList"=pathHistoryList);
if (!isDirectory(path))
path <- NULL;
path;
} # textChooseDir()
############################################################################
# HISTORY:
# 2009-02-21
# o Created.
############################################################################
|
#' Plot 3D brain parcellations
#'
#' \code{ggseg3d} plots and returns a plotly mesh3d object.
#' @author Athanasia Mowinckel and Didac Pineiro
#'
#' @param .data A data.frame to use for plot aesthetics. Must include a
#' column called "region" corresponding to regions.
#' @param atlas Either a string with the name of a 3d atlas to use.
#' @param hemisphere String. Hemisphere to plot. Either "left" or "right"[default],
#' can also be "subcort".
#' @param surface String. Which surface to plot. Either "pial","white", or "inflated"[default]
#' @param label String. Quoted name of column in atlas/data that should be used to name traces
#' @param text String. Quoted name of column in atlas/data that should be added as extra
#' information in the hover text.
#' @param colour String. Quoted name of column from which colour should be supplied
#' @param palette String. Vector of colour names or HEX colours. Can also be a named
#' numeric vector, with colours as names, and breakpoint for that colour as the value
#' @param na.colour String. Either name, hex of RGB for colour of NA in colour.
#' @param na.alpha Numeric. A number between 0 and 1 to control transparency of NA-regions.
#' @param show.legend Logical. Toggle legend if colour is numeric.
#' @param options.legend list of layout changes to colourbar
#'
#' \strong{Available surfaces:}
#' \itemize{
#' \item `inflated:` Fully inflated surface
#' \item `semi-inflated:` Semi-inflated surface
#' \item `white:` white matter surface
#' }
#'
#' @return a plotly object
#'
#' @importFrom dplyr filter full_join select distinct summarise
#' @importFrom plotly plot_ly add_trace layout
#' @importFrom scales colour_ramp brewer_pal rescale gradient_n_pal
#' @importFrom tidyr unite_
#'
#' @examples
#' ggseg3d()
#' ggseg3d(surface="white")
#' ggseg3d(surface="inflated")
#' ggseg3d(show.legend = FALSE)
#'
#' @seealso \code{\link[plotly]{plot_ly}}, \code{\link[plotly]{add_trace}}, \code{\link[plotly]{layout}}, the plotly package
#'
#' @export
ggseg3d <- function(.data=NULL, atlas="dk_3d",
                    surface = "LCBC", hemisphere = c("right","subcort"),
                    label = "region", text = NULL, colour = "colour",
                    palette = NULL, na.colour = "darkgrey", na.alpha = 1,
                    show.legend = TRUE, options.legend = NULL) {

  # Grab the atlas, even if it has been provided as a character string.
  atlas3d = get_atlas(atlas,
                      surface = surface,
                      hemisphere = hemisphere)

  # If external data has been supplied, merge it onto the atlas regions.
  if(!is.null(.data)){
    atlas3d <- data_merge(.data, atlas3d)
  }

  pal.colours <- get_palette(palette)

  # If the colour column is numeric, translate values into a colour
  # gradient; otherwise the column is assumed to hold colours directly.
  if(is.numeric(unlist(atlas3d[,colour]))){

    # Unnamed palettes get evenly spaced breakpoints across the data range.
    if(is.null(names(palette))){
      pal.colours$values <- seq(min(atlas3d[,colour], na.rm = TRUE),
                                max(atlas3d[,colour], na.rm = TRUE),
                                length.out = nrow(pal.colours))
    }

    atlas3d$new_col = gradient_n_pal(pal.colours$orig, pal.colours$values,"Lab")(
      unlist(atlas3d[,colour]))

    fill = "new_col"
  }else{
    fill = colour
  }

  # initiate plot
  p = plotly::plot_ly()

  # Add one mesh3d trace per atlas region.
  # seq_len() rather than 1:nrow(): safe when the atlas has zero rows
  # (1:0 would iterate over c(1, 0) and fail).
  for(tt in seq_len(nrow(atlas3d))){

    # One face colour per triangle; NA regions fall back to na.colour.
    col = rep(unlist(atlas3d[tt, fill]), nrow(atlas3d$mesh[[tt]]$faces))
    col = ifelse(is.na(col), na.colour, col)

    # NA regions are drawn with the requested transparency.
    op = ifelse(is.na(unlist(atlas3d[tt, fill])), na.alpha, 1)

    # Optional extra hover text taken from the `text` column.
    txt = if(is.null(text)){
      text
    }else{
      paste0(text, ": ", unlist(atlas3d[tt, text]))
    }

    # Plotly expects 0-based face indices, hence the -1 on i/j/k.
    p = plotly::add_trace(p,
                          x = atlas3d$mesh[[tt]]$vertices$x,
                          y = atlas3d$mesh[[tt]]$vertices$y,
                          z = atlas3d$mesh[[tt]]$vertices$z,
                          i = atlas3d$mesh[[tt]]$faces$i-1,
                          j = atlas3d$mesh[[tt]]$faces$j-1,
                          k = atlas3d$mesh[[tt]]$faces$k-1,
                          facecolor = col,
                          type = "mesh3d",
                          text = txt,
                          showscale = FALSE,
                          opacity = op,
                          name = unlist(atlas3d[tt, label])
    )
  }

  # Work-around to get a colourbar legend for numeric data: add a
  # degenerate single-point trace that carries the colour scale.
  if(show.legend & is.numeric(unlist(atlas3d[,colour]))){
    dt_leg <- dplyr::mutate(pal.colours,
                            x = 0, y = 0, z = 0)

    p <- plotly::add_trace(p, data = dt_leg,
                           x = ~ x, y = ~ y, z = ~ z,
                           intensity = ~ values,
                           colorscale = unname(dt_leg[,c("norm", "hex")]),
                           type = "mesh3d",
                           colorbar = options.legend
    )
  }

  p
}
## quiets concerns of R CMD check
## Declare loop/NSE variable names used inside ggseg3d() so R CMD check does
## not emit "no visible binding for global variable" NOTEs.
if(getRversion() >= "2.15.1"){
  utils::globalVariables(c("tt", "surf", "mesh", "new_col"))
}
| /R/ggseg3d.R | permissive | muschellij2/ggseg3d | R | false | false | 4,903 | r | #' Plot 3D brain parcellations
#'
#' \code{ggseg3d} plots and returns a plotly mesh3d object.
#' @author Athanasia Mowinckel and Didac Pineiro
#'
#' @param .data A data.frame to use for plot aesthetics. Must include a
#' column called "region" corresponding to regions.
#' @param atlas Either a string with the name of a 3d atlas to use.
#' @param hemisphere String. Hemisphere to plot. Either "left" or "right"[default],
#' can also be "subcort".
#' @param surface String. Which surface to plot. Either "pial","white", or "inflated"[default]
#' @param label String. Quoted name of column in atlas/data that should be used to name traces
#' @param text String. Quoated name of column in atlas/data that should be added as extra
#' information in the hover text.
#' @param colour String. Quoted name of column from which colour should be supplied
#' @param palette String. Vector of colour names or HEX colours. Can also be a named
#' numeric vector, with colours as names, and breakpoint for that colour as the value
#' @param na.colour String. Either name, hex of RGB for colour of NA in colour.
#' @param na.alpha Numeric. A number between 0 and 1 to control transparency of NA-regions.
#' @param show.legend Logical. Toggle legend if colour is numeric.
#' @param options.legend list of layout changes to colourbar
#'
#' \strong{Available surfaces:}
#' \itemize{
#' \item `inflated:` Fully inflated surface
#' \item `semi-inflated:` Semi-inflated surface
#' \item `white:` white matter surface
#' }
#'
#' @return a plotly object
#'
#' @importFrom dplyr filter full_join select distinct summarise
#' @importFrom plotly plot_ly add_trace layout
#' @importFrom scales colour_ramp brewer_pal rescale gradient_n_pal
#' @importFrom tidyr unite_
#'
#' @examples
#' ggseg3d()
#' ggseg3d(surface="white")
#' ggseg3d(surface="inflated")
#' ggseg3d(show.legend = FALSE)
#'
#' @seealso \code{\link[plotly]{plot_ly}}, \code{\link[plotly]{add_trace}}, \code{\link[plotly]{layout}}, the plotly package
#'
#' @export
ggseg3d <- function(.data=NULL, atlas="dk_3d",
                    surface = "LCBC", hemisphere = c("right","subcort"),
                    label = "region", text = NULL, colour = "colour",
                    palette = NULL, na.colour = "darkgrey", na.alpha = 1,
                    show.legend = TRUE, options.legend = NULL) {
  # Grab the atlas, even if it has been provided as a character string.
  atlas3d <- get_atlas(atlas,
                       surface = surface,
                       hemisphere = hemisphere)

  # If data has been supplied, merge it onto the atlas meshes by region.
  if (!is.null(.data)) {
    atlas3d <- data_merge(.data, atlas3d)
  }

  pal.colours <- get_palette(palette)

  # If the colour column is numeric, map values onto a colour gradient;
  # otherwise the column is assumed to already hold colour names/codes.
  if (is.numeric(unlist(atlas3d[, colour]))) {
    # No user-supplied breakpoints (unnamed palette): spread the palette
    # colours evenly across the observed data range.
    if (is.null(names(palette))) {
      pal.colours$values <- seq(min(atlas3d[, colour], na.rm = TRUE),
                                max(atlas3d[, colour], na.rm = TRUE),
                                length.out = nrow(pal.colours))
    }
    atlas3d$new_col <- gradient_n_pal(pal.colours$orig, pal.colours$values, "Lab")(
      unlist(atlas3d[, colour]))
    fill <- "new_col"
  } else {
    fill <- colour
  }

  # Initiate the plot, then add one mesh3d trace per atlas region.
  p <- plotly::plot_ly()

  # seq_len() is safe for a zero-row atlas, unlike 1:nrow().
  for (tt in seq_len(nrow(atlas3d))) {
    # One facecolor entry per face of this region's mesh.
    col <- rep(unlist(atlas3d[tt, fill]), nrow(atlas3d$mesh[[tt]]$faces))
    col <- ifelse(is.na(col), na.colour, col)

    # Regions without data are drawn with na.alpha transparency.
    op <- ifelse(is.na(unlist(atlas3d[tt, fill])), na.alpha, 1)

    # Optional extra hover text pulled from the requested column.
    txt <- if (is.null(text)) {
      text
    } else {
      paste0(text, ": ", unlist(atlas3d[tt, text]))
    }

    # plotly mesh3d face indices are zero-based, hence the -1 on i/j/k.
    p <- plotly::add_trace(p,
                           x = atlas3d$mesh[[tt]]$vertices$x,
                           y = atlas3d$mesh[[tt]]$vertices$y,
                           z = atlas3d$mesh[[tt]]$vertices$z,
                           i = atlas3d$mesh[[tt]]$faces$i - 1,
                           j = atlas3d$mesh[[tt]]$faces$j - 1,
                           k = atlas3d$mesh[[tt]]$faces$k - 1,
                           facecolor = col,
                           type = "mesh3d",
                           text = txt,
                           showscale = FALSE,
                           opacity = op,
                           name = unlist(atlas3d[tt, label])
    )
  }

  # Work-around to get a colourbar legend: add a dummy trace at the origin
  # that carries the numeric intensity scale. && (not &) for scalar logic.
  if (show.legend && is.numeric(unlist(atlas3d[, colour]))) {
    dt_leg <- dplyr::mutate(pal.colours,
                            x = 0, y = 0, z = 0)

    p <- plotly::add_trace(p, data = dt_leg,
                           x = ~x, y = ~y, z = ~z,
                           intensity = ~values,
                           colorscale = unname(dt_leg[, c("norm", "hex")]),
                           type = "mesh3d",
                           colorbar = options.legend
    )
  }

  p
}
## quiets concerns of R CMD check
## Declare loop/NSE variable names used inside ggseg3d() so R CMD check does
## not emit "no visible binding for global variable" NOTEs.
if(getRversion() >= "2.15.1"){
  utils::globalVariables(c("tt", "surf", "mesh", "new_col"))
}
|
# Plotting script for the "Lids" peeking study: builds boxplots of choice
# performance and peeking behaviour from pre-aggregated data tables.
# NOTE(review): rm(list=ls()) and setwd() make this script machine-specific.
rm(list=ls())
setwd("~/Studies/Peeking/Lids/Analysis")
#---------------------------------------GRAPH1----------------------------#
# Per-condition averages: choice score and peeking proportion.
Graph1data=read.table(file="LidsAverages.txt",header=T)
library(lme4)
library(ggplot2)
library(cowplot)
# Boxplot (with error-bar whiskers) of choice performance per condition.
bp1 <- ggplot(Graph1data, aes(x=Condition, y=score)) +
stat_boxplot(geom ='errorbar', width = 0.2)+
geom_boxplot(width = 0.4,fill="goldenrod1")
bp1
plot1<- bp1 + scale_x_discrete(limits = rev(levels(Graph1data$Condition))) +
expand_limits(y=c(0,1)) +
#stat_sum(aes(size = factor(..n..)),colour="maroon4",geom="point") +
#scale_size_discrete(range = c(1, 8)) +
theme_bw()+
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())+
ggtitle("Choice performance") +
theme(plot.title = element_text(lineheight=.8, face="bold")) +
theme(axis.title.x = element_blank()) +
ylab("Mean proportion of correct choices") +
theme(legend.position="none")+
# dashed reference line at 0.25 (presumably chance level -- confirm)
geom_hline(aes(yintercept=0.25), colour="red", linetype="dashed")
plot1
# Boxplot of peeking proportion per condition.
bp2 <- ggplot(Graph1data, aes(x=Condition, y=peek)) +
stat_boxplot(geom ='errorbar',width=0.2)+
geom_boxplot(width=0.4,fill="salmon")
bp2
plot2 <- bp2 + scale_x_discrete(limits = rev(levels(Graph1data$Condition))) +
expand_limits(y=c(0,1)) +
# stat_sum(aes(size = factor(..n..)), colour = "salmon", geom = "point") +
# scale_size_discrete(range = c(1, 8)) +
theme_bw()+
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())+
ggtitle("Peeking") +
theme(plot.title = element_text(lineheight=.8, face="bold")) +
theme(axis.title.x = element_blank()) +
ylab("Mean proportion of peeking") +
theme(legend.position="none")
plot2
# Side-by-side panel figure (A: performance, B: peeking).
plot_grid(plot1, plot2, labels = "AUTO")
#------------------------------------GRAPH2-------------------------------------#
# Choice performance split by presence/absence of peeking.
# NOTE(review): na.rm is not an argument of ggplot() and is silently ignored.
Graph2data=read.table(file="LidsPeekvNo.txt",header=T)
bp3 <- ggplot(Graph2data, aes(x=peek, y=score),na.rm=TRUE) +
stat_boxplot(geom ='errorbar', width = 0.2)+
geom_boxplot(width = 0.4,fill="seagreen3")
bp3
plot3 <- bp3 +
expand_limits(y=c(0,1)) +
theme_bw()+
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())+
ggtitle(" ") +
theme(plot.title = element_text(lineheight=.8, face="bold")) +
ylab("Average proportion of correct cups chosen") +
xlab("Presence of peeking")
plot3
#---------------------------------------GRAPHs not in write up -------------------------#
# Exploratory plots of peeking by cup configuration (opaque condition).
DCdata=read.table(file="LidsPeekCond.txt",header=T)
bp4 <- ggplot(DCdata, aes(x=Configuration, y=peek)) +
geom_boxplot()
bp4
# NOTE(review): x is mapped to Configuration but the axis limits below use
# levels(DCdata$Condition) -- confirm intended (same in plot5/plot6).
plot4 <- bp4 + scale_x_discrete(limits = rev(levels(DCdata$Condition))) +
expand_limits(y=c(0,1)) +
stat_sum(aes(size = factor(..n..)), colour = "purple", geom = "point") +
scale_size_discrete(range = c(1, 16)) +
theme_bw()+
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())+
ggtitle("Any peeking - opaque condition") +
theme(plot.title = element_text(lineheight=.8, face="bold")) +
ylab("Mean proportion of peeking") +
xlab("Cup configuration") +
theme(legend.position="none")
plot4
# Same layout for the peekAbove measure.
bp5 <- ggplot(DCdata, aes(x=Configuration, y=peekAbove)) +
geom_boxplot()
bp5
plot5 <- bp5 + scale_x_discrete(limits = rev(levels(DCdata$Condition))) +
expand_limits(y=c(0,1)) +
stat_sum(aes(size = factor(..n..)), colour = "purple", geom = "point") +
scale_size_discrete(range = c(1, 16)) +
theme_bw()+
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())+
ggtitle("Peeking above - opaque condition") +
theme(plot.title = element_text(lineheight=.8, face="bold")) +
ylab("Mean proportion of peeking") +
xlab("Cup Configuration") +
theme(legend.position="none")
plot5
# Same layout for the peekBelow measure.
bp6<- ggplot(DCdata, aes(x=Configuration, y=peekBelow)) +
geom_boxplot()
bp6
plot6 <- bp6 + scale_x_discrete(limits = rev(levels(DCdata$Condition))) +
expand_limits(y=c(0,1)) +
stat_sum(aes(size = factor(..n..)), colour = "purple", geom = "point") +
scale_size_discrete(range = c(1, 16)) +
theme_bw()+
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())+
ggtitle("Peeking below - opaque condition") +
theme(plot.title = element_text(lineheight=.8, face="bold")) +
ylab("Mean proportion of peeking") +
xlab("Cup configuration") +
theme(legend.position="none")
plot6 | /from Elle/LidsPlots.R | no_license | cvoelter/info_seeking | R | false | false | 4,332 | r | rm(list=ls())
# Plotting script for the "Lids" peeking study: builds boxplots of choice
# performance and peeking behaviour from pre-aggregated data tables.
# NOTE(review): setwd() makes this script machine-specific.
setwd("~/Studies/Peeking/Lids/Analysis")
#---------------------------------------GRAPH1----------------------------#
# Per-condition averages: choice score and peeking proportion.
Graph1data=read.table(file="LidsAverages.txt",header=T)
library(lme4)
library(ggplot2)
library(cowplot)
# Boxplot (with error-bar whiskers) of choice performance per condition.
bp1 <- ggplot(Graph1data, aes(x=Condition, y=score)) +
stat_boxplot(geom ='errorbar', width = 0.2)+
geom_boxplot(width = 0.4,fill="goldenrod1")
bp1
plot1<- bp1 + scale_x_discrete(limits = rev(levels(Graph1data$Condition))) +
expand_limits(y=c(0,1)) +
#stat_sum(aes(size = factor(..n..)),colour="maroon4",geom="point") +
#scale_size_discrete(range = c(1, 8)) +
theme_bw()+
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())+
ggtitle("Choice performance") +
theme(plot.title = element_text(lineheight=.8, face="bold")) +
theme(axis.title.x = element_blank()) +
ylab("Mean proportion of correct choices") +
theme(legend.position="none")+
# dashed reference line at 0.25 (presumably chance level -- confirm)
geom_hline(aes(yintercept=0.25), colour="red", linetype="dashed")
plot1
# Boxplot of peeking proportion per condition.
bp2 <- ggplot(Graph1data, aes(x=Condition, y=peek)) +
stat_boxplot(geom ='errorbar',width=0.2)+
geom_boxplot(width=0.4,fill="salmon")
bp2
plot2 <- bp2 + scale_x_discrete(limits = rev(levels(Graph1data$Condition))) +
expand_limits(y=c(0,1)) +
# stat_sum(aes(size = factor(..n..)), colour = "salmon", geom = "point") +
# scale_size_discrete(range = c(1, 8)) +
theme_bw()+
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())+
ggtitle("Peeking") +
theme(plot.title = element_text(lineheight=.8, face="bold")) +
theme(axis.title.x = element_blank()) +
ylab("Mean proportion of peeking") +
theme(legend.position="none")
plot2
# Side-by-side panel figure (A: performance, B: peeking).
plot_grid(plot1, plot2, labels = "AUTO")
#------------------------------------GRAPH2-------------------------------------#
# Choice performance split by presence/absence of peeking.
# NOTE(review): na.rm is not an argument of ggplot() and is silently ignored.
Graph2data=read.table(file="LidsPeekvNo.txt",header=T)
bp3 <- ggplot(Graph2data, aes(x=peek, y=score),na.rm=TRUE) +
stat_boxplot(geom ='errorbar', width = 0.2)+
geom_boxplot(width = 0.4,fill="seagreen3")
bp3
plot3 <- bp3 +
expand_limits(y=c(0,1)) +
theme_bw()+
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())+
ggtitle(" ") +
theme(plot.title = element_text(lineheight=.8, face="bold")) +
ylab("Average proportion of correct cups chosen") +
xlab("Presence of peeking")
plot3
#---------------------------------------GRAPHs not in write up -------------------------#
# Exploratory plots of peeking by cup configuration (opaque condition).
DCdata=read.table(file="LidsPeekCond.txt",header=T)
bp4 <- ggplot(DCdata, aes(x=Configuration, y=peek)) +
geom_boxplot()
bp4
# NOTE(review): x is mapped to Configuration but the axis limits below use
# levels(DCdata$Condition) -- confirm intended (same in plot5/plot6).
plot4 <- bp4 + scale_x_discrete(limits = rev(levels(DCdata$Condition))) +
expand_limits(y=c(0,1)) +
stat_sum(aes(size = factor(..n..)), colour = "purple", geom = "point") +
scale_size_discrete(range = c(1, 16)) +
theme_bw()+
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())+
ggtitle("Any peeking - opaque condition") +
theme(plot.title = element_text(lineheight=.8, face="bold")) +
ylab("Mean proportion of peeking") +
xlab("Cup configuration") +
theme(legend.position="none")
plot4
# Same layout for the peekAbove measure.
bp5 <- ggplot(DCdata, aes(x=Configuration, y=peekAbove)) +
geom_boxplot()
bp5
plot5 <- bp5 + scale_x_discrete(limits = rev(levels(DCdata$Condition))) +
expand_limits(y=c(0,1)) +
stat_sum(aes(size = factor(..n..)), colour = "purple", geom = "point") +
scale_size_discrete(range = c(1, 16)) +
theme_bw()+
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())+
ggtitle("Peeking above - opaque condition") +
theme(plot.title = element_text(lineheight=.8, face="bold")) +
ylab("Mean proportion of peeking") +
xlab("Cup Configuration") +
theme(legend.position="none")
plot5
# Same layout for the peekBelow measure.
bp6<- ggplot(DCdata, aes(x=Configuration, y=peekBelow)) +
geom_boxplot()
bp6
plot6 <- bp6 + scale_x_discrete(limits = rev(levels(DCdata$Condition))) +
expand_limits(y=c(0,1)) +
stat_sum(aes(size = factor(..n..)), colour = "purple", geom = "point") +
scale_size_discrete(range = c(1, 16)) +
theme_bw()+
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())+
ggtitle("Peeking below - opaque condition") +
theme(plot.title = element_text(lineheight=.8, face="bold")) +
ylab("Mean proportion of peeking") +
xlab("Cup configuration") +
theme(legend.position="none")
plot6 |
#' POST a full query to the REST API for Cosmos DB.
#'
#' @param sql.what String for specifying what fields to retrieve. Typically called select condition. Defaults to *
#' @param sql.where String for specifying what filter to use on data. Typically called search condition. Defaults to empty.
#' @param sql.orderby String for specifying what field to order query by. Defaults to empty.
#' @param max.items Maximum number of items to return per results page. Defaults to 100.
#' @param debug.auth Logical value for getting verbose output of auth header being constructed. Defaults to false.
#' @param debug.query Logical value for getting verbose output of HTTP response, printing all headers. Defaults to false.
#' @param content.response Logical value to determine whether to retrieve full response or just the documents
#' @param asc Logical, where true returns the data in ascending order by sql.orderby. Defaults to True.
#' @return Prints status code of HTTP POST, and returns full HTTP response or just the content
#' @keywords query cosmosdb post
#' @export
#' @examples
#' cosmosQuery(sql.what = "c.contact.eloquaId", sql.where = "c.contact.eloquaId != null")
cosmosQuery <- function(sql.what = "*", sql.where = "", sql.orderby = "",
                        max.items = 100, debug.auth = FALSE, debug.query = FALSE,
                        content.response = FALSE, asc = TRUE) {
  # Attach dependencies. library() (unlike require()) fails fast with an
  # error when a package is missing instead of warning and returning FALSE.
  library(digest)
  library(base64enc)
  library(httr)
  library(jsonlite)
  library(stringr)

  # Cosmos DB signs requests against the request date, lower-cased, in
  # RFC 1123 format in GMT.
  current.time <- Sys.time()
  ms.date.string <- tolower(format(current.time, "%a, %d %b %Y %H:%M:%S %Z", tz = "GMT"))

  # POST URI for querying the collection, plus the resource link/type used
  # when generating the auth signature.
  post.uri <- paste0(envCosmosDB$uri, "/dbs/", envCosmosDB$dbName,
                     "/colls/", envCosmosDB$collName, "/docs")
  res.link <- paste0("dbs/", envCosmosDB$dbName, "/colls/", envCosmosDB$collName)
  res.type <- "docs"

  # Build the full SQL query and convert it to the JSON body Cosmos DB expects.
  full.query <- constructQuery(sql.what, sql.where, sql.orderby, asc)
  json.query <- toJSON(list(query = full.query, parameters = list()))

  # toJSON() boxes the query string in a JSON array; the first set of
  # brackets breaks the request, so strip them.
  json.query <- str_replace(json.query, fixed("["), "")
  json.query <- str_replace(json.query, fixed("]"), "")

  # Generate the auth header using the request specification.
  auth.header <- genHeader(verb = "POST", resource.type = res.type,
                           resource.link = res.link, stored.time = ms.date.string,
                           debug = debug.auth)

  raw.response <- POST(post.uri,
                       add_headers(.headers = c(
                         "Authorization" = auth.header,
                         "x-ms-version" = "2017-02-22",
                         "x-ms-date" = ms.date.string,
                         "Content-Type" = "application/query+json",
                         "x-ms-documentdb-isquery" = "true",
                         "x-ms-documentdb-query-enablecrosspartition" = "true",
                         "x-ms-max-item-count" = max.items)),
                       body = json.query)

  # Send the status code of the POST to the console.
  print(paste("Status Code is", raw.response$status_code, sep = " "))

  # Debug flag for viewing headers upon troubleshooting.
  if (isTRUE(debug.query)) {
    print("*** Headers of Response ***")
    print(raw.response$headers)
    print(readBin(raw.response$content, "character"))
  }

  # Return the raw httr response or just the parsed documents. identical()
  # is robust to NA/non-logical input, unlike comparison with ==.
  if (identical(content.response, FALSE)) {
    raw.response
  } else if (identical(content.response, TRUE)) {
    char.response <- readContent(raw.response)
    char.response$Documents
  } else {
    print("Invalid content response option specified. Logical value required.")
  }
}
| /R/cosmosQuery.R | permissive | kgmccann/cosmosR | R | false | false | 3,616 | r | #' POST a full query to the REST API for Cosmos DB.
#'
#' @param sql.what String for specifying what fields to retrieve. Typically called select condition. Defaults to *
#' @param sql.where String for specifying what filter to use on data. Typically called search condition. Defaults to empty.
#' @param sql.orderby String for specifying what field to order query by. Defaults to empty.
#' @param max.items Maximum number of items to return per results page. Defaults to 100.
#' @param debug.auth Logical value for getting verbose output of auth header being constructed. Defaults to false.
#' @param debug.query Logical value for getting verbose output of HTTP response, printing all headers. Defaults to false.
#' @param content.response Logical value to determine whether to retrieve full response or just the documents
#' @param asc Logical, where true returns the data in ascending order by sql.orderby. Defaults to True.
#' @return Prints status code of HTTP POST, and returns full HTTP response or just the content
#' @keywords query cosmosdb post
#' @export
#' @examples
#' cosmosQuery(sql.what = "c.contact.eloquaId", sql.where = "c.contact.eloquaId != null")
cosmosQuery <- function(sql.what = "*", sql.where = "", sql.orderby = "",
                        max.items = 100, debug.auth = FALSE, debug.query = FALSE,
                        content.response = FALSE, asc = TRUE) {
  # Attach dependencies. library() (unlike require()) fails fast with an
  # error when a package is missing instead of warning and returning FALSE.
  library(digest)
  library(base64enc)
  library(httr)
  library(jsonlite)
  library(stringr)

  # Cosmos DB signs requests against the request date, lower-cased, in
  # RFC 1123 format in GMT.
  current.time <- Sys.time()
  ms.date.string <- tolower(format(current.time, "%a, %d %b %Y %H:%M:%S %Z", tz = "GMT"))

  # POST URI for querying the collection, plus the resource link/type used
  # when generating the auth signature.
  post.uri <- paste0(envCosmosDB$uri, "/dbs/", envCosmosDB$dbName,
                     "/colls/", envCosmosDB$collName, "/docs")
  res.link <- paste0("dbs/", envCosmosDB$dbName, "/colls/", envCosmosDB$collName)
  res.type <- "docs"

  # Build the full SQL query and convert it to the JSON body Cosmos DB expects.
  full.query <- constructQuery(sql.what, sql.where, sql.orderby, asc)
  json.query <- toJSON(list(query = full.query, parameters = list()))

  # toJSON() boxes the query string in a JSON array; the first set of
  # brackets breaks the request, so strip them.
  json.query <- str_replace(json.query, fixed("["), "")
  json.query <- str_replace(json.query, fixed("]"), "")

  # Generate the auth header using the request specification.
  auth.header <- genHeader(verb = "POST", resource.type = res.type,
                           resource.link = res.link, stored.time = ms.date.string,
                           debug = debug.auth)

  raw.response <- POST(post.uri,
                       add_headers(.headers = c(
                         "Authorization" = auth.header,
                         "x-ms-version" = "2017-02-22",
                         "x-ms-date" = ms.date.string,
                         "Content-Type" = "application/query+json",
                         "x-ms-documentdb-isquery" = "true",
                         "x-ms-documentdb-query-enablecrosspartition" = "true",
                         "x-ms-max-item-count" = max.items)),
                       body = json.query)

  # Send the status code of the POST to the console.
  print(paste("Status Code is", raw.response$status_code, sep = " "))

  # Debug flag for viewing headers upon troubleshooting.
  if (isTRUE(debug.query)) {
    print("*** Headers of Response ***")
    print(raw.response$headers)
    print(readBin(raw.response$content, "character"))
  }

  # Return the raw httr response or just the parsed documents. identical()
  # is robust to NA/non-logical input, unlike comparison with ==.
  if (identical(content.response, FALSE)) {
    raw.response
  } else if (identical(content.response, TRUE)) {
    char.response <- readContent(raw.response)
    char.response$Documents
  } else {
    print("Invalid content response option specified. Logical value required.")
  }
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generated_client.R
\name{apps_get}
\alias{apps_get}
\title{List details of a Decision Application}
\usage{
apps_get(slug)
}
\arguments{
\item{slug}{string required. The slug for the application.}
}
\value{
A list containing the following elements:
\item{slug}{string, The slug for the application.}
\item{id}{integer, The unique id of the application.}
\item{instanceName}{string, A word that describes an instance of this app.}
\item{name}{string, The name of the application.}
\item{currentRelease}{object, A list containing the following elements:
\itemize{
\item id integer, The unique id of the release.
\item appId integer, The id of the app the release belongs to.
\item reportTemplateId integer, ID of the report template for this release.
\item resources object, A hash of resources associated with this release.
}}
\item{features}{object, App features.}
}
\description{
List details of a Decision Application
}
| /man/apps_get.Rd | no_license | JosiahParry/civis-r | R | false | true | 1,000 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generated_client.R
\name{apps_get}
\alias{apps_get}
\title{List details of a Decision Application}
\usage{
apps_get(slug)
}
\arguments{
\item{slug}{string required. The slug for the application.}
}
\value{
A list containing the following elements:
\item{slug}{string, The slug for the application.}
\item{id}{integer, The unique id of the application.}
\item{instanceName}{string, A word that describes an instance of this app.}
\item{name}{string, The name of the application.}
\item{currentRelease}{object, A list containing the following elements:
\itemize{
\item id integer, The unique id of the release.
\item appId integer, The id of the app the release belongs to.
\item reportTemplateId integer, ID of the report template for this release.
\item resources object, A hash of resources associated with this release.
}}
\item{features}{object, App features.}
}
\description{
List details of a Decision Application
}
|
# setwd()
# Collect, preprocess, and analyse Arirang Children's Library loan data via
# the data4library.kr open API, then rank substitute books for popular titles
# by keyword similarity.
rm(list = ls())
library(dplyr)
library(XML)
api_key = ""
options(scipen = 99) # print plain numbers instead of scientific notation
# 1. Load the top-10 most-borrowed children's books (Sep-Nov 2019) for the
#    Arirang Children's Library.
## Call the API, parse the returned XML, and store book name, ISBN,
## ISBN add-on code, and subject classification.
lib_code = 111468 # Arirang Children's Library code
start_d = '2019-09-01'
end_d = '2019-11-30'
fileUrl = paste0('http://data4library.kr/api/loanItemSrchByLib?authKey=', api_key,
'&libCode=', lib_code, '&startDt=', start_d,
'&endDt=', end_d, '&addCode=7&pageSize=10')
xml_doc = xmlTreeParse(fileUrl,useInternal=TRUE)
pop_books = trimws(xpathSApply( xmlRoot(xml_doc), "//bookname", xmlValue)); pop_books
pop_ISBN = trimws(xpathSApply( xmlRoot(xml_doc), "//isbn13", xmlValue)); pop_ISBN
pop_addNum = trimws(xpathSApply( xmlRoot(xml_doc), "//addition_symbol", xmlValue)); pop_addNum
pop_classNum = trimws(xpathSApply( xmlRoot(xml_doc), "//class_no", xmlValue)); pop_classNum
any(is.na(c(pop_books, pop_ISBN, pop_addNum, pop_classNum))) # FALSE if everything loaded cleanly
# ## misc parsing practice
# rootNode <- xmlRoot(xml_doc) # store the root node separately
# xmlName(rootNode) # check the root node's name
# names(rootNode) # check the root node's children
# docs_node = rootNode[[3]] # store the docs node holding the doc entries
# doc1_node = docs_node[[1]] # store the first doc node under docs
# length(names(doc1_node)) # count the doc node's children
# doc1_node[[11]] # the 11th value of a doc node is the loan count
# 2. Load & preprocess the Nov 2019 Arirang Children's Library holdings/loan
#    data, then sanity-check it.
## loaded columns: title (도서명), ISBN, add-on code (부가기호), subject class
## (주제분류), copy count (도서권수), loan count (대출건수)
## Drop titles with 0 copies, keep children's books (add-on code 7), and keep
## only books that were never borrowed (loan count 0).
lib_df = read.csv('아리랑어린이도서관 장서 대출목록 (2019년 11월).csv',
stringsAsFactors=F )[c(2, 6, 8, 10, 11, 12)]
nrow(lib_df)
lib_df = lib_df %>% filter(도서권수 != 0) # drop books (readers) with 0 copies
lib_df = lib_df %>%
filter(!is.na(부가기호) & 부가기호==7) %>%
select(-부가기호) # keep children's books only (add-on code 7)
lib_df = lib_df %>%
filter(대출건수==0) %>%
select(-대출건수) # keep only books with a cumulative loan count of 0
nrow(lib_df)
str(lib_df)
head(lib_df,2)
table(is.na(lib_df))
summary(lib_df)
table(lib_df$주제분류번호)
table(lib_df$주제분류번호)[table(lib_df$주제분류번호) > 20] %>% sort() # keep only subject classes with many books
## 813 fiction, 990 biography, 375 early-childhood & elementary education, 811 poetry
table(lib_df$도서권수)
lib_df[ lib_df$도서권수>=5 , ] %>% arrange(도서권수) # inspect copy-count outliers - needs follow-up?
# 3. From the preprocessed holdings, keep only books whose subject class
#    overlaps with the top-10 popular books.
matched_books_df = lib_df[ lib_df$주제분류번호 %in% pop_classNum, ]
nrow(matched_books_df) # 999 books
table(pop_classNum)
table(matched_books_df$주제분류번호)
## idea - keyword-similarity analysis for class 911, which has a reasonable overlap??
## issue - how to shrink the book lists for 813.6 / 813.8, which overlap far too much??
## issue - how to build lists for 001 / 813.7 / 833.6, which overlap too little or not at all??
# 4. sampling - build the comparison list used to recommend substitutes for
#    class-911 books.
## + also add books from the full df whose titles contain the keywords
pop_books[pop_classNum==911]
pop_ISBN[pop_classNum==911]
## books with the same subject class (911)
matched_books_df[matched_books_df$주제분류번호==911, ]
## books whose titles partially match
titl_simil_df = lib_df[grepl('why', lib_df$도서명, fixed = TRUE) |
grepl('신화', lib_df$도서명, fixed = TRUE) |
grepl('전설', lib_df$도서명, fixed = TRUE), ]
titl_simil_df
## final list of books to compare for similarity
compare_books = c(pop_books[pop_classNum==911],
matched_books_df[matched_books_df$주제분류번호==911, ]$도서명,
titl_simil_df$도서명); compare_books
compare_ISBN = c(pop_ISBN[pop_classNum==911],
matched_books_df[matched_books_df$주제분류번호==911, ]$ISBN,
titl_simil_df$ISBN); compare_ISBN
length(compare_books) == length(compare_ISBN) # TRUE if the lists are consistent
## Error/issue - the API frequently returns zero keywords for a book
# 5. Build a nested list holding each book's keywords and details (via the API).
res_list = list()
list_idx = 1
for (idx in 1:length(compare_books)) {
tmp_ISBN = compare_ISBN[idx]
tmp_bookname = compare_books[idx]
tmp_url = paste0('http://data4library.kr/api/keywordList?authKey=', api_key,'&isbn13=', tmp_ISBN)
xml_doc = xmlTreeParse(tmp_url, useInternal=TRUE)
Sys.sleep(1) # throttle API calls
tmp_keywords = trimws(xpathSApply( xmlRoot(xml_doc), "//word", xmlValue))
tmp_weights = as.numeric(trimws(xpathSApply( xmlRoot(xml_doc), "//weight", xmlValue)))
if (length(tmp_keywords)==0) {
cat(tmp_bookname, '- 본 도서는 키워드가 제공되지 않아 수집하지 않습니다.\n')
next()
}
tmp_res_list = list(bookname = tmp_bookname, ISBN = tmp_ISBN,
keywords_df = data.frame(keyword = tmp_keywords, weight = tmp_weights, stringsAsFactors = F))
res_list[[list_idx]] = tmp_res_list
list_idx = list_idx+1
# check
cat(tmp_bookname, '-',
nrow(tmp_res_list$keywords_df), '개 키워드 수집 완료',
any(is.na(tmp_res_list)),
any(is.na(tmp_res_list$keywords_df)), '\n') # F, F if everything is OK
# if (idx%%50==0) {
# print('======= 50번째 완료 =======')
# }
# check
}
length(res_list)
View(res_list)
# 6. Using the keyword weights, compute the inner product for each book pair
#    against the first book and rank candidates by keyword similarity.
bookname_vec = c()
ISBN_vec = c()
inner_product_vec = c()
for (tmp_list in res_list) {
bookname_vec = c(bookname_vec, tmp_list$bookname)
ISBN_vec = c(ISBN_vec, tmp_list$ISBN)
tmp_df = inner_join( res_list[[1]]$keywords_df,
tmp_list$keywords_df,
by='keyword') %>%
mutate(multiple = weight.x * weight.y)
inner_product_vec = c(inner_product_vec, sum(tmp_df$multiple))
}
res_df = data.frame(bookname = bookname_vec,
ISBN = ISBN_vec,
inner_product = inner_product_vec,
stringsAsFactors = F) %>%
arrange(desc(inner_product))
res_df
| /191225_2019년11월 아리랑어린이도서관_데이터 수집,전처리,샘플결과도출/191225_handlingAPI_handlingList_DA(Textmining)_similarBooks.R | no_license | wesley-94/R_project_multicampus | R | false | false | 6,835 | r | # setwd()
# Collect, preprocess, and analyse Arirang Children's Library loan data via
# the data4library.kr open API, then rank substitute books for popular titles
# by keyword similarity.
rm(list = ls())
library(dplyr)
library(XML)
api_key = ""
options(scipen = 99) # print plain numbers instead of scientific notation
# 1. Load the top-10 most-borrowed children's books (Sep-Nov 2019) for the
#    Arirang Children's Library.
## Call the API, parse the returned XML, and store book name, ISBN,
## ISBN add-on code, and subject classification.
lib_code = 111468 # Arirang Children's Library code
start_d = '2019-09-01'
end_d = '2019-11-30'
fileUrl = paste0('http://data4library.kr/api/loanItemSrchByLib?authKey=', api_key,
'&libCode=', lib_code, '&startDt=', start_d,
'&endDt=', end_d, '&addCode=7&pageSize=10')
xml_doc = xmlTreeParse(fileUrl,useInternal=TRUE)
pop_books = trimws(xpathSApply( xmlRoot(xml_doc), "//bookname", xmlValue)); pop_books
pop_ISBN = trimws(xpathSApply( xmlRoot(xml_doc), "//isbn13", xmlValue)); pop_ISBN
pop_addNum = trimws(xpathSApply( xmlRoot(xml_doc), "//addition_symbol", xmlValue)); pop_addNum
pop_classNum = trimws(xpathSApply( xmlRoot(xml_doc), "//class_no", xmlValue)); pop_classNum
any(is.na(c(pop_books, pop_ISBN, pop_addNum, pop_classNum))) # FALSE if everything loaded cleanly
# ## misc parsing practice
# rootNode <- xmlRoot(xml_doc) # store the root node separately
# xmlName(rootNode) # check the root node's name
# names(rootNode) # check the root node's children
# docs_node = rootNode[[3]] # store the docs node holding the doc entries
# doc1_node = docs_node[[1]] # store the first doc node under docs
# length(names(doc1_node)) # count the doc node's children
# doc1_node[[11]] # the 11th value of a doc node is the loan count
# 2. Load & preprocess the Nov 2019 Arirang Children's Library holdings/loan
#    data, then sanity-check it.
## loaded columns: title (도서명), ISBN, add-on code (부가기호), subject class
## (주제분류), copy count (도서권수), loan count (대출건수)
## Drop titles with 0 copies, keep children's books (add-on code 7), and keep
## only books that were never borrowed (loan count 0).
lib_df = read.csv('아리랑어린이도서관 장서 대출목록 (2019년 11월).csv',
stringsAsFactors=F )[c(2, 6, 8, 10, 11, 12)]
nrow(lib_df)
lib_df = lib_df %>% filter(도서권수 != 0) # drop books (readers) with 0 copies
lib_df = lib_df %>%
filter(!is.na(부가기호) & 부가기호==7) %>%
select(-부가기호) # keep children's books only (add-on code 7)
lib_df = lib_df %>%
filter(대출건수==0) %>%
select(-대출건수) # keep only books with a cumulative loan count of 0
nrow(lib_df)
str(lib_df)
head(lib_df,2)
table(is.na(lib_df))
summary(lib_df)
table(lib_df$주제분류번호)
table(lib_df$주제분류번호)[table(lib_df$주제분류번호) > 20] %>% sort() # keep only subject classes with many books
## 813 fiction, 990 biography, 375 early-childhood & elementary education, 811 poetry
table(lib_df$도서권수)
lib_df[ lib_df$도서권수>=5 , ] %>% arrange(도서권수) # inspect copy-count outliers - needs follow-up?
# 3. From the preprocessed holdings, keep only books whose subject class
#    overlaps with the top-10 popular books.
matched_books_df = lib_df[ lib_df$주제분류번호 %in% pop_classNum, ]
nrow(matched_books_df) # 999 books
table(pop_classNum)
table(matched_books_df$주제분류번호)
## idea - keyword-similarity analysis for class 911, which has a reasonable overlap??
## issue - how to shrink the book lists for 813.6 / 813.8, which overlap far too much??
## issue - how to build lists for 001 / 813.7 / 833.6, which overlap too little or not at all??
# 4. sampling - build the comparison list used to recommend substitutes for
#    class-911 books.
## + also add books from the full df whose titles contain the keywords
pop_books[pop_classNum==911]
pop_ISBN[pop_classNum==911]
## books with the same subject class (911)
matched_books_df[matched_books_df$주제분류번호==911, ]
## books whose titles partially match
titl_simil_df = lib_df[grepl('why', lib_df$도서명, fixed = TRUE) |
grepl('신화', lib_df$도서명, fixed = TRUE) |
grepl('전설', lib_df$도서명, fixed = TRUE), ]
titl_simil_df
## final list of books to compare for similarity
compare_books = c(pop_books[pop_classNum==911],
matched_books_df[matched_books_df$주제분류번호==911, ]$도서명,
titl_simil_df$도서명); compare_books
compare_ISBN = c(pop_ISBN[pop_classNum==911],
matched_books_df[matched_books_df$주제분류번호==911, ]$ISBN,
titl_simil_df$ISBN); compare_ISBN
length(compare_books) == length(compare_ISBN) # TRUE if the lists are consistent
## Error/issue - the API frequently returns zero keywords for a book
# 5. Build a nested list holding each book's keywords and details (via the API).
res_list = list()
list_idx = 1
for (idx in 1:length(compare_books)) {
tmp_ISBN = compare_ISBN[idx]
tmp_bookname = compare_books[idx]
tmp_url = paste0('http://data4library.kr/api/keywordList?authKey=', api_key,'&isbn13=', tmp_ISBN)
xml_doc = xmlTreeParse(tmp_url, useInternal=TRUE)
Sys.sleep(1) # throttle API calls
tmp_keywords = trimws(xpathSApply( xmlRoot(xml_doc), "//word", xmlValue))
tmp_weights = as.numeric(trimws(xpathSApply( xmlRoot(xml_doc), "//weight", xmlValue)))
if (length(tmp_keywords)==0) {
cat(tmp_bookname, '- 본 도서는 키워드가 제공되지 않아 수집하지 않습니다.\n')
next()
}
tmp_res_list = list(bookname = tmp_bookname, ISBN = tmp_ISBN,
keywords_df = data.frame(keyword = tmp_keywords, weight = tmp_weights, stringsAsFactors = F))
res_list[[list_idx]] = tmp_res_list
list_idx = list_idx+1
# check
cat(tmp_bookname, '-',
nrow(tmp_res_list$keywords_df), '개 키워드 수집 완료',
any(is.na(tmp_res_list)),
any(is.na(tmp_res_list$keywords_df)), '\n') # F, F if everything is OK
# if (idx%%50==0) {
# print('======= 50번째 완료 =======')
# }
# check
}
length(res_list)
View(res_list)
# 6. Using the keyword weights, compute the inner product for each book pair
#    against the first book and rank candidates by keyword similarity.
bookname_vec = c()
ISBN_vec = c()
inner_product_vec = c()
for (tmp_list in res_list) {
bookname_vec = c(bookname_vec, tmp_list$bookname)
ISBN_vec = c(ISBN_vec, tmp_list$ISBN)
tmp_df = inner_join( res_list[[1]]$keywords_df,
tmp_list$keywords_df,
by='keyword') %>%
mutate(multiple = weight.x * weight.y)
inner_product_vec = c(inner_product_vec, sum(tmp_df$multiple))
}
res_df = data.frame(bookname = bookname_vec,
ISBN = ISBN_vec,
inner_product = inner_product_vec,
stringsAsFactors = F) %>%
arrange(desc(inner_product))
res_df
|
# Author: Rachel Oidtman
# This script is to demonstrate the differences in revised estimates of p'_Z,c
# and p'_Z,s when you calculate them at each time point versuse the aggregated
# time series.
#=============================================================================#
# load data and libraries
#=============================================================================#
rm(list = ls())
load('../data/processed/processed_arbo_americas.RData')
load('../output/diagnostic_distributions.RData')
other_c = deng_c
other_c[, 3:ncol(deng_c)] = deng_c[, 3:ncol(deng_c)] + chik_c[, 3:ncol(chik_c)]
other_s = deng_s
other_s[, 3:ncol(deng_s)] = deng_s[, 3:ncol(deng_s)] + chik_s[, 3:ncol(chik_s)]
countries = colnames(deng_c[3:ncol(deng_c)])
#=============================================================================#
# start time series in week 39 of 2015
#=============================================================================#
other_s = other_s[which(other_s$Week == 39 & other_s$Year == 2015): nrow(other_s),]
other_c = other_c[which(other_c$Week == 39 & other_c$Year == 2015): nrow(other_c),]
zik_c = zik_c[which(zik_c$Week == 39 & zik_c$Year == 2015): nrow(zik_c),]
zik_s = zik_s[which(zik_s$Week == 39 & zik_s$Year == 2015): nrow(zik_s),]
#=============================================================================#
# calculate p' for cumulative time series
#=============================================================================#
# observed cumulative p' for every country:
# p'_Z,c = (confirmed Zika cases) / (confirmed Zika + confirmed other arbovirus
# cases), summed over the whole time series; p'_Z,s likewise for suspected.
observed_p_prime_Zc = rep(NA, length = length(countries))
observed_p_prime_Zs = rep(NA, length = length(countries))
l = 1  # column counter paired with the loop over country names
for(cc in unique(countries)){
  observed_p_prime_Zc[l] = (sum(zik_c[,which(colnames(zik_c) == cc)])) /
    (sum((zik_c[, which(colnames(zik_c) == cc)])) + sum(other_c[,which(colnames(other_c) == cc)]))
  observed_p_prime_Zs[l] = (sum(zik_s[,which(colnames(zik_s) == cc)])) /
    (sum((zik_s[, which(colnames(zik_s) == cc)])) + sum(other_s[,which(colnames(other_s) == cc)]))
  l = l + 1
}
# beta-binomial conjugate relationship to bring uncertainty into estimates of p'
# Uniform Beta(1, 1) prior.
alpha_prior = 1
beta_prior = 1
# Beta-binomial conjugate update: given a Beta(alpha_in, beta_in) prior and
# num_suc successes out of num_tri trials, return the posterior Beta
# parameters as a named vector c(beta_param1, beta_param2).
bb_conj = function(alpha_in, beta_in, num_suc, num_tri){
  c(beta_param1 = alpha_in + num_suc,
    beta_param2 = beta_in + (num_tri - num_suc))
}
# confirmed and suspected cases p'
# Posterior Beta parameters per country (row 1 = alpha, row 2 = beta):
# successes = Zika counts, trials = Zika + other arbovirus counts, summed
# over the full time series.
p_prime_Zc = matrix(NA, nrow = 2, ncol = length(countries))
p_prime_Zs = matrix(NA, nrow = 2, ncol = length(countries))
l = 1
for(cc in unique(countries)){
  p_prime_Zc[,l] = bb_conj(alpha_in = alpha_prior, beta_in = beta_prior,
                           num_suc = sum(zik_c[, which(colnames(zik_c) == cc)]),
                           num_tri = sum(other_c[, which(colnames(other_c) == cc)] + zik_c[, which(colnames(zik_c) == cc)]))
  # BUG FIX: the suspected-case sums previously looked the column up via
  # colnames(zik_c)/colnames(other_c) (the *confirmed*-case matrices), which
  # is only correct if both matrices happen to share identical column order.
  # Index each matrix by its own column names.
  p_prime_Zs[,l] = bb_conj(alpha_in = alpha_prior, beta_in = beta_prior,
                           num_suc = sum(zik_s[, which(colnames(zik_s) == cc)]),
                           num_tri = sum(other_s[, which(colnames(other_s) == cc)] + zik_s[, which(colnames(zik_s) == cc)]))
  l = l + 1
}
#=============================================================================#
# data structure
#=============================================================================#
# with more than 1000 samples, subsequent analyses take very long period of time. start with 10.
# number of samples for posterior draws
num_samples = 10
# draw num_sample samples from posterior of p'_Zc at each time step for each country
p_prime_Zc_samples = matrix(NA, nrow = num_samples, ncol = length(countries))
p_prime_Zs_samples = matrix(NA, nrow = num_samples, ncol = length(countries))
for(cc in 1:length(countries)){
p_prime_Zc_samples[, cc] = rbeta(num_samples, shape1 = p_prime_Zc[1,cc], shape2 = p_prime_Zc[2,cc])
p_prime_Zs_samples[, cc] = rbeta(num_samples, shape1 = p_prime_Zs[1,cc], shape2 = p_prime_Zs[2,cc])
}
# data structure to hold variable length estimates of p_Zc and p_Zs for each country and each time point
p_Zc = list()
length(p_Zc) = length(countries)
p_Zs = list()
length(p_Zs) = length(countries)
#=============================================================================#
# functions
#=============================================================================#
# se and sp constraints 1 & 2 --> 1 = se > p'; 2 = se + sp cannot equal 1
# Constraints 1 & 2 on (se, sp) pairs (col 1 = sensitivity, col 2 = specificity,
# matching how calc_pZ consumes the columns): keep rows whose sensitivity
# exceeds p' (constraint 1) and whose se + sp is not exactly 1, i.e. the test
# is informative (constraint 2). Returns the row indices satisfying both.
constrain_diag_1_2 = function(p_prime_in, diag_mat_in, browse = F){
  if(browse) browser()
  se_above_p_prime = diag_mat_in[, 1] > p_prime_in
  informative = (diag_mat_in[, 1] + diag_mat_in[, 2]) != 1
  which(se_above_p_prime & informative)
}
# se and sp constraint 3 --> numerator and denomiator of p must either both be
# positive or negative to result in a postive p value
# se and sp constraint 3 --> numerator and denominator of p must either both
# be positive or negative to result in a positive p value; equivalently
# p_Z = (p' - 1 + sp) / (se - 1 + sp) must lie in [0, 1]. Returns the row
# indices of diag_mat_in (col 1 = se, col 2 = sp) that satisfy this.
constrain_diag_3 = function(p_prime_in, diag_mat_in, browse = F){
  if(browse) browser()
  # Degenerate inputs from upstream filtering: an empty selection becomes a
  # single all-NA row (so which() below returns integer(0)), and a bare
  # (se, sp) vector is promoted to a 1-row matrix.
  if(length(diag_mat_in) == 0){
    diag_mat_in = matrix(NA, ncol= 2)
  }
  if(length(nrow(diag_mat_in)) == 0){
    diag_mat_in = matrix(diag_mat_in, ncol = 2)
  }
  # Vectorised over rows; the original sapply() over 1:nrow() did the same
  # arithmetic one row at a time.
  a = p_prime_in - 1 + diag_mat_in[, 2]
  b = diag_mat_in[, 1] - 1 + diag_mat_in[, 2]
  return(which(a/b <= 1 & a/b >= 0))
}
# calculate pZ function
# Invert the misclassification relationship to recover the true Zika
# proportion: p_Z = (p' - 1 + sp) / (sp - 1 + se), evaluated element-wise for
# each (se, sp) pair. Fully vectorised; an empty se_in/sp_in now yields
# numeric(0), whereas the original sapply(1:length(se_in), ...) produced a
# spurious NA for empty input (1:0 iterates over c(1, 0)).
calc_pZ = function(se_in, sp_in, p_prime_in, browse = F){
  if(browse) browser()
  return((p_prime_in - 1 + sp_in) / (sp_in - 1 + se_in))
}
#=============================================================================#
# calculating allowable se and sp and estimating p_Z
#=============================================================================#
# For each country: filter the (se, sp) pairs in diag_dist_c / diag_dist_s
# (loaded from diagnostic_distributions.RData above) through constraints 1-3
# for every posterior draw of p', then invert the misclassification relation
# (calc_pZ) to obtain samples of the true Zika proportion p_Z.
# l = 1
for(cc in 1:length(countries)){
  # determine indices of allowable sensitivity and specificity values for confirmed and suspected cases for constraints 1 and 2
  diag_c_time_country_1_2 = lapply(1:num_samples, function(ff)
    diag_dist_c[lapply(p_prime_Zc_samples[,cc], constrain_diag_1_2, diag_dist_c)[[ff]],])
  diag_s_time_country_1_2 = lapply(1:num_samples, function(ff)
    diag_dist_s[lapply(p_prime_Zs_samples[,cc], constrain_diag_1_2, diag_dist_s)[[ff]],])
  # input allowable se and sp values for constraints 1 and 2 into constraint 3 to determine final allowable se and sp values
  # for confirmed and suspected cases
  # (matrix(..., ncol = 2) guards against a 1-row selection collapsing to a
  # plain vector; constrain_diag_3 handles the empty / vector cases itself)
  diag_c_time_country_3 =
    lapply(1:num_samples, function(kk)
      matrix(diag_c_time_country_1_2[[kk]],ncol=2)[lapply(1:num_samples,
        function(ff) constrain_diag_3(p_prime_Zc_samples[ff,cc], diag_c_time_country_1_2[[ff]]))[[kk]],])
  diag_s_time_country_3 =
    lapply(1:num_samples, function(kk)
      matrix(diag_s_time_country_1_2[[kk]],ncol=2)[lapply(1:num_samples,
        function(ff) constrain_diag_3(p_prime_Zs_samples[ff,cc], diag_s_time_country_1_2[[ff]]))[[kk]],])
  # calculate estimates p_Zc
  # (growing vec_tmp with c() is quadratic, but acceptable for num_samples = 10)
  vec_tmp = c()
  for(ii in 1:num_samples){
    out_tmp = calc_pZ(se_in = matrix(diag_c_time_country_3[[ii]], ncol = 2)[,1],
                      sp_in = matrix(diag_c_time_country_3[[ii]], ncol = 2)[,2],
                      p_prime_in = p_prime_Zc_samples[ii, cc])
    vec_tmp = c(vec_tmp, out_tmp)
  }
  p_Zc[[cc]] = unlist(vec_tmp)
  # calculate estimates p_Zs
  vec_tmp = c()
  for(ii in 1:num_samples){
    out_tmp = calc_pZ(se_in = matrix(diag_s_time_country_3[[ii]], ncol = 2)[,1],
                      sp_in = matrix(diag_s_time_country_3[[ii]], ncol = 2)[,2],
                      p_prime_in = p_prime_Zs_samples[ii, cc], browse = F)
    vec_tmp = c(vec_tmp, out_tmp)
  }
  p_Zs[[cc]] = unlist(vec_tmp)
}
#=============================================================================#
# save output
#=============================================================================#
save(p_Zs, p_Zc, file = '../output/3b_revised_p_temporal_aggregated.RData')
| /code/3b_data_aggregation_timeseries_PCR.R | no_license | roidtman/zika_misdiagnosis | R | false | false | 7,990 | r | # Author: Rachel Oidtman
# This script is to demonstrate the differences in revised estimates of p'_Z,c
# and p'_Z,s when you calculate them at each time point versuse the aggregated
# time series.
#=============================================================================#
# load data and libraries
#=============================================================================#
rm(list = ls())
load('../data/processed/processed_arbo_americas.RData')
load('../output/diagnostic_distributions.RData')
other_c = deng_c
other_c[, 3:ncol(deng_c)] = deng_c[, 3:ncol(deng_c)] + chik_c[, 3:ncol(chik_c)]
other_s = deng_s
other_s[, 3:ncol(deng_s)] = deng_s[, 3:ncol(deng_s)] + chik_s[, 3:ncol(chik_s)]
countries = colnames(deng_c[3:ncol(deng_c)])
#=============================================================================#
# start time series in week 39 of 2015
#=============================================================================#
other_s = other_s[which(other_s$Week == 39 & other_s$Year == 2015): nrow(other_s),]
other_c = other_c[which(other_c$Week == 39 & other_c$Year == 2015): nrow(other_c),]
zik_c = zik_c[which(zik_c$Week == 39 & zik_c$Year == 2015): nrow(zik_c),]
zik_s = zik_s[which(zik_s$Week == 39 & zik_s$Year == 2015): nrow(zik_s),]
#=============================================================================#
# calculate p' for cumulative time series
#=============================================================================#
# observed cumulative p' for every country
observed_p_prime_Zc = rep(NA, length = length(countries))
observed_p_prime_Zs = rep(NA, length = length(countries))
l = 1
for(cc in unique(countries)){
observed_p_prime_Zc[l] = (sum(zik_c[,which(colnames(zik_c) == cc)])) /
(sum((zik_c[, which(colnames(zik_c) == cc)])) + sum(other_c[,which(colnames(other_c) == cc)]))
observed_p_prime_Zs[l] = (sum(zik_s[,which(colnames(zik_s) == cc)])) /
(sum((zik_s[, which(colnames(zik_s) == cc)])) + sum(other_s[,which(colnames(other_s) == cc)]))
l = l + 1
}
# beta-binomial conjugate relationship to bring uncertainty into estimates of p'
alpha_prior = 1
beta_prior = 1
# Beta-binomial conjugate update: given a Beta(alpha_in, beta_in) prior and
# num_suc successes out of num_tri trials, return the posterior Beta
# parameters as a named vector c(beta_param1, beta_param2).
bb_conj = function(alpha_in, beta_in, num_suc, num_tri){
  c(beta_param1 = alpha_in + num_suc,
    beta_param2 = beta_in + (num_tri - num_suc))
}
# confirmed and suspected cases p'
# Posterior Beta parameters per country (row 1 = alpha, row 2 = beta):
# successes = Zika counts, trials = Zika + other arbovirus counts, summed
# over the full time series.
p_prime_Zc = matrix(NA, nrow = 2, ncol = length(countries))
p_prime_Zs = matrix(NA, nrow = 2, ncol = length(countries))
l = 1
for(cc in unique(countries)){
  p_prime_Zc[,l] = bb_conj(alpha_in = alpha_prior, beta_in = beta_prior,
                           num_suc = sum(zik_c[, which(colnames(zik_c) == cc)]),
                           num_tri = sum(other_c[, which(colnames(other_c) == cc)] + zik_c[, which(colnames(zik_c) == cc)]))
  # BUG FIX: the suspected-case sums previously looked the column up via
  # colnames(zik_c)/colnames(other_c) (the *confirmed*-case matrices), which
  # is only correct if both matrices happen to share identical column order.
  # Index each matrix by its own column names.
  p_prime_Zs[,l] = bb_conj(alpha_in = alpha_prior, beta_in = beta_prior,
                           num_suc = sum(zik_s[, which(colnames(zik_s) == cc)]),
                           num_tri = sum(other_s[, which(colnames(other_s) == cc)] + zik_s[, which(colnames(zik_s) == cc)]))
  l = l + 1
}
#=============================================================================#
# data structure
#=============================================================================#
# with more than 1000 samples, subsequent analyses take very long period of time. start with 10.
# number of samples for posterior draws
num_samples = 10
# draw num_sample samples from posterior of p'_Zc at each time step for each country
p_prime_Zc_samples = matrix(NA, nrow = num_samples, ncol = length(countries))
p_prime_Zs_samples = matrix(NA, nrow = num_samples, ncol = length(countries))
for(cc in 1:length(countries)){
p_prime_Zc_samples[, cc] = rbeta(num_samples, shape1 = p_prime_Zc[1,cc], shape2 = p_prime_Zc[2,cc])
p_prime_Zs_samples[, cc] = rbeta(num_samples, shape1 = p_prime_Zs[1,cc], shape2 = p_prime_Zs[2,cc])
}
# data structure to hold variable length estimates of p_Zc and p_Zs for each country and each time point
p_Zc = list()
length(p_Zc) = length(countries)
p_Zs = list()
length(p_Zs) = length(countries)
#=============================================================================#
# functions
#=============================================================================#
# se and sp constraints 1 & 2 --> 1 = se > p'; 2 = se + sp cannot equal 1
# Constraints 1 & 2 on (se, sp) pairs (col 1 = sensitivity, col 2 = specificity,
# matching how calc_pZ consumes the columns): keep rows whose sensitivity
# exceeds p' (constraint 1) and whose se + sp is not exactly 1, i.e. the test
# is informative (constraint 2). Returns the row indices satisfying both.
constrain_diag_1_2 = function(p_prime_in, diag_mat_in, browse = F){
  if(browse) browser()
  se_above_p_prime = diag_mat_in[, 1] > p_prime_in
  informative = (diag_mat_in[, 1] + diag_mat_in[, 2]) != 1
  which(se_above_p_prime & informative)
}
# se and sp constraint 3 --> numerator and denomiator of p must either both be
# positive or negative to result in a postive p value
# se and sp constraint 3 --> numerator and denominator of p must either both
# be positive or negative to result in a positive p value; equivalently
# p_Z = (p' - 1 + sp) / (se - 1 + sp) must lie in [0, 1]. Returns the row
# indices of diag_mat_in (col 1 = se, col 2 = sp) that satisfy this.
constrain_diag_3 = function(p_prime_in, diag_mat_in, browse = F){
  if(browse) browser()
  # Degenerate inputs from upstream filtering: an empty selection becomes a
  # single all-NA row (so which() below returns integer(0)), and a bare
  # (se, sp) vector is promoted to a 1-row matrix.
  if(length(diag_mat_in) == 0){
    diag_mat_in = matrix(NA, ncol= 2)
  }
  if(length(nrow(diag_mat_in)) == 0){
    diag_mat_in = matrix(diag_mat_in, ncol = 2)
  }
  # Vectorised over rows; the original sapply() over 1:nrow() did the same
  # arithmetic one row at a time.
  a = p_prime_in - 1 + diag_mat_in[, 2]
  b = diag_mat_in[, 1] - 1 + diag_mat_in[, 2]
  return(which(a/b <= 1 & a/b >= 0))
}
# calculate pZ function
# Invert the misclassification relationship to recover the true Zika
# proportion: p_Z = (p' - 1 + sp) / (sp - 1 + se), evaluated element-wise for
# each (se, sp) pair. Fully vectorised; an empty se_in/sp_in now yields
# numeric(0), whereas the original sapply(1:length(se_in), ...) produced a
# spurious NA for empty input (1:0 iterates over c(1, 0)).
calc_pZ = function(se_in, sp_in, p_prime_in, browse = F){
  if(browse) browser()
  return((p_prime_in - 1 + sp_in) / (sp_in - 1 + se_in))
}
#=============================================================================#
# calculating allowable se and sp and estimating p_Z
#=============================================================================#
# l = 1
for(cc in 1:length(countries)){
# determine indices of allowable sensitivity and specificity values for confirmed and suspected cases for constraints 1 and 2
diag_c_time_country_1_2 = lapply(1:num_samples, function(ff)
diag_dist_c[lapply(p_prime_Zc_samples[,cc], constrain_diag_1_2, diag_dist_c)[[ff]],])
diag_s_time_country_1_2 = lapply(1:num_samples, function(ff)
diag_dist_s[lapply(p_prime_Zs_samples[,cc], constrain_diag_1_2, diag_dist_s)[[ff]],])
# input allowable se and sp values for constraints 1 and 2 into constraint 3 to determine final allowable se and sp values
# for confirmed and suspected cases
diag_c_time_country_3 =
lapply(1:num_samples, function(kk)
matrix(diag_c_time_country_1_2[[kk]],ncol=2)[lapply(1:num_samples,
function(ff) constrain_diag_3(p_prime_Zc_samples[ff,cc], diag_c_time_country_1_2[[ff]]))[[kk]],])
diag_s_time_country_3 =
lapply(1:num_samples, function(kk)
matrix(diag_s_time_country_1_2[[kk]],ncol=2)[lapply(1:num_samples,
function(ff) constrain_diag_3(p_prime_Zs_samples[ff,cc], diag_s_time_country_1_2[[ff]]))[[kk]],])
# calculate estimates p_Zc
vec_tmp = c()
for(ii in 1:num_samples){
out_tmp = calc_pZ(se_in = matrix(diag_c_time_country_3[[ii]], ncol = 2)[,1],
sp_in = matrix(diag_c_time_country_3[[ii]], ncol = 2)[,2],
p_prime_in = p_prime_Zc_samples[ii, cc])
vec_tmp = c(vec_tmp, out_tmp)
}
p_Zc[[cc]] = unlist(vec_tmp)
# calculate estimates p_Zs
vec_tmp = c()
for(ii in 1:num_samples){
out_tmp = calc_pZ(se_in = matrix(diag_s_time_country_3[[ii]], ncol = 2)[,1],
sp_in = matrix(diag_s_time_country_3[[ii]], ncol = 2)[,2],
p_prime_in = p_prime_Zs_samples[ii, cc], browse = F)
vec_tmp = c(vec_tmp, out_tmp)
}
p_Zs[[cc]] = unlist(vec_tmp)
}
#=============================================================================#
# save output
#=============================================================================#
save(p_Zs, p_Zc, file = '../output/3b_revised_p_temporal_aggregated.RData')
|
#===========================================
# Smart Metering Uncertainty Forecasting
#
# Author Estevao "Steve" Alvarenga
# efsa@bath.edu
# Created in 10/Feb/17
#-------------------------------------------
# smuf_main combined functions optim & fcst
#===========================================
#===========================================
# Initialising
#===========================================
setwd("~/GitRepos/smuf_rdev")
source("smuf_main-fxs.R")
savfile = "smuf_runf_1023_IR_seaf10-4h.rds"
wm01_00 <- readRDS("smuf_import-completeIRhour.rds")
importpar <- readRDS("smuf_import-parameter.rds")
s01 <- importpar[1]
s02 <- importpar[2]
s03 <- importpar[3]
sum_of_h <- importpar[4]
data_size <- importpar[5]
#===========================================
# Integrated Parameters
#===========================================
#cus_list to 1000, stp to 150 (detectcores), hrz_lim larger (0:167)*113), turn on CV
cus_list <- seq(1,100)
frontierstp <- 16 # Number of demand bins (Stepwise frontier for portfolio optimisation)
frontierexp <- 1 # Exponentiality of frontier steps
max.gen <- 100 # For genetic opt
waitgen <- 10 # For genetic opt
win_size <- c(4,24) # Small and large win_size (select only 2)
win_selec <- win_size[1]
cross_overh <- 4 # Cross-over forced for fx_fcst_kds_quickvector
ahead_t <- seq(1,6) # Up to s02
hrz_lim <- seq(1,10)*37 # Rolling forecasts steps {seq(0:167)*113} is comprehensive
in_sample_fr <- 1/6 # Fraction for diving in- and out-sample
crossvalsize <- 1 # Number of weeks in the end of in_sample used for crossvalidation
crossvalstps <- 16 # Steps used for multiple crossvalidation (Only KDE)
crossvalfocus <- c(1) # What period is focused when running crossvalidation
is_wins_weeks <- 12 # Number of weeks used for in-sample (KDE uses win_size) & seasonality
sampling <- 1024 # For monte-carlo CRPS calculation
armalags <- c(5,5) # Max lags for ARIMA fit in ARMA-GARCH model (use smuf_lags.R)
gof.min <- 0.05 # GoF crossover value to change ARMA-GARCH to KDS
#===========================================
# Call simulator
#===========================================
bighlpopgr <- list()
bighlpcrps <- list()
myleg = c("Random","sdev","seaf_pure","seaf+minEi")
for (h in hrz_lim){
ptm <- proc.time()
runkey <- Sys.time()
cat("\n\nStep",match(h,hrz_lim), "of",length(hrz_lim),"| Running BIG [h] LOOP with h =",h,"\n")
#===========================================
# Individual customers forecast
#===========================================
cat("[Ind] ")
wm01_01 <- wm01_00[min(cus_list):max(cus_list),]
wl06 <- fx_int_fcstgeneric_kdss(wm01_01,h,in_sample_fr,s01,s02,sum_of_h,win_selec,is_wins_weeks,crossvalsize,fcst_run,armalags,cross_overh,gof.min)
wv45 <- rowMeans(wl06[[1]])
sd01 <- as.numeric(fx_sd_mymat(wl06[[3]]))
wv46 <- seq(0,frontierstp)^frontierexp/frontierstp^frontierexp * sum(wv45)
#===========================================
# Random groups & evaluation
#===========================================
cat("[Rnd] ")
wm01_02l <- fx_rndgrp(wm01_01,frontierstp)
wm01_02 <- wm01_02l[[1]] / rowSums(wm01_02l[[2]])
wl06rnd <- fx_int_fcstgeneric_kdss(wm01_02,h,in_sample_fr,s01,s02,sum_of_h,win_selec,is_wins_weeks,crossvalsize,fcst_run,armalags,cross_overh,gof.min)
wv45rnd <- as.numeric(rowMeans(wl06rnd[[1]]) * rowSums(wm01_02l[[2]]))
# sd01rnd <- as.numeric(fx_sd_mymat(wl06rnd[[3]]))
cr01rnd <- rowMeans(wl06rnd[[2]])
#===========================================
# Optimising groups & evaluation
#===========================================
cat("[OptSDEV] ")
optgrp_sdev <- foreach (i = 1:frontierstp,
.packages=c("forecast","rgenoud"),
.combine=c("rbind")) %dopar% {
opt_min_cusd = wv46[i]
opt_max_cusd = wv46[i+1]
optgrp <- genoud(fx_optgrp_sdev, nvars=nrow(wm01_01), max.generations=max.gen, wait.generations=waitgen,
starting.values=c(rep(1,nrow(wm01_01))), Domains = cbind(c(rep(0,nrow(wm01_01))),c(rep(1,nrow(wm01_01)))),
data.type.int=TRUE, int.seed=1,
print.level=1)
if(optgrp$value == 10) {
grouped = c(rep(0,nrow(wm01_01)))
} else {
grouped = optgrp$par
}
grouped
}
bighlpopgr <- fx_sav_optgrps(c("sdev",h,frontierstp,length(cus_list),crossvalstps,armalags,runkey),optgrp_sdev)
res_sdev <- fx_applgrp(optgrp_sdev,wv46,wm01_01,fx_int_fcstgeneric_kdss,h,in_sample_fr,s01,s02,sum_of_h,win_size,is_wins_weeks,crossvalsize,armalags,cross_overh,crossvalfocus)
wm13seaf <- as.matrix(wl06[[4]])
wm14seaf <- as.matrix(wl06[[3]])
cat("[OptSEAF_pure] ")
optgrp_sfna <- foreach (i = 1:frontierstp,
.packages=c("forecast","rgenoud"),
.combine=c("rbind")) %dopar% {
opt_min_cusd = wv46[i]
opt_max_cusd = wv46[i+1]
optgrp <- genoud(fx_optgrp_seaf, nvars=nrow(wm01_01), max.generations=max.gen, wait.generations=waitgen,
starting.values=optgrp_sdev[i,], Domains = cbind(c(rep(0,nrow(wm01_01))),c(rep(1,nrow(wm01_01)))),
data.type.int=TRUE, int.seed=1,
print.level=1)
if(optgrp$value == 10) {
grouped = c(rep(0,nrow(wm01_01)))
} else {
grouped = optgrp$par
}
grouped
}
bighlpopgr <- fx_sav_optgrps(c("sfna",h,frontierstp,length(cus_list),crossvalstps,armalags,crossvalfocus,runkey),optgrp_sfna)
res_sfna <- fx_applgrp(optgrp_sfna,wv46,wm01_01,fx_int_fcstgeneric_kdss,h,in_sample_fr,s01,s02,sum_of_h,win_size,is_wins_weeks,crossvalsize,armalags,cross_overh,crossvalfocus)
cat("[OptSEAF+minEi] ")
optgrp_sfmE <- foreach (i = 1:frontierstp,
.packages=c("forecast","rgenoud"),
.combine=c("rbind")) %dopar% {
opt_min_cusd = wv46[i]
opt_max_cusd = wv46[i+1]
optgrp <- genoud(fx_optgrp_ssmix2, nvars=nrow(wm01_01), max.generations=max.gen, wait.generations=waitgen,
starting.values=optgrp_sdev[i,], Domains = cbind(c(rep(0,nrow(wm01_01))),c(rep(1,nrow(wm01_01)))),
data.type.int=TRUE, int.seed=1,
print.level=1,
defratsd=0.5)
if(optgrp$value == 10) {
grouped = c(rep(0,nrow(wm01_01)))
} else {
grouped = optgrp$par
}
grouped
}
bighlpopgr <- fx_sav_optgrps(c("sfmE",h,frontierstp,length(cus_list),crossvalstps,armalags,crossvalfocus,runkey),optgrp_sfmE)
res_sfmE <- fx_applgrp(optgrp_sfmE,wv46,wm01_01,fx_int_fcstgeneric_kdss,h,in_sample_fr,s01,s02,sum_of_h,win_size,is_wins_weeks,crossvalsize,armalags,cross_overh,crossvalfocus)
bighlpcrps <- fx_sav_optress(c("sdev+2seas",h,frontierstp,length(cus_list),crossvalstps,armalags,crossvalfocus,runkey),
list(c(h,frontierstp,length(cus_list)),cbind(cr01rnd,wv45rnd),res_sdev,res_sfna,res_sfmE))
saveRDS(list(bighlpopgr,bighlpcrps), file=savfile)
fx_plt_rnd_vs_opt(bighlpcrps[[length(bighlpcrps)]][[2]],c(0,0.1),c(0,sum(wv45)),myleg,"CRPS")
cat("\n")
print(proc.time() - ptm)
}
# Average the per-horizon CRPS results stored in bighlpcrps: each
# bighlpcrps[[i]][[2]] holds (length(myleg)+1) elements (a parameter vector
# followed by one result matrix per legend entry); sum them element-wise
# across horizon steps, then divide by the number of steps.
biglpcrpsavg = list()
for (j in 1:(length(myleg)+1)){
  biglpcrpsavg[[j]]=bighlpcrps[[1]][[2]][[j]]
}
# BUG FIX: the original `for (i in 2:length(bighlpcrps))` evaluates to
# c(2, 1) when only one horizon step was run, double-counting step 1 and
# indexing a non-existent element 2. Guard the accumulation loop.
if (length(bighlpcrps) > 1) {
  for (i in 2:length(bighlpcrps)){
    for (j in 1:(length(myleg)+1)){
      biglpcrpsavg[[j]] = biglpcrpsavg[[j]] + bighlpcrps[[i]][[2]][[j]]
    }
  }
}
for (j in 1:(length(myleg)+1)){
  biglpcrpsavg[[j]]=biglpcrpsavg[[j]]/length(bighlpcrps)
}
# Plot random vs optimised groupings and report each method's mean CRPS.
fx_plt_rnd_vs_opt(biglpcrpsavg,c(0,0.1),c(0,sum(wv45)),myleg,"CRPS")
for (j in 3:(length(myleg)+1)){
  cat('\n',myleg[(j-1)],' ',mean(biglpcrpsavg[[j]][,1],na.rm=T))
}
saveRDS(biglpcrpsavg, file=paste0("summary_",savfile))
| /smuf_runf_1023_IR_seaf10-4h.R | no_license | efsalvarenga/smuf_rdev | R | false | false | 9,237 | r | #===========================================
# Smart Metering Uncertainty Forecasting
#
# Author Estevao "Steve" Alvarenga
# efsa@bath.edu
# Created in 10/Feb/17
#-------------------------------------------
# smuf_main combined functions optim & fcst
#===========================================
#===========================================
# Initialising
#===========================================
setwd("~/GitRepos/smuf_rdev")
source("smuf_main-fxs.R")
savfile = "smuf_runf_1023_IR_seaf10-4h.rds"
wm01_00 <- readRDS("smuf_import-completeIRhour.rds")
importpar <- readRDS("smuf_import-parameter.rds")
s01 <- importpar[1]
s02 <- importpar[2]
s03 <- importpar[3]
sum_of_h <- importpar[4]
data_size <- importpar[5]
#===========================================
# Integrated Parameters
#===========================================
#cus_list to 1000, stp to 150 (detectcores), hrz_lim larger (0:167)*113), turn on CV
cus_list <- seq(1,100)
frontierstp <- 16 # Number of demand bins (Stepwise frontier for portfolio optimisation)
frontierexp <- 1 # Exponentiality of frontier steps
max.gen <- 100 # For genetic opt
waitgen <- 10 # For genetic opt
win_size <- c(4,24) # Small and large win_size (select only 2)
win_selec <- win_size[1]
cross_overh <- 4 # Cross-over forced for fx_fcst_kds_quickvector
ahead_t <- seq(1,6) # Up to s02
hrz_lim <- seq(1,10)*37 # Rolling forecasts steps {seq(0:167)*113} is comprehensive
in_sample_fr <- 1/6 # Fraction for diving in- and out-sample
crossvalsize <- 1 # Number of weeks in the end of in_sample used for crossvalidation
crossvalstps <- 16 # Steps used for multiple crossvalidation (Only KDE)
crossvalfocus <- c(1) # What period is focused when running crossvalidation
is_wins_weeks <- 12 # Number of weeks used for in-sample (KDE uses win_size) & seasonality
sampling <- 1024 # For monte-carlo CRPS calculation
armalags <- c(5,5) # Max lags for ARIMA fit in ARMA-GARCH model (use smuf_lags.R)
gof.min <- 0.05 # GoF crossover value to change ARMA-GARCH to KDS
#===========================================
# Call simulator
#===========================================
bighlpopgr <- list()
bighlpcrps <- list()
myleg = c("Random","sdev","seaf_pure","seaf+minEi")
for (h in hrz_lim){
ptm <- proc.time()
runkey <- Sys.time()
cat("\n\nStep",match(h,hrz_lim), "of",length(hrz_lim),"| Running BIG [h] LOOP with h =",h,"\n")
#===========================================
# Individual customers forecast
#===========================================
cat("[Ind] ")
wm01_01 <- wm01_00[min(cus_list):max(cus_list),]
wl06 <- fx_int_fcstgeneric_kdss(wm01_01,h,in_sample_fr,s01,s02,sum_of_h,win_selec,is_wins_weeks,crossvalsize,fcst_run,armalags,cross_overh,gof.min)
wv45 <- rowMeans(wl06[[1]])
sd01 <- as.numeric(fx_sd_mymat(wl06[[3]]))
wv46 <- seq(0,frontierstp)^frontierexp/frontierstp^frontierexp * sum(wv45)
#===========================================
# Random groups & evaluation
#===========================================
cat("[Rnd] ")
wm01_02l <- fx_rndgrp(wm01_01,frontierstp)
wm01_02 <- wm01_02l[[1]] / rowSums(wm01_02l[[2]])
wl06rnd <- fx_int_fcstgeneric_kdss(wm01_02,h,in_sample_fr,s01,s02,sum_of_h,win_selec,is_wins_weeks,crossvalsize,fcst_run,armalags,cross_overh,gof.min)
wv45rnd <- as.numeric(rowMeans(wl06rnd[[1]]) * rowSums(wm01_02l[[2]]))
# sd01rnd <- as.numeric(fx_sd_mymat(wl06rnd[[3]]))
cr01rnd <- rowMeans(wl06rnd[[2]])
#===========================================
# Optimising groups & evaluation
#===========================================
cat("[OptSDEV] ")
optgrp_sdev <- foreach (i = 1:frontierstp,
.packages=c("forecast","rgenoud"),
.combine=c("rbind")) %dopar% {
opt_min_cusd = wv46[i]
opt_max_cusd = wv46[i+1]
optgrp <- genoud(fx_optgrp_sdev, nvars=nrow(wm01_01), max.generations=max.gen, wait.generations=waitgen,
starting.values=c(rep(1,nrow(wm01_01))), Domains = cbind(c(rep(0,nrow(wm01_01))),c(rep(1,nrow(wm01_01)))),
data.type.int=TRUE, int.seed=1,
print.level=1)
if(optgrp$value == 10) {
grouped = c(rep(0,nrow(wm01_01)))
} else {
grouped = optgrp$par
}
grouped
}
bighlpopgr <- fx_sav_optgrps(c("sdev",h,frontierstp,length(cus_list),crossvalstps,armalags,runkey),optgrp_sdev)
res_sdev <- fx_applgrp(optgrp_sdev,wv46,wm01_01,fx_int_fcstgeneric_kdss,h,in_sample_fr,s01,s02,sum_of_h,win_size,is_wins_weeks,crossvalsize,armalags,cross_overh,crossvalfocus)
wm13seaf <- as.matrix(wl06[[4]])
wm14seaf <- as.matrix(wl06[[3]])
cat("[OptSEAF_pure] ")
optgrp_sfna <- foreach (i = 1:frontierstp,
.packages=c("forecast","rgenoud"),
.combine=c("rbind")) %dopar% {
opt_min_cusd = wv46[i]
opt_max_cusd = wv46[i+1]
optgrp <- genoud(fx_optgrp_seaf, nvars=nrow(wm01_01), max.generations=max.gen, wait.generations=waitgen,
starting.values=optgrp_sdev[i,], Domains = cbind(c(rep(0,nrow(wm01_01))),c(rep(1,nrow(wm01_01)))),
data.type.int=TRUE, int.seed=1,
print.level=1)
if(optgrp$value == 10) {
grouped = c(rep(0,nrow(wm01_01)))
} else {
grouped = optgrp$par
}
grouped
}
bighlpopgr <- fx_sav_optgrps(c("sfna",h,frontierstp,length(cus_list),crossvalstps,armalags,crossvalfocus,runkey),optgrp_sfna)
res_sfna <- fx_applgrp(optgrp_sfna,wv46,wm01_01,fx_int_fcstgeneric_kdss,h,in_sample_fr,s01,s02,sum_of_h,win_size,is_wins_weeks,crossvalsize,armalags,cross_overh,crossvalfocus)
cat("[OptSEAF+minEi] ")
optgrp_sfmE <- foreach (i = 1:frontierstp,
.packages=c("forecast","rgenoud"),
.combine=c("rbind")) %dopar% {
opt_min_cusd = wv46[i]
opt_max_cusd = wv46[i+1]
optgrp <- genoud(fx_optgrp_ssmix2, nvars=nrow(wm01_01), max.generations=max.gen, wait.generations=waitgen,
starting.values=optgrp_sdev[i,], Domains = cbind(c(rep(0,nrow(wm01_01))),c(rep(1,nrow(wm01_01)))),
data.type.int=TRUE, int.seed=1,
print.level=1,
defratsd=0.5)
if(optgrp$value == 10) {
grouped = c(rep(0,nrow(wm01_01)))
} else {
grouped = optgrp$par
}
grouped
}
bighlpopgr <- fx_sav_optgrps(c("sfmE",h,frontierstp,length(cus_list),crossvalstps,armalags,crossvalfocus,runkey),optgrp_sfmE)
res_sfmE <- fx_applgrp(optgrp_sfmE,wv46,wm01_01,fx_int_fcstgeneric_kdss,h,in_sample_fr,s01,s02,sum_of_h,win_size,is_wins_weeks,crossvalsize,armalags,cross_overh,crossvalfocus)
bighlpcrps <- fx_sav_optress(c("sdev+2seas",h,frontierstp,length(cus_list),crossvalstps,armalags,crossvalfocus,runkey),
list(c(h,frontierstp,length(cus_list)),cbind(cr01rnd,wv45rnd),res_sdev,res_sfna,res_sfmE))
saveRDS(list(bighlpopgr,bighlpcrps), file=savfile)
fx_plt_rnd_vs_opt(bighlpcrps[[length(bighlpcrps)]][[2]],c(0,0.1),c(0,sum(wv45)),myleg,"CRPS")
cat("\n")
print(proc.time() - ptm)
}
# Average the per-horizon CRPS results stored in bighlpcrps: each
# bighlpcrps[[i]][[2]] holds (length(myleg)+1) elements (a parameter vector
# followed by one result matrix per legend entry); sum them element-wise
# across horizon steps, then divide by the number of steps.
biglpcrpsavg = list()
for (j in 1:(length(myleg)+1)){
  biglpcrpsavg[[j]]=bighlpcrps[[1]][[2]][[j]]
}
# BUG FIX: the original `for (i in 2:length(bighlpcrps))` evaluates to
# c(2, 1) when only one horizon step was run, double-counting step 1 and
# indexing a non-existent element 2. Guard the accumulation loop.
if (length(bighlpcrps) > 1) {
  for (i in 2:length(bighlpcrps)){
    for (j in 1:(length(myleg)+1)){
      biglpcrpsavg[[j]] = biglpcrpsavg[[j]] + bighlpcrps[[i]][[2]][[j]]
    }
  }
}
for (j in 1:(length(myleg)+1)){
  biglpcrpsavg[[j]]=biglpcrpsavg[[j]]/length(bighlpcrps)
}
# Plot random vs optimised groupings and report each method's mean CRPS.
fx_plt_rnd_vs_opt(biglpcrpsavg,c(0,0.1),c(0,sum(wv45)),myleg,"CRPS")
for (j in 3:(length(myleg)+1)){
  cat('\n',myleg[(j-1)],' ',mean(biglpcrpsavg[[j]][,1],na.rm=T))
}
saveRDS(biglpcrpsavg, file=paste0("summary_",savfile))
|
#
# lines-hour-outlier.R, 16 Mar 14
#
# Data from:
# On the effectiveness of early life cycle defect prediction with Bayesian Nets
# Norman Fenton and Martin Neil and William Marsh and Peter Hearty and {\L}ukasz Radli\'{n}ski and Paul Krause
#
# Example from:
# Empirical Software Engineering using R
# Derek M. Jones
source("ESEUR_config.r")
library(car)
brew_col=rainbow(3)
plot_layout(1, 2)
loc_hour=read.csv(paste0(ESEUR_dir, "regression/10.1.1.157.6206.csv.xz"), as.is=TRUE)
loc_hour=subset(loc_hour, !is.na(KLoC))
loc_hour=loc_hour[order(loc_hour$KLoC), ]
# cook.levels uses row.names and it is confusing because the original ones
# are maintained from before: loc_hour[order(loc_hour$KLoC), ]
row.names(loc_hour)=1:nrow(loc_hour)
x_bounds=range(loc_hour$Hours)
y_bounds=range(loc_hour$KLoC)
# Scatter KLoC against Hours (axis limits come from the file-level globals
# x_bounds/y_bounds so successive panels are comparable), fit a GLM of KLoC
# on Hours and overlay the fitted line with an approximate 95% confidence
# band (fit +/- 1.96 * standard error). Returns the fitted model.
Hours_KLoC=function(df)
{
  plot(df$Hours, df$KLoC,
       xlim=x_bounds, ylim=y_bounds,
       xlab="Hours", ylab="Lines of code (Kloc)")
  kloc_mod=glm(KLoC ~ Hours, data=df)
  kloc_pred=predict(kloc_mod, type="response", se.fit=TRUE)
  half_width=1.96*kloc_pred$se.fit
  lines(df$Hours, kloc_pred$fit, col=brew_col[1])
  lines(df$Hours, kloc_pred$fit+half_width, col=brew_col[3])
  lines(df$Hours, kloc_pred$fit-half_width, col=brew_col[3])
  kloc_mod
}
# Plot Cook's distance for `model` (plot.lm which=4) with an influence
# cutoff drawn as a dashed red line.
# NOTE(review): the common rule of thumb is 4/(n - k - 1) with k predictors,
# i.e. 4/(nrow(df)-1-1) for this one-predictor model; `nrow(df)+1+1` looks
# like it may be a sign slip -- confirm the intended cutoff.
plot_cutoff=function(df, model)
{
  cutoff=4/(nrow(df)+1+1)
  plot(model, which=4, cook.levels=cutoff, caption="")
  abline(h=cutoff, lty=2, col="red")
}
all_mod=Hours_KLoC(loc_hour)
# plot_cutoff(loc_hour, all_mod)
s1_loc_hour=loc_hour[-c(21,25,29),]
# subset1_mod=Hours_KLoC(s1_loc_hour)
# plot_cutoff(s1_loc_hour, subset1_mod)
#
# influenceIndexPlot(all_mod)
s2_loc_hour=s1_loc_hour[-c(23,24,26),]
# subset2_mod=Hours_KLoC(s2_loc_hour)
# plot_cutoff(s2_loc_hour, subset2_mod)
s3_loc_hour=s2_loc_hour[-c(19,23),]
# subset3_mod=Hours_KLoC(s3_loc_hour)
# plot_cutoff(s3_loc_hour, subset3_mod)
s4_loc_hour=s3_loc_hour[-c(4,19,20),]
# subset4_mod=Hours_KLoC(s4_loc_hour)
# plot_cutoff(s4_loc_hour, subset4_mod)
s5_loc_hour=s4_loc_hour[-c(17),]
# subset5_mod=Hours_KLoC(s5_loc_hour)
# plot_cutoff(s5_loc_hour, subset5_mod)
s6_loc_hour=s5_loc_hour[-c(17),]
# subset6_mod=Hours_KLoC(s6_loc_hour)
# plot_cutoff(s6_loc_hour, subset6_mod)
s7_loc_hour=s6_loc_hour[-c(12,14),]
# subset7_mod=Hours_KLoC(s7_loc_hour)
# plot_cutoff(s7_loc_hour, subset7_mod)
s8_loc_hour=s7_loc_hour[-c(12),]
# subset8_mod=Hours_KLoC(s8_loc_hour)
# plot_cutoff(s8_loc_hour, subset8_mod)
s9_loc_hour=s8_loc_hour[-c(12),]
# subset9_mod=Hours_KLoC(s9_loc_hour)
# plot_cutoff(s9_loc_hour, subset9_mod)
s10_loc_hour=s9_loc_hour[-c(12),]
subset10_mod=Hours_KLoC(s10_loc_hour)
# plot_cutoff(s10_loc_hour, subset10_mod)
#
# outlierTest(all_mod)
# influencePlot(all_mod)
#
#
# library(robustbase)
#
# rlh_mod=glmrob(Hours ~ KLoC, family=gaussian, data=loc_hour)
| /regression/misc/lines-hour-outlier.R | no_license | Derek-Jones/ESEUR-code-data | R | false | false | 2,763 | r | #
# lines-hour-outlier.R, 16 Mar 14
#
# Data from:
# On the effectiveness of early life cycle defect prediction with Bayesian Nets
# Norman Fenton and Martin Neil and William Marsh and Peter Hearty and {\L}ukasz Radli\'{n}ski and Paul Krause
#
# Example from:
# Empirical Software Engineering using R
# Derek M. Jones
source("ESEUR_config.r")
library(car)
brew_col=rainbow(3)
plot_layout(1, 2)
loc_hour=read.csv(paste0(ESEUR_dir, "regression/10.1.1.157.6206.csv.xz"), as.is=TRUE)
loc_hour=subset(loc_hour, !is.na(KLoC))
loc_hour=loc_hour[order(loc_hour$KLoC), ]
# cook.levels uses row.names and it is confusing because the original ones
# are maintained from before: loc_hour[order(loc_hour$KLoC), ]
row.names(loc_hour)=1:nrow(loc_hour)
x_bounds=range(loc_hour$Hours)
y_bounds=range(loc_hour$KLoC)
# Scatter KLoC against Hours (axis limits come from the file-level globals
# x_bounds/y_bounds so successive panels are comparable), fit a GLM of KLoC
# on Hours and overlay the fitted line with an approximate 95% confidence
# band (fit +/- 1.96 * standard error). Returns the fitted model.
Hours_KLoC=function(df)
{
  plot(df$Hours, df$KLoC,
       xlim=x_bounds, ylim=y_bounds,
       xlab="Hours", ylab="Lines of code (Kloc)")
  kloc_mod=glm(KLoC ~ Hours, data=df)
  kloc_pred=predict(kloc_mod, type="response", se.fit=TRUE)
  half_width=1.96*kloc_pred$se.fit
  lines(df$Hours, kloc_pred$fit, col=brew_col[1])
  lines(df$Hours, kloc_pred$fit+half_width, col=brew_col[3])
  lines(df$Hours, kloc_pred$fit-half_width, col=brew_col[3])
  kloc_mod
}
# Plot Cook's distance for `model` (plot.lm which=4) with an influence
# cutoff drawn as a dashed red line.
# NOTE(review): the common rule of thumb is 4/(n - k - 1) with k predictors,
# i.e. 4/(nrow(df)-1-1) for this one-predictor model; `nrow(df)+1+1` looks
# like it may be a sign slip -- confirm the intended cutoff.
plot_cutoff=function(df, model)
{
  cutoff=4/(nrow(df)+1+1)
  plot(model, which=4, cook.levels=cutoff, caption="")
  abline(h=cutoff, lty=2, col="red")
}
all_mod=Hours_KLoC(loc_hour)
# plot_cutoff(loc_hour, all_mod)
s1_loc_hour=loc_hour[-c(21,25,29),]
# subset1_mod=Hours_KLoC(s1_loc_hour)
# plot_cutoff(s1_loc_hour, subset1_mod)
#
# influenceIndexPlot(all_mod)
s2_loc_hour=s1_loc_hour[-c(23,24,26),]
# subset2_mod=Hours_KLoC(s2_loc_hour)
# plot_cutoff(s2_loc_hour, subset2_mod)
s3_loc_hour=s2_loc_hour[-c(19,23),]
# subset3_mod=Hours_KLoC(s3_loc_hour)
# plot_cutoff(s3_loc_hour, subset3_mod)
s4_loc_hour=s3_loc_hour[-c(4,19,20),]
# subset4_mod=Hours_KLoC(s4_loc_hour)
# plot_cutoff(s4_loc_hour, subset4_mod)
s5_loc_hour=s4_loc_hour[-c(17),]
# subset5_mod=Hours_KLoC(s5_loc_hour)
# plot_cutoff(s5_loc_hour, subset5_mod)
s6_loc_hour=s5_loc_hour[-c(17),]
# subset6_mod=Hours_KLoC(s6_loc_hour)
# plot_cutoff(s6_loc_hour, subset6_mod)
s7_loc_hour=s6_loc_hour[-c(12,14),]
# subset7_mod=Hours_KLoC(s7_loc_hour)
# plot_cutoff(s7_loc_hour, subset7_mod)
s8_loc_hour=s7_loc_hour[-c(12),]
# subset8_mod=Hours_KLoC(s8_loc_hour)
# plot_cutoff(s8_loc_hour, subset8_mod)
s9_loc_hour=s8_loc_hour[-c(12),]
# subset9_mod=Hours_KLoC(s9_loc_hour)
# plot_cutoff(s9_loc_hour, subset9_mod)
s10_loc_hour=s9_loc_hour[-c(12),]
subset10_mod=Hours_KLoC(s10_loc_hour)
# plot_cutoff(s10_loc_hour, subset10_mod)
#
# outlierTest(all_mod)
# influencePlot(all_mod)
#
#
# library(robustbase)
#
# rlh_mod=glmrob(Hours ~ KLoC, family=gaussian, data=loc_hour)
|
library(tidyverse)
#Information on APIs available at https://www.census.gov/data/developers/data-sets.html
#Pull in data on vacancies by tract, ACS 15-19
# NOTE(review): this endpoint returns JSON; read.csv() only works because the
# payload happens to be comma-separated, and it leaves stray "[" / "]"
# brackets and mangled header names that are cleaned up below. Confirm a
# proper JSON parser would not be preferable.
Vacancies <- read.csv(file="https://api.census.gov/data/2019/acs/acs5?get=NAME,group(B25004)&for=tract:*&in=state:36%20county:005,047,061,081,085")
#Clean file
# Rename the mangled header columns and strip the JSON bracket characters.
names(Vacancies)[names(Vacancies) == "X..NAME"] <- "Name"
names(Vacancies)[names(Vacancies) == "tract."] <- "tract"
Vacancies$Name <- gsub("\\[", "", Vacancies$Name)
Vacancies$tract <- gsub("\\]", "", Vacancies$tract)
#Import crosswalk file for tracts to NTA
Tract2NTA <- read.csv("https://raw.githubusercontent.com/mayijun1203/SKILLGH/master/RTract2010_to_NTA2010.csv")
#Create a variable for boro that matches tract to NTA crosswalk file
Vacancies$Boro <- 0
Vacancies$Boro[which(Vacancies$county==5)] <- 2
Vacancies$Boro[which(Vacancies$county==47)] <- 3
Vacancies$Boro[which(Vacancies$county==61)] <- 1
Vacancies$Boro[which(Vacancies$county==81)] <- 4
Vacancies$Boro[which(Vacancies$county==85)] <- 5
#Create BoroCT2010 variable to match tract to NTA crosswalk file
Vacancies$BoroCT2010 <- paste0(Vacancies$Boro,Vacancies$tract)
#Merge crosswalk file to ACS data
Vacancies <- merge(Vacancies, Tract2NTA, by="BoroCT2010")
#Aggregate ACS data to the NTA level
Vacancies <- aggregate(Vacancies[ ,c("B25004_001E", "B25004_002E", "B25004_003E", "B25004_004E", "B25004_005E", "B25004_006E", "B25004_007E", "B25004_008E")], by=list(Vacancies$NTACode), FUN=sum, na.rm=T)
#Calculate percent of vacants within each type
attach(Vacancies)
Vacancies$RentPer <- B25004_002E/B25004_001E*100
Vacancies$RentedPer <- B25004_003E/B25004_001E*100
Vacancies$SalePer <- B25004_004E/B25004_001E*100
Vacancies$SoldPer <- B25004_005E/B25004_001E*100
Vacancies$SeasonalPer <- B25004_006E/B25004_001E*100
Vacancies$MigrantPer <- B25004_007E/B25004_001E*100
Vacancies$OtherPer <- B25004_008E/B25004_001E*100
detach(Vacancies)
#Create a new dataset with just the variables of interest
VacanciesPer <- Vacancies[ ,c(1, 10:16)]
#Reshape file from wide to long format for graphing
VacanciesLong <- gather(VacanciesPer, Type, Percent, 2:8, factor_key=T)
#Rename variables
names(VacanciesLong) <- c("NTACode", "Type", "Percent")
#Create a variable for the borough by taking first two characters of NTA code
VacanciesLong$Boro <- substr(VacanciesLong$NTACode, 1, 2)
#Create a new dataset for just Manhattan
VacanciesLongMN <- VacanciesLong[which(VacanciesLong$Boro=="MN"),]
#Plot percent of vacants within each type for Manhattan NTAs as a stacked bar chart, NTAs side by side
VacanciesplotMNstacked <- ggplot(data=VacanciesLongMN) +
geom_bar(aes(x=NTACode, y=Percent, fill=Type), position="stack", stat="identity") +
scale_y_continuous(breaks=seq(0,100,20)) +
scale_fill_brewer(palette="Accent", labels=c("For Rent", "Rented, Not Occupied", "For Sale Only", "Sold, Not Occupied", "For Seasonal, Recreational, or Occasional Use", "For Migrant Workers", "Other Vacant")) +
labs(title="Manhattan: Percent of Vacancies by Type", x="Neighborhood Tabulation Area", y="Percent of All Vacants", fill="Type of Vacant") +
theme_bw() +
theme(axis.text.x=element_text(angle=90, face="bold"), plot.title=element_text(hjust=0.5), legend.position="bottom")
VacanciesplotMNstacked
#Remove MN99
VacanciesLongMN2 <- VacanciesLongMN[VacanciesLongMN$NTACode!="MN99",]
#Plot percent of vacants within each type for Manhattan NTAs, with each NTA displayed separately
VacanciesplotMNFacet <- ggplot(data=VacanciesLongMN2) +
geom_bar(aes(x=Type, y=Percent, fill=Type), stat="identity") +
scale_y_continuous(breaks=seq(0,100,20)) +
scale_fill_brewer(palette="Accent", labels=c("For Rent", "Rented, Not Occupied", "For Sale Only", "Sold, Not Occupied", "For Seasonal, Recreational, or Occasional Use", "For Migrant Workers", "Other Vacant")) +
facet_wrap(vars(VacanciesLongMN2$NTACode), ncol=7) +
labs(title="Manhattan: Percent of Vacancies by Type", x="Type of Vacancy", y="Percent of All Vacants", fill="Type of Vacant") +
theme_bw() +
theme(axis.text.x=element_blank(), plot.title=element_text(hjust=0.5), legend.position="bottom", axis.ticks.x=element_blank())
VacanciesplotMNFacet
| /R/Eric's script for ACS API.R | no_license | ssanichar/SKILLGH | R | false | false | 4,194 | r |
# ---------------------------------------------------------------------------
# Vacant housing units by type for NYC Neighborhood Tabulation Areas (NTAs),
# ACS 2015-2019 five-year estimates, table B25004.
# Pipeline: Census API (tract level) -> tract-to-NTA crosswalk -> NTA sums
# -> percentage by vacancy type -> bar charts for Manhattan.
# Information on APIs available at
# https://www.census.gov/data/developers/data-sets.html
# ---------------------------------------------------------------------------
library(tidyverse)
# Pull tract-level vacancy counts for the five NYC counties (state 36 = NY).
Vacancies <- read.csv(file="https://api.census.gov/data/2019/acs/acs5?get=NAME,group(B25004)&for=tract:*&in=state:36%20county:005,047,061,081,085")
# The endpoint returns JSON-style text, so read.csv mangles the first and
# last headers and leaves stray brackets in the values; clean those up.
names(Vacancies)[names(Vacancies) == "X..NAME"] <- "Name"
names(Vacancies)[names(Vacancies) == "tract."] <- "tract"
Vacancies$Name <- gsub("\\[", "", Vacancies$Name)
Vacancies$tract <- gsub("\\]", "", Vacancies$tract)
# Import the census-tract-to-NTA crosswalk file.
Tract2NTA <- read.csv("https://raw.githubusercontent.com/mayijun1203/SKILLGH/master/RTract2010_to_NTA2010.csv")
# Recode the county FIPS code into the borough code the crosswalk expects
# (1 Manhattan, 2 Bronx, 3 Brooklyn, 4 Queens, 5 Staten Island).
Vacancies$Boro <- 0
Vacancies$Boro[which(Vacancies$county == 5)] <- 2
Vacancies$Boro[which(Vacancies$county == 47)] <- 3
Vacancies$Boro[which(Vacancies$county == 61)] <- 1
Vacancies$Boro[which(Vacancies$county == 81)] <- 4
Vacancies$Boro[which(Vacancies$county == 85)] <- 5
# Borough code + tract number forms the BoroCT2010 key used by the crosswalk.
Vacancies$BoroCT2010 <- paste0(Vacancies$Boro, Vacancies$tract)
# Attach the NTA code to every tract, then sum the counts up to NTA level.
Vacancies <- merge(Vacancies, Tract2NTA, by="BoroCT2010")
vac_cols <- c("B25004_001E", "B25004_002E", "B25004_003E", "B25004_004E",
              "B25004_005E", "B25004_006E", "B25004_007E", "B25004_008E")
Vacancies <- aggregate(Vacancies[, vac_cols], by=list(Vacancies$NTACode),
                       FUN=sum, na.rm=TRUE)
# Share of each vacancy type among all vacant units (B25004_001E = total).
# Direct column references replace the original attach()/detach(), which
# pollutes the search path and fails silently if column names drift.
total_vacant <- Vacancies$B25004_001E
Vacancies$RentPer     <- Vacancies$B25004_002E / total_vacant * 100
Vacancies$RentedPer   <- Vacancies$B25004_003E / total_vacant * 100
Vacancies$SalePer     <- Vacancies$B25004_004E / total_vacant * 100
Vacancies$SoldPer     <- Vacancies$B25004_005E / total_vacant * 100
Vacancies$SeasonalPer <- Vacancies$B25004_006E / total_vacant * 100
Vacancies$MigrantPer  <- Vacancies$B25004_007E / total_vacant * 100
Vacancies$OtherPer    <- Vacancies$B25004_008E / total_vacant * 100
# Keep just the NTA code (column 1) and the seven percentage columns (10:16).
VacanciesPer <- Vacancies[, c(1, 10:16)]
# Wide -> long for ggplot.  gather() is superseded by pivot_longer() but is
# kept deliberately: it preserves the exact column-major row order.
VacanciesLong <- gather(VacanciesPer, Type, Percent, 2:8, factor_key=TRUE)
names(VacanciesLong) <- c("NTACode", "Type", "Percent")
# The first two characters of the NTA code identify the borough (e.g. "MN").
VacanciesLong$Boro <- substr(VacanciesLong$NTACode, 1, 2)
VacanciesLongMN <- VacanciesLong[which(VacanciesLong$Boro == "MN"), ]
# Stacked bar chart: vacancy-type mix for every Manhattan NTA side by side.
VacanciesplotMNstacked <- ggplot(data=VacanciesLongMN) +
  geom_bar(aes(x=NTACode, y=Percent, fill=Type), position="stack", stat="identity") +
  scale_y_continuous(breaks=seq(0, 100, 20)) +
  scale_fill_brewer(palette="Accent", labels=c("For Rent", "Rented, Not Occupied", "For Sale Only", "Sold, Not Occupied", "For Seasonal, Recreational, or Occasional Use", "For Migrant Workers", "Other Vacant")) +
  labs(title="Manhattan: Percent of Vacancies by Type", x="Neighborhood Tabulation Area", y="Percent of All Vacants", fill="Type of Vacant") +
  theme_bw() +
  theme(axis.text.x=element_text(angle=90, face="bold"), plot.title=element_text(hjust=0.5), legend.position="bottom")
VacanciesplotMNstacked
# Remove MN99 before faceting.
VacanciesLongMN2 <- VacanciesLongMN[VacanciesLongMN$NTACode != "MN99", ]
# Same data, one small panel per NTA.  vars(NTACode) uses ggplot2's data
# masking; the original vars(VacanciesLongMN2$NTACode) bypassed it and would
# desynchronise if the plot data were ever filtered.
VacanciesplotMNFacet <- ggplot(data=VacanciesLongMN2) +
  geom_bar(aes(x=Type, y=Percent, fill=Type), stat="identity") +
  scale_y_continuous(breaks=seq(0, 100, 20)) +
  scale_fill_brewer(palette="Accent", labels=c("For Rent", "Rented, Not Occupied", "For Sale Only", "Sold, Not Occupied", "For Seasonal, Recreational, or Occasional Use", "For Migrant Workers", "Other Vacant")) +
  facet_wrap(vars(NTACode), ncol=7) +
  labs(title="Manhattan: Percent of Vacancies by Type", x="Type of Vacancy", y="Percent of All Vacants", fill="Type of Vacant") +
  theme_bw() +
  theme(axis.text.x=element_blank(), plot.title=element_text(hjust=0.5), legend.position="bottom", axis.ticks.x=element_blank())
VacanciesplotMNFacet
|
# Appears to be an auto-generated fuzzing harness (AFL/RcppDeepState-style,
# per the surrounding file path) for the CNull package: it replays one
# generated argument list against an unexported C++ routine.
# `m` is deliberately NULL and `repetitions` 0L to probe degenerate-input
# handling; `in_m` is an 8x3 matrix mixing zeros with extreme-magnitude
# doubles.
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.53817551737791e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(8L, 3L)))
# Call the internal (:::) function with the fuzzed arguments.
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result) | /CNull/inst/testfiles/communities_individual_based_sampling_alpha/AFL_communities_individual_based_sampling_alpha/communities_individual_based_sampling_alpha_valgrind_files/1615783564-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 329 | r | testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.53817551737791e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(8L, 3L)))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result) |
# This file is automatically generated, you probably don't want to edit this
logRegOrdOptions <- if (requireNamespace("jmvcore", quietly=TRUE)) R6::R6Class(
"logRegOrdOptions",
inherit = jmvcore::Options,
public = list(
initialize = function(
dep = NULL,
covs = NULL,
factors = NULL,
blocks = list(
list()),
refLevels = NULL,
modelTest = FALSE,
dev = TRUE,
aic = TRUE,
bic = FALSE,
pseudoR2 = list(
"r2mf"),
omni = FALSE,
thres = FALSE,
ci = FALSE,
ciWidth = 95,
OR = FALSE,
ciOR = FALSE,
ciWidthOR = 95, ...) {
super$initialize(
package="jmv",
name="logRegOrd",
requiresData=TRUE,
...)
private$..dep <- jmvcore::OptionVariable$new(
"dep",
dep,
suggested=list(
"ordinal"),
permitted=list(
"factor"))
private$..covs <- jmvcore::OptionVariables$new(
"covs",
covs,
suggested=list(
"continuous"),
permitted=list(
"numeric"),
default=NULL)
private$..factors <- jmvcore::OptionVariables$new(
"factors",
factors,
suggested=list(
"nominal"),
permitted=list(
"factor"),
default=NULL)
private$..blocks <- jmvcore::OptionArray$new(
"blocks",
blocks,
default=list(
list()),
template=jmvcore::OptionTerms$new(
"blocks",
NULL))
private$..refLevels <- jmvcore::OptionArray$new(
"refLevels",
refLevels,
items="(factors)",
default=NULL,
template=jmvcore::OptionGroup$new(
"refLevels",
NULL,
elements=list(
jmvcore::OptionVariable$new(
"var",
NULL,
content="$key"),
jmvcore::OptionLevel$new(
"ref",
NULL))))
private$..modelTest <- jmvcore::OptionBool$new(
"modelTest",
modelTest,
default=FALSE)
private$..dev <- jmvcore::OptionBool$new(
"dev",
dev,
default=TRUE)
private$..aic <- jmvcore::OptionBool$new(
"aic",
aic,
default=TRUE)
private$..bic <- jmvcore::OptionBool$new(
"bic",
bic,
default=FALSE)
private$..pseudoR2 <- jmvcore::OptionNMXList$new(
"pseudoR2",
pseudoR2,
options=list(
"r2mf",
"r2cs",
"r2n"),
default=list(
"r2mf"))
private$..omni <- jmvcore::OptionBool$new(
"omni",
omni,
default=FALSE)
private$..thres <- jmvcore::OptionBool$new(
"thres",
thres,
default=FALSE)
private$..ci <- jmvcore::OptionBool$new(
"ci",
ci,
default=FALSE)
private$..ciWidth <- jmvcore::OptionNumber$new(
"ciWidth",
ciWidth,
min=50,
max=99.9,
default=95)
private$..OR <- jmvcore::OptionBool$new(
"OR",
OR,
default=FALSE)
private$..ciOR <- jmvcore::OptionBool$new(
"ciOR",
ciOR,
default=FALSE)
private$..ciWidthOR <- jmvcore::OptionNumber$new(
"ciWidthOR",
ciWidthOR,
min=50,
max=99.9,
default=95)
self$.addOption(private$..dep)
self$.addOption(private$..covs)
self$.addOption(private$..factors)
self$.addOption(private$..blocks)
self$.addOption(private$..refLevels)
self$.addOption(private$..modelTest)
self$.addOption(private$..dev)
self$.addOption(private$..aic)
self$.addOption(private$..bic)
self$.addOption(private$..pseudoR2)
self$.addOption(private$..omni)
self$.addOption(private$..thres)
self$.addOption(private$..ci)
self$.addOption(private$..ciWidth)
self$.addOption(private$..OR)
self$.addOption(private$..ciOR)
self$.addOption(private$..ciWidthOR)
}),
active = list(
dep = function() private$..dep$value,
covs = function() private$..covs$value,
factors = function() private$..factors$value,
blocks = function() private$..blocks$value,
refLevels = function() private$..refLevels$value,
modelTest = function() private$..modelTest$value,
dev = function() private$..dev$value,
aic = function() private$..aic$value,
bic = function() private$..bic$value,
pseudoR2 = function() private$..pseudoR2$value,
omni = function() private$..omni$value,
thres = function() private$..thres$value,
ci = function() private$..ci$value,
ciWidth = function() private$..ciWidth$value,
OR = function() private$..OR$value,
ciOR = function() private$..ciOR$value,
ciWidthOR = function() private$..ciWidthOR$value),
private = list(
..dep = NA,
..covs = NA,
..factors = NA,
..blocks = NA,
..refLevels = NA,
..modelTest = NA,
..dev = NA,
..aic = NA,
..bic = NA,
..pseudoR2 = NA,
..omni = NA,
..thres = NA,
..ci = NA,
..ciWidth = NA,
..OR = NA,
..ciOR = NA,
..ciWidthOR = NA)
)
logRegOrdResults <- if (requireNamespace("jmvcore", quietly=TRUE)) R6::R6Class(
"logRegOrdResults",
inherit = jmvcore::Group,
active = list(
modelFit = function() private$.items[["modelFit"]],
modelComp = function() private$.items[["modelComp"]],
models = function() private$.items[["models"]]),
private = list(),
public=list(
initialize=function(options) {
super$initialize(
options=options,
name="",
title="Ordinal Logistic Regression")
self$add(jmvcore::Table$new(
options=options,
name="modelFit",
title="Model Fit Measures",
clearWith=list(
"dep",
"blocks"),
visible="(dev || aic || bic || pseudoR2:r2mf || pseudoR2:r2cs || pseudoR2:r2n || modelTest)",
columns=list(
list(
`name`="model",
`title`="Model",
`type`="text"),
list(
`name`="dev",
`title`="Deviance",
`type`="number",
`visible`="(dev)"),
list(
`name`="aic",
`title`="AIC",
`type`="number",
`visible`="(aic)"),
list(
`name`="bic",
`title`="BIC",
`type`="number",
`visible`="(bic)"),
list(
`name`="r2mf",
`title`="R\u00B2<sub>McF</sub>",
`type`="number",
`visible`="(pseudoR2:r2mf)"),
list(
`name`="r2cs",
`title`="R\u00B2<sub>CS</sub>",
`type`="number",
`visible`="(pseudoR2:r2cs)"),
list(
`name`="r2n",
`title`="R\u00B2<sub>N</sub>",
`type`="number",
`visible`="(pseudoR2:r2n)"),
list(
`name`="chi",
`title`="\u03C7\u00B2",
`type`="number",
`superTitle`="Overall Model Test",
`visible`="(modelTest)"),
list(
`name`="df",
`title`="df",
`type`="integer",
`superTitle`="Overall Model Test",
`visible`="(modelTest)"),
list(
`name`="p",
`title`="p",
`type`="number",
`format`="zto,pvalue",
`superTitle`="Overall Model Test",
`visible`="(modelTest)"))))
self$add(jmvcore::Table$new(
options=options,
name="modelComp",
title="Model Comparisons",
clearWith=list(
"dep",
"blocks"),
columns=list(
list(
`name`="model1",
`title`="Model",
`content`=".",
`type`="text",
`superTitle`="Comparison"),
list(
`name`="sep",
`title`="",
`content`="-",
`type`="text",
`format`="narrow",
`superTitle`="Comparison"),
list(
`name`="model2",
`title`="Model",
`content`=".",
`type`="text",
`superTitle`="Comparison"),
list(
`name`="chi",
`title`="\u03C7\u00B2",
`type`="number"),
list(
`name`="df",
`title`="df",
`type`="integer"),
list(
`name`="p",
`title`="p",
`type`="number",
`format`="zto,pvalue"))))
self$add(jmvcore::Array$new(
options=options,
name="models",
title="Model Specific Results",
layout="listSelect",
hideHeadingOnlyChild=TRUE,
template=R6::R6Class(
inherit = jmvcore::Group,
active = list(
lrt = function() private$.items[["lrt"]],
coef = function() private$.items[["coef"]],
thres = function() private$.items[["thres"]]),
private = list(),
public=list(
initialize=function(options) {
super$initialize(
options=options,
name="undefined",
title="")
self$add(jmvcore::Table$new(
options=options,
name="lrt",
title="Omnibus Likelihood Ratio Tests",
clearWith=list(
"dep",
"blocks"),
visible="(omni)",
refs="car",
columns=list(
list(
`name`="term",
`title`="Predictor",
`type`="text"),
list(
`name`="chi",
`title`="\u03C7\u00B2",
`type`="number"),
list(
`name`="df",
`title`="df",
`type`="integer"),
list(
`name`="p",
`title`="p",
`type`="number",
`format`="zto,pvalue"))))
self$add(jmvcore::Table$new(
options=options,
name="coef",
title="`Model Coefficients - ${dep}`",
clearWith=list(
"dep",
"blocks",
"refLevels"),
refs="MASS",
columns=list(
list(
`name`="term",
`title`="Predictor",
`type`="text"),
list(
`name`="est",
`title`="Estimate",
`type`="number"),
list(
`name`="lower",
`title`="Lower",
`type`="number",
`visible`="(ci)"),
list(
`name`="upper",
`title`="Upper",
`type`="number",
`visible`="(ci)"),
list(
`name`="se",
`title`="SE",
`type`="number"),
list(
`name`="z",
`title`="Z",
`type`="number"),
list(
`name`="p",
`title`="p",
`type`="number",
`format`="zto,pvalue"),
list(
`name`="odds",
`title`="Odds ratio",
`type`="number",
`visible`="(OR)"),
list(
`name`="oddsLower",
`title`="Lower",
`type`="number",
`visible`="(ciOR && OR)"),
list(
`name`="oddsUpper",
`title`="Upper",
`type`="number",
`visible`="(ciOR && OR)"))))
self$add(jmvcore::Table$new(
options=options,
name="thres",
title="Model Thresholds",
clearWith=list(
"dep",
"blocks",
"refLevels"),
visible="(thres)",
columns=list(
list(
`name`="term",
`title`="Threshold",
`type`="text"),
list(
`name`="est",
`title`="Estimate",
`type`="number"),
list(
`name`="se",
`title`="SE",
`type`="number"),
list(
`name`="z",
`title`="Z",
`type`="number"),
list(
`name`="p",
`title`="p",
`type`="number",
`format`="zto,pvalue"),
list(
`name`="odds",
`title`="Odds ratio",
`type`="number",
`visible`="(OR)"))))}))$new(options=options)))}))
# Generated base class for the ordinal logistic regression analysis: wires
# the generated options and results objects into jmvcore's Analysis
# framework.  The computational subclass (logRegOrdClass, presumably
# defined in a companion .b.R file — referenced by logRegOrd() below) is
# expected to inherit from this.  Defined only when jmvcore is available.
logRegOrdBase <- if (requireNamespace("jmvcore", quietly=TRUE)) R6::R6Class(
    "logRegOrdBase",
    inherit = jmvcore::Analysis,
    public = list(
        initialize = function(options, data=NULL, datasetId="", analysisId="", revision=0) {
            super$initialize(
                package = "jmv",
                name = "logRegOrd",
                version = c(1,0,0),
                options = options,
                # Results tree is built immediately from the same options.
                results = logRegOrdResults$new(options=options),
                data = data,
                datasetId = datasetId,
                analysisId = analysisId,
                revision = revision,
                pause = NULL,
                completeWhenFilled = FALSE,
                requiresMissings = FALSE,
                weightsSupport = 'integerOnly')
        }))
#' Ordinal Logistic Regression
#'
#' Ordinal Logistic Regression
#'
#' @examples
#' set.seed(1337)
#'
#' y <- factor(sample(1:3, 100, replace = TRUE))
#' x1 <- rnorm(100)
#' x2 <- rnorm(100)
#'
#' df <- data.frame(y=y, x1=x1, x2=x2)
#'
#' logRegOrd(data = df, dep = y,
#' covs = vars(x1, x2),
#' blocks = list(list("x1", "x2")))
#'
#' #
#' # ORDINAL LOGISTIC REGRESSION
#' #
#' # Model Fit Measures
#' # ---------------------------------------
#' # Model Deviance AIC R²-McF
#' # ---------------------------------------
#' # 1 218 226 5.68e-4
#' # ---------------------------------------
#' #
#' #
#' # MODEL SPECIFIC RESULTS
#' #
#' # MODEL 1
#' #
#' # Model Coefficients
#' # ----------------------------------------------------
#' # Predictor Estimate SE Z p
#' # ----------------------------------------------------
#' # x1 0.0579 0.193 0.300 0.764
#' # x2 0.0330 0.172 0.192 0.848
#' # ----------------------------------------------------
#' #
#' #
#'
#' @param data the data as a data frame
#' @param dep a string naming the dependent variable from \code{data},
#' variable must be a factor
#' @param covs a vector of strings naming the covariates from \code{data}
#' @param factors a vector of strings naming the fixed factors from
#' \code{data}
#' @param blocks a list containing vectors of strings that name the predictors
#' that are added to the model. The elements are added to the model according
#' to their order in the list
#' @param refLevels a list of lists specifying reference levels of the
#' dependent variable and all the factors
#' @param modelTest \code{TRUE} or \code{FALSE} (default), provide the model
#' comparison between the models and the NULL model
#' @param dev \code{TRUE} (default) or \code{FALSE}, provide the deviance (or
#' -2LogLikelihood) for the models
#' @param aic \code{TRUE} (default) or \code{FALSE}, provide Aikaike's
#' Information Criterion (AIC) for the models
#' @param bic \code{TRUE} or \code{FALSE} (default), provide Bayesian
#' Information Criterion (BIC) for the models
#' @param pseudoR2 one or more of \code{'r2mf'}, \code{'r2cs'}, or
#' \code{'r2n'}; use McFadden's, Cox & Snell, and Nagelkerke pseudo-R²,
#' respectively
#' @param omni \code{TRUE} or \code{FALSE} (default), provide the omnibus
#' likelihood ratio tests for the predictors
#' @param thres \code{TRUE} or \code{FALSE} (default), provide the thresholds
#' that are used as cut-off scores for the levels of the dependent variable
#' @param ci \code{TRUE} or \code{FALSE} (default), provide a confidence
#' interval for the model coefficient estimates
#' @param ciWidth a number between 50 and 99.9 (default: 95) specifying the
#' confidence interval width
#' @param OR \code{TRUE} or \code{FALSE} (default), provide the exponential of
#' the log-odds ratio estimate, or the odds ratio estimate
#' @param ciOR \code{TRUE} or \code{FALSE} (default), provide a confidence
#' interval for the model coefficient odds ratio estimates
#' @param ciWidthOR a number between 50 and 99.9 (default: 95) specifying the
#' confidence interval width
#' @return A results object containing:
#' \tabular{llllll}{
#' \code{results$modelFit} \tab \tab \tab \tab \tab a table \cr
#' \code{results$modelComp} \tab \tab \tab \tab \tab a table \cr
#' \code{results$models} \tab \tab \tab \tab \tab an array of model specific results \cr
#' }
#'
#' Tables can be converted to data frames with \code{asDF} or \code{\link{as.data.frame}}. For example:
#'
#' \code{results$modelFit$asDF}
#'
#' \code{as.data.frame(results$modelFit)}
#'
#' @export
logRegOrd <- function(
    data,
    dep,
    covs = NULL,
    factors = NULL,
    blocks = list(
        list()),
    refLevels = NULL,
    modelTest = FALSE,
    dev = TRUE,
    aic = TRUE,
    bic = FALSE,
    pseudoR2 = list(
        "r2mf"),
    omni = FALSE,
    thres = FALSE,
    ci = FALSE,
    ciWidth = 95,
    OR = FALSE,
    ciOR = FALSE,
    ciWidthOR = 95) {
    # jmvcore supplies the analysis framework; fail fast if it is missing.
    if ( ! requireNamespace("jmvcore", quietly=TRUE))
        stop("logRegOrd requires jmvcore to be installed (restart may be required)")
    # Resolve unquoted column references (tidy-eval style) into names.
    if ( ! missing(dep)) dep <- jmvcore::resolveQuo(jmvcore::enquo(dep))
    if ( ! missing(covs)) covs <- jmvcore::resolveQuo(jmvcore::enquo(covs))
    if ( ! missing(factors)) factors <- jmvcore::resolveQuo(jmvcore::enquo(factors))
    # If no data frame was supplied, collect the referenced variables from
    # the caller's environment into one.
    if (missing(data))
        data <- jmvcore::marshalData(
            parent.frame(),
            `if`( ! missing(dep), dep, NULL),
            `if`( ! missing(covs), covs, NULL),
            `if`( ! missing(factors), factors, NULL))
    # The dependent variable and all fixed factors must be factors.
    for (v in dep) if (v %in% names(data)) data[[v]] <- as.factor(data[[v]])
    for (v in factors) if (v %in% names(data)) data[[v]] <- as.factor(data[[v]])
    # Bundle every argument into the generated options object above.
    options <- logRegOrdOptions$new(
        dep = dep,
        covs = covs,
        factors = factors,
        blocks = blocks,
        refLevels = refLevels,
        modelTest = modelTest,
        dev = dev,
        aic = aic,
        bic = bic,
        pseudoR2 = pseudoR2,
        omni = omni,
        thres = thres,
        ci = ci,
        ciWidth = ciWidth,
        OR = OR,
        ciOR = ciOR,
        ciWidthOR = ciWidthOR)
    # logRegOrdClass (defined elsewhere in the package) performs the actual
    # computation; run it and return the populated results object.
    analysis <- logRegOrdClass$new(
        options = options,
        data = data)
    analysis$run()
    analysis$results
}
| /R/logregord.h.R | no_license | jamovi/jmv | R | false | false | 24,647 | r |
# This file is automatically generated, you probably don't want to edit this
logRegOrdOptions <- if (requireNamespace("jmvcore", quietly=TRUE)) R6::R6Class(
"logRegOrdOptions",
inherit = jmvcore::Options,
public = list(
initialize = function(
dep = NULL,
covs = NULL,
factors = NULL,
blocks = list(
list()),
refLevels = NULL,
modelTest = FALSE,
dev = TRUE,
aic = TRUE,
bic = FALSE,
pseudoR2 = list(
"r2mf"),
omni = FALSE,
thres = FALSE,
ci = FALSE,
ciWidth = 95,
OR = FALSE,
ciOR = FALSE,
ciWidthOR = 95, ...) {
super$initialize(
package="jmv",
name="logRegOrd",
requiresData=TRUE,
...)
private$..dep <- jmvcore::OptionVariable$new(
"dep",
dep,
suggested=list(
"ordinal"),
permitted=list(
"factor"))
private$..covs <- jmvcore::OptionVariables$new(
"covs",
covs,
suggested=list(
"continuous"),
permitted=list(
"numeric"),
default=NULL)
private$..factors <- jmvcore::OptionVariables$new(
"factors",
factors,
suggested=list(
"nominal"),
permitted=list(
"factor"),
default=NULL)
private$..blocks <- jmvcore::OptionArray$new(
"blocks",
blocks,
default=list(
list()),
template=jmvcore::OptionTerms$new(
"blocks",
NULL))
private$..refLevels <- jmvcore::OptionArray$new(
"refLevels",
refLevels,
items="(factors)",
default=NULL,
template=jmvcore::OptionGroup$new(
"refLevels",
NULL,
elements=list(
jmvcore::OptionVariable$new(
"var",
NULL,
content="$key"),
jmvcore::OptionLevel$new(
"ref",
NULL))))
private$..modelTest <- jmvcore::OptionBool$new(
"modelTest",
modelTest,
default=FALSE)
private$..dev <- jmvcore::OptionBool$new(
"dev",
dev,
default=TRUE)
private$..aic <- jmvcore::OptionBool$new(
"aic",
aic,
default=TRUE)
private$..bic <- jmvcore::OptionBool$new(
"bic",
bic,
default=FALSE)
private$..pseudoR2 <- jmvcore::OptionNMXList$new(
"pseudoR2",
pseudoR2,
options=list(
"r2mf",
"r2cs",
"r2n"),
default=list(
"r2mf"))
private$..omni <- jmvcore::OptionBool$new(
"omni",
omni,
default=FALSE)
private$..thres <- jmvcore::OptionBool$new(
"thres",
thres,
default=FALSE)
private$..ci <- jmvcore::OptionBool$new(
"ci",
ci,
default=FALSE)
private$..ciWidth <- jmvcore::OptionNumber$new(
"ciWidth",
ciWidth,
min=50,
max=99.9,
default=95)
private$..OR <- jmvcore::OptionBool$new(
"OR",
OR,
default=FALSE)
private$..ciOR <- jmvcore::OptionBool$new(
"ciOR",
ciOR,
default=FALSE)
private$..ciWidthOR <- jmvcore::OptionNumber$new(
"ciWidthOR",
ciWidthOR,
min=50,
max=99.9,
default=95)
self$.addOption(private$..dep)
self$.addOption(private$..covs)
self$.addOption(private$..factors)
self$.addOption(private$..blocks)
self$.addOption(private$..refLevels)
self$.addOption(private$..modelTest)
self$.addOption(private$..dev)
self$.addOption(private$..aic)
self$.addOption(private$..bic)
self$.addOption(private$..pseudoR2)
self$.addOption(private$..omni)
self$.addOption(private$..thres)
self$.addOption(private$..ci)
self$.addOption(private$..ciWidth)
self$.addOption(private$..OR)
self$.addOption(private$..ciOR)
self$.addOption(private$..ciWidthOR)
}),
active = list(
dep = function() private$..dep$value,
covs = function() private$..covs$value,
factors = function() private$..factors$value,
blocks = function() private$..blocks$value,
refLevels = function() private$..refLevels$value,
modelTest = function() private$..modelTest$value,
dev = function() private$..dev$value,
aic = function() private$..aic$value,
bic = function() private$..bic$value,
pseudoR2 = function() private$..pseudoR2$value,
omni = function() private$..omni$value,
thres = function() private$..thres$value,
ci = function() private$..ci$value,
ciWidth = function() private$..ciWidth$value,
OR = function() private$..OR$value,
ciOR = function() private$..ciOR$value,
ciWidthOR = function() private$..ciWidthOR$value),
private = list(
..dep = NA,
..covs = NA,
..factors = NA,
..blocks = NA,
..refLevels = NA,
..modelTest = NA,
..dev = NA,
..aic = NA,
..bic = NA,
..pseudoR2 = NA,
..omni = NA,
..thres = NA,
..ci = NA,
..ciWidth = NA,
..OR = NA,
..ciOR = NA,
..ciWidthOR = NA)
)
logRegOrdResults <- if (requireNamespace("jmvcore", quietly=TRUE)) R6::R6Class(
"logRegOrdResults",
inherit = jmvcore::Group,
active = list(
modelFit = function() private$.items[["modelFit"]],
modelComp = function() private$.items[["modelComp"]],
models = function() private$.items[["models"]]),
private = list(),
public=list(
initialize=function(options) {
super$initialize(
options=options,
name="",
title="Ordinal Logistic Regression")
self$add(jmvcore::Table$new(
options=options,
name="modelFit",
title="Model Fit Measures",
clearWith=list(
"dep",
"blocks"),
visible="(dev || aic || bic || pseudoR2:r2mf || pseudoR2:r2cs || pseudoR2:r2n || modelTest)",
columns=list(
list(
`name`="model",
`title`="Model",
`type`="text"),
list(
`name`="dev",
`title`="Deviance",
`type`="number",
`visible`="(dev)"),
list(
`name`="aic",
`title`="AIC",
`type`="number",
`visible`="(aic)"),
list(
`name`="bic",
`title`="BIC",
`type`="number",
`visible`="(bic)"),
list(
`name`="r2mf",
`title`="R\u00B2<sub>McF</sub>",
`type`="number",
`visible`="(pseudoR2:r2mf)"),
list(
`name`="r2cs",
`title`="R\u00B2<sub>CS</sub>",
`type`="number",
`visible`="(pseudoR2:r2cs)"),
list(
`name`="r2n",
`title`="R\u00B2<sub>N</sub>",
`type`="number",
`visible`="(pseudoR2:r2n)"),
list(
`name`="chi",
`title`="\u03C7\u00B2",
`type`="number",
`superTitle`="Overall Model Test",
`visible`="(modelTest)"),
list(
`name`="df",
`title`="df",
`type`="integer",
`superTitle`="Overall Model Test",
`visible`="(modelTest)"),
list(
`name`="p",
`title`="p",
`type`="number",
`format`="zto,pvalue",
`superTitle`="Overall Model Test",
`visible`="(modelTest)"))))
self$add(jmvcore::Table$new(
options=options,
name="modelComp",
title="Model Comparisons",
clearWith=list(
"dep",
"blocks"),
columns=list(
list(
`name`="model1",
`title`="Model",
`content`=".",
`type`="text",
`superTitle`="Comparison"),
list(
`name`="sep",
`title`="",
`content`="-",
`type`="text",
`format`="narrow",
`superTitle`="Comparison"),
list(
`name`="model2",
`title`="Model",
`content`=".",
`type`="text",
`superTitle`="Comparison"),
list(
`name`="chi",
`title`="\u03C7\u00B2",
`type`="number"),
list(
`name`="df",
`title`="df",
`type`="integer"),
list(
`name`="p",
`title`="p",
`type`="number",
`format`="zto,pvalue"))))
self$add(jmvcore::Array$new(
options=options,
name="models",
title="Model Specific Results",
layout="listSelect",
hideHeadingOnlyChild=TRUE,
template=R6::R6Class(
inherit = jmvcore::Group,
active = list(
lrt = function() private$.items[["lrt"]],
coef = function() private$.items[["coef"]],
thres = function() private$.items[["thres"]]),
private = list(),
public=list(
initialize=function(options) {
super$initialize(
options=options,
name="undefined",
title="")
self$add(jmvcore::Table$new(
options=options,
name="lrt",
title="Omnibus Likelihood Ratio Tests",
clearWith=list(
"dep",
"blocks"),
visible="(omni)",
refs="car",
columns=list(
list(
`name`="term",
`title`="Predictor",
`type`="text"),
list(
`name`="chi",
`title`="\u03C7\u00B2",
`type`="number"),
list(
`name`="df",
`title`="df",
`type`="integer"),
list(
`name`="p",
`title`="p",
`type`="number",
`format`="zto,pvalue"))))
self$add(jmvcore::Table$new(
options=options,
name="coef",
title="`Model Coefficients - ${dep}`",
clearWith=list(
"dep",
"blocks",
"refLevels"),
refs="MASS",
columns=list(
list(
`name`="term",
`title`="Predictor",
`type`="text"),
list(
`name`="est",
`title`="Estimate",
`type`="number"),
list(
`name`="lower",
`title`="Lower",
`type`="number",
`visible`="(ci)"),
list(
`name`="upper",
`title`="Upper",
`type`="number",
`visible`="(ci)"),
list(
`name`="se",
`title`="SE",
`type`="number"),
list(
`name`="z",
`title`="Z",
`type`="number"),
list(
`name`="p",
`title`="p",
`type`="number",
`format`="zto,pvalue"),
list(
`name`="odds",
`title`="Odds ratio",
`type`="number",
`visible`="(OR)"),
list(
`name`="oddsLower",
`title`="Lower",
`type`="number",
`visible`="(ciOR && OR)"),
list(
`name`="oddsUpper",
`title`="Upper",
`type`="number",
`visible`="(ciOR && OR)"))))
self$add(jmvcore::Table$new(
options=options,
name="thres",
title="Model Thresholds",
clearWith=list(
"dep",
"blocks",
"refLevels"),
visible="(thres)",
columns=list(
list(
`name`="term",
`title`="Threshold",
`type`="text"),
list(
`name`="est",
`title`="Estimate",
`type`="number"),
list(
`name`="se",
`title`="SE",
`type`="number"),
list(
`name`="z",
`title`="Z",
`type`="number"),
list(
`name`="p",
`title`="p",
`type`="number",
`format`="zto,pvalue"),
list(
`name`="odds",
`title`="Odds ratio",
`type`="number",
`visible`="(OR)"))))}))$new(options=options)))}))
# Base analysis class for the ordinal logistic regression module.
# Auto-generated jamovi scaffolding: wires the options and results objects
# into the jmvcore::Analysis framework. Only defined when jmvcore is
# installed; otherwise `logRegOrdBase` is NULL.
logRegOrdBase <- if (requireNamespace("jmvcore", quietly=TRUE)) R6::R6Class(
    "logRegOrdBase",
    inherit = jmvcore::Analysis,
    public = list(
        # options: a logRegOrdOptions object; data: optional data frame;
        # datasetId/analysisId/revision identify the analysis in a jamovi
        # session (defaults are fine for programmatic use).
        initialize = function(options, data=NULL, datasetId="", analysisId="", revision=0) {
            super$initialize(
                package = "jmv",
                name = "logRegOrd",
                version = c(1,0,0),
                options = options,
                # results definition generated earlier in this file
                results = logRegOrdResults$new(options=options),
                data = data,
                datasetId = datasetId,
                analysisId = analysisId,
                revision = revision,
                pause = NULL,
                completeWhenFilled = FALSE,
                requiresMissings = FALSE,
                # only integer-valued case weights are supported
                weightsSupport = 'integerOnly')
        }))
#' Ordinal Logistic Regression
#'
#' Ordinal Logistic Regression
#'
#' @examples
#' set.seed(1337)
#'
#' y <- factor(sample(1:3, 100, replace = TRUE))
#' x1 <- rnorm(100)
#' x2 <- rnorm(100)
#'
#' df <- data.frame(y=y, x1=x1, x2=x2)
#'
#' logRegOrd(data = df, dep = y,
#' covs = vars(x1, x2),
#' blocks = list(list("x1", "x2")))
#'
#' #
#' # ORDINAL LOGISTIC REGRESSION
#' #
#' # Model Fit Measures
#' # ---------------------------------------
#' # Model Deviance AIC R²-McF
#' # ---------------------------------------
#' # 1 218 226 5.68e-4
#' # ---------------------------------------
#' #
#' #
#' # MODEL SPECIFIC RESULTS
#' #
#' # MODEL 1
#' #
#' # Model Coefficients
#' # ----------------------------------------------------
#' # Predictor Estimate SE Z p
#' # ----------------------------------------------------
#' # x1 0.0579 0.193 0.300 0.764
#' # x2 0.0330 0.172 0.192 0.848
#' # ----------------------------------------------------
#' #
#' #
#'
#' @param data the data as a data frame
#' @param dep a string naming the dependent variable from \code{data},
#' variable must be a factor
#' @param covs a vector of strings naming the covariates from \code{data}
#' @param factors a vector of strings naming the fixed factors from
#' \code{data}
#' @param blocks a list containing vectors of strings that name the predictors
#' that are added to the model. The elements are added to the model according
#' to their order in the list
#' @param refLevels a list of lists specifying reference levels of the
#' dependent variable and all the factors
#' @param modelTest \code{TRUE} or \code{FALSE} (default), provide the model
#' comparison between the models and the NULL model
#' @param dev \code{TRUE} (default) or \code{FALSE}, provide the deviance (or
#' -2LogLikelihood) for the models
#' @param aic \code{TRUE} (default) or \code{FALSE}, provide Akaike's
#' Information Criterion (AIC) for the models
#' @param bic \code{TRUE} or \code{FALSE} (default), provide Bayesian
#' Information Criterion (BIC) for the models
#' @param pseudoR2 one or more of \code{'r2mf'}, \code{'r2cs'}, or
#' \code{'r2n'}; use McFadden's, Cox & Snell, and Nagelkerke pseudo-R²,
#' respectively
#' @param omni \code{TRUE} or \code{FALSE} (default), provide the omnibus
#' likelihood ratio tests for the predictors
#' @param thres \code{TRUE} or \code{FALSE} (default), provide the thresholds
#' that are used as cut-off scores for the levels of the dependent variable
#' @param ci \code{TRUE} or \code{FALSE} (default), provide a confidence
#' interval for the model coefficient estimates
#' @param ciWidth a number between 50 and 99.9 (default: 95) specifying the
#' confidence interval width
#' @param OR \code{TRUE} or \code{FALSE} (default), provide the exponential of
#' the log-odds ratio estimate, or the odds ratio estimate
#' @param ciOR \code{TRUE} or \code{FALSE} (default), provide a confidence
#' interval for the model coefficient odds ratio estimates
#' @param ciWidthOR a number between 50 and 99.9 (default: 95) specifying the
#' confidence interval width
#' @return A results object containing:
#' \tabular{llllll}{
#' \code{results$modelFit} \tab \tab \tab \tab \tab a table \cr
#' \code{results$modelComp} \tab \tab \tab \tab \tab a table \cr
#' \code{results$models} \tab \tab \tab \tab \tab an array of model specific results \cr
#' }
#'
#' Tables can be converted to data frames with \code{asDF} or \code{\link{as.data.frame}}. For example:
#'
#' \code{results$modelFit$asDF}
#'
#' \code{as.data.frame(results$modelFit)}
#'
#' @export
# Run the ordinal logistic regression analysis. See the roxygen block above
# for the full description of each argument; this function simply resolves
# the user's (possibly unquoted) variable references, marshals the data,
# bundles the options, and dispatches to the generated analysis class.
logRegOrd <- function(
    data,
    dep,
    covs = NULL,
    factors = NULL,
    blocks = list(
        list()),
    refLevels = NULL,
    modelTest = FALSE,
    dev = TRUE,
    aic = TRUE,
    bic = FALSE,
    pseudoR2 = list(
        "r2mf"),
    omni = FALSE,
    thres = FALSE,
    ci = FALSE,
    ciWidth = 95,
    OR = FALSE,
    ciOR = FALSE,
    ciWidthOR = 95) {

    # jmvcore provides the whole analysis framework; fail fast without it.
    if ( ! requireNamespace("jmvcore", quietly=TRUE))
        stop("logRegOrd requires jmvcore to be installed (restart may be required)")

    # Allow variables to be supplied unquoted (tidy-eval style); resolve the
    # quosures into character vectors of variable names.
    if ( ! missing(dep)) dep <- jmvcore::resolveQuo(jmvcore::enquo(dep))
    if ( ! missing(covs)) covs <- jmvcore::resolveQuo(jmvcore::enquo(covs))
    if ( ! missing(factors)) factors <- jmvcore::resolveQuo(jmvcore::enquo(factors))
    # When no data frame is given, gather the referenced variables from the
    # caller's environment into one.
    if (missing(data))
        data <- jmvcore::marshalData(
            parent.frame(),
            `if`( ! missing(dep), dep, NULL),
            `if`( ! missing(covs), covs, NULL),
            `if`( ! missing(factors), factors, NULL))

    # The dependent variable and all fixed factors must be factors.
    for (v in dep) if (v %in% names(data)) data[[v]] <- as.factor(data[[v]])
    for (v in factors) if (v %in% names(data)) data[[v]] <- as.factor(data[[v]])

    # Bundle the options, construct the analysis, run it, and return the
    # results object (tables are accessible via results$...).
    options <- logRegOrdOptions$new(
        dep = dep,
        covs = covs,
        factors = factors,
        blocks = blocks,
        refLevels = refLevels,
        modelTest = modelTest,
        dev = dev,
        aic = aic,
        bic = bic,
        pseudoR2 = pseudoR2,
        omni = omni,
        thres = thres,
        ci = ci,
        ciWidth = ciWidth,
        OR = OR,
        ciOR = ciOR,
        ciWidthOR = ciWidthOR)

    analysis <- logRegOrdClass$new(
        options = options,
        data = data)

    analysis$run()

    analysis$results
}
|
library(readr)

# Read the UCI "Individual household electric power consumption" data set.
# NOTE(review): the path contains URL-encoded "%2F" directory names --
# confirm it matches the local checkout before running.
household_power_consumption <- read_delim(
  "/Desktop/github/exdata%2Fdata%2Fhousehold_power_consumption/household_power_consumption.txt",
  ";", escape_double = FALSE, trim_ws = TRUE)

# Quality checks
summary(household_power_consumption)
table(household_power_consumption$Date)

# Use data from the dates 2007-02-01 and 2007-02-02.
# The Date column is stored as d/m/yyyy without leading zeros, so the first
# day must be "1/2/2007" -- the original "01/2/2007" matched no rows at all.
eda_data <- household_power_consumption[which(household_power_consumption$Date == '1/2/2007' |
                                              household_power_consumption$Date == '2/2/2007'), ]

# Plot 3: the three sub-metering series overlaid on a single set of axes.
plot(1:nrow(eda_data), as.numeric(eda_data$Sub_metering_1), main = "",
     ylab = "Energy sub metering", xlab = "", type = "l", col = "black", axes = FALSE)
par(new = TRUE)
plot(as.numeric(eda_data$Sub_metering_2), col = "red", type = "l", axes = FALSE,
     ylim = c(0, 40), xlab = "", ylab = "")
par(new = TRUE)
plot(as.numeric(eda_data$Sub_metering_3), col = "blue", type = "l", axes = FALSE,
     ylim = c(0, 40), xlab = "", ylab = "")
axis(1, at = c(0, 1500, 2900), labels = c("Thu", "Fri", "Sat"))
axis(2, at = c(0, 10, 20, 30), labels = c("0", "10", "20", "30"))
legend("topright", legend = c("sub-metering1", "sub-metering2", "sub-metering3"),
       col = c("black", "red", "blue"), lty = c(1, 1, 1), lwd = 2)
box(lty = 1, col = 'black')
dev.copy(png, "plot3.png")
dev.off()

# Plot 4: 2x2 panel -- global active power, voltage, sub-metering, and
# global reactive power.
par(new = FALSE)
par(mfrow = c(2, 2))
plot(as.numeric(eda_data$Global_active_power), main = "Global Active Power", type = "l",
     ylab = "Global Active Power(kilowatts)", xlab = "", axes = FALSE)
axis(1, at = c(0, 1500, 2900), labels = c("Thu", "Fri", "Sat"))
axis(2, at = c(0, 2, 4, 6))
box(lty = 1, col = 'black')
# Voltage panel
plot(as.numeric(eda_data$Voltage), main = "voltage", type = "l",
     ylab = "voltage", xlab = "", axes = FALSE)
axis(1, at = c(0, 1500, 2900), labels = c("Thu", "Fri", "Sat"))
axis(2, at = c(234, 238, 240, 242), labels = c("234", "238", "240", "242"))
box(lty = 1, col = 'black')
# Sub-metering panel (same overlay as plot 3)
plot(1:nrow(eda_data), as.numeric(eda_data$Sub_metering_1), main = "Energy metering",
     ylab = "Energy sub metering", xlab = "", type = "l", col = "black", axes = FALSE)
par(new = TRUE)
plot(as.numeric(eda_data$Sub_metering_2), col = "red", type = "l", axes = FALSE,
     ylim = c(0, 40), xlab = "", ylab = "")
par(new = TRUE)
plot(as.numeric(eda_data$Sub_metering_3), col = "blue", type = "l", axes = FALSE,
     ylim = c(0, 40), xlab = "", ylab = "")
axis(1, at = c(0, 1500, 2900), labels = c("Thu", "Fri", "Sat"))
axis(2, at = c(0, 10, 20, 30), labels = c("0", "10", "20", "30"))
legend("topright", legend = c("sub-metering1", "sub-metering2", "sub-metering3"),
       col = c("black", "red", "blue"), lty = c(1, 1, 1), lwd = 2)
box(lty = 1, col = 'black')
par(new = FALSE)
# Global reactive power panel
plot(as.numeric(eda_data$Global_reactive_power), main = "Global Reactive Power", type = "l",
     ylab = "", xlab = "", axes = FALSE)
axis(1, at = c(0, 1500, 2900), labels = c("Thu", "Fri", "Sat"))
axis(2, at = c(0.1, 0.2, 0.3, 0.4), c("0.1", "0.2", "0.3", "0.4"))
box(lty = 1, col = 'black')
dev.copy(png, "plot4.png")
dev.off()
| /week1.R | no_license | thirumalahcu/ExData_Plotting1 | R | false | false | 2,963 | r | library(readr)
library(readr)

# Read the UCI "Individual household electric power consumption" data set.
# NOTE(review): the path contains URL-encoded "%2F" directory names --
# confirm it matches the local checkout before running.
household_power_consumption <- read_delim(
  "/Desktop/github/exdata%2Fdata%2Fhousehold_power_consumption/household_power_consumption.txt",
  ";", escape_double = FALSE, trim_ws = TRUE)

# Quality checks
summary(household_power_consumption)
table(household_power_consumption$Date)

# Use data from the dates 2007-02-01 and 2007-02-02.
# The Date column is stored as d/m/yyyy without leading zeros, so the first
# day must be "1/2/2007" -- the original "01/2/2007" matched no rows at all.
eda_data <- household_power_consumption[which(household_power_consumption$Date == '1/2/2007' |
                                              household_power_consumption$Date == '2/2/2007'), ]

# Plot 3: the three sub-metering series overlaid on a single set of axes.
plot(1:nrow(eda_data), as.numeric(eda_data$Sub_metering_1), main = "",
     ylab = "Energy sub metering", xlab = "", type = "l", col = "black", axes = FALSE)
par(new = TRUE)
plot(as.numeric(eda_data$Sub_metering_2), col = "red", type = "l", axes = FALSE,
     ylim = c(0, 40), xlab = "", ylab = "")
par(new = TRUE)
plot(as.numeric(eda_data$Sub_metering_3), col = "blue", type = "l", axes = FALSE,
     ylim = c(0, 40), xlab = "", ylab = "")
axis(1, at = c(0, 1500, 2900), labels = c("Thu", "Fri", "Sat"))
axis(2, at = c(0, 10, 20, 30), labels = c("0", "10", "20", "30"))
legend("topright", legend = c("sub-metering1", "sub-metering2", "sub-metering3"),
       col = c("black", "red", "blue"), lty = c(1, 1, 1), lwd = 2)
box(lty = 1, col = 'black')
dev.copy(png, "plot3.png")
dev.off()

# Plot 4: 2x2 panel -- global active power, voltage, sub-metering, and
# global reactive power.
par(new = FALSE)
par(mfrow = c(2, 2))
plot(as.numeric(eda_data$Global_active_power), main = "Global Active Power", type = "l",
     ylab = "Global Active Power(kilowatts)", xlab = "", axes = FALSE)
axis(1, at = c(0, 1500, 2900), labels = c("Thu", "Fri", "Sat"))
axis(2, at = c(0, 2, 4, 6))
box(lty = 1, col = 'black')
# Voltage panel
plot(as.numeric(eda_data$Voltage), main = "voltage", type = "l",
     ylab = "voltage", xlab = "", axes = FALSE)
axis(1, at = c(0, 1500, 2900), labels = c("Thu", "Fri", "Sat"))
axis(2, at = c(234, 238, 240, 242), labels = c("234", "238", "240", "242"))
box(lty = 1, col = 'black')
# Sub-metering panel (same overlay as plot 3)
plot(1:nrow(eda_data), as.numeric(eda_data$Sub_metering_1), main = "Energy metering",
     ylab = "Energy sub metering", xlab = "", type = "l", col = "black", axes = FALSE)
par(new = TRUE)
plot(as.numeric(eda_data$Sub_metering_2), col = "red", type = "l", axes = FALSE,
     ylim = c(0, 40), xlab = "", ylab = "")
par(new = TRUE)
plot(as.numeric(eda_data$Sub_metering_3), col = "blue", type = "l", axes = FALSE,
     ylim = c(0, 40), xlab = "", ylab = "")
axis(1, at = c(0, 1500, 2900), labels = c("Thu", "Fri", "Sat"))
axis(2, at = c(0, 10, 20, 30), labels = c("0", "10", "20", "30"))
legend("topright", legend = c("sub-metering1", "sub-metering2", "sub-metering3"),
       col = c("black", "red", "blue"), lty = c(1, 1, 1), lwd = 2)
box(lty = 1, col = 'black')
par(new = FALSE)
# Global reactive power panel
plot(as.numeric(eda_data$Global_reactive_power), main = "Global Reactive Power", type = "l",
     ylab = "", xlab = "", axes = FALSE)
axis(1, at = c(0, 1500, 2900), labels = c("Thu", "Fri", "Sat"))
axis(2, at = c(0.1, 0.2, 0.3, 0.4), c("0.1", "0.2", "0.3", "0.4"))
box(lty = 1, col = 'black')
dev.copy(png, "plot4.png")
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/check_data_integrity.R
\name{check_data_integrity}
\alias{check_data_integrity}
\title{Check data integrity}
\usage{
check_data_integrity(x)
}
\arguments{
\item{x}{data frame with at least the columns lat, long and primary_fur_color}
}
\value{
Original dataframe if all tests are good. Otherwise stops.
}
\description{
Check data integrity
}
\examples{
datafile <- system.file("nyc_squirrels_sample.csv", package = "fusen.squirrels")
nyc_squirrels <- read.csv(datafile)
nyc_squirrels_ok <- check_data_integrity(nyc_squirrels)
}
| /man/check_data_integrity.Rd | permissive | statnmap/fusen.squirrels | R | false | true | 600 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/check_data_integrity.R
\name{check_data_integrity}
\alias{check_data_integrity}
\title{Check data integrity}
\usage{
check_data_integrity(x)
}
\arguments{
\item{x}{data frame with at least the columns lat, long and primary_fur_color}
}
\value{
Original dataframe if all tests are good. Otherwise stops.
}
\description{
Check data integrity
}
\examples{
datafile <- system.file("nyc_squirrels_sample.csv", package = "fusen.squirrels")
nyc_squirrels <- read.csv(datafile)
nyc_squirrels_ok <- check_data_integrity(nyc_squirrels)
}
|
#' Color palettes for professional sports teams
#'
#' @docType data
#' @format A data frame with one row for each professional team and five variables:
#' \describe{
#' \item{name}{the name of the team}
#' \item{league}{the league in which the team plays}
#' \item{primary}{the team's primary color}
#' \item{secondary}{the team's secondary color}
#' \item{tertiary}{the team's tertiary color}
#' \item{quaternary}{the team's quaternary color}
#' \item{division}{the team's division}
#' \item{logo}{URL to the team's logo, hosted by \url{http://www.sportslogos.net}}
#' }
#'
#' @details The colors given are HTML hexadecimal values. See \code{\link[grDevices]{colors}}
#' for more information.
#'
#' @source \url{http://jim-nielsen.com/teamcolors/}, \url{http://www.sportslogos.net}, \url{https://teamcolorcodes.com/}
#'
#' @examples
#' data(teamcolors)
#'
#' if (require(Lahman) & require(dplyr)) {
#' pythag <- Teams %>%
#' filter(yearID == 2014) %>%
#' select(name, W, L, R, RA) %>%
#' mutate(wpct = W / (W+L), exp_wpct = 1 / (1 + (RA/R)^2)) %>%
#' # St. Louis Cardinals do not match
#' left_join(teamcolors, by = "name")
#' with(pythag, plot(exp_wpct, wpct, bg = primary, col = secondary, pch = 21, cex = 3))
#'
#'
#' # Using ggplot2
#' if (require(ggplot2)) {
#' ggplot(pythag, aes(x = wpct, y = exp_wpct, color = name, fill = name)) +
#' geom_abline(slope = 1, intercept = 0, linetype = 3) +
#' geom_point(shape = 21, size = 3) +
#' scale_fill_manual(values = pythag$primary, guide = FALSE) +
#' scale_color_manual(values = pythag$secondary, guide = FALSE) +
#' geom_text(aes(label = substr(name, 1, 3))) +
#' scale_x_continuous("Winning Percentage", limits = c(0.3, 0.7)) +
#' scale_y_continuous("Expected Winning Percentage", limits = c(0.3, 0.7)) +
#' coord_equal()
#' }
#' }
#'
"teamcolors" | /R/data.R | no_license | joepope44/teamcolors | R | false | false | 1,886 | r | #' Color palettes for professional sports teams
#'
#' @docType data
#' @format A data frame with one row for each professional team and five variables:
#' \describe{
#' \item{name}{the name of the team}
#' \item{league}{the league in which the team plays}
#' \item{primary}{the team's primary color}
#' \item{secondary}{the team's secondary color}
#' \item{tertiary}{the team's tertiary color}
#' \item{quaternary}{the team's quaternary color}
#' \item{division}{the team's division}
#' \item{logo}{URL to the team's logo, hosted by \url{http://www.sportslogos.net}}
#' }
#'
#' @details The colors given are HTML hexadecimal values. See \code{\link[grDevices]{colors}}
#' for more information.
#'
#' @source \url{http://jim-nielsen.com/teamcolors/}, \url{http://www.sportslogos.net}, \url{https://teamcolorcodes.com/}
#'
#' @examples
#' data(teamcolors)
#'
#' if (require(Lahman) & require(dplyr)) {
#' pythag <- Teams %>%
#' filter(yearID == 2014) %>%
#' select(name, W, L, R, RA) %>%
#' mutate(wpct = W / (W+L), exp_wpct = 1 / (1 + (RA/R)^2)) %>%
#' # St. Louis Cardinals do not match
#' left_join(teamcolors, by = "name")
#' with(pythag, plot(exp_wpct, wpct, bg = primary, col = secondary, pch = 21, cex = 3))
#'
#'
#' # Using ggplot2
#' if (require(ggplot2)) {
#' ggplot(pythag, aes(x = wpct, y = exp_wpct, color = name, fill = name)) +
#' geom_abline(slope = 1, intercept = 0, linetype = 3) +
#' geom_point(shape = 21, size = 3) +
#' scale_fill_manual(values = pythag$primary, guide = FALSE) +
#' scale_color_manual(values = pythag$secondary, guide = FALSE) +
#' geom_text(aes(label = substr(name, 1, 3))) +
#' scale_x_continuous("Winning Percentage", limits = c(0.3, 0.7)) +
#' scale_y_continuous("Expected Winning Percentage", limits = c(0.3, 0.7)) +
#' coord_equal()
#' }
#' }
#'
"teamcolors" |
#### Basics ####
#Ex. 1: minutes in a week
60 * 24 * 7
#Ex. 2
sum(1, 8, 4, 2, 9, 4, 8, 5)
#Ex. 3: two equivalent ways to open the help page for rep()
help(rep)
?rep
#Ex. 4
x <- 5
y <- 7
z = x + y  # works, but `<-` is the conventional assignment operator in R
z + 3 == 15
#Ex. 5
rep("Go Penn!", times = 30)
#Ex. 6
x <- seq(-1, 1, by = 0.1)
x
#### Harry Potter ####
#Ex. 1
wizards <- c("Harry", "Ron", "Fred", "George", "Sirius")
ranking <- c(4, 2, 5, 1, 3)
#Remember that the elements of character vectors need to be enclosed in
#quotation marks. Either single or double quotes will work.
#Ex. 3
wizards[2]
#Ex. 4
#There are several different ways to do this. Here are two possibilities.
wizards[c(3, 4, 5)] <- c("Hermione", "Ginny", "Malfoy")
wizards[3:5] <- c("Hermione", "Ginny", "Malfoy")
#Ex. 5
names(wizards) <- c("Lead", "Friend", "Friend", "Wife", "Rival")
wizards
#Ex. 6
names(wizards)[5] <- "Ex-Rival"
names(wizards)
#Ex. 7
barplot(ranking)
#The wizards vector contains character data rather than numerical data so we
#can't plot it.
#Ex. 8: naming the numeric vector gives the bars their labels
names(ranking) <- wizards
barplot(ranking)
#### Steve's Finances ####
#Ex. 1
years <- c(2009, 2010, 2011, 2012)
income <- c(50000, 52000, 52500, 48000)
expenses <- c(35000, 34000, 38000, 40000)
#Ex. 2
savings <- income - expenses
plot(years, savings)
# NOTE(review): plot_ly() requires library(plotly), which is never loaded in
# this script -- confirm plotly is attached before running this line.
plot_ly(x = years, y = savings, type = "scatter", mode = "markers")
#Ex. 3
sum(savings)
#Ex. 4
years <- c(2009, 2010, 2011, 2012, 2013)
income <- c(50000, 52000, 52500, 48000, NA)
#If we just use sum, we'll get NA (NA propagates through arithmetic)
sum(income)
#To avoid this, we use the following option to ignore the NA
sum(income, na.rm = TRUE)
#### Summary Stats ####
#Ex. 1
scores <- c(18, 95, 76, 90, 84, 83, 80, 79, 63, 76, 55, 78, 90, 81, 88, 89, 92,
            73, 83, 72, 85, 66, 77, 82, 99, 87)
#Ex. 2
mean(scores)
median(scores)
sd(scores)
#### Trojan War ####
# This section uses data.table syntax; the package is never attached
# elsewhere in this script, so load it here.
library(data.table)
#Ex. 1
age <- c(21, 26, 51, 22, 160, 160, 160)
person <- c("Achilles", "Hector", "Priam", "Paris", "Apollo", "Athena", "Aphrodite")
description <- c("Aggressive", "Loyal", "Regal", "Cowardly", "Proud", "Wise", "Conniving")
#Ex. 2
trojan.war <- data.table(person, age, description)
#Ex. 3
#There are many different ways to do this:
trojan.war$description
# For a data.table, DT["description"] attempts a keyed join and errors on an
# unkeyed table (that data.frame idiom was in the original answer key), so
# select the column explicitly instead:
trojan.war[, "description"]
#Ex. 4
#There are several ways to do this. Here are a few.
#(The original answer key referred to an undefined `trojanWar` object;
# the table created above is `trojan.war`.)
trojan.war[person == "Achilles" | person == "Hector", ]
trojan.war[person %in% c("Achilles", "Hector"), ]
trojan.war[c(1, 2), ]
trojan.war[1:2, ]
#Ex. 5
#There are many ways to do this. Here are a few:
trojan.war[person %in% c("Apollo", "Athena", "Aphrodite"), .(person, description)]
trojan.war[c(5, 6, 7), c(1, 3)]
trojan.war[5:7, c("person", "description")]
#Ex. 6
#There are many different ways to do this. Here are a few:
trojan.war$age[trojan.war$person == "Priam"] <- 72
trojan.war[3, 2] <- 72
trojan.war$age[3] <- 72
trojan.war
| /Rtutorials/Rtutorial1.R | no_license | emallickhossain/Econ103Public | R | false | false | 2,699 | r | #### Basics ####
#Ex. 1
60 * 24 * 7
#Ex. 2
sum(1, 8, 4, 2, 9, 4, 8, 5)
#Ex. 3
help(rep)
?rep
#Ex. 4
x <- 5
y <- 7
z = x + y
z + 3 == 15
#Ex. 5
rep("Go Penn!", times = 30)
#Ex. 6
x <- seq(-1, 1, by = 0.1)
x
#### Harry Potter ####
#Ex. 1
wizards <- c("Harry", "Ron", "Fred", "George", "Sirius")
ranking <- c(4, 2, 5, 1, 3)
#Remember that the elements of character vectors need to be enclosed in
#quotation marks. Either single or double quotes will work.
#Ex. 3
wizards[2]
#Ex. 4
#There are several different ways to do this. Here are two possibilities.
wizards[c(3, 4, 5)] <- c("Hermione", "Ginny", "Malfoy")
wizards[3:5] <- c("Hermione", "Ginny", "Malfoy")
#Ex. 5
names(wizards) <- c("Lead", "Friend", "Friend", "Wife", "Rival")
wizards
#Ex. 6
names(wizards)[5] <- "Ex-Rival"
names(wizards)
#Ex. 7
barplot(ranking)
#The wizards vector contains character data rather than numerical data so we
#can't plot it.
#Ex. 8
names(ranking) <- wizards
barplot(ranking)
#### Steve's Finances ####
#Ex. 1
years <- c(2009, 2010, 2011, 2012)
income <- c(50000, 52000, 52500, 48000)
expenses <- c(35000, 34000, 38000, 40000)
#Ex. 2
savings <- income - expenses
plot(years, savings)
plot_ly(x = years, y = savings, type = "scatter", mode = "markers")
#Ex. 3
sum(savings)
#Ex. 4
years <- c(2009, 2010, 2011, 2012, 2013)
income <- c(50000, 52000, 52500, 48000, NA)
#If we just use sum, we'll get NA
sum(income)
#To avoid this, we use the following option to ignore the NA
sum(income, na.rm = TRUE)
#### Summary Stats ####
#Ex. 1
scores <- c(18, 95, 76, 90, 84, 83, 80, 79, 63, 76, 55, 78, 90, 81, 88, 89, 92,
73, 83, 72, 85, 66, 77, 82, 99, 87)
#Ex. 2
mean(scores)
median(scores)
sd(scores)
#### Trojan War ####
# This section uses data.table syntax; the package is never attached
# elsewhere in this script, so load it here.
library(data.table)
#Ex. 1
age <- c(21, 26, 51, 22, 160, 160, 160)
person <- c("Achilles", "Hector", "Priam", "Paris", "Apollo", "Athena", "Aphrodite")
description <- c("Aggressive", "Loyal", "Regal", "Cowardly", "Proud", "Wise", "Conniving")
#Ex. 2
trojan.war <- data.table(person, age, description)
#Ex. 3
#There are many different ways to do this:
trojan.war$description
# For a data.table, DT["description"] attempts a keyed join and errors on an
# unkeyed table (that data.frame idiom was in the original answer key), so
# select the column explicitly instead:
trojan.war[, "description"]
#Ex. 4
#There are several ways to do this. Here are a few.
#(The original answer key referred to an undefined `trojanWar` object;
# the table created above is `trojan.war`.)
trojan.war[person == "Achilles" | person == "Hector", ]
trojan.war[person %in% c("Achilles", "Hector"), ]
trojan.war[c(1, 2), ]
trojan.war[1:2, ]
#Ex. 5
#There are many ways to do this. Here are a few:
trojan.war[person %in% c("Apollo", "Athena", "Aphrodite"), .(person, description)]
trojan.war[c(5, 6, 7), c(1, 3)]
trojan.war[5:7, c("person", "description")]
#Ex. 6
#There are many different ways to do this. Here are a few:
trojan.war$age[trojan.war$person == "Priam"] <- 72
trojan.war[3, 2] <- 72
trojan.war$age[3] <- 72
trojan.war
|
# James Valles - interactive Shiny app. The data set is cat.csv; make sure the
# path in read.csv() below is correct before loading the app.
# Live deployment: https://jamesvalles.shinyapps.io/Corona-19Stats/
library(shiny)
library(readxl)    # NOTE(review): readxl appears unused in this script -- confirm before removing
library(datasets)  # NOTE(review): datasets also appears unused
# Import the COVID-19 data set (relative path: run from the app directory)
Coviddata<- read.csv("cat.csv")
# Define the UI (the plot rendered below is a bar chart, not a histogram)
ui <- fluidPage(
  # Give the page a title
  titlePanel("COVID-19 Stats by Country"),
  # Generate a row with a sidebar
  sidebarLayout(
    # Define the sidebar with one input: the country column to display
    sidebarPanel(
      selectInput("Country", "Country:",
                  choices=colnames(Coviddata)),
      hr(),
      helpText("This chart contains the latest number of deaths, patients in critical and serious condition, and those that have recovered. Source: BNO News ")
    ),
    # Create a spot for the barplot
    mainPanel(
      plotOutput("covidPlot")
    )
  )
)
# Define the server logic that renders the bar chart
server <- function(input, output) {
  # Fill in the spot we created for the plot
  output$covidPlot <- renderPlot({
    # Render a barplot of the four statistics for the selected country
    barplot(Coviddata[,input$Country],
            main=input$Country,
            ylab="Number",
            xlab="Current Statistics as of March 11, 2020", names.arg = c("Death", "Serious", "Critical", "Recovered"))
  })
}
# Run the application
shinyApp(ui = ui, server = server)
| /Final Project DataViz/Graph files/JamesVallesShinyDashboard/corona/app.R | no_license | jamesvalles/COVID19-VISUALIZATIONS | R | false | false | 1,579 | r | #James Valles - Interactive app. Dataset is cat.csv, ensure the path is correct on line 8 before loading app.
# You can launch the interactive app by visiting the following address: https://jamesvalles.shinyapps.io/Corona-19Stats/
library(shiny)
library(readxl)
library(datasets)
#Import Covid19 Dataset
Coviddata<- read.csv("cat.csv")
# Define UI for application that draws a histogram
ui <- fluidPage(
# Give the page a title
titlePanel("COVID-19 Stats by Country"),
# Generate a row with a sidebar
sidebarLayout(
# Define the sidebar with one input
sidebarPanel(
selectInput("Country", "Country:",
choices=colnames(Coviddata)),
hr(),
helpText("This chart contains the latest number of deaths, patients in critical and serious condition, and those that have recovered. Source: BNO News ")
),
# Create a spot for the barplot
mainPanel(
plotOutput("covidPlot")
)
)
)
# Define server logic required to draw a histogram
server <- function(input, output) {
# Fill in the spot we created for a plot
output$covidPlot <- renderPlot({
# Render a barplot
barplot(Coviddata[,input$Country],
main=input$Country,
ylab="Number",
xlab="Current Statistics as of March 11, 2020", names.arg = c("Death", "Serious", "Critical", "Recovered"))
})
}
# Run the application
shinyApp(ui = ui, server = server)
|
# Install the RSelenium and rvest packages.
source("https://install-github.me/ropensci/RSelenium")
install.packages("rvest")
install.packages("dplyr")
# Load the packages so they are ready to use.
library(RSelenium)
library(rvest)
library(dplyr)
# Start the phantomjs headless browser.
pJS <- wdman::phantomjs(port = 4567L)
Sys.sleep(5)
# Connect a remote-control driver to the browser.
remDr <- remoteDriver(browserName = 'phantomjs', port = 4567L)
# Open the driver session.
remDr$open()
# Navigate to the target Facebook page (JTBC was chosen here).
remDr$navigate("https://m.facebook.com/wlrwkddlseoskantnv")
# Take a screenshot to verify the page. This and similar checks below are
# for debugging only and are not needed for the actual run.
# remDr$screenshot(display = T)
Sys.sleep(5)
# Keep scrolling the page down so that older posts are loaded.
# Raising the threshold in the while loop below loads more posts.
webElem <- remDr$findElement(using = 'css selector', ".scrollArea")
webElem$sendKeysToElement(list(key = "end"))
Sys.sleep(3)
len <- 0
while(len<50){
webElems <- remDr$findElements(using = 'css selector', ".story_body_container")
len <- length(webElems)
print(len)
remDr$mouseMoveToLocation(webElement = webElems[[1]])
remDr$buttondown()
remDr$mouseMoveToLocation(webElement = webElems[[len]])
remDr$buttonup()
Sys.sleep(5)
}
# Fetch the page, split into individual posts.
remDr$getPageSource()[[1]] %>%
read_html() %>%
html_nodes("article") -> articles
# Separate out shared content within each post.
containers <-
lapply(articles, function(article)
article %>%
html_nodes(".story_body_container")
)
# Extract only the text of each post.
# Handling shared (re-posted) content makes this slightly involved.
posts <-
lapply(containers, function(container){
if(length(container)==1){
container %>%
html_nodes("div span p") %>%
html_text %>%
paste0(collapse = " ") %>%
c("") -> res
}
if(length(container)==2){
c1 <- container[1] %>%
html_nodes("div span p") %>%
html_text %>%
paste0(collapse = " ")
c2 <- container[2] %>%
html_nodes("div span p") %>%
html_text %>%
paste0(collapse = " ")
res <- c(
ifelse(c1 != "",c1,""),
ifelse(c2 != "",c2,"")
)
res[1]<- ifelse(c1==c2,"",c1)
}
return(data.frame(content = res[1],
nested_content = res[2],
stringsAsFactors = F))
})
# Bind the per-post results into a single data.frame.
posts <- bind_rows(posts)
# Split the page into post footers to collect like/comment/share counts.
remDr$getPageSource()[[1]] %>%
read_html() %>%
html_nodes("footer") -> count_info
# Extract the number of likes.
# (the regex strips the Korean "like" label, counter suffix, and commas)
like <-
sapply(count_info, function(x){
x %>%
html_nodes("div div a span.like_def") %>%
html_text %>%
gsub("[좋아요 |개|,]","",.) %>%
as.integer() -> res
if(identical(res,integer(0))){
res<-0
}
return(res)
})
# Extract the number of comments.
comment <-
sapply(count_info, function(x){
x %>%
html_nodes("div div a span.cmt_def") %>%
html_text %>%
gsub("[댓글 |개|,]","",.) %>%
as.integer() -> res
if(identical(res,integer(0))){
res<-0
}
return(res)
})
# Extract the number of shares.
share <-
sapply(count_info, function(x){
x %>%
html_nodes(xpath="div/div/a/span[2]") %>%
html_text %>%
gsub("[공유 |회|,]","",.) %>%
as.integer() -> res
if(identical(res,integer(0))){
res<-0
}
return(res)
})
# Assemble everything per post.
tar <- bind_cols(posts,
data.frame(like),
data.frame(comment),
data.frame(share))
# Close the control driver.
remDr$closeall()
# Stop the browser (pJS$stop() follows on the next line).
pJS$stop() | /R/코드_페이스북 크롤링.R | no_license | lunaseorim/1802 | R | false | false | 4,140 | r | # 셀레늄 패키지와 rvest 패키지를 설치합니다.
source("https://install-github.me/ropensci/RSelenium")
install.packages("rvest")
install.packages("dplyr")
# 패키지를 불러서 사용할 준비를 합니다.
library(RSelenium)
library(rvest)
library(dplyr)
# phantomjs 브라우저를 사용합니다.
pJS <- wdman::phantomjs(port = 4567L)
Sys.sleep(5)
# 컨트롤 드라이버를 연결합니다.
remDr <- remoteDriver(browserName = 'phantomjs', port = 4567L)
# 컨트롤 드라이버를 실행합니다.
remDr$open()
# 기업 페이지를 켭니다. jtbc 를 선택했습니다.
remDr$navigate("https://m.facebook.com/wlrwkddlseoskantnv")
# 스크린샷을 확인합니다. 아래에 계속 있는 코드들도 실제 실행시는 없어도 됩니다. 확인용도로만 사용하세요.
# remDr$screenshot(display = T)
Sys.sleep(5)
# 화면을 아래로 계속 내리는 동작을 합니다.
# 그래야 과거 포스트를 계속 불러와줍니다.
# while 문 안에 10을 큰 숫자로 바꾸면 더 많은 포스트를 불러옵니다.
webElem <- remDr$findElement(using = 'css selector', ".scrollArea")
webElem$sendKeysToElement(list(key = "end"))
Sys.sleep(3)
len <- 0
while(len<50){
webElems <- remDr$findElements(using = 'css selector', ".story_body_container")
len <- length(webElems)
print(len)
remDr$mouseMoveToLocation(webElement = webElems[[1]])
remDr$buttondown()
remDr$mouseMoveToLocation(webElement = webElems[[len]])
remDr$buttonup()
Sys.sleep(5)
}
# 포스트 단위로 구분해서 가져옵니다.
remDr$getPageSource()[[1]] %>%
read_html() %>%
html_nodes("article") -> articles
# 공유 컨텐츠를 분리합니다.
containers <-
lapply(articles, function(article)
article %>%
html_nodes(".story_body_container")
)
# 포스트 내의 글자만 가져와서 저장합니다.
# 공유 컨텐츠의 처리가 있어 조금 복잡합니다.
posts <-
lapply(containers, function(container){
if(length(container)==1){
container %>%
html_nodes("div span p") %>%
html_text %>%
paste0(collapse = " ") %>%
c("") -> res
}
if(length(container)==2){
c1 <- container[1] %>%
html_nodes("div span p") %>%
html_text %>%
paste0(collapse = " ")
c2 <- container[2] %>%
html_nodes("div span p") %>%
html_text %>%
paste0(collapse = " ")
res <- c(
ifelse(c1 != "",c1,""),
ifelse(c2 != "",c2,"")
)
res[1]<- ifelse(c1==c2,"",c1)
}
return(data.frame(content = res[1],
nested_content = res[2],
stringsAsFactors = F))
})
# data.frame으로 저장합니다.
posts <- bind_rows(posts)
# 좋아요, 댓글, 공유 수 정보를 가져오기 위해 포스트 단위로 구분해 저장합니다.
remDr$getPageSource()[[1]] %>%
read_html() %>%
html_nodes("footer") -> count_info
# 좋아요 수를 가져옵니다.
like <-
sapply(count_info, function(x){
x %>%
html_nodes("div div a span.like_def") %>%
html_text %>%
gsub("[좋아요 |개|,]","",.) %>%
as.integer() -> res
if(identical(res,integer(0))){
res<-0
}
return(res)
})
# 댓글 수를 가져옵니다.
comment <-
sapply(count_info, function(x){
x %>%
html_nodes("div div a span.cmt_def") %>%
html_text %>%
gsub("[댓글 |개|,]","",.) %>%
as.integer() -> res
if(identical(res,integer(0))){
res<-0
}
return(res)
})
# 공유 수를 가져옵니다.
share <-
sapply(count_info, function(x){
x %>%
html_nodes(xpath="div/div/a/span[2]") %>%
html_text %>%
gsub("[공유 |회|,]","",.) %>%
as.integer() -> res
if(identical(res,integer(0))){
res<-0
}
return(res)
})
# 포스트 단위로 저장합니다.
tar <- bind_cols(posts,
data.frame(like),
data.frame(comment),
data.frame(share))
# 컨트롤 드라이버를 종료합니다.
remDr$closeall()
# 브라우저를 종료합니다.
pJS$stop() |
# Cross-validation weights for an integrated-variance (IV) estimator.
#
# Either `beta` (from which the MA coefficients are derived as
# theta = diff(c(0, beta, 0))) or `theta` itself must be supplied; `beta`
# takes precedence when both are given. `H` is the number of weights
# returned; `ivest`, `sigma2v`, and `N` parameterize the variance matrix.
# With adjust = TRUE a finite-sample factor N/(N - 0:q) is applied.
# With diagnose = TRUE the variance matrix and its components A, B, C are
# returned instead of the weights.
IVEstCVWeights2 <- function(beta, theta, H, ivest = 1, sigma2v = 1, N = 23400,
                            adjust = TRUE, diagnose = FALSE) {
  # No smoothing horizon -> no weights.
  if (H == 0) {
    return(numeric())
  }
  # Resolve theta. BUG FIX: the original tested exists("beta", mode="numeric"),
  # which forces the (possibly missing) promise -- erroring when only `theta`
  # is supplied -- and can also pick up unrelated objects in enclosing
  # environments. missing() is the correct test for an argument.
  if (!missing(beta)) {
    theta <- diff(c(0, beta, 0))
  } else if (missing(theta)) {
    # BUG FIX: the original `return(numeric)` returned the function object
    # `numeric` itself rather than an empty numeric vector.
    return(numeric(0))
  }
  q <- length(theta) - 1

  # Preliminary estimator: autocovariances sigma2.phi[i+1] at lags 0..q.
  sigma2.phi <- numeric(length = q + 1)
  for (i in 0:q) {
    sigma2.phi[i + 1] <- sigma2v * sum(theta[0:(q - i) + 1] * theta[0:(q - i) + i + 1])
  }

  # Dimension of the weight system: 1 realized term + q lags + H horizon terms.
  K <- 1 + q + H
  # A: diag(2) except A[1,1] = 1.
  matrixA <- diag(2, nrow = K, ncol = K)
  matrixA[1] <- 1
  # Symmetric extension of the autocovariances (lags -q..q), used for C.
  sigma2.phi.long <- c(rev(sigma2.phi[-1]), sigma2.phi)
  matrixB <- diag(sigma2.phi[1] * 2, nrow = K, ncol = K)
  matrixB[1] <- matrixB[1] / 2
  matrixC <- diag(sum(sigma2.phi.long^2), nrow = K, ncol = K)
  # Off-diagonals at lags 1..q contribute to both B and C.
  # (seq_len guards the degenerate q = 0 case, where 1:q would run 1,0.)
  for (i in seq_len(q)) {
    psi <- sum(sigma2.phi.long[(i + 1):(1 + 2 * q)] * sigma2.phi.long[1:(1 + 2 * q - i)])
    if ((i + 1) < K) {
      diag(matrixB[(i + 1):K, 1:(K - i)]) <- sigma2.phi[i + 1] * 2
      diag(matrixB[1:(K - i), (i + 1):K]) <- sigma2.phi[i + 1] * 2
      diag(matrixC[(i + 1):K, 1:(K - i)]) <- psi
      diag(matrixC[1:(K - i), (i + 1):K]) <- psi
    } else if ((i + 1) == K) {
      matrixB[i + 1, 1] <- sigma2.phi[i + 1] * 2
      matrixB[1, i + 1] <- sigma2.phi[i + 1] * 2
      matrixC[i + 1, 1] <- psi
      matrixC[1, i + 1] <- psi
    }
  }
  # Lags q+1 .. min(2q, q+H) only contribute to C.
  # (guard prevents a descending 1:0-style sequence when the range is empty)
  hi <- min(2 * q, q + H)
  if (q + 1 <= hi) {
    for (i in (q + 1):hi) {
      psi <- sum(sigma2.phi.long[(i + 1):(1 + 2 * q)] * sigma2.phi.long[1:(1 + 2 * q - i)])
      if ((i + 1) < K) {
        diag(matrixC[(i + 1):K, 1:(K - i)]) <- psi
        diag(matrixC[1:(K - i), (i + 1):K]) <- psi
      } else if ((i + 1) == K) {
        matrixC[i + 1, 1] <- psi
        matrixC[1, i + 1] <- psi
      }
    }
  }

  # Full covariance of the estimation error.
  var.matrix <- 2 * ivest^2 / N * matrixA + 4 * ivest * matrixB + 4 * N * matrixC
  # Finite-sample adjustment applied to the first q+1 columns.
  adj.vector <- if (adjust) N / (N - 0:q) else rep(1, q + 1)
  if (diagnose) {
    # Return the components for inspection instead of the weights.
    return(list(var = var.matrix,
                A = matrixA,
                B = matrixB,
                C = matrixC))
  }
  # Optimal weights: solve the lower-right (H x H) block against the adjusted
  # cross-covariance with the first q+1 columns, summed across those columns.
  as.numeric(
    -solve(var.matrix[(2 + q):K, (2 + q):K, drop = FALSE],
           t(t(var.matrix[(2 + q):K, 1:(1 + q), drop = FALSE]) * adj.vector)
           %*% matrix(rep(1, q + 1), ncol = 1)))
}
| /IVEstCVWeights2.R | no_license | wangguansong/IVEstimators | R | false | false | 2,279 | r | IVEstCVWeights2 <- function(beta, theta, H, ivest=1, sigma2v=1, N=23400,
adjust=T, diagnose=F) {
if (H==0) {
return(numeric())
}
if (exists("beta", mode="numeric")) {
theta <- diff(c(0, beta, 0))
q <- length(theta) - 1
} else if (exists("theta", mode="numeric")) {
q <- length(theta) - 1
} else {
return(numeric)
}
# preliminary estimator
sigma2.phi <- numeric(length=q+1)
for (i in 0:q) {
sigma2.phi[i+1] <- sigma2v * sum( theta[0:(q-i)+1] * theta[0:(q-i)+i+1] )
}
# matrices
K <- 1 + q + H
# A
matrixA <- diag(2, nrow=K, ncol=K)
matrixA[1] <- 1
# major diagonal of B and C
sigma2.phi.long <- c(rev(sigma2.phi[-1]), sigma2.phi)
matrixB <- diag(sigma2.phi[1]*2, nrow=K, ncol=K)
matrixB[1] <- matrixB[1] / 2
matrixC <- diag(sum(sigma2.phi.long^2), nrow=K, ncol=K)
for (i in 1:q) {
psi <- sum(sigma2.phi.long[(i+1):(1+2*q)] * sigma2.phi.long[1:(1+2*q-i)])
if ((i+1)<K) {
#B
diag(matrixB[(i+1):K, 1:(K-i)]) <- sigma2.phi[i+1]*2
diag(matrixB[1:(K-i), (i+1):K]) <- sigma2.phi[i+1]*2
#C
diag(matrixC[(i+1):K, 1:(K-i)]) <- psi
diag(matrixC[1:(K-i), (i+1):K]) <- psi
} else if ((i+1)==K) {
#B
matrixB[i+1, 1] <- sigma2.phi[i+1]*2
matrixB[1, i+1] <- sigma2.phi[i+1]*2
#C
matrixC[i+1, 1] <- psi
matrixC[1, i+1] <- psi
}
}
for (i in (q+1):min(2*q, q+H)) {
psi <- sum(sigma2.phi.long[(i+1):(1+2*q)] * sigma2.phi.long[1:(1+2*q-i)])
if ((i+1)<K) {
diag(matrixC[(i+1):K, 1:(K-i)]) <- psi
diag(matrixC[1:(K-i), (i+1):K]) <- psi
} else if ((i+1)==K) {
matrixC[i+1, 1] <- psi
matrixC[1, i+1] <- psi
}
}
var.matrix <- 2*ivest^2/N*matrixA + 4*ivest*matrixB + 4*N*matrixC
if (adjust) {
adj.vector <- N / (N- 0:q)
} else {
adj.vector <- rep(1, q+1)
}
if (diagnose) {
return(list(var=var.matrix,
A=matrixA,
B=matrixB,
C=matrixC))
} else {
return(as.numeric(
-solve(var.matrix[(2+q):K, (2+q):K, drop=F],
t(t(var.matrix[(2+q):K, 1:(1+q), drop=F])*adj.vector)
%*% matrix(rep(1, q+1), ncol=1))))
}
}
|
# Subset of formatted density shade plots, with colors and with only 4 or 5 plots.
# Edited 19 July to have the minimum rather than mean temperature displayed
# Four focal NEON sites and human-readable facet labels for them.
sites2use <- c('STEI','BART','KONZ','JORN')
l1 <- labeller(siteID = c(STEI='Steigerwaldt', BART='Bartlett', KONZ='Konza', JORN='Jornada'))
# Order sites by bio6 so facets run from lowest to highest bio6 value.
sites_temporder <- neonsitedata %>% arrange(bio6) %>% dplyr::select(siteID, bio6)
# First version: one facet per site; each translucent polygon is the
# log10(body mass) density of one taxon; labels show overlap stat and temperature.
pdensshade4 <- ggplot(filter(mam_capture_sitemerge, year == 2015, siteID %in% sites2use) %>% mutate(siteID = factor(siteID, levels=sites_temporder$siteID))) +
stat_density(adjust = 2, size = 1, aes(x = log10(weight), group = taxonID, fill=taxonID), alpha = 0.25, geom='polygon', position = 'identity') + facet_wrap(~ siteID, ncol = 1, labeller = l1) +
scale_x_continuous(name = expression(paste('log'[10],' body mass')), breaks = c(1, 2), labels = c(10, 100), limits = c(0.5,2.8)) +
scale_y_continuous(name = 'probability density', expand = c(0,0), limits=c(0,9)) +
geom_text(aes(label = paste('NO =', round(ostat_norm,3)), x = 2.5, y = 8), color = 'black', data = o2015 %>% filter(siteID %in% sites2use, trait %in% 'logweight') %>% mutate(siteID = factor(siteID, levels=sites_temporder$siteID)), family = 'Helvetica') +
# NOTE(review): the header says "minimum rather than mean temperature", but this
# label uses bio1 while the ordering and the "clean" plot below use bio6 --
# confirm which variable was intended here.
geom_text(aes(label = paste0(round(bio1, 2), '°C'), x = 1, y = 8), color = 'black', data = neonsitedata %>% filter(siteID %in% sites2use) %>% mutate(siteID = factor(siteID, levels=sites_temporder$siteID)), family = 'Helvetica') +
theme_john + theme(axis.text.y=element_blank(), axis.ticks.y=element_blank(), legend.position = 'none', strip.text = element_text(family='Helvetica'))
ggsave('figs/msfigs/4paneldensplot.png', pdensshade4, height = 9, width = 4, dpi = 400)
# Shuffled 24-color palette interpolated from a fixed base-color list
# (seed fixed for reproducibility).
colorlist <- c('darkorange2', 'gold2', 'black', 'royalblue3','purple3', 'forestgreen', 'red3')
set.seed(27701)
colorvalues <- sample(colorRampPalette(colorlist)(24))
# Edited version with better facet display and location of text on the figure.
pdensshade4clean <- ggplot(filter(mam_capture_sitemerge, year == 2015, siteID %in% sites2use) %>% mutate(siteID = factor(siteID, levels=sites_temporder$siteID))) +
stat_density(adjust = 2, size = 1, aes(x = log10(weight), group = taxonID, fill=taxonID), alpha = 0.67, geom='polygon', position = 'identity') + facet_wrap(~ siteID, ncol = 1, labeller = l1) +
scale_fill_manual(values = colorvalues) +
scale_x_continuous(name = 'Body Mass (g)', breaks = c(1, 2, 3), labels = c(10, 100, 1000), limits = c(0.5,3)) +
scale_y_continuous(name = 'Probability Density', expand = c(0,0), limits=c(0,9)) +
geom_text(aes(label = paste('Overlap =', round(ostat_norm,3)), x = 2.5, y = 8.5), color = 'black', data = o2015 %>% filter(siteID %in% sites2use, trait %in% 'logweight') %>% mutate(siteID = factor(siteID, levels=sites_temporder$siteID)), family = 'Helvetica') +
geom_text(aes(label = paste0('MTCM = ', round(bio6, 1), '°C'), x = 0.5, y = 8.5), color = 'black', data = neonsitedata %>% filter(siteID %in% sites2use) %>% mutate(siteID = factor(siteID, levels=sites_temporder$siteID)), family = 'Helvetica', hjust = 0) +
theme_john + theme(axis.text.y=element_blank(), axis.ticks.y=element_blank(), legend.position = 'none', strip.text = element_text(family='Helvetica'), strip.background = element_blank())
ggsave('C:/Users/Q/google_drive/NEON_EAGER/Figures/msfigs2017jan/fig3.png', pdensshade4clean, height = 9, width = 4, dpi = 400)
| /code/vis/subsetdensityplot.r | no_license | NEON-biodiversity/mammalitv | R | false | false | 3,378 | r | # Subset of formatted density shade plots, with colors and with only 4 or 5 plots.
# Edited 19 July to have the minimum rather than mean temperature displayed
# Focal sites and facet labels.
sites2use <- c('STEI','BART','KONZ','JORN')
l1 <- labeller(siteID = c(STEI='Steigerwaldt', BART='Bartlett', KONZ='Konza', JORN='Jornada'))
# Order facets by bio6.
sites_temporder <- neonsitedata %>% arrange(bio6) %>% dplyr::select(siteID, bio6)
# First version of the 4-panel taxon body-mass density plot.
pdensshade4 <- ggplot(filter(mam_capture_sitemerge, year == 2015, siteID %in% sites2use) %>% mutate(siteID = factor(siteID, levels=sites_temporder$siteID))) +
stat_density(adjust = 2, size = 1, aes(x = log10(weight), group = taxonID, fill=taxonID), alpha = 0.25, geom='polygon', position = 'identity') + facet_wrap(~ siteID, ncol = 1, labeller = l1) +
scale_x_continuous(name = expression(paste('log'[10],' body mass')), breaks = c(1, 2), labels = c(10, 100), limits = c(0.5,2.8)) +
scale_y_continuous(name = 'probability density', expand = c(0,0), limits=c(0,9)) +
geom_text(aes(label = paste('NO =', round(ostat_norm,3)), x = 2.5, y = 8), color = 'black', data = o2015 %>% filter(siteID %in% sites2use, trait %in% 'logweight') %>% mutate(siteID = factor(siteID, levels=sites_temporder$siteID)), family = 'Helvetica') +
# NOTE(review): label uses bio1 though ordering uses bio6 -- confirm intent.
geom_text(aes(label = paste0(round(bio1, 2), '°C'), x = 1, y = 8), color = 'black', data = neonsitedata %>% filter(siteID %in% sites2use) %>% mutate(siteID = factor(siteID, levels=sites_temporder$siteID)), family = 'Helvetica') +
theme_john + theme(axis.text.y=element_blank(), axis.ticks.y=element_blank(), legend.position = 'none', strip.text = element_text(family='Helvetica'))
ggsave('figs/msfigs/4paneldensplot.png', pdensshade4, height = 9, width = 4, dpi = 400)
# Reproducibly shuffled 24-color palette from the base colors.
colorlist <- c('darkorange2', 'gold2', 'black', 'royalblue3','purple3', 'forestgreen', 'red3')
set.seed(27701)
colorvalues <- sample(colorRampPalette(colorlist)(24))
# Edited version with better facet display and location of text on the figure.
pdensshade4clean <- ggplot(filter(mam_capture_sitemerge, year == 2015, siteID %in% sites2use) %>% mutate(siteID = factor(siteID, levels=sites_temporder$siteID))) +
stat_density(adjust = 2, size = 1, aes(x = log10(weight), group = taxonID, fill=taxonID), alpha = 0.67, geom='polygon', position = 'identity') + facet_wrap(~ siteID, ncol = 1, labeller = l1) +
scale_fill_manual(values = colorvalues) +
scale_x_continuous(name = 'Body Mass (g)', breaks = c(1, 2, 3), labels = c(10, 100, 1000), limits = c(0.5,3)) +
scale_y_continuous(name = 'Probability Density', expand = c(0,0), limits=c(0,9)) +
geom_text(aes(label = paste('Overlap =', round(ostat_norm,3)), x = 2.5, y = 8.5), color = 'black', data = o2015 %>% filter(siteID %in% sites2use, trait %in% 'logweight') %>% mutate(siteID = factor(siteID, levels=sites_temporder$siteID)), family = 'Helvetica') +
geom_text(aes(label = paste0('MTCM = ', round(bio6, 1), '°C'), x = 0.5, y = 8.5), color = 'black', data = neonsitedata %>% filter(siteID %in% sites2use) %>% mutate(siteID = factor(siteID, levels=sites_temporder$siteID)), family = 'Helvetica', hjust = 0) +
theme_john + theme(axis.text.y=element_blank(), axis.ticks.y=element_blank(), legend.position = 'none', strip.text = element_text(family='Helvetica'), strip.background = element_blank())
ggsave('C:/Users/Q/google_drive/NEON_EAGER/Figures/msfigs2017jan/fig3.png', pdensshade4clean, height = 9, width = 4, dpi = 400)
|
# Exploratory analysis of the SODRA wage data for economic-activity
# code 452000: wage histogram, top-5 companies by summed average wage,
# their monthly wage trajectories, and peak insured head-count.
library(dplyr)
library(tidyverse)
library(readr)
library(ggplot2)
data <- read_csv("../data/lab_sodra.csv")
# Histogram of average wages within activity code 452000.
data %>%
filter(ecoActCode == 452000) %>%
ggplot(aes(x = avgWage)) +
geom_histogram(bins = 100)
# Top-5 company codes by total average wage within the activity code.
filtered2 <- data %>%
filter(ecoActCode == 452000) %>% group_by(code) %>%
summarise(suma = sum(avgWage)) %>%
arrange(desc(suma)) %>% head(5)
# Attach all monthly records for those top-5 companies.
merged <- merge(filtered2, data, by = "code")
# Monthly average-wage trajectory per company.
merged %>%
ggplot(aes(x = month, y = avgWage, group = name)) + theme_minimal()+
geom_line(aes(colour = name)) + geom_point(aes(color = name)) +
theme(axis.text.x=element_blank())
# Peak insured head-count per company, largest first.
# BUG FIX: the "+" before geom_col() started a new line, which terminated the
# ggplot expression early and made "+ geom_col(...)" a separate (failing)
# statement; the "+" must end the preceding line.
merged %>% group_by(name) %>%
summarise(maxNumInsured = max(numInsured)) %>%
ggplot(aes(x = reorder(name, -maxNumInsured), y = maxNumInsured, fill = name)) +
geom_col(aes(fill = name)) +
theme(axis.text.x=element_blank()) + xlab('name')
| /laboratorinis/R/lab.R | no_license | Airidasurb/KTU-duomenu-vizualizacija | R | false | false | 844 | r | library(dplyr)
library(tidyverse)
library(readr)
library(ggplot2)
data <- read_csv("../data/lab_sodra.csv")
# Histogram of average wages within activity code 452000.
data %>%
filter(ecoActCode == 452000) %>%
ggplot(aes(x = avgWage)) +
geom_histogram(bins = 100)
# Top-5 company codes by total average wage within the activity code.
filtered2 <- data %>%
filter(ecoActCode == 452000) %>% group_by(code) %>%
summarise(suma = sum(avgWage)) %>%
arrange(desc(suma)) %>% head(5)
# Attach all monthly records for those top-5 companies.
merged <- merge(filtered2, data, by = "code")
# Monthly average-wage trajectory per company.
merged %>%
ggplot(aes(x = month, y = avgWage, group = name)) + theme_minimal()+
geom_line(aes(colour = name)) + geom_point(aes(color = name)) +
theme(axis.text.x=element_blank())
# Peak insured head-count per company, largest first.
# BUG FIX: the leading "+" on the geom_col() line terminated the ggplot
# expression early; the "+" must end the preceding line instead.
merged %>% group_by(name) %>%
summarise(maxNumInsured = max(numInsured)) %>%
ggplot(aes(x = reorder(name, -maxNumInsured), y = maxNumInsured, fill = name)) +
geom_col(aes(fill = name)) +
theme(axis.text.x=element_blank()) + xlab('name')
|
source('simData_methods.R')
source('./RE-EM Forest/functions.R')
library(REEMtree)
library(gtools)
library(rlist)
library(caret)
#GENERATE DATA # Date Structured as follows: X variables, y variable, patient/cluster variable
#######################################################################################################
# n <- 100
# T <- 5
# data <- as.data.frame(sim_2(n,T)) # genearate CS data set (no random effect)
# colnames(data)[colnames(data) == 'V401'] <- 'y'
#
# X = as.data.frame( data[,-ncol(data)] ) # remove y value from for X matrix
# y = data$y # assign target variable
# # add patient/cluster information
# for (i in 1:n){
# data$patient[(1+(i-1)*T):(i*T)] = rep(i,T)
# }
#
#
# # Testing data
# data_test <- as.data.frame(sim_2(100,5)) # generate AR data set (no random effect--> use sim_3_RE() for RE)
# X_test = as.data.frame( data_test[,-ncol(data_test)] ) # remove y value from for X matrix
# y_test = data_test[,ncol(data_test)] # assign target variable
# # add patient/cluster information
# for (i in 1:n){
# data_test$patient[(1+(i-1)*T):(i*T)] = rep(i,T)
# }
#######################################################################################################
# RE-EM Forest Functions
# ***Currently the data must have a precise format
################# # parameter to include : dt_max_depth, mtry
# ** algorithm subsets features once for each tree. Does NOT subset at each step of each decision tree. This is issue**
# Fit a RE-EM random forest: `ntree` RE-EM trees, each trained on a bootstrap
# resample of the training data using a random subset of `mtry` predictors.
# `train_data` must be laid out as: predictor columns, then `y`, then `patient`
# (the cluster id used for the random intercept).
# Returns a list of fitted REEMtree objects.
# NOTE (pre-existing limitation, see comment above): predictors are subsampled
# once per tree, not at every split as in a canonical random forest.
random_forest_algorithm = function(train_data, ntree=500, mtry=max(floor((ncol(train_data)-2)/3), 1) ){
  # Preallocate instead of appending on every iteration.
  forest <- vector("list", ntree)
  for(i in seq_len(ntree)){
    # BUG FIX: the original resampled the global `data` object, silently
    # ignoring the `train_data` argument.
    boot <- bootstrap(train_data, nrow(train_data))
    # All columns except the last two (y, patient) are predictors.
    X_boot <- boot[, 1:(ncol(boot)-2)]
    # Random subset of `mtry` candidate features for this tree.
    sub_feat <- subset_features(X_boot, mtry)
    formula <- as.formula(paste("y~", paste(sub_feat, collapse="+")))
    ##### Decision Tree Implementation
    # Mixed-effects regression tree with a random intercept per patient.
    tree <- REEMtree(formula = formula, data = boot, random = ~1|patient)
    #####
    forest[[i]] <- tree
  }
  return(forest)
}
# ***Currently the data must have a precise format
################# # parameter to include : dt_max_depth, mtry
# Bagged RE-EM trees: like random_forest_algorithm() but every tree sees the
# full predictor set (no per-tree feature subsampling).
# `mtry` is accepted for interface parity with random_forest_algorithm() but
# is intentionally unused here.
bagging_algorithm = function(train_data, ntree=500, mtry=max(floor((ncol(train_data)-2)/3), 1) ){
  forest <- vector("list", ntree)
  for(i in seq_len(ntree)){
    # BUG FIX: resample the supplied training data, not the global `data`.
    boot <- bootstrap(train_data, nrow(train_data))
    # All columns except the last two (y, patient) are predictors.
    X_boot <- boot[, 1:(ncol(boot)-2)]
    features <- colnames(X_boot)
    formula <- as.formula(paste("y~", paste(features, collapse="+")))
    ##### Decision Tree Implementation
    # Mixed-effects regression tree with a random intercept per patient.
    tree <- REEMtree(formula = formula, data = boot, random = ~1|patient)
    #####
    forest[[i]] <- tree
  }
  return(forest)
}
# predict function: input (1) a forest_model returned from the random_forest_algorithm() function
# and (2) test data. Returns predictions for the test data.
# Average the per-tree predictions of a fitted forest (list of models with a
# predict() method) over `test_data`. Random effects are not estimated at
# prediction time, so previously unseen clusters are allowed.
# Returns a numeric vector with one prediction per row of `test_data`.
forest_predict = function(forest_model, test_data){
  # One column of predictions per tree.
  all_predictions <- matrix(NA_real_, nrow = nrow(test_data), ncol = length(forest_model))
  for(i in seq_along(forest_model)){
    # BUG FIX: use [[i]] to extract the i-th model; [i] yields a one-element
    # list, on which predict() cannot dispatch.
    all_predictions[, i] <- unlist(predict(forest_model[[i]], test_data, EstimateRandomEffects=FALSE))
  }
  # Forest prediction = mean across trees for each observation.
  predictions <- rowMeans(all_predictions)
  return(predictions)
}
#reem.forest <- random_forest_algorithm(data, ntree = 500) # ~ 5 minutes to run
# *** Computationally slow ~20 min for example data
###### Permutation Test for Variable Importance
# returns list of [1] names of variables in order of importance [2] dataframe of variables and their error difference
# Permutation-based variable importance for a fitted forest.
# For each predictor, that column is permuted in the test set and the change
# in test MSE relative to the unpermuted baseline is recorded.
# Returns a list: [[1]] predictor names ordered by importance,
# [[2]] a one-row data frame of |baseline MSE - permuted MSE| per predictor.
# NOTE(review): this treats the LAST column of test_data as the response,
# which differs from the "X, y, patient" layout assumed by the training
# functions above -- confirm the expected column layout before use.
forest_importance = function(forest_model, test_data){
X_test = as.data.frame( test_data[,-ncol(test_data)] ) # predictors = all but the last column
y_test = test_data[,ncol(test_data)] # response = last column
pred <- forest_predict(forest_model, test_data)
base_error <- mean((pred-y_test)^2) # baseline MSE from unpermuted data
error_difference <- as.data.frame(matrix( ,1,ncol=0)) # empty 1-row frame, grown one column per predictor
for(var in colnames(X_test)){
data_perm <- test_data
data_perm[var] <- permute(test_data[[var]]) # permute the column of interest (gtools::permute)
perm_predictions <- forest_predict(forest_model = forest_model, test_data = data_perm)
perm_error <- mean((perm_predictions-y_test)^2)
error_difference[var] <- abs(base_error - perm_error) # growing a data.frame column-by-column is slow (acknowledged by original author)
}
# NOTE(review): base R has no sort() method for data frames; whether this
# returns the columns ordered by value should be verified -- it may rely on
# sort.default behavior that does not actually reorder all columns.
var_importance <- colnames(sort(error_difference, decreasing = TRUE))
return(list(var_importance, error_difference))
}
| /Relevant/RE-EM Forest/reem_forest.R | no_license | dankojis/random-forest-extensions | R | false | false | 5,005 | r | source('simData_methods.R')
source('./RE-EM Forest/functions.R')
library(REEMtree)
library(gtools)
library(rlist)
library(caret)
#GENERATE DATA # Date Structured as follows: X variables, y variable, patient/cluster variable
#######################################################################################################
# n <- 100
# T <- 5
# data <- as.data.frame(sim_2(n,T)) # genearate CS data set (no random effect)
# colnames(data)[colnames(data) == 'V401'] <- 'y'
#
# X = as.data.frame( data[,-ncol(data)] ) # remove y value from for X matrix
# y = data$y # assign target variable
# # add patient/cluster information
# for (i in 1:n){
# data$patient[(1+(i-1)*T):(i*T)] = rep(i,T)
# }
#
#
# # Testing data
# data_test <- as.data.frame(sim_2(100,5)) # generate AR data set (no random effect--> use sim_3_RE() for RE)
# X_test = as.data.frame( data_test[,-ncol(data_test)] ) # remove y value from for X matrix
# y_test = data_test[,ncol(data_test)] # assign target variable
# # add patient/cluster information
# for (i in 1:n){
# data_test$patient[(1+(i-1)*T):(i*T)] = rep(i,T)
# }
#######################################################################################################
# RE-EM Forest Functions
# ***Curently data must have precise format
################# # parameter to include : dt_max_depth, mtry
# ** algorithm subsets features once for each tree. Does NOT subset at each step of each decision tree. This is issue**
# Fit a RE-EM random forest: `ntree` RE-EM trees, each trained on a bootstrap
# resample of the training data using a random subset of `mtry` predictors.
# `train_data` layout: predictor columns, then `y`, then `patient`.
# Returns a list of fitted REEMtree objects.
random_forest_algorithm = function(train_data, ntree=500, mtry=max(floor((ncol(train_data)-2)/3), 1) ){
  # Preallocate instead of appending on every iteration.
  forest <- vector("list", ntree)
  for(i in seq_len(ntree)){
    # BUG FIX: the original resampled the global `data` object, silently
    # ignoring the `train_data` argument.
    boot <- bootstrap(train_data, nrow(train_data))
    # All columns except the last two (y, patient) are predictors.
    X_boot <- boot[, 1:(ncol(boot)-2)]
    # Random subset of `mtry` candidate features for this tree.
    sub_feat <- subset_features(X_boot, mtry)
    formula <- as.formula(paste("y~", paste(sub_feat, collapse="+")))
    ##### Decision Tree Implementation
    tree <- REEMtree(formula = formula, data = boot, random = ~1|patient)
    #####
    forest[[i]] <- tree
  }
  return(forest)
}
# ***Curently data must have precise format
################# # parameter to include : dt_max_depth, mtry
# Bagged RE-EM trees: like random_forest_algorithm() but every tree sees the
# full predictor set. `mtry` is accepted for interface parity but unused.
bagging_algorithm = function(train_data, ntree=500, mtry=max(floor((ncol(train_data)-2)/3), 1) ){
  forest <- vector("list", ntree)
  for(i in seq_len(ntree)){
    # BUG FIX: resample the supplied training data, not the global `data`.
    boot <- bootstrap(train_data, nrow(train_data))
    # All columns except the last two (y, patient) are predictors.
    X_boot <- boot[, 1:(ncol(boot)-2)]
    features <- colnames(X_boot)
    formula <- as.formula(paste("y~", paste(features, collapse="+")))
    ##### Decision Tree Implementation
    tree <- REEMtree(formula = formula, data = boot, random = ~1|patient)
    #####
    forest[[i]] <- tree
  }
  return(forest)
}
# predict function: input (1) a forest_model returned from the random_forest_algorithm() function
# and (2) test data. Returns predictions for the test data.
# Average the per-tree predictions of a fitted forest (list of models with a
# predict() method) over `test_data`. Returns one prediction per row.
forest_predict = function(forest_model, test_data){
  # One column of predictions per tree.
  all_predictions <- matrix(NA_real_, nrow = nrow(test_data), ncol = length(forest_model))
  for(i in seq_along(forest_model)){
    # BUG FIX: use [[i]] to extract the i-th model; [i] yields a one-element
    # list, on which predict() cannot dispatch.
    all_predictions[, i] <- unlist(predict(forest_model[[i]], test_data, EstimateRandomEffects=FALSE))
  }
  # Forest prediction = mean across trees for each observation.
  predictions <- rowMeans(all_predictions)
  return(predictions)
}
#reem.forest <- random_forest_algorithm(data, ntree = 500) # ~ 5 minutes to run
# *** Computationally slow ~20 min for example data
###### Permutation Test for Variable Importance
# returns list of [1]names of variables in order of importance [2]dataframe of varaibles and their error difference
# Permutation-based variable importance for a fitted forest: permute each
# predictor column in the test set and record |baseline MSE - permuted MSE|.
# Returns a list: [[1]] predictor names ordered by importance,
# [[2]] a one-row data frame of the error differences per predictor.
# NOTE(review): assumes the LAST column of test_data is the response, which
# differs from the "X, y, patient" layout used by the training functions.
forest_importance = function(forest_model, test_data){
X_test = as.data.frame( test_data[,-ncol(test_data)] ) # predictors = all but the last column
y_test = test_data[,ncol(test_data)] # response = last column
pred <- forest_predict(forest_model, test_data)
base_error <- mean((pred-y_test)^2) # baseline MSE from unpermuted data
error_difference <- as.data.frame(matrix( ,1,ncol=0)) # empty 1-row frame, grown one column per predictor
for(var in colnames(X_test)){
data_perm <- test_data
data_perm[var] <- permute(test_data[[var]]) # permute the column of interest (gtools::permute)
perm_predictions <- forest_predict(forest_model = forest_model, test_data = data_perm)
perm_error <- mean((perm_predictions-y_test)^2)
error_difference[var] <- abs(base_error - perm_error) # growing a data.frame column-by-column is slow (acknowledged by original author)
}
# NOTE(review): base R has no sort() method for data frames -- verify this
# actually orders the columns by value.
var_importance <- colnames(sort(error_difference, decreasing = TRUE))
return(list(var_importance, error_difference))
}
|
library(igraph)
### Name: component_distribution
### Title: Connected components of a graph
### Aliases: component_distribution components no.clusters clusters
###   is.connected cluster.distribution count_components is_connected
### Keywords: graphs

### ** Examples

# Draw a G(n, p) random graph and list the vertices belonging to each
# connected component.
net <- sample_gnp(20, 1/20)
comp <- components(net)
groups(comp)
| /data/genthat_extracted_code/igraph/examples/components.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 335 | r | library(igraph)
### Name: component_distribution
### Title: Connected components of a graph
### Aliases: component_distribution components no.clusters clusters
###   is.connected cluster.distribution count_components is_connected
### Keywords: graphs

### ** Examples

# Example: generate a G(n, p) random graph, find its connected components,
# and list the vertices in each component.
g <- sample_gnp(20, 1/20)
clu <- components(g)
groups(clu)
|
library(qdap)
### Name: gantt_plot
### Title: Gantt Plot
### Aliases: gantt_plot
### Keywords: Gantt
### ** Examples
## Not run:
##D with(rajSPLIT, gantt_plot(text.var = dialogue,
##D grouping.var = person, size=4))
##D
##D with(rajSPLIT, gantt_plot(text.var = dialogue,
##D grouping.var = list(fam.aff, sex), rm.var = act,
##D title = "Romeo and Juliet's dialogue"))
##D
##D with(rajSPLIT, gantt_plot(dialogue, list(fam.aff, sex), act,
##D transform=T))
##D
##D rajSPLIT2 <- rajSPLIT
##D
##D rajSPLIT2$newb <- as.factor(sample(LETTERS[1:2], nrow(rajSPLIT2),
##D replace=TRUE))
##D
##D z <- with(rajSPLIT2, gantt_plot(dialogue, list(fam.aff, sex),
##D list(act, newb), size = 4))
##D
##D library(ggplot2); library(scales); library(RColorBrewer); library(grid)
##D z + theme(panel.spacing = unit(1, "lines")) + scale_colour_grey()
##D z + scale_colour_brewer(palette="Dark2")
##D
##D ## Fill Variable Example
##D dat <- rajSPLIT[rajSPLIT$act == 1, ]
##D dat$end_mark <- factor(end_mark(dat$dialogue))
##D
##D with(dat, gantt_plot(text.var = dialogue, grouping.var = list(person, sex),
##D fill.var=end_mark))
##D
##D ## Repeated Measures with Fill Example
##D rajSPLIT$end_mark <- end_mark(rajSPLIT$dialogue)
##D
##D with(rajSPLIT, gantt_plot(text.var = dialogue,
##D grouping.var = list(fam.aff), rm.var = list(act),
##D fill.var=end_mark, title = "Romeo and Juliet's dialogue"))
##D
##D ## Repeated Measures Sentence Type Example
##D with(rajSPLIT, gantt_plot(text.var = dialogue,
##D grouping.var = list(fam.aff, sex), rm.var = list(end_mark, act),
##D title = "Romeo and Juliet's dialogue"))
##D
##D ## Reset rajSPLIT
##D rajSPLIT <- qdap::rajSPLIT
##D
##D ## Animate It
##D ##=================
##D ani_gantt <- with(mraja1, gantt_plot(dialogue, person))
##D
##D library(animation)
##D loc <- reports::folder(animation_gantt)
##D
##D ## Set up the plotting function
##D oopt <- animation::ani.options(interval = 0.1)
##D
##D FUN <- function() {
##D out <- Animate(ani_gantt)
##D lapply(out, function(x) {
##D print(x)
##D animation::ani.pause()
##D })
##D
##D }
##D
##D type <- if(.Platform$OS.type == "windows") shell else system
##D saveVideo(FUN(), video.name = "animation.avi", interval = 0.1, outdir = loc)
##D
##D saveLatex(FUN(), autoplay = TRUE, loop = FALSE, latex.filename = "tester.tex",
##D caption = "animated dialogue", outdir = loc, ani.type = "pdf",
##D ani.dev = "pdf", ani.width = 5, ani.height = 5.5, interval = 0.1)
##D
##D
##D saveHTML(FUN(), autoplay = FALSE, loop = TRUE, verbose = FALSE,
##D ani.width=600, ani.height=280,
##D outdir = file.path(loc, "new"), single.opts =
##D "'controls': ['first', 'play', 'loop', 'speed'], 'delayMin': 0")
##D
## End(Not run)
| /data/genthat_extracted_code/qdap/examples/gantt_plot.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 2,818 | r | library(qdap)
### Name: gantt_plot
### Title: Gantt Plot
### Aliases: gantt_plot
### Keywords: Gantt
### ** Examples
## Not run:
##D with(rajSPLIT, gantt_plot(text.var = dialogue,
##D grouping.var = person, size=4))
##D
##D with(rajSPLIT, gantt_plot(text.var = dialogue,
##D grouping.var = list(fam.aff, sex), rm.var = act,
##D title = "Romeo and Juliet's dialogue"))
##D
##D with(rajSPLIT, gantt_plot(dialogue, list(fam.aff, sex), act,
##D transform=T))
##D
##D rajSPLIT2 <- rajSPLIT
##D
##D rajSPLIT2$newb <- as.factor(sample(LETTERS[1:2], nrow(rajSPLIT2),
##D replace=TRUE))
##D
##D z <- with(rajSPLIT2, gantt_plot(dialogue, list(fam.aff, sex),
##D list(act, newb), size = 4))
##D
##D library(ggplot2); library(scales); library(RColorBrewer); library(grid)
##D z + theme(panel.spacing = unit(1, "lines")) + scale_colour_grey()
##D z + scale_colour_brewer(palette="Dark2")
##D
##D ## Fill Variable Example
##D dat <- rajSPLIT[rajSPLIT$act == 1, ]
##D dat$end_mark <- factor(end_mark(dat$dialogue))
##D
##D with(dat, gantt_plot(text.var = dialogue, grouping.var = list(person, sex),
##D fill.var=end_mark))
##D
##D ## Repeated Measures with Fill Example
##D rajSPLIT$end_mark <- end_mark(rajSPLIT$dialogue)
##D
##D with(rajSPLIT, gantt_plot(text.var = dialogue,
##D grouping.var = list(fam.aff), rm.var = list(act),
##D fill.var=end_mark, title = "Romeo and Juliet's dialogue"))
##D
##D ## Repeated Measures Sentence Type Example
##D with(rajSPLIT, gantt_plot(text.var = dialogue,
##D grouping.var = list(fam.aff, sex), rm.var = list(end_mark, act),
##D title = "Romeo and Juliet's dialogue"))
##D
##D ## Reset rajSPLIT
##D rajSPLIT <- qdap::rajSPLIT
##D
##D ## Animate It
##D ##=================
##D ani_gantt <- with(mraja1, gantt_plot(dialogue, person))
##D
##D library(animation)
##D loc <- reports::folder(animation_gantt)
##D
##D ## Set up the plotting function
##D oopt <- animation::ani.options(interval = 0.1)
##D
##D FUN <- function() {
##D out <- Animate(ani_gantt)
##D lapply(out, function(x) {
##D print(x)
##D animation::ani.pause()
##D })
##D
##D }
##D
##D type <- if(.Platform$OS.type == "windows") shell else system
##D saveVideo(FUN(), video.name = "animation.avi", interval = 0.1, outdir = loc)
##D
##D saveLatex(FUN(), autoplay = TRUE, loop = FALSE, latex.filename = "tester.tex",
##D caption = "animated dialogue", outdir = loc, ani.type = "pdf",
##D ani.dev = "pdf", ani.width = 5, ani.height = 5.5, interval = 0.1)
##D
##D
##D saveHTML(FUN(), autoplay = FALSE, loop = TRUE, verbose = FALSE,
##D ani.width=600, ani.height=280,
##D outdir = file.path(loc, "new"), single.opts =
##D "'controls': ['first', 'play', 'loop', 'speed'], 'delayMin': 0")
##D
## End(Not run)
|
# Apply vec2(row, val = val) to every row of `x` and transpose the result,
# so the output has one row per row of `x`. (`vec2` is defined elsewhere
# in the package.)
rep1 <- function(x, val) {
  t(apply(x, 1, vec2, val = val))
}
| /R/rep1.R | no_license | frousseu/rNest | R | false | false | 79 | r | rep1 <-
function(x,val){
# Row-wise application of vec2 (defined elsewhere); t() transposes so the
# result has one row per row of `x`.
t(apply(x,1,vec2,val=val))
}
|
# Custom functions are camelCase. Arrays, parameters, and arguments are PascalCase
# Dependency functions are not embedded in master functions, and are marked with the flag dependency in the documentation
# []-notation is used wherever possible, and $-notation is avoided.
######################################### Load Required Libraries ###########################################
# Record and print the application start time (used to report elapsed time).
Start<-print(Sys.time())
# If running from UW-Madison
# Load doParallel, installing it first if it is not available.
# NOTE(review): suppressWarnings(require(...)) is used as an install-if-missing
# guard; requireNamespace() would be the modern idiom -- left as-is.
if (suppressWarnings(require("doParallel"))==FALSE) {
    install.packages("doParallel",repos="http://cran.cnr.berkeley.edu/");
    library("doParallel");
    }
# Load RPostgreSQL, installing it first if it is not available.
if (suppressWarnings(require("RPostgreSQL"))==FALSE) {
    install.packages("RPostgreSQL",repos="http://cran.cnr.berkeley.edu/");
    library("RPostgreSQL");
    }
# Start a multicore cluster: 3 workers by default, or the count passed as the
# first command-line argument.
CommandArgument<-commandArgs(TRUE)
if (length(CommandArgument)==0) {
    Cluster<-makeCluster(3)
    } else {
    Cluster<-makeCluster(as.numeric(CommandArgument[1]))
    }
#############################################################################################################
##################################### DATA DOWNLOAD FUNCTIONS, FIDELITY #####################################
#############################################################################################################
# No functions at this time
########################################### Data Download Script ############################################
# Print current status to the terminal.
print(paste("Load postgres tables",Sys.time()))
# If RUNNING FROM UW-MADISON:
# Read database credentials from the local config file (key/value rows).
Credentials<-as.matrix(read.table("Credentials.yml",row.names=1))
# Connect to PostgreSQL using those credentials.
Driver <- dbDriver("PostgreSQL") # Establish database driver
Connection <- dbConnect(Driver, dbname = Credentials["database:",], host = Credentials["host:",], port = Credentials["port:",], user = Credentials["user:",])
# Query the NLP sentence table from PostgreSQL.
DeepDiveData<-dbGetQuery(Connection,"SELECT docid, sentid, words, poses FROM nlp_sentences_352")
# IF TESTING IN 402 (local machine), use this connection instead:
#Driver <- dbDriver("PostgreSQL") # Establish database driver
#Connection <- dbConnect(Driver, dbname = "labuser", host = "localhost", port = 5432, user = "labuser")
#DeepDiveData<-dbGetQuery(Connection,"SELECT docid, sentid, words, poses FROM pbdb_fidelity.pbdb_fidelity_data")
# Record initial dataset statistics for the run report.
Description1<-"Initial Data"
# Initial number of distinct documents and total rows in DeepDiveData.
Docs1<-length((unique(DeepDiveData[,"docid"])))
Rows1<-nrow(DeepDiveData)
Clusters1<-0
#############################################################################################################
###################################### DATA CLEANING FUNCTIONS, FIDELITY ####################################
#############################################################################################################
# No functions at this time
############################################ Data Cleaning Script ###########################################
# print current status to terminal
print(paste("Clean DeepDiveData",Sys.time()))
# Strip the postgres array braces ({ and }) from the comma-separated word lists.
DeepDiveData[,"words"]<-gsub("\\{|\\}","",DeepDiveData[,"words"])
# Expand the abbreviation "Fm" to "Formation" when it is a standalone token
# (flanked by the commas that delimit tokens).
# NOTE(review): this misses "Fm" as the first or last token of a sentence,
# since those are not comma-flanked on both sides -- confirm intended.
DeepDiveData[,"words"]<-gsub(",Fm,",",Formation,",DeepDiveData[,"words"])
# Strip the braces from the part-of-speech column as well.
DeepDiveData[,"poses"]<-gsub("\\{|\\}","",DeepDiveData[,"poses"])
# Turn the comma-separated POS list into a space-separated one.
DeepDiveData[,"poses"]<-gsub(","," ",DeepDiveData[,"poses"])
# Space-separated copy of the word lists, used for the grep searches below.
CleanedDDWords<-gsub(","," ",DeepDiveData[,"words"])
# Expand any remaining "Fm" occurrences.
# NOTE(review): this pattern is unanchored, so it also rewrites longer tokens
# that merely contain "Fm" (e.g. "Fms" -> "Formations") -- that may be
# intentional for plurals, but verify.
CleanedDDWords<-gsub("Fm", "Formation", CleanedDDWords)
#############################################################################################################
###################################### FORMATION SEARCH FUNCTIONS, FIDELITY #################################
#############################################################################################################
########################################### Formation Search Script #########################################
# Subset DeepDiveData to sentences that mention " formation" and are short
# enough (<= 350 characters) to be handled reliably downstream.
# print current status
print(paste("Search for the word ' formation' in DeepDiveData sentences",Sys.time()))
# Row indices of sentences containing " formation" (case-insensitive; the
# leading space avoids matching words that merely end in "formation").
FormationHits<-grep(" formation", ignore.case=TRUE, perl = TRUE, CleanedDDWords)
# Extract DeepDiveData rows corresponding with the formation hits.
SubsetDeepDive<-DeepDiveData[FormationHits,]
# Record stats for step 2.
Description2<-"Subset DeepDiveData to rows which contain the word 'formation'"
Docs2<-length((unique(SubsetDeepDive[,"docid"])))
Rows2<-nrow(SubsetDeepDive)
Clusters2<-0
# Flag sentences no longer than 350 characters. nchar() is vectorized, so a
# plain logical comparison replaces the original element-wise sapply() that
# coerced each logical to a character string and then compared "TRUE" to TRUE.
ShortSent<-nchar(SubsetDeepDive[,"words"])<=350
# Keep only the sentences within the character limit.
SubsetDeepDive<-SubsetDeepDive[which(ShortSent),]
# Record stats for step 3.
Description3<-"Remove sentences exceeding 350 characters"
Docs3<-length((unique(SubsetDeepDive[,"docid"])))
Rows3<-nrow(SubsetDeepDive)
Clusters3<-0
#############################################################################################################
####################################### NNP CLUSTER FUNCTIONS, FIDELITY #####################################
#############################################################################################################
# Split a sorted vector of token positions into maximal runs of consecutive
# integers.
#
# Args:
#   DeepDivePoses: numeric vector of token positions within one sentence
#     (e.g. the positions tagged NNP/CC).
# Returns:
#   A list with one element per run of consecutive positions. For a
#   zero-length input this yields list(NA), which downstream code removes
#   via the "NA" check on ClusterPosition.
findConsecutive<-function(DeepDivePoses) {
# A run breaks wherever the gap to the next position is not exactly 1.
gap_after<-which(diff(DeepDivePoses)!=1)
run_starts<-c(0,gap_after)+1
run_ends<-c(gap_after,length(DeepDivePoses))
lapply(seq_along(run_starts),function(i) DeepDivePoses[run_starts[i]:run_ends[i]])
}
############################################## NNP Cluster Script ###########################################
# Replace double-quote characters in the words and poses columns with the
# literal token "SLASH" so later string handling is not broken by quotes.
# NOTE(review): the original comment said "slashes", but the pattern matches
# double quotes; the replacement text "SLASH" appears to be a misnomer.
SubsetDeepDive[,"words"]<-gsub("\"","SLASH",SubsetDeepDive[,"words"])
SubsetDeepDive[,"poses"]<-gsub("\"","SLASH",SubsetDeepDive[,"poses"])
# print current status to terminal
print(paste("Extract NNPs from SubsetDeepDive rows",Sys.time()))
# Split each sentence's space-separated POS string into a vector of tags
# (one list element per SubsetDeepDive row), in parallel on the cluster.
DeepDivePoses<-parSapply(Cluster, SubsetDeepDive[,"poses"],function(x) unlist(strsplit(as.character(x)," ")))
# Name each list element with its SubsetDeepDive row number.
names(DeepDivePoses)<-1:nrow(SubsetDeepDive)
# Positions of proper-noun (NNP) and conjunction (CC) tags in each sentence.
# CC is included so phrases like "Middendorf And Black Creek Formations"
# (NNP, CC, NNP, NNP, NNP) form a single cluster.
DeepDiveNNPs<-parSapply(Cluster, DeepDivePoses,function(x) which(x=="NNP"|x=="CC"))
# print current status to terminal
print(paste("Find consecutive NNPs in DeepDiveNNPs",Sys.time()))
# Group each sentence's NNP/CC positions into runs of consecutive positions.
ConsecutiveNNPs<-sapply(DeepDiveNNPs, findConsecutive)
# Collapse each run into a comma-separated position string, giving one vector
# of cluster-position strings per sentence.
SentenceNNPs<-sapply(ConsecutiveNNPs,function(y) sapply(y,function(x) paste(x,collapse=",")))
# print current status to terminal
print(paste("Find words Associated with Conescutive NNPs",Sys.time()))
# Build a data frame with one row per NNP cluster.
# ClusterPosition holds the comma-separated token positions of each cluster.
ClusterPosition<-unlist(SentenceNNPs)
# Number of clusters found in each sentence.
ClusterCount<-sapply(SentenceNNPs,length)
# Repeat each SubsetDeepDive row number (stored in the names of SentenceNNPs)
# once per cluster in that sentence.
SubsetDDRow<-rep(names(SentenceNNPs),times=ClusterCount)
# Bind cluster position data with the row number data.
ClusterData<-as.data.frame(cbind(ClusterPosition,SubsetDDRow))
# cbind() coerced everything; convert row numbers back to numeric.
ClusterData[,"SubsetDDRow"]<-as.numeric(as.character(ClusterData[,"SubsetDDRow"]))
# Drop the "NA" clusters produced by findConsecutive() for sentences that had
# no NNP/CC tags at all.
ClusterData<-ClusterData[which(ClusterData[,"ClusterPosition"]!="NA"),]
# Attach the docid and sentid of the sentence each cluster came from.
docid<-SubsetDeepDive[ClusterData[,"SubsetDDRow"],"docid"]
sentid<-SubsetDeepDive[ClusterData[,"SubsetDDRow"],"sentid"]
ClusterData<-cbind(ClusterData, docid, sentid)
# Normalize the column types (cbind() coerced them again).
ClusterData[,"ClusterPosition"]<-as.character(ClusterData[,"ClusterPosition"])
ClusterData[,"docid"]<-as.character(ClusterData[,"docid"])
ClusterData[,"sentid"]<-as.numeric(as.character(ClusterData[,"sentid"]))
ClusterData[,"SubsetDDRow"]<-as.numeric(as.character(ClusterData[,"SubsetDDRow"]))
# Sentence text for each cluster's row.
ClusterSentences<-sapply(ClusterData[,"SubsetDDRow"], function (x) SubsetDeepDive[x,"words"])
# Split each sentence into its comma-separated tokens.
ClusterSentencesSplit<-sapply(ClusterSentences,function(x) unlist(strsplit(as.character(x),",")))
# Numeric token positions for each cluster.
NNPElements<-lapply(ClusterData[,"ClusterPosition"],function(x) as.numeric(unlist(strsplit(x,","))))
# One index per cluster row.
NumClusterVector<-1:nrow(ClusterData)
# Look up the actual words at each cluster's token positions.
ClusterWords<-sapply(NumClusterVector, function(y) sapply(NNPElements[y], function(x) ClusterSentencesSplit[[y]][x]))
# Collapse each cluster's words into a single space-separated string.
NNPWords<-sapply(ClusterWords, function(x) paste(array(x), collapse=" "))
# Bind the cluster text to the ClusterData frame.
ClusterData[,"NNPWords"]<-NNPWords
# Record stats for step 4.
Description4<-"Extract NPP clusters from SubsetDeepDive rows"
Docs4<-length(unique(ClusterData[,"docid"]))
Rows4<-length(unique(ClusterData[,"SubsetDDRow"]))
Clusters4<-nrow(ClusterData)
#############################################################################################################
##################################### FORMATION CLUSTERS FUNCTIONS, FIDELITY ################################
#############################################################################################################
# Capitalize the first letter of each space-separated word in a string
# (adapted from a Stack Exchange answer).
#
# Args:
#   x: a single character string.
# Returns:
#   The string with every word's first character upper-cased; the remaining
#   characters of each word are left unchanged.
simpleCap <- function(x) {
words<-strsplit(x, " ")[[1]]
first_letters<-toupper(substring(words, 1, 1))
remainders<-substring(words, 2)
paste0(first_letters, remainders, collapse=" ")
}
########################################### Formation Clusters Script #######################################
# Pull the clusters that contain the word "formation", normalize their
# capitalization, and truncate trailing text after "Formation"/"Formations".
# print current status to terminal
print(paste("Extract 'formation' clusters from ClusterData",Sys.time()))
# Indices of NNP clusters containing the word "formation".
FormationClusters<-grep(" formation",ClusterData[,"NNPWords"],ignore.case=TRUE,perl=TRUE) # We could do a search for tail, to ensure it's the last word
# Extract those rows from ClusterData.
FormationData<-ClusterData[FormationClusters,]
FormationData[,"docid"]<-as.character(FormationData[,"docid"])
# Keep the non-formation clusters separately. Guard the negative index:
# x[-integer(0),] would drop EVERY row when there are no formation hits.
if (length(FormationClusters)>0) {
PostFmClusters<-ClusterData[-FormationClusters,]
} else {
PostFmClusters<-ClusterData
}
# Record stats for step 5.
Description5<-"Extract NNP clusters containing the word 'formation'"
Docs5<-length(unique(FormationData[,"docid"]))
Rows5<-length(unique(FormationData[,"SubsetDDRow"]))
Clusters5<-nrow(FormationData)
# print current status to terminal
print(paste("Capitalize formation names appropriately",Sys.time()))
# Lower-case everything, then re-capitalize the first letter of each word.
FormationData[,"NNPWords"]<-tolower(FormationData[,"NNPWords"])
FormationData[,"NNPWords"]<-sapply(FormationData[,"NNPWords"], simpleCap)
# print current status to terminal
print(paste(" Remove all characters after 'Formation' or 'Formations'",Sys.time()))
# Rows containing romance-language particles ("Des", "Les") are excluded
# from the truncation below so such names are not mangled.
Des<-grep("Des",FormationData[,"NNPWords"], perl=TRUE, ignore.case=TRUE)
Les<-grep("Les",FormationData[,"NNPWords"], perl=TRUE, ignore.case=TRUE)
FrenchRows<-c(Des,Les)
# Rows whose cluster contains the plural "Formations".
PluralWithFrench<-grep("Formations",FormationData[,"NNPWords"], perl=TRUE, ignore.case=TRUE)
# Exclude the French rows from truncation.
Plural<-PluralWithFrench[which(!PluralWithFrench%in%FrenchRows)]
# Truncate everything after "Formations" on the (non-French) plural rows.
FormationsCut<-gsub("(Formations).*","\\1",FormationData[Plural,"NNPWords"])
FormationData[Plural,"NNPWords"]<-FormationsCut
# Remaining rows are singular "Formation" clusters.
SingularWithFrench<-which(!1:nrow(FormationData)%in%Plural)
# Exclude the French rows from truncation.
Singular<-SingularWithFrench[which(!SingularWithFrench%in%FrenchRows)]
# Truncate everything after "Formation" on the (non-French) singular rows.
FormationCut<-gsub("(Formation).*","\\1",FormationData[Singular,"NNPWords"])
FormationData[Singular,"NNPWords"]<-FormationCut
# Drop rows whose cluster is just the bare word "Formation". Guard the
# negative index: -which(...) with no matches would empty the data frame.
OnlyFormation<-which(FormationData[,"NNPWords"]=="Formation")
if (length(OnlyFormation)>0) {
FormationData<-FormationData[-OnlyFormation,]
}
# Record stats for step 6.
Description6<-"Remove rows that are just the word 'Formation'"
Docs6<-length(unique(FormationData[,"docid"]))
Rows6<-length(unique(FormationData[,"SubsetDDRow"]))
Clusters6<-nrow(FormationData)
# STEP THIRTEEN: Split the NNPClusters where there is an "And"
# (e.g. "Middendorf And Black Creek Formation" -> two formation names).
SplitFormations<-strsplit(FormationData[,"NNPWords"],'And ')
# Remove the blanks created by the splitting.
SplitFormationsClean<-sapply(SplitFormations,function(x) unlist(x)[unlist(x)!=""])
# Number of pieces each cluster was split into.
SplitCount<-sapply(SplitFormationsClean,length)
# Repeat each cluster's metadata once per split piece.
SubsetDDRow<-rep(FormationData[,"SubsetDDRow"],time=SplitCount)
ClusterPosition<-rep(FormationData[,"ClusterPosition"],times=SplitCount)
docid<-rep(FormationData[,"docid"],times=SplitCount)
sentid<-rep(FormationData[,"sentid"],times=SplitCount)
# One row per split formation name.
Formation<-unlist(SplitFormationsClean)
FormationData<-as.data.frame(cbind(Formation,SubsetDDRow,ClusterPosition,docid,sentid))
# Normalize column types after cbind() coercion.
FormationData[,"SubsetDDRow"]<-as.numeric(as.character(FormationData[,"SubsetDDRow"]))
FormationData[,"Formation"]<-as.character(FormationData[,"Formation"])
FormationData[,"ClusterPosition"]<-as.character(FormationData[,"ClusterPosition"])
FormationData[,"docid"]<-as.character(FormationData[,"docid"])
FormationData[,"sentid"]<-as.numeric(as.character(FormationData[,"sentid"]))
# Append "Formation" to split pieces that lost the word in the split.
# Rows that already contain "Formation":
FormationHalves<-grep("Formation",FormationData[,"Formation"], perl=TRUE, ignore.case=TRUE)
# Rows missing the word. setdiff() replaces the original negative indexing,
# which would have updated ZERO rows (instead of all rows) had
# FormationHalves come back empty.
MissingFormationWord<-setdiff(seq_len(nrow(FormationData)),FormationHalves)
FormationData[MissingFormationWord,"Formation"]<-paste(FormationData[MissingFormationWord,"Formation"], "Formation", sep=" ")
# Record stats for step 7.
Description7<-"Split NNPClusters at 'And'"
Docs7<-length(unique(FormationData[,"docid"]))
Rows7<-length(unique(FormationData[,"SubsetDDRow"]))
Clusters7<-nrow(FormationData)
# STEP FOURTEEN: Remove Formations that equal to 1 word in length or more than 5 words in length.
print(paste("Remove Formations > 5 or = 1 word(s) in length",Sys.time()))
# Number of tokens in each cluster (from the stored token positions).
WordLength<-sapply(sapply(FormationData[,"ClusterPosition"], function(x) strsplit(x, ",")), function(x) length(x))
# Clusters that are implausibly long (> 5 words) or trivially short (1 word).
BadFormations<-which(WordLength>5|WordLength==1)
# Guard the negative index: x[-integer(0),] would drop every row when no
# formation is flagged as bad.
if (length(BadFormations)>0) {
FormationData<-FormationData[-BadFormations,]
}
# Record stats for step 8.
Description8<-"Remove Formations > 5 words in length"
Docs8<-length(unique(FormationData[,"docid"]))
Rows8<-dim(unique(FormationData[,c("docid","sentid")]))[1]
Clusters8<-nrow(FormationData)
# STEP FIFTEEN: Clean FormationData
print(paste("Clean FormationData",Sys.time()))
# Trim leading/trailing whitespace from the formation names.
FormationData[,"Formation"]<-trimws(FormationData[,"Formation"], which=c("both"))
# Collapse doubled spaces to single spaces.
# NOTE(review): a single pass only fixes one doubled space per position; runs
# of 3+ spaces would survive -- confirm those cannot occur here.
FormationData[,"Formation"]<-gsub("  "," ",FormationData[,"Formation"])
# Singularize "Formations" to "Formation".
FormationData[,"Formation"]<-gsub("Formations","Formation",FormationData[,"Formation"])
# STEP SIXTEEN: Write outputs
print(paste("Writing Outputs",Sys.time()))
# Keep only the columns of interest for the output.
FormationData<-FormationData[,c("Formation","docid","sentid")]
# Assemble the per-step stats table from the step-wise counters.
StepDescription<-c(Description1, Description2, Description3, Description4, Description5, Description6, Description7, Description8)
NumberDocuments<-c(Docs1, Docs2, Docs3, Docs4, Docs5, Docs6, Docs7, Docs8)
NumberRows<-c(Rows1, Rows2, Rows3, Rows4, Rows5, Rows6, Rows7,Rows8)
NumberClusters<-c(Clusters1, Clusters2, Clusters3, Clusters4, Clusters5, Clusters6, Clusters7, Clusters8)
Stats<-cbind(StepDescription,NumberDocuments,NumberRows,NumberClusters)
# Write all outputs into ./output, clearing any previous run's files first.
CurrentDirectory<-getwd()
setwd(paste(CurrentDirectory,"/output",sep=""))
unlink("*")
write.csv(PostFmClusters, "PostFmClusters.csv")
write.csv(FormationData, "FormationData.csv")
write.csv(Stats, "Stats.csv")
# Stop the cluster
stopCluster(Cluster)
# COMPLETE
print(paste("Complete",Sys.time()))
| /International_Formations_app.R | no_license | ItoErika/International_Formations_app | R | false | false | 18,488 | r | # Custom functions are camelCase. Arrays, parameters, and arguments are PascalCase
# Dependency functions are not embedded in master functions, and are marked with the flag dependency in the documentation
# []-notation is used wherever possible, and $-notation is avoided.
######################################### Load Required Libraries ###########################################
# Save and print the app start time
Start<-print(Sys.time())
# If running from UW-Madison
# Load or install the doParallel package
if (suppressWarnings(require("doParallel"))==FALSE) {
install.packages("doParallel",repos="http://cran.cnr.berkeley.edu/");
library("doParallel");
}
# Load or install the RPostgreSQL package
if (suppressWarnings(require("RPostgreSQL"))==FALSE) {
install.packages("RPostgreSQL",repos="http://cran.cnr.berkeley.edu/");
library("RPostgreSQL");
}
# Start a cluster for multicore, 3 by default or higher if passed as command line argument
CommandArgument<-commandArgs(TRUE)
if (length(CommandArgument)==0) {
Cluster<-makeCluster(3)
} else {
Cluster<-makeCluster(as.numeric(CommandArgument[1]))
}
#############################################################################################################
##################################### DATA DOWNLOAD FUNCTIONS, FIDELITY #####################################
#############################################################################################################
# No functions at this time
########################################### Data Download Script ############################################
# print current status to terminal
print(paste("Load postgres tables",Sys.time()))
# If RUNNING FROM UW-MADISON:
# Download the config file
Credentials<-as.matrix(read.table("Credentials.yml",row.names=1))
# Connect to PostgreSQL
Driver <- dbDriver("PostgreSQL") # Establish database driver
Connection <- dbConnect(Driver, dbname = Credentials["database:",], host = Credentials["host:",], port = Credentials["port:",], user = Credentials["user:",])
# Query the sentences fro postgresql
DeepDiveData<-dbGetQuery(Connection,"SELECT docid, sentid, words, poses FROM nlp_sentences_352")
# IF TESTING IN 402:
# Download data from Postgres:
#Driver <- dbDriver("PostgreSQL") # Establish database driver
#Connection <- dbConnect(Driver, dbname = "labuser", host = "localhost", port = 5432, user = "labuser")
#DeepDiveData<-dbGetQuery(Connection,"SELECT docid, sentid, words, poses FROM pbdb_fidelity.pbdb_fidelity_data")
# Record initial stats
Description1<-"Initial Data"
# Initial number of documents and rows in DeepDiveData
Docs1<-length((unique(DeepDiveData[,"docid"])))
Rows1<-nrow(DeepDiveData)
Clusters1<-0
#############################################################################################################
###################################### DATA CLEANING FUNCTIONS, FIDELITY ####################################
#############################################################################################################
# No functions at this time
############################################ Data Cleaning Script ###########################################
# print current status to terminal
print(paste("Clean DeepDiveData",Sys.time()))
# Remove bracket symbols ({ and }) from DeepDiveData sentences
DeepDiveData[,"words"]<-gsub("\\{|\\}","",DeepDiveData[,"words"])
# Replace "Fm" with "Formation" in words column
DeepDiveData[,"words"]<-gsub(",Fm,",",Formation,",DeepDiveData[,"words"])
# Remove bracket symbols ({ and }) from DeepDiveData poses column
DeepDiveData[,"poses"]<-gsub("\\{|\\}","",DeepDiveData[,"poses"])
# Remove commas from DeepDiveData poses column
DeepDiveData[,"poses"]<-gsub(","," ",DeepDiveData[,"poses"])
# Remove commas from DeepDiveData to prepare to run grep function
CleanedDDWords<-gsub(","," ",DeepDiveData[,"words"])
# Replace instances of "Fm" with "Formation"
CleanedDDWords<-gsub("Fm", "Formation", CleanedDDWords)
#############################################################################################################
###################################### FORMATION SEARCH FUNCTIONS, FIDELITY #################################
#############################################################################################################
########################################### Formation Search Script #########################################
# print current status
print(paste("Search for the word ' formation' in DeepDiveData sentences",Sys.time()))
# Apply grep to the object cleaned words
FormationHits<-grep(" formation", ignore.case=TRUE, perl = TRUE, CleanedDDWords)
# Extact DeepDiveData rows corresponding with formation hits
SubsetDeepDive<-DeepDiveData[FormationHits,]
# Update the stats table
Description2<-"Subset DeepDiveData to rows which contain the word 'formation'"
# Record number of documents and rows in SubsetDeepDive:
Docs2<-length((unique(SubsetDeepDive[,"docid"])))
Rows2<-nrow(SubsetDeepDive)
Clusters2<-0
# Remove SubsetDeepDive sentences that are more than 350 characters in length
ShortSent<-sapply(SubsetDeepDive[,"words"], function(x) as.character(nchar(x)<=350))
# Remove sentences that exceed the character limit from SubsetDeepDive
SubsetDeepDive<-SubsetDeepDive[which(ShortSent==TRUE),]
# Update the stats table
Description3<-"Remove sentences exceeding 350 characters"
# Record number of documents and rows in SubsetDeepDive:
Docs3<-length((unique(SubsetDeepDive[,"docid"])))
Rows3<-nrow(SubsetDeepDive)
Clusters3<-0
#############################################################################################################
####################################### NNP CLUSTER FUNCTIONS, FIDELITY #####################################
#############################################################################################################
# Split a sorted vector of token positions into maximal runs of consecutive
# integers.
#
# Args:
#   DeepDivePoses: numeric vector of token positions within one sentence
#     (e.g. the positions tagged NNP/CC).
# Returns:
#   A list with one element per run of consecutive positions. For a
#   zero-length input this yields list(NA), which downstream code removes
#   via the "NA" check on ClusterPosition.
findConsecutive<-function(DeepDivePoses) {
# A run breaks wherever the gap to the next position is not exactly 1.
gap_after<-which(diff(DeepDivePoses)!=1)
run_starts<-c(0,gap_after)+1
run_ends<-c(gap_after,length(DeepDivePoses))
lapply(seq_along(run_starts),function(i) DeepDivePoses[run_starts[i]:run_ends[i]])
}
############################################## NNP Cluster Script ###########################################
# Replace slashes from SubsetDeepDive words and poses columns with the word "SLASH"
SubsetDeepDive[,"words"]<-gsub("\"","SLASH",SubsetDeepDive[,"words"])
SubsetDeepDive[,"poses"]<-gsub("\"","SLASH",SubsetDeepDive[,"poses"])
# print current status to terminal
print(paste("Extract NNPs from SubsetDeepDive rows",Sys.time()))
# Create a list of vectors showing each formation hit sentence's unlisted poses column
DeepDivePoses<-parSapply(Cluster, SubsetDeepDive[,"poses"],function(x) unlist(strsplit(as.character(x)," ")))
# Assign names to each list element corresponding to the row in SubsetDeepDive
names(DeepDivePoses)<-1:nrow(SubsetDeepDive)
# Extract all the NNPs from DeepDivePoses
# NOTE: Search for CC as to get hits like "Middendorf And Black Creek Formations" which is NNP, CC, NNP, NNP, NNP
DeepDiveNNPs<-parSapply(Cluster, DeepDivePoses,function(x) which(x=="NNP"|x=="CC"))
# print current status to terminal
print(paste("Find consecutive NNPs in DeepDiveNNPs",Sys.time()))
# Apply function to DeepDiveNNPs list
ConsecutiveNNPs<-sapply(DeepDiveNNPs, findConsecutive)
# Collapse each cluster into a single character string such that each sentence from formation hits shows its associated clusters
SentenceNNPs<-sapply(ConsecutiveNNPs,function(y) sapply(y,function(x) paste(x,collapse=",")))
# print current status to terminal
print(paste("Find words Associated with Conescutive NNPs",Sys.time()))
# Create a data frame with a row for each NNP cluster
# Make a column for cluster elements
ClusterPosition<-unlist(SentenceNNPs)
# Make a column for sentence IDs
ClusterCount<-sapply(SentenceNNPs,length)
# Repeat the SubsetDeepDive row number (denoted in the names of SentenceNNPs) by the number of NNP clusters in each sentence
SubsetDDRow<-rep(names(SentenceNNPs),times=ClusterCount)
# Bind cluster position data with the row number data
ClusterData<-as.data.frame(cbind(ClusterPosition,SubsetDDRow))
# Reformat the data
ClusterData[,"SubsetDDRow"]<-as.numeric(as.character(ClusterData[,"SubsetDDRow"]))
# Remove NA's from ClusterData
ClusterData<-ClusterData[which(ClusterData[,"ClusterPosition"]!="NA"),]
# Create columns for docid and sentid data for each cluster
docid<-SubsetDeepDive[ClusterData[,"SubsetDDRow"],"docid"]
sentid<-SubsetDeepDive[ClusterData[,"SubsetDDRow"],"sentid"]
# Bind the data to the data frame
ClusterData<-cbind(ClusterData, docid, sentid)
# Reformat ClusterData
ClusterData[,"ClusterPosition"]<-as.character(ClusterData[,"ClusterPosition"])
ClusterData[,"docid"]<-as.character(ClusterData[,"docid"])
ClusterData[,"sentid"]<-as.numeric(as.character(ClusterData[,"sentid"]))
ClusterData[,"SubsetDDRow"]<-as.numeric(as.character(ClusterData[,"SubsetDDRow"]))
# Extract the sentences for the associated SubsetDeepDive rows
ClusterSentences<-sapply(ClusterData[,"SubsetDDRow"], function (x) SubsetDeepDive[x,"words"])
# Split and unlist the words in each cluster sentence
ClusterSentencesSplit<-sapply(ClusterSentences,function(x) unlist(strsplit(as.character(x),",")))
# Extract the NNP Clusters from theh associate sentences
# Get numeric elements for each NNP Cluster word
NNPElements<-lapply(ClusterData[,"ClusterPosition"],function(x) as.numeric(unlist(strsplit(x,","))))
# Create a vector for the number of Clusters in ClusterData
NumClusterVector<-1:nrow(ClusterData)
# Extract the words from ClusterSentencesSplit
ClusterWords<-sapply(NumClusterVector, function(y) sapply(NNPElements[y], function(x) ClusterSentencesSplit[[y]][x]))
# Collapse the clusters into single character strings
NNPWords<-sapply(ClusterWords, function(x) paste(array(x), collapse=" "))
# Bind the clusters to the ClusterData frame
ClusterData[,"NNPWords"]<-NNPWords
# Update the stats table
Description4<-"Extract NPP clusters from SubsetDeepDive rows"
# Record number of documents and rows in SubsetDeepDive:
Docs4<-length(unique(ClusterData[,"docid"]))
Rows4<-length(unique(ClusterData[,"SubsetDDRow"]))
Clusters4<-nrow(ClusterData)
#############################################################################################################
##################################### FORMATION CLUSTERS FUNCTIONS, FIDELITY ################################
#############################################################################################################
# Capitalize the first letter of each space-separated word in a string
# (adapted from a Stack Exchange answer).
#
# Args:
#   x: a single character string.
# Returns:
#   The string with every word's first character upper-cased; the remaining
#   characters of each word are left unchanged.
simpleCap <- function(x) {
words<-strsplit(x, " ")[[1]]
first_letters<-toupper(substring(words, 1, 1))
remainders<-substring(words, 2)
paste0(first_letters, remainders, collapse=" ")
}
########################################### Formation Clusters Script #######################################
# print current status to terminal
print(paste("Extract 'formation' clusters from ClusterData",Sys.time()))
# Find NNP clusters with the world formation in them
FormationClusters<-grep(" formation",ClusterData[,"NNPWords"],ignore.case=TRUE,perl=TRUE) # We could do a search for tail, to ensure it's the last word
# Extract those rows from ClusterData
FormationData<-ClusterData[FormationClusters,]
FormationData[,"docid"]<-as.character(FormationData[,"docid"])
# Find non-formation clusters
PostFmClusters<-ClusterData[-FormationClusters,]
# Update the stats table
Description5<-"Extract NNP clusters containing the word 'formation'"
# Record number of documents and rows in SubsetDeepDive:
Docs5<-length(unique(FormationData[,"docid"]))
Rows5<-length(unique(FormationData[,"SubsetDDRow"]))
Clusters5<-nrow(FormationData)
# print current status to terminal
print(paste("Capitalize formation names appropriately",Sys.time()))
# Make all characters in the NNPWords column lower case
FormationData[,"NNPWords"]<-tolower(FormationData[,"NNPWords"])
# Apply simpleCap function to NNPWords column so the first letter of every word is capitalized.
FormationData[,"NNPWords"]<-sapply(FormationData[,"NNPWords"], simpleCap)
# print current status to terminal
print(paste(" Remove all characters after 'Formation' or 'Formations'",Sys.time()))
# Account for romance language exceptions
Des<-grep("Des",FormationData[,"NNPWords"], perl=TRUE, ignore.case=TRUE)
Les<-grep("Les",FormationData[,"NNPWords"], perl=TRUE, ignore.case=TRUE)
FrenchRows<-c(Des,Les)
# Extract FormationData NNPWords with "Formations" NNP clusters
PluralWithFrench<-grep("Formations",FormationData[,"NNPWords"], perl=TRUE, ignore.case=TRUE)
# Make sure character removal is not performed on french rows
Plural<-PluralWithFrench[which(!PluralWithFrench%in%FrenchRows)]
# Replace (non-french) plural rows of NNPWords column with version with characters after "formations" removed
FormationsCut<-gsub("(Formations).*","\\1",FormationData[Plural,"NNPWords"])
FormationData[Plural,"NNPWords"]<-FormationsCut
# Extract FormationData NNPWords with "Formation" NNP clusters
# Find the FormationData NNPWords rows with "Formation" NNP clusters (NON PLURALS)
SingularWithFrench<-which(!1:nrow(FormationData)%in%Plural)
# Make sure character removal is not performed on french rows
Singular<-SingularWithFrench[which(!SingularWithFrench%in%FrenchRows)]
# Replace (non-french) singular rows of NNPWords column with version with characters after "formation" removed
FormationCut<-gsub("(Formation).*","\\1",FormationData[Singular,"NNPWords"])
FormationData[Singular,"NNPWords"]<-FormationCut
# Remove FormationData rows which only have "Formation" in the NNPWords column
FormationData<-FormationData[-which(FormationData[,"NNPWords"]=="Formation"),]
# Update the stats table
Description6<-"Remove rows that are just the word 'Formation'"
# Record number of documents and rows in SubsetDeepDive:
Docs6<-length(unique(FormationData[,"docid"]))
Rows6<-length(unique(FormationData[,"SubsetDDRow"]))
Clusters6<-nrow(FormationData)
# STEP THIRTEEN: Split the NNPClusters where there is an "And"
SplitFormations<-strsplit(FormationData[,"NNPWords"],'And ')
# Remove the blanks created by the splitting
SplitFormationsClean<-sapply(SplitFormations,function(x) unlist(x)[unlist(x)!=""])
# SplitFormations is a list of the split clusters. Figure out which clusters were split at "And" using length.
SplitCount<-sapply(SplitFormationsClean,length)
# Repeat the data in FormationData for each split cluster by its length
SubsetDDRow<-rep(FormationData[,"SubsetDDRow"],time=SplitCount)
ClusterPosition<-rep(FormationData[,"ClusterPosition"],times=SplitCount)
docid<-rep(FormationData[,"docid"],times=SplitCount)
sentid<-rep(FormationData[,"sentid"],times=SplitCount)
# Make a column for the split formations
Formation<-unlist(SplitFormationsClean)
FormationData<-as.data.frame(cbind(Formation,SubsetDDRow,ClusterPosition,docid,sentid))
# Reformat data
FormationData[,"SubsetDDRow"]<-as.numeric(as.character(FormationData[,"SubsetDDRow"]))
FormationData[,"Formation"]<-as.character(FormationData[,"Formation"])
FormationData[,"ClusterPosition"]<-as.character(FormationData[,"ClusterPosition"])
FormationData[,"docid"]<-as.character(FormationData[,"docid"])
FormationData[,"sentid"]<-as.numeric(as.character(FormationData[,"sentid"]))
# Paste "Formation" to the end of the split clusters where it is missing
# Determine the split clusters that DO contain the word "Formation"
FormationHalves<-grep("Formation",FormationData[,"Formation"], perl=TRUE, ignore.case=TRUE)
# Paste "Formation" to all of the non FormationHalves rows
FormationData[-FormationHalves,"Formation"]<-paste(FormationData[-FormationHalves,"Formation"], "Formation", sep=" ")
# Update the stats table
Description7<-"Split NNPClusters at 'And'"
# Record number of documents and rows in SubsetDeepDive:
Docs7<-length(unique(FormationData[,"docid"]))
Rows7<-length(unique(FormationData[,"SubsetDDRow"]))
Clusters7<-nrow(FormationData)
# STEP FOURTEEN: Remove Formations that equal to 1 word in length or more than 5 words in length.
print(paste("Remove Formations > 5 or = 1 word(s) in length",Sys.time()))
# Determine the number of words in each NNPWords row
WordLength<-sapply(sapply(FormationData[,"ClusterPosition"], function(x) strsplit(x, ",")), function(x) length(x))
# Determine which rows have more than 5 NNPWords or only 1 NNPWord
BadFormations<-which(WordLength>5|WordLength==1)
# Remove those rows from FormationData
FormationData<-FormationData[-BadFormations,]
# Update the stats table
Description8<-"Remove Formations > 5 words in length"
# Record number of documents and rows in SubsetDeepDive:
Docs8<-length(unique(FormationData[,"docid"]))
Rows8<-dim(unique(FormationData[,c("docid","sentid")]))[1]
Clusters8<-nrow(FormationData)
# STEP FIFTEEN: Clean FormationData
print(paste("Clean FormationData",Sys.time()))
# Remove spaces at the beginning and/or end of the Formation column where necessary
FormationData[,"Formation"]<-trimws(FormationData[,"Formation"], which=c("both"))
# Remove double spaces in the formation column
FormationData[,"Formation"]<-gsub(" "," ",FormationData[,"Formation"])
# Remove s in "Formations" where necessary
FormationData[,"Formation"]<-gsub("Formations","Formation",FormationData[,"Formation"])
# STEP SIXTEEN: Write outputs
print(paste("Writing Outputs",Sys.time()))
# Extract columns of interest for the output
FormationData<-FormationData[,c("Formation","docid","sentid")]
# Return formation stats table
StepDescription<-c(Description1, Description2, Description3, Description4, Description5, Description6, Description7, Description8)
NumberDocuments<-c(Docs1, Docs2, Docs3, Docs4, Docs5, Docs6, Docs7, Docs8)
NumberRows<-c(Rows1, Rows2, Rows3, Rows4, Rows5, Rows6, Rows7,Rows8)
NumberClusters<-c(Clusters1, Clusters2, Clusters3, Clusters4, Clusters5, Clusters6, Clusters7, Clusters8)
# Bind formation stats columns
Stats<-cbind(StepDescription,NumberDocuments,NumberRows,NumberClusters)
# Set directory for output
CurrentDirectory<-getwd()
setwd(paste(CurrentDirectory,"/output",sep=""))
# Clear any old output files
unlink("*")
# Write csv output files
write.csv(PostFmClusters, "PostFmClusters.csv")
write.csv(FormationData, "FormationData.csv")
write.csv(Stats, "Stats.csv")
# Stop the cluster
stopCluster(Cluster)
# COMPLETE
print(paste("Complete",Sys.time()))
|
# Plot 1: red histogram of Global Active Power for 2007-02-01 and 2007-02-02,
# from the UCI "Individual household electric power consumption" dataset.
# Load the required packages, installing them only when missing (the original
# re-installed on every run).
if (suppressWarnings(require("lubridate"))==FALSE) {
install.packages("lubridate")
library("lubridate")
}
if (suppressWarnings(require("data.table"))==FALSE) {
install.packages("data.table")
library("data.table")
}
# Working directory for the downloaded data (hard-coded Windows path, kept
# from the original script).
setwd("C:/tmp")
# Download and unzip the dataset archive, then discard the temporary zip.
fileUrl <- "https://archive.ics.uci.edu/ml/machine-learning-databases/00235/household_power_consumption.zip"
zipfile <- tempfile()
download.file(fileUrl, zipfile)
data <- unzip(zipfile)
unlink(zipfile)
str(data)
# Read the raw file; "?" marks missing values in this dataset.
data1 <- fread("household_power_consumption.txt", na.strings = "?")
# Keep only 1/2/2007 and 2/2/2007 (dates are d/m/yyyy). The original class
# "[1,2]" also matched a literal comma; "[12]" states the intent exactly and
# selects the same rows.
data2 <- data1[grepl("^[12]/2/2007", data1$Date),]
# Histogram of global active power (typo "killowatts" corrected in the label).
hist(data2$Global_active_power, col = "red", main = "Global Active Power", xlab = "Global Active Power (kilowatts)")
# Copy the screen device to a 480x480 PNG and close it.
dev.copy(png,"plot1.png", width=480, height=480)
dev.off()
library(lubridate)
install.packages("data.table")
library(data.table)
# path
setwd("C:/tmp")
# download data
fileUrl <- "https://archive.ics.uci.edu/ml/machine-learning-databases/00235/household_power_consumption.zip"
data <- tempfile()
download.file(fileUrl, data)
data <- unzip(data)
str(data)
# read data
data1 <- fread("household_power_consumption.txt", na.strings = "?")
#subset the dataset
data2 <- data1[grepl("^[1,2]/2/2007", data1$Date),]
#plot1
hist(data2$Global_active_power, col = "red", main = "Global Active Power", xlab = "Global Active Power (killowatts)")
# Save file and close device
dev.copy(png,"plot1.png", width=480, height=480)
dev.off() |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/OrnsteinUhlenbeckHalfLife.R
\name{OrnsteinUhlenbeckHalfLife}
\alias{OrnsteinUhlenbeckHalfLife}
\title{Calculate Half-Life for Mean-Reversion}
\usage{
OrnsteinUhlenbeckHalfLife(price.Ratio)
}
\arguments{
\item{price.Ratio}{price ratio between pairs}
}
\value{
A \code{list} of half life results
}
\description{
Computed from an Ornstein–Uhlenbeck process. This is the theoretically computed time, based on a historical window of data,
that it will take for the spread to mean-revert half of its distance after having diverged from the mean of the spread.
}
\examples{
getFX("AUD/USD")
getFX("CAD/USD")
half.life <- OrnsteinUhlenbeckHalfLife(AUDUSD/CADUSD)
half.life
}
| /man/OrnsteinUhlenbeckHalfLife.Rd | permissive | elephann/RQuantTrader | R | false | true | 748 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/OrnsteinUhlenbeckHalfLife.R
\name{OrnsteinUhlenbeckHalfLife}
\alias{OrnsteinUhlenbeckHalfLife}
\title{Calculate Half-Life for Mean-Reversion}
\usage{
OrnsteinUhlenbeckHalfLife(price.Ratio)
}
\arguments{
\item{price.Ratio}{price ratio between pairs}
}
\value{
A \code{list} of half life results
}
\description{
Computed from an Ornstein–Uhlenbeck process. This is the theoretically computed time, based on a historical window of data,
that it will take for the spread to mean-revert half of its distance after having diverged from the mean of the spread.
}
\examples{
getFX("AUD/USD")
getFX("CAD/USD")
half.life <- OrnsteinUhlenbeckHalfLife(AUDUSD/CADUSD)
half.life
}
|
#' Worldplot
#'
#' Takes in a year vector of data and returns a map of the US or
#' international countries that maps the geographical distribution of the datasets.
#'
#' @param year The vector for the year that is to be mapped
#' @param title The title of the plot and the name which it will be saved to
#' @param US TRUE to map US territories, FALSE to map international countries
#' @param save TRUE to save the map as "<title>.png", FALSE to not
#' @param interactive TRUE to choose the interactive mode in viewer, FALSE to view in just the default plot
#' @return a tmap object that has the year dataset mapped to it
#'
#' @examples
#' ##Creating a state map of the data with data from 2001
#' usmap2001 <- worldplot(yearsdata$X2001, title = "Change in Students 2000-2001")
#'
#' ##Creating an international map with interactive view
#' worldmap <- worldplot(yearsdata$X2014, title = "Change in Students 2000-2001", interactive = TRUE)
#'
#' @import leaflet tmap tmaptools
#' @export
##The function joins a year vector to the region names in the package-level
##`yearsdata` table and returns a choropleth of US states or world countries.
worldplot <- function(year, title = "", US = TRUE, save = FALSE, interactive = FALSE){
  # NOTE(review): library() inside a package function is discouraged (the
  # @import tag above should make these available); kept for compatibility.
  library(leaflet)
  library(tmap)
  library(tmaptools)
  ## Build a 207-row lookup table: region NAME plus this year's values.
  copy <- as.data.frame(matrix(0, ncol = 1, nrow = 207))
  copy$V1 <- yearsdata$State.Countries   # region names shipped with the package
  colnames(copy)[1] <- "NAME"
  ## Attach the values for the requested year.
  copy$Students <- c(year)
  ## Shapefiles bundled with the package (US states / world borders).
  nationshapefile <- "~/mapstu/inst/extdata/cb_2015_us_state_20m/cb_2015_us_state_20m.shp"
  countryshpfiles <- "~/mapstu/inst/extdata/TM_WORLD_BORDERS-0.3/TM_WORLD_BORDERS-0.3.shp"
  ## Legend breaks: fixed 0,20,...,300,Inf for the US map; NULL (auto) for world.
  breaksby10 <- NULL
  if (US == TRUE){
    # Map of the U.S.
    mapgeo <- read_shape(file = nationshapefile)
    ## Repair names mangled in yearsdata so they join against the shapefile.
    copy$NAME <- as.character(copy$NAME)
    copy[copy$NAME == "Districtof Columbia", 'NAME'] <- "District of Columbia"
    ## 0, 20, ..., 300 plus an open-ended top bin (replaces the original
    ## 0:300 loop with a modulo test).
    breaksby10 <- c(seq(0, 300, by = 20), Inf)
  } else {
    # Map of international countries.
    mapgeo <- read_shape(file = countryshpfiles)
    ## Repair mangled names so they join against the world shapefile.
    copy$NAME <- as.character(copy$NAME)
    copy[copy$NAME == "Districtof Columbia", 'NAME'] <- "District of Columbia"
    copy[copy$NAME == "Trinidadand Tobago", 'NAME'] <- "Trinidad and Tobago"
    copy[copy$NAME == "Muscatand Oman", 'NAME'] <- "Oman"
    ## BUG FIX: this line previously assigned "Trinidad and Tobago"
    ## (copy-paste error), silently dropping South Korea from the join.
    ## TODO(review): confirm the exact NAME used by TM_WORLD_BORDERS
    ## ("Republic of Korea" vs "Korea, Republic of").
    copy[copy$NAME == "Republicof Korea", 'NAME'] <- "Republic of Korea"
    copy[copy$NAME == "Georgia", 'NAME'] <- "Georgia(State)"
    copy[copy$NAME == "Georgia(Country)", 'NAME'] <- "Georgia"
    copy[copy$NAME == "Laos", 'NAME'] <- "Lao People's Democratic Republic"
  }
  ## Join the year's values onto the shapefile's attribute table by NAME.
  mapgeo@data <- data.frame(mapgeo@data, copy[match(mapgeo@data[,"NAME"], copy[,"NAME"]),])
  ## Build the choropleth (legend breaks, colour scheme, world layout).
  worldmap <- tm_shape(mapgeo) + tm_polygons(c("Students"),
                                             breaks = breaksby10,
                                             palette = "Purples",
                                             contrast = .7,
                                             id = "name",
                                             auto.palette.mapping = FALSE,
                                             title = title) + tm_style_gray() + tm_format_World()
  ## Optionally save the map as "<title>.png".
  if (save == TRUE){
    save_tmap(worldmap, paste(title, ".png", sep = ""))
  }
  ## "view" = interactive leaflet widget, "plot" = static graphic.
  if (interactive == TRUE){
    tmap_mode("view")
  } else {
    tmap_mode("plot")
  }
  ## Return the desired tmap object.
  return(worldmap)
}
| /R/worldplot.R | no_license | healthyred/mapstu | R | false | false | 4,562 | r | #' Worldplot
#'
#' Takes in a year vector of data and returns a map of the US or
#' international countries that maps the geographical distribution of the datasets
#'
#' @param year The vector for the year that is to be mapped
#' @param title The title of the plot and the name which it will be saved to
#' @param US choice of mapping US territories or mapping international countries
#' @param save TRUE to save, FALSE to not
#' @param interactive TRUE to choose the interactive mode in viewer, FALSE to view in just the default plot
#' @return a S4 object that has the year dataset mapped to it
#'
#' @examples
#' ##Creating a state map of the data with data from 2001
#' usmap2001 <- worldplot(yearsdata$X2001, title = "Change in Students 2000-2001")
#'
#' ##Creating an international map with interactive view
#' worldmap <- worldplot(yearsdata$X2014, title = "Change in Students 2000-2001", interactive = TRUE)
#'
#' @import leaflet tmap tmaptools
#' @export
#'
##The function takes in a year vector organizing all of the data into a list object.
##Then appends it to an S4 object and returns a plot of either the US, or international countries.
worldplot <- function(year, title = "", US = TRUE, save = FALSE, interactive = FALSE){
library(leaflet)
library(tmap)
library(tmaptools)
##creating a copy matrix of a 2 by 207 matrix and filling in the first columnn with the countries
copy <- as.data.frame(matrix(0, ncol = 1, nrow =207))
copy$V1 <- yearsdata$State.Countries
colnames(copy)[1] <- "NAME"
##Adds the absolutechange colomn to each data frame
copy$Students <- c(year)
##reads in the map data of the U.S. and the worldmap
nationshapefile <- "~/mapstu/inst/extdata/cb_2015_us_state_20m/cb_2015_us_state_20m.shp"
countryshpfiles ="~/mapstu/inst/extdata/TM_WORLD_BORDERS-0.3/TM_WORLD_BORDERS-0.3.shp"
##Sets the breaks in the maps. The US map uses breaksby10 for a better visual representation,
##whereas the international map auto scales.
breaksby10 = NULL
##the if loop determines whether the user wants to plot international areas or states
if (US == TRUE){
#selects the map of the U.S.
mapgeo <- read_shape(file = nationshapefile)
##Corrects the Districtof Columbia issue
copy$NAME <- as.character(copy$NAME)
copy[copy$NAME == "Districtof Columbia", 'NAME'] <- "District of Columbia"
##A for loop that generates the breaks in the legend that is used to map the data
breaksby10 = c()
for (i in 0:300){
if (i%%20 == 0){
breaksby10 = c(breaksby10, i)
}
}
##Adds Inf to the end of the list
breaksby10 = c(breaksby10, Inf)
}
##choice for the selection of international countries
else{
#selects to graph the international countries
mapgeo <- read_shape(file = countryshpfiles)
##Must correct democratic republic of congo, republic of korea, georgia,
##bosnia and herzgonivia, trinidad and tobago, united republic of tanzania
copy$NAME <- as.character(copy$NAME)
copy[copy$NAME == "Districtof Columbia", 'NAME'] <- "District of Columbia"
copy[copy$NAME == "Trinidadand Tobago", 'NAME'] <- "Trinidad and Tobago"
copy[copy$NAME == "Muscatand Oman", 'NAME'] <- "Oman"
copy[copy$NAME == "Republicof Korea", 'NAME'] <- "Trinidad and Tobago"
copy[copy$NAME == "Georgia", 'NAME'] <- "Georgia(State)"
copy[copy$NAME == "Georgia(Country)", 'NAME'] <- "Georgia"
copy[copy$NAME == "Laos", 'NAME'] <- "Lao People's Democratic Republic"
}
##Creating map
mapgeo@data <- data.frame(mapgeo@data, copy[match(mapgeo@data[,"NAME"], copy[,"NAME"]),])
##Code for the legends, titles, color scheme, format of the map, and creation of the actual plot
##Creation of the completed plot with the data incorporated
worldmap <- tm_shape(mapgeo) + tm_polygons(c("Students"),
breaks = breaksby10,
palette = "Purples",
contrast=.7,
id="name",
auto.palette.mapping=FALSE,
title= title) + tm_style_gray() + tm_format_World()
##Saving the map
if (save == TRUE){
save_tmap(worldmap, paste(title, ".png", sep = ""))
}
##Choosing how the map is viewed, either plot or interactive mode
if (interactive == TRUE){
tmap_mode("view")
}
else{
tmap_mode("plot")
}
##Returns the desired s4 object
return(worldmap)
}
|
# Notes script: summarizing data (Getting & Cleaning Data, lecture 3.2).
# Downloads the Baltimore restaurants dataset and demonstrates summary tools.

# NOTE: directory name keeps the original misspelling ("summarazing") for
# compatibility, but create and setwd now agree — the original
# setwd("./summarazing.data") never matched the created "summarazing data".
if (!dir.exists("summarazing data")){
  dir.create("summarazing data")
}
setwd("./summarazing data")
URL<-"https://data.baltimorecity.gov/api/views/k5ry-ef3g/rows.csv?accessType=DOWNLOAD"
download.file(URL,destfile="rest.csv",method="curl")
rest<-read.csv("rest.csv")

### Summarizing Data ###
head(rest)    # was head() with no argument, which errors
tail(rest)    # was tail() with no argument, which errors
summary(rest)
str(rest)
quantile(rest$councilDistrict, na.rm = TRUE) ## good for looking into integers and numeric type of data within df

## creating tables
table(rest$zipCode,useNA="ifany") ## creates a 1-dimensional table
table(rest$councilDistrict,rest$zipCode) ## creates 2-dim table

## Checking for missing values
## (the next two lines were prose pasted as code — a syntax error — now comments)
# sum(is.na(rest$name)) - gives you the number of NAs in a given column
# any(is.na(rest$name)) - tells you whether or not there are NAs in a given column
col<-vector()
NAs<-vector()
# NOTE: growing vectors in a loop is O(n^2); fine for these notes.
for (i in names(rest)){
  sum.NA<-sum(is.na(rest[i]))
  col<-c(col,i)
  NAs<-c(NAs, sum.NA)
}

## getting row and col sums
colSums(is.na(rest))
all(colSums(is.na(rest))==0)

## identifying values with specific characteristics ##
table(rest$zipCode %in% c(21212,21213))

## making cross tabs ##
as.data.frame(xtabs(zipCode~name+councilDistrict,data=rest))
| /03-getting-and-cleaning-data/notes/3.2 summarazing data.R | no_license | JethroAlba/Coursera-Data-Science-Spec | R | false | false | 1,229 | r |
if (!dir.exists("summarazing data")){
dir.create("summarazing data")
}
setwd("./summarazing.data")
URL<-"https://data.baltimorecity.gov/api/views/k5ry-ef3g/rows.csv?accessType=DOWNLOAD"
download.file(URL,destfile="rest.csv",method="curl")
rest<-read.csv("rest.csv")
### Summarizing Data ###
head()
tail()
summary(rest)
str(rest)
quantile(rest$councilDistrict,na.rm=T) ## good for looking into integers and numeric type of data within df
## creating tables
table(rest$zipCode,useNA="ifany") ## creates a 1-dimensional df
table(rest$councilDistrict,rest$zipCode) ## creates 2-dim df
## Checking for missing values
sum(is.na(rest$name))<-gives you the number of NAs in a given column
any(is.na(rest$name))<-tells you whether or not there are NAs in a given column
col<-vector()
NAs<-vector()
for (i in names(rest)){
sum.NA<-sum(is.na(rest[i]))
col<-c(col,i)
NAs<-c(NAs, sum.NA)
}
## getting row and col sums
colSums(is.na(rest))
all(colSums(is.na(rest))==0)
## identifying values with specific characteristics ##
table(rest$zipCode %in% c(21212,21213))
## making cross tabs ##
as.data.frame(xtabs(zipCode~name+councilDistrict,data=rest))
|
library(deSolve)
library(ggplot2)
# Piecewise-linear inflow profile: 100 -> 0 -> 100 -> 0 -> 100 in steps of 5.
l1 <- seq(100,0,by=-5)
l2 <- seq(5,100,by=5)
l3 <- seq(95,0,by=-5)
l4 <- seq(5,100,by=5)
input <- c(l1, l2, l3, l4)  # 81 values: one per solver time step below
# Simulation horizon and step size; seq gives 81 grid points (0, 0.25, ..., 20).
START<-0; FINISH<-20; STEP<-0.25
simtime <- seq(START, FINISH, by=STEP)
# Initial stock level and constant outflow rate (named, as deSolve expects).
stocks <- c(sStock=100)
auxs <- c(aOutflow=50)
# Derivative function for deSolve::ode().  Reads the globals `input`
# (inflow profile) and `simtime` (the solver's time grid) defined above.
#
# time:   current solver time
# stocks: named state vector (sStock)
# auxs:   named parameters (aOutflow)
# Returns list(derivatives, named auxiliaries) in the form deSolve requires.
model <- function(time, stocks, auxs){
  with(as.list(c(stocks, auxs)),{
    # Look up the inflow at the nearest grid point.  The original used
    # which(simtime == time): exact floating-point equality, which yields
    # integer(0) (and an NA inflow) if the solver's time drifts off the grid.
    fInflow <- input[which.min(abs(simtime - time))]
    fOutflow <- aOutflow
    dS_dt <- fInflow - fOutflow
    ans <- list(c(dS_dt),Inflow=fInflow,
                Outflow=fOutflow,
                NetFlow=dS_dt)
  })
}
# Run simulation
# Explicit Euler, so the solver visits exactly the grid points in `simtime`.
o<-data.frame(ode(y=stocks, times=simtime, func = model,
                  parms=auxs, method='euler'))
# Stock trajectory over time.
qplot(x=time,y=sStock,data=o) +
  geom_line()
# Phase plot: net flow against stock level.
qplot(x=sStock,y=NetFlow,data=o) +
  geom_path()
| /lectures/CT561/models/08 Lecture/Stock Example.R | permissive | mohammad-miftakhus-sholikin/SDMR | R | false | false | 802 | r | library(deSolve)
library(ggplot2)
l1 <- seq(100,0,by=-5)
l2 <- seq(5,100,by=5)
l3 <- seq(95,0,by=-5)
l4 <- seq(5,100,by=5)
input <- c(l1, l2, l3, l4)
START<-0; FINISH<-20; STEP<-0.25
simtime <- seq(START, FINISH, by=STEP)
stocks <- c(sStock=100)
auxs <- c(aOutflow=50)
model <- function(time, stocks, auxs){
with(as.list(c(stocks, auxs)),{
fInflow <- input[which(simtime==time)]
fOutflow <- aOutflow
dS_dt <- fInflow - fOutflow
ans <- list(c(dS_dt),Inflow=fInflow,
Outflow=fOutflow,
NetFlow=dS_dt)
})
}
# Run simulation
o<-data.frame(ode(y=stocks, times=simtime, func = model,
parms=auxs, method='euler'))
qplot(x=time,y=sStock,data=o) +
geom_line()
qplot(x=sStock,y=NetFlow,data=o) +
geom_path()
|
# NOTE(review): two-element vector; the name suggests individual counts per
# patch at step 68000 — confirm against the simulation that wrote this file.
numPerPatch68000 <- c(2516,2484)
| /NatureEE-data-archive/Run203121/JAFSdata/JAFSnumPerPatch68000.R | no_license | flaxmans/NatureEE2017 | R | false | false | 33 | r | numPerPatch68000 <- c(2516,2484)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{HIV_Simu_Dataset_Delta025}
\alias{HIV_Simu_Dataset_Delta025}
\title{HIV RNA Load Simulated Data for Delta AUC = -0.25}
\format{
A data frame with 4 columns and 1000 rows:
\tabular{ll}{
\code{id} \tab The subject id. \cr
\code{time} \tab Time of HIV RNA load measurements. \cr
\code{Group} \tab Treatment group. \cr
\code{VL} \tab The HIV RNA load measures. \cr
}
}
\usage{
HIV_Simu_Dataset_Delta025
}
\description{
This dataset gives simulated longitudinal log-transformed HIV RNA load dynamics for two distinct groups of patients who interrupted their antiretroviral treatment at baseline (time=0).
The group 'Group1' was simulated to mimic a control group while the group 'Group2' gathers patients assigned to a treatment group.
Each group accounts for 20 patients and each of them were followed for 24 weeks with one measure every week.
This dataset was simulated to obtain a difference of area under the mean HIV RNA load curves between the two groups (AUC2 - AUC1) equals to -0.25 log10 cp/ml.
}
\details{
A dataset containing simulated data used in the accompanying paper to this package.
}
\examples{
HIV_Simu_Dataset_Delta025
}
\keyword{datasets}
| /man/HIV_Simu_Dataset_Delta025.Rd | permissive | marie-alexandre/AUCcomparison | R | false | true | 1,261 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{HIV_Simu_Dataset_Delta025}
\alias{HIV_Simu_Dataset_Delta025}
\title{HIV RNA Load Simulated Data for Delta AUC = -0.25}
\format{
A data frame with 4 columns and 1000 rows:
\tabular{ll}{
\code{id} \tab The subject id. \cr
\code{time} \tab Time of HIV RNA load measurements. \cr
\code{Group} \tab Treatment group. \cr
\code{VL} \tab The HIV RNA load measures. \cr
}
}
\usage{
HIV_Simu_Dataset_Delta025
}
\description{
This dataset gives simulated longitudinal log-transformed HIV RNA load dynamics for two distinct groups of patients who interrupted their antiretroviral treatment at baseline (time=0).
The group 'Group1' was simulated to mimic a control group while the group 'Group2' gathers patients assigned to a treatment group.
Each group accounts for 20 patients and each of them were followed for 24 weeks with one measure every week.
This dataset was simulated to obtain a difference of area under the mean HIV RNA load curves between the two groups (AUC2 - AUC1) equals to -0.25 log10 cp/ml.
}
\details{
A dataset containing simulated data used in the accompanying paper to this package.
}
\examples{
HIV_Simu_Dataset_Delta025
}
\keyword{datasets}
|
#' S3 generic: return the "type" of an RNeo4j object.
getType <- function(object) UseMethod("getType")

# Fallback: anything that is not a recognised class is an error.
getType.default <- function(x) {
  stop("Invalid object. Must supply relationship or graph object.")
}

# For a graph, query the server's relationship-types endpoint and parse the
# JSON response.  setHeaders() and http_request() are helpers defined
# elsewhere in this package.
getType.graph <- function(graph) {
  url <- attr(graph, "relationship_types")
  headers <- setHeaders(graph)
  response <- http_request(url, "GET", "OK", httpheader = headers)
  fromJSON(response)
}
getType.relationship = function(rel) {
return(attr(rel, "type"))
} | /R/getType.R | permissive | noelnamai/RNeo4j | R | false | false | 440 | r | getType = function(object) UseMethod("getType")
getType.default = function(x) {
stop("Invalid object. Must supply relationship or graph object.")
}
getType.graph = function(graph) {
url = attr(graph, "relationship_types")
headers = setHeaders(graph)
response = http_request(url, "GET", "OK", httpheader=headers)
result = fromJSON(response)
return(result)
}
getType.relationship = function(rel) {
return(attr(rel, "type"))
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/check.class.R
\name{check.class}
\alias{check.class}
\title{Check whether obj belongs to class, or
is a list whose elements all belong to class (if list.of.class==TRUE)}
\usage{
check.class(obj, class, list.of.class = TRUE)
}
\description{
Checks whether obj belongs to class, or whether it
is a list whose elements all belong to class (if list.of.class==TRUE)
}
\keyword{internal}
| /man/check.class.Rd | no_license | tshmak/crosspred | R | false | true | 418 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/check.class.R
\name{check.class}
\alias{check.class}
\title{Function to check whether obj belongs to class or
is a list of class (if list.of.class==TRUE)}
\usage{
check.class(obj, class, list.of.class = TRUE)
}
\description{
Function to check whether obj belongs to class or
is a list of class (if list.of.class==TRUE)
}
\keyword{internal}
|
# plot4.R - 2x2 panel of household power measurements for 1-2 Feb 2007
# (Exploratory Data Analysis, course project 1).
dir <- 'C:/OneDrive/RCoursera_specialization/04_ExploratoryDataAnalysis/CP1/exdata-data-household_power_consumption/'
setwd(dir)
power <- read.csv("household_power_consumption.txt", sep=";", na.strings="?", stringsAsFactors=FALSE)
# Keep only 1 Feb and 2 Feb 2007 (dates are d/m/yyyy).
power <- power[grep('^1/2/2007|^2/2/2007', power$Date), ]
# BUG FIX: the original as.Date() had no format, so "1/2/2007" (d/m/Y) was not
# parsed, weekDay came out NA, and the x-axis tick positions were wrong.
power$weekDay <- weekdays(as.Date(power$Date, format = "%d/%m/%Y"))
# x positions where a new day starts, plus the final observation.
pos.axis <- c(which(!duplicated(power$weekDay)), nrow(power))
day.labels <- c('Thu', 'Fri', 'Sat')

# Draw the four panels on the current device.  Extracted into a helper so the
# identical code renders both to the screen and to the PNG (the original
# duplicated ~25 lines of plotting code).
draw_panels <- function() {
  par(mfrow = c(2,2))
  # Panel 1: global active power over time.
  plot(power$Global_active_power, type = 'l', axes=FALSE, ylab = 'Global Active Power (kilowatts)', xlab = '')
  box()
  axis(2, seq(0, max(power$Global_active_power), by = 2), las=1)
  axis(1, at=pos.axis, labels=day.labels)
  # Panel 2: voltage over time.
  plot(power$Voltage, ylab = 'Voltage', xlab = 'datetime', type = 'l', axes=FALSE)
  box()
  axis(2, seq(ceiling(min(power$Voltage)), max(power$Voltage), by = 4), las=3)
  axis(1, at=pos.axis, labels=day.labels)
  # Panel 3: the three energy sub-metering series.
  with(power, {
    plot(Sub_metering_1, axes = FALSE, type = 'l', ylab = 'Energy sub metering', xlab = '')
    lines(Sub_metering_2, col = 'red')
    lines(Sub_metering_3, col = 'blue')
    box()
    axis(2, seq(0, max(power$Sub_metering_1), by = 10), las=3)
    axis(1, at=pos.axis, labels=day.labels)
    # BUG FIX: legend colours were c('black','blue','red'), which mislabelled
    # Sub_metering_2 (drawn red) and Sub_metering_3 (drawn blue).
    legend('topright', col = c('black', 'red', 'blue'),
           legend = c('Sub_metering_1', 'Sub_metering_2', 'Sub_metering_3'),
           lty=1, bty="n")
  })
  # Panel 4: global reactive power over time.
  plot(power$Global_reactive_power, ylab = 'Global_reactive_power', xlab = 'datetime', type = 'l', axes = FALSE)
  box()
  axis(2, seq(0, max(power$Global_reactive_power), by = 0.1), las=3)
  axis(1, at=pos.axis, labels=day.labels)
}

# Render once to the screen, then again straight into the PNG file (drawing
# directly into the png device gives correctly scaled text, unlike dev.copy).
draw_panels()
png(filename = 'plot4.png', width = 480, height = 480)
draw_panels()
dev.off()
| /plot4.R | no_license | gonzalezivan90/ExData_Plotting1 | R | false | false | 2,826 | r | dir <- 'C:/OneDrive/RCoursera_specialization/04_ExploratoryDataAnalysis/CP1/exdata-data-household_power_consumption/'
setwd(dir)
power <- read.csv("household_power_consumption.txt", sep=";", na.strings="?", stringsAsFactors=FALSE)
power <- power[grep('^1/2/2007|^2/2/2007',power$Date), ]
par(mfrow = c(2,2))
power$weekDay <- weekdays(as.Date(power$Date))
plot(power$Global_active_power, type = 'l', axes=FALSE, ylab = 'Global Active Power (kilowatts)', xlab = '')
box()
axis(2,seq(0,max(power$Global_active_power), by = 2),las=1)
pos.axis <- c(which(!duplicated(power$weekDay)), nrow(power))
axis(1, at=pos.axis, labels=c('Thu', 'Fri', 'Sat'))
plot(power$Voltage, ylab = 'Voltage', xlab = 'datetime', type = 'l', axes=FALSE)
box()
axis(2,seq(ceiling(min(power$Voltage)), max(power$Voltage), by = 4),las=3)
axis(1, at=pos.axis, labels=c('Thu', 'Fri', 'Sat'))
with(power, {
plot(Sub_metering_1, axes = FALSE, type = 'l', ylab = 'Energy sub metering', xlab = '')
lines(Sub_metering_2, col = 'red')
lines(Sub_metering_3, col = 'blue')
box()
axis(2, seq(0,max(power$Sub_metering_1), by = 10),las=3)
axis(1, at=pos.axis, labels=c('Thu', 'Fri', 'Sat'))
legend('topright',col = c('black', 'blue', 'red'), legend = c('Sub_metering_1', 'Sub_metering_2', 'Sub_metering_3'),
lty=1, bty="n")
})
plot(power$Global_reactive_power, ylab = 'Global_reactive_power', xlab = 'datetime', type = 'l', axes = FALSE)
box()
axis(2, seq(0, max(power$Global_reactive_power), by = 0.1), las=3)
axis(1, at=pos.axis, labels=c('Thu', 'Fri', 'Sat'))
png(filename = 'plot4.png', width = 480, height = 480)
par(mfrow = c(2,2))
plot(power$Global_active_power, type = 'l', axes=FALSE, ylab = 'Global Active Power (kilowatts)', xlab = '')
box()
axis(2,seq(0,max(power$Global_active_power), by = 2),las=1)
pos.axis <- c(which(!duplicated(power$weekDay)), nrow(power))
axis(1, at=pos.axis, labels=c('Thu', 'Fri', 'Sat'))
plot(power$Voltage, ylab = 'Voltage', xlab = 'datetime', type = 'l', axes=FALSE)
box()
axis(2,seq(ceiling(min(power$Voltage)), max(power$Voltage), by = 4),las=3)
axis(1, at=pos.axis, labels=c('Thu', 'Fri', 'Sat'))
with(power, {
plot(Sub_metering_1, axes = FALSE, type = 'l', ylab = 'Energy sub metering', xlab = '')
lines(Sub_metering_2, col = 'red')
lines(Sub_metering_3, col = 'blue')
box()
axis(2, seq(0,max(power$Sub_metering_1), by = 10),las=3)
axis(1, at=pos.axis, labels=c('Thu', 'Fri', 'Sat'))
legend('topright',col = c('black', 'blue', 'red'), legend = c('Sub_metering_1', 'Sub_metering_2', 'Sub_metering_3'),
lty=1, bty="n")
})
plot(power$Global_reactive_power, ylab = 'Global_reactive_power', xlab = 'datetime', type = 'l', axes = FALSE)
box()
axis(2, seq(0, max(power$Global_reactive_power), by = 0.1), las=3)
axis(1, at=pos.axis, labels=c('Thu', 'Fri', 'Sat'))
dev.off()
|
## from Chapter 23 of R for Data Science
## Model Basics
library(modelr) ## includes sim1 data set
options(na.action = na.warn)
# EDA tools
library(tidyverse)
## another way to generate a sim data set, linear with wiggles...
## simple, used to help understand basics of modeling...
# True data-generating process: y = 1 + 2x plus Gaussian noise (sd = 2.5).
true_model <- function(x) {
  1 + 2*x + rnorm(length(x), sd=2.5)
}
# NOTE(review): data_frame() is deprecated in current tibble; tibble() is the
# drop-in replacement — confirm package versions before changing.
sim <- data_frame(x = seq(0,10,length = 20),
                  y = true_model(x)
)
##########################################
## how are x and y related?
ggplot(sim1, aes(x, y)) + geom_point()
## using a linear model, generate a bunch (250) of models
## and overlay them on the data.
## y = a_1 * x + a_2
# 250 random candidate (intercept, slope) pairs drawn uniformly.
models <- tibble(
  a1 = runif(250, -20, 40), ## y-intercept
  a2 = runif(250, -5, 5) ## slope
)
# All candidate lines over the sim1 scatter, drawn faintly (alpha = 1/4).
ggplot(sim1, aes(x, y)) +
  geom_abline(aes(intercept = a1, slope = a2), data = models, alpha = 1/4) +
  geom_point()
## how do we find the "good" models?
## minimize the distance from the data to the line...
## To compute the distance, first turn the model family into an R function.
## This takes the model parameters and the data as inputs, and gives values
## predicted by the model as output.
# Predictions from the two-parameter linear family y = a[1] + a[2] * x.
# a:    numeric length-two vector (intercept, slope)
# data: data frame with an x column
model1 <- function(a, data) {
  intercept <- a[1]
  slope <- a[2]
  intercept + slope * data$x
}
# Evaluate one candidate model (intercept 7, slope 1.5) on sim1.
model1(c(7, 1.5), sim1)
## this is 30 distances; how do we collapse that into a single number??
## RMS deviation !
# Root-mean-squared deviation between the observed y values in `data` and
# the predictions of candidate parameters `mod` (length-two vector).
measure_distance <- function(mod, data) {
  residuals <- data$y - model1(mod, data)
  sqrt(mean(residuals ^ 2))
}
# RMS distance of the (7, 1.5) candidate from the sim1 data.
measure_distance(c(7,1.5), sim1) ## 2.67
## Now we can use purrr to compute the distance for all 250 models.
## We need a helper function because our distance function expects
## the model as a numeric vector of length two.
# Adapter for purrr::map2_dbl(): accepts the two parameters separately and
# scores them against the global `sim1` data set.
sim1_dist <- function(a1, a2) {
  measure_distance(c(a1, a2), sim1)
}
# Score every candidate (a1, a2) pair; `dist` holds each model's RMS distance.
models <- models %>% mutate(dist = purrr::map2_dbl(a1,a2, sim1_dist))
models
## Let's overlay the 10 best models on the data
## colored by -dist (easy way to make sure the best models/smallest dist)
## get the brightest colors...
ggplot(sim1, aes(x,y)) +
  geom_point(size = 2, color = 'grey30') +
  geom_abline(
    aes(intercept = a1, slope = a2, color = -dist),
    data = filter(models ,rank(dist) <= 10)
  )
## We can also think about these models as observations,and visualizing
## with a scatterplot of a1 vs a2, again colored by distance. | /scripts/RDS_Chap23_Model-Basics.R | no_license | brusko/learningR | R | false | false | 2,325 | r | ## from Chapter 23 of R for Data Science
## Model Basics
library(modelr) ## includes sim1 data set
options(na.action = na.warn)
# EDA tools
library(tidyverse)
## another way to generate a sim data set, linear with wiggles...
## simple, used to help understand basics of modeling...
true_model <- function(x) {
1 + 2*x + rnorm(length(x), sd=2.5)
}
sim <- data_frame(x = seq(0,10,length = 20),
y = true_model(x)
)
##########################################
## how are x and y related?
ggplot(sim1, aes(x, y)) + geom_point()
## using a linear model, generate a bunch (250) of models
## and overlay them on the data.
## y = a_1 * x + a_2
models <- tibble(
a1 = runif(250, -20, 40), ## y-intercept
a2 = runif(250, -5, 5) ## slope
)
ggplot(sim1, aes(x, y)) +
geom_abline(aes(intercept = a1, slope = a2), data = models, alpha = 1/4) +
geom_point()
## how do we find the "good" models?
## minimize the distance from the data to the line...
## To compute the distance, first turn the model family into an R function.
## This takes the model parameters and the data as inputs, and gives values
## predicted by the model as output.
model1 <- function(a, data) {
a[1] + data$x*a[2]
}
model1(c(7, 1.5), sim1)
## this is 30 distances; how do we collapse that into a single number??
## RMS deviation !
measure_distance <- function(mod, data) {
diff <- data$y - model1(mod,data)
sqrt(mean(diff^2))
}
measure_distance(c(7,1.5), sim1) ## 2.67
## Now we can use purrr to compute the distance for all 250 models.
## We need a helper function because our distance function expects
## the model as a numeric vector of length two.
sim1_dist <- function(a1,a2) {
measure_distance(c(a1,a2), sim1)
}
models <- models %>% mutate(dist = purrr::map2_dbl(a1,a2, sim1_dist))
models
## Let's overlay the 10 best models on the data
## colored by -dist (easy way to make sure the best models/smallest dist)
## get the brightest colors...
ggplot(sim1, aes(x,y)) +
geom_point(size = 2, color = 'grey30') +
geom_abline(
aes(intercept = a1, slope = a2, color = -dist),
data = filter(models ,rank(dist) <= 10)
)
## We can also think about these models as observations,and visualizing
## with a scatterplot of a1 vs a2, again colored by distance. |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/io.R
\name{loadirradiations}
\alias{loadirradiations}
\title{Load the irradiation schedule}
\usage{
loadirradiations(fname)
}
\arguments{
\item{fname}{file name (in .csv format)}
}
\value{
a list of irradiations, where each irradiation is a named
list containing:
\code{tin}: vector with the start times of irradiations \cr
\code{tout}: vector with the end times of irradiations \cr
\code{P}: vector with the power of the irradiations
}
\description{
Loads a .csv file with the schedule of a multi-stage neutron
irradiation
}
\examples{
irrfile <- system.file("irradiations.csv",package="ArArRedux")
irr <- loadirradiations(irrfile)
str(irr)
}
| /man/loadirradiations.Rd | no_license | pvermees/ArArRedux | R | false | true | 723 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/io.R
\name{loadirradiations}
\alias{loadirradiations}
\title{Load the irradiation schedule}
\usage{
loadirradiations(fname)
}
\arguments{
\item{fname}{file name (in .csv format)}
}
\value{
a list of irradiations, where each irradiation is a named
list containing:
\code{tin}: vector with the start times of irradiations \cr
\code{tout}: vector with the end times of irradiations \cr
\code{P}: vector with the power of the irradiations
}
\description{
Loads a .csv file with the schedule of a multi-stage neutron
irradiation
}
\examples{
irrfile <- system.file("irradiations.csv",package="ArArRedux")
irr <- loadirradiations(irrfile)
str(irr)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/matching.R
\name{matches}
\alias{matches}
\alias{match-SMARTS}
\title{matches}
\usage{
matches(query, target, return.matches = FALSE)
}
\arguments{
\item{query}{Required. A SMARTSQuery}
\item{target}{Required. The molecule to query. Should be a `jobjRef` representing an `IAtomContainer`}
\item{return.matches}{Optional. Default \code{FALSE}}
}
\description{
matches
}
| /man/matches.Rd | no_license | cran/rcdk | R | false | true | 449 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/matching.R
\name{matches}
\alias{matches}
\alias{match-SMARTS}
\title{matches}
\usage{
matches(query, target, return.matches = FALSE)
}
\arguments{
\item{query}{Required. A SMARTSQuery}
\item{target}{Required. The molecule to query. Should be a `jobjRef` representing an `IAtomContainer`}
\item{return.matches}{Optional. Default \code{FALSE}}
}
\description{
matches
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Boxp2.R
\name{aovMuiBoxP2}
\alias{aovMuiBoxP2}
\title{Variance test or non-parametric test results visualization, using boxplots with paired lines to show pairwise comparisons}
\usage{
aovMuiBoxP2(
data = data_wt,
i = 3,
sig_show = "line",
result = result,
ns = FALSE
)
}
\arguments{
\item{data}{a data.frame contain the input data}
\item{i}{for col id or colames}
\item{sig_show}{Distinctive display, "abc" or "line"}
\item{result}{output from aovMcomper or KwWlx. You can also import result calculated from other software (a data frame)}
\item{ns}{Logical value, whether to display insignificant marks}
}
\value{
data frame
}
\description{
Variance test or non-parametric test results visualization, using boxplots with paired lines to show pairwise comparisons.
}
\examples{
# data(data_wt)
result = KwWlx2(data = data_wt, i= 4)
PlotresultBox = aovMuiBoxP2(data = data_wt, i= 4,sig_show ="abc",result = result[[1]])
# utput result
p = PlotresultBox[[1]]
p
}
\references{
Yuan J, Zhao J, Wen T, Zhao M, Li R, Goossens P, Huang Q, Bai Y, Vivanco JM, Kowalchuk GA, Berendsen RL, Shen Q
Root exudates drive the soil-borne legacy of aboveground pathogen infection
Microbiome 2018, DOI: \doi{10.1186/s40168-018-0537-x}
}
\author{
Contact: Tao Wen \email{2018203048@njau.edu.cn} Jun Yuan \email{junyuan@njau.edu.cn}
}
| /man/aovMuiBoxP2.Rd | no_license | WatsonWoo/EasyStat | R | false | true | 1,371 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Boxp2.R
\name{aovMuiBoxP2}
\alias{aovMuiBoxP2}
\title{Variance test or non-parametric test results visualization, using boxplots with paired lines to show pairwise comparisons}
\usage{
aovMuiBoxP2(
data = data_wt,
i = 3,
sig_show = "line",
result = result,
ns = FALSE
)
}
\arguments{
\item{data}{a data.frame contain the input data}
\item{i}{for col id or colames}
\item{sig_show}{Distinctive display, "abc" or "line"}
\item{result}{output from aovMcomper or KwWlx. You can also import result calculated from other software (a data frame)}
\item{ns}{Logical value, whether to display insignificant marks}
}
\value{
data frame
}
\description{
Variance test or non-parametric test results visualization, using boxplots with paired lines to show pairwise comparisons.
}
\examples{
# data(data_wt)
result = KwWlx2(data = data_wt, i= 4)
PlotresultBox = aovMuiBoxP2(data = data_wt, i= 4,sig_show ="abc",result = result[[1]])
# utput result
p = PlotresultBox[[1]]
p
}
\references{
Yuan J, Zhao J, Wen T, Zhao M, Li R, Goossens P, Huang Q, Bai Y, Vivanco JM, Kowalchuk GA, Berendsen RL, Shen Q
Root exudates drive the soil-borne legacy of aboveground pathogen infection
Microbiome 2018, DOI: \doi{10.1186/s40168-018-0537-x}
}
\author{
Contact: Tao Wen \email{2018203048@njau.edu.cn} Jun Yuan \email{junyuan@njau.edu.cn}
}
|
/Forbrugsdata/andre_indkomstgrupper.R | permissive | Anaconda95/SpecialeJR | R | false | false | 2,142 | r | ||
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/appregistry_operations.R
\name{appregistry_sync_resource}
\alias{appregistry_sync_resource}
\title{Syncs the resource with what is currently recorded in App registry}
\usage{
appregistry_sync_resource(resourceType, resource)
}
\arguments{
\item{resourceType}{[required] The type of resource of which the application will be associated.}
\item{resource}{[required] An entity you can work with and specify with a name or ID. Examples
include an Amazon EC2 instance, an AWS CloudFormation stack, or an
Amazon S3 bucket.}
}
\description{
Syncs the resource with what is currently recorded in App registry.
Specifically, the resource’s App registry system tags are synced with
its associated application. The resource is removed if it is not
associated with the application. The caller must have permissions to
read and update the resource.
}
\section{Request syntax}{
\preformatted{svc$sync_resource(
resourceType = "CFN_STACK",
resource = "string"
)
}
}
\keyword{internal}
| /paws/man/appregistry_sync_resource.Rd | permissive | sanchezvivi/paws | R | false | true | 1,056 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/appregistry_operations.R
\name{appregistry_sync_resource}
\alias{appregistry_sync_resource}
\title{Syncs the resource with what is currently recorded in App registry}
\usage{
appregistry_sync_resource(resourceType, resource)
}
\arguments{
\item{resourceType}{[required] The type of resource of which the application will be associated.}
\item{resource}{[required] An entity you can work with and specify with a name or ID. Examples
include an Amazon EC2 instance, an AWS CloudFormation stack, or an
Amazon S3 bucket.}
}
\description{
Syncs the resource with what is currently recorded in App registry.
Specifically, the resource’s App registry system tags are synced with
its associated application. The resource is removed if it is not
associated with the application. The caller must have permissions to
read and update the resource.
}
\section{Request syntax}{
\preformatted{svc$sync_resource(
resourceType = "CFN_STACK",
resource = "string"
)
}
}
\keyword{internal}
|
# Author: Viwe_Mabongo
# Date: 22 April 2021
# Day_4_Session
#
# Compares daily step counts between sexes and across teachers:
# group means with traditional and bootstrapped CIs, bar plots with
# error bars, and a two-way ANOVA with Tukey HSD post-hoc tests.
library(tidyverse)
# Raw class data: one row per student (sex, teacher, daily steps, rating).
Input <- "
Student Sex Teacher Steps Rating
a female Jacob 8000 7
b female Jacob 9000 10
c female Jacob 10000 9
d female Jacob 7000 5
e female Jacob 6000 4
f female Jacob 8000 8
g male Jacob 7000 6
h male Jacob 5000 5
i male Jacob 9000 10
j male Jacob 7000 8
k female Sadam 8000 7
l female Sadam 9000 8
m female Sadam 9000 8
n female Sadam 8000 9
o male Sadam 6000 5
p male Sadam 8000 9
q male Sadam 7000 6
r female Donald 10000 10
s female Donald 9000 10
t female Donald 8000 8
u female Donald 8000 7
v female Donald 6000 7
w male Donald 6000 8
x male Donald 8000 10
y male Donald 7000 7
z male Donald 7000 7
"
# Read the whitespace-delimited table; summary gives measures of central tendency.
# (Originally str(data) was called BEFORE data existed, which inspected the
# base utils::data function instead -- moved here after the data frame is built.)
data <- read.table(textConnection(Input), header = TRUE)
str(data)
summary(data)
library(rcompanion)
# Group means of Steps by Sex with traditional 95% confidence intervals.
# Ungrouped data would be indicated with a 1 on the right side of the formula,
# or the group = NULL argument.
groupwiseMean(Steps ~ Sex, data = data, conf = 0.95, digits = 3)
out <- groupwiseMean(Steps ~ Sex, data = data, conf = 0.95, digits = 3)
# Bar plot of mean steps by sex, with traditional-CI error bars.
ggplot(data = out) +
  geom_col(aes(x = Sex, y = Mean), fill = "red", col = "black") +
  geom_errorbar(aes(ymin = Trad.lower,
                    ymax = Trad.upper,
                    x = Sex),
                col = "black",
                width = 0.2) +
  theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
  xlab("Sex") + ylab("Steps")
out2 <- groupwiseMean(Steps ~ Teacher + Sex, data = data, conf = 0.95, digits = 3)
# Same plot split into a matrix of panels, one per teacher.
# BUG FIX: the original omitted the "+" after facet_wrap(), so the theme()
# and axis labels were evaluated as separate statements and never applied.
ggplot(data = out2) +
  geom_col(aes(x = Sex, y = Mean), fill = "red", col = "black") +
  geom_errorbar(aes(ymin = Trad.lower,
                    ymax = Trad.upper,
                    x = Sex),
                col = "black",
                width = 0.2) +
  facet_wrap(~ Teacher, ncol = 3) +
  theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
  xlab("Sex") + ylab("Steps")
# Bootstrapping: BCa confidence intervals only (traditional/normal/basic/
# percentile intervals suppressed), 10000 resamples.
groupwiseMean(Steps ~ Sex,
              data = data,
              conf = 0.95,
              digits = 3,
              R = 10000,
              boot = TRUE,
              traditional = FALSE,
              normal = FALSE,
              basic = FALSE,
              percentile = FALSE,
              bca = TRUE)
# Performing two-way ANOVA of Steps on Sex, Teacher and their interaction.
# Renamed from "anova" to avoid shadowing stats::anova().
anova_model <- aov(Steps ~ Sex * Teacher, data = data)
summary(anova_model)
anova_tukey <- TukeyHSD(anova_model)
# Tukey plot showing differences in mean levels of Sex:Teacher.
plot(anova_tukey)
| /Day_4.R | no_license | ViweMabongo/Biostats_2021 | R | false | false | 2,946 | r | #Athor: Viwe_Mabongo
# Date: 22 April 2021
# Day_4_Session
#
# Compares daily step counts between sexes and across teachers:
# group means with traditional and bootstrapped CIs, bar plots with
# error bars, and a two-way ANOVA with Tukey HSD post-hoc tests.
library(tidyverse)
# Raw class data: one row per student (sex, teacher, daily steps, rating).
Input <- "
Student Sex Teacher Steps Rating
a female Jacob 8000 7
b female Jacob 9000 10
c female Jacob 10000 9
d female Jacob 7000 5
e female Jacob 6000 4
f female Jacob 8000 8
g male Jacob 7000 6
h male Jacob 5000 5
i male Jacob 9000 10
j male Jacob 7000 8
k female Sadam 8000 7
l female Sadam 9000 8
m female Sadam 9000 8
n female Sadam 8000 9
o male Sadam 6000 5
p male Sadam 8000 9
q male Sadam 7000 6
r female Donald 10000 10
s female Donald 9000 10
t female Donald 8000 8
u female Donald 8000 7
v female Donald 6000 7
w male Donald 6000 8
x male Donald 8000 10
y male Donald 7000 7
z male Donald 7000 7
"
# Read the whitespace-delimited table; summary gives measures of central tendency.
# (Originally str(data) was called BEFORE data existed, which inspected the
# base utils::data function instead -- moved here after the data frame is built.)
data <- read.table(textConnection(Input), header = TRUE)
str(data)
summary(data)
library(rcompanion)
# Group means of Steps by Sex with traditional 95% confidence intervals.
# Ungrouped data would be indicated with a 1 on the right side of the formula,
# or the group = NULL argument.
groupwiseMean(Steps ~ Sex, data = data, conf = 0.95, digits = 3)
out <- groupwiseMean(Steps ~ Sex, data = data, conf = 0.95, digits = 3)
# Bar plot of mean steps by sex, with traditional-CI error bars.
ggplot(data = out) +
  geom_col(aes(x = Sex, y = Mean), fill = "red", col = "black") +
  geom_errorbar(aes(ymin = Trad.lower,
                    ymax = Trad.upper,
                    x = Sex),
                col = "black",
                width = 0.2) +
  theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
  xlab("Sex") + ylab("Steps")
out2 <- groupwiseMean(Steps ~ Teacher + Sex, data = data, conf = 0.95, digits = 3)
# Same plot split into a matrix of panels, one per teacher.
# BUG FIX: the original omitted the "+" after facet_wrap(), so the theme()
# and axis labels were evaluated as separate statements and never applied.
ggplot(data = out2) +
  geom_col(aes(x = Sex, y = Mean), fill = "red", col = "black") +
  geom_errorbar(aes(ymin = Trad.lower,
                    ymax = Trad.upper,
                    x = Sex),
                col = "black",
                width = 0.2) +
  facet_wrap(~ Teacher, ncol = 3) +
  theme(axis.text.x = element_text(angle = 45, hjust = 1)) +
  xlab("Sex") + ylab("Steps")
# Bootstrapping: BCa confidence intervals only (traditional/normal/basic/
# percentile intervals suppressed), 10000 resamples.
groupwiseMean(Steps ~ Sex,
              data = data,
              conf = 0.95,
              digits = 3,
              R = 10000,
              boot = TRUE,
              traditional = FALSE,
              normal = FALSE,
              basic = FALSE,
              percentile = FALSE,
              bca = TRUE)
# Performing two-way ANOVA of Steps on Sex, Teacher and their interaction.
# Renamed from "anova" to avoid shadowing stats::anova().
anova_model <- aov(Steps ~ Sex * Teacher, data = data)
summary(anova_model)
anova_tukey <- TukeyHSD(anova_model)
# Tukey plot showing differences in mean levels of Sex:Teacher.
plot(anova_tukey)
|
# Load shared result-plotting partials; their paths come from environment
# variables (presumably exported by the surrounding pipeline -- TODO confirm).
source(Sys.getenv('RESULTS_BASE_PARTIAL'))
source(Sys.getenv('RESULTS_DISPLAY_PARTIAL'))
library(argparse)
library(lubridate, warn.conflicts = FALSE)
library(wombat)
library(ncdf4)
# Command-line arguments: flux input files, the base daily-XCO2 run, two
# single-region perturbation runs (Jan 2016, regions 02 and 06), and the
# output figure path.
parser <- ArgumentParser()
parser$add_argument('--control-emissions')
parser$add_argument('--perturbations')
parser$add_argument('--xco2-daily-base')
parser$add_argument('--xco2-daily-201601r02')
parser$add_argument('--xco2-daily-201601r06')
parser$add_argument('--output')
args <- parser$parse_args()
# GEOS-Chem global grid: 144 longitude cells of 2.5 degrees; latitude cells of
# 2 degrees with 1-degree half-cells at the poles.
GEOS_CHEM_GRID <- list(
  longitude = list(
    centres = seq(-180, 177.51, 2.5),
    widths = rep(2.5, 144)
  ),
  latitude = list(
    centres = c(-89.5, seq(-88, 88.1, 2), 89.5),
    widths = c(1, rep(2, 89), 1)
  )
)
# Restrict x elementwise to the closed interval [a, b].
clamp <- function(x, a, b) {
  bounded_below <- pmax(x, a)
  pmin(bounded_below, b)
}
# Diverging palette (teal / white / brown) and symmetric caps used to clamp
# values before colouring the flux and sensitivity maps.
low_colour <- '#35978f'
mid_colour <- '#ffffff'
high_colour <- '#bf812d'
flux_max_abs <- 1.5
sensitivity_max_abs <- 0.3
# Read a daily-XCO2 NetCDF file and return a long data frame with one row per
# grid cell per day: longitude/latitude centres, cell sizes, date and xco2.
read_xco2_daily <- function(filename) {
  with_nc_file(list(fn = filename), {
    # 'time' is stored as minutes since 2000-01-01 00:00:00 UTC.
    times <- ymd_hms('2000-01-01 00:00:00') + minutes(ncvar_get(fn, 'time'))
    xco2 <- ncvar_get(fn, 'xco2')
  })
  # expand.grid varies longitude_index fastest, then latitude_index, then time;
  # assumes the 'xco2' array is laid out (longitude, latitude, time) so that
  # column-major as.vector() flattening matches this ordering -- TODO confirm
  # against the NetCDF file's dimension order.
  expand.grid(
    longitude_index = seq_along(GEOS_CHEM_GRID$longitude$centres),
    latitude_index = seq_along(GEOS_CHEM_GRID$latitude$centres),
    time = times
  ) %>%
    mutate(
      date = as.Date(time),
      longitude = GEOS_CHEM_GRID$longitude$centres[longitude_index],
      cell_width = GEOS_CHEM_GRID$longitude$widths[longitude_index],
      latitude = GEOS_CHEM_GRID$latitude$centres[latitude_index],
      cell_height = GEOS_CHEM_GRID$latitude$widths[latitude_index],
      xco2 = as.vector(xco2)
    ) %>%
    select(-time, -longitude_index, -latitude_index)
}
xco2_base_df <- read_xco2_daily(args$xco2_daily_base)
# Single-region perturbation runs, tagged with their TransCom region number.
xco2_201601r02_df <- read_xco2_daily(args$xco2_daily_201601r02) %>%
  mutate(region = 2)
xco2_201601r06_df <- read_xco2_daily(args$xco2_daily_201601r06) %>%
  mutate(region = 6)
# Sensitivity = perturbed-run XCO2 minus base-run XCO2, per cell and day.
xco2_sensitivity <- bind_rows(
  xco2_201601r02_df,
  xco2_201601r06_df
) %>%
  left_join(
    xco2_base_df %>% select(date, longitude, latitude, xco2_base = xco2),
    by = c('date', 'longitude', 'latitude')
  ) %>%
  mutate(
    xco2_sensitivity = xco2 - xco2_base
  )
control_emissions <- fst::read_fst(args$control_emissions)
# Attach each perturbation's grid-cell geometry (centre and size) via model_id.
perturbations <- fst::read_fst(args$perturbations) %>%
  left_join(
    control_emissions %>%
      select(
        model_id,
        month_start,
        longitude,
        cell_width,
        latitude,
        cell_height
      ),
    by = 'model_id'
  ) %>%
  # 31536000 = seconds in a 365-day year: converts flux density from a
  # per-second to a per-year rate (plot legend reads kg/m^2/year).
  mutate(flux_density = flux_density * 31536000)
# Keep only the non-zero January 2016 perturbations for TransCom regions 2 and
# 6, with display-friendly facet labels.
perturbations_i <- perturbations %>%
  filter(region %in% c(2, 6), month_start == '2016-01-01', abs(flux_density) > 0) %>%
  mutate(
    date = '2016-01',
    region = sprintf('TransCom3 %02d', region)
  )
# Sensitivities for the same two regions on three representative days,
# relabelled for faceting.
sensitivities_i <- xco2_sensitivity %>%
  filter(
    region %in% c(2, 6),
    format(date) %in% c(
      '2016-01-01',
      '2016-01-15',
      '2016-02-15'
    )
  ) %>%
  mutate(
    region = sprintf('TransCom3 %02d', region)
  )
# Map of perturbation flux density, one column of facets per region. Values
# are clamped to +/- flux_max_abs so outliers do not wash out the diverging
# colour scale; the scale's limits are fixed to the same range.
flux_plot <- ggplot() +
  geom_tile(
    data = perturbations_i,
    mapping = aes(
      longitude,
      latitude,
      width = cell_width,
      height = cell_height,
      fill = clamp(flux_density, -flux_max_abs, flux_max_abs)
    )
  ) +
  geom_world() +
  coord_quickmap() +
  scale_fill_gradient2(
    low = low_colour,
    mid = mid_colour,
    high = high_colour,
    limits = c(-1, 1) * flux_max_abs
  ) +
  labs(
    x = 'Longitude',
    y = 'Latitude'
  ) +
  facet_grid(date ~ region) +
  scale_x_continuous(expand = c(0, 0), limits = c(-180, 180)) +
  scale_y_continuous(expand = c(0, 0), limits = c(-90, 90)) +
  guides(fill = guide_colourbar(
    title = expression('[kg/'*m^2*'/year]'),
    title.position = 'right',
    title.theme = element_text(angle = 90, hjust = 0.5),
    barheight = 6,
    frame.colour = 'black'
  )) +
  # Strip axes and gridlines: the world outline carries the spatial context.
  theme(
    legend.position = 'right',
    plot.title = element_text(hjust = 0.5),
    plot.margin = margin(),
    panel.grid = element_blank(),
    axis.title = element_blank(),
    axis.text = element_blank(),
    axis.ticks = element_blank(),
    axis.line.x.bottom = element_blank(),
    axis.line.y.left = element_blank()
  )
# Map of XCO2 sensitivities (ppm), facetted by date and region. Values are
# clamped to +/- sensitivity_max_abs; unlike flux_plot no explicit scale
# limits are set, so the legend range follows the clamped data.
sensitivity_plot <- ggplot() +
  geom_tile(
    data = sensitivities_i,
    mapping = aes(
      longitude,
      latitude,
      width = cell_width,
      height = cell_height,
      fill = clamp(xco2_sensitivity, -sensitivity_max_abs, sensitivity_max_abs)
    )
  ) +
  geom_world() +
  coord_quickmap() +
  scale_fill_gradient2(
    low = low_colour,
    mid = mid_colour,
    high = high_colour
  ) +
  scale_x_continuous(expand = c(0, 0), limits = c(-180, 180)) +
  scale_y_continuous(expand = c(0, 0), limits = c(-90, 90)) +
  labs(x = 'Longitude', y = 'Latitude') +
  facet_grid(date ~ region) +
  guides(fill = guide_colourbar(
    title = '[ppm]',
    title.position = 'right',
    title.theme = element_text(angle = 90, hjust = 0.5),
    barheight = 10,
    frame.colour = 'black'
  )) +
  # Strip axes and gridlines, matching flux_plot's styling.
  theme(
    legend.position = 'right',
    plot.title = element_text(hjust = 0.5),
    plot.margin = margin(),
    panel.grid = element_blank(),
    axis.title = element_blank(),
    axis.text = element_blank(),
    axis.ticks = element_blank(),
    axis.line.x.bottom = element_blank(),
    axis.line.y.left = element_blank()
  )
# Figure layout: dimensions in centimetres, derived from the shared
# DISPLAY_SETTINGS loaded by the sourced partials.
plot_width <- DISPLAY_SETTINGS$full_width
plot_height <- DISPLAY_SETTINGS$full_height - 8
legend_width <- 2.2
title_height <- 1
plot_margin <- 0.5
spacer <- 0.5
# Height of one facet row: remaining vertical space split over 4 rows
# (1 for the flux panel + 3 for the sensitivity panel).
row_height <- (plot_height - spacer - 2 * title_height - 2 * plot_margin) / 4
# Arrange: math title + flux map (legend in right column), spacer row, then
# math title + sensitivity map with its legend.
output <- gridExtra::arrangeGrob(
  grobs = list(
    grid::textGrob(expression(phi[j](bold(s), t))),
    flux_plot + theme(legend.position = 'none'),
    get_legend(flux_plot),
    grid::textGrob(expression(psi[j](bold(s), h, t))),
    sensitivity_plot + theme(legend.position = 'none'),
    get_legend(sensitivity_plot)
  ),
  layout_matrix = rbind(
    c(1, NA),
    c(2, 3),
    c(NA, NA),
    c(4, NA),
    c(5, 6)
  ),
  heights = c(
    title_height,
    row_height + plot_margin,
    spacer,
    title_height,
    3 * row_height + plot_margin
  ),
  widths = c(
    plot_width - legend_width,
    legend_width
  )
)
ggsave(
  args$output,
  plot = output,
  width = plot_width,
  height = plot_height,
  units = 'cm'
)
| /4_results/src/sensitivities.R | no_license | mbertolacci/wombat-paper | R | false | false | 6,245 | r | source(Sys.getenv('RESULTS_BASE_PARTIAL'))
# Load shared result-plotting partial; its path comes from an environment
# variable (presumably exported by the surrounding pipeline -- TODO confirm).
source(Sys.getenv('RESULTS_DISPLAY_PARTIAL'))
library(argparse)
library(lubridate, warn.conflicts = FALSE)
library(wombat)
library(ncdf4)
# Command-line arguments: flux input files, the base daily-XCO2 run, two
# single-region perturbation runs (Jan 2016, regions 02 and 06), and the
# output figure path.
parser <- ArgumentParser()
parser$add_argument('--control-emissions')
parser$add_argument('--perturbations')
parser$add_argument('--xco2-daily-base')
parser$add_argument('--xco2-daily-201601r02')
parser$add_argument('--xco2-daily-201601r06')
parser$add_argument('--output')
args <- parser$parse_args()
# GEOS-Chem global grid: 144 longitude cells of 2.5 degrees; latitude cells of
# 2 degrees with 1-degree half-cells at the poles.
GEOS_CHEM_GRID <- list(
  longitude = list(
    centres = seq(-180, 177.51, 2.5),
    widths = rep(2.5, 144)
  ),
  latitude = list(
    centres = c(-89.5, seq(-88, 88.1, 2), 89.5),
    widths = c(1, rep(2, 89), 1)
  )
)
# Restrict x elementwise to the closed interval [a, b].
clamp <- function(x, a, b) {
  bounded_below <- pmax(x, a)
  pmin(bounded_below, b)
}
# Diverging palette (teal / white / brown) and symmetric caps used to clamp
# values before colouring the flux and sensitivity maps.
low_colour <- '#35978f'
mid_colour <- '#ffffff'
high_colour <- '#bf812d'
flux_max_abs <- 1.5
sensitivity_max_abs <- 0.3
# Read a daily-XCO2 NetCDF file and return a long data frame with one row per
# grid cell per day: longitude/latitude centres, cell sizes, date and xco2.
read_xco2_daily <- function(filename) {
  with_nc_file(list(fn = filename), {
    # 'time' is stored as minutes since 2000-01-01 00:00:00 UTC.
    times <- ymd_hms('2000-01-01 00:00:00') + minutes(ncvar_get(fn, 'time'))
    xco2 <- ncvar_get(fn, 'xco2')
  })
  # expand.grid varies longitude_index fastest, then latitude_index, then time;
  # assumes the 'xco2' array is laid out (longitude, latitude, time) so that
  # column-major as.vector() flattening matches this ordering -- TODO confirm
  # against the NetCDF file's dimension order.
  expand.grid(
    longitude_index = seq_along(GEOS_CHEM_GRID$longitude$centres),
    latitude_index = seq_along(GEOS_CHEM_GRID$latitude$centres),
    time = times
  ) %>%
    mutate(
      date = as.Date(time),
      longitude = GEOS_CHEM_GRID$longitude$centres[longitude_index],
      cell_width = GEOS_CHEM_GRID$longitude$widths[longitude_index],
      latitude = GEOS_CHEM_GRID$latitude$centres[latitude_index],
      cell_height = GEOS_CHEM_GRID$latitude$widths[latitude_index],
      xco2 = as.vector(xco2)
    ) %>%
    select(-time, -longitude_index, -latitude_index)
}
xco2_base_df <- read_xco2_daily(args$xco2_daily_base)
# Single-region perturbation runs, tagged with their TransCom region number.
xco2_201601r02_df <- read_xco2_daily(args$xco2_daily_201601r02) %>%
  mutate(region = 2)
xco2_201601r06_df <- read_xco2_daily(args$xco2_daily_201601r06) %>%
  mutate(region = 6)
# Sensitivity = perturbed-run XCO2 minus base-run XCO2, per cell and day.
xco2_sensitivity <- bind_rows(
  xco2_201601r02_df,
  xco2_201601r06_df
) %>%
  left_join(
    xco2_base_df %>% select(date, longitude, latitude, xco2_base = xco2),
    by = c('date', 'longitude', 'latitude')
  ) %>%
  mutate(
    xco2_sensitivity = xco2 - xco2_base
  )
control_emissions <- fst::read_fst(args$control_emissions)
# Attach each perturbation's grid-cell geometry (centre and size) via model_id.
perturbations <- fst::read_fst(args$perturbations) %>%
  left_join(
    control_emissions %>%
      select(
        model_id,
        month_start,
        longitude,
        cell_width,
        latitude,
        cell_height
      ),
    by = 'model_id'
  ) %>%
  # 31536000 = seconds in a 365-day year: converts flux density from a
  # per-second to a per-year rate (plot legend reads kg/m^2/year).
  mutate(flux_density = flux_density * 31536000)
# Keep only the non-zero January 2016 perturbations for TransCom regions 2 and
# 6, with display-friendly facet labels.
perturbations_i <- perturbations %>%
  filter(region %in% c(2, 6), month_start == '2016-01-01', abs(flux_density) > 0) %>%
  mutate(
    date = '2016-01',
    region = sprintf('TransCom3 %02d', region)
  )
sensitivities_i <- xco2_sensitivity %>%
filter(
region %in% c(2, 6),
format(date) %in% c(
'2016-01-01',
'2016-01-15',
'2016-02-15'
)
) %>%
mutate(
region = sprintf('TransCom3 %02d', region)
)
# Map of perturbation flux density, one column of facets per region. Values
# are clamped to +/- flux_max_abs so outliers do not wash out the diverging
# colour scale; the scale's limits are fixed to the same range.
flux_plot <- ggplot() +
  geom_tile(
    data = perturbations_i,
    mapping = aes(
      longitude,
      latitude,
      width = cell_width,
      height = cell_height,
      fill = clamp(flux_density, -flux_max_abs, flux_max_abs)
    )
  ) +
  geom_world() +
  coord_quickmap() +
  scale_fill_gradient2(
    low = low_colour,
    mid = mid_colour,
    high = high_colour,
    limits = c(-1, 1) * flux_max_abs
  ) +
  labs(
    x = 'Longitude',
    y = 'Latitude'
  ) +
  facet_grid(date ~ region) +
  scale_x_continuous(expand = c(0, 0), limits = c(-180, 180)) +
  scale_y_continuous(expand = c(0, 0), limits = c(-90, 90)) +
  guides(fill = guide_colourbar(
    title = expression('[kg/'*m^2*'/year]'),
    title.position = 'right',
    title.theme = element_text(angle = 90, hjust = 0.5),
    barheight = 6,
    frame.colour = 'black'
  )) +
  # Strip axes and gridlines: the world outline carries the spatial context.
  theme(
    legend.position = 'right',
    plot.title = element_text(hjust = 0.5),
    plot.margin = margin(),
    panel.grid = element_blank(),
    axis.title = element_blank(),
    axis.text = element_blank(),
    axis.ticks = element_blank(),
    axis.line.x.bottom = element_blank(),
    axis.line.y.left = element_blank()
  )
# Map of XCO2 sensitivities (ppm), facetted by date and region. Values are
# clamped to +/- sensitivity_max_abs; unlike flux_plot no explicit scale
# limits are set, so the legend range follows the clamped data.
sensitivity_plot <- ggplot() +
  geom_tile(
    data = sensitivities_i,
    mapping = aes(
      longitude,
      latitude,
      width = cell_width,
      height = cell_height,
      fill = clamp(xco2_sensitivity, -sensitivity_max_abs, sensitivity_max_abs)
    )
  ) +
  geom_world() +
  coord_quickmap() +
  scale_fill_gradient2(
    low = low_colour,
    mid = mid_colour,
    high = high_colour
  ) +
  scale_x_continuous(expand = c(0, 0), limits = c(-180, 180)) +
  scale_y_continuous(expand = c(0, 0), limits = c(-90, 90)) +
  labs(x = 'Longitude', y = 'Latitude') +
  facet_grid(date ~ region) +
  guides(fill = guide_colourbar(
    title = '[ppm]',
    title.position = 'right',
    title.theme = element_text(angle = 90, hjust = 0.5),
    barheight = 10,
    frame.colour = 'black'
  )) +
  # Strip axes and gridlines, matching flux_plot's styling.
  theme(
    legend.position = 'right',
    plot.title = element_text(hjust = 0.5),
    plot.margin = margin(),
    panel.grid = element_blank(),
    axis.title = element_blank(),
    axis.text = element_blank(),
    axis.ticks = element_blank(),
    axis.line.x.bottom = element_blank(),
    axis.line.y.left = element_blank()
  )
# Figure layout: dimensions in centimetres, derived from the shared
# DISPLAY_SETTINGS loaded by the sourced partial.
plot_width <- DISPLAY_SETTINGS$full_width
plot_height <- DISPLAY_SETTINGS$full_height - 8
legend_width <- 2.2
title_height <- 1
plot_margin <- 0.5
spacer <- 0.5
# Height of one facet row: remaining vertical space split over 4 rows
# (1 for the flux panel + 3 for the sensitivity panel).
row_height <- (plot_height - spacer - 2 * title_height - 2 * plot_margin) / 4
# Arrange: math title + flux map (legend in right column), spacer row, then
# math title + sensitivity map with its legend.
output <- gridExtra::arrangeGrob(
  grobs = list(
    grid::textGrob(expression(phi[j](bold(s), t))),
    flux_plot + theme(legend.position = 'none'),
    get_legend(flux_plot),
    grid::textGrob(expression(psi[j](bold(s), h, t))),
    sensitivity_plot + theme(legend.position = 'none'),
    get_legend(sensitivity_plot)
  ),
  layout_matrix = rbind(
    c(1, NA),
    c(2, 3),
    c(NA, NA),
    c(4, NA),
    c(5, 6)
  ),
  heights = c(
    title_height,
    row_height + plot_margin,
    spacer,
    title_height,
    3 * row_height + plot_margin
  ),
  widths = c(
    plot_width - legend_width,
    legend_width
  )
)
ggsave(
  args$output,
  plot = output,
  width = plot_width,
  height = plot_height,
  units = 'cm'
)
|
library(aiRthermo)
### Name: moistAdiabaticLapseRate
### Title: Moist Adiabatic Lapse Rate
### Aliases: moistAdiabaticLapseRate
### Keywords: Functions
### ** Examples
data(RadiosondeA)
# Column 6 of the bundled sounding scaled by 1/1000 -- presumably converting
# a g/kg mixing ratio to the kg/kg units moistAdiabaticLapseRate expects;
# TODO confirm against the aiRthermo documentation.
aws<-RadiosondeA[,6]/1000
moistAdiabaticLapseRate(aws)
| /data/genthat_extracted_code/aiRthermo/examples/moistAdiabaticLapseRate.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 248 | r | library(aiRthermo)
### Name: moistAdiabaticLapseRate
### Title: Moist Adiabatic Lapse Rate
### Aliases: moistAdiabaticLapseRate
### Keywords: Functions
### ** Examples
data(RadiosondeA)
# Column 6 of the bundled sounding scaled by 1/1000 -- presumably converting
# a g/kg mixing ratio to the kg/kg units moistAdiabaticLapseRate expects;
# TODO confirm against the aiRthermo documentation.
aws<-RadiosondeA[,6]/1000
moistAdiabaticLapseRate(aws)
|
#' Sample non-mutated sites and extract mutations in region of interest.
#'
#' @param snv.mutations.file SNV mutations MAF file.
#' @param indel.mutations.file Indel mutations MAF file.
#' @param mask.regions.file Regions to mask in genome, for example, non-mappable regions/immunoglobin loci/CDS regions RDS file, default file = mask_regions.RDS.
#' @param all.sites.file All sites in whole genome RDS file, default file = all_sites.RDS.
#' @param region.of.interest Region of interest bed file, default = NULL.
#' @param sample To sample for non-mutated sites or to use all sites in region of interest, default = TRUE.
#' @param cores Number of cores, default = 1.
#' @return A list containing SNV mutations in region of interest, sampled mutated and non-mutated SNV sites, indel mutations in region of interest and sampled mutated and non-mutated indel sites.
#' @export
sample.sites = function(snv.mutations.file, indel.mutations.file, mask.regions.file = system.file("extdata", "mask_regions.RDS", package = "MutSpot"), all.sites.file = system.file("extdata", "all_sites.RDS", package = "MutSpot"), region.of.interest = NULL, sample = TRUE, cores = 1) {
max.sites = 2000000 * 1.12
min.sites = 4000 * 1.12
# Chr1-ChrX
chrOrder <- c(paste("chr", 1:22, sep=""), "chrX")
seqi = GenomeInfoDb::seqinfo(BSgenome.Hsapiens.UCSC.hg19::Hsapiens)[GenomeInfoDb::seqnames(BSgenome.Hsapiens.UCSC.hg19::Hsapiens)[1:23]]
# Define masked region i.e. CDS, immunoglobulin loci and nonmappable
mask.regions = readRDS(mask.regions.file)
mask.regions = mask.regions[as.character(GenomeInfoDb::seqnames(mask.regions)) %in% as.character(GenomeInfoDb::seqnames(seqi))]
# Define all sites in whole genome
all.sites = readRDS(all.sites.file)
all.sites = all.sites[as.character(GenomeInfoDb::seqnames(all.sites)) %in% as.character(GenomeInfoDb::seqnames(seqi))]
if (!is.null(snv.mutations.file)) {
# Define SNV mutations
maf.snv.mutations <- maf.to.granges(snv.mutations.file)
maf.snv.mutations = maf.snv.mutations[as.character(GenomeInfoDb::seqnames(maf.snv.mutations)) %in% as.character(GenomeInfoDb::seqnames(seqi))]
maf.snv.mutations2 = maf.snv.mutations
# If specified region, redefine SNV mutations to be in specified region
if (!is.null(region.of.interest)) {
# Define specified region
regions = bed.to.granges(region.of.interest)
regions = GenomicRanges::reduce(regions)
regions = regions[as.character(GenomeInfoDb::seqnames(regions)) %in% as.character(GenomeInfoDb::seqnames(seqi))]
ovl = IRanges::findOverlaps(maf.snv.mutations, regions)
maf.snv.mutations = maf.snv.mutations[unique(S4Vectors::queryHits(ovl))]
filtered.snv.mutations = maf.snv.mutations
filtered.snv.mutations = GenomicRanges::as.data.frame(filtered.snv.mutations)
filtered.snv.mutations = filtered.snv.mutations[ ,-c(4,5)]
} else {
filtered.snv.mutations = NULL
}
if (length(maf.snv.mutations) > max.sites / 2) {
downsample.snv = TRUE
print(paste("Downsample SNV mutations as number of SNV mutations exceeded ", max.sites, sep = ""))
} else {
downsample.snv = FALSE
}
if (length(maf.snv.mutations) < min.sites / 2) {
ratio.snv = ceiling((min.sites - length(maf.snv.mutations)) / length(maf.snv.mutations))
print(paste("Ratio of number of mutated sites to non-mutated sites for SNV is 1:", ratio.snv, sep = ""))
} else {
ratio.snv = 1
}
} else {
downsample.snv = FALSE
}
if (!is.null(indel.mutations.file)) {
# Define indel mutations
maf.indel.mutations <- maf.to.granges(indel.mutations.file)
maf.indel.mutations = maf.indel.mutations[as.character(GenomeInfoDb::seqnames(maf.indel.mutations)) %in% as.character(GenomeInfoDb::seqnames(seqi))]
maf.indel.mutations2 = maf.indel.mutations
# If specified region, redefine indel mutations to be in specified region
if (!is.null(region.of.interest)) {
regions = bed.to.granges(region.of.interest)
regions = GenomicRanges::reduce(regions)
regions = regions[as.character(GenomeInfoDb::seqnames(regions)) %in% as.character(GenomeInfoDb::seqnames(seqi))]
ovl = IRanges::findOverlaps(maf.indel.mutations, regions)
maf.indel.mutations = maf.indel.mutations[unique(S4Vectors::queryHits(ovl))]
filtered.indel.mutations = maf.indel.mutations
filtered.indel.mutations = GenomicRanges::as.data.frame(filtered.indel.mutations)
filtered.indel.mutations = filtered.indel.mutations[ ,-c(4,5)]
} else {
filtered.indel.mutations = NULL
}
if (length(maf.indel.mutations) > max.sites / 2) {
downsample.indel = TRUE
print(paste("Downsample indel mutations as number of indel mutations exceeded ", max.sites, sep = ""))
} else {
downsample.indel = FALSE
}
if (length(maf.indel.mutations) < min.sites / 2) {
ratio.indel = ceiling((min.sites - length(maf.indel.mutations)) / length(maf.indel.mutations))
print(paste("Ratio of number of mutated sites to non-mutated sites for indel is 1:", ratio.indel, sep = ""))
} else {
ratio.indel = 1
}
} else {
downsample.indel = FALSE
}
# To downsample mutated sites or not, if too many mutations, should consider downsampling before sampling for non-mutated sites
if (downsample.snv) {
nsites.snv = max.sites / 2
t = GenomicRanges::split(maf.snv.mutations, GenomeInfoDb::seqnames(maf.snv.mutations))
nsites.snv.chrom = round(unlist(lapply(t, FUN=function(x) sum(as.numeric(GenomicRanges::width(x))))) / sum(unlist(lapply(t, FUN = function(x) sum(as.numeric(GenomicRanges::width(x)))))) * nsites.snv)
seed.rand.snv = seq(1:length(t)) * 4
# Downsample sites
downsampled.snv.sites = parallel::mclapply(1:length(t), function(i) {
pop = IRanges::tile(t[[i]], width = 1)
pop = BiocGenerics::unlist(pop)
set.seed(seed.rand.snv[i])
pos = sample(GenomicRanges::start(pop), nsites.snv.chrom[i])
if (length(pos) > 0) {
gr = GenomicRanges::GRanges(unique(as.character(GenomeInfoDb::seqnames(t[[i]]))), IRanges::IRanges(pos, pos))
return(gr)
} else {
return(NULL)
}
}, mc.cores = cores)
downsampled.snv.sites[sapply(downsampled.snv.sites, is.null)] <- NULL
downsampled.snv.sites = suppressWarnings(do.call(c, downsampled.snv.sites))
maf.snv.mutations = downsampled.snv.sites
} else {
downsampled.snv.sites = NULL
}
if (downsample.indel) {
nsites.indel = max.sites / 2
t = GenomicRanges::split(maf.indel.mutations, GenomeInfoDb::seqnames(maf.indel.mutations))
nsites.indel.chrom = round(unlist(lapply(t, FUN=function(x) sum(as.numeric(GenomicRanges::width(x))))) / sum(unlist(lapply(t, FUN = function(x) sum(as.numeric(GenomicRanges::width(x)))))) * nsites.snv)
seed.rand.indel = seq(1:length(t)) * 4
# Downsample sites
downsampled.indel.sites = parallel::mclapply(1:length(t), function(i) {
pop = IRanges::tile(t[[i]], width = 1)
pop = BiocGenerics::unlist(pop)
set.seed(seed.rand.indel[i])
pos = sample(GenomicRanges::start(pop), nsites.indel.chrom[i])
if (length(pos) > 0) {
gr = GenomicRanges::GRanges(unique(as.character(GenomeInfoDb::seqnames(t[[i]]))), IRanges::IRanges(pos, pos))
return(gr)
} else {
return(NULL)
}
}, mc.cores = cores)
downsampled.indel.sites[sapply(downsampled.indel.sites, is.null)] <- NULL
downsampled.indel.sites = suppressWarnings(do.call(c, downsampled.indel.sites))
maf.indel.mutations = downsampled.indel.sites
} else {
downsampled.indel.sites = NULL
}
# To sample or not to sample for non-mutated sites, if the specified region is too small, may choose not to sample sites and use all sites in the specified region
if (sample) {
print("Sampling to be done...")
# If snv mutations available, else skip this
if (!is.null(snv.mutations.file)) {
print("Sampling SNV sites...")
npatients.snv = length(unique(maf.snv.mutations$sid))
maf.snv.mutations <- unique(maf.snv.mutations)
# Remove SNV mutations in masked region
maf.snv.mutations = subtract.regions.from.roi(maf.snv.mutations, mask.regions, cores=cores)
# Target number of sites to sample, take into account of larger masked regions, and that mutated sites tend not be in masked regions
nsites.snv = length(maf.snv.mutations)*(ratio.snv + ratio.snv * 0.12)
if (nsites.snv < c(10000 * 1.12)) {
nsites.snv = 10000 * 1.12
}
# If specified region, redefine all sites to be specified region and not whole genome
if (!is.null(region.of.interest)) {
all.sites.snv = regions[as.character(GenomeInfoDb::seqnames(regions)) %in% as.character(GenomeInfoDb::seqnames(seqi))]
if (nsites.snv > sum(as.numeric(GenomicRanges::width(all.sites.snv)))) {
print("Error due to insufficient sites to sample from")
sampled.snv.sites = NULL
} else {
# Number of sites to sample per chromosome
t = GenomicRanges::split(all.sites.snv, GenomeInfoDb::seqnames(all.sites.snv))
nsites.snv.chrom = round(unlist(lapply(t, FUN=function(x) sum(as.numeric(GenomicRanges::width(x))))) / sum(unlist(lapply(t, FUN = function(x) sum(as.numeric(GenomicRanges::width(x)))))) * nsites.snv)
seed.rand.snv = seq(1:length(t)) * 4
# Sample sites
all.sites.snv.samples = parallel::mclapply(1:length(t), function(i) {
pop = IRanges::tile(t[[i]], width = 1)
pop = BiocGenerics::unlist(pop)
set.seed(seed.rand.snv[i])
pos = sample(GenomicRanges::start(pop), nsites.snv.chrom[i])
if (length(pos) > 0) {
gr = GenomicRanges::GRanges(unique(as.character(GenomeInfoDb::seqnames(t[[i]]))), IRanges::IRanges(pos, pos))
return(gr)
} else {
return(NULL)
}
}, mc.cores = cores)
}
} else {
all.sites.snv = all.sites
# Number of sites to sample per chromosome
nsites.snv.chrom = round(GenomicRanges::width(all.sites.snv) / sum(as.numeric(GenomicRanges::width(all.sites.snv))) * nsites.snv)
seed.rand.snv = seq(1:length(all.sites.snv)) * 4
# Sample sites
all.sites.snv.samples = parallel::mclapply(1:length(all.sites.snv), function(i) {
set.seed(seed.rand.snv[i])
pos = sample(GenomicRanges::start(all.sites.snv)[i]:GenomicRanges::end(all.sites.snv)[i], nsites.snv.chrom[i])
if (length(pos) > 0) {
gr = GenomicRanges::GRanges(as.character(GenomeInfoDb::seqnames(all.sites.snv)[i]), IRanges::IRanges(pos, pos))
return (gr)
} else {
return(NULL)
}
}, mc.cores = cores)
}
all.sites.snv.samples[sapply(all.sites.snv.samples, is.null)] <- NULL
if (length(all.sites.snv.samples) == 0) {
filtered.snv.mutations = NULL
sampled.snv.sites = NULL
} else {
# all.sites.snv.samples = suppressWarnings(do.call(getMethod(c, "GenomicRanges"), all.sites.snv.samples))
all.sites.snv.samples = suppressWarnings(do.call(c, all.sites.snv.samples))
# Mask selected sites that are mutated or in masked region
GenomicRanges::mcols(maf.snv.mutations2) = NULL
mask.snv.regions = GenomicRanges::reduce(c(maf.snv.mutations2, mask.regions))
nonmut.snv.sample = subtract.regions.from.roi(all.sites.snv.samples, mask.snv.regions, cores = cores)
if (length(nonmut.snv.sample) != 0) {
nonmut.snv.sample$mut = 0
maf.snv.mutations$mut = 1
sampled.snv.sites = sort(c(nonmut.snv.sample, maf.snv.mutations))
} else {
sampled.snv.sites = NULL
filtered.snv.mutations = NULL
}
}
} else {
filtered.snv.mutations = NULL
sampled.snv.sites = NULL
}
# If indel mutations available, else skip this
if (!is.null(indel.mutations.file)) {
print("Sampling indel sites...")
npatients.indel = length(unique(maf.indel.mutations$sid))
maf.indel.mutations <- unique(maf.indel.mutations)
# Remove indel mutations in masked region
maf.indel.mutations = subtract.regions.from.roi(maf.indel.mutations, mask.regions, cores = cores)
# Target number of sites to sample, take into account of larger masked regions, and that mutated sites tend not be in masked regions
nsites.indel = length(maf.indel.mutations) * (ratio.indel + ratio.indel * 0.12)
if (nsites.indel < c(10000 * 1.12)) {
nsites.indel = 10000 * 1.12
}
# If specified region, redefine all sites to be specified region and not whole genome
if (!is.null(region.of.interest)) {
all.sites.indel = regions[as.character(GenomeInfoDb::seqnames(regions)) %in% as.character(GenomeInfoDb::seqnames(seqi))]
if (nsites.indel > sum(as.numeric(GenomicRanges::width(all.sites.indel)))) {
print("Error due to insufficient sites to sample from")
sampled.indel.sites = NULL
} else {
# Number of sites to sample per chromosome
t = GenomicRanges::split(all.sites.indel, GenomeInfoDb::seqnames(all.sites.indel))
nsites.indel.chrom = round(unlist(lapply(t, FUN = function(x) sum(as.numeric(GenomicRanges::width(x))))) / sum(unlist(lapply(t, FUN = function(x) sum(as.numeric(GenomicRanges::width(x)))))) * nsites.indel)
seed.rand.indel = seq(1:length(t)) * 4
# Sample sites
all.sites.indel.samples = parallel::mclapply(1:length(t), function(i) {
pop = IRanges::tile(t[[i]], width = 1)
pop = BiocGenerics::unlist(pop)
set.seed(seed.rand.indel[i])
pos = sample(GenomicRanges::start(pop), nsites.indel.chrom[i])
if (length(pos) > 0) {
gr = GenomicRanges::GRanges(unique(as.character(GenomeInfoDb::seqnames(t[[i]]))), IRanges::IRanges(pos, pos))
return(gr)
} else {
return(NULL)
}
}, mc.cores = cores)
}
} else {
all.sites.indel = all.sites
# Number of sites to sample per chromosome
nsites.indel.chrom = round(GenomicRanges::width(all.sites.indel) / sum(as.numeric(GenomicRanges::width(all.sites.indel))) * nsites.indel)
seed.rand.indel = seq(1:length(all.sites.indel)) * 4
# Sample sites
all.sites.indel.samples = parallel::mclapply(1:length(all.sites.indel), function(i) {
set.seed(seed.rand.indel[i])
pos = sample(GenomicRanges::start(all.sites.indel)[i]:GenomicRanges::end(all.sites.indel)[i], nsites.indel.chrom[i])
if (length(pos) > 0) {
gr = GenomicRanges::GRanges(as.character(GenomeInfoDb::seqnames(all.sites.indel)[i]), IRanges::IRanges(pos, pos))
return(gr)
} else {
return(NULL)
}
}, mc.cores = cores)
}
all.sites.indel.samples[sapply(all.sites.indel.samples, is.null)] <- NULL
if (length(all.sites.indel.samples) == 0) {
filtered.indel.mutations = NULL
sampled.indel.sites = NULL
} else {
# all.sites.indel.samples = suppressWarnings(do.call(getMethod(c, "GenomicRanges"), all.sites.indel.samples))
all.sites.indel.samples = suppressWarnings(do.call(c, all.sites.indel.samples))
# Mask selected sites that are mutated or in nonmapple regions
GenomicRanges::mcols(maf.indel.mutations2) = NULL
mask.indel.regions = GenomicRanges::reduce(c(maf.indel.mutations2, mask.regions))
nonmut.indel.sample = subtract.regions.from.roi(all.sites.indel.samples, mask.indel.regions, cores = cores)
if (length(nonmut.indel.sample) != 0) {
nonmut.indel.sample$mut = 0
maf.indel.mutations$mut = 1
GenomicRanges::start(maf.indel.mutations) = GenomicRanges::start(maf.indel.mutations) + ceiling((GenomicRanges::width(maf.indel.mutations) - 1) / 2)
GenomicRanges::end(maf.indel.mutations) = GenomicRanges::start(maf.indel.mutations)
sampled.indel.sites = sort(c(nonmut.indel.sample, maf.indel.mutations))
} else {
filtered.indel.mutations = NULL
sampled.indel.sites = NULL
}
}
} else {
filtered.indel.mutations = NULL
sampled.indel.sites = NULL
}
return(list(filtered.snv.mutations, sampled.snv.sites, filtered.indel.mutations, sampled.indel.sites, downsampled.snv.sites, downsampled.indel.sites))
} else {
print("No sampling...")
# If SNV mutations available, else skip this
if (!is.null(snv.mutations.file)) {
print("Preparing SNV sites...")
npatients.snv = length(unique(maf.snv.mutations$sid))
maf.snv.mutations <- unique(maf.snv.mutations)
# Remove SNV mutations in masked region
maf.snv.mutations = subtract.regions.from.roi(maf.snv.mutations, mask.regions, cores = cores)
# If specified region, redefine all sites to be specified region and not whole genome
if (!is.null(region.of.interest)) {
all.sites.snv = regions[as.character(GenomeInfoDb::seqnames(regions)) %in% as.character(GenomeInfoDb::seqnames(seqi))]
} else {
all.sites.snv = all.sites
}
# All sites are potential non-mutated sites
all.sites.snv = IRanges::tile(all.sites.snv, width = 1)
all.sites.snv = BiocGenerics::unlist(all.sites.snv)
# Mask selected sites that are mutated or in masked region
GenomicRanges::mcols(maf.snv.mutations2) = NULL
mask.snv.regions = GenomicRanges::reduce(c(maf.snv.mutations2, mask.regions))
nonmut.snv.sample = subtract.regions.from.roi(all.sites.snv, mask.snv.regions, cores = cores)
if (length(nonmut.snv.sample) != 0) {
nonmut.snv.sample$mut = 0
maf.snv.mutations$mut = 1
sampled.snv.sites = sort(c(nonmut.snv.sample, maf.snv.mutations))
} else {
filtered.snv.mutations = NULL
sampled.snv.sites = NULL
}
} else {
filtered.snv.mutations = NULL
sampled.snv.sites = NULL
}
# If indel mutations available, else skip this
if (!is.null(indel.mutations.file)) {
print("Preparing indel sites...")
npatients.indel = length(unique(maf.indel.mutations$sid))
maf.indel.mutations <- unique(maf.indel.mutations)
# Remove indel mutations in masked region
maf.indel.mutations = subtract.regions.from.roi(maf.indel.mutations, mask.regions, cores = cores)
# If specified region, redefine all sites to be specified region and not whole genome
if (!is.null(region.of.interest)) {
all.sites.indel = regions[as.character(GenomeInfoDb::seqnames(regions)) %in% as.character(GenomeInfoDb::seqnames(seqi))]
} else {
all.sites.indel = all.sites
}
# All sites are potential non-mutated sites
all.sites.indel = IRanges::tile(all.sites.indel, width = 1)
all.sites.indel = BiocGenerics::unlist(all.sites.indel)
# Mask selected sites that are mutated or in masked region
GenomicRanges::mcols(maf.indel.mutations2) = NULL
mask.indel.regions = GenomicRanges::reduce(c(maf.indel.mutations2, mask.regions))
nonmut.indel.sample = subtract.regions.from.roi(all.sites.indel, mask.indel.regions, cores = cores)
if (length(nonmut.indel.sample) != 0) {
nonmut.indel.sample$mut = 0
maf.indel.mutations$mut = 1
GenomicRanges::start(maf.indel.mutations) = GenomicRanges::start(maf.indel.mutations) + ceiling((GenomicRanges::width(maf.indel.mutations) - 1) / 2)
GenomicRanges::end(maf.indel.mutations) = GenomicRanges::start(maf.indel.mutations)
sampled.indel.sites = sort(c(nonmut.indel.sample, maf.indel.mutations))
} else {
filtered.indel.mutations = NULL
sampled.indel.sites = NULL
}
} else {
filtered.indel.mutations = NULL
sampled.indel.sites = NULL
}
return(list(filtered.snv.mutations, sampled.snv.sites, filtered.indel.mutations, sampled.indel.sites, downsampled.snv.sites, downsampled.indel.sites))
}
}
| /MutSpot_Rpackage/R/sample.sites.R | no_license | danchubb/hg38MutSpot | R | false | false | 21,759 | r | #' Sample non-mutated sites and extract mutations in region of interest.
#'
#' @param snv.mutations.file SNV mutations MAF file.
#' @param indel.mutations.file Indel mutations MAF file.
#' @param mask.regions.file Regions to mask in genome, for example, non-mappable regions/immunoglobin loci/CDS regions RDS file, default file = mask_regions.RDS.
#' @param all.sites.file All sites in whole genome RDS file, default file = all_sites.RDS.
#' @param region.of.interest Region of interest bed file, default = NULL.
#' @param sample To sample for non-mutated sites or to use all sites in region of interest, default = TRUE.
#' @param cores Number of cores, default = 1.
#' @return A list contatining SNV mutations in region of interest, sampled mutated and non-mutated SNV sites, indel mutations in region of interest and sampled mutated and non-mutated indel sites.
#' @export
sample.sites = function(snv.mutations.file, indel.mutations.file, mask.regions.file = system.file("extdata", "mask_regions.RDS", package = "MutSpot"), all.sites.file = system.file("extdata", "all_sites.RDS", package = "MutSpot"), region.of.interest = NULL, sample = TRUE, cores = 1) {
  # Hard caps on the total number of sites retained; the 1.12 factor
  # over-samples by ~12% to compensate for sites later lost to masking.
  max.sites = 2000000 * 1.12
  min.sites = 4000 * 1.12
  # Chr1-ChrX
  chrOrder <- c(paste("chr", 1:22, sep=""), "chrX")
  seqi = GenomeInfoDb::seqinfo(BSgenome.Hsapiens.UCSC.hg19::Hsapiens)[GenomeInfoDb::seqnames(BSgenome.Hsapiens.UCSC.hg19::Hsapiens)[1:23]]
  # Define masked region i.e. CDS, immunoglobulin loci and nonmappable
  mask.regions = readRDS(mask.regions.file)
  mask.regions = mask.regions[as.character(GenomeInfoDb::seqnames(mask.regions)) %in% as.character(GenomeInfoDb::seqnames(seqi))]
  # Define all sites in whole genome
  all.sites = readRDS(all.sites.file)
  all.sites = all.sites[as.character(GenomeInfoDb::seqnames(all.sites)) %in% as.character(GenomeInfoDb::seqnames(seqi))]
  if (!is.null(snv.mutations.file)) {
    # Define SNV mutations
    maf.snv.mutations <- maf.to.granges(snv.mutations.file)
    maf.snv.mutations = maf.snv.mutations[as.character(GenomeInfoDb::seqnames(maf.snv.mutations)) %in% as.character(GenomeInfoDb::seqnames(seqi))]
    maf.snv.mutations2 = maf.snv.mutations
    # If specified region, redefine SNV mutations to be in specified region
    if (!is.null(region.of.interest)) {
      # Define specified region
      regions = bed.to.granges(region.of.interest)
      regions = GenomicRanges::reduce(regions)
      regions = regions[as.character(GenomeInfoDb::seqnames(regions)) %in% as.character(GenomeInfoDb::seqnames(seqi))]
      ovl = IRanges::findOverlaps(maf.snv.mutations, regions)
      maf.snv.mutations = maf.snv.mutations[unique(S4Vectors::queryHits(ovl))]
      filtered.snv.mutations = maf.snv.mutations
      filtered.snv.mutations = GenomicRanges::as.data.frame(filtered.snv.mutations)
      filtered.snv.mutations = filtered.snv.mutations[ ,-c(4,5)]
    } else {
      filtered.snv.mutations = NULL
    }
    # Decide whether SNV mutations must be downsampled (too many) and the
    # ratio of non-mutated to mutated sites to sample (too few).
    if (length(maf.snv.mutations) > max.sites / 2) {
      downsample.snv = TRUE
      print(paste("Downsample SNV mutations as number of SNV mutations exceeded ", max.sites, sep = ""))
    } else {
      downsample.snv = FALSE
    }
    if (length(maf.snv.mutations) < min.sites / 2) {
      ratio.snv = ceiling((min.sites - length(maf.snv.mutations)) / length(maf.snv.mutations))
      print(paste("Ratio of number of mutated sites to non-mutated sites for SNV is 1:", ratio.snv, sep = ""))
    } else {
      ratio.snv = 1
    }
  } else {
    downsample.snv = FALSE
  }
  if (!is.null(indel.mutations.file)) {
    # Define indel mutations
    maf.indel.mutations <- maf.to.granges(indel.mutations.file)
    maf.indel.mutations = maf.indel.mutations[as.character(GenomeInfoDb::seqnames(maf.indel.mutations)) %in% as.character(GenomeInfoDb::seqnames(seqi))]
    maf.indel.mutations2 = maf.indel.mutations
    # If specified region, redefine indel mutations to be in specified region
    if (!is.null(region.of.interest)) {
      regions = bed.to.granges(region.of.interest)
      regions = GenomicRanges::reduce(regions)
      regions = regions[as.character(GenomeInfoDb::seqnames(regions)) %in% as.character(GenomeInfoDb::seqnames(seqi))]
      ovl = IRanges::findOverlaps(maf.indel.mutations, regions)
      maf.indel.mutations = maf.indel.mutations[unique(S4Vectors::queryHits(ovl))]
      filtered.indel.mutations = maf.indel.mutations
      filtered.indel.mutations = GenomicRanges::as.data.frame(filtered.indel.mutations)
      filtered.indel.mutations = filtered.indel.mutations[ ,-c(4,5)]
    } else {
      filtered.indel.mutations = NULL
    }
    if (length(maf.indel.mutations) > max.sites / 2) {
      downsample.indel = TRUE
      print(paste("Downsample indel mutations as number of indel mutations exceeded ", max.sites, sep = ""))
    } else {
      downsample.indel = FALSE
    }
    if (length(maf.indel.mutations) < min.sites / 2) {
      ratio.indel = ceiling((min.sites - length(maf.indel.mutations)) / length(maf.indel.mutations))
      print(paste("Ratio of number of mutated sites to non-mutated sites for indel is 1:", ratio.indel, sep = ""))
    } else {
      ratio.indel = 1
    }
  } else {
    downsample.indel = FALSE
  }
  # To downsample mutated sites or not, if too many mutations, should consider downsampling before sampling for non-mutated sites
  if (downsample.snv) {
    nsites.snv = max.sites / 2
    t = GenomicRanges::split(maf.snv.mutations, GenomeInfoDb::seqnames(maf.snv.mutations))
    # Per-chromosome quota proportional to the chromosome's share of total width
    nsites.snv.chrom = round(unlist(lapply(t, FUN=function(x) sum(as.numeric(GenomicRanges::width(x))))) / sum(unlist(lapply(t, FUN = function(x) sum(as.numeric(GenomicRanges::width(x)))))) * nsites.snv)
    seed.rand.snv = seq(1:length(t)) * 4
    # Downsample sites
    downsampled.snv.sites = parallel::mclapply(1:length(t), function(i) {
      pop = IRanges::tile(t[[i]], width = 1)
      pop = BiocGenerics::unlist(pop)
      set.seed(seed.rand.snv[i])
      pos = sample(GenomicRanges::start(pop), nsites.snv.chrom[i])
      if (length(pos) > 0) {
        gr = GenomicRanges::GRanges(unique(as.character(GenomeInfoDb::seqnames(t[[i]]))), IRanges::IRanges(pos, pos))
        return(gr)
      } else {
        return(NULL)
      }
    }, mc.cores = cores)
    downsampled.snv.sites[sapply(downsampled.snv.sites, is.null)] <- NULL
    downsampled.snv.sites = suppressWarnings(do.call(c, downsampled.snv.sites))
    maf.snv.mutations = downsampled.snv.sites
  } else {
    downsampled.snv.sites = NULL
  }
  if (downsample.indel) {
    nsites.indel = max.sites / 2
    t = GenomicRanges::split(maf.indel.mutations, GenomeInfoDb::seqnames(maf.indel.mutations))
    # BUG FIX: the per-chromosome quota was previously scaled by nsites.snv
    # (copy-paste from the SNV branch); it must be scaled by nsites.indel.
    nsites.indel.chrom = round(unlist(lapply(t, FUN=function(x) sum(as.numeric(GenomicRanges::width(x))))) / sum(unlist(lapply(t, FUN = function(x) sum(as.numeric(GenomicRanges::width(x)))))) * nsites.indel)
    seed.rand.indel = seq(1:length(t)) * 4
    # Downsample sites
    downsampled.indel.sites = parallel::mclapply(1:length(t), function(i) {
      pop = IRanges::tile(t[[i]], width = 1)
      pop = BiocGenerics::unlist(pop)
      set.seed(seed.rand.indel[i])
      pos = sample(GenomicRanges::start(pop), nsites.indel.chrom[i])
      if (length(pos) > 0) {
        gr = GenomicRanges::GRanges(unique(as.character(GenomeInfoDb::seqnames(t[[i]]))), IRanges::IRanges(pos, pos))
        return(gr)
      } else {
        return(NULL)
      }
    }, mc.cores = cores)
    downsampled.indel.sites[sapply(downsampled.indel.sites, is.null)] <- NULL
    downsampled.indel.sites = suppressWarnings(do.call(c, downsampled.indel.sites))
    maf.indel.mutations = downsampled.indel.sites
  } else {
    downsampled.indel.sites = NULL
  }
  # To sample or not to sample for non-mutated sites, if the specified region is too small, may choose not to sample sites and use all sites in the specified region
  if (sample) {
    print("Sampling to be done...")
    # If snv mutations available, else skip this
    if (!is.null(snv.mutations.file)) {
      print("Sampling SNV sites...")
      npatients.snv = length(unique(maf.snv.mutations$sid))
      maf.snv.mutations <- unique(maf.snv.mutations)
      # Remove SNV mutations in masked region
      maf.snv.mutations = subtract.regions.from.roi(maf.snv.mutations, mask.regions, cores=cores)
      # Target number of sites to sample, take into account of larger masked regions, and that mutated sites tend not be in masked regions
      nsites.snv = length(maf.snv.mutations)*(ratio.snv + ratio.snv * 0.12)
      if (nsites.snv < c(10000 * 1.12)) {
        nsites.snv = 10000 * 1.12
      }
      # If specified region, redefine all sites to be specified region and not whole genome
      if (!is.null(region.of.interest)) {
        all.sites.snv = regions[as.character(GenomeInfoDb::seqnames(regions)) %in% as.character(GenomeInfoDb::seqnames(seqi))]
        if (nsites.snv > sum(as.numeric(GenomicRanges::width(all.sites.snv)))) {
          print("Error due to insufficient sites to sample from")
          sampled.snv.sites = NULL
        } else {
          # Number of sites to sample per chromosome
          t = GenomicRanges::split(all.sites.snv, GenomeInfoDb::seqnames(all.sites.snv))
          nsites.snv.chrom = round(unlist(lapply(t, FUN=function(x) sum(as.numeric(GenomicRanges::width(x))))) / sum(unlist(lapply(t, FUN = function(x) sum(as.numeric(GenomicRanges::width(x)))))) * nsites.snv)
          seed.rand.snv = seq(1:length(t)) * 4
          # Sample sites
          all.sites.snv.samples = parallel::mclapply(1:length(t), function(i) {
            pop = IRanges::tile(t[[i]], width = 1)
            pop = BiocGenerics::unlist(pop)
            set.seed(seed.rand.snv[i])
            pos = sample(GenomicRanges::start(pop), nsites.snv.chrom[i])
            if (length(pos) > 0) {
              gr = GenomicRanges::GRanges(unique(as.character(GenomeInfoDb::seqnames(t[[i]]))), IRanges::IRanges(pos, pos))
              return(gr)
            } else {
              return(NULL)
            }
          }, mc.cores = cores)
        }
      } else {
        all.sites.snv = all.sites
        # Number of sites to sample per chromosome
        nsites.snv.chrom = round(GenomicRanges::width(all.sites.snv) / sum(as.numeric(GenomicRanges::width(all.sites.snv))) * nsites.snv)
        seed.rand.snv = seq(1:length(all.sites.snv)) * 4
        # Sample sites
        all.sites.snv.samples = parallel::mclapply(1:length(all.sites.snv), function(i) {
          set.seed(seed.rand.snv[i])
          pos = sample(GenomicRanges::start(all.sites.snv)[i]:GenomicRanges::end(all.sites.snv)[i], nsites.snv.chrom[i])
          if (length(pos) > 0) {
            gr = GenomicRanges::GRanges(as.character(GenomeInfoDb::seqnames(all.sites.snv)[i]), IRanges::IRanges(pos, pos))
            return (gr)
          } else {
            return(NULL)
          }
        }, mc.cores = cores)
      }
      all.sites.snv.samples[sapply(all.sites.snv.samples, is.null)] <- NULL
      if (length(all.sites.snv.samples) == 0) {
        filtered.snv.mutations = NULL
        sampled.snv.sites = NULL
      } else {
        # all.sites.snv.samples = suppressWarnings(do.call(getMethod(c, "GenomicRanges"), all.sites.snv.samples))
        all.sites.snv.samples = suppressWarnings(do.call(c, all.sites.snv.samples))
        # Mask selected sites that are mutated or in masked region
        GenomicRanges::mcols(maf.snv.mutations2) = NULL
        mask.snv.regions = GenomicRanges::reduce(c(maf.snv.mutations2, mask.regions))
        nonmut.snv.sample = subtract.regions.from.roi(all.sites.snv.samples, mask.snv.regions, cores = cores)
        if (length(nonmut.snv.sample) != 0) {
          # Label sampled sites: mut = 0 for non-mutated, mut = 1 for mutated
          nonmut.snv.sample$mut = 0
          maf.snv.mutations$mut = 1
          sampled.snv.sites = sort(c(nonmut.snv.sample, maf.snv.mutations))
        } else {
          sampled.snv.sites = NULL
          filtered.snv.mutations = NULL
        }
      }
    } else {
      filtered.snv.mutations = NULL
      sampled.snv.sites = NULL
    }
    # If indel mutations available, else skip this
    if (!is.null(indel.mutations.file)) {
      print("Sampling indel sites...")
      npatients.indel = length(unique(maf.indel.mutations$sid))
      maf.indel.mutations <- unique(maf.indel.mutations)
      # Remove indel mutations in masked region
      maf.indel.mutations = subtract.regions.from.roi(maf.indel.mutations, mask.regions, cores = cores)
      # Target number of sites to sample, take into account of larger masked regions, and that mutated sites tend not be in masked regions
      nsites.indel = length(maf.indel.mutations) * (ratio.indel + ratio.indel * 0.12)
      if (nsites.indel < c(10000 * 1.12)) {
        nsites.indel = 10000 * 1.12
      }
      # If specified region, redefine all sites to be specified region and not whole genome
      if (!is.null(region.of.interest)) {
        all.sites.indel = regions[as.character(GenomeInfoDb::seqnames(regions)) %in% as.character(GenomeInfoDb::seqnames(seqi))]
        if (nsites.indel > sum(as.numeric(GenomicRanges::width(all.sites.indel)))) {
          print("Error due to insufficient sites to sample from")
          sampled.indel.sites = NULL
        } else {
          # Number of sites to sample per chromosome
          t = GenomicRanges::split(all.sites.indel, GenomeInfoDb::seqnames(all.sites.indel))
          nsites.indel.chrom = round(unlist(lapply(t, FUN = function(x) sum(as.numeric(GenomicRanges::width(x))))) / sum(unlist(lapply(t, FUN = function(x) sum(as.numeric(GenomicRanges::width(x)))))) * nsites.indel)
          seed.rand.indel = seq(1:length(t)) * 4
          # Sample sites
          all.sites.indel.samples = parallel::mclapply(1:length(t), function(i) {
            pop = IRanges::tile(t[[i]], width = 1)
            pop = BiocGenerics::unlist(pop)
            set.seed(seed.rand.indel[i])
            pos = sample(GenomicRanges::start(pop), nsites.indel.chrom[i])
            if (length(pos) > 0) {
              gr = GenomicRanges::GRanges(unique(as.character(GenomeInfoDb::seqnames(t[[i]]))), IRanges::IRanges(pos, pos))
              return(gr)
            } else {
              return(NULL)
            }
          }, mc.cores = cores)
        }
      } else {
        all.sites.indel = all.sites
        # Number of sites to sample per chromosome
        nsites.indel.chrom = round(GenomicRanges::width(all.sites.indel) / sum(as.numeric(GenomicRanges::width(all.sites.indel))) * nsites.indel)
        seed.rand.indel = seq(1:length(all.sites.indel)) * 4
        # Sample sites
        all.sites.indel.samples = parallel::mclapply(1:length(all.sites.indel), function(i) {
          set.seed(seed.rand.indel[i])
          pos = sample(GenomicRanges::start(all.sites.indel)[i]:GenomicRanges::end(all.sites.indel)[i], nsites.indel.chrom[i])
          if (length(pos) > 0) {
            gr = GenomicRanges::GRanges(as.character(GenomeInfoDb::seqnames(all.sites.indel)[i]), IRanges::IRanges(pos, pos))
            return(gr)
          } else {
            return(NULL)
          }
        }, mc.cores = cores)
      }
      all.sites.indel.samples[sapply(all.sites.indel.samples, is.null)] <- NULL
      if (length(all.sites.indel.samples) == 0) {
        filtered.indel.mutations = NULL
        sampled.indel.sites = NULL
      } else {
        # all.sites.indel.samples = suppressWarnings(do.call(getMethod(c, "GenomicRanges"), all.sites.indel.samples))
        all.sites.indel.samples = suppressWarnings(do.call(c, all.sites.indel.samples))
        # Mask selected sites that are mutated or in nonmapple regions
        GenomicRanges::mcols(maf.indel.mutations2) = NULL
        mask.indel.regions = GenomicRanges::reduce(c(maf.indel.mutations2, mask.regions))
        nonmut.indel.sample = subtract.regions.from.roi(all.sites.indel.samples, mask.indel.regions, cores = cores)
        if (length(nonmut.indel.sample) != 0) {
          nonmut.indel.sample$mut = 0
          maf.indel.mutations$mut = 1
          # Collapse each indel to its midpoint (1-bp anchor) before merging
          GenomicRanges::start(maf.indel.mutations) = GenomicRanges::start(maf.indel.mutations) + ceiling((GenomicRanges::width(maf.indel.mutations) - 1) / 2)
          GenomicRanges::end(maf.indel.mutations) = GenomicRanges::start(maf.indel.mutations)
          sampled.indel.sites = sort(c(nonmut.indel.sample, maf.indel.mutations))
        } else {
          filtered.indel.mutations = NULL
          sampled.indel.sites = NULL
        }
      }
    } else {
      filtered.indel.mutations = NULL
      sampled.indel.sites = NULL
    }
    return(list(filtered.snv.mutations, sampled.snv.sites, filtered.indel.mutations, sampled.indel.sites, downsampled.snv.sites, downsampled.indel.sites))
  } else {
    print("No sampling...")
    # If SNV mutations available, else skip this
    if (!is.null(snv.mutations.file)) {
      print("Preparing SNV sites...")
      npatients.snv = length(unique(maf.snv.mutations$sid))
      maf.snv.mutations <- unique(maf.snv.mutations)
      # Remove SNV mutations in masked region
      maf.snv.mutations = subtract.regions.from.roi(maf.snv.mutations, mask.regions, cores = cores)
      # If specified region, redefine all sites to be specified region and not whole genome
      if (!is.null(region.of.interest)) {
        all.sites.snv = regions[as.character(GenomeInfoDb::seqnames(regions)) %in% as.character(GenomeInfoDb::seqnames(seqi))]
      } else {
        all.sites.snv = all.sites
      }
      # All sites are potential non-mutated sites
      all.sites.snv = IRanges::tile(all.sites.snv, width = 1)
      all.sites.snv = BiocGenerics::unlist(all.sites.snv)
      # Mask selected sites that are mutated or in masked region
      GenomicRanges::mcols(maf.snv.mutations2) = NULL
      mask.snv.regions = GenomicRanges::reduce(c(maf.snv.mutations2, mask.regions))
      nonmut.snv.sample = subtract.regions.from.roi(all.sites.snv, mask.snv.regions, cores = cores)
      if (length(nonmut.snv.sample) != 0) {
        nonmut.snv.sample$mut = 0
        maf.snv.mutations$mut = 1
        sampled.snv.sites = sort(c(nonmut.snv.sample, maf.snv.mutations))
      } else {
        filtered.snv.mutations = NULL
        sampled.snv.sites = NULL
      }
    } else {
      filtered.snv.mutations = NULL
      sampled.snv.sites = NULL
    }
    # If indel mutations available, else skip this
    if (!is.null(indel.mutations.file)) {
      print("Preparing indel sites...")
      npatients.indel = length(unique(maf.indel.mutations$sid))
      maf.indel.mutations <- unique(maf.indel.mutations)
      # Remove indel mutations in masked region
      maf.indel.mutations = subtract.regions.from.roi(maf.indel.mutations, mask.regions, cores = cores)
      # If specified region, redefine all sites to be specified region and not whole genome
      if (!is.null(region.of.interest)) {
        all.sites.indel = regions[as.character(GenomeInfoDb::seqnames(regions)) %in% as.character(GenomeInfoDb::seqnames(seqi))]
      } else {
        all.sites.indel = all.sites
      }
      # All sites are potential non-mutated sites
      all.sites.indel = IRanges::tile(all.sites.indel, width = 1)
      all.sites.indel = BiocGenerics::unlist(all.sites.indel)
      # Mask selected sites that are mutated or in masked region
      GenomicRanges::mcols(maf.indel.mutations2) = NULL
      mask.indel.regions = GenomicRanges::reduce(c(maf.indel.mutations2, mask.regions))
      nonmut.indel.sample = subtract.regions.from.roi(all.sites.indel, mask.indel.regions, cores = cores)
      if (length(nonmut.indel.sample) != 0) {
        nonmut.indel.sample$mut = 0
        maf.indel.mutations$mut = 1
        # Collapse each indel to its midpoint (1-bp anchor) before merging
        GenomicRanges::start(maf.indel.mutations) = GenomicRanges::start(maf.indel.mutations) + ceiling((GenomicRanges::width(maf.indel.mutations) - 1) / 2)
        GenomicRanges::end(maf.indel.mutations) = GenomicRanges::start(maf.indel.mutations)
        sampled.indel.sites = sort(c(nonmut.indel.sample, maf.indel.mutations))
      } else {
        filtered.indel.mutations = NULL
        sampled.indel.sites = NULL
      }
    } else {
      filtered.indel.mutations = NULL
      sampled.indel.sites = NULL
    }
    return(list(filtered.snv.mutations, sampled.snv.sites, filtered.indel.mutations, sampled.indel.sites, downsampled.snv.sites, downsampled.indel.sites))
  }
}
|
library(shiny)

# UI definition: the sidebar lets the user upload a CSV listing options; the
# server side renders matching checkboxGroupInput / radioButtons widgets into
# the "CustomCheckList" placeholder shown in the main panel.
sidebar <- sidebarPanel(
  fileInput(inputId = "CheckListFile", label = "Upload list of options")
)

main <- mainPanel(
  uiOutput("CustomCheckList")
)

shinyUI(fluidPage(
  titlePanel("Create a checkboxGroupInput and a RadioButtons widget from a CSV"),
  sidebarLayout(sidebar, main)
))
| /ui.R | no_license | debarros/CSVtoChecklist | R | false | false | 278 | r | library(shiny)
shinyUI(fluidPage(
titlePanel("Create a checkboxGroupInput and a RadioButtons widget from a CSV"),
sidebarLayout(
sidebarPanel(fileInput(inputId = "CheckListFile", label = "Upload list of options")),
mainPanel(uiOutput("CustomCheckList")
)
)
))
|
## Data source: https://data.giss.nasa.gov/gistemp/ (NASA GISS GISTEMP,
## Northern Hemisphere temperature anomalies)
teljes <- read.csv("NH.Ts+dSST.csv", skip=1)
teljes[1,14]
# Annual (Jan-Dec, column "J.D") mean anomaly as a numeric vector
éves <- as.numeric(as.character(teljes[, "J.D"]))
plot(éves)
temp.df <- data.frame(ido = 1880:2018,t = éves)
# Drop the last (incomplete) year
temp.df <- temp.df[-nrow(temp.df),]
# Non-parametric method: local regression (lowess smoothing)
temp.lowess <- lowess(temp.df)
plot(temp.df)
lines(temp.lowess)
# Narrower smoother span (roughly 5-year window) picks up shorter fluctuations
temp.lowess5 <- lowess(temp.df,f=1/28)
lines(temp.lowess5, col=2)
# Residuals around the narrow-span smoother, with their own lowess trend
plot(temp.df$ido, temp.df$t-temp.lowess5$y)
lines(lowess(temp.df$ido, temp.df$t-temp.lowess5$y), col=2)
## Linear regression
temp.reg <- lm(t ~ ido, temp.df)
abline(temp.reg,col=3)
plot(temp.reg)
## Regression diagnostics: residuals vs. time
plot(resid(temp.reg) ~ temp.df$ido)
lines(lowess(temp.df$ido, resid(temp.reg)),col=2)
## Quadratic polynomial fit
temp.reg2 <- lm(t ~ ido + I(ido^2), temp.df)
summary(temp.reg2)
lines(fitted(temp.reg2) ~ temp.df$ido,col=4)
## Quartic polynomial fit (orthogonal polynomials)
temp.reg4 <- lm(t ~ poly(ido,4), temp.df)
lines(fitted(temp.reg4) ~ temp.df$ido,col=5)
## Forecast: extrapolate the three fitted models to 2050
plot(temp.df, xlim=c(1880,2050),ylim=c(-0.5,6))
lines(2018:2050,predict(temp.reg,data.frame(ido=2018:2050)),col=1)
lines(2018:2050,predict(temp.reg2,data.frame(ido=2018:2050)),col=2)
lines(2018:2050,predict(temp.reg4,data.frame(ido=2018:2050)),col=3)
| /idosor.R | no_license | kaliczp/Modell2018Intro | R | false | false | 1,263 | r | ## https://data.giss.nasa.gov/gistemp/ az adatok forrása
teljes <- read.csv("NH.Ts+dSST.csv", skip=1)
teljes[1,14]
éves <- as.numeric(as.character(teljes[, "J.D"]))
plot(éves)
temp.df <- data.frame(ido = 1880:2018,t = éves)
temp.df <- temp.df[-nrow(temp.df),]
# Nem-paraméteres módszer lokális regresszió
temp.lowess <- lowess(temp.df)
plot(temp.df)
lines(temp.lowess)
temp.lowess5 <- lowess(temp.df,f=1/28)
lines(temp.lowess5, col=2)
plot(temp.df$ido, temp.df$t-temp.lowess5$y)
lines(lowess(temp.df$ido, temp.df$t-temp.lowess5$y), col=2)
## Regresszió
temp.reg <- lm(t ~ ido, temp.df)
abline(temp.reg,col=3)
plot(temp.reg)
## Regresszió diagnosztika
plot(resid(temp.reg) ~ temp.df$ido)
lines(lowess(temp.df$ido, resid(temp.reg)),col=2)
## Másodfokú polinom
temp.reg2 <- lm(t ~ ido + I(ido^2), temp.df)
summary(temp.reg2)
lines(fitted(temp.reg2) ~ temp.df$ido,col=4)
## Negyedfokú polinom
temp.reg4 <- lm(t ~ poly(ido,4), temp.df)
lines(fitted(temp.reg4) ~ temp.df$ido,col=5)
## Előrejelzés
plot(temp.df, xlim=c(1880,2050),ylim=c(-0.5,6))
lines(2018:2050,predict(temp.reg,data.frame(ido=2018:2050)),col=1)
lines(2018:2050,predict(temp.reg2,data.frame(ido=2018:2050)),col=2)
lines(2018:2050,predict(temp.reg4,data.frame(ido=2018:2050)),col=3)
|
# Source data: UCI "Individual household electric power consumption" data set.
dataFile <- 'household_power_consumption.txt'

# Download and unpack the archive only when the text file is not already present.
if (!file.exists(dataFile)) {
  download.file('https://d396qusza40orc.cloudfront.net/exdata/data/household_power_consumption.zip','household_power_consumption.zip',method='curl')
  unzip('household_power_consumption.zip')
}

# Read only the columns we need: Date (column 1, as character) and
# Global_active_power (column 3, numeric); all other columns are skipped
# by assigning them the 'NULL' class.
wantedClasses <- rep('NULL', 9)
wantedClasses[1] <- 'character'
wantedClasses[3] <- 'numeric'
rawData <- read.table(dataFile, sep=';', header=TRUE, na.strings='?', comment.char='', colClasses=wantedClasses)

# Parse the dates, then keep only the 1-2 February 2007 window.
rawData$Date <- as.Date(rawData$Date, '%d/%m/%Y')
keepRows <- (rawData$Date >= as.Date('2007-02-01')) & (rawData$Date <= as.Date('2007-02-02'))
activePower <- rawData[keepRows, 2]

# Render the histogram directly to a 480x480 PNG file.
png(filename='plot1.png', width=480, height=480)
hist(
  activePower,
  col='red',
  main='Global Active Power',
  xlab='Global Active Power (kilowatts)'
)
dev.off()
| /plot1.R | no_license | gregbox515aws/ExData_Plotting1 | R | false | false | 1,064 | r | fileName <- 'household_power_consumption.txt'
# load file if it is not already there
if (!file.exists(fileName)) {
download.file('https://d396qusza40orc.cloudfront.net/exdata/data/household_power_consumption.zip','household_power_consumption.zip',method='curl')
unzip('household_power_consumption.zip')
}
# identify columns to load
colList <- rep('NULL',9)
colList[1] <- 'character' # date
colList[3] <- 'numeric' # value
# read file
allRows <- read.table(fileName, sep=';', header=TRUE, na.strings='?', comment.char='', colClasses=colList)
# format dates for compariosn
allRows$Date <- as.Date(allRows$Date, '%d/%m/%Y')
# build index of rows to keep based on date range
dateIndex <- (allRows$Date >= as.Date('2007-02-01')) & (allRows$Date <= as.Date('2007-02-02'))
# keep only wanted rows
powerData <- allRows[dateIndex, 2]
# create png file
png(filename='plot1.png', width=480, height=480)
# draw histogram
hist(
powerData,
col='red',
main='Global Active Power',
xlab='Global Active Power (kilowatts)'
)
# close graphic device
dev.off()
|
get_catalog_nhanes <-
  function( data_name = "nhanes" , output_dir , ... ){
    # Scrape the NHANES "Data Files" page and build a catalog data.frame with
    # one row per downloadable data file (years, names, data/doc URLs, and the
    # local CSV path each file will be written to).
    data_page <- "https://wwwn.cdc.gov/nchs/nhanes/search/DataPage.aspx"
    data_html <- xml2::read_html( data_page )
    this_table <- rvest::html_table( data_html )[[1]] #changed from [[2]] to allow download to work
    names( this_table ) <- c( 'years' , 'data_name' , 'doc_name' , 'file_name' , 'date_published' )
    # Resolve each table cell's text to the matching <a href> on the page
    all_links <- rvest::html_nodes( data_html , "a" )
    link_text <- rvest::html_text( all_links )
    link_refs <- rvest::html_attr( all_links , "href" )
    this_table$full_url <- link_refs[ match( this_table$file_name , link_text ) ]
    this_table$doc_url <- link_refs[ match( this_table$doc_name , link_text ) ]
    # Hrefs on the page are site-relative; prefix the CDC host
    this_table[ c( 'full_url' , 'doc_url' ) ] <- sapply( this_table[ c( 'full_url' , 'doc_url' ) ] , function( w ) ifelse( is.na( w ) , NA , paste0( "https://wwwn.cdc.gov" , w ) ) )
    # Drop restricted-access (RDC), withdrawn, and dead-link entries
    catalog <- this_table[ this_table$file_name != 'RDC Only' & this_table$date_published != 'Withdrawn' & this_table$full_url != "https://wwwn.cdc.gov#" , ]
    # one all years doc hardcode: the DXA entry covers four survey waves, so
    # expand it into one row per wave with explicit data/doc URLs
    ayd <- catalog[ tolower( catalog$full_url ) == "https://wwwn.cdc.gov/nchs/nhanes/dxa/dxa.aspx" , ]
    ayd$years <- ayd$full_url <- ayd$doc_url <- NULL
    this_ayd <-
      data.frame(
        years = c( "2005-2006" , "2003-2004" , "2001-2002" , "1999-2000" ) ,
        full_url = paste0( "https://wwwn.cdc.gov/Nchs/Data/Nhanes/Dxa/dxx" , c( "_d" , "_c" , "_b" , "" ) , ".xpt" ) ,
        # BUG FIX: these four doc URLs were previously wrapped in paste0(),
        # which concatenated them into a single garbled string recycled across
        # all four rows; c() keeps one documentation URL per survey wave.
        doc_url = c( "https://wwwn.cdc.gov/Nchs/Nhanes/2005-2006/DXX_D.htm" , "https://wwwn.cdc.gov/Nchs/Data/Nhanes/Dxa/dxx_c.pdf" , "https://wwwn.cdc.gov/Nchs/Data/Nhanes/Dxa/dxx_b.pdf" , "https://wwwn.cdc.gov/Nchs/Data/Nhanes/Dxa/dxx.pdf" ) ,
        stringsAsFactors = FALSE
      )
    ayd <- merge( ayd , this_ayd )
    catalog <- catalog[ tolower( catalog$full_url ) != "https://wwwn.cdc.gov/nchs/nhanes/dxa/dxa.aspx" , ]
    catalog <- rbind( catalog , ayd )
    # Local destination: <output_dir>/<years>/<basename>.csv (lowercased)
    catalog$output_filename <- paste0( output_dir , "/" , catalog$years , "/" , tolower( gsub( "\\.xpt" , ".csv" , basename( catalog$full_url ) , ignore.case = TRUE ) ) )
    catalog <- catalog[ order( catalog[ , 'years' ] ) , ]
    catalog
  }
lodown_nhanes <-
  function( data_name = "nhanes" , catalog , ... ){
    # Download every file listed in `catalog`, convert it to a data.frame,
    # and save it as CSV at catalog$output_filename.
    #
    # Args:
    #   data_name: label used in progress messages.
    #   catalog: data.frame produced by get_catalog_nhanes().
    #   ...: unused, accepted for interface compatibility.
    # Returns: the catalog with a case_count column (rows per file) added.
    # On error, the partially-updated catalog is printed via on.exit()
    # so progress information is not lost.
    on.exit( print( catalog ) )
    tf <- tempfile()
    for ( i in seq_len( nrow( catalog ) ) ){
      # download the file
      cachaca( catalog[ i , "full_url" ] , tf , mode = 'wb' )
      if( grepl( "\\.zip$" , catalog[ i , "full_url" ] , ignore.case = TRUE ) ){
        unzipped_files <- unzip( tf , exdir = tempdir() )
        suppressWarnings( file.remove( tf ) )
        tf <- unzipped_files
      }
      # Try SAS transport (xport) first; fall back to haven::read_sas.
      # inherits() is the robust way to detect a try() failure -- comparing
      # class() with == warns/errors when an object has multiple classes.
      xport_attempt <- try( x <- foreign::read.xport( tf ) , silent = TRUE )
      if( inherits( xport_attempt , 'try-error' ) ) x <- data.frame( haven::read_sas( tf ) )
      # convert all column names to lowercase
      names( x ) <- tolower( names( x ) )
      # Create the output directory if needed prior to saving the file.
      # recursive = TRUE so the <output_dir>/<years> hierarchy is created
      # even when the parent directory does not exist yet.
      if (!dir.exists(dirname(catalog$output_filename[i]))) {
        dir.create(dirname(catalog$output_filename[i]), recursive = TRUE)
      }
      # save file as csv rather than RDS
      # saveRDS( x , file = catalog[ i , 'output_filename' ] , compress = FALSE )
      readr::write_csv(x, catalog[ i , 'output_filename' ])
      catalog[ i , 'case_count' ] <- nrow( x )
      # delete the temporary files
      suppressWarnings( file.remove( tf ) )
      cat( paste0( data_name , " catalog entry " , i , " of " , nrow( catalog ) , " stored at '" , catalog[ i , 'output_filename' ] , "'\r\n\n" ) )
    }
    on.exit()
    catalog
  }
| /R/nhanes.R | no_license | seanofahey/lodown | R | false | false | 3,489 | r | get_catalog_nhanes <-
function( data_name = "nhanes" , output_dir , ... ){
data_page <- "https://wwwn.cdc.gov/nchs/nhanes/search/DataPage.aspx"
data_html <- xml2::read_html( data_page )
this_table <- rvest::html_table( data_html )[[1]] #changed from [[2]] to allow download to work
names( this_table ) <- c( 'years' , 'data_name' , 'doc_name' , 'file_name' , 'date_published' )
all_links <- rvest::html_nodes( data_html , "a" )
link_text <- rvest::html_text( all_links )
link_refs <- rvest::html_attr( all_links , "href" )
this_table$full_url <- link_refs[ match( this_table$file_name , link_text ) ]
this_table$doc_url <- link_refs[ match( this_table$doc_name , link_text ) ]
this_table[ c( 'full_url' , 'doc_url' ) ] <- sapply( this_table[ c( 'full_url' , 'doc_url' ) ] , function( w ) ifelse( is.na( w ) , NA , paste0( "https://wwwn.cdc.gov" , w ) ) )
catalog <- this_table[ this_table$file_name != 'RDC Only' & this_table$date_published != 'Withdrawn' & this_table$full_url != "https://wwwn.cdc.gov#" , ]
# one all years doc hardcode
ayd <- catalog[ tolower( catalog$full_url ) == "https://wwwn.cdc.gov/nchs/nhanes/dxa/dxa.aspx" , ]
ayd$years <- ayd$full_url <- ayd$doc_url <- NULL
this_ayd <-
data.frame(
years = c( "2005-2006" , "2003-2004" , "2001-2002" , "1999-2000" ) ,
full_url = paste0( "https://wwwn.cdc.gov/Nchs/Data/Nhanes/Dxa/dxx" , c( "_d" , "_c" , "_b" , "" ) , ".xpt" ) ,
doc_url = paste0( "https://wwwn.cdc.gov/Nchs/Nhanes/2005-2006/DXX_D.htm" , "https://wwwn.cdc.gov/Nchs/Data/Nhanes/Dxa/dxx_c.pdf" , "https://wwwn.cdc.gov/Nchs/Data/Nhanes/Dxa/dxx_b.pdf" , "https://wwwn.cdc.gov/Nchs/Data/Nhanes/Dxa/dxx.pdf" ) ,
stringsAsFactors = FALSE
)
ayd <- merge( ayd , this_ayd )
catalog <- catalog[ tolower( catalog$full_url ) != "https://wwwn.cdc.gov/nchs/nhanes/dxa/dxa.aspx" , ]
catalog <- rbind( catalog , ayd )
catalog$output_filename <- paste0( output_dir , "/" , catalog$years , "/" , tolower( gsub( "\\.xpt" , ".csv" , basename( catalog$full_url ) , ignore.case = TRUE ) ) )
catalog <- catalog[ order( catalog[ , 'years' ] ) , ]
catalog
}
lodown_nhanes <-
function( data_name = "nhanes" , catalog , ... ){
on.exit( print( catalog ) )
tf <- tempfile()
for ( i in seq_len( nrow( catalog ) ) ){
# download the file
cachaca( catalog[ i , "full_url" ] , tf , mode = 'wb' )
if( grepl( "\\.zip$" , catalog[ i , "full_url" ] , ignore.case = TRUE ) ){
unzipped_files <- unzip( tf , exdir = tempdir() )
suppressWarnings( file.remove( tf ) )
tf <- unzipped_files
}
xport_attempt <- try( x <- foreign::read.xport( tf ) , silent = TRUE )
if( class( xport_attempt ) == 'try-error' ) x <- data.frame( haven::read_sas( tf ) )
# convert all column names to lowercase
names( x ) <- tolower( names( x ) )
# create directory if needed prior to saving file
if (!dir.exists(dirname(catalog$output_filename[i]))) {
dir.create(dirname(catalog$output_filename[i]))
}
# save file as csv rather than RDS
# saveRDS( x , file = catalog[ i , 'output_filename' ] , compress = FALSE )
write_csv(x, catalog[ i , 'output_filename' ])
catalog[ i , 'case_count' ] <- nrow( x )
# delete the temporary files
suppressWarnings( file.remove( tf ) )
cat( paste0( data_name , " catalog entry " , i , " of " , nrow( catalog ) , " stored at '" , catalog[ i , 'output_filename' ] , "'\r\n\n" ) )
}
on.exit()
catalog
}
|
####################################################################################################################################
### Filename: plot.R
### Description: Function for plotting the profiles only when one whole- and one subplot factor are used.
###
###
###
####################################################################################################################################
#' Plots profiles of the groups in case of one whole- and one subplot-factor.
#'
#' @param data A data.frame containing the data
#' @param group column name within the data frame data specifying the groups
#' @param factor1 column name within the data frame data specifying the first subplot-factor
#' @param subject column name within the data frame X identifying the subjects
#' @param response column name within the data frame X containing the response variable
#' @param xlab label of the x-axis of the plot
#' @param ylab label of the y-axis of the plot
#' @param legend logical indicating if a legend should be plotted
#' @param legend.title title of the legend
#' @return Plots profiles of the groups.
#' @example R/example_plot.txt
#' @keywords internal
hrm.plot <- function(data, group , factor1, subject, response, xlab="time", ylab="mean", legend = TRUE, legend.title = NULL ){
  # Plot group-by-time profiles: the mean response per factor1 level, one
  # line per group. See the roxygen block above for parameter details.
  # Returns a ggplot object.
  X <- as.data.frame(data)
  stopifnot(is.data.frame(X), is.character(subject), is.character(group),
            is.character(factor1), is.character(response),
            is.character(xlab), is.character(ylab))
  group <- as.character(group)
  factor1 <- as.character(factor1)
  subject <- as.character(subject)
  xlab <- as.character(xlab)
  ylab <- as.character(ylab)
  # Split into one data.frame per group level.
  X <- split(X, X[, group], drop = TRUE)
  a <- length(X)                  # number of groups
  d <- nlevels(X[[1]][, factor1]) # number of factor1 (time) levels
  n <- rep(0, a)                  # per-group sample sizes
  # NOTE(review): assumes the levels of factor1 are numeric-like strings
  # (e.g. time points); as.numeric(levels(...)) yields NA otherwise.
  means <- data.frame(dimension = as.numeric(levels(X[[1]][, factor1])))
  groupnames <- character(a)
  for (i in seq_len(a)) {
    groupnames[i] <- as.character(X[[i]][1, group])
    # Order by subject then factor1 so each subject's measurements form one
    # row of the d-column matrix below.
    X[[i]] <- X[[i]][order(X[[i]][, subject], X[[i]][, factor1]), ]
    X[[i]] <- X[[i]][, response]
    X[[i]] <- matrix(X[[i]], ncol = d, byrow = TRUE)
    n[i] <- dim(X[[i]])[1]
    means[, (i + 1)] <- colMeans(X[[i]])
  }
  colnames(means) <- c("dimension", groupnames)
  # Reshape to long format: one row per (dimension, group) pair.
  means <- melt(means, id.vars = "dimension")
  colnames(means) <- c("dimension", "group", "value")
  # Map columns by name rather than `means$...` inside aes(), which is the
  # supported ggplot2 idiom.
  pl <- ggplot(means, aes(x = dimension, y = value, group = group, colour = group)) +
    geom_line() +
    geom_point(size = 1.5) +
    xlab(xlab) +
    ylab(ylab)
  if (!legend) {
    pl <- pl + theme(legend.position = "none")
  } else {
    if (!is.null(legend.title) && is.character(legend.title)) {
      pl <- pl + scale_colour_hue(name = legend.title)
    } else {
      pl <- pl + theme(legend.title = element_blank())
    }
    pl <- pl + theme(legend.background = element_rect())
  }
  return(pl)
}
# hrm.plot end ------------------------------------------------------------
| /HRM/R/plot.R | no_license | akhikolla/TestedPackages-NoIssues | R | false | false | 3,106 | r | ####################################################################################################################################
### Filename: plot.R
### Description: Function for plotting the profiles only when one whole- and one subplot factor are used.
###
###
###
####################################################################################################################################
#' Plots profiles of the groups in case of one whole- and one subplot-factor.
#'
#' @param data A data.frame containing the data
#' @param group column name within the data frame data specifying the groups
#' @param factor1 column name within the data frame data specifying the first subplot-factor
#' @param subject column name within the data frame X identifying the subjects
#' @param response column name within the data frame X containing the response variable
#' @param xlab label of the x-axis of the plot
#' @param ylab label of the y-axis of the plot
#' @param legend logical indicating if a legend should be plotted
#' @param legend.title title of the legend
#' @return Plots profiles of the groups.
#' @example R/example_plot.txt
#' @keywords internal
hrm.plot <- function(data, group , factor1, subject, response, xlab="time", ylab="mean", legend = TRUE, legend.title = NULL ){
X <- as.data.frame(data)
data<-response
stopifnot(is.data.frame(X),is.character(subject), is.character(group),is.character(factor1),is.character(data),is.character(xlab),is.character(ylab))
f <- 0
f0 <- 0
crit <- 0
test <- 0
group <- as.character(group)
factor1 <- as.character(factor1)
subject <- as.character(subject)
xlab <- as.character(xlab)
ylab <- as.character(ylab)
X <- split(X, X[,group], drop=TRUE)
a <- length(X)
d <- nlevels(X[[1]][,factor1])
n <- rep(0,a)
means <- data.frame(dimension=as.numeric(levels(X[[1]][,factor1])))
groupnames <- c()
for(i in 1:a){
groupnames[i] <- as.character(X[[i]][1,group])
X[[i]] <- X[[i]][ order(X[[i]][,subject], X[[i]][,factor1]), ]
X[[i]] <- X[[i]][,data]
X[[i]] <- matrix(X[[i]], ncol=d,byrow=TRUE)
n[i] <- dim(X[[i]])[1]
means[,(i+1)] <- colMeans(X[[i]])
}
colnames(means) <- c("dimension",groupnames)
means <- melt(means, id.vars="dimension")
colnames(means) <- c("dimension", "group", "value")
pl <- ggplot() +
geom_line(data=means, aes(x=means$dimension, y=means$value,group=means$group,colour=means$group)) +
geom_point(data=means, aes(x=means$dimension, y=means$value,group=means$group,colour=means$group),size=1.5) +
xlab(xlab) +
ylab(ylab)
if(!legend){
pl <- pl + theme(legend.position = "none")
} else {
if(!is.null(legend.title) & is.character(legend.title)){
pl <- pl + scale_colour_hue(name=legend.title)
} else {
pl <- pl + theme(legend.title = element_blank())
}
pl <- pl + theme(legend.background = element_rect())
}
return(pl)
}
# hrm.plot end ------------------------------------------------------------
|
# ckglob --- if global prefix, mark lines to be affected
#
# Parses an optional "global" prefix on the command line `lin` starting at
# index `i`: a markname prefix (GMARK / XMARK followed by a mark name read
# via getkn) and/or a pattern prefix (GLOBAL/EXCLUDE followed by a pattern
# compiled via optpat).  On success, sets Globmark on every buffer line:
# YES for lines in the addressed range (Line1..Line2) that satisfy the
# markname/pattern criteria, NO for all others.
# Returns: OK when a prefix was parsed and lines marked; EOF when no
# global prefix is present; ERR on a bad pattern or user interrupt.
# NOTE(review): relies on globals from SE_COMMON (Line0/1/2, Lastln,
# Savknm, Txt, Pat, Markname, Nextline, Globmark) -- confirm against the
# include file before changing.
integer function ckglob (lin, i, status)
character lin (MAXLINE)
integer i, status
include SE_COMMON
integer line, usepat, usemark, tmp
integer defalt, match, optpat, getkn
pointer k
logical intrpt, brkflag
status = OK
# usepat / usemark stay EOF when the corresponding prefix is absent
usepat = EOF
usemark = EOF
if (lin (i) == GMARK || lin (i) == XMARK) { # global markname prefix?
if (lin (i) == GMARK) # tag lines with the specified markname
usemark = YES
else # tag lines without the specified markname
usemark = NO
i += 1
status = getkn (lin, i, Savknm, Savknm)
}
if (status == OK) # check for a pattern prefix too
select (lin (i))
when (GLOBAL, UCGLOBAL)
usepat = YES
when (EXCLUDE, UCEXCLUDE)
usepat = NO
ifany {
# a pattern prefix was seen: compile the pattern that follows it
i += 1
if (optpat (lin, i) == ERR)
status = ERR
else
i += 1
}
# neither prefix present: report EOF so the caller treats this as a
# non-global command
if (status == OK && usepat == EOF && usemark == EOF)
status = EOF
elif (status == OK)
call defalt (1, Lastln)
if (status == OK) { # no errors so far, safe to proceed
call mesg ("GLOB"s, REMARK_MSG)
k = Line0 # mark all lines preceding global range
for (line = 0; line < Line1; line += 1) {
Globmark (k) = NO
k = Nextline (k)
}
for ( ; line <= Line2; line += 1) { # mark lines in range
if (intrpt (brkflag)) { # check for an interrupt
status = ERR
return (status)
}
tmp = NO
# a line qualifies when the markname test passes (or none was given)
# AND the pattern test passes (or none was given)
if (usemark == EOF
|| usemark == YES && Markname (k) == Savknm
|| usemark == NO && Markname (k) ~= Savknm) {
if (usepat == EOF) # no global pattern to look for
tmp = YES
else { # there is also a pattern to look for
call gtxt (k)
if (match (Txt, Pat) == usepat)
tmp = YES
}
}
Globmark (k) = tmp
k = Nextline (k)
}
for ( ; k ~= Line0; k = Nextline (k)) # mark remaining lines
Globmark (k) = NO
call mesg (EOS, REMARK_MSG)
}
return (status)
end
| /swt/src/spc/se.u/source/ckglob.r | no_license | arnoldrobbins/gt-swt | R | false | false | 2,336 | r | # ckglob --- if global prefix, mark lines to be affected
integer function ckglob (lin, i, status)
character lin (MAXLINE)
integer i, status
include SE_COMMON
integer line, usepat, usemark, tmp
integer defalt, match, optpat, getkn
pointer k
logical intrpt, brkflag
status = OK
usepat = EOF
usemark = EOF
if (lin (i) == GMARK || lin (i) == XMARK) { # global markname prefix?
if (lin (i) == GMARK) # tag lines with the specified markname
usemark = YES
else # tag lines without the specified markname
usemark = NO
i += 1
status = getkn (lin, i, Savknm, Savknm)
}
if (status == OK) # check for a pattern prefix too
select (lin (i))
when (GLOBAL, UCGLOBAL)
usepat = YES
when (EXCLUDE, UCEXCLUDE)
usepat = NO
ifany {
i += 1
if (optpat (lin, i) == ERR)
status = ERR
else
i += 1
}
if (status == OK && usepat == EOF && usemark == EOF)
status = EOF
elif (status == OK)
call defalt (1, Lastln)
if (status == OK) { # no errors so far, safe to proceed
call mesg ("GLOB"s, REMARK_MSG)
k = Line0 # mark all lines preceeding global range
for (line = 0; line < Line1; line += 1) {
Globmark (k) = NO
k = Nextline (k)
}
for ( ; line <= Line2; line += 1) { # mark lines in range
if (intrpt (brkflag)) { # check for an interrupt
status = ERR
return (status)
}
tmp = NO
if (usemark == EOF
|| usemark == YES && Markname (k) == Savknm
|| usemark == NO && Markname (k) ~= Savknm) {
if (usepat == EOF) # no global pattern to look for
tmp = YES
else { # there is also a pattern to look for
call gtxt (k)
if (match (Txt, Pat) == usepat)
tmp = YES
}
}
Globmark (k) = tmp
k = Nextline (k)
}
for ( ; k ~= Line0; k = Nextline (k)) # mark remaining lines
Globmark (k) = NO
call mesg (EOS, REMARK_MSG)
}
return (status)
end
|
library(sperrorest)
### Name: add.distance
### Title: Add distance information to resampling objects
### Aliases: add.distance add.distance.resampling
### add.distance.represampling
### ** Examples
# Example: compare test-to-training distances for a non-spatial (random CV)
# partition versus a spatial (k-means) partition of the ecuador data set.
data(ecuador) # Muenchow et al. (2012), see ?ecuador
nsp.parti <- partition_cv(ecuador)
sp.parti <- partition_kmeans(ecuador)
nsp.parti <- add.distance(nsp.parti, ecuador)
sp.parti <- add.distance(sp.parti, ecuador)
# non-spatial partitioning: very small test-training distance:
nsp.parti[[1]][[1]]$distance
# spatial partitioning: more substantial distance, depending on number of
# folds etc.
sp.parti[[1]][[1]]$distance
| /data/genthat_extracted_code/sperrorest/examples/add.distance.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 629 | r | library(sperrorest)
### Name: add.distance
### Title: Add distance information to resampling objects
### Aliases: add.distance add.distance.resampling
### add.distance.represampling
### ** Examples
data(ecuador) # Muenchow et al. (2012), see ?ecuador
nsp.parti <- partition_cv(ecuador)
sp.parti <- partition_kmeans(ecuador)
nsp.parti <- add.distance(nsp.parti, ecuador)
sp.parti <- add.distance(sp.parti, ecuador)
# non-spatial partioning: very small test-training distance:
nsp.parti[[1]][[1]]$distance
# spatial partitioning: more substantial distance, depending on number of
# folds etc.
sp.parti[[1]][[1]]$distance
|
#' Save PubMed files
#'
#' Save PubMed files as unparsed XML in ".txt" files, so that the user can
#' choose to parse them later with the `XML` or `xml2` packages.
#'
#' @param pubmed_data Character vector of unparsed XML with root node
#'   "PubmedArticleSet". Generally the output of
#'   \code{\link{batch_fetch_pubmed_records}}.
#' @param dir Directory to save files to. Default = current directory via
#'   \code{here()}.
#' @param prefix Prefix to append to filename (optional)
#' @return The input \code{pubmed_data}, invisibly. Called for its side
#'   effect: one "<date>[_prefix]_pubmed_data_<batch>.txt" file per element.
#' @export
#' @examples
#'
#' my_dois <- c("10.1186/s12970-020-0336-1", "10.1016/S0140-6736(19)32971-X", "10.1056/NEJMoa1905239", "10.1371/journal.pone.0226893", "10.1016/S2352-3026(19)30207-8")
#'
#' \dontrun{
#' batch_dois() %>%
#'   create_query_strings() %>%
#'   batch_fetch_pubmed_records() %>%
#'   write_pubmed_files()
#' }
# TODO: decide whether to save parsed or unparsed
# TODO: decide whether to save within fetch or after batch
# TODO: improve filenaming and user options
write_pubmed_files <- function(pubmed_data, dir = here::here(), prefix = "") {
  # Prepend "_" so a non-empty prefix slots between the date and
  # "_pubmed_data" in the filename.
  if (prefix != "") {
    prefix <- paste0("_", prefix)
  }
  # One output path per batch; seq_along() is safe for zero-length input.
  paths <- file.path(
    dir,
    paste0(Sys.Date(), prefix, "_pubmed_data_", seq_along(pubmed_data), ".txt")
  )
  purrr::walk2(pubmed_data, paths, readr::write_file)
  invisible(pubmed_data)
}
| /R/write_pubmed_files.R | no_license | maia-sh/pubmedparser | R | false | false | 1,296 | r | #' Save PubMed files
#'
#' Save PubMed files as unparsed XML in ".txt" files, so that user can choose to parse with `XML` or `xml2` packages.
#'
#' @param pubmed_data Character vector of unparsed XML with root node "PubmedArticleSet". Generally the output of \code{\link{batch_fetch_pubmed_records}}.
#' @param dir Directory to save files to. Default = current directory via \code{here()}.
#' @param prefix Prefix to append to filename (optional)
#' @return SIDE-EFFECT. Unparsed XML in ".txt" files with batch number
#' @export
#' @examples
#'
#' my_dois <- c("10.1186/s12970-020-0336-1", "10.1016/S0140-6736(19)32971-X", "10.1056/NEJMoa1905239", "10.1371/journal.pone.0226893", "10.1016/S2352-3026(19)30207-8")
#'
#' \dontrun{
#' batch_dois() %>%
#' create_query_strings() %>%
#' batch_fetch_pubmed_records() %>%
#' write_pubmed_files()
#' }
# TODO: decide whether to save parsed or unparsed
# TODO: decide whether to save within fetch or after batch
# TODO: improve filenaming and user options
write_pubmed_files <- function(pubmed_data, dir = here::here(), prefix = "") {
if (prefix != "") {prefix = paste0("_", prefix)}
purrr::walk2(pubmed_data,
paste0(dir, "/", Sys.Date(), prefix, "_pubmed_data_",
1:length(pubmed_data), ".txt"),
readr::write_file)
}
|
# Tests for the fitted-model methods exposed by cmdstanr
# (log_prob, grad_log_prob, hessian, constrain/unconstrain variables).
# Shared fixture: a bernoulli_log_lik model fit with one chain.
# Every test is skipped on WSL via skip_if(os_is_wsl()).
context("model-methods")
set_cmdstan_path()
mod <- cmdstan_model(testing_stan_file("bernoulli_log_lik"), force_recompile = TRUE)
data_list <- testing_data("bernoulli")
fit <- mod$sample(data = data_list, chains = 1)
# Each model method must refuse to run (with the same message) until
# init_model_methods() has compiled the method bindings.
test_that("Methods error if not compiled", {
skip_if(os_is_wsl())
expect_error(
fit$log_prob(NULL),
"The method has not been compiled, please call `init_model_methods()` first",
fixed = TRUE
)
expect_error(
fit$grad_log_prob(NULL),
"The method has not been compiled, please call `init_model_methods()` first",
fixed = TRUE
)
expect_error(
fit$hessian(NULL),
"The method has not been compiled, please call `init_model_methods()` first",
fixed = TRUE
)
expect_error(
fit$unconstrain_variables(NULL),
"The method has not been compiled, please call `init_model_methods()` first",
fixed = TRUE
)
expect_error(
fit$constrain_variables(NULL),
"The method has not been compiled, please call `init_model_methods()` first",
fixed = TRUE
)
})
# init_model_methods(hessian = TRUE) must warn that higher-order autodiff is
# experimental.  Side effect: this call also compiles the methods that the
# later tests on `fit` rely on.
test_that("User warned about higher-order autodiff with hessian", {
skip_if(os_is_wsl())
expect_message(
fit$init_model_methods(hessian = TRUE, verbose = TRUE),
"The hessian method relies on higher-order autodiff which is still experimental. Please report any compilation errors that you encounter",
fixed = TRUE
)
})
# Golden-value checks for each method at the unconstrained point 0.1.
# NOTE(review): the reference numbers are hard-coded fixtures for this
# model/data -- they must be regenerated if either changes.
test_that("Methods return correct values", {
skip_if(os_is_wsl())
lp <- fit$log_prob(unconstrained_variables=c(0.1))
expect_equal(lp, -8.6327599208828509347)
# grad_log_prob() returns the gradient with the log-prob attached as an
# attribute, so build the expected object the same way.
grad_lp <- -3.2997502497472801508
attr(grad_lp, "log_prob") <- lp
expect_equal(fit$grad_log_prob(unconstrained_variables=c(0.1)), grad_lp)
hessian <- list(
log_prob = lp,
grad_log_prob = -3.2997502497472801508,
hessian = as.matrix(-2.9925124823147033482, nrow=1, ncol=1)
)
expect_equal(fit$hessian(unconstrained_variables=c(0.1)), hessian)
# Same point without the Jacobian adjustment gives different values.
hessian_noadj <- list(
log_prob = -7.2439666007357095268,
grad_log_prob = -3.2497918747894001257,
hessian = as.matrix(-2.4937604019289194568, nrow=1, ncol=1)
)
expect_equal(fit$hessian(unconstrained_variables=c(0.1), jacobian_adjustment = FALSE),
hessian_noadj)
# constrain_variables() maps the unconstrained value to theta and the
# generated quantities (log_lik, one entry per observation).
cpars <- fit$constrain_variables(c(0.1))
cpars_true <- list(
theta = 0.52497918747894001257,
log_lik = rep(-7.2439666007357095268, data_list$N)
)
expect_equal(cpars, cpars_true)
expect_equal(fit$constrain_variables(c(0.1), generated_quantities = FALSE),
list(theta = 0.52497918747894001257))
# variable_skeleton() returns zero-filled arrays shaped like the variables.
skeleton <- list(
theta = array(0, dim = 1),
log_lik = array(0, dim = data_list$N)
)
expect_equal(fit$variable_skeleton(), skeleton)
# Round trip: unconstrain(constrain(x)) == x.
unconstrained_variables <- fit$unconstrain_variables(cpars)
expect_equal(unconstrained_variables, c(0.1))
})
# Two fits of the same model with different data must keep independent
# method environments (the second fit must not clobber the first).
test_that("Model methods environments are independent", {
skip_if(os_is_wsl())
data_list_2 <- data_list
data_list_2$N <- 20
data_list_2$y <- c(data_list$y, data_list$y)
fit_2 <- mod$sample(data = data_list_2, chains = 1)
fit_2$init_model_methods()
expect_equal(fit$log_prob(unconstrained_variables=c(0.1)), -8.6327599208828509347)
expect_equal(fit_2$log_prob(unconstrained_variables=c(0.1)), -15.87672652161856135)
})
# Dimension mismatches and missing parameters must raise informative errors.
test_that("methods error for incorrect inputs", {
skip_if(os_is_wsl())
expect_error(
fit$log_prob(c(1,2)),
"Model has 1 unconstrained parameter(s), but 2 were provided!",
fixed = TRUE
)
expect_error(
fit$grad_log_prob(c(1,2)),
"Model has 1 unconstrained parameter(s), but 2 were provided!",
fixed = TRUE
)
expect_error(
fit$hessian(c(1,2)),
"Model has 1 unconstrained parameter(s), but 2 were provided!",
fixed = TRUE
)
expect_error(
fit$constrain_variables(c(1,2)),
"Model has 1 unconstrained parameter(s), but 2 were provided!",
fixed = TRUE
)
logistic_mod <- cmdstan_model(testing_stan_file("logistic"), force_recompile = TRUE)
logistic_data_list <- testing_data("logistic")
logistic_fit <- logistic_mod$sample(data = logistic_data_list, chains = 1)
# Init without Hessian, as bernoulli_logit_glm currently not fully fvar<var>
# compatible
logistic_fit$init_model_methods(verbose = TRUE)
expect_error(
logistic_fit$unconstrain_variables(list(alpha = 0.5)),
"Model parameter(s): beta not provided!",
fixed = TRUE
)
})
# Model methods require recompilation; a pre-compiled executable must be
# rejected with a clear message.
# NOTE(review): this test reassigns the shared mod/data_list/fit fixtures;
# the following test deliberately reuses the re-fit data_list.
test_that("Methods error with already-compiled model", {
skip_if(os_is_wsl())
precompile_mod <- testing_model("bernoulli")
mod <- testing_model("bernoulli")
data_list <- testing_data("bernoulli")
fit <- mod$sample(data = data_list, chains = 1)
expect_error(
fit$init_model_methods(),
"Model methods cannot be used with a pre-compiled Stan executable, the model must be compiled again",
fixed = TRUE
)
})
# Compiling the methods together with the model (compile_model_methods /
# compile_hessian_method) must produce the same golden values at 0.6.
test_that("Methods can be compiled with model", {
skip_if(os_is_wsl())
mod <- cmdstan_model(testing_stan_file("bernoulli"),
force_recompile = TRUE,
compile_model_methods = TRUE,
compile_hessian_method = TRUE)
fit <- mod$sample(data = data_list, chains = 1)
lp <- fit$log_prob(unconstrained_variables=c(0.6))
expect_equal(lp, -10.649855405830624733)
grad_lp <- -4.7478756747095447466
attr(grad_lp, "log_prob") <- lp
expect_equal(fit$grad_log_prob(unconstrained_variables=c(0.6)), grad_lp)
hessian <- list(
log_prob = lp,
grad_log_prob = -4.7478756747095447466,
hessian = as.matrix(-2.7454108854798882078, nrow=1, ncol=1)
)
expect_equal(fit$hessian(unconstrained_variables=c(0.6)), hessian)
cpars <- fit$constrain_variables(c(0.6))
expect_equal(cpars, list(theta = 0.64565630622579539555))
unconstrained_variables <- fit$unconstrain_variables(cpars)
expect_equal(unconstrained_variables, c(0.6))
})
# A zero-length parameter container (vector[N] y with N = 0) must be
# skipped when unconstraining, leaving only the scalar x.
test_that("unconstrain_variables correctly handles zero-length containers", {
skip_if(os_is_wsl())
model_code <- "
data {
int N;
}
parameters {
vector[N] y;
real x;
}
model {
x ~ std_normal();
y ~ std_normal();
}
"
mod <- cmdstan_model(write_stan_file(model_code),
compile_model_methods = TRUE)
fit <- mod$sample(data = list(N = 0), chains = 1)
unconstrained <- fit$unconstrain_variables(variables = list(x = 5))
expect_equal(unconstrained, 5)
})
# unconstrain_draws() is checked for all three input sources (internal
# draws, CSV files, draws object) against two models:
#   * unconstrained parameter: unconstrained draws == parameter draws
#   * lower-bounded parameter: parameter draws == exp(unconstrained draws)
test_that("unconstrain_draws returns correct values", {
skip_if(os_is_wsl())
# With no constraints, the parameter draws should be the same as the
# unconstrained draws
model_code <- "
data {
int N;
}
parameters {
real x;
}
model {
x ~ std_normal();
}
"
mod <- cmdstan_model(write_stan_file(model_code),
compile_model_methods = TRUE,
force_recompile = TRUE)
fit <- mod$sample(data = list(N = 0), chains = 1)
x_draws <- fit$draws(format = "draws_df")$x
# Unconstrain all internal draws
unconstrained_internal_draws <- fit$unconstrain_draws()[[1]]
expect_equal(as.numeric(x_draws), as.numeric(unconstrained_internal_draws))
# Unconstrain external CmdStan CSV files
unconstrained_csv <- fit$unconstrain_draws(files = fit$output_files())[[1]]
expect_equal(as.numeric(x_draws), as.numeric(unconstrained_csv))
# Unconstrain existing draws object
unconstrained_draws <- fit$unconstrain_draws(draws = fit$draws())[[1]]
expect_equal(as.numeric(x_draws), as.numeric(unconstrained_draws))
# With a lower-bounded constraint, the parameter draws should be the
# exponentiation of the unconstrained draws
model_code <- "
data {
int N;
}
parameters {
real<lower = 0> x;
}
model {
x ~ std_normal();
}
"
mod <- cmdstan_model(write_stan_file(model_code),
compile_model_methods = TRUE,
force_recompile = TRUE)
fit <- mod$sample(data = list(N = 0), chains = 1)
x_draws <- fit$draws(format = "draws_df")$x
unconstrained_internal_draws <- fit$unconstrain_draws()[[1]]
expect_equal(as.numeric(x_draws), exp(as.numeric(unconstrained_internal_draws)))
# Unconstrain external CmdStan CSV files
unconstrained_csv <- fit$unconstrain_draws(files = fit$output_files())[[1]]
expect_equal(as.numeric(x_draws), exp(as.numeric(unconstrained_csv)))
# Unconstrain existing draws object
unconstrained_draws <- fit$unconstrain_draws(draws = fit$draws())[[1]]
expect_equal(as.numeric(x_draws), exp(as.numeric(unconstrained_draws)))
})
# Methods must initialise for a model with no data block inputs; the
# standard-normal log density at 5 is -12.5 (up to the dropped constant).
test_that("Model methods can be initialised for models with no data", {
skip_if(os_is_wsl())
stan_file <- write_stan_file("parameters { real x; } model { x ~ std_normal(); }")
mod <- cmdstan_model(stan_file, compile_model_methods = TRUE, force_recompile = TRUE)
expect_no_error(fit <- mod$sample())
expect_equal(fit$log_prob(5), -12.5)
})
# variable_skeleton() must report the right dims for scalars, matrices,
# vectors, and row vectors.
test_that("Variable skeleton returns correct dimensions for matrices", {
skip_if(os_is_wsl())
stan_file <- write_stan_file("
data {
int N;
int K;
}
parameters {
real x_real;
matrix[N,K] x_mat;
vector[K] x_vec;
row_vector[K] x_rowvec;
}
model {
x_real ~ std_normal();
}")
mod <- cmdstan_model(stan_file, compile_model_methods = TRUE,
force_recompile = TRUE)
N <- 4
K <- 3
fit <- mod$sample(data = list(N = N, K = K), chains = 1,
iter_warmup = 1, iter_sampling = 1)
target_skeleton <- list(
x_real = array(0, dim = 1),
x_mat = array(0, dim = c(N, K)),
x_vec = array(0, dim = K),
x_rowvec = array(0, dim = K)
)
expect_equal(fit$variable_skeleton(),
target_skeleton)
})
| /tests/testthat/test-model-methods.R | permissive | stan-dev/cmdstanr | R | false | false | 9,603 | r | context("model-methods")
set_cmdstan_path()
mod <- cmdstan_model(testing_stan_file("bernoulli_log_lik"), force_recompile = TRUE)
data_list <- testing_data("bernoulli")
fit <- mod$sample(data = data_list, chains = 1)
test_that("Methods error if not compiled", {
skip_if(os_is_wsl())
expect_error(
fit$log_prob(NULL),
"The method has not been compiled, please call `init_model_methods()` first",
fixed = TRUE
)
expect_error(
fit$grad_log_prob(NULL),
"The method has not been compiled, please call `init_model_methods()` first",
fixed = TRUE
)
expect_error(
fit$hessian(NULL),
"The method has not been compiled, please call `init_model_methods()` first",
fixed = TRUE
)
expect_error(
fit$unconstrain_variables(NULL),
"The method has not been compiled, please call `init_model_methods()` first",
fixed = TRUE
)
expect_error(
fit$constrain_variables(NULL),
"The method has not been compiled, please call `init_model_methods()` first",
fixed = TRUE
)
})
test_that("User warned about higher-order autodiff with hessian", {
skip_if(os_is_wsl())
expect_message(
fit$init_model_methods(hessian = TRUE, verbose = TRUE),
"The hessian method relies on higher-order autodiff which is still experimental. Please report any compilation errors that you encounter",
fixed = TRUE
)
})
test_that("Methods return correct values", {
skip_if(os_is_wsl())
lp <- fit$log_prob(unconstrained_variables=c(0.1))
expect_equal(lp, -8.6327599208828509347)
grad_lp <- -3.2997502497472801508
attr(grad_lp, "log_prob") <- lp
expect_equal(fit$grad_log_prob(unconstrained_variables=c(0.1)), grad_lp)
hessian <- list(
log_prob = lp,
grad_log_prob = -3.2997502497472801508,
hessian = as.matrix(-2.9925124823147033482, nrow=1, ncol=1)
)
expect_equal(fit$hessian(unconstrained_variables=c(0.1)), hessian)
hessian_noadj <- list(
log_prob = -7.2439666007357095268,
grad_log_prob = -3.2497918747894001257,
hessian = as.matrix(-2.4937604019289194568, nrow=1, ncol=1)
)
expect_equal(fit$hessian(unconstrained_variables=c(0.1), jacobian_adjustment = FALSE),
hessian_noadj)
cpars <- fit$constrain_variables(c(0.1))
cpars_true <- list(
theta = 0.52497918747894001257,
log_lik = rep(-7.2439666007357095268, data_list$N)
)
expect_equal(cpars, cpars_true)
expect_equal(fit$constrain_variables(c(0.1), generated_quantities = FALSE),
list(theta = 0.52497918747894001257))
skeleton <- list(
theta = array(0, dim = 1),
log_lik = array(0, dim = data_list$N)
)
expect_equal(fit$variable_skeleton(), skeleton)
unconstrained_variables <- fit$unconstrain_variables(cpars)
expect_equal(unconstrained_variables, c(0.1))
})
test_that("Model methods environments are independent", {
skip_if(os_is_wsl())
data_list_2 <- data_list
data_list_2$N <- 20
data_list_2$y <- c(data_list$y, data_list$y)
fit_2 <- mod$sample(data = data_list_2, chains = 1)
fit_2$init_model_methods()
expect_equal(fit$log_prob(unconstrained_variables=c(0.1)), -8.6327599208828509347)
expect_equal(fit_2$log_prob(unconstrained_variables=c(0.1)), -15.87672652161856135)
})
test_that("methods error for incorrect inputs", {
skip_if(os_is_wsl())
expect_error(
fit$log_prob(c(1,2)),
"Model has 1 unconstrained parameter(s), but 2 were provided!",
fixed = TRUE
)
expect_error(
fit$grad_log_prob(c(1,2)),
"Model has 1 unconstrained parameter(s), but 2 were provided!",
fixed = TRUE
)
expect_error(
fit$hessian(c(1,2)),
"Model has 1 unconstrained parameter(s), but 2 were provided!",
fixed = TRUE
)
expect_error(
fit$constrain_variables(c(1,2)),
"Model has 1 unconstrained parameter(s), but 2 were provided!",
fixed = TRUE
)
logistic_mod <- cmdstan_model(testing_stan_file("logistic"), force_recompile = TRUE)
logistic_data_list <- testing_data("logistic")
logistic_fit <- logistic_mod$sample(data = logistic_data_list, chains = 1)
# Init without Hessian, as bernoulli_logit_glm currently not fully fvar<var>
# compatible
logistic_fit$init_model_methods(verbose = TRUE)
expect_error(
logistic_fit$unconstrain_variables(list(alpha = 0.5)),
"Model parameter(s): beta not provided!",
fixed = TRUE
)
})
test_that("Methods error with already-compiled model", {
skip_if(os_is_wsl())
precompile_mod <- testing_model("bernoulli")
mod <- testing_model("bernoulli")
data_list <- testing_data("bernoulli")
fit <- mod$sample(data = data_list, chains = 1)
expect_error(
fit$init_model_methods(),
"Model methods cannot be used with a pre-compiled Stan executable, the model must be compiled again",
fixed = TRUE
)
})
test_that("Methods can be compiled with model", {
skip_if(os_is_wsl())
mod <- cmdstan_model(testing_stan_file("bernoulli"),
force_recompile = TRUE,
compile_model_methods = TRUE,
compile_hessian_method = TRUE)
fit <- mod$sample(data = data_list, chains = 1)
lp <- fit$log_prob(unconstrained_variables=c(0.6))
expect_equal(lp, -10.649855405830624733)
grad_lp <- -4.7478756747095447466
attr(grad_lp, "log_prob") <- lp
expect_equal(fit$grad_log_prob(unconstrained_variables=c(0.6)), grad_lp)
hessian <- list(
log_prob = lp,
grad_log_prob = -4.7478756747095447466,
hessian = as.matrix(-2.7454108854798882078, nrow=1, ncol=1)
)
expect_equal(fit$hessian(unconstrained_variables=c(0.6)), hessian)
cpars <- fit$constrain_variables(c(0.6))
expect_equal(cpars, list(theta = 0.64565630622579539555))
unconstrained_variables <- fit$unconstrain_variables(cpars)
expect_equal(unconstrained_variables, c(0.6))
})
# A zero-length container (vector[N] with N = 0) contributes no unconstrained
# values; unconstrain_variables() must still return the remaining scalar.
test_that("unconstrain_variables correctly handles zero-length containers", {
  skip_if(os_is_wsl())  # model methods are unsupported under WSL
  model_code <- "
  data {
    int N;
  }
  parameters {
    vector[N] y;
    real x;
  }
  model {
    x ~ std_normal();
    y ~ std_normal();
  }
  "
  mod <- cmdstan_model(write_stan_file(model_code),
                       compile_model_methods = TRUE)
  # N = 0 makes y zero-length, so x is the only free parameter.
  fit <- mod$sample(data = list(N = 0), chains = 1)
  unconstrained <- fit$unconstrain_variables(variables = list(x = 5))
  expect_equal(unconstrained, 5)
})
# unconstrain_draws() must agree with the sampled draws for all three input
# routes: internal draws, external CmdStan CSV files, and an explicit draws
# object. It returns a list with one element per chain; [[1]] is chain 1.
test_that("unconstrain_draws returns correct values", {
  skip_if(os_is_wsl())  # model methods are unsupported under WSL
  # With no constraints, the parameter draws should be the same as the
  # unconstrained draws
  model_code <- "
  data {
    int N;
  }
  parameters {
    real x;
  }
  model {
    x ~ std_normal();
  }
  "
  mod <- cmdstan_model(write_stan_file(model_code),
                       compile_model_methods = TRUE,
                       force_recompile = TRUE)
  fit <- mod$sample(data = list(N = 0), chains = 1)
  x_draws <- fit$draws(format = "draws_df")$x
  # Unconstrain all internal draws
  unconstrained_internal_draws <- fit$unconstrain_draws()[[1]]
  expect_equal(as.numeric(x_draws), as.numeric(unconstrained_internal_draws))
  # Unconstrain external CmdStan CSV files
  unconstrained_csv <- fit$unconstrain_draws(files = fit$output_files())[[1]]
  expect_equal(as.numeric(x_draws), as.numeric(unconstrained_csv))
  # Unconstrain existing draws object
  unconstrained_draws <- fit$unconstrain_draws(draws = fit$draws())[[1]]
  expect_equal(as.numeric(x_draws), as.numeric(unconstrained_draws))
  # With a lower-bounded constraint, the parameter draws should be the
  # exponentiation of the unconstrained draws
  model_code <- "
  data {
    int N;
  }
  parameters {
    real<lower = 0> x;
  }
  model {
    x ~ std_normal();
  }
  "
  mod <- cmdstan_model(write_stan_file(model_code),
                       compile_model_methods = TRUE,
                       force_recompile = TRUE)
  fit <- mod$sample(data = list(N = 0), chains = 1)
  x_draws <- fit$draws(format = "draws_df")$x
  unconstrained_internal_draws <- fit$unconstrain_draws()[[1]]
  expect_equal(as.numeric(x_draws), exp(as.numeric(unconstrained_internal_draws)))
  # Unconstrain external CmdStan CSV files
  unconstrained_csv <- fit$unconstrain_draws(files = fit$output_files())[[1]]
  expect_equal(as.numeric(x_draws), exp(as.numeric(unconstrained_csv)))
  # Unconstrain existing draws object
  unconstrained_draws <- fit$unconstrain_draws(draws = fit$draws())[[1]]
  expect_equal(as.numeric(x_draws), exp(as.numeric(unconstrained_draws)))
})
# Model methods must also work for a model without a data block.
# std_normal log-density at x = 5 is -(5^2)/2 = -12.5 up to the dropped constant.
test_that("Model methods can be initialised for models with no data", {
  skip_if(os_is_wsl())  # model methods are unsupported under WSL
  stan_file <- write_stan_file("parameters { real x; } model { x ~ std_normal(); }")
  mod <- cmdstan_model(stan_file, compile_model_methods = TRUE, force_recompile = TRUE)
  expect_no_error(fit <- mod$sample())
  expect_equal(fit$log_prob(5), -12.5)
})
# variable_skeleton() should mirror the declared parameter shapes: scalars as
# length-1 arrays, matrices as N x K arrays, (row) vectors as length-K arrays.
test_that("Variable skeleton returns correct dimensions for matrices", {
  skip_if(os_is_wsl())  # model methods are unsupported under WSL
  stan_file <- write_stan_file("
  data {
    int N;
    int K;
  }
  parameters {
    real x_real;
    matrix[N,K] x_mat;
    vector[K] x_vec;
    row_vector[K] x_rowvec;
  }
  model {
    x_real ~ std_normal();
  }")
  mod <- cmdstan_model(stan_file, compile_model_methods = TRUE,
                       force_recompile = TRUE)
  N <- 4
  K <- 3
  # A single draw suffices; only the returned shapes are under test.
  fit <- mod$sample(data = list(N = N, K = K), chains = 1,
                    iter_warmup = 1, iter_sampling = 1)
  target_skeleton <- list(
    x_real = array(0, dim = 1),
    x_mat = array(0, dim = c(N, K)),
    x_vec = array(0, dim = K),
    x_rowvec = array(0, dim = K)
  )
  expect_equal(fit$variable_skeleton(),
               target_skeleton)
})
|
#install.packages("lubridate")
library(lubridate)
#install.packages("dplyr")
library(dplyr)
# Directory of raw hourly weather exports (one file per station), scanned
# recursively for CSVs.
s <- "C:\\Users\\khush\\Documents\\IP\\Weather Data\\"
files <- list.files(s, pattern="*.csv", full.names=TRUE, recursive = TRUE)
# Combined output file; every station's rows are appended here.
# NOTE(review): prefer <- over = for assignment, per R style.
fname = "C:/Users/khush/Documents/IP/Weather_West.csv"
#Assign Date and Hour
# For each station file: keep the hourly measurement columns, parse the ISO
# timestamp, derive calendar Date and a 1-based Hour (1-24), and append the
# rows to the combined CSV (header written only on the first append, via
# col.names = !file.exists(fname)).
for (file in files) {
  tryCatch({
    Weather<- read.csv(file, header = TRUE)
    Weather<- Weather[c("STATION",'DATE','HourlyDewPointTemperature','HourlyDryBulbTemperature','HourlyPrecipitation','HourlyRelativeHumidity','HourlyStationPressure','HourlyWetBulbTemperature','HourlyWindSpeed')]
    Weather$DATE <- as.POSIXct(Weather$DATE, format = "%Y-%m-%dT%H:%M:%S", tz = "UTC")
    Weather$Date <- as.Date(Weather$DATE)
    Weather$Hour <- as.POSIXlt(Weather$DATE)$hour+1  # shift 0-23 clock hours to 1-24
    write.table(Weather, file=fname, sep = ",", row.names = FALSE, append = TRUE, col.names=!file.exists(fname))
    print(file)
  }, error = function(cond) {
    print(cond)
    print("here")
    # NOTE(review): this handler re-runs the exact steps that just failed, so
    # a deterministic error will be raised a second time, now uncaught --
    # presumably intended as a one-shot retry; confirm the intent.
    Weather <- read.csv(file, header = TRUE)
    Weather<-Weather[c("STATION",'DATE','HourlyDewPointTemperature','HourlyDryBulbTemperature','HourlyPrecipitation','HourlyRelativeHumidity','HourlyStationPressure','HourlyWetBulbTemperature','HourlyWindSpeed')]
    Weather$DATE <- as.POSIXct(Weather$DATE, format = "%Y-%m-%dT%H:%M:%S",tz = "UTC")
    Weather$Date <- as.Date(Weather$DATE)
    Weather$Hour <- as.POSIXlt(Weather$DATE)$hour+1
    write.table(Weather, file=fname, sep = ",", row.names = FALSE, append = TRUE, col.names=!file.exists(fname))
    print(file)
  })
}
fname = "C:/Users/khush/Documents/IP/Weather_West.csv"
# Reload the combined table for cleaning.
Weather <- read.csv(fname, header = TRUE)
# Converting measurement columns (3:9) to numeric; read.csv may have produced
# character columns because of trace/missing codes in the raw data.
Weather[3:9] <- lapply(Weather[3:9], function(x) as.numeric(as.character(x)))
str(Weather)
# Drop the first two columns (STATION and the raw DATE timestamp).
Weather<- Weather[-1:-2]
# Impute HourlyDewPointTemperature for row i: average every observation that
# shares row i's Date and Hour, ignoring NAs; yield NA when the slot holds no
# observed value.
XY <- function(df, i) {
  same_slot <- df$Date == df$Date[i] & df$Hour == df$Hour[i]
  slot_mean <- mean(df$HourlyDewPointTemperature[same_slot], na.rm = TRUE)
  if (is.nan(slot_mean)) NA else slot_mean
}
# First pass: fill each missing dew-point with its Date/Hour slot mean.
for (i in which(is.na(Weather$HourlyDewPointTemperature))) {
  Weather$HourlyDewPointTemperature[i] <- XY(Weather, i)
}
# Impute HourlyDryBulbTemperature for row i from the mean of all rows sharing
# its Date/Hour slot (NAs dropped); returns NA for an all-missing slot.
AB <- function(df1, i) {
  slot <- df1$Date == df1$Date[i] & df1$Hour == df1$Hour[i]
  m <- mean(df1$HourlyDryBulbTemperature[slot], na.rm = TRUE)
  if (is.nan(m)) NA else m
}
# First pass: fill each missing dry-bulb temperature with its slot mean.
for (i in which(is.na(Weather$HourlyDryBulbTemperature))) {
  Weather$HourlyDryBulbTemperature[i] <- AB(Weather, i)
}
# Impute HourlyRelativeHumidity for row i: average of every observation in
# the same Date/Hour slot with NAs removed; NA if nothing was observed.
CD <- function(df2, i) {
  matches <- which(df2$Date == df2$Date[i] & df2$Hour == df2$Hour[i])
  vals <- df2$HourlyRelativeHumidity[matches]
  out <- mean(vals, na.rm = TRUE)
  if (is.nan(out)) NA else out
}
# First pass: fill each missing relative-humidity value with its slot mean.
for (i in which(is.na(Weather$HourlyRelativeHumidity))) {
  Weather$HourlyRelativeHumidity[i] <- CD(Weather, i)
}
# Impute HourlyStationPressure for row i using the mean over its Date/Hour
# slot (NAs dropped); returns NA for an all-missing slot.
EF <- function(df7, i) {
  slot_vals <- df7$HourlyStationPressure[df7$Date == df7$Date[i] &
                                           df7$Hour == df7$Hour[i]]
  res <- mean(slot_vals, na.rm = TRUE)
  if (is.nan(res)) NA else res
}
# First pass: fill each missing station-pressure value with its slot mean.
for (i in which(is.na(Weather$HourlyStationPressure))) {
  Weather$HourlyStationPressure[i] <- EF(Weather, i)
}
#Replacing HSP NA values by creating function
# Second pass: EF is REDEFINED (shadowing the version above) to fall back to
# the PREVIOUS row's Hour (df7$Hour[i-1]) for pressure values the first pass
# could not fill.
# NOTE(review): when i == 1, df7$Hour[i-1] is zero-length, so the mask
# selects no rows and the result is NA -- confirm this edge is acceptable.
EF <- function(df7, i) {
  Se4 <- df7$Date == df7$Date[i] & df7$Hour == df7$Hour[i-1]
  imputed <- mean(df7$HourlyStationPressure[Se4],na.rm = TRUE)
  if(is.nan(imputed)){
    imputed <- NA
  }
  return(imputed)
}
# Second pass: retry still-missing pressure values with the previous-hour EF.
for (i in which(is.na(Weather$HourlyStationPressure))) {
  Weather$HourlyStationPressure[i] <- EF(Weather, i)
}
# Replacing WBT NA values by creating function.
# Impute HourlyWetBulbTemperature for row i from the mean of all rows sharing
# its Date/Hour slot (NAs dropped). The NaN -> NA guard -- left as malformed
# commented-out code in the original -- is restored so an all-missing slot
# yields NA, matching the sibling helpers (XY, AB, CD, EF, IJ).
GH <- function(df4, i) {
  Se5 <- df4$Date == df4$Date[i] & df4$Hour == df4$Hour[i]
  imputed <- mean(df4$HourlyWetBulbTemperature[Se5], na.rm = TRUE)
  if (is.nan(imputed)) {
    imputed <- NA
  }
  return(imputed)
}
# First pass: fill each missing wet-bulb temperature with its slot mean.
for (i in which(is.na(Weather$HourlyWetBulbTemperature))) {
  Weather$HourlyWetBulbTemperature[i] <- GH(Weather, i)
}
# Second imputation pass for HourlyWetBulbTemperature: GH is REDEFINED to
# fall back to the PREVIOUS row's Hour (df4$Hour[i-1]) for rows the first
# pass left missing. The NaN -> NA guard (commented out in the original) is
# restored so an empty/all-missing slot yields NA, consistent with the other
# helpers.
# NOTE(review): for i == 1, df4$Hour[i-1] is zero-length and selects no rows.
GH <- function(df4, i) {
  Se5 <- df4$Date == df4$Date[i] & df4$Hour == df4$Hour[i-1]
  imputed <- mean(df4$HourlyWetBulbTemperature[Se5], na.rm = TRUE)
  if (is.nan(imputed)) {
    imputed <- NA
  }
  return(imputed)
}
# Second pass: retry still-missing wet-bulb values with the previous-hour GH.
for (i in which(is.na(Weather$HourlyWetBulbTemperature))) {
  Weather$HourlyWetBulbTemperature[i] <- GH(Weather, i)
}
# Impute HourlyWindSpeed for row i from its Date/Hour slot mean (NAs
# dropped); NA when the slot contains no observed value.
IJ <- function(df5, i) {
  in_slot <- df5$Date == df5$Date[i] & df5$Hour == df5$Hour[i]
  avg <- mean(df5$HourlyWindSpeed[in_slot], na.rm = TRUE)
  if (is.nan(avg)) NA else avg
}
# First pass: fill each missing wind-speed value with its slot mean.
for (i in which(is.na(Weather$HourlyWindSpeed))) {
  Weather$HourlyWindSpeed[i] <- IJ(Weather, i)
}
# Replacing HP values by creating function.
# Impute HourlyPrecipitation for row i from the mean of rows sharing its
# Date/Hour slot (NAs dropped). The NaN -> NA guard (left commented out in
# the original) is restored so an all-missing slot yields NA, matching the
# other imputation helpers.
KL <- function(df6, i) {
  Se7 <- df6$Date == df6$Date[i] & df6$Hour == df6$Hour[i]
  imputed <- mean(df6$HourlyPrecipitation[Se7], na.rm = TRUE)
  if (is.nan(imputed)) {
    imputed <- NA
  }
  return(imputed)
}
# First pass: fill each missing precipitation value with its slot mean.
for (i in which(is.na(Weather$HourlyPrecipitation))) {
  Weather$HourlyPrecipitation[i] <- KL(Weather, i)
}
#Replacing HP values by creating function
# Second pass: KL is REDEFINED to fall back to the previous row's Hour
# (df6$Hour[i-1]) for precipitation values still missing after pass one.
# NOTE(review): the is.nan() guard is commented out here, so an empty slot
# returns NaN rather than NA (is.na(NaN) is TRUE in R, so downstream
# which(is.na(...)) still catches it) -- consider restoring the guard for
# consistency with the other helpers.
KL <- function(df6, i) {
  Se7 <- df6$Date == df6$Date[i] & df6$Hour == df6$Hour[i-1]
  imputed <- mean(df6$HourlyPrecipitation[Se7],na.rm = TRUE)
  #if(is.nan(imputed)){
  #imputed <- NA
  return(imputed)
}
# Second pass: retry still-missing precipitation with the previous-hour KL.
for (i in which(is.na(Weather$HourlyPrecipitation))) {
  Weather$HourlyPrecipitation[i] <- KL(Weather, i)
}
summary(Weather)
# Persist the imputed table before aggregation.
write.csv(Weather,"C:/Users/khush/Documents/IP/W.WZ.csv")
#Averaging data for every hour
Weather.West <- Weather %>% group_by(Date,Hour) %>% summarise(mean_DPT = mean(HourlyDewPointTemperature), mean_DBT = mean(HourlyDryBulbTemperature), mean_HP = mean(HourlyPrecipitation), mean_HRH = mean(HourlyRelativeHumidity), mean_HSP = mean(HourlyStationPressure), mean_WBT = mean(HourlyWetBulbTemperature), mean_HWS = mean(HourlyWindSpeed))
summary(Weather.West)
#Loading West region from Load
# NOTE(review): `load` is never defined in this script (and base::load is a
# function) -- presumably a load data frame is created elsewhere; verify.
West <- load[ which(load$Zone.Name=='WEST'), ]
#Merging West and Weather.West
total <- merge(West,Weather.West,by=c("Date","Hour"), all.x = T)
write.csv(total,"C:/Users/khush/Documents/IP/total.csv")
install.packages("ggcorrplot")
library(ggcorrplot)
# NOTE(review): cor() needs an all-numeric input; `total` contains Date (and
# likely character) columns, so this will error -- subset numeric columns
# first, e.g. total[sapply(total, is.numeric)].
corr <- round(cor(total), 1)
head(corr[, 1:16])
# NOTE(review): `data` and `month` are undefined here and the mutate() result
# is not assigned -- this snippet appears unfinished.
data %>% mutate(season = ifelse(month %in% 10:12, "Fall",
                                ifelse(month %in% 1:3, "Winter",
                                       ifelse(month %in% 4:6, "Spring",
                                              "Summer"))))
# NOTE(review): bare subset() call with no arguments does nothing.
subset()
| /Weather code (1).R | no_license | khushbu3apr/Towards-enhancing-grid-reliability-A-framework-to-forecast-electricity-demand-growth | R | false | false | 6,799 | r | #install.packages("lubridate")
library(lubridate)
#install.packages("dplyr")
library(dplyr)
s <- "C:\\Users\\khush\\Documents\\IP\\Weather Data\\"
files <- list.files(s, pattern="*.csv", full.names=TRUE, recursive = TRUE)
fname = "C:/Users/khush/Documents/IP/Weather_West.csv"
#Assign Date and Hour
for (file in files) {
tryCatch({
Weather<- read.csv(file, header = TRUE)
Weather<- Weather[c("STATION",'DATE','HourlyDewPointTemperature','HourlyDryBulbTemperature','HourlyPrecipitation','HourlyRelativeHumidity','HourlyStationPressure','HourlyWetBulbTemperature','HourlyWindSpeed')]
Weather$DATE <- as.POSIXct(Weather$DATE, format = "%Y-%m-%dT%H:%M:%S", tz = "UTC")
Weather$Date <- as.Date(Weather$DATE)
Weather$Hour <- as.POSIXlt(Weather$DATE)$hour+1
write.table(Weather, file=fname, sep = ",", row.names = FALSE, append = TRUE, col.names=!file.exists(fname))
print(file)
}, error = function(cond) {
print(cond)
print("here")
Weather <- read.csv(file, header = TRUE)
Weather<-Weather[c("STATION",'DATE','HourlyDewPointTemperature','HourlyDryBulbTemperature','HourlyPrecipitation','HourlyRelativeHumidity','HourlyStationPressure','HourlyWetBulbTemperature','HourlyWindSpeed')]
Weather$DATE <- as.POSIXct(Weather$DATE, format = "%Y-%m-%dT%H:%M:%S",tz = "UTC")
Weather$Date <- as.Date(Weather$DATE)
Weather$Hour <- as.POSIXlt(Weather$DATE)$hour+1
write.table(Weather, file=fname, sep = ",", row.names = FALSE, append = TRUE, col.names=!file.exists(fname))
print(file)
})
}
fname = "C:/Users/khush/Documents/IP/Weather_West.csv"
Weather <- read.csv(fname, header = TRUE)
#Convering variables into numeric
Weather[3:9] <- lapply(Weather[3:9], function(x) as.numeric(as.character(x)))
str(Weather)
Weather<- Weather[-1:-2]
#Replacing DPT NA values by creating function
XY <- function(df, i) {
Se1 <- df$Date == df$Date[i] & df$Hour == df$Hour[i]
imputed <- mean(df$HourlyDewPointTemperature[Se1],na.rm = TRUE)
if(is.nan(imputed)){
imputed <- NA
}
return(imputed)
}
for (i in which(is.na(Weather$HourlyDewPointTemperature))) {
Weather$HourlyDewPointTemperature[i] <- XY(Weather, i)
}
#Replacing DBT NA values by creating function
AB <- function(df1, i) {
Se2 <- df1$Date == df1$Date[i] & df1$Hour == df1$Hour[i]
imputed <- mean(df1$HourlyDryBulbTemperature[Se2],na.rm = TRUE)
if(is.nan(imputed)){
imputed <- NA
}
return(imputed)
}
for (i in which(is.na(Weather$HourlyDryBulbTemperature))) {
Weather$HourlyDryBulbTemperature[i] <- AB(Weather, i)
}
#Replacing HRH NA values by creating function
CD <- function(df2, i) {
Se3 <- df2$Date == df2$Date[i] & df2$Hour == df2$Hour[i]
imputed <- mean(df2$HourlyRelativeHumidity[Se3],na.rm = TRUE)
if(is.nan(imputed)){
imputed <- NA
}
return(imputed)
}
for (i in which(is.na(Weather$HourlyRelativeHumidity))) {
Weather$HourlyRelativeHumidity[i] <- CD(Weather, i)
}
#Replacing HSP NA values by creating function
EF <- function(df7, i) {
Se4 <- df7$Date == df7$Date[i] & df7$Hour == df7$Hour[i]
imputed <- mean(df7$HourlyStationPressure[Se4],na.rm = TRUE)
if(is.nan(imputed)){
imputed <- NA
}
return(imputed)
}
for (i in which(is.na(Weather$HourlyStationPressure))) {
Weather$HourlyStationPressure[i] <- EF(Weather, i)
}
#Replacing HSP NA values by creating function
EF <- function(df7, i) {
Se4 <- df7$Date == df7$Date[i] & df7$Hour == df7$Hour[i-1]
imputed <- mean(df7$HourlyStationPressure[Se4],na.rm = TRUE)
if(is.nan(imputed)){
imputed <- NA
}
return(imputed)
}
for (i in which(is.na(Weather$HourlyStationPressure))) {
Weather$HourlyStationPressure[i] <- EF(Weather, i)
}
#Replacing WBT NA values by creating function
GH <- function(df4, i) {
Se5 <- df4$Date == df4$Date[i] & df4$Hour == df4$Hour[i]
imputed <- mean(df4$HourlyWetBulbTemperature[Se5],na.rm = TRUE)
#f(is.nan(imputed)){
#imputed <- NA
return(imputed)
}
for (i in which(is.na(Weather$HourlyWetBulbTemperature))) {
Weather$HourlyWetBulbTemperature[i] <- GH(Weather, i)
}
#Replacing WBT NA values by creating function
GH <- function(df4, i) {
Se5 <- df4$Date == df4$Date[i] & df4$Hour == df4$Hour[i-1]
imputed <- mean(df4$HourlyWetBulbTemperature[Se5],na.rm = TRUE)
#f(is.nan(imputed)){
#imputed <- NA
return(imputed)
}
for (i in which(is.na(Weather$HourlyWetBulbTemperature))) {
Weather$HourlyWetBulbTemperature[i] <- GH(Weather, i)
}
#Replacing HWS NA values by creating function
IJ <- function(df5, i) {
Se6 <- df5$Date == df5$Date[i] & df5$Hour == df5$Hour[i]
imputed <- mean(df5$HourlyWindSpeed[Se6],na.rm = TRUE)
if(is.nan(imputed)){
imputed <- NA
}
return(imputed)
}
for (i in which(is.na(Weather$HourlyWindSpeed))) {
Weather$HourlyWindSpeed[i] <- IJ(Weather, i)
}
#Replacing HP values by creating function
KL <- function(df6, i) {
Se7 <- df6$Date == df6$Date[i] & df6$Hour == df6$Hour[i]
imputed <- mean(df6$HourlyPrecipitation[Se7],na.rm = TRUE)
#if(is.nan(imputed)){
#imputed <- NA
return(imputed)
}
for (i in which(is.na(Weather$HourlyPrecipitation))) {
Weather$HourlyPrecipitation[i] <- KL(Weather, i)
}
#Replacing HP values by creating function
KL <- function(df6, i) {
Se7 <- df6$Date == df6$Date[i] & df6$Hour == df6$Hour[i-1]
imputed <- mean(df6$HourlyPrecipitation[Se7],na.rm = TRUE)
#if(is.nan(imputed)){
#imputed <- NA
return(imputed)
}
for (i in which(is.na(Weather$HourlyPrecipitation))) {
Weather$HourlyPrecipitation[i] <- KL(Weather, i)
}
summary(Weather)
write.csv(Weather,"C:/Users/khush/Documents/IP/W.WZ.csv")
#Averaging data for every hour
Weather.West <- Weather %>% group_by(Date,Hour) %>% summarise(mean_DPT = mean(HourlyDewPointTemperature), mean_DBT = mean(HourlyDryBulbTemperature), mean_HP = mean(HourlyPrecipitation), mean_HRH = mean(HourlyRelativeHumidity), mean_HSP = mean(HourlyStationPressure), mean_WBT = mean(HourlyWetBulbTemperature), mean_HWS = mean(HourlyWindSpeed))
summary(Weather.West)
#Loading West region from Load
West <- load[ which(load$Zone.Name=='WEST'), ]
#Merging West and Weather.West
total <- merge(West,Weather.West,by=c("Date","Hour"), all.x = T)
write.csv(total,"C:/Users/khush/Documents/IP/total.csv")
install.packages("ggcorrplot")
library(ggcorrplot)
corr <- round(cor(total), 1)
head(corr[, 1:16])
data %>% mutate(season = ifelse(month %in% 10:12, "Fall",
ifelse(month %in% 1:3, "Winter",
ifelse(month %in% 4:6, "Spring",
"Summer"))))
subset()
|
## Script to organize streamflow data ##
# Preliminaries - packages, directory
# pacman::p_load installs (if missing) and attaches all listed packages.
if (!require("pacman")) install.packages("pacman")
pacman::p_load(imputeTS, Hmisc, rio, plyr, dplyr, xlsx)
# NOTE(review): setwd() returns the PREVIOUS working directory invisibly, so
# `mydir` holds the old directory, not 'D:/THESIS_PP' -- confirm intent.
mydir <- setwd('D:/THESIS_PP')
loadcsv <- function(datadir){
setwd (datadir)
files.to.read = list.files(datadir, pattern="xls")
lapply(files.to.read, function(f) {
df = read.xlsx(f, sheetIndex=1)
write.csv(df, gsub("xls", "csv", f), row.names=FALSE)
})
alltables <- list.files (datadir, pattern='*.csv')
alltables1 <- lapply(alltables, read.csv)
alltables2 <- lapply(alltables1, function(x) x[-c(6941:10000),])# delete excess rows
alltables3 <- bind_cols(alltables2)
setwd('D:/THESIS_PP/mid-results')
write.csv (alltables3, file='obs_data1.csv')
setwd(datadir)
}
# Run the pipeline on the updated raw-data folder.
loadcsv('D:/THESIS_PP/data/streamflow/updated1')
| /R/07a_PP_OrganizeStreamflow.R | no_license | arnanaraza/streamflow | R | false | false | 914 | r | ## Script to organize streamflow data ##
# Preliminaries - packages, directory
if (!require("pacman")) install.packages("pacman")
pacman::p_load(imputeTS, Hmisc, rio, plyr, dplyr, xlsx)
mydir <- setwd('D:/THESIS_PP')
# Load streamflow data in xls and converts to csv
loadcsv <- function(datadir){
setwd (datadir)
files.to.read = list.files(datadir, pattern="xls")
lapply(files.to.read, function(f) {
df = read.xlsx(f, sheetIndex=1)
write.csv(df, gsub("xls", "csv", f), row.names=FALSE)
})
alltables <- list.files (datadir, pattern='*.csv')
alltables1 <- lapply(alltables, read.csv)
alltables2 <- lapply(alltables1, function(x) x[-c(6941:10000),])# delete excess rows
alltables3 <- bind_cols(alltables2)
setwd('D:/THESIS_PP/mid-results')
write.csv (alltables3, file='obs_data1.csv')
setwd(datadir)
}
loadcsv('D:/THESIS_PP/data/streamflow/updated1')
|
# Here you can write everything! This is a comment!
# R code for plotting the relationship among ecological variables
# install.packages is used to install packages
install.packages("sp")
library(sp)
# data is used to recall datasets
data(meuse)
# look inside the set!
meuse
View(meuse) # it might not work for mac
# solve using: https://www.xquartz.org
head(meuse)
# Exercise: mean of all of the variables?
# cadmium crazy mean extraction
# (11.7+8.6+6.5....)/N
summary(meuse)
# Exercise: plot zinc (y) against cadmium (x)
# error (deliberate demonstration: columns are not visible until attach())
plot(cadmium, zinc)
# Error in h(simpleError(msg, call)) :
# error in evaluating the argument 'x' in selecting a method for function 'plot': object 'cadmium' not found
# $
plot(meuse$cadmium, meuse$zinc)
# if you attach you do not need the dollar symbol!
attach(meuse)
plot(cadmium, zinc)
plot(cadmium,lead)
# how to impress your supervisor!
# Scatterplot Matrices
pairs(meuse)
# Question!!!
# pairing only the elements part of the dataset: how to do that?
# only with cadmium, copper, lead, zinc...
##############################
# Lecture #2 on ecological variables
# Exercise
# Recall the package sp, recall the dataset meuse
library(sp)
data(meuse)
pairs(meuse)
head(meuse)
# cadmium copper lead zinc
# pairs with soil variables
# from column 3 to column 6
pairs(meuse[,3:6]) # how to type square brackets: AltGr + è
# let's use the names of the columns:
pairs(~ cadmium + copper + lead + zinc, data=meuse)
# tilde is going to be done by AltGr + ^
# in windows: Alt + 0126 or Alt + 126
# In Mac: alt + n on mac
# let's prettify the graph
# Exercise: just use cadmium, lead and zinc
pairs(~ cadmium + lead + zinc, data=meuse)
# Exercise: change the color
pairs(~ cadmium + copper + lead + zinc, data=meuse, col="red")
# for the future change colours of single panels by the par() function
# Exercise: change the symbol to filled triangles: pch
pairs(~ cadmium + copper + lead + zinc, data=meuse, col="red", pch=17)
# nice page on pairs:
# https://statisticsglobe.com/r-pairs-plot-example/
# Exercise: increase the size of the triangles
pairs(~ cadmium + copper + lead + zinc, data=meuse, col="red", pch=17, cex=3)
| /R_ecological_relationship.r | no_license | elhamkakaei2020/Monitoring_2021 | R | false | false | 2,209 | r | # Here you can write everything! This is a comment!
# R code for plotting the relationship among ecological variables
# install.packages is used to install packages
install.packages("sp")
library(sp)
# data is used to recall datasets
data(meuse)
# llok inside the set!
meuse
View(meuse) # it might not work for mac
# solve using: https://www.xquartz.org
head(meuse)
# Exercise: mean of all of the variables?
# cadmium crazy mean extraction
# (11.7+8.6+6.5....)/N
summary(meuse)
# Exercise: plot zinc (y) against cadmium (x)
# error
plot(cadmium, zinc)
# Error in h(simpleError(msg, call)) :
# error in evaluating the argument 'x' in selecting a method for function 'plot': object 'cadmium' not found
# $
plot(meuse$cadmium, meuse$zinc)
# if you attach you do not need the dollar symbol!
attach(meuse)
plot(cadmium, zinc)
plot(cadmium,lead)
# how to impress your supervisor!
# Scatterplot Matrices
pairs(meuse)
# Question!!!
# pairing only the elements part of the dataset: how to do that?
# only with cadmium, copper, lead, zinc...
##############################
# Lecture #2 on ecological variables
# Exercise
# Recall the package sp, recall the dataset meuse
library(sp)
data(meuse)
pairs(meuse)
head(meuse)
# cadmium copper lead zinc
# pairs with soil variables
# from column 3 to column 6
pairs(meuse[,3:6]) # how to do quadratic parathenses: AltGr + è
# let's use the names of the columns:
pairs(~ cadmium + copper + lead + zinc, data=meuse)
# tilde is going to be done by AltGr + ^
# in windows: Alt + 0126 or Alt + 126
# In Mac: alt + n on mac
# let's prettify the graph
# Exercise: just use cadmium, lead and zince
pairs(~ cadmium + lead + zinc, data=meuse)
# Exercise: change the color
pairs(~ cadmium + copper + lead + zinc, data=meuse, col="red")
# for the future change colours of single panels by the par() function
# Exercise: change the symbol to filled triangles: pch
pairs(~ cadmium + copper + lead + zinc, data=meuse, col="red", pch=17)
# nice page on pairs:
# https://statisticsglobe.com/r-pairs-plot-example/
# Exercise: increase the size of the trinagles
pairs(~ cadmium + copper + lead + zinc, data=meuse, col="red", pch=17, cex=3)
|
## Test functions ##
# Return the elementwise sum of two numeric (vectorized) inputs.
add <- function(x, y) {
  x + y
}
## mod <- Module('testAdd')
## fun <- Function('Add', DoubleType, c(x=DoubleType, y=DoubleType), mod)
## block <- Block(fun)
## ir <- IRBuilder(block)
## params <- getParameters(fun)
## xy.sum <- createLocalVariable(ir, DoubleType, 'xysum')
## createStore(ir, xy.sum,
## tmp = binOp(ir, Add, params$x, params$y)
## createReturn(ir, tmp)
## verifyModule(mod)
# Always yields the integer constant 3L.
dumbAssign <- function() {
  3L
}
| /Rtests/compile.R | no_license | duncantl/RLLVMCompile | R | false | false | 507 | r | ## Test functions ##
add <- function(x, y) {
xy.sum <- x + y # local var
return(xy.sum)
}
## mod <- Module('testAdd')
## fun <- Function('Add', DoubleType, c(x=DoubleType, y=DoubleType), mod)
## block <- Block(fun)
## ir <- IRBuilder(block)
## params <- getParameters(fun)
## xy.sum <- createLocalVariable(ir, DoubleType, 'xysum')
## createStore(ir, xy.sum,
## tmp = binOp(ir, Add, params$x, params$y)
## createReturn(ir, tmp)
## verifyModule(mod)
dumbAssign <- function() {
x <- 3L
return(x)
}
|
# UI: a sidebar exposes the word source (bundled "Art of War", free text, or
# a file upload -- the text/file inputs only appear when their radio option
# is selected), a maximum word count, and a background colour; the main panel
# renders the word cloud.
# NOTE(review): colourInput() comes from the colourpicker package and
# wordcloud2Output() from wordcloud2 -- presumably attached elsewhere in the
# app; verify the library() calls exist.
ui <- fluidPage(
  h1("Word Cloud"),
  sidebarLayout(
    sidebarPanel(
      radioButtons(
        inputId = "source",
        label = "Word source",
        choices = c(
          "Art of War" = "book",
          "Use your own words" = "own",
          "Upload a file" = "file"
        )
      ),
      # Free-text entry, shown only for the "own" source.
      conditionalPanel(
        condition = "input.source == 'own'",
        textAreaInput("text", "Enter text", rows = 7)
      ),
      # Wrap the file input in a conditional panel
      conditionalPanel(
        # The condition should be that the user selects
        # "file" from the radio buttons
        condition = "input.source == 'file'",
        fileInput("file", "Select a file")
      ),
      numericInput("num", "Maximum number of words",
                   value = 100, min = 5),
      colourInput("col", "Background color", value = "white")
    ),
    mainPanel(
      wordcloud2Output("cloud")
    )
  )
)
# Shiny server: routes the selected word source (bundled book, free text, or
# an uploaded file) into create_wordcloud() and renders the result.
server <- function(input, output) {
  # Lines of the uploaded file, or "" until a file has been chosen.
  input_file <- reactive({
    if (is.null(input$file)) "" else readLines(input$file$datapath)
  })
  # Pick the text source according to the radio-button selection
  # ("book", "own" or "file" are the only values the UI can emit).
  data_source <- reactive({
    switch(input$source,
      book = artofwar,      # corpus object defined elsewhere in the app
      own  = input$text,
      file = input_file()
    )
  })
  output$cloud <- renderWordcloud2({
    create_wordcloud(data_source(), num_words = input$num,
                     background = input$col)
  })
}
# Launch the application.
shinyApp(ui = ui, server = server)
| /shiny/wordcloud conditional input panel.R | no_license | jyeazell/DataCamp_practice | R | false | false | 1,795 | r | ui <- fluidPage(
h1("Word Cloud"),
sidebarLayout(
sidebarPanel(
radioButtons(
inputId = "source",
label = "Word source",
choices = c(
"Art of War" = "book",
"Use your own words" = "own",
"Upload a file" = "file"
)
),
conditionalPanel(
condition = "input.source == 'own'",
textAreaInput("text", "Enter text", rows = 7)
),
# Wrap the file input in a conditional panel
conditionalPanel(
# The condition should be that the user selects
# "file" from the radio buttons
condition = "input.source == 'file'",
fileInput("file", "Select a file")
),
numericInput("num", "Maximum number of words",
value = 100, min = 5),
colourInput("col", "Background color", value = "white")
),
mainPanel(
wordcloud2Output("cloud")
)
)
)
server <- function(input, output) {
data_source <- reactive({
if (input$source == "book") {
data <- artofwar
} else if (input$source == "own") {
data <- input$text
} else if (input$source == "file") {
data <- input_file()
}
return(data)
})
input_file <- reactive({
if (is.null(input$file)) {
return("")
}
readLines(input$file$datapath)
})
output$cloud <- renderWordcloud2({
create_wordcloud(data_source(), num_words = input$num,
background = input$col)
})
}
shinyApp(ui = ui, server = server)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/resource_id.R
\name{resource_id}
\alias{resource_id}
\title{Resource classifier}
\usage{
resource_id(eventlog)
}
\arguments{
\item{eventlog}{An object of class \code{eventlog}.}
}
\description{
Get the resource classifier of an object of class \code{eventlog}.
}
\examples{
data(example_log)
resource_id(example_log)
}
\seealso{
\code{\link{eventlog}}, \code{\link{case_id}}, \code{\link{activity_instance_id}},
\code{\link{timestamp}}
}
| /man/resource_id.Rd | no_license | bbrewington/edeaR | R | false | true | 517 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/resource_id.R
\name{resource_id}
\alias{resource_id}
\title{Resource classifier}
\usage{
resource_id(eventlog)
}
\arguments{
\item{eventlog}{An object of class \code{eventlog}.}
}
\description{
Get the resource classifier of an object of class \code{eventlog}.
}
\examples{
data(example_log)
resource_id(example_log)
}
\seealso{
\code{\link{eventlog}}, \code{\link{case_id}}, \code{\link{activity_instance_id}},
\code{\link{timestamp}}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SiteUpdateDTO.r
\docType{data}
\name{SiteUpdateDTO}
\alias{SiteUpdateDTO}
\title{SiteUpdateDTO Class}
\format{An object of class \code{R6ClassGenerator} of length 24.}
\usage{
SiteUpdateDTO
}
\description{
SiteUpdateDTO Class
}
\section{Fields}{
\describe{
\item{\code{uri}}{}
\item{\code{rdf_type}}{}
\item{\code{name}}{}
\item{\code{address}}{}
\item{\code{organizations}}{}
\item{\code{facilities}}{}
\item{\code{groups}}{}
\item{\code{rdf_type_name}}{}
}}
\keyword{datasets}
| /man/SiteUpdateDTO.Rd | no_license | OpenSILEX/opensilexClientToolsR | R | false | true | 566 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SiteUpdateDTO.r
\docType{data}
\name{SiteUpdateDTO}
\alias{SiteUpdateDTO}
\title{SiteUpdateDTO Class}
\format{An object of class \code{R6ClassGenerator} of length 24.}
\usage{
SiteUpdateDTO
}
\description{
SiteUpdateDTO Class
}
\section{Fields}{
\describe{
\item{\code{uri}}{}
\item{\code{rdf_type}}{}
\item{\code{name}}{}
\item{\code{address}}{}
\item{\code{organizations}}{}
\item{\code{facilities}}{}
\item{\code{groups}}{}
\item{\code{rdf_type_name}}{}
}}
\keyword{datasets}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cairo--.R
\name{cairo_text_extents}
\alias{cairo_text_extents}
\title{cairo_text_extents}
\usage{
cairo_text_extents(cr, utf8, extents)
}
\arguments{
\item{cr}{[\code{cairo_t *}] a #cairo_t}
\item{utf8}{[\code{char *}] a NUL-terminated string of text encoded in UTF-8, or \%NULL}
\item{extents}{[\code{cairo_text_extents_t *}] a #cairo_text_extents_t object into which the results will be stored}
}
\description{
Gets the extents for a string of text. The extents describe a
user-space rectangle that encloses the "inked" portion of the text,
(as it would be drawn by cairo_show_text()). Additionally, the
x_advance and y_advance values indicate the amount by which the
current point would be advanced by cairo_show_text().
}
\details{
Note that whitespace characters do not directly contribute to the
size of the rectangle (extents.width and extents.height). They do
contribute indirectly by changing the position of non-whitespace
characters. In particular, trailing whitespace characters are
likely to not affect the size of the rectangle, though they will
affect the x_advance and y_advance values.
Since: 1.0
C function prototype: \code{void cairo_text_extents (cairo_t *cr, const char *utf8, cairo_text_extents_t *extents)}
}
\seealso{
Other cairo--:
\code{\link{cairo_append_path}()},
\code{\link{cairo_arc_negative}()},
\code{\link{cairo_arc}()},
\code{\link{cairo_clip_extents}()},
\code{\link{cairo_clip_preserve}()},
\code{\link{cairo_clip}()},
\code{\link{cairo_close_path}()},
\code{\link{cairo_copy_page}()},
\code{\link{cairo_copy_path_flat}()},
\code{\link{cairo_copy_path}()},
\code{\link{cairo_create}()},
\code{\link{cairo_curve_to}()},
\code{\link{cairo_device_to_user_distance}()},
\code{\link{cairo_device_to_user}()},
\code{\link{cairo_fill_extents}()},
\code{\link{cairo_fill_preserve}()},
\code{\link{cairo_fill}()},
\code{\link{cairo_font_extents}()},
\code{\link{cairo_get_antialias}()},
\code{\link{cairo_get_current_point}()},
\code{\link{cairo_get_dash_count}()},
\code{\link{cairo_get_dash}()},
\code{\link{cairo_get_fill_rule}()},
\code{\link{cairo_get_font_face}()},
\code{\link{cairo_get_font_matrix}()},
\code{\link{cairo_get_group_target}()},
\code{\link{cairo_get_line_cap}()},
\code{\link{cairo_get_line_join}()},
\code{\link{cairo_get_line_width}()},
\code{\link{cairo_get_matrix}()},
\code{\link{cairo_get_miter_limit}()},
\code{\link{cairo_get_operator}()},
\code{\link{cairo_get_source}()},
\code{\link{cairo_get_target}()},
\code{\link{cairo_get_tolerance}()},
\code{\link{cairo_has_current_point}()},
\code{\link{cairo_identity_matrix}()},
\code{\link{cairo_in_clip}()},
\code{\link{cairo_in_fill}()},
\code{\link{cairo_in_stroke}()},
\code{\link{cairo_line_to}()},
\code{\link{cairo_mask_surface}()},
\code{\link{cairo_mask}()},
\code{\link{cairo_move_to}()},
\code{\link{cairo_new_path}()},
\code{\link{cairo_new_sub_path}()},
\code{\link{cairo_paint_with_alpha}()},
\code{\link{cairo_paint}()},
\code{\link{cairo_path_extents}()},
\code{\link{cairo_pop_group_to_source}()},
\code{\link{cairo_pop_group}()},
\code{\link{cairo_push_group_with_content}()},
\code{\link{cairo_push_group}()},
\code{\link{cairo_rectangle}()},
\code{\link{cairo_rel_curve_to}()},
\code{\link{cairo_rel_line_to}()},
\code{\link{cairo_rel_move_to}()},
\code{\link{cairo_reset_clip}()},
\code{\link{cairo_restore}()},
\code{\link{cairo_rotate}()},
\code{\link{cairo_save}()},
\code{\link{cairo_scale}()},
\code{\link{cairo_select_font_face}()},
\code{\link{cairo_set_antialias}()},
\code{\link{cairo_set_dash}()},
\code{\link{cairo_set_fill_rule}()},
\code{\link{cairo_set_font_face}()},
\code{\link{cairo_set_font_matrix}()},
\code{\link{cairo_set_font_size}()},
\code{\link{cairo_set_line_cap}()},
\code{\link{cairo_set_line_join}()},
\code{\link{cairo_set_line_width}()},
\code{\link{cairo_set_matrix}()},
\code{\link{cairo_set_miter_limit}()},
\code{\link{cairo_set_operator}()},
\code{\link{cairo_set_source_rgba}()},
\code{\link{cairo_set_source_rgb}()},
\code{\link{cairo_set_source_surface}()},
\code{\link{cairo_set_source}()},
\code{\link{cairo_set_tolerance}()},
\code{\link{cairo_show_page}()},
\code{\link{cairo_show_text}()},
\code{\link{cairo_status}()},
\code{\link{cairo_stroke_extents}()},
\code{\link{cairo_stroke_preserve}()},
\code{\link{cairo_stroke}()},
\code{\link{cairo_tag_begin}()},
\code{\link{cairo_tag_end}()},
\code{\link{cairo_text_path}()},
\code{\link{cairo_transform}()},
\code{\link{cairo_translate}()},
\code{\link{cairo_user_to_device_distance}()},
\code{\link{cairo_user_to_device}()}
}
\concept{cairo--}
| /man/cairo_text_extents.Rd | permissive | coolbutuseless/cairocore | R | false | true | 4,648 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cairo--.R
\name{cairo_text_extents}
\alias{cairo_text_extents}
\title{cairo_text_extents}
\usage{
cairo_text_extents(cr, utf8, extents)
}
\arguments{
\item{cr}{[\code{cairo_t *}] a #cairo_t}
\item{utf8}{[\code{char *}] a NUL-terminated string of text encoded in UTF-8, or \%NULL}
\item{extents}{[\code{cairo_text_extents_t *}] a #cairo_text_extents_t object into which the results will be stored}
}
\description{
Gets the extents for a string of text. The extents describe a
user-space rectangle that encloses the "inked" portion of the text,
(as it would be drawn by cairo_show_text()). Additionally, the
x_advance and y_advance values indicate the amount by which the
current point would be advanced by cairo_show_text().
}
\details{
Note that whitespace characters do not directly contribute to the
size of the rectangle (extents.width and extents.height). They do
contribute indirectly by changing the position of non-whitespace
characters. In particular, trailing whitespace characters are
likely to not affect the size of the rectangle, though they will
affect the x_advance and y_advance values.
Since: 1.0
C function prototype: \code{void cairo_text_extents (cairo_t *cr, const char *utf8, cairo_text_extents_t *extents)}
}
\seealso{
Other cairo--:
\code{\link{cairo_append_path}()},
\code{\link{cairo_arc_negative}()},
\code{\link{cairo_arc}()},
\code{\link{cairo_clip_extents}()},
\code{\link{cairo_clip_preserve}()},
\code{\link{cairo_clip}()},
\code{\link{cairo_close_path}()},
\code{\link{cairo_copy_page}()},
\code{\link{cairo_copy_path_flat}()},
\code{\link{cairo_copy_path}()},
\code{\link{cairo_create}()},
\code{\link{cairo_curve_to}()},
\code{\link{cairo_device_to_user_distance}()},
\code{\link{cairo_device_to_user}()},
\code{\link{cairo_fill_extents}()},
\code{\link{cairo_fill_preserve}()},
\code{\link{cairo_fill}()},
\code{\link{cairo_font_extents}()},
\code{\link{cairo_get_antialias}()},
\code{\link{cairo_get_current_point}()},
\code{\link{cairo_get_dash_count}()},
\code{\link{cairo_get_dash}()},
\code{\link{cairo_get_fill_rule}()},
\code{\link{cairo_get_font_face}()},
\code{\link{cairo_get_font_matrix}()},
\code{\link{cairo_get_group_target}()},
\code{\link{cairo_get_line_cap}()},
\code{\link{cairo_get_line_join}()},
\code{\link{cairo_get_line_width}()},
\code{\link{cairo_get_matrix}()},
\code{\link{cairo_get_miter_limit}()},
\code{\link{cairo_get_operator}()},
\code{\link{cairo_get_source}()},
\code{\link{cairo_get_target}()},
\code{\link{cairo_get_tolerance}()},
\code{\link{cairo_has_current_point}()},
\code{\link{cairo_identity_matrix}()},
\code{\link{cairo_in_clip}()},
\code{\link{cairo_in_fill}()},
\code{\link{cairo_in_stroke}()},
\code{\link{cairo_line_to}()},
\code{\link{cairo_mask_surface}()},
\code{\link{cairo_mask}()},
\code{\link{cairo_move_to}()},
\code{\link{cairo_new_path}()},
\code{\link{cairo_new_sub_path}()},
\code{\link{cairo_paint_with_alpha}()},
\code{\link{cairo_paint}()},
\code{\link{cairo_path_extents}()},
\code{\link{cairo_pop_group_to_source}()},
\code{\link{cairo_pop_group}()},
\code{\link{cairo_push_group_with_content}()},
\code{\link{cairo_push_group}()},
\code{\link{cairo_rectangle}()},
\code{\link{cairo_rel_curve_to}()},
\code{\link{cairo_rel_line_to}()},
\code{\link{cairo_rel_move_to}()},
\code{\link{cairo_reset_clip}()},
\code{\link{cairo_restore}()},
\code{\link{cairo_rotate}()},
\code{\link{cairo_save}()},
\code{\link{cairo_scale}()},
\code{\link{cairo_select_font_face}()},
\code{\link{cairo_set_antialias}()},
\code{\link{cairo_set_dash}()},
\code{\link{cairo_set_fill_rule}()},
\code{\link{cairo_set_font_face}()},
\code{\link{cairo_set_font_matrix}()},
\code{\link{cairo_set_font_size}()},
\code{\link{cairo_set_line_cap}()},
\code{\link{cairo_set_line_join}()},
\code{\link{cairo_set_line_width}()},
\code{\link{cairo_set_matrix}()},
\code{\link{cairo_set_miter_limit}()},
\code{\link{cairo_set_operator}()},
\code{\link{cairo_set_source_rgba}()},
\code{\link{cairo_set_source_rgb}()},
\code{\link{cairo_set_source_surface}()},
\code{\link{cairo_set_source}()},
\code{\link{cairo_set_tolerance}()},
\code{\link{cairo_show_page}()},
\code{\link{cairo_show_text}()},
\code{\link{cairo_status}()},
\code{\link{cairo_stroke_extents}()},
\code{\link{cairo_stroke_preserve}()},
\code{\link{cairo_stroke}()},
\code{\link{cairo_tag_begin}()},
\code{\link{cairo_tag_end}()},
\code{\link{cairo_text_path}()},
\code{\link{cairo_transform}()},
\code{\link{cairo_translate}()},
\code{\link{cairo_user_to_device_distance}()},
\code{\link{cairo_user_to_device}()}
}
\concept{cairo--}
|
# Course project 1: plot the three energy sub-metering series for
# 2007-02-01/02 from the UCI "household power consumption" dataset.
library(data.table)
library(tidyr)
library(lubridate)
# Download the zipped dataset once; skip when it is already present.
url <- c("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip")
zip_download <- c("power_consumption.zip")
if(!file.exists(zip_download)) {
  download.file(url, zip_download)
}
# Extract once.  (Bug fix: the original tested the undefined variable
# `folderName`, which aborted the script on a fresh run.)
fileName <- c("household_power_consumption.txt")
if(!file.exists(fileName)) {
  unzip (zip_download)
}
# Read only the first 10 rows to infer column classes cheaply
dataSubset <- read.csv(fileName, header = TRUE, sep = ";", nrows = 10)
# sapply (not lapply) so colClasses receives a character vector
classes <- sapply(dataSubset, class)
# Full read, re-using the inferred classes; "?" marks missing values
data <- read.csv("household_power_consumption.txt", header = TRUE, sep = ";", colClasses = classes, na.strings = "?")
# data.table for faster subsetting
data <- as.data.table(data)
# Parse the Date column for reliable comparison
data$Date <- as.Date(data$Date, "%d/%m/%Y")
# Keep only the two days of interest
datesSubset <- c("2007-02-02","2007-02-01")
datesSubset <- as.Date(datesSubset, "%Y-%m-%d")
data <- data[Date %in% datesSubset]
# Tidy: collapse the three sub-meter columns into key/value pairs
data <- gather(data, sub_meter, sub_meter_reading, 7:9)
# Combine Date and Time into a POSIXct timestamp.
# (Bug fix: the original called dplyr::mutate() without loading dplyr.)
data$DateTime <- paste(data$Date, data$Time)
data$DateTime <- ymd_hms(data$DateTime)
# One plot on screen, with margins leaving room for the legend
par(mfrow = c(1,1))
par(mar = c(5,3,3,1))
# Empty frame first, then one line per sub-meter
with(data, plot(DateTime, sub_meter_reading, type="n", xlab = "",ylab="Energy sub metering"))
with(subset(data,sub_meter=="Sub_metering_1"), points(DateTime, sub_meter_reading, type="l"))
with(subset(data,sub_meter=="Sub_metering_2"), points(DateTime, sub_meter_reading, type="l", col = "Red"))
with(subset(data,sub_meter=="Sub_metering_3"), points(DateTime, sub_meter_reading, type="l", col = "Blue"))
legend("topright", pch = "-", col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
## Copy my plot to a PNG file
dev.copy(png, file = "plot3.png")
## Don't forget to close the PNG device!
dev.off() | /plot3.R | no_license | RobertCazaciuc/ExploratoryDataWeek1 | R | false | false | 2,092 | r | #Script for course project 1
#load the data table library
library(data.table)
library(tidyr)
library(lubridate)
url <- c("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip")
zip_download <- c("power_consumption.zip")
if(!file.exists(zip_download)) {
download.file(url, zip_download)
}
fileName <- c("household_power_consumption.txt")
# Unzip only when the extracted data file is missing.
# (Bug fix: the original tested the undefined variable `folderName`.)
if(!file.exists(fileName)) {
unzip (zip_download)
}
#Read the first 10 lines
dataSubset <- read.csv(fileName, header = TRUE, sep = ";", nrows = 10)
#get the classes of the columns
classes <- lapply(dataSubset, class)
#read the data in, apply colCasses and specify null values as "?"
data <- read.csv("household_power_consumption.txt", header = TRUE, sep = ";", colClasses = classes, na.strings = "?")
#transform to data.table for faster
data <- as.data.table(data)
#transform the Date into Date for better manipulation
data$Date <- as.Date(data$Date, "%d/%m/%Y")
#Subset on the required dates
datesSubset <- c("2007-02-02","2007-02-01")
datesSubset <- as.Date(datesSubset, "%Y-%m-%d")
data <- data[data[, Date %in% datesSubset]]
#Apply tidy data principles and collapse the three sub-meter columns
data <- gather(data, sub_meter, sub_meter_reading, 7:9)
# Build the DateTime column with base R.
# (Bug fix: mutate() is from dplyr, which this script never loads.)
data$DateTime <- paste(data$Date, data$Time)
data$DateTime <- ymd_hms(data$DateTime)
#Set the screen display to 1 graph
par(mfrow = c(1,1))
#Set the margins
par(mar = c(5,3,3,1))
#Display the graph
with(data, plot(DateTime, sub_meter_reading, type="n", xlab = "",ylab="Energy sub metering"))
with(subset(data,sub_meter=="Sub_metering_1"), points(DateTime, sub_meter_reading, type="l"))
with(subset(data,sub_meter=="Sub_metering_2"), points(DateTime, sub_meter_reading, type="l", col = "Red"))
with(subset(data,sub_meter=="Sub_metering_3"), points(DateTime, sub_meter_reading, type="l", col = "Blue"))
legend("topright", pch = "-", col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
## Copy my plot to a PNG file
dev.copy(png, file = "plot3.png")
## Don't forget to close the PNG device!
dev.off() |
# Install packages only when they are missing, so repeated runs of the
# script do not reinstall (the original installed unconditionally).
if (!requireNamespace("rvest", quietly = TRUE)) install.packages("rvest")
if (!requireNamespace("dplyr", quietly = TRUE)) install.packages("dplyr")
# Use libraries
library(rvest)
library(dplyr)
# HTTP GET the page listing Malaysian coronavirus case counts
web_page <- read_html("https://www.doctoroncall.com.my/coronavirus")
# One headline figure per status; the CSS classes encode the status
# colour on the page (pink/green/grey).
# NOTE(review): these selectors are tied to the site's markup and will
# silently return NA if the page layout changes.
confirmed_cases <- web_page %>% html_node(".stats-number-pink") %>% html_text()
discharged_cases <- web_page %>% html_node(".stats-number-green") %>% html_text()
death_cases <- web_page %>% html_node(".stats-number-grey") %>% html_text()
# Tabulate result as a two-column tibble (label, count)
data <- c("Confirmed Cases", confirmed_cases, "Discharged Cases", discharged_cases, "Death Cases", death_cases)
coronavirus_data_frame <- matrix(data, ncol = 2, byrow = TRUE) %>% as_tibble()
coronavirus_data_frame
| /scraper.R | no_license | hafizio/r-web-scraping | R | false | false | 721 | r | # Install packages
# NOTE(review): install.packages() runs unconditionally on every
# execution; guard with requireNamespace() to avoid reinstalling.
install.packages("rvest")
install.packages("dplyr")
# Use libraries
library(rvest)
library(dplyr)
# HTTP GET the page listing Malaysian coronavirus case counts
web_page <- read_html("https://www.doctoroncall.com.my/coronavirus")
# One headline figure per case status; the CSS classes encode the
# status colour used on the page (pink/green/grey).
# NOTE(review): selectors are tied to the site's markup — confirm they
# still match before relying on the output.
confirmed_cases <- web_page %>% html_node(".stats-number-pink") %>% html_text()
discharged_cases <- web_page %>% html_node(".stats-number-green") %>% html_text()
death_cases <- web_page %>% html_node(".stats-number-grey") %>% html_text()
# Tabulate result as a two-column tibble (label, count)
data <- c("Confirmed Cases", confirmed_cases, "Discharged Cases", discharged_cases, "Death Cases", death_cases)
coronavirus_data_frame <- matrix(data, ncol = 2, byrow = TRUE) %>% as_tibble()
coronavirus_data_frame
|
# Integration tests for geniusr's album endpoints.  Every test talks to
# the live Genius API, so each one is skipped on CRAN and when offline.
# NOTE(review): expect_is() is deprecated in testthat 3e; these would
# become expect_s3_class() on an edition upgrade.
context("get_albums")
# set album id
# (Genius album 337082 is the shared fixture for all tests below.)
id <- 337082
# get_album() should return a parsed "genius_album" response whose id
# matches the one requested.
test_that("get_album returns the right output formats", {
  skip_on_cran()
  skip_if_offline()
  output <- get_album(album_id = id)
  expect_is(output, "genius_album")
  expect_equal(output$content$id, id)
})
# album_to_df() should flatten a genius_album into a tibble keyed by
# the same album id.
test_that("album_to_df returns the right output formats", {
  skip_on_cran()
  skip_if_offline()
  output <- album_to_df(get_album(album_id = id))
  expect_is(output, "tbl_df")
  expect_is(output, "tbl")
  expect_is(output, "data.frame")
  expect_setequal(output$album_id, id)
})
# get_album_df() and its alias get_album_meta() should both return a
# tibble for the requested album.
test_that("get_album_df returns the right output formats", {
  skip_on_cran()
  skip_if_offline()
  output <- get_album_df(album_id = id)
  expect_is(output, "tbl_df")
  expect_is(output, "tbl")
  expect_is(output, "data.frame")
  expect_setequal(output$album_id, id)
  output <- get_album_meta(album_id = id)
  expect_is(output, "tbl_df")
  expect_is(output, "tbl")
  expect_is(output, "data.frame")
  expect_setequal(output$album_id, id)
})
# Tracklist lookup by id, plus its alias scrape_tracklist(), should
# return tibbles tagged with the album id.
test_that("get_album_tracklist_id returns the right output formats", {
  skip_on_cran()
  skip_if_offline()
  output <- get_album_tracklist_id(album_id = id)
  expect_is(output, "tbl_df")
  expect_is(output, "tbl")
  expect_is(output, "data.frame")
  expect_setequal(output$album_id, id)
  output <- scrape_tracklist(album_id = id)
  expect_is(output, "tbl_df")
  expect_is(output, "tbl")
  expect_is(output, "data.frame")
  expect_setequal(output$album_id, id)
})
# Tracklist lookup by artist/album name search should also yield a tibble.
test_that("get_album_tracklist_search returns the right output formats", {
  skip_on_cran()
  skip_if_offline()
  output <- get_album_tracklist_search(artist_name = "Kendrick Lamar",
                                        album_name = "DAMN.")
  expect_is(output, "tbl_df")
  expect_is(output, "tbl")
  expect_is(output, "data.frame")
})
| /tests/testthat/test-get_albums.R | no_license | redapemusic35/geniusr | R | false | false | 1,802 | r | context("get_albums")
# set album id
id <- 337082
test_that("get_album returns the right output formats", {
skip_on_cran()
skip_if_offline()
output <- get_album(album_id = id)
expect_is(output, "genius_album")
expect_equal(output$content$id, id)
})
test_that("album_to_df returns the right output formats", {
skip_on_cran()
skip_if_offline()
output <- album_to_df(get_album(album_id = id))
expect_is(output, "tbl_df")
expect_is(output, "tbl")
expect_is(output, "data.frame")
expect_setequal(output$album_id, id)
})
test_that("get_album_df returns the right output formats", {
skip_on_cran()
skip_if_offline()
output <- get_album_df(album_id = id)
expect_is(output, "tbl_df")
expect_is(output, "tbl")
expect_is(output, "data.frame")
expect_setequal(output$album_id, id)
output <- get_album_meta(album_id = id)
expect_is(output, "tbl_df")
expect_is(output, "tbl")
expect_is(output, "data.frame")
expect_setequal(output$album_id, id)
})
test_that("get_album_tracklist_id returns the right output formats", {
skip_on_cran()
skip_if_offline()
output <- get_album_tracklist_id(album_id = id)
expect_is(output, "tbl_df")
expect_is(output, "tbl")
expect_is(output, "data.frame")
expect_setequal(output$album_id, id)
output <- scrape_tracklist(album_id = id)
expect_is(output, "tbl_df")
expect_is(output, "tbl")
expect_is(output, "data.frame")
expect_setequal(output$album_id, id)
})
test_that("get_album_tracklist_search returns the right output formats", {
skip_on_cran()
skip_if_offline()
output <- get_album_tracklist_search(artist_name = "Kendrick Lamar",
album_name = "DAMN.")
expect_is(output, "tbl_df")
expect_is(output, "tbl")
expect_is(output, "data.frame")
})
|
######## Helper functions###################
#' @method extractVars extCode
#' @rdname internalFunctions
#' @export
# Names of every real definition in the extra code; rows without a
# precision are pre-registered loop variables and are excluded.
extractVars.extCode <- function(x) {
    defined <- !is.na(x$varDef$precision)
    x$varDef$varName[defined]
}
############# extCode definition ##############
# Build an empty `extCode` object.  `opt` is a list of character vectors,
# one per optimisation scope; every named variable is pre-registered at
# its scope's level (level 1 is reserved for the base scope).
createExtCode <- function(opt) {
    n_levels = length(opt) + 1
    res = structure(list(varDef = NULL, opt = opt, levelNum = n_levels,
        valueRecord = NULL), class = "extCode")
    for (lvl in seq_along(opt)) {
        for (v in opt[[lvl]]) {
            res = addVarDef_inLevel(res, NA, v, NA, lvl + 1)
        }
    }
    res
}
# Deepest level at which any variable in `var` is defined; unknown
# names default to the base level (1).
findVarLevel <- function(extCode, var) {
    matched = which(extCode$varDef$varName %in% var)
    if (length(matched) == 0) {
        return(1)
    }
    max(extCode$varDef[matched, ]$level)
}
# Append one definition row (precision/name/expression/level) to the
# extCode bookkeeping table and return the updated object.
addVarDef_inLevel <- function(extCode, precision, varName, varDef, level) {
    new_row = data.frame(precision = precision, varName = varName,
        varDef = varDef, level = level, stringsAsFactors = FALSE)
    extCode$varDef = rbind(extCode$varDef, new_row)
    extCode
}
# Register a new variable definition.  Its level is the deepest level of
# any variable referenced by the defining expression (base level 1 when
# it references none).
# Improvement: delegate the level lookup to findVarLevel() instead of
# duplicating its which()/max() logic inline.
addVarDef <- function(extCode, precision, varName, varDef) {
    vars = extractVars(varDef)
    level = findVarLevel(extCode, vars)
    extCode = addVarDef_inLevel(extCode, precision, varName, varDef, level)
    extCode
}
# Render one definition row as a C declaration ("type name=expr;").
# Rows without a precision are pre-registered loop variables and
# contribute no code (NULL).
finalizeExtCode_hidden <- function(curCode) {
    if (is.na(curCode$precision)) {
        return(NULL)
    }
    paste0(curCode$precision, " ", curCode$varName, "=", curCode$varDef, ";")
}
# Render every registered definition, grouped by level.  Returns a list
# named L0, L1, ... (one slot per level) of C declaration strings;
# levels with no definitions stay NULL.
finalizeExtCode <- function(extCode) {
    n_levels = extCode$levelNum
    out = vector("list", length = n_levels)
    names(out) = paste0("L", seq_len(n_levels) - 1)
    if (!is.null(extCode) && !is.null(extCode$varDef)) {
        for (k in seq_len(nrow(extCode$varDef))) {
            def_row = extCode$varDef[k, ]
            decl = finalizeExtCode_hidden(def_row)
            if (!is.null(decl)) {
                out[[def_row$level]] = c(out[[def_row$level]], decl)
            }
        }
    }
    out
}
# Does the extra code contain a definition named `var`?
hasVar.extCode <- function(x, var) {
    is.element(var, x$varDef$varName)
}
# Return the name of a temporary variable holding `varDef` at the given
# precision, re-using an existing definition when an identical one is
# already registered so repeated sub-expressions are emitted only once.
# Returns list(var = variable name, extCode = possibly updated extCode).
getVarFromExtCode <- function(extCode, precision, varDef) {
    # Exact match on both the precision and the defining expression
    ind = which(extCode$varDef$precision == precision & extCode$varDef$varDef ==
        varDef)
    if (length(ind) > 1) {
        # Two identical definitions should never coexist
        stop("Redundant variable definition has been found")
    }
    if (length(ind) == 1) {
        # Re-use the previously generated temporary
        varName = extCode$varDef[ind, "varName"]
    }
    if (length(ind) == 0) {
        # No match: mint a fresh temporary name and register it
        varName = GPUVar$getTmpVar()
        extCode = addVarDef(extCode, precision, varName, varDef)
    }
    return(list(var = varName, extCode = extCode))
}
# Total number of code levels tracked by this extCode object.
getLevelNum <- function(extCode) {
    extCode[["levelNum"]]
}
# Remove the definition of `var` from the extra code, but only when no
# remaining definition still references it; otherwise the object is
# returned unchanged so the dependency is not broken.
removeRedundantVar <- function(extCode, var) {
    varNames = extCode$varDef$varName
    # Candidate table with `var`'s own definition row(s) dropped
    newCode = extCode$varDef[varNames != var, ]
    allVarDef = newCode$varDef
    # Variables referenced by the definitions that would remain
    relatedVars = unique(unlist(lapply(allVarDef, extractVars)))
    if (var %in% relatedVars) {
        # Still referenced elsewhere: keep everything as-is
        return(extCode)
    } else {
        extCode$varDef = newCode
        return(extCode)
    }
}
# Record a variable definition in a side channel of the extra code.
# Records are bookkeeping only and never appear in the output of
# finalizeExtCode.
addValueRecord <- function(extCode, value) {
    extCode$valueRecord = append(extCode$valueRecord, value)
    extCode
}
# Collect the unique set of variables referenced by any value record.
# Bug fix: the original evaluated `res` unconditionally, so it raised
# "object 'res' not found" when no record existed; return NULL instead.
getAllVarsInRecord <- function(extCode) {
    if (is.null(extCode$valueRecord)) {
        return(NULL)
    }
    unique(as.vector(unlist(lapply(extCode$valueRecord, extractVars))))
}
# Number of variable definitions held in the extra code (0 when the
# object or its definition table is missing).
getVarsNum <- function(extCode) {
    defs = extCode$varDef
    if (is.null(defs)) {
        return(0)
    }
    nrow(defs)
}
###################### Hoist Optimization #################################
# Example usage:
#   opt <- list(c('gpu_k1'))
#   extCode <- createExtCode(opt)
#   extCode <- addVarDef(extCode, 'double', 'a1', 'i+j')
#   extCode <- addVarDef(extCode, 'double', 'a2', 'k+j+a3')
#   extCode <- addVarDef(extCode, 'double', 'a3', 'i+gpu_k1')
#   Exp <- 'gpu_gp_size1_0 * a[(uint)(f+t)]'
#   finalizeExtCode(extCode)
#
# Hoist loop-invariant parts of the C expression `Exp` into temporary
# variables at outer levels, registering each temporary in `extCode`.
# Returns list(value = rewritten expression, extCode = updated extCode).
hoistOpt <- function(extCode, Exp) {
    # Convert to an R expression and expand into additive terms
    code = C_to_R(Exp)
    code = vapply(expandExp(code), Simplify,character(1))
    #code=expandExp(code)
    codeInfo = list()
    baseLevel = c()
    # Decompose the code and find the base level for each code
    for (i in seq_along(code)) {
        codeInfo[[i]] = decomposeCode(extCode, code[i])
        baseLevel = c(baseLevel, max(codeInfo[[i]]$level))
    }
    # Upgrade the level if the code is composed by a single variable and
    # its level is unique
    totalLevel = getLevelNum(extCode)
    for (i in seq_len(totalLevel - 1)) {
        ind = which(baseLevel == i)
        if (length(ind) == 1 && nrow(codeInfo[[ind]]) == 1) {
            codeInfo[[ind]]$level = codeInfo[[ind]]$level + 1
            baseLevel[ind] = baseLevel[ind] + 1
        }
    }
    # For each term, replace every sub-level portion with a hoisted
    # temporary; the portion at the term's own base level stays inline.
    baseRes = vector("list", length = totalLevel)
    for (i in seq_along(codeInfo)) {
        curInfo = codeInfo[[i]]
        curlevels = sort(unique(curInfo$level))
        curBase = baseLevel[i]
        for (curLevel in curlevels) {
            if (curLevel != curBase) {
                # Hoist: name the sub-expression and substitute the name
                varDef = CSimplify(constructCode(curInfo, curLevel))
                res = getVarFromExtCode(extCode, GPUVar$default_int,
                  varDef)
                varName = res$var
                extCode = res$extCode
                curInfo = replaceLevelWithVar(curInfo, varName, curLevel)
            } else {
                baseRes[[curLevel]] = c(baseRes[[curLevel]], constructCode(curInfo,
                  curLevel))
            }
        }
    }
    # Sums that live below the innermost level are themselves hoisted
    # into temporaries referenced from the innermost level.
    for (i in seq_along(baseRes)) {
        if (is.null(baseRes[[i]]))
            next
        if (i != getLevelNum(extCode)) {
            varDef = CSimplify(paste0(baseRes[[i]], collapse = "+"))
            res = getVarFromExtCode(extCode, GPUVar$default_int,
                varDef)
            varName = res$var
            baseRes[[totalLevel]] = c(baseRes[[totalLevel]], varName)
            extCode = res$extCode
        }
    }
    # Final expression: the innermost-level terms summed back together
    finalRes = list()
    finalRes$value = CSimplify(paste0(baseRes[[totalLevel]], collapse = "+"))
    finalRes$extCode = extCode
    finalRes
}
# Replace every term at or below `level` with the single variable `var`
# (joined multiplicatively); terms above `level` are kept.
# Bug fix: the original removed rows with `codeInfo[-ind, ]`; when no row
# matched, `-integer(0)` selected zero rows and silently dropped every
# term.  Logical subsetting keeps them all in that case.
replaceLevelWithVar <- function(codeInfo, var, level) {
    codeInfo = codeInfo[codeInfo$level > level, ]
    newVar = data.frame(level = level, var = var, operator = "*", stringsAsFactors = FALSE)
    codeInfo = rbind(codeInfo, newVar)
    codeInfo
}
# Join every term whose level is at or below `level` into one C
# expression such as "(a)*(b)".  A dangling operator contributed by the
# first selected term is stripped so the expression stays well-formed.
constructCode <- function(codeInfo, level) {
    selected = codeInfo[codeInfo$level <= level, ]
    pieces = character(0)
    for (k in seq_len(nrow(selected))) {
        pieces = c(pieces, selected$operator[k], paste0("(", selected$var[k], ")"))
    }
    if (length(pieces) != 0 && pieces[1] %in% c("*", "/", "+", "-")) {
        pieces = pieces[-1]
    }
    paste0(pieces, collapse = "")
}
# Decompose a C expression (already free of top-level +/- thanks to
# expandExp) into a table of multiplicative terms, one row per term,
# tagged with the extCode level each term belongs to.
# Only `*` is currently decomposed.
# When the expression has several terms, a level holding exactly one
# lonely term is promoted by one so hoisting only creates a temporary
# when it actually saves work.
decomposeCode <- function(extCode, code) {
    code = toExpression(code)
    code = decomposeCode_hidden(extCode, code)
    if (nrow(code) > 1) {
        for (i in seq_len(getLevelNum(extCode) - 1)) {
            ind = which(code$level == i)
            if (length(ind) == 1) {
                code[ind, ]$level = i + 1
            }
        }
    }
    code
}
# Recursive worker for decomposeCode().
# `operator` is the operator attaching this sub-expression to the
# previous term ("" for the first).  Sub-expressions attached by `/`
# are deliberately kept opaque (not decomposed further).
decomposeCode_hidden <- function(extCode, code, operator = "") {
    if (is.call(code)&&operator!="/") {
        func = deparse(code[[1]])
        if (func == "*") {
            # a * b: decompose both sides; the right side attaches with "*"
            left = decomposeCode_hidden(extCode, code[[2]],operator=operator)
            right = decomposeCode_hidden(extCode, code[[3]],operator = func)
            res=rbind(left,right)
            return(res)
        }
        if (func == "-") {
            # Minus here is the unary sign produced by expandExp; fold it
            # into the first term's text.
            res = decomposeCode_hidden(extCode, code[[2]])
            res$var[1] = paste0("-", res$var[1])
            return(res)
        }
        if (func == "(") {
            # Parentheses are transparent
            res = decomposeCode_hidden(extCode, code[[2]],operator=operator)
            return(res)
        }
    }
    # Leaf (or opaque) expression: record it at the level of the deepest
    # variable it references.
    level = findCodeLevel(extCode, code)
    code_char = deparse(code)
    res = data.frame(level = level, var = code_char, operator = operator,
        stringsAsFactors = FALSE)
    return(res)
}
# Obtain the level of the code: the deepest level among the variables
# the expression references (base level 1 when it references none).
findCodeLevel <- function(extCode, code) {
    code = toCharacter(code)
    vars = extractVars(code)
    level = findVarLevel(extCode, vars)
    return(level)
}
# Example input: "gpu_element_dist * (10 * gpu_element_j + gpu_element_i)"
# Expand the parentheses in the expression: distribute `*` over `+`/`-`
# and return a character vector of additive terms whose sum equals the
# original expression (terms subtracted carry a leading "-").
# NOTE(review): a unary minus (length-2 call) would fail at `code[[3]]`;
# upstream input apparently never contains one -- confirm.
expandExp <- function(code) {
    code = toExpression(code)
    # Leaf: a single symbol or constant is already a term
    if (!is.call(code))
        return(deparse(code))
    func = code[[1]]
    # Parentheses are transparent
    if (func == "(")
        return(expandExp(code[[2]]))
    if (deparse(func) %in% c("+", "-", "*")) {
        left = code[[2]]
        right = code[[3]]
        left_exp = expandExp(left)
        right_exp = expandExp(right)
        if (func == "+") {
            # a + b: concatenate the two term lists
            res = c(left_exp, right_exp)
            return(res)
        }
        if (func == "-") {
            # a - b: negate every term of b
            res = c(left_exp, paste0("-", right_exp))
            return(res)
        }
        if (func == "*") {
            # (a1+...) * (b1+...): cross-product of the two term lists
            res = c()
            for (i in seq_along(left_exp)) {
                for (j in seq_along(right_exp)) {
                  res = c(res, paste0(left_exp[i], "*", right_exp[j]))
                }
            }
            return(res)
        }
    }
    # Any other call (indexing, function call, ...) is kept opaque
    return(deparse(code))
}
| /R/extCodeManager.R | no_license | Jiefei-Wang/gpuMagic | R | false | false | 10,001 | r | ######## Helper functions###################
#' @method extractVars extCode
#' @rdname internalFunctions
#' @export
extractVars.extCode <- function(x) {
x$varDef$varName[!is.na(x$varDef$precision)]
}
############# extCode definition##############
createExtCode <- function(opt) {
levelNum = length(opt) + 1
extCode = structure(list(varDef = NULL, opt = opt, levelNum = levelNum,
valueRecord = NULL), class = "extCode")
for (i in seq_along(opt)) {
for (j in seq_along(opt[[i]])) {
extCode = addVarDef_inLevel(extCode, NA, opt[[i]][j], NA, i +
1)
}
}
extCode
}
findVarLevel <- function(extCode, var) {
ind = which(extCode$varDef$varName %in% var)
if (length(ind) == 0)
level = 1 else level = max(extCode$varDef[ind, ]$level)
return(level)
}
addVarDef_inLevel <- function(extCode, precision, varName, varDef, level) {
extCode$varDef = rbind(extCode$varDef, data.frame(precision = precision,
varName = varName, varDef = varDef, level = level, stringsAsFactors = FALSE))
extCode
}
addVarDef <- function(extCode, precision, varName, varDef) {
vars = extractVars(varDef)
ind = which(extCode$varDef$varName %in% vars)
if (length(ind) == 0)
level = 1 else level = max(extCode$varDef[ind, ]$level)
extCode = addVarDef_inLevel(extCode, precision, varName, varDef, level)
extCode
}
finalizeExtCode_hidden <- function(curCode) {
if (!is.na(curCode$precision)) {
return(paste0(curCode$precision, " ", curCode$varName, "=", curCode$varDef,
";"))
} else {
return(NULL)
}
}
finalizeExtCode <- function(extCode) {
levelNum = extCode$levelNum
value = vector("list", length = levelNum)
names(value) = paste0("L", seq_len(levelNum) - 1)
if (!is.null(extCode) && !is.null(extCode$varDef)) {
for (i in seq_len(nrow(extCode$varDef))) {
curCode = extCode$varDef[i, ]
level = curCode$level
res = finalizeExtCode_hidden(curCode)
if (!is.null(res)) {
value[[level]] = c(value[[level]], res)
}
}
}
value
}
hasVar.extCode <- function(x, var) {
var %in% x$varDef$varName
}
getVarFromExtCode <- function(extCode, precision, varDef) {
ind = which(extCode$varDef$precision == precision & extCode$varDef$varDef ==
varDef)
if (length(ind) > 1) {
stop("Redundant variable definition has been found")
}
if (length(ind) == 1) {
varName = extCode$varDef[ind, "varName"]
}
if (length(ind) == 0) {
varName = GPUVar$getTmpVar()
extCode = addVarDef(extCode, precision, varName, varDef)
}
return(list(var = varName, extCode = extCode))
}
getLevelNum <- function(extCode) {
extCode$levelNum
}
removeRedundantVar <- function(extCode, var) {
varNames = extCode$varDef$varName
newCode = extCode$varDef[varNames != var, ]
allVarDef = newCode$varDef
relatedVars = unique(unlist(lapply(allVarDef, extractVars)))
if (var %in% relatedVars) {
return(extCode)
} else {
extCode$varDef = newCode
return(extCode)
}
}
# Add a variable definition record in extra code in a special place The
# record will not affect the output of finalizeExtCode
addValueRecord <- function(extCode, value) {
extCode$valueRecord = c(extCode$valueRecord, value)
extCode
}
# Collect the unique set of variables referenced by any value record.
# Bug fix: the original evaluated `res` unconditionally, so it raised
# "object 'res' not found" when no record existed; return NULL instead.
getAllVarsInRecord <- function(extCode) {
    if (is.null(extCode$valueRecord)) {
        return(NULL)
    }
    unique(as.vector(unlist(lapply(extCode$valueRecord, extractVars))))
}
# Get the number of variable definition in extra code
getVarsNum <- function(extCode) {
if (is.null(extCode) || is.null(extCode$varDef))
return(0)
return(nrow(extCode$varDef))
}
###################### Hoist Optimization#################################
###################### opt=list(c('gpu_k1')) extCode=createExtCode(opt)
###################### extCode=addVarDef(extCode,'double','a1','i+j')
###################### extCode=addVarDef(extCode,'double','a2','k+j+a3')
###################### extCode=addVarDef(extCode,'double','a3','i+gpu_k1')
###################### Exp='gpu_gp_size1_0 * a[(uint)(f+t)]' finalizeExtCode(extCode)
hoistOpt <- function(extCode, Exp) {
code = C_to_R(Exp)
code = vapply(expandExp(code), Simplify,character(1))
#code=expandExp(code)
codeInfo = list()
baseLevel = c()
# Decompose the code and find the base level for each code
for (i in seq_along(code)) {
codeInfo[[i]] = decomposeCode(extCode, code[i])
baseLevel = c(baseLevel, max(codeInfo[[i]]$level))
}
# Upgrade the level if the code is composed by a single variable and
# its level is unique
totalLevel = getLevelNum(extCode)
for (i in seq_len(totalLevel - 1)) {
ind = which(baseLevel == i)
if (length(ind) == 1 && nrow(codeInfo[[ind]]) == 1) {
codeInfo[[ind]]$level = codeInfo[[ind]]$level + 1
baseLevel[ind] = baseLevel[ind] + 1
}
}
baseRes = vector("list", length = totalLevel)
for (i in seq_along(codeInfo)) {
curInfo = codeInfo[[i]]
curlevels = sort(unique(curInfo$level))
curBase = baseLevel[i]
for (curLevel in curlevels) {
if (curLevel != curBase) {
varDef = CSimplify(constructCode(curInfo, curLevel))
res = getVarFromExtCode(extCode, GPUVar$default_int,
varDef)
varName = res$var
extCode = res$extCode
curInfo = replaceLevelWithVar(curInfo, varName, curLevel)
} else {
baseRes[[curLevel]] = c(baseRes[[curLevel]], constructCode(curInfo,
curLevel))
}
}
}
for (i in seq_along(baseRes)) {
if (is.null(baseRes[[i]]))
next
if (i != getLevelNum(extCode)) {
varDef = CSimplify(paste0(baseRes[[i]], collapse = "+"))
res = getVarFromExtCode(extCode, GPUVar$default_int,
varDef)
varName = res$var
baseRes[[totalLevel]] = c(baseRes[[totalLevel]], varName)
extCode = res$extCode
}
}
finalRes = list()
finalRes$value = CSimplify(paste0(baseRes[[totalLevel]], collapse = "+"))
finalRes$extCode = extCode
finalRes
}
# Remove the variable which is less than or equal to the given level
# Add a variable in the given level
# Bug fix: the original removed rows with `codeInfo[-ind, ]`; when no
# row matched, `-integer(0)` selected zero rows and silently dropped
# every term.  Logical subsetting keeps them all in that case.
replaceLevelWithVar <- function(codeInfo, var, level) {
    codeInfo = codeInfo[codeInfo$level > level, ]
    newVar = data.frame(level = level, var = var, operator = "*", stringsAsFactors = FALSE)
    codeInfo = rbind(codeInfo, newVar)
    codeInfo
}
# Combine the variables into one variable The variables should in the
# level that is less than or equal to the given level
constructCode <- function(codeInfo, level) {
ind = which(codeInfo$level <= level)
codeInfo = codeInfo[ind, ]
res = c()
for (i in seq_len(length(ind))) {
curInfo = codeInfo[i, ]
res = c(res, curInfo$operator, paste0("(", curInfo$var, ")"))
}
if (length(res) != 0 && res[1] %in% c("*", "/", "+", "-")) {
res = res[-1]
}
paste0(res, collapse = "")
}
# Decompose the code into different level The code should not be able
# to separate by +,- operator
# The current supported decompose function is *
decomposeCode <- function(extCode, code) {
code = toExpression(code)
code = decomposeCode_hidden(extCode, code)
if (nrow(code) > 1) {
for (i in seq_len(getLevelNum(extCode) - 1)) {
ind = which(code$level == i)
if (length(ind) == 1) {
code[ind, ]$level = i + 1
}
}
}
code
}
decomposeCode_hidden <- function(extCode, code, operator = "") {
if (is.call(code)&&operator!="/") {
func = deparse(code[[1]])
if (func == "*") {
left = decomposeCode_hidden(extCode, code[[2]],operator=operator)
right = decomposeCode_hidden(extCode, code[[3]],operator = func)
res=rbind(left,right)
return(res)
}
if (func == "-") {
res = decomposeCode_hidden(extCode, code[[2]])
res$var[1] = paste0("-", res$var[1])
return(res)
}
if (func == "(") {
res = decomposeCode_hidden(extCode, code[[2]],operator=operator)
return(res)
}
}
level = findCodeLevel(extCode, code)
code_char = deparse(code)
res = data.frame(level = level, var = code_char, operator = operator,
stringsAsFactors = FALSE)
return(res)
}
# Obtain the level of the code
findCodeLevel <- function(extCode, code) {
code = toCharacter(code)
vars = extractVars(code)
level = findVarLevel(extCode, vars)
return(level)
}
#code="gpu_element_dist * (10 * gpu_element_j + gpu_element_i)"
# Expand the parathesis in the expression
expandExp <- function(code) {
code = toExpression(code)
if (!is.call(code))
return(deparse(code))
func = code[[1]]
if (func == "(")
return(expandExp(code[[2]]))
if (deparse(func) %in% c("+", "-", "*")) {
left = code[[2]]
right = code[[3]]
left_exp = expandExp(left)
right_exp = expandExp(right)
if (func == "+") {
res = c(left_exp, right_exp)
return(res)
}
if (func == "-") {
res = c(left_exp, paste0("-", right_exp))
return(res)
}
if (func == "*") {
res = c()
for (i in seq_along(left_exp)) {
for (j in seq_along(right_exp)) {
res = c(res, paste0(left_exp[i], "*", right_exp[j]))
}
}
return(res)
}
}
return(deparse(code))
}
|
# Population bar chart per 5-year age bracket, one PDF per Osaka ward.
##################################################
# Data preparation:
# - Estimated-population CSV (5-year brackets) downloaded from the Osaka
#   City website; the Nov/Dec 2017 rows were removed beforehand in Numbers.
# - Keep only the "total of both sexes" rows and write them back out.
library(dplyr)  # bug fix: filter() and %>% were used without loading dplyr
data <- read.csv("20180101.csv", stringsAsFactors=FALSE)
data_total <- data %>% filter(data$男女計=="計")
write.csv(data_total, "20180101_total.csv")
# Re-read with check.names=FALSE because the column names start with
# digits/symbols.  Side effect: `$` cannot be used on those columns;
# wrap the name in backticks when needed.
# (Typo fix: the argument is `stringsAsFactors`, not `stringsAsFactor`.)
data <- read.csv("20180101_total.csv", check.names=FALSE, stringsAsFactors=FALSE)
##################################################
# Bar charts: one PDF per ward.
# (Fix: iterate over the rows actually present instead of hard-coding 24.)
# NOTE(review): quartz() is macOS-only.
for(i in seq_len(nrow(data))){
quartz(type="pdf", file=sprintf("区別5歳階級別推計人口_%d%s.pdf", i, data[i,3]))
.main=paste(data[i,3], " 5歳階級別 推計人口 2018年1月1日現在", sep="")
par(new=TRUE, family="HiraKakuProN-W3", xpd=TRUE)
barplot(as.integer(data[i,6:26]), width=0.9, col=2, xlim=c(1, 21), ylim=c(0, 15000), main=.main, xlab="", ylab="人", names.arg=c("0~4歳","5~9歳","10~14歳","15~19歳","20~24歳","25~29歳","30~34歳","35~39歳","40~44歳","45~49歳","50~54歳","55~59歳","60~64歳","65~69歳","70~74歳","75~79歳","80~84歳","85~89歳","90~94歳","95~99歳","100歳以上"), las=2)
dev.off()
}
| /5歳階級棒グラフ.R | no_license | tadakazu1972/R_tips | R | false | false | 1,589 | r | #5歳階級 人口 棒グラフ
##################################################
# Data preparation
# - Estimated-population CSV (5-year age brackets) from the Osaka City site
# - Nov/Dec 2017 rows were removed beforehand in Numbers
# - Read the file and write out only the "total of both sexes" rows
# NOTE(review): filter() / %>% need dplyr, which is never loaded here.
data <- read.csv("20180101.csv", stringsAsFactors=F)
data_total <- data %>% filter(data$男女計=="計")
write.csv(data_total, "20180101_total.csv")
# Re-read with check.names=F because the column names start with digits;
# side effect: `$` cannot be used on them, wrap names in backticks instead.
# NOTE(review): `stringsAsFactor` is a typo for `stringsAsFactors` (it
# only works via partial argument matching).
data <- read.csv("20180101_total.csv", check.names=F, stringsAsFactor=F)
##################################################
# Bar charts
# Write the 24 ward charts out as PDF files
# NOTE(review): the 24 is hard-coded; seq_len(nrow(data)) would be safer.
# NOTE(review): quartz() is macOS-only.
for(i in 1:24){
quartz(type="pdf", file=sprintf("区別5歳階級別推計人口_%d%s.pdf", i, data[i,3]))
.main=paste(data[i,3], " 5歳階級別 推計人口 2018年1月1日現在", sep="")
par(new=TRUE, family="HiraKakuProN-W3", xpd=TRUE)
barplot(as.integer(data[i,6:26]), width=0.9, col=2, xlim=c(1, 21), ylim=c(0, 15000), main=.main, xlab="", ylab="人", names.arg=c("0~4歳","5~9歳","10~14歳","15~19歳","20~24歳","25~29歳","30~34歳","35~39歳","40~44歳","45~49歳","50~54歳","55~59歳","60~64歳","65~69歳","70~74歳","75~79歳","80~84歳","85~89歳","90~94歳","95~99歳","100歳以上"), las=2)
dev.off()
}
|
button_accept <- "color: rgba(0, 0, 0, 0.5);
background-color: #00a65a;
width: 100%;
margin: auto;
height: 60px;
border-color:#00a65a;
font-size: 30px"
button_decline <- "color: rgba(0, 0, 0, 0.5);
background-color: #963226;
width: 99%;
margin: auto;
height: 60px;
border-color:#963226;
font-size: 30px"
button_finish_shift <- "color: #ffffff;
background-color: #7d0c36;
width: 99%;
margin: auto;
height: 60px;
border-color:#7d0c36;
font-size: 16px"
slider_maxnumshift <- ".irs-bar,
.irs-bar-edge,
.irs-single,
.irs-grid-pol {
background: #242424;
border-color: #242424;
}" | /www/styling.R | no_license | BerriJ/access_app | R | false | false | 1,046 | r | button_accept <- "color: rgba(0, 0, 0, 0.5);
background-color: #00a65a;
width: 100%;
margin: auto;
height: 60px;
border-color:#00a65a;
font-size: 30px"
button_decline <- "color: rgba(0, 0, 0, 0.5);
background-color: #963226;
width: 99%;
margin: auto;
height: 60px;
border-color:#963226;
font-size: 30px"
button_finish_shift <- "color: #ffffff;
background-color: #7d0c36;
width: 99%;
margin: auto;
height: 60px;
border-color:#7d0c36;
font-size: 16px"
slider_maxnumshift <- ".irs-bar,
.irs-bar-edge,
.irs-single,
.irs-grid-pol {
background: #242424;
border-color: #242424;
}" |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/div.R
\name{Terms2formula}
\alias{Terms2formula}
\title{Model formula from vector of terms where intercept is specified as a string}
\usage{
Terms2formula(x, intercept = "(Intercept)", env = parent.frame())
}
\arguments{
\item{x}{Character vector}
\item{intercept}{String to specify intercept}
\item{env}{Parameter to \code{\link{as.formula}}}
}
\value{
formula
}
\description{
Model formula from vector of terms where intercept is specified as a string
}
\examples{
Terms2formula(c("a", "b:c"))
Terms2formula(c("a", "b:c"), NULL)
Terms2formula(c("a", "b:c", "(Intercept)"))
Terms2formula(c("a", "b:c"), "1")
Terms2formula(c("a", "b:c", "1"), "1")
}
\author{
Øyvind Langsrud
}
| /man/Terms2formula.Rd | permissive | olangsrud/experimentalRpackage | R | false | true | 758 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/div.R
\name{Terms2formula}
\alias{Terms2formula}
\title{Model formula from vector of terms where intercept is specified as a string}
\usage{
Terms2formula(x, intercept = "(Intercept)", env = parent.frame())
}
\arguments{
\item{x}{Character vector}
\item{intercept}{String to specify intercept}
\item{env}{Parameter to \code{\link{as.formula}}}
}
\value{
formula
}
\description{
Model formula from vector of terms where intercept is specified as a string
}
\examples{
Terms2formula(c("a", "b:c"))
Terms2formula(c("a", "b:c"), NULL)
Terms2formula(c("a", "b:c", "(Intercept)"))
Terms2formula(c("a", "b:c"), "1")
Terms2formula(c("a", "b:c", "1"), "1")
}
\author{
Øyvind Langsrud
}
|
#' Factor/ordered factor S3 class
#'
#' A [factor] is an integer with attribute `levels`, a character vector. There
#' should be one level for each integer between 1 and `max(x)`.
#' An [ordered] factor has the same properties as a factor, but possesses
#' an extra class that marks levels as having a total ordering.
#'
#' These functions help the base factor and ordered factor classes fit in to
#' the vctrs type system by providing constructors, coercion functions,
#' and casting functions. `new_factor()` and `new_ordered()` are low-level
#' constructors - they only check that types, but not values, are valid, so
#' are for expert use only.
#'
#' @param x Integer values which index in to `levels`.
#' @param levels Character vector of labels.
#' @param ...,class Used for subclasses.
#' @return A factor (`new_factor()`) or an ordered factor (`new_ordered()`).
#' @keywords internal
#' @export
new_factor <- function(x = integer(), levels = character(), ..., class = character()) {
  # Type checks only -- values are not validated (e.g. `x` may index
  # outside `seq_along(levels)`); that is the documented contract.
  stopifnot(is.integer(x))
  stopifnot(is.character(levels))
  structure(
    x,
    levels = levels,
    ...,
    # Subclasses (e.g. "ordered") come before "factor" so their methods
    # take precedence in S3 dispatch.
    class = c(class, "factor")
  )
}
#' @export
#' @rdname new_factor
new_ordered <- function(x = integer(), levels = character()) {
  # An ordered factor is just a factor with the "ordered" class prepended.
  new_factor(x = x, levels = levels, class = "ordered")
}
}
#' @export
vec_proxy.factor <- function(x, ...) {
x
}
#' @export
vec_proxy.ordered <- function(x, ...) {
x
}
#' @export
vec_restore.factor <- function(x, to, ...) {
NextMethod()
}
#' @export
vec_restore.ordered <- function(x, to, ...) {
NextMethod()
}
# Print -------------------------------------------------------------------

#' @export
vec_ptype_full.factor <- function(x, ...) {
  # The full type label embeds a short hash of the levels, so factors
  # with different level sets print as visibly different types.
  sprintf("factor<%s>", hash_label(levels(x)))
}
#' @export
vec_ptype_abbr.factor <- function(x, ...) "fct"
#' @export
vec_ptype_full.ordered <- function(x, ...) {
  sprintf("ordered<%s>", hash_label(levels(x)))
}
#' @export
vec_ptype_abbr.ordered <- function(x, ...) "ord"
# Coerce ------------------------------------------------------------------
# vec_ptype2 uses double dispatch: the generic-per-class below (e.g.
# `vec_ptype2.factor`) dispatches a second time on the other argument.

#' @rdname new_factor
#' @export vec_ptype2.factor
#' @method vec_ptype2 factor
#' @export
vec_ptype2.factor <- function(x, y, ...) UseMethod("vec_ptype2.factor")
# factor + character richens to character (levels are discarded).
#' @method vec_ptype2.character factor
#' @export
vec_ptype2.character.factor <- function(x, y, ...) character()
#' @method vec_ptype2.factor character
#' @export
vec_ptype2.factor.character <- function(x, y, ...) character()
# factor + factor: common type is a factor over the union of the levels.
#' @method vec_ptype2.factor factor
#' @export
vec_ptype2.factor.factor <- function(x, y, ...) new_factor(levels = levels_union(x, y))
#' @rdname new_factor
#' @export vec_ptype2.ordered
#' @method vec_ptype2 ordered
#' @export
vec_ptype2.ordered <- function(x, y, ...) UseMethod("vec_ptype2.ordered")
#' @method vec_ptype2.ordered character
#' @export
vec_ptype2.ordered.character <- function(x, y, ...) character()
#' @method vec_ptype2.character ordered
#' @export
vec_ptype2.character.ordered <- function(x, y, ...) character()
# ordered + (unordered) factor is declared incompatible in both
# directions and raises a type error.
#' @method vec_ptype2.ordered factor
#' @export
vec_ptype2.ordered.factor <- function(x, y, ..., x_arg = "", y_arg = "") {
  stop_incompatible_type(x, y, x_arg = x_arg, y_arg = y_arg)
}
#' @method vec_ptype2.factor ordered
#' @export
vec_ptype2.factor.ordered <- function(x, y, ..., x_arg = "", y_arg = "") {
  stop_incompatible_type(x, y, x_arg = x_arg, y_arg = y_arg)
}
# ordered + ordered: union of levels, keeping the ordered class.
#' @method vec_ptype2.ordered ordered
#' @export
vec_ptype2.ordered.ordered <- function(x, y, ...) new_ordered(levels = levels_union(x, y))
# Cast --------------------------------------------------------------------

#' @rdname new_factor
#' @export vec_cast.factor
#' @method vec_cast factor
#' @export
vec_cast.factor <- function(x, to, ...) {
  # Second dispatch on the class of `x`.
  UseMethod("vec_cast.factor")
}
# Thin wrapper over the shared cast implementation, fixing the target as
# an unordered factor.
fct_cast <- function(x, to, ..., x_arg = "", to_arg = "") {
  fct_cast_impl(x, to, ..., x_arg = x_arg, to_arg = to_arg, ordered = FALSE)
}
# Shared implementation behind factor and ordered-factor casts.
#
# When `to` carries no levels it acts as an unspecified prototype: keep
# the levels of `x` (deriving them from its unique values when `x` has
# none).  Otherwise cast onto `to`'s levels, reporting values that are
# neither a known level nor NA as lossy.
fct_cast_impl <- function(x, to, ..., x_arg = "", to_arg = "", ordered = FALSE) {
  to_levels <- levels(to)

  if (length(to_levels) == 0L) {
    x_levels <- levels(x)
    if (is.null(x_levels)) {
      # `x` is level-less (e.g. a character vector): build the level set
      # from its values, excluding NA from the levels.
      return(factor(as.character(x), levels = unique(x), ordered = ordered, exclude = NA))
    }
    # Reuse the existing levels of `x` verbatim.
    return(factor(as.character(x), levels = x_levels, ordered = ordered, exclude = NULL))
  }

  # A value is lossy when it is neither one of the target levels nor NA.
  lossy <- !(x %in% to_levels) & !is.na(x)
  out <- factor(x, levels = to_levels, ordered = ordered, exclude = NULL)
  maybe_lossy_cast(out, x, to, lossy, x_arg = x_arg, to_arg = to_arg)
}
# Casting factor <- {factor, ordered, character} all funnel through
# fct_cast(); casting character <- factor is implemented natively in C.

#' @export
#' @method vec_cast.factor factor
vec_cast.factor.factor <- function(x, to, ...) {
  fct_cast(x, to, ...)
}
#' @export
#' @method vec_cast.factor ordered
vec_cast.factor.ordered <- function(x, to, ...) {
  fct_cast(x, to, ...)
}
#' @export
#' @method vec_cast.factor character
vec_cast.factor.character <-function(x, to, ...) {
  fct_cast(x, to, ...)
}
#' @export
#' @method vec_cast.character factor
vec_cast.character.factor <- function(x, to, ...) {
  # R-level stub: signals that the native (C) implementation should have
  # been used instead of this method.
  stop_native_implementation("vec_cast.character.factor")
}
#' @rdname new_factor
#' @export vec_cast.ordered
#' @method vec_cast ordered
#' @export
vec_cast.ordered <- function(x, to, ...) {
  # Second dispatch on the class of `x`.
  UseMethod("vec_cast.ordered")
}
# Thin wrapper over the shared cast implementation, fixing the target as
# an ordered factor.
ord_cast <- function(x, to, ..., x_arg = "", to_arg = "") {
  fct_cast_impl(x, to, ..., x_arg = x_arg, to_arg = to_arg, ordered = TRUE)
}
#' @export
#' @method vec_cast.ordered ordered
vec_cast.ordered.ordered <- function(x, to, ...) {
  ord_cast(x, to, ...)
}
#' @export
#' @method vec_cast.ordered factor
vec_cast.ordered.factor <- function(x, to, ...) {
  ord_cast(x, to, ...)
}
#' @export
#' @method vec_cast.ordered character
vec_cast.ordered.character <-function(x, to, ...) {
  ord_cast(x, to, ...)
}
#' @export
#' @method vec_cast.character ordered
vec_cast.character.ordered <- function(x, to, ...) {
  # R-level stub: signals that the native (C) implementation should have
  # been used instead of this method.
  stop_native_implementation("vec_cast.character.ordered")
}
# Math and arithmetic -----------------------------------------------------
# Math and arithmetic are undefined for factors, so both hooks always
# error.  (Ordered factors dispatch here too, through the "factor" class
# in their class vector -- see new_factor().)

#' @export
vec_math.factor <- function(.fn, .x, ...) {
  stop_unsupported(.x, .fn)
}
#' @export
vec_arith.factor <- function(op, x, y, ...) {
  stop_unsupported(x, op)
}
# Helpers -----------------------------------------------------------------

# Short, deterministic label for a set of levels, used in type names
# such as "factor<28cd6>".  An empty level set yields an empty label.
hash_label <- function(x, length = 5) {
  if (length(x) == 0) {
    return("")
  }
  # Can't use hash() currently because it hashes the string pointers
  # for performance, so the values in the test change each time
  substr(digest::digest(x), 1, length)
}
# Combined level set of two factors: levels of `x` first, then any new
# levels of `y`, without duplicates (equivalent to base::union()).
levels_union <- function(x, y) {
  unique(c(levels(x), levels(y)))
}
| /R/type-factor.R | no_license | trinker/vctrs | R | false | false | 6,374 | r | #' Factor/ordered factor S3 class
#'
#' A [factor] is an integer with attribute `levels`, a character vector. There
#' should be one level for each integer between 1 and `max(x)`.
#' An [ordered] factor has the same properties as a factor, but possesses
#' an extra class that marks levels as having a total ordering.
#'
#' These functions help the base factor and ordered factor classes fit in to
#' the vctrs type system by providing constructors, coercion functions,
#' and casting functions. `new_factor()` and `new_ordered()` are low-level
#' constructors - they only check that types, but not values, are valid, so
#' are for expert use only.
#'
#' @param x Integer values which index in to `levels`.
#' @param levels Character vector of labels.
#' @param ...,class Used to for subclasses.
#' @keywords internal
#' @export
new_factor <- function(x = integer(), levels = character(), ..., class = character()) {
stopifnot(is.integer(x))
stopifnot(is.character(levels))
structure(
x,
levels = levels,
...,
class = c(class, "factor")
)
}
#' @export
#' @rdname new_factor
new_ordered <- function(x = integer(), levels = character()) {
new_factor(x = x, levels = levels, class = "ordered")
}
#' @export
vec_proxy.factor <- function(x, ...) {
x
}
#' @export
vec_proxy.ordered <- function(x, ...) {
x
}
#' @export
vec_restore.factor <- function(x, to, ...) {
NextMethod()
}
#' @export
vec_restore.ordered <- function(x, to, ...) {
NextMethod()
}
# Print -------------------------------------------------------------------
#' @export
vec_ptype_full.factor <- function(x, ...) {
paste0("factor<", hash_label(levels(x)), ">")
}
#' @export
vec_ptype_abbr.factor <- function(x, ...) {
"fct"
}
#' @export
vec_ptype_full.ordered <- function(x, ...) {
paste0("ordered<", hash_label(levels(x)), ">")
}
#' @export
vec_ptype_abbr.ordered <- function(x, ...) {
"ord"
}
# Coerce ------------------------------------------------------------------
#' @rdname new_factor
#' @export vec_ptype2.factor
#' @method vec_ptype2 factor
#' @export
vec_ptype2.factor <- function(x, y, ...) UseMethod("vec_ptype2.factor")
#' @method vec_ptype2.character factor
#' @export
vec_ptype2.character.factor <- function(x, y, ...) character()
#' @method vec_ptype2.factor character
#' @export
vec_ptype2.factor.character <- function(x, y, ...) character()
#' @method vec_ptype2.factor factor
#' @export
vec_ptype2.factor.factor <- function(x, y, ...) new_factor(levels = levels_union(x, y))
#' @rdname new_factor
#' @export vec_ptype2.ordered
#' @method vec_ptype2 ordered
#' @export
vec_ptype2.ordered <- function(x, y, ...) UseMethod("vec_ptype2.ordered")
#' @method vec_ptype2.ordered character
#' @export
vec_ptype2.ordered.character <- function(x, y, ...) character()
#' @method vec_ptype2.character ordered
#' @export
vec_ptype2.character.ordered <- function(x, y, ...) character()
#' @method vec_ptype2.ordered factor
#' @export
vec_ptype2.ordered.factor <- function(x, y, ..., x_arg = "", y_arg = "") {
stop_incompatible_type(x, y, x_arg = x_arg, y_arg = y_arg)
}
#' @method vec_ptype2.factor ordered
#' @export
vec_ptype2.factor.ordered <- function(x, y, ..., x_arg = "", y_arg = "") {
stop_incompatible_type(x, y, x_arg = x_arg, y_arg = y_arg)
}
#' @method vec_ptype2.ordered ordered
#' @export
vec_ptype2.ordered.ordered <- function(x, y, ...) new_ordered(levels = levels_union(x, y))
# Cast --------------------------------------------------------------------
#' @rdname new_factor
#' @export vec_cast.factor
#' @method vec_cast factor
#' @export
vec_cast.factor <- function(x, to, ...) {
UseMethod("vec_cast.factor")
}
fct_cast <- function(x, to, ..., x_arg = "", to_arg = "") {
fct_cast_impl(x, to, ..., x_arg = x_arg, to_arg = to_arg, ordered = FALSE)
}
fct_cast_impl <- function(x, to, ..., x_arg = "", to_arg = "", ordered = FALSE) {
if (length(levels(to)) == 0L) {
levels <- levels(x)
if (is.null(levels)) {
exclude <- NA
levels <- unique(x)
} else {
exclude <- NULL
}
factor(as.character(x), levels = levels, ordered = ordered, exclude = exclude)
} else {
lossy <- !(x %in% levels(to) | is.na(x))
out <- factor(x, levels = levels(to), ordered = ordered, exclude = NULL)
maybe_lossy_cast(out, x, to, lossy, x_arg = x_arg, to_arg = to_arg)
}
}
#' @export
#' @method vec_cast.factor factor
vec_cast.factor.factor <- function(x, to, ...) {
fct_cast(x, to, ...)
}
#' @export
#' @method vec_cast.factor ordered
vec_cast.factor.ordered <- function(x, to, ...) {
fct_cast(x, to, ...)
}
#' @export
#' @method vec_cast.factor character
vec_cast.factor.character <-function(x, to, ...) {
fct_cast(x, to, ...)
}
#' @export
#' @method vec_cast.character factor
vec_cast.character.factor <- function(x, to, ...) {
stop_native_implementation("vec_cast.character.factor")
}
#' @rdname new_factor
#' @export vec_cast.ordered
#' @method vec_cast ordered
#' @export
vec_cast.ordered <- function(x, to, ...) {
UseMethod("vec_cast.ordered")
}
ord_cast <- function(x, to, ..., x_arg = "", to_arg = "") {
fct_cast_impl(x, to, ..., x_arg = x_arg, to_arg = to_arg, ordered = TRUE)
}
#' @export
#' @method vec_cast.ordered ordered
vec_cast.ordered.ordered <- function(x, to, ...) {
ord_cast(x, to, ...)
}
#' @export
#' @method vec_cast.ordered factor
vec_cast.ordered.factor <- function(x, to, ...) {
ord_cast(x, to, ...)
}
#' @export
#' @method vec_cast.ordered character
vec_cast.ordered.character <-function(x, to, ...) {
ord_cast(x, to, ...)
}
#' @export
#' @method vec_cast.character ordered
vec_cast.character.ordered <- function(x, to, ...) {
stop_native_implementation("vec_cast.character.ordered")
}
# Math and arithmetic -----------------------------------------------------
#' @export
vec_math.factor <- function(.fn, .x, ...) {
stop_unsupported(.x, .fn)
}
#' @export
vec_arith.factor <- function(op, x, y, ...) {
stop_unsupported(x, op)
}
# Helpers -----------------------------------------------------------------
hash_label <- function(x, length = 5) {
if (length(x) == 0) {
""
} else {
# Can't use hash() currently because it hashes the string pointers
# for performance, so the values in the test change each time
substr(digest::digest(x), 1, length)
}
}
levels_union <- function(x, y) {
union(levels(x), levels(y))
}
|
library(igraph)
### Name: scg_semi_proj
### Title: Semi-Projectors
### Aliases: scg_semi_proj scgSemiProjectors
### ** Examples
library(Matrix)
# compute the semi-projectors and projector for the partition
# provided by a community detection method
g <- sample_pa(20, m = 1.5, directed = FALSE)
eb <- cluster_edge_betweenness(g)
memb <- membership(eb)
lr <- scg_semi_proj(memb)
#In the symmetric case L = R
tcrossprod(lr$R) # same as lr$R %*% t(lr$R)
P <- crossprod(lr$R) # same as t(lr$R) %*% lr$R
#P is an orthogonal projector
isSymmetric(P)
sum( (P %*% P-P)^2 )
## use L and R to coarse-grain the graph Laplacian
lr <- scg_semi_proj(memb, mtype="laplacian")
L <- laplacian_matrix(g)
Lt <- lr$L %*% L %*% t(lr$R)
## or better lr$L %*% tcrossprod(L,lr$R)
rowSums(Lt)
| /data/genthat_extracted_code/igraph/examples/scg_semi_proj.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 780 | r | library(igraph)
### Name: scg_semi_proj
### Title: Semi-Projectors
### Aliases: scg_semi_proj scgSemiProjectors
### ** Examples
library(Matrix)
# compute the semi-projectors and projector for the partition
# provided by a community detection method
g <- sample_pa(20, m = 1.5, directed = FALSE)
eb <- cluster_edge_betweenness(g)
memb <- membership(eb)
lr <- scg_semi_proj(memb)
#In the symmetric case L = R
tcrossprod(lr$R) # same as lr$R %*% t(lr$R)
P <- crossprod(lr$R) # same as t(lr$R) %*% lr$R
#P is an orthogonal projector
isSymmetric(P)
sum( (P %*% P-P)^2 )
## use L and R to coarse-grain the graph Laplacian
lr <- scg_semi_proj(memb, mtype="laplacian")
L <- laplacian_matrix(g)
Lt <- lr$L %*% L %*% t(lr$R)
## or better lr$L %*% tcrossprod(L,lr$R)
rowSums(Lt)
|
library(sf)
library(fasterize) # devtools::install_github("ecohealthalliance/fasterize")
library(raster)
library(dplyr)
source("src/R/common.R")
### STEP 1: create a polygon file with better labeling
# sp_id includes antarctica regions that are not 213, but
# is otherwise confusing.
# Plus, there are some regions are represented by multiple polygons
# (US, Hawaii, Alaska, etc.)...need to merge these.
# hand corrected topology issues for rgn_ids 260 (self intersection) and AK (hole outside polygon)
regions <- sf::read_sf(dsn = file.path(dir_M, "git-annex/globalprep/spatial/v2017/modified_sp_mol_2014"),
layer = "sp_mol")
## fixing labels on some disputed regions
regions <- regions %>%
mutate(rgn_name = ifelse(rgn_id == 255, "DISPUTED", rgn_name)) %>%
mutate(rgn_key = ifelse(rgn_id == 255, "XD", rgn_key)) %>%
mutate(sp_type = ifelse(rgn_id == 255 & sp_type == "eez", "eez-disputed", sp_type)) %>%
mutate(sp_type = ifelse(rgn_id == 255 & sp_type == "land", "land-disputed", sp_type))
old <- regions$rgn_id
regions$rgn_ant_id <- ifelse(regions$sp_type %in% c("eez-ccamlr", "land-ccamlr"),
regions$sp_id,
regions$rgn_id)
new <- regions$rgn_ant_id
old[old != new] # good: indicates only the antarctica regions are different, which is what we want
## create an ID to combine region ids/types into single polygons (most are, but there are a few exceptions,
# such as Hawaii and AK, in US)
regions$unq_id <- paste(regions$rgn_ant_id, regions$rgn_type, sep="_")
head(regions)
# get data for joining later
data <- data.frame(regions) %>%
select(sp_type, rgn_type, rgn_id, rgn_name, rgn_key, rgn_ant_id, unq_id) %>%
unique()
## some data checks:
## NOTE: All these seem correct
dups <- regions$unq_id[duplicated(regions$unq_id)] #42 duplicates (out of 568)
duplicatedData <- regions[regions$unq_id %in% dups, ]
duplicatedData_csv <- duplicatedData %>%
st_set_geometry(NULL)
regions[regions$unq_id %in% "171_eez", ]
write.csv(duplicatedData_csv, "globalprep/spatial/v2017/DataCheckofDuplicatedRegions.csv", row.names=FALSE)
regions[regions$rgn_id == 213, ]
# ## save file with new ID values
# st_write(regions, dsn=file.path(dir_M, "git-annex/globalprep/spatial/v2017/int"),
# layer = "regions_merge_id",
# driver="ESRI Shapefile")
# test <- st_read(file.path(dir_M, "git-annex/globalprep/spatial/v2017/int"), "regions_merge_id")
## correct some topology errors (intersections, etc.)
# when I run this, it seems to work, but I can't save the file!
# st_is_valid(regions)
# sf_extSoftVersion()["lwgeom"]
# regions_tidy <- st_make_valid(regions)
# st_is_valid(regions_tidy)
valid = st_is_valid(regions)
# see bad regions
regions[!valid, ]
areas <- st_area(regions) %>% as.numeric()
# Another method of fixing (seems to work):
regions_good <- regions[valid, ] ## only regions with no topology issues
regions_bad_tidy <- st_buffer(regions[!(valid), ], 0.0) #correct regions with topology issues
regions_tidy <- rbind(regions_good, regions_bad_tidy) # merge good and fixed regions
# ## save file with corrected topology
st_write(regions_tidy, dsn=file.path(dir_M, "git-annex/globalprep/spatial/v2017/int"),
layer = "regions_tidy",
driver="ESRI Shapefile")
regions_tidy <- st_read(dsn=file.path(dir_M, "git-annex/globalprep/spatial/v2017/int"),
layer = "regions_tidy")
#
## try this to combine regions
ids <- regions_tidy$unq_id
t0 <- aggregate(regions_tidy, list(ids = ids), head, n = 1)
t0_min <- t0 %>%
select(unq_id, geometry)
setdiff(t0_min$unq_id, data$unq_id)
setdiff(data$unq_id, t0_min$unq_id)
data[duplicated(data$unq_id), ]
t0_min <- left_join(t0_min, data, by="unq_id")
t0_min <- dplyr::select(t0_min, sp_type, rgn_type, rgn_id, rgn_name, rgn_key, rgn_ant_id)
rgns_final <- t0_min %>%
mutate(area = st_area(.) %>% as.numeric()) %>%
mutate(area = round(area/1000000)) %>%
select(type_w_ant = sp_type, rgn_type, rgn_id, rgn_name, rgn_key, rgn_ant_id, area_km2=area, geometry)
st_write(rgns_final, dsn=file.path(dir_M, "git-annex/globalprep/spatial/v2017"),
layer = "regions_2017_update",
driver="ESRI Shapefile")
#### Save as raster
# save eez area csv:
regions <- st_read(dsn=file.path(dir_M, "git-annex/globalprep/spatial/v2017"),
layer = "regions_2017_update")
regions_area <- regions %>%
filter(rgn_type == "eez") %>%
st_set_geometry(NULL) %>%
select(rgn_id, area_km2) %>%
group_by(rgn_id) %>%
summarize(area_km2 = sum(area_km2))
write.csv(regions_area, "globalprep/spatial/v2017/output/rgn_area.csv", row.names=FALSE)
# get most recent shapefile and select eez regions only:
regions <- st_read(dsn=file.path(dir_M, "git-annex/globalprep/spatial/v2017"),
layer = "regions_2017_update")
table(regions$type_w_ant)
table(regions$rgn_type)
filter(regions, rgn_type == "eez-inland")
regions <- regions[regions$rgn_type %in% c("eez", "fao"), ]
data_rgns <- data.frame(regions) %>%
filter(rgn_type %in% c("eez", "fao")) %>%
select(rgn_ant_id, area_km2)
### use the old mask as a template for projection, origin, extents, and resolution
old_mask <- raster(file.path(dir_M, 'git-annex/globalprep/spatial/d2014/data/rgn_mol_raster_1km/sp_mol_raster_1km.tif'))
regions_raster <- fasterize::fasterize(regions, old_mask, field = 'rgn_ant_id')
plot(regions_raster)
writeRaster(regions_raster,
file.path(dir_M, 'git-annex/globalprep/spatial/v2017/regions_with_fao_ant.tif'))
test_raster <- raster(file.path(dir_M, 'git-annex/globalprep/spatial/v2017/regions_with_fao_ant.tif'))
plot(test_raster)
tmp <- freq(test_raster)
tmp_df <- data.frame(tmp)
setdiff(as.numeric(as.character(tmp_df$value)), as.numeric(as.character(data_rgns$rgn_ant_id)))
setdiff(as.numeric(as.character(tmp_df$value)), as.numeric(as.character(data_rgns$rgn_ant_id)))
setdiff(data_rgns$rgn_ant_id, tmp_df$value)
tmp2 <- data.frame(tmp) %>%
select(rgn_ant_id = value, count) %>%
dplyr::mutate(area = 934.4789*934.4789*0.000001 * count) %>%
left_join(data_rgns, by="rgn_ant_id")
plot(log(tmp2$area), log(tmp2$area_km2))
abline(0,1, col="red")
### now make an ocean raster:
raster_ocean <- raster(file.path(dir_M, 'git-annex/globalprep/spatial/v2017/regions_eez_with_fao_ant.tif'))
plot(raster_ocean)
reclassify(raster_ocean, c(0,300000,1),
filename = file.path(dir_M, 'git-annex/globalprep/spatial/v2017/ocean.tif'))
### make a raster that includes land and ocean
regions <- st_read(dsn=file.path(dir_M, "git-annex/globalprep/spatial/v2017"),
layer = "regions_2017_update")
### use the old mask as a template for projection, origin, extents, and resolution
old_mask <- raster(file.path(dir_M, 'git-annex/globalprep/spatial/d2014/data/rgn_mol_raster_1km/sp_mol_raster_1km.tif'))
regions_raster <- fasterize::fasterize(regions, old_mask, field = 'rgn_ant_id')
writeRaster(regions_raster,
file.path(dir_M, 'git-annex/globalprep/spatial/v2017/regions_land_ocean.tif'))
test_raster <- raster(file.path(dir_M, 'git-annex/globalprep/spatial/v2017/regions_land_ocean.tif'))
plot(test_raster)
| /globalprep/spatial/v2017/spatial_manip_v3.R | no_license | OHI-Science/ohiprep_v2018 | R | false | false | 7,206 | r | library(sf)
library(fasterize) # devtools::install_github("ecohealthalliance/fasterize")
library(raster)
library(dplyr)
source("src/R/common.R")
### STEP 1: create a polygon file with better labeling
# sp_id includes antarctica regions that are not 213, but
# is otherwise confusing.
# Plus, there are some regions are represented by multiple polygons
# (US, Hawaii, Alaska, etc.)...need to merge these.
# hand corrected topology issues for rgn_ids 260 (self intersection) and AK (hole outside polygon)
regions <- sf::read_sf(dsn = file.path(dir_M, "git-annex/globalprep/spatial/v2017/modified_sp_mol_2014"),
layer = "sp_mol")
## fixing labels on some disputed regions
regions <- regions %>%
mutate(rgn_name = ifelse(rgn_id == 255, "DISPUTED", rgn_name)) %>%
mutate(rgn_key = ifelse(rgn_id == 255, "XD", rgn_key)) %>%
mutate(sp_type = ifelse(rgn_id == 255 & sp_type == "eez", "eez-disputed", sp_type)) %>%
mutate(sp_type = ifelse(rgn_id == 255 & sp_type == "land", "land-disputed", sp_type))
old <- regions$rgn_id
regions$rgn_ant_id <- ifelse(regions$sp_type %in% c("eez-ccamlr", "land-ccamlr"),
regions$sp_id,
regions$rgn_id)
new <- regions$rgn_ant_id
old[old != new] # good: indicates only the antarctica regions are different, which is what we want
## create an ID to combine region ids/types into single polygons (most are, but there are a few exceptions,
# such as Hawaii and AK, in US)
regions$unq_id <- paste(regions$rgn_ant_id, regions$rgn_type, sep="_")
head(regions)
# get data for joining later
data <- data.frame(regions) %>%
select(sp_type, rgn_type, rgn_id, rgn_name, rgn_key, rgn_ant_id, unq_id) %>%
unique()
## some data checks:
## NOTE: All these seem correct
dups <- regions$unq_id[duplicated(regions$unq_id)] #42 duplicates (out of 568)
duplicatedData <- regions[regions$unq_id %in% dups, ]
duplicatedData_csv <- duplicatedData %>%
st_set_geometry(NULL)
regions[regions$unq_id %in% "171_eez", ]
write.csv(duplicatedData_csv, "globalprep/spatial/v2017/DataCheckofDuplicatedRegions.csv", row.names=FALSE)
regions[regions$rgn_id == 213, ]
# ## save file with new ID values
# st_write(regions, dsn=file.path(dir_M, "git-annex/globalprep/spatial/v2017/int"),
# layer = "regions_merge_id",
# driver="ESRI Shapefile")
# test <- st_read(file.path(dir_M, "git-annex/globalprep/spatial/v2017/int"), "regions_merge_id")
## correct some topology errors (intersections, etc.)
# when I run this, it seems to work, but I can't save the file!
# st_is_valid(regions)
# sf_extSoftVersion()["lwgeom"]
# regions_tidy <- st_make_valid(regions)
# st_is_valid(regions_tidy)
valid = st_is_valid(regions)
# see bad regions
regions[!valid, ]
areas <- st_area(regions) %>% as.numeric()
# Another method of fixing (seems to work):
regions_good <- regions[valid, ] ## only regions with no topology issues
regions_bad_tidy <- st_buffer(regions[!(valid), ], 0.0) #correct regions with topology issues
regions_tidy <- rbind(regions_good, regions_bad_tidy) # merge good and fixed regions
# ## save file with corrected topology
st_write(regions_tidy, dsn=file.path(dir_M, "git-annex/globalprep/spatial/v2017/int"),
layer = "regions_tidy",
driver="ESRI Shapefile")
regions_tidy <- st_read(dsn=file.path(dir_M, "git-annex/globalprep/spatial/v2017/int"),
layer = "regions_tidy")
#
## try this to combine regions
ids <- regions_tidy$unq_id
t0 <- aggregate(regions_tidy, list(ids = ids), head, n = 1)
t0_min <- t0 %>%
select(unq_id, geometry)
setdiff(t0_min$unq_id, data$unq_id)
setdiff(data$unq_id, t0_min$unq_id)
data[duplicated(data$unq_id), ]
t0_min <- left_join(t0_min, data, by="unq_id")
t0_min <- dplyr::select(t0_min, sp_type, rgn_type, rgn_id, rgn_name, rgn_key, rgn_ant_id)
rgns_final <- t0_min %>%
mutate(area = st_area(.) %>% as.numeric()) %>%
mutate(area = round(area/1000000)) %>%
select(type_w_ant = sp_type, rgn_type, rgn_id, rgn_name, rgn_key, rgn_ant_id, area_km2=area, geometry)
st_write(rgns_final, dsn=file.path(dir_M, "git-annex/globalprep/spatial/v2017"),
layer = "regions_2017_update",
driver="ESRI Shapefile")
#### Save as raster
# save eez area csv:
regions <- st_read(dsn=file.path(dir_M, "git-annex/globalprep/spatial/v2017"),
layer = "regions_2017_update")
regions_area <- regions %>%
filter(rgn_type == "eez") %>%
st_set_geometry(NULL) %>%
select(rgn_id, area_km2) %>%
group_by(rgn_id) %>%
summarize(area_km2 = sum(area_km2))
write.csv(regions_area, "globalprep/spatial/v2017/output/rgn_area.csv", row.names=FALSE)
# get most recent shapefile and select eez regions only:
regions <- st_read(dsn=file.path(dir_M, "git-annex/globalprep/spatial/v2017"),
layer = "regions_2017_update")
table(regions$type_w_ant)
table(regions$rgn_type)
filter(regions, rgn_type == "eez-inland")
regions <- regions[regions$rgn_type %in% c("eez", "fao"), ]
data_rgns <- data.frame(regions) %>%
filter(rgn_type %in% c("eez", "fao")) %>%
select(rgn_ant_id, area_km2)
### use the old mask as a template for projection, origin, extents, and resolution
old_mask <- raster(file.path(dir_M, 'git-annex/globalprep/spatial/d2014/data/rgn_mol_raster_1km/sp_mol_raster_1km.tif'))
regions_raster <- fasterize::fasterize(regions, old_mask, field = 'rgn_ant_id')
plot(regions_raster)
writeRaster(regions_raster,
file.path(dir_M, 'git-annex/globalprep/spatial/v2017/regions_with_fao_ant.tif'))
test_raster <- raster(file.path(dir_M, 'git-annex/globalprep/spatial/v2017/regions_with_fao_ant.tif'))
plot(test_raster)
tmp <- freq(test_raster)
tmp_df <- data.frame(tmp)
setdiff(as.numeric(as.character(tmp_df$value)), as.numeric(as.character(data_rgns$rgn_ant_id)))
setdiff(as.numeric(as.character(tmp_df$value)), as.numeric(as.character(data_rgns$rgn_ant_id)))
setdiff(data_rgns$rgn_ant_id, tmp_df$value)
tmp2 <- data.frame(tmp) %>%
select(rgn_ant_id = value, count) %>%
dplyr::mutate(area = 934.4789*934.4789*0.000001 * count) %>%
left_join(data_rgns, by="rgn_ant_id")
plot(log(tmp2$area), log(tmp2$area_km2))
abline(0,1, col="red")
### now make an ocean raster:
raster_ocean <- raster(file.path(dir_M, 'git-annex/globalprep/spatial/v2017/regions_eez_with_fao_ant.tif'))
plot(raster_ocean)
reclassify(raster_ocean, c(0,300000,1),
filename = file.path(dir_M, 'git-annex/globalprep/spatial/v2017/ocean.tif'))
### make a raster that includes land and ocean
regions <- st_read(dsn=file.path(dir_M, "git-annex/globalprep/spatial/v2017"),
layer = "regions_2017_update")
### use the old mask as a template for projection, origin, extents, and resolution
old_mask <- raster(file.path(dir_M, 'git-annex/globalprep/spatial/d2014/data/rgn_mol_raster_1km/sp_mol_raster_1km.tif'))
regions_raster <- fasterize::fasterize(regions, old_mask, field = 'rgn_ant_id')
writeRaster(regions_raster,
file.path(dir_M, 'git-annex/globalprep/spatial/v2017/regions_land_ocean.tif'))
test_raster <- raster(file.path(dir_M, 'git-annex/globalprep/spatial/v2017/regions_land_ocean.tif'))
plot(test_raster)
|
# Rebecca Lewis
# Course Project Preliminary Analysis
# Exploratory profiling of the prepped animal-shelter dataset, split by species.
# read in final prepped dataset
library(ggplot2)
library(dplyr)
library(scales)
library(funModeling)   # profiling_num(), freq(), cross_plot()
library(Hmisc)         # rcorr()
library(corrplot)
# set strings as factors to false
options(stringsAsFactors = FALSE)
# read in data
final_data <- read.csv('FinalDataset.csv', header=TRUE)
# split records by species for separate profiling; droplevels() discards factor
# levels that only occur in the other species
dog_data <- final_data %>% filter(Species == 'Dog') %>% droplevels()
cat_data <- final_data %>% filter(Species == 'Cat') %>% droplevels()
# numeric-variable summaries (funModeling)
profiling_num(dog_data)
profiling_num(cat_data)
# frequency tables for each categorical field -- dog records
str(dog_data)
freq(data=dog_data, input='PrimaryBreed')
freq(data=dog_data, input='Gender')
freq(data=dog_data, input='IntakeDateOnly')
freq(data=dog_data, input='OutcomeDateOnly')
freq(data=dog_data, input='IntakeAsilomar')
freq(data=dog_data, input='IntakeCondition')
freq(data=dog_data, input='IntakeType')
freq(data=dog_data, input='OutcomeAsilomar')
freq(data=dog_data, input='OutcomeSubType')
freq(data=dog_data, input='Size')
freq(data=dog_data, input='IntakeLocation')
freq(data=dog_data, input='IntakeSubLocation')
freq(data=dog_data, input='PreAltered')
freq(data=dog_data, input='OutcomeLocation')
freq(data=dog_data, input='OutcomeSubLocation')
# frequency tables for each categorical field -- cat records
str(cat_data)
freq(data=cat_data, input='PrimaryBreed')
freq(data=cat_data, input='Gender')
freq(data=cat_data, input='IntakeDateOnly')
freq(data=cat_data, input='OutcomeDateOnly')
freq(data=cat_data, input='IntakeAsilomar')
freq(data=cat_data, input='IntakeCondition')
freq(data=cat_data, input='IntakeType')
freq(data=cat_data, input='OutcomeAsilomar')
freq(data=cat_data, input='OutcomeSubType')
freq(data=cat_data, input='Size')
freq(data=cat_data, input='IntakeLocation')
freq(data=cat_data, input='IntakeSubLocation')
freq(data=cat_data, input='PreAltered')
freq(data=cat_data, input='OutcomeLocation')
freq(data=cat_data, input='OutcomeSubLocation')
###########################################################################################################################################
# feature extraction and selection
# All features below are derived on final_data BEFORE splitting into dogs and cats.
#drop unneeded date fields - IntakeDate, OutcomeDate, IntakeDateOnly, OutcomeDateOnly, OutcomeYear, OutcomeMonth, Month
#remove intake subtype from overall feature set
#remove intake reason from overall feature set
#exclude length owned and Intake Agency Association from overall feature set
#remove found zip from overall feature set
#create month and weekday variables for outcome date and intake date
library(lubridate)
final_data$IntakeMonth <- month(final_data$IntakeDateOnly)
final_data$OutcomeMonth <- month(final_data$OutcomeDateOnly)
final_data$IntakeWeekday <- wday(final_data$IntakeDateOnly)
final_data$OutcomeWeekday <- wday(final_data$OutcomeDateOnly)
#log10 transform intake age in months
#check for 0 values first (log10(0) would produce -Inf)
final_data %>% filter(IntakeAgeMonths == 0)
#none
final_data$Log10AgeInMonths <- log10(final_data$IntakeAgeMonths)
#calculate the length of stay from intake and outcome dates as a target variable
final_data$LengthOfStayDays <- as.numeric( difftime(final_data$OutcomeDateOnly, final_data$IntakeDateOnly, units = "days"))
#derive binary target of adoption in under 7 days
final_data$AdoptedInSevenDays <- ifelse(final_data$LengthOfStayDays <= 7, 1, 0)
#create a binary feature to indicate improvement in asilomar status
final_data$IntakeAsilomar <- as.factor(final_data$IntakeAsilomar)
final_data$OutcomeAsilomar <- as.factor(final_data$OutcomeAsilomar)
levels(final_data$IntakeAsilomar)
levels(final_data$OutcomeAsilomar)
#test improvement logic
# NOTE(review): "improvement" compares alphabetical factor-level codes; alphabetical ordering
# places Treatable-Manageable before Treatable-Rehabilitatable -- confirm this matches the
# intended Asilomar severity ranking before trusting this feature.
final_data %>% filter(as.numeric(final_data$OutcomeAsilomar) < as.numeric(final_data$IntakeAsilomar)) %>%
  select(c('IntakeAsilomar','OutcomeAsilomar'))
final_data$AsilomarImprovement <- ifelse(as.numeric(final_data$OutcomeAsilomar) < as.numeric(final_data$IntakeAsilomar), 1, 0)
#one-hot encode intake asilomar
final_data$HealthyAsilomar <- ifelse(final_data$IntakeAsilomar == 'Healthy', 1, 0)
final_data$TreatRehabAsilomar <- ifelse(final_data$IntakeAsilomar == 'Treatable-Rehabilitatable', 1,0)
final_data$TreatManAsilomar <- ifelse(final_data$IntakeAsilomar == 'Treatable-Manageable', 1, 0)
final_data$UnhealthyAsilomar <- ifelse(final_data$IntakeAsilomar == 'Unhealthy/Untreatable', 1, 0)
#derive features from intake condition for emaciated, feral, heartworm positive, combine sick with sick & injured and injured wounds, skin condition
final_data$Emaciated <- ifelse(final_data$IntakeCondition == 'Emaciated/Thin', 1, 0)
final_data$Feral <- ifelse(final_data$IntakeCondition == 'Feral', 1, 0)
final_data$Heartworm <- ifelse(final_data$IntakeCondition == 'Heartworm Positive', 1, 0)
# NOTE(review): 'Injured/woulds' appears misspelled but presumably mirrors the literal category
# value in the source data -- verify against the raw IntakeCondition values.
final_data$SickInjured <- ifelse(final_data$IntakeCondition %in% c('Injured/woulds', 'Sick','Sick & Injured'), 1, 0)
final_data$SkinIssue <- ifelse(final_data$IntakeCondition == 'Skin Condition', 1, 0)
#one-hot encode intake types
final_data$SurrenderIntake <- ifelse(final_data$IntakeType == 'Owner/Guardian Surrender', 1, 0)
final_data$SeizeCustodyIntake<- ifelse(final_data$IntakeType == 'Seized / Custody', 1, 0)
final_data$StrayIntake<- ifelse(final_data$IntakeType == 'Stray', 1, 0)
final_data$TransferIntake<- ifelse(final_data$IntakeType == 'Transfer In', 1, 0)
#create binary feature for small animals
final_data$SmallAnimal <- ifelse(final_data$Size == 'Small', 1, 0)
#create PreAltered binary variable (presumably spayed/neutered before intake -- confirm)
final_data$AlteredAtIntake <- ifelse(final_data$PreAltered == 'Y', 1, 0)
#foster and offsite flags
final_data$Foster <- ifelse(final_data$OutcomeSubType == 'Foster Home/ Fast Track', 1, 0)
final_data$Offsite <- ifelse(final_data$OutcomeSubType %in% c('No Fleas Market', 'Special Event') |
                               grepl('Off-Site', final_data$OutcomeSubType) |
                               grepl('Offsite', final_data$OutcomeSubType), 1, 0)
#create multiclass target for Barn Cat, Foster, Louisiana SPCA Offsite, Veterinary Partner Offsite, Retail Location Offsite, and Onsite.
#nested ifelse collapses the retail / veterinary / SPCA off-site sub-types; all other sub-types keep their original label
final_data$AdoptionChannelGroup <- as.factor(ifelse(final_data$OutcomeSubType %in% c('Off-Site Jefferson Fee', 'Off-Site Petco',
                                                                                    'Off-Site Petco Algiers','Off-Site PetSmart',
                                                                                    'Off-site PetSmart Manhattan',
                                                                                    'Offsite Petco Tchoup', 'Off-Site Petco Mid-City',
                                                                                    'Off-Site Petco Harvey', 'Off-Site Petco Kenner'),
                                                    'Retail Partner Offsite',
                                                    ifelse(final_data$OutcomeSubType %in% c('Off-Site Cat Practice','Off-Site MidCity Vet.Clinic'),
                                                           'Veterinary Partner Offsite',
                                                           ifelse(final_data$OutcomeSubType %in% c('No Fleas Market','Off-Site', 'Off-Site Clearview Volunteer',
                                                                                                   'Special Event'), 'Louisiana SPCA Offsite',
                                                                  as.character(final_data$OutcomeSubType)))))
#Dog Features
#create a feature for Mix Specified based on values in primary breed.
final_data$DogMixed <- ifelse(grepl('Mix', final_data$PrimaryBreed), 1, 0)
#create a feature for PitBullType based on primary breed (pit-bull-type breed labels grouped together)
final_data$DogPitBullType <- ifelse(final_data$PrimaryBreed %in% c('Terrier, Pit Bull', 'Terrier, American Pit Bull',
                                                                   'Bulldog, American', 'Terrier, American Staffordshire',
                                                                   'Terrier, Staffordshire Bull','Bullmastiff'), 1, 0)
#create encouragement room feature based on locations
#flag is 1 if ANY of the four location fields mentions an "Encourage..." room
final_data$DogEncouragement <- ifelse(grepl('Encourage', final_data$IntakeLocation) |
                                        grepl('Encourage', final_data$IntakeSubLocation) |
                                        grepl('Encourage', final_data$OutcomeLocation) |
                                        grepl('Encourage', final_data$OutcomeSubLocation), 1, 0)
#Cat Features
#create a feature for breed specified based on primary breed
#'Domestic ...' presumably denotes an unspecified breed, hence the inverted 0/1
final_data$CatBreedSpecified <- ifelse(grepl('Domestic', final_data$PrimaryBreed), 0,1)
#make Male flag plus a complementary Female flag from Gender
final_data$Male <- ifelse(final_data$Gender == 'M', 1, 0)
final_data$Female <- ifelse(final_data$Gender == 'F', 1, 0)
#######################################################################################################
#create prepared dataset: keep only the engineered features plus the three targets
prepared <- c('Species', 'IntakeMonth', 'OutcomeMonth', 'IntakeWeekday','OutcomeWeekday', 'Log10AgeInMonths', 'Male', 'Female',
              'AsilomarImprovement', 'HealthyAsilomar', 'TreatRehabAsilomar', 'TreatManAsilomar',
              'UnhealthyAsilomar', 'Emaciated', 'Feral', 'Heartworm', 'SickInjured','SkinIssue',
              'SurrenderIntake', 'SeizeCustodyIntake', 'StrayIntake', 'TransferIntake','SmallAnimal',
              'AlteredAtIntake', 'Foster', 'Offsite', 'CatBreedSpecified', 'DogEncouragement', 'DogMixed',
              'DogPitBullType', 'LengthOfStayDays', 'AdoptedInSevenDays','AdoptionChannelGroup')
prepareddata <- final_data[prepared]
#numeric code for the channel factor (level mapping listed below)
prepareddata$AdoptionChannelNum <- as.numeric(prepareddata$AdoptionChannelGroup)
#split between dogs and cats, dropping features that only apply to the other species
dogdata <- subset(prepareddata %>% filter(Species == 'Dog'), select=-c(CatBreedSpecified, Species, AdoptionChannelGroup))
catdata <- subset(prepareddata %>% filter(Species == 'Cat'), select=-c(DogEncouragement, DogMixed,DogPitBullType, Species, AdoptionChannelGroup))
#drop categorical channel column; AdoptionChannelNum codes are:
#1 - Barn Cat
#2 -Foster Home/ Fast Track
#3 -Louisiana SPCA Offsite
#4 -Onsite
#5 -Retail Partner Offsite
#6 -Veterinary Partner Offsite
######################################################################################
#plot newly transformed age feature
prepareddata %>%
  ggplot(aes(x=Log10AgeInMonths)) +
  # NOTE(review): ..density.. notation is deprecated in newer ggplot2; after_stat(density) is the replacement
  geom_histogram(aes(y=..density..), fill = 'white', color='black', binwidth = .25) +
  facet_wrap(~Species)+
  labs(title='Log 10 Intake Age in Months Distribution', subtitle='For Adopted Animals')
########################################################################################
#correlation plots (Hmisc::rcorr on the all-numeric dog matrix)
res <-rcorr(as.matrix(dogdata))
corrplot(round(res$r,4), type = "upper", order = "hclust",
         tl.col = "black", tl.srt = 45, title="Dog Feature and Target Correlation Plot", mar=c(0,0,1,0))
#after viewing the correlation plot, I noticed high negative correlation between male and female so I am removing one.
# I also noticed a high correlation between intake month and outcome month which makes sense because
# we are only looking at a seven day period
#one hot encoded categorical variables had slight negative and positive correlations with each other but I would
#like to see how each contributes to the model before removing any
dogdata <- subset(dogdata, select=-c(Female, OutcomeMonth))
catres <-rcorr(as.matrix(catdata))
#heartworm created a null value in the correlation data, need to remove before plotting
catdata <- subset(catdata, select=-c(Heartworm))
catres <-rcorr(as.matrix(catdata))
corrplot(round(catres$r,4), type = "upper", order = "hclust",
         tl.col = "black", tl.srt = 45, title="Cat Feature and Target Correlation Plot", mar=c(0,0,1,0))
#making same changes as dogs: removing female and outcomemonth
#interesting note in that the unhealthy asilomar is correlated with feral status
catdata <- subset(catdata, select=-c(Female, OutcomeMonth))
#save catdata and dogdata for the modeling scripts
write.csv(catdata, 'catdata.csv', row.names = FALSE)
write.csv(dogdata, 'dogdata.csv', row.names = FALSE)
########################################################################################
#view details on target variables
# Length of stay days
ggplot(dogdata, aes(LengthOfStayDays)) +
  geom_histogram(aes(y=..density..),fill = 'white', color='black', binwidth = 10) +
  labs(title='Dog - Length of Stay in Days Distribution', subtitle='For Adopted Animals')
summary(dogdata$LengthOfStayDays)
#how many dogs stay longer than 29 days, relative to the total
dim(dogdata %>% filter(LengthOfStayDays > 29))
dim(dogdata)
ggplot(catdata, aes(LengthOfStayDays)) +
  geom_histogram(aes(y=..density..),fill = 'white', color='black', binwidth = 10) +
  labs(title='Cat - Length of Stay in Days Distribution', subtitle='For Adopted Animals')
summary(catdata$LengthOfStayDays)
#Adopted in seven days: per-species counts with within-species percentages
prepareddata %>% group_by(Species, AdoptedInSevenDays) %>% tally() %>% mutate(pct=n/sum(n)) %>%
  ggplot(aes(x=as.factor(AdoptedInSevenDays), y=n, fill=as.factor(AdoptedInSevenDays))) +
  geom_bar(stat='Identity') +
  facet_wrap(~Species) +
  geom_text(aes(label = scales::percent(pct)), position = position_stack(vjust= .5), color="white") +
  labs(title='Adopted in Seven Days', subtitle='For Adopted Animals') +
  scale_x_discrete(labels = c('No', 'Yes')) +
  scale_fill_discrete(name = "Adopted in Seven Days", labels = c("No", "Yes")) +
  theme(axis.title.x = element_blank())
#Target classes are highly imbalanced
summary(as.factor(dogdata$AdoptedInSevenDays))
summary(as.factor(catdata$AdoptedInSevenDays))
#Adoption Channel frequencies by species
freq(prepareddata %>% filter(Species =='Dog') %>% select(AdoptionChannelGroup))
freq(prepareddata %>% filter(Species =='Cat') %>% select(AdoptionChannelGroup))
# NOTE(review): funModeling::cross_plot expects a binary/categorical target;
# LengthOfStayDays is numeric -- confirm this call behaves as intended.
cross_plot(data = prepareddata, target = 'LengthOfStayDays')
#Adoption Channel counts with within-species percentages
prepareddata %>% group_by(Species, AdoptionChannelGroup) %>% tally() %>% mutate(pct=n/sum(n)) %>%
  ggplot(aes(x=as.factor(AdoptionChannelGroup), y=n, fill=as.factor(AdoptionChannelGroup))) +
  geom_bar(stat='Identity') +
  facet_wrap(~Species) +
  geom_text(aes(label = scales::percent(pct)), position = position_stack(vjust= .5), color="black") +
  labs(title='Adoption Channels by Species', subtitle='For Adopted Animals', fill = 'Adoption Channels') +
  theme(axis.title.x = element_blank(), axis.text.x = element_blank(), axis.ticks.x = element_blank())
| /Scripts/LewisRebecca_Analysis.R | no_license | RebeccaLewis-DS/dsc630-finalproject | R | false | false | 14,411 | r | #Rebecca Lews
#Course Project Preliminary Analysis
# read in final prepped dataset
library(ggplot2)
library(dplyr)
library(scales)
library(funModeling)
library(Hmisc)
library(corrplot)
# set strings as factors to false
options(stringsAsFactors = FALSE)
#read in data
final_data <- read.csv('FinalDataset.csv', header=TRUE)
dog_data <- final_data %>% filter(Species == 'Dog') %>% droplevels()
cat_data <- final_data %>% filter(Species == 'Cat') %>% droplevels()
profiling_num(dog_data)
profiling_num(cat_data)
str(dog_data)
freq(data=dog_data, input='PrimaryBreed')
freq(data=dog_data, input='Gender')
freq(data=dog_data, input='IntakeDateOnly')
freq(data=dog_data, input='OutcomeDateOnly')
freq(data=dog_data, input='IntakeAsilomar')
freq(data=dog_data, input='IntakeCondition')
freq(data=dog_data, input='IntakeType')
freq(data=dog_data, input='OutcomeAsilomar')
freq(data=dog_data, input='OutcomeSubType')
freq(data=dog_data, input='Size')
freq(data=dog_data, input='IntakeLocation')
freq(data=dog_data, input='IntakeSubLocation')
freq(data=dog_data, input='PreAltered')
freq(data=dog_data, input='OutcomeLocation')
freq(data=dog_data, input='OutcomeSubLocation')
str(cat_data)
freq(data=cat_data, input='PrimaryBreed')
freq(data=cat_data, input='Gender')
freq(data=cat_data, input='IntakeDateOnly')
freq(data=cat_data, input='OutcomeDateOnly')
freq(data=cat_data, input='IntakeAsilomar')
freq(data=cat_data, input='IntakeCondition')
freq(data=cat_data, input='IntakeType')
freq(data=cat_data, input='OutcomeAsilomar')
freq(data=cat_data, input='OutcomeSubType')
freq(data=cat_data, input='Size')
freq(data=cat_data, input='IntakeLocation')
freq(data=cat_data, input='IntakeSubLocation')
freq(data=cat_data, input='PreAltered')
freq(data=cat_data, input='OutcomeLocation')
freq(data=cat_data, input='OutcomeSubLocation')
###########################################################################################################################################
#feature extraction and selection
#before split data into dogs and cats
#drop unneeded date fields - IntakeDate, OutcomeDate, IntakeDateOnly, OutcomeDateOnly, OutcomeYear, OutcomeMonth, Month
#remove intake subtype from overall feature set
#remove intake reason from overall feature set
#exclude length owned and Intake Agendcy Association from overall feature set
#remove found zip from overall feature set
#create month and weekday variables for outcome date and intake date
library(lubridate)
final_data$IntakeMonth <- month(final_data$IntakeDateOnly)
final_data$OutcomeMonth <- month(final_data$OutcomeDateOnly)
final_data$IntakeWeekday <- wday(final_data$IntakeDateOnly)
final_data$OutcomeWeekday <- wday(final_data$OutcomeDateOnly)
#log10 transform intake age in months
#check for 0 values
final_data %>% filter(IntakeAgeMonths == 0)
#none
final_data$Log10AgeInMonths <- log10(final_data$IntakeAgeMonths)
#calculate the length of stay from intake and outcome dates as a target variable
final_data$LengthOfStayDays <- as.numeric( difftime(final_data$OutcomeDateOnly, final_data$IntakeDateOnly, units = "days"))
#derive binary target of adoption in under 7 days
final_data$AdoptedInSevenDays <- ifelse(final_data$LengthOfStayDays <= 7, 1, 0)
#create a binary feature to indicate improvement in asilomar status
final_data$IntakeAsilomar <- as.factor(final_data$IntakeAsilomar)
final_data$OutcomeAsilomar <- as.factor(final_data$OutcomeAsilomar)
levels(final_data$IntakeAsilomar)
levels(final_data$OutcomeAsilomar)
#test improvement logic
final_data %>% filter(as.numeric(final_data$OutcomeAsilomar) < as.numeric(final_data$IntakeAsilomar)) %>%
select(c('IntakeAsilomar','OutcomeAsilomar'))
final_data$AsilomarImprovement <- ifelse(as.numeric(final_data$OutcomeAsilomar) < as.numeric(final_data$IntakeAsilomar), 1, 0)
#one hot incode intake asilomar
final_data$HealthyAsilomar <- ifelse(final_data$IntakeAsilomar == 'Healthy', 1, 0)
final_data$TreatRehabAsilomar <- ifelse(final_data$IntakeAsilomar == 'Treatable-Rehabilitatable', 1,0)
final_data$TreatManAsilomar <- ifelse(final_data$IntakeAsilomar == 'Treatable-Manageable', 1, 0)
final_data$UnhealthyAsilomar <- ifelse(final_data$IntakeAsilomar == 'Unhealthy/Untreatable', 1, 0)
#derive features from intake condition for emaciated, feral, heartworm positive, combine sick with sick & injured and injured wounds, skin condition
final_data$Emaciated <- ifelse(final_data$IntakeCondition == 'Emaciated/Thin', 1, 0)
final_data$Feral <- ifelse(final_data$IntakeCondition == 'Feral', 1, 0)
final_data$Heartworm <- ifelse(final_data$IntakeCondition == 'Heartworm Positive', 1, 0)
final_data$SickInjured <- ifelse(final_data$IntakeCondition %in% c('Injured/woulds', 'Sick','Sick & Injured'), 1, 0)
final_data$SkinIssue <- ifelse(final_data$IntakeCondition == 'Skin Condition', 1, 0)
#one hot encode intake types
final_data$SurrenderIntake <- ifelse(final_data$IntakeType == 'Owner/Guardian Surrender', 1, 0)
final_data$SeizeCustodyIntake<- ifelse(final_data$IntakeType == 'Seized / Custody', 1, 0)
final_data$StrayIntake<- ifelse(final_data$IntakeType == 'Stray', 1, 0)
final_data$TransferIntake<- ifelse(final_data$IntakeType == 'Transfer In', 1, 0)
#create binary feature for small animals
final_data$SmallAnimal <- ifelse(final_data$Size == 'Small', 1, 0)
#create Prealtered binary variable
final_data$AlteredAtIntake <- ifelse(final_data$PreAltered == 'Y', 1, 0)
#foster and offsite flags
final_data$Foster <- ifelse(final_data$OutcomeSubType == 'Foster Home/ Fast Track', 1, 0)
final_data$Offsite <- ifelse(final_data$OutcomeSubType %in% c('No Fleas Market', 'Special Event') |
grepl('Off-Site', final_data$OutcomeSubType) |
grepl('Offsite', final_data$OutcomeSubType), 1, 0)
#create multiclass target for Barn Cat, Foster, Louisiana SPCA Offsite, Veterinary Partner Offsite, Retail Location Offsite, and Onsite.
final_data$AdoptionChannelGroup <- as.factor(ifelse(final_data$OutcomeSubType %in% c('Off-Site Jefferson Fee', 'Off-Site Petco',
'Off-Site Petco Algiers','Off-Site PetSmart',
'Off-site PetSmart Manhattan',
'Offsite Petco Tchoup', 'Off-Site Petco Mid-City',
'Off-Site Petco Harvey', 'Off-Site Petco Kenner'),
'Retail Partner Offsite',
ifelse(final_data$OutcomeSubType %in% c('Off-Site Cat Practice','Off-Site MidCity Vet.Clinic'),
'Veterinary Partner Offsite',
ifelse(final_data$OutcomeSubType %in% c('No Fleas Market','Off-Site', 'Off-Site Clearview Volunteer',
'Special Event'), 'Louisiana SPCA Offsite',
as.character(final_data$OutcomeSubType)))))
#Dog Features
#create a feature for Mix Specified based on values in primary breed.
final_data$DogMixed <- ifelse(grepl('Mix', final_data$PrimaryBreed), 1, 0)
#create a feature for PitBullType based on primary breed
final_data$DogPitBullType <- ifelse(final_data$PrimaryBreed %in% c('Terrier, Pit Bull', 'Terrier, American Pit Bull',
'Bulldog, American', 'Terrier, American Staffordshire',
'Terrier, Staffordshire Bull','Bullmastiff'), 1, 0)
#create encouragement room feature based on locations
final_data$DogEncouragement <- ifelse(grepl('Encourage', final_data$IntakeLocation) |
grepl('Encourage', final_data$IntakeSubLocation) |
grepl('Encourage', final_data$OutcomeLocation) |
grepl('Encourage', final_data$OutcomeSubLocation), 1, 0)
#Cat Features
#create a feature for breed specified based on primary breed
final_data$CatBreedSpecified <- ifelse(grepl('Domestic', final_data$PrimaryBreed), 0,1)
#make ismale variable
final_data$Male <- ifelse(final_data$Gender == 'M', 1, 0)
final_data$Female <- ifelse(final_data$Gender == 'F', 1, 0)
#######################################################################################################
#create prepared dataset
prepared <- c('Species', 'IntakeMonth', 'OutcomeMonth', 'IntakeWeekday','OutcomeWeekday', 'Log10AgeInMonths', 'Male', 'Female',
'AsilomarImprovement', 'HealthyAsilomar', 'TreatRehabAsilomar', 'TreatManAsilomar',
'UnhealthyAsilomar', 'Emaciated', 'Feral', 'Heartworm', 'SickInjured','SkinIssue',
'SurrenderIntake', 'SeizeCustodyIntake', 'StrayIntake', 'TransferIntake','SmallAnimal',
'AlteredAtIntake', 'Foster', 'Offsite', 'CatBreedSpecified', 'DogEncouragement', 'DogMixed',
'DogPitBullType', 'LengthOfStayDays', 'AdoptedInSevenDays','AdoptionChannelGroup')
prepareddata <- final_data[prepared]
prepareddata$AdoptionChannelNum <- as.numeric(prepareddata$AdoptionChannelGroup)
#split between dog and cats
dogdata <- subset(prepareddata %>% filter(Species == 'Dog'), select=-c(CatBreedSpecified, Species, AdoptionChannelGroup))
catdata <- subset(prepareddata %>% filter(Species == 'Cat'), select=-c(DogEncouragement, DogMixed,DogPitBullType, Species, AdoptionChannelGroup))
#drop categorical channel column
#1 - Barn Cat
#2 -Foster Home/ Fast Track
#3 -Louisiana SPCA Offsite
#4 -Onsite
#5 -Retail Partner Offsite
#6 -Veterinary Partner Offsite
######################################################################################
#plot newly transformed age feature
prepareddata %>%
ggplot(aes(x=Log10AgeInMonths)) +
geom_histogram(aes(y=..density..), fill = 'white', color='black', binwidth = .25) +
facet_wrap(~Species)+
labs(title='Log 10 Intake Age in Months Distribution', subtitle='For Adopted Animals')
########################################################################################
#correlation plots
res <-rcorr(as.matrix(dogdata))
corrplot(round(res$r,4), type = "upper", order = "hclust",
tl.col = "black", tl.srt = 45, title="Dog Feature and Target Correlation Plot", mar=c(0,0,1,0))
#after viewing the correlation plot, I noticed high negative correlation between male and female so I am removing one.
# I also noticed a high correlation between intake month and intake month so outcome month which makes sense because
# we are only looking at a seven day period
#one hot encoded categorical variables had slight negative and positive correlations with each other but I would
#like to see how each contributes to the model before removing any
dogdata <- subset(dogdata, select=-c(Female, OutcomeMonth))
catres <-rcorr(as.matrix(catdata))
#heartworm created a null value in the correlation data, need to remove before plotting
catdata <- subset(catdata, select=-c(Heartworm))
catres <-rcorr(as.matrix(catdata))
corrplot(round(catres$r,4), type = "upper", order = "hclust",
tl.col = "black", tl.srt = 45, title="Cat Feature and Target Correlation Plot", mar=c(0,0,1,0))
#making same changes as dogs: removing female and outcomemonth
#interesting note in that the unhealthy asilomar is correlated with feral status
catdata <- subset(catdata, select=-c(Female, OutcomeMonth))
#save catdata and dogdata
write.csv(catdata, 'catdata.csv', row.names = FALSE)
write.csv(dogdata, 'dogdata.csv', row.names = FALSE)
########################################################################################
#view details on target variables
#LEngth of stay days
ggplot(dogdata, aes(LengthOfStayDays)) +
geom_histogram(aes(y=..density..),fill = 'white', color='black', binwidth = 10) +
labs(title='Dog - Length of Stay in Days Distribution', subtitle='For Adopted Animals')
summary(dogdata$LengthOfStayDays)
dim(dogdata %>% filter(LengthOfStayDays > 29))
dim(dogdata)
ggplot(catdata, aes(LengthOfStayDays)) +
geom_histogram(aes(y=..density..),fill = 'white', color='black', binwidth = 10) +
labs(title='Cat - Length of Stay in Days Distribution', subtitle='For Adopted Animals')
summary(catdata$LengthOfStayDays)
#Adopted in seven days
prepareddata %>% group_by(Species, AdoptedInSevenDays) %>% tally() %>% mutate(pct=n/sum(n)) %>%
ggplot(aes(x=as.factor(AdoptedInSevenDays), y=n, fill=as.factor(AdoptedInSevenDays))) +
geom_bar(stat='Identity') +
facet_wrap(~Species) +
geom_text(aes(label = scales::percent(pct)), position = position_stack(vjust= .5), color="white") +
labs(title='Adopted in Seven Days', subtitle='For Adopted Animals') +
scale_x_discrete(labels = c('No', 'Yes')) +
scale_fill_discrete(name = "Adopted in Seven Days", labels = c("No", "Yes")) +
theme(axis.title.x = element_blank())
#Target classes are highly imbalanced
summary(as.factor(dogdata$AdoptedInSevenDays))
summary(as.factor(catdata$AdoptedInSevenDays))
#Adoption Channel
freq(prepareddata %>% filter(Species =='Dog') %>% select(AdoptionChannelGroup))
freq(prepareddata %>% filter(Species =='Cat') %>% select(AdoptionChannelGroup))
cross_plot(data = prepareddata, target = 'LengthOfStayDays')
#Adoption Channel
prepareddata %>% group_by(Species, AdoptionChannelGroup) %>% tally() %>% mutate(pct=n/sum(n)) %>%
ggplot(aes(x=as.factor(AdoptionChannelGroup), y=n, fill=as.factor(AdoptionChannelGroup))) +
geom_bar(stat='Identity') +
facet_wrap(~Species) +
geom_text(aes(label = scales::percent(pct)), position = position_stack(vjust= .5), color="black") +
labs(title='Adoption Channels by Species', subtitle='For Adopted Animals', fill = 'Adoption Channels') +
theme(axis.title.x = element_blank(), axis.text.x = element_blank(), axis.ticks.x = element_blank())
|
# Yige Wu @ WashU 2019 Feb
## show the different sites are regulated in the same kinase-substrate protein pair in different cancers
# source-------------------------
# NOTE(review): setwd() to a hard-coded user path makes this script machine-specific
wd <- getwd()
if (wd != "/Users/yigewu/Box Sync") {
  setwd("/Users/yigewu/Box Sync")
}
source('./cptac2p_analysis/phospho_network/phospho_network_shared.R')
source('./cptac2p_analysis/phospho_network/phospho_network_plotting.R')
# set variables -----------------------------------------------------------
reg_nonNA <- 20   # NOTE(review): unused in this chunk; presumably consumed by sourced helpers -- confirm
size <- 83        # sample-size token embedded in the regression results file name below
cancers2process <- c("BRCA", "OV", "CO", "UCEC", "CCRCC")
# gather regression results ----------------------------------------------------------------
fdr_thres <- 0.1  # FDR cutoff token embedded in the results file name
file_path_tmp <- paste0("./cptac2p/analysis_results/phospho_network/regression/tables/generate_regression_regulated_uniq_marked/", "regression_size", size, "_FDR", fdr_thres, "_detected_in_", paste0(cancers2process, collapse = "_"),".txt")
sup_tab <- fread(input = file_path_tmp, data.table = F)
# kinase:substrate protein-pair key used for grouping below
sup_tab$pair_pro <- paste0(sup_tab$GENE, ":", sup_tab$SUB_GENE)
# business ---------------------------------------------------------------
## For each regulation mode (cis / trans), plot which phosphosites of each kinase-substrate
## protein pair are regulated in which cancers, restricted to pairs involving a significantly
## mutated gene (SMG). One PDF is written per mode.
## NOTE(review): depends on sup_tab, get_SMG_by_cancer(), order_cancer_rev(), color_cancers2,
## theme_nogrid(), makeOutDir(), and resultD defined earlier / in the sourced helper files.
for (SELF in c("cis", "trans")) {
  tab_tmp <- sup_tab[sup_tab$SELF == SELF,]
  ## per protein pair and cancer: number of distinct regulated phosphosite pairs
  pair_pro_cancer_summary <- data.frame(table(unique(tab_tmp[tab_tmp$regulated,c("pair", "pair_pro", "Cancer", "regulated", "SELF")])[,c("pair_pro", "Cancer")]))
  pair_pro_cancer_summary <- pair_pro_cancer_summary[pair_pro_cancer_summary$Freq > 0,]
  ## auto-printing is disabled inside for loops, so print() explicitly (bare pipe was a silent no-op)
  print(pair_pro_cancer_summary %>%
          arrange(-Freq) %>%
          head())
  pair_cancer_summary <- unique(tab_tmp[tab_tmp$regulated & tab_tmp$pair_pro %in% pair_pro_cancer_summary$pair_pro, c("pair_pro", "Cancer", "SUB_MOD_RSD", "pair", "GENE", "SUB_GENE")])
  pair_cancer_summary <- merge(pair_cancer_summary, pair_pro_cancer_summary, by = c("pair_pro", "Cancer"), all.x = TRUE)
  print(head(pair_cancer_summary))
  tab2p <- pair_cancer_summary
  ## keep rows where the kinase or the substrate is an SMG in that cancer
  tab2p$is.SUB_GENE.smg <- get_SMG_by_cancer(gene_vector = tab2p[, "SUB_GENE"], cancer_vector = tab2p$Cancer)
  tab2p$is.GENE.smg <- get_SMG_by_cancer(gene_vector = tab2p[, "GENE"], cancer_vector = tab2p$Cancer)
  tab2p <- tab2p[tab2p$is.GENE.smg | tab2p$is.SUB_GENE.smg,]
  ## each pair's bar totals 1: every regulated site gets an equal-length stacked segment
  tab2p$bar_len <- 1/tab2p$Freq
  tab2p$id <- paste0(tab2p$pair_pro, ":", tab2p$Cancer)
  tab2p <- tab2p[order(tab2p$id),]
  ## y position of each site label: center of its stacked segment
  y_print <- vector(mode = "numeric", length = nrow(tab2p))
  for (i in seq_len(nrow(tab2p))) {   # seq_len() is safe when tab2p has zero rows (1:0 is not)
    if (duplicated(tab2p$id)[i]) {
      y_print[i] <- y_print[i-1] + tab2p$bar_len[i]
    } else {
      y_print[i] <- tab2p$bar_len[i]/2
    }
  }
  tab2p$y_print <- y_print
  tab2p$Cancer <- order_cancer_rev(tab2p$Cancer)
  p <- ggplot()
  p <- p + geom_bar(data = tab2p, mapping = aes(x = pair_pro, y = bar_len, fill = Cancer), stat = "identity", position = "stack", color = "black")
  p <- p + geom_text(data = tab2p, mapping = aes(x = pair_pro, y = y_print, label = SUB_MOD_RSD), size = 2.5)
  p <- p + scale_fill_manual(values = color_cancers2)
  p <- p + facet_grid(SUB_GENE~Cancer, drop=TRUE, shrink = TRUE, space = "free", scales = "free")
  p <- p + coord_flip()
  p <- p + theme_nogrid()
  p <- p + theme(axis.text.x = element_blank(), axis.ticks = element_blank(),
                 axis.title.y = element_blank(), axis.title.x = element_blank(),
                 strip.text.y = element_text(size = 10, angle = 0),
                 strip.background = element_rect(fill = "white", color = "white"),
                 panel.spacing.y = unit(0, "lines"),
                 panel.spacing.x = unit(0, "lines"))
  p <- p + guides(fill = FALSE)
  print(p)   # explicit print() -- bare `p` does nothing inside a loop
  fn <- paste0(makeOutDir(resultD = resultD), "cancer_specific_", SELF, "_regulated_pairs_in_SMGs.pdf")
  ## pass plot = p explicitly: ggsave()'s default last_plot() is unreliable inside loops
  ggsave(filename = fn, plot = p, width = 4.5, height = 5)
}
## stop() keeps the commented-out exploratory queries below from running when the script is sourced
stop()
# mut_cnv_cans %>%
# filter(GENE == "CTNNB1", SUB_GENE %in% c("PRKD1", "PRKCD", "PRKACB", "PRKACA", "PAK4", "GSK3B"), cancer == "CO", p < 0.1) %>%
# arrange(p)
# ## PRKD1
#
# mut_cnv_cans %>%
# filter(SUB_GENE %in% c("GSK3B"), cancer == "CO") %>%
# arrange(p)
#
# mut_cnv_cans %>%
# filter(GENE == "CTNNB1", SUB_GENE %in% c("PRKD1", "PRKCD", "PRKACB", "PRKACA", "PAK4", "GSK3B"), cancer == "UCEC", p < 0.05) %>%
# arrange(p)
# ## confusing down-regualtion
#
# mut_cnv_cans %>%
# filter(GENE == "RB1", SUB_GENE %in% c("PRKAA1", "PPP1CB", "CDK18", "CDK1"), cancer == "OV") %>%
# arrange(p)
#
# mut_cnv_cans %>%
# filter(GENE == "RB1", SUB_GENE %in% c("PRKAA1", "PPP1CB", "CDK18", "CDK1"), cancer == "BRCA") %>%
# arrange(p)
#
# mut_cnv_cans %>%
# filter(GENE == "TP53", SUB_GENE %in% c("IKBKB"), cancer == "OV") %>%
# arrange(p)
#
# mut_cnv_cans %>%
# filter(GENE == "PTEN", SUB_GENE %in% c("CREB1"), cancer == "CCRCC") %>%
# arrange(p)
#
# mut_cnv_cans %>%
# filter(GENE == "MAP2K4", SUB_GENE %in% c("PAK1"), cancer == "BRCA") %>%
# arrange(p)
| /phospho_network/regression/figures/grid_different_sites_same_pair_pro.R | no_license | ding-lab/phospho-signaling | R | false | false | 4,836 | r | # Yige Wu @ WashU 2019 Feb
## show the different sites are regulated in the same kinase-substrate protein pair in different cancers
# source-------------------------
wd <- getwd()
if (wd != "/Users/yigewu/Box Sync") {
setwd("/Users/yigewu/Box Sync")
}
source('./cptac2p_analysis/phospho_network/phospho_network_shared.R')
source('./cptac2p_analysis/phospho_network/phospho_network_plotting.R')
# set variables -----------------------------------------------------------
reg_nonNA <- 20
size <- 83
cancers2process <- c("BRCA", "OV", "CO", "UCEC", "CCRCC")
# gather regression results ----------------------------------------------------------------
fdr_thres <- 0.1
file_path_tmp <- paste0("./cptac2p/analysis_results/phospho_network/regression/tables/generate_regression_regulated_uniq_marked/", "regression_size", size, "_FDR", fdr_thres, "_detected_in_", paste0(cancers2process, collapse = "_"),".txt")
sup_tab <- fread(input = file_path_tmp, data.table = F)
sup_tab$pair_pro <- paste0(sup_tab$GENE, ":", sup_tab$SUB_GENE)
# bussiness ---------------------------------------------------------------
## For each regulation mode (cis / trans), plot which phosphosites of each kinase-substrate
## protein pair are regulated in which cancers, restricted to pairs involving a significantly
## mutated gene (SMG). One PDF is written per mode.
## NOTE(review): depends on sup_tab, get_SMG_by_cancer(), order_cancer_rev(), color_cancers2,
## theme_nogrid(), makeOutDir(), and resultD defined earlier / in the sourced helper files.
for (SELF in c("cis", "trans")) {
  tab_tmp <- sup_tab[sup_tab$SELF == SELF,]
  ## per protein pair and cancer: number of distinct regulated phosphosite pairs
  pair_pro_cancer_summary <- data.frame(table(unique(tab_tmp[tab_tmp$regulated,c("pair", "pair_pro", "Cancer", "regulated", "SELF")])[,c("pair_pro", "Cancer")]))
  pair_pro_cancer_summary <- pair_pro_cancer_summary[pair_pro_cancer_summary$Freq > 0,]
  ## auto-printing is disabled inside for loops, so print() explicitly (bare pipe was a silent no-op)
  print(pair_pro_cancer_summary %>%
          arrange(-Freq) %>%
          head())
  pair_cancer_summary <- unique(tab_tmp[tab_tmp$regulated & tab_tmp$pair_pro %in% pair_pro_cancer_summary$pair_pro, c("pair_pro", "Cancer", "SUB_MOD_RSD", "pair", "GENE", "SUB_GENE")])
  pair_cancer_summary <- merge(pair_cancer_summary, pair_pro_cancer_summary, by = c("pair_pro", "Cancer"), all.x = TRUE)
  print(head(pair_cancer_summary))
  tab2p <- pair_cancer_summary
  ## keep rows where the kinase or the substrate is an SMG in that cancer
  tab2p$is.SUB_GENE.smg <- get_SMG_by_cancer(gene_vector = tab2p[, "SUB_GENE"], cancer_vector = tab2p$Cancer)
  tab2p$is.GENE.smg <- get_SMG_by_cancer(gene_vector = tab2p[, "GENE"], cancer_vector = tab2p$Cancer)
  tab2p <- tab2p[tab2p$is.GENE.smg | tab2p$is.SUB_GENE.smg,]
  ## each pair's bar totals 1: every regulated site gets an equal-length stacked segment
  tab2p$bar_len <- 1/tab2p$Freq
  tab2p$id <- paste0(tab2p$pair_pro, ":", tab2p$Cancer)
  tab2p <- tab2p[order(tab2p$id),]
  ## y position of each site label: center of its stacked segment
  y_print <- vector(mode = "numeric", length = nrow(tab2p))
  for (i in seq_len(nrow(tab2p))) {   # seq_len() is safe when tab2p has zero rows (1:0 is not)
    if (duplicated(tab2p$id)[i]) {
      y_print[i] <- y_print[i-1] + tab2p$bar_len[i]
    } else {
      y_print[i] <- tab2p$bar_len[i]/2
    }
  }
  tab2p$y_print <- y_print
  tab2p$Cancer <- order_cancer_rev(tab2p$Cancer)
  p <- ggplot()
  p <- p + geom_bar(data = tab2p, mapping = aes(x = pair_pro, y = bar_len, fill = Cancer), stat = "identity", position = "stack", color = "black")
  p <- p + geom_text(data = tab2p, mapping = aes(x = pair_pro, y = y_print, label = SUB_MOD_RSD), size = 2.5)
  p <- p + scale_fill_manual(values = color_cancers2)
  p <- p + facet_grid(SUB_GENE~Cancer, drop=TRUE, shrink = TRUE, space = "free", scales = "free")
  p <- p + coord_flip()
  p <- p + theme_nogrid()
  p <- p + theme(axis.text.x = element_blank(), axis.ticks = element_blank(),
                 axis.title.y = element_blank(), axis.title.x = element_blank(),
                 strip.text.y = element_text(size = 10, angle = 0),
                 strip.background = element_rect(fill = "white", color = "white"),
                 panel.spacing.y = unit(0, "lines"),
                 panel.spacing.x = unit(0, "lines"))
  p <- p + guides(fill = FALSE)
  print(p)   # explicit print() -- bare `p` does nothing inside a loop
  fn <- paste0(makeOutDir(resultD = resultD), "cancer_specific_", SELF, "_regulated_pairs_in_SMGs.pdf")
  ## pass plot = p explicitly: ggsave()'s default last_plot() is unreliable inside loops
  ggsave(filename = fn, plot = p, width = 4.5, height = 5)
}
stop()
# mut_cnv_cans %>%
# filter(GENE == "CTNNB1", SUB_GENE %in% c("PRKD1", "PRKCD", "PRKACB", "PRKACA", "PAK4", "GSK3B"), cancer == "CO", p < 0.1) %>%
# arrange(p)
# ## PRKD1
#
# mut_cnv_cans %>%
# filter(SUB_GENE %in% c("GSK3B"), cancer == "CO") %>%
# arrange(p)
#
# mut_cnv_cans %>%
# filter(GENE == "CTNNB1", SUB_GENE %in% c("PRKD1", "PRKCD", "PRKACB", "PRKACA", "PAK4", "GSK3B"), cancer == "UCEC", p < 0.05) %>%
# arrange(p)
# ## confusing down-regualtion
#
# mut_cnv_cans %>%
# filter(GENE == "RB1", SUB_GENE %in% c("PRKAA1", "PPP1CB", "CDK18", "CDK1"), cancer == "OV") %>%
# arrange(p)
#
# mut_cnv_cans %>%
# filter(GENE == "RB1", SUB_GENE %in% c("PRKAA1", "PPP1CB", "CDK18", "CDK1"), cancer == "BRCA") %>%
# arrange(p)
#
# mut_cnv_cans %>%
# filter(GENE == "TP53", SUB_GENE %in% c("IKBKB"), cancer == "OV") %>%
# arrange(p)
#
# mut_cnv_cans %>%
# filter(GENE == "PTEN", SUB_GENE %in% c("CREB1"), cancer == "CCRCC") %>%
# arrange(p)
#
# mut_cnv_cans %>%
# filter(GENE == "MAP2K4", SUB_GENE %in% c("PAK1"), cancer == "BRCA") %>%
# arrange(p)
|
\name{huePosition}
\alias{huePosition}
\title{Munsell Hue Position for Soil Color Description}
\description{Munsell hues are typically arranged from 5R to 5PB in Munsell soil color books. This function matches a vector of Munsell hues to the position in this arrangement of 29 hues.}
\usage{
huePosition(x, returnHues=FALSE)
}
\arguments{
\item{x}{character vector of hues, e.g. '10YR'}
\item{returnHues}{logical, should the unique set of Munsell hues used for ordering be returned? See details.}
}
\details{This function is fully vectorized.}
\value{A vector of integer hue positions is typically returned, of the same length and order as \code{x}. If \code{returnHues} is TRUE, then the hue names and ordering is returned and \code{x} is ignored.
}
\references{
https://www.nrcs.usda.gov/wps/portal/nrcs/detail/soils/ref/?cid=nrcs142p2_053569
}
\author{D.E. Beaudette}
\seealso{
\code{\link{colorContrast}}
}
\examples{
# get hue ordering for setting levels of a factor
huePosition(x=NULL, returnHues=TRUE)
# get position of the '10YR' hue (7)
huePosition(x='10YR')
}
\keyword{ manip }
| /man/huePosition.Rd | no_license | rsbivand/aqp | R | false | false | 1,103 | rd | \name{huePosition}
\alias{huePosition}
\title{Munsell Hue Position for Soil Color Description}
\description{Munsell hues are typically arranged from 5R to 5PB in Munsell soil color books. This function matches a vector of Munsell hues to the position in this arrangement of 29 hues.}
\usage{
huePosition(x, returnHues=FALSE)
}
\arguments{
\item{x}{character vector of hues, e.g. '10YR'}
\item{returnHues}{logical, should the unique set of Munsell hues used for ordering be returned? See details.}
}
\details{This function is fully vectorized.}
\value{A vector of integer hue positions is typically returned, of the same length and order as \code{x}. If \code{returnHues} is TRUE, then the hue names and ordering is returned and \code{x} is ignored.
}
\references{
https://www.nrcs.usda.gov/wps/portal/nrcs/detail/soils/ref/?cid=nrcs142p2_053569
}
\author{D.E. Beaudette}
\seealso{
\code{\link{colorContrast}}
}
\examples{
# get hue ordering for setting levels of a factor
huePosition(x=NULL, returnHues=TRUE)
# get position of the '10YR' hue (7)
huePosition(x='10YR')
}
\keyword{ manip }
|
06e5a2f2bf894df70f5ad92c00d16280 tlc02-uniform-depth-27.qdimacs 6413 16904 | /code/dcnf-ankit-optimized/Results/QBFLIB-2018/A1/Database/Miller-Marin/trafficlight-controller/tlc02-uniform-depth-27/tlc02-uniform-depth-27.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 74 | r | 06e5a2f2bf894df70f5ad92c00d16280 tlc02-uniform-depth-27.qdimacs 6413 16904 |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/is.na.R
\name{is.na}
\alias{is.na}
\title{fun_name}
\usage{
is.na(params)
}
\arguments{
\item{param}{fun_name}
}
\description{
kolejna funkcja podmieniona
}
\keyword{Ktos}
\keyword{Odwiedzam}
\keyword{beka}
\keyword{chlop}
\keyword{czlowieka,}
\keyword{kaleka.}
\keyword{krecona}
\keyword{mlody}
\keyword{nim}
\keyword{steka,jest}
\keyword{tu}
\keyword{z}
| /man/is.na.Rd | no_license | granatb/RapeR | R | false | true | 434 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/is.na.R
\name{is.na}
\alias{is.na}
\title{fun_name}
\usage{
is.na(params)
}
\arguments{
\item{param}{fun_name}
}
\description{
kolejna funkcja podmieniona
}
\keyword{Ktos}
\keyword{Odwiedzam}
\keyword{beka}
\keyword{chlop}
\keyword{czlowieka,}
\keyword{kaleka.}
\keyword{krecona}
\keyword{mlody}
\keyword{nim}
\keyword{steka,jest}
\keyword{tu}
\keyword{z}
|
# Matrix basics - solved example 2.8.3
# Build a labelled 3x2 matrix and demonstrate common summary operations.

# Row/column labels (plain variables named so they do not shadow
# base::rownames()/base::colnames(), which the original code did).
row_labels <- c("A", "B", "C")
col_labels <- c("c1", "c2")

# 3x2 matrix, filled column by column, with named dimensions.
# (A first, unlabelled matrix in the original was dead code - it was
# immediately overwritten - so it has been removed.)
X <- matrix(
  c(2, 4, 3, 1, 5, 7),
  nrow = 3,
  ncol = 2, dimnames = list(row_labels, col_labels))
print(X)
# The class and attributes of X confirm it is a matrix of three rows and two columns
class(X)
attributes(X)
# Accessing the element in the 3rd row, 2nd column
print(X[3, 2])
# Sum of elements row wise
rowSums(X)
# Sum of elements column wise
colSums(X)
# Mean of elements row wise
rowMeans(X)
# Mean of elements column wise
colMeans(X)
# Transpose of the matrix
print(t(X))
| /Ex.2_8_3.r | no_license | himanshu6980/RSolvedProblems | R | false | false | 723 | r | #matrice simple operations solved example 2.8.3
#create a matrice
X = matrix(
c(9, 4, 3, 1, 8, 12),
nrow=3,
ncol=2)
# Define the column and row names.
rownames = c("A", "B", "C")
colnames = c("c1", "c2")
X = matrix(
c(2, 4, 3, 1, 5, 7),
nrow=3,
ncol=2,dimnames = list(rownames, colnames))
print(X)
# The class and attributes of X indicate that it is a matrix of three rows and two columns
class(X)
attributes(X)
#Accessing elements of 3rd row and 2 column
print(X[3,2])
# Sum of elements row wisw
rowSums(X)
# Sum of elements column wisw
colSums(X)
# Mean of elements row wisw
rowMeans(X)
# Mean of elements coulmn wisw
colMeans(X)
# transpose of matrix
print(t(X))
|
# Round-trip ms_lines() across every supported spatial class (geojson,
# sp, sf, sfc) and check each output agrees with the canonical GeoJSON result.
test_that("ms_lines works with all classes", {
  out_json <- ms_lines(innerlines_poly)
  expect_s3_class(out_json, "json")
  expect_snapshot_value(out_json, style = "json2")
  # A bare (unclassed) character string must still be accepted as geojson.
  expect_s3_class(ms_lines(unclass(innerlines_poly)), "geojson")
  expected_sp <- GeoJSON_to_sp(out_json)
  expect_equivalent(ms_lines(innerlines_poly_spdf),expected_sp)
  expect_equivalent(ms_lines(innerlines_poly_sp), as(expected_sp, "SpatialLines"))
  expected_sf <- read_sf(unclass(out_json))
  expected_sfc <- st_geometry(expected_sf)
  expect_equivalent(st_geometry(ms_lines(innerlines_poly_sf)), expected_sfc)
  expect_equivalent(ms_lines(innerlines_poly_sfc), expected_sfc)
})
# The optional `fields` argument should carry the named attributes through
# to the line output.
test_that("ms_lines works with fields specified", {
  out_json <- ms_lines(innerlines_poly, "foo")
  expect_s3_class(out_json, "geojson")
  expect_snapshot_value(out_json, style = "json2")
  expected_sp <- GeoJSON_to_sp(out_json)
  expect_equivalent(ms_lines(innerlines_poly_spdf, "foo"), expected_sp)
  expect_equivalent(ms_lines(innerlines_poly_sf, "foo")$RANK, c(2,2,1,1,0,0,0,0))
})
# Invalid inputs and argument combinations must fail with clear messages.
test_that("ms_lines errors correctly", {
  expect_error(ms_lines('{foo: "bar"}'), "Input is not valid geojson")
  # For an unknown field on raw geojson the V8 backend surfaces the problem
  # as a warning about an empty response rather than an error, so assert
  # the warning here instead of an error.
  expect_warning(ms_lines(innerlines_poly, "bar"), "The command returned an empty response")
  expect_error(ms_lines(innerlines_poly_spdf, "bar"), "not all fields specified exist in input data")
  expect_error(ms_lines(innerlines_poly, 1), "fields must be a character vector")
  expect_error(ms_lines(innerlines_poly, force_FC = "true"), "force_FC must be TRUE or FALSE")
  expect_error(ms_lines(innerlines_poly_sfc, "foo"), "Do not specify fields for sfc classes")
})
# These cases shell out to the system mapshaper binary, so they only run
# when it is installed.
test_that("ms_innerlines works with sys = TRUE", {
  skip_if_not(has_sys_mapshaper())
  expect_s3_class(ms_lines(innerlines_poly, sys = TRUE), "geojson")
  expect_snapshot_value(ms_lines(innerlines_poly, sys = TRUE), style = "json2")
  expect_s4_class(ms_lines(innerlines_poly_spdf, sys = TRUE), "SpatialLinesDataFrame")
  expect_s3_class(ms_lines(innerlines_poly_sf, sys = TRUE), "sf")
})
| /tests/testthat/test-lines.R | permissive | ateucher/rmapshaper | R | false | false | 2,079 | r | test_that("ms_lines works with all classes", {
out_json <- ms_lines(innerlines_poly)
expect_s3_class(out_json, "json")
expect_snapshot_value(out_json, style = "json2")
expect_s3_class(ms_lines(unclass(innerlines_poly)), "geojson")
expected_sp <- GeoJSON_to_sp(out_json)
expect_equivalent(ms_lines(innerlines_poly_spdf),expected_sp)
expect_equivalent(ms_lines(innerlines_poly_sp), as(expected_sp, "SpatialLines"))
expected_sf <- read_sf(unclass(out_json))
expected_sfc <- st_geometry(expected_sf)
expect_equivalent(st_geometry(ms_lines(innerlines_poly_sf)), expected_sfc)
expect_equivalent(ms_lines(innerlines_poly_sfc), expected_sfc)
})
test_that("ms_lines works with fields specified", {
out_json <- ms_lines(innerlines_poly, "foo")
expect_s3_class(out_json, "geojson")
expect_snapshot_value(out_json, style = "json2")
expected_sp <- GeoJSON_to_sp(out_json)
expect_equivalent(ms_lines(innerlines_poly_spdf, "foo"), expected_sp)
expect_equivalent(ms_lines(innerlines_poly_sf, "foo")$RANK, c(2,2,1,1,0,0,0,0))
})
test_that("ms_lines errors correctly", {
expect_error(ms_lines('{foo: "bar"}'), "Input is not valid geojson")
# Don't test this as the V8 error throws a warning
expect_warning(ms_lines(innerlines_poly, "bar"), "The command returned an empty response")
expect_error(ms_lines(innerlines_poly_spdf, "bar"), "not all fields specified exist in input data")
expect_error(ms_lines(innerlines_poly, 1), "fields must be a character vector")
expect_error(ms_lines(innerlines_poly, force_FC = "true"), "force_FC must be TRUE or FALSE")
expect_error(ms_lines(innerlines_poly_sfc, "foo"), "Do not specify fields for sfc classes")
})
test_that("ms_innerlines works with sys = TRUE", {
skip_if_not(has_sys_mapshaper())
expect_s3_class(ms_lines(innerlines_poly, sys = TRUE), "geojson")
expect_snapshot_value(ms_lines(innerlines_poly, sys = TRUE), style = "json2")
expect_s4_class(ms_lines(innerlines_poly_spdf, sys = TRUE), "SpatialLinesDataFrame")
expect_s3_class(ms_lines(innerlines_poly_sf, sys = TRUE), "sf")
})
|
testlist <- list(G = numeric(0), Rn = numeric(0), atmp = c(3.97314911878724e-307, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), relh = c(1.46950620900256e+302, 4.11932782999429e-175, -3.85515401974544e+79, -3.02137085628715e+143, -1.07335709985308e+237, 1.20695523931594e-309, 3.32562378928678e+80, -1.04944149130577e-291, -6.66433666280476e+260, -1.24299680236504e+248, 9.70815500676051e+204, 4.46572320545082e-23, -1.13853964838196e+217, 95.7774360421032, 2.0018737059126e-28, -4636800105173434, 1.65447250389292e-256, -2.30374790479512e+88, 9.31444420548792e+294, 1.87140051912765e+293, 7.81174850164908e+153, -1.81388628605987e-210, 2.97417034753781e-112, 3.07889205700993e+72, -5.68358142431207e+115, -1.49905137588813e-296, -4.83607699504741e+296, -4.39048939437592e-283, 6.14411608709023e-73, -7.97009455945399e-175, -7.74871223767381e-132, 4.16882816770762e+216, 1.77638799941844e-103, 3.10673888773823e+67, 7.78963466942964e+235, -3.58131929196381e+99, -0.000144958566634, -1.97272183211855e+299, -4.80684530567003e-211, 1.27171785317634e+32, 7.27866839395753e-304, -4.03745792148629e+247, 6.98516021012687e+303, -1.47416531241142e-29, -9.26916759452804e-30, 2.80442413482245e+93, -3.49120966287497e+274, -1.64918989358022e+230, -6.65976989513026e-283, 4.42844269247337e-45, 1.98141864604823e-95, -2.80316332377215e+114, 3.39496965625457e+134, -1.15574798364676e+282, -4.86507829573234e+261, -1.12181685914956e-204, 4.83444858402713e-21, 4.44411230227823e-288, 1.74273204902173e-84, 3.6354008294539e-305), temp = c(1.4174931883648e-311, -9.27191279380401e-227, -3.30454338512553e-220, 0.00326457501838524, -4.11828281046168e-243, -1.95893925610339e-77, -7.57690586869615e+160, 1.77288451463919e+81, 7.30351788343351e+245, 1.14935825540514e+262, 9.09252021533702e-172, 1.65646662424464e-91, 2.77067322468006e+114, 6.44719590123194e+27, -1.82639555575468e-07, 
-4.2372858822964e-119, -1.19043356885614e+85, 3.31651557487312e-262, 1.82363221083299e-238, 4.35812421290471e+289, 1.11765367033464e-296))
result <- do.call(meteor:::ET0_PriestleyTaylor,testlist)
str(result) | /meteor/inst/testfiles/ET0_PriestleyTaylor/AFL_ET0_PriestleyTaylor/ET0_PriestleyTaylor_valgrind_files/1615844774-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 2,233 | r | testlist <- list(G = numeric(0), Rn = numeric(0), atmp = c(3.97314911878724e-307, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), relh = c(1.46950620900256e+302, 4.11932782999429e-175, -3.85515401974544e+79, -3.02137085628715e+143, -1.07335709985308e+237, 1.20695523931594e-309, 3.32562378928678e+80, -1.04944149130577e-291, -6.66433666280476e+260, -1.24299680236504e+248, 9.70815500676051e+204, 4.46572320545082e-23, -1.13853964838196e+217, 95.7774360421032, 2.0018737059126e-28, -4636800105173434, 1.65447250389292e-256, -2.30374790479512e+88, 9.31444420548792e+294, 1.87140051912765e+293, 7.81174850164908e+153, -1.81388628605987e-210, 2.97417034753781e-112, 3.07889205700993e+72, -5.68358142431207e+115, -1.49905137588813e-296, -4.83607699504741e+296, -4.39048939437592e-283, 6.14411608709023e-73, -7.97009455945399e-175, -7.74871223767381e-132, 4.16882816770762e+216, 1.77638799941844e-103, 3.10673888773823e+67, 7.78963466942964e+235, -3.58131929196381e+99, -0.000144958566634, -1.97272183211855e+299, -4.80684530567003e-211, 1.27171785317634e+32, 7.27866839395753e-304, -4.03745792148629e+247, 6.98516021012687e+303, -1.47416531241142e-29, -9.26916759452804e-30, 2.80442413482245e+93, -3.49120966287497e+274, -1.64918989358022e+230, -6.65976989513026e-283, 4.42844269247337e-45, 1.98141864604823e-95, -2.80316332377215e+114, 3.39496965625457e+134, -1.15574798364676e+282, -4.86507829573234e+261, -1.12181685914956e-204, 4.83444858402713e-21, 4.44411230227823e-288, 1.74273204902173e-84, 3.6354008294539e-305), temp = c(1.4174931883648e-311, -9.27191279380401e-227, -3.30454338512553e-220, 0.00326457501838524, -4.11828281046168e-243, 
-1.95893925610339e-77, -7.57690586869615e+160, 1.77288451463919e+81, 7.30351788343351e+245, 1.14935825540514e+262, 9.09252021533702e-172, 1.65646662424464e-91, 2.77067322468006e+114, 6.44719590123194e+27, -1.82639555575468e-07, -4.2372858822964e-119, -1.19043356885614e+85, 3.31651557487312e-262, 1.82363221083299e-238, 4.35812421290471e+289, 1.11765367033464e-296))
result <- do.call(meteor:::ET0_PriestleyTaylor,testlist)
str(result) |
#' PDF utilities
#'
#' Utilities based on libpoppler for extracting text, fonts, attachments
#' and metadata from a pdf file.
#'
#' Poppler is pretty verbose when encountering minor errors in PDF files,
#' in especially \code{\link{pdf_text}}. These messages are usually safe
#' to ignore, use \code{\link{suppressMessages}} to hide them altogether.
#'
#' @export
#' @param pdf file path or raw vector with pdf data
#' @param opw string with owner password to open pdf
#' @param upw string with user password to open pdf
#' @useDynLib pdftools
#' @rdname pdftools
#' @aliases pdftools
#' @importFrom Rcpp sourceCpp
#' @family pdftools
#' @examples # Just a random pdf file
#' file.copy(file.path(Sys.getenv("R_DOC_DIR"), "NEWS.pdf"), "news.pdf")
#' info <- pdf_info("news.pdf")
#' text <- pdf_text("news.pdf")
#' fonts <- pdf_fonts("news.pdf")
#' files <- pdf_attachments("news.pdf")
pdf_info <- function(pdf, opw = "", upw = "") {
  # Each wrapper below normalises the input (path/URL/connection/raw) to a
  # raw vector via loadfile() and delegates to the compiled poppler binding.
  poppler_pdf_info(loadfile(pdf), opw, upw)
}

#' @rdname pdftools
#' @export
pdf_text <- function(pdf, opw = "", upw = "") {
  # Extract page text; returns one element per page.
  poppler_pdf_text(loadfile(pdf), opw, upw)
}

#' @rdname pdftools
#' @export
pdf_fonts<- function(pdf, opw = "", upw = "") {
  # List fonts used in the document.
  poppler_pdf_fonts(loadfile(pdf), opw, upw)
}

#' @rdname pdftools
#' @export
pdf_attachments<- function(pdf, opw = "", upw = "") {
  # Extract embedded attachment files.
  poppler_pdf_files(loadfile(pdf), opw, upw)
}

#' @rdname pdftools
#' @export
pdf_toc<- function(pdf, opw = "", upw = "") {
  # Extract the table of contents (outline/bookmarks).
  poppler_pdf_toc(loadfile(pdf), opw, upw)
}
# Normalise PDF input to a raw vector.
#
# Accepts a file path, an http(s) URL, a connection, or a raw vector, and
# returns the document content as a raw vector. Errors if the input cannot
# be interpreted as one of those.
loadfile <- function(pdf){
  if(is.character(pdf)){
    if(grepl("^https?://", pdf[1])){
      # Remote document: open a URL connection, drained by the loop below.
      pdf <- url(pdf)
    } else {
      # Local file: read the whole file in one call.
      path <- normalizePath(pdf, mustWork = TRUE)
      pdf <- readBin(path, raw(), file.info(path)$size)
    }
  }
  if(inherits(pdf, "connection")){
    con <- pdf
    if(!isOpen(con)){
      open(con, "rb")
      # Only close connections we opened ourselves; add = TRUE so we never
      # clobber handlers registered earlier.
      on.exit(close(con), add = TRUE)
    }
    # Accumulate 1 MB chunks in a list and concatenate once at the end;
    # growing a raw vector with c() inside the loop is O(n^2).
    chunks <- list()
    while(length(buf <- readBin(con, raw(), 1e6)) > 0){
      chunks[[length(chunks) + 1L]] <- buf
    }
    # c(raw(), NULL) yields raw(0), so an empty connection still returns raw.
    pdf <- c(raw(), unlist(chunks, use.names = FALSE))
  }
  if(!is.raw(pdf))
    stop("Argument pdf must be a path or raw vector with PDF data")
  pdf
}
| /pdftools/R/tools.R | no_license | ingted/R-Examples | R | false | false | 2,054 | r | #' PDF utilities
#'
#' Utilities based on libpoppler for extracting text, fonts, attachments
#' and metadata from a pdf file.
#'
#' Poppler is pretty verbose when encountering minor errors in PDF files,
#' in especially \code{\link{pdf_text}}. These messages are usually safe
#' to ignore, use \code{\link{suppressMessages}} to hide them alltogether.
#'
#' @export
#' @param pdf file path or raw vector with pdf data
#' @param opw string with owner password to open pdf
#' @param upw string with user password to open pdf
#' @useDynLib pdftools
#' @rdname pdftools
#' @aliases pdftools
#' @importFrom Rcpp sourceCpp
#' @family pdftools
#' @examples # Just a random pdf file
#' file.copy(file.path(Sys.getenv("R_DOC_DIR"), "NEWS.pdf"), "news.pdf")
#' info <- pdf_info("news.pdf")
#' text <- pdf_text("news.pdf")
#' fonts <- pdf_fonts("news.pdf")
#' files <- pdf_attachments("news.pdf")
pdf_info <- function(pdf, opw = "", upw = "") {
poppler_pdf_info(loadfile(pdf), opw, upw)
}
#' @rdname pdftools
#' @export
pdf_text <- function(pdf, opw = "", upw = "") {
poppler_pdf_text(loadfile(pdf), opw, upw)
}
#' @rdname pdftools
#' @export
pdf_fonts<- function(pdf, opw = "", upw = "") {
poppler_pdf_fonts(loadfile(pdf), opw, upw)
}
#' @rdname pdftools
#' @export
pdf_attachments<- function(pdf, opw = "", upw = "") {
poppler_pdf_files(loadfile(pdf), opw, upw)
}
#' @rdname pdftools
#' @export
pdf_toc<- function(pdf, opw = "", upw = "") {
poppler_pdf_toc(loadfile(pdf), opw, upw)
}
loadfile <- function(pdf){
if(is.character(pdf)){
if(grepl("^https?://", pdf[1])){
pdf <- url(pdf)
} else {
path <- normalizePath(pdf, mustWork = TRUE)
pdf <- readBin(path, raw(), file.info(path)$size)
}
}
if(inherits(pdf, "connection")){
con <- pdf
pdf <- raw()
if(!isOpen(con)){
open(con, "rb")
on.exit(close(con))
}
while(length(buf <- readBin(con, raw(), 1e6))){
pdf <- c(pdf, buf)
}
}
if(!is.raw(pdf))
stop("Argument pdf must be a path or raw vector with PDF data")
pdf
}
|
efa <- function(x, ncomp)
{
  # Evolving factor analysis: track the leading ncomp singular values of
  # growing (forward) and shrinking (backward) row windows of x.
  n_obj <- nrow(x)
  fwd <- matrix(0, n_obj, ncomp)
  bwd <- matrix(0, n_obj, ncomp)
  # Forward pass: mean-centred windows x[1:k, ], once three rows are available.
  for (k in 3:n_obj) {
    fwd[k, ] <- svd(scale(x[1:k, ], scale = FALSE))$d[1:ncomp]
  }
  # Backward pass: windows x[k:n_obj, ], again at least three rows wide.
  for (k in (n_obj - 2):1) {
    bwd[k, ] <- svd(scale(x[k:n_obj, ], scale = FALSE))$d[1:ncomp]
  }
  # Pure-component profiles: elementwise minimum of each forward curve and
  # the corresponding (reversed) backward curve.
  paired <- array(c(fwd, bwd[, ncomp:1]), c(n_obj, ncomp, 2))
  list(forward = fwd, backward = bwd,
       pure.comp = apply(paired, c(1, 2), min))
}
# Orthogonal projection approach: pick the ncomp most mutually dissimilar
# rows of x as pure-component estimates. Dissimilarity is the determinant of
# the Gram matrix of the current reference set augmented with a candidate row.
# Returns list(pure.comp = p x ncomp matrix of selected (normalised) rows,
#              selected  = indices of the chosen rows).
opa <- function(x, ncomp)
{
  # Start from the mean spectrum, scaled to unit length.
  Xref <- colMeans(x)
  Xref <- Xref / sqrt(sum(crossprod(Xref)))   # scaling
  selected <- rep(0, ncomp)
  for (i in seq_len(ncomp)) {
    Xs <- lapply(seq_len(nrow(x)),
                 function(ii, xx, xref) rbind(xref, xx[ii,]),
                 x, Xref)
    # det of the Gram matrix: volume spanned by reference set + candidate.
    dissims <- vapply(Xs, function(xx) det(tcrossprod(xx)), numeric(1))
    selected[i] <- which.max(dissims)
    newX <- x[selected[i],]
    if (i == 1) {
      # BUG FIX: the original divided by sqrt(crossprod(newX)), a 1x1 matrix,
      # which errors ("dims do not match the length of object") whenever
      # ncol(x) > 1. sum() reduces it to a scalar, matching the other branches.
      Xref <- newX / sqrt(sum(crossprod(newX)))
    } else {
      Xref <- rbind(Xref, newX / sqrt(sum(crossprod(newX))))
    }
  }
  dimnames(Xref) <- NULL
  list(pure.comp = t(Xref), selected = selected)
}
mcr <- function(x, init, what = c("row", "col"),
                convergence = 1e-8, maxit = 50)
{
  # Multivariate curve resolution by alternating least squares:
  # factor x into a contribution matrix C and a spectra matrix S.
  # `init` seeds either the column profiles (what = "col") or the row
  # profiles (what = "row"). ginv() is the Moore-Penrose pseudo-inverse
  # (assumed attached from MASS by the package).
  what <- match.arg(what)
  if (what == "col") {
    conc <- init
    spec <- ginv(conc) %*% x
  } else {
    spec <- init
    conc <- x %*% ginv(spec)
  }
  # rms[1] holds the starting residual; one extra slot per iteration.
  rms <- rep(NA, maxit + 1)
  rms[1] <- sqrt(mean((x - conc %*% spec)^2))
  for (iter in 1:maxit) {
    # Alternate the two least-squares updates.
    conc <- x %*% ginv(spec)
    spec <- ginv(conc) %*% x
    resids <- x - conc %*% spec
    rms[iter + 1] <- sqrt(mean(resids^2))
    # Stop once the improvement in RMS falls below the tolerance.
    if ((rms[iter] - rms[iter + 1]) < convergence) break
  }
  list(C = conc, S = spec, resids = resids, rms = rms[!is.na(rms)])
}
| /ChemometricsWithR/R/MCR.R | no_license | ingted/R-Examples | R | false | false | 1,592 | r | efa <- function(x, ncomp)
{
nx <- nrow(x)
Tos <- Fros <- matrix(0, nx, ncomp)
for (i in 3:nx)
Tos[i,] <- svd(scale(x[1:i,], scale = FALSE))$d[1:ncomp]
for (i in (nx-2):1)
Fros[i,] <- svd(scale(x[i:nx,], scale = FALSE))$d[1:ncomp]
Combos <- array(c(Tos, Fros[,ncomp:1]), c(nx, ncomp, 2))
list(forward = Tos, backward = Fros,
pure.comp = apply(Combos, c(1,2), min))
}
opa <- function(x, ncomp)
{
Xref <- colMeans(x)
Xref <- Xref / sqrt(sum(crossprod(Xref))) # scaling
selected <- rep(0, ncomp)
for (i in 1:ncomp) {
Xs <- lapply(1:nrow(x),
function(ii, xx, xref) rbind(xref, xx[ii,]),
x, Xref)
dissims <- sapply(Xs, function(xx) det(tcrossprod(xx)))
selected[i] <- which.max(dissims)
newX <- x[selected[i],]
if (i == 1) {
Xref <- newX / sqrt(crossprod(newX))
} else {
Xref <- rbind(Xref, newX / sqrt(sum(crossprod(newX))))
}
}
dimnames(Xref) <- NULL
list(pure.comp = t(Xref), selected = selected)
}
mcr <- function(x, init, what = c("row", "col"),
convergence = 1e-8, maxit = 50)
{
what <- match.arg(what)
if (what == "col") {
CX <- init
SX <- ginv(CX) %*% x
} else {
SX <- init
CX <- x %*% ginv(SX)
}
rms <- rep(NA, maxit + 1)
rms[1] <- sqrt(mean((x - CX %*% SX)^2))
for (i in 1:maxit) {
CX <- x %*% ginv(SX)
SX <- ginv(CX) %*% x
resids <- x - CX %*% SX
rms[i+1] <- sqrt(mean(resids^2))
if ((rms[i] - rms[i+1]) < convergence) break;
}
list(C = CX, S = SX, resids = resids, rms = rms[!is.na(rms)])
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cde.r
\docType{package}
\name{cde}
\alias{cde}
\alias{cde-package}
\title{cde: Download Water Framework Directive (WFD) data from the
Environment Agency Catchment Data Explorer (CDE) website.}
\description{
Facilitates searching and download of the WFD-related
data for all waterbodies within the Environment Agency area (i.e. England).
The types of data that can be downloaded are: WFD status classification
data, Reasons for Not Achieving Good (RNAG) status, objectives set for
waterbodies, measures put in place to improve water quality and details
of associated protected areas. Default plots can also be produced from the
data downloaded (form of plot depends on data type).
}
\details{
The website that is accessed is:
\url{https://environment.data.gov.uk/catchment-planning/}.
The data accessed by and included within the package are made available
under the Open Government Licence v3.0
\url{https://www.nationalarchives.gov.uk/doc/open-government-licence/version/3/}
}
| /man/cde.Rd | no_license | cran/cde | R | false | true | 1,088 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cde.r
\docType{package}
\name{cde}
\alias{cde}
\alias{cde-package}
\title{cde: Download Water Framework Directive (WFD) data from the
Environment Agency Catchment Data Explorer (CDE) website.}
\description{
Facilitates searching and download of the WFD-related
data for all waterbodies within the Environment Agency area (i.e. England).
The types of data that can be downloaded are: WFD status classification
data, Reasons for Not Achieving Good (RNAG) status, objectives set for
waterbodies, measures put in place to improve water quality and details
of associated protected areas. Default plots can also be produced from the
data downloaded (form of plot depends on data type).
}
\details{
The website that is accessed is:
\url{https://environment.data.gov.uk/catchment-planning/}.
The data accessed by and included within the package are made available
under the Open Government Licence v3.0
\url{https://www.nationalarchives.gov.uk/doc/open-government-licence/version/3/}
}
|
# Dependencies: sas7bdat reads SAS datasets; dplyr/plyr for data wrangling.
# NOTE(review): plyr is attached after dplyr, so plyr masks same-named dplyr
# functions (mutate, rename, summarise, ...) - confirm each call below
# resolves to the intended package.
library("sas7bdat")
library("dplyr")
library("plyr")
# List SAS datasets in the current directory (result is not used below).
temp = list.files(pattern = ".sas7bdat")
# Import the SAS exposure datasets and cache them as .rda files.
# NOTE(review): these reads happen before setwd() below, so they rely on
# the starting working directory already containing the files - verify.
exp_analysis_elig <- read.sas7bdat(file = "exp_analysis_elig.sas7bdat")
exp_analysis_wide <- read.sas7bdat(file = "exp_analysis_wide.sas7bdat")
save(exp_analysis_elig, file = "exp_analysis_elig.rda")
save(exp_analysis_wide, file = "exp_analysis_wide.rda")
# Switch to the project data directory and reload the cached eligibility data.
setwd("G:/Air Pollution and Autism in Denmark/Data")
load(file = "exp_analysis_elig.rda")
# Data cleaning
elig = data.frame(exp_analysis_elig)
colnames(elig)
# Keep the analysis columns only (positions refer to the SAS export layout).
elig <- elig[,c(3:5,7,16:18,21,29,37,45:52,54,61,62,65)]
# Restrict to eligible records (not_eligible is NA for eligible subjects).
elig <- subset(elig, is.na(elig$not_eligible))
# Maternal age in five bands: <=18, 19-25, 26-30, 31-35, >35.
elig <- mutate(elig,m_age = ifelse(maternal_age <= 18,1,
                             ifelse(maternal_age %in% 19:25,2,
                                    ifelse(maternal_age %in% 26:30,3,
                                           ifelse(maternal_age %in% 31:35,4,5
                                           )))))
# Paternal age in the same five bands.
elig <- mutate(elig,f_age = ifelse(paternal_age <= 18,1,
                             ifelse(paternal_age %in% 19:25,2,
                                    ifelse(paternal_age %in% 26:30,3,
                                           ifelse(paternal_age %in% 31:35,4,5
                                           )))))
typeof(elig$maternal_smoking)
# Recode smoking: "00" -> "0"; "99" (unknown) -> NA, then make it numeric.
# NOTE(review): plyr::revalue() documents character replacements; passing NA
# here relies on it tolerating NA - verify the recode behaves as intended.
elig$maternal_smoking <- revalue(elig$maternal_smoking, c("00"="0","99"=NA))
elig$maternal_smoking <- as.numeric(as.character(elig$maternal_smoking))
typeof(elig$maternal_smoking)
# Any reported smoking -> 1, none -> 0, unknown stays NA.
elig <- mutate(elig,smoking = ifelse(maternal_smoking>0,1,
                                     ifelse(maternal_smoking==0,0,NA)))
# Collapse birth location codes: 1-3 -> 1, 4 -> 2, 5 -> 3.
elig$birth_location_g[elig$birth_location_g <= 3] <- 1
elig$birth_location_g[elig$birth_location_g == 4] <- 2
elig$birth_location_g[elig$birth_location_g == 5] <- 3
# Paternal ages coded -1 or 0 are missing.
elig$paternal_age[elig$paternal_age == -1] <- NA
elig$paternal_age[elig$paternal_age == 0] <- NA
# Model covariates as factors.
elig$birthyear <- as.factor(elig$birthyear)
elig$m_age <- as.factor(elig$m_age)
elig$f_age <- as.factor(elig$f_age)
elig$birth_location_g <- as.character(as.numeric(elig$birth_location_g))
elig$birth_location_g <- as.factor(elig$birth_location_g)
# Fit an adjusted logistic regression for one exposure within one window.
#
# period:   exposure-window label used to subset rows (e.g. "preg").
# exposure: name of the exposure column, as a string.
# data:     data frame holding the analysis variables. Defaults to the
#           global `elig` object for backward compatibility with the
#           original script (which read `elig` directly from the workspace).
#
# Returns a 1-row data frame: exp(coef) odds ratio for the exposure plus
# its 95% Wald confidence limits.
adjusted <- function(period, exposure, data = elig){
  elig_sub <- data[which(data$period == period), , drop = FALSE]
  # Logistic model of case status on the exposure, adjusted for birth year,
  # sex, maternal smoking, parental age groups and birth location.
  adj.model <- glm(elig_sub$case ~ elig_sub[[exposure]] + elig_sub$birthyear +
                     elig_sub$gender + elig_sub$maternal_smoking +
                     elig_sub$m_age + elig_sub$f_age + elig_sub$birth_location_g,
                   data = elig_sub, family = binomial(link = logit))
  # (A discarded summary(adj.model) call in the original was dead
  # computation and has been removed.)
  coef_exp <- data.frame(coef(adj.model))
  ci_exp <- data.frame(confint.default(adj.model))  # Wald (normal-theory) CI
  # Row 2 is the exposure term (row 1 is the intercept); exponentiate to
  # the odds-ratio scale.
  results <- exp(cbind(OR = coef_exp[2,], ci_exp[2,]))
  return(results)
}
# Exposure columns to screen: pregnancy-window, IQR-scaled pollutant levels.
exposure <- c("NO2_pregIQR","NOX_pregIQR","O3_pregIQR","CO_pregIQR","SO2_pregIQR",
              "PM10_pregIQR","PM2_5_pregIQR")
# Collect the adjusted OR and 95% CI for each pollutant, one row per exposure.
adjres_final <- data.frame(matrix(ncol = 3, nrow = 7))
colnames(adjres_final) <- c("OR","Lower_ci","Upper_ci")
rownames(adjres_final) <- c("NO2_pregIQR","NOX_pregIQR","O3_pregIQR","CO_pregIQR","SO2_pregIQR",
                            "PM10_pregIQR","PM2_5_pregIQR")
# Fit the adjusted model for each exposure in the pregnancy window.
for(i in 1:length(exposure)){
  adjres_final[i,] <- data.frame(adjusted(period = "preg", exposure = exposure[i]))
}
adjres_final
#Spline
| /DCS_Splines_qy.R | no_license | yanqi219/DCS_github | R | false | false | 3,268 | r | library("sas7bdat")
library("dplyr")
library("plyr")
temp = list.files(pattern = ".sas7bdat")
exp_analysis_elig <- read.sas7bdat(file = "exp_analysis_elig.sas7bdat")
exp_analysis_wide <- read.sas7bdat(file = "exp_analysis_wide.sas7bdat")
save(exp_analysis_elig, file = "exp_analysis_elig.rda")
save(exp_analysis_wide, file = "exp_analysis_wide.rda")
setwd("G:/Air Pollution and Autism in Denmark/Data")
load(file = "exp_analysis_elig.rda")
#Data cleanning
elig = data.frame(exp_analysis_elig)
colnames(elig)
elig <- elig[,c(3:5,7,16:18,21,29,37,45:52,54,61,62,65)]
elig <- subset(elig, is.na(elig$not_eligible))
elig <- mutate(elig,m_age = ifelse(maternal_age <= 18,1,
ifelse(maternal_age %in% 19:25,2,
ifelse(maternal_age %in% 26:30,3,
ifelse(maternal_age %in% 31:35,4,5
)))))
elig <- mutate(elig,f_age = ifelse(paternal_age <= 18,1,
ifelse(paternal_age %in% 19:25,2,
ifelse(paternal_age %in% 26:30,3,
ifelse(paternal_age %in% 31:35,4,5
)))))
typeof(elig$maternal_smoking)
elig$maternal_smoking <- revalue(elig$maternal_smoking, c("00"="0","99"=NA))
elig$maternal_smoking <- as.numeric(as.character(elig$maternal_smoking))
typeof(elig$maternal_smoking)
elig <- mutate(elig,smoking = ifelse(maternal_smoking>0,1,
ifelse(maternal_smoking==0,0,NA)))
elig$birth_location_g[elig$birth_location_g <= 3] <- 1
elig$birth_location_g[elig$birth_location_g == 4] <- 2
elig$birth_location_g[elig$birth_location_g == 5] <- 3
elig$paternal_age[elig$paternal_age == -1] <- NA
elig$paternal_age[elig$paternal_age == 0] <- NA
elig$birthyear <- as.factor(elig$birthyear)
elig$m_age <- as.factor(elig$m_age)
elig$f_age <- as.factor(elig$f_age)
elig$birth_location_g <- as.character(as.numeric(elig$birth_location_g))
elig$birth_location_g <- as.factor(elig$birth_location_g)
#Data analysis
adjusted <- function(period,exposure){
elig_sub <- elig[which(elig$period==period),]
adj.model <- glm(elig_sub$case ~ elig_sub[[exposure]]+elig_sub$birthyear+elig_sub$gender+elig_sub$maternal_smoking
+elig_sub$m_age+elig_sub$f_age+elig_sub$birth_location_g,
data = elig_sub, family = binomial(link = logit))
summary(adj.model)
coef_exp <- data.frame(coef(adj.model))
ci_exp <- data.frame(confint.default(adj.model))
results <- exp(cbind(OR = coef_exp[2,],ci_exp[2,]))
return(results)
}
exposure <- c("NO2_pregIQR","NOX_pregIQR","O3_pregIQR","CO_pregIQR","SO2_pregIQR",
"PM10_pregIQR","PM2_5_pregIQR")
adjres_final <- data.frame(matrix(ncol = 3, nrow = 7))
colnames(adjres_final) <- c("OR","Lower_ci","Upper_ci")
rownames(adjres_final) <- c("NO2_pregIQR","NOX_pregIQR","O3_pregIQR","CO_pregIQR","SO2_pregIQR",
"PM10_pregIQR","PM2_5_pregIQR")
for(i in 1:length(exposure)){
adjres_final[i,] <- data.frame(adjusted(period = "preg", exposure = exposure[i]))
}
adjres_final
#Spline
|
# Lazily bind the packaged data objects: delayedAssign() creates a promise,
# so the (potentially large) tables stored in the kgp namespace are only
# materialised on first access.  When tibble is installed, each object is
# exposed as a tibble for nicer printing; otherwise the raw object is used.
delayedAssign("kgpe", local({
  if (requireNamespace("tibble", quietly = TRUE)) {
    tibble::as_tibble(kgp:::kgpe)
  } else {
    kgp:::kgpe
  }
}))
delayedAssign("kgp3", local({
  if (requireNamespace("tibble", quietly = TRUE)) {
    tibble::as_tibble(kgp:::kgp3)
  } else {
    kgp:::kgp3
  }
}))
delayedAssign("kgpmeta", local({
  if (requireNamespace("tibble", quietly = TRUE)) {
    tibble::as_tibble(kgp:::kgpmeta)
  } else {
    kgp:::kgpmeta
  }
}))
delayedAssign("allmeta", local({
  if (requireNamespace("tibble", quietly = TRUE)) {
    tibble::as_tibble(kgp:::allmeta)
  } else {
    kgp:::allmeta
  }
}))
| /data/kgp.R | permissive | stephenturner/kgp | R | false | false | 621 | r | delayedAssign("kgpe", local({
if (requireNamespace("tibble", quietly = TRUE)) {
tibble::as_tibble(kgp:::kgpe)
} else {
kgp:::kgpe
}
}))
# Lazily bind the remaining packaged data objects: delayedAssign() creates a
# promise, so each table in the kgp namespace is only materialised on first
# access.  When tibble is installed, expose the object as a tibble for nicer
# printing; otherwise fall back to the raw stored object.
delayedAssign("kgp3", local({
  if (requireNamespace("tibble", quietly = TRUE)) {
    tibble::as_tibble(kgp:::kgp3)
  } else {
    kgp:::kgp3
  }
}))
delayedAssign("kgpmeta", local({
  if (requireNamespace("tibble", quietly = TRUE)) {
    tibble::as_tibble(kgp:::kgpmeta)
  } else {
    kgp:::kgpmeta
  }
}))
delayedAssign("allmeta", local({
  if (requireNamespace("tibble", quietly = TRUE)) {
    tibble::as_tibble(kgp:::allmeta)
  } else {
    kgp:::allmeta
  }
}))
#' Convert number of eggs to spawning biomass.
#'
#' Compute the factor that converts a count of Pacific Herring eggs to the
#' spawn index (i.e., biomass) in tonnes.
#'
#' @param omega Numeric. The number of eggs per kilogram of female spawners;
#'   from \code{\link{pars}}.
#' @param phi Numeric. The proportion of spawners that are female; from
#'   \code{\link{pars}}.
#' @importFrom Rdpack reprompt
#' @return Numeric. The conversion factor for eggs to spawn index in tonnes
#'   (i.e., biomass). Divide the number of eggs by the conversion factor to get
#'   biomass.
#' @seealso \code{\link{pars}}
#' @export
#' @examples
#' data(pars)
#' CalcEggConversion()
CalcEggConversion <- function(omega = pars$conversion$omega,
                              phi = pars$conversion$phi) {
  # eggs/kg female * proportion female * kg/tonne  ==  eggs per tonne
  omega * phi * 1000
} # End CalcEggConversion function
#' Calculate spawning biomass from spawn-on-kelp (SOK) harvest.
#'
#' Calculate spawning biomass in tonnes from spawn-on-kelp (SOK) harvest in
#' kilograms.
#'
#' @param SOK Numeric. Weight of spawn-on-kelp (SOK) harvest in kilograms.
#' @param nu Numeric. Proportion of SOK product that is kelp; from
#'   \code{\link{pars}}.
#' @param upsilon Numeric. SOK product weight increase due to brining as a
#'   proportion; from \code{\link{pars}}.
#' @param M Numeric. Average weight in kilograms of a fertilized egg; from
#'   \code{\link{pars}}.
#' @param theta Numeric. Egg conversion factor (eggs to biomass); from
#'   \code{\link{CalcEggConversion}}.
#' @importFrom Rdpack reprompt
#' @return Numeric. Spawning biomass in tonnes.
#' @seealso \code{\link{CalcEggConversion}} \code{\link{pars}}
#' @export
#' @examples
#' data(pars)
#' CalcBiomassSOK(SOK = 100)
CalcBiomassSOK <- function(SOK,
                           nu = pars$SOK$nu,
                           upsilon = pars$SOK$upsilon,
                           M = pars$SOK$M,
                           theta = CalcEggConversion()) {
  # Kilograms of eggs in the product: strip the kelp fraction (1 - nu) and
  # the weight gained through brining (divide by 1 + upsilon)
  eggKg <- SOK * (1 - nu) / (1 + upsilon)
  # Spawning biomass in tonnes: kg of eggs / (kg per egg * eggs per tonne)
  eggKg / (M * theta)
} # End CalcBiomassSOK
#' Calculate the surface spawn index.
#'
#' Calculate the Pacific Herring surface spawn index in tonnes.
#'
#' @param where List. Location of the Pacific Herring surface spawn database
#'   (see examples).
#' @param a Tibble. Table of geographic information indicating the subset of
#'   spawn survey observations to include in calculations; from
#'   \code{\link{LoadAreaData}}.
#' @param widths Tibble. Table of median region, section, and pool widths in
#'   metres (m); from \code{\link{GetWidth}}.
#' @param yrs Numeric vector. Year(s) to include in the calculations, usually
#'   starting in 1951.
#' @param intense Tibble. Table of spawn intensity categories and number of egg
#'   layers; from \code{\link{intensity}}.
#' @param intYrs Numeric vector. Years where intensity categories are used to
#'   determine egg layers.
#' @param rsYrs Numeric vector. Years where intensity needs to be re-scaled from
#'   5 to 9 categories.
#' @param alpha Numeric. Regression intercept; from \code{\link{pars}}
#'   \insertCite{SchweigertEtal1997}{SpawnIndex}.
#' @param beta Numeric. Regression slope; from \code{\link{pars}}
#'   \insertCite{SchweigertEtal1997}{SpawnIndex}.
#' @param theta Numeric. Egg conversion factor (eggs to biomass); from
#'   \code{\link{CalcEggConversion}}.
#' @importFrom RODBC odbcConnectAccess sqlFetch odbcClose
#' @importFrom dplyr select distinct rename left_join filter %>%
#' @importFrom tibble as_tibble
#' @importFrom stringr str_to_title
#' @importFrom gfiscamutils MeanNA SumNA
#' @importFrom tidyr replace_na
#' @importFrom Rdpack reprompt
#' @return List. The element \code{SI} is a tibble with surface spawn index
#'   (\code{SurfSI}) in tonnes by spawn number and year. The spawn number is the
#'   finest spatial scale at which we calculate the spawn index. Other
#'   information in this tibble comes from \code{a}: Region, Statistical Area,
#'   Section, and Location code.
#' @references \insertAllCited{}
#' @note The `spawn index' is a relative index of spawning biomass.
#' @seealso \code{\link{LoadAreaData}} \code{\link{GetWidth}}
#'   \code{\link{CalcEggConversion}} \code{\link{pars}} \code{\link{intensity}}
#' @export
#' @examples
#' dbLoc <- system.file("extdata", package = "SpawnIndex")
#' areaLoc <- list(
#'   loc = dbLoc, db = "HerringSpawn.mdb",
#'   fns = list(sections = "Sections", locations = "Location")
#' )
#' areas <- LoadAreaData(reg = "WCVI", where = areaLoc)
#' widthLoc <- list(
#'   loc = dbLoc, db = "HerringSpawn.mdb",
#'   fns = list(
#'     regionStd = "RegionStd", sectionStd = "SectionStd", poolStd = "PoolStd"
#'   )
#' )
#' barWidth <- GetWidth(where = widthLoc, a = areas)
#' data(pars)
#' data(intensity)
#' surfLoc <- list(
#'   loc = dbLoc, db = "HerringSpawn.mdb",
#'   fns = list(surface = "tSSSurface", allSpawn = "tSSAllspawn")
#' )
#' surfSpawn <- CalcSurfSpawn(
#'   where = surfLoc, a = areas, widths = barWidth, yrs = 2010:2015
#' )
#' surfSpawn$SI
CalcSurfSpawn <- function(where,
                          a,
                          widths,
                          yrs,
                          intense = intensity,
                          intYrs = yrs[yrs < 1979],
                          rsYrs = intYrs[intYrs < 1951],
                          alpha = pars$surface$alpha,
                          beta = pars$surface$beta,
                          theta = CalcEggConversion()) {
  # Establish connection with the Microsoft Access database
  accessDB <- RODBC::odbcConnectAccess(access.file = file.path(
    where$loc,
    where$db
  ))
  # Get a small subset of area data
  areasSm <- a %>%
    dplyr::select(SAR, Region, StatArea, Section, LocationCode, Pool) %>%
    dplyr::distinct() %>%
    tibble::as_tibble()
  # Load all spawn
  spawn <- RODBC::sqlFetch(channel = accessDB, sqtable = where$fns$allSpawn) %>%
    dplyr::rename(
      LocationCode = Loc_Code, SpawnNumber = Spawn_Number, WidthObs = Width
    ) %>%
    dplyr::mutate(Method = stringr::str_to_title(Method)) %>%
    dplyr::filter(Year %in% yrs, LocationCode %in% a$LocationCode) %>%
    dplyr::select(
      Year, LocationCode, SpawnNumber, Length, WidthObs, Method
    ) %>%
    tibble::as_tibble()
  # Extract relevant surface data; missing substrate layers/percents are
  # treated as zero cover
  surface <- RODBC::sqlFetch(
    channel = accessDB,
    sqtable = where$fns$surface
  ) %>%
    dplyr::rename(LocationCode = Loc_Code, SpawnNumber = Spawn_Number) %>%
    dplyr::filter(Year %in% yrs, LocationCode %in% a$LocationCode) %>%
    dplyr::left_join(y = areasSm, by = "LocationCode") %>%
    dplyr::left_join(
      y = spawn,
      by = c("Year", "LocationCode", "SpawnNumber")
    ) %>%
    tidyr::replace_na(replace = list(
      Lay_Grass = 0, Grass_Percent = 0, Lay_Rockweed = 0, Rockweed_Percent = 0,
      Lay_Kelp = 0, Kelp_Percent = 0, Lay_Brown_Algae = 0,
      Brown_Algae_Percent = 0, Lay_Leafy_Red = 0, Leafy_Red_Percent = 0,
      Lay_Stringy_Red = 0, Stringy_Red_Percent = 0, Lay_Rock = 0,
      Rock_Percent = 0, Lay_Other = 0, Other_Percent = 0
    )) %>%
    # Substrate i: effective layers = layers * proportion cover
    dplyr::mutate(
      Grass = Lay_Grass * Grass_Percent / 100,
      Rockweed = Lay_Rockweed * Rockweed_Percent / 100,
      Kelp = Lay_Kelp * Kelp_Percent / 100,
      BrownAlgae = Lay_Brown_Algae * Brown_Algae_Percent / 100,
      LeafyRed = Lay_Leafy_Red * Leafy_Red_Percent / 100,
      StringyRed = Lay_Stringy_Red * Stringy_Red_Percent / 100,
      Rock = Lay_Rock * Rock_Percent / 100,
      Other = Lay_Other * Other_Percent / 100
    ) %>%
    tibble::as_tibble()
  # Grab the percent cover data
  pCover <- surface %>%
    dplyr::select(dplyr::ends_with("Percent"))
  # Error if any percents are greater than 100
  if (any(pCover > 100, na.rm = TRUE)) {
    stop("Percent cover > 100 in surface spawn data", call. = FALSE)
  }
  # Continue with calculating egg layers
  surface <- surface %>%
    # Sample j: total egg layers across substrates; re-scale early intensity
    # categories from 5 to 9 levels (x * 2 - 1) for years in rsYrs
    dplyr::mutate(
      EggLyrs = Grass + Rockweed + Kelp + BrownAlgae + LeafyRed +
        StringyRed + Rock + Other,
      Intensity = ifelse(Year %in% rsYrs & Intensity > 0,
        Intensity * 2 - 1, Intensity
      )
    ) %>%
    dplyr::filter(Method %in% c("Surface", "Dive")) %>%
    dplyr::select(
      Year, Region, StatArea, Section, LocationCode, Pool, SpawnNumber, Length,
      WidthObs, Intensity, EggLyrs
    )
  # Fill-in missing egg layers manually
  surface <- surface %>%
    dplyr::mutate(
      # SoG (1 record): update Intensity from 0 to 1 (surveyed but not reported)
      Intensity = ifelse(Year == 1962 & StatArea == 14 & Section == 142 &
        LocationCode == 820 & Intensity == 0, 1, Intensity)
    )
  # Calculate egg density based on intensity or direct measurements
  eggs <- surface %>%
    dplyr::left_join(y = intense, by = "Intensity") %>%
    dplyr::mutate(
      EggLyrs = ifelse(Year %in% intYrs, Layers, EggLyrs),
      # Egg density in thousands (eggs * 10^3 / m^2; Schweigert et al.
      # 1997). Yes, thousands: the report is wrong (J. Schweigert,
      # personal communication, 21 February 2017); sample j
      EggDens = alpha + beta * EggLyrs
    )
  # These are the 'original' manual updates that were in the Microsoft Access
  # database: some overwrite good data with no documented reason and have been
  # omitted, others have been omitted because the spawn survey was incomplete.
  # However, they (and others) are still present in the Microsoft Access
  # database, causing discrepancies for HG 1979, as well as WCVI 1982 and 1984.
  # They should get removed from the Microsoft Access database (especially
  # updates 1, 4, and 5 which cause errors). Update 2 is still relevant;
  # updates 3 and 6 no longer have an effect.
  # 1. HG (15 records): Year 1979, SA 2, Intensity 4 (update EggLyrs to
  #    2.1496 using intensity table; 14 records overwrite good data)
  # 2. SoG (1 record): Year 1962, SA 14, Intensity 0 (update EggLyrs to
  #    0.5529 using intensity 1: spawn was surveyed but not reported)
  # 3. WCVI (4 records): Year 1981, SA 24, EggLyrs 0 (update EggLyrs to
  #    0.5529 using intensity table)
  # 4. WCVI (7 records): Year 1982, SA 23, Intensity 3 (update EggLyrs to
  #    1.3360 using intensity table; 7 records overwrite good data)
  # 5. WCVI (41 records): Year 1984, SA 24, Intensity 0 (update EggLyrs to
  #    2.33 -- not sure why/how; 41 records overwrite good data)
  # 6. A27 (14 records): Year 1982, SA 27, EggLyrs 0 (update EggLyrs to
  #    2.98 using a historical average)
  # Get the number of records with no egg layer info
  noLayers <- eggs %>% dplyr::filter(EggLyrs == 0)
  # Error if there are missing values.  NOTE: the previous version passed
  # `print(noLayers)` and `sep = ""` into stop(); stop() has no `sep`
  # argument and cannot take a tibble in `...`, which broke the error
  # message.  Print the offending records first, then abort.
  if (nrow(noLayers) > 0) {
    print(noLayers)
    stop(
      "Missing egg layers for ", nrow(noLayers), " record(s); see above",
      call. = FALSE
    )
  }
  # Output egg layer info
  eggLyrs <- eggs %>%
    dplyr::group_by(
      Year, Region, StatArea, Section, LocationCode, SpawnNumber
    ) %>%
    dplyr::summarise(SurfLyrs = gfiscamutils::MeanNA(EggLyrs)) %>%
    dplyr::ungroup()
  # Calculate egg density per spawn number/pool
  eggsSpawn <- eggs %>%
    dplyr::group_by(
      Year, Region, StatArea, Section, LocationCode, SpawnNumber, Pool
    ) %>%
    # Spawn s
    dplyr::summarise(EggDens = gfiscamutils::MeanNA(EggDens)) %>%
    dplyr::ungroup()
  # Calculate annual fish biomass by spawn number/pool
  biomassSpawn <- eggsSpawn %>%
    dplyr::left_join(y = spawn, by = c("Year", "LocationCode", "SpawnNumber")) %>%
    dplyr::left_join(y = widths, by = c("Region", "Section", "Pool")) %>%
    # Width is set to pool, section, region, or observed width (in that order)
    dplyr::mutate(
      Width = WidthPool,
      Width = ifelse(is.na(Width), WidthSec, Width),
      Width = ifelse(is.na(Width), WidthReg, Width),
      Width = ifelse(is.na(Width), WidthObs, Width),
      # Biomass in tonnes, based on Hay (1985), and Hay and Brett (1988)
      SurfSI = EggDens * Length * Width * 1000 / theta
    ) %>%
    # Group to account for 'pool' level (want 'spawn' level)
    dplyr::group_by(
      Year, Region, StatArea, Section, LocationCode, SpawnNumber
    ) %>%
    # Spawn s
    dplyr::summarise(SurfSI = gfiscamutils::SumNA(SurfSI)) %>%
    dplyr::ungroup() %>%
    dplyr::full_join(
      y = eggLyrs,
      by = c(
        "Year", "Region", "StatArea", "Section", "LocationCode",
        "SpawnNumber"
      )
    )
  # Calculate annual SI by spawn number
  SI <- biomassSpawn %>%
    dplyr::select(
      Year, Region, StatArea, Section, LocationCode, SpawnNumber, SurfSI
    )
  # Close the connection
  RODBC::odbcClose(accessDB)
  # Return the data
  return(list(
    surface = surface, eggs = eggs, eggsSpawn = eggsSpawn,
    biomassSpawn = biomassSpawn, SI = SI
  ))
} # End CalcSurfSpawn function
#' Calculate the Macrocystis spawn index.
#'
#' Calculate the Pacific Herring Macrocystis spawn index in tonnes.
#'
#' @param where List. Location of the Pacific Herring Macrocystis spawn database
#'   (see examples).
#' @param a Tibble. Table of geographic information indicating the subset of
#'   spawn survey observations to include in calculations; from
#'   \code{\link{LoadAreaData}}.
#' @param yrs Numeric vector. Year(s) to include in the calculations, usually
#'   starting in 1951.
#' @param tSwath Numeric. Transect swath (i.e., width) in metres.
#' @param beta Numeric. Regression slope; from \code{\link{pars}}
#'   \insertCite{HaegeleSchweigert1990}{SpawnIndex}.
#' @param gamma Numeric. Regression exponent on egg layers; from
#'   \code{\link{pars}} \insertCite{HaegeleSchweigert1990}{SpawnIndex}.
#' @param delta Numeric. Regression exponent on plant height; from
#'   \code{\link{pars}} \insertCite{HaegeleSchweigert1990}{SpawnIndex}.
#' @param epsilon Numeric. Regression exponent on number of stalks per plant;
#'   from \code{\link{pars}} \insertCite{HaegeleSchweigert1990}{SpawnIndex}.
#' @param theta Numeric. Egg conversion factor (eggs to biomass); from
#'   \code{\link{CalcEggConversion}}.
#' @importFrom RODBC odbcConnectAccess sqlFetch odbcClose
#' @importFrom dplyr select distinct rename left_join filter %>%
#' @importFrom tibble as_tibble
#' @importFrom stringr str_to_title
#' @importFrom gfiscamutils MeanNA SumNA UniqueNA
#' @importFrom tidyr replace_na
#' @importFrom Rdpack reprompt
#' @return List. The element \code{SI} is a tibble with Macrocystis spawn index
#'   (\code{MacroSI}) in tonnes by spawn number and year. The spawn number is
#'   the finest spatial scale at which we calculate the spawn index. Other
#'   information in this tibble comes from \code{a}: Region, Statistical Area,
#'   Section, and Location code.
#' @references \insertAllCited{}
#' @note The `spawn index' is a relative index of spawning biomass.
#' @seealso \code{\link{LoadAreaData}} \code{\link{CalcEggConversion}}
#'   \code{\link{pars}}
#' @export
#' @examples
#' dbLoc <- system.file("extdata", package = "SpawnIndex")
#' areaLoc <- list(
#'   loc = dbLoc, db = "HerringSpawn.mdb",
#'   fns = list(sections = "Sections", locations = "Location")
#' )
#' areas <- LoadAreaData(reg = "WCVI", where = areaLoc)
#' data(pars)
#' macroLoc <- list(
#'   loc = dbLoc, db = "HerringSpawn.mdb",
#'   fns = list(
#'     allSpawn = "tSSAllspawn", plants = "tSSMacPlant",
#'     transects = "tSSMacTrans"
#'   )
#' )
#' macroSpawn <- CalcMacroSpawn(where = macroLoc, a = areas, yrs = 2010:2015)
#' macroSpawn$SI
CalcMacroSpawn <- function(where,
                           a,
                           yrs,
                           tSwath = 2,
                           beta = pars$macrocystis$beta,
                           gamma = pars$macrocystis$gamma,
                           delta = pars$macrocystis$delta,
                           epsilon = pars$macrocystis$epsilon,
                           theta = CalcEggConversion()) {
  # Establish connection with the Microsoft Access database
  accessDB <- RODBC::odbcConnectAccess(access.file = file.path(
    where$loc,
    where$db
  ))
  # Load all spawn
  spawn <- RODBC::sqlFetch(channel = accessDB, sqtable = where$fns$allSpawn) %>%
    dplyr::rename(
      LocationCode = Loc_Code, SpawnNumber = Spawn_Number,
      LengthMacro = Length_Macrocystis
    ) %>%
    dplyr::mutate(Method = stringr::str_to_title(Method)) %>%
    dplyr::filter(Year %in% yrs, LocationCode %in% a$LocationCode) %>%
    dplyr::select(
      Year, LocationCode, SpawnNumber, LengthMacro, Length, Method
    ) %>%
    tibble::as_tibble()
  # Get plant-level data; drop records with no mature-plant count
  plants <- RODBC::sqlFetch(channel = accessDB, sqtable = where$fns$plants) %>%
    dplyr::rename(LocationCode = Loc_Code, SpawnNumber = Spawn_Number) %>%
    dplyr::filter(
      Year %in% yrs, LocationCode %in% a$LocationCode,
      !is.na(Mature)
    ) %>%
    dplyr::select(Year, LocationCode, SpawnNumber, Transect, Mature) %>%
    tibble::as_tibble()
  # Get a small subset of area data
  areasSm <- a %>%
    dplyr::select(Region, StatArea, Section, LocationCode, Pool) %>%
    dplyr::distinct() %>%
    tibble::as_tibble()
  # Get transect-level data
  transects <- RODBC::sqlFetch(
    channel = accessDB,
    sqtable = where$fns$transects
  ) %>%
    dplyr::rename(LocationCode = Loc_Code, SpawnNumber = Spawn_Number) %>%
    dplyr::filter(Year %in% yrs, LocationCode %in% a$LocationCode) %>%
    dplyr::left_join(y = areasSm, by = "LocationCode") %>%
    dplyr::select(
      Year, Region, StatArea, Section, LocationCode, SpawnNumber, Transect,
      Height, Width, Layers
    ) %>%
    tibble::as_tibble()
  # Merge the data; transects with no plant records get Mature = 0
  dat <- transects %>%
    dplyr::left_join(y = plants, by = c(
      "Year", "LocationCode", "SpawnNumber", "Transect"
    )) %>%
    tidyr::replace_na(replace = list(Mature = 0)) %>%
    dplyr::mutate(Swath = tSwath)
  # Calculate transect-level data
  datTrans <- dat %>%
    dplyr::group_by(
      Year, Region, StatArea, Section, LocationCode, SpawnNumber, Transect
    ) %>%
    # Transect t
    dplyr::summarise(
      # Transect metrics for all transects (not just those with mature plants)
      Width = unique(Width),
      Swath = unique(Swath),
      Area = Width * Swath,
      # Plant metrics for mature plants only
      Height = gfiscamutils::UniqueNA(Height[Mature > 0]),
      EggLyrs = gfiscamutils::UniqueNA(Layers[Mature > 0]),
      Stalks = gfiscamutils::SumNA(Mature[Mature > 0]),
      Plants = length(Mature[Mature > 0])
    ) %>%
    dplyr::ungroup()
  # Calculate spawn-level data
  biomassSpawn <- datTrans %>%
    dplyr::left_join(
      y = spawn,
      by = c("Year", "LocationCode", "SpawnNumber")
    ) %>%
    # Fall back to the overall spawn length when no Macrocystis-specific
    # length was recorded
    dplyr::mutate(
      LengthMacro = ifelse(is.na(LengthMacro), Length, LengthMacro)
    ) %>%
    dplyr::filter(Method %in% c("Surface", "Dive")) %>%
    dplyr::group_by(
      Year, Region, StatArea, Section, LocationCode, SpawnNumber
    ) %>%
    # Spawn s
    dplyr::summarise(
      LengthMacro = unique(LengthMacro),
      Width = gfiscamutils::MeanNA(Width),
      Area = gfiscamutils::SumNA(Area),
      Plants = gfiscamutils::SumNA(Plants),
      Stalks = gfiscamutils::SumNA(Stalks),
      Height = gfiscamutils::MeanNA(Height),
      EggLyrs = gfiscamutils::MeanNA(EggLyrs),
      StalksPerPlant = Stalks / Plants,
      # Eggs per plant in thousands (eggs * 10^3 / plant; Haegele and
      # Schweigert 1990); spawn s
      EggsPerPlant = beta * EggLyrs^gamma * Height^delta *
        StalksPerPlant^epsilon * 1000,
      # Egg density in thousands (eggs * 10^3 / m^2); spawn s
      EggDens = EggsPerPlant * Plants / Area,
      # Biomass in tonnes, based on Hay (1985), and Hay and Brett (1988); spawn
      # s
      MacroSI = EggDens * LengthMacro * Width * 1000 / theta
    ) %>%
    dplyr::rename(MacroLyrs = EggLyrs) %>%
    dplyr::ungroup()
  # Return the macrocystis spawn
  SI <- biomassSpawn %>%
    dplyr::select(
      Year, Region, StatArea, Section, LocationCode, SpawnNumber, MacroSI
    )
  # Close the connection
  RODBC::odbcClose(accessDB)
  # Return the data
  return(list(
    dat = dat, datTrans = datTrans, biomassSpawn = biomassSpawn, SI = SI
  ))
} # End CalcMacroSpawn function
#' Calculate the understory spawn index.
#'
#' Calculate the Pacific Herring understory spawn index in tonnes.
#'
#' @param where List. Location of the Pacific Herring understory spawn database
#'   (see examples).
#' @param a Tibble. Table of geographic information indicating the subset of
#'   spawn survey observations to include in calculations; from
#'   \code{\link{LoadAreaData}}.
#' @param yrs Numeric vector. Year(s) to include in the calculations, usually
#'   starting in 1951.
#' @param algCoefs Tibble. Table of algae coefficients; from
#'   \code{\link{algaeCoefs}}.
#' @param tau Tibble. Table of understory spawn width adjustment factors from
#'   \code{\link{underWidthFac}}.
#' @param alpha Numeric. Regression slope for substrate; from \code{\link{pars}}
#'   \insertCite{HaegeleEtal1979}{SpawnIndex}.
#' @param beta Numeric. Regression slope for algae; from \code{\link{pars}}
#'   \insertCite{Schweigert2005}{SpawnIndex}.
#' @param gamma Numeric. Regression exponent on number of egg layers; from
#'   \code{\link{pars}} \insertCite{Schweigert2005}{SpawnIndex}.
#' @param delta Numeric. Regression exponent on proportion of algae; from
#'   \code{\link{pars}} \insertCite{Schweigert2005}{SpawnIndex}.
#' @param theta Numeric. Egg conversion factor (eggs to biomass); from
#'   \code{\link{CalcEggConversion}}.
#' @importFrom RODBC odbcConnectAccess sqlFetch odbcClose
#' @importFrom dplyr select distinct rename left_join filter %>%
#' @importFrom tibble as_tibble
#' @importFrom stringr str_to_title str_to_upper
#' @importFrom gfiscamutils MeanNA SumNA UniqueNA WtMeanNA
#' @importFrom tidyr replace_na gather
#' @importFrom Rdpack reprompt
#' @return List. The element \code{SI} is a tibble with understory spawn index
#'   (\code{UnderSI}) in tonnes by spawn number and year. The spawn number is
#'   the finest spatial scale at which we calculate the spawn index. Other
#'   information in this tibble comes from \code{a}: Region, Statistical Area,
#'   Section, and Location code.
#' @references \insertAllCited{}
#' @note The `spawn index' is a relative index of spawning biomass.
#' @seealso \code{\link{LoadAreaData}} \code{\link{CalcEggConversion}}
#'   \code{\link{pars}} \code{\link{algaeCoefs}}
#' @export
#' @examples
#' dbLoc <- system.file("extdata", package = "SpawnIndex")
#' areaLoc <- list(
#'   loc = dbLoc, db = "HerringSpawn.mdb",
#'   fns = list(sections = "Sections", locations = "Location")
#' )
#' areas <- LoadAreaData(reg = "WCVI", where = areaLoc)
#' underLoc <- list(
#'   loc = dbLoc, db = "HerringSpawn.mdb",
#'   fns = list(
#'     allSpawn = "tSSAllspawn", algTrans = "tSSVegTrans",
#'     stations = "tSSStations", algae = "tSSVegetation"
#'   )
#' )
#' data(underWidthFac)
#' data(pars)
#' data(algaeCoefs)
#' underSpawn <- CalcUnderSpawn(where = underLoc, a = areas, yrs = 2010:2015)
#' underSpawn$SI
CalcUnderSpawn <- function(where,
                           a,
                           yrs,
                           algCoefs = algaeCoefs,
                           tau = underWidthFac,
                           alpha = pars$understory$alpha,
                           beta = pars$understory$beta,
                           # NOTE: was `pars$under$gamma`, which only worked
                           # via `$` partial matching against `understory`;
                           # use the full element name to avoid a silent NULL
                           # if another `under*` element is ever added
                           gamma = pars$understory$gamma,
                           delta = pars$understory$delta,
                           theta = CalcEggConversion()) {
  # Establish connection with the Microsoft Access database
  accessDB <- RODBC::odbcConnectAccess(access.file = file.path(
    where$loc,
    where$db
  ))
  # Get a small subset of area data
  areasSm1 <- a %>%
    dplyr::select(Region, LocationCode) %>%
    dplyr::distinct() %>%
    tibble::as_tibble()
  # Load all spawn
  spawn <- RODBC::sqlFetch(channel = accessDB, sqtable = where$fns$allSpawn) %>%
    dplyr::rename(
      LocationCode = Loc_Code, SpawnNumber = Spawn_Number,
      LengthAlgae = Length_Vegetation
    ) %>%
    dplyr::mutate(Method = stringr::str_to_title(Method)) %>%
    dplyr::filter(Year %in% yrs, LocationCode %in% a$LocationCode) %>%
    dplyr::select(
      Year, LocationCode, SpawnNumber, LengthAlgae, Length, Method
    ) %>%
    tibble::as_tibble()
  # Load algae transects
  algTrans <- RODBC::sqlFetch(
    channel = accessDB,
    sqtable = where$fns$algTrans
  ) %>%
    dplyr::rename(
      LocationCode = Loc_Code, SpawnNumber = Spawn_Number,
      QuadratSize = Quadrat_Size, WidthObs = Width_Recorded
    ) %>%
    dplyr::filter(Year %in% yrs, LocationCode %in% a$LocationCode) %>%
    # TODO: This is a temporary kludge (all survey quadrats are 0.5); MT will
    # fix it in the database
    dplyr::mutate(QuadratSize = ifelse(QuadratSize == 0, 0.5, QuadratSize)) %>%
    dplyr::select(
      Year, LocationCode, SpawnNumber, Transect, WidthObs, QuadratSize
    ) %>%
    dplyr::left_join(y = areasSm1, by = "LocationCode") %>%
    tibble::as_tibble()
  # Correction factors for region(s) by year (to fix lead line shrinkage issue)
  widthFacs <- tau %>%
    tidyr::gather(key = Region, value = WidthFac, -Year)
  # Merge the width factors and correct transect widths; regions/years with
  # no factor get WidthFac = 1 (no adjustment)
  algTrans <- algTrans %>%
    dplyr::left_join(y = widthFacs, by = c("Year", "Region")) %>%
    tidyr::replace_na(replace = list(WidthFac = 1.0)) %>%
    dplyr::mutate(Width = WidthObs * WidthFac)
  # Error if any quadrats are not 0.5 m^2 (algae coefficients assume 0.5)
  if (any(algTrans$QuadratSize != 0.5)) {
    stop("All quadrats must be 0.5m^2", call. = FALSE)
  }
  # Load station data
  stations <- RODBC::sqlFetch(
    channel = accessDB,
    sqtable = where$fns$stations
  ) %>%
    dplyr::rename(
      LocationCode = Loc_Code, SpawnNumber = Spawn_Number,
      SubLyrs = Layers_Bottom
    ) %>%
    dplyr::filter(Year %in% yrs, LocationCode %in% a$LocationCode) %>%
    dplyr::mutate(SubProp = Percent_Bottom / 100) %>%
    dplyr::select(
      Year, LocationCode, SpawnNumber, Transect, Station, SubLyrs, SubProp
    ) %>%
    tibble::as_tibble()
  # Get egg layer info: substrate
  eggLyrsSub <- stations %>%
    dplyr::group_by(Year, LocationCode, SpawnNumber, Transect) %>%
    dplyr::summarise(Layers = gfiscamutils::MeanNA(SubLyrs)) %>%
    dplyr::ungroup() %>%
    dplyr::mutate(Source = "Substrate")
  # Load algae; cap observed proportions at 1
  algae <- RODBC::sqlFetch(channel = accessDB, sqtable = where$fns$algae) %>%
    dplyr::rename(
      LocationCode = Loc_Code, SpawnNumber = Spawn_Number,
      AlgType = Type_Vegetation, AlgLyrs = Layers_Vegetation
    ) %>%
    dplyr::filter(Year %in% yrs, LocationCode %in% a$LocationCode) %>%
    dplyr::mutate(
      AlgType = stringr::str_to_upper(AlgType),
      AlgProp = Percent_Vegetation / 100,
      AlgProp = ifelse(AlgProp > 1, 1, AlgProp)
    ) %>%
    dplyr::select(
      Year, LocationCode, SpawnNumber, Transect, Station, AlgType, AlgLyrs,
      AlgProp
    ) %>%
    tibble::as_tibble()
  # Get egg layer info: algae
  eggLyrsAlg <- algae %>%
    dplyr::group_by(Year, LocationCode, SpawnNumber, Transect) %>%
    dplyr::summarise(Layers = gfiscamutils::MeanNA(AlgLyrs)) %>%
    dplyr::ungroup() %>%
    dplyr::mutate(Source = "Algae")
  # Combine egg layer info: mean over sources per transect, then mean over
  # transects per spawn
  eggLyrs <- dplyr::bind_rows(eggLyrsSub, eggLyrsAlg) %>%
    dplyr::group_by(Year, LocationCode, SpawnNumber, Transect) %>%
    dplyr::summarise(Layers = gfiscamutils::MeanNA(Layers)) %>%
    dplyr::group_by(Year, LocationCode, SpawnNumber) %>%
    dplyr::summarise(UnderLyrs = gfiscamutils::MeanNA(Layers)) %>%
    dplyr::ungroup()
  # If there are missing algae types
  if (any(!algae$AlgType %in% algCoefs$AlgType)) {
    # Get missing algae type(s)
    missAlg <- unique(algae$AlgType[!algae$AlgType %in%
      algCoefs$AlgType])
    # Error, and show missing type(s)
    stop("Missing algae type(s): ", paste(missAlg, collapse = ", "),
      call. = FALSE
    )
  } # End if there are missing algae types
  # Get a small subset of area data
  areasSm2 <- a %>%
    dplyr::select(Region, StatArea, Section, LocationCode) %>%
    dplyr::distinct() %>%
    tibble::as_tibble()
  # Error if proportion > 1
  if (any(stations$SubProp > 1, na.rm = TRUE)) {
    stop("Substrate proportion > 1 in understory spawn data", call. = FALSE)
  }
  # Calculate substrate egg density
  eggsSub <- stations %>%
    dplyr::full_join(y = algTrans, by = c(
      "Year", "LocationCode", "SpawnNumber", "Transect"
    )) %>%
    dplyr::left_join(y = areasSm2, by = c("Region", "LocationCode")) %>%
    # Egg density in thousands (eggs x 10^3 / m^2; Haegele et al. 1979);
    # quadrat q
    dplyr::mutate(EggDensSub = alpha * SubLyrs * SubProp) %>%
    tidyr::replace_na(replace = list(EggDensSub = 0)) %>%
    dplyr::select(
      Year, Region, StatArea, Section, LocationCode, SpawnNumber, Transect,
      Station, Width, EggDensSub
    )
  # Error if proportion > 1
  if (any(algae$AlgProp > 1, na.rm = TRUE)) {
    stop("Algae proportion > 1 in understory spawn data", call. = FALSE)
  }
  # Calculate algae egg density by quadrat/station
  eggsAlg <- algae %>%
    dplyr::left_join(y = algCoefs, by = "AlgType") %>%
    dplyr::left_join(y = areasSm2, by = "LocationCode") %>%
    dplyr::left_join(
      y = dplyr::select(.data = algTrans, -Width),
      by = c("Year", "Region", "LocationCode", "SpawnNumber", "Transect")
    ) %>%
    # Egg density in thousands (eggs * 10^3 / m^2; Schweigert 2005); quadrat
    # size coefficients not required because all quadrats are 0.5m^2 (1.0512)
    # Algae a
    dplyr::mutate(EggDensAlg = beta * AlgLyrs^gamma * AlgProp^delta * Coef *
      1.0512) %>%
    dplyr::group_by(
      Year, Region, StatArea, Section, LocationCode, SpawnNumber, Transect,
      Station
    ) %>%
    # Quadrat q
    dplyr::summarise(EggDensAlg = gfiscamutils::SumNA(EggDensAlg)) %>%
    tidyr::replace_na(replace = list(EggDensAlg = 0)) %>%
    dplyr::ungroup()
  # Combine eggs; substrate density only counts where the transect has a
  # positive (corrected) width
  eggs <- eggsSub %>%
    dplyr::full_join(y = eggsAlg, by = c(
      "Year", "Region", "StatArea", "Section", "LocationCode", "SpawnNumber",
      "Transect", "Station"
    )) %>%
    tidyr::replace_na(replace = list(
      Width = 0, EggDensSub = 0, EggDensAlg = 0
    )) %>%
    dplyr::mutate(EggDensSub = ifelse(Width > 0, EggDensSub, 0))
  # Calculate total egg density by station/quadrat
  eggsStation <- eggs %>%
    # Total egg density in thousands (eggs * 10^3 / m^2); quadrat q
    dplyr::mutate(EggDens = EggDensSub + EggDensAlg) %>%
    dplyr::filter(!is.na(Station))
  # Widths: mean transect width per spawn
  widths <- eggsStation %>%
    dplyr::group_by(
      Year, Region, StatArea, Section, LocationCode, SpawnNumber, Transect
    ) %>%
    dplyr::summarise(Width = unique(Width)) %>%
    dplyr::group_by(
      Year, Region, StatArea, Section, LocationCode, SpawnNumber
    ) %>%
    # Spawn s
    dplyr::summarise(
      WidthBar = gfiscamutils::MeanNA(Width)
    ) %>%
    dplyr::ungroup()
  # Calculate transect-level metrics
  eggsTrans <- eggsStation %>%
    dplyr::filter(Width > 0) %>%
    dplyr::group_by(
      Year, Region, StatArea, Section, LocationCode, SpawnNumber, Transect
    ) %>%
    # Transect t
    dplyr::summarise(
      EggDens = gfiscamutils::MeanNA(EggDens),
      Width = unique(Width)
    ) %>%
    dplyr::ungroup()
  # Calculate spawn number-level metrics
  eggsSpawn <- eggsTrans %>%
    dplyr::left_join(
      y = spawn,
      by = c("Year", "LocationCode", "SpawnNumber")
    ) %>%
    # Fall back to the overall spawn length when no algae-specific length
    # was recorded
    dplyr::mutate(LengthAlgae = ifelse(is.na(LengthAlgae), Length,
      LengthAlgae
    )) %>%
    dplyr::filter(Method %in% c("Surface", "Dive")) %>%
    dplyr::left_join(y = widths, by = c(
      "Year", "Region", "StatArea", "Section", "LocationCode", "SpawnNumber"
    )) %>%
    dplyr::group_by(
      Year, Region, StatArea, Section, LocationCode, SpawnNumber
    ) %>%
    # Spawn s: egg density is a transect-width-weighted mean
    dplyr::summarise(
      WidthBar = unique(WidthBar),
      LengthAlgae = unique(LengthAlgae),
      EggDens = gfiscamutils::WtMeanNA(EggDens, w = Width)
    ) %>%
    dplyr::ungroup()
  # Calculate understory biomass by spawn number
  biomassSpawn <- eggsSpawn %>%
    dplyr::mutate(
      # Biomass in tonnes, based on Hay (1985), and Hay and Brett (1988); spawn
      # s
      UnderSI = EggDens * LengthAlgae * WidthBar * 1000 / theta
    ) %>%
    dplyr::left_join(y = eggLyrs, by = c("Year", "LocationCode", "SpawnNumber"))
  # Calculate understory SI by spawn number
  SI <- biomassSpawn %>%
    dplyr::select(
      Year, Region, StatArea, Section, LocationCode, SpawnNumber, UnderSI
    )
  # Close the connection
  RODBC::odbcClose(accessDB)
  # Return the data
  return(list(
    stations = stations, algae = algae, eggs = eggs, eggsStation = eggsStation,
    eggsTrans = eggsTrans, eggsSpawn = eggsSpawn, biomassSpawn = biomassSpawn,
    SI = SI
  ))
} # End CalcUnderSpawn function
| /R/calcs.R | no_license | brigitte-dorner/SpawnIndex | R | false | false | 33,162 | r | #' Convert number of eggs to spawning biomass.
#'
#' Calculate the conversion factor for the number of Pacific Herring eggs to the
#' spawn index (i.e., biomass) in tonnes.
#'
#' @param omega Numeric. The number of eggs per kilogram of female spawners;
#' from \code{\link{pars}}.
#' @param phi Numeric. The proportion of spawners that are female; from
#' \code{\link{pars}}.
#' @importFrom Rdpack reprompt
#' @return Numeric. The conversion factor for eggs to spawn index in tonnes
#' (i.e., biomass). Divide the number of eggs by the conversion factor to get
#' biomass.
#' @seealso \code{\link{pars}}
#' @export
#' @examples
#' data(pars)
#' CalcEggConversion()
CalcEggConversion <- function(omega = pars$conversion$omega,
phi = pars$conversion$phi) {
# Eggs per tonne: eggs/kilogram female * proportion female * kilograms/tonne
theta <- omega * phi * 1000
# Return the conversion factor
return(theta)
} # End CalcEggConversion function
#' Calculate spawning biomass from spawn-on-kelp (SOK) harvest.
#'
#' Calculate spawning biomass in tonnes from spawn-on-kelp (SOK) harvest in
#' kilograms.
#'
#' @param SOK Numeric. Weight of spawn-on-kelp (SOK) harvest in kilograms.
#' @param nu Numeric. Proportion of SOK product that is kelp; from
#' \code{\link{pars}}.
#' @param upsilon Numeric. SOK product weight increase due to brining as a
#' proportion; from \code{\link{pars}}.
#' @param M Numeric. Average weight in kilograms of a fertilized egg; from
#' \code{\link{pars}}.
#' @param theta Numeric. Egg conversion factor (eggs to biomass); from
#' \code{\link{CalcEggConversion}}.
#' @importFrom Rdpack reprompt
#' @return Numeric. Spawning biomass in tonnes.
#' @seealso \code{\link{CalcEggConversion}} \code{\link{pars}}
#' @export
#' @examples
#' data(pars)
#' CalcBiomassSOK(SOK = 100)
CalcBiomassSOK <- function(SOK,
                           nu = pars$SOK$nu,
                           upsilon = pars$SOK$upsilon,
                           M = pars$SOK$M,
                           theta = CalcEggConversion()) {
  # Kilograms of eggs in the SOK product: strip out the kelp fraction (nu)
  # and back out the weight gained through brining (upsilon).
  eggKg <- SOK * (1 - nu) * 1 / (1 + upsilon)
  # Spawning biomass in tonnes:
  # kg of eggs / (kg per egg * eggs per tonne of spawners).
  eggKg / (M * theta)
} # End CalcBiomassSOK
#' Calculate the surface spawn index.
#'
#' Calculate the Pacific Herring surface spawn index in tonnes.
#'
#' @param where List. Location of the Pacific Herring surface spawn database
#' (see examples).
#' @param a Tibble. Table of geographic information indicating the subset of
#' spawn survey observations to include in calculations; from
#' \code{\link{LoadAreaData}}.
#' @param widths Tibble. Table of median region, section, and pool widths in
#' metres (m); from \code{\link{GetWidth}}.
#' @param yrs Numeric vector. Year(s) to include in the calculations, usually
#' starting in 1951.
#' @param intense Tibble. Table of spawn intensity categories and number of egg
#' layers; from \code{\link{intensity}}.
#' @param intYrs Numeric vector. Years where intensity categories are used to
#' determine egg layers.
#' @param rsYrs Numeric vector. Years where intensity needs to be re-scaled from
#' 5 to 9 categories.
#' @param alpha Numeric. Regression intercept; from \code{\link{pars}}
#' \insertCite{SchweigertEtal1997}{SpawnIndex}.
#' @param beta Numeric. Regression slope; from \code{\link{pars}}
#' \insertCite{SchweigertEtal1997}{SpawnIndex}.
#' @param theta Numeric. Egg conversion factor (eggs to biomass); from
#' \code{\link{CalcEggConversion}}.
#' @importFrom RODBC odbcConnectAccess sqlFetch odbcClose
#' @importFrom dplyr select distinct rename left_join filter %>%
#' @importFrom tibble as_tibble
#' @importFrom stringr str_to_title
#' @importFrom gfiscamutils MeanNA SumNA
#' @importFrom tidyr replace_na
#' @importFrom Rdpack reprompt
#' @return List. The element \code{SI} is a tibble with surface spawn index
#' (\code{SurfSI}) in tonnes by spawn number and year. The spawn number is the
#' finest spatial scale at which we calculate the spawn index. Other
#' information in this tibble comes from \code{a}: Region, Statistical Area,
#' Section, and Location code.
#' @references \insertAllCited
#' @note The `spawn index' is a relative index of spawning biomass.
#' @seealso \code{\link{LoadAreaData}} \code{\link{GetWidth}}
#' \code{\link{CalcEggConversion}} \code{\link{pars}} \code{\link{intensity}}
#' @export
#' @examples
#' dbLoc <- system.file("extdata", package = "SpawnIndex")
#' areaLoc <- list(
#' loc = dbLoc, db = "HerringSpawn.mdb",
#' fns = list(sections = "Sections", locations = "Location")
#' )
#' areas <- LoadAreaData(reg = "WCVI", where = areaLoc)
#' widthLoc <- list(
#' loc = dbLoc, db = "HerringSpawn.mdb",
#' fns = list(
#' regionStd = "RegionStd", sectionStd = "SectionStd", poolStd = "PoolStd"
#' )
#' )
#' barWidth <- GetWidth(where = widthLoc, a = areas)
#' data(pars)
#' data(intensity)
#' surfLoc <- list(
#' loc = dbLoc, db = "HerringSpawn.mdb",
#' fns = list(surface = "tSSSurface", allSpawn = "tSSAllspawn")
#' )
#' surfSpawn <- CalcSurfSpawn(
#' where = surfLoc, a = areas, widths = barWidth, yrs = 2010:2015
#' )
#' surfSpawn$SI
CalcSurfSpawn <- function(where,
                          a,
                          widths,
                          yrs,
                          intense = intensity,
                          intYrs = yrs[yrs < 1979],
                          rsYrs = intYrs[intYrs < 1951],
                          alpha = pars$surface$alpha,
                          beta = pars$surface$beta,
                          theta = CalcEggConversion()) {
  # Establish connection with access
  # (NOTE(review): odbcConnectAccess presumably requires 32-bit R on Windows
  # -- confirm against the RODBC documentation)
  accessDB <- RODBC::odbcConnectAccess(access.file = file.path(
    where$loc,
    where$db
  ))
  # Get a small subset of area data (one row per location/pool)
  areasSm <- a %>%
    dplyr::select(SAR, Region, StatArea, Section, LocationCode, Pool) %>%
    dplyr::distinct() %>%
    tibble::as_tibble()
  # Load all spawn records for the requested years and locations
  spawn <- RODBC::sqlFetch(channel = accessDB, sqtable = where$fns$allSpawn) %>%
    dplyr::rename(
      LocationCode = Loc_Code, SpawnNumber = Spawn_Number, WidthObs = Width
    ) %>%
    dplyr::mutate(Method = stringr::str_to_title(Method)) %>%
    dplyr::filter(Year %in% yrs, LocationCode %in% a$LocationCode) %>%
    dplyr::select(
      Year, LocationCode, SpawnNumber, Length, WidthObs, Method
    ) %>%
    tibble::as_tibble()
  # Extract relevant surface data; missing layer/percent fields become 0
  surface <- RODBC::sqlFetch(
    channel = accessDB,
    sqtable = where$fns$surface
  ) %>%
    dplyr::rename(LocationCode = Loc_Code, SpawnNumber = Spawn_Number) %>%
    dplyr::filter(Year %in% yrs, LocationCode %in% a$LocationCode) %>%
    dplyr::left_join(y = areasSm, by = "LocationCode") %>%
    dplyr::left_join(
      y = spawn,
      by = c("Year", "LocationCode", "SpawnNumber")
    ) %>%
    tidyr::replace_na(replace = list(
      Lay_Grass = 0, Grass_Percent = 0, Lay_Rockweed = 0, Rockweed_Percent = 0,
      Lay_Kelp = 0, Kelp_Percent = 0, Lay_Brown_Algae = 0,
      Brown_Algae_Percent = 0, Lay_Leafy_Red = 0, Leafy_Red_Percent = 0,
      Lay_Stringy_Red = 0, Stringy_Red_Percent = 0, Lay_Rock = 0,
      Rock_Percent = 0, Lay_Other = 0, Other_Percent = 0
    )) %>%
    # Substrate i: effective egg layers per substrate type
    # (observed layers weighted by percent cover)
    dplyr::mutate(
      Grass = Lay_Grass * Grass_Percent / 100,
      Rockweed = Lay_Rockweed * Rockweed_Percent / 100,
      Kelp = Lay_Kelp * Kelp_Percent / 100,
      BrownAlgae = Lay_Brown_Algae * Brown_Algae_Percent / 100,
      LeafyRed = Lay_Leafy_Red * Leafy_Red_Percent / 100,
      StringyRed = Lay_Stringy_Red * Stringy_Red_Percent / 100,
      Rock = Lay_Rock * Rock_Percent / 100,
      Other = Lay_Other * Other_Percent / 100
    ) %>%
    tibble::as_tibble()
  # Grab the percent cover data
  pCover <- surface %>%
    dplyr::select(dplyr::ends_with("Percent"))
  # Error if any percents are greater than 100
  if (any(pCover > 100, na.rm = TRUE)) {
    stop("Percent cover > 100 in surface spawn data", call. = FALSE)
  }
  # Continue with calculating egg layers
  surface <- surface %>%
    # Sample j: total egg layers across substrate types; for pre-1951 records
    # (rsYrs) re-scale intensity from 5 to 9 categories
    dplyr::mutate(
      EggLyrs = Grass + Rockweed + Kelp + BrownAlgae + LeafyRed +
        StringyRed + Rock + Other,
      Intensity = ifelse(Year %in% rsYrs & Intensity > 0,
        Intensity * 2 - 1, Intensity
      )
    ) %>%
    dplyr::filter(Method %in% c("Surface", "Dive")) %>%
    dplyr::select(
      Year, Region, StatArea, Section, LocationCode, Pool, SpawnNumber, Length,
      WidthObs, Intensity, EggLyrs
    )
  # Fill-in missing egg layers manually
  surface <- surface %>%
    dplyr::mutate(
      # SoG (1 record): update Intensity from 0 to 1 (surveyed but not reported)
      Intensity = ifelse(Year == 1962 & StatArea == 14 & Section == 142 &
        LocationCode == 820 & Intensity == 0, 1, Intensity)
    )
  # Calculate egg density based on intensity (intYrs) or direct measurements
  eggs <- surface %>%
    dplyr::left_join(y = intense, by = "Intensity") %>%
    dplyr::mutate(
      EggLyrs = ifelse(Year %in% intYrs, Layers, EggLyrs),
      # Egg density in thousands (eggs * 10^3 / m^2; Schweigert et al.
      # 1997). Yes, thousands: the report is wrong (J. Schweigert,
      # personal communication, 21 February 2017); sample j
      EggDens = alpha + beta * EggLyrs
    )
  # These are the 'original' manual updates that were in the Microsoft Access
  # database: some overwrite good data with no documented reason and have been
  # omitted, others have been omitted because the spawn survey was incomplete.
  # However, they (and others) are still present in the Microsoft Access
  # database, causing discrepancies for HG 1979, as well as WCVI 1982 and 1984.
  # They should get removed from the Microsoft Access database (especially
  # updates 1, 4, and 5 which cause errors). Update 2 is still relevant;
  # updates 3 and 6 no longer have an effect.
  # 1. HG (15 records): Year 1979, SA 2, Intensity 4 (update EggLyrs to
  #    2.1496 using intensity table; 14 records overwrite good data)
  # 2. SoG (1 record): Year 1962, SA 14, Intensity 0 (update EggLyrs to
  #    0.5529 using intensity 1: spawn was surveyed but not reported)
  # 3. WCVI (4 records): Year 1981, SA 24, EggLyrs 0 (update EggLyrs to
  #    0.5529 using intensity table)
  # 4. WCVI (7 records): Year 1982, SA 23, Intensity 3 (update EggLyrs to
  #    1.3360 using intensity table; 7 records overwrite good data)
  # 5. WCVI (41 records): Year 1984, SA 24, Intensity 0 (update EggLyrs to
  #    2.33 -- not sure why/how; 41 records overwrite good data)
  # 6. A27 (14 records): Year 1982, SA 27, EggLyrs 0 (update EggLyrs to
  #    2.98 using a historical average)
  # Get the number of records with no egg layer info
  noLayers <- eggs %>% dplyr::filter(EggLyrs == 0) # %>%
  #   left_join( y=select(areas, LocationCode, LocationName),
  #     by="LocationCode" ) %>%
  #   select( Year, Region, StatArea, Section, LocationCode, LocationName,
  #     SpawnNumber ) %>%
  #   arrange( Year, Region, StatArea, Section, LocationCode, SpawnNumber ) %>%
  #   write_csv( path=file.path(regName, "NoEggs.csv") )
  # Error if there are missing values
  # NOTE(review): stop() has no "sep" argument, and print(noLayers) prints the
  # table as a side effect while its return value is pasted into the message;
  # consider formatting noLayers into a string before calling stop()
  if (nrow(noLayers) > 0) {
    stop("Missing egg layers for ", nrow(noLayers), " record(s):",
      print(noLayers),
      sep = ""
    )
  }
  # Output egg layer info: mean layers per spawn
  eggLyrs <- eggs %>%
    dplyr::group_by(
      Year, Region, StatArea, Section, LocationCode, SpawnNumber
    ) %>%
    dplyr::summarise(SurfLyrs = gfiscamutils::MeanNA(EggLyrs)) %>%
    dplyr::ungroup()
  # Calculate egg density per spawn number/pool
  eggsSpawn <- eggs %>%
    dplyr::group_by(
      Year, Region, StatArea, Section, LocationCode, SpawnNumber, Pool
    ) %>%
    # Spawn s: mean egg density over samples
    dplyr::summarise(EggDens = gfiscamutils::MeanNA(EggDens)) %>%
    dplyr::ungroup()
  # Calculate annual fish biomass by spawn number/pool
  biomassSpawn <- eggsSpawn %>%
    dplyr::left_join(y = spawn, by = c("Year", "LocationCode", "SpawnNumber")) %>%
    dplyr::left_join(y = widths, by = c("Region", "Section", "Pool")) %>%
    # Width is set to pool, section, region, or observed width (in that order)
    dplyr::mutate(
      Width = WidthPool,
      Width = ifelse(is.na(Width), WidthSec, Width),
      Width = ifelse(is.na(Width), WidthReg, Width),
      Width = ifelse(is.na(Width), WidthObs, Width),
      # Biomass in tonnes, based on Hay (1985), and Hay and Brett (1988)
      SurfSI = EggDens * Length * Width * 1000 / theta
    ) %>%
    # Group to account for 'pool' level (want 'spawn' level)
    dplyr::group_by(
      Year, Region, StatArea, Section, LocationCode, SpawnNumber
    ) %>%
    # Spawn s: sum biomass over pools
    dplyr::summarise(SurfSI = gfiscamutils::SumNA(SurfSI)) %>%
    dplyr::ungroup() %>%
    dplyr::full_join(
      y = eggLyrs,
      by = c(
        "Year", "Region", "StatArea", "Section", "LocationCode",
        "SpawnNumber"
      )
    )
  # Calculate annual SI by spawn number
  SI <- biomassSpawn %>%
    dplyr::select(
      Year, Region, StatArea, Section, LocationCode, SpawnNumber, SurfSI
    )
  # Close the connection
  RODBC::odbcClose(accessDB)
  # Return the data (SI is the surface spawn index by spawn number and year)
  return(list(
    surface = surface, eggs = eggs, eggsSpawn = eggsSpawn,
    biomassSpawn = biomassSpawn, SI = SI
  ))
} # End CalcSurfSpawn function
#' Calculate the Macrocystis spawn index.
#'
#' Calculate the Pacific Herring Macrocystis spawn index in tonnes.
#'
#' @param where List. Location of the Pacific Herring Macrocystis spawn database
#' (see examples).
#' @param a Tibble. Table of geographic information indicating the subset of
#' spawn survey observations to include in calculations; from
#' \code{\link{LoadAreaData}}.
#' @param yrs Numeric vector. Year(s) to include in the calculations, usually
#' starting in 1951.
#' @param tSwath Numeric. Transect swath (i.e., width) in metres.
#' @param beta Numeric. Regression slope; from \code{\link{pars}}
#' \insertCite{HaegeleSchweigert1990}{SpawnIndex}.
#' @param gamma Numeric. Regression exponent on egg layers; from
#' \code{\link{pars}} \insertCite{HaegeleSchweigert1990}{SpawnIndex}.
#' @param delta Numeric. Regression exponent on plant height; from
#' \code{\link{pars}} \insertCite{HaegeleSchweigert1990}{SpawnIndex}.
#' @param epsilon Numeric. Regression exponent on number of stalks per plant;
#' from \code{\link{pars}} \insertCite{HaegeleSchweigert1990}{SpawnIndex}.
#' @param theta Numeric. Egg conversion factor (eggs to biomass); from
#' \code{\link{CalcEggConversion}}.
#' @importFrom RODBC odbcConnectAccess sqlFetch odbcClose
#' @importFrom dplyr select distinct rename left_join filter %>%
#' @importFrom tibble as_tibble
#' @importFrom stringr str_to_title
#' @importFrom gfiscamutils MeanNA SumNA UniqueNA
#' @importFrom tidyr replace_na
#' @importFrom Rdpack reprompt
#' @return List. The element \code{SI} is a tibble with Macrocystis spawn index
#' (\code{MacroSI}) in tonnes by spawn number and year. The spawn number is
#' the finest spatial scale at which we calculate the spawn index. Other
#' information in this tibble comes from \code{a}: Region, Statistical Area,
#' Section, and Location code.
#' @references \insertAllCited
#' @note The `spawn index' is a relative index of spawning biomass.
#' @seealso \code{\link{LoadAreaData}} \code{\link{CalcEggConversion}}
#' \code{\link{pars}}
#' @export
#' @examples
#' dbLoc <- system.file("extdata", package = "SpawnIndex")
#' areaLoc <- list(
#' loc = dbLoc, db = "HerringSpawn.mdb",
#' fns = list(sections = "Sections", locations = "Location")
#' )
#' areas <- LoadAreaData(reg = "WCVI", where = areaLoc)
#' data(pars)
#' macroLoc <- list(
#' loc = dbLoc, db = "HerringSpawn.mdb",
#' fns = list(
#' allSpawn = "tSSAllspawn", plants = "tSSMacPlant",
#' transects = "tSSMacTrans"
#' )
#' )
#' macroSpawn <- CalcMacroSpawn(where = macroLoc, a = areas, yrs = 2010:2015)
#' macroSpawn$SI
CalcMacroSpawn <- function(where,
                           a,
                           yrs,
                           tSwath = 2,
                           beta = pars$macrocystis$beta,
                           gamma = pars$macrocystis$gamma,
                           delta = pars$macrocystis$delta,
                           epsilon = pars$macrocystis$epsilon,
                           theta = CalcEggConversion()) {
  # Establish connection with access
  accessDB <- RODBC::odbcConnectAccess(access.file = file.path(
    where$loc,
    where$db
  ))
  # Load all spawn records for the requested years and locations
  spawn <- RODBC::sqlFetch(channel = accessDB, sqtable = where$fns$allSpawn) %>%
    dplyr::rename(
      LocationCode = Loc_Code, SpawnNumber = Spawn_Number,
      LengthMacro = Length_Macrocystis
    ) %>%
    dplyr::mutate(Method = stringr::str_to_title(Method)) %>%
    dplyr::filter(Year %in% yrs, LocationCode %in% a$LocationCode) %>%
    dplyr::select(
      Year, LocationCode, SpawnNumber, LengthMacro, Length, Method
    ) %>%
    tibble::as_tibble()
  # Get plant-level data (rows with missing Mature counts are dropped)
  plants <- RODBC::sqlFetch(channel = accessDB, sqtable = where$fns$plants) %>%
    dplyr::rename(LocationCode = Loc_Code, SpawnNumber = Spawn_Number) %>%
    dplyr::filter(
      Year %in% yrs, LocationCode %in% a$LocationCode,
      !is.na(Mature)
    ) %>%
    dplyr::select(Year, LocationCode, SpawnNumber, Transect, Mature) %>%
    tibble::as_tibble()
  # Get a small subset of area data
  areasSm <- a %>%
    dplyr::select(Region, StatArea, Section, LocationCode, Pool) %>%
    dplyr::distinct() %>%
    tibble::as_tibble()
  # Get transect-level data
  transects <- RODBC::sqlFetch(
    channel = accessDB,
    sqtable = where$fns$transects
  ) %>%
    dplyr::rename(LocationCode = Loc_Code, SpawnNumber = Spawn_Number) %>%
    dplyr::filter(Year %in% yrs, LocationCode %in% a$LocationCode) %>%
    dplyr::left_join(y = areasSm, by = "LocationCode") %>%
    dplyr::select(
      Year, Region, StatArea, Section, LocationCode, SpawnNumber, Transect,
      Height, Width, Layers
    ) %>%
    tibble::as_tibble()
  # Merge the data; transects with no plant records get Mature = 0
  dat <- transects %>%
    dplyr::left_join(y = plants, by = c(
      "Year", "LocationCode", "SpawnNumber", "Transect"
    )) %>%
    tidyr::replace_na(replace = list(Mature = 0)) %>%
    dplyr::mutate(Swath = tSwath)
  # Calculate transect-level data
  # NOTE(review): unique()/UniqueNA assume a single Width, Swath, Height, and
  # Layers value per transect; summarise() will fail if that does not hold
  datTrans <- dat %>%
    dplyr::group_by(
      Year, Region, StatArea, Section, LocationCode, SpawnNumber, Transect
    ) %>%
    # Transect t
    dplyr::summarise(
      # Transect metrics for all transects (not just those with mature plants)
      Width = unique(Width),
      Swath = unique(Swath),
      Area = Width * Swath,
      # Plant metrics for mature plants only
      Height = gfiscamutils::UniqueNA(Height[Mature > 0]),
      EggLyrs = gfiscamutils::UniqueNA(Layers[Mature > 0]),
      Stalks = gfiscamutils::SumNA(Mature[Mature > 0]),
      Plants = length(Mature[Mature > 0])
    ) %>%
    dplyr::ungroup()
  # Calculate spawn-level data
  biomassSpawn <- datTrans %>%
    dplyr::left_join(
      y = spawn,
      by = c("Year", "LocationCode", "SpawnNumber")
    ) %>%
    # Fall back to overall spawn length when Macrocystis length is missing
    dplyr::mutate(
      LengthMacro = ifelse(is.na(LengthMacro), Length, LengthMacro)
    ) %>%
    dplyr::filter(Method %in% c("Surface", "Dive")) %>%
    dplyr::group_by(
      Year, Region, StatArea, Section, LocationCode, SpawnNumber
    ) %>%
    # Spawn s
    dplyr::summarise(
      LengthMacro = unique(LengthMacro),
      Width = gfiscamutils::MeanNA(Width),
      Area = gfiscamutils::SumNA(Area),
      Plants = gfiscamutils::SumNA(Plants),
      Stalks = gfiscamutils::SumNA(Stalks),
      Height = gfiscamutils::MeanNA(Height),
      EggLyrs = gfiscamutils::MeanNA(EggLyrs),
      StalksPerPlant = Stalks / Plants,
      # Eggs per plant in thousands (eggs * 10^3 / plant; Haegele and
      # Schweigert 1990); spawn s
      EggsPerPlant = beta * EggLyrs^gamma * Height^delta *
        StalksPerPlant^epsilon * 1000,
      # Egg density in thousands (eggs * 10^3 / m^2); spawn s
      EggDens = EggsPerPlant * Plants / Area,
      # Biomass in tonnes, based on Hay (1985), and Hay and Brett (1988); spawn
      # s
      MacroSI = EggDens * LengthMacro * Width * 1000 / theta
    ) %>%
    dplyr::rename(MacroLyrs = EggLyrs) %>%
    dplyr::ungroup()
  # Return the macrocystis spawn
  SI <- biomassSpawn %>%
    dplyr::select(
      Year, Region, StatArea, Section, LocationCode, SpawnNumber, MacroSI
    )
  # Close the connection
  RODBC::odbcClose(accessDB)
  # Return the data (SI is the Macrocystis spawn index by spawn number/year)
  return(list(
    dat = dat, datTrans = datTrans, biomassSpawn = biomassSpawn, SI = SI
  ))
} # End CalcMacroSpawn function
#' Calculate the understory spawn index.
#'
#' Calculate the Pacific Herring understory spawn index in tonnes.
#'
#' @param where List. Location of the Pacific Herring understory spawn database
#' (see examples).
#' @param a Tibble. Table of geographic information indicating the subset of
#' spawn survey observations to include in calculations; from
#' \code{\link{LoadAreaData}}.
#' @param yrs Numeric vector. Year(s) to include in the calculations, usually
#' starting in 1951.
#' @param algCoefs Tibble. Table of algae coefficients; from
#' \code{\link{algaeCoefs}}.
#' @param tau Tibble. Table of understory spawn width adjustment factors from
#' \code{\link{underWidthFac}}.
#' @param alpha Numeric. Regression slope for substrate; from \code{\link{pars}}
#' \insertCite{HaegeleEtal1979}{SpawnIndex}.
#' @param beta Numeric. Regression slope for algae; from \code{\link{pars}}
#' \insertCite{Schweigert2005}{SpawnIndex}.
#' @param gamma Numeric. Regression exponent on number of egg layers; from
#' \code{\link{pars}} \insertCite{Schweigert2005}{SpawnIndex}.
#' @param delta Numeric. Regression exponent on proportion of algae; from
#' \code{\link{pars}} \insertCite{Schweigert2005}{SpawnIndex}.
#' @param theta Numeric. Egg conversion factor (eggs to biomass); from
#' \code{\link{CalcEggConversion}}.
#' @importFrom RODBC odbcConnectAccess sqlFetch odbcClose
#' @importFrom dplyr select distinct rename left_join filter %>%
#' @importFrom tibble as_tibble
#' @importFrom stringr str_to_title
#' @importFrom gfiscamutils MeanNA SumNA UniqueNA
#' @importFrom tidyr replace_na gather
#' @importFrom Rdpack reprompt
#' @return List. The element \code{SI} is a tibble with understory spawn index
#' (\code{UnderSI}) in tonnes by spawn number and year. The spawn number is
#' the finest spatial scale at which we calculate the spawn index. Other
#' information in this tibble comes from \code{a}: Region, Statistical Area,
#' Section, and Location code.
#' @references \insertAllCited
#' @note The `spawn index' is a relative index of spawning biomass.
#' @seealso \code{\link{LoadAreaData}} \code{\link{CalcEggConversion}}
#' \code{\link{pars}} \code{\link{algaeCoefs}}
#' @export
#' @examples
#' dbLoc <- system.file("extdata", package = "SpawnIndex")
#' areaLoc <- list(
#' loc = dbLoc, db = "HerringSpawn.mdb",
#' fns = list(sections = "Sections", locations = "Location")
#' )
#' areas <- LoadAreaData(reg = "WCVI", where = areaLoc)
#' underLoc <- list(
#' loc = dbLoc, db = "HerringSpawn.mdb",
#' fns = list(
#' allSpawn = "tSSAllspawn", algTrans = "tSSVegTrans",
#' stations = "tSSStations", algae = "tSSVegetation"
#' )
#' )
#' data(underWidthFac)
#' data(pars)
#' data(algaeCoefs)
#' underSpawn <- CalcUnderSpawn(where = underLoc, a = areas, yrs = 2010:2015)
#' underSpawn$SI
CalcUnderSpawn <- function(where,
                           a,
                           yrs,
                           algCoefs = algaeCoefs,
                           tau = underWidthFac,
                           alpha = pars$understory$alpha,
                           beta = pars$understory$beta,
                           # Fixed: was `pars$under$gamma`, which only resolved
                           # via `$` partial matching on the pars list
                           gamma = pars$understory$gamma,
                           delta = pars$understory$delta,
                           theta = CalcEggConversion()) {
  # Establish connection with access
  accessDB <- RODBC::odbcConnectAccess(access.file = file.path(
    where$loc,
    where$db
  ))
  # Get a small subset of area data (region per location)
  areasSm1 <- a %>%
    dplyr::select(Region, LocationCode) %>%
    dplyr::distinct() %>%
    tibble::as_tibble()
  # Load all spawn records for the requested years and locations
  spawn <- RODBC::sqlFetch(channel = accessDB, sqtable = where$fns$allSpawn) %>%
    dplyr::rename(
      LocationCode = Loc_Code, SpawnNumber = Spawn_Number,
      LengthAlgae = Length_Vegetation
    ) %>%
    dplyr::mutate(Method = stringr::str_to_title(Method)) %>%
    dplyr::filter(Year %in% yrs, LocationCode %in% a$LocationCode) %>%
    dplyr::select(
      Year, LocationCode, SpawnNumber, LengthAlgae, Length, Method
    ) %>%
    tibble::as_tibble()
  # Load algae transects
  algTrans <- RODBC::sqlFetch(
    channel = accessDB,
    sqtable = where$fns$algTrans
  ) %>%
    dplyr::rename(
      LocationCode = Loc_Code, SpawnNumber = Spawn_Number,
      QuadratSize = Quadrat_Size, WidthObs = Width_Recorded
    ) %>%
    dplyr::filter(Year %in% yrs, LocationCode %in% a$LocationCode) %>%
    # TODO: This is a temporary cludge (all survey quadrats are 0.5); MT will
    # fix it in the database
    dplyr::mutate(QuadratSize = ifelse(QuadratSize == 0, 0.5, QuadratSize)) %>%
    dplyr::select(
      Year, LocationCode, SpawnNumber, Transect, WidthObs, QuadratSize
    ) %>%
    dplyr::left_join(y = areasSm1, by = "LocationCode") %>%
    tibble::as_tibble()
  # Correction factors for region(s) by year (to fix lead line shrinkage issue)
  widthFacs <- tau %>%
    tidyr::gather(key = Region, value = WidthFac, -Year)
  # Merge the width factors and correct transect widths; years/regions with no
  # factor keep the observed width (factor 1.0)
  algTrans <- algTrans %>%
    dplyr::left_join(y = widthFacs, by = c("Year", "Region")) %>%
    tidyr::replace_na(replace = list(WidthFac = 1.0)) %>%
    dplyr::mutate(Width = WidthObs * WidthFac)
  # Error if any quadrats are not 0.5 m^2 (the algae regression assumes this)
  if (any(algTrans$QuadratSize != 0.5)) {
    stop("All quadrats must be 0.5m^2", call. = FALSE)
  }
  # Load station data
  stations <- RODBC::sqlFetch(
    channel = accessDB,
    sqtable = where$fns$stations
  ) %>%
    dplyr::rename(
      LocationCode = Loc_Code, SpawnNumber = Spawn_Number,
      SubLyrs = Layers_Bottom
    ) %>%
    dplyr::filter(Year %in% yrs, LocationCode %in% a$LocationCode) %>%
    dplyr::mutate(SubProp = Percent_Bottom / 100) %>%
    dplyr::select(
      Year, LocationCode, SpawnNumber, Transect, Station, SubLyrs, SubProp
    ) %>%
    tibble::as_tibble()
  # Get egg layer info: substrate
  eggLyrsSub <- stations %>%
    dplyr::group_by(Year, LocationCode, SpawnNumber, Transect) %>%
    dplyr::summarise(Layers = gfiscamutils::MeanNA(SubLyrs)) %>%
    dplyr::ungroup() %>%
    dplyr::mutate(Source = "Substrate")
  # Load algae; proportions are capped at 1
  algae <- RODBC::sqlFetch(channel = accessDB, sqtable = where$fns$algae) %>%
    dplyr::rename(
      LocationCode = Loc_Code, SpawnNumber = Spawn_Number,
      AlgType = Type_Vegetation, AlgLyrs = Layers_Vegetation
    ) %>%
    dplyr::filter(Year %in% yrs, LocationCode %in% a$LocationCode) %>%
    dplyr::mutate(
      AlgType = stringr::str_to_upper(AlgType),
      AlgProp = Percent_Vegetation / 100,
      AlgProp = ifelse(AlgProp > 1, 1, AlgProp)
    ) %>%
    dplyr::select(
      Year, LocationCode, SpawnNumber, Transect, Station, AlgType, AlgLyrs,
      AlgProp
    ) %>%
    tibble::as_tibble()
  # Get egg layer info: algae
  eggLyrsAlg <- algae %>%
    dplyr::group_by(Year, LocationCode, SpawnNumber, Transect) %>%
    dplyr::summarise(Layers = gfiscamutils::MeanNA(AlgLyrs)) %>%
    dplyr::ungroup() %>%
    dplyr::mutate(Source = "Algae")
  # Combine egg layer info: mean over sources by transect, then over transects
  eggLyrs <- dplyr::bind_rows(eggLyrsSub, eggLyrsAlg) %>%
    dplyr::group_by(Year, LocationCode, SpawnNumber, Transect) %>%
    dplyr::summarise(Layers = gfiscamutils::MeanNA(Layers)) %>%
    dplyr::group_by(Year, LocationCode, SpawnNumber) %>%
    dplyr::summarise(UnderLyrs = gfiscamutils::MeanNA(Layers)) %>%
    dplyr::ungroup()
  # If there are missing algae types
  if (any(!algae$AlgType %in% algCoefs$AlgType)) {
    # Get missing algae type(s)
    missAlg <- unique(algae$AlgType[!algae$AlgType %in%
      algCoefs$AlgType])
    # Error, and show missing type(s)
    stop("Missing algae type(s): ", paste(missAlg, collapse = ", "),
      call. = FALSE
    )
  } # End if there are missing algae types
  # Get a small subset of area data
  areasSm2 <- a %>%
    dplyr::select(Region, StatArea, Section, LocationCode) %>%
    dplyr::distinct() %>%
    tibble::as_tibble()
  # Error if proportion > 1
  if (any(stations$SubProp > 1, na.rm = TRUE)) {
    stop("Substrate proportion > 1 in understory spawn data", call. = FALSE)
  }
  # Calculate substrate egg density
  eggsSub <- stations %>%
    dplyr::full_join(y = algTrans, by = c(
      "Year", "LocationCode", "SpawnNumber", "Transect"
    )) %>%
    dplyr::left_join(y = areasSm2, by = c("Region", "LocationCode")) %>%
    # Egg density in thousands (eggs x 10^3 / m^2; Haegele et al. 1979);
    # quadrat q
    dplyr::mutate(EggDensSub = alpha * SubLyrs * SubProp) %>%
    tidyr::replace_na(replace = list(EggDensSub = 0)) %>%
    dplyr::select(
      Year, Region, StatArea, Section, LocationCode, SpawnNumber, Transect,
      Station, Width, EggDensSub
    )
  # Error if proportion > 1
  if (any(algae$AlgProp > 1, na.rm = TRUE)) {
    stop("Algae proportion > 1 in understory spawn data", call. = FALSE)
  }
  # Calculate algae egg density by quadrat/station
  eggsAlg <- algae %>%
    dplyr::left_join(y = algCoefs, by = "AlgType") %>%
    dplyr::left_join(y = areasSm2, by = "LocationCode") %>%
    dplyr::left_join(
      y = dplyr::select(.data = algTrans, -Width),
      by = c("Year", "Region", "LocationCode", "SpawnNumber", "Transect")
    ) %>%
    # Egg density in thousands (eggs * 10^3 / m^2; Schweigert 2005); quadrat
    # size coefficients not required because all quadrats are 0.5m^2 (1.0512)
    # Algae a
    dplyr::mutate(EggDensAlg = beta * AlgLyrs^gamma * AlgProp^delta * Coef *
      1.0512) %>%
    dplyr::group_by(
      Year, Region, StatArea, Section, LocationCode, SpawnNumber, Transect,
      Station
    ) %>%
    # Quadrat q
    dplyr::summarise(EggDensAlg = gfiscamutils::SumNA(EggDensAlg)) %>%
    tidyr::replace_na(replace = list(EggDensAlg = 0)) %>%
    dplyr::ungroup()
  # Combine eggs; substrate densities only count where the transect has width
  eggs <- eggsSub %>%
    dplyr::full_join(y = eggsAlg, by = c(
      "Year", "Region", "StatArea", "Section", "LocationCode", "SpawnNumber",
      "Transect", "Station"
    )) %>%
    tidyr::replace_na(replace = list(
      Width = 0, EggDensSub = 0, EggDensAlg = 0
    )) %>%
    dplyr::mutate(EggDensSub = ifelse(Width > 0, EggDensSub, 0))
  # Calculate total egg density by station/quadrat
  eggsStation <- eggs %>%
    # Total egg density in thousands (eggs * 10^3 / m^2); quadrat q
    dplyr::mutate(EggDens = EggDensSub + EggDensAlg) %>%
    dplyr::filter(!is.na(Station))
  # Widths: mean transect width per spawn
  widths <- eggsStation %>%
    dplyr::group_by(
      Year, Region, StatArea, Section, LocationCode, SpawnNumber, Transect
    ) %>%
    dplyr::summarise(Width = unique(Width)) %>%
    dplyr::group_by(
      Year, Region, StatArea, Section, LocationCode, SpawnNumber
    ) %>%
    # Spawn s
    dplyr::summarise(
      WidthBar = gfiscamutils::MeanNA(Width)
    ) %>%
    dplyr::ungroup()
  # Calculate transect-level metrics
  eggsTrans <- eggsStation %>%
    dplyr::filter(Width > 0) %>%
    dplyr::group_by(
      Year, Region, StatArea, Section, LocationCode, SpawnNumber, Transect
    ) %>%
    # Transect t
    dplyr::summarise(
      EggDens = gfiscamutils::MeanNA(EggDens),
      Width = unique(Width)
    ) %>%
    dplyr::ungroup()
  # Calculate spawn number-level metrics; egg density is a width-weighted mean
  eggsSpawn <- eggsTrans %>%
    dplyr::left_join(
      y = spawn,
      by = c("Year", "LocationCode", "SpawnNumber")
    ) %>%
    dplyr::mutate(LengthAlgae = ifelse(is.na(LengthAlgae), Length,
      LengthAlgae
    )) %>%
    dplyr::filter(Method %in% c("Surface", "Dive")) %>%
    dplyr::left_join(y = widths, by = c(
      "Year", "Region", "StatArea", "Section", "LocationCode", "SpawnNumber"
    )) %>%
    dplyr::group_by(
      Year, Region, StatArea, Section, LocationCode, SpawnNumber
    ) %>%
    # Spawn s
    dplyr::summarise(
      WidthBar = unique(WidthBar),
      LengthAlgae = unique(LengthAlgae),
      EggDens = gfiscamutils::WtMeanNA(EggDens, w = Width)
    ) %>%
    dplyr::ungroup()
  # Calculate understory biomass by spawn number
  biomassSpawn <- eggsSpawn %>%
    dplyr::mutate(
      # Biomass in tonnes, based on Hay (1985), and Hay and Brett (1988); spawn
      # s
      UnderSI = EggDens * LengthAlgae * WidthBar * 1000 / theta
    ) %>%
    dplyr::left_join(y = eggLyrs, by = c("Year", "LocationCode", "SpawnNumber"))
  # Calculate understory SI by spawn number
  SI <- biomassSpawn %>%
    dplyr::select(
      Year, Region, StatArea, Section, LocationCode, SpawnNumber, UnderSI
    )
  # Close the connection
  RODBC::odbcClose(accessDB)
  # Return the data (SI is the understory spawn index by spawn number/year)
  return(list(
    stations = stations, algae = algae, eggs = eggs, eggsStation = eggsStation,
    eggsTrans = eggsTrans, eggsSpawn = eggsSpawn, biomassSpawn = biomassSpawn,
    SI = SI
  ))
} # End CalcUnderSpawn function
|
\name{statFuns}
\alias{statFuns}
\title{Apply summary statistics functions given a type of vector}
\usage{
statFuns(type, x)
}
\arguments{
\item{type}{The type of computation. To choose in
\code{c("double", "integer", "logical", "factor",
"character")}.}
\item{x}{Alternatively, the vector can be given instead.}
}
\value{
A list of functions to use in order to compute summary
statistics.
}
\description{
Apply summary statistics functions given a type of vector
}
\keyword{internal}
| /man/statFuns.Rd | no_license | SESjo/SES | R | false | false | 496 | rd | \name{statFuns}
\alias{statFuns}
\title{Apply summary statistics functions given a type of vector}
\usage{
statFuns(type, x)
}
\arguments{
\item{type}{The type of computation. To choose in
\code{c("double", "integer", "logical", "factor",
"character")}.}
\item{x}{Alternatively, the vector can be given instead.}
}
\value{
A list of functions to use in order to compute summary
statistics.
}
\description{
Apply summary statistics functions given a type of vector
}
\keyword{internal}
|
# Read the semicolon-delimited power consumption data ("?" encodes missing)
power <- read.table("household_power_consumption.txt",
  header = TRUE, sep = ";", na.strings = "?"
)
# Keep only observations from 1-2 February 2007 (Date is a d/m/Y string)
feb <- power[power$Date %in% c("1/2/2007", "2/2/2007"), ]
# Plot 1: histogram of global active power, written to a 480x480 px PNG
png("plot1.png", width = 480, height = 480)
hist(feb$Global_active_power,
  main = "Global Active Power",
  xlab = "Global Active Power (kilowatts)",
  col = "red", breaks = 12, ylim = c(0, 1200)
)
dev.off()
| /plot1.R | no_license | StarS127/ExData_Plotting1 | R | false | false | 475 | r | # read data file
# Read the semicolon-delimited data file; "?" entries become NA
tbl <- read.table("household_power_consumption.txt",
header=TRUE, sep=";", na.strings = "?")
# filter data from Feb. 1, 2007 to Feb. 2, 2007 (Date is a d/m/Y string)
data <- subset(tbl, Date %in% c("1/2/2007","2/2/2007"))
# Plot 1: Global active power
# 480x480 px PNG: red histogram of household global active power (kW)
png("plot1.png", width=480, height=480)
hist(data$Global_active_power, main = "Global Active Power",
col = "red", xlab = "Global Active Power (kilowatts)",
breaks=12, ylim = c(0,1200))
dev.off()
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#' @include R6.R
# R6 wrapper around the C++ feather table writer; each method forwards to a
# compiled `ipc___feather___TableWriter__*` entry point, passing `self` (the
# wrapped external pointer) through to C++.
`arrow::ipc::feather::TableWriter` <- R6Class("arrow::ipc::feather::TableWriter", inherit = `arrow::Object`,
  public = list(
    # Set the file-level description metadata
    SetDescription = function(description) ipc___feather___TableWriter__SetDescription(self, description),
    # Declare the number of rows to be written
    SetNumRows = function(num_rows) ipc___feather___TableWriter__SetNumRows(self, num_rows),
    # Append a named column of values to the file
    Append = function(name, values) ipc___feather___TableWriter__Append(self, name, values),
    # Flush and finish writing the file
    Finalize = function() ipc___feather___TableWriter__Finalize(self)
  )
)
# R6 wrapper around the C++ feather TableReader. Methods forward to compiled
# `ipc___feather___TableReader__*` entry points with `self` as the handle to
# the underlying C++ reader.
`arrow::ipc::feather::TableReader` <- R6Class("arrow::ipc::feather::TableReader", inherit = `arrow::Object`,
  public = list(
    GetDescription = function() ipc___feather___TableReader__GetDescription(self),
    # Fixed: previously called `ipc__feather___...` (two underscores after
    # "ipc"), which does not match the naming convention of every other
    # wrapper in this file and would fail with "could not find function".
    HasDescription = function() ipc___feather___TableReader__HasDescription(self),
    version = function() ipc___feather___TableReader__version(self),
    num_rows = function() ipc___feather___TableReader__num_rows(self),
    num_columns = function() ipc___feather___TableReader__num_columns(self),
    GetColumnName = function(i) ipc___feather___TableReader__GetColumnName(self, i),
    # Return the i-th column wrapped as an `arrow::Column` shared pointer
    GetColumn = function(i) shared_ptr(`arrow::Column`, ipc___feather___TableReader__GetColumn(self, i)),
    # Read the given columns (NULL = all) into an `arrow::Table`
    Read = function(columns) {
      shared_ptr(`arrow::Table`, ipc___feather___TableReader__Read(self, columns))
    }
  )
)
#' Create TableWriter that writes into a stream
#'
#' @param stream an OutputStream
#'
#' @return a unique pointer to an `arrow::ipc::feather::TableWriter`
#' @export
FeatherTableWriter <- function(stream) {
  UseMethod("FeatherTableWriter")
}
#' @export
`FeatherTableWriter.arrow::io::OutputStream` <- function(stream){
  # Open a C++ TableWriter on the stream and wrap it in the R6 class above
  unique_ptr(`arrow::ipc::feather::TableWriter`, ipc___feather___TableWriter__Open(stream))
}
#' Write data in the feather format
#'
#' Generic dispatching on `data`; supported inputs are data frames and
#' `arrow::RecordBatch` objects.
#'
#' @param data A data frame or arrow::RecordBatch
#' @param stream A file path or an arrow::io::OutputStream
#'
#' @export
write_feather <- function(data, stream) {
  UseMethod("write_feather", data)
}
#' @export
write_feather.default <- function(data, stream) {
  stop("unsupported")
}
#' @export
write_feather.data.frame <- function(data, stream) {
  # splice the columns in the record_batch() call
  # e.g. if we had data <- data.frame(x = <...>, y = <...>)
  # then record_batch(!!!data) is the same as
  # record_batch(x = data$x, y = data$y)
  # see ?rlang::list2()
  write_feather(record_batch(!!!data), stream)
}
#' @method write_feather arrow::RecordBatch
#' @export
`write_feather.arrow::RecordBatch` <- function(data, stream) {
  # Delegate to the generic below, which dispatches on `stream`
  write_feather_RecordBatch(data, stream)
}
#' @rdname write_feather
#' @export
write_feather_RecordBatch <- function(data, stream) {
  # Dispatch on `stream`: a character path, fs_path, or open OutputStream
  UseMethod("write_feather_RecordBatch", stream)
}
#' @export
#' @method write_feather_RecordBatch default
`write_feather_RecordBatch.default` <- function(data, stream) {
  stop("unsupported")
}
#' @export
#' @method write_feather_RecordBatch character
`write_feather_RecordBatch.character` <- function(data, stream) {
  # Normalize the path to absolute and delegate to the fs_path method
  `write_feather_RecordBatch.fs_path`(data, fs::path_abs(stream))
}
#' @export
#' @method write_feather_RecordBatch fs_path
`write_feather_RecordBatch.fs_path` <- function(data, stream) {
  file_stream <- FileOutputStream(stream)
  # Close the file handle on exit, even if the write fails
  on.exit(file_stream$close())
  `write_feather_RecordBatch.arrow::io::OutputStream`(data, file_stream)
}
#' @export
#' @method write_feather_RecordBatch arrow::io::OutputStream
`write_feather_RecordBatch.arrow::io::OutputStream` <- function(data, stream) {
  # Open a feather writer on the stream and hand the batch to the C++ writer
  ipc___TableWriter__RecordBatch__WriteFeather(FeatherTableWriter(stream), data)
}
#' A arrow::ipc::feather::TableReader to read from a file
#'
#' @param file A file path, arrow::io::RandomAccessFile
#' @param mmap Is the file memory mapped (applicable to the character and fs_path methods)
#' @param ... extra parameters
#'
#' @export
FeatherTableReader <- function(file, mmap = TRUE, ...){
  UseMethod("FeatherTableReader")
}
#' @export
FeatherTableReader.default <- function(file, mmap = TRUE, ...) {
  stop("unsupported")
}
#' @export
FeatherTableReader.character <- function(file, mmap = TRUE, ...) {
  # Normalize the path to absolute and re-dispatch on the fs_path method
  FeatherTableReader(fs::path_abs(file), mmap = mmap, ...)
}
#' @export
FeatherTableReader.fs_path <- function(file, mmap = TRUE, ...) {
  # Memory-map the file by default; otherwise open a regular readable file
  if (isTRUE(mmap)) {
    stream <- mmap_open(file, ...)
  } else {
    stream <- ReadableFile(file, ...)
  }
  FeatherTableReader(stream)
}
#' @export
`FeatherTableReader.arrow::io::RandomAccessFile` <- function(file, mmap = TRUE, ...){
  # Open a C++ TableReader on the random-access file (mmap ignored here)
  unique_ptr(`arrow::ipc::feather::TableReader`, ipc___feather___TableReader__Open(file))
}
#' @export
`FeatherTableReader.arrow::ipc::feather::TableReader` <- function(file, mmap = TRUE, ...){
  # Already a reader: return it unchanged (makes the constructor idempotent)
  file
}
#' Read a feather file
#'
#' @param file a arrow::ipc::feather::TableReader or whatever the [FeatherTableReader()] function can handle
#' @param col_select [tidy selection][tidyselect::vars_select()] of columns to read.
#' @param as_tibble should the [arrow::Table][arrow__Table] be converted to a tibble.
#' @param ... additional parameters
#'
#' @return a data frame if `as_tibble` is `TRUE` (the default), or a [arrow::Table][arrow__Table] otherwise
#'
#' @export
read_feather <- function(file, col_select = NULL, as_tibble = TRUE, ...){
  reader <- FeatherTableReader(file, ...)
  # Resolve the tidyselect specification against the file's column names.
  # `columns` stays NULL when no selection was supplied, in which case
  # Read() returns every column.
  all_columns <- ipc___feather___TableReader__column_names(reader)
  col_select <- enquo(col_select)
  columns <- if (!quo_is_null(col_select)) {
    vars_select(all_columns, !!col_select)
  }
  out <- reader$Read(columns)
  if (isTRUE(as_tibble)) {
    out <- as.data.frame(out)
  }
  out
}
| /r/R/feather.R | permissive | fossabot/arrow | R | false | false | 6,262 | r | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#' @include R6.R
`arrow::ipc::feather::TableWriter` <- R6Class("arrow::ipc::feather::TableWriter", inherit = `arrow::Object`,
public = list(
SetDescription = function(description) ipc___feather___TableWriter__SetDescription(self, description),
SetNumRows = function(num_rows) ipc___feather___TableWriter__SetNumRows(self, num_rows),
Append = function(name, values) ipc___feather___TableWriter__Append(self, name, values),
Finalize = function() ipc___feather___TableWriter__Finalize(self)
)
)
# R6 wrapper around the C++ feather TableReader. Methods forward to compiled
# `ipc___feather___TableReader__*` entry points with `self` as the handle to
# the underlying C++ reader.
`arrow::ipc::feather::TableReader` <- R6Class("arrow::ipc::feather::TableReader", inherit = `arrow::Object`,
  public = list(
    GetDescription = function() ipc___feather___TableReader__GetDescription(self),
    # Fixed: previously called `ipc__feather___...` (two underscores after
    # "ipc"), which does not match the naming convention of every other
    # wrapper in this file and would fail with "could not find function".
    HasDescription = function() ipc___feather___TableReader__HasDescription(self),
    version = function() ipc___feather___TableReader__version(self),
    num_rows = function() ipc___feather___TableReader__num_rows(self),
    num_columns = function() ipc___feather___TableReader__num_columns(self),
    GetColumnName = function(i) ipc___feather___TableReader__GetColumnName(self, i),
    # Return the i-th column wrapped as an `arrow::Column` shared pointer
    GetColumn = function(i) shared_ptr(`arrow::Column`, ipc___feather___TableReader__GetColumn(self, i)),
    # Read the given columns (NULL = all) into an `arrow::Table`
    Read = function(columns) {
      shared_ptr(`arrow::Table`, ipc___feather___TableReader__Read(self, columns))
    }
  )
)
#' Create TableWriter that writes into a stream
#'
#' @param stream an OutputStream
#'
#' @export
FeatherTableWriter <- function(stream) {
UseMethod("FeatherTableWriter")
}
#' @export
`FeatherTableWriter.arrow::io::OutputStream` <- function(stream){
unique_ptr(`arrow::ipc::feather::TableWriter`, ipc___feather___TableWriter__Open(stream))
}
#' Write data in the feather format
#'
#' @param data frame or arrow::RecordBatch
#' @param stream A file path or an arrow::io::OutputStream
#'
#' @export
write_feather <- function(data, stream) {
UseMethod("write_feather", data)
}
#' @export
write_feather.default <- function(data, stream) {
stop("unsupported")
}
#' @export
write_feather.data.frame <- function(data, stream) {
# splice the columns in the record_batch() call
# e.g. if we had data <- data.frame(x = <...>, y = <...>)
# then record_batch(!!!data) is the same as
# record_batch(x = data$x, y = data$y)
# see ?rlang::list2()
write_feather(record_batch(!!!data), stream)
}
#' @method write_feather arrow::RecordBatch
#' @export
`write_feather.arrow::RecordBatch` <- function(data, stream) {
write_feather_RecordBatch(data, stream)
}
#' @rdname write_feather
#' @export
write_feather_RecordBatch <- function(data, stream) {
UseMethod("write_feather_RecordBatch", stream)
}
#' @export
#' @method write_feather_RecordBatch default
`write_feather_RecordBatch.default` <- function(data, stream) {
stop("unsupported")
}
#' @export
#' @method write_feather_RecordBatch character
`write_feather_RecordBatch.character` <- function(data, stream) {
`write_feather_RecordBatch.fs_path`(data, fs::path_abs(stream))
}
#' @export
#' @method write_feather_RecordBatch fs_path
`write_feather_RecordBatch.fs_path` <- function(data, stream) {
file_stream <- FileOutputStream(stream)
on.exit(file_stream$close())
`write_feather_RecordBatch.arrow::io::OutputStream`(data, file_stream)
}
#' @export
#' @method write_feather_RecordBatch arrow::io::OutputStream
`write_feather_RecordBatch.arrow::io::OutputStream` <- function(data, stream) {
ipc___TableWriter__RecordBatch__WriteFeather(FeatherTableWriter(stream), data)
}
#' A arrow::ipc::feather::TableReader to read from a file
#'
#' @param file A file path, arrow::io::RandomAccessFile
#' @param mmap Is the file memory mapped (applicable to the character and fs_path methods)
#' @param ... extra parameters
#'
#' @export
FeatherTableReader <- function(file, mmap = TRUE, ...){
UseMethod("FeatherTableReader")
}
#' @export
FeatherTableReader.default <- function(file, mmap = TRUE, ...) {
stop("unsupported")
}
#' @export
FeatherTableReader.character <- function(file, mmap = TRUE, ...) {
FeatherTableReader(fs::path_abs(file), mmap = mmap, ...)
}
#' @export
FeatherTableReader.fs_path <- function(file, mmap = TRUE, ...) {
if (isTRUE(mmap)) {
stream <- mmap_open(file, ...)
} else {
stream <- ReadableFile(file, ...)
}
FeatherTableReader(stream)
}
#' @export
`FeatherTableReader.arrow::io::RandomAccessFile` <- function(file, mmap = TRUE, ...){
unique_ptr(`arrow::ipc::feather::TableReader`, ipc___feather___TableReader__Open(file))
}
#' @export
`FeatherTableReader.arrow::ipc::feather::TableReader` <- function(file, mmap = TRUE, ...){
file
}
#' Read a feather file
#'
#' @param file a arrow::ipc::feather::TableReader or whatever the [FeatherTableReader()] function can handle
#' @param col_select [tidy selection][tidyselect::vars_select()] of columns to read.
#' @param as_tibble should the [arrow::Table][arrow__Table] be converted to a tibble.
#' @param ... additional parameters
#'
#' @return a data frame if `as_tibble` is `TRUE` (the default), or a [arrow::Table][arrow__Table] otherwise
#'
#' @export
read_feather <- function(file, col_select = NULL, as_tibble = TRUE, ...){
reader <- FeatherTableReader(file, ...)
all_columns <- ipc___feather___TableReader__column_names(reader)
col_select <- enquo(col_select)
columns <- if (!quo_is_null(col_select)) {
vars_select(all_columns, !!col_select)
}
out <- reader$Read(columns)
if (isTRUE(as_tibble)) {
out <- as.data.frame(out)
}
out
}
|
#!/usr/bin/env Rscript
#
# Process the enriched AA data, creating field-normalized and categorized versions
# of the performance indicators and converting variables to reasonable alternatives
# Author: Dakota Murray
#
# Usage: get_tag_data.R <aarmp_csv> <output_csv>
args = commandArgs(trailingOnly=TRUE)
aarmp_path <- args[1]
output_path <- args[2]
# Load the matched AA/RMP data
aarmp <- read.csv(aarmp_path)
# Split each ";"-delimited RMP tag string into a trimmed character vector;
# NA inputs stay NA. Base strsplit() replaces the previous
# sapply/ifelse/str_split combination: ifelse() is vectorized and truncates a
# list result to its first element, so it is not a safe branch here.
aarmp$tag_list <- lapply(aarmp$RMP.Tags, function(x) {
  if (is.na(x)) return(NA_character_)
  trimws(strsplit(as.character(x), ";")[[1]], "both")
})
# Keep the first 20 unique tags observed across all professors
unq <- unique(unlist(aarmp$tag_list))[1:20]
# Build the n x t indicator matrix (professor x tag) in one shot rather than
# growing a data.frame with rbind() inside a loop, which is O(n^2).
tag_frame <- as.data.frame(t(vapply(
  aarmp$tag_list,
  function(tl) unq %in% unlist(tl),
  logical(length(unq))
)))
colnames(tag_frame) <- unq
# Now that we have these, we can bind with the other features.
tagdata <- cbind(overall = aarmp$OUR.Overall.mean, tag_frame)
colnames(tagdata) <- c("overall", unq)
tagdata$PersonId <- aarmp$AA_id
# Write the output
write.csv(tagdata, output_path, row.names = FALSE)
| /workflow/scripts/get_tag_data.R | no_license | murrayds/aa_rmp | R | false | false | 1,397 | r | #!/usr/bin/env Rscript
#
# Process the enriched AA data, creating field-normalized and categorized versions
# of the performance indicators and converting variables to reasonable alternatives
# Author: Dakota Murray
#
# Usage: get_tag_data.R <aarmp_csv> <output_csv>
args = commandArgs(trailingOnly=TRUE)
aarmp_path <- args[1]
output_path <- args[2]
# Load the matched AA/RMP data
aarmp <- read.csv(aarmp_path)
# Split each ";"-delimited RMP tag string into a trimmed character vector;
# NA inputs stay NA. Base strsplit() replaces the previous
# sapply/ifelse/str_split combination: ifelse() is vectorized and truncates a
# list result to its first element, so it is not a safe branch here.
aarmp$tag_list <- lapply(aarmp$RMP.Tags, function(x) {
  if (is.na(x)) return(NA_character_)
  trimws(strsplit(as.character(x), ";")[[1]], "both")
})
# Keep the first 20 unique tags observed across all professors
unq <- unique(unlist(aarmp$tag_list))[1:20]
# Build the n x t indicator matrix (professor x tag) in one shot rather than
# growing a data.frame with rbind() inside a loop, which is O(n^2).
tag_frame <- as.data.frame(t(vapply(
  aarmp$tag_list,
  function(tl) unq %in% unlist(tl),
  logical(length(unq))
)))
colnames(tag_frame) <- unq
# Now that we have these, we can bind with the other features.
tagdata <- cbind(overall = aarmp$OUR.Overall.mean, tag_frame)
colnames(tagdata) <- c("overall", unq)
tagdata$PersonId <- aarmp$AA_id
# Write the output
write.csv(tagdata, output_path, row.names = FALSE)
|
#
# Capelin Acoustic Transect Analysis
#
# Identify working directory
#
setwd("C:\\Users\\pjs31\\Documents\\Cornell\\Institute of Marine Research\\capeline acoustics")
#
# Read in data
#
data = read.csv("AllData_new.csv")
head(data)
dim(data)
(iyear = unique(data$year))
table(data$year)
#
setwd("C:\\Users\\pjs31\\Documents\\Cornell\\Institute of Marine Research\\capeline acoustics\\Barents Sea Shp")
#
library(rgdal)
#
# Read in the Barents Sea boundary shapefile layer "iho"
#
ogrInfo(dsn=".",layer="iho")
barents = readOGR(dsn=".",layer="iho")
# Report the CRS of the layer just read. This previously called
# proj4string(NENY), referencing an object never defined in this script
# (a leftover from another analysis) and would error at run time.
proj4string(barents)
# First plot of the polygon: open a new device rather than adding to one
# (add=T here would fail because no plot exists yet).
plot(barents,lwd=2)
#
setwd("C:\\Users\\pjs31\\Documents\\Cornell\\Institute of Marine Research\\capeline acoustics")
#
# Plot survey locations
#
plot(barents,lwd=2,axes=T)
points(lat~lon,data=data,col=2)
#
# Plot relative capelin densities by survey year, one panel per year.
# Point size is scaled to each year's maximum capelin value, so sizes are
# comparable within a panel but not across panels.
#
# NOTE(review): `yy = 2013` is a scratch assignment immediately overwritten
# by the loop variable below.
yy = 2013
par(mfrow=c(4,3))
for(yy in iyear){
plot(lat~lon,data=data,type="n")
datai = data[data$year==yy,]
points(lat~lon,data=datai,cex=4*datai$capelin/max(datai$capelin),
lwd=2,col="blue")
#title(paste("Capelin Abundance",yy))
title(paste(yy))
plot(barents,lwd=2,add=T)
#locator(1)
}
par(mfrow=c(1,1))
#
##############################################################
#
# Geostatistics
#
#
library(sp)
#
# Read the data into R
#
data$Xloc = data$lon
data$Yloc = data$lat
coordinates(data)=c("Xloc","Yloc")
#
# Do some simple exploratory analysis of the data
#
hist(data$capelin)
stem(data$capelin)
#
# Plot the sample locations
#
par(pty="s")
plot(Yloc~Xloc,data=data)
title("Capelin Locations")
#
#
# Examine the capelin concentrations over the area
#
library(RColorBrewer)
library(classInt)
#
# Plot a point at a location
# The color of the point corresponds to the
# Concentration of cadmium, as specified in the q5Colors object
#
yy = 2014
plot(barents)
datai = data[data$year==yy,]
pal = brewer.pal(5,"Blues")
q5 = classIntervals(datai$capelin, n=5, style="quantile")
q5Colors = findColours(q5,pal)
plot(datai,col=q5Colors,pch=19,add=T)
legend("bottomright",fill=attr(q5Colors,"palette"),
legend = names(attr(q5Colors,"table")),bty="n")
title(paste("Capelin Abundance Over Area",yy))
#
#
yy = 2004
for(yy in iyear){
datai = data[data$year==yy,]
pal = brewer.pal(5,"Blues")
q5 = classIntervals(datai$capelin, n=5, style="quantile")
q5Colors = findColours(q5,pal)
plot(c(min(datai$Xloc),max(datai$Xloc)),
c(min(datai$Yloc),max(datai$Yloc)),
xlab="Longitude",ylab="Latitude",type="n")
plot(datai,col=q5Colors,pch=19,add=T)
legend("bottomright",fill=attr(q5Colors,"palette"),
legend = names(attr(q5Colors,"table")),bty="n")
title(paste("Capelin Abundance Over Area",yy))
locator(1)
}
#
################################################################################
#
# Kriging Capeline
#
library(gstat)
#
# Choose Year ##########################
#
yy = 2007
datai = data.frame(data[data$year==yy,])
datai$Xloc = jitter(datai$Xloc)
datai$Yloc = jitter(datai$Yloc)
coordinates(datai) = c("Xloc","Yloc")
#
par(mfrow=c(1,2))
hist(datai$capelin,main="Capelin",col="dodgerblue")
hist(log(datai$capelin+1),main="Log(Capelin+1)",col="dodgerblue")
par(mfrow=c(1,1))
#
#
# Plot relative abundances
#
plot(lat~lon,data=datai,cex=4*datai$capelin/max(datai$capelin),lwd=2)
title(paste("Capelin Abundance",yy))
#
#
# Plot relative abundances using color
#
library(RColorBrewer)
library(classInt)
#
# Plot a point at a location
# The color of the point corresponds to the
# Concentration of cadmium, as specified in the q5Colors object
#
pal = brewer.pal(5,"Blues")
q5 = classIntervals(datai$capelin, n=5, style="quantile")
q5Colors = findColours(q5,pal)
plot(c(min(datai$Xloc),max(datai$Xloc)),
c(min(datai$Yloc),max(datai$Yloc)),
xlab="Longitude",ylab="Latitude",type="n")
plot(datai,col=q5Colors,pch=19,add=T)
legend("bottomright",fill=attr(q5Colors,"palette"),
legend = names(attr(q5Colors,"table")),bty="n")
title(paste("Capelin Abundance Over Area",yy))
#
# Calculate the empirical variogram
#
capelin.vario = variogram(log(capelin+1)~1,datai,cutoff=20)
#
# Plot the empirical variogram
#
plot(gamma~dist,capelin.vario,
ylim=c(0,max(gamma)),type='n',
xlab="Distance",ylab="Semivariance",
main=paste("Capelin Variogram",yy))
points(gamma~dist,capelin.vario,cex=2*np/max(np),pch=16,col="lightblue")
#
# Fit the model first by eye
#
my.range = locator(1)$x
my.nugget = locator(1)$y
my.psill = locator(1)$y-my.nugget
#
capelin.eye = vgm(model="Sph",psill=my.psill,range=my.range,nugget=my.nugget)
plot(gamma~dist,capelin.vario,
ylim=c(0,max(gamma)),type='n',
xlab="Distance",ylab="Semivariance",
main=paste("Capelin Variogram",yy))
points(gamma~dist,capelin.vario,cex=2*np/max(np),pch=16,col="lightblue")
vgmline = variogramLine(capelin.eye,max(capelin.vario$dist))
lines(gamma~dist,vgmline,lwd=2)
#
# Now use eye parameters to start fit of model
#
#
capelin.fit=fit.variogram(capelin.vario,
vgm(model="Sph",psill=my.psill,range=my.range,nugget=my.nugget),
fit.method=1)
#
# Look at estimates
#
capelin.fit
capelin.psill=capelin.fit$psill[2]
capelin.range=capelin.fit$range[2]
capelin.nugget=capelin.fit$psill[1]
#
# Plot data, model and parameter estimates
#
#
plot(gamma~dist,capelin.vario,
ylim=c(0,max(gamma)),type='n',
xlab="Distance",ylab="Semivariance",
main=paste("Capelin Variogram",yy))
points(gamma~dist,capelin.vario,cex=2*np/max(np),pch=16,col="lightblue")
vgmline = variogramLine(capelin.fit,max(capelin.vario$dist))
lines(gamma~dist,vgmline,lwd=2)
#
legend("bottomright",legend = c(
paste("Psill =",round(capelin.psill,2)),
paste("Range =",round(capelin.range,2)),
paste("Nugget = ",round(capelin.nugget,2))),
bty="n")
#
#
# Create a grid of points to predict over
#
library(geoR)
#
capelin.grid = expand.grid(
Xloc=seq(min(datai$Xloc),max(datai$Xloc),length=50),
Yloc=seq(min(datai$Yloc),max(datai$Yloc),length=50))
names(capelin.grid)=c("Xloc","Yloc")
coordinates(capelin.grid)=c("Xloc","Yloc")
capelin.grid = as(capelin.grid, "SpatialPixels")
#
# Now plot the data and overlay the prediction grid
#
plot(Yloc~Xloc,capelin.grid,cex=1.2,pch='+',col="green")
points(Yloc~Xloc,datai,pch=".")
#
#
# Predict the value at all the points in the domain
#
date()
capelin.ok = krige(log(capelin+1)~1, datai, capelin.grid, capelin.fit)
date()
#
# Plot the prediction
#
#plot(c(min(datai$Xloc),max(datai$Xloc)),
# c(min(datai$Yloc),max(datai$Yloc)),
# type="n",xlab="Longitude",ylab="Latitude")
plot(barents)
image(capelin.ok["var1.pred"],col=rev(heat.colors(4)),add=T)
#contour(capelin.ok["var1.pred"],add=T)
title(paste("Predicted Log(Capelin+1)",yy))
legend("bottomright",legend=c(0,1,2,3),fill=rev(heat.colors(4)),
bty="n",title="log(Capelin+1)")
plot(barents,add=T)
summary(capelin.ok["var1.pred"])
#
#
#
# Plot variance
#
plot(c(min(datai$Xloc),max(datai$Xloc)),
c(min(datai$Yloc),max(datai$Yloc)),
type="n",xlab="Longitude",ylab="Latitude")
image(capelin.ok["var1.var"],col=heat.colors(4),add=T)
points(datai$Xloc,datai$Yloc,pch=".")
legend("bottomright",legend=round(seq(.3,.5,length=4),2),fill=heat.colors(4),
bty="n",title="Variance")
# points(data.frame(capelin.grid)$Xloc,data.frame(capelin.grid)$Yloc,pch="+")
title(paste("Variance in Predictions",yy))
#
#
# Overlay Cod on Capelin Predictions
#
plot(c(min(datai$Xloc),max(datai$Xloc)),
c(min(datai$Yloc),max(datai$Yloc)),
type="n",xlab="Longitude",ylab="Latitude")
image(capelin.ok["var1.pred"],col=heat.colors(4),add=T)
#contour(capelin.ok["var1.pred"],add=T)
title(paste("Predicted Log(Capelin+1) \n Overlayed with Cod",yy))
legend("bottomright",legend=c(0,1,2,3),fill=heat.colors(4),
bty="n",title="log(Capelin+1)")
datai0 = datai[datai$cod>2,]
points(Yloc~Xloc,data=datai0,cex=4*datai0$cod/max(datai0$cod),lwd=1)
| /Spatial-Stats/Cod-Capelin-Project/AllData_new.R | no_license | jlmorano/Reference-R-scripts | R | false | false | 8,028 | r | #
# Capelin Acoustic Transect Analysis
#
# Identify working directory
#
setwd("C:\\Users\\pjs31\\Documents\\Cornell\\Institute of Marine Research\\capeline acoustics")
#
# Read in data
#
data = read.csv("AllData_new.csv")
head(data)
dim(data)
(iyear = unique(data$year))
table(data$year)
#
setwd("C:\\Users\\pjs31\\Documents\\Cornell\\Institute of Marine Research\\capeline acoustics\\Barents Sea Shp")
#
library(rgdal)
#
# Read in the Barents Sea boundary shapefile layer "iho"
#
ogrInfo(dsn=".",layer="iho")
barents = readOGR(dsn=".",layer="iho")
# Report the CRS of the layer just read. This previously called
# proj4string(NENY), referencing an object never defined in this script
# (a leftover from another analysis) and would error at run time.
proj4string(barents)
# First plot of the polygon: open a new device rather than adding to one
# (add=T here would fail because no plot exists yet).
plot(barents,lwd=2)
#
setwd("C:\\Users\\pjs31\\Documents\\Cornell\\Institute of Marine Research\\capeline acoustics")
#
# Plot survey locations
#
plot(barents,lwd=2,axes=T)
points(lat~lon,data=data,col=2)
#
# Plot relative densities
#
yy = 2013
par(mfrow=c(4,3))
for(yy in iyear){
plot(lat~lon,data=data,type="n")
datai = data[data$year==yy,]
points(lat~lon,data=datai,cex=4*datai$capelin/max(datai$capelin),
lwd=2,col="blue")
#title(paste("Capelin Abundance",yy))
title(paste(yy))
plot(barents,lwd=2,add=T)
#locator(1)
}
par(mfrow=c(1,1))
#
##############################################################
#
# Geostatistics
#
#
library(sp)
#
# Read the data into R
#
data$Xloc = data$lon
data$Yloc = data$lat
coordinates(data)=c("Xloc","Yloc")
#
# Do some simple exploratory analysis of the data
#
hist(data$capelin)
stem(data$capelin)
#
# Plot the sample locations
#
par(pty="s")
plot(Yloc~Xloc,data=data)
title("Capelin Locations")
#
#
# Examine the capelin concentrations over the area
#
library(RColorBrewer)
library(classInt)
#
# Plot a point at a location
# The color of the point corresponds to the
# Concentration of cadmium, as specified in the q5Colors object
#
yy = 2014
plot(barents)
datai = data[data$year==yy,]
pal = brewer.pal(5,"Blues")
q5 = classIntervals(datai$capelin, n=5, style="quantile")
q5Colors = findColours(q5,pal)
plot(datai,col=q5Colors,pch=19,add=T)
legend("bottomright",fill=attr(q5Colors,"palette"),
legend = names(attr(q5Colors,"table")),bty="n")
title(paste("Capelin Abundance Over Area",yy))
#
#
yy = 2004
for(yy in iyear){
datai = data[data$year==yy,]
pal = brewer.pal(5,"Blues")
q5 = classIntervals(datai$capelin, n=5, style="quantile")
q5Colors = findColours(q5,pal)
plot(c(min(datai$Xloc),max(datai$Xloc)),
c(min(datai$Yloc),max(datai$Yloc)),
xlab="Longitude",ylab="Latitude",type="n")
plot(datai,col=q5Colors,pch=19,add=T)
legend("bottomright",fill=attr(q5Colors,"palette"),
legend = names(attr(q5Colors,"table")),bty="n")
title(paste("Capelin Abundance Over Area",yy))
locator(1)
}
#
################################################################################
#
# Kriging Capeline
#
library(gstat)
#
# Choose Year ##########################
#
yy = 2007
datai = data.frame(data[data$year==yy,])
datai$Xloc = jitter(datai$Xloc)
datai$Yloc = jitter(datai$Yloc)
coordinates(datai) = c("Xloc","Yloc")
#
par(mfrow=c(1,2))
hist(datai$capelin,main="Capelin",col="dodgerblue")
hist(log(datai$capelin+1),main="Log(Capelin+1)",col="dodgerblue")
par(mfrow=c(1,1))
#
#
# Plot relative abundances
#
plot(lat~lon,data=datai,cex=4*datai$capelin/max(datai$capelin),lwd=2)
title(paste("Capelin Abundance",yy))
#
#
# Plot relative abundances using color
#
library(RColorBrewer)
library(classInt)
#
# Plot a point at a location
# The color of the point corresponds to the
# Concentration of cadmium, as specified in the q5Colors object
#
pal = brewer.pal(5,"Blues")
q5 = classIntervals(datai$capelin, n=5, style="quantile")
q5Colors = findColours(q5,pal)
plot(c(min(datai$Xloc),max(datai$Xloc)),
c(min(datai$Yloc),max(datai$Yloc)),
xlab="Longitude",ylab="Latitude",type="n")
plot(datai,col=q5Colors,pch=19,add=T)
legend("bottomright",fill=attr(q5Colors,"palette"),
legend = names(attr(q5Colors,"table")),bty="n")
title(paste("Capelin Abundance Over Area",yy))
#
# Calculate the empirical variogram
#
capelin.vario = variogram(log(capelin+1)~1,datai,cutoff=20)
#
# Plot the empirical variogram
#
plot(gamma~dist,capelin.vario,
ylim=c(0,max(gamma)),type='n',
xlab="Distance",ylab="Semivariance",
main=paste("Capelin Variogram",yy))
points(gamma~dist,capelin.vario,cex=2*np/max(np),pch=16,col="lightblue")
#
# Fit the model first by eye
#
my.range = locator(1)$x
my.nugget = locator(1)$y
my.psill = locator(1)$y-my.nugget
#
capelin.eye = vgm(model="Sph",psill=my.psill,range=my.range,nugget=my.nugget)
plot(gamma~dist,capelin.vario,
ylim=c(0,max(gamma)),type='n',
xlab="Distance",ylab="Semivariance",
main=paste("Capelin Variogram",yy))
points(gamma~dist,capelin.vario,cex=2*np/max(np),pch=16,col="lightblue")
vgmline = variogramLine(capelin.eye,max(capelin.vario$dist))
lines(gamma~dist,vgmline,lwd=2)
#
# Now use eye parameters to start fit of model
#
#
capelin.fit=fit.variogram(capelin.vario,
vgm(model="Sph",psill=my.psill,range=my.range,nugget=my.nugget),
fit.method=1)
#
# Look at estimates
#
capelin.fit
capelin.psill=capelin.fit$psill[2]
capelin.range=capelin.fit$range[2]
capelin.nugget=capelin.fit$psill[1]
#
# Plot data, model and parameter estimates
#
#
plot(gamma~dist,capelin.vario,
ylim=c(0,max(gamma)),type='n',
xlab="Distance",ylab="Semivariance",
main=paste("Capelin Variogram",yy))
points(gamma~dist,capelin.vario,cex=2*np/max(np),pch=16,col="lightblue")
vgmline = variogramLine(capelin.fit,max(capelin.vario$dist))
lines(gamma~dist,vgmline,lwd=2)
#
legend("bottomright",legend = c(
paste("Psill =",round(capelin.psill,2)),
paste("Range =",round(capelin.range,2)),
paste("Nugget = ",round(capelin.nugget,2))),
bty="n")
#
#
# Create a grid of points to predict over
#
library(geoR)
#
capelin.grid = expand.grid(
Xloc=seq(min(datai$Xloc),max(datai$Xloc),length=50),
Yloc=seq(min(datai$Yloc),max(datai$Yloc),length=50))
names(capelin.grid)=c("Xloc","Yloc")
coordinates(capelin.grid)=c("Xloc","Yloc")
capelin.grid = as(capelin.grid, "SpatialPixels")
#
# Now plot the data and overlay the prediction grid
#
plot(Yloc~Xloc,capelin.grid,cex=1.2,pch='+',col="green")
points(Yloc~Xloc,datai,pch=".")
#
#
# Predict the value at all the points in the domain
#
date()
capelin.ok = krige(log(capelin+1)~1, datai, capelin.grid, capelin.fit)
date()
#
# Plot the prediction
#
#plot(c(min(datai$Xloc),max(datai$Xloc)),
# c(min(datai$Yloc),max(datai$Yloc)),
# type="n",xlab="Longitude",ylab="Latitude")
plot(barents)
image(capelin.ok["var1.pred"],col=rev(heat.colors(4)),add=T)
#contour(capelin.ok["var1.pred"],add=T)
title(paste("Predicted Log(Capelin+1)",yy))
legend("bottomright",legend=c(0,1,2,3),fill=rev(heat.colors(4)),
bty="n",title="log(Capelin+1)")
plot(barents,add=T)
summary(capelin.ok["var1.pred"])
#
#
#
# Plot variance
#
plot(c(min(datai$Xloc),max(datai$Xloc)),
c(min(datai$Yloc),max(datai$Yloc)),
type="n",xlab="Longitude",ylab="Latitude")
image(capelin.ok["var1.var"],col=heat.colors(4),add=T)
points(datai$Xloc,datai$Yloc,pch=".")
legend("bottomright",legend=round(seq(.3,.5,length=4),2),fill=heat.colors(4),
bty="n",title="Variance")
# points(data.frame(capelin.grid)$Xloc,data.frame(capelin.grid)$Yloc,pch="+")
title(paste("Variance in Predictions",yy))
#
#
# Overlay Cod on Capelin Predictions
#
plot(c(min(datai$Xloc),max(datai$Xloc)),
c(min(datai$Yloc),max(datai$Yloc)),
type="n",xlab="Longitude",ylab="Latitude")
image(capelin.ok["var1.pred"],col=heat.colors(4),add=T)
#contour(capelin.ok["var1.pred"],add=T)
title(paste("Predicted Log(Capelin+1) \n Overlayed with Cod",yy))
legend("bottomright",legend=c(0,1,2,3),fill=heat.colors(4),
bty="n",title="log(Capelin+1)")
datai0 = datai[datai$cod>2,]
points(Yloc~Xloc,data=datai0,cex=4*datai0$cod/max(datai0$cod),lwd=1)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.