content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
% Generated by roxygen2 (4.1.0.9000): do not edit by hand
% Please edit documentation in R/periods.r
\name{new_period}
\alias{new_period}
\title{Create a period object.}
\usage{
new_period(...)
}
\arguments{
\item{...}{a list of time units to be included in the period and their amounts. Seconds, minutes,
hours, days, weeks, months, and years are supported.}
}
\value{
a period object
}
\description{
new_period creates a period object with the specified values. Within a
Period object, time units do not have a fixed length (except for seconds)
until they are added to a date-time. The length of each time unit will
depend on the date-time to which it is added. For example, a year that
begins on 2009-01-01 will be 365 days long. A year that begins on
2012-01-01 will be 366 days long. When math is performed with a period
object, each unit is applied separately. How the length of a period is
distributed among
its units is non-trivial. For example, when leap seconds occur 1 minute
is longer than 60 seconds.
}
\details{
Periods track the change in the "clock time" between two date-times. They
are measured in common time related units: years, months, days, hours,
minutes, and seconds. Each unit except for seconds must be expressed in
integer values.
Period objects can be easily created with the helper functions
\code{\link{years}}, \code{\link{months}}, \code{\link{weeks}},
\code{\link{days}}, \code{\link{minutes}}, \code{\link{seconds}}. These objects
can be added to and subtracted from date-times to create a user interface
similar to object oriented programming.
new_period is meant to be used interactively on the command line. See
\code{\link{period}}, for a version that is better suited to automating
within a function.
}
\examples{
new_period (second = 90, minute = 5)
# "5M 90S"
new_period(day = -1)
# "-1d 0H 0M 0S"
new_period(second = 3, minute = 1, hour = 2, day = 13, week = 1)
# "20d 2H 1M 3S"
new_period(hour = 1, minute = -60)
# "1H -60M 0S"
new_period(second = 0)
# "0S"
}
\seealso{
\code{\link{period}}, \code{\link{as.period}}
}
\keyword{chron}
\keyword{classes}
| /man/new_period.Rd | no_license | apys/lubridate | R | false | false | 2,098 | rd | % Generated by roxygen2 (4.1.0.9000): do not edit by hand
% Please edit documentation in R/periods.r
\name{new_period}
\alias{new_period}
\title{Create a period object.}
\usage{
new_period(...)
}
\arguments{
\item{...}{a list of time units to be included in the period and their amounts. Seconds, minutes,
hours, days, weeks, months, and years are supported.}
}
\value{
a period object
}
\description{
new_period creates a period object with the specified values. Within a
Period object, time units do not have a fixed length (except for seconds)
until they are added to a date-time. The length of each time unit will
depend on the date-time to which it is added. For example, a year that
begins on 2009-01-01 will be 365 days long. A year that begins on
2012-01-01 will be 366 days long. When math is performed with a period
object, each unit is applied separately. How the length of a period is
distributed among
its units is non-trivial. For example, when leap seconds occur 1 minute
is longer than 60 seconds.
}
\details{
Periods track the change in the "clock time" between two date-times. They
are measured in common time related units: years, months, days, hours,
minutes, and seconds. Each unit except for seconds must be expressed in
integer values.
Period objects can be easily created with the helper functions
\code{\link{years}}, \code{\link{months}}, \code{\link{weeks}},
\code{\link{days}}, \code{\link{minutes}}, \code{\link{seconds}}. These objects
can be added to and subtracted to date-times to create a user interface
similar to object oriented programming.
new_period is meant to be used interactively on the command line. See
\code{\link{period}}, for a version that is better suited to automating
within a function.
}
\examples{
new_period (second = 90, minute = 5)
# "5M 90S"
new_period(day = -1)
# "-1d 0H 0M 0S"
new_period(second = 3, minute = 1, hour = 2, day = 13, week = 1)
# "20d 2H 1M 3S"
new_period(hour = 1, minute = -60)
# "1H -60M 0S"
new_period(second = 0)
# "0S"
}
\seealso{
\code{\link{period}}, \code{\link{as.period}}
}
\keyword{chron}
\keyword{classes}
|
#' Read the output from snppit into a format useful for comparison
#'
#' Reads the snppit parentage-assignment table found in DIR and joins the
#' sex and collection-date metadata in S onto each of the kid, pa, and ma
#' ID columns.
#' @param DIR the directory where the output lives
#' @param S the tibble of sex and date. Must have columns
#' Indiv, SEX, and collection_date
#' @return a tibble of parentage assignments with the joined kid_/pa_/ma_
#'   sex and date columns placed first, followed by the remaining snppit
#'   columns. (Returned as the value of the final assignment expression.)
slurp_snppit <- function(DIR, S) {
  # "---" is snppit's missing-value marker in this output file.
  P <- read_tsv(file.path(DIR, "snppit_output_ParentageAssignments.txt"), trim_ws = TRUE, na = "---") %>%
    # attach sex/date for each pedigree role by matching the role's ID column to S$Indiv
    left_join(S %>% rename(kid_sex = SEX, kid_date = collection_date), by = c("Kid" = "Indiv")) %>%
    left_join(S %>% rename(pa_sex = SEX, pa_date = collection_date), by = c("Pa" = "Indiv")) %>%
    left_join(S %>% rename(ma_sex = SEX, ma_date = collection_date), by = c("Ma" = "Indiv")) %>%
    # put the joined metadata columns up front for easier side-by-side comparison
    select(ends_with("sex"), ends_with("date"), everything())
}
| /R/slurp_snppit.R | no_license | abeulke/HatcheryPedAgree | R | false | false | 737 | r | #' Read the outpur from snppit into a format useful for comparison
#'
#' More later...
#' @param DIR the directory where the output lives
#' @param S the tibble of sex and date. Must have columns
#' Indiv, SEX, and collection_date
slurp_snppit <- function(DIR, S) {
P <- read_tsv(file.path(DIR, "snppit_output_ParentageAssignments.txt"), trim_ws = TRUE, na = "---") %>%
left_join(S %>% rename(kid_sex = SEX, kid_date = collection_date), by = c("Kid" = "Indiv")) %>%
left_join(S %>% rename(pa_sex = SEX, pa_date = collection_date), by = c("Pa" = "Indiv")) %>%
left_join(S %>% rename(ma_sex = SEX, ma_date = collection_date), by = c("Ma" = "Indiv")) %>%
select(ends_with("sex"), ends_with("date"), everything())
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/left_to_right.R
\name{evaluate_left_to_right}
\alias{evaluate_left_to_right}
\title{Left-to-right evaluation algorithm}
\usage{
evaluate_left_to_right(corpus, state, n_topics, alpha, beta, n_particles,
resampling)
}
\description{
This is an algorithm for approximating the predictive probability p(w | ...) of
a corpus, evaluated token by token from left to right using n_particles particles.
}
| /tomer/man/evaluate_left_to_right.Rd | no_license | MansMeg/tomer | R | false | true | 369 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/left_to_right.R
\name{evaluate_left_to_right}
\alias{evaluate_left_to_right}
\title{Left-to-right evaluation algorithm}
\usage{
evaluate_left_to_right(corpus, state, n_topics, alpha, beta, n_particles,
resampling)
}
\description{
This is an algorithm for approximating p(w | ...) blabla
}
|
library('MASS')
## Function for correct rounding
round2 = function(x, n) {
  # Round `x` to `n` decimal places, rounding halves AWAY from zero
  # (base round() uses round-half-to-even, which this deliberately avoids).
  shifted <- abs(x) * 10^n + 0.5
  sign(x) * trunc(shifted) / 10^n
}
# Learn count-model parameters from a feature-by-sample count matrix `dat`.
#
# Returns a list with:
#   mu           - per-sample scale estimates (column sums)
#   p            - per-feature relative-abundance estimates
#   ovdisp       - mean fitted negative-binomial size (overdispersion)
#   suffspl.size - for each feature, the smallest sample scale at which the
#                  feature is expected to appear (cumulative expected count > 3)
#   p.0          - extra point mass at zero: observed zero fraction in excess
#                  of what the fitted negative binomial predicts
dataLearning <- function(dat)
{
  dat <- dat[!apply(dat, 1, function(x) all(x == 0)), ]  # drop all-zero features
  dat <- dat[order(-rowSums(dat)), ]                     # features: most to least abundant
  dat <- dat[, order(colSums(dat))]                      # samples: smallest to largest scale
  p <- rowSums(dat) / sum(rowSums(dat))                  # est of relative abundance
  mu <- colSums(dat)                                     # est of sample scale
  muij <- t(matrix(mu, ncol = 1) %*% matrix(p, nrow = 1))  # matrix of expected counts
  muij <- round2(muij, 0)  # (1) round the expected counts to integers
  #################### (2)
  # A feature is deemed to appear once its cumulative expected count exceeds 3;
  # record the sample scale at which that first happens (NA if never).
  suffspl.inx <- apply(t(apply(muij, 1, cumsum)), 1, function(x) which(x > 3)[1])
  suffspl.size <- data.frame(feature = rownames(dat),
                             size = as.numeric(sapply(suffspl.inx,
                                                      function(x) if (!is.na(x)) mu[x] else NA)))
  #################### (3)
  # Fit a negative binomial to the observed counts at each expected-count level
  # (only levels with more than 50 observations), then average the fitted size.
  ovdisp.fit <- NULL
  for (expij in unique(sort(as.vector(muij)))[-1]) # not to include 0
  {
    locij <- which(muij == expij, arr.ind = TRUE)
    if (nrow(locij) > 50) # only use levels with enough observations to fit
    {
      obsij <- dat[locij]
      fitij <- fitdistr(obsij, 'negative binomial')
      ovdisp.fit <- rbind(ovdisp.fit,
                          data.frame(expij = expij,
                                     mu.fitted = fitij$estimate['mu'],
                                     size.fitted = fitij$estimate['size'],
                                     stringsAsFactors = FALSE))
    }
  }
  rownames(ovdisp.fit) <- NULL
  ovdisp <- mean(ovdisp.fit$size.fitted)
  #################### (4)
  # Zero-inflation mass: observed P(0) minus the NB-predicted P(0), when positive.
  p.0 <- NULL
  for (expij in unique(sort(as.vector(muij)))[-1])
  {
    locij <- which(muij == expij, arr.ind = TRUE)
    if (nrow(locij) > 50)
    {
      obsij <- dat[locij]
      p.0_nb <- dnbinom(0, size = ovdisp, mu = expij)      # P(0) under the fitted NB
      p.0_obs <- length(which(obsij == 0)) / length(obsij) # observed P(0)
      # BUG FIX: the original else-branch assigned `mass0 <- NA` (a typo), which
      # silently left p.0_mass holding its value from the previous iteration.
      if (p.0_obs > p.0_nb) p.0_mass <- (p.0_obs - p.0_nb) else p.0_mass <- NA
      p.0 <- rbind(p.0,
                   data.frame(expij = expij,
                              p.0_mass = p.0_mass,
                              stringsAsFactors = FALSE))
    }
  }
  ############# out: mu, p, ovdisp, suffspl.size, p.0
  list(mu = mu,
       p = p,
       ovdisp = ovdisp,
       suffspl.size = suffspl.size[!is.na(suffspl.size$size), ],
       p.0 = p.0)
}
| /data learning func.R | no_license | rdu2017/Normalization-Evaluation | R | false | false | 2,822 | r |
library('MASS')
## Function for correct rounding
round2 = function(x, n) {
posneg = sign(x)
z = abs(x)*10^n
z = z + 0.5
z = trunc(z)
z = z/10^n
z*posneg
}
dataLearning <- function(dat)
{
dat <- dat[!apply(dat, 1, function(x) all(x==0)), ] # remove a feature with all zeros
dat <- dat[order(-rowSums(dat)),] # order features from most to least
dat <- dat[,order(colSums(dat))] # order sample from least to most
p <- rowSums(dat)/sum(rowSums(dat)) # est of relative abundance
mu <- colSums(dat) # est of sample scale
muij <- t(matrix(mu, ncol=1) %*% matrix(p, nrow=1)) # matrix of expected counts
muij <- round2(muij, 0) # round the expected counts to integers (1)
#################### (2)
# if cumsum > 3, the feature is thought to appear, otherwise not
suffspl.inx <- apply(t(apply(muij, 1, cumsum)), 1, function(x) which(x>3)[1])
suffspl.size <- data.frame(feature = rownames(dat),
size = as.numeric(sapply(suffspl.inx,
function(x) if(!is.na(x)) mu[x] else NA)))
#################### (3)
ovdisp.fit <- NULL
for (expij in unique(sort(as.vector(muij)))[-1]) # not to include 0
{
locij <- which(muij==expij, arr.ind = T)
if (nrow(locij)>50) # only take the expij with the number of a expij greater than 50
{
obsij <- dat[locij]
fitij <- fitdistr(obsij, 'negative binomial')
ovdisp.fit <- rbind(ovdisp.fit,
data.frame(expij = expij,
mu.fitted = fitij$estimate['mu'],
size.fitted = fitij$estimate['size'],
stringsAsFactors = F))
}
}
rownames(ovdisp.fit) <- NULL
ovdisp <- mean(ovdisp.fit$size.fitted)
#################### (4)
p.0 <- NULL
for (expij in unique(sort(as.vector(muij)))[-1])
{
locij <- which(muij==expij, arr.ind = T)
if (nrow(locij)>50)
{
#print(expij)
obsij <- dat[locij]
p.0_nb <- dnbinom(0, size=ovdisp, mu=expij) # probability of 0 estimated from nb distribution
p.0_obs <- length(which(obsij==0))/length(obsij) # probability of 0 observed
if (p.0_obs > p.0_nb) p.0_mass <- (p.0_obs - p.0_nb) else mass0 <- NA
p.0 <- rbind(p.0,
data.frame(expij = expij,
p.0_mass = p.0_mass,
stringsAsFactors = F))
}
}
############# out: mu, p, ovdisp, suffspl.size, p.0
list(mu = mu,
p = p,
ovdisp = ovdisp,
suffspl.size = suffspl.size[!is.na(suffspl.size$size),],
p.0 = p.0)
}
|
#' @include Downloader.R
# Singleton container holding the downloader for the Navarra (Nafarroa)
# meteorology service (meteo.navarra.es). The inner class fetches one hour
# of 10-minute readings for a station and commits them via the parent class.
DownloaderNafarroa<-R6Class("SingletonContainer",inherit=Singleton,portable=FALSE,
  public=list(
    initialize=function(...)
    {
      Class<<-R6Class("DownloaderNafarroa",portable=F,
        inherit=Downloader,
        public=list(
          # Download the six 10-minute readings (direction, speed,
          # temperature) inside the hour of pDate for station pStation.
          # Returns the value of commitOrError() on success, or NULL when
          # the service response is empty.
          getHour=function(pStation,pDate)
          {
            # The remote service identifies stations by characters 3-5 of our code.
            codEst=substr(pStation$getCode(),3,5);
            url<-url(paste("http://meteo.navarra.es/download/estacion_datos.cfm?idestacion=",codEst,"&p_10=7&p_10=6&p_10=1&fecha_desde=",pDate$printDateNoHourNonStandardDash(),"&fecha_hasta=",pDate$getDay()+1,"/",pDate$getMonth(),"/",pDate$getYear(),"&dl=csv",sep=""));
            tryCatch({
              readVector<-list();
              # skip=4: the first four lines of the CSV are header text
              response=read.csv(url,header=F,skip=4);
              if(!is.null(response)){
                # Six 10-minute rows per hour; the row offset selects the hour.
                for(i in 1:6)
                {
                  dataRow<-response[(pDate$getHour()*6)+i,];
                  date=Date$new(pDate$getYear(),pDate$getMonth(),pDate$getDay(),pDate$getHour(),10*i-10);
                  direction<-as.numeric(dataRow$V2);
                  speed<-as.numeric(dataRow$V3);
                  temperature<-as.numeric(dataRow$V4);
                  # "- -" marks a missing value in the service's CSV; skip such rows.
                  if(!(speed=="- -" || direction=="- -" || temperature=="- -"))
                  {
                    newRead<-Read$new(date,speed,direction,temperature);
                    readVector=c(readVector,newRead);
                  }
                }
                return(super$commitOrError(pStation,readVector))
              }else return(NULL); # BUG FIX: was `null`, an undefined symbol that would error if reached
            },
            error=function(e){
              print(e);
            })
          }
        )
      )
      super$initialize();
    }
  )
)$new();
DownloaderNafarroa<-R6Class("SingletonContainer",inherit=Singleton,portable=FALSE,
public=list(
initialize=function(...)
{
Class<<-R6Class("DownloaderNafarroa",portable=F,
inherit=Downloader,
public=list(
getHour=function(pStation,pDate)
{
codEst=substr(pStation$getCode(),3,5);
url<-url(paste("http://meteo.navarra.es/download/estacion_datos.cfm?idestacion=",codEst,"&p_10=7&p_10=6&p_10=1&fecha_desde=",pDate$printDateNoHourNonStandardDash(),"&fecha_hasta=",pDate$getDay()+1,"/",pDate$getMonth(),"/",pDate$getYear(),"&dl=csv",sep=""));
tryCatch({
readVector<-list();
response=read.csv(url,header=F,skip=4);
if(!is.null(response)){
for(i in 1:6)
{
dataRow<-response[(pDate$getHour()*6)+i,];
date=Date$new(pDate$getYear(),pDate$getMonth(),pDate$getDay(),pDate$getHour(),10*i-10);
direction<-as.numeric(dataRow$V2);
speed<-as.numeric(dataRow$V3);
temperature<-as.numeric(dataRow$V4);
if(!(speed=="- -" || direction=="- -" || temperature=="- -"))
{
newRead<-Read$new(date,speed,direction,temperature);
readVector=c(readVector,newRead);
}
}
return(super$commitOrError(pStation,readVector))
}else return(null);
},
error=function(e){
print(e);
})
}
)
)
super$initialize();
}
)
)$new(); |
# Diamond price analysis: exploratory plots, a RuleFit regression model, and
# a pruned rpart regression tree, compared by mean absolute error on a
# held-out test set.
platform = "linux"
rfhome = "/Users/deeksha/Library/R/3.1/R_Rulefit"
source("/Users/deeksha/Library/R/3.1/R_Rulefit/rulefit.r")
install.packages("akima", lib=rfhome)
library(gdata)
data = read.xls("Diamond_data/Diamond_Data.xls")
head(data)
# (a) Exploratory plots of price and cut
plot(data$Cut, data$Price, xlab = "Cut", ylab = "Price")
hist(data$Price, col="red", xlab="Price", ylab="Distribution", labels = TRUE)
cutData = as.numeric(data$Cut)
hist(cutData, col="green", xlab="Cut", ylab="Distribution", labels = TRUE)
library(akima, lib.loc=rfhome)
# Train/test split: 5000 rows for training, the remainder for testing.
# NOTE(review): sample() is not seeded here, so the split is not reproducible.
indexes = sample(1:nrow(data),5000)
trainingData = data[indexes,]
testData = data[-indexes,]
#(b) Use rulefit to fit regression model (columns 1:8 = predictors, 9 = Price)
ruleFitModel = rulefit(trainingData[,1:8], trainingData[,9],rfmode = "regress",
                       cat.vars = c("Cut","Color","Clarity","Polish","Symmetry","Report"),
                       test.reps = 10,test.fract = 0.1)
rfmodinfo(ruleFitModel)
#(c) use the rules() to view top 10 rules
x = trainingData[,1:8]
rules(beg = 1,end = 10, x)
#(d) variable importance of the top 3 predictors
varimp(range = 1:3, col = "cyan")
#(e) Error on Test Data
testPrediction = rfpred(testData[,1:8])
testActual = testData[,9]
# BUG FIX: the original computed abs(sum(errors)) / 1000, which lets positive
# and negative errors cancel and hard-codes the test-set size. The mean
# absolute error averages |actual - predicted| over the actual test rows.
averageAbsoluteError = mean(abs(testActual - testPrediction))
print(averageAbsoluteError)
#(f) Decision Tree: convert categorical predictors to factors for rpart
trainingData$Cut = as.factor(trainingData$Cut)
trainingData$Color = as.factor(trainingData$Color)
trainingData$Clarity = as.factor(trainingData$Clarity)
trainingData$Polish = as.factor(trainingData$Polish)
trainingData$Symmetry = as.factor(trainingData$Symmetry)
trainingData$Report = as.factor(trainingData$Report)
library(rpart) # Recursive Partitioning (rpart)
dTree <- rpart(Price~., method = "anova", data = trainingData, cp=0.0001)
# Finding the split with minimal cross-validation error
plotcp (dTree)
treeTable = dTree$cptable
min(treeTable[,4])
# Find the complexity parameter for the best split and prune to it
cp = treeTable[which.min(treeTable[,4]),"CP"]
prunedTree = prune(dTree,cp)
# prediction over test data
predictPruneTree = predict(prunedTree, testData[,1:8],type = "vector")
# calculating error on the best pruned tree (same BUG FIX as above:
# mean absolute error instead of abs(sum(errors)) / 1000)
averageErrorPrunedTree = mean(abs(testActual - predictPruneTree))
print(averageErrorPrunedTree)
| /EnsembleRule_ DiamondDataSet/Q1.R | no_license | deekshasharma/datamining | R | false | false | 2,196 | r | platform = "linux"
rfhome = "/Users/deeksha/Library/R/3.1/R_Rulefit"
source("/Users/deeksha/Library/R/3.1/R_Rulefit/rulefit.r")
install.packages("akima", lib=rfhome)
library(gdata)
data = read.xls("Diamond_data/Diamond_Data.xls")
head(data)
plot(data$Cut, data$Price, xlab = "Cut", ylab = "Price")
hist(data$Price, col="red", xlab="Price", ylab="Distribution", labels = TRUE)
cutData = as.numeric(data$Cut)
hist(cutData, col="green", xlab="Cut", ylab="Distribution", labels = TRUE)
library(akima, lib.loc=rfhome)
indexes = sample(1:nrow(data),5000)
trainingData = data[indexes,]
testData = data[-indexes,]
#(b) Use rulefit to fit regression model
ruleFitModel = rulefit(trainingData[,1:8], trainingData[,9],rfmode = "regress",
cat.vars = c("Cut","Color","Clarity","Polish","Symmetry","Report"),
test.reps = 10,test.fract = 0.1)
rfmodinfo(ruleFitModel)
#(c) use the rules() to view top 10 rules
x = trainingData[,1:8]
rules(beg = 1,end = 10, x)
#(d)
varimp(range = 1:3, col = "cyan")
#(e) Error on Test Data
testPrediction = rfpred(testData[,1:8])
testActual = testData[,9]
averageAbsoluteError = 1/1000 * (sum(testActual - testPrediction))
print(abs(averageAbsoluteError))
#(f) Decision Tree
trainingData$Cut = as.factor(trainingData$Cut)
trainingData$Color = as.factor(trainingData$Color)
trainingData$Clarity = as.factor(trainingData$Clarity)
trainingData$Polish = as.factor(trainingData$Polish)
trainingData$Symmetry = as.factor(trainingData$Symmetry)
trainingData$Report = as.factor(trainingData$Report)
library(rpart) # Recurisve Partitioning(rpart)
dTree <- rpart(Price~., method = "anova", data = trainingData, cp=0.0001)
# Finding the split with minimal xross validation error
plotcp (dTree)
treeTable = dTree$cptable
min(treeTable[,4])
# Find the complexity parameter for the best split
cp = treeTable[which.min(treeTable[,4]),"CP"]
prunedTree = prune(dTree,cp)
# predicion over test data
predictPruneTree = predict(prunedTree, testData[,1:8],type = "vector")
# calculating error on the best pruned tree
averageErrorPrunedTree = 1/1000 * (sum(testActual - predictPruneTree))
print(abs(averageErrorPrunedTree))
|
# NYC Drinking Water Quality Distribution Monitoring Data Analysis
# Oct 8 2020
# Cleans the raw monitoring CSV (renames columns, parses date/time, coerces
# the censored Coli/Ecoli strings to numbers) and explores chlorine levels.
# Packages
library(dplyr)
library(tidyr)
library(ggplot2)
library(lubridate)
# Load data
setwd('~/Desktop/r_projects/practice')
nyc_water <- read.csv('drinking-water-quality-distribution-monitoring-data-1.csv')
# Data cleaning: shorten the raw column names and drop the sample ID
water <- rename(nyc_water,
                Date = Sample.Date,
                Time = Sample.Time,
                Site = Sample.Site,
                Class = Sample.class,
                Chlorine = Residual.Free.Chlorine..mg.L.,
                Turb = Turbidity..NTU.,
                Ecoli = E.coli.Quanti.Tray...MPN.100mL.,
                Coli = Coliform..Quanti.Tray...MPN..100mL.,
                Fluoride = Fluoride..mg.L.) %>%
  select(-Sample.Number)
str(water)
# transform date and time
# convert date and time to POSIX format using lubridate
water1 <- water %>%
  mutate(date_time = mdy_hm(paste(Date, Time, sep = ' ')))
# check missing value in date_time
# NOTE(review): this expression keeps the NON-missing rows (the negation
# inverts the intent stated above) and its result is not stored — it only
# prints. To inspect failures to parse, use filter(water1, is.na(date_time)).
filter(water1, !is.na(water1$date_time))
# not much valuable info in this observation of missing so remove it
water1 <- water1 %>%
  filter(!is.na(date_time)) %>%
  select(date_time, everything()) %>%
  rename(Date_time = date_time)
# clean coli variable: map censored readings ('<1', '>200.5', '-') to numbers
class(water1$Coli)
unique(water1$Coli)
water1$Coli <- case_when(
  water1$Coli %in% c('<1', '<1 ') ~ '0',
  water1$Coli == '>200.5' ~ '200.5',
  water1$Coli == '-' ~ 'NA',
  TRUE ~ as.character(water1$Coli))
unique(water1$Coli)
# the literal string 'NA' becomes a real NA here (with a coercion warning)
water1$Coli <- as.numeric(water1$Coli)
unique(water1$Coli)
# clean Ecoli variable (same censoring conventions as Coli)
unique(water1$Ecoli)
water1$Ecoli <- case_when(
  water1$Ecoli %in% c('<1', '<1 ') ~ '0',
  water1$Ecoli == '-' ~ 'NA',
  TRUE ~ as.character(water1$Ecoli))
unique(water1$Ecoli)
water1$Ecoli <- as.numeric(water1$Ecoli)
unique(water1$Ecoli)
# check NA for coli and Ecoli
water1 %>%
  filter(is.na(Ecoli) | is.na(Coli))
# remove NA row of coli and Ecoli
# NOTE(review): `|` keeps any row where at least one of Ecoli/Coli is
# non-NA, so rows with a single NA survive (the check just below confirms
# this). To drop every row containing an NA in either column — as the
# comment above says — this should be `&`, or drop_na(Ecoli, Coli).
water1 <- water1 %>%
  filter(!is.na(Ecoli) | !is.na(Coli))
water1 %>%
  filter(is.na(Ecoli))
# Create year, month, day columns
water1 <- water1 %>%
  mutate(Year = year(Date_time),
         Month = month(Date_time),
         Day = day(Date_time))
# Exploratory data analysis ----
# chlorine: inspect NAs and negative readings, then drop them
summary(water1$Chlorine)
filter(water1, is.na(Chlorine))
filter(water1, Chlorine < 0)
water1 <- water1 %>%
  filter(!is.na(Chlorine)) %>%
  filter(Chlorine >= 0)
summary(water1$Chlorine)
water1 %>%
  group_by(Month) %>%
  summarise(mean.chlorine = mean(Chlorine, na.rm = T),
            mean.coli = mean(Coli),
            mean.ecoli = mean(Ecoli))
# separate chlorine into 3 baskets (tertiles of the observed distribution)
cutpoints <- quantile(water1$Chlorine,
                      seq(0, 1, length=4),
                      na.rm=T)
water1$Chlorine_range <- cut(water1$Chlorine, cutpoints, include.lowest = T)
table(water1$Chlorine_range)
# chlorine by month at one site (group_by has no effect on this point plot)
water1 %>%
  group_by(Month) %>%
  filter(Site == '1S07') %>%
  ggplot(.) +
  geom_point(aes(x = Month, y = Chlorine))
water1 %>%
  filter(Site == '1S07')
water1 %>%
  group_by(Month) %>%
  ggplot(.) +
  geom_bar(aes(x = Site))
water1 %>%
  group_by(Day) %>%
  ggplot(.) +
  geom_violin(aes(x = Month, y = Chlorine, group = Month))
# convert chlorine to levels
# NOTE(review): this section switches back to `water` (the pre-cleaning
# frame), not `water1` — Chlorine here still contains NAs and negative
# values removed above. Confirm whether `water1` was intended.
water <- water %>%
  mutate(Chlorine_level = case_when(
    Chlorine <= 0.5 ~ '0-0.5',
    Chlorine > 0.5 & Chlorine <= 1 ~ '0.5-1',
    Chlorine > 1 ~ '> 1')
  )
class(water$Chlorine_level)
# order the levels so plots show them low-to-high
water$Chlorine_level <- factor(water$Chlorine_level,
                               levels = c('0-0.5', '0.5-1', '> 1'))
class(water$Chlorine_level)
levels(water$Chlorine_level)
ggplot(data = water) +
  geom_histogram(aes(x = Chlorine_level))
ggplot(data = water, mapping = aes(x= Chlorine_level)) +
  geom_bar()
# Coli number & chlorine level
ggplot(data = water) +
  geom_bar(mapping = aes(x = Coli))
ggplot(data = water, aes(x = Date, y = Coli)) +
  geom_point()
| /nyc_water.R | no_license | auGGuo/r_data_projects | R | false | false | 3,905 | r | # NYC Drinking Water Quality Distribution Monitoring Data Analysis
# Oct 8 2020
# Packages
library(dplyr)
library(tidyr)
library(ggplot2)
library(lubridate)
# Load data
setwd('~/Desktop/r_projects/practice')
nyc_water <- read.csv('drinking-water-quality-distribution-monitoring-data-1.csv')
# Data cleaning
water <- rename(nyc_water,
Date = Sample.Date,
Time = Sample.Time,
Site = Sample.Site,
Class = Sample.class,
Chlorine = Residual.Free.Chlorine..mg.L.,
Turb = Turbidity..NTU.,
Ecoli = E.coli.Quanti.Tray...MPN.100mL.,
Coli = Coliform..Quanti.Tray...MPN..100mL.,
Fluoride = Fluoride..mg.L.) %>%
select(-Sample.Number)
str(water)
# transform date and time
# convert date and time to POSIX format using lubridate
water1 <- water %>%
mutate(date_time = mdy_hm(paste(Date, Time, sep = ' ')))
# check missing value in date_time
filter(water1, !is.na(water1$date_time))
# not much valuable info in this observation of missing so remove it
water1 <- water1 %>%
filter(!is.na(date_time)) %>%
select(date_time, everything()) %>%
rename(Date_time = date_time)
# clean coli variable
class(water1$Coli)
unique(water1$Coli)
water1$Coli <- case_when(
water1$Coli %in% c('<1', '<1 ') ~ '0',
water1$Coli == '>200.5' ~ '200.5',
water1$Coli == '-' ~ 'NA',
TRUE ~ as.character(water1$Coli))
unique(water1$Coli)
water1$Coli <- as.numeric(water1$Coli)
unique(water1$Coli)
# clean Ecoli variable
unique(water1$Ecoli)
water1$Ecoli <- case_when(
water1$Ecoli %in% c('<1', '<1 ') ~ '0',
water1$Ecoli == '-' ~ 'NA',
TRUE ~ as.character(water1$Ecoli))
unique(water1$Ecoli)
water1$Ecoli <- as.numeric(water1$Ecoli)
unique(water1$Ecoli)
# check NA for coli and Ecoli
water1 %>%
filter(is.na(Ecoli) | is.na(Coli))
# remove NA row of coli and Ecoli
water1 <- water1 %>%
filter(!is.na(Ecoli) | !is.na(Coli))
water1 %>%
filter(is.na(Ecoli))
# Create year, month, day columns
water1 <- water1 %>%
mutate(Year = year(Date_time),
Month = month(Date_time),
Day = day(Date_time))
# Exploratory data analysis ----
# chlorine
summary(water1$Chlorine)
filter(water1, is.na(Chlorine))
filter(water1, Chlorine < 0)
water1 <- water1 %>%
filter(!is.na(Chlorine)) %>%
filter(Chlorine >= 0)
summary(water1$Chlorine)
water1 %>%
group_by(Month) %>%
summarise(mean.chlorine = mean(Chlorine, na.rm = T),
mean.coli = mean(Coli),
mean.ecoli = mean(Ecoli))
# seperate chlorine into 3 baskets
cutpoints <- quantile(water1$Chlorine,
seq(0, 1, length=4),
na.rm=T)
water1$Chlorine_range <- cut(water1$Chlorine, cutpoints, include.lowest = T)
table(water1$Chlorine_range)
water1 %>%
group_by(Month) %>%
filter(Site == '1S07') %>%
ggplot(.) +
geom_point(aes(x = Month, y = Chlorine))
water1 %>%
filter(Site == '1S07')
water1 %>%
group_by(Month) %>%
ggplot(.) +
geom_bar(aes(x = Site))
water1 %>%
group_by(Day) %>%
ggplot(.) +
geom_violin(aes(x = Month, y = Chlorine, group = Month))
# convert chlorine to levels
water <- water %>%
mutate(Chlorine_level = case_when(
Chlorine <= 0.5 ~ '0-0.5',
Chlorine > 0.5 & Chlorine <= 1 ~ '0.5-1',
Chlorine > 1 ~ '> 1')
)
class(water$Chlorine_level)
water$Chlorine_level <- factor(water$Chlorine_level,
levels = c('0-0.5', '0.5-1', '> 1'))
class(water$Chlorine_level)
levels(water$Chlorine_level)
ggplot(data = water) +
geom_histogram(aes(x = Chlorine_level))
ggplot(data = water, mapping = aes(x= Chlorine_level)) +
geom_bar()
# Coli number & chlorine level
ggplot(data = water) +
geom_bar(mapping = aes(x = Coli))
ggplot(data = water, aes(x = Date, y = Coli)) +
geom_point()
|
# Worked examples of correlation coefficients (Pearson, Spearman,
# point-biserial, phi), each computed both by hand and with a library call.
# if you did not install the following package (ggplot2, dplyr), please install.
# install.packages("ggplot2")
# install.packages("dplyr")
library(ggplot2)
library(dplyr)
# correlation: a perfectly linear example (r = 1)
x <- c(1, 2, 3, 4, 5)
y <- c(1, 2, 3, 4, 5)
df <- data.frame(x, y)
ggplot(df, aes(x, y)) +
  geom_point()
cor.test(x,y)
# a second, imperfectly correlated example
x <- c(1, 4, 8, 5)
y <- c(2, 3, 9, 8)
df <- data.frame(x, y)
ggplot(df, aes(x, y)) +
  geom_point()
cor.test(x,y)
################ Pearson Coefficient ###################
# SPSS in focus 15.4
mood <- c(6, 4, 7, 4, 2, 5, 3, 1)
eating <- c(480, 490, 500, 590, 600, 400, 545, 650)
df <- data.frame(mood, eating)
mx <- mean(df$mood)
my <- mean(df$eating)
# create a table 15.1: deviations from the means, cross products, squares
df <- df %>% mutate(x_mx = mood - mx, y_my = eating - my, xy = x_mx*y_my, x2 = x_mx^2, y2 = y_my^2)
# manually calculate r value: r = SS_xy / sqrt(SS_x * SS_y)
ssxy <- sum(df$xy)
ssx <- sum(df$x2)
ssy <- sum(df$y2)
r <- ssxy / sqrt(ssx * ssy)
# use correlation test function (should match the manual r)
cor.test(df$mood, df$eating)
cor(df$mood, df$eating)
# linear regression prediction model
coef(lm(eating ~ mood, data = df))
# plot
# NOTE(review): the intercept/slope below are hard-coded — presumably copied
# from the coef(lm(...)) output above; verify they match if the data change.
p <- ggplot(df, aes(x=mood, y=eating)) +
  geom_point() +
  geom_abline(intercept = 651.16071, slope = -29.82143)
################ Spearman Coefficient ###################
# example 15.2: ranks are given directly (1.5 = tied ranks)
food <- c(1.5, 1.5, 3, 4, 5, 6, 7, 8)
water <- c(1, 3, 2, 6, 4, 7, 8, 5)
df <- data.frame(food, water, D = food-water)
df <- df %>% mutate(D2 = D^2)
ssD2 <- sum(df$D2)
n <- length(df[,1])
# Spearman's formula: r_s = 1 - 6 * sum(D^2) / (n * (n^2 - 1))
rs <- 1- 6 * ssD2 / n / (n^2 -1)
# methods
cor.test(df$food, df$water, method = "spearman")
################ Point-biserial correlation coefficient ###################
# input the data: sex coded 1 = male, 2 = female (per the selections below)
sex <- c(rep(1, 5), rep(2, 7))
dur_laugh <- c(23, 9, 12, 12, 29, 32, 10, 8, 20, 12, 24, 34)
# build a data frame
df <- data.frame(sex, dur_laugh)
# select male only
male <- df$sex == 1
My1 <- mean(df[male,]$dur_laugh)
# select female only
female <- df$sex == 2
My2 <- mean(df[female,]$dur_laugh)
# calculate a total mean
My <- mean(df$dur_laugh)
# create table 15.4
df <- df %>% mutate( y_my = dur_laugh-My, y_my2 = y_my^2)
# manually calculate point-biserial coefficient
# r_pb = (M1 - M2) / s_y * sqrt(n1/n * n2/n), with s_y the population SD
ssy <- sum(df$y_my2)
n <- length(df$dur_laugh)
n1 <- length(df[male,]$dur_laugh)
n2 <- length(df[female,]$dur_laugh)
sy <- sqrt(ssy/n)
rpb <- (My1-My2)/sy * sqrt(n1/n * n2/n)
rpb
# point-biserial coefficient function
# NOTE(review): install.packages() inside a script reinstalls on every run;
# better left as a commented-out one-time step like the header above.
install.packages("ltm")
library(ltm)
biserial.cor(df$dur_laugh, df$sex)
#### Phi correlation coefficient ####
install.packages("psych")
library(psych)
# two binary variables laid out so table(df) forms the 2x2 contingency table
empl <- c(rep(0, 14), rep(1, 6), rep(0, 6), rep(1, 14))
happ <- c(rep(0, 14), rep(0, 6), rep(1, 6), rep(1, 14))
df <- data.frame(empl, happ)
table(df)
phi(table(df))
| /07. Correlation.R | no_license | jkim205/BER540_Spring19 | R | false | false | 2,658 | r | # if you did not install the following package (ggplot2, dplyr), please install.
# install.packages("ggplot2")
# install.packages("dplyr")
library(ggplot2)
library(dplyr)
# correlation
x <- c(1, 2, 3, 4, 5)
y <- c(1, 2, 3, 4, 5)
df <- data.frame(x, y)
ggplot(df, aes(x, y)) +
geom_point()
cor.test(x,y)
x <- c(1, 4, 8, 5)
y <- c(2, 3, 9, 8)
df <- data.frame(x, y)
ggplot(df, aes(x, y)) +
geom_point()
cor.test(x,y)
################ Pearson Coefficient ###################
# SPSS in focus 15.4
mood <- c(6, 4, 7, 4, 2, 5, 3, 1)
eating <- c(480, 490, 500, 590, 600, 400, 545, 650)
df <- data.frame(mood, eating)
mx <- mean(df$mood)
my <- mean(df$eating)
# create a table 15.1
df <- df %>% mutate(x_mx = mood - mx, y_my = eating - my, xy = x_mx*y_my, x2 = x_mx^2, y2 = y_my^2)
# manually calculate r value
ssxy <- sum(df$xy)
ssx <- sum(df$x2)
ssy <- sum(df$y2)
r <- ssxy / sqrt(ssx * ssy)
# use correlation test function
cor.test(df$mood, df$eating)
cor(df$mood, df$eating)
# linear regression prediction model
coef(lm(eating ~ mood, data = df))
# plot
p <- ggplot(df, aes(x=mood, y=eating)) +
geom_point() +
geom_abline(intercept = 651.16071, slope = -29.82143)
################ Spearman Coefficient ###################
# example 15.2
food <- c(1.5, 1.5, 3, 4, 5, 6, 7, 8)
water <- c(1, 3, 2, 6, 4, 7, 8, 5)
df <- data.frame(food, water, D = food-water)
df <- df %>% mutate(D2 = D^2)
ssD2 <- sum(df$D2)
n <- length(df[,1])
rs <- 1- 6 * ssD2 / n / (n^2 -1)
# methods
cor.test(df$food, df$water, method = "spearman")
################ Point-biserial correlation coefficient ###################
# input the data
sex <- c(rep(1, 5), rep(2, 7))
dur_laugh <- c(23, 9, 12, 12, 29, 32, 10, 8, 20, 12, 24, 34)
# build a data frame
df <- data.frame(sex, dur_laugh)
# select male only
male <- df$sex == 1
My1 <- mean(df[male,]$dur_laugh)
# select female only
female <- df$sex == 2
My2 <- mean(df[female,]$dur_laugh)
# calculate a total mean
My <- mean(df$dur_laugh)
# create table 15.4
df <- df %>% mutate( y_my = dur_laugh-My, y_my2 = y_my^2)
# manually calculate point-biserial coefficient
ssy <- sum(df$y_my2)
n <- length(df$dur_laugh)
n1 <- length(df[male,]$dur_laugh)
n2 <- length(df[female,]$dur_laugh)
sy <- sqrt(ssy/n)
rpb <- (My1-My2)/sy * sqrt(n1/n * n2/n)
rpb
# point-biserial coefficient function
install.packages("ltm")
library(ltm)
biserial.cor(df$dur_laugh, df$sex)
#### Phi correlation coefficient ####
install.packages("psych")
library(psych)
empl <- c(rep(0, 14), rep(1, 6), rep(0, 6), rep(1, 14))
happ <- c(rep(0, 14), rep(0, 6), rep(1, 6), rep(1, 14))
df <- data.frame(empl, happ)
table(df)
phi(table(df))
|
# Train a single-hidden-layer neural network on the numeric-encoded training
# data and return rounded 0/1 predictions alongside the actual labels for the
# test data.
#
# Args:
#   training_data, test_data: data frames whose factor column `diagnosis`
#     is unclassed to an integer (1/2) target.
# Returns:
#   A data frame with columns `actual` and `prediction`, both recoded to 0/1.
ann_function <- function(training_data, test_data) {
  #install.packages("neuralnet")
  library(neuralnet)
  # setup training data for ANN: unclass factor columns to integers, drop NAs
  training_data$diagnosis=as.factor(training_data$diagnosis)
  factors1 <- sapply(training_data,is.factor)
  M1<-sapply(training_data[,factors1],unclass)
  ann_training_data<-cbind(training_data[,!factors1],M1)
  ann_training_data = na.omit(ann_training_data)
  # setup test data for ANN (same encoding)
  test_data$diagnosis=as.factor(test_data$diagnosis)
  factors2 <- sapply(test_data,is.factor)
  M2<-sapply(test_data[,factors2],unclass)
  ann_test_data<-cbind(test_data[,!factors2],M2)
  ann_test_data = na.omit(ann_test_data)
  # set seed so the random weight initialisation is reproducible
  set.seed(7)
  # fit neural network: one hidden layer of 10 units, linear output
  neuralNet = neuralnet(M1 ~ .,ann_training_data, hidden =10, linear.output = T)
  #plot(neuralNet)
  # predict on the test features (all columns except the target)
  prediction = compute(neuralNet, ann_test_data[-ncol(ann_test_data)])
  # generate results; `$M` relies on partial matching to the M2 column —
  # TODO(review): confirm, and prefer the explicit column name
  results <- data.frame(actual = ann_test_data$M, prediction = prediction$net.result)
  # round predictions, then shift the 1/2 factor coding down to 0/1
  roundedresults<-sapply(results,round,digits=0)
  roundedresultsdf=data.frame(roundedresults)-1
  # BUG FIX: removed attach(roundedresultsdf) — attaching inside a function
  # permanently modifies the caller's search path and was never detached.
  return (roundedresultsdf)
}
#install.packages("neuralnet")
library(neuralnet)
#setup training data for ANN
# Encode `diagnosis` as a factor, then unclass every factor column to its
# underlying integer codes so the network sees only numeric input.
training_data$diagnosis=as.factor(training_data$diagnosis)
factors1 <- sapply(training_data,is.factor)
M1<-sapply(training_data[,factors1],unclass)
# Recombine numeric columns with the unclassed codes; drop incomplete rows.
ann_training_data<-cbind(training_data[,!factors1],M1)
ann_training_data = na.omit(ann_training_data)
#setup test data for ANN
# Same preprocessing for the test set.
test_data$diagnosis=as.factor(test_data$diagnosis)
factors2 <- sapply(test_data,is.factor)
M2<-sapply(test_data[,factors2],unclass)
ann_test_data<-cbind(test_data[,!factors2],M2)
ann_test_data = na.omit(ann_test_data)
#set seed
# Fixed seed so the random starting weights are reproducible.
set.seed(7)
#fit neural network
# NOTE(review): the response `M1` refers to the column created by the cbind()
# above - presumably the encoded diagnosis; confirm the column name.
neuralNet = neuralnet(M1 ~ .,ann_training_data, hidden =10, linear.output = T)
#plot(neuralNet)
#predict
# Predict on the test set, excluding its last column (the response).
prediction = compute(neuralNet, ann_test_data[-ncol(ann_test_data)])
#generate results
# NOTE(review): `ann_test_data$M` relies on R's partial name matching - fragile.
results <- data.frame(actual = ann_test_data$M, prediction = prediction$net.result)
# Round predictions to class labels; the `- 1` shifts factor codes to 0/1.
roundedresults<-sapply(results,round,digits=0)
roundedresultsdf=data.frame(roundedresults)-1
# NOTE(review): attach() modifies the global search path as a side effect.
attach(roundedresultsdf)
return (roundedresultsdf)
} |
# Output of one bootstrap replicate: RNG seed, fit settings and fit statistics.
# NOTE(review): field semantics are inferred from the names - verify against
# the script that generated this file.
seed <- 896
log.wt <- 0.0
penalty <- 2.8115950178536287e-8
# Interval breakpoints: none on the send side; the receive side roughly
# doubles from 56 up to 58982400.
intervals.send <- c()
intervals.recv <- c(56, 112, 225, 450, 900, 1800, 3600, 7200, 14400, 28800, 57600, 115200, 230400, 460800, 921600, 1843200, 3686400, 7372800, 14745600, 29491200, 58982400)
# Null and residual deviance with their degrees of freedom; the df difference
# (35567 - 35402 = 165) matches `df`, the coefficient count of `coefs` below.
dev.null <- 358759.0022669336
df.null <- 35567
dev.resid <- 225055.57212259946
df.resid <- 35402
df <- 165
coefs <- c(6.773843317767446, 5.745560335041355, 5.5588620191808795, 5.2440293649441, 5.149502905064497, 4.935928515007903, 4.850840575684367, 4.697621734503491, 4.434124950032732, 4.225476228541113, 4.3925906293967225, 4.204161760539209, 4.112552112056338, 3.99700279214841, 3.7874191716201375, 3.55135420498801, 3.276153857920989, 2.9695442998910795, 2.5460996442182253, 2.0728025272096673, 1.6948165555007093, 0.910726478846749, 0.8617817835947721, 0.3142290267499135, -0.12040643356038433, -1.268625227095324, -0.4981290992576348, 0.9840647568809436, 0.9649811378642756, -1.3858752104800192, -2.3314818294931983, -1.8686536274079992, -0.1981236621688798, 0.7368814802939254, 1.1658755557148586, -0.853693981309952, 0.17348568765397396, -0.4592806027581435, -0.15690970741705124, -0.8681051878345283, 0.8671509126842645, 0.8456409469002836, -0.6865678039306257, -2.0724401293304044, -0.5920828243324132, -0.7999086994432221, -0.3667990903649718, 0.43021322987630106, 0.21757622237843685, -1.286883444997211, -0.25052910131055545, 0.9978736523265276, -2.048768909071061, 1.7186852384370148, 0.7200902682832191, 0.9101281827565536, -1.7837649421709696, -2.752319351392389e-2, -0.33426929446029474, 0.9829101187809485, 1.0353033549805544, 1.0276433372814702, -0.7331879929538814, -0.7380555218654168, -0.5178679494784477, 0.14371791229420458, 0.5415540779855342, -0.49387082144639555, -1.5556706396435183, -0.462827197665836, -1.670308677430088, -8.818354947545427e-2, 0.36605207424604413, 1.0655403508379508, 0.5446519733714332, -1.3830161533407823, -1.3814920911699666, -1.2142347274974414, 6.496774981195987e-2, 0.6242965283951487, 1.1065453163614873, 4.353788101494682e-2, 0.24967601654576038, -1.5511844622363924, -0.12643964102664115, 0.2973451324027197, 1.166343054773736, 0.6097152974220814, 0.8392891989314554, -2.1806395706539456, 0.4882309758995381, 0.6278762103495582, 0.7599035301712707, 0.25605255124726933, 4.2460647402988896e-2, 1.0527550511195256, 0.11290301004130959, 
0.14223439832082885, -0.2973195622888876, -0.5059341638755269, 0.3077829188978556, -0.31580474825632787, 0.8322864461000352, 0.3382936018273763, 0.7165938173232991, 0.809435881798284, 1.0035579843842368, -0.5168785736260824, -0.8608258925799541, -0.8605373215115386, 0.3786332289568482, 0.6171878292128343, 1.5055164684169617, -0.26626181212099914, -0.15453024389908782, -0.8812273001961423, 0.7652942560214043, -0.25083330946549925, 0.3483981820830796, 0.3409951393219135, -0.7512909069120073, -0.5899797305869225, -0.8702149338716465, -0.5470458486533207, 0.30431481665423893, 0.7428576549325432, -4.47972287580618e-3, 0.8881814543113996, -0.9377642083059783, -0.48209252182273404, 0.22052241995747246, 0.845326728604451, 0.7366507592601281, 0.5267803596866181, 2.398217430403898e-2, 1.297534008498764, -0.47005978044678043, 1.0505452975307181, 0.6875686149036219, 0.9387905313377712, 0.7527984928993172, -0.6262498565825195, -1.2888408630598425, 0.46257251693737156, 0.3691075637842579, 0.5776279876502489, -0.3656920862943102, -7.945464960969507e-2, -1.8458510008660138, 1.2634124663983428, 9.629327267819271e-2, 1.0920648634980037, -0.17022523524699168, -0.10719949470761879, -0.2320546643680042, -1.4190612447425819, -1.1521157982657173, 0.8281347607693471, 1.0583720519878173, -0.38476401916188757, 1.4750042775834356, -0.4020434474845054, -0.25393180546341115, -0.10343949409489794, 1.1364960990919533)
| /analysis/boot/boot896.R | no_license | patperry/interaction-proc | R | false | false | 3,757 | r | seed <- 896
log.wt <- 0.0
penalty <- 2.8115950178536287e-8
intervals.send <- c()
intervals.recv <- c(56, 112, 225, 450, 900, 1800, 3600, 7200, 14400, 28800, 57600, 115200, 230400, 460800, 921600, 1843200, 3686400, 7372800, 14745600, 29491200, 58982400)
dev.null <- 358759.0022669336
df.null <- 35567
dev.resid <- 225055.57212259946
df.resid <- 35402
df <- 165
coefs <- c(6.773843317767446, 5.745560335041355, 5.5588620191808795, 5.2440293649441, 5.149502905064497, 4.935928515007903, 4.850840575684367, 4.697621734503491, 4.434124950032732, 4.225476228541113, 4.3925906293967225, 4.204161760539209, 4.112552112056338, 3.99700279214841, 3.7874191716201375, 3.55135420498801, 3.276153857920989, 2.9695442998910795, 2.5460996442182253, 2.0728025272096673, 1.6948165555007093, 0.910726478846749, 0.8617817835947721, 0.3142290267499135, -0.12040643356038433, -1.268625227095324, -0.4981290992576348, 0.9840647568809436, 0.9649811378642756, -1.3858752104800192, -2.3314818294931983, -1.8686536274079992, -0.1981236621688798, 0.7368814802939254, 1.1658755557148586, -0.853693981309952, 0.17348568765397396, -0.4592806027581435, -0.15690970741705124, -0.8681051878345283, 0.8671509126842645, 0.8456409469002836, -0.6865678039306257, -2.0724401293304044, -0.5920828243324132, -0.7999086994432221, -0.3667990903649718, 0.43021322987630106, 0.21757622237843685, -1.286883444997211, -0.25052910131055545, 0.9978736523265276, -2.048768909071061, 1.7186852384370148, 0.7200902682832191, 0.9101281827565536, -1.7837649421709696, -2.752319351392389e-2, -0.33426929446029474, 0.9829101187809485, 1.0353033549805544, 1.0276433372814702, -0.7331879929538814, -0.7380555218654168, -0.5178679494784477, 0.14371791229420458, 0.5415540779855342, -0.49387082144639555, -1.5556706396435183, -0.462827197665836, -1.670308677430088, -8.818354947545427e-2, 0.36605207424604413, 1.0655403508379508, 0.5446519733714332, -1.3830161533407823, -1.3814920911699666, -1.2142347274974414, 6.496774981195987e-2, 0.6242965283951487, 1.1065453163614873, 4.353788101494682e-2, 0.24967601654576038, -1.5511844622363924, -0.12643964102664115, 0.2973451324027197, 1.166343054773736, 0.6097152974220814, 0.8392891989314554, -2.1806395706539456, 0.4882309758995381, 0.6278762103495582, 0.7599035301712707, 0.25605255124726933, 4.2460647402988896e-2, 1.0527550511195256, 0.11290301004130959, 
0.14223439832082885, -0.2973195622888876, -0.5059341638755269, 0.3077829188978556, -0.31580474825632787, 0.8322864461000352, 0.3382936018273763, 0.7165938173232991, 0.809435881798284, 1.0035579843842368, -0.5168785736260824, -0.8608258925799541, -0.8605373215115386, 0.3786332289568482, 0.6171878292128343, 1.5055164684169617, -0.26626181212099914, -0.15453024389908782, -0.8812273001961423, 0.7652942560214043, -0.25083330946549925, 0.3483981820830796, 0.3409951393219135, -0.7512909069120073, -0.5899797305869225, -0.8702149338716465, -0.5470458486533207, 0.30431481665423893, 0.7428576549325432, -4.47972287580618e-3, 0.8881814543113996, -0.9377642083059783, -0.48209252182273404, 0.22052241995747246, 0.845326728604451, 0.7366507592601281, 0.5267803596866181, 2.398217430403898e-2, 1.297534008498764, -0.47005978044678043, 1.0505452975307181, 0.6875686149036219, 0.9387905313377712, 0.7527984928993172, -0.6262498565825195, -1.2888408630598425, 0.46257251693737156, 0.3691075637842579, 0.5776279876502489, -0.3656920862943102, -7.945464960969507e-2, -1.8458510008660138, 1.2634124663983428, 9.629327267819271e-2, 1.0920648634980037, -0.17022523524699168, -0.10719949470761879, -0.2320546643680042, -1.4190612447425819, -1.1521157982657173, 0.8281347607693471, 1.0583720519878173, -0.38476401916188757, 1.4750042775834356, -0.4020434474845054, -0.25393180546341115, -0.10343949409489794, 1.1364960990919533)
|
#' Speed and Acceleration Plots over Time
#'
#' @description
#' This function returns a player's in-situ speed and acceleration v time curves
#'
#'
#' @param player_profile player profile
#'
#' @return A player's speed and acceleration curves over time
#' @export
#'
#' @examples
#' data(player_a)
#' player_a_data <- tracking_data(player_a$speed, player_a$accel)
#' player_a_profile <- player_profile(player_a_data)
#' player_a_speed_accel_curves <- game_player_plot(player_a_profile)
# Generic entry point: S3-dispatches on the class of `player_profile`.
game_player_plot <- function(player_profile) {
  UseMethod("game_player_plot")
}
#' @export
# Default method: draws the player's speed and acceleration curves against
# time on a single panel and returns the recorded plot.
# NOTE(review): depends on the helper `player_plot_values()` defined elsewhere,
# which is called twice - presumably pure; confirm before refactoring.
game_player_plot.default <- function(player_profile) {
par(mfrow = c(1,1))
# Speed-vs-time curve; axis limits are hard-coded (0-5 s, 0-16 yd/s).
player_plot <- plot(speed ~ time_splits, player_plot_values(player_profile), type = "l",
xlim = c(0, 5),
ylim = c(0, 16),
xlab = "Time (s)",
ylab = "Speed & Acceleration (yd/s & yd/s/s)",
main = "Speed-Acceleration Plot")
# Overlay the acceleration-vs-time curve in red.
lines(acceleration ~ time_splits, player_plot_values(player_profile), col = "red")
# Annotate with max speed, max acceleration and tau taken from the profile.
# NOTE(review): legend uses point symbols (pch = 17) although curves are lines.
legend("bottomright",
c(paste("Max Speed:", player_profile[[1]]),
paste("Max Accel:", player_profile[[2]]),
paste("Tau:", round(player_profile[[3]], 2))),
col = c("black", "red", "grey"), pch = 17)
# Capture and return the rendered plot.
recordPlot(player_plot)
}
| /R/game_player_plot.R | no_license | aadler/midsprint | R | false | false | 1,392 | r | #' Speed and Acceleration Plots over Time
#'
#' @description
#' This function returns a player's in-situ speed and acceleration v time curves
#'
#'
#' @param player_profile player profile
#'
#' @return A player's speed and acceleration curves over time
#' @export
#'
#' @examples
#' data(player_a)
#' player_a_data <- tracking_data(player_a$speed, player_a$accel)
#' player_a_profile <- player_profile(player_a_data)#'
#' player_a_speed_accel_curves <- player_plot(player_profile)}
game_player_plot <- function (player_profile)
{
UseMethod("game_player_plot")
}
#' @export
game_player_plot.default <- function(player_profile) {
par(mfrow = c(1,1))
player_plot <- plot(speed ~ time_splits, player_plot_values(player_profile), type = "l",
xlim = c(0, 5),
ylim = c(0, 16),
xlab = "Time (s)",
ylab = "Speed & Acceleration (yd/s & yd/s/s)",
main = "Speed-Acceleration Plot")
lines(acceleration ~ time_splits, player_plot_values(player_profile), col = "red")
legend("bottomright",
c(paste("Max Speed:", player_profile[[1]]),
paste("Max Accel:", player_profile[[2]]),
paste("Tau:", round(player_profile[[3]], 2))),
col = c("black", "red", "grey"), pch = 17)
recordPlot(player_plot)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/looker.tools.R
\name{looker.chunker}
\alias{looker.chunker}
\title{looker.chunker}
\usage{
looker.chunker(looker.path, data.path, start.date, end.date,
interval = "1 month")
}
\arguments{
\item{looker.path}{- The path to the file that contains the looker dictionary}
\item{data.path}{- The path to your data dump}
\item{start.date}{- The start date of the query}
\item{end.date}{- The end date of the query.}
\item{interval}{- String input for the interval size. See the 'by' input
parameter from seq()}
}
\value{
Returns null. The function writes out the data frames to data.path.
}
\description{
This function takes a looker query and chops it up into smaller
portions, ideally to improve runtime. The looker dictionary must only have
two date fields, and they must be sequentially ordered so start.date comes
first.
}
| /man/looker.chunker.Rd | no_license | christiantillich/AnaliTools | R | false | true | 905 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/looker.tools.R
\name{looker.chunker}
\alias{looker.chunker}
\title{looker.chunker}
\usage{
looker.chunker(looker.path, data.path, start.date, end.date,
interval = "1 month")
}
\arguments{
\item{looker.path}{- The path to the file that contains the looker dictionary}
\item{data.path}{- The path to your data dump}
\item{start.date}{- The start date of the query}
\item{end.date}{- The end date of the query.}
\item{interval}{- String input for the interval size. See the 'by' input
parameter from seq()}
}
\value{
Returns null. The function writes out the data frames to data.path.
}
\description{
This function takes a looker query and chops it up into smaller
portions, ideally to improve runtime. The looker dictionary must only have
two date fields, and they must be sequentially ordered so start.date comes
first.
}
|
exam <- read.csv("csv_exam.csv")
head(exam)
tail(exam)
View(exam)
dim(exam)
str(exam)
summary(exam)
quantile(exam$math)
# Mile per Gallon Data
mpg <- as.data.frame(ggplot2::mpg)
write.csv(mpg, "mpg.csv")
head(mpg)
tail(mpg)
View(mpg)
dim(mpg)
str(mpg)
?mpg
summary(mpg)
install.packages("dplyr")
library(dplyr)
df_raw <- data.frame(var1 = c(1, 2, 1), var2 = c(2, 3, 2))
df_raw
df_new <- df_raw
df_new <- rename(df_new, v2 = var2)
df_new
# Try it yourself (p112)
mpg_new <- mpg
mpg_new <- rename(mpg_new, city = cty)
mpg_new <- rename(mpg_new, highway = hwy)
head(mpg_new)
df <- data.frame(var1=c(4,3,8), var2=c(2,6,1))
df
df$var_sum <- df$var1 + df$var2
df
df$var_mean <- df$var_sum / 2
df
mpg$total <- (mpg$cty + mpg$hwy) / 2
head(mpg)
mean(mpg$total)
summary(mpg$total)
hist(mpg$total)
mpg$test <- ifelse(mpg$total >= 20, "pass", "fail")
tail(mpg, 10)
table(mpg$test)
library(ggplot2)
qplot(mpg$test)
mpg$grade <- ifelse(mpg$total >= 30, "A",
ifelse(mpg$total >= 20, "B", "C"))
tail(mpg, 10)
head(mpg, 100) %>% tail(10)
table(mpg$grade)
qplot(mpg$grade)
# Analysis challenge (p123)
midwest <- as.data.frame(ggplot2::midwest)
head(midwest)
write.csv(midwest, "midwest.csv")
dim(midwest)
str(midwest)
summary(midwest)
midwest <- rename(midwest, asian=popasian)
midwest <- rename(midwest, total=poptotal)
head(midwest)
midwest$perasian <- midwest$asian / midwest$total * 100
hist(midwest$perasian)
midwest$grade <- ifelse(midwest$perasian >= mean(midwest$perasian),
"large", "small")
table(midwest$grade)
qplot(midwest$grade)
quantile(midwest$perasian)
midwest$group <- ifelse(midwest$perasian >= 0.5211608, "A",
ifelse(midwest$perasian >= 0.2971697, "B",
ifelse(midwest$perasian >= 0.1737387, "C", "D")))
table(midwest$group)
qplot(midwest$group) | /easy_r/Scripts/sc05-1.R | no_license | ckiekim/BigDataWithR-Lecture | R | false | false | 1,857 | r | exam <- read.csv("csv_exam.csv")
head(exam)
tail(exam)
View(exam)
dim(exam)
str(exam)
summary(exam)
quantile(exam$math)
# Mile per Gallon Data
mpg <- as.data.frame(ggplot2::mpg)
write.csv(mpg, "mpg.csv")
head(mpg)
tail(mpg)
View(mpg)
dim(mpg)
str(mpg)
?mpg
summary(mpg)
install.packages("dplyr")
library(dplyr)
df_raw <- data.frame(var1 = c(1, 2, 1), var2 = c(2, 3, 2))
df_raw
df_new <- df_raw
df_new <- rename(df_new, v2 = var2)
df_new
# Try it yourself (p112)
mpg_new <- mpg
mpg_new <- rename(mpg_new, city = cty)
mpg_new <- rename(mpg_new, highway = hwy)
head(mpg_new)
df <- data.frame(var1=c(4,3,8), var2=c(2,6,1))
df
df$var_sum <- df$var1 + df$var2
df
df$var_mean <- df$var_sum / 2
df
mpg$total <- (mpg$cty + mpg$hwy) / 2
head(mpg)
mean(mpg$total)
summary(mpg$total)
hist(mpg$total)
mpg$test <- ifelse(mpg$total >= 20, "pass", "fail")
tail(mpg, 10)
table(mpg$test)
library(ggplot2)
qplot(mpg$test)
mpg$grade <- ifelse(mpg$total >= 30, "A",
ifelse(mpg$total >= 20, "B", "C"))
tail(mpg, 10)
head(mpg, 100) %>% tail(10)
table(mpg$grade)
qplot(mpg$grade)
# Analysis challenge (p123)
midwest <- as.data.frame(ggplot2::midwest)
head(midwest)
write.csv(midwest, "midwest.csv")
dim(midwest)
str(midwest)
summary(midwest)
midwest <- rename(midwest, asian=popasian)
midwest <- rename(midwest, total=poptotal)
head(midwest)
midwest$perasian <- midwest$asian / midwest$total * 100
hist(midwest$perasian)
midwest$grade <- ifelse(midwest$perasian >= mean(midwest$perasian),
"large", "small")
table(midwest$grade)
qplot(midwest$grade)
quantile(midwest$perasian)
midwest$group <- ifelse(midwest$perasian >= 0.5211608, "A",
ifelse(midwest$perasian >= 0.2971697, "B",
ifelse(midwest$perasian >= 0.1737387, "C", "D")))
table(midwest$group)
qplot(midwest$group) |
#' Method to initialize EM parameters. Carries out a single GLM fit and applies random noise to form starting space.
#'
#' Fits one GLM to the data, then perturbs its coefficient vector K times with
#' multiplicative Gaussian noise to produce K distinct starting points.
#'
#' @inheritParams em.glm
#' @return A K-length list, each holding parameters.
#' @examples
#' x <- model.matrix(~ 1 + factor(wool) + factor(tension), data = warpbreaks)
#' y <- warpbreaks$breaks
#'
#' init.fit(y = y, x = x, K = 2)
#'
#' @export
init.fit <- function(y, x, K, weight = c(1), family = poisson(), noise = 1) {
  # Fall back to unit weights when `weight` does not match the response length
  # (this covers the default `c(1)` sentinel).
  if (length(weight) != length(y)) {
    weight <- rep(1, length(y))
  }
  if (family$family == "poisson") {
    # Poisson: model raw counts with a log-exposure offset.
    model <- glm(y ~ -1 + x, family = family, offset = log(weight))
  } else {
    # Other families: model the rate, weighting observations by exposure.
    rate <- y / weight
    model <- glm(rate ~ -1 + x, family = family, weights = weight)
  }
  param <- coef(model)
  # seq_len() rather than 1:K so that K = 0 gives an empty list, not c(1, 0).
  lapply(seq_len(K), function(i) param * rnorm(length(param), 1, noise))
}
#' Method to initialize EM parameters. Purely standard normal noise.
#'
#' Draws K independent parameter vectors (one entry per column of `x`)
#' from a Normal(mean = 1, sd = noise) distribution.
#'
#' @inheritParams em.glm
#' @return A K-length list, each holding parameters.
#'
#' @examples
#' x <- model.matrix(~ 1 + factor(wool) + factor(tension), data = warpbreaks)
#'
#' init.random(x = x, K = 2)
#'
#' @export
init.random <- function(x, K, noise = 1) {
  # One starting value per model-matrix column.
  p <- ncol(x)
  # seq_len() rather than 1:K so that K = 0 gives an empty list, not c(1, 0).
  lapply(seq_len(K), function(i) rnorm(p, 1, noise))
}
| /R/initializers.R | no_license | Stat-Cook/emax.glm | R | false | false | 1,208 | r | #' Method to initialize EM parameters. Carries out a single GLM fit and applies random noise to form starting space.
#' @inheritParams em.glm
#' @return A K-length list, each holding parameters.
#' @examples
#' x <- model.matrix(~ 1 + factor(wool) + factor(tension), data = warpbreaks)
#' y <- warpbreaks$breaks
#'
#' init.fit(y = y, x = x, K = 2)
#'
#' @export
init.fit <- function(y, x, K, weight = c(1), family = poisson(), noise = 1) {
  # Fall back to unit weights when `weight` does not match the response length
  # (this covers the default `c(1)` sentinel).
  if (length(weight) != length(y)) {
    weight <- rep(1, length(y))
  }
  if (family$family == "poisson") {
    # Poisson: model raw counts with a log-exposure offset.
    model <- glm(y ~ -1 + x, family = family, offset = log(weight))
  } else {
    # Other families: model the rate, weighting observations by exposure.
    rate <- y / weight
    model <- glm(rate ~ -1 + x, family = family, weights = weight)
  }
  param <- coef(model)
  # seq_len() rather than 1:K so that K = 0 gives an empty list, not c(1, 0).
  lapply(seq_len(K), function(i) param * rnorm(length(param), 1, noise))
}
#' Method to initialize EM parameters. Purely standard normal noise.
#'
#' Draws K independent parameter vectors (one entry per column of `x`)
#' from a Normal(mean = 1, sd = noise) distribution.
#'
#' @inheritParams em.glm
#' @return A K-length list, each holding parameters.
#'
#' @examples
#' x <- model.matrix(~ 1 + factor(wool) + factor(tension), data = warpbreaks)
#'
#' init.random(x = x, K = 2)
#'
#' @export
init.random <- function(x, K, noise = 1) {
  # One starting value per model-matrix column.
  p <- ncol(x)
  # seq_len() rather than 1:K so that K = 0 gives an empty list, not c(1, 0).
  lapply(seq_len(K), function(i) rnorm(p, 1, noise))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/util.R
\name{pcor1}
\alias{pcor1}
\title{partial correlation}
\usage{
pcor1(x, y, z)
}
\arguments{
\item{x}{vector}
\item{y}{vector}
\item{z}{vector}
}
\value{
partial correlation
}
\description{
partial correlation cor(x,y|z)
}
\details{
pcor1
partial correlation cor(x,y|z)
}
\examples{
pcor1(rnorm(100),rnorm(100),rnorm(100))
}
\references{
Handbook \emph{Statistical foundations of machine learning} available in \url{http://www.ulb.ac.be/di/map/gbonte/mod_stoch/syl.pdf}
}
\author{
Gianluca Bontempi \email{gbonte@ulb.ac.be}
}
| /man/pcor1.Rd | no_license | gbonte/gbcode | R | false | true | 618 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/util.R
\name{pcor1}
\alias{pcor1}
\title{partial correlation}
\usage{
pcor1(x, y, z)
}
\arguments{
\item{x:}{vector}
\item{y:}{vector}
\item{z:}{vector}
}
\value{
partial correlation
}
\description{
partial correlation cor(x,y|z)
}
\details{
pcor1
partial correlation cor(x,y|z)
}
\examples{
pcor1(rnorm(100),rnorm(100),rnorm(100))
}
\references{
Handbook \emph{Statistical foundations of machine learning} available in \url{http://www.ulb.ac.be/di/map/gbonte/mod_stoch/syl.pdf}
}
\author{
Gianluca Bontempi \email{gbonte@ulb.ac.be}
}
|
#BMW model for R
#from Wynne Godley and Marc Lavoie
#Monetary Economics
#Chapter 7
#Created by Marco Veronese Passarella on 30 May 2019
#STEP 1: Clear the workspace and define the number of periods and scenarios
rm(list=ls(all=TRUE))
#Number of periods
nPeriods = 90
#Number of scenarios
nScenarios=3
#STEP 2: Define model variables and set their initial values
#Variables
#Amortization funds
af=matrix(data=0,nrow=nScenarios,ncol=nPeriods)
#Consumption goods demand by households
c=matrix(data=0,nrow=nScenarios,ncol=nPeriods)
#Consumption goods supply
cs=matrix(data=0,nrow=nScenarios,ncol=nPeriods)
#Depreciation allowances
da=matrix(data=0,nrow=nScenarios,ncol=nPeriods)
#Stock of capital
k=matrix(data=200,nrow=nScenarios,ncol=nPeriods)
#Target stock of capital
kt=matrix(data=0,nrow=nScenarios,ncol=nPeriods)
#Demand for bank loans
ld=matrix(data=200,nrow=nScenarios,ncol=nPeriods)
#Supply of bank loans
ls=matrix(data=200,nrow=nScenarios,ncol=nPeriods)
#Demand for Investment
id=matrix(data=0,nrow=nScenarios,ncol=nPeriods)
#Supply of Investment
is=matrix(data=0,nrow=nScenarios,ncol=nPeriods)
#Bank deposits held by households
mh=matrix(data=200,nrow=nScenarios,ncol=nPeriods)
#Supply of bank deposits
ms=matrix(data=200,nrow=nScenarios,ncol=nPeriods)
#Labour
n=matrix(data=0,nrow=nScenarios,ncol=nPeriods)
#Labour productivity
pr=matrix(data=1,nrow=nScenarios,ncol=nPeriods)
#Rate of interests on banks loans
rl=matrix(data=0.04,nrow=nScenarios,ncol=nPeriods)
#Rate of interests on bank loans - exogenously set
rl_bar=matrix(data=0.04,nrow=nScenarios,ncol=nPeriods)
#Rate of interests on bank deposits
rm=matrix(data=0.04,nrow=nScenarios,ncol=nPeriods)
#Wage rate
w=matrix(data=0.86,nrow=nScenarios,ncol=nPeriods)
#Wage Bill
wbd=matrix(data=0,nrow=nScenarios,ncol=nPeriods)
#Supply of Wages
wbs=matrix(data=0,nrow=nScenarios,ncol=nPeriods)
#Income
y=matrix(data=200,nrow=nScenarios,ncol=nPeriods)
#Disposable income of households
yd=matrix(data=0,nrow=nScenarios,ncol=nPeriods)
#Set autonomous component of consumption as a matrix (for shocks)
alpha0=matrix(data=25,nrow=nScenarios,ncol=nPeriods)
#Set autonomous propensity to consume out of income as a matrix (for shocks)
alpha1=matrix(data=0.75,nrow=nScenarios,ncol=nPeriods)
#STEP 3: Set values for parameters and exogenous variables (excluding shocks)
alpha2=0.1 #Propensity to consume out of wealth
delta=0.1 #Depreciation rate
gamma=0.15 #Reaction speed of adjustment of capital to its target value
kappa=1 #Capital-Output ratio
#Choose scenario
for (j in 1:nScenarios){
#Define time loop
for (i in 2:nPeriods){
#Define iterations
for (iterations in 1:100){
#Introduce shocks
if (i>=6 && j==2){
alpha0[j,i]=28
}
if (i>=6 && j==3){
alpha1[j,i]=0.74
}
#STEP 4: Model
#Households
yd[j,i] = wbd[j,i] + rm[j,i-1]*mh[j,i-1]
mh[j,i] = mh[j,i-1] + yd[j,i] - c[j,i]
c[j,i] = alpha0[j,i] + alpha1[j,i]*yd[j,i] + alpha2*mh[j,i-1]
#Firms
y[j,i] = c[j,i] + id[j,i]
kt[j,i] = kappa*y[j,i-1]
da[j,i] = delta*k[j,i-1]
af[j,i] = da[j,i]
id[j,i] = gamma*(kt[j,i] - k[j,i-1]) + da[j,i]
k[j,i] = k[j,i-1] + id[j,i] - da[j,i]
ld[j,i] = ld[j,i-1] + id[j,i] - af[j,i]
wbd[j,i] = y[j,i] - rl[j,i-1]*ld[j,i-1] - af[j,i]
#Banks
ls[j,i] = ls[j,i-1] + (ld[j,i] - ld[j,i-1]) #Supply of bank loans
ms[j,i] = ms[j,i-1] + (ls[j,i] - ls[j,i-1])
rm[j,i] = rl[j,i]
rl[j,i] = rl_bar[j,i]
# Wage Bill equations
wbs[j,i] = w[j,i]*n[j,i] #Supply of wages
n[j,i] = y[j,i]/pr[j,i] #Labour demand
w[j,i] = wbd[j,i]/n[j,i] #Wage rate
}
}
}
#STEP 5: Consistency check (redundant equation)
plot(mh[1,2:nPeriods]-ms[1,2:nPeriods], type="l", ylim = range(-5,5))
#STEP 6: Create and display graphs
x=c("1958":"2001")
#Figure 7.1
plot(yd[2,2:45],xaxt='n',type="l",col="1",lwd=2,lty=1,font.main=1,cex.main=0.75,main="Figure 7.1: Evolution of household disposable income and \n consumption, following an increase in autonomous \n consumption expenditures",ylab = '',xlab = '',ylim=range(180,210))
lines(c[2,2:45],type="l",lwd=2,lty=2,col="4")
#par(xpd=TRUE)
legend("bottomright",c("Disposable income","Consumption"), bty = "n", cex = 0.8, lty=c(1,2), lwd=c(2,2), col = c(1,4), box.lwd=0)
axis(side=1,at=1:44,labels=x,tck=-0.07)
#Figure 7.2
plot(id[2,2:45],xaxt='n',type="l",col="2",lwd=2,lty=1,font.main=1,cex.main=0.75,main="Figure 7.2: Evolution of gross investment and disposable investment, \n following an increase in autonomous consumption expenditures",ylab = '',xlab = '',ylim=range(19,24))
lines(da[2,2:45],type="l",lwd=2,lty=2,col="3")
#par(xpd=TRUE)
legend(10,21.5,c("Gross investment","Replacement investment \n (capital depreciation)"), bty = "n", cex = 0.8, lty=c(1,1), lwd=c(2,2), col = c(2,3), box.lwd=0)
axis(side=1,at=1:44,labels=x,tck=-0.07)
#Figure 7.3
plot(yd[3,2:45],xaxt='n',type="l",col="1",lwd=2,lty=1,font.main=1,cex.main=0.75,main="Figure 7.3: Evolution of household disposable income and \n consumption, following an increase in the propensity \n to save out of disposable income",ylab = '',xlab = '',ylim=range(165,180))
lines(c[3,2:45],type="l",lwd=2,lty=2,col="5")
#par(xpd=TRUE)
legend("topright",c("Disposable income","Consumption"), bty = "n", cex = 0.8, lty=c(1,2), lwd=c(2,2), col = c(1,5), box.lwd=0)
axis(side=1,at=1:44,labels=x,tck=-0.07)
#Figure 7.4
plot(y[3,2:45]/k[3,2:45],xaxt='n',type="l",col="2",lwd=2,lty=1,font.main=1,cex.main=0.75,main="Figure 7.4: Evolution of the output to capital ratio following \n an increase in the propensity to save out of disposable income",ylab = '',xlab = '',ylim=range(0.94,1.001))
#par(xpd=TRUE)
legend(15,0.97,c("Output to capital ratio \n (a proxy for the output \n to capacity ratio)"), bty = "n", cex = 0.8, lty=c(1), lwd=c(2), col = c(2), box.lwd=0)
axis(side=1,at=1:44,labels=x,tck=-0.07) | /BMW.R | no_license | anhnguyendepocen/SFC-models-R | R | false | false | 6,149 | r | #BMW model for R
#from Wynne Godley and Marc Lavoie
#Monetary Economics
#Chapter 7
#Created by Marco Veronese Passarella on 30 May 2019
#STEP 1: Clear the workspace and define the number of periods and scenarios
rm(list=ls(all=TRUE))
#Number of periods
nPeriods = 90
#Number of scenarios
nScenarios=3
#STEP 2:
#Variables
#Amortization funds
af=matrix(data=0,nrow=nScenarios,ncol=nPeriods)
#Consumption goods demand by households
c=matrix(data=0,nrow=nScenarios,ncol=nPeriods)
#Consumption goods supply
cs=matrix(data=0,nrow=nScenarios,ncol=nPeriods)
#Depreciation allowances
da=matrix(data=0,nrow=nScenarios,ncol=nPeriods)
#Stock of capital
k=matrix(data=200,nrow=nScenarios,ncol=nPeriods)
#Target stock of capital
kt=matrix(data=0,nrow=nScenarios,ncol=nPeriods)
#Demand for bank loans
ld=matrix(data=200,nrow=nScenarios,ncol=nPeriods)
#Supply of bank loans
ls=matrix(data=200,nrow=nScenarios,ncol=nPeriods)
#Demand for Investment
id=matrix(data=0,nrow=nScenarios,ncol=nPeriods)
#Supply of Investment
is=matrix(data=0,nrow=nScenarios,ncol=nPeriods)
#Bank deposits held by households
mh=matrix(data=200,nrow=nScenarios,ncol=nPeriods)
#Supply of bank deposits
ms=matrix(data=200,nrow=nScenarios,ncol=nPeriods)
#Labour
n=matrix(data=0,nrow=nScenarios,ncol=nPeriods)
#Labour productivity
pr=matrix(data=1,nrow=nScenarios,ncol=nPeriods)
#Rate of interests on banks loans
rl=matrix(data=0.04,nrow=nScenarios,ncol=nPeriods)
#Rate of interests on bank loans - exogenously set
rl_bar=matrix(data=0.04,nrow=nScenarios,ncol=nPeriods)
#Rate of interests on bank deposits
rm=matrix(data=0.04,nrow=nScenarios,ncol=nPeriods)
#Wage rate
w=matrix(data=0.86,nrow=nScenarios,ncol=nPeriods)
#Wage Bill
wbd=matrix(data=0,nrow=nScenarios,ncol=nPeriods)
#Supply of Wages
wbs=matrix(data=0,nrow=nScenarios,ncol=nPeriods)
#Income
y=matrix(data=200,nrow=nScenarios,ncol=nPeriods)
#Disposable income of households
yd=matrix(data=0,nrow=nScenarios,ncol=nPeriods)
#Set autonomous component of consumption as a matrix (for shocks)
alpha0=matrix(data=25,nrow=nScenarios,ncol=nPeriods)
#Set autonomous propensity to consume out of income as a matrix (for shocks)
alpha1=matrix(data=0.75,nrow=nScenarios,ncol=nPeriods)
#STEP 3: Set values for parameters and exogenous variables (excluding shocks)
alpha2=0.1 #Propensity to consume out of wealth
delta=0.1 #Depreciation rate
gamma=0.15 #Reaction speed of adjustment of capital to its target value
kappa=1 #Capital-Output ratio
#Choose scenario
for (j in 1:nScenarios){
#Define time loop
for (i in 2:nPeriods){
#Define iterations
for (iterations in 1:100){
#Introduce shocks
if (i>=6 && j==2){
alpha0[j,i]=28
}
if (i>=6 && j==3){
alpha1[j,i]=0.74
}
#STEP 4: Model
#Households
yd[j,i] = wbd[j,i] + rm[j,i-1]*mh[j,i-1]
mh[j,i] = mh[j,i-1] + yd[j,i] - c[j,i]
c[j,i] = alpha0[j,i] + alpha1[j,i]*yd[j,i] + alpha2*mh[j,i-1]
#Firms
y[j,i] = c[j,i] + id[j,i]
kt[j,i] = kappa*y[j,i-1]
da[j,i] = delta*k[j,i-1]
af[j,i] = da[j,i]
id[j,i] = gamma*(kt[j,i] - k[j,i-1]) + da[j,i]
k[j,i] = k[j,i-1] + id[j,i] - da[j,i]
ld[j,i] = ld[j,i-1] + id[j,i] - af[j,i]
wbd[j,i] = y[j,i] - rl[j,i-1]*ld[j,i-1] - af[j,i]
#Banks
ls[j,i] = ls[j,i-1] + (ld[j,i] - ld[j,i-1]) #Supply of bank loans
ms[j,i] = ms[j,i-1] + (ls[j,i] - ls[j,i-1])
rm[j,i] = rl[j,i]
rl[j,i] = rl_bar[j,i]
# Wage Bill equations
wbs[j,i] = w[j,i]*n[j,i] #Supply of wages
n[j,i] = y[j,i]/pr[j,i] #Labour demand
w[j,i] = wbd[j,i]/n[j,i] #Wage rate
}
}
}
#STEP 5: Consistency check (redundant equation)
# The redundant-equation check plots the gap between the two money series
# mh and ms (defined earlier in the script, outside this excerpt); if the
# model solved correctly the line should sit flat at (numerically) zero.
plot(mh[1,2:nPeriods]-ms[1,2:nPeriods], type="l", ylim = range(-5,5))
#STEP 6: Create and display graphs
# "1958":"2001" coerces the character endpoints to numeric, yielding the 44
# year labels 1958..2001 matched by at=1:44 in the axis() calls below.
x=c("1958":"2001")
#Figure 7.1
# Rows of yd/c/... index experiments; columns 2:45 are simulation periods.
plot(yd[2,2:45],xaxt='n',type="l",col="1",lwd=2,lty=1,font.main=1,cex.main=0.75,main="Figure 7.1: Evolution of household disposable income and \n consumption, following an increase in autonomous \n consumption expenditures",ylab = '',xlab = '',ylim=range(180,210))
# c is the consumption matrix (per the legend labels); calls to c(...) still
# resolve to base::c because R looks up function bindings separately.
lines(c[2,2:45],type="l",lwd=2,lty=2,col="4")
#par(xpd=TRUE)
legend("bottomright",c("Disposable income","Consumption"), bty = "n", cex = 0.8, lty=c(1,2), lwd=c(2,2), col = c(1,4), box.lwd=0)
axis(side=1,at=1:44,labels=x,tck=-0.07)
#Figure 7.2
plot(id[2,2:45],xaxt='n',type="l",col="2",lwd=2,lty=1,font.main=1,cex.main=0.75,main="Figure 7.2: Evolution of gross investment and disposable investment, \n following an increase in autonomous consumption expenditures",ylab = '',xlab = '',ylim=range(19,24))
lines(da[2,2:45],type="l",lwd=2,lty=2,col="3")
#par(xpd=TRUE)
legend(10,21.5,c("Gross investment","Replacement investment \n (capital depreciation)"), bty = "n", cex = 0.8, lty=c(1,1), lwd=c(2,2), col = c(2,3), box.lwd=0)
axis(side=1,at=1:44,labels=x,tck=-0.07)
#Figure 7.3
# Experiment 3 (row 3): increase in the propensity to save.
plot(yd[3,2:45],xaxt='n',type="l",col="1",lwd=2,lty=1,font.main=1,cex.main=0.75,main="Figure 7.3: Evolution of household disposable income and \n consumption, following an increase in the propensity \n to save out of disposable income",ylab = '',xlab = '',ylim=range(165,180))
lines(c[3,2:45],type="l",lwd=2,lty=2,col="5")
#par(xpd=TRUE)
legend("topright",c("Disposable income","Consumption"), bty = "n", cex = 0.8, lty=c(1,2), lwd=c(2,2), col = c(1,5), box.lwd=0)
axis(side=1,at=1:44,labels=x,tck=-0.07)
#Figure 7.4
# Output-to-capital ratio, used as a proxy for capacity utilisation (per the legend).
plot(y[3,2:45]/k[3,2:45],xaxt='n',type="l",col="2",lwd=2,lty=1,font.main=1,cex.main=0.75,main="Figure 7.4: Evolution of the output to capital ratio following \n an increase in the propensity to save out of disposable income",ylab = '',xlab = '',ylim=range(0.94,1.001))
#par(xpd=TRUE)
legend(15,0.97,c("Output to capital ratio \n (a proxy for the output \n to capacity ratio)"), bty = "n", cex = 0.8, lty=c(1), lwd=c(2), col = c(2), box.lwd=0)
axis(side=1,at=1:44,labels=x,tck=-0.07) |
\name{simfun}
\alias{simfun}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Create a function to simulate data
}
\description{
This function is used to create a new function that will simulate data. This could be used by a teacher to create homework or test conditions that the students would then simulate data from (each student could have their own unique data set) or this function could be used in simulations for power or other values of interest.
}
\usage{
simfun(expr, drop, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{expr}{
This is an expression, usually just one or more statements, that will generate the simulated data.
}
\item{drop}{
A character vector of names of objects/columns that will be dropped from the return value. These are usually intermediate objects or parameter values that you don't want carried into the final returned object.
}
\item{\dots}{
Additional named items that will be in the environment when \code{expr} is evaluated.
}
}
\details{
This function creates another function to simulate data. You supply the general ideas of the simulation to this function and the resulting function can then be used to create simulated datasets. The resulting function can then be given to students for them to simulate datasets, or used locally as part of larger simulations.
The environment where the expression is evaluated will have all the columns or elements of the \code{data} argument available as well as the \code{data} argument itself. Any variables/parameters passed through \code{...} in the original function will also be available. You then supply the code based on those variables to create the simulated data. The names of any columns or parameters submitted as part of \code{data} will need to match the code exactly (provide specific directions to the users on what columns need to be named). Remember that indexing using factors indexes based on the underlying integers not the character representation. See the examples for details.
The resulting function can be saved and loaded/attached in different R sessions (it is important to use \code{save} rather than something like \code{dput} so that the environment of the function is preserved).
The function includes an optional seed that will be used with the \code{\link{char2seed}} function (if the seed is a character) so that each student could use a unique but identifiable seed (such as their name or something based on their name) so that each student will use a different dataset, but the instructor will be able to generate the exact same dataset to check answers.
The "True" parameters are hidden in the environment of the function so the student will not see the "true" values by simply printing the function. However an intermediate level R programmer/user would be able to extract the simulation parameters (but the correct homework or test answer will not be the simulation parameters).
}
\value{
The return value is a function that will generate simulated datasets. The function will have 2 arguments, \code{data} and \code{seed}. The \code{data} argument can be either a data frame of the predictor variables (study design) or a list of simulation parameters. The \code{seed} argument will be passed on to \code{\link{set.seed}} if it is numeric and \code{\link{char2seed}} if it is a character.
The return value of this function is a dataframe with the simulated data and any explanatory variables passed to the function.
See the examples for how to use the result function.
}
\author{Greg Snow, \email{greg.snow@imail.org}}
\note{
This function was not designed for speed, if you are doing long simulations then hand crafting the simulation function will probably run quicker than one created using this function.
Like the prediction functions the data frame passed in as the data argument will need to have exact names of the columns to match with the code (including capitalization).
This function is different from the \code{\link{simulate}} functions in that it allows for different sample sizes, user specified parameters, and different predictor variables.
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link{set.seed}}, \code{\link{char2seed}}, \code{\link{within}}, \code{\link{simulate}}, \code{\link{save}}, \code{\link{load}}, \code{\link{attach}}
}
\examples{
# Create a function to simulate heights for a given dataset
simheight <- simfun( {h <- c(64,69); height<-h[sex]+ rnorm(10,0,3)}, drop='h' )
my.df <- data.frame(sex=rep(c('Male','Female'),each=5))
simdat <- simheight(my.df)
t.test(height~sex, data=simdat)
# a more general version, and have the expression predefined
# (note that this assumes that the levels are Female, Male in that order)
myexpr <- quote({
n <- length(sex)
h <- c(64,69)
height <- h[sex] + rnorm(n,0,3)
})
simheight <- simfun(eval(myexpr), drop=c('n','h'))
my.df <- data.frame(sex=sample(rep(c('Male','Female'),c(5,10))))
(simdat <- simheight(my.df))
# similar to above, but use named parameter vector and index by names
myexpr <- quote({
n <- length(sex)
height <- h[ as.character(sex)] + rnorm(n,0,sig)
})
simheight <- simfun(eval(myexpr), drop=c('n','h','sig'),
h=c(Male=69,Female=64), sig=3)
my.df <- data.frame( sex=sample(c('Male','Female'),100, replace=TRUE))
(simdat <- simheight(my.df, seed='example'))
# Create a function to simulate Sex and Height for a given sample size
# (actually it will generate n males and n females for a total of 2*n samples)
# then use it in a set of simulations
simheight <- simfun( {sex <- factor(rep(c('Male','Female'),each=n))
height <- h[sex] + rnorm(2*n,0,s)
}, drop=c('h','n'), h=c(64,69), s=3)
(simdat <- simheight(list(n=10)))
out5 <- replicate(1000, t.test(height~sex, data=simheight(list(n= 5)))$p.value)
out15 <- replicate(1000, t.test(height~sex, data=simheight(list(n=15)))$p.value)
mean(out5 <= 0.05)
mean(out15 <= 0.05)
# use a fixed population
simstate <- simfun({
tmp <- state.df[as.character(State),]
Population <- tmp[['Population']]
Income <- tmp[['Income']]
Illiteracy <- tmp[['Illiteracy']]
}, state.df=as.data.frame(state.x77), drop=c('tmp','state.df'))
simstate(data.frame(State=sample(state.name,10)))
# Use simulation, but override setting the seed
simheight <- simfun({
set.seed(1234)
h <- c(64,69)
sex <- factor(rep(c('Female','Male'),each=50))
height <- round(rnorm(100, rep(h,each=50),3),1)
sex <- sex[ID]
height <- height[ID]
}, drop='h')
(newdat <- simheight(list(ID=c(1:5,51:55))))
(newdat2<- simheight(list(ID=1:10)))
# Using a fitted object
fit <- lm(Fertility ~ . , data=swiss)
simfert <- simfun({
Fertility <- predict(fit, newdata=data)
Fertility <- Fertility + rnorm(length(Fertility),0,summary(fit)$sigma)
}, drop=c('fit'), fit=fit)
tmpdat <- as.data.frame(lapply(swiss[,-1],
function(x) round(runif(100, min(x), max(x)))))
names(tmpdat) <- names(swiss)[-1]
fertdat <- simfert(tmpdat)
head(fertdat)
rbind(coef(fit), coef(lm(Fertility~., data=fertdat)))
# simulate a nested mixed effects model
simheight <- simfun({
n.city <- length(unique(city))
n.state <- length(unique(state))
n <- length(city)
height <- h[sex] + rnorm(n.state,0,sig.state)[state] +
rnorm(n.city,0,sig.city)[city] + rnorm(n,0,sig.e)
}, sig.state=1, sig.city=0.5, sig.e=3, h=c(64,69),
drop=c('sig.state','sig.city','sig.e','h','n.city','n.state','n'))
tmpdat <- data.frame(state=gl(5,20), city=gl(10,10),
sex=gl(2,5,length=100, labels=c('F','M')))
heightdat <- simheight(tmpdat)
# similar to above, but include cost information, this assumes that
# each new state costs $100, each new city is $10, and each subject is $1
# this shows 2 possible methods
simheight <- simfun({
n.city <- length(unique(city))
n.state <- length(unique(state))
n <- length(city)
height <- h[sex] + rnorm(n.state,0,sig.state)[state] +
rnorm(n.city,0,sig.city)[city] + rnorm(n,0,sig.e)
cost <- 100 * (!duplicated(state)) + 10*(!duplicated(city)) + 1
cat('The total cost for this design is $', 100*n.state+10*n.city+1*n,
'\n', sep='')
}, sig.state=1, sig.city=0.5, sig.e=3, h=c(64,69),
drop=c('sig.state','sig.city','sig.e','h','n.city','n.state','n'))
tmpdat <- data.frame(state=gl(5,20), city=gl(10,10),
sex=gl(2,5,length=100, labels=c('F','M')))
heightdat <- simheight(tmpdat)
sum(heightdat$cost)
# another mixed model method
simheight <- simfun({
state <- gl(n.state, n/n.state)
city <- gl(n.city*n.state, n/n.city/n.state)
sex <- gl(2, n.city, length=n, labels=c('F','M') )
height <- h[sex] + rnorm(n.state,0,sig.state)[state] +
rnorm(n.city*n.state,0,sig.city)[city] + rnorm(n,0,sig.e)
}, drop=c('n.state','n.city','n','sig.city','sig.state','sig.e','h'))
heightdat <- simheight( list(
n.state=5, n.city=2, n=100, sig.state=10, sig.city=3, sig.e=1, h=c(64,69)
))
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ datagen }
\keyword{ design }
| /man/simfun.Rd | no_license | svats2k/TeachingDemos | R | false | false | 9,316 | rd | \name{simfun}
\alias{simfun}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Create a function to simulate data
}
\description{
This function is used to create a new function that will simulate data. This could be used by a teacher to create homework or test conditions that the students would then simulate data from (each student could have their own unique data set) or this function could be used in simulations for power or other values of interest.
}
\usage{
simfun(expr, drop, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{expr}{
This is an expression, usually just one or more statements, that will generate the simulated data.
}
\item{drop}{
A character vector of names of objects/columns that will be dropped from the return value. These are usually intermediate objects or parameter values that you don't want carried into the final returned object.
}
\item{\dots}{
Additional named items that will be in the environment when \code{expr} is evaluated.
}
}
\details{
This function creates another function to simulate data. You supply the general ideas of the simulation to this function and the resulting function can then be used to create simulated datasets. The resulting function can then be given to students for them to simulate datasets, or used locally as part of larger simulations.
The environment where the expression is evaluated will have all the columns or elements of the \code{data} argument available as well as the \code{data} argument itself. Any variables/parameters passed through \code{...} in the original function will also be available. You then supply the code based on those variables to create the simulated data. The names of any columns or parameters submitted as part of \code{data} will need to match the code exactly (provide specific directions to the users on what columns need to be named). Remember that indexing using factors indexes based on the underlying integers not the character representation. See the examples for details.
The resulting function can be saved and loaded/attached in different R sessions (it is important to use \code{save} rather than something like \code{dput} so that the environment of the function is preserved).
The function includes an optional seed that will be used with the \code{\link{char2seed}} function (if the seed is a character) so that each student could use a unique but identifiable seed (such as their name or something based on their name) so that each student will use a different dataset, but the instructor will be able to generate the exact same dataset to check answers.
The "True" parameters are hidden in the environment of the function so the student will not see the "true" values by simply printing the function. However an intermediate level R programmer/user would be able to extract the simulation parameters (but the correct homework or test answer will not be the simulation parameters).
}
\value{
The return value is a function that will generate simulated datasets. The function will have 2 arguments, \code{data} and \code{seed}. The \code{data} argument can be either a data frame of the predictor variables (study design) or a list of simulation parameters. The \code{seed} argument will be passed on to \code{\link{set.seed}} if it is numeric and \code{\link{char2seed}} if it is a character.
The return value of this function is a dataframe with the simulated data and any explanatory variables passed to the function.
See the examples for how to use the result function.
}
\author{Greg Snow, \email{greg.snow@imail.org}}
\note{
This function was not designed for speed, if you are doing long simulations then hand crafting the simulation function will probably run quicker than one created using this function.
Like the prediction functions the data frame passed in as the data argument will need to have exact names of the columns to match with the code (including capitalization).
This function is different from the \code{\link{simulate}} functions in that it allows for different sample sizes, user specified parameters, and different predictor variables.
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{\link{set.seed}}, \code{\link{char2seed}}, \code{\link{within}}, \code{\link{simulate}}, \code{\link{save}}, \code{\link{load}}, \code{\link{attach}}
}
\examples{
# Create a function to simulate heights for a given dataset
simheight <- simfun( {h <- c(64,69); height<-h[sex]+ rnorm(10,0,3)}, drop='h' )
my.df <- data.frame(sex=rep(c('Male','Female'),each=5))
simdat <- simheight(my.df)
t.test(height~sex, data=simdat)
# a more general version, and have the expression predefined
# (note that this assumes that the levels are Female, Male in that order)
myexpr <- quote({
n <- length(sex)
h <- c(64,69)
height <- h[sex] + rnorm(n,0,3)
})
simheight <- simfun(eval(myexpr), drop=c('n','h'))
my.df <- data.frame(sex=sample(rep(c('Male','Female'),c(5,10))))
(simdat <- simheight(my.df))
# similar to above, but use named parameter vector and index by names
myexpr <- quote({
n <- length(sex)
height <- h[ as.character(sex)] + rnorm(n,0,sig)
})
simheight <- simfun(eval(myexpr), drop=c('n','h','sig'),
h=c(Male=69,Female=64), sig=3)
my.df <- data.frame( sex=sample(c('Male','Female'),100, replace=TRUE))
(simdat <- simheight(my.df, seed='example'))
# Create a function to simulate Sex and Height for a given sample size
# (actually it will generate n males and n females for a total of 2*n samples)
# then use it in a set of simulations
simheight <- simfun( {sex <- factor(rep(c('Male','Female'),each=n))
height <- h[sex] + rnorm(2*n,0,s)
}, drop=c('h','n'), h=c(64,69), s=3)
(simdat <- simheight(list(n=10)))
out5 <- replicate(1000, t.test(height~sex, data=simheight(list(n= 5)))$p.value)
out15 <- replicate(1000, t.test(height~sex, data=simheight(list(n=15)))$p.value)
mean(out5 <= 0.05)
mean(out15 <= 0.05)
# use a fixed population
simstate <- simfun({
tmp <- state.df[as.character(State),]
Population <- tmp[['Population']]
Income <- tmp[['Income']]
Illiteracy <- tmp[['Illiteracy']]
}, state.df=as.data.frame(state.x77), drop=c('tmp','state.df'))
simstate(data.frame(State=sample(state.name,10)))
# Use simulation, but override setting the seed
simheight <- simfun({
set.seed(1234)
h <- c(64,69)
sex <- factor(rep(c('Female','Male'),each=50))
height <- round(rnorm(100, rep(h,each=50),3),1)
sex <- sex[ID]
height <- height[ID]
}, drop='h')
(newdat <- simheight(list(ID=c(1:5,51:55))))
(newdat2<- simheight(list(ID=1:10)))
# Using a fitted object
fit <- lm(Fertility ~ . , data=swiss)
simfert <- simfun({
Fertility <- predict(fit, newdata=data)
Fertility <- Fertility + rnorm(length(Fertility),0,summary(fit)$sigma)
}, drop=c('fit'), fit=fit)
tmpdat <- as.data.frame(lapply(swiss[,-1],
function(x) round(runif(100, min(x), max(x)))))
names(tmpdat) <- names(swiss)[-1]
fertdat <- simfert(tmpdat)
head(fertdat)
rbind(coef(fit), coef(lm(Fertility~., data=fertdat)))
# simulate a nested mixed effects model
simheight <- simfun({
n.city <- length(unique(city))
n.state <- length(unique(state))
n <- length(city)
height <- h[sex] + rnorm(n.state,0,sig.state)[state] +
rnorm(n.city,0,sig.city)[city] + rnorm(n,0,sig.e)
}, sig.state=1, sig.city=0.5, sig.e=3, h=c(64,69),
drop=c('sig.state','sig.city','sig.e','h','n.city','n.state','n'))
tmpdat <- data.frame(state=gl(5,20), city=gl(10,10),
sex=gl(2,5,length=100, labels=c('F','M')))
heightdat <- simheight(tmpdat)
# similar to above, but include cost information, this assumes that
# each new state costs $100, each new city is $10, and each subject is $1
# this shows 2 possible methods
simheight <- simfun({
n.city <- length(unique(city))
n.state <- length(unique(state))
n <- length(city)
height <- h[sex] + rnorm(n.state,0,sig.state)[state] +
rnorm(n.city,0,sig.city)[city] + rnorm(n,0,sig.e)
cost <- 100 * (!duplicated(state)) + 10*(!duplicated(city)) + 1
cat('The total cost for this design is $', 100*n.state+10*n.city+1*n,
'\n', sep='')
}, sig.state=1, sig.city=0.5, sig.e=3, h=c(64,69),
drop=c('sig.state','sig.city','sig.e','h','n.city','n.state','n'))
tmpdat <- data.frame(state=gl(5,20), city=gl(10,10),
sex=gl(2,5,length=100, labels=c('F','M')))
heightdat <- simheight(tmpdat)
sum(heightdat$cost)
# another mixed model method
simheight <- simfun({
state <- gl(n.state, n/n.state)
city <- gl(n.city*n.state, n/n.city/n.state)
sex <- gl(2, n.city, length=n, labels=c('F','M') )
height <- h[sex] + rnorm(n.state,0,sig.state)[state] +
rnorm(n.city*n.state,0,sig.city)[city] + rnorm(n,0,sig.e)
}, drop=c('n.state','n.city','n','sig.city','sig.state','sig.e','h'))
heightdat <- simheight( list(
n.state=5, n.city=2, n=100, sig.state=10, sig.city=3, sig.e=1, h=c(64,69)
))
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ datagen }
\keyword{ design }
|
# Network visualisations of Healthy Ride bike-share trips (ggraph/igraph).
# NOTE(review): rm(list = ls()) in a script is an anti-pattern (it silently
# wipes the interactive workspace); consider removing it.
library(tidyverse)
library(ggraph)
library(igraph)
theme_set(theme_graph())
set_graph_style(foreground = 'grey80')
set.seed(1)
rm(list = ls())
# Loads the trip data; presumably defines data_long and data -- verify,
# since `data` is not created anywhere in this file.
source("scripts/load_data_long.R")
data_long
# One row per trip, with separate from-/to-station columns.
data_wide <- data_long %>%
  spread(station_name_type, station_name)
# The 3 most common origin stations (top_n() keeps ties, so this can exceed 3).
top_stations <- data %>%
  select(from_station_name) %>%
  count(from_station_name, sort = TRUE) %>%
  top_n(3, n) %>%
  select(from_station_name) %>%
  unlist()
# Edge list: trip counts per station pair, origins restricted to top_stations.
network_data <- data_wide %>%
  select(from_station_name, to_station_name) %>%
  filter(from_station_name %in% top_stations) %>%
  count(from_station_name, to_station_name, sort = TRUE)
network_data %>%
  select(from_station_name) %>%
  unique()
graph <- graph_from_data_frame(network_data)
# Fan edges weighted by trip count; explicit loop edges for same-station trips.
ggraph(graph) +
  geom_edge_fan(aes(edge_alpha = n,
                    edge_width = n)) +
  geom_edge_loop() +
  geom_node_point() +
  geom_node_label(aes(label = name)) +
  scale_edge_alpha_continuous(range = c(.5, 1)) +
  theme_graph()
#?geom_edge_
# Two-node summary graph: counts of same-station vs different-station trips.
simple_network <- data_wide %>%
  select(from_station_name, to_station_name) %>%
  mutate(is_same = ifelse(from_station_name == to_station_name, "same_location", "different_location")) %>%
  group_by(is_same) %>%
  count() %>%
  spread(is_same, n) %>%
  gather(location_type, n) %>%
  mutate(from_location = "from_location") %>%
  select(from_location, location_type, n) %>%
  graph_from_data_frame()
#get rid of "same location". try just having a loop
ggraph(simple_network, layout = "graphopt") +
  geom_edge_diagonal(aes(width = n),
                     arrow = arrow(length = unit(3, 'mm')),
                     end_cap = circle(10, 'mm')) +
  geom_node_label(aes(label = name)) +
  scale_edge_width_continuous(range = c(1, 3)) +
  labs(title = "Do most trips end at a different Healthy Ride station?")
# The 10 most common origin stations (again, top_n() may keep ties).
top_locations <- data %>%
  select(from_station_name) %>%
  count(from_station_name, sort = TRUE) %>%
  top_n(10, n) %>%
  select(from_station_name) %>%
  unlist()
# Append the two summary-node names so they get labelled in the final plot.
top_locations[11:12] <- c("Same station", "Different station")
#from_station_name = ifelse(from_station_name %in% top_locations, from_station_name, "the_rest")
# Per-origin counts of same- vs different-station trips, split by weekday flag.
simple_network_2 <- data_wide %>%
  select(from_station_name, to_station_name, is_weekday) %>%
  mutate(is_same = ifelse(from_station_name == to_station_name, "Same station", "Different station")) %>%
  mutate(from_location = "from_location") %>%
  select(from_station_name, is_same, is_weekday) %>%
  #filter(from_station_name %in% top_locations) %>%
  group_by(from_station_name, is_same, is_weekday) %>%
  summarize(number_of_rides = n()) %>%
  graph_from_data_frame()
data_wide %>%
  select(from_station_name, to_station_name) %>%
  mutate(is_same = ifelse(from_station_name == to_station_name, "Same station", "Different station")) %>%
  mutate(from_location = "from_location") %>%
  select(from_station_name, is_same) %>%
  group_by(from_station_name, is_same) %>%
  summarize(number_of_rides = n()) %>%
  select(from_station_name) %>%
  unique()
#maybe people that start in The Strip and end somewhere else are parking in the strip and commuting downtown?
#facet by weekday/weekend
#people might park at a bike trailhead, bike up and back to the same station, and leave. this could explain some heavy loops
ggraph(simple_network_2, layout = "dh") +
  geom_edge_diagonal(aes(edge_alpha = number_of_rides)) +
  #geom_edge_density(aes(fill = is_same)) +
  geom_node_label(aes(label = ifelse(name %in% top_locations,
                                     V(simple_network_2)$name, "")),
                  size = 3) +
  scale_edge_alpha_continuous(range = c(.1, 1)) +
  facet_edges(~is_weekday,
              ncol = 2) +
  labs(title = "Do most trips start and end at different Healthy Ride stations?",
       subtitle = "Only stations in the top 10 in terms of number of rides are labeled",
       caption = "@conor_tompkins, data from wprdc.org")
ggsave("healthy ride simple network is_weekday.png", width = 20, height = 10)
| /scripts/network graph.R | no_license | conorotompkins/healthy_ride | R | false | false | 3,986 | r | library(tidyverse)
# Network visualisations of Healthy Ride bike-share trips (ggraph/igraph).
# NOTE(review): rm(list = ls()) in a script is an anti-pattern (it silently
# wipes the interactive workspace); consider removing it.
library(ggraph)
library(igraph)
theme_set(theme_graph())
set_graph_style(foreground = 'grey80')
set.seed(1)
rm(list = ls())
# Loads the trip data; presumably defines data_long and data -- verify,
# since `data` is not created anywhere in this file.
source("scripts/load_data_long.R")
data_long
# One row per trip, with separate from-/to-station columns.
data_wide <- data_long %>%
  spread(station_name_type, station_name)
# The 3 most common origin stations (top_n() keeps ties, so this can exceed 3).
top_stations <- data %>%
  select(from_station_name) %>%
  count(from_station_name, sort = TRUE) %>%
  top_n(3, n) %>%
  select(from_station_name) %>%
  unlist()
# Edge list: trip counts per station pair, origins restricted to top_stations.
network_data <- data_wide %>%
  select(from_station_name, to_station_name) %>%
  filter(from_station_name %in% top_stations) %>%
  count(from_station_name, to_station_name, sort = TRUE)
network_data %>%
  select(from_station_name) %>%
  unique()
graph <- graph_from_data_frame(network_data)
# Fan edges weighted by trip count; explicit loop edges for same-station trips.
ggraph(graph) +
  geom_edge_fan(aes(edge_alpha = n,
                    edge_width = n)) +
  geom_edge_loop() +
  geom_node_point() +
  geom_node_label(aes(label = name)) +
  scale_edge_alpha_continuous(range = c(.5, 1)) +
  theme_graph()
#?geom_edge_
# Two-node summary graph: counts of same-station vs different-station trips.
simple_network <- data_wide %>%
  select(from_station_name, to_station_name) %>%
  mutate(is_same = ifelse(from_station_name == to_station_name, "same_location", "different_location")) %>%
  group_by(is_same) %>%
  count() %>%
  spread(is_same, n) %>%
  gather(location_type, n) %>%
  mutate(from_location = "from_location") %>%
  select(from_location, location_type, n) %>%
  graph_from_data_frame()
#get rid of "same location". try just having a loop
ggraph(simple_network, layout = "graphopt") +
  geom_edge_diagonal(aes(width = n),
                     arrow = arrow(length = unit(3, 'mm')),
                     end_cap = circle(10, 'mm')) +
  geom_node_label(aes(label = name)) +
  scale_edge_width_continuous(range = c(1, 3)) +
  labs(title = "Do most trips end at a different Healthy Ride station?")
# The 10 most common origin stations (again, top_n() may keep ties).
top_locations <- data %>%
  select(from_station_name) %>%
  count(from_station_name, sort = TRUE) %>%
  top_n(10, n) %>%
  select(from_station_name) %>%
  unlist()
# Append the two summary-node names so they get labelled in the final plot.
top_locations[11:12] <- c("Same station", "Different station")
#from_station_name = ifelse(from_station_name %in% top_locations, from_station_name, "the_rest")
# Per-origin counts of same- vs different-station trips, split by weekday flag.
simple_network_2 <- data_wide %>%
  select(from_station_name, to_station_name, is_weekday) %>%
  mutate(is_same = ifelse(from_station_name == to_station_name, "Same station", "Different station")) %>%
  mutate(from_location = "from_location") %>%
  select(from_station_name, is_same, is_weekday) %>%
  #filter(from_station_name %in% top_locations) %>%
  group_by(from_station_name, is_same, is_weekday) %>%
  summarize(number_of_rides = n()) %>%
  graph_from_data_frame()
data_wide %>%
  select(from_station_name, to_station_name) %>%
  mutate(is_same = ifelse(from_station_name == to_station_name, "Same station", "Different station")) %>%
  mutate(from_location = "from_location") %>%
  select(from_station_name, is_same) %>%
  group_by(from_station_name, is_same) %>%
  summarize(number_of_rides = n()) %>%
  select(from_station_name) %>%
  unique()
#maybe people that start in The Strip and end somewhere else are parking in the strip and commuting downtown?
#facet by weekday/weekend
#people might park at a bike trailhead, bike up and back to the same station, and leave. this could explain some heavy loops
ggraph(simple_network_2, layout = "dh") +
  geom_edge_diagonal(aes(edge_alpha = number_of_rides)) +
  #geom_edge_density(aes(fill = is_same)) +
  geom_node_label(aes(label = ifelse(name %in% top_locations,
                                     V(simple_network_2)$name, "")),
                  size = 3) +
  scale_edge_alpha_continuous(range = c(.1, 1)) +
  facet_edges(~is_weekday,
              ncol = 2) +
  labs(title = "Do most trips start and end at different Healthy Ride stations?",
       subtitle = "Only stations in the top 10 in terms of number of rides are labeled",
       caption = "@conor_tompkins, data from wprdc.org")
ggsave("healthy ride simple network is_weekday.png", width = 20, height = 10)
|
\docType{methods}
\name{CS10-method}
\alias{CS10,CDS-method}
\alias{CS10-method}
\title{S4 method CS10}
\usage{
\S4method{CS10}{CDS}(object)
}
\arguments{
\item{object}{the input CDS class object}
}
\description{
The CS10 method for CDS class
}
| /pkg/man/CS10-methods.Rd | permissive | Kevin-Jin/CDS | R | false | false | 248 | rd | \docType{methods}
\name{CS10-method}
\alias{CS10,CDS-method}
\alias{CS10-method}
\title{S4 method CS10}
\usage{
\S4method{CS10}{CDS}(object)
}
\arguments{
\item{object}{the input CDS class object}
}
\description{
The CS10 method for CDS class
}
|
# Simulate a single SIR epidemic, shift it forward in time by a zero-padding
# offset, and prepare the inputs for the multi-epidemic fitting routine
# (fitOverTimeMulti) from the 'epi' package.
# library() fails fast with an error if 'epi' is missing; require() (used
# previously) would only warn and return FALSE, letting the script continue
# into confusing downstream failures.
library(epi)
# Simulate data
fluData1 <- simSIR(0.002,0.1,400,1)
# fluData1 <- simExp(0.2,400)
# fluData2 <- simSIR(0.002,0.1,500,10)
# Get data from dataframe
# Ensure first is larger than second
# nSum <- 4
# Column 3 of the simulation output holds the positive/infectious counts.
positiveInfectious1 <- fluData1$data[,3]
# positiveInfectious <- sumData(fluData$data[,3], nSum)
# positiveInfectious2 <- fluData2$data[,3]
# positiveInfectious1 <- sumData(fluData1$data[,3], nSum)
# Offset of t0 for second epidemic
# offset1 <- 50
offset1 <- 50
# offset2 <- 100
# Total length of the combined data assuming last epidemic is longest time
totalLength <- offset1 + length(positiveInfectious1)
# Padding of zeros to offset data
positiveInfectiousPadStart1 <- array(0, offset1)
# Combine data with padding offset zeros
# Add together the different predicted infectious values truncated to required size
# NOTE: the name `data` shadows base::data(); kept for compatibility with the
# fitOverTimeMulti() call at the end of the script.
data <- c(positiveInfectiousPadStart1, positiveInfectious1)
# data <- positiveInfectious1
tmax <- length(data)
# seq_len() replaces c(1:tmax): the wrapping c() was a no-op, and seq_len()
# is also safe should tmax ever be 0.
times <- seq_len(tmax)
# Fitting epidemics
startOffset <- 1
endOffset <- 1
minTruncation <- 4
offsets <- list(startOffset=startOffset, endOffset=endOffset, minTruncation=minTruncation)
# Target rSquare error
thresholds <- list(lim=0.9)
# Epidemic type array: epidemic types correspond to the number of parameters of the sub epidemic model
epiTypes <- c(0)
# epiTypes <- c(4)
initParams <- c()
# initParams <- c(log(0.001), log(0.1), log(10), 0)
# Init Conds = S0, I0, R0
# I0 from first data point
initConds <- c()
# initConds <- c(1,log(data[startOffset + 1]),0,0)
plotConfig <- list(title="Synthedemic Decomposition of Simulated Data", fileName="output/graphs/offset/", dataFile="output/data/offset/offsetData.RData", envFile="output/data/offset/offsetEnv.RData", pat=5, rat=30)
# Fit parameters
fitOverTimeMulti("LMS", times, data, initConds, initParams, epiTypes, offsets, thresholds, plotConfig) | /launch/processOffset.r | no_license | tomwilding/emp | R | false | false | 1,840 | r | require('epi')
# Simulate data
# NOTE(review): this script loads 'epi' with require(), which only warns and
# returns FALSE when the package is missing; library() would fail fast.
fluData1 <- simSIR(0.002,0.1,400,1)
# fluData1 <- simExp(0.2,400)
# fluData2 <- simSIR(0.002,0.1,500,10)
# Get data from dataframe
# Ensure first is larger than second
# nSum <- 4
# Column 3 of the simulation output holds the positive/infectious counts.
positiveInfectious1 <- fluData1$data[,3]
# positiveInfectious <- sumData(fluData$data[,3], nSum)
# positiveInfectious2 <- fluData2$data[,3]
# positiveInfectious1 <- sumData(fluData1$data[,3], nSum)
# Offset of t0 for second epidemic
# offset1 <- 50
offset1 <- 50
# offset2 <- 100
# Total length of the combined data assuming last epidemic is longest time
totalLength <- offset1 + length(positiveInfectious1)
# Padding of zeros to offset data
positiveInfectiousPadStart1 <- array(0, offset1)
# Combine data with padding offset zeros
# Add together the different predicted infectious values truncated to required size
# NOTE(review): the name `data` shadows base::data().
data <- c(positiveInfectiousPadStart1, positiveInfectious1)
# data <- positiveInfectious1
tmax <- length(data)
times <- c(1:tmax)
# Fitting epidemics
startOffset <- 1
endOffset <- 1
minTruncation <- 4
offsets <- list(startOffset=startOffset, endOffset=endOffset, minTruncation=minTruncation)
# Target rSquare error
thresholds <- list(lim=0.9)
# Epidemic type array: epidemic types correspond to the number of parameters of the sub epidemic model
epiTypes <- c(0)
# epiTypes <- c(4)
initParams <- c()
# initParams <- c(log(0.001), log(0.1), log(10), 0)
# Init Conds = S0, I0, R0
# I0 from first data point
initConds <- c()
# initConds <- c(1,log(data[startOffset + 1]),0,0)
plotConfig <- list(title="Synthedemic Decomposition of Simulated Data", fileName="output/graphs/offset/", dataFile="output/data/offset/offsetData.RData", envFile="output/data/offset/offsetEnv.RData", pat=5, rat=30)
# Fit parameters
fitOverTimeMulti("LMS", times, data, initConds, initParams, epiTypes, offsets, thresholds, plotConfig) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parsing_functions.R
\name{.go_ids_lookup}
\alias{.go_ids_lookup}
\title{GAPGOM internal - .go_ids_lookup()}
\usage{
.go_ids_lookup(ids, go_data, custom_genes = NULL, drop = NULL)
}
\arguments{
\item{ids}{general ids that you want to search for in godata.}
\item{go_data}{the queried godata necessary for the lookup}
\item{drop}{list of evidences you want to ignore.}
}
\value{
return the translation dataframe containing conversion from id to
goids.
}
\description{
This function is an internal function and should not be called by the user.
}
\details{
Looks up goids per id (entrez/ensembl).
}
\section{Notes}{
Internal function used in ().
}
\keyword{internal}
| /man/dot-go_ids_lookup.Rd | permissive | Berghopper/GAPGOM | R | false | true | 747 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/parsing_functions.R
\name{.go_ids_lookup}
\alias{.go_ids_lookup}
\title{GAPGOM internal - .go_ids_lookup()}
\usage{
.go_ids_lookup(ids, go_data, custom_genes = NULL, drop = NULL)
}
\arguments{
\item{ids}{general ids that you want to search for in godata.}
\item{go_data}{the queried godata necessary for the lookup}
\item{drop}{list of evidences you want to ignore.}
}
\value{
return the translation dataframe containing conversion from id to
goids.
}
\description{
This function is an internal function and should not be called by the user.
}
\details{
Looks up goids per id (entrez/ensembl).
}
\section{Notes}{
Internal function used in ().
}
\keyword{internal}
|
library(dplyr)
source('preprocess/load_data/data_loader.R')
load_hotel_reserve()
# The section below is the version shown in the book.
# The condition on checkin_date produces a logical (TRUE/FALSE) vector;
# joining two conditions with & keeps only rows where both are TRUE.
# Passing that logical vector as the first index of reserve_tb selects the
# matching rows, and leaving the second index empty keeps every column.
reserve_tb[reserve_tb$checkin_date >= '2016-10-12' &
           reserve_tb$checkin_date <= '2016-10-13', ]
| /preprocess/002_selection/02/a_r_1_not_awesome.R | permissive | KazukiUeno/awesomebook | R | false | false | 641 | r | library(dplyr)
source('preprocess/load_data/data_loader.R')
load_hotel_reserve()
# The section below is the version shown in the book.
# The condition on checkin_date produces a logical (TRUE/FALSE) vector;
# joining two conditions with & keeps only rows where both are TRUE.
# Passing that logical vector as the first index of reserve_tb selects the
# matching rows, and leaving the second index empty keeps every column.
reserve_tb[reserve_tb$checkin_date >= '2016-10-12' &
           reserve_tb$checkin_date <= '2016-10-13', ]
|
library(dplyr)
library(ggplot2)
library(RColorBrewer)
# NOTE(review): hard-coded absolute path; setwd() in a script hurts
# portability -- consider running from the project root instead.
setwd("/Users/Desktop/melanoma")
# Load the precomputed selection-analysis object; the double assignment
# binds both names to the same object.
kirc_02=analysis_size=readRDS("analysis_size_all_old_condition.rds")
# Derive the gene symbol from each variant id (text before the first "_").
kirc_02@selection_results$gene=apply(data.frame(kirc_02@selection_results[,1]),1,FUN = function(x){strsplit(x,"_")[[1]][1]})
# Summarise selection-intensity statistics, within one progression group,
# for genes that carry more than one variant.
#   data:       data.frame with columns variant, gene, selection_intensity, group
#   group_name: value of 'group' to keep (e.g. "Primary" or "Metastasis")
# Returns one row per multi-variant gene: cumulative/mean/sd/max selection
# intensity, the distinct-variant count, and the columns of that gene's
# top-ranked variant; ordered by cum_si then n_variant (descending).
summary_multivar <- function(data, group_name) {
  # keep the requested group, rank variants by selection intensity,
  # and drop variants with selection_intensity <= 1
  data_clean <- data %>%
    filter(group == group_name) %>%
    arrange(desc(selection_intensity)) %>%
    filter(selection_intensity > 1)
  # Summarise information of gene with multiple variants
  info1 <- data_clean %>% group_by(gene) %>%
    summarise(cum_si = sum(selection_intensity), # change sum to mean and sd
              mean_si = mean(selection_intensity),
              sd = sd(selection_intensity),
              max_si = max(selection_intensity),
              n_variant = n_distinct(variant)) %>%
    filter(n_variant > 1)
  # first row per gene is its highest-intensity variant (data_clean is sorted)
  top_variant <- data_clean %>%
    group_by(gene) %>% filter(row_number() == 1)
  # NOTE(review): by.x is supplied without by.y, so merge() matches by.y
  # against the default (shared column names) -- confirm this is intended;
  # by = "gene" looks like the likely intent.
  merge_info <- merge(info1, top_variant[, -3], by.x = "gene") %>%
    arrange(desc(cum_si), desc(n_variant))
  return(merge_info)
}
# Build a tidy variant table (variant, gene, selection intensity, progression
# group), keep only Primary/Metastasis samples, then pick each group's
# top-10 genes by mean selection intensity.
stage_data <-
  data.frame(variant = kirc_02@selection_results$variant,
             gene = kirc_02@selection_results$gene,
             selection_intensity = kirc_02@selection_results$selection_intensity,
             group = kirc_02@selection_results$progression) %>%
  filter(selection_intensity > 1)
stage_data=stage_data[stage_data$group %in% c("Metastasis","Primary"),]
stage_data$group=droplevels(stage_data$group)
SR=data.frame(kirc_02@selection_results)
stage1_info <- summary_multivar(stage_data, group_name = "Primary")
stage234_info <- summary_multivar(stage_data, group_name = "Metastasis")
genes_Pri=as.character(stage1_info[order(stage1_info$mean_si,decreasing = T),]$gene[1:10])
genes_Meta=as.character(stage234_info[order(stage234_info$mean_si,decreasing = T),]$gene[1:10])
# NOTE(review): the first assignment below is immediately overwritten, so
# stage_plot_data -- and therefore BOTH pdfs produced later in this script --
# are built from the Metastasis top-10 gene set. Rerun with the other
# assignment active to produce the Primary version.
top_gene_stage=genes_Pri
top_gene_stage=genes_Meta
stage_plot_data <- stage_data %>%
  filter(gene %in% top_gene_stage) %>%
  mutate(gene = factor(gene, levels = top_gene_stage))
# Draw grouped boxplots of selection intensity (group x gene).
#   data        : data.frame with selection_intensity, group, gene
#   group_names : legend labels (order should match the levels of 'group')
#   genes       : x-axis labels, one per gene (ticks placed at 1.5, 3.5, ... 19.5)
#   colormap1/2 : RColorBrewer palette names for the two groups
#   color_num   : which shade (1-6) to take from each palette
#   yticks      : y tick positions, expressed in units of 10^4
#   main        : plot title
si_boxplot <- function(data, group_names, genes, colormap1,colormap2, color_num,yticks,main) {
  palette <- c(brewer.pal(6, colormap1)[color_num],brewer.pal(6, colormap2)[color_num])
  # NOTE(review): 'myplt' is assigned but never used afterwards.
  myplt <-
    boxplot(selection_intensity ~ group*gene, data = data, boxwex=0.4,
            col = palette, xlab = "", ylab = "",
            xaxt="n", yaxt="n")
  title(ylab = expression(paste("Selection intensity /", "10"^"4")),
        mgp = c(2, 0, 0))
  title(xlab = "Gene", mgp = c(2, 0, 0))
  title(main=main)
  # gene labels centred between each pair of group boxes
  axis(1, mgp = c(0, 0.2, 0),
       at = seq(1.5 , 20 , 2),
       labels = genes,
       tick=FALSE , cex.axis=0.5)
  axis(2, at = yticks * 1e4, las=2,
       labels = yticks, cex.axis=0.6)
  # Add the grey vertical lines separating genes
  for(i in seq(0.5 , 21 , 2)){
    abline(v=i, lty=1, col="grey")
  }
  # Add a legend
  legend("topright", legend = group_names,
         col=palette,
         pch = 15, bty = "n", pt.cex = 2, cex = 1, horiz = F)
}
# Render the boxplots to pdf ('Primary' is made the reference level so it
# is drawn first in each pair).
stage_plot_data$group=relevel(stage_plot_data$group,"Primary")
pdf("melanoma_progression_Primary_top10_genes.pdf", width = 8, height = 6)
si_boxplot(stage_plot_data, c( "Primary","Metastasis"),
           top_gene_stage, "Blues","Oranges",2, yticks = seq(0, 40, 2.5),main="Top10_Primary_genes")
dev.off()
# NOTE(review): this second plot uses the same stage_plot_data and gene set
# as the first; only the file name and title differ.
pdf("melanoma_progression_Metastasis_top10_genes.pdf", width = 8, height = 6)
si_boxplot(stage_plot_data, c( "Primary","Metastasis"),
           top_gene_stage, "Blues","Oranges",2 ,yticks = seq(0, 40, 2.5),main="Top10_Met_genes")
dev.off()
| /Boxplot_SI_original.R | no_license | dongzhblake/melanoma | R | false | false | 3,600 | r | library(dplyr)
library(ggplot2)
library(RColorBrewer)
# NOTE(review): hard-coded absolute path; setwd() in a script hurts
# portability -- consider running from the project root instead.
setwd("/Users/Desktop/melanoma")
# Load the precomputed selection-analysis object; the double assignment
# binds both names to the same object.
kirc_02=analysis_size=readRDS("analysis_size_all_old_condition.rds")
# Derive the gene symbol from each variant id (text before the first "_").
kirc_02@selection_results$gene=apply(data.frame(kirc_02@selection_results[,1]),1,FUN = function(x){strsplit(x,"_")[[1]][1]})
# Summarise selection-intensity statistics, within one progression group,
# for genes that carry more than one variant.
#   data:       data.frame with columns variant, gene, selection_intensity, group
#   group_name: value of 'group' to keep (e.g. "Primary" or "Metastasis")
# Returns one row per multi-variant gene: cumulative/mean/sd/max selection
# intensity, the distinct-variant count, and the columns of that gene's
# top-ranked variant; ordered by cum_si then n_variant (descending).
summary_multivar <- function(data, group_name) {
  # keep the requested group, rank variants by selection intensity,
  # and drop variants with selection_intensity <= 1
  data_clean <- data %>%
    filter(group == group_name) %>%
    arrange(desc(selection_intensity)) %>%
    filter(selection_intensity > 1)
  # Summarise information of gene with multiple variants
  info1 <- data_clean %>% group_by(gene) %>%
    summarise(cum_si = sum(selection_intensity), # change sum to mean and sd
              mean_si = mean(selection_intensity),
              sd = sd(selection_intensity),
              max_si = max(selection_intensity),
              n_variant = n_distinct(variant)) %>%
    filter(n_variant > 1)
  # first row per gene is its highest-intensity variant (data_clean is sorted)
  top_variant <- data_clean %>%
    group_by(gene) %>% filter(row_number() == 1)
  # NOTE(review): by.x is supplied without by.y, so merge() matches by.y
  # against the default (shared column names) -- confirm this is intended;
  # by = "gene" looks like the likely intent.
  merge_info <- merge(info1, top_variant[, -3], by.x = "gene") %>%
    arrange(desc(cum_si), desc(n_variant))
  return(merge_info)
}
# Build a tidy variant table (variant, gene, selection intensity, progression
# group), keep only Primary/Metastasis samples, then pick each group's
# top-10 genes by mean selection intensity.
stage_data <-
  data.frame(variant = kirc_02@selection_results$variant,
             gene = kirc_02@selection_results$gene,
             selection_intensity = kirc_02@selection_results$selection_intensity,
             group = kirc_02@selection_results$progression) %>%
  filter(selection_intensity > 1)
stage_data=stage_data[stage_data$group %in% c("Metastasis","Primary"),]
stage_data$group=droplevels(stage_data$group)
SR=data.frame(kirc_02@selection_results)
stage1_info <- summary_multivar(stage_data, group_name = "Primary")
stage234_info <- summary_multivar(stage_data, group_name = "Metastasis")
genes_Pri=as.character(stage1_info[order(stage1_info$mean_si,decreasing = T),]$gene[1:10])
genes_Meta=as.character(stage234_info[order(stage234_info$mean_si,decreasing = T),]$gene[1:10])
# NOTE(review): the first assignment below is immediately overwritten, so
# stage_plot_data -- and therefore BOTH pdfs produced later in this script --
# are built from the Metastasis top-10 gene set. Rerun with the other
# assignment active to produce the Primary version.
top_gene_stage=genes_Pri
top_gene_stage=genes_Meta
stage_plot_data <- stage_data %>%
  filter(gene %in% top_gene_stage) %>%
  mutate(gene = factor(gene, levels = top_gene_stage))
# Draw grouped boxplots of selection intensity (group x gene).
#   data        : data.frame with selection_intensity, group, gene
#   group_names : legend labels (order should match the levels of 'group')
#   genes       : x-axis labels, one per gene (ticks placed at 1.5, 3.5, ... 19.5)
#   colormap1/2 : RColorBrewer palette names for the two groups
#   color_num   : which shade (1-6) to take from each palette
#   yticks      : y tick positions, expressed in units of 10^4
#   main        : plot title
si_boxplot <- function(data, group_names, genes, colormap1,colormap2, color_num,yticks,main) {
  palette <- c(brewer.pal(6, colormap1)[color_num],brewer.pal(6, colormap2)[color_num])
  # NOTE(review): 'myplt' is assigned but never used afterwards.
  myplt <-
    boxplot(selection_intensity ~ group*gene, data = data, boxwex=0.4,
            col = palette, xlab = "", ylab = "",
            xaxt="n", yaxt="n")
  title(ylab = expression(paste("Selection intensity /", "10"^"4")),
        mgp = c(2, 0, 0))
  title(xlab = "Gene", mgp = c(2, 0, 0))
  title(main=main)
  # gene labels centred between each pair of group boxes
  axis(1, mgp = c(0, 0.2, 0),
       at = seq(1.5 , 20 , 2),
       labels = genes,
       tick=FALSE , cex.axis=0.5)
  axis(2, at = yticks * 1e4, las=2,
       labels = yticks, cex.axis=0.6)
  # Add the grey vertical lines separating genes
  for(i in seq(0.5 , 21 , 2)){
    abline(v=i, lty=1, col="grey")
  }
  # Add a legend
  legend("topright", legend = group_names,
         col=palette,
         pch = 15, bty = "n", pt.cex = 2, cex = 1, horiz = F)
}
# Render the boxplots to pdf ('Primary' is made the reference level so it
# is drawn first in each pair).
stage_plot_data$group=relevel(stage_plot_data$group,"Primary")
pdf("melanoma_progression_Primary_top10_genes.pdf", width = 8, height = 6)
si_boxplot(stage_plot_data, c( "Primary","Metastasis"),
           top_gene_stage, "Blues","Oranges",2, yticks = seq(0, 40, 2.5),main="Top10_Primary_genes")
dev.off()
# NOTE(review): this second plot uses the same stage_plot_data and gene set
# as the first; only the file name and title differ.
pdf("melanoma_progression_Metastasis_top10_genes.pdf", width = 8, height = 6)
si_boxplot(stage_plot_data, c( "Primary","Metastasis"),
           top_gene_stage, "Blues","Oranges",2 ,yticks = seq(0, 40, 2.5),main="Top10_Met_genes")
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/securityhub_operations.R
\name{securityhub_list_invitations}
\alias{securityhub_list_invitations}
\title{Lists all Security Hub membership invitations that were sent to the
current AWS account}
\usage{
securityhub_list_invitations(MaxResults, NextToken)
}
\arguments{
\item{MaxResults}{The maximum number of items to return in the response.}
\item{NextToken}{The token that is required for pagination. On your first call to the
\code{ListInvitations} operation, set the value of this parameter to \code{NULL}.
For subsequent calls to the operation, to continue listing data, set the
value of this parameter to the value returned from the previous
response.}
}
\description{
Lists all Security Hub membership invitations that were sent to the
current AWS account.
}
\section{Request syntax}{
\preformatted{svc$list_invitations(
MaxResults = 123,
NextToken = "string"
)
}
}
\keyword{internal}
| /paws/man/securityhub_list_invitations.Rd | permissive | jcheng5/paws | R | false | true | 976 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/securityhub_operations.R
\name{securityhub_list_invitations}
\alias{securityhub_list_invitations}
\title{Lists all Security Hub membership invitations that were sent to the
current AWS account}
\usage{
securityhub_list_invitations(MaxResults, NextToken)
}
\arguments{
\item{MaxResults}{The maximum number of items to return in the response.}
\item{NextToken}{The token that is required for pagination. On your first call to the
\code{ListInvitations} operation, set the value of this parameter to \code{NULL}.
For subsequent calls to the operation, to continue listing data, set the
value of this parameter to the value returned from the previous
response.}
}
\description{
Lists all Security Hub membership invitations that were sent to the
current AWS account.
}
\section{Request syntax}{
\preformatted{svc$list_invitations(
MaxResults = 123,
NextToken = "string"
)
}
}
\keyword{internal}
|
#clusters info
# Load virome cluster assignments (full, and minus-pap), per-participant
# Shannon diversity, and sample metadata; harmonise the id column name.
clust7 <- read.csv("data_clust_7_v2.csv")
clust7np <- read.csv("data_clust_7_minus_pap_v2.csv")
# NOTE(review): this data frame reuses the name 'sd' (same as stats::sd).
sd <- read.csv("virome_individual_SD_all.csv")
clust7 <- dplyr::rename(clust7, study_id = X)
clust7np <- dplyr::rename(clust7np, study_id = X)
sd <- dplyr::rename(sd, study_id = Participants)
#merge with virome data
# join() is presumably plyr::join -- no library() call is visible here.
meta <- read.csv("viromeall_metadata_full.csv")
total <- join(clust7, clust7np, type="full")
total <- join(total, meta, type="full")
total <- join(total, sd, type="full")
#format and clean
# drop row-number columns and zero-fill NAs in HIV-related measures
total$X <- NULL
total$X.1 <- NULL
total$Med.Duration[is.na(total$Med.Duration)] <- 0
total$Duration.of.HIV.Infection.[is.na(total$Duration.of.HIV.Infection.)] <- 0
total$Highest.VL.Ever..[is.na(total$Highest.VL.Ever..)] <- 0
total$VL..copies.mL..[is.na(total$VL..copies.mL..)] <- 0
total$Feminine.products.48hrs[is.na(total$Feminine.products.48hrs)] <- 0
total$Group <- factor(total$Group)
total$Group_minus_pap <- factor(total$Group_minus_pap)
#loop for Fisher's and ANOVA tests for CST(viral) as dependent
#lists of variables
# categorical covariates (Fisher's exact test) and continuous covariates
# (one-way ANOVA) against the virome cluster
factors <- c("Age.cat", "BMI.under.cat",
             "BMI.over.cat", "Ethnicity.cat", "Ethnicity2.cat", "Yeast.ever",
             "UTI.ever", "Trich.ever", "Chlamydia.ever", "Condyloma.ever",
             "GenHerpes.ever", "Gonorrhea.ever", "Syphillis.ever",
             "Presence.Symptoms.2wks", "Presence.Symptoms.48hrs", "Symptom.pain",
             "oralsxfrequency.cat", "analsxfrequency.cat", "sextoyfrequency.cat",
             "sexpartner1yr.cat", "sexpartner2mo.cat", "Contraception.H",
             "Contraception.B.M", "Contraception.IUD", "Contraception.none",
             "condoms.48h", "Pregnancy.cat", "Feminine.products",
             "Feminine.products.48hrs", "Tampon.Use.cat",
             "Tampon.use.1mth", "smoking.current", "druguse", "substanceuse",
             "nugent_score_result", "sexpartner", "contramethnotactive___1",
             "abnormaldischarge2wk", "abnormaldischarge48","abnormalodor2wk",
             "abnormalodor48", "irritationdiscomfort2wk", "irritationdiscomfort48",
             "vaginalsymptomother2wk", "rxdrug",
             "antimicrodrug", "vaginalintercourse48hr", "t16", "t18",
             "t26", "t33", "t35", "t42", "t45",
             "t51", "t52", "t53", "t54", "t56", "t61", "t62", "t66",
             "t67", "t68", "t70", "t71", "t72", "t73", "t81", "t83",
             "t84", "t89", "Is.the.patient.antiretroviral.naive.",
             "HIV.Clade...Result", "Likely.mode.of.HIV.acquisition",
             "HCV.Antibody...Result", "HCV.PCR...Result", "HBV.sAb...Result",
             "HBV.sAg...Result", "HBV.cAb...Result", "study_arm")
contins <- c("age", "bmi", "bv_life", "bv_infecttotal_1yr", "bv_infecttotal_2mo",
             "days.since.LMP", "Number.of.Different.HPV.Types", "Med.Duration",
             "Duration.of.HIV.Infection.", "CD4.Nadir.", "Highest.VL.Ever..", "CD4.",
             "VL..copies.mL..", "ShannonsDiversity")
#Fisher's loop
# For each categorical variable, cross-tabulate it against the virome
# cluster (Group_minus_pap) and record Fisher's exact p-value plus the
# phi / Cramer's V statistics (assocstats -- presumably from the vcd pkg).
# NOTE(review): 'df' below is initialised but never used.
df <- data.frame(var = c(), pval = c(), phi = c(), cramer = c())
df_list_Group_minus_pap_factors <- lapply(factors, function(factor) {
  cat("variable: ", factor, "\n")
  formula <- paste0("~", factor, " + Group_minus_pap")
  result <- xtabs(formula, data = total)
  fisher <- fisher.test(result)
  assoc <- assocstats(result)
  pval <- fisher$p.value
  phi <- assoc$phi
  cramer <- assoc$cramer
  row <- data.frame(var = factor, pval = pval, phi = phi, cramer = cramer)
  row
})
# bind the per-variable rows into one result data frame
df_list_Group_minus_pap_factors <- do.call(rbind, df_list_Group_minus_pap_factors)
###################################
#ANOVA loop
# One-way ANOVA of each continuous variable against the virome cluster.
df <- data.frame(var = c(), pval = c(), Fvalue = c())
df_list_Group_minus_pap_contin <- lapply(contins, function(contin) {
  cat("variable: ", contin, "\n")
  result <- summary(aov(total[[contin]] ~total$Group_minus_pap))
  pval <- result[[1]]$`Pr(>F)`[1]
  Fvalue <- result[[1]]$`F value`[1]
  row <- data.frame(var = contin, pval = pval, Fvalue = Fvalue)
  row
})
df_list_Group_minus_pap_contin <- do.call(rbind, df_list_Group_minus_pap_contin)
#######################################################
#for those with more than 2 factor levels
#####################################################
#loop for correlation and t-tests for Shannon's Diversity as dependent
#list of variables
# two-level factors (t-tests) and continuous covariates (Pearson correlation)
factors <- c("Age.cat", "BMI.under.cat",
             "BMI.over.cat", "Ethnicity.cat", "Yeast.ever",
             "UTI.ever", "Trich.ever", "Chlamydia.ever", "Condyloma.ever",
             "GenHerpes.ever", "Gonorrhea.ever",
             "Presence.Symptoms.2wks", "Presence.Symptoms.48hrs", "Symptom.pain",
             "oralsxfrequency.cat", "analsxfrequency.cat", "sextoyfrequency.cat",
             "sexpartner1yr.cat", "sexpartner2mo.cat", "Contraception.H",
             "Contraception.B.M", "Contraception.none",
             "condoms.48h", "Pregnancy.cat", "Feminine.products",
             "Feminine.products.48hrs", "Tampon.Use.cat",
             "Tampon.use.1mth", "smoking.current", "substanceuse",
             "contramethnotactive___1",
             "abnormaldischarge2wk", "abnormaldischarge48","abnormalodor2wk",
             "abnormalodor48", "irritationdiscomfort2wk", "irritationdiscomfort48",
             "rxdrug",
             "antimicrodrug", "vaginalintercourse48hr", "t16", "t33", "t42",
             "t51", "t54", "t56", "t61", "t62", "t66",
             "t67", "t70", "t81", "t84", "HBV.sAg...Result")
contins <- c("age", "bmi", "bv_life", "bv_infecttotal_1yr", "bv_infecttotal_2mo",
             "days.since.LMP", "Number.of.Different.HPV.Types", "Med.Duration",
             "Duration.of.HIV.Infection.", "CD4.Nadir.", "Highest.VL.Ever..", "CD4.",
             "VL..copies.mL..", "ShannonsDiversity")
#correlation loop
# Pearson correlation of each continuous covariate with Shannon's Diversity.
# NOTE(review): contins includes ShannonsDiversity itself, so one row is its
# self-correlation. 'df' below is initialised but never used.
df <- data.frame(var = c(), pval = c(), pearson = c())
df_list_SD_contin <- lapply(contins, function(contin) {
  cat("variable: ", contin, "\n")
  result <- cor.test(~total$ShannonsDiversity + total[[contin]], method = c("pearson"))
  pval <- result$p.value
  pearson <- result$estimate
  row <- data.frame(var = contin, pval = pval, pearson = pearson)
  row
})
df_list_SD_contin <- do.call(rbind, df_list_SD_contin)
#############
#t.test loop
# Welch's t-test of Shannon's Diversity for every two-level factor, plus a
# Cohen's d effect size (cohen.d -- presumably from the effsize package).
# The model formula is now built with as.formula() instead of the previous
# eval(parse(text = ...)) construction, which was fragile and unsafe.
df <- data.frame(var = c(), pval = c(), cohend = c())  # vestigial; never used
df_list_SD_factor <- lapply(factors, function(factor) {
  cat("variable: ", factor, "\n")
  fml <- stats::as.formula(paste0("ShannonsDiversity ~ ", factor))
  result_ttest <- t.test(fml, data = total)
  result_cohend <- cohen.d(fml, data = total)
  data.frame(var = factor,
             pval = result_ttest$p.value,
             cohend = result_cohend$estimate)
})
# bind the per-variable rows into one result data frame
df_list_SD_factor <- do.call(rbind, df_list_SD_factor)
#####################################################################################
#for those with more than 2 factor levels
#lists of variables
factors <- c("Group", "Group_minus_pap", "CST", "Ethnicity2.cat", "Syphillis.ever",
             "Contraception.IUD", "druguse", "nugent_score_result", "sexpartner",
             "vaginalsymptomother2wk", "t18", "t26", "t35", "t45", "t52", "t53",
             "t68", "t71", "t72", "t73", "t83", "t89",
             "Is.the.patient.antiretroviral.naive.", "HIV.Clade...Result",
             "Likely.mode.of.HIV.acquisition", "HCV.Antibody...Result",
             "HCV.PCR...Result", "HBV.sAb...Result", "HBV.cAb...Result", "study_arm")
###################################
#ANOVA loop
# One-way ANOVA of Shannon's Diversity across each multi-level factor.
# NOTE(review): 'df' below is initialised but never used.
df <- data.frame(var = c(), pval = c(), Fvalue = c())
df_list_SD_factor_more <- lapply(factors, function(factor) {
  cat("variable: ", factor, "\n")
  result <- summary(aov(total$ShannonsDiversity ~total[[factor]]))
  pval <- result[[1]]$`Pr(>F)`[1]
  Fvalue <- result[[1]]$`F value`[1]
  row <- data.frame(var = factor, pval = pval, Fvalue = Fvalue)
  row
})
df_list_SD_factor_more <- do.call(rbind, df_list_SD_factor_more)
####################################################################################
#new BV cats
# Re-read the metadata and derive categorical versions of the BV counts.
# NOTE(review): this reload replaces the merged 'total' built earlier --
# confirm viromeall_metadata_full.csv already carries the Group / CST
# columns used below.
total <- read.csv(file = "viromeall_metadata_full.csv")
total$BV.2mths.cat[total$bv_infecttotal_2mo > 0] <- "1" #1+
total$BV.2mths.cat[total$bv_infecttotal_2mo <= 0] <- "0" #0
total$BV.2mths.cat <- factor(total$BV.2mths.cat)
total$BV.year.cat[total$bv_infecttotal_1yr > 2] <- "2" #3+
total$BV.year.cat[total$bv_infecttotal_1yr > 0 & total$bv_infecttotal_1yr <= 2] <- "1" #1-2
total$BV.year.cat[total$bv_infecttotal_1yr <= 0] <- "0" #0
total$BV.year.cat <- factor(total$BV.year.cat)
total$BV.life.cat[total$bv_life > 9] <- "3" #10+
total$BV.life.cat[total$bv_life > 2 & total$bv_life <= 9] <- "2" #3-9
total$BV.life.cat[total$bv_life > 0 & total$bv_life <= 2] <- "1" #1-2
total$BV.life.cat[total$bv_life <= 0] <- "0" #0
total$BV.life.cat <- factor(total$BV.life.cat)
#SD for 2+ factors (one-way ANOVA)
summary(aov(total$ShannonsDiversity ~total$BV.year.cat))
summary(aov(total$ShannonsDiversity ~total$BV.life.cat))
#sd for 2 levels (t-test plus Cohen's d effect size)
t.test(ShannonsDiversity~BV.2mths.cat, data = total)
cohen.d(ShannonsDiversity~BV.2mths.cat, data = total)
#######################
#Virome Groups for new cats
# Fisher's exact test + association statistics of each BV category (and CST)
# against the full virome cluster assignment.
a <- xtabs(~BV.year.cat + Group , data = total)
fisher.test(a)
assocstats(a)
a <- xtabs(~BV.life.cat + Group , data = total)
fisher.test(a)
assocstats(a)
a <- xtabs(~BV.2mths.cat + Group , data = total)
fisher.test(a)
assocstats(a)
a <- xtabs(~CST + Group , data = total)
fisher.test(a)
assocstats(a)
#####################
#Virome Groups minus pap for new cats
# Same tests against the minus-pap cluster assignment.
a <- xtabs(~BV.year.cat + Group_minus_pap , data = total)
fisher.test(a)
assocstats(a)
a <- xtabs(~BV.life.cat + Group_minus_pap , data = total)
fisher.test(a)
assocstats(a)
a <- xtabs(~BV.2mths.cat + Group_minus_pap , data = total)
fisher.test(a)
assocstats(a)
a <- xtabs(~CST + Group_minus_pap , data = total)
fisher.test(a)
assocstats(a)
| /virome_univariate.R | no_license | KeshiniD/Vogue | R | false | false | 9,999 | r | #clusters info
# Load virome cluster assignments (full, and minus-pap), per-participant
# Shannon diversity, and sample metadata; harmonise the id column name.
clust7 <- read.csv("data_clust_7_v2.csv")
clust7np <- read.csv("data_clust_7_minus_pap_v2.csv")
# NOTE(review): this data frame reuses the name 'sd' (same as stats::sd).
sd <- read.csv("virome_individual_SD_all.csv")
clust7 <- dplyr::rename(clust7, study_id = X)
clust7np <- dplyr::rename(clust7np, study_id = X)
sd <- dplyr::rename(sd, study_id = Participants)
#merge with virome data
# join() is presumably plyr::join -- no library() call is visible here.
meta <- read.csv("viromeall_metadata_full.csv")
total <- join(clust7, clust7np, type="full")
total <- join(total, meta, type="full")
total <- join(total, sd, type="full")
#format and clean
# drop row-number columns and zero-fill NAs in HIV-related measures
total$X <- NULL
total$X.1 <- NULL
total$Med.Duration[is.na(total$Med.Duration)] <- 0
total$Duration.of.HIV.Infection.[is.na(total$Duration.of.HIV.Infection.)] <- 0
total$Highest.VL.Ever..[is.na(total$Highest.VL.Ever..)] <- 0
total$VL..copies.mL..[is.na(total$VL..copies.mL..)] <- 0
total$Feminine.products.48hrs[is.na(total$Feminine.products.48hrs)] <- 0
total$Group <- factor(total$Group)
total$Group_minus_pap <- factor(total$Group_minus_pap)
#loop for Fisher's and ANOVA tests for CST(viral) as dependent
#lists of variables
# categorical covariates (Fisher's exact test) and continuous covariates
# (one-way ANOVA) against the virome cluster
factors <- c("Age.cat", "BMI.under.cat",
             "BMI.over.cat", "Ethnicity.cat", "Ethnicity2.cat", "Yeast.ever",
             "UTI.ever", "Trich.ever", "Chlamydia.ever", "Condyloma.ever",
             "GenHerpes.ever", "Gonorrhea.ever", "Syphillis.ever",
             "Presence.Symptoms.2wks", "Presence.Symptoms.48hrs", "Symptom.pain",
             "oralsxfrequency.cat", "analsxfrequency.cat", "sextoyfrequency.cat",
             "sexpartner1yr.cat", "sexpartner2mo.cat", "Contraception.H",
             "Contraception.B.M", "Contraception.IUD", "Contraception.none",
             "condoms.48h", "Pregnancy.cat", "Feminine.products",
             "Feminine.products.48hrs", "Tampon.Use.cat",
             "Tampon.use.1mth", "smoking.current", "druguse", "substanceuse",
             "nugent_score_result", "sexpartner", "contramethnotactive___1",
             "abnormaldischarge2wk", "abnormaldischarge48","abnormalodor2wk",
             "abnormalodor48", "irritationdiscomfort2wk", "irritationdiscomfort48",
             "vaginalsymptomother2wk", "rxdrug",
             "antimicrodrug", "vaginalintercourse48hr", "t16", "t18",
             "t26", "t33", "t35", "t42", "t45",
             "t51", "t52", "t53", "t54", "t56", "t61", "t62", "t66",
             "t67", "t68", "t70", "t71", "t72", "t73", "t81", "t83",
             "t84", "t89", "Is.the.patient.antiretroviral.naive.",
             "HIV.Clade...Result", "Likely.mode.of.HIV.acquisition",
             "HCV.Antibody...Result", "HCV.PCR...Result", "HBV.sAb...Result",
             "HBV.sAg...Result", "HBV.cAb...Result", "study_arm")
contins <- c("age", "bmi", "bv_life", "bv_infecttotal_1yr", "bv_infecttotal_2mo",
             "days.since.LMP", "Number.of.Different.HPV.Types", "Med.Duration",
             "Duration.of.HIV.Infection.", "CD4.Nadir.", "Highest.VL.Ever..", "CD4.",
             "VL..copies.mL..", "ShannonsDiversity")
#Fisher's loop
# For each categorical variable, cross-tabulate it against the virome
# cluster (Group_minus_pap) and record Fisher's exact p-value plus the
# phi / Cramer's V statistics (assocstats -- presumably from the vcd pkg).
# NOTE(review): 'df' below is initialised but never used.
df <- data.frame(var = c(), pval = c(), phi = c(), cramer = c())
df_list_Group_minus_pap_factors <- lapply(factors, function(factor) {
  cat("variable: ", factor, "\n")
  formula <- paste0("~", factor, " + Group_minus_pap")
  result <- xtabs(formula, data = total)
  fisher <- fisher.test(result)
  assoc <- assocstats(result)
  pval <- fisher$p.value
  phi <- assoc$phi
  cramer <- assoc$cramer
  row <- data.frame(var = factor, pval = pval, phi = phi, cramer = cramer)
  row
})
# bind the per-variable rows into one result data frame
df_list_Group_minus_pap_factors <- do.call(rbind, df_list_Group_minus_pap_factors)
###################################
#ANOVA loop
# One-way ANOVA of each continuous variable against the virome cluster.
df <- data.frame(var = c(), pval = c(), Fvalue = c())
df_list_Group_minus_pap_contin <- lapply(contins, function(contin) {
  cat("variable: ", contin, "\n")
  result <- summary(aov(total[[contin]] ~total$Group_minus_pap))
  pval <- result[[1]]$`Pr(>F)`[1]
  Fvalue <- result[[1]]$`F value`[1]
  row <- data.frame(var = contin, pval = pval, Fvalue = Fvalue)
  row
})
df_list_Group_minus_pap_contin <- do.call(rbind, df_list_Group_minus_pap_contin)
#######################################################
#for those with more than 2 factor levels
#####################################################
#loop for correlation and t-tests for Shannon's Diversity as dependent
#list of variables
# two-level factors (t-tests) and continuous covariates (Pearson correlation)
factors <- c("Age.cat", "BMI.under.cat",
             "BMI.over.cat", "Ethnicity.cat", "Yeast.ever",
             "UTI.ever", "Trich.ever", "Chlamydia.ever", "Condyloma.ever",
             "GenHerpes.ever", "Gonorrhea.ever",
             "Presence.Symptoms.2wks", "Presence.Symptoms.48hrs", "Symptom.pain",
             "oralsxfrequency.cat", "analsxfrequency.cat", "sextoyfrequency.cat",
             "sexpartner1yr.cat", "sexpartner2mo.cat", "Contraception.H",
             "Contraception.B.M", "Contraception.none",
             "condoms.48h", "Pregnancy.cat", "Feminine.products",
             "Feminine.products.48hrs", "Tampon.Use.cat",
             "Tampon.use.1mth", "smoking.current", "substanceuse",
             "contramethnotactive___1",
             "abnormaldischarge2wk", "abnormaldischarge48","abnormalodor2wk",
             "abnormalodor48", "irritationdiscomfort2wk", "irritationdiscomfort48",
             "rxdrug",
             "antimicrodrug", "vaginalintercourse48hr", "t16", "t33", "t42",
             "t51", "t54", "t56", "t61", "t62", "t66",
             "t67", "t70", "t81", "t84", "HBV.sAg...Result")
contins <- c("age", "bmi", "bv_life", "bv_infecttotal_1yr", "bv_infecttotal_2mo",
             "days.since.LMP", "Number.of.Different.HPV.Types", "Med.Duration",
             "Duration.of.HIV.Infection.", "CD4.Nadir.", "Highest.VL.Ever..", "CD4.",
             "VL..copies.mL..", "ShannonsDiversity")
#correlation loop
# Pearson correlation of each continuous covariate with Shannon's Diversity.
# NOTE(review): contins includes ShannonsDiversity itself, so one row is its
# self-correlation. 'df' below is initialised but never used.
df <- data.frame(var = c(), pval = c(), pearson = c())
df_list_SD_contin <- lapply(contins, function(contin) {
  cat("variable: ", contin, "\n")
  result <- cor.test(~total$ShannonsDiversity + total[[contin]], method = c("pearson"))
  pval <- result$p.value
  pearson <- result$estimate
  row <- data.frame(var = contin, pval = pval, pearson = pearson)
  row
})
df_list_SD_contin <- do.call(rbind, df_list_SD_contin)
#############
#t.test loop
# Welch's t-test of Shannon's Diversity for every two-level factor, plus a
# Cohen's d effect size (cohen.d -- presumably from the effsize package).
# The model formula is now built with as.formula() instead of the previous
# eval(parse(text = ...)) construction, which was fragile and unsafe.
df <- data.frame(var = c(), pval = c(), cohend = c())  # vestigial; never used
df_list_SD_factor <- lapply(factors, function(factor) {
  cat("variable: ", factor, "\n")
  fml <- stats::as.formula(paste0("ShannonsDiversity ~ ", factor))
  result_ttest <- t.test(fml, data = total)
  result_cohend <- cohen.d(fml, data = total)
  data.frame(var = factor,
             pval = result_ttest$p.value,
             cohend = result_cohend$estimate)
})
# bind the per-variable rows into one result data frame
df_list_SD_factor <- do.call(rbind, df_list_SD_factor)
#####################################################################################
#for those with more than 2 factor levels
#lists of variables
factors <- c("Group", "Group_minus_pap", "CST", "Ethnicity2.cat", "Syphillis.ever",
             "Contraception.IUD", "druguse", "nugent_score_result", "sexpartner",
             "vaginalsymptomother2wk", "t18", "t26", "t35", "t45", "t52", "t53",
             "t68", "t71", "t72", "t73", "t83", "t89",
             "Is.the.patient.antiretroviral.naive.", "HIV.Clade...Result",
             "Likely.mode.of.HIV.acquisition", "HCV.Antibody...Result",
             "HCV.PCR...Result", "HBV.sAb...Result", "HBV.cAb...Result", "study_arm")
###################################
#ANOVA loop
# One-way ANOVA of Shannon's Diversity across each multi-level factor.
# NOTE(review): 'df' below is initialised but never used.
df <- data.frame(var = c(), pval = c(), Fvalue = c())
df_list_SD_factor_more <- lapply(factors, function(factor) {
  cat("variable: ", factor, "\n")
  result <- summary(aov(total$ShannonsDiversity ~total[[factor]]))
  pval <- result[[1]]$`Pr(>F)`[1]
  Fvalue <- result[[1]]$`F value`[1]
  row <- data.frame(var = factor, pval = pval, Fvalue = Fvalue)
  row
})
df_list_SD_factor_more <- do.call(rbind, df_list_SD_factor_more)
####################################################################################
#new BV cats
# Re-read the metadata and derive categorical versions of the BV counts.
# NOTE(review): this reload replaces the merged 'total' built earlier --
# confirm viromeall_metadata_full.csv already carries the Group / CST
# columns used below.
total <- read.csv(file = "viromeall_metadata_full.csv")
total$BV.2mths.cat[total$bv_infecttotal_2mo > 0] <- "1" #1+
total$BV.2mths.cat[total$bv_infecttotal_2mo <= 0] <- "0" #0
total$BV.2mths.cat <- factor(total$BV.2mths.cat)
total$BV.year.cat[total$bv_infecttotal_1yr > 2] <- "2" #3+
total$BV.year.cat[total$bv_infecttotal_1yr > 0 & total$bv_infecttotal_1yr <= 2] <- "1" #1-2
total$BV.year.cat[total$bv_infecttotal_1yr <= 0] <- "0" #0
total$BV.year.cat <- factor(total$BV.year.cat)
total$BV.life.cat[total$bv_life > 9] <- "3" #10+
total$BV.life.cat[total$bv_life > 2 & total$bv_life <= 9] <- "2" #3-9
total$BV.life.cat[total$bv_life > 0 & total$bv_life <= 2] <- "1" #1-2
total$BV.life.cat[total$bv_life <= 0] <- "0" #0
total$BV.life.cat <- factor(total$BV.life.cat)
#SD for 2+ factors (one-way ANOVA)
summary(aov(total$ShannonsDiversity ~total$BV.year.cat))
summary(aov(total$ShannonsDiversity ~total$BV.life.cat))
#sd for 2 levels (t-test plus Cohen's d effect size)
t.test(ShannonsDiversity~BV.2mths.cat, data = total)
cohen.d(ShannonsDiversity~BV.2mths.cat, data = total)
#######################
#Virome Groups for new cats
# Fisher's exact test + association statistics of each BV category (and CST)
# against the full virome cluster assignment.
a <- xtabs(~BV.year.cat + Group , data = total)
fisher.test(a)
assocstats(a)
a <- xtabs(~BV.life.cat + Group , data = total)
fisher.test(a)
assocstats(a)
a <- xtabs(~BV.2mths.cat + Group , data = total)
fisher.test(a)
assocstats(a)
a <- xtabs(~CST + Group , data = total)
fisher.test(a)
assocstats(a)
#####################
#Virome Groups minus pap for new cats
# Same tests against the minus-pap cluster assignment.
a <- xtabs(~BV.year.cat + Group_minus_pap , data = total)
fisher.test(a)
assocstats(a)
a <- xtabs(~BV.life.cat + Group_minus_pap , data = total)
fisher.test(a)
assocstats(a)
a <- xtabs(~BV.2mths.cat + Group_minus_pap , data = total)
fisher.test(a)
assocstats(a)
a <- xtabs(~CST + Group_minus_pap , data = total)
fisher.test(a)
assocstats(a)
|
library(KarsTS)
### Name: verifyCharEntry
### Title: verifyCharEntry: verify character entries
### Aliases: verifyCharEntry
### Keywords: ~kwd1 ~kwd2
### ** Examples
# Example calls: one character entry, and two numeric entries paired with
# different 'noValid' fallback values.
verifyCharEntry("Strawberry", noValid = "isNoValid")
verifyCharEntry(235, noValid = "isNoValid")
verifyCharEntry(235, noValid = NA)
| /data/genthat_extracted_code/KarsTS/examples/verifyCharEntry.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 307 | r | library(KarsTS)
### Name: verifyCharEntry
### Title: verifyCharEntry: verify character entries
### Aliases: verifyCharEntry
### Keywords: ~kwd1 ~kwd2
### ** Examples
# Example calls: one character entry, and two numeric entries paired with
# different 'noValid' fallback values.
verifyCharEntry("Strawberry", noValid = "isNoValid")
verifyCharEntry(235, noValid = "isNoValid")
verifyCharEntry(235, noValid = NA)
verifyCharEntry(235, noValid = NA)
|
# The functions below here were adapted from the functions in the fields package! (image.plot and subroutines)
# fields, Tools for spatial data
# Copyright 2004-2007, Institute for Mathematics Applied Geosciences
# University Corporation for Atmospheric Research
# Licensed under the GPL -- www.gpl.org/licenses/gpl.html
# Adaptations for the raster package:
# Author: Robert J. Hijmans
# Date : May 2010
# Version 1.0
# Licence GPL v3
.plotSpace <- function(asp=1, legend.mar = 3.1, legend.width = 0.5, legend.shrink = 0.5) {
	# Compute figure-relative plot regions ('plt' coordinates) for a main map
	# panel and a vertical legend strip to its right, adjusting the main
	# panel's height/width to honour the requested aspect ratio.
	# Returns list(legendPlot = c(x1, x2, y1, y2), mainPlot = c(x1, x2, y1, y2)).
	p <- graphics::par()
	cs <- p$cin[1] / p$din[1]       # width of one character in figure units
	gap <- cs * p$mar[4]            # gap derived from the right margin width
	lw <- cs * legend.width
	lm <- legend.mar * cs
	# legend strip: anchored at the right edge, vertically shrunk
	leg <- p$plt
	leg[2] <- 1 - lm
	leg[1] <- leg[2] - lw
	trim <- (leg[4] - leg[3]) * ((1 - legend.shrink) / 2)
	leg[4] <- leg[4] - trim
	leg[3] <- leg[3] + trim
	# main panel: clamp its right edge to leave room for the legend
	main <- p$plt
	main[2] <- min(main[2], leg[1] - gap)
	# squeeze the panel vertically (or effectively widen it) to match 'asp'
	ratio <- ((main[4] - main[3]) / (main[2] - main[1])) / asp
	h <- main[4] - main[3]
	delta <- (if (ratio < 1) h - h * ratio else h / ratio - h) / 2
	main[3] <- main[3] + delta
	main[4] <- main[4] - delta
	# slide the legend strip next to the (possibly clamped) main panel
	width <- leg[2] - leg[1]
	leg[1] <- min(main[2] + 0.5 * gap, leg[1])
	leg[2] <- leg[1] + width
	list(legendPlot = leg, mainPlot = main)
}
.plotLegend <- function(z, col, legend.at='classic', lab.breaks = NULL, axis.args = NULL, legend.lab = NULL, legend.args = NULL, ...) {
	# Draw a vertical colour-strip legend for the values in 'z' in the current
	# plot region (the caller positions it via par(plt = ...)).
	#   z          : values the legend describes (range sets the scale)
	#   col        : colour ramp; one strip cell per colour
	#   legend.at  : 'quantile' puts axis ticks at the quantiles of z;
	#                anything else uses pretty ticks over the z range
	#   lab.breaks : custom tick labels, used together with 'breaks' in ...
	#   axis.args  : extra arguments appended to the axis() call
	#   legend.lab : legend title (currently prepared but not drawn; see below)
	# NOTE: 'horizontal' is fixed FALSE here, so the horizontal branches below
	# are never taken.
	horizontal <- FALSE
	ix <- 1
	zlim <- range(z, na.rm = TRUE, finite = TRUE)
	zrange <- zlim[2] - zlim[1]
	# choose the number of decimals for tick labels from the data range
	if (zrange > 10) { decs <- 0
	} else if (zrange > 1) { decs <- 1
	} else { decs <- ceiling(abs(log10(zrange)) + 1) }
	pow <- 10^decs
	minz <- floor(zlim[1] * pow) / pow
	maxz <- ceiling(zlim[2] * pow) / pow
	zrange <- maxz - minz
	# one strip cell per colour, spanning [minz, maxz]
	nlevel <- length(col)
	binwidth <- c(0, 1:nlevel * (1/nlevel))
	iy <- minz + zrange * binwidth
	iz <- matrix(iy, nrow = 1, ncol = length(iy))
	breaks <- list(...)$breaks
	if (!is.null(breaks) && !is.null(lab.breaks)) {
		# user-supplied breaks and labels: ticks go exactly at the breaks
		axis.args <- c(list(side = ifelse(horizontal, 1, 4), mgp = c(3, 1, 0), las = ifelse(horizontal, 0, 2), at = breaks, labels = lab.breaks), axis.args)
	} else {
		if (legend.at == 'quantile') {
			z <- z[is.finite(z)]
			at <- stats::quantile(z, names = FALSE, na.rm = TRUE)
		} else {
			at <- graphics::axTicks(2, c(minz, maxz, 4))
		}
		at <- round(at, decs)
		# BUGFIX: the quantile branch previously prepended side/mgp/las/at to
		# axis.args AND the line below prepended them again, so do.call("axis")
		# failed with "formal argument matched by multiple actual arguments".
		# The tick positions are now computed in the branches and axis.args is
		# assembled exactly once.
		axis.args <- c(list(side = ifelse(horizontal, 1, 4), mgp = c(3, 1, 0), las = ifelse(horizontal, 0, 2), at = at), axis.args)
	}
	# draw the colour strip itself (no axes; axis added explicitly below)
	if (!horizontal) {
		if (is.null(breaks)) {
			image(ix, iy, iz, xaxt = "n", yaxt = "n", xlab = "", ylab = "", col = col)
		} else {
			image(ix, iy, iz, xaxt = "n", yaxt = "n", xlab = "", ylab = "", col = col, breaks = breaks)
		}
	} else {
		if (is.null(breaks)) {
			image(iy, ix, t(iz), xaxt = "n", yaxt = "n", xlab = "", ylab = "", col = col)
		} else {
			image(iy, ix, t(iz), xaxt = "n", yaxt = "n", xlab = "", ylab = "", col = col, breaks = breaks)
		}
	}
	axis.args <- c(axis.args, cex.axis = 0.75, tcl = -0.15, list(mgp = c(3, 0.4, 0)))
	do.call("axis", axis.args)
	graphics::box()
	if (!is.null(legend.lab)) {
		# NOTE(review): the title is prepared but never drawn (the mtext call
		# below is disabled); confirm whether legend.lab should be rendered.
		legend.args <- list(text = legend.lab, side = 3, line = 0.75)
	}
	if (!is.null(legend.args)) {
		#do.call(graphics::mtext, legend.args)
	}
}
# Plot a Raster* object as an image with axes, an optional frame, and an
# optional vertical color legend drawn by .plotLegend() in a side panel
# laid out by .plotSpace().
#
# x          Raster* object (accessed via extent slots and raster helpers)
# maxpixels  maximum number of cells sampled for display
# col        color ramp used for the image and the legend
# xlab, ylab axis labels
# asp        y/x aspect ratio; when missing (and not adding to an existing
#            plot) it is derived from the midpoint latitude for lon/lat data
# box        draw a frame around the map?
# add        add to the current plot instead of starting a new one?
# legend     draw the color legend?
# legend.at  forwarded to .plotLegend ('quantile' puts ticks at quantiles)
# ...        further arguments forwarded to image() and .plotLegend()
.plot2 <- function(x, maxpixels=100000, col=rev(terrain.colors(25)), xlab='', ylab='', asp, box=TRUE, add=FALSE, legend=TRUE, legend.at='', ...) {
	if (!add && missing(asp)) {
		if (couldBeLonLat(x)) {
			# Midpoint latitude of the extent; the original computed
			# mean(ymax + ymin), i.e. the SUM of the extremes (twice the
			# midpoint), which produced a wrong aspect ratio.
			ym <- (x@extent@ymax + x@extent@ymin) / 2
			asp <- min(5, 1 / cos((ym * pi) / 180))
		} else {
			asp <- 1
		}
	}
	plotArea <- .plotSpace(asp)
	# downsample large rasters to keep drawing fast
	x <- sampleRegular(x, maxpixels, asRaster=TRUE, useGDAL=TRUE)
	xticks <- graphics::axTicks(1, c(xmin(x), xmax(x), 4))
	yticks <- graphics::axTicks(2, c(ymin(x), ymax(x), 4))
	# integer cell resolutions get integer tick labels
	if (xres(x) %% 1 == 0) xticks <- round(xticks)
	if (yres(x) %% 1 == 0) yticks <- round(yticks)
	# flip the rows so the value matrix is oriented north-up for image()
	y <- yFromRow(x, nrow(x):1)
	z <- t((getValues(x, format='matrix'))[nrow(x):1, ])
	x <- xFromCol(x, 1:ncol(x))
	if (add) {
		image(x=x, y=y, z=z, col=col, axes=FALSE, xlab=xlab, ylab=ylab, add=TRUE, ...)
	} else {
		if (legend) {
			# draw the legend strip first, then open a new plot region for the map
			graphics::par(pty = "m", plt=plotArea$legendPlot, err = -1)
			.plotLegend(z, col, legend.at=legend.at, ...)
			graphics::par(new=TRUE, plt=plotArea$mainPlot)
		}
		image(x=x, y=y, z=z, col=col, axes=FALSE, xlab=xlab, ylab=ylab, asp=asp, ...)
		graphics::axis(1, at=xticks, cex.axis=0.67, tcl=-0.3, mgp=c(3, 0.25, 0))
		# rotate y-axis labels when they get long
		las <- ifelse(max(.nchar(as.character(yticks))) > 5, 0, 1)
		graphics::axis(2, at=yticks, las = las, cex.axis=0.67, tcl=-0.3, mgp=c(3, 0.75, 0) )
		if (box) graphics::box()
	}
}
#.plot2(r, legend=T)
# .plot2(r, legend.at='quantile')
# plot(wrld_simpl, add=T)
| /for875-18/packages/raster/R/newPLot.R | no_license | anhnguyendepocen/for875 | R | false | false | 5,780 | r | # The functions below here were adapted from the functions in the fields package! (image.plot and subroutines)
# fields, Tools for spatial data
# Copyright 2004-2007, Institute for Mathematics Applied Geosciences
# University Corporation for Atmospheric Research
# Licensed under the GPL -- www.gpl.org/licenses/gpl.html
# Adaptations for the raster package:
# Author: Robert J. Hijmans
# Date : May 2010
# Version 1.0
# Licence GPL v3
# Compute "plt" (plot-region) coordinates for a main image panel plus a
# vertical legend strip on its right-hand side.
#
# asp            desired y/x aspect ratio of the main panel
# legend.mar     margin (in character widths) to the right of the legend
# legend.width   width of the legend strip, in character widths
# legend.shrink  fraction by which the legend is shortened vertically
#
# Returns a list with elements legendPlot and mainPlot, each a length-4
# vector suitable for par(plt = ...).
.plotSpace <- function(asp=1, legend.mar = 3.1, legend.width = 0.5, legend.shrink = 0.5) {
	p <- graphics::par()
	# one character width expressed as a fraction of the device width
	char_w <- p$cin[1] / p$din[1]
	gap <- char_w * p$mar[4]
	leg_w <- char_w * legend.width
	leg_mar <- char_w * legend.mar
	# carve the legend strip out of the right edge of the current region
	leg <- p$plt
	leg[2] <- 1 - leg_mar
	leg[1] <- leg[2] - leg_w
	shrink <- (leg[4] - leg[3]) * ((1 - legend.shrink) / 2)
	leg[4] <- leg[4] - shrink
	leg[3] <- leg[3] + shrink
	# the main panel stops short of the legend by one margin gap
	main <- p$plt
	main[2] <- min(main[2], leg[1] - gap)
	# adjust the panel height so its shape matches the requested aspect ratio
	panel_asp <- (main[4] - main[3]) / (main[2] - main[1])
	ratio <- panel_asp / asp
	height <- main[4] - main[3]
	if (ratio < 1) {
		trim <- height - height * ratio
	} else {
		trim <- height / ratio - height
	}
	trim <- trim / 2
	main[3] <- main[3] + trim
	main[4] <- main[4] - trim
	# re-anchor the legend just right of the (possibly narrowed) panel
	leg_span <- leg[2] - leg[1]
	leg[1] <- min(main[2] + 0.5 * gap, leg[1])
	leg[2] <- leg[1] + leg_span
	list(legendPlot = leg, mainPlot = main)
}
# Draw a vertical color-strip legend for the values in z.
#
# z           numeric values (typically the matrix being imaged)
# col         color ramp; one bin per color
# legend.at   'quantile' puts axis ticks at the quantiles of z; any other
#             value uses pretty ticks from axTicks()
# lab.breaks  labels used when an explicit 'breaks' vector is passed via ...
# axis.args   extra arguments appended to the axis() call
# legend.lab  optional legend title (drawing currently disabled, see below)
# legend.args mtext() arguments for the legend title (currently unused)
# ...         may carry 'breaks' for image(); otherwise ignored here
.plotLegend <- function(z, col, legend.at='classic', lab.breaks = NULL, axis.args = NULL, legend.lab = NULL, legend.args = NULL, ...) {
	horizontal <- FALSE  # horizontal legends are not currently exposed
	ix <- 1
	zlim <- range(z, na.rm = TRUE, finite = TRUE)
	zrange <- zlim[2] - zlim[1]
	# number of decimals kept on tick labels, based on the value range
	if (zrange > 10) {
		decs <- 0
	} else if (zrange > 1) {
		decs <- 1
	} else {
		decs <- ceiling(abs(log10(zrange)) + 1)
	}
	pow <- 10^decs
	minz <- floor(zlim[1] * pow) / pow
	maxz <- ceiling(zlim[2] * pow) / pow
	zrange <- maxz - minz
	nlevel <- length(col)
	# bin edges spanning [minz, maxz], one bin per color
	binwidth <- c(0, seq_len(nlevel) * (1 / nlevel))
	iy <- minz + zrange * binwidth
	iz <- matrix(iy, nrow = 1, ncol = length(iy))
	breaks <- list(...)$breaks
	side <- ifelse(horizontal, 1, 4)
	las <- ifelse(horizontal, 0, 2)
	if (!is.null(breaks) && !is.null(lab.breaks)) {
		# user-supplied breaks with explicit labels
		axis.args <- c(list(side = side, mgp = c(3, 1, 0), las = las, at = breaks, labels = lab.breaks), axis.args)
	} else {
		if (legend.at == 'quantile') {
			# ticks at the quantiles of the finite data
			z <- z[is.finite(z)]
			at <- stats::quantile(z, names = FALSE, na.rm = TRUE)
		} else {
			at <- graphics::axTicks(2, c(minz, maxz, 4))
		}
		at <- round(at, decs)
		# Build axis.args exactly once. The original also assigned it inside
		# the quantile branch, duplicating the 'side'/'at' formals and making
		# do.call("axis", axis.args) fail with "formal argument matched by
		# multiple actual arguments".
		axis.args <- c(list(side = side, mgp = c(3, 1, 0), las = las, at = at), axis.args)
	}
	# draw the color strip itself
	if (!horizontal) {
		if (is.null(breaks)) {
			image(ix, iy, iz, xaxt = "n", yaxt = "n", xlab = "", ylab = "", col = col)
		} else {
			image(ix, iy, iz, xaxt = "n", yaxt = "n", xlab = "", ylab = "", col = col, breaks = breaks)
		}
	} else {
		if (is.null(breaks)) {
			image(iy, ix, t(iz), xaxt = "n", yaxt = "n", xlab = "", ylab = "", col = col)
		} else {
			image(iy, ix, t(iz), xaxt = "n", yaxt = "n", xlab = "", ylab = "", col = col, breaks = breaks)
		}
	}
	axis.args <- c(axis.args, cex.axis = 0.75, tcl = -0.15, list(mgp = c(3, 0.4, 0)))
	do.call("axis", axis.args)
	graphics::box()
	if (!is.null(legend.lab)) {
		legend.args <- list(text = legend.lab, side = 3, line = 0.75)
	}
	if (!is.null(legend.args)) {
		# NOTE(review): legend-title drawing is deliberately disabled; re-enable
		# with do.call(graphics::mtext, legend.args) if wanted.
	}
}
# Plot a Raster* object as an image with axes, an optional frame, and an
# optional vertical color legend (.plotLegend drawn into a side panel laid
# out by .plotSpace). Duplicate of the .plot2 definition earlier in this file.
#
# x          Raster* object; maxpixels caps the cells sampled for display
# col        color ramp; xlab/ylab axis labels
# asp        aspect ratio; derived from latitude for lon/lat rasters when missing
# box/add/legend  drawing switches; legend.at forwarded to .plotLegend
# ...        forwarded to image() and .plotLegend()
.plot2 <- function(x, maxpixels=100000, col=rev(terrain.colors(25)), xlab='', ylab='', asp, box=TRUE, add=FALSE, legend=TRUE, legend.at='', ...) {
if (!add & missing(asp)) {
if (couldBeLonLat(x)) {
# NOTE(review): mean() of a single summed value is just ymax+ymin, i.e.
# TWICE the midpoint latitude; (ymax+ymin)/2 looks intended -- confirm
ym <- mean(x@extent@ymax + x@extent@ymin)
asp <- min(5, 1/cos((ym * pi)/180))
} else {
asp = 1
}
}
plotArea <- .plotSpace(asp)
# downsample large rasters before drawing
x <- sampleRegular(x, maxpixels, asRaster=TRUE, useGDAL=TRUE)
xticks <- graphics::axTicks(1, c(xmin(x), xmax(x), 4))
yticks <- graphics::axTicks(2, c(ymin(x), ymax(x), 4))
# integer cell resolutions get integer tick labels
if (xres(x) %% 1 == 0) xticks = round(xticks)
if (yres(x) %% 1 == 0) yticks = round(yticks)
# flip rows so the value matrix is oriented north-up for image()
y <- yFromRow(x, nrow(x):1)
z <- t((getValues(x, format='matrix'))[nrow(x):1,])
x <- xFromCol(x,1:ncol(x))
if (add) {
image(x=x, y=y, z=z, col=col, axes=FALSE, xlab=xlab, ylab=ylab, add=TRUE, ...)
} else {
if (legend) {
# draw the legend strip first, then open a new region for the map
graphics::par(pty = "m", plt=plotArea$legendPlot, err = -1)
.plotLegend(z, col, legend.at=legend.at, ...)
graphics::par(new=TRUE, plt=plotArea$mainPlot)
}
image(x=x, y=y, z=z, col=col, axes=FALSE, xlab=xlab, ylab=ylab, asp=asp, ...)
graphics::axis(1, at=xticks, cex.axis=0.67, tcl=-0.3, mgp=c(3, 0.25, 0))
# rotate y-axis labels when they get long
las = ifelse(max(.nchar(as.character(yticks)))> 5, 0, 1)
graphics::axis(2, at=yticks, las = las, cex.axis=0.67, tcl=-0.3, mgp=c(3, 0.75, 0) )
#graphics::axis(3, at=xticks, labels=FALSE, lwd.ticks=0)
#graphics::axis(4, at=yticks, labels=FALSE, lwd.ticks=0)
if (box) graphics::box()
}
}
#.plot2(r, legend=T)
# .plot2(r, legend.at='quantile')
# plot(wrld_simpl, add=T)
|
# plot_scen_types.r
####
# California Natural and Working Lands Carbon and Greenhouse Gas
# Model (CALAND) Copyright (c) 2020, The Regents of the University of
# California, through Lawrence Berkeley National Laboratory (subject to
# receipt of any required approvals from the U.S. Dept. of Energy). All
# rights reserved.
# If you have questions about your rights to use or distribute this software,
# please contact Berkeley Lab's Intellectual Property Office at
# IPO@lbl.gov.
#
# NOTICE. This Software was developed under funding from the U.S. Department
# of Energy and the U.S. Government consequently retains certain rights. As
# such, the U.S. Government has been granted for itself and others acting on
# its behalf a paid-up, nonexclusive, irrevocable, worldwide license in the
# Software to reproduce, distribute copies to the public, prepare derivative
# works, and perform publicly and display publicly, and to permit others to do so.
####
# This software and its associated input data are licensed under a modified BSD open source license
# Please see license.txt for details
# put a variable for specified land types within a region and ownership on the same plot, for each scenario in
# the diagnostic output file
# this script reads the csv files produced by plot_caland()
############################################ Overview of `plot_scen_types()` ###########################################
# The `plot_scen_types()` function is designed to use the .csv outputs from `plot_caland()` to plot individual land types
# for a designated variable and all available scenarios in the .csv file. You also specify which land types, region
# (or all regions aggregated), and ownership (or all ownerships aggregated) to plot for each scenario.
############################################## Inputs to `plot_scen_types()` ############################################
# The .csv input files, created by `plot_caland()`, are assumed to be in caland/`data_dir`/`figdir`, in the appropriate
# land type and ownership directories. The `plot_scen_types()` output files will go directly into
# caland/`data_dir`/`figdir`/`reg`/`own`, which should be the same as used in `plot_caland()`.
########################################### Arguments in `plot_scen_types()`#############################################
# 1. `varname`: name of variable to plot. See the outputs from `plot_caland()`; the name is between the land type and
# "_output" in these file names; do not include the surrounding "_" characters.
# 2. `ylabel`: Label for the y-axis; it should indicate the units and whether it is a difference from baseline.
# 3. `data_dir`: The path to the directory containing the `CALAND()` output files, which also contains `figdir`; do not
# include the "/" character at the end; default is `data_dir = "./outputs"`.
# 4. `file_tag`: Additional tag to file name to note what regions, landtypes, and/or ownerships are included; default
# is "" (nothing added).
# 5. `reg`: Vector of region names to plot; default is:
# `reg = c("All_region", "Central_Coast", "Central_Valley", "Delta",
# "Deserts", "Eastside", "Klamath", "North_Coast", "Sierra_Cascades", "South_Coast")`.
# 6. `lt`: Vector of land types to plot; can be any number of available land types; default is:
# `lt = c("Water", "Ice", "Barren", "Sparse", "Desert", "Shrubland", "Grassland", "Savanna", "Woodland", "Forest",
# "Meadow", "Coastal_marsh", "Fresh_marsh", "Cultivated", "Developed_all")`.
# 7. `own`: Vector of ownerships to plot; can be any number of available ownerships; default is: `own = c("All_own")`
# 8. `figdir`: The directory within `data_dir` containing the .csv data to plot, and where to save the figures that
# `plot_scen_types()` creates; do not include the "/" character at the end.
# Notes on `plot_scen_types()`:
# Plotting the Ocean region does not provide any comparison with land types because only Seagrass exists in the Ocean
# Seagrass has only a subset of the `plot_caland()` output files, so if including All_region make sure that the desired
# `varname` is available for Seagrass.
# Seagrass is not in the default land type list.
########################################### Outputs from `plot_scen_types()`#############################################
# Output files consist of a suite of graphs (.pdf) and corresponding data tables (.csv), which are written to
# caland/data_dir/figdir/ within each region directory, where data_dir and figdir are arguments to plot_scen_types().
# Naming of the .pdf and .csv filenames is automatic and determined from the varname argument, the scenarios present in
# the source data files, and an optional file_tag argument.
####################################################### start script ####################################################
# setwd("<your_path>/caland/")
# Work from the current directory; the script assumes it is launched from
# the caland/ root (see the commented setwd() hint above).
setwd("./")
# this enables java to use up to 4GB of memory for reading and writing excel files
options(java.parameters = "-Xmx4g" )
# Load all the required packages, stopping with installation advice if any
# is missing. require() returns FALSE instead of erroring when a package is
# absent, and attaches it on success -- so no extra library() call is needed.
libs <- c( "ggplot2", "grid", "RColorBrewer" )
for (i in libs) {
	if (!require(i, character.only = TRUE)) {
		cat("Couldn't load", i, "\n")
		stop( "Use install.packages() to download this library\nOr use the GUI Package Installer\nInclude dependencies, and install it for local user if you do not have root access\n" )
	}
}
# Plot one variable for individual land types, for every scenario found in
# the plot_caland() .csv outputs, for each requested region and ownership.
#
# varname   variable name: the part of the plot_caland() output file name
#           between the land type and "_output" (no surrounding "_")
# ylabel    y-axis label (units / whether difference from baseline)
# data_dir  directory holding the CALAND() outputs; no trailing "/"
# file_tag  optional tag appended to output file names ("" adds nothing)
# reg, lt, own  vectors of regions, land types, and ownerships to plot
# figdir    sub-directory of data_dir with the .csv inputs; no trailing "/"
#
# Side effects: writes one .pdf and one .csv per region/ownership/scenario
# combination into data_dir/figdir/<region>/. Returns nothing useful.
plot_scen_types <- function(varname, ylabel, data_dir = "./outputs", file_tag="", reg = c("All_region", "Central_Coast", "Central_Valley", "Delta", "Deserts", "Eastside", "Klamath", "North_Coast", "Sierra_Cascades", "South_Coast"), lt = c("Water", "Ice", "Barren", "Sparse", "Desert", "Shrubland", "Grassland", "Savanna", "Woodland", "Forest", "Meadow", "Coastal_marsh", "Fresh_marsh", "Cultivated", "Developed_all"), own = c("All_own"), figdir = "figures") {
	outputdir <- paste0(data_dir, "/")
	# need to add an underscore if an optional file tag is input
	if (nchar(file_tag) > 0) {
		added_file_tag <- paste0("_", file_tag)
	} else {
		added_file_tag <- file_tag
	}
	# iterate the vectors directly: the original 1:length() loops would run
	# (erroneously) once on empty input vectors
	for (reg_lab in reg) {
		for (own_lab in own) {
			# collect the per-land-type tables in a list and bind once at the
			# end, instead of growing a data frame with rbind() in the loop
			df_list <- list()
			for (lt_lab in lt) {
				# Seagrass exists only in Ocean and All_region and (All_own or Other_fed);
				# Seagrass is the only land type in Ocean
				if ((reg_lab != "All_region" && reg_lab != "Ocean" && lt_lab != "Seagrass") || reg_lab == "All_region" || (reg_lab == "Ocean" && lt_lab == "Seagrass" && (own_lab == "All_own" || own_lab == "Other_fed"))) {
					fname <- paste0(outputdir, figdir, "/", reg_lab, "/", lt_lab, "/", own_lab, "/", reg_lab, "_", lt_lab, "_", own_lab, "_", varname, "_output.csv")
					in_df <- read.csv(fname)
					in_df <- in_df[in_df$Region == reg_lab & in_df$Ownership == own_lab, ]
					df_list[[length(df_list) + 1]] <- in_df
				}
			} # end loop over reading land types
			all_df <- do.call(rbind, df_list)
			scen_names <- unique(all_df$Scenario)
			# make a plot for each scenario within the file
			for (scen in scen_names) {
				plot_df <- all_df[all_df$Scenario == scen, ]
				# drop "_output" and everything after it for the label; fall back
				# to the full name when the tag is absent (regexpr() returns -1,
				# which would otherwise produce an empty label)
				tag_pos <- regexpr("_output", scen)
				if (tag_pos > 0) {
					scen_lab <- substr(scen, 1, tag_pos - 1)
				} else {
					scen_lab <- scen
				}
				title <- paste(reg_lab, own_lab, scen_lab, varname)
				# plot all land types for this scenario on a single figure
				FIGURE_DIMS <- list(dpi=300, width=2560/300, height=1440/300)
				theme_set(theme_bw())
				out_file <- paste0(outputdir, figdir, "/", reg_lab, "/", reg_lab, "_", own_lab, "_", scen, "_", varname, "_comp", added_file_tag, ".pdf")
				p <- ( ggplot(plot_df, aes(Year, Value, color=Land_Type))
					+ scale_shape_manual(values=1:nlevels(plot_df$Land_Type))
					+ geom_line(size = 0.3)
					+ geom_point(aes(shape= Land_Type), size = 1.5)
					+ ylab( ylabel )
					+ theme(legend.key.size = unit(0.4,"cm"))
					+ ggtitle(title)
					)
				p$save_args <- FIGURE_DIMS
				do.call( ggsave, c(list(filename=out_file, plot=p), p$save_args ) )
				# also write the plotted data next to the figure
				out_file <- paste0(outputdir, figdir, "/", reg_lab, "/", reg_lab, "_", own_lab, "_", scen, "_", varname, "_comp", added_file_tag, ".csv")
				write.csv(plot_df, out_file, quote=FALSE, row.names=FALSE)
			} # end loop over scenarios
		} # end loop over ownerships
	} # end loop over regions
} # end plot_scen_types()
####
# California Natural and Working Lands Carbon and Greenhouse Gas
# Model (CALAND) Copyright (c) 2020, The Regents of the University of
# California, through Lawrence Berkeley National Laboratory (subject to
# receipt of any required approvals from the U.S. Dept. of Energy). All
# rights reserved.
# If you have questions about your rights to use or distribute this software,
# please contact Berkeley Lab's Intellectual Property Office at
# IPO@lbl.gov.
#
# NOTICE. This Software was developed under funding from the U.S. Department
# of Energy and the U.S. Government consequently retains certain rights. As
# such, the U.S. Government has been granted for itself and others acting on
# its behalf a paid-up, nonexclusive, irrevocable, worldwide license in the
# Software to reproduce, distribute copies to the public, prepare derivative
# works, and perform publicly and display publicly, and to permit others to do so.
####
# This software and its associated input data are licensed under a modified BSD open source license
# Please see license.txt for details
# put a variable for specified land types within a region and ownership on the same plot, for each scenario in
# the diagnostic output file
# this script reads the csv files produced by plot_caland()
############################################ Overview of `plot_scen_types()` ###########################################
# The `plot_scen_types()` function is designed to use the .csv outputs from `plot_caland()` to plot individual land types
# for a designated variable and all available scenarios in the .csv file. You also specify which land types, region
# (or all regions aggregated), and ownership (or all ownerships aggregated) to plot for each scenario.
############################################## Inputs to `plot_scen_types()` ############################################
# The .csv input files, created by `plot_caland()`, are assumed to be in caland/`data_dir`/`figdir`, in the appropriate
# land type and ownership directories. The `plot_scen_types()` output files will go directly into
# caland/`data_dir`/`figdir`/`reg`/`own`, which should be the same as used in `plot_caland()`.
########################################### Arguments in `plot_scen_types()`#############################################
# 1. `varname`: name of variable to plot. See the outputs from `plot_caland()`; the name is between the land type and
# "_output" in these file names; do not include the surrounding "_" characters.
# 2. `ylabel`: Label for the y-axis; it should indicate the units and whether it is a difference from baseline.
# 3. `data_dir`: The path to the directory containing the `CALAND()` output files, which also contains `figdir`; do not
# include the "/" character at the end; default is `data_dir = "./outputs"`.
# 4. `file_tag`: Additional tag to file name to note what regions, landtypes, and/or ownerships are included; default
# is "" (nothing added).
# 5. `reg`: Vector of region names to plot; default is:
# `reg = c("All_region", "Central_Coast", "Central_Valley", "Delta",
# "Deserts", "Eastside", "Klamath", "North_Coast", "Sierra_Cascades", "South_Coast")`.
# 6. `lt`: Vector of land types to plot; can be any number of available land types; default is:
# `lt = c("Water", "Ice", "Barren", "Sparse", "Desert", "Shrubland", "Grassland", "Savanna", "Woodland", "Forest",
# "Meadow", "Coastal_marsh", "Fresh_marsh", "Cultivated", "Developed_all")`.
# 7. `own`: Vector of ownerships to plot; can be any number of available ownerships; default is: `own = c("All_own")`
# 8. `figdir`: The directory within `data_dir` containing the .csv data to plot, and where to save the figures that
# `plot_scen_types()` creates; do not include the "/" character at the end.
# Notes on `plot_scen_types()`:
# Plotting the Ocean region does not provide any comparison with land types because only Seagrass exists in the Ocean
# Seagrass has only a subset of the `plot_caland()` output files, so if including All_region make sure that the desired
# `varname` is available for Seagrass.
# Seagrass is not in the default land type list.
########################################### Outputs from `plot_scen_types()`#############################################
# Output files consist of a suite of graphs (.pdf) and corresponding data tables (.csv), which are written to
# caland/data_dir/figdir/ within each region directory, where data_dir and figdir are arguments to plot_scen_types().
# Naming of the .pdf and .csv filenames is automatic and determined from the varname argument, the scenarios present in
# the source data files, and an optional file_tag argument.
####################################################### start script ####################################################
# setwd("<your_path>/caland/")
# Keep the working directory as-is; the script expects to be launched from
# the caland/ root (see the commented setwd() hint above).
setwd("./")
# this enables java to use up to 4GB of memory for reading and writing excel files
options(java.parameters = "-Xmx4g" )
# Load all the required packages
libs <- c( "ggplot2", "grid", "RColorBrewer" )
# NOTE(review): require() already attaches the package on success, so the
# trailing library() call is redundant (harmless); T should ideally be TRUE.
for( i in libs ) {
if( !require( i, character.only=T ) ) {
cat( "Couldn't load", i, "\n" )
stop( "Use install.packages() to download this library\nOr use the GUI Package Installer\nInclude dependencies, and install it for local user if you do not have root access\n" )
}
library( i, character.only=T )
}
# Plot one variable for individual land types, for every scenario found in
# the plot_caland() .csv outputs, for each requested region and ownership.
# Writes one .pdf and one .csv per region/ownership/scenario combination
# into data_dir/figdir/<region>/.  See the header comments above for full
# argument descriptions. Duplicate of the definition earlier in this file.
plot_scen_types <- function(varname, ylabel, data_dir = "./outputs", file_tag="", reg = c("All_region", "Central_Coast", "Central_Valley", "Delta", "Deserts", "Eastside", "Klamath", "North_Coast", "Sierra_Cascades", "South_Coast"), lt = c("Water", "Ice", "Barren", "Sparse", "Desert", "Shrubland", "Grassland", "Savanna", "Woodland", "Forest", "Meadow", "Coastal_marsh", "Fresh_marsh", "Cultivated", "Developed_all"), own = c("All_own"), figdir = "figures") {
outputdir = paste0(data_dir, "/")
num_reg = length(reg)
num_lt = length(lt)
num_own = length(own)
# need to add an underscore if an optional file tag is input
if (nchar(file_tag) > 0) { added_file_tag = paste0("_", file_tag)
} else {added_file_tag = file_tag}
# NOTE(review): 1:num_reg-style loops run once with index 1,0 when the
# vector is empty; seq_along() would be safer.
for (r in 1:num_reg) {
reg_lab = reg[r]
# loop over the ownerships
for (o in 1:num_own) {
own_lab = own[o]
all_df = NULL
# loop over land types to get data
# NOTE(review): rbind() inside the loop grows all_df quadratically;
# accumulating in a list and binding once would scale better.
for (i in 1:num_lt) {
lt_lab = lt[i]
# Seagrass exists only in Ocean and All_region and (All_own or Other_fed)
# Seagrass is the only land type in Ocean, and the only ownerships are All_own or Other_fed
if ((reg_lab != "All_region" & reg_lab != "Ocean" & lt_lab != "Seagrass") | reg_lab == "All_region" | (reg_lab == "Ocean" & lt_lab == "Seagrass" & (own_lab == "All_own" | own_lab == "Other_fed"))) {
fname = paste0(outputdir, figdir, "/", reg_lab, "/", lt_lab, "/", own_lab, "/", reg_lab, "_", lt_lab, "_", own_lab, "_", varname, "_output.csv")
in_df = read.csv(fname)
# keep only the rows for this region/ownership aggregation level
in_df = in_df[in_df$Region == reg_lab & in_df$Ownership == own_lab,]
all_df = rbind(all_df, in_df)
}
} # end for i loop over reading land types
scen_names = unique(all_df$Scenario)
num_scen = length(scen_names)
# make plot for each scenario within the file
for ( i in 1:num_scen) {
plot_df = all_df[all_df$Scenario == scen_names[i],]
# drop output forward for label - watch out for scenario distinctions beyond the "output" tag
# NOTE(review): if "_output" is absent, regexpr() returns -1 and the
# label silently becomes "" -- confirm all scenario names carry the tag.
scen_lab = substr(scen_names[i], 1, regexpr("_output", scen_names[i])-1)
title = paste(reg_lab, own_lab, scen_lab, varname)
# plot the data on a single plot
FIGURE_DIMS <- list(dpi=300, width=2560/300, height=1440/300)
theme_set(theme_bw())
out_file = paste0(outputdir, figdir, "/", reg_lab, "/", reg_lab, "_", own_lab, "_", scen_names[i], "_", varname, "_comp", added_file_tag, ".pdf")
p <- ( ggplot(plot_df, aes(Year, Value, color=Land_Type))
+ scale_shape_manual(values=1:nlevels(plot_df$Land_Type))
+ geom_line(size = 0.3)
+ geom_point(aes(shape= Land_Type), size = 1.5)
+ ylab( ylabel )
+ theme(legend.key.size = unit(0.4,"cm"))
+ ggtitle(title)
)
# stash the figure dimensions on the plot object for the ggsave call below
p$save_args <- FIGURE_DIMS
#print(p)
do.call( ggsave, c(list(filename=out_file, plot=p), p$save_args ) )
# also write the plotted data next to the figure
out_file = paste0(outputdir, figdir, "/", reg_lab, "/", reg_lab, "_", own_lab, "_", scen_names[i], "_", varname, "_comp", added_file_tag, ".csv")
write.csv(plot_df, out_file, quote=FALSE, row.names=FALSE)
} # end i loop over scenarios
} # end for o loop over ownerships
} # end for r loop over regions
} # end plot_scen_types()
# x: integer coordinates (paired element-wise with `y` below) tracing a
# closed outline for plotting; presumably the Africa continent shape --
# TODO confirm against the plotting code that consumes these vectors.
x<-c(121,116,114,110,105,99,96,95,93,92,95,94,88,87,82,81,79,76,74,69,70,71,68,58,53,52,48,44,45,41,40,37,34,29,26,29,31,33,34,30,28,31,27,24,22,21,
26,25,28,27,34,35,39,41,47,49,51,55,57,59,62,64,71,76,79,84,87,90,101,102,107,111,115,117,119,122,125,126,129,134,139,142,146,145,146,143,142,
145,144,147,152,153,155,158,160,163,162,166,162,165,169,163,159,157,158,156,155,154,156,155,158,161,162,164,171,174,173,174,173,175,176,179,180,
182,183,182,189,188,187,195,198,201,206,207,211,213,215,217,220,225,227,229,231,234,238,243,244,248,249,250,249,252,260,265,267,266,264,261,266,
274,275,277,278,282,284,286,288,287,288,285,289,288,285,286,288,286,285,283,282,281,290,294,297,300,304,303,308,310,312,316,320,322,326,327,332,
331,333,334,332,335,339,341,336,335,334,332,329,327,325,323,319,315,313,309,305,302,300,301,298,296,295,292,288,283,281,279,280,282,277,274,273,
271,270,266,264,263,267,266,261,260,256,254,255,251,249,250,244,243,241,232,230,227,225,224,221,220,219,217,213,211,204,201,197,199,197,193,190,
188,177,178,174,173,174,168,161,158,157,154,150,148,146,143,139,130,129,127,125)
# y: companion coordinates for `x` above (same length, paired element-wise).
y<-c(325,322,321,316,317,314,312,313,312,313,318,320,316,313,312,307,306,307,304,302,299,296,292,290,286,282,280,277,273,271,268,267,263,260,256,
249,250,246,244,241,240,236,233,231,232,231,224,219,214,210,208,206,205,196,197,193,192,195,189,186,185,183,189,190,191,187,188,187,189,192,
196,195,196,193,190,195,194,192,191,187,188,182,181,175,170,168,162,158,156,154,153,151,150,145,138,136,129,126,123,119,117,109,108,106,104,
101,96,95,92,90,88,87,84,76,71,69,65,61,59,52,47,43,40,41,36,34,29,24,21,19,20,18,17,16,21,24,23,16,17,21,20,24,28,29,34,37,39,42,50,54,55,59,
61,63,64,67,74,78,85,86,89,90,94,93,96,99,100,107,111,113,119,123,127,131,134,138,140,141,146,149,152,155,159,160,165,169,171,172,177,178,183,
190,189,187,191,195,199,202,204,205,204,206,210,214,215,214,216,221,220,214,209,208,213,212,211,213,214,217,221,222,228,230,229,231,236,237,
239,240,247,248,255,256,259,261,262,265,270,272,273,276,278,279,280,282,285,291,295,298,299,297,298,296,299,301,300,299,300,299,301,304,303,
304,299,295,294,296,295,298,297,293,292,293,297,299,297,299,305,306,305,308,309,305,308,310,309,316,317)
# x1: appears to be `x` shifted by one position (first element dropped, a
# new final point appended), presumably for drawing line segments between
# consecutive outline points -- TODO confirm.
x1<-c(116,114,110,105,99,96,95,93,92,95,94,88,87,82,81,79,76,74,69,70,71,68,58,53,52,48,44,45,41,40,37,34,29,26,29,31,33,34,30,28,31,27,24,22,21,
26,25,28,27,34,35,39,41,47,49,51,55,57,59,62,64,71,76,79,84,87,90,101,102,107,111,115,117,119,122,125,126,129,134,139,142,146,145,146,143,
142,145,144,147,152,153,155,158,160,163,162,166,162,165,169,163,159,157,158,156,155,154,156,155,158,161,162,164,171,174,173,174,173,175,176,
179,180,182,183,182,189,188,187,195,198,201,206,207,211,213,215,217,220,225,227,229,231,234,238,243,244,248,249,250,249,252,260,265,267,266,
264,261,266,274,275,277,278,282,284,286,288,287,288,285,289,288,285,286,288,286,285,283,282,281,290,294,297,300,304,303,308,310,312,316,320,
322,326,327,332,331,333,334,332,335,339,341,336,335,334,332,329,327,325,323,319,315,313,309,305,302,300,301,298,296,295,292,288,283,281,279,
280,282,277,274,273,271,270,266,264,263,267,266,261,260,256,254,255,251,249,250,244,243,241,232,230,227,225,224,221,220,219,217,213,211,204,
201,197,199,197,193,190,188,177,178,174,173,174,168,161,158,157,154,150,148,146,143,139,130,129,127,125,122)
# y1: companion shifted coordinates for `x1` above (same one-position lag
# relative to `y`).
y1<-c(322,321,316,317,314,312,313,312,313,318,320,316,313,312,307,306,307,304,302,299,296,292,290,286,282,280,277,273,271,268,267,263,260,256,249,
250,246,244,241,240,236,233,231,232,231,224,219,214,210,208,206,205,196,197,193,192,195,189,186,185,183,189,190,191,187,188,187,189,192,196,
195,196,193,190,195,194,192,191,187,188,182,181,175,170,168,162,158,156,154,153,151,150,145,138,136,129,126,123,119,117,109,108,106,104,101,
96,95,92,90,88,87,84,76,71,69,65,61,59,52,47,43,40,41,36,34,29,24,21,19,20,18,17,16,21,24,23,16,17,21,20,24,28,29,34,37,39,42,50,54,55,59,61,
63,64,67,74,78,85,86,89,90,94,93,96,99,100,107,111,113,119,123,127,131,134,138,140,141,146,149,152,155,159,160,165,169,171,172,177,178,183,
190,189,187,191,195,199,202,204,205,204,206,210,214,215,214,216,221,220,214,209,208,213,212,211,213,214,217,221,222,228,230,229,231,236,237,
239,240,247,248,255,256,259,261,262,265,270,272,273,276,278,279,280,282,285,291,295,298,299,297,298,296,299,301,300,299,300,299,301,304,303,
304,299,295,294,296,295,298,297,293,292,293,297,299,297,299,305,306,305,308,309,305,308,310,309,316,317,324)
# nigerX: decimal coordinates forming a closed polygon (first and last
# values are both 14.235); presumably longitudes of the Niger country
# border, paired with nigerY below -- TODO confirm.
nigerX <-c( 14.235 , 14.25 , 14.08 , 13.76 , 13.59 , 13.27 , 13.152 , 13.212 , 13.396 , 13.444 , 13.391 , 13.362 , 13.297 , 13.016 , 12.864 , 12.876 ,
12.832 , 12.911 , 13.12 , 13.361 , 13.384 , 13.364 , 13.243 , 13.172 , 13.177 , 13.085 , 13.152 , 13.326 , 13.538 , 13.639 , 13.72 , 13.805 ,
13.884 , 13.879 , 13.787 , 13.743 , 13.69 , 13.601 , 13.55 , 13.499 , 13.157 , 12.908 , 12.719 , 12.566 , 12.139 , 11.937 , 11.684 , 11.921 ,
12.151 , 12.341 , 12.435 , 12.37 , 12.237 , 12.114 , 12.221 , 12.414 , 12.609 , 12.79 , 12.738 , 12.736 , 12.818 , 12.985 , 13.175 , 13.279 ,
13.395 , 13.514 , 13.556 , 13.541 , 13.719 , 13.986 , 14.352 , 14.579 , 14.831 , 15.066 , 15.049 , 14.975 , 15.017 , 15.099 , 15.108 , 15.129 ,
15.181 , 15.292 , 15.359 , 15.56 , 15.757 , 16.043 , 16.41 , 16.951 , 17.06 , 17.538 , 18.122 , 18.711 , 19.243 , 19.331 , 19.422 , 19.571 ,
19.834 , 20.181 , 20.553 , 20.886 , 21.25 , 21.516 , 21.822 , 22.069 , 22.383 , 22.664 , 22.968 , 23.306 , 23.597 , 23.541 , 23.522 , 23.537 ,
23.506 , 23.482 , 23.439 , 23.206 , 23.047 , 23.144 , 22.576 , 21.982 , 21.66 , 21.255 , 21.11 , 21.152 , 21.034 , 20.936 , 20.701 , 20.76 ,
20.488 , 19.959 , 19.452 , 18.961 , 18.459 , 17.983 , 17.429 , 17.061 , 16.741 , 16.464 , 16.108 , 15.762 , 15.388 , 15.136 , 15.008 , 14.715 ,
14.627 , 14.555 , 14.235)
# nigerY: companion coordinates for nigerX above; also closed (first and
# last values are both 13.514). Presumably latitudes -- TODO confirm.
nigerY <- c( 13.514 ,13.42 ,13.08 ,13.322 ,12.958 ,12.565 ,12.336 ,12.13 ,11.712 ,11.472 ,10.82 ,10.426 ,10.2 ,9.95 ,9.799 ,9.443 ,9.047 ,8.774 ,8.398 ,
8.017 ,7.828 ,7.668 ,7.497 ,7.306 ,7.123 ,7.009 ,6.829 ,6.674 ,6.54 ,6.401 ,6.036 ,5.753 ,5.464 ,5.282 ,5.206 ,4.823 ,4.491 ,4.297 ,4.256 ,
4.115 ,4.071 ,3.978 ,3.817 ,3.643 ,3.639 ,3.615 ,3.545 ,3.273 ,3.005 ,2.808 ,2.707 ,2.567 ,2.454 ,2.439 ,2.121 ,2.173 ,2.193 ,2.089 ,1.933 ,
1.744 ,1.42 ,1.213 ,1.086 ,1.104 ,1.138 ,1.114 ,0.973 ,0.714 ,0.709 ,0.507 ,0.247 ,0.247 ,0.222 ,0.104 ,0.29 ,0.614 ,0.824 ,1.016 ,1.429 ,
1.857 ,2.35 ,2.828 ,3.311 ,3.348 ,3.785 ,3.976 ,4.034 ,4.113 ,4.29 ,4.292 ,4.301 ,4.29 ,4.27 ,4.594 ,4.937 ,5.626 ,6.018 ,6.479 ,7.039 ,
7.481 ,8.005 ,8.475 ,8.967 ,9.449 ,9.996 ,10.446 ,11.022 ,11.508 ,12.045 ,12.073 ,12.553 ,12.994 ,13.534 ,14.001 ,14.198 ,14.57 ,14.933 ,
15.26 ,15.334 ,15.229 ,15.187 ,15.362 ,15.505 ,15.569 ,15.651 ,15.484 ,15.616 ,15.741 ,16.007 ,15.755 ,15.654 ,15.582 ,15.555 ,15.554 ,15.58 ,
15.586 ,15.538 ,15.254 ,14.988 ,14.579 ,14.243 ,13.981 ,13.971 ,13.853 ,13.781 ,13.647 ,13.514)
| /R_code/africa_shape.R | permissive | Albertios/master-thesis | R | false | false | 7,179 | r | x<-c(121,116,114,110,105,99,96,95,93,92,95,94,88,87,82,81,79,76,74,69,70,71,68,58,53,52,48,44,45,41,40,37,34,29,26,29,31,33,34,30,28,31,27,24,22,21,
26,25,28,27,34,35,39,41,47,49,51,55,57,59,62,64,71,76,79,84,87,90,101,102,107,111,115,117,119,122,125,126,129,134,139,142,146,145,146,143,142,
145,144,147,152,153,155,158,160,163,162,166,162,165,169,163,159,157,158,156,155,154,156,155,158,161,162,164,171,174,173,174,173,175,176,179,180,
182,183,182,189,188,187,195,198,201,206,207,211,213,215,217,220,225,227,229,231,234,238,243,244,248,249,250,249,252,260,265,267,266,264,261,266,
274,275,277,278,282,284,286,288,287,288,285,289,288,285,286,288,286,285,283,282,281,290,294,297,300,304,303,308,310,312,316,320,322,326,327,332,
331,333,334,332,335,339,341,336,335,334,332,329,327,325,323,319,315,313,309,305,302,300,301,298,296,295,292,288,283,281,279,280,282,277,274,273,
271,270,266,264,263,267,266,261,260,256,254,255,251,249,250,244,243,241,232,230,227,225,224,221,220,219,217,213,211,204,201,197,199,197,193,190,
188,177,178,174,173,174,168,161,158,157,154,150,148,146,143,139,130,129,127,125)
y<-c(325,322,321,316,317,314,312,313,312,313,318,320,316,313,312,307,306,307,304,302,299,296,292,290,286,282,280,277,273,271,268,267,263,260,256,
249,250,246,244,241,240,236,233,231,232,231,224,219,214,210,208,206,205,196,197,193,192,195,189,186,185,183,189,190,191,187,188,187,189,192,
196,195,196,193,190,195,194,192,191,187,188,182,181,175,170,168,162,158,156,154,153,151,150,145,138,136,129,126,123,119,117,109,108,106,104,
101,96,95,92,90,88,87,84,76,71,69,65,61,59,52,47,43,40,41,36,34,29,24,21,19,20,18,17,16,21,24,23,16,17,21,20,24,28,29,34,37,39,42,50,54,55,59,
61,63,64,67,74,78,85,86,89,90,94,93,96,99,100,107,111,113,119,123,127,131,134,138,140,141,146,149,152,155,159,160,165,169,171,172,177,178,183,
190,189,187,191,195,199,202,204,205,204,206,210,214,215,214,216,221,220,214,209,208,213,212,211,213,214,217,221,222,228,230,229,231,236,237,
239,240,247,248,255,256,259,261,262,265,270,272,273,276,278,279,280,282,285,291,295,298,299,297,298,296,299,301,300,299,300,299,301,304,303,
304,299,295,294,296,295,298,297,293,292,293,297,299,297,299,305,306,305,308,309,305,308,310,309,316,317)
x1<-c(116,114,110,105,99,96,95,93,92,95,94,88,87,82,81,79,76,74,69,70,71,68,58,53,52,48,44,45,41,40,37,34,29,26,29,31,33,34,30,28,31,27,24,22,21,
26,25,28,27,34,35,39,41,47,49,51,55,57,59,62,64,71,76,79,84,87,90,101,102,107,111,115,117,119,122,125,126,129,134,139,142,146,145,146,143,
142,145,144,147,152,153,155,158,160,163,162,166,162,165,169,163,159,157,158,156,155,154,156,155,158,161,162,164,171,174,173,174,173,175,176,
179,180,182,183,182,189,188,187,195,198,201,206,207,211,213,215,217,220,225,227,229,231,234,238,243,244,248,249,250,249,252,260,265,267,266,
264,261,266,274,275,277,278,282,284,286,288,287,288,285,289,288,285,286,288,286,285,283,282,281,290,294,297,300,304,303,308,310,312,316,320,
322,326,327,332,331,333,334,332,335,339,341,336,335,334,332,329,327,325,323,319,315,313,309,305,302,300,301,298,296,295,292,288,283,281,279,
280,282,277,274,273,271,270,266,264,263,267,266,261,260,256,254,255,251,249,250,244,243,241,232,230,227,225,224,221,220,219,217,213,211,204,
201,197,199,197,193,190,188,177,178,174,173,174,168,161,158,157,154,150,148,146,143,139,130,129,127,125,122)
y1<-c(322,321,316,317,314,312,313,312,313,318,320,316,313,312,307,306,307,304,302,299,296,292,290,286,282,280,277,273,271,268,267,263,260,256,249,
250,246,244,241,240,236,233,231,232,231,224,219,214,210,208,206,205,196,197,193,192,195,189,186,185,183,189,190,191,187,188,187,189,192,196,
195,196,193,190,195,194,192,191,187,188,182,181,175,170,168,162,158,156,154,153,151,150,145,138,136,129,126,123,119,117,109,108,106,104,101,
96,95,92,90,88,87,84,76,71,69,65,61,59,52,47,43,40,41,36,34,29,24,21,19,20,18,17,16,21,24,23,16,17,21,20,24,28,29,34,37,39,42,50,54,55,59,61,
63,64,67,74,78,85,86,89,90,94,93,96,99,100,107,111,113,119,123,127,131,134,138,140,141,146,149,152,155,159,160,165,169,171,172,177,178,183,
190,189,187,191,195,199,202,204,205,204,206,210,214,215,214,216,221,220,214,209,208,213,212,211,213,214,217,221,222,228,230,229,231,236,237,
239,240,247,248,255,256,259,261,262,265,270,272,273,276,278,279,280,282,285,291,295,298,299,297,298,296,299,301,300,299,300,299,301,304,303,
304,299,295,294,296,295,298,297,293,292,293,297,299,297,299,305,306,305,308,309,305,308,310,309,316,317,324)
nigerX <-c( 14.235 , 14.25 , 14.08 , 13.76 , 13.59 , 13.27 , 13.152 , 13.212 , 13.396 , 13.444 , 13.391 , 13.362 , 13.297 , 13.016 , 12.864 , 12.876 ,
12.832 , 12.911 , 13.12 , 13.361 , 13.384 , 13.364 , 13.243 , 13.172 , 13.177 , 13.085 , 13.152 , 13.326 , 13.538 , 13.639 , 13.72 , 13.805 ,
13.884 , 13.879 , 13.787 , 13.743 , 13.69 , 13.601 , 13.55 , 13.499 , 13.157 , 12.908 , 12.719 , 12.566 , 12.139 , 11.937 , 11.684 , 11.921 ,
12.151 , 12.341 , 12.435 , 12.37 , 12.237 , 12.114 , 12.221 , 12.414 , 12.609 , 12.79 , 12.738 , 12.736 , 12.818 , 12.985 , 13.175 , 13.279 ,
13.395 , 13.514 , 13.556 , 13.541 , 13.719 , 13.986 , 14.352 , 14.579 , 14.831 , 15.066 , 15.049 , 14.975 , 15.017 , 15.099 , 15.108 , 15.129 ,
15.181 , 15.292 , 15.359 , 15.56 , 15.757 , 16.043 , 16.41 , 16.951 , 17.06 , 17.538 , 18.122 , 18.711 , 19.243 , 19.331 , 19.422 , 19.571 ,
19.834 , 20.181 , 20.553 , 20.886 , 21.25 , 21.516 , 21.822 , 22.069 , 22.383 , 22.664 , 22.968 , 23.306 , 23.597 , 23.541 , 23.522 , 23.537 ,
23.506 , 23.482 , 23.439 , 23.206 , 23.047 , 23.144 , 22.576 , 21.982 , 21.66 , 21.255 , 21.11 , 21.152 , 21.034 , 20.936 , 20.701 , 20.76 ,
20.488 , 19.959 , 19.452 , 18.961 , 18.459 , 17.983 , 17.429 , 17.061 , 16.741 , 16.464 , 16.108 , 15.762 , 15.388 , 15.136 , 15.008 , 14.715 ,
14.627 , 14.555 , 14.235)
nigerY <- c( 13.514 ,13.42 ,13.08 ,13.322 ,12.958 ,12.565 ,12.336 ,12.13 ,11.712 ,11.472 ,10.82 ,10.426 ,10.2 ,9.95 ,9.799 ,9.443 ,9.047 ,8.774 ,8.398 ,
8.017 ,7.828 ,7.668 ,7.497 ,7.306 ,7.123 ,7.009 ,6.829 ,6.674 ,6.54 ,6.401 ,6.036 ,5.753 ,5.464 ,5.282 ,5.206 ,4.823 ,4.491 ,4.297 ,4.256 ,
4.115 ,4.071 ,3.978 ,3.817 ,3.643 ,3.639 ,3.615 ,3.545 ,3.273 ,3.005 ,2.808 ,2.707 ,2.567 ,2.454 ,2.439 ,2.121 ,2.173 ,2.193 ,2.089 ,1.933 ,
1.744 ,1.42 ,1.213 ,1.086 ,1.104 ,1.138 ,1.114 ,0.973 ,0.714 ,0.709 ,0.507 ,0.247 ,0.247 ,0.222 ,0.104 ,0.29 ,0.614 ,0.824 ,1.016 ,1.429 ,
1.857 ,2.35 ,2.828 ,3.311 ,3.348 ,3.785 ,3.976 ,4.034 ,4.113 ,4.29 ,4.292 ,4.301 ,4.29 ,4.27 ,4.594 ,4.937 ,5.626 ,6.018 ,6.479 ,7.039 ,
7.481 ,8.005 ,8.475 ,8.967 ,9.449 ,9.996 ,10.446 ,11.022 ,11.508 ,12.045 ,12.073 ,12.553 ,12.994 ,13.534 ,14.001 ,14.198 ,14.57 ,14.933 ,
15.26 ,15.334 ,15.229 ,15.187 ,15.362 ,15.505 ,15.569 ,15.651 ,15.484 ,15.616 ,15.741 ,16.007 ,15.755 ,15.654 ,15.582 ,15.555 ,15.554 ,15.58 ,
15.586 ,15.538 ,15.254 ,14.988 ,14.579 ,14.243 ,13.981 ,13.971 ,13.853 ,13.781 ,13.647 ,13.514)
|
\name{Spgrr}
\alias{Spgrr}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Use Spgr to estimate subgroups with repeated measures with both z
and x}
\description{
Estimates subgroup structure from repeated-measures data using the Spgr
method, where the covariates in \code{z} share common coefficients across
individuals and the covariates in \code{x} have individual-specific
coefficients.
}
\usage{
Spgrr(indexy,y, z, x, weights, betam0, ...)
}
\arguments{
\item{indexy}{numeric vector; index of the repeated measures, sorted in ascending order.}
\item{y}{ response variable.}
\item{z}{ explanatory variables which have the same coefficients among individuals.}
\item{x}{ explanatory variables which have individual coefficients.}
\item{weights}{weights associated with pairwise penalties.}
\item{betam0}{initial values for betas.}
\item{nu}{penalty parameter; default value is 1.}
\item{gam}{parameter in SCAD penalty, default value is 1.}
\item{lam}{tuning parameter, default value is 0.5.}
\item{maxiter}{maximum number of iterations.}
\item{tolabs}{absolute tolerance, default value is 1e-4. }
\item{tolrel}{relative tolerance, default value is 1e-2. }
}
\details{
The details of absolute tolerance and relative tolerance can be found in Boyd, S., Parikh, N., Chu, E., Peleato, B., and Eckstein, J. (2011). Distributed optimization and statistical learning via the alternating direction method of multipliers.
}
\value{
\item{beta}{estimated individual coefficients.}
\item{betaest}{estimated group coefficients.}
\item{eta}{estimated common coefficients for z.}
\item{sig2}{estimated variance.}
\item{group}{estimated group.}
\item{deltam}{coefficient differences.}
\item{flag}{code for convergence, 0 means converged.}
\item{rm}{primal residuals.}
\item{sm}{dual residuals.}
\item{tolpri}{primal tolerance.}
\item{toldual}{dual tolerance.}
\item{niteration}{number of iterations.}
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
data(simdat2)
y <- simdat2$y
z <- as.matrix(simdat2[,paste("z",1:5,sep="")])
x <- as.matrix(simdat2[,paste("x",1:2,sep="")])
nr <- 49
betam0 <- cal_initialr(indexy = simdat2$indexy,y = simdat2$y,z = z,x = x )
wt <- rep(1, nr*(nr-1)/2)
res1 <- Spgrr(indexy = simdat2$indexy, y, z, x, wt, betam0, lam = 0.1,maxiter = 1000)
groupest1 <- getgroup(res1$deltam, nr)
BICcr(obj = res1,indexy = simdat2$indexy,y,z,x,c0 = 0.2)
data(Cmat2)
ordervalue <- getorder(Cmat2)
wts <- exp(0.7*(1-ordervalue))
res2 <- Spgrr(indexy = simdat2$indexy,y, z, x, wts, betam0, lam = 0.2, maxiter = 1000)
groupest2 <- getgroup(res2$deltam, nr) ### the same as res2$group
BICcr(obj = res2,indexy = simdat2$indexy,y,z,x,c0 = 0.2)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }% use one of RShowDoc("KEYWORDS")
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
| /man/Spgrr.Rd | no_license | wangx23/Spgr | R | false | false | 2,981 | rd | \name{Spgrr}
\alias{Spgrr}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Use Spgr to estimate subgroups with repeated measures with both z
and x}
\description{
Estimates subgroup structure from repeated-measures data using the Spgr
method, where the covariates in \code{z} share common coefficients across
individuals and the covariates in \code{x} have individual-specific
coefficients.
}
\usage{
Spgrr(indexy,y, z, x, weights, betam0, ...)
}
\arguments{
\item{indexy}{numeric vector; index of the repeated measures, sorted in ascending order.}
\item{y}{ response variable.}
\item{z}{ explanatory variables which have the same coefficients among individuals.}
\item{x}{ explanatory variables which have individual coefficients.}
\item{weights}{weights associated with pairwise penalties.}
\item{betam0}{initial values for betas.}
\item{nu}{penalty parameter; default value is 1.}
\item{gam}{parameter in SCAD penalty, default value is 1.}
\item{lam}{tuning parameter, default value is 0.5.}
\item{maxiter}{maximum number of iterations.}
\item{tolabs}{absolute tolerance, default value is 1e-4. }
\item{tolrel}{relative tolerance, default value is 1e-2. }
}
\details{
The details of absolute tolerance and relative tolerance can be found in Boyd, S., Parikh, N., Chu, E., Peleato, B., and Eckstein, J. (2011). Distributed optimization and statistical learning via the alternating direction method of multipliers.
}
\value{
\item{beta}{estimated individual coefficients.}
\item{betaest}{estimated group coefficients.}
\item{eta}{estimated common coefficients for z.}
\item{sig2}{estimated variance.}
\item{group}{estimated group.}
\item{deltam}{coefficient differences.}
\item{flag}{code for convergence, 0 means converged.}
\item{rm}{primal residuals.}
\item{sm}{dual residuals.}
\item{tolpri}{primal tolerance.}
\item{toldual}{dual tolerance.}
\item{niteration}{number of iterations.}
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
data(simdat2)
y <- simdat2$y
z <- as.matrix(simdat2[,paste("z",1:5,sep="")])
x <- as.matrix(simdat2[,paste("x",1:2,sep="")])
nr <- 49
betam0 <- cal_initialr(indexy = simdat2$indexy,y = simdat2$y,z = z,x = x )
wt <- rep(1, nr*(nr-1)/2)
res1 <- Spgrr(indexy = simdat2$indexy, y, z, x, wt, betam0, lam = 0.1,maxiter = 1000)
groupest1 <- getgroup(res1$deltam, nr)
BICcr(obj = res1,indexy = simdat2$indexy,y,z,x,c0 = 0.2)
data(Cmat2)
ordervalue <- getorder(Cmat2)
wts <- exp(0.7*(1-ordervalue))
res2 <- Spgrr(indexy = simdat2$indexy,y, z, x, wts, betam0, lam = 0.2, maxiter = 1000)
groupest2 <- getgroup(res2$deltam, nr) ### the same as res2$group
BICcr(obj = res2,indexy = simdat2$indexy,y,z,x,c0 = 0.2)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }% use one of RShowDoc("KEYWORDS")
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
#' Squysh the system date/time into an alphanumeric-only string
#'
#' Strips every non-alphanumeric character (spaces, dashes, colons) from
#' \code{Sys.time()}, producing a compact timestamp (e.g.
#' \code{"20240131154502"}) that is safe to embed in file names.
#'
#' @return A length-one character vector containing only alphanumeric
#'   characters.
#' @export
#'
#' @examples
#' squysh_time()
squysh_time <- function() {
  # base::gsub produces the same result as stringr::str_replace_all here
  # and removes the previously undeclared stringr dependency; Sys.time()
  # is coerced to character ("YYYY-MM-DD HH:MM:SS") before stripping.
  gsub("[^[:alnum:]]", "", Sys.time())
}
| /R/squysh_time.R | no_license | brentscott93/biophysr | R | false | false | 194 | r | #' Prints the system date/time in only numbers (no spaces or punctuation)
#'
#' @return
#' @export
#'
#' @examples
squysh_time <- function() {
  # Return the current system date/time with every non-alphanumeric
  # character removed (e.g. "2024-01-31 15:45:02" -> "20240131154502"),
  # handy for building file names. base::gsub replaces the previously
  # undeclared stringr dependency (str_replace_all) with identical output.
  gsub("[^[:alnum:]]", "", Sys.time())
}
|
## ---------------------------------------------------------------------------
## MCMC sampler benchmark for the 'mhp' model (nimble).
## For each sampler configuration and chain length, records run time and the
## minimum per-parameter effective sample size per second of run time.
## ---------------------------------------------------------------------------
library(nimble, lib.loc = '~/Documents/')
nimbleOptions(MCMCprogressBar = FALSE)
library(coda)
setwd('~/github/hybridBlockSamplers')
source('data/modelData.R')            # expected to provide the <modelName>* objects retrieved below
source('AFSS_to_RW_block_sampler.R')  # expected to register the custom AFSS_to_RW_block sampler
##
## number of replicate MCMC runs per (sampler, nIter) combination
##nreps <- 1
nreps <- 10
##
## result accumulators; the initial all-NA placeholder row is dropped
## inside the main loop once real rows have been appended
compareDF <- data.frame(sampler = NA, nfa = NA, nIter = NA, runTime = NA, minEfficiency = NA)
allEffDF <- data.frame(sampler = NA, nfa = NA, nIter = NA, runTime = NA, repNum = NA, param = NA, eff = NA)
## table of sampler configurations to benchmark; nfa = number of factor
## adaptations (only used by the AFSS_to_RW_block hybrid sampler)
allSamplersTable <- data.frame(sampler = 'RW_block', nfa = NA)
allSamplersTable <- rbind(allSamplersTable,
                          expand.grid(sampler = 'RW_block_goodCov', nfa = NA))
allSamplersTable <- rbind(allSamplersTable,
                          expand.grid(sampler = 'AF_slice',
                                      nfa = NA))
allSamplersTable <- rbind(allSamplersTable,
                          expand.grid(sampler = 'AFSS_to_RW_block',
                                      nfa = c(1,2,4,8)))
##
modelName <- 'mhp'
##
## look up the model and its inputs by name from the sourced objects
model <- get(paste0(modelName,'Model'))
code <- get(paste0(modelName, 'Code'))
params <- get(paste0(modelName,'Params'))
constants <- get(paste0(modelName, 'Consts'))
inits <- get(paste0(modelName,'Initial'))
data <- get(paste0(modelName, 'Data'))
propCov <- get(paste0(modelName, 'GoodPropCov'))
goodScale <- get(paste0(modelName, 'GoodScale'))
##iterVals <- get(paste0(modelName, 'NumIters'))
##
## chain lengths to benchmark; capped at 10000 for the 'mhp' model
iterVals <- c(1000, 2000, 5000, 10000, 20000, 50000, 100000)
if(modelName == 'mhp') iterVals <- c(1000, 2000, 5000, 10000)
##
cmodel <- compileNimble(model)
##
## main benchmarking loop: one pass per sampler configuration
for(samplerRowNum in 1:dim(allSamplersTable)[1]){
  samplerRow <- allSamplersTable[samplerRowNum,]
  sampler <- as.character(samplerRow$sampler)
  print(samplerRow)
  ## build an MCMC configuration that replaces the default samplers on the
  ## target parameters with the configuration under test
  conf <- configureMCMC(model)
  conf$removeSamplers(params)
  if(sampler == 'RW_block'){
    conf$addSampler(params, type = sampler, control = list())
  }
  else if(sampler == 'RW_block_goodCov'){
    ## adaptive RW block sampler seeded with a known-good proposal
    ## covariance and scale
    conf$addSampler(params, type = 'RW_block', control = list(propCov = propCov, scale = goodScale))
  }
  else if(sampler == 'AF_slice'){
    conf$addSampler(params, type = 'AF_slice', control = list())
  }
  else if(sampler == 'AFSS_to_RW_block'){
    ## hybrid sampler: automated-factor slice sampling that transitions to
    ## a RW block sampler after samplerRow$nfa factor adaptations
    conf$addSampler(params, type = 'AFSS_to_RW_block',
                    control = list(AF_sliceControl = list(sliceWidths = 'oneVec',
                        sliceAdaptFactorMaxIter = 15000, sliceAdaptFactorInterval = 1000,
                        sliceAdaptWidthMaxIter = 512, sliceAdaptWidthTolerance = 0.1, sliceMaxSteps = 100),
                        RWcontrol = list(propCov = diag(length(params)), scale = 1,
                            adaptInterval = 200, adaptScaleOnly = FALSE, adaptive = TRUE),
                        numFactorAdaptations = samplerRow$nfa))
  }
  mcmc <- buildMCMC(conf)
  cmcmc <- compileNimble(mcmc)
  cmodel$setInits(inits)
  for(nIter in iterVals){
    message(paste0('using nIter = ', as.character(nIter)))
    mcmcTime <- numeric(nreps)
    mcmcEff <- numeric(nreps)
    cat('rep: ')
    for(iter in 1:nreps){
      cat(paste0(iter, ' '))
      mcmcTime[iter] <- system.time(cmcmc$run(nIter))[['elapsed']]
      ##browser() ## ?????
      ## effective sample size per second of run time (+.01 guards against
      ## division by a near-zero elapsed time)
      effValues <- coda::effectiveSize(coda::as.mcmc(as.matrix(cmcmc$mvSamples)))/(mcmcTime[iter]+.01)
      ## drop these specific parameters from the efficiency summary
      ## (NOTE(review): presumably fixed/unidentifiable in this model -- confirm)
      effValues <- effValues[!(names(effValues) %in% c('alpha[1]','be[2]','bep[4]','bp[4]'))]
      effNames <- names(effValues)
      effValues <- as.numeric(effValues)
      mcmcEff[iter] <- min(effValues)
      ## append one summary row and the full per-parameter efficiencies
      newRow <- cbind(samplerRow, data.frame(nIter = nIter, runTime = mcmcTime[iter], minEfficiency = mcmcEff[iter]))
      compareDF <- rbind(compareDF, newRow)
      newAllEffDF <- data.frame(sampler = sampler, nfa = samplerRow$nfa, nIter = nIter,
                                runTime = mcmcTime[iter], repNum = iter, param = effNames, eff = effValues,
                                stringsAsFactors = FALSE)
      allEffDF <- rbind(allEffDF, newAllEffDF)
    }
    cat('
')
  }
  ## drop the all-NA placeholder first row once real results exist
  if(all(is.na(compareDF[1,]))) compareDF <- compareDF[-1, ]
  if(all(is.na(allEffDF[1,]))) allEffDF <- allEffDF[-1, ]
}
##
## persist benchmark results for later analysis/plotting
filename <- paste0('results/', modelName, 'Results.RData')
save(compareDF, allEffDF, file = filename)
| /run_mhp.R | no_license | danielturek/hybridBlockSamplers | R | false | false | 4,374 | r |
library(nimble, lib.loc = '~/Documents/')
nimbleOptions(MCMCprogressBar = FALSE)
library(coda)
setwd('~/github/hybridBlockSamplers')
source('data/modelData.R')
source('AFSS_to_RW_block_sampler.R')
##
##nreps <- 1
nreps <- 10
##
compareDF <- data.frame(sampler = NA, nfa = NA, nIter = NA, runTime = NA, minEfficiency = NA)
allEffDF <- data.frame(sampler = NA, nfa = NA, nIter = NA, runTime = NA, repNum = NA, param = NA, eff = NA)
allSamplersTable <- data.frame(sampler = 'RW_block', nfa = NA)
allSamplersTable <- rbind(allSamplersTable,
expand.grid(sampler = 'RW_block_goodCov', nfa = NA))
allSamplersTable <- rbind(allSamplersTable,
expand.grid(sampler = 'AF_slice',
nfa = NA))
allSamplersTable <- rbind(allSamplersTable,
expand.grid(sampler = 'AFSS_to_RW_block',
nfa = c(1,2,4,8)))
##
modelName <- 'mhp'
##
model <- get(paste0(modelName,'Model'))
code <- get(paste0(modelName, 'Code'))
params <- get(paste0(modelName,'Params'))
constants <- get(paste0(modelName, 'Consts'))
inits <- get(paste0(modelName,'Initial'))
data <- get(paste0(modelName, 'Data'))
propCov <- get(paste0(modelName, 'GoodPropCov'))
goodScale <- get(paste0(modelName, 'GoodScale'))
##iterVals <- get(paste0(modelName, 'NumIters'))
##
iterVals <- c(1000, 2000, 5000, 10000, 20000, 50000, 100000)
if(modelName == 'mhp') iterVals <- c(1000, 2000, 5000, 10000)
##
cmodel <- compileNimble(model)
##
for(samplerRowNum in 1:dim(allSamplersTable)[1]){
samplerRow <- allSamplersTable[samplerRowNum,]
sampler <- as.character(samplerRow$sampler)
print(samplerRow)
conf <- configureMCMC(model)
conf$removeSamplers(params)
if(sampler == 'RW_block'){
conf$addSampler(params, type = sampler, control = list())
}
else if(sampler == 'RW_block_goodCov'){
conf$addSampler(params, type = 'RW_block', control = list(propCov = propCov, scale = goodScale))
}
else if(sampler == 'AF_slice'){
conf$addSampler(params, type = 'AF_slice', control = list())
}
else if(sampler == 'AFSS_to_RW_block'){
conf$addSampler(params, type = 'AFSS_to_RW_block',
control = list(AF_sliceControl = list(sliceWidths = 'oneVec',
sliceAdaptFactorMaxIter = 15000, sliceAdaptFactorInterval = 1000,
sliceAdaptWidthMaxIter = 512, sliceAdaptWidthTolerance = 0.1, sliceMaxSteps = 100),
RWcontrol = list(propCov = diag(length(params)), scale = 1,
adaptInterval = 200, adaptScaleOnly = FALSE, adaptive = TRUE),
numFactorAdaptations = samplerRow$nfa))
}
mcmc <- buildMCMC(conf)
cmcmc <- compileNimble(mcmc)
cmodel$setInits(inits)
for(nIter in iterVals){
message(paste0('using nIter = ', as.character(nIter)))
mcmcTime <- numeric(nreps)
mcmcEff <- numeric(nreps)
cat('rep: ')
for(iter in 1:nreps){
cat(paste0(iter, ' '))
mcmcTime[iter] <- system.time(cmcmc$run(nIter))[['elapsed']]
##browser() ## ?????
effValues <- coda::effectiveSize(coda::as.mcmc(as.matrix(cmcmc$mvSamples)))/(mcmcTime[iter]+.01)
effValues <- effValues[!(names(effValues) %in% c('alpha[1]','be[2]','bep[4]','bp[4]'))]
effNames <- names(effValues)
effValues <- as.numeric(effValues)
mcmcEff[iter] <- min(effValues)
newRow <- cbind(samplerRow, data.frame(nIter = nIter, runTime = mcmcTime[iter], minEfficiency = mcmcEff[iter]))
compareDF <- rbind(compareDF, newRow)
newAllEffDF <- data.frame(sampler = sampler, nfa = samplerRow$nfa, nIter = nIter,
runTime = mcmcTime[iter], repNum = iter, param = effNames, eff = effValues,
stringsAsFactors = FALSE)
allEffDF <- rbind(allEffDF, newAllEffDF)
}
cat('
')
}
if(all(is.na(compareDF[1,]))) compareDF <- compareDF[-1, ]
if(all(is.na(allEffDF[1,]))) allEffDF <- allEffDF[-1, ]
}
##
filename <- paste0('results/', modelName, 'Results.RData')
save(compareDF, allEffDF, file = filename)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/export-catch.R
\name{export_catch}
\alias{export_catch}
\title{Create files with Freezer trawler and Shoreside catch in the format required
for direct pasting into the iSCAM data file}
\usage{
export_catch(
ct,
areas = c("3[CD]+", "5[ABCDE]+"),
years = 1996:2021,
divisor = 1e+06,
digits = 4,
write_files = TRUE,
single_fleet = FALSE,
fns = c(paste0("catch-ft-", Sys.Date(), ".txt"), paste0("catch-ss-", Sys.Date(),
".txt")),
middle_text = c(gear = 1, group = 1, area = 1, sex = 0, type = 1),
...
)
}
\arguments{
\item{ct}{Output from \code{\link[gfdata:get_data]{gfdata::get_catch()}}}
\item{areas}{Area list as required by \code{\link[gfplot:plot_catch]{gfplot::tidy_catch()}}}
\item{years}{Years to include in the output}
\item{divisor}{A value to divide all catch values by}
\item{digits}{Number of significant figures to output}
\item{write_files}{Logical. If \code{TRUE}, write the catch tables to files. If
\code{FALSE}, return a list of two data frames}
\item{single_fleet}{Logical. If \code{TRUE}, the catch will be for one fleet,
which contains the sum of all catch for both fleets}
\item{fns}{A vector of two filenames}
\item{middle_text}{A vector of middle column data to insert in the output}
\item{...}{Arguments passed to \code{\link[gfplot:plot_catch]{gfplot::tidy_catch()}} and in turn, to
\code{\link[gfplot:plot_catch]{gfplot::set_fishing_year()}}}
}
\value{
Nothing if \code{write_files} is \code{TRUE}. A list of two data frames if
\code{write_files} is \code{FALSE}.
}
\description{
Create files with Freezer trawler and Shoreside catch in the format required
for direct pasting into the iSCAM data file
}
| /man/export_catch.Rd | no_license | pbs-assess/arrowtooth | R | false | true | 1,738 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/export-catch.R
\name{export_catch}
\alias{export_catch}
\title{Create files with Freezer trawler and Shoreside catch in the format required
for direct pasting into the iSCAM data file}
\usage{
export_catch(
ct,
areas = c("3[CD]+", "5[ABCDE]+"),
years = 1996:2021,
divisor = 1e+06,
digits = 4,
write_files = TRUE,
single_fleet = FALSE,
fns = c(paste0("catch-ft-", Sys.Date(), ".txt"), paste0("catch-ss-", Sys.Date(),
".txt")),
middle_text = c(gear = 1, group = 1, area = 1, sex = 0, type = 1),
...
)
}
\arguments{
\item{ct}{Output from \code{\link[gfdata:get_data]{gfdata::get_catch()}}}
\item{areas}{Area list as required by \code{\link[gfplot:plot_catch]{gfplot::tidy_catch()}}}
\item{years}{Years to include in the output}
\item{divisor}{A value to divide all catch values by}
\item{digits}{Number of significant figures to output}
\item{write_files}{Logical. If \code{TRUE}, write the catch tables to files. If
\code{FALSE}, return a list of two data frames}
\item{single_fleet}{Logical. If \code{TRUE}, the catch will be for one fleet,
which contains the sum of all catch for both fleets}
\item{fns}{A vector of two filenames}
\item{middle_text}{A vector of middle column data to insert in the output}
\item{...}{Arguments passed to \code{\link[gfplot:plot_catch]{gfplot::tidy_catch()}} and in turn, to
\code{\link[gfplot:plot_catch]{gfplot::set_fishing_year()}}}
}
\value{
Nothing if \code{write_files} is \code{TRUE}. A list of two data frames if
\code{write_files} is \code{FALSE}.
}
\description{
Create files with Freezer trawler and Shoreside catch in the format required
for direct pasting into the iSCAM data file
}
|
# Compute pairwise "Kevin Bacon" (generational) distances between soybean
# varieties from a genealogy tree, plot the distances as heatmaps (plain and
# dendrogram-ordered), and save the distance table to CSV.
#
# Requires: phyViz (processTreeGraph/getPath/getDegree) and ./tree.rda,
# which provides the `tree` data frame with $child and $parent columns.
library(phyViz)
library(reshape2)
library(parallel)
library(dplyr)
library(plyr)    # FIX: ldply() is used below but plyr was never attached
library(doMC)
library(ggplot2) # FIX: qplot() and themes are used but ggplot2 was never attached
library(ggdendro)
library(grid)

load("./tree.rda")
treeGraph <- processTreeGraph(tree)

# Unique variety names, dropping cross labels ("A x B") and missing values.
varieties <- unique(c(tree$child, tree$parent))
varieties <- subset(varieties, !grepl(" x ", varieties) & !is.na(varieties))
varieties <- varieties[order(varieties)]

# Pair matrix: NA below the diagonal (skip duplicate pairs), 0 on the
# diagonal (distance of a variety to itself).
kevinbacon <- matrix(1, nrow = length(varieties), ncol = length(varieties))
rownames(kevinbacon) <- varieties
colnames(kevinbacon) <- varieties
kevinbacon[lower.tri(kevinbacon)] <- NA
# BUG FIX: the original `kevinbacon[diag(kevinbacon)] <- 0` indexed the matrix
# by the diagonal *values* (all 1), zeroing only element [1]; this sets the
# actual diagonal.
diag(kevinbacon) <- 0
kevinbacon <- kevinbacon[order(rownames(kevinbacon)), order(colnames(kevinbacon))]

# Long format: one row per (Var1, Var2) pair; 1000 is a "not yet computed"
# sentinel distance. (The stringsAsFactors argument originally passed to
# melt() is not a melt() argument and was silently ignored; the explicit
# as.character() calls below do the real conversion. A previous rowwise()
# grouping here was a no-op and has been removed.)
kevinbacon <- melt(kevinbacon, value.name = "degree")
kevinbacon$Var1 <- as.character(kevinbacon$Var1)
kevinbacon$Var2 <- as.character(kevinbacon$Var2)
kevinbacon$degree <- 1000

registerDoMC()
# Full genealogical path between every pair (parallel over pairs via foreach).
kevinbaconPaths <- ldply(seq_len(nrow(kevinbacon)), function(i) {
  data.frame(Var1 = kevinbacon$Var1[i],
             Var2 = kevinbacon$Var2[i],
             as.data.frame(getPath(kevinbacon$Var1[i], kevinbacon$Var2[i],
                                   treeGraph, tree = tree),
                           stringsAsFactors = FALSE),
             stringsAsFactors = FALSE)
}, .parallel = TRUE)
# Generational distance for every pair; getDegree() returns -1 when the two
# varieties are not connected.
kevinbacon$degree <- unlist(mclapply(seq_len(nrow(kevinbacon)), function(i) {
  getDegree(kevinbacon$Var1[i], kevinbacon$Var2[i], treeGraph, tree = tree)
}, mc.cores = 12, mc.preschedule = TRUE))

kevinbacon$degree[kevinbacon$degree < 0] <- NA  # -1 == unreachable pair
# Unclustered heatmap of the raw distances.
qplot(data = kevinbacon, x = Var1, y = Var2, geom = "tile", fill = degree) +
  theme(axis.text.x = element_text(angle = 90, hjust = 1),
        axis.title = element_blank()) +
  scale_fill_continuous("Generational\nDistance")
write.csv(kevinbacon, "kevinbaconTree.csv", row.names = FALSE)

# Re-read the saved table and build a dendrogram-ordered heatmap with
# marginal dendrograms.
kevinbacon <- read.csv("kevinbaconTree.csv", stringsAsFactors = FALSE)
baconmat <- acast(kevinbacon, Var1 ~ Var2, value.var = "degree")
baconmat[is.na(baconmat) | baconmat < 0] <- 1000  # large distance for unreachable pairs
dd.col <- rev(as.dendrogram(hclust(as.dist(baconmat), method = "ward.D")))
dd.row <- as.dendrogram(hclust(as.dist(baconmat), method = "ward.D"))
col.ord <- labels(dd.col)
row.ord <- labels(dd.row)

ddata_x <- dendro_data(dd.row)
ddata_y <- dendro_data(dd.col)
dendro.multiplier <- 0.25  # shrink dendrogram height relative to the tiles
offset <- length(col.ord)  # x/y position where the marginal dendrograms begin
cutoff <- 250              # axis limit (crops the displayed region)
kevinbacon$Var1fac <- factor(kevinbacon$Var1, levels = col.ord)
kevinbacon$Var2fac <- factor(kevinbacon$Var2, levels = row.ord)
kevinbacon$degree[kevinbacon$degree < 0] <- NA
qplot(x = as.numeric(Var1fac), y = as.numeric(Var2fac), geom = "tile",
      fill = degree, data = kevinbacon) +
  theme_bw() +
  theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5)) +
  xlab("Variety 1") +
  ylab("Variety 2") +
  scale_x_continuous(expand = c(0, 1), breaks = 1:offset - 0.5,
                     labels = col.ord, limits = c(0, cutoff)) +
  scale_y_continuous(expand = c(0, 1), breaks = 1:offset - 0.5,
                     labels = row.ord, limits = c(0, cutoff)) +
  scale_fill_gradient("Generational\nDistance", low = "#ffffff", high = "#374f6b") +
  ggtitle('"Kevin Bacon" Distance between Soybean Varieties') +
  coord_equal() +
  geom_segment(data = segment(ddata_y),
               aes(x = x, y = y * dendro.multiplier + offset + 3,
                   xend = xend, yend = yend * dendro.multiplier + offset + 3),
               inherit.aes = FALSE) +
  geom_segment(data = segment(ddata_x),
               aes(x = y * dendro.multiplier + offset + 3, y = x,
                   xend = yend * dendro.multiplier + offset + 3, yend = xend),
               inherit.aes = FALSE)
# ggsave("KevinBaconDistance.png", width=12, height=12)
| /Shiny/Genealogy/kevinbacon.R | no_license | srvanderplas/USDA-soybeans | R | false | false | 3,747 | r |
# Compute pairwise "Kevin Bacon" (generational) distances between soybean
# varieties from a genealogy tree, plot them as heatmaps, and save the
# distance table to CSV. Requires phyViz and ./tree.rda (provides `tree`
# with $child and $parent columns).
# NOTE(review): duplicate of the same script earlier in this dump.
library(phyViz)
load("./tree.rda")
treeGraph <- processTreeGraph(tree)
# mygraph <- graph.data.frame(treeGraph, directed=F)
# Unique variety names, dropping cross labels ("A x B") and missing values.
varieties <- unique(c(tree$child,tree$parent))
varieties <- subset(varieties, !grepl(" x ", varieties) & !is.na(varieties))
varieties <- varieties[order(varieties)]
# Pair matrix: NA below the diagonal (skip duplicate pairs), 0 intended on
# the diagonal (distance of a variety to itself).
kevinbacon <- matrix(1, nrow=length(varieties), ncol=length(varieties))
row.names(kevinbacon) <- varieties
dimnames(kevinbacon)[[2]] <- varieties
kevinbacon[lower.tri(kevinbacon)] <- NA
# NOTE(review): BUG — this indexes the matrix by the diagonal *values* (all
# 1), so it only zeroes element [1]. Intended: diag(kevinbacon) <- 0
kevinbacon[diag(kevinbacon)] <- 0
kevinbacon <- kevinbacon[order(row.names(kevinbacon)), order(dimnames(kevinbacon)[[2]])]
library(reshape2)
# Long format: one row per (Var1, Var2) pair.
# NOTE(review): stringsAsFactors is not a melt() argument and is silently
# ignored; the as.character() calls below do the real conversion.
kevinbacon <- melt(kevinbacon, value.name = "degree", stringsAsFactors=FALSE)
kevinbacon$Var1 <- as.character(kevinbacon$Var1)
kevinbacon$Var2 <- as.character(kevinbacon$Var2)
kevinbacon$degree <- 1000   # sentinel: "not yet computed"
library(parallel)
library(dplyr)
# NOTE(review): rowwise() grouping is never used by the code that follows —
# this line is a no-op apart from changing the object's class.
kevinbacon <- kevinbacon %>% rowwise
library(doMC)
registerDoMC()
# Full genealogical path between every pair (parallel over pairs).
# NOTE(review): ldply() is from plyr, which is never attached here — confirm
# plyr is loaded elsewhere or add library(plyr).
kevinbaconPaths <- ldply(1:nrow(kevinbacon), function(i){data.frame(Var1=kevinbacon$Var1[i], Var2=kevinbacon$Var2[i], as.data.frame(getPath(kevinbacon$Var1[i], kevinbacon$Var2[i], treeGraph, tree=tree), stringsAsFactors=FALSE), stringsAsFactors=FALSE)}, .parallel=T)
# Generational distance for every pair; -1 marks unconnected varieties.
kevinbacon$degree <- unlist(mclapply(1:nrow(kevinbacon), function(i){
getDegree(kevinbacon$Var1[i], kevinbacon$Var2[i], treeGraph, tree=tree)
}, mc.cores=12, mc.preschedule=TRUE))
# kevinbacon$degree[kevinbacon$degree==-1] <-
#   sapply(which(kevinbacon$degree==-1), function(i){
#     getDegree(kevinbacon$Var1[i], kevinbacon$Var2[i], mygraph)
#   })
# kevinbacon <- kevinbacon[order(kevinbacon$Var1, kevinbacon$Var2),]
#
# kevinbacon$Var1 <- factor(kevinbacon$Var1, levels=varieties[order(varieties)])
# kevinbacon$Var2 <- factor(kevinbacon$Var2, levels=varieties[order(varieties, decreasing=T)])
kevinbacon$degree[kevinbacon$degree<0] <- NA   # -1 == unreachable pair
# Unclustered heatmap of the raw distances.
# NOTE(review): qplot()/themes require ggplot2, which is never attached here.
qplot(data=kevinbacon, x=Var1, y=Var2, geom="tile", fill=degree) + theme(axis.text.x=element_text(angle=90, hjust = 1), axis.title=element_blank()) + scale_fill_continuous("Generational\nDistance")
write.csv(kevinbacon, "kevinbaconTree.csv", row.names=FALSE)
# Re-read the saved table and build a dendrogram-ordered heatmap.
kevinbacon <- read.csv("kevinbaconTree.csv", stringsAsFactors=FALSE)
baconmat <- acast(kevinbacon, Var1 ~ Var2, value.var="degree")
baconmat[is.na(baconmat) | baconmat<0] <- 1000   # large distance for unreachable pairs
dd.col <- rev(as.dendrogram(hclust(as.dist(baconmat), method="ward.D")))
dd.row <- as.dendrogram(hclust(as.dist(baconmat), method="ward.D"))
col.ord <- labels(dd.col)
row.ord <- labels(dd.row)
library(ggdendro)
library(grid)
ddata_x <- dendro_data(dd.row)
ddata_y <- dendro_data(dd.col)
dendro.multiplier <- .25   # shrink dendrogram height relative to the tiles
offset <- length(col.ord)  # position where the marginal dendrograms begin
cutoff <- 250              # axis limit (crops the displayed region)
kevinbacon$Var1fac <- factor(kevinbacon$Var1, levels=col.ord)
kevinbacon$Var2fac <- factor(kevinbacon$Var2, levels=row.ord)
kevinbacon$degree[kevinbacon$degree<0] <- NA
# Clustered heatmap with the two dendrograms drawn as segments beyond the
# tile area (offset + 3), scaled down by dendro.multiplier.
qplot(x=as.numeric(Var1fac), y=as.numeric(Var2fac), geom="tile", fill=degree, data=kevinbacon) +
theme_bw() +
theme(axis.text.x=element_text(angle=90, hjust=1, vjust=.5)) +
xlab("Variety 1") +
ylab("Variety 2") +
scale_x_continuous(expand=c(0, 1), breaks=1:offset-.5, labels=col.ord, limits=c(0, cutoff)) +
scale_y_continuous(expand=c(0, 1), breaks=1:offset-.5, labels=row.ord, limits=c(0, cutoff)) +
scale_fill_gradient("Generational\nDistance",low="#ffffff", high="#374f6b") +
ggtitle('"Kevin Bacon" Distance between Soybean Varieties') +
coord_equal()+
geom_segment(data=segment(ddata_y), aes(x=x, y=y*dendro.multiplier+offset+3, xend=xend, yend=yend*dendro.multiplier+offset+3), inherit.aes=F) +
geom_segment(data=segment(ddata_x), aes(x=y*dendro.multiplier+offset+3, y=x, xend=yend*dendro.multiplier+offset+3, yend=xend), inherit.aes=F)
# ggsave("KevinBaconDistance.png", width=12, height=12)
|
library(shiny)
library(waiter)
library(pushbar)
library(grapher)
# Serve the local ./assets directory (CSS/JS/data) under the /assets URL.
shiny::addResourcePath("assets", "./assets")
# Read the build script so it can be displayed verbatim in the "code" panel.
code <- readLines("./script/script.R") %>%
  paste0(collapse = "\n")
load("pkgs.RData")  # provides `pkgs`: package names for the autocomplete box

# UI: a full-screen dependency graph with an autocomplete search box, a
# loading spinner, and two slide-in pushbar panels (source code on the left,
# "about" on the right).
ui <- fluidPage(
  title = "CRAN Dependency Network",
  tags$head(
    tags$link(rel = "stylesheet", type = "text/css", href = "./assets/css/prism.css"),
    tags$link(rel = "stylesheet", type = "text/css", href = "./assets/css/styles.css")
  ),
  use_waiter(),
  pushbar_deps(),
  tags$script(src = "./assets/js/prism.js"),
  # Full-screen spinner shown until the graph widget has been drawn.
  show_waiter_on_load(
    color = "#000",
    tagList(
      spin_folding_cube(),
      span("Loading dependency graph", style = "color:white;")
    )
  ),
  div(
    dqshiny::autocomplete_input("search", "Package", pkgs, placeholder = "e.g.: dplyr, data.table"),
    graphOutput("g", height = "100vh"),
    uiOutput("clicked"),
    div(
      id = "buttons",
      actionLink("code", "", icon = icon("code fa-lg")),
      actionLink("about", "", icon = icon("question fa-lg"))
    )
  ),
  # Left panel: the source code used to build the visualisation.
  pushbar(
    id = "code_bar",
    from = "left",
    class = "bars",
    h1("Source code"),
    p(
      "The visualisation is powered by the",
      tags$a("grapher package", href = "https://grapher.network/")
    ),
    style = "width:30%;",
    tags$pre(tags$code(class = "language-r", code))
  ),
  # Right panel: usage instructions and credits.
  pushbar(
    id = "about_bar",
    class = "bars",
    from = "right",
    h1("CRAN Dependency Graph"),
    p(
      "Each node is an R package on CRAN, connections represent dependencies",
      tags$code("Depends", class = "language-r"), tags$code("Imports", class = "language-r"),
      "and", tags$code("LinkingTo.", class = "language-r")
    ),
    p(
      "You can navigate the graph with the", tags$kbd("w"), tags$kbd("a"),
      tags$kbd("s"), tags$kbd("d"), "and the arrow keys (",
      tags$kbd(HTML("←")), tags$kbd(HTML("↑")), tags$kbd(HTML("→")),
      tags$kbd(HTML("↓")), ") to rotate the camera", tags$kbd("a"), tags$kbd("e"),
      "will rotate it."
    ),
    p("Click on a node to reveal more information about it."),
    p("Type the name of a package in the search box in the top left corner to zoom in on it."),
    p(
      "While all packages are visualised not all dependencies are, to avoid",
      "a hairball graph edges that are over a certain length are hidden. This",
      "allows keeping sight of smaller communities."
    ),
    p("You view the source used to build the visualisation", actionLink("code2", "here")),
    p(tags$a("with 💕 by John Coene", id = "footer", href = "https://john-coene.com")),
    style = "width:30%;"
  ),
  hide_waiter_on_drawn("g"),
  # BUG FIX: removed the trailing comma that followed this argument — a
  # trailing empty argument makes fluidPage() error when the tag list is
  # evaluated ("argument N is empty").
  tags$script(src = "./assets/js/mobile.js")
)
# Server: wires the graph widget to the search box, the info panels, the
# clicked-node overlay, and a desktop-only guard.
server <- function(input, output, session){
setup_pushbar()  # initialise the pushbar (slide-in panel) JavaScript
# Render the pre-built dependency graph from its JSON file.
output$g <- render_graph({
graph("./assets/data/graph.json")
})
# Zoom the camera onto the package typed in the search box.
observeEvent(input$search, {
graph_proxy("g") %>%
graph_focus_node(input$search, dist = -40)
})
# Open the left (source code) panel from either trigger link.
observeEvent(input$code, {
pushbar_open(id = "code_bar")
})
observeEvent(input$code2, {
pushbar_open(id = "code_bar")
})
# Open the right (about) panel.
observeEvent(input$about, {
pushbar_open(id = "about_bar")
})
# The package currently in focus (set by a click or by a search lookup).
focus <- reactiveValues(pkg = NULL)
observeEvent(input$g_node_click, {
focus$pkg <- input$g_node_click
})
observeEvent(input$g_retrieve_node, {
focus$pkg <- input$g_retrieve_node
})
# NOTE(review): second observer on input$search (see the focus-node observer
# above); both fire on every search — consider merging into one observeEvent.
observeEvent(input$search, {
graph_proxy("g") %>%
retrieve_node(input$search)
})
# Overlay showing the focused package and its reverse-dependency count.
output$clicked <- renderUI({
sel <- focus$pkg
if(is.null(sel))
return(span())
# Count incoming links only: drop edges that originate from the node itself.
deps <- sel$links %>%
dplyr::filter(fromId != sel$id) %>%
nrow()
tagList(
strong(sel$id, style = "color:white;"),
br(),
span("Reverse Dependencies:", prettyNum(deps, big.mark = ","), style = "color:white;")
)
})
# Block small screens: the visualisation is desktop-only.
observeEvent(input$screen_width, {
if(input$screen_width < 760)
showModal(
modalDialog(
title = NULL,
"Apologies, this website is only available on desktop 🖥️",
footer = NULL,
fade = FALSE
)
)
})
}
shinyApp(ui, server)
| /app.R | no_license | frdanconia/cran_dependency_graph | R | false | false | 4,107 | r | library(shiny)
library(waiter)
library(pushbar)
library(grapher)
# Serve the local ./assets directory (CSS/JS/data) under the /assets URL.
shiny::addResourcePath("assets", "./assets")
# Read the build script so it can be displayed verbatim in the "code" panel.
code <- readLines("./script/script.R") %>%
  paste0(collapse = "\n")
load("pkgs.RData")  # provides `pkgs`: package names for the autocomplete box

# UI: a full-screen dependency graph with an autocomplete search box, a
# loading spinner, and two slide-in pushbar panels (source code on the left,
# "about" on the right).
ui <- fluidPage(
  title = "CRAN Dependency Network",
  tags$head(
    tags$link(rel = "stylesheet", type = "text/css", href = "./assets/css/prism.css"),
    tags$link(rel = "stylesheet", type = "text/css", href = "./assets/css/styles.css")
  ),
  use_waiter(),
  pushbar_deps(),
  tags$script(src = "./assets/js/prism.js"),
  # Full-screen spinner shown until the graph widget has been drawn.
  show_waiter_on_load(
    color = "#000",
    tagList(
      spin_folding_cube(),
      span("Loading dependency graph", style = "color:white;")
    )
  ),
  div(
    dqshiny::autocomplete_input("search", "Package", pkgs, placeholder = "e.g.: dplyr, data.table"),
    graphOutput("g", height = "100vh"),
    uiOutput("clicked"),
    div(
      id = "buttons",
      actionLink("code", "", icon = icon("code fa-lg")),
      actionLink("about", "", icon = icon("question fa-lg"))
    )
  ),
  # Left panel: the source code used to build the visualisation.
  pushbar(
    id = "code_bar",
    from = "left",
    class = "bars",
    h1("Source code"),
    p(
      "The visualisation is powered by the",
      tags$a("grapher package", href = "https://grapher.network/")
    ),
    style = "width:30%;",
    tags$pre(tags$code(class = "language-r", code))
  ),
  # Right panel: usage instructions and credits.
  pushbar(
    id = "about_bar",
    class = "bars",
    from = "right",
    h1("CRAN Dependency Graph"),
    p(
      "Each node is an R package on CRAN, connections represent dependencies",
      tags$code("Depends", class = "language-r"), tags$code("Imports", class = "language-r"),
      "and", tags$code("LinkingTo.", class = "language-r")
    ),
    p(
      "You can navigate the graph with the", tags$kbd("w"), tags$kbd("a"),
      tags$kbd("s"), tags$kbd("d"), "and the arrow keys (",
      tags$kbd(HTML("←")), tags$kbd(HTML("↑")), tags$kbd(HTML("→")),
      tags$kbd(HTML("↓")), ") to rotate the camera", tags$kbd("a"), tags$kbd("e"),
      "will rotate it."
    ),
    p("Click on a node to reveal more information about it."),
    p("Type the name of a package in the search box in the top left corner to zoom in on it."),
    p(
      "While all packages are visualised not all dependencies are, to avoid",
      "a hairball graph edges that are over a certain length are hidden. This",
      "allows keeping sight of smaller communities."
    ),
    p("You view the source used to build the visualisation", actionLink("code2", "here")),
    p(tags$a("with 💕 by John Coene", id = "footer", href = "https://john-coene.com")),
    style = "width:30%;"
  ),
  hide_waiter_on_drawn("g"),
  # BUG FIX: removed the trailing comma that followed this argument — a
  # trailing empty argument makes fluidPage() error when the tag list is
  # evaluated ("argument N is empty").
  tags$script(src = "./assets/js/mobile.js")
)
# Server: wires the graph widget to the search box, the info panels, the
# clicked-node overlay, and a desktop-only guard.
server <- function(input, output, session){
setup_pushbar()  # initialise the pushbar (slide-in panel) JavaScript
# Render the pre-built dependency graph from its JSON file.
output$g <- render_graph({
graph("./assets/data/graph.json")
})
# Zoom the camera onto the package typed in the search box.
observeEvent(input$search, {
graph_proxy("g") %>%
graph_focus_node(input$search, dist = -40)
})
# Open the left (source code) panel from either trigger link.
observeEvent(input$code, {
pushbar_open(id = "code_bar")
})
observeEvent(input$code2, {
pushbar_open(id = "code_bar")
})
# Open the right (about) panel.
observeEvent(input$about, {
pushbar_open(id = "about_bar")
})
# The package currently in focus (set by a click or by a search lookup).
focus <- reactiveValues(pkg = NULL)
observeEvent(input$g_node_click, {
focus$pkg <- input$g_node_click
})
observeEvent(input$g_retrieve_node, {
focus$pkg <- input$g_retrieve_node
})
# NOTE(review): second observer on input$search (see the focus-node observer
# above); both fire on every search — consider merging into one observeEvent.
observeEvent(input$search, {
graph_proxy("g") %>%
retrieve_node(input$search)
})
# Overlay showing the focused package and its reverse-dependency count.
output$clicked <- renderUI({
sel <- focus$pkg
if(is.null(sel))
return(span())
# Count incoming links only: drop edges that originate from the node itself.
deps <- sel$links %>%
dplyr::filter(fromId != sel$id) %>%
nrow()
tagList(
strong(sel$id, style = "color:white;"),
br(),
span("Reverse Dependencies:", prettyNum(deps, big.mark = ","), style = "color:white;")
)
})
# Block small screens: the visualisation is desktop-only.
observeEvent(input$screen_width, {
if(input$screen_width < 760)
showModal(
modalDialog(
title = NULL,
"Apologies, this website is only available on desktop 🖥️",
footer = NULL,
fade = FALSE
)
)
})
}
shinyApp(ui, server)
|
# ALS Biplot (CrissCross / Gabriel's criss-cross regression): weighted
# low-rank approximation of a data matrix by alternating least squares.
#
# Arguments:
#   x       data matrix (n x p) to be approximated.
#   w       weight matrix of the same size as x (0 marks missing cells;
#           default: all weights 1).
#   dimens  number of biplot dimensions to extract.
#   a0, b0  optional starting row / column coordinate matrices; random
#           normal starts are generated when NULL.
#   maxiter maximum number of ALS iterations.
#   tol     convergence tolerance on the change in relative lack of fit.
#   addsvd  if TRUE, re-express the fitted matrix through an SVD so the
#           final coordinates are orthogonal.
#   lambda  ridge penalty added to the weighted normal equations.
#
# Value: a list of class "ContinuousBiplot" with coordinates and
# per-dimension row/column contributions (%).
#
# Depends on ginv() (MASS) and wcor() (weighted correlation, this package).
CrissCross <- function(x, w = matrix(1, dim(x)[1], dim(x)[2]), dimens = 2,
                       a0 = NULL, b0 = NULL, maxiter = 100, tol = 10e-5,
                       addsvd = TRUE, lambda = 0){
  n = dim(x)[1]
  p = dim(x)[2]
  x[which(w == 0)] = 0  # zero out cells with zero weight
  # BUG FIX: the original built DimNames with `for (i in 2:dimens)`, which
  # yields c("Dim 1", "Dim 2", "Dim 1") when dimens == 1 because 2:1 counts
  # down. seq_len() handles every dimens correctly.
  DimNames = paste("Dim", seq_len(dimens))
  # Random starting coordinates unless supplied by the caller.
  if (is.null(a0))
    a0 = matrix(rnorm(n * dimens), n, dimens)
  if (is.null(b0))
    b0 = matrix(rnorm(p * dimens), p, dimens)
  esp = (a0 %*% t(b0))  # current low-rank expectation of x
  resid = (x - esp)
  # Weighted relative lack of fit (0 = perfect fit).
  gfit0 = sum((resid * w)^2) / sum((x * w)^2)
  error = 0.1
  iter = 0
  a = a0
  b = b0
  # Alternating least squares: solve a weighted (ridge) regression for each
  # row's coordinates given b, then for each column's coordinates given a.
  while ((error > tol) && (iter < maxiter)){
    iter = iter + 1
    for (i in 1:n)
      a[i,] = ginv(t(b0) %*% diag(w[i,] + lambda) %*% b0) %*% t(b0) %*% diag(w[i,]) %*% (x[i,])
    for (j in 1:p)
      b[j,] = ginv(t(a) %*% diag(w[,j] + lambda) %*% a) %*% t(a) %*% diag(w[,j]) %*% (x[,j])
    esp = (a %*% t(b))
    resid = (x - esp)
    gfit = sum((resid * w)^2) / sum((x * w)^2)
    error = abs(gfit0 - gfit)  # convergence on the change in lack of fit
    a0 = a
    b0 = b
    gfit0 = gfit
    print(c(iter, error))  # iteration trace
  }
  if (addsvd){
    # Re-express the fitted matrix through an SVD so coordinates are
    # orthogonal and variance is concentrated in the leading dimensions.
    sol = svd(esp)
    # FIX: drop = FALSE and an explicit nrow keep matrix shapes when
    # dimens == 1 (bare diag(scalar) would build an identity matrix).
    a = sol$u[, 1:dimens, drop = FALSE] %*% sqrt(diag(sol$d[1:dimens], nrow = dimens))
    b = sol$v[, 1:dimens, drop = FALSE] %*% sqrt(diag(sol$d[1:dimens], nrow = dimens))
  }
  # Squared weighted correlation between data and per-dimension fit; used
  # below as row/column contributions (in %).
  rowfit = matrix(0, n, dimens)
  colfit = matrix(0, p, dimens)
  rownames(a) = rownames(x)
  colnames(a) = DimNames
  rownames(b) = colnames(x)
  colnames(b) = DimNames
  for (k in 1:dimens){
    esp = (a[,k] %*% t(b[,k]))  # rank-1 contribution of dimension k
    for (i in 1:n)
      rowfit[i,k] = wcor(x[i,], esp[i,], w[i,])^2
    for (j in 1:p)
      colfit[j,k] = wcor(x[,j], esp[,j], w[,j])^2
  }
  rownames(rowfit) = rownames(x)
  rownames(colfit) = colnames(x)
  colnames(rowfit) = DimNames
  colnames(colfit) = DimNames
  gfit = 1 - gfit  # convert lack of fit into goodness of fit
  CrissCross = list()
  CrissCross$Title = "ALS Biplot (CrissCross)"
  CrissCross$Weigths = w  # (sic) field name kept for backward compatibility
  CrissCross$nrows = n
  CrissCross$ncols = p
  CrissCross$nrowsSup = 0
  CrissCross$ncolsSup = 0
  CrissCross$Dimension = dimens
  # NOTE: EigenValues / Inertia / CumInertia / Structure are not computed by
  # this method; the original bare `CrissCross$EigenValues` etc. lines were
  # no-op expressions and have been removed (the fields were never set).
  CrissCross$RowCoordinates = a
  CrissCross$ColCoordinates = b
  CrissCross$RowContributions = rowfit * 100
  CrissCross$ColContributions = colfit * 100
  CrissCross$Scale_Factor = 1
  CrissCross$alpha = 1
  class(CrissCross) <- "ContinuousBiplot"
  return(CrissCross)
}
| /R/CrissCross.R | no_license | villardon/MultBiplotR | R | false | false | 2,232 | r | CrissCross <- function(x,w=matrix(1,dim(x)[1],dim(x)[2]),dimens=2, a0=NULL, b0=NULL, maxiter=100, tol=10e-5, addsvd=TRUE, lambda=0){
n=dim(x)[1]
p=dim(x)[2]
x[which(w==0)]=0   # zero out cells with zero weight
# NOTE(review): BUG for dimens == 1 — `2:dimens` counts down (2:1), giving
# DimNames c("Dim 1", "Dim 2", "Dim 1"); use paste("Dim", seq_len(dimens)).
DimNames = "Dim 1"
for (i in 2:dimens) DimNames = c(DimNames, paste("Dim", i))
# Random starting coordinates unless supplied by the caller.
if (is.null(a0))
a0=matrix(rnorm(n*dimens),n,dimens)
if (is.null(b0))
b0=matrix(rnorm(p*dimens),p,dimens)
esp=(a0 %*% t(b0))   # current low-rank expectation of x
resid=(x-esp)
# Weighted relative lack of fit (0 = perfect fit).
gfit0= sum((resid*w)^2)/sum((x*w)^2)
error=0.1
iter=0
a=a0
b=b0
# Alternating least squares: weighted (ridge) regressions for the row
# coordinates given b, then for the column coordinates given a.
while ((error>tol) & (iter<maxiter)){
iter=iter+1
for (i in 1:n)
a[i,]= ginv(t(b0) %*% diag(w[i,]+lambda) %*% b0) %*% t(b0) %*% diag(w[i,]) %*% (x[i,])
for (j in 1:p)
b[j,]=ginv(t(a) %*% diag(w[,j]+lambda) %*% a) %*% t(a) %*% diag(w[,j]) %*% (x[,j])
esp=(a %*% t(b))
resid=(x-esp)
gfit= sum((resid*w)^2)/sum((x*w)^2)
error = abs(gfit0-gfit)   # convergence on the change in lack of fit
a0=a
b0=b
gfit0=gfit
print(c(iter,error))   # iteration trace
}
if (addsvd){
# Re-express the fit through an SVD so the coordinates are orthogonal.
# NOTE(review): diag(sol$d[1:dimens]) misbehaves when dimens == 1 (bare
# diag(scalar) builds an identity matrix); an explicit nrow would fix it.
sol=svd(esp)
a=sol$u[,1:dimens] %*% sqrt(diag(sol$d[1:dimens]))
b=sol$v[,1:dimens] %*% sqrt(diag(sol$d[1:dimens]))
}
# Squared weighted correlation between data and per-dimension fit.
rowfit=matrix(0,n,dimens)
colfit=matrix(0,p,dimens)
dimfit=matrix(0,dimens,1)   # NOTE(review): never used below
rownames(a)=rownames(x)
colnames(a)=DimNames
rownames(b)=colnames(x)
colnames(b)=DimNames
for (k in 1:dimens){
esp=(a[,k] %*% t(b[,k]))   # rank-1 contribution of dimension k
for (i in 1:n)
rowfit[i,k]= wcor(x[i,], esp[i,], w[i,])^2
for (j in 1:p)
colfit[j,k]= wcor(x[,j], esp[,j], w[,j])^2
}
rownames(rowfit)=rownames(x)
rownames(colfit)=colnames(x)
colnames(rowfit)=DimNames
colnames(colfit)=DimNames
gfit=1-gfit   # convert lack of fit into goodness of fit
# Assemble the "ContinuousBiplot" result object.
CrissCross=list()
CrissCross$Title = "ALS Biplot (CrissCross)"
CrissCross$Weigths=w   # (sic) field name kept as-is
CrissCross$nrows=n
CrissCross$ncols=p
CrissCross$nrowsSup=0
CrissCross$ncolsSup=0
CrissCross$Dimension=dimens
# NOTE(review): the next four bare expressions are no-ops — they read
# (NULL) fields and discard the result; the fields are never set.
CrissCross$EigenValues
CrissCross$Inertia
CrissCross$CumInertia
CrissCross$Structure
CrissCross$RowCoordinates=a
CrissCross$ColCoordinates=b
CrissCross$RowContributions=rowfit*100
CrissCross$ColContributions=colfit*100
CrissCross$Scale_Factor=1
CrissCross$alpha=1
CrissCross$Dimension=dimens   # NOTE(review): duplicate assignment (see above)
class(CrissCross) <- "ContinuousBiplot"
return(CrissCross)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.R
\name{fbarplot}
\alias{fbarplot}
\alias{fbarplot.sam}
\alias{fbarplot.samset}
\alias{fbarplot.samforecast}
\alias{fbarplot.hcr}
\title{SAM Fbar plot}
\usage{
fbarplot(fit, ...)
\method{fbarplot}{sam}(
fit,
partial = TRUE,
drop = NULL,
pcol = "lightblue",
page = NULL,
plot = TRUE,
...
)
\method{fbarplot}{samset}(
fit,
partial = FALSE,
drop = NULL,
pcol = "lightblue",
page = NULL,
...
)
\method{fbarplot}{samforecast}(
fit,
partial = FALSE,
drop = NULL,
pcol = "lightblue",
page = NULL,
...
)
\method{fbarplot}{hcr}(
fit,
partial = FALSE,
drop = NULL,
pcol = "lightblue",
page = NULL,
...
)
}
\arguments{
\item{fit}{the object returned from sam.fit}
\item{...}{extra arguments transferred to plot including the following: \cr
\code{add} logical, plotting is to be added on existing plot \cr
\code{ci} logical, confidence intervals should be plotted \cr
\code{cicol} color to plot the confidence polygon}
\item{partial}{true if included partial F's are to be plotted}
\item{drop}{number of years to be left unplotted at the end. Default (NULL) is to not show years at the end with no catch information}
\item{pcol}{color of partial lines}
\item{page}{partial ages to plot}
\item{plot}{true if fbar should be plotted}
}
\description{
SAM Fbar plot
}
\details{
Plot the defined fbar.
}
| /stockassessment/man/fbarplot.Rd | no_license | fishfollower/SAM | R | false | true | 1,429 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.R
\name{fbarplot}
\alias{fbarplot}
\alias{fbarplot.sam}
\alias{fbarplot.samset}
\alias{fbarplot.samforecast}
\alias{fbarplot.hcr}
\title{SAM Fbar plot}
\usage{
fbarplot(fit, ...)
\method{fbarplot}{sam}(
fit,
partial = TRUE,
drop = NULL,
pcol = "lightblue",
page = NULL,
plot = TRUE,
...
)
\method{fbarplot}{samset}(
fit,
partial = FALSE,
drop = NULL,
pcol = "lightblue",
page = NULL,
...
)
\method{fbarplot}{samforecast}(
fit,
partial = FALSE,
drop = NULL,
pcol = "lightblue",
page = NULL,
...
)
\method{fbarplot}{hcr}(
fit,
partial = FALSE,
drop = NULL,
pcol = "lightblue",
page = NULL,
...
)
}
\arguments{
\item{fit}{the object returned from sam.fit}
\item{...}{extra arguments transferred to plot including the following: \cr
\code{add} logical, plotting is to be added on existing plot \cr
\code{ci} logical, confidence intervals should be plotted \cr
\code{cicol} color to plot the confidence polygon}
\item{partial}{true if included partial F's are to be plotted}
\item{drop}{number of years to be left unplotted at the end. Default (NULL) is to not show years at the end with no catch information}
\item{pcol}{color of partial lines}
\item{page}{partial ages to plot}
\item{plot}{true if fbar should be plotted}
}
\description{
SAM Fbar plot
}
\details{
Plot the defined fbar.
}
|
# Examples of adding and formatting plot titles in ggplot2. Assumes `bp` is
# an existing ggplot object created earlier (not defined in this snippet).
# http://www.cookbook-r.com/Graphs/Titles_(ggplot2)/
bp + ggtitle("Plant growth")
# Equivalent to
bp + labs(title="Plant growth")
# If the title is long, it can be split into multiple lines with \n
bp + ggtitle("Plant growth with\ndifferent treatments")
# Reduce line spacing and use bold text
bp + ggtitle("Plant growth with\ndifferent treatments") +
theme(plot.title = element_text(lineheight=.8, face="bold"))
| /bp.R | no_license | DublinR/ggplot2workshop | R | false | false | 421 | r | # http://www.cookbook-r.com/Graphs/Titles_(ggplot2)/
# Examples of adding and formatting plot titles in ggplot2 (duplicate of the
# snippet above). Assumes `bp` is an existing ggplot object.
bp + ggtitle("Plant growth")
# Equivalent to
bp + labs(title="Plant growth")
# If the title is long, it can be split into multiple lines with \n
bp + ggtitle("Plant growth with\ndifferent treatments")
# Reduce line spacing and use bold text
bp + ggtitle("Plant growth with\ndifferent treatments") +
theme(plot.title = element_text(lineheight=.8, face="bold"))
|
#! /usr/bin/Rscript
# Analyze the effect of different methods on alpha diversity.
# Reads a saved phyloseq object (RDS), plots alpha-diversity indices per
# sample type, then plots shared/unique OTU counts and the fraction of all
# OTUs captured by each treatment.
library(phyloseq)
library(argparse)
library(ggplot2)
library(gridExtra)
options(stringsAsFactors=F)   # NOTE(review): prefer FALSE over F (F is reassignable)
# Arguments
parser=ArgumentParser()
parser$add_argument("-i", "--infile", help="RDS file with saved phyloseq object for analysis")
parser$add_argument("-o", "--outprefix", help="Output file prefix for graphics")
args=parser$parse_args()
# setwd('/home/jgwall/Projects/Microbiomes/MicrobiomeMethodsDevelopment/CompareSampleExtractionAndAmplification_Mohsen_Cecelia/2019 10 Mohsen Final Data/2_Analysis_clean/')
# args=parser$parse_args(c("-i","2b_filtered_data.phyloseq.RDS", "-o",'99_tmp.png'))
# Load data
cat("Loading data for alpha diversity analysis\n")
mydata = readRDS(args$infile)
# Create a new combined metadata column of sample type + treatment
metadata = sample_data(mydata)
metadata$sample_type_and_treatment = paste(metadata$sample.type, metadata$treatment, sep="~")
sample_data(mydata) = metadata
# Plot alpha diversity (Observed, Chao1, Shannon), one panel per sample type.
cat("Plotting alpha diversity\n")
plots = lapply(unique(metadata$sample.type), function(my_sample){
tokeep = rownames(metadata)[metadata$sample.type==my_sample]
# NOTE(review): prune_samples(samples, x) — the named samples= argument
# makes mydata fill x positionally, so this works, but the argument order
# is fragile; confirm against the phyloseq signature.
subdata = prune_samples(mydata, samples=tokeep) # subset_samples would be clearer but kept having an error
plot_richness(subdata, measures=c("Observed", "Chao1", "Shannon"), x="treatment",
color="treatment") +
ggtitle(my_sample)
})
png(paste(args$outprefix, ".alpha_diversity.png", sep=""), width=10, height=5*length(plots), units='in', res=300)
grid.arrange(grobs=plots, nrow=length(plots), ncol=1)
dev.off()
# Make a plot of how much each treatment captures all possible OTUs
# Stacked barplot with "shared" and "unique", split by sample type
# Collapse counts so there is one row per sample_type~treatment combination.
cat("Plotting shared & unique OTUs\n")
merged = merge_samples(mydata, group="sample_type_and_treatment")
# Split by sample type (don't want to compare soil versus leaf)
mergecounts = as.data.frame(otu_table(merged))
key=strsplit(rownames(mergecounts), split="~") # Split back into treatment and sample type
key = as.data.frame(do.call(rbind, key))
names(key) = c("sample.type", "treatment")
# Split by treatment
splitcounts = split(mergecounts, key$sample.type)
# Convert to presence/absence (counts are non-negative, so 0 stays 0 and
# any positive count becomes 1).
presence = lapply(splitcounts, function(x){
x[x>1] = 1 # Take any count > 1 and turn to 1
return(x)
})
# Get shared/unique OTUs in each sample type.
shared = lapply(presence, function(mypresence){
# Determine which OTUs are absent from this group, unique to one treatment, or shared among them
num_present = colSums(mypresence)
is_absent = num_present==0 # Not used, but good for sanity-checking
is_unique = num_present==1
is_shared = num_present>=2
# Calculate shared & unique OTUs for each treatment
unique_counts = rowSums(mypresence[,is_unique, drop=FALSE])
shared_counts = rowSums(mypresence[,is_shared, drop=FALSE])
# Create a final data frame
tallied = strsplit(row.names(mypresence), split="~")
tallied = as.data.frame(do.call(rbind, tallied))
names(tallied) = c('sample.type', 'treatment')
myshared = data.frame(tallied, otu_count=shared_counts, count_type='shared')
myunique = data.frame(tallied, otu_count=unique_counts, count_type='unique')
return(rbind(myshared, myunique))
})
shared=do.call(rbind, shared)
shared$count_type = factor(shared$count_type, levels= c('unique', 'shared')) # Put in the order I want for plotting
# Plot stacked barplot of shared & unique sequences
ggplot(shared, mapping=aes(x=treatment, y=otu_count, fill=treatment, alpha=factor(count_type))) +
geom_bar(stat='identity', position='stack') +
facet_wrap( ~ sample.type, scales='free') + # Facet into one plot per sample type
theme_bw() +
theme(strip.background = element_blank(),
panel.grid.major.x=element_blank(),
panel.grid.minor.x=element_blank()) +
scale_alpha_manual(values=c(0.6, 1)) # Fix alpha values so aren't too transparent
ggsave(paste(args$outprefix, ".unique_otus.png", sep=""))
# Plot of fraction of all OTUs each method captures
fraction_total = lapply(presence, function(mypresence){
# Since presence is 0/1 matrix, easy to get fraction of total
mypresence = subset(mypresence, select = colSums(mypresence) != 0) # Remove any OTUs not present in any of these samples
fraction_present = rowSums(mypresence) / ncol(mypresence) # Get fraction of total remaining OTUs in each method
# Create a final data frame
tallied = strsplit(row.names(mypresence), split="~")
tallied = as.data.frame(do.call(rbind, tallied))
names(tallied) = c('sample.type', 'treatment')
mycounts = data.frame(tallied, fraction_total=fraction_present)
return(mycounts)
})
fraction_total=do.call(rbind, fraction_total)
# Plot barplot of the fraction of total OTUs captured per treatment.
ggplot(fraction_total, mapping=aes(x=treatment, y=fraction_total, fill=treatment)) +
geom_bar(stat='identity') +
facet_wrap( ~ sample.type, scales='free') + # Facet into one plot per sample type
theme_bw() +
theme(strip.background = element_blank(),
panel.grid.major.x=element_blank(),
panel.grid.minor.x=element_blank())
ggsave(paste(args$outprefix, ".fraction_total_otus.png", sep=""))
| /TestPrimers/2h_AnalyzeAlphaDiversity.r | no_license | wallacelab/paper-giangacomo-16s-methods | R | false | false | 5,230 | r | #! /usr/bin/Rscript
# Analyze the effect of different methods on alpha diversity
library(phyloseq)
library(argparse)
library(ggplot2)
library(gridExtra)
options(stringsAsFactors=F)
# Arguments
parser=ArgumentParser()
parser$add_argument("-i", "--infile", help="RDS file with saved phyloseq object for analysis")
parser$add_argument("-o", "--outprefix", help="Output file prefix for graphics")
args=parser$parse_args()
# setwd('/home/jgwall/Projects/Microbiomes/MicrobiomeMethodsDevelopment/CompareSampleExtractionAndAmplification_Mohsen_Cecelia/2019 10 Mohsen Final Data/2_Analysis_clean/')
# args=parser$parse_args(c("-i","2b_filtered_data.phyloseq.RDS", "-o",'99_tmp.png'))
# Load data
cat("Loading data for alpha diversity analysis\n")
mydata = readRDS(args$infile)
# Create a new combined metadata column of sample type + treatment
metadata = sample_data(mydata)
metadata$sample_type_and_treatment = paste(metadata$sample.type, metadata$treatment, sep="~")
sample_data(mydata) = metadata
# Plot
cat("Plotting alpha diversity\n")
plots = lapply(unique(metadata$sample.type), function(my_sample){
tokeep = rownames(metadata)[metadata$sample.type==my_sample]
subdata = prune_samples(mydata, samples=tokeep) # subset_samples would be clearer but kept having an error
plot_richness(subdata, measures=c("Observed", "Chao1", "Shannon"), x="treatment",
color="treatment") +
ggtitle(my_sample)
})
png(paste(args$outprefix, ".alpha_diversity.png", sep=""), width=10, height=5*length(plots), units='in', res=300)
grid.arrange(grobs=plots, nrow=length(plots), ncol=1)
dev.off()
# Make a plot of how much each treatment captures all possible OTUs
# Stacked barplot with "shared" and "unique", split by sample type
# Collapse
cat("Plotting shared & unique OTUs\n")
merged = merge_samples(mydata, group="sample_type_and_treatment")
# Split by sample type (don't want to compare soil versus leaf)
mergecounts = as.data.frame(otu_table(merged))
key=strsplit(rownames(mergecounts), split="~") # Split back into treatment and sample type
key = as.data.frame(do.call(rbind, key))
names(key) = c("sample.type", "treatment")
# Split by treatment
splitcounts = split(mergecounts, key$sample.type)
# Convert to presence/absence
presence = lapply(splitcounts, function(x){
x[x>1] = 1 # Take any count > 1 and turn to 1
return(x)
})
# Get shared/unique OTUs in each
shared = lapply(presence, function(mypresence){
# Determine which OTUs are absent from this group, unique to one treatment, or shared among them
num_present = colSums(mypresence)
is_absent = num_present==0 # Not used, but good for sanity-checking
is_unique = num_present==1
is_shared = num_present>=2
# Calculate shared & unique OTUs for each treatment
unique_counts = rowSums(mypresence[,is_unique, drop=FALSE])
shared_counts = rowSums(mypresence[,is_shared, drop=FALSE])
# Create a final data frame
tallied = strsplit(row.names(mypresence), split="~")
tallied = as.data.frame(do.call(rbind, tallied))
names(tallied) = c('sample.type', 'treatment')
myshared = data.frame(tallied, otu_count=shared_counts, count_type='shared')
myunique = data.frame(tallied, otu_count=unique_counts, count_type='unique')
return(rbind(myshared, myunique))
})
shared=do.call(rbind, shared)
shared$count_type = factor(shared$count_type, levels= c('unique', 'shared')) # Put in the order I want for plotting
# Plot stacked barplot of shared & unique sequences
ggplot(shared, mapping=aes(x=treatment, y=otu_count, fill=treatment, alpha=factor(count_type))) +
geom_bar(stat='identity', position='stack') +
facet_wrap( ~ sample.type, scales='free') + # Facet into one plot per sample type
theme_bw() +
theme(strip.background = element_blank(),
panel.grid.major.x=element_blank(),
panel.grid.minor.x=element_blank()) +
scale_alpha_manual(values=c(0.6, 1)) # Fix alpha values so aren't too transparent
ggsave(paste(args$outprefix, ".unique_otus.png", sep=""))
# Plot of fraction of all OTUs each method captures
fraction_total = lapply(presence, function(mypresence){
# Since presence is 0/1 matrix, easy to get fraction of total
mypresence = subset(mypresence, select = colSums(mypresence) != 0) # Remove any OTUs not present in any of these samples
fraction_present = rowSums(mypresence) / ncol(mypresence) # Get fraction of total remaining OTUs in each method
# Create a final data frame
tallied = strsplit(row.names(mypresence), split="~")
tallied = as.data.frame(do.call(rbind, tallied))
names(tallied) = c('sample.type', 'treatment')
mycounts = data.frame(tallied, fraction_total=fraction_present)
return(mycounts)
})
fraction_total=do.call(rbind, fraction_total)
# Plot stacked barplot of shared & unique sequences
ggplot(fraction_total, mapping=aes(x=treatment, y=fraction_total, fill=treatment)) +
geom_bar(stat='identity') +
facet_wrap( ~ sample.type, scales='free') + # Facet into one plot per sample type
theme_bw() +
theme(strip.background = element_blank(),
panel.grid.major.x=element_blank(),
panel.grid.minor.x=element_blank())
ggsave(paste(args$outprefix, ".fraction_total_otus.png", sep=""))
|
###############################################################
#                                                             #
#         Part 1: Simulating rockpool hydroperiod             #
#                                                             #
###############################################################
# First set up the different environments in a matrix
# so that the sensitivity analyses are based on exactly the same
# sequences of inundations.
ave.hp <- 6        # Median length of the hydroperiod (days)
replicates <- 1000 # Number of replicates
ts.length <- 1100  # Desired length of each time series
# Set up the hydroperiod matrix by drawing hydroperiods from a log-normal
# distribution. Filling by row reproduces exactly the same draws (for a given
# RNG seed) as the original row-by-row loop, but in one vectorized call.
env.mat <- matrix(rlnorm(replicates * ts.length, meanlog = log(ave.hp)),
                  nrow = replicates, ncol = ts.length, byrow = TRUE)
###############################################################
#                                                             #
#       Part 2: Running the matrix population model           #
#                                                             #
###############################################################
# Free life-history parameters.
# To run the sensitivity analysis, sequentially adjust these parameters WITHOUT
# simulating a new set of hydrological regimes. This ensures that the sensitivity
# analysis is based on EXACTLY the same hydroperiods.
Es0 <- 0.55 # survival of freshly produced eggs
Eh0 <- 0.47 # proportion of freshly produced eggs that hatch
Es1 <- 0.8 # survival of eggs in the egg bank
Eh1 <- .09 # proportion of dormant eggs that hatch
As <- 0.74 # survival rate of adults in the active population
Rep <- 13 # Reproduction rate (fecundity): number of eggs per pair
maturation.age <- 6 # Age, in days, at which individuals produce first offspring
# Starting population vector (vital rates = fitness components = demographic rates)
# ----------------------------------------
N0 <- 0 # Starting number of generation 1 eggs (freshly hatched eggs)
N1 <- 25000 # Starting number of generation >2 eggs (egg bank)
N2 <- 0 # Starting number of adults
# Matrix population model
# ------------------------------------
# Transition matrix for instances where the hydroperiod is shorter than the
# maturation age. It assumes that eggs hatch as they always do (i.e. no
# hatching cues other than rain), but that hatchlings do not have enough
# time to reach maturity and reproduce successfully.
mat.dry <- matrix(c(0, 0, 0,
Es0*(1-Eh0),Es1*(1-Eh1),0,
Es0*Eh0,Es1*Eh1,0), nrow=3,byrow=TRUE)
# Temporary state matrix: one column per life stage (newly hatched eggs,
# egg bank, adults) and one row per inundation / time step.
temp.nr <- matrix(c(rep(NA, ts.length*3)),nrow=ts.length,ncol=3)
temp.nr[1,1:3] <- c(N0,N1,N2) # Starting values for the population vector
# Preallocated vector for the stochastic growth rates (lambdaS),
# one entry per replicate
lambdaS <- rep(NA,replicates)
# Preallocated vector for the variation in population changes,
# one entry per replicate
Var <- rep(NA,replicates)
# Run the matrix model once per replicate hydroperiod sequence.
for (k in seq_len(replicates)) {
  # Hydroperiod sequence for this replicate (renamed from `environment`,
  # which shadows base::environment()).
  hydro <- env.mat[k,]
  for (i in seq_len(ts.length - 1)) {
    if (hydro[i] >= maturation.age) {
      Rain <- hydro[i]
      # Transition matrix when the hydroperiod exceeds the time required to
      # reach maturation: fecundity accrues over the days between maturation
      # and the end of the inundation (1:Rain truncates the continuous
      # hydroperiod to whole days).
      mat.rain <- matrix(c(0, 0, ((Rep*(sum(As^(1:Rain)))) - (Rep*(sum(As^(1:maturation.age))))),
                           Es0*(1-Eh0),Es1*(1-Eh1),0,
                           Es0*Eh0,Es1*Eh1,0),nrow=3,byrow=TRUE)
      temp.nr[i+1,1:3] <- mat.rain%*%temp.nr[i,1:3]
    } else {
      # Hydroperiod too short for maturation: hatching but no reproduction.
      temp.nr[i+1,1:3] <- mat.dry%*%temp.nr[i,1:3]
    }
  }
  # Log ratio of egg-bank size between successive time steps, vectorized.
  # (The original loop used `1:ts.length-1`, which parses as 0:(ts.length-1);
  # its i = 0 iteration was a silent no-op, so results are unchanged.)
  ratioN <- log(temp.nr[2:ts.length, 2] / temp.nr[1:(ts.length - 1), 2])
  # Population growth rate [log(lambda)], averaged after a burn-in period.
  # NOTE(review): the original index `100:ts.length-1` parses as
  # 99:(ts.length-1); that behaviour is preserved here, but the intent may
  # have been 100:(ts.length-1) -- confirm against the manuscript.
  lambdaS[k] <- mean(ratioN[99:(ts.length - 1)])
  # Variation in the changes in population between successive time steps
  Var[k] <- var(ratioN)
}
# Summarise the population growth rates and the variation in population
# change by averaging across all replicates.
mean(lambdaS, na.rm = TRUE)
hist(lambdaS)
mean(Var, na.rm = TRUE)
# These values can be saved in a spreadsheet to calculate the sensitivities
# and extinction risk using basic arithmetic.
# See main manuscript and appendices for more details.
| /Stochastic_bet-hedging.r | no_license | falko-buschke/Stochastic-bet-hedging | R | false | false | 4,934 | r | ###############################################################
# #
# Part 1: Simulating rockpool hydroperiod #
# #
###############################################################
# First set up the different environments in a matrix
# so that the sensitivity analyses are based on exactly the same
# sequences of inundations.
ave.hp <- 6        # Median length of the hydroperiod (days)
replicates <- 1000 # Number of replicates
ts.length <- 1100  # Desired length of each time series
# Set up the hydroperiod matrix by drawing hydroperiods from a log-normal
# distribution. Filling by row reproduces exactly the same draws (for a given
# RNG seed) as the original row-by-row loop, but in one vectorized call.
env.mat <- matrix(rlnorm(replicates * ts.length, meanlog = log(ave.hp)),
                  nrow = replicates, ncol = ts.length, byrow = TRUE)
###############################################################
#                                                             #
#       Part 2: Running the matrix population model           #
#                                                             #
###############################################################
# Free life-history parameters.
# To run the sensitivity analysis, sequentially adjust these parameters WITHOUT
# simulating a new set of hydrological regimes. This ensures that the sensitivity
# analysis is based on EXACTLY the same hydroperiods.
Es0 <- 0.55 # survival of freshly produced eggs
Eh0 <- 0.47 # proportion of freshly produced eggs that hatch
Es1 <- 0.8 # survival of eggs in the egg bank
Eh1 <- .09 # proportion of dormant eggs that hatch
As <- 0.74 # survival rate of adults in the active population
Rep <- 13 # Reproduction rate (fecundity): number of eggs per pair
maturation.age <- 6 # Age, in days, at which individuals produce first offspring
# Starting population vector (vital rates = fitness components = demographic rates)
# ----------------------------------------
N0 <- 0 # Starting number of generation 1 eggs (freshly hatched eggs)
N1 <- 25000 # Starting number of generation >2 eggs (egg bank)
N2 <- 0 # Starting number of adults
# Matrix population model
# ------------------------------------
# Transition matrix for instances where the hydroperiod is shorter than the
# maturation age. It assumes that eggs hatch as they always do (i.e. no
# hatching cues other than rain), but that hatchlings do not have enough
# time to reach maturity and reproduce successfully.
mat.dry <- matrix(c(0, 0, 0,
Es0*(1-Eh0),Es1*(1-Eh1),0,
Es0*Eh0,Es1*Eh1,0), nrow=3,byrow=TRUE)
# Temporary state matrix: one column per life stage (newly hatched eggs,
# egg bank, adults) and one row per inundation / time step.
temp.nr <- matrix(c(rep(NA, ts.length*3)),nrow=ts.length,ncol=3)
temp.nr[1,1:3] <- c(N0,N1,N2) # Starting values for the population vector
# Preallocated vector for the stochastic growth rates (lambdaS),
# one entry per replicate
lambdaS <- rep(NA,replicates)
# Preallocated vector for the variation in population changes,
# one entry per replicate
Var <- rep(NA,replicates)
# Run the matrix model once per replicate hydroperiod sequence.
for (k in seq_len(replicates)) {
  # Hydroperiod sequence for this replicate (renamed from `environment`,
  # which shadows base::environment()).
  hydro <- env.mat[k,]
  for (i in seq_len(ts.length - 1)) {
    if (hydro[i] >= maturation.age) {
      Rain <- hydro[i]
      # Transition matrix when the hydroperiod exceeds the time required to
      # reach maturation: fecundity accrues over the days between maturation
      # and the end of the inundation (1:Rain truncates the continuous
      # hydroperiod to whole days).
      mat.rain <- matrix(c(0, 0, ((Rep*(sum(As^(1:Rain)))) - (Rep*(sum(As^(1:maturation.age))))),
                           Es0*(1-Eh0),Es1*(1-Eh1),0,
                           Es0*Eh0,Es1*Eh1,0),nrow=3,byrow=TRUE)
      temp.nr[i+1,1:3] <- mat.rain%*%temp.nr[i,1:3]
    } else {
      # Hydroperiod too short for maturation: hatching but no reproduction.
      temp.nr[i+1,1:3] <- mat.dry%*%temp.nr[i,1:3]
    }
  }
  # Log ratio of egg-bank size between successive time steps, vectorized.
  # (The original loop used `1:ts.length-1`, which parses as 0:(ts.length-1);
  # its i = 0 iteration was a silent no-op, so results are unchanged.)
  ratioN <- log(temp.nr[2:ts.length, 2] / temp.nr[1:(ts.length - 1), 2])
  # Population growth rate [log(lambda)], averaged after a burn-in period.
  # NOTE(review): the original index `100:ts.length-1` parses as
  # 99:(ts.length-1); that behaviour is preserved here, but the intent may
  # have been 100:(ts.length-1) -- confirm against the manuscript.
  lambdaS[k] <- mean(ratioN[99:(ts.length - 1)])
  # Variation in the changes in population between successive time steps
  Var[k] <- var(ratioN)
}
# Summarise the population growth rates and the variation in population
# change by averaging across all replicates.
mean(lambdaS, na.rm = TRUE)
hist(lambdaS)
mean(Var, na.rm = TRUE)
# These values can be saved in a spreadsheet to calculate the sensitivities
# and extinction risk using basic arithmetic.
# See main manuscript and appendices for more details.
|
# Retrieve the list of survey templates from the SurveyMonkey v2 API.
# Optional paging/filter arguments are dropped from the request when NULL.
# Returns a list of objects of class 'sm_template' (or the raw parsed
# content, with a warning, when the API reports an error status).
templates <- function(
  page = NULL,
  page_size = NULL,
  language_id = NULL,
  category_id = NULL,
  only_mine = TRUE,
  fields = NULL,
  api_key = getOption('sm_api_key'),
  oauth_token = getOption('sm_oauth_token')
){
  # Guard clauses: both credentials are required.
  if (is.null(api_key))
    stop("Must specify 'api_key'")
  if (is.null(oauth_token))
    stop("Must specify 'oauth_token'")
  u <- paste0('https://api.surveymonkey.net/v2/templates/get_template_list?',
              'api_key=', api_key)
  token <- paste('bearer', oauth_token)
  # Assemble the JSON request body, omitting unset parameters.
  b <- list(page = page, page_size = page_size,
            language_id = language_id, category_id = category_id,
            show_only_available_to_current_user = only_mine,
            fields = as.list(fields))
  b <- b[!vapply(b, is.null, logical(1))]
  b <- if (length(b) == 0) '{}' else toJSON(b)
  h <- add_headers(Authorization = token,
                   'Content-Type' = 'application/json')
  out <- POST(u, config = h, body = b)
  stop_for_status(out)
  content <- content(out, as = 'parsed')
  # Status 3 is the API's application-level error signal.
  if (content$status == 3) {
    warning("An error occurred: ", content$errmsg)
    return(content)
  }
  # Surface any account-upgrade restrictions as messages.
  if ('upgrade_info' %in% names(content$data)) {
    lapply(content$data$upgrade_info$restrictions, function(r) {
      message(r$message)
    })
  }
  lapply(content$data$templates, `class<-`, 'sm_template')
}
# S3 print method for 'sm_template': prints one labelled line per field,
# silently skipping fields that are absent, and returns x invisibly.
print.sm_template <- function(x, ...){
  # Print "<label> <value>" for a field, skipping NULL values.
  field <- function(label, value){
    if (!is.null(value)) cat(label, value, '\n')
  }
  field('Template ID:', x$template_id)
  field('Title:', x$title)
  field('Description:', x$short_description)
  #if(!is.null(x$long_description))
  # cat(x$long_description,'\n')
  if (!is.null(x$language_id)) cat(x$language_id, '\n')
  field('Available?', x$is_available_to_current_user)
  field('Featured? ', x$is_featured)
  field('Certified?', x$is_certified)
  field('Pages:', x$page_count)
  field('Questions:', x$question_count)
  field('Preview URL:', x$preview_url)
  # Category is printed with its id appended and no separating spaces.
  if (!is.null(x$category_name)) {
    if (!is.null(x$category_id))
      cat('Category ', x$category_name, ' (', x$category_id, '):\n', sep = '')
    if (!is.null(x$category_description))
      cat(x$category_description, '\n')
  }
  field('Date created: ', x$date_created)
  field('Date modified:', x$date_modified)
  invisible(x)
}
| /R/templates.r | no_license | patilv/Rmonkey | R | false | false | 2,831 | r | templates <- function(
page = NULL,
page_size = NULL,
language_id = NULL,
category_id = NULL,
only_mine = TRUE,
fields = NULL,
api_key = getOption('sm_api_key'),
oauth_token = getOption('sm_oauth_token')
){
if(!is.null(api_key)) {
u <- paste('https://api.surveymonkey.net/v2/templates/get_template_list?',
'api_key=', api_key, sep='')
} else
stop("Must specify 'api_key'")
if(!is.null(oauth_token))
token <- paste('bearer', oauth_token)
else
stop("Must specify 'oauth_token'")
b <- list(page = page, page_size = page_size,
language_id = language_id, category_id = category_id,
show_only_available_to_current_user = only_mine,
fields = as.list(fields))
nulls <- sapply(b, is.null)
if(all(nulls))
b <- '{}'
else
b <- toJSON(b[!nulls])
h <- add_headers(Authorization=token,
'Content-Type'='application/json')
out <- POST(u, config = h, body = b)
stop_for_status(out)
content <- content(out, as='parsed')
if(content$status==3) {
warning("An error occurred: ",content$errmsg)
return(content)
} else {
if('upgrade_info' %in% names(content$data)){
lapply(content$data$upgrade_info$restrictions, function(x){
message(x$message)
})
}
lapply(content$data$templates, `class<-`, 'sm_template')
}
}
# S3 print method for 'sm_template': prints one labelled line per field,
# silently skipping fields that are absent, and returns x invisibly.
print.sm_template <- function(x, ...){
  # Print "<label> <value>" for a field, skipping NULL values.
  field <- function(label, value){
    if (!is.null(value)) cat(label, value, '\n')
  }
  field('Template ID:', x$template_id)
  field('Title:', x$title)
  field('Description:', x$short_description)
  #if(!is.null(x$long_description))
  # cat(x$long_description,'\n')
  if (!is.null(x$language_id)) cat(x$language_id, '\n')
  field('Available?', x$is_available_to_current_user)
  field('Featured? ', x$is_featured)
  field('Certified?', x$is_certified)
  field('Pages:', x$page_count)
  field('Questions:', x$question_count)
  field('Preview URL:', x$preview_url)
  # Category is printed with its id appended and no separating spaces.
  if (!is.null(x$category_name)) {
    if (!is.null(x$category_id))
      cat('Category ', x$category_name, ' (', x$category_id, '):\n', sep = '')
    if (!is.null(x$category_description))
      cat(x$category_description, '\n')
  }
  field('Date created: ', x$date_created)
  field('Date modified:', x$date_modified)
  invisible(x)
}
|
library(eplusr)
### Name: read_epw
### Title: Read and Parse EnergyPlus Weather File (EPW)
### Aliases: read_epw
### ** Examples
# Read an EPW file shipped with a local EnergyPlus v8.8 installation
# (skipped when that EnergyPlus version is not installed).
if (is_avail_eplus(8.8)) {
path_epw <- file.path(
eplus_config(8.8)$dir,
"WeatherData",
"USA_CA_San.Francisco.Intl.AP.724940_TMY3.epw"
)
epw <- read_epw(path_epw)
}
## Not run:
##D # Read an EPW file directly from the EnergyPlus website (needs network)
##D path_base <- "https://energyplus.net/weather-download"
##D path_region <- "north_and_central_america_wmo_region_4/USA/CA"
##D path_file <- "USA_CA_San.Francisco.Intl.AP.724940_TMY3/USA_CA_San.Francisco.Intl.AP.724940_TMY3.epw"
##D path_epw <- file.path(path_base, path_region, path_file)
##D epw <- read_epw(path_epw)
## End(Not run)
| /data/genthat_extracted_code/eplusr/examples/read_epw.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 794 | r | library(eplusr)
### Name: read_epw
### Title: Read and Parse EnergyPlus Weather File (EPW)
### Aliases: read_epw
### ** Examples
# Read an EPW file shipped with a local EnergyPlus v8.8 installation
# (skipped when that EnergyPlus version is not installed).
if (is_avail_eplus(8.8)) {
path_epw <- file.path(
eplus_config(8.8)$dir,
"WeatherData",
"USA_CA_San.Francisco.Intl.AP.724940_TMY3.epw"
)
epw <- read_epw(path_epw)
}
## Not run:
##D # Read an EPW file directly from the EnergyPlus website (needs network)
##D path_base <- "https://energyplus.net/weather-download"
##D path_region <- "north_and_central_america_wmo_region_4/USA/CA"
##D path_file <- "USA_CA_San.Francisco.Intl.AP.724940_TMY3/USA_CA_San.Francisco.Intl.AP.724940_TMY3.epw"
##D path_epw <- file.path(path_base, path_region, path_file)
##D epw <- read_epw(path_epw)
## End(Not run)
|
# One-time setup (run manually):
# library(devtools)
# install_github("agilearning/RPTT")
# install.packages("XML")
library(RPTT)
# Scrape the first 20 list pages of the PTT Gossiping board.
listPageUrls = getListPageUrls("Gossiping")[1:20]
postUrls = unlist(lapply(listPageUrls, getPostUrls))
# Fetch each post; try(..., TRUE) runs silently so one bad page
# does not abort the whole crawl.
postData = lapply(postUrls, function(url) try(getPostData(url), TRUE))
# inherits() is the robust way to detect try() failures (class() may return
# more than one element); vapply guarantees a logical vector even when empty.
failed = vapply(postData, function(x) inherits(x, "try-error"), logical(1))
error_idx = which(failed)
data_idx = which(!failed)
data = postData[data_idx]
# Row-bind the per-post tables into one data frame each.
postDf = data.frame(do.call(rbind, lapply(postData[data_idx], function(xx) xx$postData)))
pushDf = do.call(rbind, lapply(postData[data_idx], function(xx) xx$pushDf))
GpostDf <- postDf
GpushDf <- pushDf
save(GpostDf, GpushDf, file = "Gossiping.RData")
| /Week2/getGossipingData.R | no_license | KuiMing/RTextMining | R | false | false | 689 | r | # library(devtools)
# One-time setup (run manually):
# install_github("agilearning/RPTT")
# install.packages("XML")
library(RPTT)
# Scrape the first 20 list pages of the PTT Gossiping board.
listPageUrls = getListPageUrls("Gossiping")[1:20]
postUrls = unlist(lapply(listPageUrls, getPostUrls))
# Fetch each post; try(..., TRUE) runs silently so one bad page
# does not abort the whole crawl.
postData = lapply(postUrls, function(url) try(getPostData(url), TRUE))
# inherits() is the robust way to detect try() failures (class() may return
# more than one element); vapply guarantees a logical vector even when empty.
failed = vapply(postData, function(x) inherits(x, "try-error"), logical(1))
error_idx = which(failed)
data_idx = which(!failed)
data = postData[data_idx]
# Row-bind the per-post tables into one data frame each.
postDf = data.frame(do.call(rbind, lapply(postData[data_idx], function(xx) xx$postData)))
pushDf = do.call(rbind, lapply(postData[data_idx], function(xx) xx$pushDf))
GpostDf <- postDf
GpushDf <- pushDf
save(GpostDf, GpushDf, file = "Gossiping.RData")
|
# Collection tests for the SymbiotaR2 API wrappers.
# vcr cassettes replay previously recorded HTTP responses, so these tests
# run offline; expected lengths match the recorded fixture payloads.
url <- "http://a02235015-6.bluezone.usu.edu/api/"
context("Categories")
# Fetch a single category by id (recorded response)
vcr::use_cassette(name = "Categories_id", {
data <- Categories(id = 1, url = url)
})
test_that("Categories_id", {
expect_equal(length(data), 12)
expect_type(data, "list")
})
# Fetch a page of categories (recorded response)
vcr::use_cassette(name = "Categories_page", {
data <- Categories(page = 1, url = url)
})
test_that("Categories_page", {
expect_equal(length(data), 110)
expect_type(data, "list")
})
context("Institutions")
vcr::use_cassette(name = "Institutions_id", {
data <- Institutions(id = 5, url = url)
})
test_that("Institutions_id", {
expect_equal(length(data), 21)
expect_type(data, "list")
})
vcr::use_cassette(name = "Institutions_page", {
data <- Institutions(page = 1, url = url)
})
test_that("Institutions_page", {
expect_equal(length(data), 600)
expect_type(data, "list")
})
context("Stats")
vcr::use_cassette(name = "Stats_id", {
data <- Stats(id = 1, url = url)
})
test_that("Stats_id", {
expect_equal(length(data), 14)
expect_type(data, "list")
})
vcr::use_cassette(name = "Stats_page", {
data <- Stats(page = 1, url = url)
})
test_that("Stats_page", {
expect_equal(length(data), 390)
expect_type(data, "list")
})
context("Collections")
vcr::use_cassette(name = "Collections_id", {
data <- Collections(id = 1, url = url)
})
test_that("Collections_id", {
expect_equal(length(data), 37)
expect_type(data, "list")
})
vcr::use_cassette(name = "Collections_page", {
data <- Collections(page = 1, url = url)
})
test_that("Collections_page", {
expect_equal(length(data), 36)
expect_type(data, "list")
})
| /tests/testthat/test_Collection.R | permissive | ropensci/SymbiotaR2 | R | false | false | 1,614 | r | # Collection tests
# Collection tests for the SymbiotaR2 API wrappers.
# vcr cassettes replay previously recorded HTTP responses, so these tests
# run offline; expected lengths match the recorded fixture payloads.
url <- "http://a02235015-6.bluezone.usu.edu/api/"
context("Categories")
# Fetch a single category by id (recorded response)
vcr::use_cassette(name = "Categories_id", {
data <- Categories(id = 1, url = url)
})
test_that("Categories_id", {
expect_equal(length(data), 12)
expect_type(data, "list")
})
# Fetch a page of categories (recorded response)
vcr::use_cassette(name = "Categories_page", {
data <- Categories(page = 1, url = url)
})
test_that("Categories_page", {
expect_equal(length(data), 110)
expect_type(data, "list")
})
context("Institutions")
vcr::use_cassette(name = "Institutions_id", {
data <- Institutions(id = 5, url = url)
})
test_that("Institutions_id", {
expect_equal(length(data), 21)
expect_type(data, "list")
})
vcr::use_cassette(name = "Institutions_page", {
data <- Institutions(page = 1, url = url)
})
test_that("Institutions_page", {
expect_equal(length(data), 600)
expect_type(data, "list")
})
context("Stats")
vcr::use_cassette(name = "Stats_id", {
data <- Stats(id = 1, url = url)
})
test_that("Stats_id", {
expect_equal(length(data), 14)
expect_type(data, "list")
})
vcr::use_cassette(name = "Stats_page", {
data <- Stats(page = 1, url = url)
})
test_that("Stats_page", {
expect_equal(length(data), 390)
expect_type(data, "list")
})
context("Collections")
vcr::use_cassette(name = "Collections_id", {
data <- Collections(id = 1, url = url)
})
test_that("Collections_id", {
expect_equal(length(data), 37)
expect_type(data, "list")
})
vcr::use_cassette(name = "Collections_page", {
data <- Collections(page = 1, url = url)
})
test_that("Collections_page", {
expect_equal(length(data), 36)
expect_type(data, "list")
})
#' @name getCriticalCoalitionsOfPlayer
#' @title getCriticalCoalitionsOfPlayer
#' @description The function getCriticalCoalitionsOfPlayer identifies all coalitions for one player
#' in which that player is critical.
#' These coalitions are characterized by the circumstance that without this player the other players
#' generate no value (then also called a losing coalition) - therefore this player is also described as a critical player.
#' @aliases getCriticalCoalitionsOfPlayer
#' @export getCriticalCoalitionsOfPlayer
#' @template author/JA
#' @template author/JS
#' @param player represents the observed player
#' @template cites/DEEGAN_ET_PACKEL_1978
#' @templateVar DEEGAN_ET_PACKEL_1978_P pp. 151-161
#' @template param/A
#' @return A data frame containing all minimal winning coalitions for the given player
#' @examples
#' library(CoopGame)
#' A=c(0,1,0,1,0,1,1)
#'
#' #Get coalitions where player 2 is critical:
#' getCriticalCoalitionsOfPlayer(2,A)
#' #Output are all coalitions where player 2 is involved.
#' #Observe that player 2 is dictator in this game.
#'#
#'# V1 V2 V3 cVal bmRow
#'# 2 0 1 0 1 2
#'# 4 1 1 0 1 4
#'# 6 0 1 1 1 6
#'# 7 1 1 1 1 7
#'
getCriticalCoalitionsOfPlayer<-function(player,A){
# Validate the game vector and the player index before delegating
# to the internal workhorse function.
paramCheckResult=getEmptyParamCheckResult()
stopOnInvalidGameVectorA(paramCheckResult,A)
stopOnInvalidNumber(paramCheckResult, player)
getCoalitionsWherePlayerCritical(player,A)
}
# Internal helper: return the rows of the bit matrix describing all coalitions
# in which `player` is critical, i.e. winning coalitions that become losing
# (value 0) when `player` is removed. Returns NULL for non-simple games and
# when no such coalition exists.
getCoalitionsWherePlayerCritical=function(player,A){
  bmTemp = NULL
  if(!isSimpleGame(A)){
    # message() (not print) so the status note goes to stderr and can be
    # suppressed by callers.
    message("Game is not simple therefore no minimum winning coalitions can be retrieved.")
  }
  else
  {
    n = as.numeric(getNumberOfPlayers(A))
    bitMatrix = as.data.frame(createBitMatrix(n,A))
    # Row indices of coalitions in which `player` turns out NOT to be
    # critical; these rows are dropped before returning.
    rowsToBeRemoved = c()
    ci = rep(0,n)
    bm = as.data.frame(bitMatrix)
    # Keep only coalitions that contain `player` ...
    bmTemp = bm[bm[,player]==1,,drop=FALSE]
    # ... and that are winning (non-zero coalition value in column n+1)
    bmTemp = bmTemp[bmTemp[,n+1]!=0,,drop=FALSE]
    if(nrow(bmTemp)!=0){
      # Extra column referencing each row's index in the full bit matrix
      bmTemp = cbind(bmTemp,bmRow=0)
      for(j in seq_len(nrow(bmTemp))){
        bmTempRow = bmTemp[j,]
        # Members of the coalition, then the coalition without `player`
        players = getPlayersFromBMRow(bmTempRow)
        players = setdiff(players,player)
        if(length(players)==0){
          # Singleton coalition {player}: critical iff it has positive value
          if(bm[player,"cVal"]<=0){
            rowsToBeRemoved = c(rowsToBeRemoved,j)
          }else{
            ci[player] = ci[player]+bmTempRow["cVal"]
            bmTemp[j,"bmRow"] = indexCoalition(n,S=player)
          }
        }else{
          # Player is critical iff the coalition without him is losing
          ix = indexCoalition(n,S=players)
          if(bm[ix,"cVal"]==0){
            ci[player] = ci[player]+bmTempRow["cVal"]
            bmTemp[j,"bmRow"] = indexCoalition(n,S=c(players,player))
          }else{
            rowsToBeRemoved = c(rowsToBeRemoved,j)
          }
        }
      }
      if(!is.null(rowsToBeRemoved)){
        # (rbind(bmTemp) in the original was a no-op and has been removed)
        bmTemp = bmTemp[-rowsToBeRemoved, , drop = FALSE]
      }
    }else{
      bmTemp = NULL
    }
  }
  return(bmTemp)
}
| /CoopGame/R/getCriticalCoalitionsOfPlayer.R | no_license | anwanjohannes/CoopGame | R | false | false | 3,947 | r | #' @name getCriticalCoalitionsOfPlayer
#' @title getCriticalCoalitionsOfPlayer
#' @description The function getCriticalCoalitionsOfPlayer identifies all coalitions for one player
#' in which that player is critical.
#' These coalitions are characterized by the circumstance that without this player the other players
#' generate no value (then also called a losing coalition) - therefore this player is also described as a critical player.
#' @aliases getCriticalCoalitionsOfPlayer
#' @export getCriticalCoalitionsOfPlayer
#' @template author/JA
#' @template author/JS
#' @param player represents the observed player
#' @template cites/DEEGAN_ET_PACKEL_1978
#' @templateVar DEEGAN_ET_PACKEL_1978_P pp. 151-161
#' @template param/A
#' @return A data frame containing all minimal winning coalitions for the given player
#' @examples
#' library(CoopGame)
#' A=c(0,1,0,1,0,1,1)
#'
#' #Get coalitions where player 2 is critical:
#' getCriticalCoalitionsOfPlayer(2,A)
#' #Output are all coalitions where player 2 is involved.
#' #Observe that player 2 is dictator in this game.
#'#
#'# V1 V2 V3 cVal bmRow
#'# 2 0 1 0 1 2
#'# 4 1 1 0 1 4
#'# 6 0 1 1 1 6
#'# 7 1 1 1 1 7
#'
getCriticalCoalitionsOfPlayer<-function(player,A){
# Validate the game vector and the player index before delegating
# to the internal workhorse function.
paramCheckResult=getEmptyParamCheckResult()
stopOnInvalidGameVectorA(paramCheckResult,A)
stopOnInvalidNumber(paramCheckResult, player)
getCoalitionsWherePlayerCritical(player,A)
}
# Internal helper: return the rows of the bit matrix describing all coalitions
# in which `player` is critical, i.e. winning coalitions that become losing
# (value 0) when `player` is removed. Returns NULL for non-simple games and
# when no such coalition exists.
getCoalitionsWherePlayerCritical=function(player,A){
  bmTemp = NULL
  if(!isSimpleGame(A)){
    # message() (not print) so the status note goes to stderr and can be
    # suppressed by callers.
    message("Game is not simple therefore no minimum winning coalitions can be retrieved.")
  }
  else
  {
    n = as.numeric(getNumberOfPlayers(A))
    bitMatrix = as.data.frame(createBitMatrix(n,A))
    # Row indices of coalitions in which `player` turns out NOT to be
    # critical; these rows are dropped before returning.
    rowsToBeRemoved = c()
    ci = rep(0,n)
    bm = as.data.frame(bitMatrix)
    # Keep only coalitions that contain `player` ...
    bmTemp = bm[bm[,player]==1,,drop=FALSE]
    # ... and that are winning (non-zero coalition value in column n+1)
    bmTemp = bmTemp[bmTemp[,n+1]!=0,,drop=FALSE]
    if(nrow(bmTemp)!=0){
      # Extra column referencing each row's index in the full bit matrix
      bmTemp = cbind(bmTemp,bmRow=0)
      for(j in seq_len(nrow(bmTemp))){
        bmTempRow = bmTemp[j,]
        # Members of the coalition, then the coalition without `player`
        players = getPlayersFromBMRow(bmTempRow)
        players = setdiff(players,player)
        if(length(players)==0){
          # Singleton coalition {player}: critical iff it has positive value
          if(bm[player,"cVal"]<=0){
            rowsToBeRemoved = c(rowsToBeRemoved,j)
          }else{
            ci[player] = ci[player]+bmTempRow["cVal"]
            bmTemp[j,"bmRow"] = indexCoalition(n,S=player)
          }
        }else{
          # Player is critical iff the coalition without him is losing
          ix = indexCoalition(n,S=players)
          if(bm[ix,"cVal"]==0){
            ci[player] = ci[player]+bmTempRow["cVal"]
            bmTemp[j,"bmRow"] = indexCoalition(n,S=c(players,player))
          }else{
            rowsToBeRemoved = c(rowsToBeRemoved,j)
          }
        }
      }
      if(!is.null(rowsToBeRemoved)){
        # (rbind(bmTemp) in the original was a no-op and has been removed)
        bmTemp = bmTemp[-rowsToBeRemoved, , drop = FALSE]
      }
    }else{
      bmTemp = NULL
    }
  }
  return(bmTemp)
}
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/intervals.r
\name{\%within\%}
\alias{\%within\%}
\alias{\%within\%,Interval,Interval-method}
\title{Tests whether a date or interval falls within an interval}
\usage{
a \%within\% b
}
\arguments{
\item{a}{An interval or date-time object}
\item{b}{An interval}
}
\value{
A logical
}
\description{
\%within\% returns TRUE if a falls within interval b, FALSE otherwise.
If a is an interval, both its start and end dates must fall within b
to return TRUE.
}
\examples{
int <- new_interval(ymd("2001-01-01"), ymd("2002-01-01"))
# 2001-01-01 UTC--2002-01-01 UTC
int2 <- new_interval(ymd("2001-06-01"), ymd("2002-01-01"))
# 2001-06-01 UTC--2002-01-01 UTC
ymd("2001-05-03") \%within\% int # TRUE
int2 \%within\% int # TRUE
ymd("1999-01-01") \%within\% int # FALSE
}
| /man/within-interval.Rd | no_license | larmarange/lubridate | R | false | false | 845 | rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/intervals.r
\name{\%within\%}
\alias{\%within\%}
\alias{\%within\%,Interval,Interval-method}
\title{Tests whether a date or interval falls within an interval}
\usage{
a \%within\% b
}
\arguments{
\item{a}{An interval or date-time object}
\item{b}{An interval}
}
\value{
A logical
}
\description{
\%within\% returns TRUE if a falls within interval b, FALSE otherwise.
If a is an interval, both its start and end dates must fall within b
to return TRUE.
}
\examples{
int <- new_interval(ymd("2001-01-01"), ymd("2002-01-01"))
# 2001-01-01 UTC--2002-01-01 UTC
int2 <- new_interval(ymd("2001-06-01"), ymd("2002-01-01"))
# 2001-06-01 UTC--2002-01-01 UTC
ymd("2001-05-03") \%within\% int # TRUE
int2 \%within\% int # TRUE
ymd("1999-01-01") \%within\% int # FALSE
}
|
# Takes in a Cox proportional hazards model and returns a table of the
# parameter estimates on a format specified by the user. The purpose of the
# function is to easily generate tables for research papers.
#
# Arguments:
#   fit               a fitted coxph model
#   nr_digits         decimal places for estimates and p-value cutoff
#   pval_type         "stars" (significance stars appended to the HR column)
#                     or "column" (separate p-value column)
#   remove_adjustment character vector of variable names to drop from the table
#   make_excel        if TRUE write cox_table.xlsx, otherwise return the table
analysis_hazard_table <- function(fit, nr_digits = 3, pval_type = "stars",
                                  remove_adjustment = NULL, make_excel = TRUE){
  # library() errors immediately if a dependency is missing (require() only
  # returns FALSE); the duplicated require(xlsx) of the original is dropped.
  library(xlsx)
  library(dplyr)
  # Round x to nr_digits decimal places.
  fix_decimal_places <- function(x, nr_digits){
    round(x * 10^nr_digits) / 10^nr_digits
  }
  summary_of_fit <- format(fix_decimal_places(summary(fit)$conf.int, nr_digits),
                           scientific = FALSE)
  pvals <- summary(fit)$coefficients[, "Pr(>|z|)"]
  # "HR [lower,upper]" display strings
  HR_conf_int <- paste0(summary_of_fit[, "exp(coef)"],
                        " [", summary_of_fit[, "lower .95"], ",",
                        summary_of_fit[, "upper .95"], "]")
  variable_names <- gsub("TRUE", "", rownames(summary_of_fit))
  cox_table <- data.frame(col1 = variable_names, col2 = HR_conf_int)
  names(cox_table) <- c("Variable", "HR [95%]")
  # Representation of p-values as specified by the user
  if(pval_type == "column"){
    # Use a cutoff deduced from nr_digits and show p-values in their own column
    cox_table$`p-value` <- ifelse(pvals < 10^-nr_digits, paste0("<", 10^-nr_digits),
                                  format(fix_decimal_places(pvals, nr_digits),
                                         scientific = FALSE))
  }else if(pval_type == "stars"){
    # Level of significance labelled with different numbers of stars
    stars <- cut(pvals, breaks = c(0, 0.001, 0.01, 0.05, Inf),
                 label = c("***", "**", "*", ""), right = FALSE)
    cox_table$`HR [95%]` <- paste0(cox_table$`HR [95%]`, stars)
  }else{
    stop("pval_type can only take the values 'column' or 'stars'. Type ?analysis_hazard_table for details")
  }
  # Optionally hide the adjustment covariates.
  # BUG FIX: the original computed this filter but discarded the result,
  # so remove_adjustment had no effect; the filtered table is now assigned.
  cox_table <- cox_table %>% filter(!(Variable %in% remove_adjustment))
  if(make_excel){
    write.xlsx(cox_table, file = "cox_table.xlsx", row.names = FALSE)
  }else{
    return(cox_table)
  }
}
| /R/analysis_hazard_table.R | no_license | saemirogg/swedr | R | false | false | 1,966 | r | # Takes in a Cox proportional hazards model and returns a table of the parameter estimates on a format
# Takes in a Cox proportional hazards model and returns a table of the
# parameter estimates on a format specified by the user. The purpose of the
# function is to easily generate tables for research papers.
#
# Arguments:
#   fit               a fitted coxph model
#   nr_digits         decimal places for estimates and p-value cutoff
#   pval_type         "stars" (significance stars appended to the HR column)
#                     or "column" (separate p-value column)
#   remove_adjustment character vector of variable names to drop from the table
#   make_excel        if TRUE write cox_table.xlsx, otherwise return the table
analysis_hazard_table <- function(fit, nr_digits = 3, pval_type = "stars",
                                  remove_adjustment = NULL, make_excel = TRUE){
  # library() errors immediately if a dependency is missing (require() only
  # returns FALSE); the duplicated require(xlsx) of the original is dropped.
  library(xlsx)
  library(dplyr)
  # Round x to nr_digits decimal places.
  fix_decimal_places <- function(x, nr_digits){
    round(x * 10^nr_digits) / 10^nr_digits
  }
  summary_of_fit <- format(fix_decimal_places(summary(fit)$conf.int, nr_digits),
                           scientific = FALSE)
  pvals <- summary(fit)$coefficients[, "Pr(>|z|)"]
  # "HR [lower,upper]" display strings
  HR_conf_int <- paste0(summary_of_fit[, "exp(coef)"],
                        " [", summary_of_fit[, "lower .95"], ",",
                        summary_of_fit[, "upper .95"], "]")
  variable_names <- gsub("TRUE", "", rownames(summary_of_fit))
  cox_table <- data.frame(col1 = variable_names, col2 = HR_conf_int)
  names(cox_table) <- c("Variable", "HR [95%]")
  # Representation of p-values as specified by the user
  if(pval_type == "column"){
    # Use a cutoff deduced from nr_digits and show p-values in their own column
    cox_table$`p-value` <- ifelse(pvals < 10^-nr_digits, paste0("<", 10^-nr_digits),
                                  format(fix_decimal_places(pvals, nr_digits),
                                         scientific = FALSE))
  }else if(pval_type == "stars"){
    # Level of significance labelled with different numbers of stars
    stars <- cut(pvals, breaks = c(0, 0.001, 0.01, 0.05, Inf),
                 label = c("***", "**", "*", ""), right = FALSE)
    cox_table$`HR [95%]` <- paste0(cox_table$`HR [95%]`, stars)
  }else{
    stop("pval_type can only take the values 'column' or 'stars'. Type ?analysis_hazard_table for details")
  }
  # Optionally hide the adjustment covariates.
  # BUG FIX: the original computed this filter but discarded the result,
  # so remove_adjustment had no effect; the filtered table is now assigned.
  cox_table <- cox_table %>% filter(!(Variable %in% remove_adjustment))
  if(make_excel){
    write.xlsx(cox_table, file = "cox_table.xlsx", row.names = FALSE)
  }else{
    return(cox_table)
  }
}
|
# Example usage of maplayer::layer_icon() on the 'sf' package's sample
# North Carolina counties shapefile (reprojected to Web Mercator, EPSG:3857).
nc <- getdata::get_location(type = system.file("shape/nc.shp", package = "sf"), crs = 3857)
# Base map: county polygons on a blank (theme_void) canvas.
basemap <-
ggplot2::ggplot() +
ggplot2::theme_void() +
layer_location_data(data = nc)
# icon can be set by name matching a name from map_icons
basemap +
layer_icon(data = nc, icon = "point-start", size = 8)
# layer_icon can also use a column from the sf object
# (assumes nrow(nc) is divisible by 4 -- rep() warns otherwise)
nc$icon <- rep(c("1", "2", "3", "4"), nrow(nc) / 4)
basemap +
layer_icon(data = nc, iconname_col = "icon", size = 6)
| /examples/layer_icon.R | permissive | elipousson/maplayer | R | false | false | 484 | r | nc <- getdata::get_location(type = system.file("shape/nc.shp", package = "sf"), crs = 3857)
basemap <-
ggplot2::ggplot() +
ggplot2::theme_void() +
layer_location_data(data = nc)
# icon can be set by name matching a name from map_icons
basemap +
layer_icon(data = nc, icon = "point-start", size = 8)
# layer_icon can also use a column from the sf object
nc$icon <- rep(c("1", "2", "3", "4"), nrow(nc) / 4)
basemap +
layer_icon(data = nc, iconname_col = "icon", size = 6)
|
#' Mongo Client
#'
#' Returns a mongo client connected to the collection, database and server
#' configured in the environment variables \code{mCollection}, \code{mDb}
#' and \code{mongoUrl}.
#'
#' @return A \code{mongolite::mongo} connection object.
#' @export
#' @examples
#' \dontrun{
#' mongoConnect()
#' }
mongoConnect <- function(){
m <- mongolite::mongo(collection = Sys.getenv("mCollection"),db =Sys.getenv("mDb"), url = Sys.getenv("mongoUrl"))
return(m)
}
| /R/mongoConnect.R | no_license | rsangra/fhirexample | R | false | false | 338 | r | #' Mongo Client
#'
#' This function returns mongo client connected to configurated environment variables
#' mongoUrl, mDb, mCollection
#'
#' @export
#' @example
#' mongoconnect()
mongoConnect <- function(){
m <- mongolite::mongo(collection = Sys.getenv("mCollection"),db =Sys.getenv("mDb"), url = Sys.getenv("mongoUrl"))
return(m)
}
|
# Shiny server for the in-flight meal/drink ordering dashboard (seat "32C").
#
# Relies on objects created at app start-up (e.g. global.R):
#   db            - DBI connection with tables `stocks`, `meals`, `feedbacks`
#   get_audio_tag - helper returning an HTML <audio> tag for a www/ mp3 file
#
# Fixes relative to the previous version:
#   * the out-of-stock warning observer was registered twice per meal, so the
#     popup message fired twice; each meal now has exactly one submit handler
#   * fish_counter was first initialised with a whole data.frame instead of
#     its stock1 column (later shadowed by a correct definition)
#   * the reactive stock counters are now decremented on a successful order,
#     so the value boxes reflect remaining stock without reloading
#   * dead renderUI outputs that were immediately overwritten by
#     renderValueBox were removed
#   * the nine copy-pasted drink branches collapsed into one parameterised
#     UPDATE statement
shinyServer(function(input, output, session) {

  seat_no <- "32C"  # NOTE(review): hard-coded seat; derive from passenger session

  # Meal display names -> DB keys; the drink menu as shown in the UI.
  menu_meals <- c(Fish = "fish", Chicken = "chicken", Vegetarian = "vegetarian")
  menu_drinks <- c("Water", "Coca-Cola", "Beer", "Wine", "Orange Juice",
                   "Apple Juice", "Champagne", "Tea", "Coffee")

  # ---- menu preview images (served from ./www/<selection>.jpeg) -------------
  output$preImage <- renderImage({
    filename <- normalizePath(file.path('./www',
                                        paste(input$mealinput, '.jpeg', sep = '')))
    list(src = filename, alt = input$mealinput)
  }, deleteFile = FALSE)

  output$preImage2 <- renderImage({
    filename <- normalizePath(file.path('./www',
                                        paste(input$drinkinput, '.jpeg', sep = '')))
    list(src = filename, alt = input$drinkinput)
  }, deleteFile = FALSE)

  # ---- textual meal description ----------------------------------------------
  # switch() returns NULL (no text) for any value outside the three meals,
  # matching the old if/else chain.
  output$text <- renderText({
    switch(input$mealinput,
      "Fish" = "Pan-seared Salmon Fish, served with mashed potato, green beans and chilli.",
      "Chicken" = "Hainanese rice topped with sliced roasted chicken and signature chilli sauce.
    Allergens info: Gluten, soy.",
      "Vegetarian" = "Pumpkin rice, served with vegetarian honey soy chicken alongside capsicum, carrots, broccoli and sesame seeds."
    )
  })

  # ---- audio descriptions (clips live in www/) -------------------------------
  observeEvent(input$mealinput, {
    clip <- switch(input$mealinput,
      "Fish" = "salmon.mp3",
      "Chicken" = "hainanese.mp3",
      "Vegetarian" = "pumpkin.mp3"
    )
    if (!is.null(clip)) {
      output$my_audio <- renderUI(get_audio_tag(clip))
    }
  })

  observeEvent(input$mealchoice, {
    # Meal clips are named after the lower-case meal key (fish.mp3, ...).
    if (input$mealchoice %in% names(menu_meals)) {
      output$my_audiomeal <- renderUI(get_audio_tag(paste0(menu_meals[[input$mealchoice]], ".mp3")))
    }
  })

  observeEvent(input$drinkchoice, {
    # Drink clips are named after the lower-case drink ("coca-cola.mp3",
    # "orange juice.mp3", ...) -- tolower() matches all nine menu entries.
    if (input$drinkchoice %in% menu_drinks) {
      output$my_audiodrink <- renderUI(get_audio_tag(paste0(tolower(input$drinkchoice), ".mp3")))
    }
  })

  # ---- order submission: drink ------------------------------------------------
  # One parameterised UPDATE replaces the nine identical copy-pasted branches.
  observeEvent(input$submit, {
    if (input$drinkchoice %in% menu_drinks) {
      query <- sqlInterpolate(ANSI(),
                              "UPDATE meals SET drink = ?drink WHERE seat = ?seatno",
                              drink = input$drinkchoice, seatno = seat_no)
      dbExecute(db, query)
    }
  })

  # A passenger can only submit once per session...
  observeEvent(input$submit, {
    shinyjs::disable("submit")
  })

  # ...and only once overall: if this seat already ordered, lock the button.
  ord <- dbGetQuery(db, sqlInterpolate(ANSI(),
                                       "SELECT ordered FROM meals WHERE seat = ?seatno",
                                       seatno = seat_no))
  if (ord$ordered == "Ordered") {
    shinyjs::disable("submit")
  }

  # ---- meal stock bookkeeping ---------------------------------------------------
  # Stock levels are read once per session, then mirrored in reactive values.
  read_stock <- function(meal_key) {
    dbGetQuery(db, sqlInterpolate(ANSI(),
                                  "SELECT stock1 FROM stocks WHERE meal1 = ?meal",
                                  meal = meal_key))$stock1
  }
  stock <- reactiveValues(Fish = read_stock("fish"),
                          Chicken = read_stock("chicken"),
                          Vegetarian = read_stock("vegetarian"))

  # Single submit handler per order: warn when out of stock, otherwise
  # decrement stock (reactive mirror + DB) and record the meal for this seat.
  observeEvent(input$submit, {
    choice <- input$mealchoice
    if (!choice %in% names(menu_meals)) {
      return()
    }
    if (stock[[choice]] == 0) {
      session$sendCustomMessage(type = 'testmessage',
                                message = paste0('No more stock for ', menu_meals[[choice]],
                                                 ', please choose a different menu.'))
      return()
    }
    stock[[choice]] <- stock[[choice]] - 1
    dbExecute(db, sqlInterpolate(ANSI(),
                                 "UPDATE stocks SET stock1 = ?count WHERE meal1 = ?name",
                                 count = stock[[choice]], name = menu_meals[[choice]]))
    dbExecute(db, sqlInterpolate(ANSI(),
                                 "UPDATE meals SET food = ?foodname, ordered = ?notord WHERE seat = ?seatno",
                                 foodname = choice, notord = "Ordered", seatno = seat_no))
    session$sendCustomMessage(type = 'testmessage',
                              message = 'Thank you for your submission. We will serve your desired meal shortly.')
  })

  # ---- stock value boxes (shinydashboard) --------------------------------------
  output$fishcounter <- renderValueBox({
    valueBox(stock$Fish, "Stock left for fish", icon = icon("fish"),
             color = "purple")
  })
  output$chickencounter <- renderValueBox({
    valueBox(stock$Chicken, "Stock left for chicken", icon = icon("drumstick-bite"),
             color = "yellow")
  })
  output$vegecounter <- renderValueBox({
    valueBox(stock$Vegetarian, "Stock left for vegetarian", icon = icon("leaf"),
             color = "green")
  })

  # ---- feedback form ------------------------------------------------------------
  # The send button is enabled only while both free-text fields are non-empty.
  observe({
    if (!isTruthy(input$feedbacktext) || !isTruthy(input$mealtext)) {
      disable("send")
    } else {
      enable("send")
    }
  })

  observeEvent(input$send, {
    query <- sqlInterpolate(ANSI(),
                            "INSERT INTO feedbacks (meal, feedback) VALUES (?foodname, ?feedbacktext)",
                            foodname = input$mealtext, feedbacktext = input$feedbacktext)
    dbExecute(db, query)
    session$sendCustomMessage(type = 'testmessage',
                              message = 'Thank you for your feedback! We will continue to improve our in-flight meals!')
  })
}
) | /dashboard/server.R | no_license | valencialie11/airlines | R | false | false | 13,462 | r |
shinyServer(function(input, output, session) {
output$preImage <- renderImage({
filename <- normalizePath(file.path('./www',
paste(input$mealinput, '.jpeg', sep='')))
list(src = filename,
alt = input$mealinput)
}, deleteFile = FALSE)
output$preImage2 <- renderImage({
filename <- normalizePath(file.path('./www',
paste(input$drinkinput, '.jpeg', sep='')))
list(src = filename,
alt = input$drinkinput)
}, deleteFile = FALSE)
output$text <- renderText({
if (input$mealinput == "Fish")
{text <- "Pan-seared Salmon Fish, served with mashed potato, green beans and chilli."}
else if (input$mealinput == "Chicken")
{text <- "Hainanese rice topped with sliced roasted chicken and signature chilli sauce.
Allergens info: Gluten, soy."}
else if (input$mealinput == "Vegetarian")
{text <- "Pumpkin rice, served with vegetarian honey soy chicken alongside capsicum, carrots, broccoli and sesame seeds."}
})
#pull the number from database we made: fish
counterfish <- dbGetQuery(
conn = db,
statement =
'SELECT stock1
FROM stocks
WHERE meal1 = "fish"')
#make counter out of it with the if else conditions
fish_counter <- reactiveValues(count = counterfish)
observeEvent(input$submit,{
if(input$mealchoice=="Fish" & fish_counter$count==0)
{
session$sendCustomMessage(type = 'testmessage',
message = 'No more stock for fish, please choose a different menu.')
}
if(input$mealchoice=="Chicken"){
fish_counter$count <- fish_counter$count}
if(input$mealchoice=="Vegetarian"){
fish_counter$count <- fish_counter$count}
})
observeEvent(input$mealinput, {
if (input$mealinput=="Fish")
{
output$my_audio <-renderUI(get_audio_tag("salmon.mp3"))
}
else if (input$mealinput=="Chicken")
{
output$my_audio <-renderUI(get_audio_tag("hainanese.mp3"))
}
else if (input$mealinput=="Vegetarian")
{
output$my_audio <-renderUI(get_audio_tag("pumpkin.mp3"))
}
})
observeEvent(input$mealchoice, {
if (input$mealchoice=="Fish")
{
output$my_audiomeal <-renderUI(get_audio_tag("fish.mp3"))
}
else if (input$mealchoice=="Chicken")
{
output$my_audiomeal <-renderUI(get_audio_tag("chicken.mp3"))
}
else if (input$mealchoice=="Vegetarian")
{
output$my_audiomeal <-renderUI(get_audio_tag("vegetarian.mp3"))
}
})
observeEvent(input$drinkchoice, {
if (input$drinkchoice=="Water")
{
output$my_audiodrink <-renderUI(get_audio_tag("water.mp3"))
}
else if (input$drinkchoice=="Coca-Cola")
{
output$my_audiodrink <-renderUI(get_audio_tag("coca-cola.mp3"))
}
else if (input$drinkchoice=="Beer")
{
output$my_audiodrink <-renderUI(get_audio_tag("beer.mp3"))
}
else if (input$drinkchoice=="Wine")
{
output$my_audiodrink <-renderUI(get_audio_tag("wine.mp3"))
}
else if (input$drinkchoice=="Orange Juice")
{
output$my_audiodrink <-renderUI(get_audio_tag("orange juice.mp3"))
}
else if (input$drinkchoice=="Apple Juice")
{
output$my_audiodrink <-renderUI(get_audio_tag("apple juice.mp3"))
}
else if (input$drinkchoice=="Champagne")
{
output$my_audiodrink <-renderUI(get_audio_tag("champagne.mp3"))
}
else if (input$drinkchoice=="Tea")
{
output$my_audiodrink <-renderUI(get_audio_tag("tea.mp3"))
}
else if (input$drinkchoice=="Coffee")
{
output$my_audiodrink <-renderUI(get_audio_tag("coffee.mp3"))
}
})
observeEvent(input$submit, {
if (input$drinkchoice=="Water")
{sqldrink <- "UPDATE meals
SET drink = ?drink
WHERE seat = ?seatno"
sqldrink1 <- sqlInterpolate(ANSI(), sqldrink, drink="Water", seatno = "32C")
conn = db
dbExecute(conn, sqldrink1)
}
else if (input$drinkchoice=="Coca-Cola")
{sqldrink <- "UPDATE meals
SET drink = ?drink
WHERE seat = ?seatno"
sqldrink1 <- sqlInterpolate(ANSI(), sqldrink, drink="Coca-Cola", seatno = "32C")
conn = db
dbExecute(conn, sqldrink1)
}
else if (input$drinkchoice=="Beer")
{
sqldrink <- "UPDATE meals
SET drink = ?drink
WHERE seat = ?seatno"
sqldrink1 <- sqlInterpolate(ANSI(), sqldrink, drink="Beer", seatno = "32C")
conn = db
dbExecute(conn, sqldrink1)
}
else if (input$drinkchoice=="Wine")
{
sqldrink <- "UPDATE meals
SET drink = ?drink
WHERE seat = ?seatno"
sqldrink1 <- sqlInterpolate(ANSI(), sqldrink, drink="Wine", seatno = "32C")
conn = db
dbExecute(conn, sqldrink1)
}
else if (input$drinkchoice=="Orange Juice")
{sqldrink <- "UPDATE meals
SET drink = ?drink
WHERE seat = ?seatno"
sqldrink1 <- sqlInterpolate(ANSI(), sqldrink, drink="Orange Juice", seatno = "32C")
conn = db
dbExecute(conn, sqldrink1)
}
else if (input$drinkchoice=="Apple Juice")
{
sqldrink <- "UPDATE meals
SET drink = ?drink
WHERE seat = ?seatno"
sqldrink1 <- sqlInterpolate(ANSI(), sqldrink, drink="Apple Juice", seatno = "32C")
conn = db
dbExecute(conn, sqldrink1)
}
else if (input$drinkchoice=="Champagne")
{
sqldrink <- "UPDATE meals
SET drink = ?drink
WHERE seat = ?seatno"
sqldrink1 <- sqlInterpolate(ANSI(), sqldrink, drink="Champagne", seatno = "32C")
conn = db
dbExecute(conn, sqldrink1)
}
else if (input$drinkchoice=="Tea")
{
sqldrink <- "UPDATE meals
SET drink = ?drink
WHERE seat = ?seatno"
sqldrink1 <- sqlInterpolate(ANSI(), sqldrink, drink="Tea", seatno = "32C")
conn = db
dbExecute(conn, sqldrink1)
}
else if (input$drinkchoice=="Coffee")
{
sqldrink <- "UPDATE meals
SET drink = ?drink
WHERE seat = ?seatno"
sqldrink1 <- sqlInterpolate(ANSI(), sqldrink, drink="Coffee", seatno = "32C")
conn = db
dbExecute(conn, sqldrink1)
}
})
observeEvent(input$submit, {
shinyjs::disable("submit")
})
ord <- dbGetQuery(
conn = db,
statement =
'SELECT ordered
FROM meals
WHERE seat = "32C"')
if (ord$ordered == "Ordered")
{shinyjs::disable("submit")
}
#pull the number from database we made: fish
counterfish <- dbGetQuery(
conn = db,
statement =
'SELECT stock1
FROM stocks
WHERE meal1 = "fish"')
#make counter out of it with the if else conditions
fish_counter <- reactiveValues(count = counterfish$stock1)
observeEvent(input$submit,{
if(input$mealchoice=="Fish" & fish_counter$count>0){
sql <- "UPDATE stocks
SET stock1 = ?count
WHERE meal1 = ?name"
sqlmeal <- "UPDATE meals
SET food = ?food,
ordered = ?notord
WHERE seat = ?seatno"
sql1 <- sqlInterpolate(ANSI(), sql, count = fish_counter$count-1, name = "fish")
sqlmeal1 <- sqlInterpolate(ANSI(), sqlmeal, food="Fish", notord="Ordered", seatno = "32C")
conn = db
dbExecute(conn, sql1)
dbExecute(conn, sqlmeal1)
session$sendCustomMessage(type = 'testmessage',
message = 'Thank you for your submission. We will serve your desired meal shortly.')
}
if(input$mealchoice=="Chicken"){
fish_counter$count <- fish_counter$count}
if(input$mealchoice=="Vegetarian"){
fish_counter$count <- fish_counter$count}
})
observeEvent(input$submit,{
if(input$mealchoice=="Fish" & fish_counter$count==0)
{
session$sendCustomMessage(type = 'testmessage',
message = 'No more stock for fish, please choose a different menu.')
}
if(input$mealchoice=="Chicken"){
fish_counter$count <- fish_counter$count}
if(input$mealchoice=="Vegetarian"){
fish_counter$count <- fish_counter$count}
})
#pull the number from database we made: chicken
counterchicken <- dbGetQuery(
conn = db,
statement =
'SELECT stock1
FROM stocks
WHERE meal1 = "chicken"')
#use it to make a counter for vege
chicken_counter <- reactiveValues(count = counterchicken$stock1)
observeEvent(input$submit,{
if(input$mealchoice=="Fish"){
chicken_counter$count <- chicken_counter$count}
if(input$mealchoice=="Chicken" & chicken_counter$count==0){
session$sendCustomMessage(type = 'testmessage',
message = 'No more stock for chicken, please choose a different menu.')
}
if(input$mealchoice=="Vegetarian"){
chicken_counter$count <- chicken_counter$count}
})
observeEvent(input$submit,{
if(input$mealchoice=="Fish"){
chicken_counter$count <- chicken_counter$count}
if(input$mealchoice=="Chicken" & chicken_counter$count>0){
sql <- "UPDATE stocks
SET stock1 = ?count
WHERE meal1 = ?name"
sqlmeal <- "UPDATE meals
SET food = ?foodname,
ordered = ?notord
WHERE seat = ?seatno"
sql1 <- sqlInterpolate(ANSI(), sql, count = chicken_counter$count-1, name = "chicken")
sqlmeal1 <- sqlInterpolate(ANSI(), sqlmeal, foodname="Chicken", notord = "Ordered", seatno = "32C")
conn = db
dbExecute(conn, sql1)
dbExecute(conn, sqlmeal1)
session$sendCustomMessage(type = 'testmessage',
message = 'Thank you for your submission. We will serve your desired meal shortly.')}
if(input$mealchoice=="Vegetarian"){
chicken_counter$count <- chicken_counter$count}
})
#pull the number from database we made: vege
countervege <- dbGetQuery(
conn = db,
statement =
'SELECT stock1
FROM stocks
WHERE meal1 = "vegetarian"')
#use it to make a counter for vege
vege_counter <- reactiveValues(count = countervege$stock1)
observeEvent(input$submit,{
if(input$mealchoice=="Fish"){
vege_counter$count <- vege_counter$count}
if(input$mealchoice=="Chicken"){
vege_counter$count <- vege_counter$count}
if(input$mealchoice=="Vegetarian" & vege_counter$count==0)
{
session$sendCustomMessage(type = 'testmessage',
message = 'No more stock for vegetarian, please choose a different menu.')
}
})
observeEvent(input$submit,{
if(input$mealchoice=="Fish"){
vege_counter$count <- vege_counter$count}
if(input$mealchoice=="Chicken"){
vege_counter$count <- vege_counter$count}
if(input$mealchoice=="Vegetarian" & vege_counter$count>0){
sql <- "UPDATE stocks
SET stock1 = ?count
WHERE meal1 = ?name"
sqlmeal <- "UPDATE meals
SET food = ?foodname,
ordered = ?notord
WHERE seat = ?seatno"
sql1 <- sqlInterpolate(ANSI(), sql, count = vege_counter$count-1, name = "vegetarian")
sqlmeal1 <- sqlInterpolate(ANSI(), sqlmeal, foodname="Vegetarian", notord="Ordered", seatno = "32C")
conn = db
dbExecute(conn, sql1)
dbExecute(conn, sqlmeal1)
session$sendCustomMessage(type = 'testmessage',
message = 'Thank you for your submission. We will serve your desired meal shortly.')}
})
output$fishcounter <- renderUI({
HTML(fish_counter$count)
})
output$chickencounter <- renderUI({
HTML(chicken_counter$count)
})
output$vegecounter <- renderUI({
HTML(vege_counter$count)
})
output$fishcounter <- renderValueBox({
valueBox(
renderUI({
HTML(fish_counter$count)
}), "Stock left for fish", icon = icon("fish"),
color = "purple"
)
})
output$chickencounter <- renderValueBox({
valueBox(
renderUI({
HTML(chicken_counter$count)
}), "Stock left for chicken", icon = icon("drumstick-bite"),
color = "yellow"
)
})
output$vegecounter <- renderValueBox({
valueBox(
renderUI({
HTML(vege_counter$count)
}), "Stock left for vegetarian", icon = icon("leaf"),
color = "green"
)
})
observe(
{
if(!isTruthy(input$feedbacktext) || !isTruthy(input$mealtext))
{
disable("send")
}
else
{
enable("send")
}
})
observeEvent(input$send,{
sql <- "INSERT INTO feedbacks (meal, feedback)
VALUES (?foodname, ?feedbacktext)"
sqlnew <- sqlInterpolate(ANSI(), sql, foodname = input$mealtext, feedbacktext = input$feedbacktext)
conn = db
dbExecute(conn, sqlnew)
session$sendCustomMessage(type = 'testmessage',
message = 'Thank you for your feedback! We will continue to improve our in-flight meals!')}
)
}
) |
# Fuzz-test fixture for meteor's internal ET0_PriestleyTaylor wrapper:
# empty numeric vectors for G/Rn/atmp/relh and an 80-element zero-filled
# temperature vector (exercises the zero-length-input code path).
testlist <- list(G = numeric(0), Rn = numeric(0), atmp = numeric(0), relh = numeric(0), temp = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
# Call the (non-exported) function with the generated argument list.
result <- do.call(meteor:::ET0_PriestleyTaylor,testlist)
# Print the structure of the result so the fuzz harness can detect crashes.
str(result)
result <- do.call(meteor:::ET0_PriestleyTaylor,testlist)
str(result) |
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/randomtourney.R
\name{randomtourney}
\alias{randomtourney}
\title{Generates a randomized tournament with random outcomes}
\usage{
randomtourney(n, matchups = 2, pties = 0, ints = 100, type = "char")
}
\arguments{
\item{n}{Number of individuals in tournament}
\item{matchups}{Number of times individuals compete in the tournament. Can
be a numeric input or, if \strong{\code{matchups}="random"}, interactions
occur at random}
\item{pties}{Probability of each individual matchup ending in a tie.
Default is 0, i.e. no ties. Needs to be a number between 0 and 1.}
\item{ints}{The number of interactions in the tournament if matchups
are set to random.}
\item{type}{Whether to return results as W/L characters or 1/2 numbers.
\strong{\code{type}="char"} is the default, \strong{\code{type}="nums"}
returns 1/2 numbers referring to winner as id1 or id2}
}
\value{
A competition results dataframe
}
\description{
Generates a randomized tournament with random outcomes
}
\section{Further details}{
Specify number of individuals to compete in a tournament and
the number of times they compete with each other. Winners
and losers are determined at random. The resulting dataframe
will have variables: \code{id1}, \code{id2}, \code{result}.
Result refers to the outcome from \code{id1}'s perspective, i.e. a "W"
refers to \code{id1} beating \code{id2}, and a "L" refers to
\code{id2} beating \code{id1}. Individuals are referred to by a
random assignment of two conjoined letters.
}
\examples{
randomtourney(20,2) #20 individuals interact twice with each other
randomtourney(5,6) #5 individuals interact six times with each other
randomtourney(8) #8 individuals interact twice with each other
}
| /man/randomtourney.Rd | no_license | Sandy4321/compete | R | false | false | 1,771 | rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/randomtourney.R
\name{randomtourney}
\alias{randomtourney}
\title{Generates a randomized tournament with random outcomes}
\usage{
randomtourney(n, matchups = 2, pties = 0, ints = 100, type = "char")
}
\arguments{
\item{n}{Number of individuals in tournament}
\item{matchups}{Number of times individuals compete in tournament. Can
be a numeric input, or, if \strong{\code{matchups}="random"} interactions
are random}
\item{pties}{Probability of each individual matchup ending in a tie.
Default is 0, i.e. no ties. Needs to be a number between 0 and 1.}
\item{ints}{The number of interactions in the tournament if matchups
are set to random.}
\item{type}{Whether to return results as W/L characters or 1/2 numbers.
\strong{\code{type}="char"} is the default, \strong{\code{type}="nums"}
returns 1/2 numbers referring to winner as id1 or id2}
}
\value{
A competition results dataframe
}
\description{
Generates a randomized tournament with random outcomes
}
\section{Further details}{
Specify number of individuals to compete in a tournament and
the number of times they compete with each other. Winners
and losers are determined at random. The resulting dataframe
will have variables: \code{id1}, \code{id2}, \code{result}.
Result refers to the outcome from \code{id1}'s perspective, i.e. a "W"
refers to \code{id1} beating \code{id2}, and a "L" refers to
\code{id2} beating \code{id1}. Individuals are referred to by a
random assignment of two conjoined letters.
}
\examples{
randomtourney(20,2) #20 individuals interact twice with each other
randomtourney(5,6) #5 individuals interact six times with each other
randomtourney(8) #8 individuals interact twice with each other
}
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/mip.R
\name{as.mip}
\alias{as.mip}
\title{Write an editset into a mip representation}
\usage{
as.mip(E, x = NULL, weight = NULL, M = 1e+07, epsilon = 0.001,
prefix = "delta.", ...)
}
\arguments{
\item{E}{an \code{\link{editset}} or an object that is coercible to an
\code{editset}}
\item{x}{named \code{list}/\code{vector} with variable values}
\item{weight}{reliability weights for values of \code{x}}
\item{M}{Constant that is used for allowing the values to differ from \code{x}}
\item{epsilon}{Constant that is used for converting '<' into '<='}
\item{prefix}{prefix for dummy variables that are created}
\item{...}{not used}
}
\value{
a mip object containing all information needed to transform it
into an LP/MIP problem
}
\description{
Writes an editset or an object coercible to an editset as a MIP problem.
}
| /pkg/man/as.mip.Rd | no_license | jenifferYingyiWu/editrules | R | false | false | 908 | rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/mip.R
\name{as.mip}
\alias{as.mip}
\title{Write an editset into a mip representation}
\usage{
as.mip(E, x = NULL, weight = NULL, M = 1e+07, epsilon = 0.001,
prefix = "delta.", ...)
}
\arguments{
\item{E}{an \code{link{editset}} or an object that is coerciable to an
\code{editset}}
\item{x}{named \code{list}/\code{vector} with variable values}
\item{weight}{reliability weights for values of \code{x}}
\item{M}{Constant that is used for allowing the values to differ from \code{x}}
\item{epsilon}{Constant that is used for converting '<' into '<='}
\item{prefix}{prefix for dummy variables that are created}
\item{...}{not used}
}
\value{
a mip object containing al information for transforming it
into an lp/mip problem
}
\description{
Writes an editset or an object coercable to an editset as a mip problem.
}
|
# Spring phenology analysis setup: load per-plot spectral time series for
# Germany and derive each plot's "base" DOY (day of the seasonal EVI minimum).
library(mgcv)       # gam() spline fits used by pheno_model() below
library(tidyverse)  # dplyr verbs and %>%
library(ggplot2)
library(reshape2)
# NOTE(review): hard-coded UNC network path -- consider a relative path/config.
setwd("\\\\141.20.140.91/SAN_Projects/Spring/workspace/Katja/germany/spectral")
data <- read.csv(header=TRUE, sep=",", file="data_clear.csv")
data <- subset(data, data$year == 2017)
#################################################################################
# find base doy for evi
# NOTE(review): `data_base` is not defined above -- presumably created elsewhere
# (or should this be `data`?). Verify before running the script top-to-bottom.
base <- data_base %>% group_by(plotid) %>% filter(evi == min(evi))
#base_doy <- data_base %>% group_by(dwd_stat) %>% filter(ndvi == min(ndvi))
# Attach each plot's base DOY to every observation. merge() suffixes the
# shared `doy` column, so rename doy.y -> base_doy_evi and doy.x back to doy.
data <- merge(data, base[, c("plotid", "doy")], by= "plotid", all.x=TRUE)
names(data)[names(data) == 'doy.y'] <- 'base_doy_evi'
names(data)[names(data) == 'doy.x'] <- 'doy'
# Fit per-plot spring phenology models on a vegetation-index time series.
#
# For every plot (year 2017 only) the green-up segment of the series -- from
# the plot's base DOY (seasonal index minimum) up to 10 days after the
# observed index maximum -- is fitted twice:
#   1. a four-parameter logistic curve via nls():
#        index = b1 + b2 / (1 + exp(-b3 * (doy - b4)))
#   2. a penalised regression spline via mgcv::gam(index ~ s(doy)); the
#      transition DOY is taken from the extremum of a finite-difference
#      first derivative of the fitted spline.
#
# Args:
#   plotid         : plot identifier per observation
#   index          : vegetation index value per observation (e.g. EVI)
#   doy            : day of year per observation
#   year           : observation year (only 2017 rows are used)
#   base_doy_index : per-plot DOY of the seasonal index minimum
#   min_obs        : minimum number of observations required to attempt a fit
#
# Returns: a list of two lists, each with one data.frame per plot:
#   [[1]] logistic fits (b1..b4, error flags, transition, observations, RSS)
#   [[2]] spline fits   (sp = estimated DOY, error flags, transition, RSS)
pheno_model <- function(plotid,
                        index,
                        doy,
                        year,
                        base_doy_index,
                        min_obs = 10){
  data = data.frame(plotid,
                    index,
                    doy,
                    year,
                    base_doy_index)
  l_samples <- length(unique(data$plotid))
  # Preallocate one result slot per plot for each model family.
  nls_fit_result <- vector("list", l_samples)
  sp_fit_result <- vector("list", l_samples)
  k <- 0
  for( p in unique(data$plotid)){
    print(paste("plot: ", p))
    transition <- c()
    d = subset(data, data$plotid == p & data$year == "2017")
    k <- k + 1
    if(length(d$doy) >= min_obs){
      # Fitting window: from the plot's base DOY up to 10 days past the
      # observed index maximum.
      # NOTE(review): ties in max(index) make `transition` multi-valued,
      # which would break seq() below and duplicate result rows -- confirm
      # the input cannot contain exact ties.
      transition <- with(d, doy[index == max(index)]) + 10
      base_doy <- d$base_doy_index[1]
      dat <- subset(d, d$doy <= transition & d$doy >= base_doy)
      # LOGISTIC FIT: b4 (inflection) starts at the mean DOY of the
      # above-median index observations.
      nls_fit <- tryCatch(nls(index ~ b1 + (b2 / (1 + exp(- b3 * (doy - b4)))),
                              start = list(b1 = min(index),
                                           b2 = max(index),
                                           b3 = 0.2,
                                           b4 = round(mean(dat[which(dat$index > median(dat$index)), "doy"]), 0)),
                              data = dat), error = function(e) return(NA))
      if (inherits(nls_fit, "nls")) {
        nls_fit_result[[k]] <- as.data.frame(data.frame(t(coef(nls_fit)),
                                                        "plotid" = p,
                                                        "obs_error" = 0,
                                                        "fit_error" = 0,
                                                        "transition" = transition,
                                                        "observations" = length(d$doy),
                                                        "RSS" = sum(resid(nls_fit)^2)))
      }
      else {
        # nls did not converge (tryCatch returned NA): flag a fit error.
        nls_fit_result[[k]] <- as.data.frame(data.frame("b1" = NA,
                                                        "b2" = NA,
                                                        "b3" = NA,
                                                        "b4" = NA,
                                                        "plotid" = p,
                                                        "obs_error" = 0,
                                                        "fit_error" = 1,
                                                        "transition" = transition,
                                                        "observations" = length(d$doy),
                                                        "RSS" = NA))
      }
      # SPLINE FIT
      fit_sp <- tryCatch(gam(index ~ s(doy), data = dat), error = function(e) return(NA))
      if (inherits(fit_sp, "gam")) {
        # First derivative of the fitted spline via finite differences on the
        # linear-predictor matrix, evaluated on a daily grid 0..transition.
        newDF <- with(dat, data.frame(doy = seq(0, transition, 1)))
        B <- predict(fit_sp, newDF, type = "response", se.fit = TRUE)
        eps <- 1e-7
        X0 <- predict(fit_sp, newDF, type = 'lpmatrix')
        newDFeps_p <- newDF + eps
        X1 <- predict(fit_sp, newDFeps_p, type = 'lpmatrix')
        # NOTE(review): (X0 - X1) / eps is the NEGATIVE forward difference,
        # so which.min() below selects the steepest increase -- confirm this
        # sign convention is intended.
        Xp <- (X0 - X1) / eps
        fd_d1 <- Xp %*% coef(fit_sp)
        sp_doy <- which.min(fd_d1) + base_doy - 1
        sp_fit_result[[k]] <- as.data.frame(data.frame("sp" = sp_doy,
                                                       "plotid" = p,
                                                       "obs_error_sp" = 0,
                                                       "fit_error_sp" = 0,
                                                       "transition" = transition,
                                                       "observations" = length(d$doy),
                                                       "RSS" = sum(resid(fit_sp)^2)))
        # Reset fit objects before the next iteration.
        fd_d1 = NULL
        fit_sp = NULL
      }
      else {
        # gam() failed: flag a spline fit error.
        sp_fit_result[[k]] <- as.data.frame(data.frame("sp" = NA,
                                                       "plotid" = p,
                                                       "obs_error_sp" = 0,
                                                       "fit_error_sp" = 1,
                                                       "transition" = transition,
                                                       "observations" = length(d$doy),
                                                       "RSS" = NA))
      }
    }
    # Too few observations for this plot: flag an observation error.
    else {
      sp_fit_result[[k]] <- as.data.frame(data.frame("sp" = NA,
                                                     "plotid" = p,
                                                     "obs_error_sp" = 1,
                                                     "fit_error_sp" = 0,
                                                     "transition" = 0,
                                                     "observations" = length(d$doy),
                                                     "RSS" = NA))
      nls_fit_result[[k]] <- as.data.frame(data.frame("b1" = NA,
                                                      "b2" = NA,
                                                      "b3" = NA,
                                                      "b4" = NA,
                                                      "plotid" = p,
                                                      "obs_error" = 1,
                                                      "fit_error" = 0,
                                                      "transition" = 0,
                                                      "observations" = length(d$doy),
                                                      "RSS" = NA))
    }
  }
  return(list(nls_fit_result, sp_fit_result))
}
#################################################################################
ptm <- proc.time()
pheno_result_v5 <- pheno_model(data$plotid, data$evi, data$doy, data$year, data$base_doy_evi)
(proc.time() - ptm) / 60
res_nls_v5 <- data.frame(do.call(rbind, pheno_result_v5[[1]]))
res_spl_v5 <- data.frame(do.call(rbind, pheno_result_v5[[2]]))
results_v5 <- cbind(res_nls_v5, res_spl_v5[,c(1,3,4)])
mean(!is.na(results_v5$b4))
mean(!is.na(results_v5$sp))
cor.test(results_v5$b4, results_v5$sp, use="complete.obs")
range(results_v5$b4, na.rm=TRUE)
ggplot(results_v5, aes(x = sp, y = b4)) +
geom_point() +
coord_equal() +
geom_abline(intercept = 0, slope = 1)+
#geom_text(aes(label = plotid))
labs(x="DOY (GAM)", y="DOY (LOG)")
sub_res_v2 <- results_v2[, c("b4","sp")]
df_v2 <- melt(sub_res_v2)
ggplot(data= results)+
geom_histogram(aes(x= sp))
ggplot(data= df_v2, aes(x=variable, y=value))+
geom_boxplot()+
stat_boxplot(geom="errorbar", width=0.5)
| /pheno_model_v5.R | no_license | katjakowalski/MA | R | false | false | 7,995 | r | library(mgcv)
library(tidyverse)
library(ggplot2)
library(reshape2)
setwd("\\\\141.20.140.91/SAN_Projects/Spring/workspace/Katja/germany/spectral")
data <- read.csv(header=TRUE, sep=",", file="data_clear.csv")
data <- subset(data, data$year == 2017)
#################################################################################
# find base doy for evi
base <- data_base %>% group_by(plotid) %>% filter(evi == min(evi))
#base_doy <- data_base %>% group_by(dwd_stat) %>% filter(ndvi == min(ndvi))
data <- merge(data, base[, c("plotid", "doy")], by= "plotid", all.x=TRUE)
names(data)[names(data) == 'doy.y'] <- 'base_doy_evi'
names(data)[names(data) == 'doy.x'] <- 'doy'
pheno_model <- function(plotid,
index,
doy,
year,
base_doy_index,
min_obs = 10){
data = data.frame(plotid,
index,
doy,
year,
base_doy_index)
l_samples <- length(unique(data$plotid))
nls_fit_result <- vector("list", l_samples)
sp_fit_result <- vector("list", l_samples)
k <- 0
for( p in unique(data$plotid)){
print(paste("plot: ", p))
transition <- c()
d = subset(data, data$plotid == p & data$year == "2017")
#print(paste(length(d$doy)))
k <- k + 1
if(length(d$doy) >= min_obs){
transition <- with(d, doy[index == max(index)]) + 10
base_doy <- d$base_doy_index[1]
dat <- subset(d, d$doy <= transition & d$doy >= base_doy)
#print(paste("length:", length(dat$doy)))
#LOGISTIC FIT
#par_b3 <- seq(0.1, 0.9, 0.5)
#for(i in par_b3){
nls_fit <- tryCatch(nls(index ~ b1 + (b2 / (1 + exp(- b3 * (doy - b4)))),
start = list(b1 = min(index),
b2 = max(index),
b3 = 0.2,
b4 = round(mean(dat[which(dat$index > median(dat$index)), "doy"]), 0)),
data = dat), error = function(e) return(NA))
# if (class(nls_fit) == "nls"){
# break
# }
#}
if (class(nls_fit) == "nls") {
#print(paste(round(coef(nls_fit), 2)))
nls_fit_result[[k]] <- as.data.frame(data.frame(t(coef(nls_fit)),
"plotid"=p,
"obs_error"= 0,
"fit_error"= 0,
"transition" = transition,
"observations" = length(d$doy),
"RSS" = sum(resid(nls_fit)^2) ))
}
# if class NA:
else {
nls_fit_result[[k]] <- as.data.frame(data.frame("b1" = NA,
"b2" = NA,
"b3" = NA,
"b4" =NA,
"plotid" = p,
"obs_error" = 0,
"fit_error" = 1,
"transition" = transition,
"observations" = length(d$doy),
"RSS" = NA))
}
#SPLINE FIT
fit_sp <- tryCatch(gam(index ~ s(doy), data = dat), error = function(e) return(NA))
## 1st derivative to estimate slope
if(class(fit_sp) == "gam"){
newDF <- with(dat, data.frame(doy = seq(0, transition, 1)))
B <- predict(fit_sp, newDF, type = "response", se.fit = TRUE)
eps <- 1e-7
X0 <- predict(fit_sp, newDF, type = 'lpmatrix')
newDFeps_p <- newDF + eps
X1 <- predict(fit_sp, newDFeps_p, type = 'lpmatrix')
Xp <- (X0 - X1) / eps
fd_d1 <- Xp %*% coef(fit_sp)
sp_doy <- which.min(fd_d1) + base_doy -1
#print(paste("spline_doy: ",sp_doy ))
sp_fit_result[[k]] <- as.data.frame(data.frame("sp" = sp_doy,
"plotid"=p,
"obs_error_sp" = 0,
"fit_error_sp" = 0,
"transition" = transition,
"observations" = length(d$doy),
"RSS" = sum(resid(fit_sp)^2) ))
fd_d1 = NULL
fit_sp = NULL
}
# if class NA:
else {
sp_fit_result[[k]] <- as.data.frame(data.frame("sp" = NA,
"plotid" = p,
"obs_error_sp" = 0,
"fit_error_sp"= 1,
"transition" = transition,
"observations" = length(d$doy),
"RSS" = NA ))
}
}
# if observations < 10
else {
sp_fit_result[[k]] <- as.data.frame(data.frame("sp" = NA,
"plotid"= p,
"obs_error_sp" = 1,
"fit_error_sp" = 0,
"transition" = 0,
"observations" = length(d$doy),
"RSS" = NA ))
nls_fit_result [[k]] <- as.data.frame(data.frame("b1" = NA,
"b2" = NA,
"b3" = NA,
"b4" = NA,
"plotid"=p,
"obs_error" = 1,
"fit_error" = 0,
"transition" = 0,
"observations" = length(d$doy),
"RSS" = NA ))
}
}
return(list(nls_fit_result, sp_fit_result))
}
#################################################################################
ptm <- proc.time()
pheno_result_v5 <- pheno_model(data$plotid, data$evi, data$doy, data$year, data$base_doy_evi)
(proc.time() - ptm) / 60
res_nls_v5 <- data.frame(do.call(rbind, pheno_result_v5[[1]]))
res_spl_v5 <- data.frame(do.call(rbind, pheno_result_v5[[2]]))
results_v5 <- cbind(res_nls_v5, res_spl_v5[,c(1,3,4)])
mean(!is.na(results_v5$b4))
mean(!is.na(results_v5$sp))
cor.test(results_v5$b4, results_v5$sp, use="complete.obs")
range(results_v5$b4, na.rm=TRUE)
ggplot(results_v5, aes(x = sp, y = b4)) +
geom_point() +
coord_equal() +
geom_abline(intercept = 0, slope = 1)+
#geom_text(aes(label = plotid))
labs(x="DOY (GAM)", y="DOY (LOG)")
sub_res_v2 <- results_v2[, c("b4","sp")]
df_v2 <- melt(sub_res_v2)
ggplot(data= results)+
geom_histogram(aes(x= sp))
ggplot(data= df_v2, aes(x=variable, y=value))+
geom_boxplot()+
stat_boxplot(geom="errorbar", width=0.5)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/flag_outlooktime.R
\name{flag_outlooktime}
\alias{flag_outlooktime}
\title{Flag unusual outlook time settings for work day start and end time}
\usage{
flag_outlooktime(data, threshold = c(4, 15), return = "message")
}
\arguments{
\item{data}{A data frame containing a Person Query.}
\item{threshold}{A numeric vector of length two, specifying the hour threshold for flagging.
Defaults to c(4, 15).}
\item{return}{String to specify what to return.
Valid options include "message" (default), "text", and "data".
}
\description{
This function flags unusual outlook calendar settings for
start and end time of work day.
}
\examples{
# Demo with `dv_data`
flag_outlooktime(dv_data)
# Example where Outlook Start and End times are imputed
spq_df <- sq_data
spq_df$WorkingStartTimeSetInOutlook <- "6:30"
spq_df$WorkingEndTimeSetInOutlook <- "23:30"
flag_outlooktime(spq_df, threshold = c(5, 13))
}
\seealso{
Other Data Validation:
\code{\link{check_query}()},
\code{\link{flag_ch_ratio}()},
\code{\link{flag_em_ratio}()},
\code{\link{flag_extreme}()},
\code{\link{hr_trend}()},
\code{\link{hrvar_count_all}()},
\code{\link{hrvar_count}()},
\code{\link{identify_holidayweeks}()},
\code{\link{identify_inactiveweeks}()},
\code{\link{identify_nkw}()},
\code{\link{identify_outlier}()},
\code{\link{identify_privacythreshold}()},
\code{\link{identify_query}()},
\code{\link{identify_tenure}()},
\code{\link{remove_outliers}()},
\code{\link{subject_validate_report}()},
\code{\link{subject_validate}()},
\code{\link{track_HR_change}()}
}
\concept{Data Validation}
| /man/flag_outlooktime.Rd | permissive | Global19/wpa | R | false | true | 1,636 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/flag_outlooktime.R
\name{flag_outlooktime}
\alias{flag_outlooktime}
\title{Flag unusual outlook time settings for work day start and end time}
\usage{
flag_outlooktime(data, threshold = c(4, 15), return = "message")
}
\arguments{
\item{data}{A data frame containing a Person Query.}
\item{threshold}{A numeric vector of length two, specifying the hour threshold for flagging.
Defaults to c(4, 15).}
\item{return}{String to specify what to return.
Valid options include "text" (default), "message", and "data".}
}
\description{
This function flags unusual outlook calendar settings for
start and end time of work day.
}
\examples{
# Demo with `dv_data`
flag_outlooktime(dv_data)
# Example where Outlook Start and End times are imputed
spq_df <- sq_data
spq_df$WorkingStartTimeSetInOutlook <- "6:30"
spq_df$WorkingEndTimeSetInOutlook <- "23:30"
flag_outlooktime(spq_df, threshold = c(5, 13))
}
\seealso{
Other Data Validation:
\code{\link{check_query}()},
\code{\link{flag_ch_ratio}()},
\code{\link{flag_em_ratio}()},
\code{\link{flag_extreme}()},
\code{\link{hr_trend}()},
\code{\link{hrvar_count_all}()},
\code{\link{hrvar_count}()},
\code{\link{identify_holidayweeks}()},
\code{\link{identify_inactiveweeks}()},
\code{\link{identify_nkw}()},
\code{\link{identify_outlier}()},
\code{\link{identify_privacythreshold}()},
\code{\link{identify_query}()},
\code{\link{identify_tenure}()},
\code{\link{remove_outliers}()},
\code{\link{subject_validate_report}()},
\code{\link{subject_validate}()},
\code{\link{track_HR_change}()}
}
\concept{Data Validation}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_region.R
\name{read_region}
\alias{read_region}
\title{Download shape file of Brazil Regions as sf objects.}
\usage{
read_region(year = 2010, simplified = TRUE, showProgress = TRUE, tp)
}
\arguments{
\item{year}{Year of the data (defaults to 2010)}
\item{simplified}{Logic TRUE or FALSE, indicating whether the function returns the 'original' dataset with high resolution or a dataset with 'simplified' borders (Defaults to TRUE)}
\item{showProgress}{Logical. Defaults to TRUE, which displays a progress bar.}
\item{tp}{Argument deprecated. Please use argument 'simplified'}
}
\description{
Data at scale 1:250,000, using Geodetic reference system "SIRGAS2000" and CRS(4674)
}
\examples{
\donttest{
library(geobr)
# Read specific year
reg <- read_region(year=2018)
}
}
\seealso{
Other general area functions:
\code{\link{read_amazon}()},
\code{\link{read_biomes}()},
\code{\link{read_census_tract}()},
\code{\link{read_conservation_units}()},
\code{\link{read_country}()},
\code{\link{read_immediate_region}()},
\code{\link{read_intermediate_region}()},
\code{\link{read_meso_region}()},
\code{\link{read_micro_region}()},
\code{\link{read_municipality}()},
\code{\link{read_neighborhood}()},
\code{\link{read_semiarid}()},
\code{\link{read_state}()},
\code{\link{read_statistical_grid}()},
\code{\link{read_weighting_area}()}
}
\concept{general area functions}
| /r-package/man/read_region.Rd | no_license | fcotelo/geobr | R | false | true | 1,445 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_region.R
\name{read_region}
\alias{read_region}
\title{Download shape file of Brazil Regions as sf objects.}
\usage{
read_region(year = 2010, simplified = TRUE, showProgress = TRUE, tp)
}
\arguments{
\item{year}{Year of the data (defaults to 2010)}
\item{simplified}{Logic TRUE or FALSE, indicating whether the function returns the 'original' dataset with high resolution or a dataset with 'simplified' borders (Defaults to TRUE)}
\item{showProgress}{Logical. Defaults to TRUE, which displays a progress bar.}
\item{tp}{Argument deprecated. Please use argument 'simplified'}
}
\description{
Data at scale 1:250,000, using Geodetic reference system "SIRGAS2000" and CRS(4674)
}
\examples{
\donttest{
library(geobr)
# Read specific year
reg <- read_region(year=2018)
}
}
\seealso{
Other general area functions:
\code{\link{read_amazon}()},
\code{\link{read_biomes}()},
\code{\link{read_census_tract}()},
\code{\link{read_conservation_units}()},
\code{\link{read_country}()},
\code{\link{read_immediate_region}()},
\code{\link{read_intermediate_region}()},
\code{\link{read_meso_region}()},
\code{\link{read_micro_region}()},
\code{\link{read_municipality}()},
\code{\link{read_neighborhood}()},
\code{\link{read_semiarid}()},
\code{\link{read_state}()},
\code{\link{read_statistical_grid}()},
\code{\link{read_weighting_area}()}
}
\concept{general area functions}
|
#Written by Rafae Deliz-Aguirre
#For showing cases, projection over time
DIRECTORY = "~/COVID-19/Texas"
#LOCATIONS = c("Puerto Rico", "Texas", "New York", "California")
#LOCATIONS = c("Webb", "Dallas","Harris","Bexar")
LOCATIONS = c("Webb")
#Load libraries
library(stringr)
library(gganimate)
library(av)
library(dplyr)
library(parallel)
#Import data
setwd(DIRECTORY)
CasesExport <- read.csv("CasesTable.csv", header = T)
PredictionCasesExport <- read.csv("PredictionsTable.csv", header = T)
#Create export folder
FOLDER = paste(DIRECTORY, "CasesVideo", sep ="/")
dir.create(FOLDER)
setwd(FOLDER)
#Wave Date
WaveDate <-
PredictionCasesExport %>%
filter(
Location %in% LOCATIONS
) %>%
filter(
Wave > 1
) %>%
group_by(
Location,
Wave
) %>%
summarize(
Day = min(Day),
Date = min(Date),
Date = as.Date(Date)
)
CasesExport <-
CasesExport %>%
filter(
Location %in% LOCATIONS
) %>%
mutate(
NewCases = ifelse(NewCases<0, NA, NewCases),
NewCasesRatio = ifelse(NewCasesRatio<0, NA, NewCasesRatio),
Date = as.Date(Date)
)
PredictionCasesExport <-
PredictionCasesExport %>%
filter(
Location %in% LOCATIONS
) %>%
mutate(
CasesMin = ifelse(CasesMin < 0, 0, CasesMin),
Cases = ifelse(Cases < 0, 0, Cases),
CasesMax = ifelse(CasesMax < 0, 0, CasesMax),
Date = as.Date(Date)
)
DayFx <- function(DayX) {
PlottingTable <-
CasesExport %>%
filter(
Day <= DayX
)
PredictionCasesTable <-
PredictionCasesExport %>%
filter(
Day <= DayX
)
Test <- ifelse(max(CasesExport$Day) < DayX, T, F)
DayCases <-
ifelse(
Test == F,
paste(str_pad(max(PlottingTable$Cases), 4, pad = "0"), " Cases + ", sep = ""),
paste(str_pad(round(max(PredictionCasesTable$Cases, na.rm = T)), 4, pad = "0"), " Predicted Cases + ", sep = "")
)
NewCases <-
ifelse(
Test == F,
paste(str_pad(round(tail(PlottingTable$NewCases, n= 1)), 3, pad = "0"), " New Cases", sep = ""),
paste(str_pad(round(tail(PredictionCasesTable$NewCases, n = 1, na.rm = T)), 3, pad = "0"), " Predicted New Cases", sep = "")
)
DayN = ifelse(DayX < 13, 0, DayX - 12)
PlotTitle <-
paste(
"Day ", str_pad(DayN, 3, pad = "0"), ": ",
DayCases,
NewCases,
sep = "")
ggplot() +
geom_ribbon(
data = PredictionCasesTable,
aes(
x = Date,
ymin = CasesMin,
ymax = CasesMax
),
alpha=0.2) +
geom_line(
data = PredictionCasesTable,
aes(
x = Date,
y = Cases,
group = Location,
color = "Predicted Cases"
)
) +
geom_line(
data = PlottingTable,
aes(
x = Date,
y = Cases,
group = Location,
color = "Actual Cases"
),
) +
geom_vline(
data = WaveDate,
aes(
xintercept = Date
)
) +
scale_colour_manual(values=c("black","red"))+
scale_y_continuous(breaks= scales::pretty_breaks())+
theme_classic() +
labs(
title = PlotTitle,
subtitle = "Twitter @RHDeliz • delizaguirre.com/corona",
y = "Cases",
x = "Date",
color = "Data"
) +
facet_wrap(
~Location
) +
theme(
legend.position = "bottom"
) +
ggsave(
paste(str_pad(DayX, 4, pad = "0"), ".png"),
height = 4.5,
width = 8
)
}
tictoc::tic()
nDays = 1:max(PredictionCasesExport$Day)
Cases <- mclapply(nDays, DayFx)
tictoc::toc()
av_encode_video(list.files(FOLDER, "*.png"), framerate = 14, output = "Cases.mp4")
| /VideoCases.R | no_license | rhdeliz/COVID-19 | R | false | false | 3,613 | r | #Written by Rafae Deliz-Aguirre
#For showing cases, projection over time
DIRECTORY = "~/COVID-19/Texas"
#LOCATIONS = c("Puerto Rico", "Texas", "New York", "California")
#LOCATIONS = c("Webb", "Dallas","Harris","Bexar")
LOCATIONS = c("Webb")
#Load libraries
library(stringr)
library(gganimate)
library(av)
library(dplyr)
library(parallel)
#Import data
setwd(DIRECTORY)
CasesExport <- read.csv("CasesTable.csv", header = T)
PredictionCasesExport <- read.csv("PredictionsTable.csv", header = T)
#Create export folder
FOLDER = paste(DIRECTORY, "CasesVideo", sep ="/")
dir.create(FOLDER)
setwd(FOLDER)
#Wave Date
WaveDate <-
PredictionCasesExport %>%
filter(
Location %in% LOCATIONS
) %>%
filter(
Wave > 1
) %>%
group_by(
Location,
Wave
) %>%
summarize(
Day = min(Day),
Date = min(Date),
Date = as.Date(Date)
)
CasesExport <-
CasesExport %>%
filter(
Location %in% LOCATIONS
) %>%
mutate(
NewCases = ifelse(NewCases<0, NA, NewCases),
NewCasesRatio = ifelse(NewCasesRatio<0, NA, NewCasesRatio),
Date = as.Date(Date)
)
PredictionCasesExport <-
PredictionCasesExport %>%
filter(
Location %in% LOCATIONS
) %>%
mutate(
CasesMin = ifelse(CasesMin < 0, 0, CasesMin),
Cases = ifelse(Cases < 0, 0, Cases),
CasesMax = ifelse(CasesMax < 0, 0, CasesMax),
Date = as.Date(Date)
)
DayFx <- function(DayX) {
PlottingTable <-
CasesExport %>%
filter(
Day <= DayX
)
PredictionCasesTable <-
PredictionCasesExport %>%
filter(
Day <= DayX
)
Test <- ifelse(max(CasesExport$Day) < DayX, T, F)
DayCases <-
ifelse(
Test == F,
paste(str_pad(max(PlottingTable$Cases), 4, pad = "0"), " Cases + ", sep = ""),
paste(str_pad(round(max(PredictionCasesTable$Cases, na.rm = T)), 4, pad = "0"), " Predicted Cases + ", sep = "")
)
NewCases <-
ifelse(
Test == F,
paste(str_pad(round(tail(PlottingTable$NewCases, n= 1)), 3, pad = "0"), " New Cases", sep = ""),
paste(str_pad(round(tail(PredictionCasesTable$NewCases, n = 1, na.rm = T)), 3, pad = "0"), " Predicted New Cases", sep = "")
)
DayN = ifelse(DayX < 13, 0, DayX - 12)
PlotTitle <-
paste(
"Day ", str_pad(DayN, 3, pad = "0"), ": ",
DayCases,
NewCases,
sep = "")
ggplot() +
geom_ribbon(
data = PredictionCasesTable,
aes(
x = Date,
ymin = CasesMin,
ymax = CasesMax
),
alpha=0.2) +
geom_line(
data = PredictionCasesTable,
aes(
x = Date,
y = Cases,
group = Location,
color = "Predicted Cases"
)
) +
geom_line(
data = PlottingTable,
aes(
x = Date,
y = Cases,
group = Location,
color = "Actual Cases"
),
) +
geom_vline(
data = WaveDate,
aes(
xintercept = Date
)
) +
scale_colour_manual(values=c("black","red"))+
scale_y_continuous(breaks= scales::pretty_breaks())+
theme_classic() +
labs(
title = PlotTitle,
subtitle = "Twitter @RHDeliz • delizaguirre.com/corona",
y = "Cases",
x = "Date",
color = "Data"
) +
facet_wrap(
~Location
) +
theme(
legend.position = "bottom"
) +
ggsave(
paste(str_pad(DayX, 4, pad = "0"), ".png"),
height = 4.5,
width = 8
)
}
tictoc::tic()
nDays = 1:max(PredictionCasesExport$Day)
Cases <- mclapply(nDays, DayFx)
tictoc::toc()
av_encode_video(list.files(FOLDER, "*.png"), framerate = 14, output = "Cases.mp4")
|
# Yige Wu @WashU May 2021
## source activate signac
# set up libraries and output directory -----------------------------------
## getting the path to the current script
thisFile <- function() {
cmdArgs <- commandArgs(trailingOnly = FALSE)
needle <- "--file="
match <- grep(needle, cmdArgs)
if (length(match) > 0) {
# Rscript
return(normalizePath(sub(needle, "", cmdArgs[match])))
} else {
# 'source'd via R console
return(normalizePath(sys.frames()[[1]]$ofile))
}
}
path_this_script <- thisFile()
## set working directory
dir_base = "/diskmnt/Projects/ccRCC_scratch/ccRCC_snRNA/"
setwd(dir_base)
## load libraries
packages = c(
"rstudioapi",
"plyr",
"dplyr",
"stringr",
"reshape2",
"data.table",
"Signac",
"Seurat",
"ggplot2"
)
for (pkg_name_tmp in packages) {
if (!(pkg_name_tmp %in% installed.packages()[,1])) {
print(paste0(pkg_name_tmp, "is being installed!"))
BiocManager::install(pkgs = pkg_name_tmp, update = F)
install.packages(pkg_name_tmp, dependencies = T)
}
print(paste0(pkg_name_tmp, " is installed!"))
library(package = pkg_name_tmp, character.only = T)
}
source("./ccRCC_snRNA_analysis/functions.R")
## set run id
version_tmp <- 1
run_id <- paste0(format(Sys.Date(), "%Y%m%d") , ".v", version_tmp)
## set output directory
dir_out <- paste0(makeOutDir_katmai(path_this_script), run_id, "/")
dir.create(dir_out)
# input dependencies ------------------------------------------------------
## input the merged object
atac=readRDS(paste('/diskmnt/Projects/ccRCC_scratch/ccRCC_snATAC/Resources/snATAC_Processed_Data/Signac.1.0.0/3.Merge_snATAC/Merge.SelectPeaks.v.20210706/28_ccRCC_snATAC.selectedPeaks.chromvar.cicero.v3.20210725.rds',sep=''))
Idents(atac)=atac$Piece_ID
## input motif-peak mapping result
# peak2motif_df <- fread(data.table = F, input = "./Resources/snATAC_Processed_Data/Motifs_Mapped_to_Peaks/Motifs_matched.DEG_associated_Peaks.Motif_annotation.20210517.v1.tsv")
peak2motif_df <- fread(data.table = F, input = "/diskmnt/Projects/ccRCC_scratch/ccRCC_snATAC/Resources/snATAC_Processed_Data/Signac.1.0.0/3.Merge_snATAC/Merge.SelectPeaks.v.20210706/peaks/Motifs_matched.28_snATAC_merged.object.20210827.tsv")
## input peak fold changes
peak2fcs_df <- fread(data.table = F, input = "./Resources/snATAC_Processed_Data/Differential_Peaks/ccRCC_Specific/DA_peaks_Tumor_vs_PT_affected_byCNV_removed.tsv")
# specify parameters ------------------------------------------------------
motifs_plot <- c("KLF9")
motifs_plot <- c("MA1107.2")
peak_plot <- c("chr2-74833672-74834172")
topn_plot <- 24
motif_coord <- peak2motif_df$motif_coord[(peak2motif_df$Peak %in% "chr2-74833672-74834172") & (peak2motif_df$group_name %in% "MA1107.2")]
motif_coord <- unique(motif_coord)
motif_coord <- sort(motif_coord); motif_coord
# [1] "chr2-74834043-74834058" "chr2-74856215-74856224" "chr2-74876328-74876337"
## KLF9, MXI1, MXI1
# preprocess samples to show ----------------------------------------------
pieceids_tumor_selected <- c("C3L-00448-T1", "C3L-01302-T1","C3L-00088-T1", "C3N-00242-T1", "C3L-00790-T1", "C3L-00088-T2",
"C3L-01313-T1", "C3L-00917-T1", "C3N-01200-T1", "C3L-00610-T1","C3N-01213-T1", "C3L-00079-T1",
"C3L-00583-T1", "C3N-00733-T1", "C3N-00317-T1", "C3L-00908-T1", "C3L-00026-T1", "C3L-01287-T1",
"C3L-00004-T1", "C3L-00416-T2", "C3L-00096-T1", "C3N-00437-T1", "C3L-00010-T1", "C3N-00495-T1")
pieceids_nat_selected <- c("C3N-00242-N", 'C3L-00088-N', "C3L-00079-N", 'C3N-01200-N')
# preprocess ATAC object --------------------------------------------------
head(atac@meta.data)
atac_subset=subset(atac,(cell_type %in% c('Tumor') & (Piece_ID %in% pieceids_tumor_selected)) | cell_type=='PT' & Piece_ID %in% pieceids_nat_selected)
## make colors
color_tumorcell <- RColorBrewer::brewer.pal(n = 9, name = "Dark2")[4]
color_pt <- RColorBrewer::brewer.pal(n = 9, name = "Dark2")[1]
colors_celltype <- c(rep(x = color_tumorcell, length(pieceids_tumor_selected)), rep(x = color_pt, length(pieceids_nat_selected)))
names(colors_celltype) <- c(pieceids_tumor_selected, pieceids_nat_selected)
# process coordinates ------------------------------------------------------------
chr=strsplit(x = peak_plot, split = "\\-")[[1]][1]
st=strsplit(x = peak_plot, split = "\\-")[[1]][2]; st = as.numeric(st)
en=strsplit(x = peak_plot, split = "\\-")[[1]][3]; en = as.numeric(en)
new_st=st-1000
new_en=en+1000
peak_plot_expanded=paste(chr,new_st,new_en,sep='-')
# peak_plot_expanded <- "chr2-74833572-74876777"
## change atac ident
# print(head(atac@meta.data))
Idents(atac_subset)=factor(atac_subset$Piece_ID, levels=c(pieceids_tumor_selected, pieceids_nat_selected))
# plot --------------------------------------------------------------------
cov_plot= Signac::CoveragePlot(
object = atac_subset,
region = peak_plot_expanded,
annotation = F,
peaks = F,
links=FALSE)
cov_plot <- cov_plot + scale_fill_manual(values = colors_celltype)
print("Finished cov_plot")
peakplot_obj <- Signac::PeakPlot(
object = atac_subset,
region = peak_plot_expanded,
peaks = StringToGRanges(peak_plot, sep = c("-", "-")))
print("Finished peak plot")
motifplot_obj <- Signac::PeakPlot(
object = atac_subset,
region = peak_plot_expanded,
peaks = StringToGRanges(motif_coord, sep = c("-", "-")))
print("Finished motif plot")
gene_plot <- Signac::AnnotationPlot(
object = atac_subset,
region = peak_plot_expanded)
p <- Signac::CombineTracks(
plotlist = list(cov_plot, peakplot_obj, motifplot_obj, gene_plot),
heights = c(8, 0.5, 0.2, 1))
print("Finished CombineTracks")
## write output
# file2write <- paste0(dir_out, gsub(x = peak_plot, pattern = "\\-", replacement = "_"), ".", motif_plot, ".png")
# png(file2write, width = 1000, height = 800, res = 150)
# print(p)
# dev.off()
file2write <- paste0(dir_out, gsub(x = peak_plot[1], pattern = "\\-", replacement = "_"), ".", paste0(motifs_plot, collapse = "_"), ".pdf")
pdf(file2write, width = 6, height = 10, useDingbats = F)
print(p)
dev.off()
| /snatac/coverage_plots/Tumor_vs_PT/coverageplot_HK2_promoter_peak_motif_28samples_KLF9_motif_katmai.R | no_license | ding-lab/ccRCC_snRNA_analysis | R | false | false | 6,087 | r | # Yige Wu @WashU May 2021
## source activate signac
# set up libraries and output directory -----------------------------------
## getting the path to the current script
thisFile <- function() {
cmdArgs <- commandArgs(trailingOnly = FALSE)
needle <- "--file="
match <- grep(needle, cmdArgs)
if (length(match) > 0) {
# Rscript
return(normalizePath(sub(needle, "", cmdArgs[match])))
} else {
# 'source'd via R console
return(normalizePath(sys.frames()[[1]]$ofile))
}
}
path_this_script <- thisFile()
## set working directory
dir_base = "/diskmnt/Projects/ccRCC_scratch/ccRCC_snRNA/"
setwd(dir_base)
## load libraries
packages = c(
"rstudioapi",
"plyr",
"dplyr",
"stringr",
"reshape2",
"data.table",
"Signac",
"Seurat",
"ggplot2"
)
for (pkg_name_tmp in packages) {
if (!(pkg_name_tmp %in% installed.packages()[,1])) {
print(paste0(pkg_name_tmp, "is being installed!"))
BiocManager::install(pkgs = pkg_name_tmp, update = F)
install.packages(pkg_name_tmp, dependencies = T)
}
print(paste0(pkg_name_tmp, " is installed!"))
library(package = pkg_name_tmp, character.only = T)
}
source("./ccRCC_snRNA_analysis/functions.R")
## set run id
version_tmp <- 1
run_id <- paste0(format(Sys.Date(), "%Y%m%d") , ".v", version_tmp)
## set output directory
dir_out <- paste0(makeOutDir_katmai(path_this_script), run_id, "/")
dir.create(dir_out)
# input dependencies ------------------------------------------------------
## input the merged object
atac=readRDS(paste('/diskmnt/Projects/ccRCC_scratch/ccRCC_snATAC/Resources/snATAC_Processed_Data/Signac.1.0.0/3.Merge_snATAC/Merge.SelectPeaks.v.20210706/28_ccRCC_snATAC.selectedPeaks.chromvar.cicero.v3.20210725.rds',sep=''))
Idents(atac)=atac$Piece_ID
## input motif-peak mapping result
# peak2motif_df <- fread(data.table = F, input = "./Resources/snATAC_Processed_Data/Motifs_Mapped_to_Peaks/Motifs_matched.DEG_associated_Peaks.Motif_annotation.20210517.v1.tsv")
peak2motif_df <- fread(data.table = F, input = "/diskmnt/Projects/ccRCC_scratch/ccRCC_snATAC/Resources/snATAC_Processed_Data/Signac.1.0.0/3.Merge_snATAC/Merge.SelectPeaks.v.20210706/peaks/Motifs_matched.28_snATAC_merged.object.20210827.tsv")
## input peak fold changes
peak2fcs_df <- fread(data.table = F, input = "./Resources/snATAC_Processed_Data/Differential_Peaks/ccRCC_Specific/DA_peaks_Tumor_vs_PT_affected_byCNV_removed.tsv")
# specify parameters ------------------------------------------------------
motifs_plot <- c("KLF9")
motifs_plot <- c("MA1107.2")
peak_plot <- c("chr2-74833672-74834172")
topn_plot <- 24
motif_coord <- peak2motif_df$motif_coord[(peak2motif_df$Peak %in% "chr2-74833672-74834172") & (peak2motif_df$group_name %in% "MA1107.2")]
motif_coord <- unique(motif_coord)
motif_coord <- sort(motif_coord); motif_coord
# [1] "chr2-74834043-74834058" "chr2-74856215-74856224" "chr2-74876328-74876337"
## KLF9, MXI1, MXI1
# preprocess samples to show ----------------------------------------------
pieceids_tumor_selected <- c("C3L-00448-T1", "C3L-01302-T1","C3L-00088-T1", "C3N-00242-T1", "C3L-00790-T1", "C3L-00088-T2",
"C3L-01313-T1", "C3L-00917-T1", "C3N-01200-T1", "C3L-00610-T1","C3N-01213-T1", "C3L-00079-T1",
"C3L-00583-T1", "C3N-00733-T1", "C3N-00317-T1", "C3L-00908-T1", "C3L-00026-T1", "C3L-01287-T1",
"C3L-00004-T1", "C3L-00416-T2", "C3L-00096-T1", "C3N-00437-T1", "C3L-00010-T1", "C3N-00495-T1")
pieceids_nat_selected <- c("C3N-00242-N", 'C3L-00088-N', "C3L-00079-N", 'C3N-01200-N')
# preprocess ATAC object --------------------------------------------------
head(atac@meta.data)
atac_subset=subset(atac,(cell_type %in% c('Tumor') & (Piece_ID %in% pieceids_tumor_selected)) | cell_type=='PT' & Piece_ID %in% pieceids_nat_selected)
## make colors
color_tumorcell <- RColorBrewer::brewer.pal(n = 9, name = "Dark2")[4]
color_pt <- RColorBrewer::brewer.pal(n = 9, name = "Dark2")[1]
colors_celltype <- c(rep(x = color_tumorcell, length(pieceids_tumor_selected)), rep(x = color_pt, length(pieceids_nat_selected)))
names(colors_celltype) <- c(pieceids_tumor_selected, pieceids_nat_selected)
# process coordinates ------------------------------------------------------------
chr=strsplit(x = peak_plot, split = "\\-")[[1]][1]
st=strsplit(x = peak_plot, split = "\\-")[[1]][2]; st = as.numeric(st)
en=strsplit(x = peak_plot, split = "\\-")[[1]][3]; en = as.numeric(en)
new_st=st-1000
new_en=en+1000
peak_plot_expanded=paste(chr,new_st,new_en,sep='-')
# peak_plot_expanded <- "chr2-74833572-74876777"
## change atac ident
# print(head(atac@meta.data))
Idents(atac_subset)=factor(atac_subset$Piece_ID, levels=c(pieceids_tumor_selected, pieceids_nat_selected))
# plot --------------------------------------------------------------------
cov_plot= Signac::CoveragePlot(
object = atac_subset,
region = peak_plot_expanded,
annotation = F,
peaks = F,
links=FALSE)
cov_plot <- cov_plot + scale_fill_manual(values = colors_celltype)
print("Finished cov_plot")
peakplot_obj <- Signac::PeakPlot(
object = atac_subset,
region = peak_plot_expanded,
peaks = StringToGRanges(peak_plot, sep = c("-", "-")))
print("Finished peak plot")
motifplot_obj <- Signac::PeakPlot(
object = atac_subset,
region = peak_plot_expanded,
peaks = StringToGRanges(motif_coord, sep = c("-", "-")))
print("Finished motif plot")
gene_plot <- Signac::AnnotationPlot(
object = atac_subset,
region = peak_plot_expanded)
p <- Signac::CombineTracks(
plotlist = list(cov_plot, peakplot_obj, motifplot_obj, gene_plot),
heights = c(8, 0.5, 0.2, 1))
print("Finished CombineTracks")
## write output
# file2write <- paste0(dir_out, gsub(x = peak_plot, pattern = "\\-", replacement = "_"), ".", motif_plot, ".png")
# png(file2write, width = 1000, height = 800, res = 150)
# print(p)
# dev.off()
file2write <- paste0(dir_out, gsub(x = peak_plot[1], pattern = "\\-", replacement = "_"), ".", paste0(motifs_plot, collapse = "_"), ".pdf")
pdf(file2write, width = 6, height = 10, useDingbats = F)
print(p)
dev.off()
|
# Auto-extracted example from the ordinalRR package help page for
# summary.ordinalRR (see the ### Name/Title headers below).
library(ordinalRR)
### Name: summary.ordinalRR
### Title: Summarize an object of class ordinalRR.
### Aliases: summary.ordinalRR
### ** Examples
## No test:
# Load the package's bundled 'followup' data set and print it.
data(followup)
followup
# Preprocess the data, fit the (random-effects) ordinalRR model, and summarize.
x=preprocess(followup)
g.random<-ordinalRR(x)
summary(g.random)
## End(No test)
| /data/genthat_extracted_code/ordinalRR/examples/summary.ordinalRR.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 272 | r | library(ordinalRR)
# Duplicate copy of the summary.ordinalRR example above.
# NOTE(review): its library(ordinalRR) line was fused into the preceding
# metadata row by the extraction; the package must be attached for this to run.
### Name: summary.ordinalRR
### Title: Summarize an object of class ordinalRR.
### Aliases: summary.ordinalRR
### ** Examples
## No test:
data(followup)
followup
# Preprocess, fit, and summarize exactly as in the package example.
x=preprocess(followup)
g.random<-ordinalRR(x)
summary(g.random)
## End(No test)
|
library(shiny)
# ui.R for the "Single-case effect size calculator" Shiny app.
# The UI is a navbarPage with four pages: About (static markdown),
# Single-Series Calculator, Multiple-Series Calculator (batch), and
# Syntax for R. `example_list` (used below) comes from the sourced mappings.R.
source("mappings.R")
ui <- navbarPage(title = "Single-case effect size calculator",
id = "SCD_es_calculator",
# ---- Page 1: About -- static markdown documentation pages ----
tabPanel("About",
navlistPanel(widths = c(3,9),
tabPanel("About",
includeMarkdown("markdown/About.md")),
tabPanel("Accessing the calculator",
includeMarkdown("markdown/Accessing.md")),
tabPanel("Using the single-series calculator",
includeMarkdown("markdown/using_single_series.md")),
tabPanel("Using the multiple-series calculator",
includeMarkdown("markdown/using_multiple_series.md")),
tabPanel("Example data",
includeMarkdown("markdown/example-data.md"))
)),
# ---- Page 2: single-series calculator (paste two phases of data) ----
tabPanel("Single-Series Calculator",
fluidRow(column(12,
h3("Data input"),
h5("Enter data values, separated by commas, spaces, or tabs.")
)
),
fluidRow(
column(4,
textInput("A_dat", label = "Phase A", value = "")
),
column(4,
textInput("B_dat", label = "Phase B", value = "")
),
column(4,
checkboxInput("toggleSinglePlot","Show graph", value = FALSE)
)
),
conditionalPanel(condition = "input.toggleSinglePlot",
fluidRow(
column(12,
plotOutput('SCDplot', height = "300px")
)
)),
fluidRow(
hr(),
column(4,
h3("Effect sizes")
),
column(8,
h3(textOutput("ES_name"))
)
),
# Effect-size controls: the sidebar picks the index and its options,
# the main panel shows the result and optional methods write-ups.
sidebarLayout(
sidebarPanel(width = 4,
tabsetPanel(id = "ES_family", type = "pills",
tabPanel("Non-overlap",
br(),
selectInput("NOM_ES",
label = "Effect size index",
choices = c("IRD",
"NAP",
"PAND",
"PEM",
"PND",
"Tau",
"Tau-BC" = "Tau_BC",
"Tau-U" = "Tau_U"),
selected = "NAP")
),
tabPanel("Parametric",
br(),
selectInput("parametric_ES",
label = "Effect size index",
choices = c("LOR", "LRRd", "LRRi", "LRM", "PoGO", "SMD"),
selected = "LRRd"),
conditionalPanel(condition = "input.parametric_ES=='LRRi'|input.parametric_ES=='LRRd'",
checkboxInput("pct_change","Convert LRR to % change")),
conditionalPanel(condition = "input.parametric_ES=='PoGO'",
numericInput("goal_level",
label = "Goal level for the behavior",
value = NULL))
)
),
selectInput("improvement",
label = "Direction of improvement",
choices = c("increase", "decrease")),
# Tau-BC-specific options (conditions are JavaScript expressions).
conditionalPanel(condition = "input.ES_family=='Non-overlap' & input.NOM_ES == 'Tau_BC'",
radioButtons("tau_calculation",
label = "Choose a method for calculating Tau index",
choices = c("Tau (non-overlap)" = "Nlap",
"Kendall rank correlation" = "Kendall"))
),
conditionalPanel(condition = "input.ES_family=='Non-overlap' & input.NOM_ES == 'Tau_BC'",
radioButtons("baseline_check",
label = "Test for baseline trend",
choices = c("Always adjust for baseline trend" = "No",
"Pretest for baseline trend, adjust if significant" = "Yes"))
),
conditionalPanel(condition = "input.ES_family=='Non-overlap' &
input.NOM_ES == 'Tau_BC' & input.baseline_check == 'Yes'",
numericInput("significance_level",
label = "Significance level for the initial baseline trend test",
value = 0.05, step = .01,
min = 0.01,
max = 0.99)
),
# Parametric-index-specific options (denominator, scale, session info).
conditionalPanel("input.ES_family=='Parametric'",
conditionalPanel(condition = "input.parametric_ES == 'SMD'",
radioButtons("SMD_denom",
label = "Standardized by",
choices = c("baseline SD","pooled SD"))),
conditionalPanel(condition = "input.parametric_ES=='LOR'|input.parametric_ES=='LRRi'|input.parametric_ES=='LRRd'",
selectInput("outScale", label = "Outcome Scale",
choices = c("percentage",
"proportion",
"count",
"rate",
"other")),
numericInput("intervals", label = "If observed via interval recording, total intervals per session", value = NULL)),
conditionalPanel(condition = "input.parametric_ES=='LRRi'|input.parametric_ES=='LRRd'",
numericInput("obslength", label = "Session length (in minutes)", value = NULL)),
conditionalPanel(condition = "input.parametric_ES=='LOR'|input.parametric_ES=='LRRi'|input.parametric_ES=='LRRd'",
numericInput("lrrfloor", label = "User-specified floor constant",value = NULL))
),
# Confidence level applies only to indices that report standard errors.
conditionalPanel(condition = "input.ES_family=='Parametric'|input.NOM_ES=='NAP'|input.NOM_ES=='Tau'|input.NOM_ES=='Tau_BC'",
numericInput("confidence",
label = "Confidence level",
value = 95,
min = 0,
max = 100)
),
numericInput("digits","Digits",
value = 2, min = 1,
max = 16, step = 1)
),
mainPanel(width = 8,
conditionalPanel(condition = "input.ES_family=='Non-overlap' & input.NOM_ES == 'PND'",
includeMarkdown("markdown/PND-message.md")),
htmlOutput("result"),
checkboxInput("explanation",
label = "Show methods and references",
value = FALSE),
# One markdown methods page per effect size, shown on request.
conditionalPanel("input.explanation==true",
conditionalPanel("input.ES_family=='Non-overlap'",
conditionalPanel("input.NOM_ES == 'IRD'", withMathJax(includeMarkdown("markdown/IRD.md"))),
conditionalPanel("input.NOM_ES == 'NAP'", withMathJax(includeMarkdown("markdown/NAP.md"))),
conditionalPanel("input.NOM_ES == 'PAND'", withMathJax(includeMarkdown("markdown/PAND.md"))),
conditionalPanel("input.NOM_ES == 'PEM'", withMathJax(includeMarkdown("markdown/PEM.md"))),
conditionalPanel("input.NOM_ES == 'PND'", withMathJax(includeMarkdown("markdown/PND.md"))),
conditionalPanel("input.NOM_ES == 'Tau'", withMathJax(includeMarkdown("markdown/Tau.md"))),
conditionalPanel("input.NOM_ES == 'Tau_BC'", withMathJax(includeMarkdown("markdown/Tau-BC.md"))),
conditionalPanel("input.NOM_ES == 'Tau_U'", withMathJax(includeMarkdown("markdown/Tau-U.md")))
),
conditionalPanel("input.ES_family=='Parametric'",
conditionalPanel("input.parametric_ES == 'LRRi'|input.parametric_ES == 'LRRd'", withMathJax(includeMarkdown("markdown/LRR.md"))),
conditionalPanel("input.parametric_ES == 'LRM'", withMathJax(includeMarkdown("markdown/LRM.md"))),
conditionalPanel("input.parametric_ES == 'SMD'", withMathJax(includeMarkdown("markdown/SMD.md"))),
conditionalPanel("input.parametric_ES == 'LOR'", withMathJax(includeMarkdown("markdown/LOR.md"))),
conditionalPanel("input.parametric_ES == 'PoGO'", withMathJax(includeMarkdown("markdown/PoGO.md")))
)
)
)
)
),
# ---- Page 3: multiple-series (batch) calculator ----
tabPanel("Multiple-Series Calculator",
tabsetPanel(
id = "BatchEntryTabs",
tabPanel("Data",
sidebarLayout(sidebarPanel(radioButtons('dat_type', 'What data do you want to use?',
c("Use an example" = "example",
"Upload data from a .csv or .txt file" = "dat",
"Upload data from a .xlsx file" = "xlsx")),
conditionalPanel(
condition = "input.dat_type == 'example'",
selectInput("example", label = "Choose an example",
choices = example_list)
),
conditionalPanel(
condition = "input.dat_type == 'dat'",
fileInput('dat', 'Upload a .csv or .txt file', accept=c('text/csv', 'text/comma-separated-values,text/plain', '.csv', '.txt')),
checkboxInput('header', 'File has a header?', TRUE),
# NOTE(review): "Data seperator" is a user-visible typo ("separator");
# fixing it would change rendered UI text, so only flagged here.
radioButtons('sep', 'Data seperator', c(Commas=',', Semicolons=';', Tabs='\t', Spaces=' '), inline = TRUE),
radioButtons('quote', 'Include quotes?', c('No'='', 'Double Quotes'='"', 'Single Quotes'="'"), inline = TRUE)
),
conditionalPanel(
condition = "input.dat_type == 'xlsx'",
fileInput('xlsx', 'Upload a .xlsx file', accept = c('.xlsx')),
checkboxInput('col_names', 'File has a header?', TRUE),
selectInput("inSelect", "Select a sheet", "")
),
uiOutput("filtervarMapping"),
uiOutput("filterMapping")
),
mainPanel(tableOutput("datview")))
),
tabPanel("Variables",
sidebarLayout(
sidebarPanel(
style = "max-height: 1200px; overflow-y: auto",
conditionalPanel(condition = "input.dat_type == 'dat' | input.dat_type == 'xlsx'",
checkboxInput("calcPhasePair", "Calculate phase pair numbers for ABAB designs.", value = FALSE)),
# Variable-role selectors are rendered server-side (uiOutput),
# since their choices depend on the uploaded data.
uiOutput("clusterPhase"),
uiOutput("baseDefine"),
uiOutput("treatDefine"),
uiOutput("outOrderImp"),
conditionalPanel(condition = "input.bimprovement == 'series'",
uiOutput("improvementVar")),
conditionalPanel(condition = "input.bimprovement == 'series'",
uiOutput("improvementDir")),
br(),
br(),
br()
),
mainPanel(tableOutput("datview2"))
)
),
tabPanel("Plot",
sidebarLayout(
sidebarPanel(
style = "max-height: 800px; overflow-y: auto",
uiOutput("facetSelector"),
uiOutput("graph_filters"),
br(),
br(),
br(),
br()
),
mainPanel(plotOutput('batchPlot', height = "auto"))
)
),
tabPanel("Estimate",
sidebarLayout(
sidebarPanel(
h4("Select Effect Sizes"),
checkboxGroupInput("bESno", "Non-Overlap Effect Sizes", choices = c("IRD","NAP","PAND","PEM","PND","Tau","Tau-BC" = "Tau_BC","Tau-U" = "Tau_U"), inline = TRUE),
checkboxGroupInput("bESpar", "Parametric Effect Sizes", choices = c("LOR", "LRRd", "LRRi", "LRM", "PoGO", "SMD"), inline = TRUE),
conditionalPanel(condition = "input.bESno.includes('Tau_BC')",
radioButtons("btau_calculation", label = "Choose a method for calculating Tau index",
choices = c("Tau (non-overlap)" = "Nlap",
"Kendall rank correlation" = "Kendall"),
inline = TRUE)),
conditionalPanel(condition = "input.bESno.includes('Tau_BC')",
radioButtons("bbaseline_check", label = "Use baseline trend test for Tau-BC?",
choices = c("Always adjusting for baseline trend" = "No",
"Pretest for baseline trend, adjust if significant" = "Yes"),
inline = FALSE)),
conditionalPanel(condition = "input.bESno.includes('Tau_BC') & input.bbaseline_check == 'Yes'",
numericInput("bsignificance_level",
label = "Significance level for the baseline trend test",
value = 0.05, step = .01,
min = 0.01,
max = 0.99)),
conditionalPanel(condition = "input.bESpar.includes('LRRi') | input.bESpar.includes('LRRd') | input.bESpar.includes('LOR')",
checkboxInput("b_pct_change", "Convert LRR to % change")),
conditionalPanel(condition = "input.bESpar.includes('LOR')",
strong(style="color:orange","LOR will only be calculated for outcomes measured as percentages or proportions."),
br("")),
conditionalPanel(condition = "input.bESpar.includes('SMD')",
radioButtons("bSMD_denom", label = "Standardize SMD ",
choices = c("baseline SD" = "baseline", "pooled SD" = "pool"), inline = TRUE)),
conditionalPanel(condition = "input.bESpar.includes('LRRi') | input.bESpar.includes('LRRd') | input.bESpar.includes('LOR')",
uiOutput("outcomeScale")),
conditionalPanel(condition = "input.bESpar.includes('LRRi') | input.bESpar.includes('LRRd') | input.bESpar.includes('LOR')",
uiOutput("measurementProc")),
conditionalPanel(condition = "input.bESpar.includes('PoGO')", uiOutput("goalLevel")),
# NOTE(review): input.b_aggregate is not defined in this file;
# presumably rendered server-side -- confirm against server.R.
conditionalPanel(condition = "input.b_aggregate != ''",
radioButtons('weighting_scheme',
label = "Weighting scheme to use for aggregating.",
choices = c("equal", "1/V", "nA", "nB", "nA*nB", "1/nA + 1/nB"))
),
numericInput("bconfidence", label = "Confidence level (for any effect size with standard errors)", value = 95, min = 0, max = 100),
numericInput("bdigits","Digits",
value = 2, min = 1,
max = 16, step = 1),
radioButtons("resultsformat", "Long or wide format?", c("Long" = "long", "Wide" = "wide"), inline = TRUE),
conditionalPanel(condition = "input.bESpar.length > 0 || input.bESno.length > 0",
actionButton("batchest", "Estimate"))
),
mainPanel(
conditionalPanel(condition = "input.bESno.includes('PND')",
includeMarkdown("markdown/PND-message.md")),
tableOutput("batchTable"),
p(),
conditionalPanel(condition = "input.batchest > 0",
downloadButton("downloadES", label = "Download results"))
)
)
),
# ---- Page 4: copy-pasteable R syntax reproducing the batch analysis ----
tabPanel("Syntax for R",
rclipboard::rclipboardSetup(),
uiOutput("clip"),
verbatimTextOutput("syntax")
)
)
))
| /inst/shiny-examples/SCD-effect-sizes/ui.R | no_license | jepusto/SingleCaseES | R | false | false | 25,048 | r | library(shiny)
# Duplicate copy of ui.R for the "Single-case effect size calculator" app
# (the extraction repeated the file; its library(shiny) line was fused into
# the preceding metadata row). Structure: navbarPage with About,
# Single-Series Calculator, Multiple-Series Calculator, and Syntax for R.
source("mappings.R")
ui <- navbarPage(title = "Single-case effect size calculator",
id = "SCD_es_calculator",
# ---- About: static markdown pages ----
tabPanel("About",
navlistPanel(widths = c(3,9),
tabPanel("About",
includeMarkdown("markdown/About.md")),
tabPanel("Accessing the calculator",
includeMarkdown("markdown/Accessing.md")),
tabPanel("Using the single-series calculator",
includeMarkdown("markdown/using_single_series.md")),
tabPanel("Using the multiple-series calculator",
includeMarkdown("markdown/using_multiple_series.md")),
tabPanel("Example data",
includeMarkdown("markdown/example-data.md"))
)),
# ---- Single-series calculator ----
tabPanel("Single-Series Calculator",
fluidRow(column(12,
h3("Data input"),
h5("Enter data values, separated by commas, spaces, or tabs.")
)
),
fluidRow(
column(4,
textInput("A_dat", label = "Phase A", value = "")
),
column(4,
textInput("B_dat", label = "Phase B", value = "")
),
column(4,
checkboxInput("toggleSinglePlot","Show graph", value = FALSE)
)
),
conditionalPanel(condition = "input.toggleSinglePlot",
fluidRow(
column(12,
plotOutput('SCDplot', height = "300px")
)
)),
fluidRow(
hr(),
column(4,
h3("Effect sizes")
),
column(8,
h3(textOutput("ES_name"))
)
),
sidebarLayout(
sidebarPanel(width = 4,
tabsetPanel(id = "ES_family", type = "pills",
tabPanel("Non-overlap",
br(),
selectInput("NOM_ES",
label = "Effect size index",
choices = c("IRD",
"NAP",
"PAND",
"PEM",
"PND",
"Tau",
"Tau-BC" = "Tau_BC",
"Tau-U" = "Tau_U"),
selected = "NAP")
),
tabPanel("Parametric",
br(),
selectInput("parametric_ES",
label = "Effect size index",
choices = c("LOR", "LRRd", "LRRi", "LRM", "PoGO", "SMD"),
selected = "LRRd"),
conditionalPanel(condition = "input.parametric_ES=='LRRi'|input.parametric_ES=='LRRd'",
checkboxInput("pct_change","Convert LRR to % change")),
conditionalPanel(condition = "input.parametric_ES=='PoGO'",
numericInput("goal_level",
label = "Goal level for the behavior",
value = NULL))
)
),
selectInput("improvement",
label = "Direction of improvement",
choices = c("increase", "decrease")),
# Tau-BC-specific options.
conditionalPanel(condition = "input.ES_family=='Non-overlap' & input.NOM_ES == 'Tau_BC'",
radioButtons("tau_calculation",
label = "Choose a method for calculating Tau index",
choices = c("Tau (non-overlap)" = "Nlap",
"Kendall rank correlation" = "Kendall"))
),
conditionalPanel(condition = "input.ES_family=='Non-overlap' & input.NOM_ES == 'Tau_BC'",
radioButtons("baseline_check",
label = "Test for baseline trend",
choices = c("Always adjust for baseline trend" = "No",
"Pretest for baseline trend, adjust if significant" = "Yes"))
),
conditionalPanel(condition = "input.ES_family=='Non-overlap' &
input.NOM_ES == 'Tau_BC' & input.baseline_check == 'Yes'",
numericInput("significance_level",
label = "Significance level for the initial baseline trend test",
value = 0.05, step = .01,
min = 0.01,
max = 0.99)
),
# Parametric-index-specific options.
conditionalPanel("input.ES_family=='Parametric'",
conditionalPanel(condition = "input.parametric_ES == 'SMD'",
radioButtons("SMD_denom",
label = "Standardized by",
choices = c("baseline SD","pooled SD"))),
conditionalPanel(condition = "input.parametric_ES=='LOR'|input.parametric_ES=='LRRi'|input.parametric_ES=='LRRd'",
selectInput("outScale", label = "Outcome Scale",
choices = c("percentage",
"proportion",
"count",
"rate",
"other")),
numericInput("intervals", label = "If observed via interval recording, total intervals per session", value = NULL)),
conditionalPanel(condition = "input.parametric_ES=='LRRi'|input.parametric_ES=='LRRd'",
numericInput("obslength", label = "Session length (in minutes)", value = NULL)),
conditionalPanel(condition = "input.parametric_ES=='LOR'|input.parametric_ES=='LRRi'|input.parametric_ES=='LRRd'",
numericInput("lrrfloor", label = "User-specified floor constant",value = NULL))
),
conditionalPanel(condition = "input.ES_family=='Parametric'|input.NOM_ES=='NAP'|input.NOM_ES=='Tau'|input.NOM_ES=='Tau_BC'",
numericInput("confidence",
label = "Confidence level",
value = 95,
min = 0,
max = 100)
),
numericInput("digits","Digits",
value = 2, min = 1,
max = 16, step = 1)
),
mainPanel(width = 8,
conditionalPanel(condition = "input.ES_family=='Non-overlap' & input.NOM_ES == 'PND'",
includeMarkdown("markdown/PND-message.md")),
htmlOutput("result"),
checkboxInput("explanation",
label = "Show methods and references",
value = FALSE),
# Per-index methods write-ups, shown on request.
conditionalPanel("input.explanation==true",
conditionalPanel("input.ES_family=='Non-overlap'",
conditionalPanel("input.NOM_ES == 'IRD'", withMathJax(includeMarkdown("markdown/IRD.md"))),
conditionalPanel("input.NOM_ES == 'NAP'", withMathJax(includeMarkdown("markdown/NAP.md"))),
conditionalPanel("input.NOM_ES == 'PAND'", withMathJax(includeMarkdown("markdown/PAND.md"))),
conditionalPanel("input.NOM_ES == 'PEM'", withMathJax(includeMarkdown("markdown/PEM.md"))),
conditionalPanel("input.NOM_ES == 'PND'", withMathJax(includeMarkdown("markdown/PND.md"))),
conditionalPanel("input.NOM_ES == 'Tau'", withMathJax(includeMarkdown("markdown/Tau.md"))),
conditionalPanel("input.NOM_ES == 'Tau_BC'", withMathJax(includeMarkdown("markdown/Tau-BC.md"))),
conditionalPanel("input.NOM_ES == 'Tau_U'", withMathJax(includeMarkdown("markdown/Tau-U.md")))
),
conditionalPanel("input.ES_family=='Parametric'",
conditionalPanel("input.parametric_ES == 'LRRi'|input.parametric_ES == 'LRRd'", withMathJax(includeMarkdown("markdown/LRR.md"))),
conditionalPanel("input.parametric_ES == 'LRM'", withMathJax(includeMarkdown("markdown/LRM.md"))),
conditionalPanel("input.parametric_ES == 'SMD'", withMathJax(includeMarkdown("markdown/SMD.md"))),
conditionalPanel("input.parametric_ES == 'LOR'", withMathJax(includeMarkdown("markdown/LOR.md"))),
conditionalPanel("input.parametric_ES == 'PoGO'", withMathJax(includeMarkdown("markdown/PoGO.md")))
)
)
)
)
),
# ---- Multiple-series (batch) calculator ----
tabPanel("Multiple-Series Calculator",
tabsetPanel(
id = "BatchEntryTabs",
tabPanel("Data",
sidebarLayout(sidebarPanel(radioButtons('dat_type', 'What data do you want to use?',
c("Use an example" = "example",
"Upload data from a .csv or .txt file" = "dat",
"Upload data from a .xlsx file" = "xlsx")),
conditionalPanel(
condition = "input.dat_type == 'example'",
selectInput("example", label = "Choose an example",
choices = example_list)
),
conditionalPanel(
condition = "input.dat_type == 'dat'",
fileInput('dat', 'Upload a .csv or .txt file', accept=c('text/csv', 'text/comma-separated-values,text/plain', '.csv', '.txt')),
checkboxInput('header', 'File has a header?', TRUE),
# NOTE(review): "Data seperator" is a user-visible typo ("separator").
radioButtons('sep', 'Data seperator', c(Commas=',', Semicolons=';', Tabs='\t', Spaces=' '), inline = TRUE),
radioButtons('quote', 'Include quotes?', c('No'='', 'Double Quotes'='"', 'Single Quotes'="'"), inline = TRUE)
),
conditionalPanel(
condition = "input.dat_type == 'xlsx'",
fileInput('xlsx', 'Upload a .xlsx file', accept = c('.xlsx')),
checkboxInput('col_names', 'File has a header?', TRUE),
selectInput("inSelect", "Select a sheet", "")
),
uiOutput("filtervarMapping"),
uiOutput("filterMapping")
),
mainPanel(tableOutput("datview")))
),
tabPanel("Variables",
sidebarLayout(
sidebarPanel(
style = "max-height: 1200px; overflow-y: auto",
conditionalPanel(condition = "input.dat_type == 'dat' | input.dat_type == 'xlsx'",
checkboxInput("calcPhasePair", "Calculate phase pair numbers for ABAB designs.", value = FALSE)),
uiOutput("clusterPhase"),
uiOutput("baseDefine"),
uiOutput("treatDefine"),
uiOutput("outOrderImp"),
conditionalPanel(condition = "input.bimprovement == 'series'",
uiOutput("improvementVar")),
conditionalPanel(condition = "input.bimprovement == 'series'",
uiOutput("improvementDir")),
br(),
br(),
br()
),
mainPanel(tableOutput("datview2"))
)
),
tabPanel("Plot",
sidebarLayout(
sidebarPanel(
style = "max-height: 800px; overflow-y: auto",
uiOutput("facetSelector"),
uiOutput("graph_filters"),
br(),
br(),
br(),
br()
),
mainPanel(plotOutput('batchPlot', height = "auto"))
)
),
tabPanel("Estimate",
sidebarLayout(
sidebarPanel(
h4("Select Effect Sizes"),
checkboxGroupInput("bESno", "Non-Overlap Effect Sizes", choices = c("IRD","NAP","PAND","PEM","PND","Tau","Tau-BC" = "Tau_BC","Tau-U" = "Tau_U"), inline = TRUE),
checkboxGroupInput("bESpar", "Parametric Effect Sizes", choices = c("LOR", "LRRd", "LRRi", "LRM", "PoGO", "SMD"), inline = TRUE),
conditionalPanel(condition = "input.bESno.includes('Tau_BC')",
radioButtons("btau_calculation", label = "Choose a method for calculating Tau index",
choices = c("Tau (non-overlap)" = "Nlap",
"Kendall rank correlation" = "Kendall"),
inline = TRUE)),
conditionalPanel(condition = "input.bESno.includes('Tau_BC')",
radioButtons("bbaseline_check", label = "Use baseline trend test for Tau-BC?",
choices = c("Always adjusting for baseline trend" = "No",
"Pretest for baseline trend, adjust if significant" = "Yes"),
inline = FALSE)),
conditionalPanel(condition = "input.bESno.includes('Tau_BC') & input.bbaseline_check == 'Yes'",
numericInput("bsignificance_level",
label = "Significance level for the baseline trend test",
value = 0.05, step = .01,
min = 0.01,
max = 0.99)),
conditionalPanel(condition = "input.bESpar.includes('LRRi') | input.bESpar.includes('LRRd') | input.bESpar.includes('LOR')",
checkboxInput("b_pct_change", "Convert LRR to % change")),
conditionalPanel(condition = "input.bESpar.includes('LOR')",
strong(style="color:orange","LOR will only be calculated for outcomes measured as percentages or proportions."),
br("")),
conditionalPanel(condition = "input.bESpar.includes('SMD')",
radioButtons("bSMD_denom", label = "Standardize SMD ",
choices = c("baseline SD" = "baseline", "pooled SD" = "pool"), inline = TRUE)),
conditionalPanel(condition = "input.bESpar.includes('LRRi') | input.bESpar.includes('LRRd') | input.bESpar.includes('LOR')",
uiOutput("outcomeScale")),
conditionalPanel(condition = "input.bESpar.includes('LRRi') | input.bESpar.includes('LRRd') | input.bESpar.includes('LOR')",
uiOutput("measurementProc")),
conditionalPanel(condition = "input.bESpar.includes('PoGO')", uiOutput("goalLevel")),
# NOTE(review): input.b_aggregate is not defined in this file;
# presumably rendered server-side -- confirm against server.R.
conditionalPanel(condition = "input.b_aggregate != ''",
radioButtons('weighting_scheme',
label = "Weighting scheme to use for aggregating.",
choices = c("equal", "1/V", "nA", "nB", "nA*nB", "1/nA + 1/nB"))
),
numericInput("bconfidence", label = "Confidence level (for any effect size with standard errors)", value = 95, min = 0, max = 100),
numericInput("bdigits","Digits",
value = 2, min = 1,
max = 16, step = 1),
radioButtons("resultsformat", "Long or wide format?", c("Long" = "long", "Wide" = "wide"), inline = TRUE),
conditionalPanel(condition = "input.bESpar.length > 0 || input.bESno.length > 0",
actionButton("batchest", "Estimate"))
),
mainPanel(
conditionalPanel(condition = "input.bESno.includes('PND')",
includeMarkdown("markdown/PND-message.md")),
tableOutput("batchTable"),
p(),
conditionalPanel(condition = "input.batchest > 0",
downloadButton("downloadES", label = "Download results"))
)
)
),
# ---- Copyable R syntax for the batch analysis ----
tabPanel("Syntax for R",
rclipboard::rclipboardSetup(),
uiOutput("clip"),
verbatimTextOutput("syntax")
)
)
))
|
# Fuzz-test driver for myTAI's internal C++ routine cpp_omitMatrix.
# testlist mimics the function's expected arguments: a numeric AgeVector
# (53 zeros here) and a 5x7 numeric matrix ExpressionSet filled with
# extreme/denormal doubles -- presumably an AFL-generated crash/valgrind
# reproduction case (the file path in the surrounding metadata suggests so).
testlist <- list(AgeVector = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), ExpressionSet = structure(c(1.63064200184954e+212, 1.11558267938731e+157, 2.3453288146775e+59, 7.59955788945772e-256, 5.55398349536846e-07, 2.63833996739876e-240, 1.7158565271506e-24, 3.45241123006119e+96, 3.49016504742521e+83, 8.75421973838948e-251, 1.93656435398732e-237, 3.86617962359471e-308, 1.51457052685755e+122, 6.35453708406506e-226, 1.34149999500835e+258, 3.08695662079571e+274, 1.2778384355529e-304, 1.3429648484931e-231, 7085.87319714646, 4.26173394238389e+31, 3.05695536508135e-40, 2.80384286150823e-70, 4.98598164707396e+226, 2.67284746621031e-50, 1.268983112604e+270, 6.96927128326474e-92, 0.00315105907067092, 2.28082165029915e+210, 964215356953.314, 3.48762608111849e-233, 1.57025504623905e+177, 2.36697187507964e+42, 3.62903965781702e+225, 1.7243009391465e-142, 1.46182058652606e-281), .Dim = c(5L, 7L)))
# Invoke the (unexported) compiled routine with the fuzzed arguments.
result <- do.call(myTAI:::cpp_omitMatrix,testlist)
str(result) | /myTAI/inst/testfiles/cpp_omitMatrix/AFL_cpp_omitMatrix/cpp_omitMatrix_valgrind_files/1615846757-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 1,090 | r | testlist <- list(AgeVector = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), ExpressionSet = structure(c(1.63064200184954e+212, 1.11558267938731e+157, 2.3453288146775e+59, 7.59955788945772e-256, 5.55398349536846e-07, 2.63833996739876e-240, 1.7158565271506e-24, 3.45241123006119e+96, 3.49016504742521e+83, 8.75421973838948e-251, 1.93656435398732e-237, 3.86617962359471e-308, 1.51457052685755e+122, 6.35453708406506e-226, 1.34149999500835e+258, 3.08695662079571e+274, 1.2778384355529e-304, 1.3429648484931e-231, 7085.87319714646, 4.26173394238389e+31, 3.05695536508135e-40, 2.80384286150823e-70, 4.98598164707396e+226, 2.67284746621031e-50, 1.268983112604e+270, 6.96927128326474e-92, 0.00315105907067092, 2.28082165029915e+210, 964215356953.314, 3.48762608111849e-233, 1.57025504623905e+177, 2.36697187507964e+42, 3.62903965781702e+225, 1.7243009391465e-142, 1.46182058652606e-281), .Dim = c(5L, 7L)))
result <- do.call(myTAI:::cpp_omitMatrix,testlist)
str(result) |
library(shiny)
library(V8)
library(sodium)
library(openssl)
library(rJava) #For sending an email from R
library(mailR) #For sending an email from R
library(DBI)
library(pool)
library(RSQLite)
#DataBase
# Connection pool backed by SQLite.
# NOTE(review): no dbname is given, so this is an in-memory database; with a
# pool, each checked-out connection may get its OWN in-memory DB, meaning the
# table created below might not be visible to later queries -- confirm, and
# consider a file-backed dbname (or "file::memory:?cache=shared").
pool <- dbPool(drv = RSQLite::SQLite())
# Release pooled connections when the app stops.
onStop(function() {
poolClose(pool)
})
#Create table user in DB
# Schema: one row per registered account; password holds a sha256 hex digest.
dbExecute(pool, 'CREATE TABLE user (user_name TEXT, country TEXT, email TEXT, password TEXT)')
#Countries
# countries.txt is "abbr|country" per line; build a named list of country
# names for the registration form's selectizeInput choices.
countries.list <- read.table("www/countries.txt", header = FALSE, sep = "|",
stringsAsFactors = FALSE, quote = "",
col.names = c("abbr", "country"))
choice.country <- as.list(as.character(countries.list$country))
names(choice.country) <- countries.list$country
#' Shiny server: login, account registration, and logout.
#'
#' Looks users up in the pooled SQLite `user` table by username or email,
#' authenticates against a sha256 password digest, and registers new accounts
#' after validating the form. Relies on app-level objects defined elsewhere:
#' `pool`, `choice.country`, `isValidEmail()` and the JS handler `js$reset2()`.
server <- function(input, output, session) {
  ######################################################################
  ########################## Login #####################################
  ######################################################################
  ## Session state: is anyone logged in, and who.
  loggedIn <- reactiveVal(value = FALSE)
  user <- reactiveValues(name = NULL, id = NULL)

  ## Runs only when the Login button is pressed.
  observeEvent(input$butLogin, {
    req(input$username, input$pwInp)  # both fields must be non-empty

    # Parameterized lookup by username OR email.
    query <- sqlInterpolate(
      pool,
      "select * from user where user_name=?user or email=?email;",
      user = input$username, email = input$username
    )
    user_data <- dbGetQuery(pool, query)

    # Stored passwords are sha256 hex digests; compare digests.
    if (nrow(user_data) > 0 && sha256(input$pwInp) == user_data[1, "password"]) {
      user$name <- user_data[1, "user_name"]
      user$id <- user_data[1, "user_id"]
      loggedIn(TRUE)
      toggleModal(session, "window", toggle = "close")  # close the login modal
      output$App_Panel <- renderUI({
        span(
          strong(paste("welcome", user$name, "|")),
          actionLink(inputId = "logout", "Logout")
        )
      })
    } else {
      # Unknown user OR wrong password: flag the failed attempt either way so
      # login_status shows the error. (Previously a wrong password for an
      # existing user left loggedIn() unchanged and gave no feedback.)
      loggedIn(FALSE)
    }
  })

  ## Error message shown under the login form after a failed attempt.
  output$login_status <- renderUI({
    if (input$butLogin == 0) {
      return(NULL)
    }
    if (!loggedIn()) {
      span("The Username or Password is Incorrect", style = "color:red")
    }
  })

  ## Open the registration modal.
  observeEvent(input$create_account, {
    showModal(
      modalDialog(title = "Create an account", size = "m",
                  textInput(inputId = "new_user", label = "Username"),
                  textInput(inputId = "new_email", label = "Email"),
                  selectizeInput(inputId = 'country', 'Country', choices = choice.country),
                  passwordInput(inputId = "new_pw", label = "Password"),
                  passwordInput(inputId = "new_pw_conf", label = "Confirm password"),
                  checkboxInput(inputId = "terms", label = a("I, agree for terms and conditions", target = "_blank", href = "Disclaimer-TermsandConditions.html")),
                  actionButton(inputId = "register_user", label = "Submit"),
                  uiOutput("register_status"),
                  footer = actionButton("dismiss_modal", label = "Dismiss")
      )
    )
    register_user()
  })

  observeEvent(input$dismiss_modal, {
    removeModal()
  })

  ## Validate the registration form and insert the account.
  ## Returns a UI span describing success or the first failed validation.
  register_user <- eventReactive(input$register_user, {
    if (!isTruthy(input$new_user) || !isTruthy(input$new_email) || !isTruthy(input$new_pw)) {
      return(span("Fill required information correctly", style = "color:red"))
    }
    if (!isValidEmail(input$new_email)) {  # isValidEmail() defined elsewhere in the app
      return(span("Please provide a valid email address", style = "color:red"))
    }
    if (input$new_pw != input$new_pw_conf) {
      return(span("Entered passwords do not match.", style = "color:red"))
    }
    if (!input$terms) {
      return(span("Please tick the box to show that you agree with terms and conditions", style = "color:red"))
    }

    # Reject duplicate username/email.
    query <- sqlInterpolate(
      pool,
      "select * from user where user_name=?user or email=?email;",
      user = input$new_user, email = input$new_email
    )
    users_data <- dbGetQuery(pool, query)
    if (nrow(users_data) > 0) {
      return(span("User already exists", style = "color:red"))
    }

    # Store only the sha256 digest, never the plain-text password.
    # Parameterized INSERT: the original built this statement with paste0()
    # on raw form fields, which was vulnerable to SQL injection.
    insert <- sqlInterpolate(
      pool,
      "INSERT INTO user (user_name, country, email, password) VALUES (?user, ?country, ?email, ?pw);",
      user = input$new_user, country = input$country,
      email = input$new_email, pw = sha256(input$new_pw)
    )
    dbExecute(pool, insert)
    print("- New user added to database")

    # To actually email the credentials, re-enable a mailR::send.mail() call
    # here (requires valid SMTP host/user/password). It was disabled in the
    # original as well.

    span("Your registration was successful. An email with your credential is sent to the registered email address", style = "color:green")
  })

  ## Show the registration outcome inside the modal.
  output$register_status <- renderUI({
    if (input$register_user == 0) {
      return(NULL)
    }
    isolate(register_user())
  })

  ## Logout: clear user state and reset the client via the JS handler.
  observeEvent(input$logout, {
    user$name <- NULL
    user$id <- NULL
    loggedIn(FALSE)
    js$reset2()
  })
}
| /server.R | no_license | HanjoStudy/Shiny-login-page | R | false | false | 6,585 | r | library(shiny)
library(V8)
library(sodium)
library(openssl)
library(rJava) #For sending an email from R
library(mailR) #For sending an email from R
library(DBI)
library(pool)
library(RSQLite)
#DataBase
# (Duplicate copy of server.R's setup; the extraction repeated the file.)
# Connection pool backed by SQLite.
# NOTE(review): no dbname is supplied, so this is an in-memory database; with
# a pool, each checked-out connection may get its own in-memory DB, so the
# table created below might not be visible to later queries -- confirm.
pool <- dbPool(drv = RSQLite::SQLite())
# Release pooled connections when the app stops.
onStop(function() {
poolClose(pool)
})
#Create table user in DB
# One row per registered account; password holds a sha256 hex digest.
dbExecute(pool, 'CREATE TABLE user (user_name TEXT, country TEXT, email TEXT, password TEXT)')
#Countries
# "abbr|country" per line; named list for the registration country selector.
countries.list <- read.table("www/countries.txt", header = FALSE, sep = "|",
stringsAsFactors = FALSE, quote = "",
col.names = c("abbr", "country"))
choice.country <- as.list(as.character(countries.list$country))
names(choice.country) <- countries.list$country
# Shiny server: login / registration / logout backed by the SQLite `user`
# table.  Fixes applied relative to the original:
#   * the registration INSERT is now parameterised via sqlInterpolate()
#     (it previously paste0()-built SQL from raw form input -- SQL-injectable)
#   * user$id is read NULL-safely (hard-indexing a missing user_id column
#     raised "undefined columns selected" and broke login)
#   * removed unreachable loggedIn(FALSE) placed after a return()
server <- function(input, output, session) {
  #####################################################################################
  ########################## Start LogIn ##############################################
  #####################################################################################

  ## Session state: whether anyone is logged in, and who.
  loggedIn <- reactiveVal(value = FALSE)
  user <- reactiveValues(name = NULL, id = NULL)

  ## Login handler: runs only when the login button is pressed.
  observeEvent(input$butLogin, {
    req(input$username, input$pwInp)  # both fields must be filled in

    ## Look the account up by user name OR email (parameterised query).
    query <- sqlInterpolate(pool,
                            "select * from user where user_name = ?user or email = ?email;",
                            user = input$username, email = input$username)
    user_data <- dbGetQuery(pool, query)

    if (nrow(user_data) > 0) {
      ## Stored passwords are sha256 hex digests (see register_user below).
      if (sha256(input$pwInp) == user_data[1, "password"]) {
        user$name <- user_data[1, "user_name"]
        ## NULL-safe extraction: [[ ]] yields NULL if the schema has no
        ## user_id column, instead of raising an error.
        user$id <- user_data[["user_id"]][1]
        loggedIn(TRUE)
        toggleModal(session, "window", toggle = "close")
        output$App_Panel <- renderUI({
          span(
            strong(paste("welcome", user$name, "|")),
            actionLink(inputId = "logout", "Logout")
          )
        })
      }
    } else {
      loggedIn(FALSE)
    }
  })

  ## Feedback under the login form -- shown only after at least one attempt.
  output$login_status <- renderUI({
    if (input$butLogin == 0) {
      return(NULL)
    } else {
      if (!loggedIn()) {
        return(span("The Username or Password is Incorrect", style = "color:red"))
      }
    }
  })

  ## Registration dialog.
  observeEvent(input$create_account, {
    showModal(
      modalDialog(title = "Create an account", size = "m",
                  textInput(inputId = "new_user", label = "Username"),
                  textInput(inputId = "new_email", label = "Email"),
                  selectizeInput(inputId = 'country', 'Country', choices = choice.country),
                  passwordInput(inputId = "new_pw", label = "Password"),
                  passwordInput(inputId = "new_pw_conf", label = "Confirm password"),
                  checkboxInput(inputId = "terms", label = a("I, agree for terms and conditions", target = "_blank", href = "Disclaimer-TermsandConditions.html")),
                  actionButton(inputId = "register_user", label = "Submit"),
                  uiOutput("register_status"),
                  footer = actionButton("dismiss_modal", label = "Dismiss")
      )
    )
    register_user()
  })

  observeEvent(input$dismiss_modal, {
    removeModal()
  })

  ## Validate the registration form and insert the new account.
  ## Returns a status span that output$register_status renders.
  register_user <- eventReactive(input$register_user, {
    if (!isTruthy(input$new_user) | !isTruthy(input$new_email) | !isTruthy(input$new_pw)) {
      return(span("Fill required information correctly", style = "color:red"))
    }
    if (!isValidEmail(input$new_email)) {
      return(span("Please provide a valid email address", style = "color:red"))
    }
    ## Direct comparison of the raw inputs; hashing both sides first (as the
    ## original did) changes nothing.
    if (input$new_pw != input$new_pw_conf) {
      return(span("Entered passwords do not match.", style = "color:red"))
    }
    if (!input$terms) {
      return(span("Please tick the box to show that you agree with terms and conditions", style = "color:red"))
    }
    query <- sqlInterpolate(pool,
                            "select * from user where user_name = ?user or email = ?email;",
                            user = input$new_user, email = input$new_email)
    users_data <- dbGetQuery(pool, query)
    if (nrow(users_data) > 0) {
      return(span("User already exists", style = "color:red"))
    }

    ## Parameterised INSERT -- the old code pasted raw form input into SQL.
    new_hash <- sha256(input$new_pw)
    insert <- sqlInterpolate(pool,
                             "INSERT INTO user (user_name, country, email, password) VALUES (?user, ?country, ?email, ?password);",
                             user = input$new_user, country = input$country,
                             email = input$new_email, password = new_hash)
    dbExecute(pool, insert)
    print("- New user added to database")

    ## (Optional) welcome email via mailR::send.mail -- left disabled, as in
    ## the original.  NOTE(review): the disabled code mailed the plaintext
    ## password; reconsider before enabling.
    # isolate({send.mail(from = "....@gmail.com", to = input$new_email, ...)})

    span("Your registration was successful. An email with your credential is sent to the registred email adrress", style = "color:green")
  })

  output$register_status <- renderUI({
    if (input$register_user == 0) {
      return(NULL)
    } else {
      isolate(register_user())
    }
  })

  ## Logout: clear session state and reset the client-side UI via JS.
  observeEvent(input$logout, {
    user$name <- NULL
    user$id <- NULL
    loggedIn(FALSE)
    js$reset2()
  })
}
|
\name{mean.grouped.data}
\alias{mean.grouped.data}
\title{Arithmetic Mean}
\description{
Mean of grouped data objects.
}
\usage{
\method{mean}{grouped.data}(x, \dots)
}
\arguments{
\item{x}{an object of class \code{"grouped.data"}.}
\item{\dots}{further arguments passed to or from other methods.}
}
\details{
The mean of grouped data with group boundaries \eqn{c_0, c_1, \dots,
c_r}{c[0], c[1], \dots, c[r]} and group frequencies \eqn{n_1, \dots,
n_r}{n[1], \dots, n[r]} is
\deqn{\frac{1}{n} \sum_{j = 1}^r a_j n_j,}{%
(1/n) * sum(j; a[j] * n[j]),}
where
\eqn{a_j = (c_{j - 1} + c_j)/2}{a[j] = (c[j - 1] + c[j])/2}
is the midpoint of the \eqn{j}th interval, and
\eqn{n = \sum_{j = 1}^r n_j}{n = sum(j; n[j])}.
}
\value{
A named vector of means.
}
\seealso{
\code{\link{grouped.data}} to create grouped data objects;
\code{\link{emm}} to compute higher moments.
}
\references{
Klugman, S. A., Panjer, H. H. and Willmot, G. E. (1998),
\emph{Loss Models, From Data to Decisions}, Wiley.
}
\author{
Vincent Goulet \email{vincent.goulet@act.ulaval.ca}
}
\examples{
data(gdental)
mean(gdental)
}
\keyword{univar}
| /man/mean.grouped.data.Rd | no_license | cran/actuar | R | false | false | 1,150 | rd | \name{mean.grouped.data}
\alias{mean.grouped.data}
\title{Arithmetic Mean}
\description{
Mean of grouped data objects.
}
\usage{
\method{mean}{grouped.data}(x, \dots)
}
\arguments{
\item{x}{an object of class \code{"grouped.data"}.}
\item{\dots}{further arguments passed to or from other methods.}
}
\details{
The mean of grouped data with group boundaries \eqn{c_0, c_1, \dots,
c_r}{c[0], c[1], \dots, c[r]} and group frequencies \eqn{n_1, \dots,
n_r}{n[1], \dots, n[r]} is
\deqn{\frac{1}{n} \sum_{j = 1}^r a_j n_j,}{%
(1/n) * sum(j; a[j] * n[j]),}
where
\eqn{a_j = (c_{j - 1} + c_j)/2}{a[j] = (c[j - 1] + c[j])/2}
is the midpoint of the \eqn{j}th interval, and
\eqn{n = \sum_{j = 1}^r n_j}{n = sum(j; n[j])}.
}
\value{
A named vector of means.
}
\seealso{
\code{\link{grouped.data}} to create grouped data objects;
\code{\link{emm}} to compute higher moments.
}
\references{
Klugman, S. A., Panjer, H. H. and Willmot, G. E. (1998),
\emph{Loss Models, From Data to Decisions}, Wiley.
}
\author{
Vincent Goulet \email{vincent.goulet@act.ulaval.ca}
}
\examples{
data(gdental)
mean(gdental)
}
\keyword{univar}
|
# Parse Triqler differential-expression output for the Cox proteome benchmark
# and attach the species of origin (human vs E. coli) to every protein.
# Returns a tibble with columns:
#   name           protein identifier
#   pval           NA (Triqler reports no frequentist p-value)
#   adj_pval       Triqler q-value
#   diff           sign-flipped log2 fold change
#   organism_short "H. sapiens" or "E. coli"
# (Only change to the code itself: the closing brace was previously fused
# with a stray metadata line, which made the file unparsable.)
parse_triqler_results_for_cox_benchmark <- function(){
  # Triqler writes a ragged TSV: the trailing "peptides" columns can span a
  # variable number of fields, so the file is split manually rather than
  # read with read_tsv().
  list <- read_lines("tmp/triqler_results/triqler_result.tsv") %>%
    str_split("\t")
  headers <- list[[1]]
  triqler_result_df_raw <- list[-1] %>%
    map_dfr(function(chr){
      # Fixed columns keep their header names; everything beyond them is
      # re-joined into a single ";"-separated "peptides" field.
      c(set_names(as.list(chr[which(headers != "peptides")]), headers[headers != "peptides"]),
        list(peptides = paste0(chr[-which(headers != "peptides")], collapse=";")))
    }) %>%
    transmute(protein = protein, peptides = peptides,
              q_value = parse_number(q_value),
              posterior_error_prob = parse_number(posterior_error_prob),
              num_peptides = parse_number(num_peptides),
              log2_fold_change = parse_number(log2_fold_change),
              diff_exp_prob_0.0 = parse_number(diff_exp_prob_0.0))
  # Species lookup built from the FASTA headers (">DB|Protein|Description").
  all_prots_species_assignment <- bind_rows(
    read_lines("../data/cox_proteome_benchmark/species_fastas/UP000005640_9606.fasta") %>%
      keep(function(x) str_starts(x, ">")) %>%
      str_split_fixed("\\|", n=3) %>%
      as_tibble() %>%
      dplyr::rename_all(~ c("DB", "Protein", "Description")) %>%
      mutate(DB = str_sub(DB, 2)) %>%
      mutate(Origin = "HS"),
    read_lines("../data/cox_proteome_benchmark/species_fastas/UP000000625_83333.fasta") %>%
      keep(function(x) str_starts(x, ">")) %>%
      str_split_fixed("\\|", n=3) %>%
      as_tibble() %>%
      dplyr::rename_all(~ c("DB", "Protein", "Description")) %>%
      mutate(DB = str_sub(DB, 2)) %>%
      mutate(Origin = "EC"))
  # Decoy (REV__) and contaminant (CON__) entries are dropped entirely.
  triqler_res <- triqler_result_df_raw %>%
    left_join(all_prots_species_assignment, by=c(protein = "Protein")) %>%
    mutate(Origin = case_when(
      str_starts(protein, "REV__") ~ "Decoy",
      str_starts(protein, "CON__") ~ "Contamination",
      TRUE ~ Origin
    )) %>%
    filter(Origin != "Decoy" & Origin != "Contamination")
  triqler_res %>%
    ungroup() %>%
    filter(Origin == "HS" | Origin == "EC") %>%
    transmute(name = protein, pval = NA, adj_pval = q_value,
              diff = -log2_fold_change,
              organism_short = ifelse(Origin == "HS", "H. sapiens", "E. coli"))
}
parse_triqler_results_for_cox_benchmark <- function(){
list <- read_lines("tmp/triqler_results/triqler_result.tsv") %>%
str_split("\t")
headers <- list[[1]]
triqler_result_df_raw <- list[-1] %>%
map_dfr(function(chr){
c(set_names(as.list(chr[which(headers != "peptides")]), headers[headers != "peptides"]),
list(peptides = paste0(chr[-which(headers != "peptides")], collapse=";")))
}) %>%
transmute(protein = protein, peptides = peptides,
q_value = parse_number(q_value),
posterior_error_prob = parse_number(posterior_error_prob),
num_peptides = parse_number(num_peptides),
log2_fold_change = parse_number(log2_fold_change),
diff_exp_prob_0.0 = parse_number(diff_exp_prob_0.0))
all_prots_species_assignment <- bind_rows(
read_lines("../data/cox_proteome_benchmark/species_fastas/UP000005640_9606.fasta") %>%
keep(function(x) str_starts(x, ">")) %>%
str_split_fixed("\\|", n=3) %>%
as_tibble() %>%
dplyr::rename_all(~ c("DB", "Protein", "Description")) %>%
mutate(DB = str_sub(DB, 2)) %>%
mutate(Origin = "HS"),
read_lines("../data/cox_proteome_benchmark/species_fastas/UP000000625_83333.fasta") %>%
keep(function(x) str_starts(x, ">")) %>%
str_split_fixed("\\|", n=3) %>%
as_tibble() %>%
dplyr::rename_all(~ c("DB", "Protein", "Description")) %>%
mutate(DB = str_sub(DB, 2)) %>%
mutate(Origin = "EC"))
triqler_res <- triqler_result_df_raw %>%
left_join(all_prots_species_assignment, by=c(protein = "Protein")) %>%
mutate(Origin = case_when(
str_starts(protein, "REV__") ~ "Decoy",
str_starts(protein, "CON__") ~ "Contamination",
TRUE ~ Origin
)) %>%
filter(Origin != "Decoy" & Origin != "Contamination")
triqler_res %>%
ungroup() %>%
filter(Origin == "HS" | Origin == "EC") %>%
transmute(name = protein, pval = NA, adj_pval = q_value,
diff = -log2_fold_change,
organism_short = ifelse(Origin == "HS", "H. sapiens", "E. coli"))
} |
# Find the optimal signed column permutation aligning a cluster of factor
# samples to a pivot matrix (spectral-norm loss).
# Called from clustalign; candidate permutations come from permuter() and
# column signs from signer() (sourced below).
# ARGUMENTS: lambda:  list of (p x k) factor loading samples
#            pivot:   (p x k) matrix to align the permutation to
#            stop:    stopping criterion -- largest acceptable norm of
#                     (pivot - aligned cluster mean)
#            itermax: maximum number of permutations to search, can be larger
#                     than vector memory
# VALUE: list (one element per sample in lambda) of signed permutation vectors
source("permuter.R")
source("signer.R")

PSFout = function(lambda, pivot, stop, itermax = 100000){
  k = ncol(lambda[[1]])
  m = length(lambda)
  # Align the element-wise mean of the cluster samples to the pivot.
  # (The original comment said "median"; the code has always used the mean.)
  first = Reduce("+", lambda) / m
  i = 1
  mindiff = Inf
  minperm = seq_len(k)   # identity fallback: the original left these
  minsign = rep(1, k)    # undefined when the loop body never executed
  while(i < itermax){
    perm = permuter(1:k, i)                  # i-th candidate permutation
    sgn = signer(pivot, first[, perm], stop) # column signs for this candidate
    signed = t(t(first[, perm]) * sgn)
    diff = norm(pivot - signed, type = "2")
    if(diff < stop) break                    # good enough -- accept
    if(diff < mindiff){                      # remember the best seen so far
      mindiff = diff
      minperm = perm
      minsign = sgn
    }
    i = i + 1
  }
  if(i == itermax) {
    # Search exhausted: fall back to the best candidate encountered.
    print(paste("permsignfact itermax of", i, "reached"))
    print(minperm * minsign)
    return(rep(list(minperm * minsign), m))
  }
  print("cluster successfully permuted")
  print(perm * sgn)
  return(rep(list(perm * sgn), m))
}
| /PSFout.R | permissive | poworoznek/sparse_bayesian_infinite_factor_models | R | false | false | 1,309 | r | # Find optimal permutations
# called from clustalign
# svd loss
# ARGUMENTS: lambda: list of factor samples
# pivot: matrix to align permutation to
# stop: stopping criterion, largest reasonable norm for an aligned cluster mean
# itermax: maximum number of permutations to search, can be larger than vector memory
source("permuter.R")
source("signer.R")
# Align the mean of a cluster of factor samples to `pivot` by searching
# signed column permutations (spectral-norm loss); see header comments above.
PSFout = function(lambda, pivot, stop, itermax = 100000){
  k = ncol(lambda[[1]])
  m = length(lambda)
  # Element-wise mean of the cluster samples (the old comment said "median",
  # but the code computes the mean).
  first = Reduce("+", lambda) / m
  i = 1
  mindiff = Inf
  minperm = seq_len(k)   # identity fallback: previously undefined when the
  minsign = rep(1, k)    # loop body never executed (itermax <= 1)
  while(i < itermax){
    perm = permuter(1:k, i)                  # i-th candidate permutation
    sgn = signer(pivot, first[, perm], stop) # column signs for this candidate
    signed = t(t(first[, perm]) * sgn)
    diff = norm(pivot - signed, type = "2")
    if(diff < stop) break                    # good enough -- accept
    if(diff < mindiff){                      # track the best candidate so far
      mindiff = diff
      minperm = perm
      minsign = sgn
    }
    i = i + 1
  }
  if(i == itermax) {
    # Search exhausted: fall back to the best candidate encountered.
    print(paste("permsignfact itermax of", i, "reached"))
    print(minperm * minsign)
    return(rep(list(minperm * minsign), m))
  }
  print("cluster successfully permuted")
  print(perm * sgn)
  return(rep(list(perm * sgn), m))
}
|
testlist <- list(rates = c(-2.70494424383608e-11, -2.70494424244937e-11, -2.70494424244937e-11, -2.70494424244937e-11, 7.06683208934072e-304, -Inf, NaN, 1.23411474800827e+182, NaN, NaN, 5.43223507567683e-312, NaN, -1.34765572425519e+28, 6.47733882085731e-145, -9.52713064494326e+139, 1.36258221338603e-105, -1.34765550943381e+28, 4.73812995908318e+43, -2.70493314160584e-11, NaN, 7.29032584869847e-304, 1.80331547217436e-130, 2.12196682636678e-314, 1.26217821871384e-27, NaN, 4.46014905249241e+43, -2.74306263116906e+304, 9.12404584912149e+194, 2.77448001762435e+180, Inf, 2.77448001762435e+180, 2.77448001762435e+180, 2.77448001762435e+180, 2.77448001762435e+180, -2.74306263440699e+304, 9.12404584912149e+194, 2.77448001762435e+180, 1.20089319277908e-178, -7.21018141782026e+304, 2.77448001762442e+180, 2.77448001762435e+180, 2.68176210458615e-29, 3.83698199462024e+117, 6.94947255553754e-309, 2.77448001764249e+180, -5.35359615124229e+305, NA, 7.2903258603164e-304, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, 1.61013452731574e-24, 1.23175343670293e+194, -1.36160598003672e+28, -1.34765550943381e+28, 6.20102779604429e-305, 1.91561942608236e+53, 9.49129894553818e+194, 0), thresholds = c(3.83677274890702e+117, 8.07538171686265e+115, 2.91240594961789e-144, 4.18998493012724e-144, 7.50871604544118e-308, 2.63554839664967e-82, 2.12196341187911e-314, 2.90435604895085e-144, -3.42891577535994e+304, 3.8368721898241e+117, 3.78259285893079e+117, 2.17165531282319e+45, NaN, 2.77448001762435e+180, 2.77448001761692e+180, 2.77448001762435e+180, 4.1410356212496e+204, 4.1410356681522e+204, 4.1410356681522e+204, 4.1410356681522e+204, 4.1410356681522e+204, 4.1410356681522e+204, 4.1410356681522e+204, -7.2101814244246e+304, 2.77448001459322e+180, 2.77448001762435e+180, 2.77448001762435e+180, 9.52157095844336e-307, 2.77448001676702e+180, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, 1.390671161567e-309, -1.92537522473369e+183, 7.28399830647822e-304, 1.5019240163966e-307, 
1.08618507381958e-306, 1.36276004428566e-105, 2.71663823569962e-311, 1.39067116156574e-309, 2.78134232502494e-307, 4.16348356759272e+202, 2.77448001771535e+180, 2.77448001762435e+180, -1.72227260337227e-93, 1.39124676429592e-316, 1.39065209773244e-309, 3.20762789152191e-211, 2.84132113906601e-173, 2.84132113906601e-173, 4.79031896568503e-109, 1.75980614900606e-130, 1.80331613616957e-130, 4.46015329317765e+43, 3.83698282132418e+117, NaN, NaN, -3.53607015069902e-93, 3.56159180157686e-314, 7.29032242438082e-304, 1.60266322567036e-24, 2.84132113906601e-173, 2.84132113906601e-173, 1.34275506324855e+241, -7.22669353876649e+304, 5.43157195199216e-312, 5.43223507585964e-312, 3.83698439012239e+117, 7.18520929158341e-304, 1.50192485449233e-307, -5.3172514347146e+305, 9.94684568527618e-316, 5.12272806694661e+120, 1.44632716009107e-307, NaN, -2.74306263117642e+304, 1.66880379407532e-308, 1.38926515211963e-309, 1.80331613627316e-130, 2.12196682636678e-314, 1.26217821871384e-27), x = numeric(0))
result <- do.call(grattan::IncomeTax,testlist)
str(result) | /grattan/inst/testfiles/IncomeTax/libFuzzer_IncomeTax/IncomeTax_valgrind_files/1610125874-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 3,079 | r | testlist <- list(rates = c(-2.70494424383608e-11, -2.70494424244937e-11, -2.70494424244937e-11, -2.70494424244937e-11, 7.06683208934072e-304, -Inf, NaN, 1.23411474800827e+182, NaN, NaN, 5.43223507567683e-312, NaN, -1.34765572425519e+28, 6.47733882085731e-145, -9.52713064494326e+139, 1.36258221338603e-105, -1.34765550943381e+28, 4.73812995908318e+43, -2.70493314160584e-11, NaN, 7.29032584869847e-304, 1.80331547217436e-130, 2.12196682636678e-314, 1.26217821871384e-27, NaN, 4.46014905249241e+43, -2.74306263116906e+304, 9.12404584912149e+194, 2.77448001762435e+180, Inf, 2.77448001762435e+180, 2.77448001762435e+180, 2.77448001762435e+180, 2.77448001762435e+180, -2.74306263440699e+304, 9.12404584912149e+194, 2.77448001762435e+180, 1.20089319277908e-178, -7.21018141782026e+304, 2.77448001762442e+180, 2.77448001762435e+180, 2.68176210458615e-29, 3.83698199462024e+117, 6.94947255553754e-309, 2.77448001764249e+180, -5.35359615124229e+305, NA, 7.2903258603164e-304, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, 1.61013452731574e-24, 1.23175343670293e+194, -1.36160598003672e+28, -1.34765550943381e+28, 6.20102779604429e-305, 1.91561942608236e+53, 9.49129894553818e+194, 0), thresholds = c(3.83677274890702e+117, 8.07538171686265e+115, 2.91240594961789e-144, 4.18998493012724e-144, 7.50871604544118e-308, 2.63554839664967e-82, 2.12196341187911e-314, 2.90435604895085e-144, -3.42891577535994e+304, 3.8368721898241e+117, 3.78259285893079e+117, 2.17165531282319e+45, NaN, 2.77448001762435e+180, 2.77448001761692e+180, 2.77448001762435e+180, 4.1410356212496e+204, 4.1410356681522e+204, 4.1410356681522e+204, 4.1410356681522e+204, 4.1410356681522e+204, 4.1410356681522e+204, 4.1410356681522e+204, -7.2101814244246e+304, 2.77448001459322e+180, 2.77448001762435e+180, 2.77448001762435e+180, 
9.52157095844336e-307, 2.77448001676702e+180, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, NaN, 1.390671161567e-309, -1.92537522473369e+183, 7.28399830647822e-304, 1.5019240163966e-307, 1.08618507381958e-306, 1.36276004428566e-105, 2.71663823569962e-311, 1.39067116156574e-309, 2.78134232502494e-307, 4.16348356759272e+202, 2.77448001771535e+180, 2.77448001762435e+180, -1.72227260337227e-93, 1.39124676429592e-316, 1.39065209773244e-309, 3.20762789152191e-211, 2.84132113906601e-173, 2.84132113906601e-173, 4.79031896568503e-109, 1.75980614900606e-130, 1.80331613616957e-130, 4.46015329317765e+43, 3.83698282132418e+117, NaN, NaN, -3.53607015069902e-93, 3.56159180157686e-314, 7.29032242438082e-304, 1.60266322567036e-24, 2.84132113906601e-173, 2.84132113906601e-173, 1.34275506324855e+241, -7.22669353876649e+304, 5.43157195199216e-312, 5.43223507585964e-312, 3.83698439012239e+117, 7.18520929158341e-304, 1.50192485449233e-307, -5.3172514347146e+305, 9.94684568527618e-316, 5.12272806694661e+120, 1.44632716009107e-307, NaN, -2.74306263117642e+304, 1.66880379407532e-308, 1.38926515211963e-309, 1.80331613627316e-130, 2.12196682636678e-314, 1.26217821871384e-27), x = numeric(0))
result <- do.call(grattan::IncomeTax,testlist)
str(result) |
## Auto-generated example for uavRst::treepos_RL -- tree detection on a
## LiDAR-derived Canopy Height Model (CHM).
library(uavRst)

### Name: treepos_RL
### Title: 'rLiDAR' based tree detection of a LiDAR-derived Canopy Height
###   Model (CHM)
### Aliases: treepos_RL

### ** Examples

## required packages
require(uavRst)

## load the bundled canopy height model
data(chm_seg)

## detect tree tops: 3x3 moving window, minimum tree height of 10 m
tPosRL <- treepos_RL(chm = chm_seg[[1]], movingWin = 3, minTreeAlt = 10)

## visualise the detected positions
raster::plot(tPosRL)
| /data/genthat_extracted_code/uavRst/examples/treepos_RL.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 408 | r | library(uavRst)
### Name: treepos_RL
### Title: 'rLiDAR' based tree detection of a LiDAR-derived Canopy Height
### Model (CHM)
### Aliases: treepos_RL
### ** Examples
## required packages
require(uavRst)
## load data
data(chm_seg)
## find trees
tPosRL <- treepos_RL(chm = chm_seg[[1]],
movingWin = 3,
minTreeAlt = 10)
## visualisation
raster::plot(tPosRL)
|
\name{agFun}
\alias{agFun}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Aggregation Function
}
\description{
Function to create summary tables: Aggregated mean, median, sd and se
}
\usage{
agFun(dta, anty, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{dta}{ Input data, in the data.frame format }
\item{anty}{ Name of column with type of analyses to be done }
\item{...}{ Further arguments to be passed to the aggregation function. Vector of aggregation factor. }
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
Marisa Vedor, Ivo da Costa, Nuno Queiroz
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
# Load the shark data set (data() loads the object into the workspace;
# it does not return the data itself)
data('SharkData')
agFun(dta = SharkData, anty = 'Ov_mean', SharkData$Spp)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }% use one of RShowDoc("KEYWORDS")
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
| /GSMP_Rpackage/GSMP/man/agFun.Rd | no_license | HilarioMurua/GlobalSpatialRisk | R | false | false | 1,407 | rd | \name{agFun}
\alias{agFun}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Aggregation Function
}
\description{
Function to create summary tables: Aggregated mean, median, sd and se
}
\usage{
agFun(dta, anty, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{dta}{ Input data, in the data.frame format }
\item{anty}{ Name of column with type of analyses to be done }
\item{...}{ Further arguments to be passed to the aggregation function. Vector of aggregation factor. }
}
\details{
%% ~~ If necessary, more details than the description above ~~
}
\value{
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
Marisa Vedor, Ivo da Costa, Nuno Queiroz
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
# Read shark data
tbl <- data('SharkData')
agFun(dta = tbl, anty = 'Ov_mean', tbl$Spp)
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }% use one of RShowDoc("KEYWORDS")
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/constants.R
\docType{data}
\name{.h2o.__DKV}
\alias{.h2o.__DKV}
\title{Removal Endpoints}
\format{
An object of class \code{character} of length 1.
}
\usage{
.h2o.__DKV
}
\description{
Removal Endpoints
}
\keyword{datasets}
| /man/dot-h2o.__DKV.Rd | no_license | cran/h2o | R | false | true | 302 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/constants.R
\docType{data}
\name{.h2o.__DKV}
\alias{.h2o.__DKV}
\title{Removal Endpoints}
\format{
An object of class \code{character} of length 1.
}
\usage{
.h2o.__DKV
}
\description{
Removal Endpoints
}
\keyword{datasets}
|
\name{draw.shape}
\alias{draw.shape}
\title{
Draw shapefiles in an existing plot
}
\description{
Draw shapefiles in an existing plot
}
\usage{
draw.shape(shape, type = "poly", col = 1, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{shape}{
a shape list object created by (\code{\link[shapefiles]{shapefiles}}
}
\item{type}{
type of plot desired. The following values are possible: \code{"p"} for points, \code{"l"} or \code{"lines"} for lines and \code{"poly"} (default) for polygons.
}
\item{col}{
the colour of the points, lines or polygons
}
\item{\dots}{
other arguments to be passed to \code{\link{points}}, \code{\link{lines}} or \code{\link{polygon}}
}
}
\note{
The shapefile needs to have the WGS 84 Geographic Coordinate System in order to display properly on a map of longitude and latitude.
}
\author{
Hans Gerritsen
}
\seealso{
\code{\link[shapefiles]{read.shapefile}}
}
\examples{
library(shapefiles)
shp.file <- file.path(system.file(package = "mapplots", "extdata"), "Ireland")
irl <- read.shapefile(shp.file)
xlim <- c(-11,-5.5)
ylim <- c(51.5,55.5)
basemap(xlim, ylim)
draw.shape(irl, col="cornsilk")
} | /man/draw.shape.Rd | no_license | marchtaylor/mapplots | R | false | false | 1,205 | rd | \name{draw.shape}
\alias{draw.shape}
\title{
Draw shapefiles in an existing plot
}
\description{
Draw shapefiles in an existing plot
}
\usage{
draw.shape(shape, type = "poly", col = 1, ...)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{shape}{
a shape list object created by (\code{\link[shapefiles]{shapefiles}}
}
\item{type}{
type of plot desired. The following values are possible: \code{"p"} for points, \code{"l"} or \code{"lines"} for lines and \code{"poly"} (default) for polygons.
}
\item{col}{
the colour of the points, lines or polygons
}
\item{\dots}{
other arguments to be passed to \code{\link{points}}, \code{\link{lines}} or \code{\link{polygon}}
}
}
\note{
The shapefile needs to have the WGS 84 Geographic Coordinate System in order to display properly on a map of longitude and latitude.
}
\author{
Hans Gerritsen
}
\seealso{
\code{\link[shapefiles]{read.shapefile}}
}
\examples{
library(shapefiles)
shp.file <- file.path(system.file(package = "mapplots", "extdata"), "Ireland")
irl <- read.shapefile(shp.file)
xlim <- c(-11,-5.5)
ylim <- c(51.5,55.5)
basemap(xlim, ylim)
draw.shape(irl, col="cornsilk")
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/create_references_df_from_list.R
\name{get_secondary_authors}
\alias{get_secondary_authors}
\title{Helper function: get secondary authors from list for a reference}
\usage{
get_secondary_authors(record_list, collapse = FALSE)
}
\arguments{
\item{record_list}{list with one record of create_endnote_list()}
\item{collapse}{should separate fields in "style" be collapsed to one field?
(default: FALSE)}
}
\value{
one row authors data frame
}
\description{
Helper function: get secondary authors from list for a reference
}
| /man/get_secondary_authors.Rd | permissive | KWB-R/kwb.endnote | R | false | true | 600 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/create_references_df_from_list.R
\name{get_secondary_authors}
\alias{get_secondary_authors}
\title{Helper function: get secondary authors from list for a reference}
\usage{
get_secondary_authors(record_list, collapse = FALSE)
}
\arguments{
\item{record_list}{list with one record of create_endnote_list()}
\item{collapse}{should separate fields in "style" be collapsed to one field?
(default: FALSE)}
}
\value{
one row authors data frame
}
\description{
Helper function: get secondary authors from list for a reference
}
|
context('Stan Bridge Sampler Bugs')
# Regression test: bridge_sampler.stanfit() previously failed with cores > 1
# for models with a single parameter.  Pass condition is simply that the
# multicore call below completes without error (no value is checked).
test_that("bridge_sampler.stanfit multicore works for one-parameter model.", {
  skip_on_cran()
  skip_on_travis()
  skip_on_os("windows")
  if (require(rstan)) {
    set.seed(12345)
    # compute difference scores
    n <- 10
    y <- rnorm(n)
    # models
    # Single-parameter Stan model: Jeffreys prior on the variance sigma2.
    stancodeH0 <- '
    data {
      int<lower=1> n; // number of observations
      vector[n] y; // observations
    }
    parameters {
      real<lower=0> sigma2; // variance parameter
    }
    model {
      target += log(1/sigma2); // Jeffreys prior on sigma2
      target += normal_lpdf(y | 0, sqrt(sigma2)); // likelihood
    }
    '
    # compile models
    # (capture.output() silences rstan's compilation chatter)
    tmp <- capture.output(
      stanmodelH0 <- stan_model(model_code = stancodeH0, model_name="stanmodel")
    )
    # fit models
    tmp <- capture.output(
      stanfitH0 <- sampling(stanmodelH0, data = list(y = y, n = n),
                            iter = 10000, warmup = 1000, chains = 4,
                            control = list(adapt_delta = 0.95))
    )
    ######### bridge sampling ###########
    # The multicore (cores = 2) call is the behavior under test.
    suppressWarnings(H0 <- bridge_sampler(stanfitH0, cores = 2, silent = TRUE))
  }
})
# Turtle clutch example: the Bayes factor from warp-3 bridge sampling for
# H0 (no random intercepts) vs H1 (random clutch intercepts) should
# reproduce the published value of ~1.27 across 25 repetitions.
# Fix: the m0 data list previously passed `clutch = turtles$clucth`
# (misspelled column name -> NULL); it now uses the correct `clutch` column.
test_that("turtle example",{
  skip_on_cran()
  if (require(rstan)) {
    data("turtles")

    ### m1 (model with random intercepts, non-centred parameterisation) ###
    m1_code_nc <-
      "data {
        int<lower = 1> nobs;
        int<lower = 0, upper = 1> y[nobs];
        real<lower = 0> x[nobs];
        int<lower = 1> m;
        int<lower = 1> clutch[nobs];
      }
      parameters {
        real alpha0_raw;
        real alpha1_raw;
        vector[m] b_raw;
        real<lower = 0> sigma2;
      }
      transformed parameters {
        vector[m] b;
        real<lower = 0> sigma = sqrt(sigma2);
        real alpha0 = sqrt(10.0)*alpha0_raw;
        real alpha1 = sqrt(10.0)*alpha1_raw;
        b = b_raw*sigma;
      }
      model {
        // priors
        target += -2*log(1 + sigma2); // p(sigma2) = 1/(1 + sigma2)^2
        target += normal_lpdf(alpha0_raw | 0, 1);
        target += normal_lpdf(alpha1_raw | 0, 1);
        // random effects
        target += normal_lpdf(b_raw | 0, 1);
        // likelihood
        for (i in 1:nobs)
          target += bernoulli_lpmf(y[i] | Phi(alpha0 + alpha1*x[i] + b[clutch[i]]));
      }"
    tmp <- capture.output(stanobject_m1_nc <- stan(model_code = m1_code_nc,
                          data = list(y = turtles$y, x = turtles$x,
                                      nobs = nrow(turtles),
                                      m = max(turtles$clutch),
                                      clutch = turtles$clutch),
                          iter = 10500, warmup = 500, chains = 4))
    bs_m1_nc <- bridge_sampler(stanobject_m1_nc, method = "warp3",
                               repetitions = 25, silent=TRUE)

    ### m0 (fixed effects only) ###
    m0_code_nc <-
      "data {
        int<lower = 1> nobs;
        int<lower = 0, upper = 1> y[nobs];
        real<lower = 0> x[nobs];
      }
      parameters {
        real alpha0_raw;
        real alpha1_raw;
      }
      transformed parameters {
        real alpha0 = sqrt(10.0)*alpha0_raw;
        real alpha1 = sqrt(10.0)*alpha1_raw;
      }
      model {
        // priors
        target += normal_lpdf(alpha0_raw | 0, 1);
        target += normal_lpdf(alpha1_raw | 0, 1);
        // likelihood
        for (i in 1:nobs)
          target += bernoulli_lpmf(y[i] | Phi(alpha0 + alpha1*x[i]));
      }"
    tmp <- capture.output(stanobject_m0_nc <- stan(model_code = m0_code_nc,
                          data = list(y = turtles$y, x = turtles$x,
                                      nobs = nrow(turtles),
                                      m = max(turtles$clutch),
                                      clutch = turtles$clutch), # typo fixed (was turtles$clucth)
                          iter = 10500, warmup = 500, chains = 4))
    bs_m0_nc <- bridge_sampler(stanobject_m0_nc, method = "warp3",
                               repetitions = 25, silent=TRUE)

    # BF(H0 vs H1) across the 25 repetitions should match ~1.27.
    expect_equal(bf(bs_m0_nc, bs_m1_nc)$bf, rep(1.27, 25), tolerance = 0.02)
  }
})
| /tests/testthat/test-stan_bridge_sampler_bugs.R | no_license | vandenman/bridgesampling | R | false | false | 3,958 | r |
context('Stan Bridge Sampler Bugs')
test_that("bridge_sampler.stanfit multicore works for one-parameter model.", {
skip_on_cran()
skip_on_travis()
skip_on_os("windows")
if (require(rstan)) {
set.seed(12345)
# compute difference scores
n <- 10
y <- rnorm(n)
# models
stancodeH0 <- '
data {
int<lower=1> n; // number of observations
vector[n] y; // observations
}
parameters {
real<lower=0> sigma2; // variance parameter
}
model {
target += log(1/sigma2); // Jeffreys prior on sigma2
target += normal_lpdf(y | 0, sqrt(sigma2)); // likelihood
}
'
# compile models
tmp <- capture.output(
stanmodelH0 <- stan_model(model_code = stancodeH0, model_name="stanmodel")
)
# fit models
tmp <- capture.output(
stanfitH0 <- sampling(stanmodelH0, data = list(y = y, n = n),
iter = 10000, warmup = 1000, chains = 4,
control = list(adapt_delta = 0.95))
)
######### bridge sampling ###########
suppressWarnings(H0 <- bridge_sampler(stanfitH0, cores = 2, silent = TRUE))
}
})
test_that("turtle example",{
skip_on_cran()
if (require(rstan)) {
data("turtles")
### m1 (probit model with clutch-level random intercepts) ###
m1_code_nc <-
"data {
int<lower = 1> nobs;
int<lower = 0, upper = 1> y[nobs];
real<lower = 0> x[nobs];
int<lower = 1> m;
int<lower = 1> clutch[nobs];
}
parameters {
real alpha0_raw;
real alpha1_raw;
vector[m] b_raw;
real<lower = 0> sigma2;
}
transformed parameters {
vector[m] b;
real<lower = 0> sigma = sqrt(sigma2);
real alpha0 = sqrt(10.0)*alpha0_raw;
real alpha1 = sqrt(10.0)*alpha1_raw;
b = b_raw*sigma;
}
model {
// priors
target += -2*log(1 + sigma2); // p(sigma2) = 1/(1 + sigma2)^2
target += normal_lpdf(alpha0_raw | 0, 1);
target += normal_lpdf(alpha1_raw | 0, 1);
// random effects
target += normal_lpdf(b_raw | 0, 1);
// likelihood
for (i in 1:nobs)
target += bernoulli_lpmf(y[i] | Phi(alpha0 + alpha1*x[i] + b[clutch[i]]));
}"
tmp <- capture.output(stanobject_m1_nc <- stan(model_code = m1_code_nc,
data = list(y = turtles$y, x = turtles$x,
nobs = nrow(turtles),
m = max(turtles$clutch),
clutch = turtles$clutch),
iter = 10500, warmup = 500, chains = 4))
bs_m1_nc <- bridge_sampler(stanobject_m1_nc, method = "warp3",
repetitions = 25, silent=TRUE)
### m0 (probit model without random effects) ###
m0_code_nc <-
"data {
int<lower = 1> nobs;
int<lower = 0, upper = 1> y[nobs];
real<lower = 0> x[nobs];
}
parameters {
real alpha0_raw;
real alpha1_raw;
}
transformed parameters {
real alpha0 = sqrt(10.0)*alpha0_raw;
real alpha1 = sqrt(10.0)*alpha1_raw;
}
model {
// priors
target += normal_lpdf(alpha0_raw | 0, 1);
target += normal_lpdf(alpha1_raw | 0, 1);
// likelihood
for (i in 1:nobs)
target += bernoulli_lpmf(y[i] | Phi(alpha0 + alpha1*x[i]));
}"
# Bug fix: the data list previously passed m and `clutch = turtles$clucth`
# (a typo evaluating to NULL). The m0 Stan model declares neither variable,
# so the unused entries are dropped entirely.
tmp <- capture.output(stanobject_m0_nc <- stan(model_code = m0_code_nc,
data = list(y = turtles$y, x = turtles$x,
nobs = nrow(turtles)),
iter = 10500, warmup = 500, chains = 4))
bs_m0_nc <- bridge_sampler(stanobject_m0_nc, method = "warp3",
repetitions = 25, silent=TRUE)
# Reference Bayes factor 1.27 taken from the original analysis of this data set.
expect_equal(bf(bs_m0_nc, bs_m1_nc)$bf, rep(1.27, 25), tolerance = 0.02)
}
})
|
# Fit a 10-fold cross-validated elastic net (alpha = 0.04, MAE loss) regressing
# the first column of the training CSV on columns 4..ncol, then append the
# fitted glmnet path to a text report.
library(glmnet)
# Fix: `head=T` relied on partial matching of read.table's `header` argument
# and on the reassignable alias T; spell both out.
mydata <- read.table("../../../../TrainingSet/FullSet/Correlation/upper_aerodigestive_tract.csv",
                     header = TRUE, sep = ",")
x <- as.matrix(mydata[, 4:ncol(mydata)])  # predictor matrix
y <- as.matrix(mydata[, 1])               # response
set.seed(123)                             # reproducible CV fold assignment
glm <- cv.glmnet(x, y, nfolds = 10, type.measure = "mae", alpha = 0.04,
                 family = "gaussian", standardize = TRUE)
sink('./upper_aerodigestive_tract_019.txt', append = TRUE)
print(glm$glmnet.fit)
sink()
| /Model/EN/Correlation/upper_aerodigestive_tract/upper_aerodigestive_tract_019.R | no_license | esbgkannan/QSMART | R | false | false | 388 | r | library(glmnet)
# Fit a 10-fold cross-validated elastic net (alpha = 0.04, MAE loss) and append
# the fitted glmnet path to a text report. glmnet is attached above.
# Fix: `head=T` relied on partial matching of read.table's `header` argument
# and on the reassignable alias T; spell both out.
mydata <- read.table("../../../../TrainingSet/FullSet/Correlation/upper_aerodigestive_tract.csv",
                     header = TRUE, sep = ",")
x <- as.matrix(mydata[, 4:ncol(mydata)])  # predictor matrix
y <- as.matrix(mydata[, 1])               # response
set.seed(123)                             # reproducible CV fold assignment
glm <- cv.glmnet(x, y, nfolds = 10, type.measure = "mae", alpha = 0.04,
                 family = "gaussian", standardize = TRUE)
sink('./upper_aerodigestive_tract_019.txt', append = TRUE)
print(glm$glmnet.fit)
sink()
|
# Shiny server for the US accidents dashboard.
# Relies on objects prepared outside this file (presumably global.R):
# accident_count, new_accident_count, accidents, weather_data
# -- TODO(review): confirm where these are created.
shinyServer(
  function(input, output, session) {
    #===================================================
    #================== Map tab ==================
    #===================================================
    # Choropleth of accident counts per US state.
    output$mapPlot <- renderPlotly({
      df <- accident_count
      df$hover <- with(df, paste(State_full, '<br>', "Number of accidents:", n))
      g <- list(
        scope = 'usa',
        projection = list(type = 'albers usa'),
        showlakes = TRUE,
        lakecolor = toRGB('white')
      )
      fig <- plot_geo(df, locationmode = 'USA-states')
      fig <- fig %>% add_trace(
        z = ~n, text = ~hover, locations = ~State,
        color = ~n, colors = 'Blues'
      )
      fig <- fig %>% colorbar(title = "Accident Count")
      fig <- fig %>% layout(geo = g)
      fig
    })
    #===================================================
    #=============== Quantity tab =================
    #===================================================
    # Donut chart of accident share per state.
    output$barPlot <- renderPlotly({
      fig <- new_accident_count %>% plot_ly(labels = ~State_full, values = ~n)
      fig <- fig %>% add_pie(hole = 0.6)
      fig <- fig %>% layout(title = "", showlegend = TRUE,
                            xaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE),
                            yaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE))
      fig
    })
    # Monthly accident counts, in calendar order.
    output$linePlot <- renderPlotly({
      month <- c('January', 'February', 'March', 'April', 'May', 'June', 'July',
                 'August', 'September', 'October', 'November', 'December')
      # Renamed from the misspelled `number_of_accidnets`.
      number_of_accidents <- table(accidents$Month)
      month_data <- data.frame(month, number_of_accidents)
      # Freeze factor levels so the x axis keeps calendar order.
      month_data$month <- factor(month_data$month, levels = month_data[["month"]])
      fig <- plot_ly(month_data, x = ~month, y = ~number_of_accidents,
                     type = 'scatter', mode = 'lines',
                     line = list(color = 'rgb(158,202,225)', width = 4))
      fig <- fig %>% layout(paper_bgcolor='rgb(255,255,255)', plot_bgcolor='rgb(229,229,229)',
                            xaxis = list(title = ""),
                            yaxis = list(title = "The number of accidents",
                                         gridcolor = 'rgb(255,255,255)'))
      fig
    })
    #===================================================
    #================= Time tab =================
    #===================================================
    # Accidents by hour of day; bar chart or line chart per user choice.
    output$barTimePlot <- renderPlotly({
      # Hour labels and counts are shared by both chart types (previously
      # duplicated verbatim in each branch of the if/else).
      hours <- c("00:00","01:00","02:00","03:00","04:00","05:00","06:00","07:00",
                 "08:00","09:00","10:00","11:00","12:00","13:00","14:00","15:00",
                 "16:00","17:00","18:00","19:00","20:00","21:00","22:00","23:00")
      counts <- c(table(accidents$Hour))
      time_data <- data.frame(x = hours, y = counts)
      # Highlight the morning/evening rush hours in red, all others in blue
      # (same colors as the original hand-written 24-element vector).
      rush_hours <- c("07:00", "08:00", "16:00", "17:00")
      bar_colors <- ifelse(hours %in% rush_hours,
                           'rgb(255,102,102)', 'rgb(158,202,225)')
      if (input$showBarchart == "Yes") {
        fig <- plot_ly(time_data, x = ~x, y = ~y, type = 'bar',
                       marker = list(color = bar_colors))
        fig <- fig %>% layout(xaxis = list(title = "", tickangle = -45),
                              yaxis = list(title = ""))
        fig
      }
      else {
        fig <- plot_ly(time_data, x = ~x, y = ~y, type = 'scatter', mode = 'lines',
                       line = list(color = 'rgb(255,102,102)', width = 4))
        fig <- fig %>% layout(title = "",
                              xaxis = list(title = ""),
                              yaxis = list(title = ""))
        fig
      }
    })
    #===================================================
    #========= Weather Conditions tab ===========
    #===================================================
    output$weather_wc <- renderWordcloud2(
      wordcloud2(weather_data, size = 2, minRotation = -pi/2, maxRotation = -pi/2)
    )
  })
| /server.R | no_license | Jiaoli413/Data-Visualization-Project | R | false | false | 4,793 | r | shinyServer(
function(input, output, session) {
output$mapPlot = renderPlotly({
df <- accident_count
df$hover <- with(df, paste(State_full, '<br>', "Number of accidents:", n))
l <- list(color = toRGB("grey"), width = 0.5)
g <- list(
scope = 'usa',
projection = list(type = 'albers usa'),
showlakes = TRUE,
lakecolor = toRGB('white')
)
fig <- plot_geo(df, locationmode = 'USA-states')
fig <- fig %>% add_trace(
z = ~n, text = ~hover, locations = ~State,
color = ~n, colors = 'Blues'
)
fig <- fig %>% colorbar(title = "Accident Count")
fig <- fig %>% layout(
geo = g
)
fig
})
#===================================================
#=============== Quantity tab =================
#===================================================
output$barPlot = renderPlotly({
fig <- new_accident_count %>% plot_ly(labels = ~State_full, values = ~n)
fig <- fig %>% add_pie(hole = 0.6)
fig <- fig %>% layout(title = "", showlegend = T,
xaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE),
yaxis = list(showgrid = FALSE, zeroline = FALSE, showticklabels = FALSE))
fig
})
output$linePlot = renderPlotly({
month <- c('January', 'February', 'March', 'April', 'May', 'June', 'July',
'August', 'September', 'October', 'November', 'December')
number_of_accidnets <- table(accidents$Month)
month_data <- data.frame(month, number_of_accidnets)
month_data$month <- factor(month_data$month, levels = month_data[["month"]])
fig <- plot_ly(month_data, x = ~month, y = ~number_of_accidnets,
type = 'scatter', mode = 'lines',
line = list(color = 'rgb(158,202,225)', width = 4))
fig <- fig %>% layout(paper_bgcolor='rgb(255,255,255)', plot_bgcolor='rgb(229,229,229)',
xaxis = list(title = ""),
yaxis = list(title = "The number of accidents",
gridcolor = 'rgb(255,255,255)'))
fig
})
#===================================================
#================= Time tab =================
#===================================================
output$barTimePlot = renderPlotly({
if (input$showBarchart == "Yes") {
x <- c("00:00","01:00","02:00","03:00","04:00","05:00","06:00","07:00",
"08:00","09:00","10:00","11:00","12:00","13:00","14:00","15:00",
"16:00","17:00","18:00","19:00","20:00","21:00","22:00","23:00")
y <- c(table(accidents$Hour))
time_data <- data.frame(x,y)
fig <- plot_ly(time_data, x = ~x, y = ~y, type = 'bar',
marker = list(color = c('rgb(158,202,225)', 'rgb(158,202,225)','rgb(158,202,225)', 'rgb(158,202,225)',
'rgb(158,202,225)', 'rgb(158,202,225)','rgb(158,202,225)', 'rgb(255,102,102)',
'rgb(255,102,102)', 'rgb(158,202,225)','rgb(158,202,225)', 'rgb(158,202,225)',
'rgb(158,202,225)', 'rgb(158,202,225)','rgb(158,202,225)', 'rgb(158,202,225)',
'rgb(255,102,102)', 'rgb(255,102,102)','rgb(158,202,225)', 'rgb(158,202,225)',
'rgb(158,202,225)', 'rgb(158,202,225)','rgb(158,202,225)', 'rgb(158,202,225)')))
fig <- fig %>% layout(xaxis = list(title = "", tickangle = -45),
yaxis = list(title = ""))
fig
}
else {
x <- c("00:00","01:00","02:00","03:00","04:00","05:00","06:00","07:00",
"08:00","09:00","10:00","11:00","12:00","13:00","14:00","15:00",
"16:00","17:00","18:00","19:00","20:00","21:00","22:00","23:00")
y <- c(table(accidents$Hour))
time_data <- data.frame(x,y)
fig <- plot_ly(time_data, x = ~x, y = ~y, type = 'scatter', mode = 'lines',
line = list(color = 'rgb(255,102,102)', width = 4))
fig <- fig %>% layout(title = "",
xaxis = list(title = ""),
yaxis = list (title = ""))
fig
}
})
#===================================================
#========= Weather Conditions tab ===========
#===================================================
output$weather_wc = renderWordcloud2(
wordcloud2(weather_data, size = 2, minRotation = -pi/2, maxRotation = -pi/2)
)
})
|
# Dependencies for the ECIS Excel importer defined below.
library(plyr);library(dplyr)
library(reshape2)
library(tidyr) # separate
library(magrittr)
library(readxl)
library(RSQLite)
# All raw Excel exports whose names end in _1.xls or _2.xls.
# NOTE(review): the character class [1,2] also matches a literal comma and the
# dot is an unescaped wildcard; "_[12]\\.xls$" is probably what was meant --
# confirm against the actual file names in rawdata/.
xlsfiles <- file.path("rawdata", list.files(path = "rawdata", pattern = "_[1,2]{1}.xls"))
# Target SQLite database; loadData2() appends to its tables.
db <- dbConnect(SQLite(), dbname = "data/ECIS2.sqlite")
loadData2 <- function(xlsfile, db.conn){
# Set path
path <- xlsfile
Date <- sub(".+([0-9]{6}).+","\\1", path)
# get and name sheets
sheets <- excel_sheets(path)
wb <- lapply(sheets, read_excel, path = path)
names(wb) <- sheets
# Find marks
marks <- wb$Comments %>% use_series("<New Experiment>") %>%
"["(c(grep("<Mark>",.), grep("<Mark>",.) + 2)) %>%
matrix(ncol=2, dimnames = list(c(),c("Mark","Label"))) %>%
data.frame() %>%
separate(., Mark, into = c("Mark", "Time"), sep = ": ") %>%
mutate(Time = as.numeric(Time), Date = Date) %>% dplyr::select(-1)
dbWriteTable(conn = db.conn, name = "Marks", value = marks,
row.names = FALSE, append = TRUE)
# Some experimental details
details <- wb$Details %>% "["(1:8,1:2) %>% set_colnames(c("ECIS_Parameters","value"))
details <- data.frame(t(details$value)) %>% set_colnames(details$ECIS_Parameters)
dbWriteTable(conn = db.conn, name = "Details", value = details,
row.names = FALSE, append = TRUE)
# Read data to table
wbs <- wb[names(wb) %>% setdiff(c("Details", "Comments"))] %>%
lapply(.%>% melt(id.vars = c("Time (hrs)"))) %>% ldply %>%
mutate(Well = sub("([A-H][0-9]{1,2}) ([ZRC])", "\\1", variable),
Param = sub("([A-H][0-9]{1,2}) ([ZRC])", "\\2", variable),
Freq = sub(".+ ([0-9]+) .+","\\1", .id))
wbs$Date <- Date
wbs$.id <- NULL
wbs$variable <- NULL
dbWriteTable(conn = db.conn, name = "Data", value = wbs,
row.names = FALSE, append = TRUE)
# Retrieve metadata
metadata <- list.files(path = "rawdata", pattern = paste0(Date,"_Metadata"), full.names = T) %>%
sapply(read.csv, header = F, simplify = F, colClasses = "character") %>% ldply %>% melt(id.vars = ".id") %>%
group_by(.id, variable) %>%
mutate(Well = paste0(LETTERS[1:8], sub("V([0-9]+)","\\1",variable)),
Metadata = sub(".+data_(.+).csv","\\1",.id),
Date = Date) %>% ungroup %>% dplyr::select(-.id, -variable) %>%
as.data.frame
dbWriteTable(conn = db.conn, name = "Metadata", value = metadata,
row.names = FALSE, append = TRUE)
# Read modelingdata to table
rba.file <- list.files(path = "rawdata", pattern = paste0(Date,"_MFT_[0-9]_RbA.csv"), full.names = T)
rbdata <- rba.file %>% read.table(., skip = 23, header = FALSE, sep = ",")
rb.id <- rba.file %>% read.table(., skip = 19, nrows = 2, header = FALSE,
sep = ",", stringsAsFactors = FALSE, strip.white = TRUE)
colnames(rbdata) <- c(rb.id[1, 1], paste(rb.id[2, 2:ncol(rbdata)], rb.id[1, 2:ncol(rbdata)]))
rbdata %<>% melt(id.var = "Time (hrs)") %>%
mutate(Well = sub("([A-H][0-9]{1,2}) (.+)", "\\1", variable),
Param = sub("([A-H][0-9]{1,2}) (.+)", "\\2", variable))
rbdata$Date <- Date
rbdata$variable <- NULL
dbWriteTable(conn = db.conn, name = "Rbmodel", value = rbdata,
row.names = FALSE, append = TRUE)
}
# Scratch snippets kept for reference (join a 5% sample with the metadata):
# wb.sm <- wbs %>% group_by(Param, Freq) %>% sample_frac(size = 0.05)
# wb.merge <- metadata %>% spread(Metadata, value) %>% left_join(wb.sm, .)
# head(wb.merge)
# Import every workbook found above and time the whole run.
start <- Sys.time()
(sapply(xlsfiles, .%>% loadData2(db.conn = db)))
end <- Sys.time()
end-start
# Quick inspection of the resulting schema and contents.
dbListTables(db)
dbListFields(db, "Marks")
dbGetQuery(db, 'SELECT * FROM Details')
dbDisconnect(db) # Close connection
| /lib/LoadData2.R | no_license | tpall/ECIS_Project | R | false | false | 3,677 | r |
library(plyr);library(dplyr)
library(reshape2)
library(tidyr) # separate
library(magrittr)
library(readxl)
library(RSQLite)
xlsfiles <- file.path("rawdata", list.files(path = "rawdata", pattern = "_[1,2]{1}.xls"))
db <- dbConnect(SQLite(), dbname = "data/ECIS2.sqlite")
loadData2 <- function(xlsfile, db.conn){
# Set path
path <- xlsfile
Date <- sub(".+([0-9]{6}).+","\\1", path)
# get and name sheets
sheets <- excel_sheets(path)
wb <- lapply(sheets, read_excel, path = path)
names(wb) <- sheets
# Find marks
marks <- wb$Comments %>% use_series("<New Experiment>") %>%
"["(c(grep("<Mark>",.), grep("<Mark>",.) + 2)) %>%
matrix(ncol=2, dimnames = list(c(),c("Mark","Label"))) %>%
data.frame() %>%
separate(., Mark, into = c("Mark", "Time"), sep = ": ") %>%
mutate(Time = as.numeric(Time), Date = Date) %>% dplyr::select(-1)
dbWriteTable(conn = db.conn, name = "Marks", value = marks,
row.names = FALSE, append = TRUE)
# Some experimental details
details <- wb$Details %>% "["(1:8,1:2) %>% set_colnames(c("ECIS_Parameters","value"))
details <- data.frame(t(details$value)) %>% set_colnames(details$ECIS_Parameters)
dbWriteTable(conn = db.conn, name = "Details", value = details,
row.names = FALSE, append = TRUE)
# Read data to table
wbs <- wb[names(wb) %>% setdiff(c("Details", "Comments"))] %>%
lapply(.%>% melt(id.vars = c("Time (hrs)"))) %>% ldply %>%
mutate(Well = sub("([A-H][0-9]{1,2}) ([ZRC])", "\\1", variable),
Param = sub("([A-H][0-9]{1,2}) ([ZRC])", "\\2", variable),
Freq = sub(".+ ([0-9]+) .+","\\1", .id))
wbs$Date <- Date
wbs$.id <- NULL
wbs$variable <- NULL
dbWriteTable(conn = db.conn, name = "Data", value = wbs,
row.names = FALSE, append = TRUE)
# Retrieve metadata
metadata <- list.files(path = "rawdata", pattern = paste0(Date,"_Metadata"), full.names = T) %>%
sapply(read.csv, header = F, simplify = F, colClasses = "character") %>% ldply %>% melt(id.vars = ".id") %>%
group_by(.id, variable) %>%
mutate(Well = paste0(LETTERS[1:8], sub("V([0-9]+)","\\1",variable)),
Metadata = sub(".+data_(.+).csv","\\1",.id),
Date = Date) %>% ungroup %>% dplyr::select(-.id, -variable) %>%
as.data.frame
dbWriteTable(conn = db.conn, name = "Metadata", value = metadata,
row.names = FALSE, append = TRUE)
# Read modelingdata to table
rba.file <- list.files(path = "rawdata", pattern = paste0(Date,"_MFT_[0-9]_RbA.csv"), full.names = T)
rbdata <- rba.file %>% read.table(., skip = 23, header = FALSE, sep = ",")
rb.id <- rba.file %>% read.table(., skip = 19, nrows = 2, header = FALSE,
sep = ",", stringsAsFactors = FALSE, strip.white = TRUE)
colnames(rbdata) <- c(rb.id[1, 1], paste(rb.id[2, 2:ncol(rbdata)], rb.id[1, 2:ncol(rbdata)]))
rbdata %<>% melt(id.var = "Time (hrs)") %>%
mutate(Well = sub("([A-H][0-9]{1,2}) (.+)", "\\1", variable),
Param = sub("([A-H][0-9]{1,2}) (.+)", "\\2", variable))
rbdata$Date <- Date
rbdata$variable <- NULL
dbWriteTable(conn = db.conn, name = "Rbmodel", value = rbdata,
row.names = FALSE, append = TRUE)
}
# wb.sm <- wbs %>% group_by(Param, Freq) %>% sample_frac(size = 0.05)
# wb.merge <- metadata %>% spread(Metadata, value) %>% left_join(wb.sm, .)
# head(wb.merge)
start <- Sys.time()
(sapply(xlsfiles, .%>% loadData2(db.conn = db)))
end <- Sys.time()
end-start
dbListTables(db)
dbListFields(db, "Marks")
dbGetQuery(db, 'SELECT * FROM Details')
dbDisconnect(db) # Close connection
|
# Scratch/debugging script for selectiveModel::selected_model_inference with
# binary-segmentation fits from binseginf. Interactive use only.
# NOTE(review): rm(list=ls()) wipes the workspace -- never source() this file
# from other code.
rm(list=ls())
n = 6
set.seed(10)
k <- 2
dat <- rnorm(n)
# Fit binary segmentation with k changepoints to the supplied vector.
fit_method <- function(x){binseginf::bsfs(x, numSteps = k)}
test_func <- selectiveModel::segment_difference
num_samp <- 2000
cores <- NA
# First scenario: seed 10*i with i = 5, two changepoints.
i = 5
set.seed(10*i)
dat <- rnorm(n)
tmp <- selected_model_inference(dat, fit_method = fit_method, test_func = test_func,
num_samp = num_samp, ignore_jump = 1,
cores = cores, verbose = F, param = list(burn_in = 2000, lapse = 1))
#########################################
# Second scenario: seed 99, a single changepoint.
rm(list=ls())
i = 99
print(i)
set.seed(i)
y <- rnorm(6)
fit_method <- function(x){binseginf::bsfs(x, numSteps = 1)}
test_func <- selectiveModel::segment_difference
num_samp <- 2000
cores <- NA
res <- selected_model_inference(y, fit_method = fit_method, test_func = test_func,
num_samp = num_samp, ignore_jump = 1,
cores = cores, verbose = F, param = list(burn_in = 2000, lapse = 1))
# sigma = NA
# ignore_jump = 1
# verbose = F
# param = list(burn_in = 2000, lapse = 1)
#
# n <- length(y)
# fit <- fit_method(y)
# polyhedra <- binseginf::polyhedra(fit)
# test_stat <- test_func(y, fit, jump = ignore_jump)
#
# #prepare sampler
# segments <- .segments(n, binseginf::jumps(fit), ignore_jump = ignore_jump)
# param <- .fill_in_arguments(param)
#
# burn_in = param$burn_in
# lapse = param$lapse
#
# num_col <- burn_in + num_samp*lapse
# y_mat <- matrix(NA, nrow = length(y), ncol = num_samp)
# seq_idx <- burn_in + (1:num_samp)*lapse
# prev_y <- y
#
# for(i in 1:513){
# print(paste0(i , " in ", num_col, ": ", round(prev_y[1],3)))
#
# next_y <- .hit_run_next_point_radial(prev_y, segments, polyhedra)
# if(i %in% seq_idx){
# y_mat[,which(seq_idx == i)] <- next_y
# }
#
# prev_y <- next_y
# }
#
# # # 514TH ONE
# # .hit_run_next_point_radial(prev_y, segments, polyhedra)
# y = prev_y
# tmp <- .sample_matrix_space(segments, 2, null = T)
# v <- tmp[,1]; w <- tmp[,2]
# x = 1
# .c_form_interval(polyhedra$gamma[x,], polyhedra$u[x], y, v, w)
#
# ##############
#
# load("../experiments/debugging_env.RData")
# c_form_interval(polyhedra$gamma[x,], polyhedra$u[x], y, v, w)
| /experiments/cpp_tester.R | no_license | linnykos/selectiveModel | R | false | false | 2,176 | r | rm(list=ls())
n = 6
set.seed(10)
k <- 2
dat <- rnorm(n)
fit_method <- function(x){binseginf::bsfs(x, numSteps = k)}
test_func <- selectiveModel::segment_difference
num_samp <- 2000
cores <- NA
i = 5
set.seed(10*i)
dat <- rnorm(n)
tmp <- selected_model_inference(dat, fit_method = fit_method, test_func = test_func,
num_samp = num_samp, ignore_jump = 1,
cores = cores, verbose = F, param = list(burn_in = 2000, lapse = 1))
#########################################
rm(list=ls())
i = 99
print(i)
set.seed(i)
y <- rnorm(6)
fit_method <- function(x){binseginf::bsfs(x, numSteps = 1)}
test_func <- selectiveModel::segment_difference
num_samp <- 2000
cores <- NA
res <- selected_model_inference(y, fit_method = fit_method, test_func = test_func,
num_samp = num_samp, ignore_jump = 1,
cores = cores, verbose = F, param = list(burn_in = 2000, lapse = 1))
# sigma = NA
# ignore_jump = 1
# verbose = F
# param = list(burn_in = 2000, lapse = 1)
#
# n <- length(y)
# fit <- fit_method(y)
# polyhedra <- binseginf::polyhedra(fit)
# test_stat <- test_func(y, fit, jump = ignore_jump)
#
# #prepare sampler
# segments <- .segments(n, binseginf::jumps(fit), ignore_jump = ignore_jump)
# param <- .fill_in_arguments(param)
#
# burn_in = param$burn_in
# lapse = param$lapse
#
# num_col <- burn_in + num_samp*lapse
# y_mat <- matrix(NA, nrow = length(y), ncol = num_samp)
# seq_idx <- burn_in + (1:num_samp)*lapse
# prev_y <- y
#
# for(i in 1:513){
# print(paste0(i , " in ", num_col, ": ", round(prev_y[1],3)))
#
# next_y <- .hit_run_next_point_radial(prev_y, segments, polyhedra)
# if(i %in% seq_idx){
# y_mat[,which(seq_idx == i)] <- next_y
# }
#
# prev_y <- next_y
# }
#
# # # 514TH ONE
# # .hit_run_next_point_radial(prev_y, segments, polyhedra)
# y = prev_y
# tmp <- .sample_matrix_space(segments, 2, null = T)
# v <- tmp[,1]; w <- tmp[,2]
# x = 1
# .c_form_interval(polyhedra$gamma[x,], polyhedra$u[x], y, v, w)
#
# ##############
#
# load("../experiments/debugging_env.RData")
# c_form_interval(polyhedra$gamma[x,], polyhedra$u[x], y, v, w)
|
% Please edit documentation in R/ecol.death.sim.R
\name{ecol.death.sim}
\alias{ecol.death.sim}
\title{A simulation of the death of two species with certain probabilities}
\usage{
ecol.death.sim(nr = 10, nc = 10, num.sp = c(50, 50), col.sp = c(1, 2), pch.sp = c(1,
2), col.die = 1, pch.die = 4, cex = 3, ...)
}
\arguments{
\item{nr, nc}{number of rows and columns of the field (plants grow on a
\code{nr} x \code{nc} grid)}
\item{num.sp}{numbers of individuals of the two plant species, respectively}
\item{col.sp, pch.sp}{colors and point symbols of the two species respectively}
\item{col.die, pch.die, cex}{the color, point symbol and magnification to
annotate the plant which dies (symbol default to be an `X')}
\item{\dots}{other arguments passed to \code{\link{plot}} to set up the plot}
}
\value{
a vector (factor) containing 1's and 2's, denoting the plants finally
survived
}
\description{
Suppose there are two plant species in a field: A and B. One of them will die
at each time and a new plant will grow in the place where the old plant died;
the species of the new plant depends on the proportions of two species: the
larger the proportion is, the greater the probability for this species to
come up will be.
}
\note{
\code{2 * ani.options('nmax')} image frames will actually be produced.
}
\examples{
oopt = ani.options(nmax = ifelse(interactive(), 50, 2), interval = 0.3)
par(ann = FALSE, mar = rep(0, 4))
ecol.death.sim()
## large scale simulation
ani.options(nmax = ifelse(interactive(), 1000, 2), interval = 0.02)
ecol.death.sim(col.sp = c(8, 2), pch.sp = c(20, 17))
ani.options(oopt)
}
\author{
Yihui Xie
}
\references{
This animation is motivated by a question raised from Jing Jiao,
a student in biology, to show the evolution of two species.
The original post is in the forum of the ``Capital of Statistics'':
\url{http://cos.name/cn/topic/14093} (in Chinese)
}
| /man/ecol.death.sim.Rd | no_license | dgrtwo/animation | R | false | false | 1,875 | rd | % Please edit documentation in R/ecol.death.sim.R
\name{ecol.death.sim}
\alias{ecol.death.sim}
\title{A simulation of the death of two species with certain probabilities}
\usage{
ecol.death.sim(nr = 10, nc = 10, num.sp = c(50, 50), col.sp = c(1, 2), pch.sp = c(1,
2), col.die = 1, pch.die = 4, cex = 3, ...)
}
\arguments{
\item{nr, nc}{number of rows and columns of the field (plants grow on a
\code{nr} x \code{nc} grid)}
\item{num.sp}{numbers of individuals of the two plant species, respectively}
\item{col.sp, pch.sp}{colors and point symbols of the two species respectively}
\item{col.die, pch.die, cex}{the color, point symbol and magnification to
annotate the plant which dies (symbol default to be an `X')}
\item{\dots}{other arguments passed to \code{\link{plot}} to set up the plot}
}
\value{
a vector (factor) containing 1's and 2's, denoting the plants finally
survived
}
\description{
Suppose there are two plant species in a field: A and B. One of them will die
at each time and a new plant will grow in the place where the old plant died;
the species of the new plant depends on the proportions of two species: the
larger the proportion is, the greater the probability for this species to
come up will be.
}
\note{
\code{2 * ani.options('nmax')} image frames will actually be produced.
}
\examples{
oopt = ani.options(nmax = ifelse(interactive(), 50, 2), interval = 0.3)
par(ann = FALSE, mar = rep(0, 4))
ecol.death.sim()
## large scale simulation
ani.options(nmax = ifelse(interactive(), 1000, 2), interval = 0.02)
ecol.death.sim(col.sp = c(8, 2), pch.sp = c(20, 17))
ani.options(oopt)
}
\author{
Yihui Xie
}
\references{
This animation is motivated by a question raised from Jing Jiao,
a student in biology, to show the evolution of two species.
The original post is in the forum of the ``Capital of Statistics'':
\url{http://cos.name/cn/topic/14093} (in Chinese)
}
|
# set of fonction to query model db and models files
# EXEMPLE
#
if(FALSE){
testFilter<-filterResults(idJob=17,dbOut='../data/base/dbModels.db')
testList<-getEnsembleListSpecies(resultsTable=testFilter,
modelsLoc='../data/models/')
testPresProb<-presProbSpecies(testList)
}
# Query the 'models' table produced by lebaMod/fun/computePending.r.
#
# If showFailed is TRUE, only failed models are returned; otherwise each
# non-NULL argument adds a filter on the corresponding column. Every argument
# accepts a single value or a vector.
#
# Args:
#   species, method, group, email, idJob, idRun: values matched with SQL IN.
#   predictors: pattern(s) matched against prdLoCorr with GLOB (AND-combined).
#   tss: numeric; rows with tss between min(tss) and max(tss) are kept.
#   showFailed: if TRUE, ignore the other filters and return failed models.
#   dbOut: path to the SQLite database file.
#
# Returns: a data.table (classed 'lebaResultsTable') with every column,
#          capped at 1000 rows, or NULL when the 'models' table is absent.
filterResults<-function(species=NULL,method=NULL,group=NULL,predictors=NULL,tss=NULL,idJob=NULL,idRun=NULL,email=NULL,showFailed=FALSE,dbOut){
  require(RSQLite)
  require(data.table)
  dbCon <- dbConnect(SQLite(), dbOut)
  # Fix: always release the connection, including the early NULL return below
  # (the original leaked the handle when the 'models' table did not exist).
  on.exit(dbDisconnect(dbCon), add = TRUE)
  if (!'models' %in% dbListTables(dbCon)) {
    NULL
  } else {
    if (showFailed) {
      # Fix: wrap in data.table so both branches return the same type.
      res <- data.table(dbGetQuery(dbCon,
        paste("SELECT * FROM models where fail=", as.integer(showFailed))))
    } else {
      # One WHERE clause per supplied filter; NULL entries drop out of c().
      clauses <- c(
        if (!is.null(method))
          paste0("method in ('", paste0(method, collapse = "','"), "')"),
        if (!is.null(species))
          paste0("species in ('", paste0(species, collapse = "','"), "')"),
        if (!is.null(group))
          # [group] -- bracket-quoted because GROUP is an SQL keyword
          paste0("[group] in ('", paste0(group, collapse = "','"), "')"),
        if (!is.null(tss))
          paste0("tss between ", min(tss), " AND ", max(tss)),
        if (!is.null(predictors))
          paste0("prdLoCorr glob '*", predictors, "*'", collapse = ' AND '),
        if (!is.null(idJob) && !identical(idJob, ''))
          paste0("idJob in ('", paste0(idJob, collapse = "','"), "')"),
        if (!is.null(idRun) && !identical(idRun, ''))
          paste0("idRun in ('", paste0(idRun, collapse = "','"), "')"),
        if (!is.null(email) && !identical(email, ''))
          paste0("email in ('", paste0(email, collapse = "','"), "')"),
        "fail=0"
      )
      sqlCmd <- paste("SELECT * FROM models WHERE",
                      paste(clauses, collapse = " AND "),
                      "limit 1000")
      res <- data.table(dbGetQuery(dbCon, sqlCmd))
    }
    class(res) <- c(class(res), 'lebaResultsTable')
    res
  }
}
# Delete the models listed in resultsTable: remove the serialized model files
# from modelsLoc and the matching rows from the 'models' table in dbOut.
#
# Args:
#   resultsTable: a 'lebaResultsTable' (see filterResults); its fileName and
#                 id columns identify what to delete.
#   dbOut: path to the SQLite database file.
#   modelsLoc: directory holding the serialized model files.
#
# Returns: NULL when the 'models' table is absent; otherwise called for its
#          file-system and database side effects.
modelDelete<-function(resultsTable,dbOut,modelsLoc){
  require(RSQLite)
  dbCon <- dbConnect(SQLite(), dbOut)
  # Always release the connection, including the early NULL return below.
  on.exit(dbDisconnect(dbCon), add = TRUE)
  if (!'models' %in% dbListTables(dbCon)) {
    NULL
  } else {
    stopifnot('lebaResultsTable' %in% class(resultsTable))
    fL <- resultsTable$fileName
    id <- resultsTable$id
    idSql <- paste0("(", paste(id, collapse = ','), ")", collapse = '')
    # The paste() wrapper around file.path() was redundant; removed.
    file.remove(file.path(modelsLoc, fL))
    dbGetQuery(dbCon, paste('DELETE from models where id in', idSql))
    # Bug fix: the original referenced the undefined `jobIdList` here, which
    # raised an error after deletion; report the number of deleted ids.
    message(length(id), " models deleted.")
  }
}
# Build an ensemble list from saved model files, one element per model run.
#
# Args:
#   resultsTable: a 'lebaResultsTable' as returned by filterResults(); only
#                 its fileName and species columns are used.
#   modelsLoc: directory containing the serialized models (e.g. data/models/).
#
# Returns: a list (classed 'lebaEnsembleListSpecies') of readRDS() contents,
#          named by species; a species with several runs appears several times.
#
# NOTE(review): uses foreach() %do%, so the foreach package must already be
# attached by the caller -- it is not loaded here.
getEnsembleListSpecies<-function(resultsTable,modelsLoc){
stopifnot('lebaResultsTable' %in% class(resultsTable))
fL<-resultsTable$fileName
spList<-resultsTable$species
# Read each model file sequentially (%do%).
mL<-foreach(f=fL)%do%{
readRDS(file.path(modelsLoc,f))
}
names(mL)<-spList
class(mL)<-c(class(mL),'lebaEnsembleListSpecies')
mL
}
# From a 'lebaEnsembleListSpecies', collect per-run presence-probability
# predictions, grouped by species.
#
# Args:
#   lebaEnsembleListSpecies: list produced by getEnsembleListSpecies(); each
#     element carries $lebaData with $dataPredictions, $idMod and $idRun.
#
# Returns: a list named by species; each element is itself a list of
#          data.tables of predictions, named by run id, with an idMod column.
#
# NOTE(review): registerDoMC(8) hard-codes 8 workers and doMC is
# Linux/macOS-only -- consider making the core count an argument.
presProbSpecies<-function(lebaEnsembleListSpecies){
require(doMC)
require(foreach)
registerDoMC(8)
stopifnot('lebaEnsembleListSpecies' %in% class(lebaEnsembleListSpecies))
# Names of the ensemble list are species identifiers (one entry per run).
spList<-lebaEnsembleListSpecies
spNames<-unique(names(spList))
# For each species, take the subset of runs belonging to it.
# NOTE(review): the logical index is built from spNames (unique) rather than
# names(spList); when a species has several runs the index is shorter than the
# list and recycles -- likely should be names(spList) %in% spN. Confirm.
pred<-foreach(spN=spNames)%do%{
modSubset<-spList[spNames %in% spN]
# For each model in the subset, extract its prediction table in parallel.
speciesData<-foreach(m=modSubset,.combine='c')%dopar%{
#message(m$lebaData$idMod,';',m$lebaData$idRun)
predSp<-data.table(m$lebaData$dataPredictions)
idMod<-m$lebaData$idMod
idRun<-m$lebaData$idRun
predSp[,idMod:=factor(idMod)]
predSp<-list(pred=predSp)
names(predSp)<-idRun
predSp
}
speciesData
}
names(pred)<-spNames
pred
}
# From a 'lebaEnsembleListSpecies', produce a summary table of median presence
# probabilities grouped by `varBy`, after filtering on spatial extent,
# elevation and year range.
#
# Args:
#   lebaEnsembleListSpecies: list produced by getEnsembleListSpecies().
#   varBy: column name to group by (default year column 'YYYY').
#   rangeX, rangeY: numeric length-2 vectors; spatial extent kept (%between%).
#   rangeDem: numeric length-2 vector; elevation range kept.
#   rangeYear: numeric length-2 vector; year range kept.
#
# Returns (invisibly, since the last expression is an assignment): a
#   data.table with one row per varBy level per run, carrying medPres plus
#   idSpeciesString/idMod/idRun factors, row-bound across species.
#
# NOTE(review): registerDoMC(8) hard-codes 8 workers; doMC is
# Linux/macOS-only.
presProbSpeciesByVar<-function(lebaEnsembleListSpecies,varBy='YYYY',rangeX,rangeY,rangeDem,rangeYear){
require(doMC)
require(foreach)
registerDoMC(8)
stopifnot('lebaEnsembleListSpecies' %in% class(lebaEnsembleListSpecies))
# Names of the ensemble list are species identifiers (one entry per run).
spList<-lebaEnsembleListSpecies
spNames<-unique(names(spList))
# For each species, take the subset of runs belonging to it.
# NOTE(review): same indexing caveat as presProbSpecies -- spNames %in% spN
# recycles when a species has several runs; likely should be
# names(spList) %in% spN. Confirm.
predMed<-foreach(spN=spNames,.combine='rbind')%do%{
modSubset<-spList[spNames %in% spN]
# For each model in the subset, filter its predictions and summarise.
speciesData<-foreach(m=modSubset,.combine='rbind')%dopar%{
#message(m$lebaData$idMod,';',m$lebaData$idRun)
pred<-data.table(m$lebaData$dataPredictions)
idMod<-m$lebaData$idMod
idRun<-m$lebaData$idRun
#idRunString<-paste(m$lebaData[c('species','method','group','idRun')],collapse=';')
idSpeciesString<-paste(m$lebaData[c('species')],collapse=';')
#setkey(pred,dem,YYYY,x,y)
# Successive filters on elevation, extent and year.
pred<-pred[dem %between% rangeDem]
pred<-pred[x %between% rangeX ]
pred<-pred[y %between% rangeY ]
pred<-pred[YYYY %between% rangeYear]
# Median presence probability per varBy level, tagged with run/model ids.
predMed<-pred[,list(medPres=median(PRES)),by=c(varBy)]
predMed[,idSpeciesString:=factor(idSpeciesString)]
predMed[,idMod:=factor(idMod)]
predMed[,idRun:=factor(idRun)]
setkeyv(predMed,c(varBy,'idRun','idMod','idSpeciesString','medPres'))
}
speciesData
}
}
# Draw a completely blank, axis-free plot whose only content is `infoText`,
# rendered near the top in bold serif. Used as a placeholder panel.
emptyPlotText <- function(infoText) {
  par(mar = rep(0, 4))
  plot(
    c(0, 1), c(0, 1),
    type = "n", ann = FALSE, bty = "n", xaxt = "n", yaxt = "n"
  )
  text(
    x = 0.34, y = 0.9, paste(infoText),
    cex = 1.5, col = "black", family = "serif", font = 2, adj = 0.5
  )
}
| /fun/plotProb.R | no_license | fxi/LebaMod | R | false | false | 6,505 | r | # set of fonction to query model db and models files
# EXEMPLE
#
if(FALSE){
testFilter<-filterResults(idJob=17,dbOut='../data/base/dbModels.db')
testList<-getEnsembleListSpecies(resultsTable=testFilter,
modelsLoc='../data/models/')
testPresProb<-presProbSpecies(testList)
}
# Query the 'models' table produced by lebaMod/fun/computePending.r.
#
# If showFailed is TRUE, only failed models are returned; otherwise each
# non-NULL argument adds a filter on the corresponding column. Every argument
# accepts a single value or a vector.
#
# Args:
#   species, method, group, email, idJob, idRun: values matched with SQL IN.
#   predictors: pattern(s) matched against prdLoCorr with GLOB (AND-combined).
#   tss: numeric; rows with tss between min(tss) and max(tss) are kept.
#   showFailed: if TRUE, ignore the other filters and return failed models.
#   dbOut: path to the SQLite database file.
#
# Returns: a data.table (classed 'lebaResultsTable') with every column,
#          capped at 1000 rows, or NULL when the 'models' table is absent.
filterResults<-function(species=NULL,method=NULL,group=NULL,predictors=NULL,tss=NULL,idJob=NULL,idRun=NULL,email=NULL,showFailed=FALSE,dbOut){
  require(RSQLite)
  require(data.table)
  dbCon <- dbConnect(SQLite(), dbOut)
  # Fix: always release the connection, including the early NULL return below
  # (the original leaked the handle when the 'models' table did not exist).
  on.exit(dbDisconnect(dbCon), add = TRUE)
  if (!'models' %in% dbListTables(dbCon)) {
    NULL
  } else {
    if (showFailed) {
      # Fix: wrap in data.table so both branches return the same type.
      res <- data.table(dbGetQuery(dbCon,
        paste("SELECT * FROM models where fail=", as.integer(showFailed))))
    } else {
      # One WHERE clause per supplied filter; NULL entries drop out of c().
      clauses <- c(
        if (!is.null(method))
          paste0("method in ('", paste0(method, collapse = "','"), "')"),
        if (!is.null(species))
          paste0("species in ('", paste0(species, collapse = "','"), "')"),
        if (!is.null(group))
          # [group] -- bracket-quoted because GROUP is an SQL keyword
          paste0("[group] in ('", paste0(group, collapse = "','"), "')"),
        if (!is.null(tss))
          paste0("tss between ", min(tss), " AND ", max(tss)),
        if (!is.null(predictors))
          paste0("prdLoCorr glob '*", predictors, "*'", collapse = ' AND '),
        if (!is.null(idJob) && !identical(idJob, ''))
          paste0("idJob in ('", paste0(idJob, collapse = "','"), "')"),
        if (!is.null(idRun) && !identical(idRun, ''))
          paste0("idRun in ('", paste0(idRun, collapse = "','"), "')"),
        if (!is.null(email) && !identical(email, ''))
          paste0("email in ('", paste0(email, collapse = "','"), "')"),
        "fail=0"
      )
      sqlCmd <- paste("SELECT * FROM models WHERE",
                      paste(clauses, collapse = " AND "),
                      "limit 1000")
      res <- data.table(dbGetQuery(dbCon, sqlCmd))
    }
    class(res) <- c(class(res), 'lebaResultsTable')
    res
  }
}
# Delete the models listed in resultsTable: remove the serialized model files
# from modelsLoc and the matching rows from the 'models' table in dbOut.
#
# Args:
#   resultsTable: a 'lebaResultsTable' (see filterResults); its fileName and
#                 id columns identify what to delete.
#   dbOut: path to the SQLite database file.
#   modelsLoc: directory holding the serialized model files.
#
# Returns: NULL when the 'models' table is absent; otherwise called for its
#          file-system and database side effects.
modelDelete<-function(resultsTable,dbOut,modelsLoc){
  require(RSQLite)
  dbCon <- dbConnect(SQLite(), dbOut)
  # Always release the connection, including the early NULL return below.
  on.exit(dbDisconnect(dbCon), add = TRUE)
  if (!'models' %in% dbListTables(dbCon)) {
    NULL
  } else {
    stopifnot('lebaResultsTable' %in% class(resultsTable))
    fL <- resultsTable$fileName
    id <- resultsTable$id
    idSql <- paste0("(", paste(id, collapse = ','), ")", collapse = '')
    # The paste() wrapper around file.path() was redundant; removed.
    file.remove(file.path(modelsLoc, fL))
    dbGetQuery(dbCon, paste('DELETE from models where id in', idSql))
    # Bug fix: the original referenced the undefined `jobIdList` here, which
    # raised an error after deletion; report the number of deleted ids.
    message(length(id), " models deleted.")
  }
}
getEnsembleListSpecies<-function(resultsTable,modelsLoc){
# produce an ensemble list by species from folder data/models :
# from a result table produced by function 'filterResults' get file content and
# append to a list named by species.
stopifnot('lebaResultsTable' %in% class(resultsTable))
fL<-resultsTable$fileName
spList<-resultsTable$species
mL<-foreach(f=fL)%do%{
readRDS(file.path(modelsLoc,f))
}
names(mL)<-spList
class(mL)<-c(class(mL),'lebaEnsembleListSpecies')
mL
}
presProbSpecies <- function(lebaEnsembleListSpecies) {
  # From a lebaEnsembleListSpecies, produce a per-species list of
  # presence-probability prediction tables, one sub-entry per run.
  #
  # lebaEnsembleListSpecies: list of model objects whose names are species
  #   (names can repeat — several runs/models per species).
  #
  # Returns a list named by unique species; each element concatenates the
  # per-run prediction data.tables (named by idRun) of that species.
  require(doMC)
  require(foreach)
  registerDoMC(8)  # hard-coded worker count; adjust to the host if needed
  stopifnot('lebaEnsembleListSpecies' %in% class(lebaEnsembleListSpecies))
  spList <- lebaEnsembleListSpecies
  spNames <- unique(names(spList))
  # For each species, gather the subset of models belonging to it.
  pred <- foreach(spN = spNames) %do% {
    # BUG FIX: subset on the full (possibly duplicated) names vector.
    # The original used 'spNames %in% spN', whose logical index has length
    # length(unique(names)) and was silently recycled over spList whenever
    # a species had more than one model.
    modSubset <- spList[names(spList) %in% spN]
    # For each model in the group, extract its prediction table.
    speciesData <- foreach(m = modSubset, .combine = 'c') %dopar% {
      predSp <- data.table(m$lebaData$dataPredictions)
      idMod <- m$lebaData$idMod
      idRun <- m$lebaData$idRun
      predSp[, idMod := factor(idMod)]
      # Wrap in a one-element list named by the run id so .combine='c'
      # yields a flat, run-named list per species.
      predSp <- list(pred = predSp)
      names(predSp) <- idRun
      predSp
    }
    speciesData
  }
  names(pred) <- spNames
  pred
}
presProbSpeciesByVar <- function(lebaEnsembleListSpecies, varBy = 'YYYY',
                                 rangeX, rangeY, rangeDem, rangeYear) {
  # From a lebaEnsembleListSpecies, produce a summary table of median
  # presence probabilities grouped by 'varBy'. Predictions are filtered by
  # spatial extent (rangeX/rangeY), elevation (rangeDem) and years.
  #
  # lebaEnsembleListSpecies: list of model objects named by species
  #   (names can repeat across runs of the same species).
  # varBy:     column of the prediction table to summarise by.
  # rangeX/rangeY/rangeDem/rangeYear: length-2 numeric filters used with
  #   data.table's %between%.
  #
  # Returns one data.table stacking the per-run medians for all species.
  require(doMC)
  require(foreach)
  registerDoMC(8)  # hard-coded worker count; adjust to the host if needed
  stopifnot('lebaEnsembleListSpecies' %in% class(lebaEnsembleListSpecies))
  spList <- lebaEnsembleListSpecies
  spNames <- unique(names(spList))
  predMed <- foreach(spN = spNames, .combine = 'rbind') %do% {
    # BUG FIX: subset on the full names vector. The original
    # 'spList[spNames %in% spN]' built a logical index of length
    # length(unique(names)) that was recycled over spList whenever a
    # species had more than one model.
    modSubset <- spList[names(spList) %in% spN]
    # For each model of this species, filter and summarise its predictions.
    speciesData <- foreach(m = modSubset, .combine = 'rbind') %dopar% {
      pred <- data.table(m$lebaData$dataPredictions)
      idMod <- m$lebaData$idMod
      idRun <- m$lebaData$idRun
      idSpeciesString <- paste(m$lebaData[c('species')], collapse = ';')
      # Apply the elevation / extent / year filters.
      pred <- pred[dem %between% rangeDem]
      pred <- pred[x %between% rangeX]
      pred <- pred[y %between% rangeY]
      pred <- pred[YYYY %between% rangeYear]
      predMed <- pred[, list(medPres = median(PRES)), by = c(varBy)]
      predMed[, idSpeciesString := factor(idSpeciesString)]
      predMed[, idMod := factor(idMod)]
      predMed[, idRun := factor(idRun)]
      # setkeyv returns the keyed data.table, which becomes this
      # iteration's value for .combine='rbind'.
      setkeyv(predMed, c(varBy, 'idRun', 'idMod', 'idSpeciesString', 'medPres'))
    }
    speciesData
  }
  # Return the combined summary explicitly (the original relied on the
  # invisible value of the 'predMed <- foreach(...)' assignment).
  predMed
}
emptyPlotText <- function(infoText) {
  # Placeholder panel: draw an empty, axis-free plot that shows only
  # 'infoText', e.g. when there is no data for a given figure slot.
  label <- paste(infoText)
  par(mar = rep(0, 4))
  plot(c(0, 1), c(0, 1),
       ann = F, bty = 'n', type = 'n', xaxt = 'n', yaxt = 'n')
  text(x = 0.34, y = 0.9, label,
       cex = 1.5, col = "black", family = "serif", font = 2, adj = 0.5)
}
|
library(ggplot2)
library(plyr)
############################################################################
### Plot simulation grid spatial locations
############################################################################
locs <- readRDS("Data/sim_locs.rds")
grid <- subset(locs, grid == 1)
gp <- ggplot(grid, aes(x = x, y = y)) +
  geom_point() +
  theme_bw() +
  labs(x = "", y = "")
gp
ggsave("Plots/grid.pdf", height = 4, width = 5)
############################################################################
### Plot spatial correlation curves
############################################################################
library(geoR) # used to simulate gaussian random fields (was loaded twice)
## first lets look at how covariance functions are specified in geoR
cov.f <- function(x, ...) {
  cov.spatial(x, ...)
}
# plot exponential covariance functions for several range parameters
# (the original repeated the same seq/cov.f/data.frame block three times)
x <- seq(0, 1, length = 1000)
ranges <- c(0.1, 0.2, 0.3)
cov_df <- do.call(rbind, lapply(ranges, function(r) {
  data.frame(x = x,
             y = cov.f(x, cov.model = "exponential", cov.pars = c(1, r)),
             range = r)
}))
cov_df$range <- factor(cov_df$range)
gp <- ggplot(cov_df, aes(x = x, y = y, group = range, linetype = range, color = range)) +
  geom_line() +
  theme_bw() +
  scale_color_manual(values = c("cornflowerblue", "darkolivegreen3", "firebrick1")) +
  labs(x = "", y = "correlation") +
  guides(color = guide_legend('range'), linetype = guide_legend('range'))
gp
# ggsave("Plots/exp_corr_funs.pdf", width = 8, height = 5)
############################################################################
### Plot results of covariance function estimates for different levels of spatial dependence and different spatial weights
############################################################################
### Read in sim results
all_dat <- readRDS("Data/sim_res_grid.rds")
# NOTE(review): se is computed as sd/50 while n = 100; confirm the intended
# standard-error denominator (sd/sqrt(100) would be sd/10).
res_means <- ddply(subset(all_dat, L2 < 0.55), .(dep, weight), summarize, tmean = mean(L2), se = sd(L2)/50, n = 100 )
res_means <- subset(res_means, weight != 4)
res_ind <- subset(res_means, dep == 0.001)
# dep == 0.001 encodes the spatially-independent case; relabel it "ind".
res_means$dep <- factor(res_means$dep, levels = c(0.100, 0.200, 0.300, 0.001), labels = c("0.1", "0.2", "0.3", "ind"))
# Define the top and bottom of the errorbars
limits <- aes(ymax = tmean + 2*se, ymin = tmean - 2*se)
gp <- ggplot(res_means, aes(x = weight, y = tmean, group = dep, color = dep, linetype = dep)) +
  geom_line() + geom_errorbar(limits, width = 0.05, linetype = 1, color = "black") +
  scale_color_manual(values = c("cornflowerblue", "darkolivegreen3", "firebrick1", "black")) +
  xlim(c(-0.05, 1.05)) +
  theme_bw() +
  labs(x = "weight parameter, p", y = "mean integrated squared error", color = "spatial\ndependence\n(range)", linetype = "spatial\ndependence\n(range)")
gp
# ggsave("Plots/MSE_trends.pdf", width = 8, height = 5)
############################################################################
###
############################################################################
all_dat <- readRDS("Data/sim_res_india.rds")
| /Slide-Presentation/Images-ordinary-kriging/weighted_cov_est_plots.R | no_license | dan410/Research-Writing | R | false | false | 3,255 | r | library(ggplot2)
library(plyr)
############################################################################
### Plot simulation grid spatial locations
############################################################################
locs <- readRDS("Data/sim_locs.rds")
grid <- subset(locs, grid == 1)
gp <- ggplot(grid, aes(x = x, y = y)) +
  geom_point() +
  theme_bw() +
  labs(x = "", y = "")
gp
ggsave("Plots/grid.pdf", height = 4, width = 5)
############################################################################
### Plot spatial correlation curves
############################################################################
library(geoR) # used to simulate gaussian random fields (was loaded twice)
## first lets look at how covariance functions are specified in geoR
cov.f <- function(x, ...) {
  cov.spatial(x, ...)
}
# plot exponential covariance functions for several range parameters
# (the original repeated the same seq/cov.f/data.frame block three times)
x <- seq(0, 1, length = 1000)
ranges <- c(0.1, 0.2, 0.3)
cov_df <- do.call(rbind, lapply(ranges, function(r) {
  data.frame(x = x,
             y = cov.f(x, cov.model = "exponential", cov.pars = c(1, r)),
             range = r)
}))
cov_df$range <- factor(cov_df$range)
gp <- ggplot(cov_df, aes(x = x, y = y, group = range, linetype = range, color = range)) +
  geom_line() +
  theme_bw() +
  scale_color_manual(values = c("cornflowerblue", "darkolivegreen3", "firebrick1")) +
  labs(x = "", y = "correlation") +
  guides(color = guide_legend('range'), linetype = guide_legend('range'))
gp
# ggsave("Plots/exp_corr_funs.pdf", width = 8, height = 5)
############################################################################
### Plot results of covariance function estimates for different levels of spatial dependence and different spatial weights
############################################################################
### Read in sim results
all_dat <- readRDS("Data/sim_res_grid.rds")
# NOTE(review): se is computed as sd/50 while n = 100; confirm the intended
# standard-error denominator (sd/sqrt(100) would be sd/10).
res_means <- ddply(subset(all_dat, L2 < 0.55), .(dep, weight), summarize, tmean = mean(L2), se = sd(L2)/50, n = 100 )
res_means <- subset(res_means, weight != 4)
res_ind <- subset(res_means, dep == 0.001)
# dep == 0.001 encodes the spatially-independent case; relabel it "ind".
res_means$dep <- factor(res_means$dep, levels = c(0.100, 0.200, 0.300, 0.001), labels = c("0.1", "0.2", "0.3", "ind"))
# Define the top and bottom of the errorbars
limits <- aes(ymax = tmean + 2*se, ymin = tmean - 2*se)
gp <- ggplot(res_means, aes(x = weight, y = tmean, group = dep, color = dep, linetype = dep)) +
  geom_line() + geom_errorbar(limits, width = 0.05, linetype = 1, color = "black") +
  scale_color_manual(values = c("cornflowerblue", "darkolivegreen3", "firebrick1", "black")) +
  xlim(c(-0.05, 1.05)) +
  theme_bw() +
  labs(x = "weight parameter, p", y = "mean integrated squared error", color = "spatial\ndependence\n(range)", linetype = "spatial\ndependence\n(range)")
gp
# ggsave("Plots/MSE_trends.pdf", width = 8, height = 5)
############################################################################
###
############################################################################
all_dat <- readRDS("Data/sim_res_india.rds")
|
# Process UK Biobank HES registry and assessment-centre data into one
# merged phenotype table (hesin_registry_assess_cent_all_v2.csv).
# load libraries
# install.packages("survival")
library(survival)
library(ggplot2)
library(ggrepel)
# identify the data path
# NOTE(review): the second assignment below silently overwrites the first;
# only the last 'path' is used. Keep the one matching your machine.
path <- "C:/Jiwoo Lee/Myocardial Infarction Research Project 2017/"
path <- "/Users/andreaganna/Documents/Work/Post_doc/jiwoo/"
##################################################
# Process registry data and phenotype data
##################################################
# ID, ICD10, and ICD10 date from registry data
total = read.table(file = paste0(path,'hesin_registry.tsv'), sep = '\t', header = TRUE, stringsAsFactors = FALSE, na.strings = "")
# keep columns 1 (eid), 11 (diag_icd10) and 23 (epistart)
total = total[, c(1, 11, 23)]
# truncate ICD10 codes to their 3-character category
# (substr with start = 0 behaves the same as start = 1 in R)
icd10 = substr(total$diag_icd10, 0, 3)
total = cbind(total, icd10)
# reorder to: eid, truncated icd10, episode date
total = total[, c(1, 4, 3)]
write.table(total, file = paste0(path,'hesin_registry_new.tsv'), sep = '\t', row.names=F, quote=F)
rm(icd10)
# plot distribution of ICD10 codes to find timeframe for study
plot(table(total$epistart), xlab = "ICD Date", ylab = "Frequency")
# January 1998 to April 2015
# get and clean assessment center data
# NOTE(review): 'out4.Rdata' is loaded from the working directory rather
# than from 'path' like the other files — confirm this is intended.
load('out4.Rdata')
# sex (f31), assessment center visit date (f53), assessment center visit age (f21003), assessment center location (f54), date of myocardial infarction (f42000), date of stroke (f42006), systolic blood pressure (f4080), diastolic blood pressure (f4079), body mass index (f21001), and smoking status (f20116)
bdE4_new = bdE4[, c("f.eid", "f.31.0.0", "f.53.0.0", "f.21003.0.0", "f.54.0.0", "f.42000.0.0", "f.42006.0.0", "f.4080.0.0", "f.4079.0.0", "f.21001.0.0", "f.20116.0.0")]
write.table(bdE4_new, file='assess_cent_all.tsv',sep='\t', row.names=F, quote=F)
rm(bdE4, bdE4_new)
# merge registry data and assessment center data
reg = read.table(file = paste0(path,'hesin_registry_new.tsv'), sep = '\t', header = TRUE, stringsAsFactors = FALSE)
ac = read.table(file = paste0(path,'assess_cent_all.tsv'), sep = '\t', header = TRUE, stringsAsFactors = FALSE)
load(paste0(path,'dU.Rdata'))
ac.new = ac[, c("f.eid", "f.21003.0.0", "f.42000.0.0", "f.42006.0.0", "f.4080.0.0", "f.4079.0.0")]
# left-join the dU table (presumably per-person covariates; verify) onto
# the assessment-centre subset, then right-join registry rows onto it.
ac.all = merge(ac.new, dU, by.x = "f.eid", by.y = "eid", all.x = TRUE)
data = merge(reg, ac.all, by.x = "eid", by.y = "f.eid", all.y = TRUE)
colnames(data) = c("eid", "icd10", "icd10_date", "ac_age", "mi_date", "stroke_date", "sbp", "dbp", "birth_date", "sex", "death", "death_date", "ac_date", "ac_location", "bmi", "bp", "smoke", "age")
data = data[, c("eid", "sex", "age", "birth_date", "death", "death_date", "bp", "sbp", "dbp", "bmi", "smoke", "mi_date", "stroke_date", "icd10", "icd10_date", "ac_date", "ac_location", "ac_age")]
write.table(data, file = paste0(path,'hesin_registry_assess_cent_all_v2.csv'), sep = '\t', row.names=F, quote=F)
rm(reg, ac, ac.new, ac.all, data)
| /process_hesin.R | no_license | andgan/MI_project | R | false | false | 2,701 | r |
# Process UK Biobank HES registry and assessment-centre data into one
# merged phenotype table (hesin_registry_assess_cent_all_v2.csv).
# load libraries
# install.packages("survival")
library(survival)
library(ggplot2)
library(ggrepel)
# identify the data path
# NOTE(review): the second assignment below silently overwrites the first;
# only the last 'path' is used. Keep the one matching your machine.
path <- "C:/Jiwoo Lee/Myocardial Infarction Research Project 2017/"
path <- "/Users/andreaganna/Documents/Work/Post_doc/jiwoo/"
##################################################
# Process registry data and phenotype data
##################################################
# ID, ICD10, and ICD10 date from registry data
total = read.table(file = paste0(path,'hesin_registry.tsv'), sep = '\t', header = TRUE, stringsAsFactors = FALSE, na.strings = "")
# keep columns 1 (eid), 11 (diag_icd10) and 23 (epistart)
total = total[, c(1, 11, 23)]
# truncate ICD10 codes to their 3-character category
# (substr with start = 0 behaves the same as start = 1 in R)
icd10 = substr(total$diag_icd10, 0, 3)
total = cbind(total, icd10)
# reorder to: eid, truncated icd10, episode date
total = total[, c(1, 4, 3)]
write.table(total, file = paste0(path,'hesin_registry_new.tsv'), sep = '\t', row.names=F, quote=F)
rm(icd10)
# plot distribution of ICD10 codes to find timeframe for study
plot(table(total$epistart), xlab = "ICD Date", ylab = "Frequency")
# January 1998 to April 2015
# get and clean assessment center data
# NOTE(review): 'out4.Rdata' is loaded from the working directory rather
# than from 'path' like the other files — confirm this is intended.
load('out4.Rdata')
# sex (f31), assessment center visit date (f53), assessment center visit age (f21003), assessment center location (f54), date of myocardial infarction (f42000), date of stroke (f42006), systolic blood pressure (f4080), diastolic blood pressure (f4079), body mass index (f21001), and smoking status (f20116)
bdE4_new = bdE4[, c("f.eid", "f.31.0.0", "f.53.0.0", "f.21003.0.0", "f.54.0.0", "f.42000.0.0", "f.42006.0.0", "f.4080.0.0", "f.4079.0.0", "f.21001.0.0", "f.20116.0.0")]
write.table(bdE4_new, file='assess_cent_all.tsv',sep='\t', row.names=F, quote=F)
rm(bdE4, bdE4_new)
# merge registry data and assessment center data
reg = read.table(file = paste0(path,'hesin_registry_new.tsv'), sep = '\t', header = TRUE, stringsAsFactors = FALSE)
ac = read.table(file = paste0(path,'assess_cent_all.tsv'), sep = '\t', header = TRUE, stringsAsFactors = FALSE)
load(paste0(path,'dU.Rdata'))
ac.new = ac[, c("f.eid", "f.21003.0.0", "f.42000.0.0", "f.42006.0.0", "f.4080.0.0", "f.4079.0.0")]
# left-join the dU table (presumably per-person covariates; verify) onto
# the assessment-centre subset, then right-join registry rows onto it.
ac.all = merge(ac.new, dU, by.x = "f.eid", by.y = "eid", all.x = TRUE)
data = merge(reg, ac.all, by.x = "eid", by.y = "f.eid", all.y = TRUE)
colnames(data) = c("eid", "icd10", "icd10_date", "ac_age", "mi_date", "stroke_date", "sbp", "dbp", "birth_date", "sex", "death", "death_date", "ac_date", "ac_location", "bmi", "bp", "smoke", "age")
data = data[, c("eid", "sex", "age", "birth_date", "death", "death_date", "bp", "sbp", "dbp", "bmi", "smoke", "mi_date", "stroke_date", "icd10", "icd10_date", "ac_date", "ac_location", "ac_age")]
write.table(data, file = paste0(path,'hesin_registry_assess_cent_all_v2.csv'), sep = '\t', row.names=F, quote=F)
rm(reg, ac, ac.new, ac.all, data)
|
# Unit tests for bed_jaccard(): Jaccard statistic between two interval sets.
# Shared fixtures: two small single-chromosome interval tables.
x <- tibble::tribble(
~chrom, ~start, ~end,
"chr1", 10, 20,
"chr1", 30, 40
)
y <- tibble::tribble(
~chrom, ~start, ~end,
"chr1", 15, 20
)
# 5 bp intersection over 20 bp union -> 0.25.
test_that("jaccard coeff is calculated correctly", {
res <- bed_jaccard(x, y)
expect_equal(res$jaccard, 0.25)
})
# Seeded random intervals keep this expectation reproducible.
test_that("jaccard coeff is calc'd for large data sets", {
genome <- read_genome(valr_example("hg19.chrom.sizes.gz"))
x <- bed_random(genome, n = 1e5, seed = 10000)
y <- bed_random(genome, n = 1e5, seed = 20000)
res <- bed_jaccard(x, y)
expect_equal(round(res$jaccard, 3), 0.016)
})
# Grouped inputs should yield one result row per chromosome (24 in hg19).
test_that("jaccard with grouped inputs are calculated", {
genome <- read_genome(valr_example("hg19.chrom.sizes.gz"))
x <- bed_random(genome, n = 1e5, seed = 10000)
y <- bed_random(genome, n = 1e5, seed = 20000)
res <- bed_jaccard(
group_by(x, chrom),
group_by(y, chrom)
)
expect_equal(nrow(res), 24)
expect_true("chrom" %in% names(res))
})
# from https://github.com/arq5x/bedtools2/blob/master/test/jaccard/test-jaccard.sh
# Jaccard is symmetric: bed_jaccard(x, y) == bed_jaccard(y, x).
test_that("Test symmetry", {
res <- bed_jaccard(x, y)
res2 <- bed_jaccard(y, x)
expect_equal(res$jaccard, res2$jaccard)
})
# Strand column must be ignored; expectations mirror the bedtools2 suite.
test_that("Test jaccard with mixed strand files", {
a <- tibble::tribble(
~chrom, ~start, ~end, ~name, ~score, ~strand,
"chr1", 10L, 50L, "a1f", 2L, "+",
"chr1", 20L, 60L, "b1r", 4L, "-",
"chr1", 25L, 70L, "c1q", 8L, ".",
"chr1", 30L, 75L, "d1q", 16L, ".",
"chr1", 40L, 80L, "e1f", 32L, "+",
"chr1", 45L, 90L, "f1r", 64L, "-",
"chr2", 10L, 50L, "a2q", 2L, ".",
"chr2", 20L, 40L, "b2f", 4L, "+",
"chr2", 25L, 50L, "c2r", 8L, "-",
"chr2", 30L, 60L, "d2f", 16L, "+",
"chr2", 35L, 65L, "e2q", 32L, ".",
"chr2", 39L, 80L, "f2r", 64L, "-"
)
b <- tibble::tribble(
~chrom, ~start, ~end, ~name, ~score, ~strand,
"chr1", 10L, 50L, "2a1r", 2L, "-",
"chr1", 40L, 70L, "2b1q", 4L, ".",
"chr1", 60L, 100L, "2c1f", 8L, "+",
"chr2", 15L, 40L, "2d2f", 16L, "+",
"chr2", 30L, 100L, "2e2r", 32L, "-"
)
res <- bed_jaccard(a, b)
expect_equal(res$len_i, 145)
expect_equal(res$len_u, 325)
expect_equal(round(res$jaccard, 5), round(0.8055556, 5))
expect_equal(res$n, 2)
})
| /tests/testthat/test_jaccard.r | permissive | rnabioco/valr | R | false | false | 2,186 | r | x <- tibble::tribble(
~chrom, ~start, ~end,
"chr1", 10, 20,
"chr1", 30, 40
)
# Second shared fixture for the bed_jaccard() tests below.
y <- tibble::tribble(
~chrom, ~start, ~end,
"chr1", 15, 20
)
# 5 bp intersection over 20 bp union -> 0.25.
test_that("jaccard coeff is calculated correctly", {
res <- bed_jaccard(x, y)
expect_equal(res$jaccard, 0.25)
})
# Seeded random intervals keep this expectation reproducible.
test_that("jaccard coeff is calc'd for large data sets", {
genome <- read_genome(valr_example("hg19.chrom.sizes.gz"))
x <- bed_random(genome, n = 1e5, seed = 10000)
y <- bed_random(genome, n = 1e5, seed = 20000)
res <- bed_jaccard(x, y)
expect_equal(round(res$jaccard, 3), 0.016)
})
# Grouped inputs should yield one result row per chromosome (24 in hg19).
test_that("jaccard with grouped inputs are calculated", {
genome <- read_genome(valr_example("hg19.chrom.sizes.gz"))
x <- bed_random(genome, n = 1e5, seed = 10000)
y <- bed_random(genome, n = 1e5, seed = 20000)
res <- bed_jaccard(
group_by(x, chrom),
group_by(y, chrom)
)
expect_equal(nrow(res), 24)
expect_true("chrom" %in% names(res))
})
# from https://github.com/arq5x/bedtools2/blob/master/test/jaccard/test-jaccard.sh
# Jaccard is symmetric: bed_jaccard(x, y) == bed_jaccard(y, x).
test_that("Test symmetry", {
res <- bed_jaccard(x, y)
res2 <- bed_jaccard(y, x)
expect_equal(res$jaccard, res2$jaccard)
})
# Strand column must be ignored; expectations mirror the bedtools2 suite.
test_that("Test jaccard with mixed strand files", {
a <- tibble::tribble(
~chrom, ~start, ~end, ~name, ~score, ~strand,
"chr1", 10L, 50L, "a1f", 2L, "+",
"chr1", 20L, 60L, "b1r", 4L, "-",
"chr1", 25L, 70L, "c1q", 8L, ".",
"chr1", 30L, 75L, "d1q", 16L, ".",
"chr1", 40L, 80L, "e1f", 32L, "+",
"chr1", 45L, 90L, "f1r", 64L, "-",
"chr2", 10L, 50L, "a2q", 2L, ".",
"chr2", 20L, 40L, "b2f", 4L, "+",
"chr2", 25L, 50L, "c2r", 8L, "-",
"chr2", 30L, 60L, "d2f", 16L, "+",
"chr2", 35L, 65L, "e2q", 32L, ".",
"chr2", 39L, 80L, "f2r", 64L, "-"
)
b <- tibble::tribble(
~chrom, ~start, ~end, ~name, ~score, ~strand,
"chr1", 10L, 50L, "2a1r", 2L, "-",
"chr1", 40L, 70L, "2b1q", 4L, ".",
"chr1", 60L, 100L, "2c1f", 8L, "+",
"chr2", 15L, 40L, "2d2f", 16L, "+",
"chr2", 30L, 100L, "2e2r", 32L, "-"
)
res <- bed_jaccard(a, b)
expect_equal(res$len_i, 145)
expect_equal(res$len_u, 325)
expect_equal(round(res$jaccard, 5), round(0.8055556, 5))
expect_equal(res$n, 2)
})
# Build Neural Network for classification using neuralnet library.
# End-to-end script: load data, encode categoricals, scale, split 70:30,
# fit a 1-hidden-layer network, and evaluate accuracy on the test split.
# NOTE(review): rm(list=ls()) and a machine-specific setwd() make this
# script non-portable; run it in a fresh session from its own directory.
rm(list=ls(all=TRUE))
# Set the working directory
setwd("C:/Users/gmanish/Dropbox/latest/openminds/slides/MachineLearning/7.ANNs/")
# Importing "data.csv" files's data into R dataframe using read.csv function.
data = read.csv(file="data.csv", header=TRUE, sep=",")
# Understand the structure the summary of the data using str and summary R commands
str(data)
summary(data)
# Using subset remove 'ID' and 'ZIP.Code' columns from the data
data = subset(data, select = -c(ID,ZIP.Code))
# Convert all the variables to appropriate type
# To numeric using as.numeric()
# To categoical using as.factor()
data$Education = as.factor(data$Education)
# R NN library takes only numeric attribues as input
# Convert all categorical attributes to numeric using appropriate technique. Hint: dummies
# Convert "Education" categorical attribute to numeric using dummy function in dummies R library
# Drop actual Education attribute from orginal data set
# Add created dummy Education variables to orginal data set
library(dummies)
education = dummy(data$Education)
data = subset(data, select=-c(Education))
data = cbind(data, education)
rm(education)
# Separate Target Variable and Independent Variables.
# In this case "Personal.Loan" is a target variable and all others are independent variable.
target_Variable = data$Personal.Loan
independent_Variables = subset(data, select = -c(Personal.Loan))
# Standardization the independent variables using decostand funcion in vegan R library
library(vegan)
# Note: To standardize the data using 'Range' method
independent_Variables = decostand(independent_Variables,"range")
data = data.frame(independent_Variables, Personal.Loan = target_Variable)
rm(independent_Variables, target_Variable)
# Use set.seed to get same test and train data
set.seed(123)
# Prepare train and test data in 70:30 ratio
num_Records = nrow(data)
# to take a random sample of 70% of the records for train data
train_Index = sample(1:num_Records, round(num_Records * 0.7, digits = 0))
train_Data = data[train_Index,]
test_Data = data[-train_Index,]
rm(train_Index, num_Records, data)
# See data distribution in response variable in both Train and Test data:
table(train_Data$Personal.Loan)
table(test_Data$Personal.Loan)
# Load neuralnet R library
library(neuralnet)
# Build a Neural Network having 1 hidden layer with 2 nodes
# (linear.output = F applies the activation on the output node, i.e.
# classification rather than regression)
set.seed(1234)
nn = neuralnet(Personal.Loan ~ Age+Experience+Income+Family+CCAvg+Mortgage+
Securities.Account+CD.Account+Online+CreditCard+
Education1+Education2+Education3,
data=train_Data, hidden=2,linear.output = F)
# See covariate and result varaibls of neuralnet model - covariate mens the variables extracted from the data argument
out <- cbind(nn$covariate, nn$net.result[[1]])
head(out)
# Remove rownames and set column names
dimnames(out) = list(NULL,c
("Age","Experience","Income","Family","CCAvg","Mortgage",
"Securities.Account","CD.Account","Online","CreditCard",
"Education1","Education2", "Education3","nn_Output"))
# To view top records in the data set
head(out)
rm(out)
# Plot the neural network
plot(nn)
# Compute confusion matrix for train data.
#predicted = factor(ifelse(nn$net.result[[1]] > 0.5, 1, 0))
#conf_Matrix = table(train_Data$Personal.Loan, predicted)
# Remove target attribute from Test Data
test_Data_No_Target = subset(test_Data, select=-c(Personal.Loan))
# Predict
nn_predict <- compute(nn, covariate= test_Data_No_Target)
rm(test_Data_No_Target)
# View the predicted values
nn_predict$net.result
# Compute confusion matrix and accuracy
# Threshold the network output at 0.5 to get class labels.
predicted = factor(ifelse(nn_predict$net.result > 0.5, 1, 0))
conf_Matrix<-table(test_Data$Personal.Loan, predicted)
# Overall accuracy in percent (trace of the confusion matrix over total).
sum(diag(conf_Matrix))/sum(conf_Matrix)*100
| /BinaryClass_neuralnet.R | no_license | Murali423/Hand-Written-Digit-Recognition | R | false | false | 4,012 | r | # Build Neural Network for classification using neuralnet library.
# End-to-end script: load data, encode categoricals, scale, split 70:30,
# fit a 1-hidden-layer network, and evaluate accuracy on the test split.
# NOTE(review): rm(list=ls()) and a machine-specific setwd() make this
# script non-portable; run it in a fresh session from its own directory.
rm(list=ls(all=TRUE))
# Set the working directory
setwd("C:/Users/gmanish/Dropbox/latest/openminds/slides/MachineLearning/7.ANNs/")
# Importing "data.csv" files's data into R dataframe using read.csv function.
data = read.csv(file="data.csv", header=TRUE, sep=",")
# Understand the structure the summary of the data using str and summary R commands
str(data)
summary(data)
# Using subset remove 'ID' and 'ZIP.Code' columns from the data
data = subset(data, select = -c(ID,ZIP.Code))
# Convert all the variables to appropriate type
# To numeric using as.numeric()
# To categoical using as.factor()
data$Education = as.factor(data$Education)
# R NN library takes only numeric attribues as input
# Convert all categorical attributes to numeric using appropriate technique. Hint: dummies
# Convert "Education" categorical attribute to numeric using dummy function in dummies R library
# Drop actual Education attribute from orginal data set
# Add created dummy Education variables to orginal data set
library(dummies)
education = dummy(data$Education)
data = subset(data, select=-c(Education))
data = cbind(data, education)
rm(education)
# Separate Target Variable and Independent Variables.
# In this case "Personal.Loan" is a target variable and all others are independent variable.
target_Variable = data$Personal.Loan
independent_Variables = subset(data, select = -c(Personal.Loan))
# Standardization the independent variables using decostand funcion in vegan R library
library(vegan)
# Note: To standardize the data using 'Range' method
independent_Variables = decostand(independent_Variables,"range")
data = data.frame(independent_Variables, Personal.Loan = target_Variable)
rm(independent_Variables, target_Variable)
# Use set.seed to get same test and train data
set.seed(123)
# Prepare train and test data in 70:30 ratio
num_Records = nrow(data)
# to take a random sample of 70% of the records for train data
train_Index = sample(1:num_Records, round(num_Records * 0.7, digits = 0))
train_Data = data[train_Index,]
test_Data = data[-train_Index,]
rm(train_Index, num_Records, data)
# See data distribution in response variable in both Train and Test data:
table(train_Data$Personal.Loan)
table(test_Data$Personal.Loan)
# Load neuralnet R library
library(neuralnet)
# Build a Neural Network having 1 hidden layer with 2 nodes
# (linear.output = F applies the activation on the output node, i.e.
# classification rather than regression)
set.seed(1234)
nn = neuralnet(Personal.Loan ~ Age+Experience+Income+Family+CCAvg+Mortgage+
Securities.Account+CD.Account+Online+CreditCard+
Education1+Education2+Education3,
data=train_Data, hidden=2,linear.output = F)
# See covariate and result varaibls of neuralnet model - covariate mens the variables extracted from the data argument
out <- cbind(nn$covariate, nn$net.result[[1]])
head(out)
# Remove rownames and set column names
dimnames(out) = list(NULL,c
("Age","Experience","Income","Family","CCAvg","Mortgage",
"Securities.Account","CD.Account","Online","CreditCard",
"Education1","Education2", "Education3","nn_Output"))
# To view top records in the data set
head(out)
rm(out)
# Plot the neural network
plot(nn)
# Compute confusion matrix for train data.
#predicted = factor(ifelse(nn$net.result[[1]] > 0.5, 1, 0))
#conf_Matrix = table(train_Data$Personal.Loan, predicted)
# Remove target attribute from Test Data
test_Data_No_Target = subset(test_Data, select=-c(Personal.Loan))
# Predict
nn_predict <- compute(nn, covariate= test_Data_No_Target)
rm(test_Data_No_Target)
# View the predicted values
nn_predict$net.result
# Compute confusion matrix and accuracy
# Threshold the network output at 0.5 to get class labels.
predicted = factor(ifelse(nn_predict$net.result > 0.5, 1, 0))
conf_Matrix<-table(test_Data$Personal.Loan, predicted)
# Overall accuracy in percent (trace of the confusion matrix over total).
sum(diag(conf_Matrix))/sum(conf_Matrix)*100
# Constrained Analysis of Principal Coordinates (CAP): ordinate
# dissimilarities (or raw data turned into dissimilarities via 'dfun')
# constrained by the right-hand side of 'formula'. Returns an object of
# class c("capscale", "rda", "cca").
`capscale` <-
function (formula, data, distance = "euclidean", sqrt.dist = FALSE,
comm = NULL, add = FALSE, dfun = vegdist,
metaMDSdist = FALSE, na.action = na.fail, subset = NULL, ...)
{
EPS <- sqrt(.Machine$double.eps)
if (!inherits(formula, "formula"))
stop("Needs a model formula")
if (missing(data)) {
data <- parent.frame()
}
else {
data <- eval(match.call()$data, environment(formula),
enclos = .GlobalEnv)
}
formula <- formula(terms(formula, data = data))
## The following line was eval'ed in environment(formula), but
## that made update() fail. Rethink the line if capscale() fails
## mysteriously at this point.
X <- eval(formula[[2]], envir=environment(formula),
enclos = globalenv())
## see if user supplied dissimilarities as a matrix
if ((is.matrix(X) || is.data.frame(X)) &&
isSymmetric(unname(as.matrix(X))))
X <- as.dist(X)
if (!inherits(X, "dist")) {
# Raw community data: keep it for species scores and compute
# dissimilarities with 'dfun' (optionally via metaMDSdist).
comm <- X
vdata <- as.character(formula[[2]])
dfun <- match.fun(dfun)
if (metaMDSdist) {
commname <- as.character(formula[[2]])
X <- metaMDSdist(comm, distance = distance, zerodist = "ignore",
commname = commname, distfun = dfun, ...)
commname <- attr(X, "commname")
comm <- eval.parent(parse(text=commname))
} else {
X <- dfun(X, distance)
}
} else { # vdata name
if (missing(comm))
vdata <- NULL
else
vdata <- deparse(substitute(comm))
}
# Build the textual description of the analysed inertia from the
# dissimilarity method and the transformations applied below.
inertia <- attr(X, "method")
if (is.null(inertia))
inertia <- "unknown"
inertia <- paste(toupper(substr(inertia, 1, 1)),
substring(inertia, 2), sep = "")
inertia <- paste(inertia, "distance")
if (!sqrt.dist)
inertia <- paste("squared", inertia)
## postpone info on euclidification till we have done so
## evaluate formula: ordiParseFormula will return dissimilarities
## as a symmetric square matrix (except that some rows may be
## deleted due to missing values)
d <- ordiParseFormula(formula,
data,
na.action = na.action,
subset = substitute(subset),
X = X)
## ordiParseFormula subsets rows of dissimilarities: do the same
## for columns ('comm' is handled later). ordiParseFormula
## returned the original data, but we use instead the potentially
## changed X and discard d$X.
if (!is.null(d$subset)) {
X <- as.matrix(X)[d$subset, d$subset, drop = FALSE]
}
## Delete columns if rows were deleted due to missing values
if (!is.null(d$na.action)) {
X <- as.matrix(X)[-d$na.action, -d$na.action, drop = FALSE]
}
X <- as.dist(X)
k <- attr(X, "Size") - 1
if (sqrt.dist)
X <- sqrt(X)
# Large dissimilarities: rescale so that inertia is a mean, not a sum.
if (max(X) >= 4 + .Machine$double.eps) {
inertia <- paste("mean", inertia)
adjust <- sqrt(k)
X <- X/adjust
}
else {
adjust <- 1
}
nm <- attr(X, "Labels")
## wcmdscale, optionally with additive adjustment
X <- wcmdscale(X, x.ret = TRUE, add = add)
## this may have been euclidified: update inertia
if (!is.na(X$ac) && X$ac > sqrt(.Machine$double.eps))
inertia <- paste(paste0(toupper(substring(X$add, 1, 1)),
substring(X$add, 2)),
"adjusted", inertia)
if (is.null(rownames(X$points)))
rownames(X$points) <- nm
# Constrained ordination of the principal-coordinate scores.
sol <- ordConstrained(X$points, d$Y, d$Z, method = "capscale")
## update for negative eigenvalues
poseig <- length(sol$CA$eig)
if (any(X$eig < 0)) {
negax <- X$eig[X$eig < 0]
sol$CA$imaginary.chi <- sum(negax)
sol$tot.chi <- sol$tot.chi + sol$CA$imaginary.chi
sol$CA$imaginary.rank <- length(negax)
sol$CA$imaginary.u.eig <- X$negaxes
}
if (!is.null(comm)) {
# Derive species scores by projecting the (centred) community data
# onto the site scores of each component, residualizing in between.
sol$vdata <- vdata
comm <- scale(comm, center = TRUE, scale = FALSE)
sol$colsum <- apply(comm, 2, sd)
## take a 'subset' of the community after scale()
if (!is.null(d$subset))
comm <- comm[d$subset, , drop = FALSE]
## NA action after 'subset'
if (!is.null(d$na.action))
comm <- comm[-d$na.action, , drop = FALSE]
if (!is.null(sol$pCCA) && sol$pCCA$rank > 0)
comm <- qr.resid(sol$pCCA$QR, comm)
if (!is.null(sol$CCA) && sol$CCA$rank > 0) {
v.eig <- t(comm) %*% sol$CCA$u/sqrt(k)
sol$CCA$v <- decostand(v.eig, "normalize", MARGIN = 2)
comm <- qr.resid(sol$CCA$QR, comm)
}
if (!is.null(sol$CA) && sol$CA$rank > 0) {
v.eig <- t(comm) %*% sol$CA$u/sqrt(k)
sol$CA$v <- decostand(v.eig, "normalize", MARGIN = 2)
}
} else {
## input data were dissimilarities, and no 'comm' defined:
## species scores make no sense and are made NA
sol$CA$v[] <- NA
if (!is.null(sol$CCA))
sol$CCA$v[] <- NA
sol$colsum <- NA
}
# Centroids of factor constraints; drop near-zero (degenerate) ones.
if (!is.null(sol$CCA) && sol$CCA$rank > 0)
sol$CCA$centroids <- centroids.cca(sol$CCA$wa, d$modelframe)
if (!is.null(sol$CCA$alias))
sol$CCA$centroids <- unique(sol$CCA$centroids)
if (!is.null(sol$CCA$centroids)) {
rs <- rowSums(sol$CCA$centroids^2)
sol$CCA$centroids <- sol$CCA$centroids[rs > 1e-04, ,
drop = FALSE]
if (nrow(sol$CCA$centroids) == 0)
sol$CCA$centroids <- NULL
}
# Bookkeeping: call, terms, and metadata used by print/summary methods.
sol$call <- match.call()
sol$terms <- terms(formula, "Condition", data = data)
sol$terminfo <- ordiTerminfo(d, data)
sol$call$formula <- formula(d$terms, width.cutoff = 500)
sol$call$formula[[2]] <- formula[[2]]
sol$sqrt.dist <- sqrt.dist
if (!is.na(X$ac) && X$ac > 0) {
sol$ac <- X$ac
sol$add <- X$add
}
sol$adjust <- adjust
sol$inertia <- inertia
if (metaMDSdist)
sol$metaMDSdist <- commname
sol$subset <- d$subset
sol$na.action <- d$na.action
class(sol) <- c("capscale", "rda", "cca")
if (!is.null(sol$na.action))
sol <- ordiNAexclude(sol, d$excluded)
sol
}
| /R/capscale.R | no_license | Microbiology/vegan | R | false | false | 6,376 | r | `capscale` <-
function (formula, data, distance = "euclidean", sqrt.dist = FALSE,
          comm = NULL, add = FALSE, dfun = vegdist,
          metaMDSdist = FALSE, na.action = na.fail, subset = NULL, ...)
{
    ## Numerical tolerance.  NOTE(review): EPS appears unused in this body;
    ## the later additive-constant test recomputes
    ## sqrt(.Machine$double.eps) inline -- confirm whether EPS can go.
    EPS <- sqrt(.Machine$double.eps)
    if (!inherits(formula, "formula"))
        stop("Needs a model formula")
    ## Resolve 'data': default to the caller's frame, otherwise evaluate
    ## the supplied expression in the formula's environment.
    if (missing(data)) {
        data <- parent.frame()
    }
    else {
        data <- eval(match.call()$data, environment(formula),
                     enclos = .GlobalEnv)
    }
    formula <- formula(terms(formula, data = data))
    ## The following line was eval'ed in environment(formula), but
    ## that made update() fail. Rethink the line if capscale() fails
    ## mysteriously at this point.
    X <- eval(formula[[2]], envir=environment(formula),
              enclos = globalenv())
    ## see if user supplied dissimilarities as a matrix
    if ((is.matrix(X) || is.data.frame(X)) &&
        isSymmetric(unname(as.matrix(X))))
        X <- as.dist(X)
    if (!inherits(X, "dist")) {
        ## LHS was a community matrix: compute dissimilarities here,
        ## optionally through metaMDSdist().
        comm <- X
        vdata <- as.character(formula[[2]])
        dfun <- match.fun(dfun)
        if (metaMDSdist) {
            commname <- as.character(formula[[2]])
            X <- metaMDSdist(comm, distance = distance, zerodist = "ignore",
                             commname = commname, distfun = dfun, ...)
            commname <- attr(X, "commname")
            comm <- eval.parent(parse(text=commname))
        } else {
            X <- dfun(X, distance)
        }
    } else { # vdata name
        if (missing(comm))
            vdata <- NULL
        else
            vdata <- deparse(substitute(comm))
    }
    ## Human-readable description of the inertia derived from the
    ## dissimilarity method, e.g. "squared Bray distance".
    inertia <- attr(X, "method")
    if (is.null(inertia))
        inertia <- "unknown"
    inertia <- paste(toupper(substr(inertia, 1, 1)),
                     substring(inertia, 2), sep = "")
    inertia <- paste(inertia, "distance")
    if (!sqrt.dist)
        inertia <- paste("squared", inertia)
    ## postpone info on euclidification till we have done so
    ## evaluate formula: ordiParseFormula will return dissimilarities
    ## as a symmetric square matrix (except that some rows may be
    ## deleted due to missing values)
    d <- ordiParseFormula(formula,
                          data,
                          na.action = na.action,
                          subset = substitute(subset),
                          X = X)
    ## ordiParseFormula subsets rows of dissimilarities: do the same
    ## for columns ('comm' is handled later). ordiParseFormula
    ## returned the original data, but we use instead the potentially
    ## changed X and discard d$X.
    if (!is.null(d$subset)) {
        X <- as.matrix(X)[d$subset, d$subset, drop = FALSE]
    }
    ## Delete columns if rows were deleted due to missing values
    if (!is.null(d$na.action)) {
        X <- as.matrix(X)[-d$na.action, -d$na.action, drop = FALSE]
    }
    X <- as.dist(X)
    k <- attr(X, "Size") - 1
    if (sqrt.dist)
        X <- sqrt(X)
    ## Large dissimilarities are rescaled by sqrt(k) and the inertia
    ## reported as a "mean" quantity.
    if (max(X) >= 4 + .Machine$double.eps) {
        inertia <- paste("mean", inertia)
        adjust <- sqrt(k)
        X <- X/adjust
    }
    else {
        adjust <- 1
    }
    nm <- attr(X, "Labels")
    ## wcmdscale, optionally with additive adjustment
    X <- wcmdscale(X, x.ret = TRUE, add = add)
    ## this may have been euclidified: update inertia
    if (!is.na(X$ac) && X$ac > sqrt(.Machine$double.eps))
        inertia <- paste(paste0(toupper(substring(X$add, 1, 1)),
                                substring(X$add, 2)),
                         "adjusted", inertia)
    if (is.null(rownames(X$points)))
        rownames(X$points) <- nm
    ## Constrained ordination of the metric scaling site scores.
    sol <- ordConstrained(X$points, d$Y, d$Z, method = "capscale")
    ## update for negative eigenvalues
    ## NOTE(review): poseig is assigned but not referenced again in this
    ## function -- confirm whether it can be dropped.
    poseig <- length(sol$CA$eig)
    if (any(X$eig < 0)) {
        negax <- X$eig[X$eig < 0]
        sol$CA$imaginary.chi <- sum(negax)
        sol$tot.chi <- sol$tot.chi + sol$CA$imaginary.chi
        sol$CA$imaginary.rank <- length(negax)
        sol$CA$imaginary.u.eig <- X$negaxes
    }
    if (!is.null(comm)) {
        ## Community data available: species scores are projections of the
        ## column-centred community matrix on the ordination axes.
        sol$vdata <- vdata
        comm <- scale(comm, center = TRUE, scale = FALSE)
        sol$colsum <- apply(comm, 2, sd)
        ## take a 'subset' of the community after scale()
        if (!is.null(d$subset))
            comm <- comm[d$subset, , drop = FALSE]
        ## NA action after 'subset'
        if (!is.null(d$na.action))
            comm <- comm[-d$na.action, , drop = FALSE]
        if (!is.null(sol$pCCA) && sol$pCCA$rank > 0)
            comm <- qr.resid(sol$pCCA$QR, comm)
        if (!is.null(sol$CCA) && sol$CCA$rank > 0) {
            v.eig <- t(comm) %*% sol$CCA$u/sqrt(k)
            sol$CCA$v <- decostand(v.eig, "normalize", MARGIN = 2)
            comm <- qr.resid(sol$CCA$QR, comm)
        }
        if (!is.null(sol$CA) && sol$CA$rank > 0) {
            v.eig <- t(comm) %*% sol$CA$u/sqrt(k)
            sol$CA$v <- decostand(v.eig, "normalize", MARGIN = 2)
        }
    } else {
        ## input data were dissimilarities, and no 'comm' defined:
        ## species scores make no sense and are made NA
        sol$CA$v[] <- NA
        if (!is.null(sol$CCA))
            sol$CCA$v[] <- NA
        sol$colsum <- NA
    }
    ## Centroids of factor constraints; essentially-zero rows (aliased
    ## levels) are removed.
    if (!is.null(sol$CCA) && sol$CCA$rank > 0)
        sol$CCA$centroids <- centroids.cca(sol$CCA$wa, d$modelframe)
    if (!is.null(sol$CCA$alias))
        sol$CCA$centroids <- unique(sol$CCA$centroids)
    if (!is.null(sol$CCA$centroids)) {
        rs <- rowSums(sol$CCA$centroids^2)
        sol$CCA$centroids <- sol$CCA$centroids[rs > 1e-04, ,
                                               drop = FALSE]
        if (nrow(sol$CCA$centroids) == 0)
            sol$CCA$centroids <- NULL
    }
    ## Bookkeeping stored on the result object.
    sol$call <- match.call()
    sol$terms <- terms(formula, "Condition", data = data)
    sol$terminfo <- ordiTerminfo(d, data)
    sol$call$formula <- formula(d$terms, width.cutoff = 500)
    sol$call$formula[[2]] <- formula[[2]]
    sol$sqrt.dist <- sqrt.dist
    if (!is.na(X$ac) && X$ac > 0) {
        sol$ac <- X$ac
        sol$add <- X$add
    }
    sol$adjust <- adjust
    sol$inertia <- inertia
    if (metaMDSdist)
        sol$metaMDSdist <- commname
    sol$subset <- d$subset
    sol$na.action <- d$na.action
    class(sol) <- c("capscale", "rda", "cca")
    if (!is.null(sol$na.action))
        sol <- ordiNAexclude(sol, d$excluded)
    sol
}
|
# plot6.R -- total PM2.5 emissions from motor-vehicle sources in
# Baltimore City (fips 24510) vs Los Angeles County (fips 06037),
# 1999-2008.  The ggsave() call after this section writes plot6.png.
setwd("C:/Users/kate/Documents/datascience/coursera")
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Merge the emissions records with the source classification table.
NEISCC <- merge(NEI, SCC, by = "SCC", all.x = TRUE)
# Subset to Baltimore City, keeping rows whose SCC.Level.Two mentions
# "Vehicle".  grepl() yields a logical mask; the original compared the
# column with `==` against the character vector from
# grep(value = TRUE), which recycles element-wise and silently drops
# matching rows.
baltimore <- subset(NEISCC, NEISCC$fips == "24510")
subnei <- subset(baltimore, grepl("Vehicle", baltimore$SCC.Level.Two))
# Total the emissions per year, then tabulate against the survey years.
data <- rowsum(subnei$Emissions, subnei$year)
table <- data.frame(c(1999, 2002, 2005, 2008), data[, 1])
# Same computation for Los Angeles County.
la <- subset(NEISCC, NEISCC$fips == "06037")
subnei2 <- subset(la, grepl("Vehicle", la$SCC.Level.Two))
data2 <- rowsum(subnei2$Emissions, subnei2$year)
table2 <- data.frame(c(1999, 2002, 2005, 2008), data2[, 1])
# Stack the two city tables into one long data frame for ggplot.
table$place <- rep("Baltimore", 4)
table2$place <- rep("LA", 4)
colnames(table) <- c("Year", "Emissions", "Place")
colnames(table2) <- c("Year", "Emissions", "Place")
tablecombined <- rbind(table, table2)
library(ggplot2)
# Build the comparison plot; columns are mapped by name rather than via
# tablecombined$... inside aes().
g <- ggplot(tablecombined, aes(x = Year, y = Emissions, colour = Place))
g <- g + geom_line()
g <- g + xlab("years")
g <- g + ylab("PM2.5 emissions(Tons)")
g <- g + ggtitle("Emissions from motor vehicles in Baltimore and LA 1999-2008")
ggsave(filename="plot6.png") | /expl data ass 2/plot6.R | no_license | katemarielewis/datasciencecoursera | R | false | false | 1,990 | r | setwd("C:/Users/kate/Documents/datascience/coursera")
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
#merge the NEI and SCC data frames
NEISCC<-merge(NEI, SCC, by="SCC", all.x=TRUE)
#Subset the NEISCC dataset to include only Baltimore data
baltimore<-subset(NEISCC, NEISCC$fips=="24510")
#find the values of SCC.Level.Two column that contains "Vehicle"
neiscc<-grep("Vehicle", baltimore$SCC.Level.Two, value=TRUE)
#subset the merged data frame based on the Short.Name values that contain "Vehicle"
# NOTE(review): `==` against the vector returned by grep(value = TRUE)
# recycles element-wise; grepl("Vehicle", ...) is presumably intended --
# verify the resulting row counts.
subnei<-subset(baltimore, baltimore$SCC.Level.Two==neiscc)
#Sum the emissions values for each year and then make a data table with the year values
data<-rowsum(subnei$Emissions, subnei$year)
table<-data.frame(c(1999, 2002, 2005, 2008), data[,1])
#Subset the NEISCC dataset to include only LA data
la<-subset(NEISCC, NEISCC$fips=="06037")
#find the values of SCC.Level.Two column that contains "Vehicle"
neiscc2<-grep("Vehicle", la$SCC.Level.Two, value=TRUE)
#subset the merged data frame based on the Short.Name values that contain "Vehicle"
# NOTE(review): same recycling concern as above.
subnei2<-subset(la, la$SCC.Level.Two==neiscc2)
#Sum the emissions values for each year and then make a data table with the year values
data2<-rowsum(subnei2$Emissions, subnei2$year)
table2<-data.frame(c(1999, 2002, 2005, 2008), data2[,1])
#make the tables containing the baltimore values and LA values into a single long data frame
table$place<-c("Baltimore", "Baltimore","Baltimore","Baltimore")
table2$place<-c("LA", "LA", "LA", "LA")
colnames(table)=(c("Year", "Emissions", "Place"))
colnames(table2)=(c("Year", "Emissions", "Place"))
tablecombined<-rbind(table, table2)
library(ggplot2)
#plot data using ggplot and save as PNG file
g<-ggplot(tablecombined, aes(x=tablecombined$Year, y=tablecombined$Emissions, colour=Place))
g<-g+geom_line()
g<-g+xlab("years")
g<-g+ylab("PM2.5 emissions(Tons)")
g<-g+ggtitle("Emissions from motor vehicles in Baltimore and LA 1999-2008")
ggsave(filename="plot6.png")
#' Given a source_info object, retrieves project information
#'
#' @param source_info A list with source information; only its
#'   \code{dependency.dir} element is read here.
#' @return A list with elements \code{tree} (stacked dependency files),
#'   \code{graph} (graph of dependencies) and \code{all.files}
#'   (condensed file information).
#' @export
#' @examples
#' \dontrun{
#' source_info <- create_source_file_dir("adaprHome","tree_controller.R")
#' get.project.info.si(source_info)
#' }
#'
get.project.info.si <- function(source_info){
    # Directory holding the dependency records for this source file.
    dependency.dir <- source_info$dependency.dir
    # Stack every dependency file found in that directory.
    trees <- Harvest.trees(dependency.dir)
    # Summary dependency graph built from the stacked object (not plotted).
    g.all <- Make.summary.graph(dependency.dir=NULL,dependency.object=trees,plot.graph=FALSE)
    # Condensed per-file information derived from the same object.
    file.info.object <- Condense.file.info(trees)
    return(list("tree"=trees,"graph"=g.all,"all.files"=file.info.object))
} | /R/get_project_info_si.R | no_license | bokov/adapr | R | false | false | 760 | r | #' Given source_info object, retrieves project information
#' @param source_info A list with source information; only its
#'   \code{dependency.dir} element is read here.
#' @return A list with elements \code{tree} (stacked dependency files),
#'   \code{graph} (graph of dependencies) and \code{all.files}
#'   (condensed file information).
#' @export
#' @examples
#' \dontrun{
#' source_info <- create_source_file_dir("adaprHome","tree_controller.R")
#' get.project.info.si(source_info)
#' }
#'
get.project.info.si <- function(source_info){
    # Directory holding the dependency records for this source file.
    dependency.dir <- source_info$dependency.dir
    # Stack dependency files, build the (unplotted) summary graph and the
    # condensed per-file information from the same stacked object.
    trees <- Harvest.trees(dependency.dir)
    g.all <- Make.summary.graph(dependency.dir=NULL,dependency.object=trees,plot.graph=FALSE)
    file.info.object <- Condense.file.info(trees)
    return(list("tree"=trees,"graph"=g.all,"all.files"=file.info.object))
}
# plot3.R -- energy sub-metering for 2007-02-01/02 from the UCI household
# power consumption data; the dev.off() after this section closes plot3.png.
#
# skip = 66637 jumps past the file header plus the rows before 2007-02-01,
# so header must be FALSE: with header = TRUE (as originally written)
# read.table consumed the first data row of 2007-02-01 as column names,
# losing one observation and shifting the 2880-minute window by one row.
# na.strings = "?" maps the file's missing-value marker to NA.
household_power_consumption <- read.table("household_power_consumption.txt",
                                          header = FALSE, sep = ";",
                                          skip = 66637, nrows = 2880,
                                          na.strings = "?")
# Recover the real column names from the first line of the file.
header <- read.table("household_power_consumption.txt", header = TRUE,
                     sep = ";", nrows = 1)
colnames(household_power_consumption) <- colnames(header)
# Combine Date and Time into a single timestamp for the x axis.
household_power_consumption$Timestamp <- strptime(
    paste(household_power_consumption$Date, household_power_consumption$Time),
    "%d/%m/%Y %H:%M:%S")
png(filename = "plot3.png")
plot(household_power_consumption$Timestamp,
     household_power_consumption$Sub_metering_1,
     xlab = "", ylab = "Energy sub metering", type = "l")
lines(household_power_consumption$Timestamp,
      household_power_consumption$Sub_metering_2, col = "red")
lines(household_power_consumption$Timestamp,
      household_power_consumption$Sub_metering_3, col = "blue")
legend("topright", col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       lty = 1)
dev.off() | /plot3.R | no_license | samadsajanlal/ExData_Plotting1 | R | false | false | 909 | r | household_power_consumption <- read.table('household_power_consumption.txt', header=T, sep=';', skip=66637, nrows=2880)
# NOTE(review): the companion read.table() call on the preceding line uses
# header=T together with skip=66637; that makes read.table take the first
# skipped-to DATA row as column names (they are overwritten below), losing
# one observation -- verify whether header=FALSE was intended.
header <- read.table('household_power_consumption.txt', header=T, sep=';', nrows=1)
# Overwrite the (mangled) names with the real header from line 1.
colnames(household_power_consumption) <- colnames(header)
# Combine Date and Time into a single timestamp for the x axis.
household_power_consumption$Timestamp <- strptime(paste(household_power_consumption$Date, household_power_consumption$Time), "%d/%m/%Y %H:%M:%S")
png(filename='plot3.png')
# Black/red/blue line per sub-meter, legend in the top-right corner.
plot(household_power_consumption$Timestamp, household_power_consumption$Sub_metering_1, xlab="", ylab="Energy sub metering", type='l')
lines(household_power_consumption$Timestamp, household_power_consumption$Sub_metering_2, col='red')
lines(household_power_consumption$Timestamp, household_power_consumption$Sub_metering_3, col='blue')
legend("topright", col=c('black', 'red', 'blue'), legend=c('Sub_metering_1', 'Sub_metering_2', 'Sub_metering_3'), lty=1)
dev.off()
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dbexists.R
\name{dbexists}
\alias{dbexists}
\title{Table exists}
\usage{
dbexists(chan, tname)
}
\arguments{
\item{chan}{open RODBC channel}
\item{tname}{tablename (including schema if there is one)}
}
\description{
Checks for the existence of a database table
}
\examples{
dbtest <- odbcConnect('test')
sqlSave(dbtest, mtcars)
dbexists(dbtest, "mtcars")
}
\keyword{tables}
| /man/dbexists.Rd | no_license | christinabrady/ckit | R | false | true | 451 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dbexists.R
\name{dbexists}
\alias{dbexists}
\title{Table exists}
\usage{
dbexists(chan, tname)
}
\arguments{
\item{chan}{open RODBC channel}
\item{tname}{tablename (including schema if there is one)}
}
\description{
Checks for the existence of a database table
}
\examples{
dbtest <- odbcConnect('test')
sqlSave(dbtest, mtcars)
dbexists(dbtest, "mtcars")
}
\keyword{tables}
|
## Use my standard opening, including the call() helper (defined in
## StdOpen.R -- presumably a package loader; epicalc supplies csi()/cci()).
source('C:/Users/bryan_000/Documents/GitHub/MyWork/StdOpen.R')
call("epicalc")
call("survival")
call("graphics")
studydata <- read.csv("smokerdisease.csv")
## Flattened 3-way table: rows are the (smoker, disease) combinations and
## columns the two levels of female (ftable puts the last variable in the
## columns), as the index comments below spell out.
df <- ftable(table(studydata$smoker, studydata$disease,studydata$female))
##Nonsmokers - Nondisease
df[1,1] #Male Nonsmokers Nondisease
df[1,2] #Female Nonsmokers Nondisease
##Nonsmokers - disease
df[2,1] #Male Nonsmokers Disease
df[2,2] #Female Nonsmokers Disease
##Smokers - Nondisease
df[3,1] #Male Smokers Nondisease
df[3,2] #Female Smokers Nondisease
##Smokers - Disease
df[4,1] #Male Smokers Disease
df[4,2] #Female Smokers Disease
## Overall results, no breakdown by sex.
## csi(exposed-positive, exposed-negative, nonexposed-positive, nonexposed-negative)
csi(df[4,1]+df[4,2],df[3,1]+df[3,2],df[2,1]+df[2,2],df[1,1]+df[1,2])
## Risk Ratio = .93
## Risk difference = -.02
cci(df[4,1]+df[4,2],df[3,1]+df[3,2],df[2,1]+df[2,2],df[1,1]+df[1,2])
## OR = .91
## Results for males only (column 1).
csi(df[4,1],df[3,1],df[2,1],df[1,1])
## Risk Ratio = 1.81
## Risk difference = .08
cci(df[4,1],df[3,1],df[2,1],df[1,1])
## OR = 1.99
## Results for females only (column 2).
csi(df[4,2],df[3,2],df[2,2],df[1,2])
## Risk Ratio = 1.53
## Risk difference = .16
cci(df[4,2],df[3,2],df[2,2],df[1,2])
## OR = 1.99 | /SmokerDiseaseSex.R | no_license | Prashant0701/PracticeAnalytics | R | false | false | 1,289 | r | ##Use my standard openning including call function
## (StdOpen.R defines the call() helper used below -- presumably a package
## loader; epicalc supplies csi()/cci().)
source('C:/Users/bryan_000/Documents/GitHub/MyWork/StdOpen.R')
call("epicalc")
call("survival")
call("graphics")
studydata <- read.csv("smokerdisease.csv")
## Flattened 3-way table: rows = (smoker, disease) combinations, columns =
## the two levels of female (ftable puts the last variable in the columns).
df <- ftable(table(studydata$smoker, studydata$disease,studydata$female))
##Nonsmokers - Nondisease
df[1,1] #Male Nonsmokers Nondisease
df[1,2] #Female Nonsmokers Nondisease
##Nonsmokers - disease
df[2,1] #Male Nonsmokers Disease
df[2,2] #Female Nonsmokers Disease
##Smokers - Nondisease
df[3,1] #Male Smokers Nondisease
df[3,2] #Female Smokers Nondisease
##Smokers - Disease
df[4,1] #Male Smokers Disease
df[4,2] #Female Smokers Disease
##Overall results no breakdown for Sex
##csi(exposed-positive,exposed-negative,nonexposed-positive,nonexposed-negative)
csi(df[4,1]+df[4,2],df[3,1]+df[3,2],df[2,1]+df[2,2],df[1,1]+df[1,2])
## Risk Ratio = .93
## Risk difference = -.02
cci(df[4,1]+df[4,2],df[3,1]+df[3,2],df[2,1]+df[2,2],df[1,1]+df[1,2])
## OR = .91
##results for males only
csi(df[4,1],df[3,1],df[2,1],df[1,1])
## Risk Ratio = 1.81
## Risk difference = .08
cci(df[4,1],df[3,1],df[2,1],df[1,1])
## OR = 1.99
##results for females only
csi(df[4,2],df[3,2],df[2,2],df[1,2])
## Risk Ratio = 1.53
## Risk difference = .16
cci(df[4,2],df[3,2],df[2,2],df[1,2])
## OR = 1.99
\name{dtgpd_log}
\alias{dtgpd_log}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
internal
}
\description{
internal use only
}
\usage{
dtgpd_log(x, y, z, mar1 = c(0, 1, 0.1), mar2 = c(0, 1, 0.1), mar3 = c(0, 1, 0.1), dep = 1.5)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
%% ~~Describe \code{x} here~~
}
\item{y}{
%% ~~Describe \code{y} here~~
}
\item{z}{
%% ~~Describe \code{z} here~~
}
\item{mar1}{
%% ~~Describe \code{mar1} here~~
}
\item{mar2}{
%% ~~Describe \code{mar2} here~~
}
\item{mar3}{
%% ~~Describe \code{mar3} here~~
}
\item{dep}{
%% ~~Describe \code{dep} here~~
}
}
\details{
internal use only
}
\value{
internal use only
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
internal use only
}
\author{
P. Rakonczai
}
\note{
internal use only
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
internal use only
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function (x, y, z, mar1 = c(0, 1, 0.1), mar2 = c(0, 1, 0.1),
mar3 = c(0, 1, 0.1), dep = 1.5)
{
error = FALSE
hxyz = NULL
param = as.numeric(c(mar1, mar2, mar3, dep))
mux = param[1]
muy = param[4]
muz = param[7]
sigx = param[2]
sigy = param[5]
sigz = param[8]
gamx = param[3]
gamy = param[6]
gamz = param[9]
alpha = param[10]
if (gamx > 0) {
epx1 = mux - sigx/gamx
epx2 = Inf
}
else {
epx1 = -Inf
epx2 = mux - sigx/gamx
}
if (gamy > 0) {
epy1 = muy - sigy/gamy
epy2 = Inf
}
else {
epy1 = -Inf
epy2 = muy - sigy/gamy
}
if (gamz > 0) {
epz1 = muz - sigz/gamz
epz2 = Inf
}
else {
epz1 = -Inf
epz2 = muz - sigz/gamz
}
if ((min(x) < epx1) | (max(x) > epx2)) {
error = T
}
if ((min(y) < epy1) | (max(y) > epy2)) {
error = T
}
if ((min(z) < epz1) | (max(z) > epz2)) {
error = T
}
if (sigx < 0 | sigy < 0 | sigz < 0 | alpha < 1) {
error = T
}
if (!error) {
hxyz = NA
tx = tr(x, gamx, mux, sigx)
ty = tr(y, gamy, muy, sigy)
tz = tr(z, gamz, muz, sigz)
tx0 = tr(0, gamx, mux, sigx)
tz0 = tr(0, gamy, muy, sigy)
ty0 = tr(0, gamz, muz, sigz)
dtx = dtr(x, gamx, mux, sigx)
dty = dtr(y, gamy, muy, sigy)
dtz = dtr(z, gamz, muz, sigz)
c0 = -1/mulog(tx0, ty0, tz0, alpha = alpha)
dddpsimu = d123mulog(tx, ty, tz, alpha = alpha)
Jc = dtx * dty * dtz
null = (1 - ((tx < tx0) * (ty < ty0) * (tz < tz0)))
hxyz = c0 * dddpsimu * null * Jc
}
else print("invalid parameter(s)")
hxyz
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ internal }
%% \keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
| /man/dtgpd_log.Rd | no_license | muguangyuze/mgpd | R | false | false | 3,309 | rd | \name{dtgpd_log}
\alias{dtgpd_log}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
internal
}
\description{
internal use only
}
\usage{
dtgpd_log(x, y, z, mar1 = c(0, 1, 0.1), mar2 = c(0, 1, 0.1), mar3 = c(0, 1, 0.1), dep = 1.5)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{x}{
%% ~~Describe \code{x} here~~
}
\item{y}{
%% ~~Describe \code{y} here~~
}
\item{z}{
%% ~~Describe \code{z} here~~
}
\item{mar1}{
%% ~~Describe \code{mar1} here~~
}
\item{mar2}{
%% ~~Describe \code{mar2} here~~
}
\item{mar3}{
%% ~~Describe \code{mar3} here~~
}
\item{dep}{
%% ~~Describe \code{dep} here~~
}
}
\details{
internal use only
}
\value{
internal use only
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
internal use only
}
\author{
P. Rakonczai
}
\note{
internal use only
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
internal use only
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function (x, y, z, mar1 = c(0, 1, 0.1), mar2 = c(0, 1, 0.1),
mar3 = c(0, 1, 0.1), dep = 1.5)
{
error = FALSE
hxyz = NULL
param = as.numeric(c(mar1, mar2, mar3, dep))
mux = param[1]
muy = param[4]
muz = param[7]
sigx = param[2]
sigy = param[5]
sigz = param[8]
gamx = param[3]
gamy = param[6]
gamz = param[9]
alpha = param[10]
if (gamx > 0) {
epx1 = mux - sigx/gamx
epx2 = Inf
}
else {
epx1 = -Inf
epx2 = mux - sigx/gamx
}
if (gamy > 0) {
epy1 = muy - sigy/gamy
epy2 = Inf
}
else {
epy1 = -Inf
epy2 = muy - sigy/gamy
}
if (gamz > 0) {
epz1 = muz - sigz/gamz
epz2 = Inf
}
else {
epz1 = -Inf
epz2 = muz - sigz/gamz
}
if ((min(x) < epx1) | (max(x) > epx2)) {
error = T
}
if ((min(y) < epy1) | (max(y) > epy2)) {
error = T
}
if ((min(z) < epz1) | (max(z) > epz2)) {
error = T
}
if (sigx < 0 | sigy < 0 | sigz < 0 | alpha < 1) {
error = T
}
if (!error) {
hxyz = NA
tx = tr(x, gamx, mux, sigx)
ty = tr(y, gamy, muy, sigy)
tz = tr(z, gamz, muz, sigz)
tx0 = tr(0, gamx, mux, sigx)
tz0 = tr(0, gamy, muy, sigy)
ty0 = tr(0, gamz, muz, sigz)
dtx = dtr(x, gamx, mux, sigx)
dty = dtr(y, gamy, muy, sigy)
dtz = dtr(z, gamz, muz, sigz)
c0 = -1/mulog(tx0, ty0, tz0, alpha = alpha)
dddpsimu = d123mulog(tx, ty, tz, alpha = alpha)
Jc = dtx * dty * dtz
null = (1 - ((tx < tx0) * (ty < ty0) * (tz < tz0)))
hxyz = c0 * dddpsimu * null * Jc
}
else print("invalid parameter(s)")
hxyz
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ internal }
%% \keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
#' Simulate succession of six vegetation types with coupled logistic
#' growth (SNP time model, parameter set "model 410").
#'
#' @param trange total simulated time span; NULL selects the default 400.
#' @param tsl    time step length; NULL selects the default 1.0.
#' @param x6     optional vector of 6 initial abundances (percent cover).
#' @param r6     optional vector of 6 growth rates.
#' @return A list with the simulation set-up (n.time.steps,
#'   time.step.length, time.vector, veg.types, growth.rates,
#'   initial.cond) and the (trange/tsl) x 6 state matrix sim.data.
SNPtm2 <-
function(trange=100,tsl=1.0,x6=NULL,r6=NULL) {
    ## Defaults when an argument is passed explicitly as NULL.
    if (is.null(trange)) trange <- 400       # time range
    if (is.null(tsl)) tsl <- 1.0             # time step length
    ## Growth rates and initial conditions of model 410 unless supplied.
    r <- if (is.null(r6)) c(0.025, 0.040, 0.045, 0.045, 0.045, 0.045) else r6
    x <- matrix(0, nrow = trange/tsl, ncol = 6)   # one row per time step
    x[1, ] <- if (is.null(x6)) c(0.50, 0.70, 1.70, 6.0, 13.0, 78.0) else x6
    nt <- trange/tsl                         # number of time steps
    tv <- seq(0, trange - tsl, tsl)          # time vector
    vegtypes <- c("Aconitum","Trisetum","Deschampsia","Festuca","Carex","Pinus")
    K <- 100                                 # carrying capacity
    dx <- rep(0, 6)
    for (t in 2:nt) {
        ## Part 1: logistic growth; type v is limited by the cover already
        ## occupied by types 1..v (competitive hierarchy).
        occupied <- 0
        for (v in 1:6) {
            occupied <- occupied + x[t-1, v]
            dx[v] <- r[v] * x[t-1, v] * ((K - occupied)/K)
        }
        ## Part 2: keep gains and losses balanced -- the net gain of the
        ## types above v is taken proportionally from types v..6.  These
        ## updates are sequential on purpose: each step uses the dx values
        ## already adjusted by the previous steps.
        for (v in 2:6) {
            dx[v] <- dx[v] - sum(dx[1:(v-1)]) * (x[t-1, v]/sum(x[t-1, v:6]))
        }
        ## Explicit Euler integration step of length tsl.
        x[t, ] <- x[t-1, ] + dx * tsl
    }
    ## The unused plotting vectors (symbols, colors) of the original have
    ## been removed; the returned object is unchanged.
    list(n.time.steps = trange, time.step.length = tsl, time.vector = tv,
         veg.types = vegtypes, growth.rates = r, initial.cond = x[1, ],
         sim.data = x)
}
| /dave/R/SNPtm2.R | no_license | ingted/R-Examples | R | false | false | 2,201 | r | SNPtm2 <-
function(trange=100,tsl=1.0,x6=NULL,r6=NULL) {
  ## Logistic succession model for six vegetation types ("model 410").
  ## NULL arguments fall back to built-in defaults.
  testrange<- is.null(trange)
  if(testrange == TRUE) trange<- 400 # Time range
  testtsl<- is.null(tsl)
  if(testtsl == TRUE) tsl<- 1.0 # Time step length
  ## State matrix: one row per time step, one column per vegetation type.
  x<- rep(0,trange*6/tsl)
  x<- matrix(x,ncol=6)
  testr<- is.null(r6)
  if(testr == TRUE) r<- c(0.025,0.040,0.045,0.045,0.045,0.045) # 6 growth rates, model 410
  if(testr == FALSE) r<- r6
  testx<- is.null(x6)
  if(testx == TRUE) x[1,]<- c(0.50,0.70,1.70,6.0,13.0,78.0) # 6 initial conditions, model 410
  if(testx == FALSE) x[1,]<- x6
  nt<- (trange/tsl) # No of time steps
  tv<- seq(0,trange-tsl,tsl) # Time vector
  ## NOTE(review): symbols and colors are never used in this function;
  ## presumably leftovers from a plotting routine.
  symbols<- c(1,4,8,17,22,18)
  colors<- c("darkolivegreen4","lightgreen","gold","darkorange","red1","darkred")
  vegtypes<- c("Aconitum","Trisetum","Deschampsia","Festuca","Carex","Pinus")
  K<- 100 # Carrying capacity
  dx<- rep(0,6)
  for(t in 2:nt) {
    # Differential equations part 1: Logistic growth of vegetation types
    ## Type v is limited by the cover already occupied by types 1..v.
    dx[1]<- r[1]*x[t-1,1]*((K- x[t-1,1])/K)
    dx[2]<- r[2]*x[t-1,2]*((K- x[t-1,1]-x[t-1,2])/K)
    dx[3]<- r[3]*x[t-1,3]*((K- x[t-1,1]-x[t-1,2]-x[t-1,3])/K)
    dx[4]<- r[4]*x[t-1,4]*((K- x[t-1,1]-x[t-1,2]-x[t-1,3]-x[t-1,4])/K)
    dx[5]<- r[5]*x[t-1,5]*((K- x[t-1,1]-x[t-1,2]-x[t-1,3]-x[t-1,4]-x[t-1,5])/K)
    dx[6]<- r[6]*x[t-1,6]*((K- x[t-1,1]-x[t-1,2]-x[t-1,3]-x[t-1,4]-x[t-1,5]-x[t-1,6])/K)
    # Differential equations part 2: Keeping gains and losses balanced
    ## Each line deliberately uses the dx values already updated by the
    ## lines above it (sequential compensation down the hierarchy).
    dx[2]<- dx[2] - (dx[1])*(x[t-1,2]/sum(x[t-1,2:6]))
    dx[3]<- dx[3] - (dx[1]+dx[2])*(x[t-1,3]/sum(x[t-1,3:6]))
    dx[4]<- dx[4] - (dx[1]+dx[2]+dx[3])*(x[t-1,4]/sum(x[t-1,4:6]))
    dx[5]<- dx[5] - (dx[1]+dx[2]+dx[3]+dx[4])*(x[t-1,5]/sum(x[t-1,5:6]))
    dx[6]<- dx[6] - (dx[1]+dx[2]+dx[3]+dx[4]+dx[5])*(x[t-1,6]/sum(x[t-1,6]))
    # Numerical integration
    ## Explicit Euler step of length tsl.
    for(v in 1:6){
      x[t,v]<- x[t-1,v]+(dx[v]*tsl)
    }
  }
  ## Result list; returned (invisibly) as the value of this assignment.
  o.SNP<- list(n.time.steps=trange,time.step.length=tsl,time.vector=tv,veg.types=vegtypes,growth.rates=r,initial.cond=x[1,],sim.data=x)
}
|
#
# Script outputs the variation frequency plots. Only useful for pretty pictures really.
#
# NOTE(review): rm(list = ls()) and setwd() make this script single-purpose;
# kept because the source() paths below are relative to this directory.
rm(list = ls())
setwd("~/workspace/IGCSA/R")
source("lib/gc_functions.R")
source("lib/varplots.R")
#args = commandArgs(trailingOnly = TRUE)
#ens_dir = args[1]
#gc_dir = args[2]
data_dir <- "~/Data/VariationNormal"
# Master switch for the plotting sections further down.  (The name shadows
# graphics::plot for variable lookup only; plot(...) calls still resolve.)
plot <- FALSE
ens_dir <- paste(data_dir, "Frequencies/1000/Ensembl", sep = "/")
# "\\.txt$" anchors the extension: the previous pattern ".txt" was an
# unanchored regex in which '.' matched any character.
var_files <- list.files(path = ens_dir, pattern = "\\.txt$")
gc_dir <- paste(data_dir, "GC/1000", sep = "/")
gc_files <- list.files(path = gc_dir, pattern = "\\.txt$")
out_dir <- "~/Analysis/Normal"
# Empty per-chromosome summary table, filled row-by-row in the loop below.
rnames <- c("GC", "Total.Vars", "Bins")
seq_ratios <- as.data.frame(matrix(nrow = 0, ncol = length(rnames)))
colnames(seq_ratios) <- rnames
snv_freq <- list()
var_tests <- list()
## Per-chromosome pass: read variation counts and GC content, run the
## distribution tests and accumulate one summary row per chromosome.
for (i in 1:length(var_files))
{
    file = var_files[i]
    chr = sub(".txt", "", file)
    print(chr)
    chrdir = paste(out_dir, chr, sep="/")
    if (!file.exists(chrdir)) dir.create(chrdir)
    # Variation & gc files
    gc_f = paste(gc_dir, paste(chr, "-gc.txt", sep=""), sep="/")
    var_f = paste(ens_dir, file, sep="/")
    # Data with NA removed
    ## load.data() comes from lib/gc_functions.R -- presumably returns a
    ## list with $vars (per-bin variation counts) and $gc; verify there.
    data = load.data(gc_f, var_f)
    var_d = data$vars
    gc_d = data$gc
    all = cbind(var_d, gc_d)
    ## Columns before 'GC' are the variation classes.
    last_var = which(colnames(all) == 'GC')-1
    var_tests[[chr]] = ks.test.all(var_d[1:last_var], chr)
    snv_freq[[chr]] = table(all[,'SNV'])
    ## In R unary '!' binds looser than '==', so this parses as
    ## !(nrow(var_d) == nrow(gc_d)).
    if (!nrow(var_d) == nrow(gc_d)) { stop("Bins don't match, quitting") }
    # Collect info about each chromosome
    #seq_ratios[chr, 'Chr'] = chr
    seq_ratios[chr, 'Length'] = nrow(gc_d)
    seq_ratios[chr, 'GC'] = round((sum(gc_d$GC)/sum(gc_d$BPs)), digits=2)
    seq_ratios[chr, 'Total.Vars'] = sum(var_d[,1:ncol(var_d)], na.rm=T)
    seq_ratios[chr, 'Bins'] = nrow(var_d)
    if (plot)
    {
        plot_file = paste(chrdir, chr, sep="/")
        png(filename=paste(plot_file, "-overview.png", sep=""), bg="white", height=900, width=600)
        par(mfrow=c(2,1))
        plotVariations(all, chr, last_var)
        plotRatios(gc_d, chr)
        dev.off()
        plotVariationsSep(var_d, chr, chrdir)
    }
    ## Total variations per bin; columns 1:7 are assumed to be the
    ## variation classes -- TODO confirm against last_var above.
    all$Total.Vars = rowSums(all[,1:7], na.rm=T)
    freq = table(all$Total.Vars)
    if (names(freq[ freq == max(freq) ]) != '0') { warning( paste(chr, "variation max frequency is not 0") ) }
    # so ignoring 0 which is the start of the poisson
    seq_ratios[chr, 'Max1'] = names(freq[freq == max(freq)])
    freq = freq[2:length(freq)]
    seq_ratios[chr, 'Max2'] = names(freq[freq == max(freq)])
    rm(var_d, gc_d, all)
}
## --- GC PLOT --- ##
## Chromosomes ordered by GC ratio; blue = GC ratio, red = variations per
## bin scaled by 1/100 so both series fit on one axis.
seq_ratios = seq_ratios[order(seq_ratios$GC),]
if (plot)
{
    png(filename=paste(out_dir,"/GC-content", ".png", sep=""), bg="white", height=600, width=1200)
    plot(seq_ratios$GC, ann=F, xaxt='n', type='n', ylim=c(0.01, max(seq_ratios$GC)))
    lines(seq_ratios$GC, ann=F, xaxt='n', col='blue', type='o', pch=19)
    axis(1, at=1:nrow(seq_ratios), lab=rownames(seq_ratios))
    text(seq_ratios$GC, col='blue', pos=1, labels=seq_ratios$GC )
    #text(1:nrow(seq_ratios), rep(0.25, nrow(seq_ratios)), labels=rownames(seq_ratios))
    lines((seq_ratios$Total.Vars/seq_ratios$Length)/100, type='o', col='red', pch=19)
    text((seq_ratios$Total.Vars/seq_ratios$Length)/100, col='red', pos=1, labels=round(seq_ratios$Total.Vars/seq_ratios$Length, 2))
    legend("topleft", legend=c('GC Ratio', 'Variations/Length'), fill=c('blue', 'red') )
    title(main='GC Content Per Chromosome', sub="Subplotted Variations per bin", ylab='GC Ratio')
    dev.off()
}
## --- SNV Frequency --- ##
## One log-frequency curve per chromosome; chrY is skipped.
if(plot)
{
    png(filename=paste(out_dir, "SNV-freq.png", sep="/"), bg="white", height=600, width=600)
    colors = rainbow(length(snv_freq))
    plot(0:100, type='n', ylim=c(0,10), ann=F)
    for (chr in names(snv_freq))
    {
        if (chr == "chrY") next
        i = which(names(snv_freq) == chr)
        lines(log(snv_freq[[chr]]), col=colors[i], type='l')
    }
    legend("topright", legend=names(snv_freq), fill=colors)
    title(main="SNV Frequency Across Genome", ylab="log(SNV count freq)", xlab="Number 1kb fragments")
    dev.off()
}
## --- K tests --- ##
## 5x5 grid of p-value curves (blue = normal fit, red = poisson fit) plus a
## text dump of the rounded test tables to VariationTests.txt.
if (plot)
{
    app=F
    # png(filename=paste(out_dir, "var-ktests.png", sep="/"), bg="white", height=900, width=900)
    par(mfrow=c(5,5))
    for (chr in names(var_tests))
    {
        test = var_tests[[chr]]
        plot(test$norm, col='blue', type='o', ann=F, xaxt='n')
        lines(test$pois, col='red', type='o')
        axis(1, at=1:nrow(test), labels=rownames(test))
        title(main=chr, ylab="p-value")
        ## The first write starts a fresh file (append=app is FALSE for the
        ## first chromosome); subsequent writes append.
        write.table(paste("###", chr, "###"), file=paste(out_dir, "VariationTests.txt", sep="/"), row.names=F, col.names=F, quote=F, append=app)
        write.table(round(test, 4), file=paste(out_dir,"VariationTests.txt", sep="/"), quote=F, col.names=NA, sep="\t", append=T)
        app=T
    }
    # cheap way to get a legend
    plot(0:2, axes=F, ann=F, type='n')
    legend("topleft", legend=colnames(var_tests[[chr]]), col=c('blue', 'red'), fill=c('blue', 'red'), bty='n')
    # dev.off()
}
## NOTE(review): exploratory leftovers below -- the first lapply() does
## nothing, and the second lapply()'s body is fully commented out; both
## are candidates for deletion.
lapply(var_tests, function(x){
})
colors=rainbow(length(var_tests))
plot(var_tests$chr1$norm, ann=F,ylim=c(0,1), main="FF", type='n')
lapply(var_tests, function(x, colors){
#lines(x[['norm']], type='p', col=colors)
# lines(x[['norm']])
})
| /R/variations.R | permissive | hjanime/IGCSA | R | false | false | 5,152 | r | #
# Script outputs the variation frequency plots. Only useful for pretty pictures really.
#
## NOTE(review): pattern=".txt" below is an unanchored regex ('.' matches
## any character); "\\.txt$" is presumably intended.
rm(list=ls())
setwd("~/workspace/IGCSA/R")
source("lib/gc_functions.R")
source("lib/varplots.R")
#args = commandArgs(trailingOnly = TRUE)
#ens_dir = args[1]
#gc_dir = args[2]
data_dir = "~/Data/VariationNormal"
## Master switch for the plotting sections below.
plot=F
ens_dir = paste(data_dir, "Frequencies/1000/Ensembl", sep="/")
var_files = list.files(path=ens_dir, pattern=".txt")
gc_dir = paste(data_dir, "GC/1000", sep="/")
gc_files = list.files(path=gc_dir,pattern=".txt")
out_dir = "~/Analysis/Normal"
## Empty per-chromosome summary table, filled row-by-row in the loop below.
rnames = c('GC', 'Total.Vars', 'Bins')
seq_ratios = data.frame()
seq_ratios = as.data.frame(matrix(nrow=0, ncol=length(rnames)))
colnames(seq_ratios) = rnames
snv_freq = list()
var_tests = list()
## Per-chromosome pass: read variation counts and GC content, run the
## distribution tests and accumulate one summary row per chromosome.
for (i in 1:length(var_files))
{
    file = var_files[i]
    chr = sub(".txt", "", file)
    print(chr)
    chrdir = paste(out_dir, chr, sep="/")
    if (!file.exists(chrdir)) dir.create(chrdir)
    # Variation & gc files
    gc_f = paste(gc_dir, paste(chr, "-gc.txt", sep=""), sep="/")
    var_f = paste(ens_dir, file, sep="/")
    # Data with NA removed
    ## load.data() comes from lib/gc_functions.R -- presumably returns a
    ## list with $vars (per-bin variation counts) and $gc; verify there.
    data = load.data(gc_f, var_f)
    var_d = data$vars
    gc_d = data$gc
    all = cbind(var_d, gc_d)
    ## Columns before 'GC' are the variation classes.
    last_var = which(colnames(all) == 'GC')-1
    var_tests[[chr]] = ks.test.all(var_d[1:last_var], chr)
    snv_freq[[chr]] = table(all[,'SNV'])
    ## In R unary '!' binds looser than '==', so this parses as
    ## !(nrow(var_d) == nrow(gc_d)).
    if (!nrow(var_d) == nrow(gc_d)) { stop("Bins don't match, quitting") }
    # Collect info about each chromosome
    #seq_ratios[chr, 'Chr'] = chr
    seq_ratios[chr, 'Length'] = nrow(gc_d)
    seq_ratios[chr, 'GC'] = round((sum(gc_d$GC)/sum(gc_d$BPs)), digits=2)
    seq_ratios[chr, 'Total.Vars'] = sum(var_d[,1:ncol(var_d)], na.rm=T)
    seq_ratios[chr, 'Bins'] = nrow(var_d)
    if (plot)
    {
        plot_file = paste(chrdir, chr, sep="/")
        png(filename=paste(plot_file, "-overview.png", sep=""), bg="white", height=900, width=600)
        par(mfrow=c(2,1))
        plotVariations(all, chr, last_var)
        plotRatios(gc_d, chr)
        dev.off()
        plotVariationsSep(var_d, chr, chrdir)
    }
    ## Total variations per bin; columns 1:7 are assumed to be the
    ## variation classes -- TODO confirm against last_var above.
    all$Total.Vars = rowSums(all[,1:7], na.rm=T)
    freq = table(all$Total.Vars)
    if (names(freq[ freq == max(freq) ]) != '0') { warning( paste(chr, "variation max frequency is not 0") ) }
    # so ignoring 0 which is the start of the poisson
    seq_ratios[chr, 'Max1'] = names(freq[freq == max(freq)])
    freq = freq[2:length(freq)]
    seq_ratios[chr, 'Max2'] = names(freq[freq == max(freq)])
    rm(var_d, gc_d, all)
}
## --- GC PLOT --- ##
# Chromosomes ordered by GC ratio; one figure overlaying the GC ratio (blue)
# and variations per bin scaled by 1/100 (red).
seq_ratios = seq_ratios[order(seq_ratios$GC),]
if (plot)
{
png(filename=paste(out_dir,"/GC-content", ".png", sep=""), bg="white", height=600, width=1200)
plot(seq_ratios$GC, ann=F, xaxt='n', type='n', ylim=c(0.01, max(seq_ratios$GC)))
lines(seq_ratios$GC, ann=F, xaxt='n', col='blue', type='o', pch=19)
axis(1, at=1:nrow(seq_ratios), lab=rownames(seq_ratios))
text(seq_ratios$GC, col='blue', pos=1, labels=seq_ratios$GC )
#text(1:nrow(seq_ratios), rep(0.25, nrow(seq_ratios)), labels=rownames(seq_ratios))
lines((seq_ratios$Total.Vars/seq_ratios$Length)/100, type='o', col='red', pch=19)
text((seq_ratios$Total.Vars/seq_ratios$Length)/100, col='red', pos=1, labels=round(seq_ratios$Total.Vars/seq_ratios$Length, 2))
legend("topleft", legend=c('GC Ratio', 'Variations/Length'), fill=c('blue', 'red') )
title(main='GC Content Per Chromosome', sub="Subplotted Variations per bin", ylab='GC Ratio')
dev.off()
}
## --- SNV Frequency --- ##
# Log-scaled SNV count frequency per chromosome, one colored line each
# (chrY is skipped).
if(plot)
{
png(filename=paste(out_dir, "SNV-freq.png", sep="/"), bg="white", height=600, width=600)
colors = rainbow(length(snv_freq))
plot(0:100, type='n', ylim=c(0,10), ann=F)
for (chr in names(snv_freq))
{
if (chr == "chrY") next
i = which(names(snv_freq) == chr)
lines(log(snv_freq[[chr]]), col=colors[i], type='l')
}
legend("topright", legend=names(snv_freq), fill=colors)
title(main="SNV Frequency Across Genome", ylab="log(SNV count freq)", xlab="Number 1kb fragments")
dev.off()
}
## --- K tests --- ##
# Per-chromosome plots of the KS test p-values (normal in blue, poisson in
# red) and a tab-separated dump of the test tables to VariationTests.txt.
# The `app` flag makes the first write truncate the file; later writes append.
if (plot)
{
app=F
# png(filename=paste(out_dir, "var-ktests.png", sep="/"), bg="white", height=900, width=900)
par(mfrow=c(5,5))
for (chr in names(var_tests))
{
test = var_tests[[chr]]
plot(test$norm, col='blue', type='o', ann=F, xaxt='n')
lines(test$pois, col='red', type='o')
axis(1, at=1:nrow(test), labels=rownames(test))
title(main=chr, ylab="p-value")
write.table(paste("###", chr, "###"), file=paste(out_dir, "VariationTests.txt", sep="/"), row.names=F, col.names=F, quote=F, append=app)
write.table(round(test, 4), file=paste(out_dir,"VariationTests.txt", sep="/"), quote=F, col.names=NA, sep="\t", append=T)
app=T
}
# cheap way to get a legend
plot(0:2, axes=F, ann=F, type='n')
legend("topleft", legend=colnames(var_tests[[chr]]), col=c('blue', 'red'), fill=c('blue', 'red'), bty='n')
# dev.off()
}
# NOTE(review): dead / unfinished code below -- both lapply() calls have
# empty or fully commented-out bodies and their results are discarded, and
# the plot canvas set up between them is never drawn on. Candidate for
# removal or completion.
lapply(var_tests, function(x){
})
colors=rainbow(length(var_tests))
plot(var_tests$chr1$norm, ann=F,ylim=c(0,1), main="FF", type='n')
lapply(var_tests, function(x, colors){
#lines(x[['norm']], type='p', col=colors)
# lines(x[['norm']])
})
|
# Plot reach aftereffects (without vs. with strategy) for all groups:
# first a shaded confidence polygon (t.interval) per group, then the group
# mean lines on top.
#
# target: 'inline' draws on the current device; 'svg' writes doc/fig/Fig8.svg
#   via svglite and closes that device before returning.
plotReachAftereffects <- function(target='inline') {
if (target == 'svg') {
svglite(file='doc/fig/Fig8.svg', width=4, height=4, system_fonts=list(sans='Arial'))
}
styles <- getStyle()
par(mfrow=c(1,1), mar=c(4,4,2,0.1))
# y range: the rotation magnitude padded by 20% on either side
ylims=c(-.2*max(styles$rotation),max(styles$rotation)+(.2*max(styles$rotation)))
# empty canvas with a dashed grey zero line
plot(c(0.5,2.5),c(0,0),type='l',lty=2,col=rgb(.5,.5,.5),main='reach aftereffects',xlim=c(0.5,2.5),ylim=ylims,bty='n',
xaxt='n',yaxt='n',xlab='strategy use',ylab='reach deviation [°]')
# first pass: draw all confidence polygons so the mean lines end up on top
for (groupno in c(1:length(styles$group))) {
group <- styles$group[groupno]
reachaftereffects <- read.csv(sprintf('data/%s_reachaftereffects.csv',group), stringsAsFactors=FALSE)
# NOTE(review): meanExc/meanInc are computed but unused in this loop
meanExc <- mean(reachaftereffects$exclusive)
meanInc <- mean(reachaftereffects$inclusive)
coord.x <- c(1,1,2,2)
coord.y <- c(t.interval(reachaftereffects$exclusive),rev(t.interval(reachaftereffects$inclusive)))
polygon(coord.x, coord.y, col=as.character(styles$color_trans[groupno]), border=NA)
}
# second pass: group mean lines
for (groupno in c(1:length(styles$group))) {
group <- styles$group[groupno]
# NOTE(review): offset is computed but never applied to the x coordinates
offset <- (groupno - ((length(styles$group) - 1) / 2)) * .035
reachaftereffects <- read.csv(sprintf('data/%s_reachaftereffects.csv',group), stringsAsFactors=FALSE)
meanExc <- mean(reachaftereffects$exclusive)
meanInc <- mean(reachaftereffects$inclusive)
# NOTE(review): 'lw' is not a graphical parameter of lines(); 'lwd' is
# probably intended here (it works in legend() below only via partial
# matching of legend's lwd argument)
lines(c(1,2),c(meanExc,meanInc),col=as.character(styles$color_solid[groupno]),lty=styles$linestyle[groupno],lw=2)
}
axis(side=1, at=c(1,2), labels=c('without strategy','with strategy'),cex.axis=0.85)
if (max(styles$rotation) == 30) {
axis(side=2, at=c(0,10,20,30),cex.axis=0.85)
}
# legend(0.5,max(styles$rotation)*(7/6),styles$label,col=as.character(styles$color),lty=styles$linestyle,bty='n',cex=0.85)
legend(1.2,13,styles$label,col=as.character(styles$color_solid),lw=2,lty=styles$linestyle,bty='n',cex=0.85)
if (target == 'svg') {
dev.off()
}
}
# Assemble a long-format data frame for a mixed ANOVA on reach aftereffects:
# one row per participant x strategy condition, with between-subject factors
# agegroup and instructed and the within-subject factor strategy.
#
# styles: data frame from getStyle(); only styles$group is used here.
# Returns a data.frame with columns agegroup, instructed, participant,
# strategy, reachdeviation (factor columns set at the end).
getRAE4ANOVA <- function(styles) {
  # collect one data frame per group x strategy and bind once at the end,
  # instead of growing five parallel vectors with c() inside the loop
  blocks <- list()
  # participant IDs must be unique across groups, so keep a running offset:
  startingID <- 0
  for (groupno in seq_along(styles$group)) {
    group <- styles$group[groupno]
    # group naming convention: groups starting with 'aging' are the older
    # cohort; groups containing 'explicit' received instructions
    if (substr(group, 1, 5) == 'aging') {
      thisagegroup <- 'older'
    } else {
      thisagegroup <- 'younger'
    }
    thisinstructed <- grepl('explicit', group)
    df <- read.csv(sprintf('data/%s_reachaftereffects.csv', group), stringsAsFactors = FALSE)
    # we need to know the number of participants to replicate some values:
    N <- dim(df)[1]
    for (thisstrategy in c('exclusive', 'inclusive')) {
      blocks[[length(blocks) + 1]] <- data.frame(
        agegroup       = rep(thisagegroup, N),
        instructed     = rep(thisinstructed, N),
        participant    = startingID:(startingID + N - 1),
        strategy       = rep(thisstrategy, N),
        reachdeviation = df[, thisstrategy],
        stringsAsFactors = FALSE
      )
    }
    startingID <- startingID + N
  }
  # put it in one data frame:
  RAEaov <- do.call(rbind, blocks)
  rownames(RAEaov) <- NULL
  # set relevant columns as factors:
  RAEaov$agegroup <- as.factor(RAEaov$agegroup)
  RAEaov$instructed <- as.factor(RAEaov$instructed)
  RAEaov$strategy <- as.factor(RAEaov$strategy)
  return(RAEaov)
}
# Run the mixed ANOVA on reach aftereffects: within-subject factor `strategy`
# (exclusive vs inclusive), between-subject factors `instructed` and
# `agegroup`. Prints the ezANOVA result table.
RAE.ANOVA <- function() {
styles <- getStyle()
RAE4aov <- getRAE4ANOVA(styles)
#learning curve ANOVA's
# for ez, case ID should be a factor:
RAE4aov$participant <- as.factor(RAE4aov$participant)
print(ezANOVA(data=RAE4aov, wid=participant, dv=reachdeviation, within=strategy, between=c(instructed, agegroup),type=3))
}
# Run the mixed ANOVA on no-cursor reach deviations: within-subject factor
# `training` (aligned vs rotated), between-subject factors `instructed` and
# `agegroup`. Prints the ezANOVA result table.
NoCursorANOVA <- function() {
styles <- getStyle()
NC4aov <- getNoCursors4ANOVA(styles)
# for ez, the case identifier should be a factor:
NC4aov$participant <- as.factor(NC4aov$participant)
print(ezANOVA(data=NC4aov, wid=participant, dv=reachdeviation, within=training, between=c(instructed, agegroup),type=3))
}
# Assemble a long-format data frame for a mixed ANOVA on no-cursor reaches:
# one row per participant x training condition (aligned vs rotated), with
# between-subject factors agegroup and instructed.
#
# styles: data frame from getStyle(); only styles$group is used here.
getNoCursors4ANOVA <- function(styles) {
  # one entry per group, bound once at the end; this replaces the original
  # NA-placeholder + is.data.frame() accumulation idiom
  group_frames <- vector("list", length(styles$group))
  # loop through groups to collect their data:
  for (groupno in seq_along(styles$group)) {
    group <- styles$group[groupno]
    # group naming convention: groups starting with 'aging' are the older
    # cohort; groups containing 'explicit' received instructions
    if (substr(group, 1, 5) == 'aging') {
      thisagegroup <- 'older'
    } else {
      thisagegroup <- 'younger'
    }
    thisinstructed <- grepl('explicit', group)
    df <- read.csv(sprintf('data/%s_nocursors.csv', group), stringsAsFactors = FALSE)
    # reshape wide (aligned / exclusive columns) to long (training factor):
    AL.NC <- df[, c('participant', 'aligned')]
    colnames(AL.NC)[2] <- 'reachdeviation'
    AL.NC$training <- 'aligned'
    RO.NC <- df[, c('participant', 'exclusive')]
    colnames(RO.NC)[2] <- 'reachdeviation'
    RO.NC$training <- 'rotated'
    df <- rbind(AL.NC, RO.NC)
    df$agegroup <- thisagegroup
    df$instructed <- thisinstructed
    group_frames[[groupno]] <- df
  }
  NC4aov <- do.call(rbind, group_frames)
  rownames(NC4aov) <- NULL
  NC4aov$instructed <- as.factor(NC4aov$instructed)
  NC4aov$agegroup <- as.factor(NC4aov$agegroup)
  NC4aov$training <- as.factor(NC4aov$training)
  return(NC4aov)
}
| /reach_aftereffects.R | no_license | chadv9999/The-effect-of-age-on-visuomotor-learning-processes | R | false | false | 5,572 | r |
plotReachAftereffects <- function(target='inline') {
if (target == 'svg') {
svglite(file='doc/fig/Fig8.svg', width=4, height=4, system_fonts=list(sans='Arial'))
}
styles <- getStyle()
par(mfrow=c(1,1), mar=c(4,4,2,0.1))
ylims=c(-.2*max(styles$rotation),max(styles$rotation)+(.2*max(styles$rotation)))
plot(c(0.5,2.5),c(0,0),type='l',lty=2,col=rgb(.5,.5,.5),main='reach aftereffects',xlim=c(0.5,2.5),ylim=ylims,bty='n',
xaxt='n',yaxt='n',xlab='strategy use',ylab='reach deviation [°]')
for (groupno in c(1:length(styles$group))) {
group <- styles$group[groupno]
reachaftereffects <- read.csv(sprintf('data/%s_reachaftereffects.csv',group), stringsAsFactors=FALSE)
meanExc <- mean(reachaftereffects$exclusive)
meanInc <- mean(reachaftereffects$inclusive)
coord.x <- c(1,1,2,2)
coord.y <- c(t.interval(reachaftereffects$exclusive),rev(t.interval(reachaftereffects$inclusive)))
polygon(coord.x, coord.y, col=as.character(styles$color_trans[groupno]), border=NA)
}
for (groupno in c(1:length(styles$group))) {
group <- styles$group[groupno]
offset <- (groupno - ((length(styles$group) - 1) / 2)) * .035
reachaftereffects <- read.csv(sprintf('data/%s_reachaftereffects.csv',group), stringsAsFactors=FALSE)
meanExc <- mean(reachaftereffects$exclusive)
meanInc <- mean(reachaftereffects$inclusive)
lines(c(1,2),c(meanExc,meanInc),col=as.character(styles$color_solid[groupno]),lty=styles$linestyle[groupno],lw=2)
}
axis(side=1, at=c(1,2), labels=c('without strategy','with strategy'),cex.axis=0.85)
if (max(styles$rotation) == 30) {
axis(side=2, at=c(0,10,20,30),cex.axis=0.85)
}
# legend(0.5,max(styles$rotation)*(7/6),styles$label,col=as.character(styles$color),lty=styles$linestyle,bty='n',cex=0.85)
legend(1.2,13,styles$label,col=as.character(styles$color_solid),lw=2,lty=styles$linestyle,bty='n',cex=0.85)
if (target == 'svg') {
dev.off()
}
}
getRAE4ANOVA <- function(styles) {
agegroup <- c()
instructed <- c()
participant <- c()
strategy <- c()
reachdeviation <- c()
# keeping count of unique participants:
startingID <- 0
for (groupno in c(1:length(styles$group))) {
group <- styles$group[groupno]
# set up some basic descriptors that apply to the group:
if (substr(group, 1, 5) == 'aging') {
thisagegroup <- 'older'
} else {
thisagegroup <- 'younger'
}
thisinstructed <- grepl('explicit', group)
df <- read.csv(sprintf('data/%s_reachaftereffects.csv',group),stringsAsFactors=F)
# we need to know the number of participants to replicate some values:
N <- dim(df)[1]
for (thisstrategy in c('exclusive','inclusive')) {
agegroup <- c(agegroup, rep(thisagegroup, N))
instructed <- c(instructed, rep(thisinstructed, N))
participant <- c(participant, c(startingID : (startingID + N - 1)))
strategy <- c(strategy, rep(thisstrategy, N))
reachdeviation <- c(reachdeviation, df[,thisstrategy])
}
startingID <- startingID + N
}
# put it in a data frame:
RAEaov <- data.frame(agegroup, instructed, participant, strategy, reachdeviation)
# set relevant columns as factors:
RAEaov$agegroup <- as.factor(RAEaov$agegroup)
RAEaov$instructed <- as.factor(RAEaov$instructed)
RAEaov$strategy <- as.factor(RAEaov$strategy)
return(RAEaov)
}
RAE.ANOVA <- function() {
styles <- getStyle()
RAE4aov <- getRAE4ANOVA(styles)
#learning curve ANOVA's
# for ez, case ID should be a factor:
RAE4aov$participant <- as.factor(RAE4aov$participant)
print(ezANOVA(data=RAE4aov, wid=participant, dv=reachdeviation, within=strategy, between=c(instructed, agegroup),type=3))
}
NoCursorANOVA <- function() {
styles <- getStyle()
NC4aov <- getNoCursors4ANOVA(styles)
NC4aov$participant <- as.factor(NC4aov$participant)
print(ezANOVA(data=NC4aov, wid=participant, dv=reachdeviation, within=training, between=c(instructed, agegroup),type=3))
}
getNoCursors4ANOVA <- function(styles) {
# placeholder for data frame:
NC4aov <- NA
# loop through groups to collect their data:
for (groupno in c(1:length(styles$group))) {
group <- styles$group[groupno]
# set up some basic descriptors that apply to the group:
if (substr(group, 1, 5) == 'aging') {
thisagegroup <- 'older'
} else {
thisagegroup <- 'younger'
}
thisinstructed <- grepl('explicit', group)
df <- read.csv(sprintf('data/%s_nocursors.csv',group),stringsAsFactors=F)
AL.NC <- df[,c('participant','aligned')]
colnames(AL.NC)[2] <- 'reachdeviation'
AL.NC$training <- 'aligned'
RO.NC <- df[,c('participant','exclusive')]
colnames(RO.NC)[2] <- 'reachdeviation'
RO.NC$training <- 'rotated'
df <- rbind(AL.NC, RO.NC)
df$agegroup <- thisagegroup
df$instructed <- thisinstructed
if (is.data.frame(NC4aov)) {
NC4aov <- rbind(NC4aov, df)
} else {
NC4aov <- df
}
}
NC4aov$instructed <- as.factor(NC4aov$instructed)
NC4aov$agegroup <- as.factor(NC4aov$agegroup)
NC4aov$training <- as.factor(NC4aov$training)
return(NC4aov)
}
|
\name{findInterval_right.closed}
\alias{findInterval_right.closed}
\title{Find Interval Numbers or Indices}
\usage{
findInterval_right.closed(x, vec,
leftmost.closed = FALSE, all.inside = FALSE)
}
\arguments{
\item{x}{numeric.}
\item{vec}{numeric, sorted (weakly) increasingly, of
length \code{N}, say.}
\item{leftmost.closed}{logical; if true, the leftmost
interval,\code{vec[1] .. vec[2]} is treated as
\emph{closed}, see below.}
\item{all.inside}{logical; if true, the returned indices
are coerced into \code{1,\dots,N-1}, i.e., \code{0} is
mapped to \code{1} and \code{N} to \code{N-1}.}
}
\value{
vector of length \code{length(x)} with values in
\code{0:N} (and \code{NA}) where \code{N <- length(vec)},
or values coerced to \code{1:(N-1)} if and only if
\code{all.inside = TRUE} (equivalently coercing all x
values \emph{inside} the intervals). Note that
\code{\link{NA}}s are propagated from \code{x}, and
\code{\link{Inf}} values are allowed in both \code{x} and
\code{vec}.
}
\description{
Given a vector of non-decreasing breakpoints in
\code{vec}, find the interval containing each element of
\code{x}; i.e., if \code{i <-
findInterval_right.closed(x,v)}, for each index \code{j}
in \code{x} \eqn{v_{i_j} < x_j \le v_{i_j + 1}}{v[i[j]] <
x[j] \le v[i[j] + 1]} where \eqn{v_0 := -\infty}{v[0] :=
- Inf}, \eqn{v_{N+1} := +\infty}{v[N+1] := + Inf}, and
\code{N <- length(v)}. At the two boundaries, the
returned index may differ by 1, depending on the optional
arguments \code{leftmost.closed} and \code{all.inside}
}
\details{
The function \code{findInterval_right.closed} finds, for
each element of \code{x}, the index of the interval of
\code{vec} that contains it; \code{vec} must be
non-decreasing. Conceptually this is equivalent to
\code{apply(outer(x, vec, ">"), 1, sum)}.
When \code{leftmost.closed = TRUE}, the result for
\code{x[j] = vec[1]} (\eqn{ = \min vec}{ = min(vec)}), is
\code{1} as for all other values in the first interval.
}
\seealso{
\code{\link{findInterval}}
}
| /man/findInterval_right.closed.Rd | no_license | kuremon/lazyr | R | false | false | 2,115 | rd | \name{findInterval_right.closed}
\alias{findInterval_right.closed}
\title{Find Interval Numbers or Indices}
\usage{
findInterval_right.closed(x, vec,
leftmost.closed = FALSE, all.inside = FALSE)
}
\arguments{
\item{x}{numeric.}
\item{vec}{numeric, sorted (weakly) increasingly, of
length \code{N}, say.}
\item{leftmost.closed}{logical; if true, the leftmost
interval,\code{vec[1] .. vec[2]} is treated as
\emph{closed}, see below.}
\item{all.inside}{logical; if true, the returned indices
are coerced into \code{1,\dots,N-1}, i.e., \code{0} is
mapped to \code{1} and \code{N} to \code{N-1}.}
}
\value{
vector of length \code{length(x)} with values in
\code{0:N} (and \code{NA}) where \code{N <- length(vec)},
or values coerced to \code{1:(N-1)} if and only if
\code{all.inside = TRUE} (equivalently coercing all x
values \emph{inside} the intervals). Note that
\code{\link{NA}}s are propagated from \code{x}, and
\code{\link{Inf}} values are allowed in both \code{x} and
\code{vec}.
}
\description{
Given a vector of non-decreasing breakpoints in
\code{vec}, find the interval containing each element of
\code{x}; i.e., if \code{i <-
findInterval_right.closed(x,v)}, for each index \code{j}
in \code{x} \eqn{v_{i_j} < x_j \le v_{i_j + 1}}{v[i[j]] <
x[j] \le v[i[j] + 1]} where \eqn{v_0 := -\infty}{v[0] :=
- Inf}, \eqn{v_{N+1} := +\infty}{v[N+1] := + Inf}, and
\code{N <- length(v)}. At the two boundaries, the
returned index may differ by 1, depending on the optional
arguments \code{leftmost.closed} and \code{all.inside}
}
\details{
The function \code{findInterval_right.closed} finds, for
each element of \code{x}, the index of the interval of
\code{vec} that contains it; \code{vec} must be
non-decreasing. Conceptually this is equivalent to
\code{apply(outer(x, vec, ">"), 1, sum)}.
When \code{leftmost.closed = TRUE}, the result for
\code{x[j] = vec[1]} (\eqn{ = \min vec}{ = min(vec)}), is
\code{1} as for all other values in the first interval.
}
\seealso{
\code{\link{findInterval}}
}
|
#' Non-Gaussian maximum likelihood identification of SVAR models
#'
#' Given an estimated VAR model, this function applies identification by means of a non-Gaussian likelihood for the structural impact matrix B of the corresponding SVAR model
#' \deqn{y_t=c_t+A_1 y_{t-1}+...+A_p y_{t-p}+u_t =c_t+A_1 y_{t-1}+...+A_p y_{t-p}+B \epsilon_t.}
#' Matrix B corresponds to the unique decomposition of the least squares covariance matrix \eqn{\Sigma_u=B B'} if the vector of structural shocks \eqn{\epsilon_t} contains at most one Gaussian shock (Comon, 94).
#' A likelihood function of independent t-distributed structural shocks \eqn{\epsilon_t=B^{-1}u_t} is maximized with respect to the entries of B and the degrees of freedom of the t-distribution (Lanne et al., 2017).
#'
#' @param x An object of class 'vars', 'vec2var', 'nlVar'. Estimated VAR object
#' @param stage3 Logical. If stage3="TRUE", the VAR parameters are estimated via non-gaussian maximum likelihood (computationally demanding)
#' @param restriction_matrix Matrix. A matrix containing presupposed entries for matrix B, NA if no restriction is imposed (entries to be estimated)
#' @return A list of class "svars" with elements
#' \item{B}{Estimated structural impact matrix B, i.e. unique decomposition of the covariance matrix of reduced form errors}
#' \item{sigma}{Estimated scale of the standardized matrix B_stand, i.e. \eqn{B=B_stand*diag(\sigma_1,...,\sigma_K)}}
#' \item{sigma_SE}{Standard errors of the scale}
#' \item{df}{Estimated degrees of freedom}
#' \item{df_SE}{Standard errors of the degrees of freedom}
#' \item{Fish}{Observed Fisher information matrix}
#' \item{A_hat}{Estimated VAR parameter via ML}
#' \item{B_stand}{Estimated standardized structural impact matrix}
#' \item{B_stand_SE}{Standard errors of standardized matrix B_stand}
#' \item{Lik}{Function value of likelihood}
#' \item{method}{Method applied for identification}
#' \item{n}{Number of observations}
#' \item{type}{Type of the VAR model, e.g. 'const'}
#' \item{y}{Data matrix}
#' \item{p}{Number of lags}
#' \item{K}{Dimension of the VAR}
#' \item{restrictions}{Number of specified restrictions}
#' \item{restriction_matrix}{Specified restriction matrix}
#' \item{stage3}{Logical, whether Stage 3 is performed}
#'
#'@references Lanne, M., Meitz, M., Saikkonen, P., 2017. Identification and estimation of non-Gaussian structural vector autoregressions. J. Econometrics 196 (2), 288-304.\cr
#'Comon, P., 1994. Independent component analysis, A new concept?, Signal Processing, 36, 287-314
#'
#' @seealso For alternative identification approaches see \code{\link{id.st}}, \code{\link{id.cvm}}, \code{\link{id.dc}} or \code{\link{id.cv}}
#'
#' @examples
#' \donttest{
#' # data contains quarterly observations from 1965Q1 to 2008Q3
#' # x = output gap
#' # pi = inflation
#' # i = interest rates
#' set.seed(23211)
#' v1 <- vars::VAR(USA, lag.max = 10, ic = "AIC" )
#' x1 <- id.ngml(v1)
#' summary(x1)
#'
#' # switching columns according to sign pattern
#' x1$B <- x1$B[,c(3,2,1)]
#' x1$B[,3] <- x1$B[,3]*(-1)
#'
#' # impulse response analysis
#' i1 <- irf(x1, n.ahead = 30)
#' plot(i1, scales = 'free_y')
#' }
#' @importFrom tsDyn VARrep
#' @export
#------------------------------------------------------#
## Identification via non-Gaussian maximum likelihood ##
#------------------------------------------------------#
id.ngml <- function(x, stage3 = FALSE, restriction_matrix = NULL){
# Pre-declare the names that get_var_objects() populates (also silences
# R CMD check "no visible binding" notes).
u <- Tob <- p <- k <- residY <- coef_x <- yOut <- type <- y <- NULL
# NOTE(review): get_var_objects() appears to assign u, Tob, p, k, residY,
# coef_x, yOut, type and y into this function's frame as a side effect --
# confirm against its definition.
get_var_objects(x)
# calculating the covariance matrix
Sigma_hat <- crossprod(residY)/(Tob-1-k*p)
if(!is.null(restriction_matrix)){
# With restrictions: fit both the unrestricted and the restricted model so
# a likelihood-ratio test of the restrictions can be reported.
resultUnrestricted <- identifyNGML(x = x, coef_x = coef_x, Sigma_hat = Sigma_hat, u = u, k = k, p = p, Tob = Tob, yOut = yOut, type = type,
stage3 = stage3, restriction_matrix = NULL, y = y)
result <- identifyNGML(x = x, coef_x = coef_x, Sigma_hat = Sigma_hat, u = u, k = k, p = p, Tob = Tob, yOut = yOut, type = type,
stage3 = stage3, restriction_matrix = restriction_matrix, y = y)
# LR statistic: twice the likelihood difference, chi-squared with as many
# degrees of freedom as there are restrictions
lRatioTestStatistic = 2 * (resultUnrestricted$Lik - result$Lik)
pValue = round(1 - pchisq(lRatioTestStatistic, result$restrictions), 4)
lRatioTest <- data.frame(testStatistic = lRatioTestStatistic, p.value = pValue)
rownames(lRatioTest) <- ""
colnames(lRatioTest) <- c("Test statistic", "p-value")
result$lRatioTest <- lRatioTest
}else{
# no restrictions: single unrestricted fit
# (the reassignment below is redundant -- restriction_matrix is already NULL)
restriction_matrix <- NULL
result <- identifyNGML(x = x, coef_x = coef_x, Sigma_hat = Sigma_hat, u = u, k = k, p = p, Tob = Tob, yOut = yOut, type = type,
stage3 = stage3, restriction_matrix = restriction_matrix, y = y)
}
class(result) <- "svars"
return(result)
}
| /R/id.ngml.R | permissive | AlexanderRitz/svars | R | false | false | 4,784 | r | #' Non-Gaussian maximum likelihood identification of SVAR models
#'
#' Given an estimated VAR model, this function applies identification by means of a non-Gaussian likelihood for the structural impact matrix B of the corresponding SVAR model
#' \deqn{y_t=c_t+A_1 y_{t-1}+...+A_p y_{t-p}+u_t =c_t+A_1 y_{t-1}+...+A_p y_{t-p}+B \epsilon_t.}
#' Matrix B corresponds to the unique decomposition of the least squares covariance matrix \eqn{\Sigma_u=B B'} if the vector of structural shocks \eqn{\epsilon_t} contains at most one Gaussian shock (Comon, 94).
#' A likelihood function of independent t-distributed structural shocks \eqn{\epsilon_t=B^{-1}u_t} is maximized with respect to the entries of B and the degrees of freedom of the t-distribution (Lanne et al., 2017).
#'
#' @param x An object of class 'vars', 'vec2var', 'nlVar'. Estimated VAR object
#' @param stage3 Logical. If stage3="TRUE", the VAR parameters are estimated via non-gaussian maximum likelihood (computationally demanding)
#' @param restriction_matrix Matrix. A matrix containing presupposed entries for matrix B, NA if no restriction is imposed (entries to be estimated)
#' @return A list of class "svars" with elements
#' \item{B}{Estimated structural impact matrix B, i.e. unique decomposition of the covariance matrix of reduced form errors}
#' \item{sigma}{Estimated scale of the standardized matrix B_stand, i.e. \eqn{B=B_stand*diag(\sigma_1,...,\sigma_K)}}
#' \item{sigma_SE}{Standard errors of the scale}
#' \item{df}{Estimated degrees of freedom}
#' \item{df_SE}{Standard errors of the degrees of freedom}
#' \item{Fish}{Observed Fisher information matrix}
#' \item{A_hat}{Estimated VAR parameter via ML}
#' \item{B_stand}{Estimated standardized structural impact matrix}
#' \item{B_stand_SE}{Standard errors of standardized matrix B_stand}
#' \item{Lik}{Function value of likelihood}
#' \item{method}{Method applied for identification}
#' \item{n}{Number of observations}
#' \item{type}{Type of the VAR model, e.g. 'const'}
#' \item{y}{Data matrix}
#' \item{p}{Number of lags}
#' \item{K}{Dimension of the VAR}
#' \item{restrictions}{Number of specified restrictions}
#' \item{restriction_matrix}{Specified restriction matrix}
#' \item{stage3}{Logical, whether Stage 3 is performed}
#'
#'@references Lanne, M., Meitz, M., Saikkonen, P., 2017. Identification and estimation of non-Gaussian structural vector autoregressions. J. Econometrics 196 (2), 288-304.\cr
#'Comon, P., 1994. Independent component analysis, A new concept?, Signal Processing, 36, 287-314
#'
#' @seealso For alternative identification approaches see \code{\link{id.st}}, \code{\link{id.cvm}}, \code{\link{id.dc}} or \code{\link{id.cv}}
#'
#' @examples
#' \donttest{
#' # data contains quarterly observations from 1965Q1 to 2008Q3
#' # x = output gap
#' # pi = inflation
#' # i = interest rates
#' set.seed(23211)
#' v1 <- vars::VAR(USA, lag.max = 10, ic = "AIC" )
#' x1 <- id.ngml(v1)
#' summary(x1)
#'
#' # switching columns according to sign pattern
#' x1$B <- x1$B[,c(3,2,1)]
#' x1$B[,3] <- x1$B[,3]*(-1)
#'
#' # impulse response analysis
#' i1 <- irf(x1, n.ahead = 30)
#' plot(i1, scales = 'free_y')
#' }
#' @importFrom tsDyn VARrep
#' @export
#------------------------------------------------------#
## Identification via non-Gaussian maximum likelihood ##
#------------------------------------------------------#
id.ngml <- function(x, stage3 = FALSE, restriction_matrix = NULL){
u <- Tob <- p <- k <- residY <- coef_x <- yOut <- type <- y <- NULL
get_var_objects(x)
# calculating the covariance matrix
Sigma_hat <- crossprod(residY)/(Tob-1-k*p)
if(!is.null(restriction_matrix)){
resultUnrestricted <- identifyNGML(x = x, coef_x = coef_x, Sigma_hat = Sigma_hat, u = u, k = k, p = p, Tob = Tob, yOut = yOut, type = type,
stage3 = stage3, restriction_matrix = NULL, y = y)
result <- identifyNGML(x = x, coef_x = coef_x, Sigma_hat = Sigma_hat, u = u, k = k, p = p, Tob = Tob, yOut = yOut, type = type,
stage3 = stage3, restriction_matrix = restriction_matrix, y = y)
lRatioTestStatistic = 2 * (resultUnrestricted$Lik - result$Lik)
pValue = round(1 - pchisq(lRatioTestStatistic, result$restrictions), 4)
lRatioTest <- data.frame(testStatistic = lRatioTestStatistic, p.value = pValue)
rownames(lRatioTest) <- ""
colnames(lRatioTest) <- c("Test statistic", "p-value")
result$lRatioTest <- lRatioTest
}else{
restriction_matrix <- NULL
result <- identifyNGML(x = x, coef_x = coef_x, Sigma_hat = Sigma_hat, u = u, k = k, p = p, Tob = Tob, yOut = yOut, type = type,
stage3 = stage3, restriction_matrix = restriction_matrix, y = y)
}
class(result) <- "svars"
return(result)
}
|
context("ecr main function")
# Convenience constructor for the ECR control object used by the tests below:
# a float-representation control with a maximum-iterations stopping condition
# and a greedy survival selector.
setUpControlObject = function(n.population,
  n.offspring,
  survival.strategy = "plus",
  n.elite = 1L,
  n.mating.pool = round(n.population / 2),
  max.iter = 60L) {
  base.control = setupECRControl(
    n.population = n.population,
    n.offspring = n.offspring,
    survival.strategy = survival.strategy,
    n.elite = n.elite,
    representation = "float",
    monitor = NULL,
    stopping.conditions = list(setupMaximumIterationsTerminator(max.iter = max.iter))
  )
  setupEvolutionaryOperators(
    base.control,
    survival.selector = setupGreedySelector()
  )
}
# Sweep population size, offspring count and survival strategy on a 2d
# sphere function and check that the EA approximates the optimum (0 at the
# origin) and that result accessors/printers work.
test_that("ecr works with simple soo function", {
obj.fun = smoof::makeSphereFunction(dimensions = 2L)
for (n.population in c(15, 30)) {
for (n.offspring in c(15, 30)) {
for (survival.strategy in c("plus", "comma")) {
# comma selection needs lambda = mu in this setup
if (survival.strategy == "comma") {
n.offspring = n.population
}
control = setUpControlObject(n.population, n.offspring, survival.strategy)
res = doTheEvolution(obj.fun, control = control)
expect_output(print(control), regexp = "CONTROL OBJECT")
# check result
expect_false(is.null(res))
expect_true(res$best.value < 0.1,
info = sprintf("Did not approximate optimal value with params mu: %i, lambda: %i, strategy: %s",
n.population, n.offspring, survival.strategy))
expect_true(all(res$best.param < 0.1),
info = sprintf("Did not approximate optimal params with params mu: %i, lambda: %i, strategy: %s",
n.population, n.offspring, survival.strategy))
expect_output(print(res), regexp = "EA applied")
expect_true(getGenerations(res) > 0L)
expect_true(getEvaluations(res) > 0L)
}
}
}
})
# Minimization is the default; verify that an objective tagged
# minimize = FALSE is actually maximized (optimum value 0 at x = 0).
test_that("ecr works for maximization", {
obj.fun = makeSingleObjectiveFunction(
name = "maximize me",
fn = function(x) -sum(x^2),
par.set = makeNumericParamSet("x", len = 1L, lower = -10, upper = 10),
minimize = FALSE # we want to maximize here
)
control = setupECRControl(
n.population = 10L,
n.offspring = 10L,
survival.strategy = "plus",
stopping.conditions = list(setupMaximumIterationsTerminator(max.iter = 50L)),
monitor = NULL,
representation = "float"
)
res = doTheEvolution(obj.fun, control = control)
expect_true(abs(res$best.value - 0) < 0.05)
})
# Run the EA on the OneMin problem (minimize the number of ones in a bit
# string) for several mu/lambda settings and check that the exact optimum
# (the all-zero string) is found.
test_that("ecr works on binary representations", {
  n.params = 10L
  max.iter = 50L
  obj.fun = makeOneMinFunction(dimensions = n.params)
  for (n.population in c(10, 15)) {
    for (n.offspring in c(10, 15)) {
      # list() rather than c() so each operator object is iterated intact
      for (mutator in list(setupBitFlipMutator())) {
        control = setupECRControl(
          n.population = n.population,
          n.offspring = n.offspring,
          survival.strategy = "plus",
          stopping.conditions = list(setupMaximumIterationsTerminator(max.iter = max.iter)),
          monitor = NULL,
          # FIX: the original had a trailing comma after this argument, which
          # creates an empty argument in the call and makes R signal an
          # "argument is empty" / missing-argument error at evaluation time.
          representation = "binary"
        )
        control = setupEvolutionaryOperators(
          control,
          mutator = mutator
        )
        res = doTheEvolution(obj.fun, control = control)
        # check results
        expect_false(is.null(res))
        expect_equal(res$best.value, 0,
          info = sprintf("Did not find OneMin minimum with params mu: %i, lambda: %i, strategy: %s, mutator: %s",
            n.population, n.offspring, "plus", getOperatorName(mutator)))
        expect_true(all(res$best.param == 0),
          info = sprintf("Did not find OneMin minimum with params mu: %i, lambda: %i, strategy: %s, mutator: %s",
            n.population, n.offspring, "plus", getOperatorName(mutator)))
      }
    }
  }
})
# Extra arguments to the objective function can be forwarded via more.args;
# here `shift` offsets the sphere function so the optimal value equals shift.
test_that("ecr works with additional arguments", {
obj.fun = makeSingleObjectiveFunction(
fn = function(x, shift = 100L) {
sum(x^2) + shift
},
par.set = makeNumericParamSet("x", lower = -10, upper = 10, len = 1L)
)
control = setupECRControl(
n.population = 10L,
n.offspring = 5L,
representation = "float",
survival.strategy = "plus",
monitor = NULL,
stopping.conditions = list(setupMaximumIterationsTerminator(max.iter = 50L))
)
res = doTheEvolution(obj.fun, control, more.args = list(shift = 1000))
# with shift = 1000 the optimum is 1000 at x = 0
expect_true(res$best.value < 1000.1)
expect_true(res$best.param < 0.1)
})
# The objective counts inversions in a permutation, so the minimum (0) is
# attained exactly by the sorted sequence. Check that combinations of
# permutation mutators and recombinators solve it.
test_that("ecr works on permutation genomes", {
# defs
n.params = 5L
# NOTE(review): max.iter is defined but the stopping condition below uses
# the literal 50L instead
max.iter = 50L
# objective
obj.fun = makeSingleObjectiveFunction(
fn = function(x) {
CI = 0
for (i in seq(length(x)-1)) {
# count how many later elements the current head exceeds (inversions)
CI = CI + sum(x[1] > x[-1])
x = x[-1]
}
return(CI)
},
par.set = makeParamSet(
makeIntegerVectorParam(len = n.params, id = "x", lower = 1, upper = n.params)
),
name = "Sorting"
)
control = setupECRControl(
n.population = 5L,
n.offspring = 5L,
representation = "permutation",
survival.strategy = "plus",
monitor = NULL,
stopping.conditions = list(setupMaximumIterationsTerminator(max.iter = 50L))
)
# check it for a selection of mutators for permutations
for (mutatorGenerator in c(setupSwapMutator, setupInversionMutator, setupInsertionMutator)) {
for (recombinatorGenerator in c(setupNullRecombinator, setupPMXRecombinator)) {
control = setupEvolutionaryOperators(
control,
mutator = mutatorGenerator(),
recombinator = recombinatorGenerator()
)
res = doTheEvolution(obj.fun, control = control)
# check results
expect_false(is.null(res))
expect_equal(res$best.value, 0,
info = sprintf("Did not find correct sorting with mutator '%s' and recombinator '%s'.",
getOperatorName(control$mutator),
getOperatorName(control$recombinator)
)
)
}
}
})
# Regression check: the optimum lies on the boundary of the box constraints;
# with a small mutation standard deviation the EA should still reach it.
test_that("ecr finds optimum if is is located on the edge of the search space", {
fn = makeSingleObjectiveFunction(
name = "linear",
par.set = makeNumericParamSet("x", len = 2L, lower = 0, upper = 1),
# optimum is in (0, 0)
fn = function(x) sum(x)
)
# initialize control object
control = setupECRControl(
n.population = 30L,
n.offspring = 10L,
survival.strategy = "plus",
representation = "float",
monitor = NULL,
stopping.conditions = setupTerminators(max.iter = 50L)
)
control = setupEvolutionaryOperators(control, mutator = setupGaussMutator(sdev = 0.05))
res = doTheEvolution(fn, control = control)
expect_true(res$best.value < 0.1)
expect_true(all(res$best.param < 0.1))
})
# doTheEvolution must reject an initial population larger than n.population.
test_that("ecr can handle initial populations", {
  fn = makeSphereFunction(2L)
  initial.population = list(c(1, 1), c(2, 2), c(3, 3))
  control = setupECRControl(
    n.population = 3L,
    n.offspring = 1L,
    representation = "float",
    monitor = NULL,
    stopping.conditions = setupTerminators(max.iter = 1L)
  )
  # stop if initial population is too large
  # FIX: the original c(initial.population, c(2, 2.5)) appended the scalars
  # 2 and 2.5 as two separate (malformed, 1d) individuals; wrapping the extra
  # individual in list() adds a single well-formed 2d point instead.
  expect_error(doTheEvolution(fn, control, c(initial.population, list(c(2, 2.5)))), "exceeds", ignore.case = TRUE)
})
test_that("ecr(...) shortcut function works as expected for floating point representation", {
fn = function(x) {
sum(x^2)
}
res = ecr(fn, n.dim = 2L, lower = c(-5, -5), upper = c(5, 5),
representation = "float", n.population = 20L, n.offspring = 10L, max.iter = 30L,
monitor = NULL)
expect_true(abs(res$best.value) < 0.01)
expect_true(all(res$best.param < 0.01))
})
test_that("ecr(...) shortcut function works as expected for binary point representation", {
fn = function(x) {
sum(x)
}
res = ecr(fn, n.dim = 15L, n.bits = 15L,
representation = "binary", n.population = 20L, n.offspring = 10L, max.iter = 30L,
monitor = NULL)
expect_true(res$best.value == 0)
expect_true(all(res$best.param == 0))
})
| /tests/testthat/test_ecr.R | no_license | lucasmpavelski/ecr | R | false | false | 7,736 | r | context("ecr main function")
# Helper: build an ECR control object for the "float" representation with a
# greedy survival selector and a maximum-iterations stopping condition.
#
# Args:
#   n.population:      population size forwarded to setupECRControl.
#   n.offspring:       number of offspring generated per iteration.
#   survival.strategy: "plus" or "comma" survival strategy.
#   n.elite:           number of elite individuals kept each generation.
#   n.mating.pool:     currently unused in the body -- TODO confirm whether it
#                      was meant to be forwarded to setupECRControl.
#   max.iter:          iteration budget for the terminator.
#
# Returns: the configured control object.
setUpControlObject = function(n.population,
  n.offspring,
  survival.strategy = "plus",
  n.elite = 1L,
  n.mating.pool = round(n.population / 2),
  max.iter = 60L) {
  control = setupECRControl(
    n.population = n.population,
    n.offspring = n.offspring,
    survival.strategy = survival.strategy,
    n.elite = n.elite,
    representation = "float",
    monitor = NULL,
    stopping.conditions = list(setupMaximumIterationsTerminator(max.iter = max.iter))
  )
  control = setupEvolutionaryOperators(
    control,
    survival.selector = setupGreedySelector()
  )
  # Return explicitly: the original ended on an assignment, whose value is
  # returned invisibly.
  control
}
# Smoke test: the EA must approximate the optimum of the 2-D sphere function
# (value 0 at the origin) for each combination of population size, offspring
# count and survival strategy.
test_that("ecr works with simple soo function", {
  obj.fun = smoof::makeSphereFunction(dimensions = 2L)
  for (n.population in c(15, 30)) {
    for (n.offspring in c(15, 30)) {
      for (survival.strategy in c("plus", "comma")) {
        if (survival.strategy == "comma") {
          # for "comma" survival the test forces n.offspring == n.population
          n.offspring = n.population
        }
        control = setUpControlObject(n.population, n.offspring, survival.strategy)
        res = doTheEvolution(obj.fun, control = control)
        # the control object's print method must identify itself
        expect_output(print(control), regexp = "CONTROL OBJECT")
        # check result
        expect_false(is.null(res))
        expect_true(res$best.value < 0.1,
          info = sprintf("Did not approximate optimal value with params mu: %i, lambda: %i, strategy: %s",
            n.population, n.offspring, survival.strategy))
        expect_true(all(res$best.param < 0.1),
          info = sprintf("Did not approximate optimal params with params mu: %i, lambda: %i, strategy: %s",
            n.population, n.offspring, survival.strategy))
        expect_output(print(res), regexp = "EA applied")
        # result object must report positive generation/evaluation counters
        expect_true(getGenerations(res) > 0L)
        expect_true(getEvaluations(res) > 0L)
      }
    }
  }
})
# With minimize = FALSE the EA must maximize -x^2, whose maximum is 0 at x = 0.
test_that("ecr works for maximization", {
  obj.fun = makeSingleObjectiveFunction(
    name = "maximize me",
    fn = function(x) -sum(x^2),
    par.set = makeNumericParamSet("x", len = 1L, lower = -10, upper = 10),
    minimize = FALSE # we want to maximize here
  )
  control = setupECRControl(
    n.population = 10L,
    n.offspring = 10L,
    survival.strategy = "plus",
    stopping.conditions = list(setupMaximumIterationsTerminator(max.iter = 50L)),
    monitor = NULL,
    representation = "float"
  )
  res = doTheEvolution(obj.fun, control = control)
  # best value should be close to the true maximum of 0
  expect_true(abs(res$best.value - 0) < 0.05)
})
# OneMin on 10 bits: each (mu, lambda) combination with the bitflip mutator
# must reach the all-zero optimum within max.iter iterations.
test_that("ecr works on binary representations", {
  n.params = 10L
  max.iter = 50L
  obj.fun = makeOneMinFunction(dimensions = n.params)
  for (n.population in c(10, 15)) {
    for (n.offspring in c(10, 15)) {
      for (mutator in c(setupBitFlipMutator())) {
        # Bug fix: the original call had a trailing comma after
        # representation = "binary", which passes an empty argument to
        # setupECRControl.
        control = setupECRControl(
          n.population = n.population,
          n.offspring = n.offspring,
          survival.strategy = "plus",
          stopping.conditions = list(setupMaximumIterationsTerminator(max.iter = max.iter)),
          monitor = NULL,
          representation = "binary"
        )
        control = setupEvolutionaryOperators(
          control,
          mutator = mutator
        )
        res = doTheEvolution(obj.fun, control = control)
        # check results
        expect_false(is.null(res))
        expect_equal(res$best.value, 0,
          info = sprintf("Did not find OneMin minimum with params mu: %i, lambda: %i, strategy: %s, mutator: %s",
            n.population, n.offspring, "plus", getOperatorName(mutator)))
        expect_true(all(res$best.param == 0),
          info = sprintf("Did not find OneMin minimum with params mu: %i, lambda: %i, strategy: %s, mutator: %s",
            n.population, n.offspring, "plus", getOperatorName(mutator)))
      }
    }
  }
})
# more.args must be forwarded to the fitness function: with shift = 1000 the
# minimal attainable value is 1000, so best.value must land just above it
# (the default shift of 100 would give values near 100 instead).
test_that("ecr works with additional arguments", {
  obj.fun = makeSingleObjectiveFunction(
    fn = function(x, shift = 100L) {
      sum(x^2) + shift
    },
    par.set = makeNumericParamSet("x", lower = -10, upper = 10, len = 1L)
  )
  control = setupECRControl(
    n.population = 10L,
    n.offspring = 5L,
    representation = "float",
    survival.strategy = "plus",
    monitor = NULL,
    stopping.conditions = list(setupMaximumIterationsTerminator(max.iter = 50L))
  )
  res = doTheEvolution(obj.fun, control, more.args = list(shift = 1000))
  expect_true(res$best.value < 1000.1)
  expect_true(res$best.param < 0.1)
})
# The fitness function counts inversions (pairs out of order), so the global
# optimum 0 is attained exactly by the sorted permutation. Every combination
# of permutation mutator and recombinator must recover it.
test_that("ecr works on permutation genomes", {
  # defs
  n.params = 5L
  max.iter = 50L
  # objective
  obj.fun = makeSingleObjectiveFunction(
    fn = function(x) {
      # count inversions: compare the current head against the tail,
      # then drop the head and repeat
      CI = 0
      for (i in seq(length(x)-1)) {
        CI = CI + sum(x[1] > x[-1])
        x = x[-1]
      }
      return(CI)
    },
    par.set = makeParamSet(
      makeIntegerVectorParam(len = n.params, id = "x", lower = 1, upper = n.params)
    ),
    name = "Sorting"
  )
  control = setupECRControl(
    n.population = 5L,
    n.offspring = 5L,
    representation = "permutation",
    survival.strategy = "plus",
    monitor = NULL,
    stopping.conditions = list(setupMaximumIterationsTerminator(max.iter = 50L))
  )
  # check it for a selection of mutators for permutations
  for (mutatorGenerator in c(setupSwapMutator, setupInversionMutator, setupInsertionMutator)) {
    for (recombinatorGenerator in c(setupNullRecombinator, setupPMXRecombinator)) {
      control = setupEvolutionaryOperators(
        control,
        mutator = mutatorGenerator(),
        recombinator = recombinatorGenerator()
      )
      res = doTheEvolution(obj.fun, control = control)
      # check results
      expect_false(is.null(res))
      expect_equal(res$best.value, 0,
        info = sprintf("Did not find correct sorting with mutator '%s' and recombinator '%s'.",
          getOperatorName(control$mutator),
          getOperatorName(control$recombinator)
        )
      )
    }
  }
})
# The optimum of the linear objective lies at the corner (0, 0) of the search
# space; a Gauss mutator with small sdev must still be able to reach it.
# Fix: corrected the typo "if is is" in the test description.
test_that("ecr finds optimum if it is located on the edge of the search space", {
  fn = makeSingleObjectiveFunction(
    name = "linear",
    par.set = makeNumericParamSet("x", len = 2L, lower = 0, upper = 1),
    # optimum is in (0, 0)
    fn = function(x) sum(x)
  )
  # initialize control object
  control = setupECRControl(
    n.population = 30L,
    n.offspring = 10L,
    survival.strategy = "plus",
    representation = "float",
    monitor = NULL,
    stopping.conditions = setupTerminators(max.iter = 50L)
  )
  control = setupEvolutionaryOperators(control, mutator = setupGaussMutator(sdev = 0.05))
  res = doTheEvolution(fn, control = control)
  expect_true(res$best.value < 0.1)
  expect_true(all(res$best.param < 0.1))
})
# Passing more seed individuals than n.population must raise an error whose
# message contains "exceeds".
test_that("ecr can handle initial populations", {
  fn = makeSphereFunction(2L)
  initial.population = list(c(1, 1), c(2, 2), c(3, 3))
  control = setupECRControl(
    n.population = 3L,
    n.offspring = 1L,
    representation = "float",
    monitor = NULL,
    stopping.conditions = setupTerminators(max.iter = 1L)
  )
  # stop if initial population is too large: c() flattens c(2, 2.5) into two
  # extra list elements, giving 5 individuals > n.population = 3
  expect_error(doTheEvolution(fn, control, c(initial.population, c(2, 2.5))), "exceeds", ignore.case = TRUE)
})
# The ecr(...) convenience wrapper must minimize the 2-D sphere function
# without an explicitly constructed control object.
test_that("ecr(...) shortcut function works as expected for floating point representation", {
  fn = function(x) {
    sum(x^2)
  }
  res = ecr(fn, n.dim = 2L, lower = c(-5, -5), upper = c(5, 5),
    representation = "float", n.population = 20L, n.offspring = 10L, max.iter = 30L,
    monitor = NULL)
  expect_true(abs(res$best.value) < 0.01)
  expect_true(all(res$best.param < 0.01))
})
# OneMin-style objective via the shortcut: the wrapper must drive all 15 bits
# to zero.
test_that("ecr(...) shortcut function works as expected for binary point representation", {
  fn = function(x) {
    sum(x)
  }
  res = ecr(fn, n.dim = 15L, n.bits = 15L,
    representation = "binary", n.population = 20L, n.offspring = 10L, max.iter = 30L,
    monitor = NULL)
  expect_true(res$best.value == 0)
  expect_true(all(res$best.param == 0))
})
|
# Advanced Regression: final exam
# Dynamic linear model for WTI, fitted with JAGS.
#
# NOTE(review): this script relies on globals defined elsewhere in the
# project: datos (data frame with WTI plus k regressors), n, m, k,
# semilla (seed), niter, nchains, nburning, and the helpers getmode(),
# prob(), pseudoR2() -- confirm they are in scope before sourcing this file.
#-Defining data-
# The last m responses are NA so JAGS treats them as unknowns to predict.
# NOTE(review): n-3 observed values are kept but m NAs are appended --
# this assumes m == 3; confirm.
data_dinam <- list("n"=n,"y"=c(datos$WTI[1:(n-3)],rep(NA,m)), "x"=select(datos, -WTI))
#-Defining inits-
# One time-varying intercept alpha[t], k time-varying slopes beta[,t],
# precisions tau.*, and in-sample predictions yp[t].
inits<-function(){list(alpha=rep(0,n),beta=matrix(0,k,n),tau.y=1,tau.a=1,tau.b=rep(1,k),yp=rep(1,n))}
#-Selecting parameters to monitor-
parameters<-c("alpha","beta","tau.y","tau.a","tau.b","yp")
#-Running code in JAGS-
set.seed(semilla)
mod_dinam.sim<-jags(data_dinam,inits,parameters,model.file="Modelos/mod_dinamico1.txt",
n.iter=niter,n.chains=nchains,n.burnin=nburning,n.thin=1,
progress.bar='none')
#-Monitoring the chains-
#JAGS Output
out_dinam<-mod_dinam.sim$BUGSoutput$sims.list
# # Chain for coefficients (diagnostic plots, kept for reference)
# z<-out_dinam$beta[,1,1] # draws of beta 1 at time 1
# par(mfrow=c(2,2))
# plot(z,type="l")
# plot(cumsum(z)/(1:length(z)),type="l")
# hist(z,freq=FALSE)
# acf(z)
#-Coefficient Summary-
# Simulations
out_dinam.sum<-mod_dinam.sim$BUGSoutput$summary
# Modes (posterior modes via the project helper getmode())
modas_dinam_alpha<-apply(out_dinam$alpha,2,getmode)
modas_dinam_beta<-unlist(lapply(1:n,function(x){apply(out_dinam$beta[,,x],2,getmode)}))
probs_dinam_beta<-unlist(lapply(1:n,function(x){apply(out_dinam$beta[,,x],2,prob)}))
# Summary tables: mean / median / mode / 2.5% / 97.5% / probability
out_dinam.sum.t_alpha<-cbind(out_dinam.sum[grep("alpha",rownames(out_dinam.sum)),c(1,3,5,7)],modas_dinam_alpha)
out_dinam.sum.t_alpha<-cbind(out_dinam.sum.t_alpha,apply(out_dinam$alpha,2,prob))
out_dinam.sum.t_alpha<-out_dinam.sum.t_alpha[,c(1,3,5,2,4,6)]
colnames(out_dinam.sum.t_alpha)<-c("Media","Mediana","Moda","2.5%","97.5%","Prob.")
rownames(out_dinam.sum.t_alpha)<-paste('Intercepto t=',1:n,sep='_')
out_dinam.sum.t_beta<-cbind(out_dinam.sum[grep("beta",rownames(out_dinam.sum)),c(1,3,5,7)],modas_dinam_beta,probs_dinam_beta)
# out_dinam.sum.t_beta<-cbind(out_dinam.sum.t_beta,apply(out_dinam$beta,2,prob))
out_dinam.sum.t_beta<-out_dinam.sum.t_beta[,c(1,3,5,2,4,6)]
colnames(out_dinam.sum.t_beta)<-c("Media","Mediana","Moda","2.5%","97.5%","Prob.")
rownames(out_dinam.sum.t_beta)<-paste(rep(c('JPM Dollar Ind.','VIX Ind','Prod. OPEP','Dem. OPEP','T-Bill 10YR','T-Bill 1YR'),n),rep(1:n,each=k),sep=' t=')
#-DIC-
out_dinam.dic<-mod_dinam.sim$BUGSoutput$DIC
#-Predictions-
out_dinam.yp<-out_dinam.sum[grep("yp",rownames(out_dinam.sum)),]
#-alpha-
out_dinam.alpha<-out_dinam.sum[grep("alpha",rownames(out_dinam.sum)),]
#-Betas-
out_dinam.beta<-out_dinam.sum[grep("beta",rownames(out_dinam.sum)),]
#-PseudoR2-
pseudoR2_dinam<-pseudoR2(out.yp=out_dinam.yp)
| /Modelos/Modelo_Dinamico.R | no_license | AlejandraLLI/Reg_Av_Proyecto_Final | R | false | false | 2,513 | r | # Regresion Avanzada: Examen Final
# Modelo Dinamico
#-Defining data-
data_dinam <- list("n"=n,"y"=c(datos$WTI[1:(n-3)],rep(NA,m)), "x"=select(datos, -WTI))
#-Defining inits-
inits<-function(){list(alpha=rep(0,n),beta=matrix(0,k,n),tau.y=1,tau.a=1,tau.b=rep(1,k),yp=rep(1,n))}
#-Selecting parameters to monitor-
parameters<-c("alpha","beta","tau.y","tau.a","tau.b","yp")
#-Running code in JAGS-
set.seed(semilla)
mod_dinam.sim<-jags(data_dinam,inits,parameters,model.file="Modelos/mod_dinamico1.txt",
n.iter=niter,n.chains=nchains,n.burnin=nburning,n.thin=1,
progress.bar='none')
#-Monitoring the chains-
#JAGS Output
out_dinam<-mod_dinam.sim$BUGSoutput$sims.list
# # Chain for coefficients
# z<-out_dinam$beta[,1,1] # simulaciones de beta 1 al tiempo 1
# par(mfrow=c(2,2))
# plot(z,type="l")
# plot(cumsum(z)/(1:length(z)),type="l")
# hist(z,freq=FALSE)
# acf(z)
#-Coefficient Summary-
# Simulations
out_dinam.sum<-mod_dinam.sim$BUGSoutput$summary
# Modes
modas_dinam_alpha<-apply(out_dinam$alpha,2,getmode)
modas_dinam_beta<-unlist(lapply(1:n,function(x){apply(out_dinam$beta[,,x],2,getmode)}))
probs_dinam_beta<-unlist(lapply(1:n,function(x){apply(out_dinam$beta[,,x],2,prob)}))
# Summary
out_dinam.sum.t_alpha<-cbind(out_dinam.sum[grep("alpha",rownames(out_dinam.sum)),c(1,3,5,7)],modas_dinam_alpha)
out_dinam.sum.t_alpha<-cbind(out_dinam.sum.t_alpha,apply(out_dinam$alpha,2,prob))
out_dinam.sum.t_alpha<-out_dinam.sum.t_alpha[,c(1,3,5,2,4,6)]
colnames(out_dinam.sum.t_alpha)<-c("Media","Mediana","Moda","2.5%","97.5%","Prob.")
rownames(out_dinam.sum.t_alpha)<-paste('Intercepto t=',1:n,sep='_')
out_dinam.sum.t_beta<-cbind(out_dinam.sum[grep("beta",rownames(out_dinam.sum)),c(1,3,5,7)],modas_dinam_beta,probs_dinam_beta)
# out_dinam.sum.t_beta<-cbind(out_dinam.sum.t_beta,apply(out_dinam$beta,2,prob))
out_dinam.sum.t_beta<-out_dinam.sum.t_beta[,c(1,3,5,2,4,6)]
colnames(out_dinam.sum.t_beta)<-c("Media","Mediana","Moda","2.5%","97.5%","Prob.")
rownames(out_dinam.sum.t_beta)<-paste(rep(c('JPM Dollar Ind.','VIX Ind','Prod. OPEP','Dem. OPEP','T-Bill 10YR','T-Bill 1YR'),n),rep(1:n,each=k),sep=' t=')
#-DIC-
out_dinam.dic<-mod_dinam.sim$BUGSoutput$DIC
#-Predictions-
out_dinam.yp<-out_dinam.sum[grep("yp",rownames(out_dinam.sum)),]
#-alpha-
out_dinam.alpha<-out_dinam.sum[grep("alpha",rownames(out_dinam.sum)),]
#-Betas-
out_dinam.beta<-out_dinam.sum[grep("beta",rownames(out_dinam.sum)),]
#-PseudoR2-
pseudoR2_dinam<-pseudoR2(out.yp=out_dinam.yp)
|
# Project setup: attach packages, source project helpers and set the global
# MCMC settings used by the analysis scripts.
library(rstan)
library(mvtnorm)
library(tidyverse)
library(invgamma)
library(reshape2)
library(magrittr)
library(cowplot)
library(ggsci)
library(shinystan)
library(KScorrect)
library(Matrix)
library(matrixcalc)
library(GGally)
library(Sim.DiffProc)
library(cusp)
library(microbiome)
library(smoother)
library(earlywarnings)
library(ggsignif)
# library(tvReg)  # kept for reference; currently not attached
# Project-local helper functions (OU process utilities).
source("OU.functions.R")
# Global MCMC settings.
chains <- 1
iter <- 500
options(mc.cores = parallel::detectCores()) | /main.R | no_license | velait/OUP | R | false | false | 456 | r | library(rstan)
library(mvtnorm)
library(tidyverse)
library(invgamma)
library(reshape2)
library(magrittr)
library(cowplot)
library(ggsci)
library(shinystan)
library(KScorrect)
library(Matrix)
library(matrixcalc)
library(GGally)
library(Sim.DiffProc)
library(cusp)
library(microbiome)
library(smoother)
library(earlywarnings)
library(ggsignif)
# library(tvReg)
source("OU.functions.R")
chains <- 1
iter <- 500
options(mc.cores = parallel::detectCores()) |
# computes X values for up to three way interactions and returns as data frame
# Builds pairwise (and optionally three-way) interaction columns from the
# first n columns of X and returns them as a data frame. Column names join
# the constituent feature names with "x" (e.g. "f1xf2"). If var_order is
# supplied, X is reordered by it first, so "first n" means the n top-ranked
# features.
interactions <- function(X, n, var_order = NULL, three_way = FALSE) {
  if (n > ncol(X)) {
    stop("n must be less then or equal to ncol(X)")
  }
  if (n < 2) {
    stop("n must be greater than 1")
  }
  if (n < 3 & three_way == TRUE) {
    stop("n must be greater than 2 when looking for 3 way interaction")
  }
  if (!is.null(var_order)) {
    X <- X[, var_order]
  }
  # All k-way product columns over the first n features of X, in the same
  # lexicographic order as nested index loops would produce.
  product_cols <- function(k) {
    combos <- utils::combn(n, k)
    out <- as.data.frame(matrix(0, nrow = nrow(X), ncol = ncol(combos)))
    for (ci in seq_len(ncol(combos))) {
      idx <- combos[, ci]
      out[, ci] <- Reduce(`*`, lapply(idx, function(j) X[, j]))
      names(out)[ci] <- paste(names(X)[idx], collapse = "x")
    }
    out
  }
  two_way <- product_cols(2L)
  if (three_way == TRUE) {
    cbind(two_way, product_cols(3L))
  } else {
    two_way
  }
}
# subsets original data frame to only features in
# Subsets a data frame to the columns named in the first column of a
# survivor table (as produced by the per-module screening step).
surv_data <- function(survivor_df, df) {
  keep <- names(df) %in% survivor_df[, 1]
  df[, keep]
}
# scales VIM by proportion within module
# Augments a module survivor table with (column 3) each feature's VIM as a
# proportion of the module's total VIM and (column 4) its within-module rank.
#
# Args:
#   x: data frame whose second column holds permutation VIMs (sorted
#      decreasing by the screening step).
# Returns: x with the two extra columns appended.
vi_prop <- function(x) {
  vi_scaled <- x[, 2] / sum(x[, 2])
  # Return the result directly; the original assigned it back to `x`, which
  # made the function's return value invisible.
  cbind(x, vi_scaled, 1:nrow(x))
}
# Constructor for "fuzzy_forest_int_rem" result objects.
#
# Args:
#   feature_list:         final selected features with VIMs and module labels.
#   final_rf:             random forest fit on the final feature set.
#   module_membership:    feature -> module lookup table.
#   WGCNA_object:         blockwiseModules() result (NULL when module labels
#                         were supplied directly).
#   survivor_list:        per-module screening survivors.
#   selection_list:       intermediate output of the selection step.
#   initial_feature_list: pass-1 feature list (before top-r removal).
#   initial_final_rf:     pass-1 random forest.
# Returns: a list with the above fields, classed "fuzzy_forest_int_rem".
fuzzy_forest_int_rem <- function(feature_list, final_rf, module_membership,
                                 WGCNA_object=NULL, survivor_list, selection_list,
                                 initial_feature_list, initial_final_rf) {
  out <- list()
  out[[1]] <- feature_list
  out[[2]] <- final_rf
  out[[3]] <- module_membership
  # Bug fix: slot 4 is named "WGCNA_object" below but was previously filled
  # with module_membership, silently discarding the WGCNA_object argument.
  out[[4]] <- WGCNA_object
  out[[5]] <- survivor_list
  out[[6]] <- selection_list
  out[[7]] <- initial_feature_list
  out[[8]] <- initial_final_rf
  names(out) <- c("feature_list", "final_rf", "module_membership",
                  "WGCNA_object", "survivor_list", "selection_list",
                  "initial_feature_list", "initial_final_rf")
  class(out) <- "fuzzy_forest_int_rem"
  return(out)
}
# fuzzy forest with adjustments for within-module interactions
# NOTE: adjustments are highlighted below by bars of '#'
# Fuzzy-forest feature selection with optional interaction terms and optional
# removal of the top r features before a second selection pass.
#
# Stage 1 (screening): within each module (given by module_membership), a
# recursive-feature-elimination random forest drops `drop_fraction` of the
# features per round until `keep_fraction` of the module survives.
# Stage 2 (adjustment): if interaction = TRUE, pairwise (and optionally
# three-way) products of the m top survivors are appended, either within each
# module (across = FALSE) or across all survivors ranked by VIM (across = TRUE).
# Stage 3 (selection): select_RF picks the final feature set and a final
# random forest is fit on it. If r > 0, the top r selected features are then
# removed and selection + final fit are repeated once without them.
#
# Arguments mirror the original fuzzy-forest ff(); added arguments:
#   interaction  append interaction columns to the surviving features?
#   m            number of top features used to build interactions
#   three_way    also build three-way products?
#   across       build interactions across modules instead of within modules?
#   r            number of top-ranked selected features to remove before the
#                second pass (0 = single pass).
# NOTE(review): the default `r = r` is self-referential and errors if r is
# not supplied; it was probably meant to be `r = 0` as in wff_int_rem --
# confirm.
#
# Returns a "fuzzy_forest_int_rem" object (see constructor above).
ff_int_rem <- function(X, y, Z=NULL, module_membership,
                       screen_params = screen_control(min_ntree=500),
                       select_params = select_control(min_ntree=500),
                       final_ntree = 5000,
                       num_processors=1, nodesize, test_features=NULL,
                       test_y=NULL,
                       ############## added args below ############
                       interaction = FALSE, m = NULL, three_way = FALSE,
                       across = FALSE, r = r, ...) {
  # Regression vs classification is inferred from the type of y.
  CLASSIFICATION <- is.factor(y)
  if ( !((mode(y)=="numeric") || is.factor(y)) ) {
    stop("y must be a numeric vector or factor")
  }
  if( (!CLASSIFICATION) && (length(unique(y)) < 5) ) {
    warning("y has 5 or fewer unique values. In this case, we recommend
classification instead of regression. For classification,
y must be a factor.")
  }
  if(!is.data.frame(X)) {
    stop("X must be a data.frame.")
  }
  if(!is.null(Z)) {
    if (!is.data.frame(Z)) {
      stop("Z must be a data.frame.")
    }
  }
  # randomForest-style node-size defaults: 1 for classification,
  # 5 for regression.
  if(CLASSIFICATION == TRUE) {
    if(missing(nodesize)){
      nodesize <- 1
    }
  }
  if(CLASSIFICATION == FALSE) {
    if(missing(nodesize)){
      nodesize <- 5
    }
  }
  screen_control <- screen_params
  select_control <- select_params
  module_list <- unique(module_membership)
  if(num_processors > 1) {
    #set up parallel backend
    cl <- parallel::makeCluster(num_processors)
    parallel::clusterCall(cl, library, package = "randomForest", character.only = TRUE)
    doParallel::registerDoParallel(cl)
    #close parallel backend on exit
    on.exit(try(parallel::stopCluster(cl), silent=TRUE))
  }
  # One survivor table per module.
  survivors <- vector('list', length(module_list))
  drop_fraction <- screen_control$drop_fraction
  mtry_factor <- screen_control$mtry_factor
  ntree_factor <- screen_control$ntree_factor
  min_ntree <- screen_control$min_ntree
  keep_fraction <- screen_control$keep_fraction
  if(ncol(X)*keep_fraction < select_control$number_selected){
    warning(c("ncol(X)*keep_fraction < number_selected", "\n",
              "number_selected will be set to floor(ncol(X)*keep_fraction)"))
    select_control$number_selected <- max(floor(ncol(X)*keep_fraction), 1)
  }
  # ---- Stage 1: recursive feature elimination within each module ----
  for (i in 1:length(module_list)) {
    module <- X[, which(module_membership == module_list[i]), drop=FALSE]
    num_features <- ncol(module)
    #TUNING PARAMETER mtry_factor
    if(CLASSIFICATION == TRUE) {
      mtry <- min(ceiling(mtry_factor*sqrt(num_features)), num_features)
      if(missing(nodesize)){
        nodesize <- 1
      }
    }
    if(CLASSIFICATION == FALSE) {
      mtry <- min(ceiling(mtry_factor*num_features/3), num_features)
      if(missing(nodesize)){
        nodesize <- 5
      }
    }
    #TUNING PARAMETER ntree_factor
    ntree <- max(num_features*ntree_factor, min_ntree)
    #TUNING PARAMETER keep_fraction
    target <- ceiling(num_features * keep_fraction)
    while (num_features >= target){
      if(num_processors > 1) {
        # Grow forest pieces in parallel and combine them.
        rf <- foreach(ntree = rep(ntree/num_processors, num_processors),
                      .combine = combine, .packages = 'randomForest') %dopar% {
          randomForest(module, y, ntree = ntree, mtry = mtry,
                       importance = TRUE, scale = FALSE, nodesize=nodesize) }
      }
      if(num_processors == 1) {
        rf <- randomForest::randomForest(module, y, ntree = ntree, mtry = mtry,
                                         importance = TRUE, scale = FALSE,
                                         nodesize = nodesize)
      }
      # Unscaled permutation importance, sorted decreasing.
      var_importance <- randomForest::importance(rf, type=1, scale=FALSE)[, 1]
      var_importance <- var_importance[order(var_importance,
                                             decreasing=TRUE)]
      reduction <- ceiling(num_features*drop_fraction)
      if(num_features - reduction > target) {
        # Drop the lowest-ranked features and refit.
        trimmed_varlist <- var_importance[1:(num_features - reduction)]
        features <- names(trimmed_varlist)
        module <- module[, which(names(module) %in% features)]
        num_features <- length(features)
        if(CLASSIFICATION == TRUE) {
          mtry <- min(ceiling(mtry_factor*sqrt(num_features)), num_features)
        }
        if(CLASSIFICATION == FALSE) {
          mtry <- min(ceiling(mtry_factor*num_features/3), num_features)
        }
        ntree <- max(num_features*ntree_factor, min_ntree)
      }
      else {
        # Final round: record the top `target` features and their VIMs
        # (setting num_features below target ends the while loop).
        num_features <- target - 1
        mod_varlist <- var_importance[1:target]
        features <- names(var_importance)[1:target]
        survivors[[i]] <- cbind(features, mod_varlist)
        row.names(survivors[[i]]) <- NULL
        survivors[[i]] <- as.data.frame(survivors[[i]])
        survivors[[i]][, 1] <- as.character(survivors[[i]][, 1])
        survivors[[i]][, 2] <- as.numeric(as.character(survivors[[i]][, 2]))
      }
    }
  }
  # Flatten the per-module survivor tables into one data frame.
  survivor_list <- survivors
  names(survivor_list) <- module_list
  survivors <- do.call('rbind', survivors)
  survivors <- as.data.frame(survivors, stringsAsFactors = FALSE)
  survivors[, 2] <- as.numeric(survivors[, 2])
  names(survivors) <- c("featureID", "Permutation VIM")
  X_surv <- X[, names(X) %in% survivors[, 1]]
  ########################### Adjustments ###############################
  # ---- Stage 2: optionally append interaction columns ----
  if(interaction == TRUE & across == FALSE){
    # Within-module interactions only: build products per module, then bind.
    X_surv_mod <- lapply(survivor_list, surv_data, df = X)
    X_surv_int <- do.call(cbind, lapply(X_surv_mod, interactions,
                                        n = m, three_way = three_way))
    X_surv <- cbind(X_surv, X_surv_int)
  }
  # testing scaling
  # if(interaction == TRUE & across == TRUE){
  #   surv_list_prop <- lapply(survivor_list, vi_prop)
  #   all_surv_list_prop <- do.call(rbind, surv_list_prop)
  #   var_order <- all_surv_list_prop[order(all_surv_list_prop[, 4], -all_surv_list_prop[, 3]), 1]
  #   X_surv_int <- interactions(X_surv, n = m, var_order = var_order,
  #                              three_way = three_way)
  #   X_surv <- cbind(X_surv, X_surv_int)
  # }
  # original across mthods
  if(interaction == TRUE & across == TRUE){
    # Rank all survivors by VIM and interact the global top m.
    var_order <- survivors[order(survivors[, 2], decreasing = TRUE), 1]
    X_surv_int <- interactions(X_surv, n = m, var_order = var_order,
                               three_way = three_way)
    X_surv <- cbind(X_surv, X_surv_int)
  }
  if(!is.null(Z)) {
    X_surv <- cbind(X_surv, Z, stringsAsFactors=FALSE)
  }
  #######################################################################
  ############### for loop added to remove top r features ###############
  # l = number of selection passes: 2 when the top r features are removed
  # after the first pass, otherwise 1.
  if (r < 0) {
    stop("r must be greater than or equal to 0")
  } else if (r > 0) {
    l <- 2
  } else {
    l <- 1
  }
  final_lists <- list()
  final_rfs <- list()
  # ---- Stage 3: selection pass(es) and final random forest ----
  for (i in 1:l) {
    if(i == 2){
      if (r > nrow(final_list)) {
        stop("r > nrow(final_list)")
      }
      if (r >= ncol(X_surv)) {
        # NOTE(review): this branch only evaluates a string literal --
        # nothing is raised; probably intended stop(...). Confirm.
        "not enough remaining features to remove r of them"
      }
      # Drop the r top-ranked features found in the first pass.
      X_surv <- X_surv[, !(names(X_surv) %in% final_list[(1:r), 1])]
    }
    select_args <- list(X_surv, y, num_processors, nodesize)
    select_args <- c(select_args, select_control)
    names(select_args)[1:4] <- c("X", "y", "num_processors", "nodesize")
    select_results <- do.call("select_RF", select_args)
    final_list <- select_results[[1]][, 1, drop=F]
    selection_list <- select_results[[2]]
    row.names(final_list) <- NULL
    colnames(final_list) <- c("feature_name")
    final_list <- as.data.frame(final_list, stringsAsFactors=FALSE)
    #VIMs from last tree in recursive feature elimination should be
    #replaced.
    final_list <- cbind(final_list,
                        matrix(rep(".", 2*dim(final_list)[1]), ncol=2),
                        stringsAsFactors=F)
    ##################### changed X to X_surv #############################
    final_X <- X_surv[, names(X_surv) %in% final_list[, 1], drop=FALSE]
    #Some selected features may be from Z
    if(!is.null(Z)) {
      final_X <- cbind(final_X, Z[, names(Z) %in% final_list[, 1], drop=FALSE],
                       stringsAsFactors=FALSE)
    }
    current_p <- dim(final_X)[2]
    if(CLASSIFICATION == TRUE) {
      final_mtry <- min(ceiling(select_control$mtry_factor*sqrt(current_p)),
                        current_p)
    }
    if(CLASSIFICATION == FALSE) {
      final_mtry <- min(ceiling(select_control$mtry_factor*current_p/3),
                        current_p)
    }
    if(!is.null(test_features)) {
      test_features <- test_features[, which(names(test_features) %in%
                                               names(final_X))]
    }
    # Final forest on the selected features; the reported VIMs come from it.
    final_rf <- randomForest::randomForest(x=final_X, y=y, mtry=final_mtry, ntree=final_ntree,
                                           importance=TRUE, nodesize=nodesize,
                                           xtest=test_features, ytest=test_y)
    final_importance <- randomForest::importance(final_rf, type=1, scale = F)
    final_list[, 1] <- row.names(final_importance)
    final_list[, 2] <- final_importance[, 1]
    #Now it's very important to associate the right module to the right
    #feature. The ordering must be correct. This is made trickier by
    #by the fact that when Z is not null, there exist elements in the
    #the VIM list that aren't in X.
    #select_X is a vector with selected features in order of X.
    select_X <- names(X)[which(names(X) %in% final_list[, 1])]
    #select_mods is a vector with associated module memberships in order of X.
    select_mods <- module_membership[which(names(X) %in% final_list[, 1])]
    #select_order is a vector with selected features given according to
    #the order returned by randomForest.
    select_order <- final_list[, 1][which(final_list[,1] %in% names(X))]
    #select_mods is a vector with module memberships reordered according
    #to the order returned by randomForest
    select_mods <- select_mods[match(select_order, select_X)]
    #Here is where the module membership is entered into the table.
    #Note that for elements of Z, the entry will be "."
    final_list[, 3][final_list[, 1] %in% names(X)] <- select_mods
    names(final_list)[2:3] <- c("variable_importance", "module_membership")
    #Reorder vims so that they are in decreasing order.
    final_list <- final_list[order(final_list[, 2], decreasing=T), ]
    final_lists[[i]] <- final_list
    final_rfs[[i]] <- final_rf
  }
  module_membership <- as.data.frame(cbind(names(X), module_membership),
                                     stringsAsFactors=FALSE)
  names(module_membership) <- c("feature_name", "module")
  # Pass-1 results are kept as the "initial_*" fields; pass 2 (if any) is
  # the final fit.
  if (r == 0) {
    initial_final_list <- final_list <- final_lists[[1]]
    initial_final_rf <- final_rf <- final_rfs[[1]]
  } else {
    initial_final_list <- final_lists[[1]]
    final_list <- final_lists[[2]]
    initial_final_rf <- final_rfs[[1]]
    final_rf <- final_rfs[[2]]
  }
  out <- fuzzy_forest_int_rem(final_list, final_rf, module_membership,
                              survivor_list=survivor_list,
                              selection_list=selection_list,
                              initial_feature_list = initial_final_list,
                              initial_final_rf = initial_final_rf)
  return(out)
}
# wff function but adjust to include arguments for interactions
# WGCNA wrapper around ff_int_rem: derives module memberships from X via
# WGCNA::blockwiseModules and then runs the interaction-aware fuzzy forest.
#
# Args mirror the original fuzzy-forest wff(); added args (interaction, m,
# three_way, across, r) are forwarded unchanged to ff_int_rem (r defaults
# to 0, i.e. a single selection pass).
# Requires all columns of X to be numeric (a WGCNA requirement).
# Returns a "fuzzy_forest_int_rem" object with the blockwiseModules result
# attached as $WGCNA_object.
wff_int_rem <- function(X, y, Z=NULL, WGCNA_params=WGCNA_control(power=6),
                        screen_params=screen_control(min_ntree=500),
                        select_params=select_control(min_ntree=500),
                        final_ntree=5000, num_processors=1, nodesize,
                        test_features=NULL, test_y=NULL,
                        ############# interaction, m, three_way#################
                        interaction = FALSE, m = NULL, three_way = FALSE,
                        across = FALSE, r = 0, ...) {
  # WGCNA is an optional dependency; bail out early if it is not available.
  if (!requireNamespace("WGCNA", quietly = T)) {
    stop("WGCNA must be installed.")
  }
  else{
    # NOTE(review): inherits(X, "data.frame") would be the robust check;
    # class(X) != "data.frame" breaks for subclasses such as tibbles.
    if(class(X) != "data.frame"){
      stop("X must be a data.frame")
    }
    if((!is.null(Z)) && (class(Z) != "data.frame")){
      stop("Z must be a data.frame")
    }
    numeric_test <- sapply(X, is.numeric)
    if (sum(numeric_test) != dim(X)[2]) {
      stop("To carry out WGCNA, all columns of X must be numeric.")
    }
    CLASSIFICATION <- is.factor(y)
    # Node-size defaults match ff_int_rem: 1 for classification,
    # 5 for regression.
    if(CLASSIFICATION == TRUE) {
      if(missing(nodesize)){
        nodesize <- 1
      }
    }
    if(CLASSIFICATION == FALSE) {
      if(missing(nodesize)){
        nodesize <- 5
      }
    }
    WGCNA_control <- WGCNA_params
    screen_control <- screen_params
    select_control <- select_params
    # Assemble the blockwiseModules call: data, soft-threshold power, plus
    # any extra arguments carried in the control object.
    WGCNA_args <- list(X, WGCNA_control$power)
    WGCNA_args <- c(WGCNA_args, WGCNA_control$extra_args)
    names(WGCNA_args) <- c("datExpr", "power", names(WGCNA_control$extra_args))
    bwise <- do.call("blockwiseModules", WGCNA_args)
    # Module labels ("colors") become the module membership vector.
    module_membership <- bwise$colors
    # NOTE(review): these screen_* locals are never used below -- the whole
    # screen_control object is passed to ff_int_rem instead. Confirm they
    # can be removed.
    screen_drop_fraction <- screen_control$drop_fraction
    screen_keep_fraction <- screen_control$keep_fraction
    screen_mtry_factor <- screen_control$mtry_factor
    screen_ntree_factor <- screen_control$ntree_factor
    screen_min_ntree <- screen_control$min_ntree
    out <- ff_int_rem(X, y, Z, module_membership,
                      screen_control, select_control, final_ntree,
                      num_processors, nodesize=nodesize,
                      test_features=test_features, test_y=test_y,
                      ########## added args for everything below ############
                      interaction = interaction, m = m,
                      three_way = three_way,
                      across = across,
                      r = r)
    # Attach the WGCNA fit so callers can inspect the module decomposition.
    out$WGCNA_object <- bwise
    return(out)
  }
} | /fuzzy_forest_int_rem.R | no_license | raguilar2/fuzzier_forest | R | false | false | 16,159 | r | # computes X values for up to three way interactions and returns as data frame
interactions <- function(X, n, var_order = NULL, three_way = FALSE){
if(n > ncol(X)) {
stop("n must be less then or equal to ncol(X)")
}
if(n < 2) {
stop("n must be greater than 1")
}
if(n < 3 & three_way == TRUE) {
stop("n must be greater than 2 when looking for 3 way interaction")
}
if(!is.null(var_order)) {
X <- X[, var_order]
}
if(three_way == TRUE) {
X_int3 <- as.data.frame(matrix(0, nrow = nrow(X), ncol=choose(n, 3)))
i <- 1
for (j in 1:(n-2)) {
for (k in (j + 1):(n-1)) {
for (l in (k + 1):n) {
X_int3[, i] <- X[, j] * X[, k] * X[, l]
names(X_int3)[i] <- paste(names(X)[j], names(X)[k],
names(X)[l], sep = "x")
i <- i + 1
}
}
}
}
X_int2 <- as.data.frame(matrix(0, nrow = nrow(X), ncol=choose(n, 2)))
i <- 1
for (j in 1:(n-1)) {
for (k in (j + 1):(n)) {
X_int2[, i] <- X[, j] * X[, k]
names(X_int2)[i] <- paste(names(X)[j], names(X)[k],
sep = "x")
i <- i + 1
}
}
if(three_way == TRUE) {
cbind(X_int2, X_int3)
} else {
X_int2
}
}
# subsets original data frame to only features in
surv_data <- function(survivor_df, df) {
df[, names(df) %in% survivor_df[, 1]]
}
# scales VIM by proportion within module
vi_prop <- function(x){
vi_scaled <- x[,2]/sum(x[,2])
x <- cbind(x, vi_scaled, 1:nrow(x))
}
fuzzy_forest_int_rem <- function(feature_list, final_rf, module_membership,
WGCNA_object=NULL, survivor_list, selection_list,
initial_feature_list, initial_final_rf) {
out <- list()
out[[1]] <- feature_list
out[[2]] <- final_rf
out[[3]] <- module_membership
out[[4]] <- module_membership
out[[5]] <- survivor_list
out[[6]] <- selection_list
out[[7]] <- initial_feature_list
out[[8]] <- initial_final_rf
names(out) <- c("feature_list", "final_rf", "module_membership",
"WGCNA_object", "survivor_list", "selection_list",
"initial_feature_list", "initial_final_rf")
class(out) <- "fuzzy_forest_int_rem"
return(out)
}
# fuzzy forest with adjustments for within-module interactions
# NOTE: adjustments are highlighted below by bars of '#'
ff_int_rem <- function(X, y, Z=NULL, module_membership,
screen_params = screen_control(min_ntree=500),
select_params = select_control(min_ntree=500),
final_ntree = 5000,
num_processors=1, nodesize, test_features=NULL,
test_y=NULL,
############## added args below ############
interaction = FALSE, m = NULL, three_way = FALSE,
across = FALSE, r = r, ...) {
CLASSIFICATION <- is.factor(y)
if ( !((mode(y)=="numeric") || is.factor(y)) ) {
stop("y must be a numeric vector or factor")
}
if( (!CLASSIFICATION) && (length(unique(y)) < 5) ) {
warning("y has 5 or fewer unique values. In this case, we recommend
classification instead of regression. For classification,
y must be a factor.")
}
if(!is.data.frame(X)) {
stop("X must be a data.frame.")
}
if(!is.null(Z)) {
if (!is.data.frame(Z)) {
stop("Z must be a data.frame.")
}
}
if(CLASSIFICATION == TRUE) {
if(missing(nodesize)){
nodesize <- 1
}
}
if(CLASSIFICATION == FALSE) {
if(missing(nodesize)){
nodesize <- 5
}
}
screen_control <- screen_params
select_control <- select_params
module_list <- unique(module_membership)
if(num_processors > 1) {
#set up parallel backend
cl <- parallel::makeCluster(num_processors)
parallel::clusterCall(cl, library, package = "randomForest", character.only = TRUE)
doParallel::registerDoParallel(cl)
#close parallel backend on exit
on.exit(try(parallel::stopCluster(cl), silent=TRUE))
}
survivors <- vector('list', length(module_list))
drop_fraction <- screen_control$drop_fraction
mtry_factor <- screen_control$mtry_factor
ntree_factor <- screen_control$ntree_factor
min_ntree <- screen_control$min_ntree
keep_fraction <- screen_control$keep_fraction
if(ncol(X)*keep_fraction < select_control$number_selected){
warning(c("ncol(X)*keep_fraction < number_selected", "\n",
"number_selected will be set to floor(ncol(X)*keep_fraction)"))
select_control$number_selected <- max(floor(ncol(X)*keep_fraction), 1)
}
for (i in 1:length(module_list)) {
module <- X[, which(module_membership == module_list[i]), drop=FALSE]
num_features <- ncol(module)
#TUNING PARAMETER mtry_factor
if(CLASSIFICATION == TRUE) {
mtry <- min(ceiling(mtry_factor*sqrt(num_features)), num_features)
if(missing(nodesize)){
nodesize <- 1
}
}
if(CLASSIFICATION == FALSE) {
mtry <- min(ceiling(mtry_factor*num_features/3), num_features)
if(missing(nodesize)){
nodesize <- 5
}
}
#TUNING PARAMETER ntree_factor
ntree <- max(num_features*ntree_factor, min_ntree)
#TUNING PARAMETER keep_fraction
target <- ceiling(num_features * keep_fraction)
while (num_features >= target){
if(num_processors > 1) {
rf <- foreach(ntree = rep(ntree/num_processors, num_processors),
.combine = combine, .packages = 'randomForest') %dopar% {
randomForest(module, y, ntree = ntree, mtry = mtry,
importance = TRUE, scale = FALSE, nodesize=nodesize) }
}
if(num_processors == 1) {
rf <- randomForest::randomForest(module, y, ntree = ntree, mtry = mtry,
importance = TRUE, scale = FALSE,
nodesize = nodesize)
}
var_importance <- randomForest::importance(rf, type=1, scale=FALSE)[, 1]
var_importance <- var_importance[order(var_importance,
decreasing=TRUE)]
reduction <- ceiling(num_features*drop_fraction)
if(num_features - reduction > target) {
trimmed_varlist <- var_importance[1:(num_features - reduction)]
features <- names(trimmed_varlist)
module <- module[, which(names(module) %in% features)]
num_features <- length(features)
if(CLASSIFICATION == TRUE) {
mtry <- min(ceiling(mtry_factor*sqrt(num_features)), num_features)
}
if(CLASSIFICATION == FALSE) {
mtry <- min(ceiling(mtry_factor*num_features/3), num_features)
}
ntree <- max(num_features*ntree_factor, min_ntree)
}
else {
num_features <- target - 1
mod_varlist <- var_importance[1:target]
features <- names(var_importance)[1:target]
survivors[[i]] <- cbind(features, mod_varlist)
row.names(survivors[[i]]) <- NULL
survivors[[i]] <- as.data.frame(survivors[[i]])
survivors[[i]][, 1] <- as.character(survivors[[i]][, 1])
survivors[[i]][, 2] <- as.numeric(as.character(survivors[[i]][, 2]))
}
}
}
survivor_list <- survivors
names(survivor_list) <- module_list
survivors <- do.call('rbind', survivors)
survivors <- as.data.frame(survivors, stringsAsFactors = FALSE)
survivors[, 2] <- as.numeric(survivors[, 2])
names(survivors) <- c("featureID", "Permutation VIM")
X_surv <- X[, names(X) %in% survivors[, 1]]
########################### Adjustments ###############################
if(interaction == TRUE & across == FALSE){
X_surv_mod <- lapply(survivor_list, surv_data, df = X)
X_surv_int <- do.call(cbind, lapply(X_surv_mod, interactions,
n = m, three_way = three_way))
X_surv <- cbind(X_surv, X_surv_int)
}
# testing scaling
# if(interaction == TRUE & across == TRUE){
# surv_list_prop <- lapply(survivor_list, vi_prop)
# all_surv_list_prop <- do.call(rbind, surv_list_prop)
# var_order <- all_surv_list_prop[order(all_surv_list_prop[, 4], -all_surv_list_prop[, 3]), 1]
# X_surv_int <- interactions(X_surv, n = m, var_order = var_order,
# three_way = three_way)
# X_surv <- cbind(X_surv, X_surv_int)
# }
# original across mthods
if(interaction == TRUE & across == TRUE){
var_order <- survivors[order(survivors[, 2], decreasing = TRUE), 1]
X_surv_int <- interactions(X_surv, n = m, var_order = var_order,
three_way = three_way)
X_surv <- cbind(X_surv, X_surv_int)
}
if(!is.null(Z)) {
X_surv <- cbind(X_surv, Z, stringsAsFactors=FALSE)
}
#######################################################################
############### for loop added to remove top r features ###############
if (r < 0) {
stop("r must be greater than or equal to 0")
} else if (r > 0) {
l <- 2
} else {
l <- 1
}
final_lists <- list()
final_rfs <- list()
for (i in 1:l) {
if(i == 2){
if (r > nrow(final_list)) {
stop("r > nrow(final_list)")
}
if (r >= ncol(X_surv)) {
"not enough remaining features to remove r of them"
}
X_surv <- X_surv[, !(names(X_surv) %in% final_list[(1:r), 1])]
}
select_args <- list(X_surv, y, num_processors, nodesize)
select_args <- c(select_args, select_control)
names(select_args)[1:4] <- c("X", "y", "num_processors", "nodesize")
select_results <- do.call("select_RF", select_args)
final_list <- select_results[[1]][, 1, drop=F]
selection_list <- select_results[[2]]
row.names(final_list) <- NULL
colnames(final_list) <- c("feature_name")
final_list <- as.data.frame(final_list, stringsAsFactors=FALSE)
#VIMs from last tree in recursive feature elimination should be
#replaced.
final_list <- cbind(final_list,
matrix(rep(".", 2*dim(final_list)[1]), ncol=2),
stringsAsFactors=F)
##################### changed X to X_surv #############################
final_X <- X_surv[, names(X_surv) %in% final_list[, 1], drop=FALSE]
#Some selected features may be from Z
if(!is.null(Z)) {
final_X <- cbind(final_X, Z[, names(Z) %in% final_list[, 1], drop=FALSE],
stringsAsFactors=FALSE)
}
current_p <- dim(final_X)[2]
if(CLASSIFICATION == TRUE) {
final_mtry <- min(ceiling(select_control$mtry_factor*sqrt(current_p)),
current_p)
}
if(CLASSIFICATION == FALSE) {
final_mtry <- min(ceiling(select_control$mtry_factor*current_p/3),
current_p)
}
if(!is.null(test_features)) {
test_features <- test_features[, which(names(test_features) %in%
names(final_X))]
}
final_rf <- randomForest::randomForest(x=final_X, y=y, mtry=final_mtry, ntree=final_ntree,
importance=TRUE, nodesize=nodesize,
xtest=test_features, ytest=test_y)
final_importance <- randomForest::importance(final_rf, type=1, scale = F)
final_list[, 1] <- row.names(final_importance)
final_list[, 2] <- final_importance[, 1]
#Now it's very important to associate the right module to the right
#feature. The ordering must be correct. This is made trickier by
#by the fact that when Z is not null, there exist elements in the
#the VIM list that aren't in X.
#select_X is a vector with selected features in order of X.
select_X <- names(X)[which(names(X) %in% final_list[, 1])]
#select_mods is a vector with associated module memberships in order of X.
select_mods <- module_membership[which(names(X) %in% final_list[, 1])]
#select_order is a vector with selected features given according to
#the order returned by randomForest.
select_order <- final_list[, 1][which(final_list[,1] %in% names(X))]
#select_mods is a vector with module memberships reordered according
#to the order returned by randomForest
select_mods <- select_mods[match(select_order, select_X)]
#Here is where the module membership is entered into the table.
#Note that for elements of Z, the entry will be "."
final_list[, 3][final_list[, 1] %in% names(X)] <- select_mods
names(final_list)[2:3] <- c("variable_importance", "module_membership")
#Reorder vims so that they are in decreasing order.
final_list <- final_list[order(final_list[, 2], decreasing=T), ]
final_lists[[i]] <- final_list
final_rfs[[i]] <- final_rf
}
module_membership <- as.data.frame(cbind(names(X), module_membership),
stringsAsFactors=FALSE)
names(module_membership) <- c("feature_name", "module")
if (r == 0) {
initial_final_list <- final_list <- final_lists[[1]]
initial_final_rf <- final_rf <- final_rfs[[1]]
} else {
initial_final_list <- final_lists[[1]]
final_list <- final_lists[[2]]
initial_final_rf <- final_rfs[[1]]
final_rf <- final_rfs[[2]]
}
out <- fuzzy_forest_int_rem(final_list, final_rf, module_membership,
survivor_list=survivor_list,
selection_list=selection_list,
initial_feature_list = initial_final_list,
initial_final_rf = initial_final_rf)
return(out)
}
# wff function but adjust to include arguments for interactions
#
# Derives module memberships for the columns of X via WGCNA's
# blockwiseModules, then delegates screening/selection to ff_int_rem
# (including the interaction and top-r-feature-removal extensions).
#
# Arguments mirror ff_int_rem, plus WGCNA_params: a WGCNA_control object
# whose $power and $extra_args are forwarded to WGCNA::blockwiseModules.
# Returns the object produced by ff_int_rem with the blockwiseModules
# result attached as out$WGCNA_object.
wff_int_rem <- function(X, y, Z=NULL, WGCNA_params=WGCNA_control(power=6),
screen_params=screen_control(min_ntree=500),
select_params=select_control(min_ntree=500),
final_ntree=5000, num_processors=1, nodesize,
test_features=NULL, test_y=NULL,
############# interaction, m, three_way#################
interaction = FALSE, m = NULL, three_way = FALSE,
across = FALSE, r = 0, ...) {
# WGCNA is an optional dependency; fail early if it is not installed.
# Guard clause replaces the original if/else nesting (same behavior).
if (!requireNamespace("WGCNA", quietly = TRUE)) {
stop("WGCNA must be installed.")
}
# is.data.frame() instead of class(X) != "data.frame": objects carrying
# multiple classes (e.g. tibbles, which inherit from data.frame) make the
# class comparison a length-2 logical, which errors inside if() on modern R.
if (!is.data.frame(X)) {
stop("X must be a data.frame")
}
if (!is.null(Z) && !is.data.frame(Z)) {
stop("Z must be a data.frame")
}
# WGCNA operates on a numeric expression matrix, so every column must be numeric.
numeric_test <- vapply(X, is.numeric, logical(1))
if (!all(numeric_test)) {
stop("To carry out WGCNA, all columns of X must be numeric.")
}
CLASSIFICATION <- is.factor(y)
# Default terminal node size follows randomForest's own defaults:
# 1 for classification, 5 for regression.
if (missing(nodesize)) {
nodesize <- if (CLASSIFICATION) 1 else 5
}
# Local copy under a name that does not shadow the WGCNA_control() function.
wgcna_ctrl <- WGCNA_params
screen_control <- screen_params
select_control <- select_params
WGCNA_args <- c(list(X, wgcna_ctrl$power), wgcna_ctrl$extra_args)
names(WGCNA_args) <- c("datExpr", "power", names(wgcna_ctrl$extra_args))
# Qualify the call: requireNamespace() loads but does not attach WGCNA, so a
# bare "blockwiseModules" would only resolve if the caller attached WGCNA.
bwise <- do.call(WGCNA::blockwiseModules, WGCNA_args)
module_membership <- bwise$colors
out <- ff_int_rem(X, y, Z, module_membership,
screen_control, select_control, final_ntree,
num_processors, nodesize=nodesize,
test_features=test_features, test_y=test_y,
########## added args for everything below ############
interaction = interaction, m = m,
three_way = three_way,
across = across,
r = r)
out$WGCNA_object <- bwise
return(out)
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/agedepth.R
\name{agedepth}
\alias{agedepth}
\title{Plot an age-depth model}
\usage{
agedepth(
set = get("info"),
BCAD = set$BCAD,
depth.unit = "cm",
age.unit = "yr",
unit = depth.unit,
d.lab = c(),
age.lab = c(),
yr.lab = age.lab,
kcal = FALSE,
acc.lab = c(),
d.min = c(),
d.max = c(),
d.by = c(),
depths = set$depths,
depths.file = FALSE,
age.min = c(),
yr.min = age.min,
age.max = c(),
yr.max = age.max,
hiatus.option = 1,
dark = c(),
prob = set$prob,
rounded = 0,
d.res = 400,
age.res = 400,
yr.res = age.res,
date.res = 100,
grey.res = 100,
rotate.axes = FALSE,
rev.age = FALSE,
rev.yr = rev.age,
rev.d = FALSE,
maxcalc = 500,
height = 15,
calheight = 1,
mirror = TRUE,
up = TRUE,
cutoff = 0.001,
plot.range = TRUE,
panels = layout(1),
range.col = grey(0.5),
range.lty = "12",
mn.col = "red",
mn.lty = "12",
med.col = NA,
med.lty = "12",
C14.col = rgb(0, 0, 1, 0.35),
C14.border = rgb(0, 0, 1, 0.5),
cal.col = rgb(0, 0.5, 0.5, 0.35),
cal.border = rgb(0, 0.5, 0.5, 0.5),
dates.col = c(),
pbmodelled.col = function(x) rgb(0, 0, 1, 0.5 * x),
pbmeasured.col = "blue",
pb.lim = c(),
hiatus.col = grey(0.5),
hiatus.lty = "12",
greyscale = grey(seq(1, 0, length = grey.res)),
slump.col = grey(0.8),
normalise.dists = TRUE,
same.heights = FALSE,
cc = set$cc,
title = set$core,
title.location = "top",
after = set$after,
bty = "l",
mar = c(3, 3, 1, 1),
mgp = c(1.5, 0.7, 0),
xaxs = "r",
yaxs = "i",
xaxt = "s",
yaxt = "s",
plot.pb = TRUE,
plot.pdf = FALSE,
dates.only = FALSE,
model.only = FALSE,
verbose = TRUE
)
}
\arguments{
\item{set}{Detailed information of the current run, stored within this session's memory as variable \code{info}.}
\item{BCAD}{The calendar scale of graphs and age output-files is in \code{cal BP} by default, but can be changed to BC/AD using \code{BCAD=TRUE}.}
\item{depth.unit}{Units of the depths. Defaults to \code{depth.unit="cm"}.}
\item{age.unit}{Units of the ages. Defaults to \code{age.unit="yr"}.}
\item{unit}{Deprecated and replaced by \code{depth.unit}.}
\item{d.lab}{The labels for the depth axis. Default \code{d.lab="Depth (cm)"}. See also \code{depth.unit}.}
\item{age.lab}{The labels for the calendar axis (default \code{age.lab="cal BP"} or \code{"BC/AD"} if \code{BCAD=TRUE}).}
\item{yr.lab}{Deprecated - use age.lab instead}
\item{kcal}{Use kcal BP. Default is \code{kcal=FALSE}.}
\item{acc.lab}{The labels for the accumulation rate plot (top middle). Default \code{d.lab="Acc. rate (yr/cm)"} (or whatever units you're using).}
\item{d.min}{Minimum depth of age-depth model (use this to extrapolate to depths higher than the top dated depth).}
\item{d.max}{Maximum depth of age-depth model (use this to extrapolate to depths below the bottom dated depth).}
\item{d.by}{Depth intervals at which ages are calculated. Default 1. Alternative depth intervals can be provided using, e.g., d.\code{by=0.5}.}
\item{depths}{By default, Bacon will calculate the ages for the depths \code{d.min} to \code{d.max} in steps of \code{d.by}. Alternative depths can be provided as, e.g., \code{depths=seq(0, 100, length=500)} or as a file, e.g., \code{depths=read.table("CoreDepths.txt"}. See also \code{depths.file}.}
\item{depths.file}{By default, Bacon will calculate the ages for the depths \code{d.min} to \code{d.max} in steps of \code{d.by}.
If \code{depths.file=TRUE}, Bacon will read a file containing the depths for which you require ages.
This file, containing the depths in a single column without a header, should be stored within \code{coredir},
and its name should start with the core's name and end with '_depths.txt'. Then specify \code{depths.file=TRUE} (default \code{FALSE}). See also \code{depths}.}
\item{age.min}{Minimum age of the age-depth plot.}
\item{yr.min}{Deprecated - use age.min instead.}
\item{age.max}{Maximum age of the age-depth plot.}
\item{yr.max}{Deprecated - use age.max instead.}
\item{hiatus.option}{How to calculate accumulation rates and ages for sections with hiatuses. Either extrapolate from surrounding sections (default, \code{hiatus.option=1}), use a w-weighted mix between the prior and posterior values for depths below the hiatus and prior information only for above the hiatus (\code{hiatus.option=2}), or use the originally calculated slopes (\code{hiatus.option=0}).}
\item{dark}{Darkness of the greyscale age-depth model. By default, the darkest grey value is calculated as 10 times the height of the lowest-precision age estimate \code{dark=c()}. Lower values will result in lighter grey but values >1 are not allowed.}
\item{prob}{Confidence interval to report (between 0 and 1, default 0.95 or 95\%).}
\item{rounded}{Rounding of years. Default is to round to single years.}
\item{d.res}{Resolution or amount of greyscale pixels to cover the depth scale of the age-model plot. Default \code{d.res=400}.}
\item{age.res}{Resolution or amount of greyscale pixels to cover the age scale of the age-model plot. Default \code{age.res=400}.}
\item{yr.res}{Deprecated - use age.res instead.}
\item{date.res}{Date distributions are plotted using \code{date.res=100} points by default.}
\item{grey.res}{Grey-scale resolution of the age-depth model. Default \code{grey.res=100}.}
\item{rotate.axes}{By default, the age-depth model is plotted with the depths on the horizontal axis and ages on the vertical axis. This can be changed with \code{rotate.axes=TRUE}.}
\item{rev.age}{The direction of the age axis, which can be reversed using \code{rev.age=TRUE}.}
\item{rev.yr}{Deprecated - use rev.age instead.}
\item{rev.d}{The direction of the depth axis, which can be reversed using \code{rev.d=TRUE}.}
\item{maxcalc}{Number of depths to calculate ages for. If this is more than \code{maxcalc=500}, a warning will be shown that calculations will take time.}
\item{height}{The maximum heights of the distributions of the dates on the plot. See also \code{normalise.dists}.}
\item{calheight}{Multiplier for the heights of the distributions of dates on the calendar scale. Defaults to \code{calheight=1}.}
\item{mirror}{Plot the dates as 'blobs'. Set to \code{mirror=FALSE} to plot simple distributions.}
\item{up}{Directions of distributions if they are plotted non-mirrored. Default \code{up=TRUE}.}
\item{cutoff}{Avoid plotting very low probabilities of date distributions (default \code{cutoff=0.001}).}
\item{plot.range}{Whether or not to plot the curves showing the confidence ranges of the age-model. Defaults to (\code{plot.range=TRUE}).}
\item{panels}{Divide the graph panel. Defaults to 1 graph per panel, \code{panels=layout(1)}. To avoid dividing into panels, use \code{panels=c()}.}
\item{range.col}{The colour of the curves showing the confidence ranges of the age-model. Defaults to medium grey (\code{range.col=grey(0.5)}).}
\item{range.lty}{The line type of the curves showing the confidence ranges of the age-model. Defaults to \code{range.lty=12}.}
\item{mn.col}{The colour of the mean age-depth model: default \code{mn.col="red"}.}
\item{mn.lty}{The line type of the mean age-depth model. Default \code{mn.lty=12}.}
\item{med.col}{The colour of the median age-depth model: not drawn by default \code{med.col=NA}.}
\item{med.lty}{The line type of the median age-depth model. Default \code{med.lty=12}.}
\item{C14.col}{The colour of the calibrated ranges of the dates. Default is semi-transparent blue: \code{C14.col=rgb(0,0,1,.35)}.}
\item{C14.border}{The colours of the borders of calibrated 14C dates. Default is semi-transparent dark blue: \code{C14.border=rgb(0, 0, 1, 0.5)}.}
\item{cal.col}{The colour of the non-14C dates. Default is semi-transparent blue-green: \code{cal.col=rgb(0,.5,.5,.35)}.}
\item{cal.border}{The colour of the border of non-14C dates in the age-depth plot: default semi-transparent dark blue-green: \code{cal.border=rgb(0,.5,.5,.5)}. Not used by default.}
\item{dates.col}{As an alternative to colouring dates based on whether they are 14C or not, sets of dates can be coloured as, e.g., \code{dates.col=colours()[2:100]}.}
\item{pbmodelled.col}{Colour of the modelled 210Pb values. Defaults to shades of blue: \code{pbmodelled.col=function(x) rgb(0,0,1,x)}.}
\item{pbmeasured.col}{Colour of the measured 210Pb values (default \code{pbmeasured.col="blue"}). Draws rectangles of the upper and lower depths as well as the Pb values with 95 percent error ranges.}
\item{pb.lim}{Axis limits for the Pb-210 data. Calculated automatically by default (\code{pblim=c()}).}
\item{hiatus.col}{The colour of the depths of any hiatuses. Default \code{hiatus.col=grey(0.5)}.}
\item{hiatus.lty}{The line type of the depths of any hiatuses. Default \code{hiatus.lty=12}.}
\item{greyscale}{The function to produce a coloured representation of all age-models. Defaults to grey-scales: \code{greyscale=function(x) grey(1-x)}.}
\item{slump.col}{Colour of slumps. Defaults to \code{slump.col=grey(0.8)}.}
\item{normalise.dists}{By default, the distributions of more precise dates will cover less time and will thus peak higher than less precise dates. This can be avoided by specifying \code{normalise.dists=FALSE}.}
\item{same.heights}{Plot the distributions of the dates all at the same maximum height (default \code{same.height=FALSE}).}
\item{cc}{Calibration curve for 14C dates: \code{cc=1} for IntCal20 (northern hemisphere terrestrial), \code{cc=2} for Marine20 (marine), \code{cc=3} for SHCal20 (southern hemisphere terrestrial). For dates that are already on the cal BP scale use \code{cc=0}.}
\item{title}{The title of the age-depth model is plotted on the main panel. By default this is the core's name. To leave empty: \code{title=""}.}
\item{title.location}{Location of the title. Default \code{title.location='top'}.}
\item{after}{Sets a short section above and below hiatus.depths within which to calculate ages. For internal calculations - do not change.}
\item{bty}{Type of box to be drawn around plots (\code{"n"} for none, and \code{"l"} (default), \code{"7"}, \code{"c"}, \code{"u"}, or \code{"o"} for correspondingly shaped boxes).}
\item{mar}{Plot margins (amount of white space along edges of axes 1-4). Default \code{mar=c(3,3,1,1)}.}
\item{mgp}{Axis text margins (where should titles, labels and tick marks be plotted). Defaults to \code{mgp=c(1.5, .7, .0)}.}
\item{xaxs}{Extension of x-axis. By default, add some extra white-space at both extremes (\code{xaxs="r"}). See ?par for other options.}
\item{yaxs}{Extension of y-axis. By default, add no extra white-space at both extremes (\code{yaxs="i"}). See ?par for other options.}
\item{xaxt}{Whether or not to plot the x-axis. Can be used to adapt axes after a plot. See ?par for other options.}
\item{yaxt}{Whether or not to plot the y-axis. Can be used to adapt axes after a plot. See ?par for other options.}
\item{plot.pb}{Plot the 210Pb data. Defaults to \code{plot.pb=TRUE}.}
\item{plot.pdf}{Produce a pdf file of the age-depth plot.}
\item{dates.only}{By default, the age-depth model is plotted on top of the dates. This can be avoided by supplying \code{dates.only=TRUE}.}
\item{model.only}{By default, panels showing the MCMC iterations and the priors and posteriors for accumulation rate and memory are plotted above the main age-depth model panel. This can be avoided by supplying \code{model.only=TRUE}. Note however that this removes relevant information to evaluate the age-depth model, so we do recommend to present age-models together with these upper panels.}
\item{verbose}{Provide a summary of the age ranges after producing the age-depth model graph; default \code{verbose=TRUE}.}
}
\value{
A plot of the age-depth model, and estimated ages incl. confidence ranges for each depth.
}
\description{
Plot the age-depth model of a core.
}
\details{
After loading a previous run, or after running either the \link{scissors} or \link{thinner} command, plot the age-model
again using the command \code{agedepth()}.
}
\examples{
\donttest{
Plum(ssize=100, ask=FALSE, suggest=FALSE, coredir=tempfile(),
date.sample=2018.5, radon.case=0, n.supp=3)
agedepth()
}
}
\references{
Blaauw, M. and Christen, J.A., Flexible paleoclimate age-depth models using an autoregressive
gamma process. Bayesian Analysis 6 (2011), no. 3, 457--474.
\url{https://projecteuclid.org/euclid.ba/1339616472}
}
\author{
Maarten Blaauw, J. Andres Christen
}
| /fuzzedpackages/rplum/man/agedepth.Rd | no_license | akhikolla/testpackages | R | false | true | 12,529 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/agedepth.R
\name{agedepth}
\alias{agedepth}
\title{Plot an age-depth model}
\usage{
agedepth(
set = get("info"),
BCAD = set$BCAD,
depth.unit = "cm",
age.unit = "yr",
unit = depth.unit,
d.lab = c(),
age.lab = c(),
yr.lab = age.lab,
kcal = FALSE,
acc.lab = c(),
d.min = c(),
d.max = c(),
d.by = c(),
depths = set$depths,
depths.file = FALSE,
age.min = c(),
yr.min = age.min,
age.max = c(),
yr.max = age.max,
hiatus.option = 1,
dark = c(),
prob = set$prob,
rounded = 0,
d.res = 400,
age.res = 400,
yr.res = age.res,
date.res = 100,
grey.res = 100,
rotate.axes = FALSE,
rev.age = FALSE,
rev.yr = rev.age,
rev.d = FALSE,
maxcalc = 500,
height = 15,
calheight = 1,
mirror = TRUE,
up = TRUE,
cutoff = 0.001,
plot.range = TRUE,
panels = layout(1),
range.col = grey(0.5),
range.lty = "12",
mn.col = "red",
mn.lty = "12",
med.col = NA,
med.lty = "12",
C14.col = rgb(0, 0, 1, 0.35),
C14.border = rgb(0, 0, 1, 0.5),
cal.col = rgb(0, 0.5, 0.5, 0.35),
cal.border = rgb(0, 0.5, 0.5, 0.5),
dates.col = c(),
pbmodelled.col = function(x) rgb(0, 0, 1, 0.5 * x),
pbmeasured.col = "blue",
pb.lim = c(),
hiatus.col = grey(0.5),
hiatus.lty = "12",
greyscale = grey(seq(1, 0, length = grey.res)),
slump.col = grey(0.8),
normalise.dists = TRUE,
same.heights = FALSE,
cc = set$cc,
title = set$core,
title.location = "top",
after = set$after,
bty = "l",
mar = c(3, 3, 1, 1),
mgp = c(1.5, 0.7, 0),
xaxs = "r",
yaxs = "i",
xaxt = "s",
yaxt = "s",
plot.pb = TRUE,
plot.pdf = FALSE,
dates.only = FALSE,
model.only = FALSE,
verbose = TRUE
)
}
\arguments{
\item{set}{Detailed information of the current run, stored within this session's memory as variable \code{info}.}
\item{BCAD}{The calendar scale of graphs and age output-files is in \code{cal BP} by default, but can be changed to BC/AD using \code{BCAD=TRUE}.}
\item{depth.unit}{Units of the depths. Defaults to \code{depth.unit="cm"}.}
\item{age.unit}{Units of the ages. Defaults to \code{age.unit="yr"}.}
\item{unit}{Deprecated and replaced by \code{depth.unit}.}
\item{d.lab}{The labels for the depth axis. Default \code{d.lab="Depth (cm)"}. See also \code{depth.unit}.}
\item{age.lab}{The labels for the calendar axis (default \code{age.lab="cal BP"} or \code{"BC/AD"} if \code{BCAD=TRUE}).}
\item{yr.lab}{Deprecated - use age.lab instead}
\item{kcal}{Use kcal BP. Default is \code{kcal=FALSE}.}
\item{acc.lab}{The labels for the accumulation rate plot (top middle). Default \code{d.lab="Acc. rate (yr/cm)"} (or whatever units you're using).}
\item{d.min}{Minimum depth of age-depth model (use this to extrapolate to depths higher than the top dated depth).}
\item{d.max}{Maximum depth of age-depth model (use this to extrapolate to depths below the bottom dated depth).}
\item{d.by}{Depth intervals at which ages are calculated. Default 1. Alternative depth intervals can be provided using, e.g., d.\code{by=0.5}.}
\item{depths}{By default, Bacon will calculate the ages for the depths \code{d.min} to \code{d.max} in steps of \code{d.by}. Alternative depths can be provided as, e.g., \code{depths=seq(0, 100, length=500)} or as a file, e.g., \code{depths=read.table("CoreDepths.txt"}. See also \code{depths.file}.}
\item{depths.file}{By default, Bacon will calculate the ages for the depths \code{d.min} to \code{d.max} in steps of \code{d.by}.
If \code{depths.file=TRUE}, Bacon will read a file containing the depths for which you require ages.
This file, containing the depths in a single column without a header, should be stored within \code{coredir},
and its name should start with the core's name and end with '_depths.txt'. Then specify \code{depths.file=TRUE} (default \code{FALSE}). See also \code{depths}.}
\item{age.min}{Minimum age of the age-depth plot.}
\item{yr.min}{Deprecated - use age.min instead.}
\item{age.max}{Maximum age of the age-depth plot.}
\item{yr.max}{Deprecated - use age.max instead.}
\item{hiatus.option}{How to calculate accumulation rates and ages for sections with hiatuses. Either extrapolate from surrounding sections (default, \code{hiatus.option=1}), use a w-weighted mix between the prior and posterior values for depths below the hiatus and prior information only for above the hiatus (\code{hiatus.option=2}), or use the originally calculated slopes (\code{hiatus.option=0}).}
\item{dark}{Darkness of the greyscale age-depth model. By default, the darkest grey value is calculated as 10 times the height of the lowest-precision age estimate \code{dark=c()}. Lower values will result in lighter grey but values >1 are not allowed.}
\item{prob}{Confidence interval to report (between 0 and 1, default 0.95 or 95\%).}
\item{rounded}{Rounding of years. Default is to round to single years.}
\item{d.res}{Resolution or amount of greyscale pixels to cover the depth scale of the age-model plot. Default \code{d.res=400}.}
\item{age.res}{Resolution or amount of greyscale pixels to cover the age scale of the age-model plot. Default \code{age.res=400}.}
\item{yr.res}{Deprecated - use age.res instead.}
\item{date.res}{Date distributions are plotted using \code{date.res=100} points by default.}
\item{grey.res}{Grey-scale resolution of the age-depth model. Default \code{grey.res=100}.}
\item{rotate.axes}{By default, the age-depth model is plotted with the depths on the horizontal axis and ages on the vertical axis. This can be changed with \code{rotate.axes=TRUE}.}
\item{rev.age}{The direction of the age axis, which can be reversed using \code{rev.age=TRUE}.}
\item{rev.yr}{Deprecated - use rev.age instead.}
\item{rev.d}{The direction of the depth axis, which can be reversed using \code{rev.d=TRUE}.}
\item{maxcalc}{Number of depths to calculate ages for. If this is more than \code{maxcalc=500}, a warning will be shown that calculations will take time.}
\item{height}{The maximum heights of the distributions of the dates on the plot. See also \code{normalise.dists}.}
\item{calheight}{Multiplier for the heights of the distributions of dates on the calendar scale. Defaults to \code{calheight=1}.}
\item{mirror}{Plot the dates as 'blobs'. Set to \code{mirror=FALSE} to plot simple distributions.}
\item{up}{Directions of distributions if they are plotted non-mirrored. Default \code{up=TRUE}.}
\item{cutoff}{Avoid plotting very low probabilities of date distributions (default \code{cutoff=0.001}).}
\item{plot.range}{Whether or not to plot the curves showing the confidence ranges of the age-model. Defaults to (\code{plot.range=TRUE}).}
\item{panels}{Divide the graph panel. Defaults to 1 graph per panel, \code{panels=layout(1)}. To avoid dividing into panels, use \code{panels=c()}.}
\item{range.col}{The colour of the curves showing the confidence ranges of the age-model. Defaults to medium grey (\code{range.col=grey(0.5)}).}
\item{range.lty}{The line type of the curves showing the confidence ranges of the age-model. Defaults to \code{range.lty=12}.}
\item{mn.col}{The colour of the mean age-depth model: default \code{mn.col="red"}.}
\item{mn.lty}{The line type of the mean age-depth model. Default \code{mn.lty=12}.}
\item{med.col}{The colour of the median age-depth model: not drawn by default \code{med.col=NA}.}
\item{med.lty}{The line type of the median age-depth model. Default \code{med.lty=12}.}
\item{C14.col}{The colour of the calibrated ranges of the dates. Default is semi-transparent blue: \code{C14.col=rgb(0,0,1,.35)}.}
\item{C14.border}{The colours of the borders of calibrated 14C dates. Default is semi-transparent dark blue: \code{C14.border=rgb(0, 0, 1, 0.5)}.}
\item{cal.col}{The colour of the non-14C dates. Default is semi-transparent blue-green: \code{cal.col=rgb(0,.5,.5,.35)}.}
\item{cal.border}{The colour of the border of non-14C dates in the age-depth plot: default semi-transparent dark blue-green: \code{cal.border=rgb(0,.5,.5,.5)}. Not used by default.}
\item{dates.col}{As an alternative to colouring dates based on whether they are 14C or not, sets of dates can be coloured as, e.g., \code{dates.col=colours()[2:100]}.}
\item{pbmodelled.col}{Colour of the modelled 210Pb values. Defaults to shades of blue: \code{pbmodelled.col=function(x) rgb(0,0,1,x)}.}
\item{pbmeasured.col}{Colour of the measured 210Pb values (default \code{pbmeasured.col="blue"}). Draws rectangles of the upper and lower depths as well as the Pb values with 95 percent error ranges.}
\item{pb.lim}{Axis limits for the Pb-210 data. Calculated automatically by default (\code{pblim=c()}).}
\item{hiatus.col}{The colour of the depths of any hiatuses. Default \code{hiatus.col=grey(0.5)}.}
\item{hiatus.lty}{The line type of the depths of any hiatuses. Default \code{hiatus.lty=12}.}
\item{greyscale}{The function to produce a coloured representation of all age-models. Defaults to grey-scales: \code{greyscale=function(x) grey(1-x)}.}
\item{slump.col}{Colour of slumps. Defaults to \code{slump.col=grey(0.8)}.}
\item{normalise.dists}{By default, the distributions of more precise dates will cover less time and will thus peak higher than less precise dates. This can be avoided by specifying \code{normalise.dists=FALSE}.}
\item{same.heights}{Plot the distributions of the dates all at the same maximum height (default \code{same.height=FALSE}).}
\item{cc}{Calibration curve for 14C dates: \code{cc=1} for IntCal20 (northern hemisphere terrestrial), \code{cc=2} for Marine20 (marine), \code{cc=3} for SHCal20 (southern hemisphere terrestrial). For dates that are already on the cal BP scale use \code{cc=0}.}
\item{title}{The title of the age-depth model is plotted on the main panel. By default this is the core's name. To leave empty: \code{title=""}.}
\item{title.location}{Location of the title. Default \code{title.location='top'}.}
\item{after}{Sets a short section above and below hiatus.depths within which to calculate ages. For internal calculations - do not change.}
\item{bty}{Type of box to be drawn around plots (\code{"n"} for none, and \code{"l"} (default), \code{"7"}, \code{"c"}, \code{"u"}, or \code{"o"} for correspondingly shaped boxes).}
\item{mar}{Plot margins (amount of white space along edges of axes 1-4). Default \code{mar=c(3,3,1,1)}.}
\item{mgp}{Axis text margins (where should titles, labels and tick marks be plotted). Defaults to \code{mgp=c(1.5, .7, .0)}.}
\item{xaxs}{Extension of x-axis. By default, add some extra white-space at both extremes (\code{xaxs="r"}). See ?par for other options.}
\item{yaxs}{Extension of y-axis. By default, add no extra white-space at both extremes (\code{yaxs="i"}). See ?par for other options.}
\item{xaxt}{Whether or not to plot the x-axis. Can be used to adapt axes after a plot. See ?par for other options.}
\item{yaxt}{Whether or not to plot the y-axis. Can be used to adapt axes after a plot. See ?par for other options.}
\item{plot.pb}{Plot the 210Pb data. Defaults to \code{plot.pb=TRUE}.}
\item{plot.pdf}{Produce a pdf file of the age-depth plot.}
\item{dates.only}{By default, the age-depth model is plotted on top of the dates. This can be avoided by supplying \code{dates.only=TRUE}.}
\item{model.only}{By default, panels showing the MCMC iterations and the priors and posteriors for accumulation rate and memory are plotted above the main age-depth model panel. This can be avoided by supplying \code{model.only=TRUE}. Note however that this removes relevant information to evaluate the age-depth model, so we do recommend to present age-models together with these upper panels.}
\item{verbose}{Provide a summary of the age ranges after producing the age-depth model graph; default \code{verbose=FALSE}.}
}
\value{
A plot of the age-depth model, and estimated ages incl. confidence ranges for each depth.
}
\description{
Plot the age-depth model of a core.
}
\details{
After loading a previous run, or after running either the \link{scissors} or \link{thinner} command, plot the age-model
again using the command \code{agedepth()}.
}
\examples{
\donttest{
Plum(ssize=100, ask=FALSE, suggest=FALSE, coredir=tempfile(),
date.sample=2018.5, radon.case=0, n.supp=3)
agedepth()
}
}
\references{
Blaauw, M. and Christen, J.A., Flexible paleoclimate age-depth models using an autoregressive
gamma process. Bayesian Analysis 6 (2011), no. 3, 457--474.
\url{https://projecteuclid.org/euclid.ba/1339616472}
}
\author{
Maarten Blaauw, J. Andres Christen
}
|
#
# (c) Florian Katerndal
#
# MESMA (multiple endmember spectral mixture analysis) unmixing of annual
# Sentinel-2 best-available-pixel (BAP) composites, processed hierarchically
# (Level-1 below, Level-2 and Level-3 further down). Heavy lifting happens
# in parallel foreach workers; all output is written to disk as GeoTIFFs.
library(stringr)
library(dplyr)
library(raster)
library(RStoolbox)
library(sf)
library(itertools)
library(foreach)
library(doParallel)
library(readr)
# Project helpers; provides add_shade() used inside the workers below.
source("/data/Dagobah/fonda/shk/fonda/proj_ab/scripts/funs.R")
# One BAP composite per year. NOTE(review): the alphabetical order returned by
# list.files() is assumed to line up with `years` — TODO confirm.
ras_files <- list.files("/data/Dagobah/fonda/shk/fonda/proj_ab/data/basefiles", full.names = TRUE,
pattern = ".*LEVEL3_SEN2L_BAP.tif")
years <- 2015:2020
# Parallel backend for the %dopar% loops below (45 worker processes).
registerDoParallel(cores = 45)
# Level-1
# Spectral libraries: 2-endmember models as CSV rows, 3-endmember models as an RDS list.
level1_2em <- read.csv("/data/Dagobah/fonda/shk/fonda/proj_ab/data/speclibs/Level1_2EM.csv",
fileEncoding = "UTF-8", stringsAsFactors = FALSE)
level1_3em <- read_rds("/data/Dagobah/fonda/shk/fonda/proj_ab/data/speclibs/Level1_3EM.rds")
# Level-1 unmixing: for every annual BAP composite run MESMA once per
# 2-endmember model (one row of level1_2em each) and once per 3-endmember
# model (one element of level1_3em each). Each worker writes one GeoTIFF per
# abundance class plus an RMSE layer, then merges them with the external
# `fix_rasters` binary. `mesma_returns` only collects TRUE flags; the real
# output is the side effect of the files written to disk.
for (i in seq_along(ras_files)) {  # seq_along() handles an empty file list, unlike 1:length()
  ras <- stack(ras_files[i])
  # --- 2-endmember models: one parallel worker per spectral-library row ---
  mesma_returns <- foreach(model_1 = isplitRows(level1_2em, chunkSize = 1),
                           .packages = c("raster", "RStoolbox", "stringr"),
                           .inorder = FALSE,
                           .multicombine = TRUE) %dopar% {
    # rasterOptions() is per-session state, so it must be set inside each worker
    rasterOptions(
      chunksize = 1.6e+10,
      maxmemory = 1.6e+10,
      memfrac = 0.9,
      todisk = FALSE,
      tmpdir = "/data/Dagobah/fonda/shk/fonda/proj_ab/temp"
    )
    model_1 <- as.data.frame(model_1)
    # one class name per abundance layer; a synthetic shade endmember is appended
    class_name <- c(model_1[, "Klasse", drop = TRUE], "Schatten")
    ID <- model_1[, "X", drop = TRUE]
    out_dir <- paste0("/data/Dagobah/fonda/shk/fonda/proj_ab/data/", years[i], "/Level_1/", ID)
    # NOTE(review): str_replace() substitutes only the FIRST whitespace; kept
    # as-is so directory names stay identical to earlier runs.
    out_dir <- str_replace(out_dir, "[\\s]", "_")
    out_dir <- str_replace_all(out_dir, "[\\(\\)]", "")
    dir.create(out_dir, recursive = TRUE, showWarnings = TRUE)
    # drop metadata columns so only spectral bands remain, then add shade
    model_1 <- add_shade(
      model_1[, !colnames(model_1) %in% c("X", "Klasse", "FID", "angle", "EAR", "rank_angle", "rank_EAR")]
    )
    out_ras <- mesma(ras, model_1, iterate = 400)
    # write one compressed GeoTIFF per layer; the final layer is the model RMSE
    for (j in seq_len(nlayers(out_ras))) {
      if (j == nlayers(out_ras)) {
        name <- "RMSE"
      } else {
        name <- class_name[j]
      }
      path <- paste0("/data/Dagobah/fonda/shk/fonda/proj_ab/data/", years[i], "/Level_1/", ID, "/", name, ".tif")
      path <- str_replace(path, "[\\s]", "_")
      path <- str_replace_all(path, "[\\(\\)]", "")
      writeRaster(out_ras[[j]], path,
                  options = c(
                    "NUM_THREADS=1",
                    "COMPRESS=DEFLATE",
                    "PREDICTOR=3"
                  ),
                  overwrite = TRUE)
    }
    rm(out_ras, ID, class_name)
    # merge this model's per-class rasters into a single out.tif
    system(
      paste(
        "/data/Dagobah/fonda/shk/fonda/proj_ab/src/release/fix_rasters",
        paste0(out_dir, "/*"),
        paste0(out_dir, "/out.tif")
      )
    )
    return(TRUE)
  }
  system("echo L1 2-EM done")
  # --- 3-endmember models: one worker per list element; the model ID is the
  # underscore-joined FIDs of the member spectra ---
  mesma_returns <- foreach(model_1 = isplitVector(level1_3em, chunkSize = 1),
                           .packages = c("raster", "RStoolbox", "stringr"),
                           .inorder = FALSE,
                           .multicombine = TRUE) %dopar% {
    rasterOptions(
      chunksize = 1.6e+10,
      maxmemory = 1.6e+10,
      memfrac = 0.9,
      todisk = FALSE,
      tmpdir = "/data/Dagobah/fonda/shk/fonda/proj_ab/temp"
    )
    model_1 <- as.data.frame(model_1)
    class_name <- c(model_1[, "Klasse", drop = TRUE], "Schatten")
    ID <- str_c(model_1[, "FID", drop = TRUE], collapse = "_")
    out_dir <- paste0("/data/Dagobah/fonda/shk/fonda/proj_ab/data/", years[i], "/Level_1/", ID)
    out_dir <- str_replace(out_dir, "[\\s]", "_")
    out_dir <- str_replace_all(out_dir, "[\\(\\)]", "")
    dir.create(out_dir, recursive = TRUE, showWarnings = TRUE)
    model_1 <- add_shade(
      model_1[, !colnames(model_1) %in% c("X", "Klasse", "FID", "angle", "EAR", "rank_angle", "rank_EAR")]
    )
    out_ras <- mesma(ras, model_1, iterate = 400)
    for (j in seq_len(nlayers(out_ras))) {
      if (j == nlayers(out_ras)) {
        name <- "RMSE"
      } else {
        name <- class_name[j]
      }
      path <- paste0("/data/Dagobah/fonda/shk/fonda/proj_ab/data/", years[i], "/Level_1/", ID, "/", name, ".tif")
      path <- str_replace(path, "[\\s]", "_")
      path <- str_replace_all(path, "[\\(\\)]", "")
      writeRaster(out_ras[[j]], path,
                  options = c(
                    "NUM_THREADS=1",
                    "COMPRESS=DEFLATE",
                    "PREDICTOR=3"
                  ),
                  overwrite = TRUE)
    }
    rm(out_ras, ID, class_name)
    system(
      paste(
        "/data/Dagobah/fonda/shk/fonda/proj_ab/src/release/fix_rasters",
        paste0(out_dir, "/*"),
        paste0(out_dir, "/out.tif")
      )
    )
    return(TRUE)
  }
  system("echo L1 3-EM done")
}
# Hierarchy Building on local machine
# (the l1_hierarchy_<year>.tif rasters consumed below are produced offline)
# Level-2
# Spectral libraries for the Level-2 stage: 2-EM models as CSV rows, 3-EM models as an RDS list.
level2_2em <- read.csv("/data/Dagobah/fonda/shk/fonda/proj_ab/data/speclibs/Level2_2EM.csv",
fileEncoding = "UTF-8", stringsAsFactors = FALSE)
level2_3em <- read_rds("/data/Dagobah/fonda/shk/fonda/proj_ab/data/speclibs/Level2_3EM.rds")
# Level-2 unmixing: each yearly composite is first masked with the Level-1
# class hierarchy (external `mask_stack` binary, mode 2), then MESMA is run
# per 2-EM row and per 3-EM list element, exactly as in the Level-1 stage.
for (i in seq_along(ras_files)) {  # seq_along() handles an empty file list, unlike 1:length()
  # mask the BAP composite with the Level-1 hierarchy before unmixing
  system(
    paste(
      "/data/Dagobah/fonda/shk/fonda/proj_ab/src/release/mask_stack 2",
      ras_files[i],
      paste0("/data/Dagobah/fonda/shk/fonda/proj_ab/data/", years[i], "/Level_1/l1_hierarchy_", years[i], ".tif"),
      paste0("/data/Dagobah/fonda/shk/fonda/proj_ab/data/basefiles/masked_BAP_L2-", years[i], ".tif")
    )
  )
  ras <- stack(paste0("/data/Dagobah/fonda/shk/fonda/proj_ab/data/basefiles/masked_BAP_L2-", years[i], ".tif"))
  # --- 2-endmember models: one parallel worker per spectral-library row ---
  mesma_returns <- foreach(model_1 = isplitRows(level2_2em, chunkSize = 1),
                           .packages = c("raster", "RStoolbox", "stringr"),
                           .inorder = FALSE,
                           .multicombine = TRUE) %dopar% {
    # rasterOptions() is per-session state, so it must be set inside each worker
    rasterOptions(
      chunksize = 1.6e+10,
      maxmemory = 1.6e+10,
      memfrac = 0.9,
      todisk = FALSE,
      tmpdir = "/data/Dagobah/fonda/shk/fonda/proj_ab/temp"
    )
    model_1 <- as.data.frame(model_1)
    # one class name per abundance layer; a synthetic shade endmember is appended
    class_name <- c(model_1[, "Klasse", drop = TRUE], "Schatten")
    ID <- model_1[, "X", drop = TRUE]
    out_dir <- paste0("/data/Dagobah/fonda/shk/fonda/proj_ab/data/", years[i], "/Level_2/", ID)
    # NOTE(review): str_replace() substitutes only the FIRST whitespace; kept
    # as-is so directory names stay identical to earlier runs.
    out_dir <- str_replace(out_dir, "[\\s]", "_")
    out_dir <- str_replace_all(out_dir, "[\\(\\)]", "")
    dir.create(out_dir, recursive = TRUE, showWarnings = TRUE)
    # drop metadata columns (incl. the Level2 label) so only bands remain, then add shade
    model_1 <- add_shade(
      model_1[, !colnames(model_1) %in% c("X", "Level2", "Klasse", "FID", "angle", "EAR", "rank_angle", "rank_EAR")]
    )
    out_ras <- mesma(ras, model_1, iterate = 400)
    # write one compressed GeoTIFF per layer; the final layer is the model RMSE
    for (j in seq_len(nlayers(out_ras))) {
      if (j == nlayers(out_ras)) {
        name <- "RMSE"
      } else {
        name <- class_name[j]
      }
      path <- paste0("/data/Dagobah/fonda/shk/fonda/proj_ab/data/", years[i], "/Level_2/", ID, "/", name, ".tif")
      path <- str_replace(path, "[\\s]", "_")
      path <- str_replace_all(path, "[\\(\\)]", "")
      writeRaster(out_ras[[j]], path,
                  options = c(
                    "NUM_THREADS=1",
                    "COMPRESS=DEFLATE",
                    "PREDICTOR=3"
                  ),
                  overwrite = TRUE)
    }
    rm(out_ras, ID, class_name)
    # merge this model's per-class rasters into a single out.tif
    system(
      paste(
        "/data/Dagobah/fonda/shk/fonda/proj_ab/src/release/fix_rasters",
        paste0(out_dir, "/*"),
        paste0(out_dir, "/out.tif")
      )
    )
    return(TRUE)
  }
  system("echo L2 2-EM done")
  # --- 3-endmember models: one worker per list element; ID joins the FIDs ---
  mesma_returns <- foreach(model_1 = isplitVector(level2_3em, chunkSize = 1),
                           .packages = c("raster", "RStoolbox", "stringr"),
                           .inorder = FALSE,
                           .multicombine = TRUE) %dopar% {
    rasterOptions(
      chunksize = 1.6e+10,
      maxmemory = 1.6e+10,
      memfrac = 0.9,
      todisk = FALSE,
      tmpdir = "/data/Dagobah/fonda/shk/fonda/proj_ab/temp"
    )
    model_1 <- as.data.frame(model_1)
    class_name <- c(model_1[, "Klasse", drop = TRUE], "Schatten")
    ID <- str_c(model_1[, "FID", drop = TRUE], collapse = "_")
    out_dir <- paste0("/data/Dagobah/fonda/shk/fonda/proj_ab/data/", years[i], "/Level_2/", ID)
    out_dir <- str_replace(out_dir, "[\\s]", "_")
    out_dir <- str_replace_all(out_dir, "[\\(\\)]", "")
    dir.create(out_dir, recursive = TRUE, showWarnings = TRUE)
    # here band columns are selected positively by name pattern
    model_1 <- add_shade(
      model_1[, grepl("Band", colnames(model_1))]
    )
    out_ras <- mesma(ras, model_1, iterate = 400)
    for (j in seq_len(nlayers(out_ras))) {
      if (j == nlayers(out_ras)) {
        name <- "RMSE"
      } else {
        name <- class_name[j]
      }
      path <- paste0("/data/Dagobah/fonda/shk/fonda/proj_ab/data/", years[i], "/Level_2/", ID, "/", name, ".tif")
      path <- str_replace(path, "[\\s]", "_")
      path <- str_replace_all(path, "[\\(\\)]", "")
      writeRaster(out_ras[[j]], path,
                  options = c(
                    "NUM_THREADS=1",
                    "COMPRESS=DEFLATE",
                    "PREDICTOR=3"
                  ),
                  overwrite = TRUE)
    }
    rm(out_ras, ID, class_name)
    system(
      paste(
        "/data/Dagobah/fonda/shk/fonda/proj_ab/src/release/fix_rasters",
        paste0(out_dir, "/*"),
        paste0(out_dir, "/out.tif")
      )
    )
    return(TRUE)
  }
  system("echo L2 3-EM done")
}
# Hierarchy Building on local machine!
# Level-3
# Re-listing the inputs as the L2-masked stacks "transports" the masks from
# Level 2 to Level 3. Forgetting this earlier did not change any results; it
# only meant the processing took longer than it would have if the omission
# had been noticed before all levels were processed.
ras_files <- list.files("/data/Dagobah/fonda/shk/fonda/proj_ab/data/basefiles", full.names = TRUE,
pattern = "masked_BAP_L2-.*.tif")
# Spectral libraries for the Level-3 stage: 2-EM models as CSV rows, 3-EM models as an RDS list.
level3_2em <- read.csv("/data/Dagobah/fonda/shk/fonda/proj_ab/data/speclibs/Level3_2EM.csv",
fileEncoding = "UTF-8", stringsAsFactors = FALSE)
level3_3em <- read_rds("/data/Dagobah/fonda/shk/fonda/proj_ab/data/speclibs/Level3_3EM.rds")
# Level-3 unmixing: the L2-masked stacks are masked again (external
# `mask_stack` binary, mode 4), then MESMA runs per 2-EM row and per 3-EM
# list element, mirroring the previous stages.
for (i in seq_along(ras_files)) {  # seq_along() handles an empty file list, unlike 1:length()
  # NOTE(review): this masks with the *Level-1* hierarchy raster even though the
  # input is already the L2-masked stack — per the author this was a known,
  # harmless oversight; confirm before rerunning.
  system(
    paste(
      "/data/Dagobah/fonda/shk/fonda/proj_ab/src/release/mask_stack 4",
      ras_files[i],
      paste0("/data/Dagobah/fonda/shk/fonda/proj_ab/data/", years[i], "/Level_1/l1_hierarchy_", years[i], ".tif"),
      paste0("/data/Dagobah/fonda/shk/fonda/proj_ab/data/basefiles/masked_BAP_L3-", years[i], ".tif")
    )
  )
  ras <- stack(paste0("/data/Dagobah/fonda/shk/fonda/proj_ab/data/basefiles/masked_BAP_L3-", years[i], ".tif"))
  # --- 2-endmember models: one parallel worker per spectral-library row ---
  mesma_returns <- foreach(model_1 = isplitRows(level3_2em, chunkSize = 1),
                           .packages = c("raster", "RStoolbox", "stringr"),
                           .inorder = FALSE,
                           .multicombine = TRUE) %dopar% {
    # rasterOptions() is per-session state, so it must be set inside each worker
    rasterOptions(
      chunksize = 1.6e+10,
      maxmemory = 1.6e+10,
      memfrac = 0.9,
      todisk = FALSE,
      tmpdir = "/data/Dagobah/fonda/shk/fonda/proj_ab/temp"
    )
    model_1 <- as.data.frame(model_1)
    # one class name per abundance layer; a synthetic shade endmember is appended
    class_name <- c(model_1[, "Klasse", drop = TRUE], "Schatten")
    ID <- model_1[, "X", drop = TRUE]
    out_dir <- paste0("/data/Dagobah/fonda/shk/fonda/proj_ab/data/", years[i], "/Level_3/", ID)
    # NOTE(review): str_replace() substitutes only the FIRST whitespace; kept
    # as-is so directory names stay identical to earlier runs.
    out_dir <- str_replace(out_dir, "[\\s]", "_")
    out_dir <- str_replace_all(out_dir, "[\\(\\)]", "")
    dir.create(out_dir, recursive = TRUE, showWarnings = TRUE)
    # drop metadata columns so only spectral bands remain, then add shade
    model_1 <- add_shade(
      model_1[, !colnames(model_1) %in% c("X", "Level2", "Klasse", "FID", "angle", "EAR", "rank_angle", "rank_EAR")]
    )
    out_ras <- mesma(ras, model_1, iterate = 400)
    # write one compressed GeoTIFF per layer; the final layer is the model RMSE
    for (j in seq_len(nlayers(out_ras))) {
      if (j == nlayers(out_ras)) {
        name <- "RMSE"
      } else {
        name <- class_name[j]
      }
      path <- paste0("/data/Dagobah/fonda/shk/fonda/proj_ab/data/", years[i], "/Level_3/", ID, "/", name, ".tif")
      path <- str_replace(path, "[\\s]", "_")
      path <- str_replace_all(path, "[\\(\\)]", "")
      writeRaster(out_ras[[j]], path,
                  options = c(
                    "NUM_THREADS=1",
                    "COMPRESS=DEFLATE",
                    "PREDICTOR=3"
                  ),
                  overwrite = TRUE)
    }
    rm(out_ras, ID, class_name)
    # merge this model's per-class rasters into a single out.tif
    system(
      paste(
        "/data/Dagobah/fonda/shk/fonda/proj_ab/src/release/fix_rasters",
        paste0(out_dir, "/*"),
        paste0(out_dir, "/out.tif")
      )
    )
    return(TRUE)
  }
  system("echo L3 2-EM done")
  # --- 3-endmember models: one worker per list element; ID joins the FIDs ---
  mesma_returns <- foreach(model_1 = isplitVector(level3_3em, chunkSize = 1),
                           .packages = c("raster", "RStoolbox", "stringr"),
                           .inorder = FALSE,
                           .multicombine = TRUE) %dopar% {
    rasterOptions(
      chunksize = 1.6e+10,
      maxmemory = 1.6e+10,
      memfrac = 0.9,
      todisk = FALSE,
      tmpdir = "/data/Dagobah/fonda/shk/fonda/proj_ab/temp"
    )
    model_1 <- as.data.frame(model_1)
    class_name <- c(model_1[, "Klasse", drop = TRUE], "Schatten")
    ID <- str_c(model_1[, "FID", drop = TRUE], collapse = "_")
    out_dir <- paste0("/data/Dagobah/fonda/shk/fonda/proj_ab/data/", years[i], "/Level_3/", ID)
    out_dir <- str_replace(out_dir, "[\\s]", "_")
    out_dir <- str_replace_all(out_dir, "[\\(\\)]", "")
    dir.create(out_dir, recursive = TRUE, showWarnings = TRUE)
    # here band columns are selected positively by name pattern
    model_1 <- add_shade(
      model_1[, grepl("Band", colnames(model_1))]
    )
    out_ras <- mesma(ras, model_1, iterate = 400)
    for (j in seq_len(nlayers(out_ras))) {
      if (j == nlayers(out_ras)) {
        name <- "RMSE"
      } else {
        name <- class_name[j]
      }
      path <- paste0("/data/Dagobah/fonda/shk/fonda/proj_ab/data/", years[i], "/Level_3/", ID, "/", name, ".tif")
      path <- str_replace(path, "[\\s]", "_")
      path <- str_replace_all(path, "[\\(\\)]", "")
      writeRaster(out_ras[[j]], path,
                  options = c(
                    "NUM_THREADS=1",
                    "COMPRESS=DEFLATE",
                    "PREDICTOR=3"
                  ),
                  overwrite = TRUE)
    }
    rm(out_ras, ID, class_name)
    system(
      paste(
        "/data/Dagobah/fonda/shk/fonda/proj_ab/src/release/fix_rasters",
        paste0(out_dir, "/*"),
        paste0(out_dir, "/out.tif")
      )
    )
    return(TRUE)
  }
  system("echo L3 3-EM done")
}
# Hierarchy Building on local machine!
# Shut down the parallel backend registered with registerDoParallel() above.
stopImplicitCluster()
| /R-Scripts/analysis_03.R | no_license | Florian-Katerndahl/Proj-Arbeiten | R | false | false | 12,917 | r | #
# (c) Florian Katerndal
#
library(stringr)
library(dplyr)
library(raster)
library(RStoolbox)
library(sf)
library(itertools)
library(foreach)
library(doParallel)
library(readr)
source("/data/Dagobah/fonda/shk/fonda/proj_ab/scripts/funs.R")
ras_files <- list.files("/data/Dagobah/fonda/shk/fonda/proj_ab/data/basefiles", full.names = TRUE,
pattern = ".*LEVEL3_SEN2L_BAP.tif")
years <- 2015:2020
registerDoParallel(cores = 45)
# Level-1
level1_2em <- read.csv("/data/Dagobah/fonda/shk/fonda/proj_ab/data/speclibs/Level1_2EM.csv",
fileEncoding = "UTF-8", stringsAsFactors = FALSE)
level1_3em <- read_rds("/data/Dagobah/fonda/shk/fonda/proj_ab/data/speclibs/Level1_3EM.rds")
for (i in 1:length(ras_files)) {
ras <- stack(ras_files[i])
mesma_returns <- foreach(model_1 = isplitRows(level1_2em, chunkSize = 1),
.packages = c("raster", "RStoolbox", "stringr"),
.inorder = FALSE,
.multicombine = TRUE) %dopar% {
rasterOptions(
chunksize = 1.6e+10,
maxmemory = 1.6e+10,
memfrac = 0.9,
todisk = FALSE,
tmpdir = "/data/Dagobah/fonda/shk/fonda/proj_ab/temp"
)
model_1 <- as.data.frame(model_1)
class_name <- c(model_1[, "Klasse", drop = TRUE], "Schatten")
ID <- model_1[, "X", drop = TRUE]
out_dir <- paste0("/data/Dagobah/fonda/shk/fonda/proj_ab/data/", years[i], "/Level_1/", paste0(ID))
out_dir <- str_replace(out_dir, "[\\s]", "_")
out_dir <- str_replace_all(out_dir, "[\\(\\)]", "")
dir.create(out_dir, recursive = TRUE, showWarnings = TRUE)
model_1 <- add_shade(
model_1[, !colnames(model_1) %in% c("X", "Klasse", "FID", "angle", "EAR", "rank_angle", "rank_EAR")]
)
out_ras <- mesma(ras, model_1, iterate = 400)
for (j in seq(nlayers(out_ras))) {
if (j == nlayers(out_ras)) {
name <- "RMSE"
} else {
name <- class_name[j]
}
path <- paste0("/data/Dagobah/fonda/shk/fonda/proj_ab/data/", years[i], "/Level_1/", paste0(ID), "/", name, ".tif")
path <- str_replace(path, "[\\s]", "_")
path <- str_replace_all(path, "[\\(\\)]", "")
writeRaster(out_ras[[j]], path,
options = c(
"NUM_THREADS=1",
"COMPRESS=DEFLATE",
"PREDICTOR=3"
),
overwrite = TRUE)
}
rm(out_ras, ID, class_name)
system(
paste(
"/data/Dagobah/fonda/shk/fonda/proj_ab/src/release/fix_rasters",
paste0(out_dir, "/*"),
paste0(out_dir, "/out.tif")
)
)
return(TRUE)
}
system("echo L1 2-EM done")
mesma_returns <- foreach(model_1 = isplitVector(level1_3em, chunkSize = 1),
.packages = c("raster", "RStoolbox", "stringr"),
.inorder = FALSE,
.multicombine = TRUE) %dopar% {
rasterOptions(
chunksize = 1.6e+10,
maxmemory = 1.6e+10,
memfrac = 0.9,
todisk = FALSE,
tmpdir = "/data/Dagobah/fonda/shk/fonda/proj_ab/temp"
)
model_1 <- as.data.frame(model_1)
class_name <- c(model_1[, "Klasse", drop = TRUE], "Schatten")
ID <- str_c(model_1[, "FID", drop = TRUE], collapse = "_")
out_dir <- paste0("/data/Dagobah/fonda/shk/fonda/proj_ab/data/", years[i], "/Level_1/", paste0(ID))
out_dir <- str_replace(out_dir, "[\\s]", "_")
out_dir <- str_replace_all(out_dir, "[\\(\\)]", "")
dir.create(out_dir, recursive = TRUE, showWarnings = TRUE)
model_1 <- add_shade(
model_1[, !colnames(model_1) %in% c("X", "Klasse", "FID", "angle", "EAR", "rank_angle", "rank_EAR")]
)
out_ras <- mesma(ras, model_1, iterate = 400)
for (j in seq(nlayers(out_ras))) {
if (j == nlayers(out_ras)) {
name <- "RMSE"
} else {
name <- class_name[j]
}
path <- paste0("/data/Dagobah/fonda/shk/fonda/proj_ab/data/", years[i], "/Level_1/", paste0(ID), "/", name, ".tif")
path <- str_replace(path, "[\\s]", "_")
path <- str_replace_all(path, "[\\(\\)]", "")
writeRaster(out_ras[[j]], path,
options = c(
"NUM_THREADS=1",
"COMPRESS=DEFLATE",
"PREDICTOR=3"
),
overwrite = TRUE)
}
rm(out_ras, ID, class_name)
system(
paste(
"/data/Dagobah/fonda/shk/fonda/proj_ab/src/release/fix_rasters",
paste0(out_dir, "/*"),
paste0(out_dir, "/out.tif")
)
)
return(TRUE)
}
system("echo L1 3-EM done")
}
# Hierarchy Building on local machine
# Level-2
level2_2em <- read.csv("/data/Dagobah/fonda/shk/fonda/proj_ab/data/speclibs/Level2_2EM.csv",
fileEncoding = "UTF-8", stringsAsFactors = FALSE)
level2_3em <- read_rds("/data/Dagobah/fonda/shk/fonda/proj_ab/data/speclibs/Level2_3EM.rds")
for (i in 1:length(ras_files)) {
system(
paste(
"/data/Dagobah/fonda/shk/fonda/proj_ab/src/release/mask_stack 2",
ras_files[i],
paste0("/data/Dagobah/fonda/shk/fonda/proj_ab/data/", years[i], "/Level_1/l1_hierarchy_", years[i], ".tif"),
paste0("/data/Dagobah/fonda/shk/fonda/proj_ab/data/basefiles/masked_BAP_L2-", years[i], ".tif")
)
)
ras <- stack(paste0("/data/Dagobah/fonda/shk/fonda/proj_ab/data/basefiles/masked_BAP_L2-", years[i], ".tif"))
mesma_returns <- foreach(model_1 = isplitRows(level2_2em, chunkSize = 1),
.packages = c("raster", "RStoolbox", "stringr"),
.inorder = FALSE,
.multicombine = TRUE) %dopar% {
rasterOptions(
chunksize = 1.6e+10,
maxmemory = 1.6e+10,
memfrac = 0.9,
todisk = FALSE,
tmpdir = "/data/Dagobah/fonda/shk/fonda/proj_ab/temp"
)
model_1 <- as.data.frame(model_1)
class_name <- c(model_1[, "Klasse", drop = TRUE], "Schatten")
ID <- model_1[, "X", drop = TRUE]
out_dir <- paste0("/data/Dagobah/fonda/shk/fonda/proj_ab/data/", years[i], "/Level_2/", paste0(ID))
out_dir <- str_replace(out_dir, "[\\s]", "_")
out_dir <- str_replace_all(out_dir, "[\\(\\)]", "")
dir.create(out_dir, recursive = TRUE, showWarnings = TRUE)
model_1 <- add_shade(
model_1[, !colnames(model_1) %in% c("X", "Level2", "Klasse", "FID", "angle", "EAR", "rank_angle", "rank_EAR")]
)
out_ras <- mesma(ras, model_1, iterate = 400)
for (j in seq(nlayers(out_ras))) {
if (j == nlayers(out_ras)) {
name <- "RMSE"
} else {
name <- class_name[j]
}
path <- paste0("/data/Dagobah/fonda/shk/fonda/proj_ab/data/", years[i], "/Level_2/", paste0(ID), "/", name, ".tif")
path <- str_replace(path, "[\\s]", "_")
path <- str_replace_all(path, "[\\(\\)]", "")
writeRaster(out_ras[[j]], path,
options = c(
"NUM_THREADS=1",
"COMPRESS=DEFLATE",
"PREDICTOR=3"
),
overwrite = TRUE)
}
rm(out_ras, ID, class_name)
system(
paste(
"/data/Dagobah/fonda/shk/fonda/proj_ab/src/release/fix_rasters",
paste0(out_dir, "/*"),
paste0(out_dir, "/out.tif")
)
)
return(TRUE)
}
system("echo L2 2-EM done")
mesma_returns <- foreach(model_1 = isplitVector(level2_3em, chunkSize = 1),
.packages = c("raster", "RStoolbox", "stringr"),
.inorder = FALSE,
.multicombine = TRUE) %dopar% {
rasterOptions(
chunksize = 1.6e+10,
maxmemory = 1.6e+10,
memfrac = 0.9,
todisk = FALSE,
tmpdir = "/data/Dagobah/fonda/shk/fonda/proj_ab/temp"
)
model_1 <- as.data.frame(model_1)
class_name <- c(model_1[, "Klasse", drop = TRUE], "Schatten")
ID <- str_c(model_1[, "FID", drop = TRUE], collapse = "_")
out_dir <- paste0("/data/Dagobah/fonda/shk/fonda/proj_ab/data/", years[i], "/Level_2/", paste0(ID))
out_dir <- str_replace(out_dir, "[\\s]", "_")
out_dir <- str_replace_all(out_dir, "[\\(\\)]", "")
dir.create(out_dir, recursive = TRUE, showWarnings = TRUE)
model_1 <- add_shade(
model_1[, grepl("Band", colnames(model_1))]
)
out_ras <- mesma(ras, model_1, iterate = 400)
for (j in seq(nlayers(out_ras))) {
if (j == nlayers(out_ras)) {
name <- "RMSE"
} else {
name <- class_name[j]
}
path <- paste0("/data/Dagobah/fonda/shk/fonda/proj_ab/data/", years[i], "/Level_2/", paste0(ID), "/", name, ".tif")
path <- str_replace(path, "[\\s]", "_")
path <- str_replace_all(path, "[\\(\\)]", "")
writeRaster(out_ras[[j]], path,
options = c(
"NUM_THREADS=1",
"COMPRESS=DEFLATE",
"PREDICTOR=3"
),
overwrite = TRUE)
}
rm(out_ras, ID, class_name)
system(
paste(
"/data/Dagobah/fonda/shk/fonda/proj_ab/src/release/fix_rasters",
paste0(out_dir, "/*"),
paste0(out_dir, "/out.tif")
)
)
return(TRUE)
}
system("echo L2 3-EM done")
}
# Hierarchy Building on local machine!
# Level-3
# this "transports" the masks from level 2 to 3, the fact that I forgot this doesn't
# change anything besides that I waited longer for the proccessing that I would have needed
# than if I had noticed it before being done with processing everything
ras_files <- list.files("/data/Dagobah/fonda/shk/fonda/proj_ab/data/basefiles", full.names = TRUE,
pattern = "masked_BAP_L2-.*.tif")
level3_2em <- read.csv("/data/Dagobah/fonda/shk/fonda/proj_ab/data/speclibs/Level3_2EM.csv",
fileEncoding = "UTF-8", stringsAsFactors = FALSE)
level3_3em <- read_rds("/data/Dagobah/fonda/shk/fonda/proj_ab/data/speclibs/Level3_3EM.rds")
for (i in 1:length(ras_files)) {
system(
paste(
"/data/Dagobah/fonda/shk/fonda/proj_ab/src/release/mask_stack 4",
ras_files[i],
paste0("/data/Dagobah/fonda/shk/fonda/proj_ab/data/", years[i], "/Level_1/l1_hierarchy_", years[i], ".tif"),
paste0("/data/Dagobah/fonda/shk/fonda/proj_ab/data/basefiles/masked_BAP_L3-", years[i], ".tif")
)
)
ras <- stack(paste0("/data/Dagobah/fonda/shk/fonda/proj_ab/data/basefiles/masked_BAP_L3-", years[i], ".tif"))
mesma_returns <- foreach(model_1 = isplitRows(level3_2em, chunkSize = 1),
.packages = c("raster", "RStoolbox", "stringr"),
.inorder = FALSE,
.multicombine = TRUE) %dopar% {
rasterOptions(
chunksize = 1.6e+10,
maxmemory = 1.6e+10,
memfrac = 0.9,
todisk = FALSE,
tmpdir = "/data/Dagobah/fonda/shk/fonda/proj_ab/temp"
)
model_1 <- as.data.frame(model_1)
class_name <- c(model_1[, "Klasse", drop = TRUE], "Schatten")
ID <- model_1[, "X", drop = TRUE]
out_dir <- paste0("/data/Dagobah/fonda/shk/fonda/proj_ab/data/", years[i], "/Level_3/", paste0(ID))
out_dir <- str_replace(out_dir, "[\\s]", "_")
out_dir <- str_replace_all(out_dir, "[\\(\\)]", "")
dir.create(out_dir, recursive = TRUE, showWarnings = TRUE)
model_1 <- add_shade(
model_1[, !colnames(model_1) %in% c("X", "Level2", "Klasse", "FID", "angle", "EAR", "rank_angle", "rank_EAR")]
)
out_ras <- mesma(ras, model_1, iterate = 400)
for (j in seq(nlayers(out_ras))) {
if (j == nlayers(out_ras)) {
name <- "RMSE"
} else {
name <- class_name[j]
}
path <- paste0("/data/Dagobah/fonda/shk/fonda/proj_ab/data/", years[i], "/Level_3/", paste0(ID), "/", name, ".tif")
path <- str_replace(path, "[\\s]", "_")
path <- str_replace_all(path, "[\\(\\)]", "")
writeRaster(out_ras[[j]], path,
options = c(
"NUM_THREADS=1",
"COMPRESS=DEFLATE",
"PREDICTOR=3"
),
overwrite = TRUE)
}
rm(out_ras, ID, class_name)
system(
paste(
"/data/Dagobah/fonda/shk/fonda/proj_ab/src/release/fix_rasters",
paste0(out_dir, "/*"),
paste0(out_dir, "/out.tif")
)
)
return(TRUE)
}
system("echo L3 2-EM done")
mesma_returns <- foreach(model_1 = isplitVector(level3_3em, chunkSize = 1),
.packages = c("raster", "RStoolbox", "stringr"),
.inorder = FALSE,
.multicombine = TRUE) %dopar% {
rasterOptions(
chunksize = 1.6e+10,
maxmemory = 1.6e+10,
memfrac = 0.9,
todisk = FALSE,
tmpdir = "/data/Dagobah/fonda/shk/fonda/proj_ab/temp"
)
model_1 <- as.data.frame(model_1)
class_name <- c(model_1[, "Klasse", drop = TRUE], "Schatten")
ID <- str_c(model_1[, "FID", drop = TRUE], collapse = "_")
out_dir <- paste0("/data/Dagobah/fonda/shk/fonda/proj_ab/data/", years[i], "/Level_3/", paste0(ID))
out_dir <- str_replace(out_dir, "[\\s]", "_")
out_dir <- str_replace_all(out_dir, "[\\(\\)]", "")
dir.create(out_dir, recursive = TRUE, showWarnings = TRUE)
model_1 <- add_shade(
model_1[, grepl("Band", colnames(model_1))]
)
out_ras <- mesma(ras, model_1, iterate = 400)
for (j in seq(nlayers(out_ras))) {
if (j == nlayers(out_ras)) {
name <- "RMSE"
} else {
name <- class_name[j]
}
path <- paste0("/data/Dagobah/fonda/shk/fonda/proj_ab/data/", years[i], "/Level_3/", paste0(ID), "/", name, ".tif")
path <- str_replace(path, "[\\s]", "_")
path <- str_replace_all(path, "[\\(\\)]", "")
writeRaster(out_ras[[j]], path,
options = c(
"NUM_THREADS=1",
"COMPRESS=DEFLATE",
"PREDICTOR=3"
),
overwrite = TRUE)
}
rm(out_ras, ID, class_name)
system(
paste(
"/data/Dagobah/fonda/shk/fonda/proj_ab/src/release/fix_rasters",
paste0(out_dir, "/*"),
paste0(out_dir, "/out.tif")
)
)
return(TRUE)
}
system("echo L3 3-EM done")
}
# Hierarchy Building on local machine!
stopImplicitCluster()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.