content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{deutsch}
\alias{deutsch}
\title{[Dataset] Deutsch Soccer Team And Clubs}
\format{A data.frame (26 * 5) \cr
\tabular{lll}{
Var \tab Type \tab Meaning \cr
player \tab chr \tab Player names \cr
club \tab chr \tab Club of the players \cr
weight \tab num \tab The weight of the connection between clubs and players \cr
role \tab chr \tab Role of the players ('Fw', 'Mf', 'Gk', 'Df', ...) \cr
year \tab int \tab Year tag of the Deutsch soccer team
}}
\description{
A data.frame comprising Deutsch soccer team players and their clubs. It contains
two Deutsch teams (\code{year} == 2014 and \code{year} == 2016)
}
\examples{
data(deutsch)
str(deutsch)
}
\references{
\url{https://madlogos.github.io/rechartsX/Basic_Plots_12_Chord.html}
}
| /man/deutsch.Rd | permissive | xwydq/recharts-1 | R | false | true | 844 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{deutsch}
\alias{deutsch}
\title{[Dataset] Deutsch Soccer Team And Clubs}
\format{A data.frame (26 * 5) \cr
\tabular{lll}{
Var \tab Type \tab Meaning \cr
player \tab chr \tab Player names \cr
club \tab chr \tab Club of the players \cr
weight \tab num \tab The weight of the connection between clubs and players \cr
role \tab chr \tab Role of the players ('Fw', 'Mf', 'Gk', 'Df', ...) \cr
year \tab int \tab Year tag of the Deutsch soccer team
}}
\description{
A data.frame comprising Deutsch soccer team players and their clubs. It contains
two Deutsch teams (\code{year} == 2014 and \code{year} == 2016)
}
\examples{
data(deutsch)
str(deutsch)
}
\references{
\url{https://madlogos.github.io/rechartsX/Basic_Plots_12_Chord.html}
}
|
# Tests for the summary() methods of ucrdtw and ucred result objects.
context("test summary output")
# Locate the example data files bundled with the package.
dataf <- system.file("extdata/col_sc.txt", package = "rucrdtw")
firstf <- system.file("extdata/first_sc.txt", package = "rucrdtw")

test_that("ucrdtw summary method works", {
  # FIX: use `<-` for assignment (tidyverse style) instead of `=`.
  first <- ucrdtw_ff(dataf, firstf, 60, 0.05)
  x <- summary(first)
  # FIX: expect_s3_class() is the idiomatic class check; comparing
  # class(x) == "data.frame" breaks if the object gains extra classes.
  expect_s3_class(x, "data.frame")
  # summary() should flatten the result list into one column per element.
  expect_equal(ncol(x), length(first))
})

test_that("ucred summary method works", {
  first <- ucred_ff(dataf, firstf, 60)
  x <- summary(first)
  expect_s3_class(x, "data.frame")
  expect_equal(ncol(x), length(first))
})
| /tests/testthat/test-summaries.R | permissive | cran/rucrdtw | R | false | false | 586 | r | context("test summary output")
#get test data file locations
dataf <- system.file("extdata/col_sc.txt", package="rucrdtw")
firstf <- system.file("extdata/first_sc.txt", package="rucrdtw")
test_that("ucrdtw summary method works", {
first = ucrdtw_ff(dataf, firstf, 60, 0.05)
x <- summary(first)
expect_equal(class(x), "data.frame")
expect_equal(ncol(x), length(first))
})
test_that("ucred summary method works", {
first = ucred_ff(dataf, firstf, 60)
x <- summary(first)
expect_equal(class(x), "data.frame")
expect_equal(ncol(x), length(first))
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/run_query.R
\name{griffin.model.iterator}
\alias{griffin.model.iterator}
\title{Iterates over griffin controller to return string networks}
\usage{
griffin.model.iterator(controller, n = 1)
}
\arguments{
\item{controller}{griffin controller created by run.query}
\item{n}{number of networks to return}
}
\description{
Iterates over griffin controller to return string networks
}
\keyword{internal}
| /man/griffin.model.iterator.Rd | permissive | gsc0107/rgriffin | R | false | true | 477 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/run_query.R
\name{griffin.model.iterator}
\alias{griffin.model.iterator}
\title{Iterates over griffin controller to return string networks}
\usage{
griffin.model.iterator(controller, n = 1)
}
\arguments{
\item{controller}{griffin controller created by run.query}
\item{n}{number of networks to return}
}
\description{
Iterates over griffin controller to return string networks
}
\keyword{internal}
|
#
# Data source:
# Purpose: Exploratory Analysis of data
# April 20, 2020
# ---------------------------------------------------------------------------------------------------------------
# --- Package setup ----
# NOTE(review): readr, dplyr and ggplot2 are attached redundantly (tidyverse
# already attaches them).  randomForest and pROC, whose functions are used in
# the Random Forest / ROC sections near the end of this script, are never
# attached -- confirm they are loaded elsewhere before running those sections.
library(skimr)            # skim() data summaries
library(readr)            # read_csv()
library(dataPreparation)
library(tidyverse)        # dplyr, ggplot2, forcats, ...
library(dplyr)
library(corrplot)         # correlation-matrix heatmap
library(caret)            # varImp()
library(ggplot2)
library(leaps)
library(rpart)            # decision trees
library(tree)
library(glmnet)
library(car)
library(MASS)             # stepAIC(); NOTE: masks dplyr::select()
# Import Data
library(readr)            # NOTE(review): redundant -- readr already attached above
# Read the analytic BRFSS extract; force the three listed columns to integer.
brfss1 <- read_csv("data/analytic.csv", col_types = cols(SEX = col_integer(),
X_AGE_G = col_integer(), X_BMI5CAT = col_integer()))
###############################################
#          Exploratory Data Analysis          #
###############################################
# Check dimension of data
dim(brfss1)
# Summary of data
skim(brfss1)
# ---------------------------------------------------------------------------------------------------------------
# Replace/Remove NAs & missing values.
# SMOKDAY2 is blank when the respondent does not smoke, so code missing as 0.
brfss1 <- brfss1 %>%
  mutate(SMOKDAY2=ifelse(is.na(SMOKDAY2), 0, SMOKDAY2))
# Impute missing BMI with category code 3 ('Overweight' per the labels used
# later) -- NOTE(review): the original comment said "mean value", but 3 is a
# category code, not a mean; confirm intent.
brfss1 <- brfss1 %>%
  mutate(X_BMI5CAT=ifelse(is.na(X_BMI5CAT), 3, X_BMI5CAT))
# Drop rows with any remaining NAs (e.g. in EXERANY2 & X_MRACE1)
brfss <- na.omit(brfss1)
# Sanity checks: dimensions and absence of NAs after cleaning
dim(brfss)
any(is.na.data.frame(brfss))
hist(brfss$VETERAN3)
# All respondents are or have served in the military per the study inclusion
# criteria, so VETERAN3 is constant; SLEPTIM1 is redundant with SLEPTIM2.
brfss <- subset(brfss, select = -c(VETERAN3, SLEPTIM1))
table(brfss$EDGROUP)
# Drop education non-responders (code 9)
brfss <- brfss[brfss$EDGROUP != 9,]
table(brfss$SMOKGRP)
# Only 290 smoking non-responders; drop them as well
brfss <- brfss[brfss$SMOKGRP != 9,]
# Convert the numeric category codes to labelled factors for plotting.
# NOTE(review): the SMOKEDAY2 line below builds a *new* factor column named
# SMOKEDAY2 from SMOKDAY2; the numeric SMOKDAY2 column is kept as well.  The
# extra "E" looks like a typo -- confirm whether SMOKDAY2 itself was meant to
# be overwritten.
brfss3 <- brfss %>%
  mutate(ALCGRP = factor(ALCGRP, levels = c(3, 2, 9, 1),labels = c('Weekly', 'Monthly', "Unknown", "None")),
X_AGE_G = factor(X_AGE_G, levels = c(1, 2, 3, 4, 5, 6), labels = c('18-24', '25-34', '35-44', '45-54', '55-64', '65+')),
ASTHMA4 = factor(ASTHMA4, levels = c(1,0), labels = c("Yes", "No")),
RACEGRP = factor(RACEGRP, levels = c(1, 2, 3, 4, 5, 6, 9), labels = c('White', 'AA', 'Native Am', 'Asian', 'Pacific Islander', 'Other/Multiracial', 'Unknown')),
MARITAL = factor(MARITAL, levels = c(1, 2, 3, 4, 5, 9), labels = c('Married', 'Divorced', 'Widowed', 'Never Married', 'Partner', 'Unknown')),
GENHLTH2 = factor(GENHLTH2, levels = c(1, 2, 3, 4, 5, 9), labels = c('Excellent', 'Very Good', 'Good', 'Fair', 'Poor', 'Unknown')),
HLTHPLN2 = factor(HLTHPLN2, levels = c(1, 2, 9), labels = c('Yes', 'No', 'Unknown')),
EDGROUP = factor(EDGROUP, levels = c(1, 2, 3, 4, 9), labels = c('Some High School', 'High School', 'Some college', 'College Graduate', 'Unknown')),
INCOME3 = factor(INCOME3, levels = c(1, 2, 3, 4, 5, 6, 7, 8, 9), labels = c('< $10K', '10-15', '15-20', '20-25', '25-35', '35-50', '50-75', '$75K +', 'Unknown')),
BMICAT = factor(BMICAT, levels = c(1, 2, 3, 4, 9), labels = c('Underweight', 'Normal', 'Overweight', 'Obese', 'Unknown')),
SMOKEDAY2 = factor(SMOKDAY2, levels = c(1,2, 3, 9), labels = c('Every Day', 'Some Days', 'Not at all', 'Unknown')),
EXERANY3 = factor(EXERANY3, levels = c(1, 2, 9), labels = c('Yes', 'No', 'Unknown')))
# Summary of the labelled data set
skim(brfss3)
# ---------------------------------------------------------------------------------------------------------------
# --- Plots ----
# Barplot of the distribution of hours slept.
# NOTE(review): ALCDAY5 is numeric here, so fill= produces a continuous
# colour scale on a bar chart; the factor ALCGRP was presumably intended --
# TODO confirm.
bar_sleep <- ggplot(data=brfss) +
geom_bar(mapping=aes(x=SLEPTIM2, fill=ALCDAY5),
show.legend = TRUE,
width=0.6) +
xlim(-1, 20) +
theme_minimal() +
labs(x = "Hours of sleep", y = "Count", title="Hours Slept Distribution")
bar_sleep
# General sleep distribution of all veteran respondents
boxplot(brfss$SLEPTIM2, main="Box Plot of SLEPTIM2",
xlab="All Respondants", ylab="Hours Slept")
# Sleep-hours distribution by alcohol-consumption group
boxplot(SLEPTIM2~ALCGRP, data=brfss3, main="Box Plot of SLEPTIM2 by ALCGRP",
xlab="Alcohol consumption in last 30 days", ylab="Hours Slept")
# Sleep-hours distribution by BMI group
boxplot(SLEPTIM2~BMICAT, data=brfss3, main="Box Plot of Sleep by BMI Group",
xlab="BMI Category", ylab="Hours Slept")
# Sleep-hours distribution by highest education completed
boxplot(SLEPTIM2~EDGROUP, data=brfss3, main="Box Plot of Sleep by EDU Group",
xlab="Highest Education Completed", ylab="Hours Slept")
# Sleep-hours distribution by smoker group (numeric codes; data=brfss)
boxplot(SLEPTIM2~SMOKGRP, data=brfss, main="Box Plot of Sleep by Smoker Freq Group",
xlab="Current Smoking Freq", ylab="Hours Slept")
# Sleep-hours distribution by smoking frequency (numeric codes; data=brfss)
boxplot(SLEPTIM2~SMOKDAY2, data=brfss, main="Box Plot of SLEPTIM2 by Smoker Freq Group",
xlab="Current Smoking Freq", ylab="Hours Slept")
# BMI distribution, bars ordered by frequency.
# NOTE(review): theme_minimal() is applied after theme(aspect.ratio=1) and
# replaces the whole theme, so the aspect-ratio setting is discarded.
bar_bmi2 <- ggplot(data=brfss3) +
geom_bar(mapping=aes(x=forcats::fct_rev(fct_infreq(BMICAT)), fill=BMICAT),
show.legend = FALSE,
width=0.6, alpha=0.8) +
theme(aspect.ratio=1) +
theme_minimal() +
labs(x = "BMI", y = "Count", title="BMI Distribution")
bar_bmi2
# Sleep-hours distribution by BMI group (repeat with different axis label)
boxplot(SLEPTIM2~BMICAT, data=brfss3, main="Box Plot of Sleep by BMI Group",
xlab="Reported BMI", ylab="Hours Slept")
# Plot correlation matrix with correlation coefficients.
# NOTE(review): this uses brfss1 (pre-cleaning); rows with remaining NAs
# (EXERANY2/X_MRACE1) will propagate NA into cor() under the default
# use="everything" -- consider cor(brfss) or use="complete.obs".
numcor2 <- cor(brfss1)
corrplot(numcor2, method = "color",type = "upper", tl.col = "black", tl.cex = 0.5)
###############################################################################################
#########################################
#            Regularization             #
#########################################
# use L2 for feature selection - Rework for final
#########################################
#           Linear Regression           #
#########################################
# Split the data into training (80%) and testing (20%) sets, dropping the
# labelled factor columns so only the numeric code columns remain.
# NOTE(review): the SMOKEDAY2 factor created above is *not* in the drop list,
# so it stays in brfss2 alongside the numeric SMOKDAY2 -- confirm intent.
brfss2 <- subset(brfss3, select = -c(ALCGRP, X_AGE_G, ASTHMA4, RACEGRP, MARITAL, GENHLTH2, HLTHPLN2, EDGROUP, INCOME3, BMICAT, EXERANY3))
set.seed(123)
row.number <- sample(x=1:nrow(brfss2), size=0.8*nrow(brfss2))
train.data <- brfss2[row.number,]
test.data <- brfss2[-row.number,]
dim(train.data)
dim(test.data)
prop.table(table(train.data$ALCDAY5))
# OLS regression of sleep hours on all remaining columns.
linearmodel <- lm(SLEPTIM2~., data=train.data)
lm_summary <- summary(linearmodel)
lm_summary
lm_summary$r.squared
# Plots for residual analysis
# and linear-regression assumption checks (2x2 diagnostic panel)
par(mfrow=c(2,2))
plot(linearmodel)
qqnorm(linearmodel$residuals); qqline(linearmodel$residuals)
# Monisha's code based on her screenshots; replaced SLEPTIM1 with SLEPTIM2.
# NOTE(review): glm() with no family argument fits a gaussian model, so this
# is ordinary least squares despite the glm() call.
mlinearmodel <- glm(ALCDAY5 ~ SLEPTIM2 + X_AGE_G +
SMOKDAY2 + SEX + X_MRACE1 + GENHLTH + INCOME2 + X_BMI5CAT +
ALCGRP + DRKMONTHLY + AGE2 + AGE3 + AGE4 + AGE5 + SMOKGRP + HISPANIC +
RACEGRP + BLACK + ASIAN + OTHRACE + FORMERMAR + GENHLTH2 + FAIRHLTH +
POORHLTH + LOWED + SOMECOLL + INCOME3 + INC1 + INC2 + INC3 + INC4 +
INC5 + INC6 + OVWT + EXERANY3, data = brfss)
# Backward stepwise selection by AIC.
stepAICm <- stepAIC(mlinearmodel, direction = 'backward')
stepAICm$anova
# Final model selected by stepAIC (FAIRHLTH and EXERANY3 dropped):
# FIX: the formula below was previously bare top-level code -- it evaluated
# (and auto-printed) an unused formula object -- so it is now a comment.
# ALCDAY5 ~ SLEPTIM2 + X_AGE_G + SMOKDAY2 + SEX + X_MRACE1 + GENHLTH +
#   INCOME2 + X_BMI5CAT + ALCGRP + DRKMONTHLY + AGE2 + AGE3 +
#   AGE4 + AGE5 + SMOKGRP + HISPANIC + RACEGRP + BLACK + ASIAN +
#   OTHRACE + FORMERMAR + GENHLTH2 + POORHLTH + LOWED + SOMECOLL +
#   INCOME3 + INC1 + INC2 + INC3 + INC4 + INC5 + INC6 + OVWT
# Refit the stepAIC-selected model explicitly.
m2linearmodel <- glm(ALCDAY5 ~ SLEPTIM2 + X_AGE_G + SMOKDAY2 + SEX + X_MRACE1 + GENHLTH +
INCOME2 + X_BMI5CAT + ALCGRP + DRKMONTHLY + AGE2 + AGE3 +
AGE4 + AGE5 + SMOKGRP + HISPANIC + RACEGRP + BLACK + ASIAN +
OTHRACE + FORMERMAR + GENHLTH2 + POORHLTH + LOWED + SOMECOLL +
INCOME3 + INC1 + INC2 + INC3 + INC4 + INC5 + INC6 + OVWT, data = brfss)
lm_summary <- summary(m2linearmodel)
lm_summary
# Plots for residual analysis (2x2 diagnostic panel)
par(mfrow=c(2,2))
plot(m2linearmodel)
qqnorm(m2linearmodel$residuals); qqline(m2linearmodel$residuals)
###############################################################################################
# ASTHMA: logistic regression on the factor-coded data
set.seed(456)
row.number <- sample(x=1:nrow(brfss3), size=0.8*nrow(brfss3))
trainfactors <- brfss3[row.number,]
testfactors <- brfss3[-row.number,]
# Binomial logit model of asthma status on all remaining columns.
# NOTE(review): brfss3 still contains ASTHMA3, which the anova excerpt below
# suggests determines ASTHMA4 exactly (residual deviance 0), making the model
# degenerate -- confirm and consider dropping ASTHMA3 from the predictors.
glmmodel <- glm(ASTHMA4~., family = binomial(link = 'logit'), data = trainfactors)
summary(glmmodel)
anova(glmmodel, test="Chisq")
#           Df Deviance Resid. Df Resid. Dev  Pr(>Chi)
#
# ALCDAY5    1     44.8     45865      28099  2.16e-11 ***
# ASTHMA3    1  28099.0     45864          0  < 2.2e-16 ***
# Prediction: classify as asthma when predicted probability > 0.5
prefactors <-as.numeric(predict(glmmodel,newdata = testfactors,type = "response")>0.5)
obs_p_lr = data.frame(prob=prefactors,obs=testfactors$ASTHMA4)
# ROC curve -- NOTE(review): roc() comes from the pROC package, which is
# never attached in this script; run library(pROC) before this section.
par(mfrow=c(1,1))
lr_roc <- roc(testfactors$ASTHMA4,prefactors)
plot(lr_roc, print.auc=TRUE, auc.polygon=TRUE, grid=c(0.1, 0.2),grid.col=c("green", "red"),
max.auc.polygon=TRUE,auc.polygon.col="skyblue", print.thres=TRUE,main='ROC Curve ')
#########################################
#             Decision Tree             #
#########################################
# will fix later!!
# use brfss4
###############################################
# Create a subset to filter variables for the Decision Tree
brfss4 <- subset(brfss2, select = -c(ASTHMA3, SMOKE100, SMOKDAY2, X_MRACE1,
INCOME2, X_BMI5CAT, SMOKGRP, MARGRP, HLTHPLN1))
# Build the Decision Tree model (regression tree on sleep hours).
# NOTE(review): the tree is fit on train.data (the brfss2 split), so brfss4
# is created but never used -- consistent with the "will fix later" note.
dtree <- rpart(SLEPTIM2~., data=train.data, method = "anova", control=rpart.control(minsplit=5,cp=0.004))
###############################################################################################
###############################################
#                Random Forest                #
###############################################
# Will tune for final
# Fitting the Random Forest model.
# FIX: randomForest() and multiclass.roc()/auc() come from packages that were
# never attached earlier in the script; attach them here so this section runs.
library(randomForest)
library(pROC)
set.seed(666)
# FIX: the original called sample() twice -- once for traindata and an
# *independent* second time for testdata -- so the test set was not the
# complement of the training set (rows could appear in both, or in neither).
# Draw the index once and reuse it.
train_idx <- sample(seq_len(nrow(brfss3)), round(0.8 * nrow(brfss3)))
traindata <- brfss3[train_idx, ]
testdata <- brfss3[-train_idx, ]
# FIX: use the bare column name on the left-hand side of the formula; with
# `traindata$SLEPTIM2 ~ .` the response column also remained among the
# predictors expanded from `.`.
treeRF1 <- randomForest(SLEPTIM2 ~ ., data = traindata, ntree = 500)
treeRF1
# Previous run (for reference):
#   Type of random forest: regression
#   No. of variables tried at each split: 13
#   Mean of squared residuals: 3.000807
#   % Var explained: -39.59
# FIX: varImp() was called on the undefined object `treeRF2`.
varImp(treeRF1)
# Predict output on the held-out set
predictedRF <- predict(treeRF1, testdata)
# Checking classification accuracy (cross-tabulation of predicted vs actual)
acctest <- table(predictedRF, testdata$SLEPTIM2)
# AUC curve (multiclass ROC over the discrete sleep-hour values)
treeRF_roc <- multiclass.roc(testdata$SLEPTIM2, as.numeric(predictedRF))
auc(treeRF_roc)
# Previous run: Multi-class area under the curve: 0.8827
## TUNING REQUIRED
####
### Tuning ###
# Tuning parameters:
#   number of trees
#   number of variables tried at each split ("mtry")
| /6110BasicPjCode2share.R | no_license | Monisha-Rajendran/BRFSS | R | false | false | 11,126 | r |
#
# Data source:
# Purpose: Exploratory Analysis of data
# April 20, 2020
# ---------------------------------------------------------------------------------------------------------------
library(skimr)
library(readr)
library(dataPreparation)
library(tidyverse)
library(dplyr)
library(corrplot)
library(caret)
library(ggplot2)
library(leaps)
library(rpart)
library(tree)
library(glmnet)
library(car)
library(MASS)
# Import Data
library(readr)
brfss1 <- read_csv("data/analytic.csv", col_types = cols(SEX = col_integer(),
X_AGE_G = col_integer(), X_BMI5CAT = col_integer()))
###############################################
# Exploratory Data Analysis #
###############################################
# Check dimension of data
dim(brfss1)
# Summary of data
skim(brfss1)
# ---------------------------------------------------------------------------------------------------------------
# Replace/Remove NAs & missing values
#Replace NA with zero
# SMOKEDAY2 - field blank if respondent does not smoke? Replace with zero
brfss1 <- brfss1 %>%
mutate(SMOKDAY2=ifelse(is.na(SMOKDAY2), 0, SMOKDAY2))
#Replace NA with mean value
brfss1 <- brfss1 %>%
mutate(X_BMI5CAT=ifelse(is.na(X_BMI5CAT), 3, X_BMI5CAT))
# Remove NAs from : EXERANY2v & X_MRACE1
brfss <- na.omit(brfss1)
#check
dim(brfss)
any(is.na.data.frame(brfss))
hist(brfss$VETERAN3)
#all respondants are or have served in military per study inclusion criteria.
#Remove VETERAN3 & REDUDANT SLEPTIM1
brfss <- subset(brfss, select = -c(VETERAN3, SLEPTIM1))
table(brfss$EDGROUP)
# Removed Non-Responders
brfss <- brfss[brfss$EDGROUP != 9,]
table(brfss$SMOKGRP)
#only 290 Non-responders. Removed from dataset
brfss <- brfss[brfss$SMOKGRP != 9,]
#convert categorical data to factors to properly graph
brfss3 <- brfss %>%
mutate(ALCGRP = factor(ALCGRP, levels = c(3, 2, 9, 1),labels = c('Weekly', 'Monthly', "Unknown", "None")),
X_AGE_G = factor(X_AGE_G, levels = c(1, 2, 3, 4, 5, 6), labels = c('18-24', '25-34', '35-44', '45-54', '55-64', '65+')),
ASTHMA4 = factor(ASTHMA4, levels = c(1,0), labels = c("Yes", "No")),
RACEGRP = factor(RACEGRP, levels = c(1, 2, 3, 4, 5, 6, 9), labels = c('White', 'AA', 'Native Am', 'Asian', 'Pacific Islander', 'Other/Multiracial', 'Unknown')),
MARITAL = factor(MARITAL, levels = c(1, 2, 3, 4, 5, 9), labels = c('Married', 'Divorced', 'Widowed', 'Never Married', 'Partner', 'Unknown')),
GENHLTH2 = factor(GENHLTH2, levels = c(1, 2, 3, 4, 5, 9), labels = c('Excellent', 'Very Good', 'Good', 'Fair', 'Poor', 'Unknown')),
HLTHPLN2 = factor(HLTHPLN2, levels = c(1, 2, 9), labels = c('Yes', 'No', 'Unknown')),
EDGROUP = factor(EDGROUP, levels = c(1, 2, 3, 4, 9), labels = c('Some High School', 'High School', 'Some college', 'College Graduate', 'Unknown')),
INCOME3 = factor(INCOME3, levels = c(1, 2, 3, 4, 5, 6, 7, 8, 9), labels = c('< $10K', '10-15', '15-20', '20-25', '25-35', '35-50', '50-75', '$75K +', 'Unknown')),
BMICAT = factor(BMICAT, levels = c(1, 2, 3, 4, 9), labels = c('Underweight', 'Normal', 'Overweight', 'Obese', 'Unknown')),
SMOKEDAY2 = factor(SMOKDAY2, levels = c(1,2, 3, 9), labels = c('Every Day', 'Some Days', 'Not at all', 'Unknown')),
EXERANY3 = factor(EXERANY3, levels = c(1, 2, 9), labels = c('Yes', 'No', 'Unknown')))
skim(brfss3)
# ---------------------------------------------------------------------------------------------------------------
#Plots
#Sleep Distribution
# Barplot of Sleep Distribution
bar_sleep <- ggplot(data=brfss) +
geom_bar(mapping=aes(x=SLEPTIM2, fill=ALCDAY5),
show.legend = TRUE,
width=0.6) +
xlim(-1, 20) +
theme_minimal() +
labs(x = "Hours of sleep", y = "Count", title="Hours Slept Distribution")
bar_sleep
boxplot(brfss$SLEPTIM2, main="Box Plot of SLEPTIM2",
xlab="All Respondants", ylab="Hours Slept")
# General Sleep distribution of all veteran respondants
boxplot(SLEPTIM2~ALCGRP, data=brfss3, main="Box Plot of SLEPTIM2 by ALCGRP",
xlab="Alcohol consumption in last 30 days", ylab="Hours Slept")
#SleepHrs distribution - Alcohol Group Comparison
boxplot(SLEPTIM2~BMICAT, data=brfss3, main="Box Plot of Sleep by BMI Group",
xlab="BMI Category", ylab="Hours Slept")
#SleepHrs distribution - BMI Group Comparison
boxplot(SLEPTIM2~EDGROUP, data=brfss3, main="Box Plot of Sleep by EDU Group",
xlab="Highest Education Completed", ylab="Hours Slept")
#SleepHrs distribution - Highest Education Group Comparison
boxplot(SLEPTIM2~SMOKGRP, data=brfss, main="Box Plot of Sleep by Smoker Freq Group",
xlab="Current Smoking Freq", ylab="Hours Slept")
#SleepHrs distribution - Smoker Group Comparison
boxplot(SLEPTIM2~SMOKDAY2, data=brfss, main="Box Plot of SLEPTIM2 by Smoker Freq Group",
xlab="Current Smoking Freq", ylab="Hours Slept")
#SleepHrs distribution - Smoker Group Comparison
#BMI Distribution
bar_bmi2 <- ggplot(data=brfss3) +
geom_bar(mapping=aes(x=forcats::fct_rev(fct_infreq(BMICAT)), fill=BMICAT),
show.legend = FALSE,
width=0.6, alpha=0.8) +
theme(aspect.ratio=1) +
theme_minimal() +
labs(x = "BMI", y = "Count", title="BMI Distribution")
bar_bmi2
#SleepHrs distribution - BMI Group Comparison
boxplot(SLEPTIM2~BMICAT, data=brfss3, main="Box Plot of Sleep by BMI Group",
xlab="Reported BMI", ylab="Hours Slept")
# Plot Correlation Matrix with Correlation Coefficients
numcor2 <- cor(brfss1)
corrplot(numcor2, method = "color",type = "upper", tl.col = "black", tl.cex = 0.5)
###############################################################################################
#########################################
# Regularization #
#########################################
#use L2 for feature selection - Rework for final
#########################################
# Linear Regression #
#########################################
#split data into training and testing dataset
brfss2 <- subset(brfss3, select = -c(ALCGRP, X_AGE_G, ASTHMA4, RACEGRP, MARITAL, GENHLTH2, HLTHPLN2, EDGROUP, INCOME3, BMICAT, EXERANY3))
set.seed(123)
row.number <- sample(x=1:nrow(brfss2), size=0.8*nrow(brfss2))
train.data <- brfss2[row.number,]
test.data <- brfss2[-row.number,]
dim(train.data)
dim(test.data)
prop.table(table(train.data$ALCDAY5))
linearmodel <- lm(SLEPTIM2~., data=train.data)
lm_summary <- summary(linearmodel)
lm_summary
lm_summary$r.squared
# Plots for Residual Analysis
#and linear Regression assumptions check
par(mfrow=c(2,2))
plot(linearmodel)
qqnorm(linearmodel$residuals); qqline(linearmodel$residuals)
# Monisha's code based on her screenshots; replaced SLEPTIM1 with SLEPTIM2.
# NOTE(review): glm() with no family argument fits a gaussian model, so this
# is ordinary least squares despite the glm() call.
mlinearmodel <- glm(ALCDAY5 ~ SLEPTIM2 + X_AGE_G +
SMOKDAY2 + SEX + X_MRACE1 + GENHLTH + INCOME2 + X_BMI5CAT +
ALCGRP + DRKMONTHLY + AGE2 + AGE3 + AGE4 + AGE5 + SMOKGRP + HISPANIC +
RACEGRP + BLACK + ASIAN + OTHRACE + FORMERMAR + GENHLTH2 + FAIRHLTH +
POORHLTH + LOWED + SOMECOLL + INCOME3 + INC1 + INC2 + INC3 + INC4 +
INC5 + INC6 + OVWT + EXERANY3, data = brfss)
# Backward stepwise selection by AIC.
stepAICm <- stepAIC(mlinearmodel, direction = 'backward')
stepAICm$anova
# Final model selected by stepAIC (FAIRHLTH and EXERANY3 dropped):
# FIX: the formula below was previously bare top-level code -- it evaluated
# (and auto-printed) an unused formula object -- so it is now a comment.
# ALCDAY5 ~ SLEPTIM2 + X_AGE_G + SMOKDAY2 + SEX + X_MRACE1 + GENHLTH +
#   INCOME2 + X_BMI5CAT + ALCGRP + DRKMONTHLY + AGE2 + AGE3 +
#   AGE4 + AGE5 + SMOKGRP + HISPANIC + RACEGRP + BLACK + ASIAN +
#   OTHRACE + FORMERMAR + GENHLTH2 + POORHLTH + LOWED + SOMECOLL +
#   INCOME3 + INC1 + INC2 + INC3 + INC4 + INC5 + INC6 + OVWT
# Refit the stepAIC-selected model explicitly.
m2linearmodel <- glm(ALCDAY5 ~ SLEPTIM2 + X_AGE_G + SMOKDAY2 + SEX + X_MRACE1 + GENHLTH +
INCOME2 + X_BMI5CAT + ALCGRP + DRKMONTHLY + AGE2 + AGE3 +
AGE4 + AGE5 + SMOKGRP + HISPANIC + RACEGRP + BLACK + ASIAN +
OTHRACE + FORMERMAR + GENHLTH2 + POORHLTH + LOWED + SOMECOLL +
INCOME3 + INC1 + INC2 + INC3 + INC4 + INC5 + INC6 + OVWT, data = brfss)
lm_summary <- summary(m2linearmodel)
lm_summary
# Plots for residual analysis (2x2 diagnostic panel)
par(mfrow=c(2,2))
plot(m2linearmodel)
qqnorm(m2linearmodel$residuals); qqline(m2linearmodel$residuals)
###############################################################################################
# ASTHMA
set.seed(456)
row.number <- sample(x=1:nrow(brfss3), size=0.8*nrow(brfss3))
trainfactors <- brfss3[row.number,]
testfactors <- brfss3[-row.number,]
glmmodel <- glm(ASTHMA4~., family = binomial(link = 'logit'), data = trainfactors)
summary(glmmodel)
anova(glmmodel, test="Chisq")
# Df Deviance Resid. Df Resid. Dev Pr(>Chi)
#
# ALCDAY5 1 44.8 45865 28099 2.16e-11 ***
# ASTHMA3 1 28099.0 45864 0 < 2.2e-16 ***
#prediction
prefactors <-as.numeric(predict(glmmodel,newdata = testfactors,type = "response")>0.5)
obs_p_lr = data.frame(prob=prefactors,obs=testfactors$ASTHMA4)
#ROC curve
par(mfrow=c(1,1))
lr_roc <- roc(testfactors$ASTHMA4,prefactors)
plot(lr_roc, print.auc=TRUE, auc.polygon=TRUE, grid=c(0.1, 0.2),grid.col=c("green", "red"),
max.auc.polygon=TRUE,auc.polygon.col="skyblue", print.thres=TRUE,main='ROC Curve ')
#########################################
# Decision Tree #
#########################################
#will fix later!!
#use brfss4
###############################################
# Create a subset to filter variables for Decision Tree
brfss4 <- subset(brfss2, select = -c(ASTHMA3, SMOKE100, SMOKDAY2, X_MRACE1,
INCOME2, X_BMI5CAT, SMOKGRP, MARGRP, HLTHPLN1))
# Build the Decision Tree Model
dtree <- rpart(SLEPTIM2~., data=train.data, method = "anova", control=rpart.control(minsplit=5,cp=0.004))
###############################################################################################
###############################################
#                Random Forest                #
###############################################
# Will tune for final
# Fitting the Random Forest model.
# FIX: randomForest() and multiclass.roc()/auc() come from packages that were
# never attached earlier in the script; attach them here so this section runs.
library(randomForest)
library(pROC)
set.seed(666)
# FIX: the original called sample() twice -- once for traindata and an
# *independent* second time for testdata -- so the test set was not the
# complement of the training set (rows could appear in both, or in neither).
# Draw the index once and reuse it.
train_idx <- sample(seq_len(nrow(brfss3)), round(0.8 * nrow(brfss3)))
traindata <- brfss3[train_idx, ]
testdata <- brfss3[-train_idx, ]
# FIX: use the bare column name on the left-hand side of the formula; with
# `traindata$SLEPTIM2 ~ .` the response column also remained among the
# predictors expanded from `.`.
treeRF1 <- randomForest(SLEPTIM2 ~ ., data = traindata, ntree = 500)
treeRF1
# Previous run (for reference):
#   Type of random forest: regression
#   No. of variables tried at each split: 13
#   Mean of squared residuals: 3.000807
#   % Var explained: -39.59
# FIX: varImp() was called on the undefined object `treeRF2`.
varImp(treeRF1)
# Predict output on the held-out set
predictedRF <- predict(treeRF1, testdata)
# Checking classification accuracy (cross-tabulation of predicted vs actual)
acctest <- table(predictedRF, testdata$SLEPTIM2)
# AUC curve (multiclass ROC over the discrete sleep-hour values)
treeRF_roc <- multiclass.roc(testdata$SLEPTIM2, as.numeric(predictedRF))
auc(treeRF_roc)
# Previous run: Multi-class area under the curve: 0.8827
## TUNING REQUIRED
####
### Tuning ###
# Tuning parameters:
#   number of trees
#   number of variables tried at each split ("mtry")
|
## Generate orderly.yml for collate_model_outputs.
##
## Builds the orderly task definition as a nested list, attaches one
## `produce_combined_rt` dependency entry per analysis week, and serialises
## the result to src/collate_combined_rt/orderly.yml.
x <- list(
  script = "collate_combined_rt.R",
  artefacts = list(
    data = list(
      description = "Collated combined rt estimates (quantiles)",
      filenames = c("combined_rt_qntls.rds",
                    "weekly_iqr.rds",
                    "combined_weighted_estimates_across_countries.rds",
                    "combined_weighted_estimates_per_country.rds")
    )
  ),
  sources = c("R/utils.R"),
  packages = c("dplyr", "tidyr", "ggdist", "purrr", "ggplot2")
)
## `week_starting` and `week_ending` are assumed to be defined by the caller
## (e.g. a wrapper script sourcing this file) -- TODO confirm.
weeks_needed <- seq(
  from = week_starting, to = week_ending, by = "7 days"
)
use_si <- "si_2"
## One dependency entry per week; each imported file is renamed with a
## `_<week>` suffix so successive weeks do not clobber each other.
dependencies <- purrr::map(
  weeks_needed,  # FIX: the original iterated over `weeks`, which is undefined
  function(week) {
    query <- glue::glue(
      "latest(parameter:week_ending == \"{week}\" ",
      " && parameter:use_si == \"{use_si}\")"
    )
    y <- list(
      produce_combined_rt = list(
        id = query,
        use = list(
          "combined_rt_estimates.rds",
          "weekly_iqr.rds",
          "combined_weighted_estimates_per_country.rds",
          "combined_weighted_estimates_across_countries.rds"
        )
      )
    )
    ## Strip the ".rds" extension to build week-stamped destination names.
    infiles <- purrr::map(
      y$produce_combined_rt$use,
      function(f) strsplit(f, split = ".", fixed = TRUE)[[1]][1]
    )
    names(y$produce_combined_rt$use) <- glue::glue("{infiles}_{week}.rds")
    y
  }
)
x$depends <- dependencies
con <- file(here::here("src/collate_combined_rt/orderly.yml"), "w")
yaml::write_yaml(x, con)
close(con)
| /orderly-helper-scripts/dependencies_collate_combined_rt.R | no_license | mrc-ide/covid19-forecasts-orderly | R | false | false | 1,449 | r | ## Generate orderly.yml for collate_model_outputs
## Build the orderly task definition as a nested list, attach one
## `produce_combined_rt` dependency entry per analysis week, and serialise
## the result to src/collate_combined_rt/orderly.yml.
x <- list(
  script = "collate_combined_rt.R",
  artefacts = list(
    data = list(
      description = "Collated combined rt estimates (quantiles)",
      filenames = c("combined_rt_qntls.rds",
                    "weekly_iqr.rds",
                    "combined_weighted_estimates_across_countries.rds",
                    "combined_weighted_estimates_per_country.rds")
    )
  ),
  sources = c("R/utils.R"),
  packages = c("dplyr", "tidyr", "ggdist", "purrr", "ggplot2")
)
## `week_starting` and `week_ending` are assumed to be defined by the caller
## (e.g. a wrapper script sourcing this file) -- TODO confirm.
weeks_needed <- seq(
  from = week_starting, to = week_ending, by = "7 days"
)
use_si <- "si_2"
## One dependency entry per week; each imported file is renamed with a
## `_<week>` suffix so successive weeks do not clobber each other.
dependencies <- purrr::map(
  weeks_needed,  # FIX: the original iterated over `weeks`, which is undefined
  function(week) {
    query <- glue::glue(
      "latest(parameter:week_ending == \"{week}\" ",
      " && parameter:use_si == \"{use_si}\")"
    )
    y <- list(
      produce_combined_rt = list(
        id = query,
        use = list(
          "combined_rt_estimates.rds",
          "weekly_iqr.rds",
          "combined_weighted_estimates_per_country.rds",
          "combined_weighted_estimates_across_countries.rds"
        )
      )
    )
    ## Strip the ".rds" extension to build week-stamped destination names.
    infiles <- purrr::map(
      y$produce_combined_rt$use,
      function(f) strsplit(f, split = ".", fixed = TRUE)[[1]][1]
    )
    names(y$produce_combined_rt$use) <- glue::glue("{infiles}_{week}.rds")
    y
  }
)
x$depends <- dependencies
con <- file(here::here("src/collate_combined_rt/orderly.yml"), "w")
yaml::write_yaml(x, con)
close(con)
\name{cluster}
\alias{cluster}
\title{Cluster sampling}
\description{Cluster sampling with equal/unequal probabilities.}
\usage{cluster(data, clustername, size, method=c("srswor","srswr","poisson",
"systematic"),pik,description=FALSE)}
\arguments{
\item{data}{data frame or data matrix; its number of rows is N, the population size.}
\item{clustername}{the name of the clustering variable.}
\item{size}{sample size.}
\item{method}{method to select clusters; the following methods are implemented: simple random
sampling without replacement (srswor), simple random sampling with replacement (srswr),
Poisson sampling (poisson), systematic sampling (systematic); if the method is not specified,
by default the method is "srswor".}
\item{pik}{vector of inclusion probabilities or auxiliary information used to compute them;
this argument is only used for unequal probability sampling (Poisson, systematic). If
auxiliary information is provided, the function uses the \link{inclusionprobabilities} function for
computing these probabilities.}
\item{description}{a message is printed if its value is TRUE; the message gives the number
of selected clusters, the number of units in the population and the number of selected units.
By default, the value is FALSE.}
}
\value{
The function returns a data set with the following information: the selected clusters, the identifier of the units in the selected clusters,
the final inclusion probabilities for these units (they are equal for the units included in the same cluster). If method is "srswr", the number of replicates is also given.
}
\seealso{
\code{\link{mstage}}, \code{\link{strata}}, \code{\link{getdata}}}
\examples{
############
## Example 1
############
# Uses the swissmunicipalities data to draw a sample of clusters
data(swissmunicipalities)
# the variable 'REG' has 7 categories in the population
# it is used as clustering variable
# the sample size is 3; the method is simple random sampling without replacement
cl=cluster(swissmunicipalities,clustername=c("REG"),size=3,method="srswor")
# extracts the observed data
# the order of the columns is different from the order in the initial database
getdata(swissmunicipalities, cl)
############
## Example 2
############
# the same data as in Example 1
# the sample size is 3; the method is systematic sampling
# the pik vector is randomly generated using the U(0,1) distribution
cl_sys=cluster(swissmunicipalities,clustername=c("REG"),size=3,method="systematic",
pik=runif(7))
# extracts the observed data
getdata(swissmunicipalities,cl_sys)
}
\keyword{survey}
| /man/cluster.Rd | no_license | cran/sampling | R | false | false | 2,635 | rd | \name{cluster}
\alias{cluster}
\title{Cluster sampling}
\description{Cluster sampling with equal/unequal probabilities.}
\usage{cluster(data, clustername, size, method=c("srswor","srswr","poisson",
"systematic"),pik,description=FALSE)}
\arguments{
\item{data}{data frame or data matrix; its number of rows is N, the population size.}
\item{clustername}{the name of the clustering variable.}
\item{size}{sample size.}
\item{method}{method to select clusters; the following methods are implemented: simple random
sampling without replacement (srswor), simple random sampling with replacement (srswr),
Poisson sampling (poisson), systematic sampling (systematic); if the method is not specified,
by default the method is "srswor".}
\item{pik}{vector of inclusion probabilities or auxiliary information used to compute them;
this argument is only used for unequal probability sampling (Poisson, systematic). If
auxiliary information is provided, the function uses the \link{inclusionprobabilities} function for
computing these probabilities.}
\item{description}{a message is printed if its value is TRUE; the message gives the number
of selected clusters, the number of units in the population and the number of selected units.
By default, the value is FALSE.}
}
\value{
The function returns a data set with the following information: the selected clusters, the identifier of the units in the selected clusters,
the final inclusion probabilities for these units (they are equal for the units included in the same cluster). If method is "srswr", the number of replicates is also given.
}
\seealso{
\code{\link{mstage}}, \code{\link{strata}}, \code{\link{getdata}}}
\examples{
############
## Example 1
############
# Uses the swissmunicipalities data to draw a sample of clusters
data(swissmunicipalities)
# the variable 'REG' has 7 categories in the population
# it is used as clustering variable
# the sample size is 3; the method is simple random sampling without replacement
cl=cluster(swissmunicipalities,clustername=c("REG"),size=3,method="srswor")
# extracts the observed data
# the order of the columns is different from the order in the initial database
getdata(swissmunicipalities, cl)
############
## Example 2
############
# the same data as in Example 1
# the sample size is 3; the method is systematic sampling
# the pik vector is randomly generated using the U(0,1) distribution
cl_sys=cluster(swissmunicipalities,clustername=c("REG"),size=3,method="systematic",
pik=runif(7))
# extracts the observed data
getdata(swissmunicipalities,cl_sys)
}
\keyword{survey}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tapply.stat.r
\name{tapply.stat}
\alias{tapply.stat}
\title{Statistics of data grouped by factors}
\usage{
tapply.stat(y, x, stat = "mean")
}
\arguments{
\item{y}{Data.frame variables.}
\item{x}{Data.frame factors.}
\item{stat}{Method.}
}
\value{
y Numeric
x Numeric
stat method = "mean", ...
}
\description{
\code{tapply.stat} computes summary statistics for one or
more variables, grouped or crossed by factors. The input
table must be organized in columns containing the
variables and the factors.
}
\author{
Eric B Ferreira,
\email{eric.ferreira@unifal-mg.edu.br}
Denismar Alves Nogueira
Portya Piscitelli Cavalcanti
(Adapted from Felipe de Mendiburu - GPL)
}
| /man/tapply.stat.Rd | no_license | denisnog/ExpDes.pt | R | false | true | 756 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tapply.stat.r
\name{tapply.stat}
\alias{tapply.stat}
\title{Statistics of data grouped by factors}
\usage{
tapply.stat(y, x, stat = "mean")
}
\arguments{
\item{y}{Data.frame variables.}
\item{x}{Data.frame factors.}
\item{stat}{Method.}
}
\value{
y Numeric
x Numeric
stat method = "mean", ...
}
\description{
\code{tapply.stat} computes summary statistics for one or
more variables, grouped or crossed by factors. The input
table must be organized in columns containing the
variables and the factors.
}
\author{
Eric B Ferreira,
\email{eric.ferreira@unifal-mg.edu.br}
Denismar Alves Nogueira
Portya Piscitelli Cavalcanti
(Adapted from Felipe de Mendiburu - GPL)
}
|
\name{moderate}
\alias{moderate}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Calculate and plot the direct effect of the selected exposure variable at each level of the moderator.
}
\description{
Calculate and plot the direct effect of the selected exposure variable at each level of the moderator.
}
\usage{
moderate(med1,vari,j=1,kx=1,continuous.resolution=100,plot=TRUE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{med1}{
The med object from the \link[=med]{med} function.
}
\item{vari}{
The name of the moderator.
}
\item{j}{
The jth response if the response is multiple.
}
\item{kx}{
The moderate effect is with the kx-th predictor(s).
}
\item{continuous.resolution}{
The number of equally space points at which to evaluate continuous predictors.
}
\item{plot}{
Plot the direct effect at each level of the moderator if true.
}
}
\value{
The \link[=moderate]{moderate} returns a list where the item result is a data frame with two or three elements
\item{moderator }{the moderator levels.}
\item{x }{the level of the exposure variable -- available only for continuous exposure and moderate with nonlinear method.}
\item{de }{the direct effect at the corresponding moderator (and exposure) level(s).}
}
\details{
Calculate and plot the direct effect of the selected exposure variable at each level of the moderator based on the result from the \link[=med]{med} function.
}
\author{
Qingzhao Yu \email{qyu@lsuhsc.edu}
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{"\link[=form.interaction]{form.interaction}"},
\code{"\link[=test.moderation]{test.moderation}"}
}
\examples{
\donttest{
#nonlinear model
data("weight_behavior")
x=weight_behavior[,c(2,4:14)]
pred=weight_behavior[,3]
y=weight_behavior[,15]
data.bin<-data.org(x,y,pred=pred,contmed=c(7:9,11:12),binmed=c(6,10),
binref=c(1,1),catmed=5,catref=1,predref="M",alpha=0.4,alpha2=0.4)
temp2<-med(data=data.bin,n=2,nonlinear=TRUE)
result1=moderate(temp2,vari="race")
result2=moderate(temp2,vari="age")
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
%\keyword{ Plot }
| /man/moderate.Rd | no_license | cran/mma | R | false | false | 2,285 | rd | \name{moderate}
\alias{moderate}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Calculate and plot the direct effect of the selected exposure variable at each level of the moderator.
}
\description{
Calculate and plot the direct effect of the selected exposure variable at each level of the moderator.
}
\usage{
moderate(med1,vari,j=1,kx=1,continuous.resolution=100,plot=TRUE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{med1}{
The med object from the \link[=med]{med} function.
}
\item{vari}{
The name of the moderator.
}
\item{j}{
The jth response if the response is multiple.
}
\item{kx}{
The moderate effect is with the kx-th predictor(s).
}
\item{continuous.resolution}{
The number of equally space points at which to evaluate continuous predictors.
}
\item{plot}{
Plot the direct effect at each level of the moderator if true.
}
}
\value{
The \link[=moderate]{moderate} returns a list where the item result is a data frame with two or three elements
\item{moderator }{the moderator levels.}
\item{x }{the level of the exposure variable -- available only for continuous exposure and moderate with nonlinear method.}
\item{de }{the direct effect at the corresponding moderator (and exposure) level(s).}
}
\details{
Calculate and plot the direct effect of the selected exposure variable at each level of the moderator based on the result from the \link[=med]{med} function.
}
\author{
Qingzhao Yu \email{qyu@lsuhsc.edu}
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
\code{"\link[=form.interaction]{form.interaction}"},
\code{"\link[=test.moderation]{test.moderation}"}
}
\examples{
\donttest{
#nonlinear model
data("weight_behavior")
x=weight_behavior[,c(2,4:14)]
pred=weight_behavior[,3]
y=weight_behavior[,15]
data.bin<-data.org(x,y,pred=pred,contmed=c(7:9,11:12),binmed=c(6,10),
binref=c(1,1),catmed=5,catref=1,predref="M",alpha=0.4,alpha2=0.4)
temp2<-med(data=data.bin,n=2,nonlinear=TRUE)
result1=moderate(temp2,vari="race")
result2=moderate(temp2,vari="age")
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
%\keyword{ Plot }
|
library(ElemStatLearn)
head(spam)
# Split dataset in training and testing
inx = sample(nrow(spam), round(nrow(spam) * 0.8))
train = spam[inx,]
test = spam[-inx,]
# Fit regression model
fit = glm(spam ~ ., data = train, family = binomial())
summary(fit)
# Call:
# glm(formula = spam ~ ., family = binomial(), data = train)
#
# Deviance Residuals:
# Min 1Q Median 3Q Max
# -4.5172 -0.2039 0.0000 0.1111 5.4944
# Coefficients:
# Estimate Std. Error z value Pr(>|z|)
# (Intercept) -1.511e+00 1.546e-01 -9.772 < 2e-16 ***
# A.1 -4.546e-01 2.560e-01 -1.776 0.075720 .
# A.2 -1.630e-01 7.731e-02 -2.108 0.035043 *
# A.3 1.487e-01 1.261e-01 1.179 0.238591
# A.4 2.055e+00 1.467e+00 1.401 0.161153
# A.5 6.165e-01 1.191e-01 5.177 2.25e-07 ***
# A.6 7.156e-01 2.768e-01 2.585 0.009747 **
# A.7 2.606e+00 3.917e-01 6.652 2.88e-11 ***
# A.8 6.750e-01 2.284e-01 2.955 0.003127 **
# A.9 1.197e+00 3.362e-01 3.559 0.000373 ***
# Signif. codes: 0 *** 0.001 ** 0.01 * 0.05 . 0.1 1
### Make predictions
preds = predict(fit, test, type = "response")
preds = ifelse(preds > 0.5, 1, 0)
tbl = table(target = test$spam, preds)
tbl
# preds
# target 0 1
# email 535 23
# spam 46 316
sum(diag(tbl)) / sum(tbl)
# 0.925 | /Logistic Regression.R | no_license | mcvenkat/R-Programs | R | false | false | 1,461 | r | library(ElemStatLearn)
# Logistic-regression spam classifier on the ElemStatLearn `spam` data
# (the package is loaded at the top of this script).
head(spam)
# Hold out 20% of the rows for testing; train on the remaining 80%.
inx <- sample(nrow(spam), round(nrow(spam) * 0.8))
train <- spam[inx, ]
test <- spam[-inx, ]
# Fit a binomial GLM (logistic regression) using every predictor.
fit <- glm(spam ~ ., data = train, family = binomial())
summary(fit)
# Class predictions: threshold the predicted probabilities at 0.5.
preds <- predict(fit, test, type = "response")
preds <- ifelse(preds > 0.5, 1, 0)
# Confusion matrix of true labels against predictions.
tbl <- table(target = test$spam, preds)
tbl
# Overall accuracy (about 0.92-0.93 in a typical run).
sum(diag(tbl)) / sum(tbl)
# 0.925 |
library(dplyr)
#Download the file
data<-"Week3_project.zip"
if(!file.exists("data")){
fileurl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileurl, data)
}
#Unzip the file
unzip(data)
#Reading data
features <- read.table("./UCI HAR Dataset/features.txt")
activity_labels <- read.table("./UCI HAR Dataset/activity_labels.txt")
#test
subject_test <- read.table("./UCI HAR Dataset/test/subject_test.txt")
x_test <- read.table("./UCI HAR Dataset/test/X_test.txt")
y_test <- read.table("./UCI HAR Dataset/test/y_test.txt")
#train
subject_train <- read.table("./UCI HAR Dataset/train/subject_train.txt")
x_train <- read.table("./UCI HAR Dataset/train/X_train.txt")
y_train <- read.table("./UCI HAR Dataset/train/y_train.txt")
# 1. Merges the training and the test sets to create one data set.
subject_merged<-rbind(subject_train,subject_test)
x_merged<-rbind( x_train, x_test)
y_merged<-rbind(y_train, y_test)
merged_data<-cbind(subject_merged,x_merged,y_merged)
colnames(merged_data)<-c("subject", features[, 2], "activity")
# 2. Extracts only the measurements on the mean and standard deviation for each measurement.
col_names_select<-grepl("subject|activity|mean|std", colnames(merged_data))
tidy_data<-merged_data[,col_names_select]
# 3. Uses descriptive activity names to name the activities in the data set
tidy_data$activity <- activities[tidy_data$activity, 2]
# 4. Appropriately labels the data set with descriptive variable names.
names(tidy_data)<-gsub("^t", "time", names(tidy_data))
names(tidy_data)<-gsub("Acc", "Accelerometer", names(tidy_data))
names(tidy_data)<-gsub("Gyro", "Gyroscope", names(tidy_data))
names(tidy_data)<-gsub("^f", "Frequency", names(tidy_data))
names(tidy_data)<-gsub("Mag", "Magnitude", names(tidy_data))
names(tidy_data)<-gsub("BodyBody", "Body", names(tidy_data))
# 5. From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
Final_tidy_data <- aggregate( . ~ subject + activity, tidy_data, mean )
Final_tidy_data <- Final_tidy_data[order(Final_tidy_data$subject,Final_tidy_data$activity),]
write.table(Final_tidy_data, "Tidy_data.txt", row.name=FALSE)
| /run_analysis.R | no_license | KotovaElena/getting_cleaning_project | R | false | false | 2,261 | r | library(dplyr)
## Getting & Cleaning Data course project: merge the UCI HAR train/test
## splits, keep mean/std measurements, label activities, and write a tidy
## per-subject / per-activity summary.
# Download the raw archive once.
# Fix: test for the zip itself; the original tested for a literal "data"
# path that this script never creates, so the archive was re-downloaded on
# every run.
data <- "Week3_project.zip"
if (!file.exists(data)) {
  fileurl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
  download.file(fileurl, data)
}
# Extract into ./UCI HAR Dataset/
unzip(data)
# Feature names and the activity code -> label lookup table.
features <- read.table("./UCI HAR Dataset/features.txt")
activity_labels <- read.table("./UCI HAR Dataset/activity_labels.txt")
# Test split
subject_test <- read.table("./UCI HAR Dataset/test/subject_test.txt")
x_test <- read.table("./UCI HAR Dataset/test/X_test.txt")
y_test <- read.table("./UCI HAR Dataset/test/y_test.txt")
# Training split
subject_train <- read.table("./UCI HAR Dataset/train/subject_train.txt")
x_train <- read.table("./UCI HAR Dataset/train/X_train.txt")
y_train <- read.table("./UCI HAR Dataset/train/y_train.txt")
# 1. Merge the training and the test sets to create one data set.
subject_merged <- rbind(subject_train, subject_test)
x_merged <- rbind(x_train, x_test)
y_merged <- rbind(y_train, y_test)
merged_data <- cbind(subject_merged, x_merged, y_merged)
colnames(merged_data) <- c("subject", features[, 2], "activity")
# 2. Extract only the mean and standard deviation measurements
#    (keeping the subject and activity identifier columns).
col_names_select <- grepl("subject|activity|mean|std", colnames(merged_data))
tidy_data <- merged_data[, col_names_select]
# 3. Use descriptive activity names.
#    Fix: the lookup table loaded above is `activity_labels`; the original
#    indexed an object `activities` that is not defined in this script.
tidy_data$activity <- activity_labels[tidy_data$activity, 2]
# 4. Label the data set with descriptive variable names.
names(tidy_data) <- gsub("^t", "time", names(tidy_data))
names(tidy_data) <- gsub("Acc", "Accelerometer", names(tidy_data))
names(tidy_data) <- gsub("Gyro", "Gyroscope", names(tidy_data))
names(tidy_data) <- gsub("^f", "Frequency", names(tidy_data))
names(tidy_data) <- gsub("Mag", "Magnitude", names(tidy_data))
names(tidy_data) <- gsub("BodyBody", "Body", names(tidy_data))
# 5. Average each variable for each activity/subject pair and save it.
Final_tidy_data <- aggregate(. ~ subject + activity, tidy_data, mean)
Final_tidy_data <- Final_tidy_data[order(Final_tidy_data$subject, Final_tidy_data$activity), ]
# Fix: spell out `row.names` (the original relied on partial matching of
# the misspelled `row.name`).
write.table(Final_tidy_data, "Tidy_data.txt", row.names = FALSE)
## Put comments here that give an overall description of what your
## functions do
## This function creates a list representing the "special matrix" object that can cache its inverse
## We are representing the item as a list with 4 functions:
## setMatrix: will store the original matrix in x variable (using that name due to exercise constraint)
## getMatrix: will return the original matrix stored in x variable (using that name due to exercise constraint)
## getInverseMatrix: will return the inverse in case it was calculated before (inverseMatrix variable). It will return NULL if not
## setInverseMatrix: will store the calculated inverse matrix into the inverseMatrix variable
makeCacheMatrix <- function(x = matrix()) {
  # Build a "cache-aware matrix": a list of accessor closures sharing the
  # matrix `x` and the most recently stored inverse.
  cached_inverse <- NULL
  list(
    # Replace the stored matrix and drop any stale cached inverse.
    setMatrix = function(y) {
      x <<- y
      cached_inverse <<- NULL
    },
    # Return the stored matrix.
    getMatrix = function() x,
    # Store a freshly computed inverse in the cache.
    setInverseMatrix = function(inverseMatrix) cached_inverse <<- inverseMatrix,
    # Return the cached inverse, or NULL if none has been stored yet.
    getInverseMatrix = function() cached_inverse
  )
}
## This function tries to get the inverse matrix from cache. If not
## it will calculate, store the new value in x variable and return the value.
## Lines 35 to 39 are the ones that get the cached value. If the value was calculated before, it returns it
## Rest of the code get the original matrix, calculates the inverse
## stores it in the x variable using the method provided and returns the value in line 45.
cacheSolve <- function(x, ...) {
  # Return the inverse of the matrix wrapped by `x` (a makeCacheMatrix
  # object), computing it only when no cached value is available.
  hit <- x$getInverseMatrix()
  if (is.null(hit)) {
    # Cache miss: invert the stored matrix and remember the result.
    inv <- solve(x$getMatrix())
    x$setInverseMatrix(inv)
    inv
  } else {
    message("Returning Cached Inverse Matrix")
    hit
  }
}
| /cachematrix.R | no_license | rhaunter/ProgrammingAssignment2 | R | false | false | 1,979 | r | ## Put comments here that give an overall description of what your
## functions do
## This function creates a list representing the "special matrix" object that can cache its inverse
## We are representing the item as a list with 4 functions:
## setMatrix: will store the original matrix in x variable (using that name due to exercise constraint)
## getMatrix: will return the original matrix stored in x variable (using that name due to exercise constraint)
## getInverseMatrix: will return the inverse in case it was calculated before (inverseMatrix variable). It will return NULL if not
## setInverseMatrix: will store the calculated inverse matrix into the inverseMatrix variable
makeCacheMatrix <- function(x = matrix()) {
  # Build a "cache-aware matrix": a list of accessor closures sharing the
  # matrix `x` and the most recently stored inverse.
  cached_inverse <- NULL
  list(
    # Replace the stored matrix and drop any stale cached inverse.
    setMatrix = function(y) {
      x <<- y
      cached_inverse <<- NULL
    },
    # Return the stored matrix.
    getMatrix = function() x,
    # Store a freshly computed inverse in the cache.
    setInverseMatrix = function(inverseMatrix) cached_inverse <<- inverseMatrix,
    # Return the cached inverse, or NULL if none has been stored yet.
    getInverseMatrix = function() cached_inverse
  )
}
## This function tries to get the inverse matrix from cache. If not
## it will calculate, store the new value in x variable and return the value.
## Lines 35 to 39 are the ones that get the cached value. If the value was calculated before, it returns it
## Rest of the code get the original matrix, calculates the inverse
## stores it in the x variable using the method provided and returns the value in line 45.
cacheSolve <- function(x, ...) {
  # Return the inverse of the matrix wrapped by `x` (a makeCacheMatrix
  # object), computing it only when no cached value is available.
  hit <- x$getInverseMatrix()
  if (is.null(hit)) {
    # Cache miss: invert the stored matrix and remember the result.
    inv <- solve(x$getMatrix())
    x$setInverseMatrix(inv)
    inv
  } else {
    message("Returning Cached Inverse Matrix")
    hit
  }
}
|
context("Single fieldbook analysis for AGROFIMS")
test_that("Create fieldbook with sub samples under CRD", {
fb_path <- rprojroot::find_testthat_root_file("dataset/test_crd_2.rds")
fb <- readRDS(fb_path)
#fb<-readRDS("/home/obenites/HIDAP_SB_1.0.0/fbanalysis/tests/testthat/dataset/test_crd_2.rds")
traits <- "1:Rice_Grain_Plant_density_plant_hill"
design <- "Completely Randomized Design (CRD)"
if(design=="Completely Randomized Design (CRD)"){
factors <- c("PLOT","ROW", "COL", "TREATMENT")
}
#Get traits from UI
gather_cols<- names(fb)[stringr::str_detect(string = names(fb), traits)]
if(length(gather_cols)>1){
#Columns to gather and Select columns
fb_sub <- fb[,c(factors , names(fb)[stringr::str_detect(names(fb),pattern = traits)] )]
#gather_cols <- names(fb_sub)[stringr::str_detect(string = names(fb_sub), traits)]
## Transpose data from previous data :fb_sub
fb_sub <- fb_sub %>% tidyr::gather_("SUBSAMPLE",traits, gather_cols)
fb_sub <- fb_sub %>% dplyr::mutate(SUBSAMPLE=gsub(".*__","",fb_sub$SUBSAMPLE))
fb_sub[,traits] <- as.numeric(fb_sub[,traits])
} else {
fb_sub <- fb[,c(factors, traits)]
fb_sub[,traits] <- as.numeric(fb_sub[,traits])
}
fb_sub
testthat::expect_equal(nrow(fb_sub),8)
testthat::expect_equal(names(fb_sub)[ncol(fb_sub)],"1:Rice_Grain_Plant_density_plant_hill")
testthat::expect_equal(unique(fb_sub$SUBSAMPLE),c("1","2"))
}) | /tests/testthat/test_creation_fbsubsample_crd.R | permissive | AGROFIMS/aganalysis | R | false | false | 1,480 | r | context("Single fieldbook analysis for AGROFIMS")
# Regression test: a CRD fieldbook whose trait was measured in sub-samples
# (columns suffixed "__<k>") must be reshaped to long format with one
# SUBSAMPLE row per measurement.
test_that("Create fieldbook with sub samples under CRD", {
  # Load the saved CRD fieldbook fixture shipped with the test suite.
  fb_path <- rprojroot::find_testthat_root_file("dataset/test_crd_2.rds")
  fb <- readRDS(fb_path)
  #fb<-readRDS("/home/obenites/HIDAP_SB_1.0.0/fbanalysis/tests/testthat/dataset/test_crd_2.rds")
  traits <- "1:Rice_Grain_Plant_density_plant_hill"
  design <- "Completely Randomized Design (CRD)"
  # Layout/treatment columns carried along for this design.
  if(design=="Completely Randomized Design (CRD)"){
    factors <- c("PLOT","ROW", "COL", "TREATMENT")
  }
  #Get traits from UI
  gather_cols<- names(fb)[stringr::str_detect(string = names(fb), traits)]
  # Several matching columns -> the trait has sub-sample measurements that
  # must be gathered into long format; a single column is used as-is.
  if(length(gather_cols)>1){
    #Columns to gather and Select columns
    fb_sub <- fb[,c(factors , names(fb)[stringr::str_detect(names(fb),pattern = traits)] )]
    #gather_cols <- names(fb_sub)[stringr::str_detect(string = names(fb_sub), traits)]
    ## Transpose data from previous data :fb_sub
    fb_sub <- fb_sub %>% tidyr::gather_("SUBSAMPLE",traits, gather_cols)
    # Keep only the trailing sub-sample index (text after "__").
    fb_sub <- fb_sub %>% dplyr::mutate(SUBSAMPLE=gsub(".*__","",fb_sub$SUBSAMPLE))
    fb_sub[,traits] <- as.numeric(fb_sub[,traits])
  } else {
    fb_sub <- fb[,c(factors, traits)]
    fb_sub[,traits] <- as.numeric(fb_sub[,traits])
  }
  fb_sub
  # The fixture yields 8 long rows, the trait as last column, and
  # sub-sample labels "1" and "2".
  testthat::expect_equal(nrow(fb_sub),8)
  testthat::expect_equal(names(fb_sub)[ncol(fb_sub)],"1:Rice_Grain_Plant_density_plant_hill")
  testthat::expect_equal(unique(fb_sub$SUBSAMPLE),c("1","2"))
}) |
# Exploratory plot: step chart of Global Active Power for 1-2 Feb 2007,
# rendered to plot2.png.
library(data.table)
# Read the semicolon-separated data set straight out of the zip archive;
# "?" marks missing values.
data <- as.data.table(read.table(unz("exdata_data_household_power_consumption.zip", "household_power_consumption.txt"), header=T, sep=";", dec=".", na.strings = "?"))
# Keep only the two target days (Date is stored as d/m/yyyy text).
data <- data[Date == "1/2/2007" | Date == "2/2/2007"]
# Open a 480x480 PNG device with a transparent background.
png(file = "plot2.png", width = 480, height = 480, bg = "transparent")
# Step-type plot of the power readings; the default x axis is suppressed
# so custom day labels can be drawn below.
plot(as.numeric(data$Global_active_power),
     type = "s",
     ylab = "Global Active Power (kilowatts)",
     xlab = "",
     xaxt="n"
     )
# Label the start, middle and end of the series with weekday names.
width <- nrow(data)
axis(side=1,at=c(0,width/2,width),labels=c("Thu","Fri","Sat"))
dev.flush()
# The PNG device is closed by the dev.off() call that follows.
dev.off() | /plot2.R | no_license | Ellariel/r-data-plotting | R | false | false | 563 | r | library(data.table)
# Read the semicolon-separated power data straight out of the zip archive;
# "?" marks missing values (data.table is loaded at the top of the script).
data <- as.data.table(read.table(unz("exdata_data_household_power_consumption.zip", "household_power_consumption.txt"), header=T, sep=";", dec=".", na.strings = "?"))
# Keep only the two target days (Date is stored as d/m/yyyy text).
data <- data[Date == "1/2/2007" | Date == "2/2/2007"]
# Open a 480x480 PNG device with a transparent background.
png(file = "plot2.png", width = 480, height = 480, bg = "transparent")
# Step-type plot of the power readings; the default x axis is suppressed
# so custom day labels can be drawn below.
plot(as.numeric(data$Global_active_power),
     type = "s",
     ylab = "Global Active Power (kilowatts)",
     xlab = "",
     xaxt="n"
     )
# Label the start, middle and end of the series with weekday names.
width <- nrow(data)
axis(side=1,at=c(0,width/2,width),labels=c("Thu","Fri","Sat"))
dev.flush()
# The PNG device is closed by the dev.off() call that follows.
dev.off() |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/splitDVDreads.R
\name{splitDVDreads}
\alias{splitDVDreads}
\title{Select DVD reads from a set of Nanopore reads and split the reads in 2 parts both containing the vector (DV and VD)}
\usage{
splitDVDreads(
ReadClass = NULL,
blastvec = NULL,
FastaFile = NULL,
WithGeneA = NULL,
WithGeneB = NULL,
MinDNAlength = 10000L
)
}
\arguments{
\item{ReadClass}{Either a tibble obtained with the \code{\link{AnnotateBACreads}} function
or a path to an rds file containing such a file}
\item{blastvec}{Either a table imported with \code{\link{readBlast}} or a path to a blast file
obtained by aligning the vectors on the reads and using \code{-outfmt 6}}
\item{FastaFile}{Either a \code{DNAStringSet} object containing the full read sequences
or a path to a fasta file containing these sequences}
\item{WithGeneA}{Logical. Should the VDV reads align with GeneA? Default is NULL, i.e. no filtering on GeneA alignment}
\item{WithGeneB}{Logical. Should the VDV reads align with GeneB? Default is NULL, i.e. no filtering on GeneB alignment}
\item{MinDNAlength}{Integer. Minimum length of the DNA fragment to keep the reads in the results}
}
\value{
A list with:
\itemize{
\item{ReadSequence}{ a \code{DNAStringSet} with the split reads}
\item{ReadDefinition}{ a \code{GRanges} object with the definition of the DV/VD reads}
}
Note that reads with alignment on the opposite strand of the vector ("-" strand)
are automatically reverse complemented
If no reads are selected, the function returns NULL and a warning.
}
\description{
The function does the following:
\itemize{
\item{Selects DVD reads}{This is done using \code{\link{FilterBACreads}}}
\item{Split the read sequence in DV and VD}{Based on vector alignment, split the read sequence in DV and VD}
\item{Filter based on size}{Keep only the split reads with a DNA fragment that is at least \code{MinDNAlength}bp long}
\item{reverse complement reads on minus strand}{strand is determined based on vector alignment}
\item{Return the split reads}{The split reads are returned as a DNAString object}
}
By default, if alignment to the host genome is provided in the \code{ReadClass} object (column \code{HostAlign}),
then the selected DVD reads are selected to not show any significant alignment to the host genome
}
\examples{
## For simplicity (and to limit file size) we only keep the data for 5 pre-selected DVD reads
## Path to file (.rds) created with the AnnotateBACreads function
pathRC <- system.file("extdata", "BAC02_ReadClass.rds", package = "NanoBAC")
RC <- readRDS(pathRC)
selectedReads <- c("BAC02R5572", "BAC02R21438", "BAC02R1152",
"BAC02R20794", "BAC02R6278" )
RC <- RC[RC$ReadName \%in\% selectedReads,]
## Path to a fasta file containing the sequence of the 5 DVD reads
pathFasta <- system.file("extdata", "BAC02_5DVDreads.fa", package = "NanoBAC")
## Path to the file containing the result from the Blast alignment of the vector on the reads
pathBlast <- system.file("extdata", "BAC02_BlastVector.res", package = "NanoBAC")
## Select DVD reads and split the reads
myDVDreads <- splitDVDreads(ReadClass = RC,
blastvec = pathBlast,
FastaFile = pathFasta,
WithGeneA = TRUE,
WithGeneB = TRUE,
MinDNAlength = 35000)
## Read sequences:
myDVDreads$ReadSequence
## Read definitions:
myDVDreads$ReadDefinition
}
\author{
Pascal GP Martin
}
| /man/splitDVDreads.Rd | permissive | pgpmartin/NanoBAC | R | false | true | 3,591 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/splitDVDreads.R
\name{splitDVDreads}
\alias{splitDVDreads}
\title{Select DVD reads from a set of Nanopore reads and split the reads in 2 parts both containing the vector (DV and VD)}
\usage{
splitDVDreads(
ReadClass = NULL,
blastvec = NULL,
FastaFile = NULL,
WithGeneA = NULL,
WithGeneB = NULL,
MinDNAlength = 10000L
)
}
\arguments{
\item{ReadClass}{Either a tibble obtained with the \code{\link{AnnotateBACreads}} function
or a path to an rds file containing such a file}
\item{blastvec}{Either a table imported with \code{\link{readBlast}} or a path to a blast file
obtained by aligning the vectors on the reads and using \code{-outfmt 6}}
\item{FastaFile}{Either a \code{DNAStringSet} object containing the full read sequences
or a path to a fasta file containing these sequences}
\item{WithGeneA}{Logical. Should the VDV reads align with GeneA? Default is NULL, i.e. no filtering on GeneA alignment}
\item{WithGeneB}{Logical. Should the VDV reads align with GeneB? Default is NULL, i.e. no filtering on GeneB alignment}
\item{MinDNAlength}{Integer. Minimum length of the DNA fragment to keep the reads in the results}
}
\value{
A list with:
\itemize{
\item{ReadSequence}{ a \code{DNAStringSet} with the split reads}
\item{ReadDefinition}{ a \code{GRanges} object with the definition of the DV/VD reads}
}
Note that reads with alignment on the opposite strand of the vector ("-" strand)
are automatically reverse complemented
If no reads are selected, the function returns NULL and a warning.
}
\description{
The function does the following:
\itemize{
\item{Selects DVD reads}{This is done using \code{\link{FilterBACreads}}}
\item{Split the read sequence in DV and VD}{Based on vector alignment, split the read sequence in DV and VD}
\item{Filter based on size}{Keep only the split reads with a DNA fragment that is at least \code{MinDNAlength}bp long}
\item{reverse complement reads on minus strand}{strand is determined based on vector alignment}
\item{Return the split reads}{The split reads are returned as a DNAString object}
}
By default, if alignment to the host genome is provided in the \code{ReadClass} object (column \code{HostAlign}),
then the selected DVD reads are selected to not show any significant alignment to the host genome
}
\examples{
## For simplicity (and to limit file size) we only keep the data for 5 pre-selected DVD reads
## Path to file (.rds) created with the AnnotateBACreads function
pathRC <- system.file("extdata", "BAC02_ReadClass.rds", package = "NanoBAC")
RC <- readRDS(pathRC)
selectedReads <- c("BAC02R5572", "BAC02R21438", "BAC02R1152",
"BAC02R20794", "BAC02R6278" )
RC <- RC[RC$ReadName \%in\% selectedReads,]
## Path to a fasta file containing the sequence of the 5 DVD reads
pathFasta <- system.file("extdata", "BAC02_5DVDreads.fa", package = "NanoBAC")
## Path to the file containing the result from the Blast alignment of the vector on the reads
pathBlast <- system.file("extdata", "BAC02_BlastVector.res", package = "NanoBAC")
## Select DVD reads and split the reads
myDVDreads <- splitDVDreads(ReadClass = RC,
blastvec = pathBlast,
FastaFile = pathFasta,
WithGeneA = TRUE,
WithGeneB = TRUE,
MinDNAlength = 35000)
## Read sequences:
myDVDreads$ReadSequence
## Read definitions:
myDVDreads$ReadDefinition
}
\author{
Pascal GP Martin
}
|
library(MCPMod)
### Name: powCalc
### Title: Calculate the power for the multiple contrast test
### Aliases: powCalc
### Keywords: design
### ** Examples
doses <- c(0,10,25,50,100,150)
models <- list(linear = NULL, emax = c(25),
logistic = c(50, 10.88111), exponential=c(85),
betaMod=matrix(c(0.33,2.31,1.39,1.39), byrow=TRUE, nrow=2))
# calculate optimal contrasts and critical value
plMM <- planMM(models, doses, 50, scal = 200, alpha = 0.05)
# calculate mean vectors
compMod <- fullMod(models, doses, base = 0, maxEff = 0.4, scal = 200)
muMat <- modelMeans(compMod, doses, FALSE, scal = 200)
# calculate power to detect mean vectors
# Power for linear model
powCalc(plMM$contMat, 50, mu = muMat[,1], sigma = 1, cVal = plMM$critVal)
# Power for emax model
powCalc(plMM$contMat, 50, mu = muMat[,2], sigma = 1, cVal = plMM$critVal)
# Power for logistic model
powCalc(plMM$contMat, 50, mu = muMat[,3], sigma = 1, cVal = plMM$critVal)
# compare with JBS 16, p. 650
| /data/genthat_extracted_code/MCPMod/examples/powCalc.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 1,007 | r | library(MCPMod)
### Name: powCalc
### Title: Calculate the power for the multiple contrast test
### Aliases: powCalc
### Keywords: design
### ** Examples
doses <- c(0,10,25,50,100,150)
models <- list(linear = NULL, emax = c(25),
logistic = c(50, 10.88111), exponential=c(85),
betaMod=matrix(c(0.33,2.31,1.39,1.39), byrow=TRUE, nrow=2))
# calculate optimal contrasts and critical value
plMM <- planMM(models, doses, 50, scal = 200, alpha = 0.05)
# calculate mean vectors
compMod <- fullMod(models, doses, base = 0, maxEff = 0.4, scal = 200)
muMat <- modelMeans(compMod, doses, FALSE, scal = 200)
# calculate power to detect mean vectors
# Power for linear model
powCalc(plMM$contMat, 50, mu = muMat[,1], sigma = 1, cVal = plMM$critVal)
# Power for emax model
powCalc(plMM$contMat, 50, mu = muMat[,2], sigma = 1, cVal = plMM$critVal)
# Power for logistic model
powCalc(plMM$contMat, 50, mu = muMat[,3], sigma = 1, cVal = plMM$critVal)
# compare with JBS 16, p. 650
|
par(mfrow=c(3,2))
N <- 100
#Student distribution
df0 <- 1
df1 <- 6
df2 <- 5
df3 <- 8
ncp2 <-2
ncp3 <- 4
x0 <- rt(N, df0)
x1 <- rt(N, df1)
x2 <- rt(N, df2, ncp2)
x3 <- rt(N, df3, ncp3)
plot( density(x0), col='orange')
plot(x0, dt(x0, df=df0), col="red")
hist(x0, prob=TRUE)
curve(dt(x0, df=df0), col="red", add=TRUE)
curve(dt(x1, df=df1), col="green", add=TRUE)
curve(dt(x2, df=df2,ncp2), col="blue", add=TRUE)
curve(dt(x3, df=df3, ncp3), col="magenta", add=TRUE)
lines( density(x0), col='orange')
plot(pt(x0, df0), col="red")
points(pt(x1, df1), col="green")
points(pt(x2, df2,ncp2), col="blue")
points(pt(x3, df3,ncp3), col="magenta")
x <- rchisq(100, 5)
hist(x0, prob=TRUE)
curve( dchisq(x, df=5), col='green', add=TRUE)
curve( dchisq(x, df=10), col='red', add=TRUE )
| /ex2_2.R | no_license | mdiannna/ProjectStatistics | R | false | false | 780 | r | par(mfrow=c(3,2))
N <- 100
#Student distribution
df0 <- 1
df1 <- 6
df2 <- 5
df3 <- 8
ncp2 <-2
ncp3 <- 4
x0 <- rt(N, df0)
x1 <- rt(N, df1)
x2 <- rt(N, df2, ncp2)
x3 <- rt(N, df3, ncp3)
plot( density(x0), col='orange')
plot(x0, dt(x0, df=df0), col="red")
hist(x0, prob=TRUE)
curve(dt(x0, df=df0), col="red", add=TRUE)
curve(dt(x1, df=df1), col="green", add=TRUE)
curve(dt(x2, df=df2,ncp2), col="blue", add=TRUE)
curve(dt(x3, df=df3, ncp3), col="magenta", add=TRUE)
lines( density(x0), col='orange')
plot(pt(x0, df0), col="red")
points(pt(x1, df1), col="green")
points(pt(x2, df2,ncp2), col="blue")
points(pt(x3, df3,ncp3), col="magenta")
x <- rchisq(100, 5)
hist(x0, prob=TRUE)
curve( dchisq(x, df=5), col='green', add=TRUE)
curve( dchisq(x, df=10), col='red', add=TRUE )
|
# Autor: Jose Luis Vicente Villardon
# Dpto. de Estadistica
# Universidad de Salamanca
# Revisado: Noviembre/2017
# Integer is treated as numeric unless otherwise is specified
#
GowerProximities<- function(x, y=NULL, Binary=NULL, Classes=NULL, transformation=3, IntegerAsOrdinal=FALSE, BinCoef= "Simple_Matching", ContCoef="Gower", NomCoef="GOW", OrdCoef="GOW") {
if (!is.data.frame(x)) stop("Main data is not organized as a data frame")
NewX=AdaptDataFrame(x, Binary=Binary, IntegerAsOrdinal=IntegerAsOrdinal)
if (is.null(y)) NewY=NewX
else{
if (!is.data.frame(y)) stop("Suplementary data is not organized as a data frame")
NewY=AdaptDataFrame(y, Binary=Binary, IntegerAsOrdinal=IntegerAsOrdinal)
}
n = dim(NewX$X)[1]
p = dim(NewX$X)[2]
n1 = dim(NewY$X)[1]
p1 = dim(NewY$X)[2]
if (!(p==p1)) stop("Number of columns of the two matrices are not the same")
transformations= c("Identity", "1-S", "sqrt(1-S)", "-log(s)", "1/S-1", "sqrt(2(1-S))", "1-(S+1)/2", "1-abs(S)", "1/(S+1)")
if (is.numeric(transformation)) transformation=transformations[transformation]
if (transformation==1) Type="similarity"
else Type="dissimilarity"
if ( (BinCoef== "Simple_Matching") & (ContCoef=="Gower") & (NomCoef=="GOW") & (OrdCoef=="GOW"))
coefficient="Gower Similarity"
else
paste("Binary: ",BinCoef, ", Continuous: ", ContCoef, ", Nominal: ", NomCoef, ", Ordinal: ", OrdCoef)
result= list()
result$TypeData="Mixed"
result$Type=Type
result$Coefficient=coefficient
result$Transformation=transformation
result$Data=NewX$X
result$SupData=NewY$X
result$Types=NewX$Types
result$Proximities=GowerSimilarities(NewX$X, y=NewY$X, transformation=transformation, Classes=NewX$Types, BinCoef= BinCoef, ContCoef=ContCoef, NomCoef=NomCoef, OrdCoef=OrdCoef)
rownames(result$Proximities)=rownames(x)
colnames(result$Proximities)=rownames(x)
result$SupProximities=NULL
if (!is.null(y)) result$SupProximities=GowerSimilarities(x,y, transformation)
class(result)="proximities"
return(result)
}
| /R/GowerProximities.R | no_license | villardon/MultBiplotR | R | false | false | 2,062 | r | # Autor: Jose Luis Vicente Villardon
# Dpto. de Estadistica
# Universidad de Salamanca
# Revisado: Noviembre/2017
# Integer is treated as numeric unless otherwise is specified
#
GowerProximities<- function(x, y=NULL, Binary=NULL, Classes=NULL, transformation=3, IntegerAsOrdinal=FALSE, BinCoef= "Simple_Matching", ContCoef="Gower", NomCoef="GOW", OrdCoef="GOW") {
if (!is.data.frame(x)) stop("Main data is not organized as a data frame")
NewX=AdaptDataFrame(x, Binary=Binary, IntegerAsOrdinal=IntegerAsOrdinal)
if (is.null(y)) NewY=NewX
else{
if (!is.data.frame(y)) stop("Suplementary data is not organized as a data frame")
NewY=AdaptDataFrame(y, Binary=Binary, IntegerAsOrdinal=IntegerAsOrdinal)
}
n = dim(NewX$X)[1]
p = dim(NewX$X)[2]
n1 = dim(NewY$X)[1]
p1 = dim(NewY$X)[2]
if (!(p==p1)) stop("Number of columns of the two matrices are not the same")
transformations= c("Identity", "1-S", "sqrt(1-S)", "-log(s)", "1/S-1", "sqrt(2(1-S))", "1-(S+1)/2", "1-abs(S)", "1/(S+1)")
if (is.numeric(transformation)) transformation=transformations[transformation]
if (transformation==1) Type="similarity"
else Type="dissimilarity"
if ( (BinCoef== "Simple_Matching") & (ContCoef=="Gower") & (NomCoef=="GOW") & (OrdCoef=="GOW"))
coefficient="Gower Similarity"
else
paste("Binary: ",BinCoef, ", Continuous: ", ContCoef, ", Nominal: ", NomCoef, ", Ordinal: ", OrdCoef)
result= list()
result$TypeData="Mixed"
result$Type=Type
result$Coefficient=coefficient
result$Transformation=transformation
result$Data=NewX$X
result$SupData=NewY$X
result$Types=NewX$Types
result$Proximities=GowerSimilarities(NewX$X, y=NewY$X, transformation=transformation, Classes=NewX$Types, BinCoef= BinCoef, ContCoef=ContCoef, NomCoef=NomCoef, OrdCoef=OrdCoef)
rownames(result$Proximities)=rownames(x)
colnames(result$Proximities)=rownames(x)
result$SupProximities=NULL
if (!is.null(y)) result$SupProximities=GowerSimilarities(x,y, transformation)
class(result)="proximities"
return(result)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/FunksjonerDatafilerFHI.R
\name{sendDataFilerFHI}
\alias{sendDataFilerFHI}
\title{Funksjon som henter filer som skal sendes til FHI. To filer fra intensivopphold
og to filer fra sykehusopphold. Dvs. Ei fil for hvert opphold og ei aggregert til
person, for hvert register}
\usage{
sendDataFilerFHI(zipFilNavn = "Testfil", brukernavn = "testperson")
}
\arguments{
\item{zipFilNavn}{Navn på fila som skal kjøres. DataFHICovMonitor, DataFHIPanBeredInflu, Testfil}
\item{brukernavn}{Innlogget brukernavn}
}
\value{
Filsti til fil med filsti til zip...
}
\description{
Funksjon som henter filer som skal sendes til FHI. To filer fra intensivopphold
og to filer fra sykehusopphold. Dvs. Ei fil for hvert opphold og ei aggregert til
person, for hvert register
}
| /man/sendDataFilerFHI.Rd | permissive | Rapporteket/korona | R | false | true | 834 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/FunksjonerDatafilerFHI.R
\name{sendDataFilerFHI}
\alias{sendDataFilerFHI}
\title{Funksjon som henter filer som skal sendes til FHI. To filer fra intensivopphold
og to filer fra sykehusopphold. Dvs. Ei fil for hvert opphold og ei aggregert til
person, for hvert register}
\usage{
sendDataFilerFHI(zipFilNavn = "Testfil", brukernavn = "testperson")
}
\arguments{
\item{zipFilNavn}{Navn på fila som skal kjøres. DataFHICovMonitor, DataFHIPanBeredInflu, Testfil}
\item{brukernavn}{Innlogget brukernavn}
}
\value{
Filsti til fil med filsti til zip...
}
\description{
Funksjon som henter filer som skal sendes til FHI. To filer fra intensivopphold
og to filer fra sykehusopphold. Dvs. Ei fil for hvert opphold og ei aggregert til
person, for hvert register
}
|
## @knitr tgIntro
library(datasets)
data(ToothGrowth); str(ToothGrowth)
## @knitr tgIntro2
apply(matrix(
with(ToothGrowth, is.na(c(len, supp, dose))), 60,
dimnames = list(1:60, c("len", "supp", "dose"))), 2, sum) | /tgIntro.R | no_license | gking2224/StatInfCourseProject | R | false | false | 229 | r | ## @knitr tgIntro
library(datasets)
data(ToothGrowth); str(ToothGrowth)
## @knitr tgIntro2
apply(matrix(
with(ToothGrowth, is.na(c(len, supp, dose))), 60,
dimnames = list(1:60, c("len", "supp", "dose"))), 2, sum) |
\name{var_fs}
\docType{data}
\alias{var_fs}
\title{Variance-covariance matrix of the 92 species in data_fs}
\description{
This data set gives the distance matrix (variance-covariance matrix)
of the 92 species in data_fs
}
\usage{var_fs}
\format{A matrix of size 92*92.}
\source{Prepared by Tim Ryan.}
\references{
}
\keyword{datasets}
| /man/var_fs.Rd | no_license | cran/pGLS | R | false | false | 432 | rd | \name{var_fs}
\docType{data}
\alias{var_fs}
\title{Variance-covariance matrix of the 92 species in data_fs}
\description{
This data set gives the distance matrix (variance-covariance matrix)
of the 92 species in data_fs
}
\usage{var_fs}
\format{A matrix of size 92*92.}
\source{Prepared by Tim Ryan.}
\references{
}
\keyword{datasets}
|
ComputerData <-read.csv(file.choose())
Computer_data <- ComputerData[,-1]
View(Computer_data)
class(Computer_data)
library(plyr)
Computer_data1 <- Computer_data
Computer_data1$cd <- as.numeric(revalue(Computer_data1$cd,c("yes"=1, "no"=0)))
Computer_data1$multi <- as.numeric(revalue(Computer_data1$multi,c("yes"=1, "no"=0)))
Computer_data1$premium <- as.numeric(revalue(Computer_data1$premium,c("yes"=1, "no"=0)))
View(Computer_data1)
class(Computer_data1)
attach(Computer_data1)
summary(Computer_data1)
plot(speed, price)
plot(hd, price)
plot(ram, price)
plot(screen, price)
plot(cd, price)
plot(multi, price)
plot(premium, price)
plot(ads, price)
plot(trend, price)
windows()
pairs(Computer_data1)
cor(Computer_data1)
Model.Computer_data1 <- lm(price~speed+hd+ram+screen+cd+multi+premium+ads+trend)
summary(Model.Computer_data1)
| /Computer_data1.R | no_license | surajbaraik/Multi-Linear-Regression-computer-data-R-and-Python | R | false | false | 883 | r |
ComputerData <-read.csv(file.choose())
Computer_data <- ComputerData[,-1]
View(Computer_data)
class(Computer_data)
library(plyr)
Computer_data1 <- Computer_data
Computer_data1$cd <- as.numeric(revalue(Computer_data1$cd,c("yes"=1, "no"=0)))
Computer_data1$multi <- as.numeric(revalue(Computer_data1$multi,c("yes"=1, "no"=0)))
Computer_data1$premium <- as.numeric(revalue(Computer_data1$premium,c("yes"=1, "no"=0)))
View(Computer_data1)
class(Computer_data1)
attach(Computer_data1)
summary(Computer_data1)
plot(speed, price)
plot(hd, price)
plot(ram, price)
plot(screen, price)
plot(cd, price)
plot(multi, price)
plot(premium, price)
plot(ads, price)
plot(trend, price)
windows()
pairs(Computer_data1)
cor(Computer_data1)
Model.Computer_data1 <- lm(price~speed+hd+ram+screen+cd+multi+premium+ads+trend)
summary(Model.Computer_data1)
|
#' Parallel aggregate
#'
#' Function to aggregate a raster brick
#'
#' @import parallel
#' @importFrom methods as
#' @importFrom raster aggregate as.list brick setZ
#' @param dummie_nc a character string
#' @param new_res numeric
#' @return raster brick
#' @keywords internal
aggregate_brick <- function(dummie_nc, new_res){
dummie_brick <- brick(dummie_nc)
dummie_brick <- as.list(dummie_brick)
no_cores <- detectCores() - 1
if (no_cores < 1 | is.na(no_cores))(no_cores <- 1)
cluster <- makeCluster(no_cores, type = "PSOCK")
clusterExport(cluster, "new_res", envir = environment())
dummie_list <- parLapply(cluster, dummie_brick, function(dummie_layer){
dummie_res <- raster::res(dummie_layer)[1]
dummie_factor <- new_res/dummie_res
dummie_raster <- raster::aggregate(dummie_layer, fact = dummie_factor,
fun = mean, na.rm = TRUE)
dummie_raster
})
stopCluster(cluster)
dummie_list <- brick(dummie_list)
dummie_names <- names(dummie_list)
if (!Reduce("|", grepl("^X\\d\\d\\d\\d\\.\\d\\d\\.\\d\\d",
dummie_names))) {
if (grepl("persiann", dummie_nc)) {
dummie_names <- sub("^.", "", dummie_names)
dummie_names <- as.numeric(dummie_names)
dummie_Z <- as.Date(dummie_names, origin = "1983-01-01 00:00:00")
} else if (grepl("gldas-clsm", dummie_nc)) {
dummie_names <- sub("^.", "", dummie_names)
dummie_names <- as.numeric(dummie_names)
dummie_Z <- as.Date(dummie_names, origin = "1948-01-01 00:00:00")
}
} else {
dummie_Z <- as.Date(dummie_names, format = "X%Y.%m.%d")
}
dummie_list <- setZ(dummie_list, dummie_Z)
return(dummie_list)
} | /R/aggregate_brick.R | no_license | imarkonis/pRecipe | R | false | false | 1,698 | r | #' Parallel aggregate
#'
#' Function to aggregate a raster brick
#'
#' @import parallel
#' @importFrom methods as
#' @importFrom raster aggregate as.list brick setZ
#' @param dummie_nc a character string
#' @param new_res numeric
#' @return raster brick
#' @keywords internal
aggregate_brick <- function(dummie_nc, new_res){
dummie_brick <- brick(dummie_nc)
dummie_brick <- as.list(dummie_brick)
no_cores <- detectCores() - 1
if (no_cores < 1 | is.na(no_cores))(no_cores <- 1)
cluster <- makeCluster(no_cores, type = "PSOCK")
clusterExport(cluster, "new_res", envir = environment())
dummie_list <- parLapply(cluster, dummie_brick, function(dummie_layer){
dummie_res <- raster::res(dummie_layer)[1]
dummie_factor <- new_res/dummie_res
dummie_raster <- raster::aggregate(dummie_layer, fact = dummie_factor,
fun = mean, na.rm = TRUE)
dummie_raster
})
stopCluster(cluster)
dummie_list <- brick(dummie_list)
dummie_names <- names(dummie_list)
if (!Reduce("|", grepl("^X\\d\\d\\d\\d\\.\\d\\d\\.\\d\\d",
dummie_names))) {
if (grepl("persiann", dummie_nc)) {
dummie_names <- sub("^.", "", dummie_names)
dummie_names <- as.numeric(dummie_names)
dummie_Z <- as.Date(dummie_names, origin = "1983-01-01 00:00:00")
} else if (grepl("gldas-clsm", dummie_nc)) {
dummie_names <- sub("^.", "", dummie_names)
dummie_names <- as.numeric(dummie_names)
dummie_Z <- as.Date(dummie_names, origin = "1948-01-01 00:00:00")
}
} else {
dummie_Z <- as.Date(dummie_names, format = "X%Y.%m.%d")
}
dummie_list <- setZ(dummie_list, dummie_Z)
return(dummie_list)
} |
#' ---
#' title: "Chlamee nitrate analysis"
#' author: "Joey"
#' ---
#+
knitr::opts_chunk$set(echo = TRUE)
knitr::opts_chunk$set(message = FALSE)
knitr::opts_chunk$set(warning = FALSE)
knitr::opts_chunk$set(cache = TRUE)
#+ knitr::opts_chunk$set(message = FALSE)
library(tidyverse)
library(cowplot)
library(broom)
library(readxl)
library(janitor)
library(plotrix)
library(here)
library(growthTools)
library(rootSolve)
#' Read in data
# Treatments table: one row per evolved population; clean_names() gives
# snake_case columns. Populations with no recorded treatment are labelled
# "none", and population cc1629 is dropped (excluded from this analysis).
treatments <- read_excel(here("data-general", "ChlamEE_Treatments_JB.xlsx")) %>%
  clean_names() %>%
  mutate(treatment = ifelse(is.na(treatment), "none", treatment)) %>%
  filter(population != "cc1629")
# RFU time series per well across the nitrate gradient (pre-processed upstream).
nitrate <- read_csv(here("data-processed", "nitrate-abundances-processed.csv"))
#' this is the step that gets us the growth rate estimates
# Fit per-well growth models on ln(RFU) with growthTools::get.growth.rate,
# which selects among exponential / lag / saturating shapes. Default model
# selection here is AICc; COMBO wells (blank medium) are excluded.
growth_rates_n_AICc <- nitrate %>%
  filter(population != "COMBO") %>%
  mutate(ln.fluor = log(RFU)) %>%
  group_by(well_plate) %>%
  do(grs = get.growth.rate(x = .$days, y = .$ln.fluor,id = .$well_plate, plot.best.Q = F))
#' Get growth rates via AIC
# Same fits, but with model.selection = "AIC" for comparison with AICc.
growth_rates_n_AIC <- nitrate %>%
  filter(population != "COMBO") %>%
  mutate(ln.fluor = log(RFU)) %>%
  group_by(well_plate) %>%
  do(grs = get.growth.rate(x = .$days, y = .$ln.fluor,id = .$well_plate, plot.best.Q = F, model.selection = "AIC"))
#' Pull out the things we want
# Extract scalar summaries from each fit object (slope = growth rate mu,
# sample size, R^2, chosen model name, SE) plus the underlying model object
# in the `contents` list-column for later parameter extraction.
growth_sum_n_AICc <- growth_rates_n_AICc %>%
  summarise(well_plate, mu = grs$best.slope, n_obs = grs$best.model.slope.n,
            slope_r2 = grs$best.model.slope.r2,
            best.model_r2 = grs$best.model.rsqr,
            best.model = grs$best.model, best.se = grs$best.se,
            contents = grs$best.model.contents)
growth_sum_n_AIC <- growth_rates_n_AIC %>%
  summarise(well_plate, mu = grs$best.slope, n_obs = grs$best.model.slope.n,
            slope_r2 = grs$best.model.slope.r2,
            best.model_r2 = grs$best.model.rsqr,
            best.model = grs$best.model, best.se = grs$best.se,
            contents = grs$best.model.contents)
# Side-by-side comparison; after the join, .x columns are AICc and .y are AIC.
all_models <- left_join(growth_sum_n_AICc, growth_sum_n_AIC, by = "well_plate")
## now try something different, and pull out all the models that were best fit by gr.sat, gr.lagsat
# Wells whose AIC-selected best model includes a saturating phase.
growth_sum_n_AIC_saturated <- growth_rates_n_AIC %>%
  summarise(well_plate, mu = grs$best.slope, n_obs = grs$best.model.slope.n,
            slope_r2 = grs$best.model.slope.r2,
            best.model_r2 = grs$best.model.rsqr,
            best.model = grs$best.model, best.se = grs$best.se,
            contents = grs$best.model.contents) %>%
  filter(best.model %in% c("gr.sat", "gr.lagsat"))
#' 1099 out of 1480 are fit differently with AIC and AICc and picked by AIC as a lagsat or sat.
#' For these we will pull out the points before the saturated phase, and fit an exponential model.
# Count the AICc/AIC disagreements where AIC chose a saturating model.
all_models %>%
  filter(best.model.x != best.model.y) %>%
  filter(best.model.y %in% c("gr.lagsat","gr.sat")) %>%
  tally() %>%
  knitr::kable()
mismatches <- all_models %>%
  filter(best.model.x != best.model.y) %>%
  filter(best.model.y %in% c("gr.lagsat","gr.sat"))
# Lookup table: one row per well with its population and nitrate level,
# used repeatedly below to re-attach metadata after model fitting.
key <- nitrate %>%
  select(well_plate, population, nitrate_concentration) %>%
  distinct(well_plate, .keep_all = TRUE)
# AICc-based growth rates without the model objects, tagged with the method.
AICc_growth_rates <- growth_sum_n_AICc %>%
  select(-contents) %>%
  mutate(IC_method = "AICc")
AICc_growth_rates2 <- left_join(AICc_growth_rates, key, by = "well_plate")
# write_csv(AICc_growth_rates2, here("data-processed", "nitrate_exp_growth_w_growthtools_AICc.csv"))
# Tidy the fitted model coefficients out of the `contents` list-column.
exp_params <- growth_sum_n_AICc %>%
  unnest(contents %>% map(tidy, .id = "number")) ## pull out the slopes and intercepts etc.
# For saturating fits, parameter B1 is the breakpoint where growth saturates;
# use it as the end of the exponential window for that well.
sat_models <- growth_sum_n_AIC %>%
  unnest(contents %>% map(tidy, .id = "number")) %>%
  # filter(well_plate %in% c(mismatches$well_plate)) %>%
  filter(best.model == "gr.sat", term == "B1") %>%
  rename(cutoff_point = estimate) %>%
  select(well_plate, cutoff_point, best.model)
# For lag+saturating fits, B2 marks the start of saturation (B1 is lag end).
lag_sat_models <- growth_sum_n_AIC %>%
  unnest(contents %>% map(tidy, .id = "number")) %>%
  # filter(well_plate %in% c(mismatches$well_plate)) %>%
  filter(best.model == "gr.lagsat", term == "B2") %>%
  rename(cutoff_point = estimate) %>%
  select(well_plate, cutoff_point, best.model)
# Wells with no saturation get a cutoff of 20 days, i.e. keep the whole series
# (20 exceeds the observed time range, judging by the filters below).
exponential_lag_models <- growth_sum_n_AIC %>%
  unnest(contents %>% map(tidy, .id = "number")) %>%
  filter(!best.model %in% c("gr.sat", "gr.lagsat")) %>%
  # filter(!well_plate %in% c(mismatches$well_plate)) %>%
  distinct(well_plate, .keep_all = TRUE) %>%
  mutate(cutoff_point = 20) %>%
  select(well_plate, cutoff_point, best.model)
# One cutoff per well, whichever model class it came from.
all_cutoffs <- bind_rows(sat_models, lag_sat_models, exponential_lag_models)
# Interactive inspection of the wells that actually get trimmed (View is a no-op
# in non-interactive runs of this script).
all_cutoffs %>%
  filter(cutoff_point < 20) %>% View
nitrate_cutoffs <- left_join(nitrate, all_cutoffs, by = "well_plate")
# Diagnostic plot: early cutoffs at low nitrate, red dot marks the cutoff day.
nitrate_cutoffs %>%
  filter(cutoff_point < 2, nitrate_concentration < 10) %>%
  ggplot(aes(x = days, y = RFU, color = best.model)) + geom_point() + geom_point(aes(x = cutoff_point, y = 0), size = 3, color = "red") +
  facet_wrap( ~ well_plate)
#' now trim the time series of nitrate and force fit an exponential model
nitrate_with_cutoffs <- left_join(nitrate, all_cutoffs, by = "well_plate") %>%
  filter(days < cutoff_point)
### fit an exponential model
# Force an exponential fit RFU = N0 * exp(r * days) on the trimmed series,
# with N0 fixed at the first observed RFU of each well.
nitrate_exponential_growth_rates <- nitrate_with_cutoffs %>%
  filter(population != "COMBO") %>%
  group_by(population, nitrate_concentration, well_plate) %>%
  mutate(N0 = RFU[[1]]) %>%
  group_by(nitrate_concentration, population, well_plate) %>%
  do(tidy(nls(RFU ~ N0 * exp(r*days),
              data= .,  start=list(r=0.01),
              control = nls.control(maxiter=100, minFactor=1/204800000)))) %>%
  ungroup()
library(nls.multstart)
# try again exponential ---------------------------------------------------
# Same exponential model, but fit with nls_multstart (multiple random starts
# for r in [0.2, 1], bounded to [0, 5]) which is more robust to bad starts.
ldata_n0 <- nitrate_with_cutoffs %>%
  group_by(well_plate) %>%
  mutate(N0 = RFU[[1]]) %>%
  ungroup()
fits_many_n0 <- ldata_n0 %>%
  group_by(population, well_plate) %>%
  nest() %>%
  mutate(fit = purrr::map(data, ~ nls_multstart(RFU ~ N0 * exp(r*days),
                                                data = .x,
                                                iter = 500,
                                                start_lower = c(r = 0.2),
                                                start_upper = c(r = 1),
                                                supp_errors = 'N',
                                                na.action = na.omit,
                                                lower = c(r = 0),
                                                upper = c(r = 5),
                                                control = nls.control(maxiter=1000, minFactor=1/204800000))))
fits_many <- fits_many_n0
fits_well_plate <- fits_many %>%
  select(well_plate)
key <- nitrate %>%
  select(well_plate, population, nitrate_concentration) %>%
  distinct(well_plate, .keep_all = TRUE)
pops <- left_join(fits_well_plate, key)
# Per-fit goodness-of-fit statistics (broom::glance on each nls object).
info <- fits_many %>%
  unnest(fit %>% map(glance))
# get params
# NOTE(review): filter(fit != "NULL") compares a list-column against the
# string "NULL" to drop failed fits -- confirm this actually catches them.
params <- fits_many %>%
  filter(fit != "NULL") %>%
  unnest(fit %>% map(tidy))
# 95% confidence intervals on r; confint2 is presumably nlstools::confint2 --
# TODO confirm which package provides it here.
CI <- fits_many %>%
  filter(fit != "NULL") %>%
  unnest(fit %>% map(~ confint2(.x) %>%
                       data.frame() %>%
                       rename(., conf.low = X2.5.., conf.high = X97.5..))) %>%
  group_by(., well_plate) %>%
  mutate(., term = c('r')) %>%
  ungroup()
# merge parameters and CI estimates
params <- merge(params, CI, by = intersect(names(params), names(CI)))
write_csv(params, here("data-processed", "exponential_params_cutoff_approach.csv"))
# get predictions
preds <- fits_many %>%
  unnest(fit %>% map(augment))
# Common prediction grid of 150 days values spanning the observed range.
new_preds <- ldata_n0 %>%
  do(., data.frame(days = seq(min(.$days), max(.$days), length.out = 150), stringsAsFactors = FALSE))
# max and min for each curve
max_min <- group_by(ldata_n0, well_plate) %>%
  summarise(., min_days = min(days), max_days = max(days)) %>%
  ungroup()
# create new predictions
# Predict each well's fit on the grid, then clip to that well's observed span
# and rename the fitted column so it plots on the RFU axis.
preds2 <- fits_many %>%
  unnest(fit %>% map(augment, newdata = new_preds)) %>%
  merge(., max_min, by = 'well_plate') %>%
  group_by(., well_plate) %>%
  filter(., days > unique(min_days) & days < unique(max_days)) %>%
  rename(., RFU = .fitted) %>%
  ungroup()
key <- ldata_n0 %>%
  select(well_plate, population, nitrate_concentration) %>%
  distinct(well_plate, .keep_all = TRUE)
preds3 <- left_join(preds2, key, by = c("well_plate", "population"))
# Observed points plus fitted exponential trajectories, one panel per population.
ggplot() +
  geom_point(aes(days, RFU, color = factor(nitrate_concentration)), size = 2, data = ldata_n0) +
  geom_line(aes(days, RFU, group = well_plate, color = factor(nitrate_concentration)), data = preds3) +
  facet_wrap( ~ population, scales = "free") +
  scale_color_viridis_d() +
  ylab('RFU') +
  xlab('Days')
ggsave("figures/nitrate_rfus_exponential_n0.pdf", width = 40, height = 35)
# Again -------------------------------------------------------------------
# Repeat the multstart exponential fits, this time grouping by nitrate
# concentration as well so the fit metadata carries the concentration.
nitrate_with_cutoffs2 <- nitrate_with_cutoffs %>%
  filter(population != "COMBO") %>%
  group_by(population, nitrate_concentration, well_plate) %>%
  mutate(N0 = RFU[[1]])
exponential_fits <- nitrate_with_cutoffs2 %>%
  group_by(nitrate_concentration, population, well_plate) %>%
  nest() %>%
  mutate(fit = purrr::map(data, ~ nls_multstart(RFU ~ N0 * exp(r*days),
                                                data = .x,
                                                iter = 500,
                                                start_lower = c(r = 0.2),
                                                start_upper = c(r = 1),
                                                supp_errors = 'N',
                                                na.action = na.omit,
                                                lower = c(r = 0),
                                                upper = c(r = 5),
                                                control = nls.control(maxiter=1000, minFactor=1/204800000))))
# NOTE(review): fits_many, params, info, preds, new_preds, max_min and preds2
# are all overwritten here, shadowing the versions built in the section above.
fits_many <- exponential_fits
params <- fits_many %>%
  filter(fit != "NULL") %>%
  unnest(fit %>% map(tidy))
info <- fits_many %>%
  unnest(fit %>% map(glance))
preds <- fits_many %>%
  unnest(fit %>% map(augment))
# Coarser 10-point prediction grid over the observed day range.
new_preds <- nitrate_with_cutoffs2 %>%
  distinct(RFU, nitrate_concentration, population, well_plate, .keep_all = TRUE) %>%
  group_by(well_plate, population) %>%
  do(., data.frame(days = seq(min(.$days), max(.$days), length.out = 10), stringsAsFactors = FALSE))
ngrowth <- nitrate_with_cutoffs2 %>%
  distinct(RFU, nitrate_concentration, population, days, .keep_all = TRUE)
max_min <- dplyr::group_by(ngrowth, population, well_plate) %>%
  summarise(., min_days = min(days), max_days = max(days)) %>%
  ungroup()
# create new predictions
# Fitted values on the grid, clipped to each well's observed day span;
# `.fitted` is renamed to RFU for plotting.
preds2 <- fits_many %>%
  unnest(fit %>% map(augment, newdata = new_preds)) %>%
  merge(., max_min, by = 'well_plate') %>%
  group_by(., well_plate) %>%
  filter(., days > unique(min_days) & days < unique(max_days)) %>%
  rename(., RFU = .fitted) %>%
  ungroup()
# Overlay observed RFUs with the fitted exponential trajectories, one panel
# per population. preds2 renames the fitted column `.fitted` to `RFU`, so the
# line layer must map `RFU`; the original mapped a non-existent `RFU_fitted`
# column, which errors when the plot is rendered.
ggplot() +
  geom_point(aes(days, RFU), size = 2, data = ngrowth) +
  geom_line(aes(days, RFU, group = well_plate), data = preds2) +
  facet_wrap( ~ population) +
  ylab('RFU') +
  xlab('Days')
# Per-well exponential fits evaluated on a prediction grid via broom::augment.
# NOTE(review): `new_preds` here is whichever definition ran last (the 10-point
# days grid from the section above) -- the redefinition just below comes AFTER
# this use and builds a nitrate_concentration grid instead; verify the intended
# ordering before rerunning top-to-bottom.
nitrate_exponential_growth_rates_fitted <- nitrate_with_cutoffs %>%
  filter(population != "COMBO") %>%
  group_by(population, nitrate_concentration, well_plate) %>%
  mutate(N0 = RFU[[1]]) %>%
  ungroup() %>%
  group_by(population, well_plate) %>%
  do(augment(nls(RFU ~ N0 * exp(r*days),
                 data= .,  start=list(r=0.01),
                 control = nls.control(maxiter=100, minFactor=1/204800000)), newdata = new_preds)) %>%
  ungroup()
# Grid of 150 nitrate concentrations per population/well (for Monod-style
# predictions later).
new_preds <- nitrate_exponential_growth_rates %>%
  distinct(estimate, nitrate_concentration, population, .keep_all = TRUE) %>%
  group_by(population, well_plate) %>%
  do(., data.frame(nitrate_concentration = seq(min(.$nitrate_concentration), max(.$nitrate_concentration), length.out = 150), stringsAsFactors = FALSE))
# Fitted exponential curves (lines) over the observed RFUs (points).
nitrate_exponential_growth_rates_fitted %>%
  ggplot(aes(x = days, y = .fitted, group = well_plate, color = nitrate_concentration)) + geom_line() +
  geom_point(aes(x = days, y = RFU, color = nitrate_concentration), data = nitrate_exponential_growth_rates_fitted) +
  facet_wrap( ~ population) + scale_color_viridis_c()
# Crude alternative exponential window: simply keep the first 2 days.
nitrate_exp <- nitrate %>%
  mutate(exponential = case_when(days < 2 ~ "yes",
                                 TRUE ~ "no")) %>%
  filter(exponential == "yes")
# Linear fit on ln(RFU) over the cutoff-trimmed series (growthTools, forced
# to the "linear" method, i.e. plain exponential growth).
growth_rates_n_AICcut <- nitrate_with_cutoffs %>%
  # filter(cutoff_point < 20) %>%
  # nitrate_with_cutoffs %>%
  filter(population != "COMBO") %>%
  mutate(ln.fluor = log(RFU)) %>%
  group_by(well_plate) %>%
  do(grs = get.growth.rate(x = .$days, y = .$ln.fluor,id = .$well_plate, plot.best.Q = F, methods = "linear"))
# r estimates from the direct nls exponential fits.
nexp <- nitrate_exponential_growth_rates %>%
  select(estimate, population, well_plate) %>%
  rename(exp_growth = estimate)
# Slope estimates from the growthTools linear fits.
grtools_exp <- growth_rates_n_AICcut %>%
  summarise(well_plate, estimate = grs$best.slope, n_obs = grs$best.model.slope.n,
            slope_r2 = grs$best.model.slope.r2,
            best.model_r2 = grs$best.model.rsqr,
            best.model = grs$best.model, best.se = grs$best.se,
            contents = grs$best.model.contents) %>%
  select(estimate, well_plate) %>%
  rename(gt_growth = estimate)
all_exponential_models <- left_join(nexp, grtools_exp)
# Sanity check: the two approaches should fall on the 1:1 line.
all_exponential_models %>%
  ggplot(aes(x = gt_growth, y = exp_growth)) + geom_point() +
  geom_abline(slope = 1, intercept = 0)
# AICc fits for the wells where AICc and AIC agreed (no saturating phase).
exponential_lag_models_original <- growth_rates_n_AICc %>%
  filter(!well_plate %in% c(mismatches$well_plate))
# all_growth_rates_cut <- bind_rows(growth_rates_n_AICcut, exponential_lag_models_original)
growth_sum_n_AICcut <- growth_rates_n_AICcut %>%
  summarise(well_plate, mu = grs$best.slope, n_obs = grs$best.model.slope.n,
            slope_r2 = grs$best.model.slope.r2,
            best.model_r2 = grs$best.model.rsqr,
            best.model = grs$best.model, best.se = grs$best.se,
            contents = grs$best.model.contents)
mods <- nitrate_exponential_growth_rates
# Coefficients and fitted values pulled out of the cut (linear) fits.
exp_params_cut <- growth_sum_n_AICcut %>%
  unnest(contents %>% map(tidy, .id = "number"))
exp_params_aug_cut <- growth_sum_n_AICcut %>%
  unnest(contents %>% map(augment, .id = "number")) %>% ### now add the fitted values
  rename(days = x)
exp_wide_cut <- exp_params_cut %>%
  spread(key = term, value = estimate)
all_preds <- left_join(exp_params_aug_cut, exp_wide_cut)
all_preds2 <- left_join(all_preds, key, by = "well_plate")
all_preds_exp <- left_join(all_preds, key, by = "well_plate") ### this is now the version with the cutoff exponentials
# %>%
# 	group_by(well_plate) %>%
# 	mutate(B1 = mean(B1, na.rm = TRUE)) %>%
# 	mutate(B2 = mean(B2, na.rm = TRUE)) %>% ### here we do some wrangling to get the colour coding right for our plots
# 	mutate(exponential = case_when(best.model == "gr" ~ "yes",
# 	                               best.model == "gr.sat" & days < B1 ~ "yes",
# 	                               best.model == "gr.lag" & days < B1 ~ "yes",
# 	                               best.model == "gr.lagsat" & days < B2 & days > B1 ~ "yes",
# 	                               TRUE ~ "no"))
# all_preds2 %>%
# 	ggplot(aes(x = days, y = .fitted, group = well_plate, color = best.model)) + geom_line() +
# 	geom_point(aes(x = days, y = y)) +
# 	facet_grid( ~ well_plate) + ylab("Ln(RFU)") +xlab("Days")
# NOTE(review): the plot this ggsave captures is whatever was drawn last
# (the per-well plot above is commented out), so the saved figure may not be
# what the filename suggests.
ggsave("figures/all_exponential_nitrate_wells.png", width = 45, height = 20)
### this doesn't look like it's working, because it's still picking up time points that are clearly in the sat phase.
all_preds_exp %>%
  ggplot(aes(x = days, y = .fitted, group = well_plate, color = best.model)) + geom_line() +
  geom_point(aes(x = days, y = y)) +
  facet_grid(nitrate_concentration ~ population) + ylab("Ln(RFU)") +xlab("Days")
ggsave("figures/all_exponential_nitrate_exp_sat_cut.png", width = 45, height = 20)
#+ fig.width=45, fig.height=20
# NOTE(review): `shape = exponential` references a column created only by the
# commented-out pipeline above; as written, all_preds2 has no `exponential`
# column and this plot will error.
all_preds2 %>%
  ggplot(aes(x = days, y = .fitted, group = well_plate, color = best.model)) + geom_line() +
  geom_point(aes(x = days, y = y, shape = exponential)) +
  facet_grid(nitrate_concentration ~ population) + ylab("Ln(RFU)") +xlab("Days")
AICcut_growth_rates2 <- left_join(growth_sum_n_AICcut, key, by = "well_plate")
# Growth rate vs nitrate concentration, one panel per population.
AICcut_growth_rates2 %>%
  mutate(nitrate_concentration = as.numeric(nitrate_concentration)) %>%
  rename(estimate = mu) %>%
  ggplot(aes(x = nitrate_concentration, y = estimate)) + geom_point() + facet_wrap(~population)
# Monod fits --------------------------------------------------------------
# NOTE(review): this section is exploratory -- `growth_rates` is reassigned
# repeatedly and only the LAST assignment (the growthTools AIC csv read below)
# feeds the Monod fits. The intermediate assignments document the candidate
# growth-rate sources that were tried.
growth_rates <- AICcut_growth_rates2
growth_rates <- left_join(params, key, by = c("well_plate", "population")) %>%
  select(population, well_plate, estimate) %>%
  rename(exp_mult = estimate)
growth_rates_exp <- left_join(nitrate_exponential_growth_rates, key) %>%
  # select(population, well_plate, estimate) %>%
  rename(exp = estimate)
# First-2-days window with N0 fixed at the initial RFU, then a direct
# exponential nls fit per well.
nitrate_exp <- nitrate %>%
  mutate(exponential = case_when(days < 2 ~ "yes",
                                 TRUE ~ "no")) %>%
  filter(exponential == "yes") %>%
  group_by(population, nitrate_concentration, well_plate) %>%
  mutate(N0 = RFU[[1]])
growth_rates <- nitrate_exp %>%
  filter(population != "COMBO") %>%
  group_by(nitrate_concentration, population, well_plate) %>%
  do(tidy(nls(RFU ~ N0 * exp(r*days),
              data= ., start=list(r=0.01),
              control = nls.control(maxiter=100, minFactor=1/204800000)))) %>%
  ungroup()
# NOTE(review): exp_fits_top / exp_fits_log / exp_fits_all are not defined
# anywhere in this script chunk -- these three lines will error unless those
# objects exist in the session from elsewhere.
growth_rates <- exp_fits_top
growth_rates <- exp_fits_log
growth_rates <- exp_fits_all
growth_rates <- growth_rates_exp
growth_rates <- nitrate_eyeball_exp %>%
  rename(nitrate_concentration = nitrate_concentration.x)
# Final choice: growthTools AIC exponential growth rates from disk.
growth_rates <- read_csv(here("data-processed", "nitrate_exp_growth_w_growthtools_AIC.csv")) %>%
  filter(population != "COMBO") %>%
  rename(estimate = mu)
nitrate_eyeball_exp <- read_csv(here("data-processed", "nitrate_exp_growth_eyeball.csv")) %>%
  distinct(population, nitrate_concentration.x, well_plate.x, .keep_all = TRUE)# left_join(growth_rates, growth_rates_exp) %>% ok so these are the same, which is good.
#' Fit the Monod model to the growth rates
# Per-population Monod fit: mu(N) = umax * N / (ks + N), via port-algorithm nls.
# NOTE(review): the lower bounds are given as list(c=0.01, d=0), but the model
# parameters are named ks and umax -- these bound names don't match any
# parameter and look copied from another script; verify the bounds are
# actually being applied as intended before trusting constrained fits.
monod_fits <- growth_rates %>%
  # AICc_growth_rates2 %>%
  mutate(nitrate_concentration = as.numeric(nitrate_concentration)) %>%
  # rename(estimate = mu) %>%
  group_by(population) %>%
  do(tidy(nls(estimate ~ umax* (nitrate_concentration / (ks+ nitrate_concentration)),
              data= .,  start=list(ks = 1, umax = 1), algorithm="port", lower=list(c=0.01, d=0),
              control = nls.control(maxiter=500, minFactor=1/204800000))))
#' get the fitted values
#' Predict Monod growth rates over a fine nitrate gradient.
#'
#' @param df A data frame holding fitted Monod parameters in columns
#'   `umax` and `ks`; only the first element of each column is used.
#' @return A data.frame with columns `nitrate_concentration.x` (0 to 1000
#'   umol N in steps of 0.1) and `growth_rate` (umax * N / (ks + N)).
#'   Column names match what the downstream plotting code expects.
prediction_function <- function(df) {
  umax <- df$umax[[1]]
  ks <- df$ks[[1]]
  # The Monod expression is vectorized, so compute the whole curve at once
  # (the original wrapped it in a redundant helper and sapply'd over all
  # 10001 points one at a time).
  nitrate_concentration.x <- seq(0, 1000, by = 0.1)
  growth_rate <- umax * (nitrate_concentration.x / (ks + nitrate_concentration.x))
  data.frame(nitrate_concentration.x, growth_rate)
}
# One data frame of umax/ks per population, split into a named list so the
# Monod curve can be predicted per population via prediction_function.
bs_split <- monod_fits %>%
  select(population, term, estimate) %>%
  dplyr::ungroup() %>%
  spread(key = term, value = estimate) %>%
  split(.$population)
all_preds_n <- bs_split %>% ### here we just use the fitted parameters from the Monod to get the predicted values
  map_df(prediction_function, .id = "population")
all_predsn_2 <- left_join(all_preds_n, treatments, by = c("population")) %>%
  distinct(ancestor_id, treatment, nitrate_concentration.x, .keep_all = TRUE)
all_growth_n2 <- left_join(growth_rates, treatments) ## changed this to the 'cut' version. can switch back later
#+ fig.width = 12, fig.height = 8
# Observed growth rates with fitted Monod curves, faceted by selection
# treatment x ancestor; the dotted line marks the assumed mortality rate 0.1.
all_growth_n2 %>%
  # mutate(estimate = mu) %>%
  mutate(nitrate_concentration = as.numeric(nitrate_concentration)) %>%
  # filter(treatment == "N", ancestor_id == "anc3") %>%
  ggplot(aes(x= nitrate_concentration, y= estimate)) +
  geom_point() +
  # geom_point(aes(x = nitrate_concentration.x, y = estimate), data = filter(nitrate_eyeball_exp, population == 27), color = "blue") +
  geom_line(data=all_predsn_2, aes(x=nitrate_concentration.x, y=growth_rate, color = treatment), size = 1) +
  facet_grid(treatment ~ ancestor_id) +
  geom_hline(yintercept = 0.1, linetype = "dotted") +
  ylab("Exponential growth rate (/day)") + xlab("Nitrate concentration (uM)")
# Wide table of Monod parameters: one row per population, columns ks and umax.
monod_wide <- monod_fits %>%
  select(population, term, estimate) %>%
  spread(key = term, value = estimate)
m <- 0.1 ## set mortality rate, which we use in the rstar_solve
#' Net per-capita growth rate under the Monod model minus mortality.
#'
#' Computes umax * N / (ks + N) - m; its root in N is R*, the nitrate
#' concentration at which growth exactly balances mortality.
#'
#' @param nitrate_concentration Nitrate concentration(s), umol N (vectorized).
#' @param umax Maximum growth rate.
#' @param ks Half-saturation constant.
#' @param m Mortality rate; defaults to 0.1 so existing calls are unchanged.
#'   Previously 0.1 was hard-coded here, duplicating the global `m` used in
#'   the analytical R* (ks*m/(umax-m)); parameterizing keeps the numerical
#'   and analytical solutions consistent if the mortality assumption changes.
#' @return Net growth rate(s), same length as `nitrate_concentration`.
monod_curve_mortality <- function(nitrate_concentration, umax, ks, m = 0.1){
  res <- (umax* (nitrate_concentration / (ks+ nitrate_concentration))) - m
  res
}
#' Find R*
# R* two ways: numerically (root of net growth on [0, 50] via
# rootSolve::uniroot.all) and analytically (ks*m/(umax-m)); they should agree.
rstars <- monod_wide %>%
  mutate(rstar = uniroot.all(function(x) monod_curve_mortality(x, umax, ks), c(0.0, 50))) %>% ## numerical
  mutate(rstar_solve = ks*m/(umax-m)) ## analytical
rstars2 <- left_join(rstars, treatments, by = "population") %>%
  distinct(population, ks, umax, .keep_all = TRUE)
#+ fig.width = 6, fig.height = 4
# Treatment means +/- SE of analytical R*, overlaid with the per-population
# numerical R* colored by ancestor.
# NOTE(review): summarise_each()/funs() are deprecated dplyr APIs; they still
# run but should migrate to summarise(across(...)).
rstars2 %>%
  group_by(treatment) %>%
  summarise_each(funs(mean, std.error), rstar_solve) %>%
  ggplot(aes(x = reorder(treatment, mean), y = mean)) + geom_point() +
  geom_errorbar(aes(ymin = mean - std.error, ymax = mean + std.error),width = 0.1) +
  ylab("R* (umol N)") + xlab("Selection treatment") + geom_point(aes(x = reorder(treatment, rstar), y = rstar, color = ancestor_id), size = 2, data = rstars2, alpha = 0.5) +
  scale_color_discrete(name = "Ancestor")
# NOTE(review): three ggsave calls in a row save the SAME last plot under
# three different filenames.
ggsave("figures/r-star-means-exp-cutoff.png", width = 6, height = 4)
ggsave("figures/r-star-means-exp-max-r.png", width = 6, height = 4)
ggsave("figures/r-star-means-exp-max-r-log.png", width = 6, height = 4)
# ok now try one more thing ----------------------------------------------
### fit the exponential model to different time points, and find when the growth rate declines.
nitrate_exponential_growth_rates <- nitrate_with_cutoffs %>%
filter(population != "COMBO") %>%
group_by(population, nitrate_concentration, well_plate) %>%
mutate(N0 = RFU[[1]]) %>%
group_by(nitrate_concentration, population, well_plate) %>%
do(tidy(nls(RFU ~ N0 * exp(r*days),
data= ., start=list(r=0.01),
control = nls.control(maxiter=100, minFactor=1/204800000)))) %>%
ungroup()
n2 <- nitrate %>%
filter(population != "COMBO") %>%
group_by(population, nitrate_concentration, well_plate) %>%
mutate(N0 = RFU[[1]])
#' Fit an exponential growth model to the first `x` time points of each well.
#'
#' Uses the global data frame `n2` (per-well RFU time series with an `N0`
#' column holding the first observation). `top_n(n = -x, wt = days)` keeps the
#' `x` earliest time points within each existing group of `n2`.
#'
#' @param x Integer; number of earliest time points to fit per well.
#' @return A tibble of tidied per-well `nls` fits of RFU ~ N0 * exp(r * days),
#'   tagged with `number_of_points = x`.
fitting_window <- function(x) {
  # N0 is fixed per well, so r is the only free parameter; the tiny minFactor
  # keeps nls from giving up on nearly-flat series. (The old dead local
  # `growth_rates` made the return value invisible; it is returned directly
  # now.)
  n2 %>%
    top_n(n = -x, wt = days) %>%
    group_by(nitrate_concentration, population, well_plate) %>%
    do(tidy(nls(RFU ~ N0 * exp(r*days),
                data= ., start=list(r=0.01),
                control = nls.control(maxiter=100, minFactor=1/204800000)))) %>%
    mutate(number_of_points = x) %>%
    ungroup()
}
#' Fit a log-linear growth model (lm of log(RFU) on days) to the first `x`
#' time points of each well in the global data frame `n2`.
#'
#' @param x Integer; number of earliest time points (by `days`) to keep.
#' @return A tibble of tidied per-well lm fits (the "days" term is the
#'   exponential growth rate), tagged with `number_of_points = x`.
fitting_window_log <- function(x) {
  # (The old dead local `growth_rates` made the return value invisible; the
  # pipeline result is returned directly now.)
  n2 %>%
    top_n(n = -x, wt = days) %>%
    group_by(nitrate_concentration, population, well_plate) %>%
    do(tidy(lm(log(RFU) ~ days, data = .))) %>%
    mutate(number_of_points = x) %>%
    ungroup()
}
## Refit the exponential over sliding windows of the 4..7 earliest points.
windows <- seq(4,7, by = 1)
multi_fits <- windows %>%
  map_df(fitting_window, .id = "iteration")
multi_fits_log <- windows %>%
  map_df(fitting_window_log, .id = "iteration")
## How does the estimated growth rate change as more points are included?
multi_fits %>%
  ggplot(aes(x = number_of_points, y = estimate, group = well_plate)) + geom_point() + geom_line() +
  facet_wrap( ~ nitrate_concentration)
multi_fits_log %>%
  filter(term == "days") %>%
  ggplot(aes(x = number_of_points, y = estimate, group = well_plate)) + geom_point() + geom_line() +
  facet_wrap( ~ nitrate_concentration)
## Keep, per well, the window that maximizes the estimated growth rate.
exp_fits_top <- multi_fits %>%
  group_by(well_plate) %>%
  top_n(n = 1, wt = estimate)
exp_fits_log <- multi_fits_log %>%
  filter(term == "days") %>%
  group_by(well_plate) %>%
  top_n(n = 1, wt = estimate) %>%
  mutate(well_plate_points = paste(well_plate, number_of_points, sep = "_")) %>%
  filter(nitrate_concentration > 40)
## BUG(review): `windows` is 4:7 at this point, so multi_fits_log contains no
## number_of_points == 3 rows and this filter yields an empty table -- it only
## works after re-running with the later `windows <- seq(3,7, by = 1)`.
exp_fits_log_less_40 <- multi_fits_log %>%
  filter(term == "days", nitrate_concentration <= 40) %>%
  filter(number_of_points == 3) %>%
  group_by(well_plate) %>%
  top_n(n = 1, wt = estimate) %>%
  mutate(well_plate_points = paste(well_plate, number_of_points, sep = "_"))
exp_fits_all <- bind_rows(exp_fits_log, exp_fits_log_less_40)
#' Like fitting_window_log(), but returns broom::augment() output (fitted
#' values per observation) instead of tidied coefficients, for plotting the
#' per-window regression lines.
#'
#' @param x Integer; number of earliest time points (by `days`) to keep.
#' @return A tibble with one row per retained observation, including
#'   `.fitted` log-RFU values, tagged with `number_of_points = x`.
fitting_window_log_augment <- function(x) {
  # (The old dead local `growth_rates` made the return value invisible; the
  # pipeline result is returned directly now.)
  n2 %>%
    top_n(n = -x, wt = days) %>%
    group_by(nitrate_concentration, population, well_plate) %>%
    do(augment(lm(log(RFU) ~ days, data = .))) %>%
    mutate(number_of_points = x) %>%
    ungroup()
}
## Rebuild the windows (now including 3 points) and get fitted values for
## plotting the per-window regression lines.
windows <- seq(3,7, by = 1)
multi_fits_log_augment <- windows %>%
  map_df(fitting_window_log_augment, .id = "iteration")
## Keep only each well's best window (as chosen in exp_fits_log above).
multi_fits_log_augment2 <- left_join(multi_fits_log_augment, treatments) %>%
  mutate(well_plate_points = paste(well_plate, number_of_points, sep = "_")) %>%
  filter(well_plate_points %in% c(exp_fits_log$well_plate_points))
## Raw trajectories for one ancestor under N selection (sanity check).
left_join(nitrate, treatments, by = "population") %>%
  filter(treatment == "N", ancestor_id == "anc3") %>%
  ggplot(aes(x = days, y = RFU)) + geom_point() +
  facet_wrap(~ nitrate_concentration, scales = "free")
multi_fits_log_augment2 %>%
  filter(treatment == "N", ancestor_id == "anc3") %>%
  ggplot(aes(x = days, y = .fitted, group = well_plate, color = factor(number_of_points))) + geom_line() +
  geom_point(aes(x = days, y = log.RFU., color = factor(number_of_points)), data = filter(multi_fits_log_augment2, treatment == "N", ancestor_id == "anc3")) + ylab("ln(RFU)") +
  facet_grid(population ~ nitrate_concentration)
## NOTE(review): both ggsave() calls write the same (last) plot.
ggsave("figures/N_anc3_trajectories_exp.png", width = 20, height = 20)
ggsave("figures/log_exponential_max_r_trajectories_exp_nitrate.png", width = 30, height = 40)
| /R-scripts/08_nitrate_growth_monod.R | permissive | JoeyBernhardt/chlamee-r-star | R | false | false | 24,281 | r |
#' ---
#' title: "Chlamee nitrate analysis"
#' author: "Joey"
#' ---
#+
# knitr chunk defaults for the spun (knitr::spin) report version of this script.
knitr::opts_chunk$set(echo = TRUE)
knitr::opts_chunk$set(message = FALSE)
knitr::opts_chunk$set(warning = FALSE)
knitr::opts_chunk$set(cache = TRUE)
#+ knitr::opts_chunk$set(message = FALSE)
library(tidyverse)
library(cowplot)
library(broom)
library(readxl)
library(janitor)
library(plotrix)
library(here)
library(growthTools)
library(rootSolve)
#' Read in data
# Selection-treatment metadata: recode missing treatments as "none" and drop
# population cc1629 (excluded from the analysis -- TODO confirm why).
treatments <- read_excel(here("data-general", "ChlamEE_Treatments_JB.xlsx")) %>%
  clean_names() %>%
  mutate(treatment = ifelse(is.na(treatment), "none", treatment)) %>%
  filter(population != "cc1629")
# Plate-reader RFU time series across the nitrate gradient.
nitrate <- read_csv(here("data-processed", "nitrate-abundances-processed.csv"))
#' this is the step that gets us the growth rate estimates
## growthTools::get.growth.rate() fits several candidate growth models
## (exponential, lagged, saturating, lag+sat) to each well's log-RFU series
## and picks the best; first with the default AICc criterion...
growth_rates_n_AICc <- nitrate %>%
  filter(population != "COMBO") %>%
  mutate(ln.fluor = log(RFU)) %>%
  group_by(well_plate) %>%
  do(grs = get.growth.rate(x = .$days, y = .$ln.fluor,id = .$well_plate, plot.best.Q = F))
#' Get growth rates via AIC
## ...then again with plain AIC, to compare model choices.
growth_rates_n_AIC <- nitrate %>%
  filter(population != "COMBO") %>%
  mutate(ln.fluor = log(RFU)) %>%
  group_by(well_plate) %>%
  do(grs = get.growth.rate(x = .$days, y = .$ln.fluor,id = .$well_plate, plot.best.Q = F, model.selection = "AIC"))
#' Pull out the things we want
growth_sum_n_AICc <- growth_rates_n_AICc %>%
  summarise(well_plate, mu = grs$best.slope, n_obs = grs$best.model.slope.n,
            slope_r2 = grs$best.model.slope.r2,
            best.model_r2 = grs$best.model.rsqr,
            best.model = grs$best.model, best.se = grs$best.se,
            contents = grs$best.model.contents)
growth_sum_n_AIC <- growth_rates_n_AIC %>%
  summarise(well_plate, mu = grs$best.slope, n_obs = grs$best.model.slope.n,
            slope_r2 = grs$best.model.slope.r2,
            best.model_r2 = grs$best.model.rsqr,
            best.model = grs$best.model, best.se = grs$best.se,
            contents = grs$best.model.contents)
## Side-by-side AICc (.x) vs AIC (.y) model choices per well.
all_models <- left_join(growth_sum_n_AICc, growth_sum_n_AIC, by = "well_plate")
## now try something different, and pull out all the models that were best fit by gr.sat, gr.lagsat
growth_sum_n_AIC_saturated <- growth_rates_n_AIC %>%
  summarise(well_plate, mu = grs$best.slope, n_obs = grs$best.model.slope.n,
            slope_r2 = grs$best.model.slope.r2,
            best.model_r2 = grs$best.model.rsqr,
            best.model = grs$best.model, best.se = grs$best.se,
            contents = grs$best.model.contents) %>%
  filter(best.model %in% c("gr.sat", "gr.lagsat"))
#' 1099 out of 1480 are fit differently with AIC and AICc and picked by AIC as a lagsat or sat.
#' For these we will pull out the points before the saturated phase, and fit an exponential model.
all_models %>%
  filter(best.model.x != best.model.y) %>%
  filter(best.model.y %in% c("gr.lagsat","gr.sat")) %>%
  tally() %>%
  knitr::kable()
mismatches <- all_models %>%
  filter(best.model.x != best.model.y) %>%
  filter(best.model.y %in% c("gr.lagsat","gr.sat"))
## Lookup of well_plate -> population / nitrate concentration.
key <- nitrate %>%
  select(well_plate, population, nitrate_concentration) %>%
  distinct(well_plate, .keep_all = TRUE)
AICc_growth_rates <- growth_sum_n_AICc %>%
  select(-contents) %>%
  mutate(IC_method = "AICc")
AICc_growth_rates2 <- left_join(AICc_growth_rates, key, by = "well_plate")
# write_csv(AICc_growth_rates2, here("data-processed", "nitrate_exp_growth_w_growthtools_AICc.csv"))
exp_params <- growth_sum_n_AICc %>%
  unnest(contents %>% map(tidy, .id = "number")) ## pull out the slopes and intercepts etc.
## Cutoff = the day at which the saturated phase begins: breakpoint B1 for
## gr.sat fits, second breakpoint B2 for gr.lagsat fits.
sat_models <- growth_sum_n_AIC %>%
  unnest(contents %>% map(tidy, .id = "number")) %>%
  # filter(well_plate %in% c(mismatches$well_plate)) %>%
  filter(best.model == "gr.sat", term == "B1") %>%
  rename(cutoff_point = estimate) %>%
  select(well_plate, cutoff_point, best.model)
lag_sat_models <- growth_sum_n_AIC %>%
  unnest(contents %>% map(tidy, .id = "number")) %>%
  # filter(well_plate %in% c(mismatches$well_plate)) %>%
  filter(best.model == "gr.lagsat", term == "B2") %>%
  rename(cutoff_point = estimate) %>%
  select(well_plate, cutoff_point, best.model)
## Wells without a saturating phase get a sentinel cutoff of 20 days --
## presumably beyond the last sampling day, so all points are kept (confirm).
exponential_lag_models <- growth_sum_n_AIC %>%
  unnest(contents %>% map(tidy, .id = "number")) %>%
  filter(!best.model %in% c("gr.sat", "gr.lagsat")) %>%
  # filter(!well_plate %in% c(mismatches$well_plate)) %>%
  distinct(well_plate, .keep_all = TRUE) %>%
  mutate(cutoff_point = 20) %>%
  select(well_plate, cutoff_point, best.model)
all_cutoffs <- bind_rows(sat_models, lag_sat_models, exponential_lag_models)
## NOTE(review): View() is interactive-only; this line fails (or is useless)
## in a non-interactive render of the script.
all_cutoffs %>%
  filter(cutoff_point < 20) %>% View
nitrate_cutoffs <- left_join(nitrate, all_cutoffs, by = "well_plate")
## Visual check of wells with very early cutoffs at low nitrate.
nitrate_cutoffs %>%
  filter(cutoff_point < 2, nitrate_concentration < 10) %>%
  ggplot(aes(x = days, y = RFU, color = best.model)) + geom_point() + geom_point(aes(x = cutoff_point, y = 0), size = 3, color = "red") +
  facet_wrap( ~ well_plate)
#' now trim the time series of nitrate and force fit an exponential model
nitrate_with_cutoffs <- left_join(nitrate, all_cutoffs, by = "well_plate") %>%
  filter(days < cutoff_point)
### fit an exponential model
## Per-well exponential fit (N0 fixed at the first reading) on the trimmed
## series; tidy() keeps the r estimate per well.
nitrate_exponential_growth_rates <- nitrate_with_cutoffs %>%
  filter(population != "COMBO") %>%
  group_by(population, nitrate_concentration, well_plate) %>%
  mutate(N0 = RFU[[1]]) %>%
  group_by(nitrate_concentration, population, well_plate) %>%
  do(tidy(nls(RFU ~ N0 * exp(r*days),
              data= ., start=list(r=0.01),
              control = nls.control(maxiter=100, minFactor=1/204800000)))) %>%
  ungroup()
library(nls.multstart)
# try again exponential ---------------------------------------------------
## Same model refit with nls_multstart (500 random starts, r bounded in
## [0, 5]) for robustness to bad starting values.
ldata_n0 <- nitrate_with_cutoffs %>%
  group_by(well_plate) %>%
  mutate(N0 = RFU[[1]]) %>%
  ungroup()
fits_many_n0 <- ldata_n0 %>%
  group_by(population, well_plate) %>%
  nest() %>%
  mutate(fit = purrr::map(data, ~ nls_multstart(RFU ~ N0 * exp(r*days),
                                                data = .x,
                                                iter = 500,
                                                start_lower = c(r = 0.2),
                                                start_upper = c(r = 1),
                                                supp_errors = 'N',
                                                na.action = na.omit,
                                                lower = c(r = 0),
                                                upper = c(r = 5),
                                                control = nls.control(maxiter=1000, minFactor=1/204800000))))
fits_many <- fits_many_n0
fits_well_plate <- fits_many %>%
  select(well_plate)
## NOTE(review): re-definition of the `key` lookup built earlier (identical).
key <- nitrate %>%
  select(well_plate, population, nitrate_concentration) %>%
  distinct(well_plate, .keep_all = TRUE)
pops <- left_join(fits_well_plate, key)
## Fit diagnostics (one row of glance() output per well).
info <- fits_many %>%
  unnest(fit %>% map(glance))
# get params
## NOTE(review): `fit != "NULL"` compares a list column against the string
## "NULL" -- presumably intended as !map_lgl(fit, is.null); confirm it
## actually drops failed fits.
params <- fits_many %>%
  filter(fit != "NULL") %>%
  unnest(fit %>% map(tidy))
## 95% confidence intervals on r (confint2 from nlstools).
CI <- fits_many %>%
  filter(fit != "NULL") %>%
  unnest(fit %>% map(~ confint2(.x) %>%
                       data.frame() %>%
                       rename(., conf.low = X2.5.., conf.high = X97.5..))) %>%
  group_by(., well_plate) %>%
  mutate(., term = c('r')) %>%
  ungroup()
# merge parameters and CI estimates
params <- merge(params, CI, by = intersect(names(params), names(CI)))
write_csv(params, here("data-processed", "exponential_params_cutoff_approach.csv"))
# get predictions
preds <- fits_many %>%
  unnest(fit %>% map(augment))
## One common prediction grid spanning the full day range of the trimmed data.
new_preds <- ldata_n0 %>%
  do(., data.frame(days = seq(min(.$days), max(.$days), length.out = 150), stringsAsFactors = FALSE))
# max and min for each curve
max_min <- group_by(ldata_n0, well_plate) %>%
  summarise(., min_days = min(days), max_days = max(days)) %>%
  ungroup()
# create new predictions
## Predict on the grid, then clip each well's curve to its own observed day
## range before plotting; `.fitted` is renamed to RFU for plotting.
preds2 <- fits_many %>%
  unnest(fit %>% map(augment, newdata = new_preds)) %>%
  merge(., max_min, by = 'well_plate') %>%
  group_by(., well_plate) %>%
  filter(., days > unique(min_days) & days < unique(max_days)) %>%
  rename(., RFU = .fitted) %>%
  ungroup()
key <- ldata_n0 %>%
  select(well_plate, population, nitrate_concentration) %>%
  distinct(well_plate, .keep_all = TRUE)
preds3 <- left_join(preds2, key, by = c("well_plate", "population"))
## Observed points with fitted exponential curves, one facet per population.
ggplot() +
  geom_point(aes(days, RFU, color = factor(nitrate_concentration)), size = 2, data = ldata_n0) +
  geom_line(aes(days, RFU, group = well_plate, color = factor(nitrate_concentration)), data = preds3) +
  facet_wrap( ~ population, scales = "free") +
  scale_color_viridis_d() +
  ylab('RFU') +
  xlab('Days')
ggsave("figures/nitrate_rfus_exponential_n0.pdf", width = 40, height = 35)
# Again -------------------------------------------------------------------
## Same multstart exponential fit, this time nested per nitrate concentration
## as well as per well.
nitrate_with_cutoffs2 <- nitrate_with_cutoffs %>%
  filter(population != "COMBO") %>%
  group_by(population, nitrate_concentration, well_plate) %>%
  mutate(N0 = RFU[[1]])
exponential_fits <- nitrate_with_cutoffs2 %>%
  group_by(nitrate_concentration, population, well_plate) %>%
  nest() %>%
  mutate(fit = purrr::map(data, ~ nls_multstart(RFU ~ N0 * exp(r*days),
                                                data = .x,
                                                iter = 500,
                                                start_lower = c(r = 0.2),
                                                start_upper = c(r = 1),
                                                supp_errors = 'N',
                                                na.action = na.omit,
                                                lower = c(r = 0),
                                                upper = c(r = 5),
                                                control = nls.control(maxiter=1000, minFactor=1/204800000))))
## NOTE(review): fits_many/params/info/preds are overwritten from the previous
## section here -- run order matters.
fits_many <- exponential_fits
params <- fits_many %>%
  filter(fit != "NULL") %>%
  unnest(fit %>% map(tidy))
info <- fits_many %>%
  unnest(fit %>% map(glance))
preds <- fits_many %>%
  unnest(fit %>% map(augment))
## Per-well prediction grid of 10 evenly spaced days.
new_preds <- nitrate_with_cutoffs2 %>%
  distinct(RFU, nitrate_concentration, population, well_plate, .keep_all = TRUE) %>%
  group_by(well_plate, population) %>%
  do(., data.frame(days = seq(min(.$days), max(.$days), length.out = 10), stringsAsFactors = FALSE))
ngrowth <- nitrate_with_cutoffs2 %>%
  distinct(RFU, nitrate_concentration, population, days, .keep_all = TRUE)
max_min <- dplyr::group_by(ngrowth, population, well_plate) %>%
  summarise(., min_days = min(days), max_days = max(days)) %>%
  ungroup()
# create new predictions
## As before: predict, clip each curve to its own observed day range, and
## rename `.fitted` to RFU for plotting.
preds2 <- fits_many %>%
  unnest(fit %>% map(augment, newdata = new_preds)) %>%
  merge(., max_min, by = 'well_plate') %>%
  group_by(., well_plate) %>%
  filter(., days > unique(min_days) & days < unique(max_days)) %>%
  rename(., RFU = .fitted) %>%
  ungroup()
# Observed RFU trajectories (points) with the fitted exponential curves
# (lines) overlaid, faceted by population.
# Bug fix: preds2 renames `.fitted` to `RFU` just above, so the fitted column
# is `RFU`, not `RFU_fitted` (the old mapping errored with "object not found").
ggplot() +
  geom_point(aes(days, RFU), size = 2, data = ngrowth) +
  geom_line(aes(days, RFU, group = well_plate), data = preds2) +
  facet_wrap( ~ population) +
  ylab('RFU') +
  xlab('Days')
## Augmented (fitted-value) version of the trimmed exponential fits, evaluated
## on the `new_preds` day grid built above.
nitrate_exponential_growth_rates_fitted <- nitrate_with_cutoffs %>%
  filter(population != "COMBO") %>%
  group_by(population, nitrate_concentration, well_plate) %>%
  mutate(N0 = RFU[[1]]) %>%
  ungroup() %>%
  group_by(population, well_plate) %>%
  do(augment(nls(RFU ~ N0 * exp(r*days),
                 data= ., start=list(r=0.01),
                 control = nls.control(maxiter=100, minFactor=1/204800000)), newdata = new_preds)) %>%
  ungroup()
## NOTE(review): this re-definition of `new_preds` (a nitrate-concentration
## grid) appears unused below -- the previous (days) grid was already consumed.
new_preds <- nitrate_exponential_growth_rates %>%
  distinct(estimate, nitrate_concentration, population, .keep_all = TRUE) %>%
  group_by(population, well_plate) %>%
  do(., data.frame(nitrate_concentration = seq(min(.$nitrate_concentration), max(.$nitrate_concentration), length.out = 150), stringsAsFactors = FALSE))
nitrate_exponential_growth_rates_fitted %>%
  ggplot(aes(x = days, y = .fitted, group = well_plate, color = nitrate_concentration)) + geom_line() +
  geom_point(aes(x = days, y = RFU, color = nitrate_concentration), data = nitrate_exponential_growth_rates_fitted) +
  facet_wrap( ~ population) + scale_color_viridis_c()
## Crude alternative trim: call everything before day 2 "exponential".
nitrate_exp <- nitrate %>%
  mutate(exponential = case_when(days < 2 ~ "yes",
                                 TRUE ~ "no")) %>%
  filter(exponential == "yes")
## growthTools linear (log-linear) fit on the cutoff-trimmed data, for
## comparison with the direct nls exponential fits.
growth_rates_n_AICcut <- nitrate_with_cutoffs %>%
  # filter(cutoff_point < 20) %>%
  # nitrate_with_cutoffs %>%
  filter(population != "COMBO") %>%
  mutate(ln.fluor = log(RFU)) %>%
  group_by(well_plate) %>%
  do(grs = get.growth.rate(x = .$days, y = .$ln.fluor,id = .$well_plate, plot.best.Q = F, methods = "linear"))
nexp <- nitrate_exponential_growth_rates %>%
  select(estimate, population, well_plate) %>%
  rename(exp_growth = estimate)
grtools_exp <- growth_rates_n_AICcut %>%
  summarise(well_plate, estimate = grs$best.slope, n_obs = grs$best.model.slope.n,
            slope_r2 = grs$best.model.slope.r2,
            best.model_r2 = grs$best.model.rsqr,
            best.model = grs$best.model, best.se = grs$best.se,
            contents = grs$best.model.contents) %>%
  select(estimate, well_plate) %>%
  rename(gt_growth = estimate)
## If the two approaches agree, points fall on the 1:1 line.
all_exponential_models <- left_join(nexp, grtools_exp)
all_exponential_models %>%
  ggplot(aes(x = gt_growth, y = exp_growth)) + geom_point() +
  geom_abline(slope = 1, intercept = 0)
## Original AICc fits for wells whose model choice did NOT change under AIC.
exponential_lag_models_original <- growth_rates_n_AICc %>%
  filter(!well_plate %in% c(mismatches$well_plate))
# all_growth_rates_cut <- bind_rows(growth_rates_n_AICcut, exponential_lag_models_original)
growth_sum_n_AICcut <- growth_rates_n_AICcut %>%
  summarise(well_plate, mu = grs$best.slope, n_obs = grs$best.model.slope.n,
            slope_r2 = grs$best.model.slope.r2,
            best.model_r2 = grs$best.model.rsqr,
            best.model = grs$best.model, best.se = grs$best.se,
            contents = grs$best.model.contents)
## NOTE(review): `mods` is assigned but never used below.
mods <- nitrate_exponential_growth_rates
exp_params_cut <- growth_sum_n_AICcut %>%
  unnest(contents %>% map(tidy, .id = "number"))
exp_params_aug_cut <- growth_sum_n_AICcut %>%
  unnest(contents %>% map(augment, .id = "number")) %>% ### now add the fitted values
  rename(days = x)
exp_wide_cut <- exp_params_cut %>%
  spread(key = term, value = estimate)
all_preds <- left_join(exp_params_aug_cut, exp_wide_cut)
all_preds2 <- left_join(all_preds, key, by = "well_plate")
all_preds_exp <- left_join(all_preds, key, by = "well_plate") ### this is now the version with the cutoff exponentials
# %>%
# group_by(well_plate) %>%
# mutate(B1 = mean(B1, na.rm = TRUE)) %>%
# mutate(B2 = mean(B2, na.rm = TRUE)) %>% ### here we do some wrangling to get the colour coding right for our plots
# mutate(exponential = case_when(best.model == "gr" ~ "yes",
# best.model == "gr.sat" & days < B1 ~ "yes",
# best.model == "gr.lag" & days < B1 ~ "yes",
# best.model == "gr.lagsat" & days < B2 & days > B1 ~ "yes",
# TRUE ~ "no"))
# all_preds2 %>%
# ggplot(aes(x = days, y = .fitted, group = well_plate, color = best.model)) + geom_line() +
# geom_point(aes(x = days, y = y)) +
# facet_grid( ~ well_plate) + ylab("Ln(RFU)") +xlab("Days")
## NOTE(review): with the plot above commented out, this ggsave() writes
## whichever plot was drawn last.
ggsave("figures/all_exponential_nitrate_wells.png", width = 45, height = 20)
### this doesn't look like it's working, because it's still picking up time points that are clearly in the sat phase.
all_preds_exp %>%
  ggplot(aes(x = days, y = .fitted, group = well_plate, color = best.model)) + geom_line() +
  geom_point(aes(x = days, y = y)) +
  facet_grid(nitrate_concentration ~ population) + ylab("Ln(RFU)") +xlab("Days")
ggsave("figures/all_exponential_nitrate_exp_sat_cut.png", width = 45, height = 20)
#+ fig.width=45, fig.height=20
## NOTE(review): `exponential` is only created in the commented-out wrangling
## above, so the shape aesthetic here errors unless that block is restored.
all_preds2 %>%
  ggplot(aes(x = days, y = .fitted, group = well_plate, color = best.model)) + geom_line() +
  geom_point(aes(x = days, y = y, shape = exponential)) +
  facet_grid(nitrate_concentration ~ population) + ylab("Ln(RFU)") +xlab("Days")
AICcut_growth_rates2 <- left_join(growth_sum_n_AICcut, key, by = "well_plate")
AICcut_growth_rates2 %>%
  mutate(nitrate_concentration = as.numeric(nitrate_concentration)) %>%
  rename(estimate = mu) %>%
  ggplot(aes(x = nitrate_concentration, y = estimate)) + geom_point() + facet_wrap(~population)
# Monod fits --------------------------------------------------------------
## CAUTION(review): `growth_rates` is re-assigned repeatedly below; only the
## FINAL assignment (the AIC csv read near the end) feeds the Monod fit. The
## intermediate versions are alternative growth-rate definitions kept from
## exploration.
growth_rates <- AICcut_growth_rates2
growth_rates <- left_join(params, key, by = c("well_plate", "population")) %>%
  select(population, well_plate, estimate) %>%
  rename(exp_mult = estimate)
growth_rates_exp <- left_join(nitrate_exponential_growth_rates, key) %>%
  # select(population, well_plate, estimate) %>%
  rename(exp = estimate)
nitrate_exp <- nitrate %>%
  mutate(exponential = case_when(days < 2 ~ "yes",
                                 TRUE ~ "no")) %>%
  filter(exponential == "yes") %>%
  group_by(population, nitrate_concentration, well_plate) %>%
  mutate(N0 = RFU[[1]])
growth_rates <- nitrate_exp %>%
  filter(population != "COMBO") %>%
  group_by(nitrate_concentration, population, well_plate) %>%
  do(tidy(nls(RFU ~ N0 * exp(r*days),
              data= ., start=list(r=0.01),
              control = nls.control(maxiter=100, minFactor=1/204800000)))) %>%
  ungroup()
growth_rates <- exp_fits_top
growth_rates <- exp_fits_log
growth_rates <- exp_fits_all
growth_rates <- growth_rates_exp
## BUG(review): `nitrate_eyeball_exp` is not defined until two statements
## below, so this line errors on a clean top-to-bottom run of the script.
growth_rates <- nitrate_eyeball_exp %>%
  rename(nitrate_concentration = nitrate_concentration.x)
growth_rates <- read_csv(here("data-processed", "nitrate_exp_growth_w_growthtools_AIC.csv")) %>%
  filter(population != "COMBO") %>%
  rename(estimate = mu)
nitrate_eyeball_exp <- read_csv(here("data-processed", "nitrate_exp_growth_eyeball.csv")) %>%
  distinct(population, nitrate_concentration.x, well_plate.x, .keep_all = TRUE)# left_join(growth_rates, growth_rates_exp) %>% ok so these are the same, which is good.
#' Fit the Monod model to the growth rates
## Per-population nls fit of estimate ~ umax * C / (ks + C).
## NOTE(review): the `lower` bounds are named c/d but the parameters are
## ks/umax -- presumably copied from another script; confirm the intended
## bounds actually apply.
monod_fits <- growth_rates %>%
  # AICc_growth_rates2 %>%
  mutate(nitrate_concentration = as.numeric(nitrate_concentration)) %>%
  # rename(estimate = mu) %>%
  group_by(population) %>%
  do(tidy(nls(estimate ~ umax* (nitrate_concentration / (ks+ nitrate_concentration)),
              data= ., start=list(ks = 1, umax = 1), algorithm="port", lower=list(c=0.01, d=0),
              control = nls.control(maxiter=500, minFactor=1/204800000))))
#' get the fitted values
#' Predict Monod growth rates over a fine nitrate gradient.
#'
#' @param df A data frame whose first row carries the fitted Monod parameters
#'   in columns `umax` (maximal growth rate) and `ks` (half-saturation const.).
#' @return A data.frame with columns `nitrate_concentration.x` (0 to 1000 uM
#'   in steps of 0.1) and `growth_rate` (the Monod prediction at each value).
prediction_function <- function(df) {
  umax <- df$umax[[1]]
  ks <- df$ks[[1]]
  nitrate <- seq(0, 1000, by = 0.1)
  # The Monod expression is vectorized, so no per-element sapply() loop (or
  # the old pred()/monodcurve() wrapper closures) is needed.
  data.frame(
    nitrate_concentration.x = nitrate,
    growth_rate = umax * (nitrate / (ks + nitrate))
  )
}
## Split the Monod coefficients into one wide (ks, umax) data frame per
## population; each list element feeds prediction_function() above.
bs_split <- monod_fits %>%
  select(population, term, estimate) %>%
  dplyr::ungroup() %>%
  spread(key = term, value = estimate) %>%
  split(.$population)
all_preds_n <- bs_split %>% ### here we just use the fitted parameters from the Monod to get the predicted values
  map_df(prediction_function, .id = "population")
## Attach selection-treatment metadata; distinct() keeps one predicted curve
## per ancestor x treatment x concentration combination.
all_predsn_2 <- left_join(all_preds_n, treatments, by = c("population")) %>%
  distinct(ancestor_id, treatment, nitrate_concentration.x, .keep_all = TRUE)
all_growth_n2 <- left_join(growth_rates, treatments) ## changed this to the 'cut' version. can switch back later
#+ fig.width = 12, fig.height = 8
## Observed growth rates (points) with fitted Monod curves (lines), faceted by
## treatment x ancestor; the dotted line marks the assumed mortality 0.1/day.
all_growth_n2 %>%
  # mutate(estimate = mu) %>%
  mutate(nitrate_concentration = as.numeric(nitrate_concentration)) %>%
  # filter(treatment == "N", ancestor_id == "anc3") %>%
  ggplot(aes(x= nitrate_concentration, y= estimate)) +
  geom_point() +
  # geom_point(aes(x = nitrate_concentration.x, y = estimate), data = filter(nitrate_eyeball_exp, population == 27), color = "blue") +
  geom_line(data=all_predsn_2, aes(x=nitrate_concentration.x, y=growth_rate, color = treatment), size = 1) +
  facet_grid(treatment ~ ancestor_id) +
  geom_hline(yintercept = 0.1, linetype = "dotted") +
  ylab("Exponential growth rate (/day)") + xlab("Nitrate concentration (uM)")
## Wide table of Monod parameters (ks, umax), one row per population.
monod_wide <- monod_fits %>%
  select(population, term, estimate) %>%
  spread(key = term, value = estimate)
m <- 0.1 ## set mortality rate, which we use in the rstar_solve
#' Monod growth rate net of mortality.
#'
#' Returns the Monod-predicted per-capita growth rate minus a constant
#' mortality rate; its root in nitrate concentration is R*.
#'
#' @param nitrate_concentration Nitrate concentration (uM); may be a vector.
#' @param umax Maximal growth rate (/day).
#' @param ks Half-saturation constant (uM).
#' @param m Mortality rate (/day). Defaults to 0.1, matching the global `m`
#'   used by the analytical R* (previously hard-coded as 0.1 in the body,
#'   which silently ignored the global setting).
monod_curve_mortality <- function(nitrate_concentration, umax, ks, m = 0.1){
  res <- (umax* (nitrate_concentration / (ks+ nitrate_concentration))) - m
  res
}
#' Find R*
## R* = the nitrate concentration at which Monod growth exactly balances
## mortality, computed two ways as a cross-check: numerically via
## rootSolve::uniroot.all() and analytically as ks*m/(umax - m).
## NOTE(review): mutate() is not rowwise here, so uniroot.all() sees the whole
## umax/ks columns at once -- confirm it recycles per row as intended; the
## analytical rstar_solve column is the safer of the two.
rstars <- monod_wide %>%
  mutate(rstar = uniroot.all(function(x) monod_curve_mortality(x, umax, ks), c(0.0, 50))) %>% ## numerical
  mutate(rstar_solve = ks*m/(umax-m)) ## analytical
rstars2 <- left_join(rstars, treatments, by = "population") %>%
  distinct(population, ks, umax, .keep_all = TRUE)
#+ fig.width = 6, fig.height = 4
## Treatment-level mean R* (+/- SE) with per-population values overlaid.
## summarise_each()/funs() are deprecated dplyr; across() is the modern form.
rstars2 %>%
  group_by(treatment) %>%
  summarise_each(funs(mean, std.error), rstar_solve) %>%
  ggplot(aes(x = reorder(treatment, mean), y = mean)) + geom_point() +
  geom_errorbar(aes(ymin = mean - std.error, ymax = mean + std.error),width = 0.1) +
  ylab("R* (umol N)") + xlab("Selection treatment") + geom_point(aes(x = reorder(treatment, rstar), y = rstar, color = ancestor_id), size = 2, data = rstars2, alpha = 0.5) +
  scale_color_discrete(name = "Ancestor")
## NOTE(review): all three ggsave() calls save the SAME last plot under three
## names -- presumably each was run after rebuilding the plot from a different
## growth_rates version; confirm before relying on these figures.
ggsave("figures/r-star-means-exp-cutoff.png", width = 6, height = 4)
ggsave("figures/r-star-means-exp-max-r.png", width = 6, height = 4)
ggsave("figures/r-star-means-exp-max-r-log.png", width = 6, height = 4)
# ok now try one more thing ----------------------------------------------
### fit the exponential model to different time points, and find when the growth rate declines.
## Exponential fit on the cutoff-trimmed series: N0 is pinned to the first RFU
## reading of each well, leaving r as the single free nls parameter.
nitrate_exponential_growth_rates <- nitrate_with_cutoffs %>%
  filter(population != "COMBO") %>%
  group_by(population, nitrate_concentration, well_plate) %>%
  mutate(N0 = RFU[[1]]) %>%
  group_by(nitrate_concentration, population, well_plate) %>%
  do(tidy(nls(RFU ~ N0 * exp(r*days),
              data= ., start=list(r=0.01),
              control = nls.control(maxiter=100, minFactor=1/204800000)))) %>%
  ungroup()
## n2 = UNtrimmed per-well series with N0 attached; consumed by the
## fitting_window* helpers below.
n2 <- nitrate %>%
  filter(population != "COMBO") %>%
  group_by(population, nitrate_concentration, well_plate) %>%
  mutate(N0 = RFU[[1]])
#' Fit an exponential growth model to the first `x` time points of each well.
#'
#' Uses the global data frame `n2` (per-well RFU time series with an `N0`
#' column holding the first observation). `top_n(n = -x, wt = days)` keeps the
#' `x` earliest time points within each existing group of `n2`.
#'
#' @param x Integer; number of earliest time points to fit per well.
#' @return A tibble of tidied per-well `nls` fits of RFU ~ N0 * exp(r * days),
#'   tagged with `number_of_points = x`.
fitting_window <- function(x) {
  # N0 is fixed per well, so r is the only free parameter; the tiny minFactor
  # keeps nls from giving up on nearly-flat series. (The old dead local
  # `growth_rates` made the return value invisible; it is returned directly
  # now.)
  n2 %>%
    top_n(n = -x, wt = days) %>%
    group_by(nitrate_concentration, population, well_plate) %>%
    do(tidy(nls(RFU ~ N0 * exp(r*days),
                data= ., start=list(r=0.01),
                control = nls.control(maxiter=100, minFactor=1/204800000)))) %>%
    mutate(number_of_points = x) %>%
    ungroup()
}
#' Fit a log-linear growth model (lm of log(RFU) on days) to the first `x`
#' time points of each well in the global data frame `n2`.
#'
#' @param x Integer; number of earliest time points (by `days`) to keep.
#' @return A tibble of tidied per-well lm fits (the "days" term is the
#'   exponential growth rate), tagged with `number_of_points = x`.
fitting_window_log <- function(x) {
  # (The old dead local `growth_rates` made the return value invisible; the
  # pipeline result is returned directly now.)
  n2 %>%
    top_n(n = -x, wt = days) %>%
    group_by(nitrate_concentration, population, well_plate) %>%
    do(tidy(lm(log(RFU) ~ days, data = .))) %>%
    mutate(number_of_points = x) %>%
    ungroup()
}
## Refit the exponential over sliding windows of the 4..7 earliest points.
windows <- seq(4,7, by = 1)
multi_fits <- windows %>%
  map_df(fitting_window, .id = "iteration")
multi_fits_log <- windows %>%
  map_df(fitting_window_log, .id = "iteration")
## How does the estimated growth rate change as more points are included?
multi_fits %>%
  ggplot(aes(x = number_of_points, y = estimate, group = well_plate)) + geom_point() + geom_line() +
  facet_wrap( ~ nitrate_concentration)
multi_fits_log %>%
  filter(term == "days") %>%
  ggplot(aes(x = number_of_points, y = estimate, group = well_plate)) + geom_point() + geom_line() +
  facet_wrap( ~ nitrate_concentration)
## Keep, per well, the window that maximizes the estimated growth rate.
exp_fits_top <- multi_fits %>%
  group_by(well_plate) %>%
  top_n(n = 1, wt = estimate)
exp_fits_log <- multi_fits_log %>%
  filter(term == "days") %>%
  group_by(well_plate) %>%
  top_n(n = 1, wt = estimate) %>%
  mutate(well_plate_points = paste(well_plate, number_of_points, sep = "_")) %>%
  filter(nitrate_concentration > 40)
## BUG(review): `windows` is 4:7 at this point, so multi_fits_log contains no
## number_of_points == 3 rows and this filter yields an empty table -- it only
## works after re-running with the later `windows <- seq(3,7, by = 1)`.
exp_fits_log_less_40 <- multi_fits_log %>%
  filter(term == "days", nitrate_concentration <= 40) %>%
  filter(number_of_points == 3) %>%
  group_by(well_plate) %>%
  top_n(n = 1, wt = estimate) %>%
  mutate(well_plate_points = paste(well_plate, number_of_points, sep = "_"))
exp_fits_all <- bind_rows(exp_fits_log, exp_fits_log_less_40)
#' Like fitting_window_log(), but returns broom::augment() output (fitted
#' values per observation) instead of tidied coefficients, for plotting the
#' per-window regression lines.
#'
#' @param x Integer; number of earliest time points (by `days`) to keep.
#' @return A tibble with one row per retained observation, including
#'   `.fitted` log-RFU values, tagged with `number_of_points = x`.
fitting_window_log_augment <- function(x) {
  # (The old dead local `growth_rates` made the return value invisible; the
  # pipeline result is returned directly now.)
  n2 %>%
    top_n(n = -x, wt = days) %>%
    group_by(nitrate_concentration, population, well_plate) %>%
    do(augment(lm(log(RFU) ~ days, data = .))) %>%
    mutate(number_of_points = x) %>%
    ungroup()
}
## Rebuild the windows (now including 3 points) and get fitted values for
## plotting the per-window regression lines.
windows <- seq(3,7, by = 1)
multi_fits_log_augment <- windows %>%
  map_df(fitting_window_log_augment, .id = "iteration")
## Keep only each well's best window (as chosen in exp_fits_log above).
multi_fits_log_augment2 <- left_join(multi_fits_log_augment, treatments) %>%
  mutate(well_plate_points = paste(well_plate, number_of_points, sep = "_")) %>%
  filter(well_plate_points %in% c(exp_fits_log$well_plate_points))
## Raw trajectories for one ancestor under N selection (sanity check).
left_join(nitrate, treatments, by = "population") %>%
  filter(treatment == "N", ancestor_id == "anc3") %>%
  ggplot(aes(x = days, y = RFU)) + geom_point() +
  facet_wrap(~ nitrate_concentration, scales = "free")
multi_fits_log_augment2 %>%
  filter(treatment == "N", ancestor_id == "anc3") %>%
  ggplot(aes(x = days, y = .fitted, group = well_plate, color = factor(number_of_points))) + geom_line() +
  geom_point(aes(x = days, y = log.RFU., color = factor(number_of_points)), data = filter(multi_fits_log_augment2, treatment == "N", ancestor_id == "anc3")) + ylab("ln(RFU)") +
  facet_grid(population ~ nitrate_concentration)
## NOTE(review): both ggsave() calls write the same (last) plot.
ggsave("figures/N_anc3_trajectories_exp.png", width = 20, height = 20)
ggsave("figures/log_exponential_max_r_trajectories_exp_nitrate.png", width = 30, height = 40)
|
/Exercise/R_Examples/Ex_19_2.R | no_license | KuChanTung/R | R | false | false | 943 | r | ||
# scripts to generate and save all possible mb dags
# dags will be saved in list and then saved into .rds into hard drive for furture reference
# their names should follow the order of n_m_k, where n = |mb|, m = |colliders|, k = |spouses|
# for the case when there is no colliders and hence no spouses, the file will be named n_0_0
# Output directory; each .rds holds the list of DAGs for one (n, m, k) combo.
# NOTE(review): `dir` shadows base::dir() for the rest of this script.
dir = "all mb dags/"
#x=c()
# Markov-blanket variables V1..V7 and the target node T.
x = paste0("V", 1:7)
y = "T"
n = length(x)
# generate mb dags with no spouses
# enumWithNoSp(), subEnumeration() and dagIsom() are project helpers defined
# elsewhere -- assumed to be loaded before this script runs.
dagList = enumWithNoSp(x, y)
saveRDS(dagList, paste0(dir, n, "_0_0.rds"))
cat(n, "_0_0 :", length(dagList), "\n")
# generate mb dags with spouses
dag = empty.graph(c(x, y))
if (n > 1) {
  # m = number of colliders, k = number of spouses.
  for (m in 1:floor(n / 2)) {
    for (k in 1:(n - 2 * m + 1)) {
      if (m < 2) {
        # Base case: extend the collider-free enumerations of size n - k - 1.
        subDagList = readRDS(paste0(dir, n - k - 1, "_0_0.rds"))
        dagList = subEnumeration(x, y, m, k, subDagList)
        if (is.null(dagIsom(dagList))) {# apply dag isomorphism check, if pass then save into drive
          saveRDS(dagList, paste0(dir, n, "_", m, "_", k, ".rds"))
          cat(n, "_", m, "_", k, ":", length(dagList), "\n")
        } else {
          print("There are duplicated dags!")
        }
      } else {# when m >= 2
        dagList = c()
        # Recursive case: build from the (m - 1)-collider enumerations for
        # every feasible smaller spouse count k_dash.
        for (k_dash in 1:min(k, n - k - 2)) {
          subDagList = readRDS(paste0(dir, n - k - 1, "_", m - 1, "_", k_dash, ".rds"))
          subList = subEnumeration(x, y, m, k, subDagList)
          if (k_dash == k) {# when there are duplicated dags
            # apply dag isomorphism check and remove the duplicated dags
            duplicatedIndices = dagIsom(subList)[, 2]
            subList = subList[-duplicatedIndices]
          }
          dagList = c(dagList, subList)
        } # end for each k_dash
        if (is.null(dagIsom(dagList))) {# apply dag isomorphism check, if pass then save into drive
          saveRDS(dagList, paste0(dir, n, "_", m, "_", k, ".rds"))
          cat(n, "_", m, "_", k, ":", length(dagList), "\n")
        } else {
          print("There are duplicated dags!")
        }
      } # end else when m >= 2
    } # end for k
  } # end for m
} # end if n > 1
| /RStudioProjects/LocalStrLearning/scripts/scripts_mbDagsEnumeration.R | no_license | kelvinyangli/PhDProjects | R | false | false | 2,338 | r | # scripts to generate and save all possible mb dags
# dags will be saved in list and then saved into .rds into hard drive for furture reference
# their names should follow the order of n_m_k, where n = |mb|, m = |colliders|, k = |spouses|
# for the case when there is no colliders and hence no spouses, the file will be named n_0_0
dir = "all mb dags/"
#x=c()
x = paste0("V", 1:7)
y = "T"
n = length(x)
# generate mb dags with no spouses
dagList = enumWithNoSp(x, y)
saveRDS(dagList, paste0(dir, n, "_0_0.rds"))
cat(n, "_0_0 :", length(dagList), "\n")
# generate mb dags with spouses
dag = empty.graph(c(x, y))
if (n > 1) {
for (m in 1:floor(n / 2)) {
for (k in 1:(n - 2 * m + 1)) {
if (m < 2) {
subDagList = readRDS(paste0(dir, n - k - 1, "_0_0.rds"))
dagList = subEnumeration(x, y, m, k, subDagList)
if (is.null(dagIsom(dagList))) {# apply dag isomorphism check, if pass then save into drive
saveRDS(dagList, paste0(dir, n, "_", m, "_", k, ".rds"))
cat(n, "_", m, "_", k, ":", length(dagList), "\n")
} else {
print("There are duplicated dags!")
}
} else {# when m >= 2
dagList = c()
for (k_dash in 1:min(k, n - k - 2)) {
subDagList = readRDS(paste0(dir, n - k - 1, "_", m - 1, "_", k_dash, ".rds"))
subList = subEnumeration(x, y, m, k, subDagList)
if (k_dash == k) {# when there are duplicated dags
# apply dag isomorphism check and remove the duplicated dags
duplicatedIndices = dagIsom(subList)[, 2]
subList = subList[-duplicatedIndices]
}
dagList = c(dagList, subList)
} # end for each k_dash
if (is.null(dagIsom(dagList))) {# apply dag isomorphism check, if pass then save into drive
saveRDS(dagList, paste0(dir, n, "_", m, "_", k, ".rds"))
cat(n, "_", m, "_", k, ":", length(dagList), "\n")
} else {
print("There are duplicated dags!")
}
} # end else when m >= 2
} # end for k
} # end for m
} # end if m > 1
|
# libFuzzer-generated regression input for distr6's internal C routine
# C_EmpiricalMVPdf: a 1x5 data matrix whose first entry is the smallest
# positive normal double, paired with a 1x1 NA query point.
testlist <- list(data = structure(c(2.2250738585072e-308, 0, 0, 0, 0), .Dim = c(1L, 5L)), x = structure(NA_real_, .Dim = c(1L, 1L)))
# Invoke the internal (triple-colon) entry point with the crafted arguments.
result <- do.call(distr6:::C_EmpiricalMVPdf,testlist)
str(result) | /distr6/inst/testfiles/C_EmpiricalMVPdf/libFuzzer_C_EmpiricalMVPdf/C_EmpiricalMVPdf_valgrind_files/1610036301-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 199 | r | testlist <- list(data = structure(c(2.2250738585072e-308, 0, 0, 0, 0), .Dim = c(1L, 5L)), x = structure(NA_real_, .Dim = c(1L, 1L)))
result <- do.call(distr6:::C_EmpiricalMVPdf,testlist)
str(result) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bioc_stats.R
\name{bioc_stats}
\alias{bioc_stats}
\title{bioc_stats}
\usage{
bioc_stats(packages, use_cache = TRUE, type = "Software")
}
\arguments{
\item{packages}{packages}
\item{use_cache}{logical, should cached data be used? Default: TRUE. If set to FALSE, it will
re-query download stats and update cache.}
\item{type}{one of "Software", "AnnotationData", "ExperimentData", and "Workflow"}
}
\value{
data.frame
}
\description{
monthly download stats of Bioconductor software package(s)
}
\examples{
\dontrun{
library("dlstats")
pkgs <- c("ChIPseeker", "clusterProfiler", "DOSE", "ggtree", "GOSemSim", "ReactomePA")
y <- bioc_stats(pkgs, use_cache=TRUE)
head(y)
}
}
\author{
Guangchuang Yu
}
| /man/bioc_stats.Rd | no_license | GuangchuangYu/dlstats | R | false | true | 776 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bioc_stats.R
\name{bioc_stats}
\alias{bioc_stats}
\title{bioc_stats}
\usage{
bioc_stats(packages, use_cache = TRUE, type = "Software")
}
\arguments{
\item{packages}{packages}
\item{use_cache}{logical, should cached data be used? Default: TRUE. If set to FALSE, it will
re-query download stats and update cache.}
\item{type}{one of "Software", "AnnotationData", "ExperimentData", and "Workflow"}
}
\value{
data.frame
}
\description{
monthly download stats of Bioconductor software package(s)
}
\examples{
\dontrun{
library("dlstats")
pkgs <- c("ChIPseeker", "clusterProfiler", "DOSE", "ggtree", "GOSemSim", "ReactomePA")
y <- bioc_stats(pkgs, use_cache=TRUE)
head(y)
}
}
\author{
Guangchuang Yu
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/match_font.R
\name{match_font}
\alias{match_font}
\title{Find a system font by name and style}
\usage{
match_font(family, italic = FALSE, bold = FALSE)
}
\arguments{
\item{family}{The name of the font family}
\item{italic, bold}{logicals indicating the font style}
}
\value{
A list containing the path locating the font file and the 0-based
index of the font in the file.
}
\description{
This function locates the font file (and index) best matching a name and
optional style (italic/bold). A font file will be returned even if a match
isn't found, but it is not necessarily similar to the requested family and
it should not be relied on for font substitution. The aliases \code{"sans"},
\code{"serif"}, and \code{"mono"} match to the system default sans-serif, serif, and
mono fonts respectively (\code{""} is equivalent to \code{"sans"}).
}
\examples{
# Get the system default sans-serif font in italic
match_font('sans', italic = TRUE)
}
| /man/match_font.Rd | permissive | r-lib/systemfonts | R | false | true | 1,021 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/match_font.R
\name{match_font}
\alias{match_font}
\title{Find a system font by name and style}
\usage{
match_font(family, italic = FALSE, bold = FALSE)
}
\arguments{
\item{family}{The name of the font family}
\item{italic, bold}{logicals indicating the font style}
}
\value{
A list containing the path locating the font file and the 0-based
index of the font in the file.
}
\description{
This function locates the font file (and index) best matching a name and
optional style (italic/bold). A font file will be returned even if a match
isn't found, but it is not necessarily similar to the requested family and
it should not be relied on for font substitution. The aliases \code{"sans"},
\code{"serif"}, and \code{"mono"} match to the system default sans-serif, serif, and
mono fonts respectively (\code{""} is equivalent to \code{"sans"}).
}
\examples{
# Get the system default sans-serif font in italic
match_font('sans', italic = TRUE)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rdsdataservice_operations.R
\name{rdsdataservice_commit_transaction}
\alias{rdsdataservice_commit_transaction}
\title{Ends a SQL transaction started with the BeginTransaction operation and
commits the changes}
\usage{
rdsdataservice_commit_transaction(resourceArn, secretArn, transactionId)
}
\arguments{
\item{resourceArn}{[required] The Amazon Resource Name (ARN) of the Aurora Serverless DB cluster.}
\item{secretArn}{[required] The name or ARN of the secret that enables access to the DB cluster.}
\item{transactionId}{[required] The identifier of the transaction to end and commit.}
}
\description{
Ends a SQL transaction started with the \code{\link[=rdsdataservice_begin_transaction]{begin_transaction}} operation and commits the changes.
See \url{https://www.paws-r-sdk.com/docs/rdsdataservice_commit_transaction/} for full documentation.
}
\keyword{internal}
| /cran/paws.database/man/rdsdataservice_commit_transaction.Rd | permissive | paws-r/paws | R | false | true | 949 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rdsdataservice_operations.R
\name{rdsdataservice_commit_transaction}
\alias{rdsdataservice_commit_transaction}
\title{Ends a SQL transaction started with the BeginTransaction operation and
commits the changes}
\usage{
rdsdataservice_commit_transaction(resourceArn, secretArn, transactionId)
}
\arguments{
\item{resourceArn}{[required] The Amazon Resource Name (ARN) of the Aurora Serverless DB cluster.}
\item{secretArn}{[required] The name or ARN of the secret that enables access to the DB cluster.}
\item{transactionId}{[required] The identifier of the transaction to end and commit.}
}
\description{
Ends a SQL transaction started with the \code{\link[=rdsdataservice_begin_transaction]{begin_transaction}} operation and commits the changes.
See \url{https://www.paws-r-sdk.com/docs/rdsdataservice_commit_transaction/} for full documentation.
}
\keyword{internal}
|
# Plot 3: energy sub-metering series for 2007-02-01 and 2007-02-02, written
# to plot3.png (Electric Power Consumption dataset, ';'-separated, '?' = NA).
dt <- read.table("household_power_consumption.txt", header = TRUE, sep = ";", na.strings = "?")
# Build the "date time" string with paste()'s default single-space separator;
# the original inserted three spaces (explicit " " plus default sep) and only
# parsed because strptime() matches runs of whitespace leniently.
dt$strDatetime <- paste(dt$Date, dt$Time)
dt$date <- as.Date(dt$Date, format = "%d/%m/%Y")
# Keep only the two days of interest.
dt.active <- subset(dt, dt$date %in% as.Date(c("2007-02-01", "2007-02-02")))
dt.active$datetime <- strptime(dt.active$strDatetime, format = "%d/%m/%Y %H:%M:%S")
# Render the three sub-metering series with a legend into a 480x480 px PNG.
png(filename = "plot3.png", width = 480, height = 480, units = "px")
plot(dt.active$datetime, dt.active$Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering")
lines(dt.active$datetime, dt.active$Sub_metering_2, col = "red")
lines(dt.active$datetime, dt.active$Sub_metering_3, col = "blue")
legend("topright", lwd = 1, col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
dev.off()
| /plot3.R | no_license | LFTang/ExData_Plotting1 | R | false | false | 939 | r | dt <- read.table("household_power_consumption.txt",header = T,sep = ";",na.strings = "?")
# Build a combined "date time" string for strptime() parsing below.
# NOTE(review): paste() with an explicit " " argument plus the default
# sep = " " inserts THREE spaces; parsing works only because strptime()'s
# literal space matches runs of whitespace.
dt$strDatetime <- paste(dt$Date," ",dt$Time)
dt$date <- as.Date(dt$Date, format="%d/%m/%Y")
# Keep only the two days of interest: 2007-02-01 and 2007-02-02.
dt.active <- subset(dt, dt$date %in% c(as.Date("2007-02-01"), as.Date("2007-02-02")))
dt.active$datetime <- strptime(dt.active$strDatetime, format= "%d/%m/%Y %H:%M:%S")
# Following line is irrelevant (superseded by the date filter above).
#dt.active <- subset(dt, dt$datetime > strptime("2007-02-01", "%Y-%m-%d") & dt$datetime < strptime("2007-02-02","%Y-%m-%d"))
# Render the three sub-metering series with a legend into a 480x480 px PNG.
png(filename="plot3.png", width=480, height=480, units="px")
plot(dt.active$datetime, dt.active$Sub_metering_1, type = 'l', xlab = '', ylab = 'Energy sub metering')
lines(dt.active$datetime, dt.active$Sub_metering_2, col='red')
lines(dt.active$datetime, dt.active$Sub_metering_3, col='blue')
legend('topright', lwd = 1, col = c('black', 'red', 'blue'), legend = c('Sub_metering_1', 'Sub_metering_2', 'Sub_metering_3'))
dev.off()
|
# Unit tests for the S3 methods defined on 'sentomeasures' objects:
# diff(), scale(), summary()/print(), plot(), as.data.table(), as.data.frame().
context("Methods sentomeasures")
library("data.table")
library("quanteda")
# Fix the RNG so corpus sampling and the random plot grouping are reproducible.
set.seed(123)
# corpus, lexicon and aggregation control creation
data("usnews")
corpus <- quanteda::corpus_sample(sento_corpus(corpusdf = usnews), size = 600)
data("list_lexicons")
lex <- sento_lexicons(list_lexicons[c("HENRY_en", "LM_en")])
ctr <- ctr_agg(howWithin = "counts", howDocs = "proportional", howTime = c("linear", "exponential"), by = "day",
               lag = 60, alphasExp = c(0.1, 0.6))
sentMeas <- sento_measures(corpus, lex, ctr)
### tests from here ###
# diff: differencing d times at lag l drops l * d observations
N <- nobs(sentMeas)
M <- nmeasures(sentMeas)
test_that("Differencing is properly done", {
  expect_equal(nobs(diff(sentMeas, lag = 1)), N - 1)
  expect_equal(nobs(diff(sentMeas, lag = 2, differences = 3)), N - 2 * 3)
})
# scale: four parameterisations of centering/scaling
# s1: default standardisation (mean 0, sd 1)
s1 <- scale(sentMeas)
# s2: centering by the negated measures themselves zeroes every entry (sd 0)
s2 <- suppressWarnings(scale(sentMeas, center = -as.matrix(as.data.table(sentMeas)[, -1]), scale = FALSE))
# s3/s4: explicit center/scale supplied as vectors (s3) or full matrices (s4);
# both must reproduce s1
s3 <- scale(sentMeas, center = as.numeric(sentMeas$stats["mean", ]), scale = as.numeric(sentMeas$stats["sd", ]))
s4 <- scale(sentMeas,
            center = -matrix(as.numeric(sentMeas$stats["mean", ]), nrow = N, ncol = M, byrow = TRUE),
            scale = matrix(as.numeric(sentMeas$stats["sd", ]), nrow = N, ncol = M, byrow = TRUE))
test_that("Scaling is properly done", {
  expect_equal(rowMeans(s1$stats["mean", ], na.rm = TRUE), c(mean = 0))
  expect_equal(rowMeans(s1$stats["sd", ], na.rm = TRUE), c(sd = 1))
  expect_equal(rowMeans(s2$stats["mean", ], na.rm = TRUE), c(mean = 0))
  expect_equal(rowMeans(s2$stats["sd", ], na.rm = TRUE), c(sd = 0))
  expect_equal(s1$stats["mean", ], s3$stats["mean", ])
  expect_equal(s1$stats["sd", ], s3$stats["sd", ])
  expect_equal(s1$stats["mean", ], s4$stats["mean", ])
  expect_equal(s1$stats["sd", ], s4$stats["sd", ])
})
# summary.sentomeasures, print.sentomeasures: called for their side effects,
# both should return NULL
cat("\n")
test_that("No output returned when object summarized or printed", {
  expect_null(summary(sentMeas))
  expect_null(print(sentMeas))
})
# plot.sentomeasures: grouping dimension drawn at random (seed fixed above)
p <- plot(sentMeas, group = sample(c("features", "lexicons", "time"), 1))
test_that("Plot is a ggplot object", {
  expect_true(inherits(p, "ggplot"))
})
# as.data.table, measures_to_long: wide-to-long reshape keeps every
# date/lexicon/feature/time combination
measuresLong <- as.data.table(sentMeas, format = "long")
test_that("Proper long formatting of sentiment measures", {
  expect_true(nrow(measuresLong) == nobs(sentMeas) * nmeasures(sentMeas))
  expect_true(all(sentMeas$lexicons %in% unique(measuresLong[["lexicons"]])))
  expect_true(all(sentMeas$features %in% unique(measuresLong[["features"]])))
  expect_true(all(sentMeas$time %in% unique(measuresLong[["time"]])))
  expect_true(all(as.data.table(sentMeas)[["date"]] %in% unique(measuresLong[["date"]])))
})
# as.data.frame
test_that("Proper data.frame conversion", {
  expect_true(class(as.data.frame(sentMeas)) == "data.frame")
})
| /tests/testthat/test_methods_sentomeasures.R | no_license | cran/sentometrics | R | false | false | 2,905 | r |
# Unit tests for the S3 methods defined on 'sentomeasures' objects:
# diff(), scale(), summary()/print(), plot(), as.data.table(), as.data.frame().
context("Methods sentomeasures")
library("data.table")
library("quanteda")
# Fix the RNG so corpus sampling and the random plot grouping are reproducible.
set.seed(123)
# corpus, lexicon and aggregation control creation
data("usnews")
corpus <- quanteda::corpus_sample(sento_corpus(corpusdf = usnews), size = 600)
data("list_lexicons")
lex <- sento_lexicons(list_lexicons[c("HENRY_en", "LM_en")])
ctr <- ctr_agg(howWithin = "counts", howDocs = "proportional", howTime = c("linear", "exponential"), by = "day",
               lag = 60, alphasExp = c(0.1, 0.6))
sentMeas <- sento_measures(corpus, lex, ctr)
### tests from here ###
# diff: differencing d times at lag l drops l * d observations
N <- nobs(sentMeas)
M <- nmeasures(sentMeas)
test_that("Differencing is properly done", {
  expect_equal(nobs(diff(sentMeas, lag = 1)), N - 1)
  expect_equal(nobs(diff(sentMeas, lag = 2, differences = 3)), N - 2 * 3)
})
# scale: four parameterisations of centering/scaling
# s1: default standardisation (mean 0, sd 1)
s1 <- scale(sentMeas)
# s2: centering by the negated measures themselves zeroes every entry (sd 0)
s2 <- suppressWarnings(scale(sentMeas, center = -as.matrix(as.data.table(sentMeas)[, -1]), scale = FALSE))
# s3/s4: explicit center/scale supplied as vectors (s3) or full matrices (s4);
# both must reproduce s1
s3 <- scale(sentMeas, center = as.numeric(sentMeas$stats["mean", ]), scale = as.numeric(sentMeas$stats["sd", ]))
s4 <- scale(sentMeas,
            center = -matrix(as.numeric(sentMeas$stats["mean", ]), nrow = N, ncol = M, byrow = TRUE),
            scale = matrix(as.numeric(sentMeas$stats["sd", ]), nrow = N, ncol = M, byrow = TRUE))
test_that("Scaling is properly done", {
  expect_equal(rowMeans(s1$stats["mean", ], na.rm = TRUE), c(mean = 0))
  expect_equal(rowMeans(s1$stats["sd", ], na.rm = TRUE), c(sd = 1))
  expect_equal(rowMeans(s2$stats["mean", ], na.rm = TRUE), c(mean = 0))
  expect_equal(rowMeans(s2$stats["sd", ], na.rm = TRUE), c(sd = 0))
  expect_equal(s1$stats["mean", ], s3$stats["mean", ])
  expect_equal(s1$stats["sd", ], s3$stats["sd", ])
  expect_equal(s1$stats["mean", ], s4$stats["mean", ])
  expect_equal(s1$stats["sd", ], s4$stats["sd", ])
})
# summary.sentomeasures, print.sentomeasures: called for their side effects,
# both should return NULL
cat("\n")
test_that("No output returned when object summarized or printed", {
  expect_null(summary(sentMeas))
  expect_null(print(sentMeas))
})
# plot.sentomeasures: grouping dimension drawn at random (seed fixed above)
p <- plot(sentMeas, group = sample(c("features", "lexicons", "time"), 1))
test_that("Plot is a ggplot object", {
  expect_true(inherits(p, "ggplot"))
})
# as.data.table, measures_to_long: wide-to-long reshape keeps every
# date/lexicon/feature/time combination
measuresLong <- as.data.table(sentMeas, format = "long")
test_that("Proper long formatting of sentiment measures", {
  expect_true(nrow(measuresLong) == nobs(sentMeas) * nmeasures(sentMeas))
  expect_true(all(sentMeas$lexicons %in% unique(measuresLong[["lexicons"]])))
  expect_true(all(sentMeas$features %in% unique(measuresLong[["features"]])))
  expect_true(all(sentMeas$time %in% unique(measuresLong[["time"]])))
  expect_true(all(as.data.table(sentMeas)[["date"]] %in% unique(measuresLong[["date"]])))
})
# as.data.frame
test_that("Proper data.frame conversion", {
  expect_true(class(as.data.frame(sentMeas)) == "data.frame")
})
|
# NOTE(review): rm(list = ls()) wipes the caller's entire workspace; kept to
# preserve the original script's behaviour, but avoid it in shared code.
rm(list=ls())
library(tidyverse)
# Stratified two-PSU-per-stratum sample:
#   h    - stratum identifier (1..6)
#   a    - PSU index within the stratum (1..2)
#   u_ha - PSU total of the numerator variable
#   w_ha - PSU total of the denominator variable
# Built directly instead of twelve piped add_row() calls.
data <- data.frame(
  h    = rep(1:6, each = 2),
  a    = rep(1:2, times = 6),
  u_ha = c(2062.50, 2062.50, 1744.875, 2392.500, 1725.000, 2100.000,
           1491.750, 1164.375, 2117.500, 2272.875, 3213.000, 2010.250),
  w_ha = c(275, 275, 258.5, 319.0, 230.0, 280.0,
           229.5, 202.5, 302.5, 313.5, 459.0, 365.5)
)
# Combined ratio estimator: sum of numerator totals over sum of denominator totals.
ratio_mean = sum(data$u_ha)/sum(data$w_ha)
#### Taylor series approximation ####
# With two PSUs per stratum, the variance/covariance terms reduce to sums of
# within-stratum PSU differences; compute those differences per stratum first.
stratum_ids <- sort(unique(data$h))
pair_diff <- function(s, col) {
  pair <- data[data$h == s, ]
  pair[[col]][1] - pair[[col]][2]
}
d_u <- vapply(stratum_ids, pair_diff, numeric(1), col = "u_ha")
d_w <- vapply(stratum_ids, pair_diff, numeric(1), col = "w_ha")
var_u_ha <- sum(d_u^2)
var_w_ha <- sum(d_w^2)
cov_u_w_ha <- sum(d_u * d_w)
# Linearisation (Taylor) variance of the combined ratio estimator.
rpta_var_taylor = (1/(sum(data$w_ha)**2))*(var_u_ha+(ratio_mean**2)*(var_w_ha)-2*ratio_mean*cov_u_w_ha); rpta_var_taylor
rpta_se_taylor = sqrt(rpta_var_taylor);rpta_se_taylor
### Confidence Interval Taylor (t quantile with 6 degrees of freedom)
ratio_mean + (rpta_se_taylor*qt(1-0.05/2,6))
ratio_mean - (rpta_se_taylor*qt(1-0.05/2,6))
#### Balanced repeated replication ####
set.seed(13920)
#u/w
# 8 x 6 design matrix of +/-1: row = replicate, column = stratum; +1 selects
# the first PSU of the stratum for the half sample, -1 selects the second.
hadamard=matrix(nrow=8,ncol = 6,data = c(1,1,1,-1,1,-1,-1,1,1,1,-1,1,-1,-1,1,1,1,-1,1,1,-1,1,1,1,-1,1,-1,-1,1,1,1,-1,1,-1,-1,1,1,1,-1,1,1,-1,-1,-1,-1,-1,-1,-1),byrow = T)
hadamard[4,2]=-1
hadamard[7,5]=-1
# Replace each +/-1 entry with the u_ha value of the PSU it selects.
value_hadamard_u = hadamard
for(i in 1:6){
  strata <- data[data$h==i,]
  row <- hadamard[,i]
  for(j in 1:8){
    element <- ifelse(row[j]==1,strata[1,"u_ha"],strata[2,"u_ha"])
    value_hadamard_u[j,i]=element
  }
}
# Same substitution for the denominator variable w_ha.
value_hadamard_w = hadamard
for(i in 1:6){
  strata <- data[data$h==i,]
  row <- hadamard[,i]
  for(j in 1:8){
    element <- ifelse(row[j]==1,strata[1,"w_ha"],strata[2,"w_ha"])
    value_hadamard_w[j,i]=element
  }
}
# Column 7 = half-sample total; column 8 = complement total (grand total
# minus the half-sample total).
value_hadamard_u= cbind(value_hadamard_u,rowSums(value_hadamard_u))
value_hadamard_u=cbind(value_hadamard_u,sum(data$u_ha)-rowSums(value_hadamard_u[,1:6]))
value_hadamard_w=cbind(value_hadamard_w,rowSums(value_hadamard_w))
value_hadamard_w=cbind(value_hadamard_w,sum(data$w_ha)-rowSums(value_hadamard_w[,1:6]))
# Ratio estimate from each half sample (z_y) and its complement (z2_y).
hd_ratios = data.frame(z_y=0,z2_y=0)
hd_ratios = hd_ratios[0,]
for(i in 1:8){
  hd_ratios <- hd_ratios %>% add_row(z_y = value_hadamard_u[i,7]/value_hadamard_w[i,7],z2_y = value_hadamard_u[i,8]/value_hadamard_w[i,8])
}
# BRR variance with complements: (1 / (2G)) * sum over the G = 8 replicates
# of [(theta_half - theta)^2 + (theta_complement - theta)^2].
# BUG FIX: `pl` was reset to 0 INSIDE the inner loop, so only the
# complement's (l = 2) squared deviation survived each replicate; reset it
# once per replicate instead. Also renamed `var` -> `brr_terms` to avoid
# masking stats::var().
brr_terms = vector()
for(k in 1:8){
  pl = 0
  for(l in 1:2){
    pl = pl + (hd_ratios[k,l]-ratio_mean)**2
  }
  brr_terms = append(brr_terms,pl)
}
rpta_var_brr = 1/(2*8)*sum(brr_terms);rpta_var_brr
rpta_se_brr = sqrt(rpta_var_brr);rpta_se_brr
### Confidence Interval BRR
ratio_mean + (rpta_se_brr*qt(1-0.05/2,6))
ratio_mean - (rpta_se_brr*qt(1-0.05/2,6))
#### Jackknife repeated replication ####
# 6 x 12 replicate matrices: columns (2p-1, 2p) belong to stratum p's
# delete-one-PSU replicate; rows are strata. Cells start as NA and the
# untouched strata are back-filled with their observed values via replace_na().
jk_matrix_u = matrix(ncol = 6*2,nrow = 6)
jk_matrix_w = matrix(ncol = 6*2,nrow = 6)
for(p in 1:6){
  # Randomly choose which PSU of stratum p is deleted (1 or 2, each w.p. 1/2).
  random <- round(runif(1,1,2))
  strata <- data[data$h==p,]
  if (random==1){
    # PSU 1 deleted: its cell is zeroed and PSU 2 is double-weighted in the
    # paired column; remaining NA cells receive the observed PSU values.
    jk_matrix_u[p,p*2-1]=0
    jk_matrix_u[p,p*2]=2*strata[2,"u_ha"]
    jk_matrix_u[,p*2-1] = replace_na(jk_matrix_u[,p*2-1],strata[1,"u_ha"])
    jk_matrix_u[,p*2] = replace_na(jk_matrix_u[,p*2],strata[2,"u_ha"])
    jk_matrix_w[p,p*2-1]=0
    jk_matrix_w[p,p*2]=2*strata[2,"w_ha"]
    jk_matrix_w[,p*2-1] = replace_na(jk_matrix_w[,p*2-1],strata[1,"w_ha"])
    jk_matrix_w[,p*2] = replace_na(jk_matrix_w[,p*2],strata[2,"w_ha"])
  }
  if (random == 2){
    # PSU 2 deleted: symmetric to the branch above.
    jk_matrix_u[p,p*2-1]=0
    jk_matrix_u[p,p*2]=2*strata[1,"u_ha"]
    jk_matrix_u[,p*2-1] = replace_na(jk_matrix_u[,p*2-1],strata[2,"u_ha"])
    jk_matrix_u[,p*2] = replace_na(jk_matrix_u[,p*2],strata[1,"u_ha"])
    jk_matrix_w[p,p*2-1]=0
    jk_matrix_w[p,p*2]=2*strata[1,"w_ha"]
    jk_matrix_w[,p*2-1] = replace_na(jk_matrix_w[,p*2-1],strata[2,"w_ha"])
    jk_matrix_w[,p*2] = replace_na(jk_matrix_w[,p*2],strata[1,"w_ha"])
  }
}
# Column 13: row totals across the 12 replicate columns.
# NOTE(review): the loop below forms one ratio per MATRIX ROW (stratum) from
# these row totals rather than one ratio per jackknife replicate column, and
# no (n_h - 1)/n_h scaling is applied -- verify against the intended JRR formula.
jk_matrix_u = cbind(jk_matrix_u,rowSums(jk_matrix_u))
jk_matrix_w = cbind(jk_matrix_w,rowSums(jk_matrix_w))
jrr_var = 0
for (m in 1:6) {
  jrr_ratio = jk_matrix_u[m,13]/jk_matrix_w[m,13]
  jrr_var = jrr_var + (jrr_ratio - ratio_mean)**2
}
rpta_var_jrr = jrr_var
rpta_se_jrr = sqrt(rpta_var_jrr)
### Confidence Interval JRR
ratio_mean + (rpta_se_jrr*qt(1-0.05/2,6))
ratio_mean - (rpta_se_jrr*qt(1-0.05/2,6))
| /src/Manrique J_HW6.R | permissive | jamanrique/SurveyAnalysis.jl | R | false | false | 4,715 | r | rm(list=ls())
library(tidyverse)
# Stratified two-PSU-per-stratum sample: h = stratum (1..6), a = PSU within
# stratum (1..2), u_ha / w_ha = PSU totals of the numerator and denominator.
data <- data.frame(h=0,a=0,u_ha=0,w_ha=0)
data <- data[0,]   # keep the column structure, drop the dummy row
data <-data %>% add_row(h=1,a=1,u_ha=2062.50,w_ha=275)
data <-data %>% add_row(h=1,a=2,u_ha=2062.50,w_ha=275)
data <-data %>% add_row(h=2,a=1,u_ha=1744.875,w_ha=258.5)
data <-data %>% add_row(h=2,a=2,u_ha=2392.500,w_ha=319.0)
data <-data %>% add_row(h=3,a=1,u_ha=1725.000,w_ha=230.0)
data <-data %>% add_row(h=3,a=2,u_ha=2100.000,w_ha=280.0)
data <-data %>% add_row(h=4,a=1,u_ha=1491.750,w_ha=229.5)
data <-data %>% add_row(h=4,a=2,u_ha=1164.375,w_ha=202.5)
data <-data %>% add_row(h=5,a=1,u_ha=2117.500,w_ha=302.5)
data <-data %>% add_row(h=5,a=2,u_ha=2272.875,w_ha=313.5)
data <-data %>% add_row(h=6,a=1,u_ha=3213.000,w_ha=459.0)
data <-data %>% add_row(h=6,a=2,u_ha=2010.250,w_ha=365.5)
# Combined ratio estimator: total of u over total of w.
ratio_mean = sum(data$u_ha)/sum(data$w_ha)
#### Taylor series approximation ####
# Accumulate within-stratum squared PSU differences (variance terms) and
# cross-products (covariance term) over the 6 strata.
var_u_ha <- 0
var_w_ha <- 0
cov_u_w_ha <- 0
for(i in 1:6){
  taylor <- data[data$h==i,]
  var_u_ha <- var_u_ha + (taylor$u_ha[1]-taylor$u_ha[2])**2
  var_w_ha <- var_w_ha + (taylor$w_ha[1]-taylor$w_ha[2])**2
  cov_u_w_ha <- cov_u_w_ha + (taylor$u_ha[1]-taylor$u_ha[2])*(taylor$w_ha[1]-taylor$w_ha[2])
}
# Linearisation (Taylor) variance of the combined ratio estimator.
rpta_var_taylor = (1/(sum(data$w_ha)**2))*(var_u_ha+(ratio_mean**2)*(var_w_ha)-2*ratio_mean*cov_u_w_ha); rpta_var_taylor
rpta_se_taylor = sqrt(rpta_var_taylor);rpta_se_taylor
### Confidence Interval Taylor (t quantile with 6 degrees of freedom)
ratio_mean + (rpta_se_taylor*qt(1-0.05/2,6))
ratio_mean - (rpta_se_taylor*qt(1-0.05/2,6))
#### Balanced repeated replication ####
set.seed(13920)
#u/w
# 8 x 6 design matrix of +/-1: row = replicate, column = stratum; +1 selects
# the first PSU of the stratum for the half sample, -1 selects the second.
hadamard=matrix(nrow=8,ncol = 6,data = c(1,1,1,-1,1,-1,-1,1,1,1,-1,1,-1,-1,1,1,1,-1,1,1,-1,1,1,1,-1,1,-1,-1,1,1,1,-1,1,-1,-1,1,1,1,-1,1,1,-1,-1,-1,-1,-1,-1,-1),byrow = T)
hadamard[4,2]=-1
hadamard[7,5]=-1
# Replace each +/-1 entry with the u_ha value of the PSU it selects.
value_hadamard_u = hadamard
for(i in 1:6){
  strata <- data[data$h==i,]
  row <- hadamard[,i]
  for(j in 1:8){
    element <- ifelse(row[j]==1,strata[1,"u_ha"],strata[2,"u_ha"])
    value_hadamard_u[j,i]=element
  }
}
# Same substitution for the denominator variable w_ha.
value_hadamard_w = hadamard
for(i in 1:6){
  strata <- data[data$h==i,]
  row <- hadamard[,i]
  for(j in 1:8){
    element <- ifelse(row[j]==1,strata[1,"w_ha"],strata[2,"w_ha"])
    value_hadamard_w[j,i]=element
  }
}
# Column 7 = half-sample total; column 8 = complement total (grand total
# minus the half-sample total).
value_hadamard_u= cbind(value_hadamard_u,rowSums(value_hadamard_u))
value_hadamard_u=cbind(value_hadamard_u,sum(data$u_ha)-rowSums(value_hadamard_u[,1:6]))
value_hadamard_w=cbind(value_hadamard_w,rowSums(value_hadamard_w))
value_hadamard_w=cbind(value_hadamard_w,sum(data$w_ha)-rowSums(value_hadamard_w[,1:6]))
# Ratio estimate from each half sample (z_y) and its complement (z2_y).
hd_ratios = data.frame(z_y=0,z2_y=0)
hd_ratios = hd_ratios[0,]
for(i in 1:8){
  hd_ratios <- hd_ratios %>% add_row(z_y = value_hadamard_u[i,7]/value_hadamard_w[i,7],z2_y = value_hadamard_u[i,8]/value_hadamard_w[i,8])
}
# BRR variance with complements: (1 / (2G)) * sum over the G = 8 replicates
# of [(theta_half - theta)^2 + (theta_complement - theta)^2].
# BUG FIX: `pl` was reset to 0 INSIDE the inner loop, so only the
# complement's (l = 2) squared deviation survived each replicate; reset it
# once per replicate instead. Also renamed `var` -> `brr_terms` to avoid
# masking stats::var().
brr_terms = vector()
for(k in 1:8){
  pl = 0
  for(l in 1:2){
    pl = pl + (hd_ratios[k,l]-ratio_mean)**2
  }
  brr_terms = append(brr_terms,pl)
}
rpta_var_brr = 1/(2*8)*sum(brr_terms);rpta_var_brr
rpta_se_brr = sqrt(rpta_var_brr);rpta_se_brr
### Confidence Interval BRR
ratio_mean + (rpta_se_brr*qt(1-0.05/2,6))
ratio_mean - (rpta_se_brr*qt(1-0.05/2,6))
#### Jackknife repeated replication ####
# 6 x 12 replicate matrices: columns (2p-1, 2p) belong to stratum p's
# delete-one-PSU replicate; rows are strata. Cells start as NA and the
# untouched strata are back-filled with their observed values via replace_na().
jk_matrix_u = matrix(ncol = 6*2,nrow = 6)
jk_matrix_w = matrix(ncol = 6*2,nrow = 6)
for(p in 1:6){
  # Randomly choose which PSU of stratum p is deleted (1 or 2, each w.p. 1/2).
  random <- round(runif(1,1,2))
  strata <- data[data$h==p,]
  if (random==1){
    # PSU 1 deleted: its cell is zeroed and PSU 2 is double-weighted in the
    # paired column; remaining NA cells receive the observed PSU values.
    jk_matrix_u[p,p*2-1]=0
    jk_matrix_u[p,p*2]=2*strata[2,"u_ha"]
    jk_matrix_u[,p*2-1] = replace_na(jk_matrix_u[,p*2-1],strata[1,"u_ha"])
    jk_matrix_u[,p*2] = replace_na(jk_matrix_u[,p*2],strata[2,"u_ha"])
    jk_matrix_w[p,p*2-1]=0
    jk_matrix_w[p,p*2]=2*strata[2,"w_ha"]
    jk_matrix_w[,p*2-1] = replace_na(jk_matrix_w[,p*2-1],strata[1,"w_ha"])
    jk_matrix_w[,p*2] = replace_na(jk_matrix_w[,p*2],strata[2,"w_ha"])
  }
  if (random == 2){
    # PSU 2 deleted: symmetric to the branch above.
    jk_matrix_u[p,p*2-1]=0
    jk_matrix_u[p,p*2]=2*strata[1,"u_ha"]
    jk_matrix_u[,p*2-1] = replace_na(jk_matrix_u[,p*2-1],strata[2,"u_ha"])
    jk_matrix_u[,p*2] = replace_na(jk_matrix_u[,p*2],strata[1,"u_ha"])
    jk_matrix_w[p,p*2-1]=0
    jk_matrix_w[p,p*2]=2*strata[1,"w_ha"]
    jk_matrix_w[,p*2-1] = replace_na(jk_matrix_w[,p*2-1],strata[2,"w_ha"])
    jk_matrix_w[,p*2] = replace_na(jk_matrix_w[,p*2],strata[1,"w_ha"])
  }
}
# Column 13: row totals across the 12 replicate columns.
# NOTE(review): the loop below forms one ratio per MATRIX ROW (stratum) from
# these row totals rather than one ratio per jackknife replicate column, and
# no (n_h - 1)/n_h scaling is applied -- verify against the intended JRR formula.
jk_matrix_u = cbind(jk_matrix_u,rowSums(jk_matrix_u))
jk_matrix_w = cbind(jk_matrix_w,rowSums(jk_matrix_w))
jrr_var = 0
for (m in 1:6) {
  jrr_ratio = jk_matrix_u[m,13]/jk_matrix_w[m,13]
  jrr_var = jrr_var + (jrr_ratio - ratio_mean)**2
}
rpta_var_jrr = jrr_var
rpta_se_jrr = sqrt(rpta_var_jrr)
### Confidence Interval JRR
ratio_mean + (rpta_se_jrr*qt(1-0.05/2,6))
ratio_mean - (rpta_se_jrr*qt(1-0.05/2,6))
|
library(tidyverse)
library(rio)
library(AER)
library(stargazer)

# Cross-country institutions/education data; columns are renamed below and all
# later code must use the renamed columns.
ace_xc_raw <- import("data/ace_xcountry.dta")

xc_df <- ace_xc_raw %>%
  select(logpgdp05, tyr05_n, ruleoflaw, lat_abst,
         africa, america, asia, f_brit, f_french,
         lcapped, lpd1500s, prienr1900, protmiss) %>%
  rename("gdp" = logpgdp05,
         "yr_school" = tyr05_n,
         "latitude" = lat_abst,
         "settlermortality" = lcapped,
         "pop" = lpd1500s,
         "enrollment1900" = prienr1900)

# Scatter plots. ggsave() must be called on the plot object; the original
# `... + geom_point() + ggsave(...)` chaining is a broken idiom in current
# ggplot2 (ggsave's return value cannot be added to a ggplot).
p_institutions <- ggplot(xc_df, aes(x = ruleoflaw, y = gdp)) +
  geom_point()
ggsave("institutioneffect.png", p_institutions)

p_hkiv <- ggplot(xc_df, aes(x = protmiss, y = gdp)) +
  geom_point()
ggsave("hkiv.png", p_hkiv)

# BUG FIX: the original formulas used the pre-rename column names
# (logpgdp05, tyr05_n, lat_abst, lcapped, lpd1500s, prienr1900), which no
# longer exist in xc_df after rename(); use the renamed columns instead.
fit1 <- lm(gdp ~ yr_school + ruleoflaw + latitude +
             africa + america + asia + f_brit + f_french, data = xc_df)

# 2SLS: yr_school is the endogenous regressor, instrumented by enrollment1900
# and protmiss; settlermortality and pop are exogenous controls appearing on
# both sides of the `|`.
tsls_fit_xc <- ivreg(gdp ~ yr_school + settlermortality + pop |
                       settlermortality + pop + enrollment1900 + protmiss, data = xc_df)
stargazer(tsls_fit_xc, type = "text")
| /ace_xc.R | no_license | jbl18c/FYP | R | false | false | 1,015 | r | library(tidyverse)
library(rio)
library(AER)
library(stargazer)

# Cross-country institutions/education data; columns are renamed below and all
# later code must use the renamed columns.
ace_xc_raw <- import("data/ace_xcountry.dta")

xc_df <- ace_xc_raw %>%
  select(logpgdp05, tyr05_n, ruleoflaw, lat_abst,
         africa, america, asia, f_brit, f_french,
         lcapped, lpd1500s, prienr1900, protmiss) %>%
  rename("gdp" = logpgdp05,
         "yr_school" = tyr05_n,
         "latitude" = lat_abst,
         "settlermortality" = lcapped,
         "pop" = lpd1500s,
         "enrollment1900" = prienr1900)

# Scatter plots. ggsave() must be called on the plot object; the original
# `... + geom_point() + ggsave(...)` chaining is a broken idiom in current
# ggplot2 (ggsave's return value cannot be added to a ggplot).
p_institutions <- ggplot(xc_df, aes(x = ruleoflaw, y = gdp)) +
  geom_point()
ggsave("institutioneffect.png", p_institutions)

p_hkiv <- ggplot(xc_df, aes(x = protmiss, y = gdp)) +
  geom_point()
ggsave("hkiv.png", p_hkiv)

# BUG FIX: the original formulas used the pre-rename column names
# (logpgdp05, tyr05_n, lat_abst, lcapped, lpd1500s, prienr1900), which no
# longer exist in xc_df after rename(); use the renamed columns instead.
fit1 <- lm(gdp ~ yr_school + ruleoflaw + latitude +
             africa + america + asia + f_brit + f_french, data = xc_df)

# 2SLS: yr_school is the endogenous regressor, instrumented by enrollment1900
# and protmiss; settlermortality and pop are exogenous controls appearing on
# both sides of the `|`.
tsls_fit_xc <- ivreg(gdp ~ yr_school + settlermortality + pop |
                       settlermortality + pop + enrollment1900 + protmiss, data = xc_df)
stargazer(tsls_fit_xc, type = "text")
|
/refm/api/src/win32/registry.rd | no_license | mrkn/rubydoc | R | false | false | 13,204 | rd | ||
# Count the number of job scripts per chromosome for proliferation purposes.
# Cluster paths
dimension_path <- "/home/pmd-01/chemenya/CHS/txtDosage/dimensions/"
# Output/dosage locations (unused in this script; presumably consumed by
# downstream scripts -- TODO confirm).
output_path <- "/home/pmd-01/chemenya/CHS/Split_Imputed_Results/"
dosage_path <- "/home/pmd-01/chemenya/CHS/Split_Imputed/"
# Summarise one chromosome: read its SNP count from the ".row" dimension
# file, then derive the number of 5000-SNP dosage files and the number of
# 20-file job scripts needed to process them.
summarise_chromosome <- function(chr) {
  dimension <- as.numeric(read.table(paste0(dimension_path, "chr", chr, ".row"))[1])
  num.files <- ceiling(dimension / 5000)
  scripts <- ceiling(num.files / 20)
  cbind(chr, num.files, scripts)
}
# One row per chromosome 1..22: (chr, num.files, scripts).
files <- do.call(rbind, lapply(seq_len(22), summarise_chromosome))
files
| /Count.Num.Results.R | no_license | LilithMoss/CHS_Imputed | R | false | false | 767 | r | #Count the number of scripts per chromosome for proliferation purposes
#Cluster Paths
# Directory holding per-chromosome ".row" files with SNP counts.
dimension_path <- "/home/pmd-01/chemenya/CHS/txtDosage/dimensions/"
# Output/dosage locations (unused in this chunk; presumably consumed by
# downstream scripts -- TODO confirm).
output_path <- "/home/pmd-01/chemenya/CHS/Split_Imputed_Results/"
dosage_path <- "/home/pmd-01/chemenya/CHS/Split_Imputed/"
#Loop through all 22 chromosomes
# Result: one row per chromosome with the chromosome id, the number of
# 5000-SNP dosage files, and the number of 20-file job scripts required.
files <- do.call(rbind,lapply(1:22,function(i){
  #Set Chromosome
  chr=i
  #Read in the number of SNPs to be read in the dosage file
  dimension <- as.numeric(read.table(paste0(dimension_path,"chr",chr,".row"))[1])
  #Read in how many files there are for this chromosome
  num.files <- ceiling(dimension/5000)
  #Which files to read
  scripts <- ceiling(num.files/20)
  #Put all together to save
  cbind(chr,num.files,scripts)
}))
files
|
# scale() standardises a vector: it makes the mean equal to zero and the
# standard deviation equal to one (Z-transform).
# A standardised regression coefficient (beta) is obtained when both the
# predictors and the dependent variable are standardised.
# Task: write a function that takes a data frame with two numeric variables
# and returns the standardised coefficients of a regression model in which
# the first column is the dependent variable and the second the predictor.
# NOTE(review): this one-liner regresses column 2 on column 1, the opposite
# of the stated task; the alternative below uses column 1 as the response --
# confirm which orientation is intended.
lm(scale(x[[2]]) ~ scale(x[[1]]), x)$coefficients
# vs -- alternative: standardise the whole data frame once, then regress
x <-scale(x)
lm(x[,1] ~ x[,2])$coefficients
# Task: write normality.test, which takes a data frame of numeric variables,
# tests each variable's distribution for normality with shapiro.test, and
# returns a vector of the resulting p-values whose names match the variable
# names.
normality.test <- function(x){
  # Shapiro-Wilk p-value per column. vapply (rather than sapply) guarantees
  # a named numeric vector regardless of the input's shape.
  vapply(x, function(y) shapiro.test(y)$p.value, numeric(1))
}
# vs -- alternative: sapply over shapiro.test simplifies the htest objects
# into a matrix of list components; indexing ['p.value',] then yields a
# named *list* of p-values rather than a numeric vector as above -- confirm
# that a list return is acceptable before using this variant.
normality.test <- function(x){
return(sapply(x, FUN = shapiro.test)['p.value',])}
# Task: download the dataset attached to this step and fit a regression
# predicting DV from IV. Install the gvlma package and check whether the
# homoscedasticity assumption holds for this model. The answer is the
# p-value of the heteroscedasticity test.
library(gvlma)
step7 <- read.csv("https://stepic.org/media/attachments/lesson/12088/homosc.csv", sep=',' )
step7_x <- lm(DV ~ IV, step7)
# Global validation of linear-model assumptions (includes heteroscedasticity)
step7_x1 <- gvlma(step7_x)
summary(step7_x1)
# Task: write resid.norm, which tests the distribution of a model's
# residuals for normality with shapiro.test and builds a ggplot() histogram
# with "red" fill when the residuals deviate significantly from normal
# (p < 0.05), and "green" fill when they do not.
resid.norm <- function(fit) {
res <- shapiro.test(fit$residuals)
df <- data.frame(fit$residuals)
# NOTE(review): aes(fit$residuals) references the vector from the enclosing
# environment, so the data frame `df` is effectively unused -- confirm.
return (ggplot(df, aes(fit$residuals)) + geom_histogram(bins=30, fill=ifelse(res$p.value < 0.05, 'red', 'green')))
}
# vs -- alternative: same contract, building the plot data from fit$model
# and choosing the fill colour from the precomputed p-value.
resid.norm <- function(fit) {
resid.norm.pv <- shapiro.test(fit$residuals)$p.value
plt <- ggplot(data.frame(fit$model), aes(x = fit$residuals)) +
geom_histogram(fill = ifelse(resid.norm.pv < 0.05, 'red', 'green'))
return(plt)}
# Another problem for regression models is multicollinearity: predictors
# that correlate strongly with one another. The correlation between two
# predictors can even reach 1, e.g. when they are the same quantity on
# different scales (x1 = height in metres, x2 = height in centimetres).
# Inspect the data with pairs() and compute all pairwise correlations with
# cor().
# Task: write high.corr, which takes a dataset with any number of numeric
# variables and returns a vector with the names of the two variables having
# the largest absolute correlation coefficient.
high.corr <- function(x){
  # Keep numeric columns only, so cor() does not fail on factors/characters.
  # vapply (rather than sapply) guarantees a logical selection vector.
  num_var <- vapply(x, is.numeric, logical(1))
  cor_mat <- cor(x[, num_var])
  # Zero the diagonal so the trivial self-correlations (always 1) are ignored.
  diag(cor_mat) <- 0
  # arr.ind = TRUE returns a (row, col) index matrix whose rownames are the
  # variable names of the maximal-|correlation| pair.
  u <- which(abs(cor_mat) == max(abs(cor_mat)), arr.ind = TRUE)
  return(rownames(u))
}
# vs
high.corr <- function(x){
cr <- cor(x)
diag(cr) <- 0
return(rownames(which(abs(cr)==max(abs(cr)),arr.ind=T)))} | /Stepik3/diagnostic_model.r | no_license | venkaDaria/rlang-demo | R | false | false | 5,423 | r | # Функция scale() позволяет совершить стандартизацию вектора, то есть делает его среднее значение равным нулю,
# (continued) ...and the standard deviation equal to one (Z-transform).
# A standardised regression coefficient (beta) is obtained when both the
# predictors and the dependent variable are standardised.
# Task: given a data frame with two numeric variables, return the
# standardised coefficients of a regression of the first column (response)
# on the second (predictor).
# NOTE(review): this line regresses column 2 on column 1 -- the opposite of
# the stated task; the variant below uses column 1 as the response.
lm(scale(x[[2]]) ~ scale(x[[1]]), x)$coefficients
# vs -- alternative: standardise the whole data frame once, then regress
x <-scale(x)
lm(x[,1] ~ x[,2])$coefficients
# Task: normality.test takes a data frame of numeric variables, tests each
# variable's distribution for normality with shapiro.test, and returns a
# vector of the resulting p-values whose names match the variable names.
normality.test <- function(x){
sapply(x, function(y) { shapiro.test(y)$p.value })
}
# vs -- alternative: returns a named *list* of p-values (sapply over the
# htest objects simplifies to a matrix of components), not a numeric vector.
normality.test <- function(x){
return(sapply(x, FUN = shapiro.test)['p.value',])}
# Task: download the dataset attached to this step and fit a regression
# predicting DV from IV. Install the gvlma package and check whether the
# homoscedasticity assumption holds; the answer is the p-value of the
# heteroscedasticity test.
library(gvlma)
step7 <- read.csv("https://stepic.org/media/attachments/lesson/12088/homosc.csv", sep=',' )
step7_x <- lm(DV ~ IV, step7)
# Global validation of linear-model assumptions (incl. heteroscedasticity)
step7_x1 <- gvlma(step7_x)
summary(step7_x1)
# Task: resid.norm tests the distribution of a model's residuals for
# normality with shapiro.test and builds a ggplot() histogram with "red"
# fill when the residuals deviate significantly from normal (p < 0.05) and
# "green" fill otherwise.
resid.norm <- function(fit) {
res <- shapiro.test(fit$residuals)
# NOTE(review): `df` is effectively unused -- aes() below references the
# fit$residuals vector directly from the enclosing environment.
df <- data.frame(fit$residuals)
return (ggplot(df, aes(fit$residuals)) + geom_histogram(bins=30, fill=ifelse(res$p.value < 0.05, 'red', 'green')))
}
# vs -- alternative: same contract, plot data taken from fit$model
resid.norm <- function(fit) {
resid.norm.pv <- shapiro.test(fit$residuals)$p.value
plt <- ggplot(data.frame(fit$model), aes(x = fit$residuals)) +
geom_histogram(fill = ifelse(resid.norm.pv < 0.05, 'red', 'green'))
return(plt)}
# Another problem for regression models is multicollinearity: predictors
# that correlate strongly with one another. The correlation between two
# predictors can even reach 1, e.g. when they are the same quantity on
# different scales (x1 = height in metres, x2 = height in centimetres).
# Inspect with pairs() and compute all pairwise correlations with cor().
# Task: high.corr takes a dataset with any number of numeric variables and
# returns the names of the two variables with the largest absolute
# correlation coefficient.
high.corr <- function(x){
# Keep numeric columns only, so cor() does not fail on non-numeric input.
num_var <- sapply(x, function(x) is.numeric(x))
cor_mat <- cor(x[, num_var])
# Zero the diagonal so the trivial self-correlations are ignored.
diag(cor_mat) <- 0
u <- which(abs(cor_mat) == max(abs(cor_mat)), arr.ind = TRUE)
return(rownames(u))
}
# vs -- alternative: assumes ALL columns are numeric (cor() fails otherwise)
high.corr <- function(x){
cr <- cor(x)
diag(cr) <- 0
return(rownames(which(abs(cr)==max(abs(cr)),arr.ind=T)))}
library(XLConnect)
### Name: setFillForegroundColor-methods
### Title: Specifying the fill foreground color for cell styles
### Aliases: setFillForegroundColor setFillForegroundColor-methods
###   setFillForegroundColor,cellstyle,numeric-method
### Keywords: methods utilities

### ** Examples

# Auto-extracted example from the XLConnect documentation: creates a
# workbook with one custom cell style (cornflower-blue background,
# yellow foreground, big-spots fill pattern), applies it to cell A1 of a
# fresh worksheet, and writes the file to the working directory.
# Load workbook (create if not existing)
wb <- loadWorkbook("setFillForegroundColor.xlsx", create = TRUE)
# Create a worksheet
createSheet(wb, name = "cellstyles")
# Create a custom anonymous cell style
cs <- createCellStyle(wb)
# Specify the fill background color for the cell style created above
setFillBackgroundColor(cs, color = XLC$"COLOR.CORNFLOWER_BLUE")
# Specify the fill foreground color
setFillForegroundColor(cs, color = XLC$"COLOR.YELLOW")
# Specify the fill pattern (foreground/background colors only show through
# a non-solid pattern)
setFillPattern(cs, fill = XLC$"FILL.BIG_SPOTS")
# Set the cell style created above for the top left cell (A1) in the
# 'cellstyles' worksheet
setCellStyle(wb, sheet = "cellstyles", row = 1, col = 1, cellstyle = cs)
# Save the workbook
saveWorkbook(wb)
| /data/genthat_extracted_code/XLConnect/examples/setFillForegroundColor-methods.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 1,039 | r | library(XLConnect)
### Name: setFillForegroundColor-methods
### Title: Specifying the fill foreground color for cell styles
### Aliases: setFillForegroundColor setFillForegroundColor-methods
###   setFillForegroundColor,cellstyle,numeric-method
### Keywords: methods utilities

### ** Examples

# NOTE: requires library(XLConnect) to have been loaded beforehand.
# Builds a workbook with one custom cell style applied to cell A1, then
# writes it to disk.
# Load workbook (create if not existing)
wb <- loadWorkbook("setFillForegroundColor.xlsx", create = TRUE)
# Create a worksheet
createSheet(wb, name = "cellstyles")
# Create a custom anonymous cell style
cs <- createCellStyle(wb)
# Specify the fill background color for the cell style created above
setFillBackgroundColor(cs, color = XLC$"COLOR.CORNFLOWER_BLUE")
# Specify the fill foreground color
setFillForegroundColor(cs, color = XLC$"COLOR.YELLOW")
# Specify the fill pattern
setFillPattern(cs, fill = XLC$"FILL.BIG_SPOTS")
# Set the cell style created above for the top left cell (A1) in the
# 'cellstyles' worksheet
setCellStyle(wb, sheet = "cellstyles", row = 1, col = 1, cellstyle = cs)
# Save the workbook
saveWorkbook(wb)
|
### obtained from qmedist function
# Stochastic (Gillespie-style) cholera transmission model: setup of the
# antibiotic-modified recovery rates and the state/parameter name vectors
# used by the transition machinery below.
set.seed(1)
# Distribution parameters for the antibiotic effect on the infectious
# period (per the header note, obtained via a quantile-matching fit).
pars.abx = c(2.7350040,0.1709258)
# Baseline recovery rate of asymptomatic infection.
gammaA = 0.1964637
# Antibiotic-boosted recovery rates, one draw per simulation replicate:
# the mean infectious period 1/gammaA is shortened by 1.1 * N(mu, sd).
gammaAbx = 1/(1/gammaA - 1.1*rnorm(5e3,pars.abx[1],pars.abx[2]))
# The 33 state variables, in the order used by every stoichiometry vector
# below: PK = index case(s); S/E/I/R = susceptible/latent/infectious/
# recovered, split into asymptomatic (a) vs symptomatic (s) tracks and
# subpopulations 1a, 1b and 2; Y*/Ya* = cumulative case counters; Ic* =
# convalescent; B1, B2 = vibrio levels in two water compartments.
# (Interpretation inferred from the rate equations in onestep() --
# confirm against the accompanying publication.)
varnames = c('PK',
'S1a','S1b','S2',
'Ea1a','Ea1b','Ea2',
'Es1a','Es1b','Es2',
'Ia1a','Ia1b','Ia2',
'Is1a','Is1b','Is2',
'Y1a','Y1b','Y2',
'Ya1a','Ya1b','Ya2',
'Ic1a','Ic1b','Ic2',
'Ra1a','Ra1b','Ra2',
'Rs1a','Rs1b','Rs2',
'B1','B2'
)
# Parameter names; every `pars` vector passed to onestep() must follow
# exactly this order.
parnames = c('delta','gammaA','gammaD','gammaC','eta','xi','kappa','zeta','N','Nart','Noth','mu1','mu2','nu',
'pi','betaW','betaL','k','omega','popA')
# Relative shedding/transmission weight of severe vs mild cases.
nu=8.58
# Scaling constant applied to the water-compartment transitions
# (presumably a vaccination-coverage fraction -- confirm).
vaxd = (9.9*10^5)/(5.1*10^7)
# Each vector below is a length-33 stoichiometry (state-change) vector,
# aligned with `varnames`; onestep() prepends the elapsed time when
# applying one of them to the state.
######## INFECTIONS: -1 Suscept, +1 latent
#### infections by Water (Asx and Sx)
infectW.A = rep(0,33); infectW.A[which(varnames=='S1a')] = -1; infectW.A[which(varnames=='Ea1a')] = 1
infectW.S = rep(0,33); infectW.S[which(varnames=='S1a')] = -1; infectW.S[which(varnames=='Es1a')] = 1
#### infections by Local (Asx and Sx, pops 1a, 1b, 2)
infectL.A.1a = rep(0,33); infectL.A.1a[which(varnames=='S1a')] = -1; infectL.A.1a[which(varnames=='Ea1a')] = 1
infectL.S.1a = rep(0,33); infectL.S.1a[which(varnames=='S1a')] = -1; infectL.S.1a[which(varnames=='Es1a')] = 1
infectL.A.1b = rep(0,33); infectL.A.1b[which(varnames=='S1b')] = -1; infectL.A.1b[which(varnames=='Ea1b')] = 1
infectL.S.1b = rep(0,33); infectL.S.1b[which(varnames=='S1b')] = -1; infectL.S.1b[which(varnames=='Es1b')] = 1
infectL.A.2 = rep(0,33); infectL.A.2[which(varnames=='S2')] = -1; infectL.A.2[which(varnames=='Ea2')] = 1
infectL.S.2 = rep(0,33); infectL.S.2[which(varnames=='S2')] = -1; infectL.S.2[which(varnames=='Es2')] = 1
#### migration (movement between subpopulations 1a and 1b, per compartment)
mig.S.1a = rep(0,33); mig.S.1a[which(varnames=='S1a')] = -1; mig.S.1a[which(varnames=='S1b')] = 1
mig.S.1b = rep(0,33); mig.S.1b[which(varnames=='S1b')] = -1; mig.S.1b[which(varnames=='S1a')] = 1
mig.Ea.1a = rep(0,33); mig.Ea.1a[which(varnames=='Ea1a')] = -1; mig.Ea.1a[which(varnames=='Ea1b')] = 1
mig.Ea.1b = rep(0,33); mig.Ea.1b[which(varnames=='Ea1b')] = -1; mig.Ea.1b[which(varnames=='Ea1a')] = 1
mig.Es.1a = rep(0,33); mig.Es.1a[which(varnames=='Es1a')] = -1; mig.Es.1a[which(varnames=='Es1b')] = 1
mig.Es.1b = rep(0,33); mig.Es.1b[which(varnames=='Es1b')] = -1; mig.Es.1b[which(varnames=='Es1a')] = 1
mig.Ia.1a = rep(0,33); mig.Ia.1a[which(varnames=='Ia1a')] = -1; mig.Ia.1a[which(varnames=='Ia1b')] = 1
mig.Ia.1b = rep(0,33); mig.Ia.1b[which(varnames=='Ia1b')] = -1; mig.Ia.1b[which(varnames=='Ia1a')] = 1
mig.Ic.1a = rep(0,33); mig.Ic.1a[which(varnames=='Ic1a')] = -1; mig.Ic.1a[which(varnames=='Ic1b')] = 1
mig.Ic.1b = rep(0,33); mig.Ic.1b[which(varnames=='Ic1b')] = -1; mig.Ic.1b[which(varnames=='Ic1a')] = 1
######## PROGRESSION: -1 latent, +1 infectious; +1 obs if severe
# Y*/Ya* columns are pure counters: incremented on progression, never
# decremented, so they record cumulative (observed) cases.
prog.A.1a = rep(0,33); prog.A.1a[which(varnames=='Ea1a')] = -1; prog.A.1a[which(varnames=='Ia1a')] = 1; prog.A.1a[which(varnames=='Ya1a')] = 1
prog.S.1a = rep(0,33); prog.S.1a[which(varnames=='Es1a')] = -1; prog.S.1a[which(varnames=='Is1a')] = 1; prog.S.1a[which(varnames=='Y1a')] = 1
prog.A.1b = rep(0,33); prog.A.1b[which(varnames=='Ea1b')] = -1; prog.A.1b[which(varnames=='Ia1b')] = 1; prog.A.1b[which(varnames=='Ya1b')] = 1
prog.S.1b = rep(0,33); prog.S.1b[which(varnames=='Es1b')] = -1; prog.S.1b[which(varnames=='Is1b')] = 1; prog.S.1b[which(varnames=='Y1b')] = 1
prog.A.2 = rep(0,33); prog.A.2[which(varnames=='Ea2')] = -1; prog.A.2[which(varnames=='Ia2')] = 1; prog.A.2[which(varnames=='Ya2')] = 1
prog.S.2 = rep(0,33); prog.S.2[which(varnames=='Es2')] = -1; prog.S.2[which(varnames=='Is2')] = 1; prog.S.2[which(varnames=='Y2')] = 1
######## CONVALESCENCE: -1 severe, +1 convalesc
conval.1a = rep(0,33); conval.1a[which(varnames=='Is1a')] = -1; conval.1a[which(varnames=='Ic1a')] = 1
conval.1b = rep(0,33); conval.1b[which(varnames=='Is1b')] = -1; conval.1b[which(varnames=='Ic1b')] = 1
conval.2 = rep(0,33); conval.2[which(varnames=='Is2')] = -1; conval.2[which(varnames=='Ic2')] = 1
######## MORTALITY: -1 severe
mortal.1a = rep(0,33); mortal.1a[which(varnames=='Is1a')] = -1
mortal.1b = rep(0,33); mortal.1b[which(varnames=='Is1b')] = -1
mortal.2 = rep(0,33); mortal.2[which(varnames=='Is2')] = -1
######## RECOVERY: -1 infectious (Asx and Cx); +1 recovered (Asx and Sx)
recov.A.1a = rep(0,33); recov.A.1a[which(varnames=='Ia1a')] = -1; recov.A.1a[which(varnames=='Ra1a')] = 1
recov.S.1a = rep(0,33); recov.S.1a[which(varnames=='Ic1a')] = -1; recov.S.1a[which(varnames=='Rs1a')] = 1
recov.A.1b = rep(0,33); recov.A.1b[which(varnames=='Ia1b')] = -1; recov.A.1b[which(varnames=='Ra1b')] = 1
recov.S.1b = rep(0,33); recov.S.1b[which(varnames=='Ic1b')] = -1; recov.S.1b[which(varnames=='Rs1b')] = 1
recov.A.2 = rep(0,33); recov.A.2[which(varnames=='Ia2')] = -1; recov.A.2[which(varnames=='Ra2')] = 1
recov.S.2 = rep(0,33); recov.S.2[which(varnames=='Ic2')] = -1; recov.S.2[which(varnames=='Rs2')] = 1
######## PK RECOVERIES: -1 PK
recov.PK = rep(0,33); recov.PK[which(varnames=='PK')] = -1
######## SHED: +1 B1  (contamination of the first water compartment)
shed.PK = rep(0,33); shed.PK[which(varnames=='B1')] = vaxd
shed.A = rep(0,33); shed.A[which(varnames=='B1')] = 1
shed.S = rep(0,33); shed.S[which(varnames=='B1')] = nu ### shed nu vibrios relative to Asx and Conval
shed.C = rep(0,33); shed.C[which(varnames=='B1')] = 1
# Vibrio transfer from compartment B1 to B2, and decay out of B2.
trans = rep(0,33); trans[which(varnames=='B1')] = -vaxd; trans[which(varnames=='B2')] = vaxd
die = rep(0,33); die[which(varnames=='B2')] = -vaxd
# Stack the stoichiometry vectors into the event-by-state transition matrix:
# one row per event type, columns aligned with `varnames`. The row order
# here MUST match the order of the `rates` vector built inside onestep(),
# since events are sampled by index.
transitions = matrix(c(
infectW.A,
infectW.S,
infectL.A.1a,
infectL.S.1a,
infectL.A.1b,
infectL.S.1b,
infectL.A.2,
infectL.S.2,
mig.S.1a,
mig.S.1b,
mig.Ea.1a,
mig.Ea.1b,
mig.Es.1a,
mig.Es.1b,
mig.Ia.1a,
mig.Ia.1b,
mig.Ic.1a,
mig.Ic.1b,
prog.A.1a,
prog.S.1a,
prog.A.1b,
prog.S.1b,
prog.A.2,
prog.S.2,
conval.1a,
conval.1b,
conval.2,
mortal.1a,
mortal.1b,
mortal.2,
recov.A.1a,
recov.S.1a,
recov.A.1b,
recov.S.1b,
recov.A.2,
recov.S.2,
recov.PK,
shed.PK,
shed.A,
shed.S,
shed.C,
trans,
die),byrow=T,ncol=33)
# One Gillespie step. `x` = c(current time, 33 state values in `varnames`
# order), `pars` follows `parnames`. Draws the waiting time to the next
# event (exponential in the total rate), samples an event proportionally to
# its rate, and applies that event's stoichiometry row. Returns the updated
# c(time, states) vector, or the string 'no transmission' when all rates
# are zero (rexp then yields NaN).
onestep = function(x,pars){
# Clamp any (numerically) negative states to zero before computing rates.
for (z in 1:length(x)){
x[z] = max(0,x[z])
}
# Bind state values to their names (x[1] is time, so states start at x[2]).
for (i in 1:33){
assign(varnames[i],x[i+1])
}
# Bind parameter values to their names likewise.
for (h in 1:length(parnames)){
assign(parnames[h],pars[h])
}
# Severe cases contribute r times as much to local transmission.
r = 1+log10(nu)
# Force of infection from water (saturating in vibrio load)...
lambdaW = betaW*(eta*B1+B2)/(betaW*(eta*B1+B2) + kappa)
# ...and from local person-to-person contact (log-saturating).
lambdaL = k*log(1 + betaL*(Ia1a + Ia1b + Ia2 + r*(Is1a + Is1b + Is2) + Ic1a + Ic1b + Ic2)/k)/(Nart+Noth)
# Event rates. The order MUST match the rows of `transitions`.
# NOTE(review): gammaAX (used for recov.PK) is NOT in parnames -- it is
# read from the global environment, set per replicate by the driver loop.
rates = c(
infectW.A = lambdaW*(1-lambdaW)*S1a,
infectW.S = (lambdaW^2)*S1a,
infectL.A.1a = (1-pi)*lambdaL*S1a,
infectL.S.1a = pi*lambdaL*S1a,
infectL.A.1b = (1-pi)*lambdaL*S1b,
infectL.S.1b = pi*lambdaL*S1b,
infectL.A.2 = (1-pi)*lambdaL*S2,
infectL.S.2 = pi*lambdaL*S2,
mig.S.1a = 0,#omega*(1-popA)*S1a,
mig.S.1b = 0,#omega*popA*S1b,
mig.Ea.1a = omega*(1-popA)*Ea1a,
mig.Ea.1b = omega*popA*Ea1b,
mig.Es.1a = omega*(1-popA)*Es1a,
mig.Es.1b = omega*popA*Es1b,
mig.Ia.1a = omega*(1-popA)*Ia1a,
mig.Ia.1b = omega*popA*Ia1b,
mig.Ic.1a = omega*(1-popA)*Ic1a,
mig.Ic.1b = omega*popA*Ic1b,
prog.A.1a = delta*Ea1a,
prog.S.1a = delta*Es1a,
prog.A.1b = delta*Ea1b,
prog.S.1b = delta*Es1b,
prog.A.2 = delta*Ea2,
prog.S.2 = delta*Es2,
conval.1a = gammaD*Is1a, ### (1 - zeta)*gammaD/(1-zeta) ### probability of recovery times exit rate
conval.1b = gammaD*Is1b,
conval.2 = gammaD*Is2,
mortal.1a = zeta*gammaD*Is1a/(1-zeta), ## zeta*(gammaD/(1-zeta)) ### probability of death times exit rate
mortal.1b = zeta*gammaD*Is1b/(1-zeta),
mortal.2 = zeta*gammaD*Is2/(1-zeta),
recov.A.1a = gammaA*Ia1a,
recov.S.1a = gammaC*Ic1a,
recov.A.1b = gammaA*Ia1b,
recov.S.1b = gammaC*Ic1b,
recov.A.2 = gammaA*Ia2,
recov.S.2 = gammaC*Ic2,
recov.PK = gammaAX*PK,
shed.PK = PK,
shed.A = Ia1a,
shed.S = Is1a,
shed.C = Ic1a,
trans = mu1*B1/vaxd,
die = mu2*B2/vaxd
)
# Once no one is latently or actively infected anywhere, switch migration
# off so the chain can go extinct instead of endlessly shuffling people.
if ((Ia1a==0)&(Ia1b==0)&(Ea1a==0)&(Ea1b==0)&(Is1a==0)&(Is1b==0)&(Es1a==0)&(Es1b==0)&(Ea2==0)&(Es2==0)&(Ia2==0)&(Is2==0)){
rates['mig.S.1a'] = rates['mig.S.1b'] = 0
rates['mig.Ea.1a'] = rates['mig.Ea.1b'] = 0
rates['mig.Es.1a'] = rates['mig.Es.1b'] = 0
rates['mig.Ia.1a'] = rates['mig.Ia.1b'] = 0
rates['mig.Ic.1a'] = rates['mig.Ic.1b'] = 0
}
tot.rate = sum(rates)
# Exponential waiting time; NaN when tot.rate == 0 (nothing can happen).
tau = rexp(n=1,rate=tot.rate)
if (is.na(tau)){
return('no transmission')
} else{
# Sample the event proportionally to its rate and apply its state change.
event = sample(1:length(rates),size=1,prob=rates/tot.rate)
return(x+c(tau,transitions[event,]))
}
}
# Run the Gillespie simulation from initial state `x` (c(time, states))
# with parameters `params`, until one of:
#  * a symptomatic case is observed (any Y counter > 0): returns
#    c(time of first observed case, number of steps taken);
#  * the epidemic dies out, or exceeds `tmax` with the index chain spent:
#    returns c(9999, steps) as an "extinction" sentinel;
#  * `maxstep` events have occurred: returns c(9999, maxstep-ish).
simul.fn = function(x,params,maxstep,tmax){
names(x) = c('time',varnames)
j = 0
while (j<=maxstep){
if (j>1){
# Past tmax with no index case and no active chain in population 1a:
# declare extinction.
if ((x['time']>tmax)&(y['PK']==0)&(y['Ea1a']==0)&(y['Ia1a']==0)&(y['Es1a']==0)){
return(c(9999,j))
}
}
j = j+1
y = onestep(x,params)
names(y) = c('time',varnames)
# onestep() signals a zero total rate with this sentinel string.
if (y[1]=='no transmission'){
return(c(9999,j))
}
# All 18 infection-related states (index case, water compartments, and
# every latent/infectious/convalescent class) empty => extinction.
if (sum(c(y['PK']==0),(y['B1']==0),(y['B2']==0),
(y['Ea1a']==0),(y['Es1a']==0),(y['Ia1a']==0),(y['Is1a']==0),(y['Ic1a']==0),
(y['Ea1b']==0),(y['Es1b']==0),(y['Ia1b']==0),(y['Is1b']==0),(y['Ic1b']==0),
(y['Ea2']==0),(y['Es2']==0),(y['Ia2']==0),(y['Is2']==0),(y['Ic2']==0))==18){
return(c(9999,j))
}
# First observed symptomatic case: stop and report the event time.
if (sum(c(y['Y1a'],y['Y1b'],y['Y2'])>0)){
return(c(y['time'],j))
}
x = y
names(x) = c('time',varnames)
}
return(c(9999,j))
}
# Driver: 5000 replicate simulations, each with the model parameters drawn
# from the MCMC posterior samples in chol3.mcmc.Rdata, recording the time
# to the first observed case (9999 = extinction) and the step count.
set.seed(30102)
load(file='~/chol3.mcmc.Rdata')   # provides `state3` (posterior sample array)
# Fixed natural-history / population parameters; order matches parnames[1:14].
pars = c(1/1.55,1/5.09,1/3.32,1/1.77,100,1,0.1,0.025,9923243,879644,9043599,1,1/30,8.58)
nsims = 5e3
out = matrix(NA,nsims,2)
for (l in 1:nsims){
# Append a random post-burn-in posterior draw (random chain) to the fixed block.
par = c(pars,state3[sample(2001:25000,1),,sample(1:3,1)])
for (z in 1:length(parnames)){
assign(parnames[z],par[z])
}
# NOTE(review): gammaAX is set as a GLOBAL and read inside onestep()'s rate
# calculation rather than being passed via `par` -- confirm intentional.
gammaAX = gammaAbx[l]
# Initial state: 3 index cases; susceptibles split by popA; all else zero.
init = c(3,popA*Nart,(1-popA)*Nart,Noth,rep(0,29))
out[l,] = simul.fn(x=c(0,init),params=par,maxstep=5e4,tmax=100)
# Progress indicator every 10 replicates.
if ((l/1e1)==ceiling(l/1e1)){
print(l)
}
}
abx10.vax.chol3sim = out
save(abx10.vax.chol3sim,file='abx10.vax.chol3sim.Rdata')
| /code/Simulations/vaxabx/abxboost/vax1/abx10.vax.chol3sim.R | no_license | joelewnard/choleraHaiti | R | false | false | 10,115 | r | ### obtained from qmedist function
set.seed(1)
pars.abx = c(2.7350040,0.1709258)
gammaA = 0.1964637
gammaAbx = 1/(1/gammaA - 1.1*rnorm(5e3,pars.abx[1],pars.abx[2]))
varnames = c('PK',
'S1a','S1b','S2',
'Ea1a','Ea1b','Ea2',
'Es1a','Es1b','Es2',
'Ia1a','Ia1b','Ia2',
'Is1a','Is1b','Is2',
'Y1a','Y1b','Y2',
'Ya1a','Ya1b','Ya2',
'Ic1a','Ic1b','Ic2',
'Ra1a','Ra1b','Ra2',
'Rs1a','Rs1b','Rs2',
'B1','B2'
)
parnames = c('delta','gammaA','gammaD','gammaC','eta','xi','kappa','zeta','N','Nart','Noth','mu1','mu2','nu',
'pi','betaW','betaL','k','omega','popA')
nu=8.58
vaxd = (9.9*10^5)/(5.1*10^7)
######## INFECTIONS: -1 Suscept, +1 latent
#### infections by Water (Asx and Sx)
infectW.A = rep(0,33); infectW.A[which(varnames=='S1a')] = -1; infectW.A[which(varnames=='Ea1a')] = 1
infectW.S = rep(0,33); infectW.S[which(varnames=='S1a')] = -1; infectW.S[which(varnames=='Es1a')] = 1
#### infections by Local (Asx and Sx, pops 1a, 1b, 2)
infectL.A.1a = rep(0,33); infectL.A.1a[which(varnames=='S1a')] = -1; infectL.A.1a[which(varnames=='Ea1a')] = 1
infectL.S.1a = rep(0,33); infectL.S.1a[which(varnames=='S1a')] = -1; infectL.S.1a[which(varnames=='Es1a')] = 1
infectL.A.1b = rep(0,33); infectL.A.1b[which(varnames=='S1b')] = -1; infectL.A.1b[which(varnames=='Ea1b')] = 1
infectL.S.1b = rep(0,33); infectL.S.1b[which(varnames=='S1b')] = -1; infectL.S.1b[which(varnames=='Es1b')] = 1
infectL.A.2 = rep(0,33); infectL.A.2[which(varnames=='S2')] = -1; infectL.A.2[which(varnames=='Ea2')] = 1
infectL.S.2 = rep(0,33); infectL.S.2[which(varnames=='S2')] = -1; infectL.S.2[which(varnames=='Es2')] = 1
#### migration
mig.S.1a = rep(0,33); mig.S.1a[which(varnames=='S1a')] = -1; mig.S.1a[which(varnames=='S1b')] = 1
mig.S.1b = rep(0,33); mig.S.1b[which(varnames=='S1b')] = -1; mig.S.1b[which(varnames=='S1a')] = 1
mig.Ea.1a = rep(0,33); mig.Ea.1a[which(varnames=='Ea1a')] = -1; mig.Ea.1a[which(varnames=='Ea1b')] = 1
mig.Ea.1b = rep(0,33); mig.Ea.1b[which(varnames=='Ea1b')] = -1; mig.Ea.1b[which(varnames=='Ea1a')] = 1
mig.Es.1a = rep(0,33); mig.Es.1a[which(varnames=='Es1a')] = -1; mig.Es.1a[which(varnames=='Es1b')] = 1
mig.Es.1b = rep(0,33); mig.Es.1b[which(varnames=='Es1b')] = -1; mig.Es.1b[which(varnames=='Es1a')] = 1
mig.Ia.1a = rep(0,33); mig.Ia.1a[which(varnames=='Ia1a')] = -1; mig.Ia.1a[which(varnames=='Ia1b')] = 1
mig.Ia.1b = rep(0,33); mig.Ia.1b[which(varnames=='Ia1b')] = -1; mig.Ia.1b[which(varnames=='Ia1a')] = 1
mig.Ic.1a = rep(0,33); mig.Ic.1a[which(varnames=='Ic1a')] = -1; mig.Ic.1a[which(varnames=='Ic1b')] = 1
mig.Ic.1b = rep(0,33); mig.Ic.1b[which(varnames=='Ic1b')] = -1; mig.Ic.1b[which(varnames=='Ic1a')] = 1
######## PROGRESSION: -1 latent, +1 infectious; +1 obs if severe
prog.A.1a = rep(0,33); prog.A.1a[which(varnames=='Ea1a')] = -1; prog.A.1a[which(varnames=='Ia1a')] = 1; prog.A.1a[which(varnames=='Ya1a')] = 1
prog.S.1a = rep(0,33); prog.S.1a[which(varnames=='Es1a')] = -1; prog.S.1a[which(varnames=='Is1a')] = 1; prog.S.1a[which(varnames=='Y1a')] = 1
prog.A.1b = rep(0,33); prog.A.1b[which(varnames=='Ea1b')] = -1; prog.A.1b[which(varnames=='Ia1b')] = 1; prog.A.1b[which(varnames=='Ya1b')] = 1
prog.S.1b = rep(0,33); prog.S.1b[which(varnames=='Es1b')] = -1; prog.S.1b[which(varnames=='Is1b')] = 1; prog.S.1b[which(varnames=='Y1b')] = 1
prog.A.2 = rep(0,33); prog.A.2[which(varnames=='Ea2')] = -1; prog.A.2[which(varnames=='Ia2')] = 1; prog.A.2[which(varnames=='Ya2')] = 1
prog.S.2 = rep(0,33); prog.S.2[which(varnames=='Es2')] = -1; prog.S.2[which(varnames=='Is2')] = 1; prog.S.2[which(varnames=='Y2')] = 1
######## CONVALESCENCE: -1 severe, +1 convalesc
conval.1a = rep(0,33); conval.1a[which(varnames=='Is1a')] = -1; conval.1a[which(varnames=='Ic1a')] = 1
conval.1b = rep(0,33); conval.1b[which(varnames=='Is1b')] = -1; conval.1b[which(varnames=='Ic1b')] = 1
conval.2 = rep(0,33); conval.2[which(varnames=='Is2')] = -1; conval.2[which(varnames=='Ic2')] = 1
######## MORTALITY: -1 severe
mortal.1a = rep(0,33); mortal.1a[which(varnames=='Is1a')] = -1
mortal.1b = rep(0,33); mortal.1b[which(varnames=='Is1b')] = -1
mortal.2 = rep(0,33); mortal.2[which(varnames=='Is2')] = -1
######## RECOVERY: -1 infectious (Asx and Cx); +1 recovered (Asx and Sx)
recov.A.1a = rep(0,33); recov.A.1a[which(varnames=='Ia1a')] = -1; recov.A.1a[which(varnames=='Ra1a')] = 1
recov.S.1a = rep(0,33); recov.S.1a[which(varnames=='Ic1a')] = -1; recov.S.1a[which(varnames=='Rs1a')] = 1
recov.A.1b = rep(0,33); recov.A.1b[which(varnames=='Ia1b')] = -1; recov.A.1b[which(varnames=='Ra1b')] = 1
recov.S.1b = rep(0,33); recov.S.1b[which(varnames=='Ic1b')] = -1; recov.S.1b[which(varnames=='Rs1b')] = 1
recov.A.2 = rep(0,33); recov.A.2[which(varnames=='Ia2')] = -1; recov.A.2[which(varnames=='Ra2')] = 1
recov.S.2 = rep(0,33); recov.S.2[which(varnames=='Ic2')] = -1; recov.S.2[which(varnames=='Rs2')] = 1
######## PK RECOVERIES: -1 PK
recov.PK = rep(0,33); recov.PK[which(varnames=='PK')] = -1
######## SHED: +1 B1
shed.PK = rep(0,33); shed.PK[which(varnames=='B1')] = vaxd
shed.A = rep(0,33); shed.A[which(varnames=='B1')] = 1
shed.S = rep(0,33); shed.S[which(varnames=='B1')] = nu ### shed nu vibrios relative to Asx and Conval
shed.C = rep(0,33); shed.C[which(varnames=='B1')] = 1
trans = rep(0,33); trans[which(varnames=='B1')] = -vaxd; trans[which(varnames=='B2')] = vaxd
die = rep(0,33); die[which(varnames=='B2')] = -vaxd
transitions = matrix(c(
infectW.A,
infectW.S,
infectL.A.1a,
infectL.S.1a,
infectL.A.1b,
infectL.S.1b,
infectL.A.2,
infectL.S.2,
mig.S.1a,
mig.S.1b,
mig.Ea.1a,
mig.Ea.1b,
mig.Es.1a,
mig.Es.1b,
mig.Ia.1a,
mig.Ia.1b,
mig.Ic.1a,
mig.Ic.1b,
prog.A.1a,
prog.S.1a,
prog.A.1b,
prog.S.1b,
prog.A.2,
prog.S.2,
conval.1a,
conval.1b,
conval.2,
mortal.1a,
mortal.1b,
mortal.2,
recov.A.1a,
recov.S.1a,
recov.A.1b,
recov.S.1b,
recov.A.2,
recov.S.2,
recov.PK,
shed.PK,
shed.A,
shed.S,
shed.C,
trans,
die),byrow=T,ncol=33)
onestep = function(x,pars){
for (z in 1:length(x)){
x[z] = max(0,x[z])
}
for (i in 1:33){
assign(varnames[i],x[i+1])
}
for (h in 1:length(parnames)){
assign(parnames[h],pars[h])
}
r = 1+log10(nu)
lambdaW = betaW*(eta*B1+B2)/(betaW*(eta*B1+B2) + kappa)
lambdaL = k*log(1 + betaL*(Ia1a + Ia1b + Ia2 + r*(Is1a + Is1b + Is2) + Ic1a + Ic1b + Ic2)/k)/(Nart+Noth)
rates = c(
infectW.A = lambdaW*(1-lambdaW)*S1a,
infectW.S = (lambdaW^2)*S1a,
infectL.A.1a = (1-pi)*lambdaL*S1a,
infectL.S.1a = pi*lambdaL*S1a,
infectL.A.1b = (1-pi)*lambdaL*S1b,
infectL.S.1b = pi*lambdaL*S1b,
infectL.A.2 = (1-pi)*lambdaL*S2,
infectL.S.2 = pi*lambdaL*S2,
mig.S.1a = 0,#omega*(1-popA)*S1a,
mig.S.1b = 0,#omega*popA*S1b,
mig.Ea.1a = omega*(1-popA)*Ea1a,
mig.Ea.1b = omega*popA*Ea1b,
mig.Es.1a = omega*(1-popA)*Es1a,
mig.Es.1b = omega*popA*Es1b,
mig.Ia.1a = omega*(1-popA)*Ia1a,
mig.Ia.1b = omega*popA*Ia1b,
mig.Ic.1a = omega*(1-popA)*Ic1a,
mig.Ic.1b = omega*popA*Ic1b,
prog.A.1a = delta*Ea1a,
prog.S.1a = delta*Es1a,
prog.A.1b = delta*Ea1b,
prog.S.1b = delta*Es1b,
prog.A.2 = delta*Ea2,
prog.S.2 = delta*Es2,
conval.1a = gammaD*Is1a, ### (1 - zeta)*gammaD/(1-zeta) ### probability of recovery times exit rate
conval.1b = gammaD*Is1b,
conval.2 = gammaD*Is2,
mortal.1a = zeta*gammaD*Is1a/(1-zeta), ## zeta*(gammaD/(1-zeta)) ### probability of death times exit rate
mortal.1b = zeta*gammaD*Is1b/(1-zeta),
mortal.2 = zeta*gammaD*Is2/(1-zeta),
recov.A.1a = gammaA*Ia1a,
recov.S.1a = gammaC*Ic1a,
recov.A.1b = gammaA*Ia1b,
recov.S.1b = gammaC*Ic1b,
recov.A.2 = gammaA*Ia2,
recov.S.2 = gammaC*Ic2,
recov.PK = gammaAX*PK,
shed.PK = PK,
shed.A = Ia1a,
shed.S = Is1a,
shed.C = Ic1a,
trans = mu1*B1/vaxd,
die = mu2*B2/vaxd
)
if ((Ia1a==0)&(Ia1b==0)&(Ea1a==0)&(Ea1b==0)&(Is1a==0)&(Is1b==0)&(Es1a==0)&(Es1b==0)&(Ea2==0)&(Es2==0)&(Ia2==0)&(Is2==0)){
rates['mig.S.1a'] = rates['mig.S.1b'] = 0
rates['mig.Ea.1a'] = rates['mig.Ea.1b'] = 0
rates['mig.Es.1a'] = rates['mig.Es.1b'] = 0
rates['mig.Ia.1a'] = rates['mig.Ia.1b'] = 0
rates['mig.Ic.1a'] = rates['mig.Ic.1b'] = 0
}
tot.rate = sum(rates)
tau = rexp(n=1,rate=tot.rate)
if (is.na(tau)){
return('no transmission')
} else{
event = sample(1:length(rates),size=1,prob=rates/tot.rate)
return(x+c(tau,transitions[event,]))
}
}
simul.fn = function(x,params,maxstep,tmax){
names(x) = c('time',varnames)
j = 0
while (j<=maxstep){
if (j>1){
if ((x['time']>tmax)&(y['PK']==0)&(y['Ea1a']==0)&(y['Ia1a']==0)&(y['Es1a']==0)){
return(c(9999,j))
}
}
j = j+1
y = onestep(x,params)
names(y) = c('time',varnames)
if (y[1]=='no transmission'){
return(c(9999,j))
}
if (sum(c(y['PK']==0),(y['B1']==0),(y['B2']==0),
(y['Ea1a']==0),(y['Es1a']==0),(y['Ia1a']==0),(y['Is1a']==0),(y['Ic1a']==0),
(y['Ea1b']==0),(y['Es1b']==0),(y['Ia1b']==0),(y['Is1b']==0),(y['Ic1b']==0),
(y['Ea2']==0),(y['Es2']==0),(y['Ia2']==0),(y['Is2']==0),(y['Ic2']==0))==18){
return(c(9999,j))
}
if (sum(c(y['Y1a'],y['Y1b'],y['Y2'])>0)){
return(c(y['time'],j))
}
x = y
names(x) = c('time',varnames)
}
return(c(9999,j))
}
set.seed(30102)
load(file='~/chol3.mcmc.Rdata')
pars = c(1/1.55,1/5.09,1/3.32,1/1.77,100,1,0.1,0.025,9923243,879644,9043599,1,1/30,8.58)
nsims = 5e3
out = matrix(NA,nsims,2)
for (l in 1:nsims){
par = c(pars,state3[sample(2001:25000,1),,sample(1:3,1)])
for (z in 1:length(parnames)){
assign(parnames[z],par[z])
}
gammaAX = gammaAbx[l]
init = c(3,popA*Nart,(1-popA)*Nart,Noth,rep(0,29))
out[l,] = simul.fn(x=c(0,init),params=par,maxstep=5e4,tmax=100)
if ((l/1e1)==ceiling(l/1e1)){
print(l)
}
}
abx10.vax.chol3sim = out
save(abx10.vax.chol3sim,file='abx10.vax.chol3sim.Rdata')
|
#' Calculate (or plot) cumulative effect for all time-points of the follow-up
#'
#' @inheritParams gg_partial
#' @param z1 The exposure profile for which to calculate the cumulative effect.
#' Can be either a single number or a vector of same length as unique observation
#' time points.
#' @param z2 If provided, calculated cumulative effect is for the difference
#' between the two exposure profiles (g(z1,t)-g(z2,t)).
#' @param se_mult Multiplicative factor used to calculate confidence intervals
#' (e.g., lower = fit - 2*se).
#' @return The piece-wise exponential data set augmented with columns
#' \code{cumu_eff}, \code{se_cumu_eff}, \code{cumu_eff_lower} and
#' \code{cumu_eff_upper}.
#' @export
get_cumu_eff <- function(data, model, term, z1, z2 = NULL, se_mult = 2) {
assert_class(data, "fped")
ped <- make_ped_dat(data, term, z1)
# Restrict coefficients and their posterior covariance to the columns
# belonging to `term`.
# NOTE(review): grep() may also match coefficients whose names merely
# contain `term` as a substring -- confirm term names are unambiguous.
coefs <- coef(model)
col_ind <- grep(term, names(coefs))
coefs <- coefs[col_ind]
Vp <- model$Vp[col_ind, col_ind]
# Rows of the linear-predictor (design) matrix evaluated at profile z1.
X <- predict(model, ped, type = "lpmatrix")[, col_ind]
if (!is.null(z2)) {
# Difference of design rows => cumulative effect of z1 relative to z2.
X2 <- predict(model, make_ped_dat(data, term, z2),
type = "lpmatrix")[, col_ind]
X <- X - X2
}
ped$cumu_eff <- drop(X %*% coefs)
# Pointwise SE: sqrt(diag(X %*% Vp %*% t(X))) without forming the full matrix.
ped$se_cumu_eff <- drop(sqrt(rowSums( (X %*% Vp) * X) ))
ped$cumu_eff_lower <- ped$cumu_eff - se_mult * ped$se_cumu_eff
ped$cumu_eff_upper <- ped$cumu_eff + se_mult * ped$se_cumu_eff
ped
}
#' Build a PED-style newdata frame for a single exposure profile.
#'
#' For each follow-up interval, constructs the lag-lead/quadrature-weight
#' matrix, the exposure-time (or latency) matrices and the exposure matrix
#' needed to evaluate the cumulative effect of `term` under profile `z_vec`.
#'
#' @keywords internal
make_ped_dat <- function(x, term, z_vec) {
  nfunc <- length(attr(x, "ll_funs"))
  ind_term <- get_term_ind(x, term)
  nz <- length(attr(x, "tz")[[ind_term]])
  tz_var <- attr(x, "tz_vars")[[ind_term]]
  tz <- attr(x, "tz")[[ind_term]]
  func <- attr(x, "func")[[ind_term]]
  ll_fun <- attr(x, "ll_funs")[[ind_term]]
  func_mat_names <- attr(x, "func_mat_names")[[ind_term]]
  LL_name <- grep("LL", func_mat_names, value = TRUE)
  tz_var_mat <- make_mat_names(tz_var, func$latency_var, func$tz_var,
    func$suffix, nfunc)
  q_weights <- attr(x, "ll_weights")[[ind_term]]
  # The profile must supply one value per exposure time point, or a single
  # value that is recycled across all of them.
  stopifnot(length(z_vec) == nz || length(z_vec) == 1)
  z_vec <- if (length(z_vec) == 1) {
    rep(z_vec, nz)
  } else {
    z_vec
  }
  ped_df <- make_newdata(x, tend = unique(.data$tend))
  # Lag-lead indicator times quadrature weights, one row per interval.
  ped_df[[LL_name]] <- outer(ped_df$tend, tz, FUN = ll_fun) * 1L *
    matrix(q_weights$ll_weight, nrow = nrow(ped_df), ncol = nz, byrow = TRUE)
  if (func$latency_var != "") {
    ped_df[[tz_var_mat]] <- outer(ped_df$tend, tz, FUN = "-")
    # BUG FIX: the masked latency matrix was computed but never assigned
    # back, leaving latencies outside the lag-lead window at their raw
    # values (compare the else branch, which does assign).
    ped_df[[tz_var_mat]] <- ped_df[[tz_var_mat]] * (ped_df[[LL_name]] != 0)
  } else {
    ped_df[[tz_var]] <- matrix(tz, nrow = nrow(ped_df), ncol = nz, byrow = TRUE)
    ped_df[[tz_var]] <- ped_df[[tz_var]] * (ped_df[[LL_name]] != 0)
  }
  # Exposure matrix: the requested profile, repeated for every interval.
  ped_df[[term]] <- matrix(z_vec, nrow = nrow(ped_df), ncol = nz, byrow = TRUE)
  # If the term also depends on follow-up time, add the matching time matrix.
  t_mat_var <- grep(attr(x, "time_var"), func_mat_names, value = TRUE)
  if (length(t_mat_var) != 0) {
    ped_df[[t_mat_var]] <- matrix(unique(x[[t_mat_var]][, 1]),
      nrow = nrow(ped_df), ncol = nz)
  }
  ped_df
}
# Index, within the functional-term attribute lists of `x`, of the term
# whose matrix-column names match `term`.
# NOTE(review): grepl-based matching may also hit terms whose names merely
# contain `term` as a substring -- confirm term names are unambiguous.
get_term_ind <- function(x, term) {
which(map_lgl(attr(x, "func_mat_names"), ~any(grepl(term, .x))))
}
| /R/cumulative-effect.R | permissive | adibender/pammtools | R | false | false | 2,919 | r | #' Calculate (or plot) cumulative effect for all time-points of the follow-up
#'
#' @inheritParams gg_partial
#' @param z1 The exposure profile for which to calculate the cumulative effect.
#' Can be either a single number or a vector of same length as unique observation
#' time points.
#' @param z2 If provided, calculated cumulative effect is for the difference
#' between the two exposure profiles (g(z1,t)-g(z2,t)).
#' @param se_mult Multiplicative factor used to calculate confidence intervals
#' (e.g., lower = fit - 2*se).
#' @return The PED data augmented with columns \code{cumu_eff},
#' \code{se_cumu_eff}, \code{cumu_eff_lower} and \code{cumu_eff_upper}.
#' @export
get_cumu_eff <- function(data, model, term, z1, z2 = NULL, se_mult = 2) {
assert_class(data, "fped")
ped <- make_ped_dat(data, term, z1)
# Coefficients and posterior covariance restricted to the columns of `term`.
coefs <- coef(model)
col_ind <- grep(term, names(coefs))
coefs <- coefs[col_ind]
Vp <- model$Vp[col_ind, col_ind]
X <- predict(model, ped, type = "lpmatrix")[, col_ind]
if (!is.null(z2)) {
# Difference of design rows => cumulative effect of z1 relative to z2.
X2 <- predict(model, make_ped_dat(data, term, z2),
type = "lpmatrix")[, col_ind]
X <- X - X2
}
ped$cumu_eff <- drop(X %*% coefs)
# Pointwise SE: sqrt(diag(X %*% Vp %*% t(X))) without the full matrix product.
ped$se_cumu_eff <- drop(sqrt(rowSums( (X %*% Vp) * X) ))
ped$cumu_eff_lower <- ped$cumu_eff - se_mult * ped$se_cumu_eff
ped$cumu_eff_upper <- ped$cumu_eff + se_mult * ped$se_cumu_eff
ped
}
#' Construct a PED data set suitable to compute the cumulative effect of an
#' exposure profile \code{z_vec} for the term \code{term}.
#' \code{z_vec} must be a scalar (recycled) or one value per exposure
#' time point.
#' @keywords internal
make_ped_dat <- function(x, term, z_vec) {
  nfunc    <- length(attr(x, "ll_funs"))
  ind_term <- get_term_ind(x, term)
  nz       <- length(attr(x, "tz")[[ind_term]])
  tz_var   <- attr(x, "tz_vars")[[ind_term]]
  tz       <- attr(x, "tz")[[ind_term]]
  func     <- attr(x, "func")[[ind_term]]
  ll_fun   <- attr(x, "ll_funs")[[ind_term]]
  func_mat_names <- attr(x, "func_mat_names")[[ind_term]]
  LL_name  <- grep("LL", func_mat_names, value = TRUE)
  tz_var_mat <- make_mat_names(tz_var, func$latency_var, func$tz_var,
    func$suffix, nfunc)
  q_weights <- attr(x, "ll_weights")[[ind_term]]
  # exposure profile must be scalar or one value per exposure time point
  stopifnot(length(z_vec) == nz || length(z_vec) == 1)
  z_vec <- if (length(z_vec) == 1) {
    rep(z_vec, nz)
  } else {
    z_vec
  }
  ped_df <- make_newdata(x, tend = unique(.data$tend))
  # lag-lead indicator times quadrature weights
  ped_df[[LL_name]] <- outer(ped_df$tend, tz, FUN = ll_fun) * 1L *
    matrix(q_weights$ll_weight, nrow = nrow(ped_df), ncol = nz, byrow = TRUE)
  if (func$latency_var != "") {
    ped_df[[tz_var_mat]] <- outer(ped_df$tend, tz, FUN = "-")
    # zero out latencies outside the lag-lead window
    # (bug fix: the product was previously computed but never assigned)
    ped_df[[tz_var_mat]] <- ped_df[[tz_var_mat]] * (ped_df[[LL_name]] != 0)
  } else {
    ped_df[[tz_var]] <- matrix(tz, nrow = nrow(ped_df), ncol = nz, byrow = TRUE)
    ped_df[[tz_var]] <- ped_df[[tz_var]] * (ped_df[[LL_name]] != 0)
  }
  # exposure profile, one row per interval, one column per exposure time
  ped_df[[term]] <- matrix(z_vec, nrow = nrow(ped_df), ncol = nz, byrow = TRUE)
  t_mat_var <- grep(attr(x, "time_var"), func_mat_names, value = TRUE)
  if (length(t_mat_var) != 0) {
    ped_df[[t_mat_var]] <- matrix(unique(x[[t_mat_var]][, 1]),
      nrow = nrow(ped_df), ncol = nz)
  }
  ped_df
}
# Index of the element in the "func_mat_names" attribute whose column
# names match `term` (used as a regular expression).
get_term_ind <- function(x, term) {
  has_term <- map_lgl(
    attr(x, "func_mat_names"),
    function(mat_names) any(grepl(term, mat_names))
  )
  which(has_term)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rtracklayer.R
\name{load_bw_set}
\alias{load_bw_set}
\title{LoadBigWigSet}
\usage{
load_bw_set(files, region)
}
\arguments{
\item{region}{named list with a chr, start, end and strand slot}
\item{files}{A data.frame (or named list) of files to load, with metadata and factors for grouping.}
}
\value{
A tidy data.frame with columns for chr start end strand and all levels specified in the files.
}
\description{
Loads a specified range from the specified bigwig files.
}
\details{
Loads the values from the region from the file(s) and collects everything in a tidy dataframe.
Files are a dataframe or named list containing at a minimum these slots: 'path' 'filename' and named category
levels
}
\examples{
path <- '/Users/schmidm/Documents/other_people_to_and_from/ClaudiaI/bw'
files <- create_bw_file_set(path, c('rep3', '.bw$'), c('_N20', '_3D12'), c('siRNA', 'ab', 'rep'), '_')
region <- list(chr='chr1', start=1000000, end=1001000, strand='+')
set <- load_bw_set(files, region)
ggplot(set, aes(x=starts, y=scores, color=rep)) + geom_line() + facet_grid(siRNA~ab)
}
| /man/load_bw_set.Rd | no_license | manschmi/RMetaTools | R | false | true | 1,127 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rtracklayer.R
\name{load_bw_set}
\alias{load_bw_set}
\title{LoadBigWigSet}
\usage{
load_bw_set(files, region)
}
\arguments{
\item{region}{named list with a chr, start, end and strand slot}
\item{files}{A data.frame (or named list) of files to load, with metadata and factors for grouping.}
}
\value{
A tidy data.frame with columns for chr start end strand and all levels specified in the files.
}
\description{
Loads a specified range from the specified bigwig files.
}
\details{
Loads the values from the region from the file(s) and collects everything in a tidy dataframe.
Files are a dataframe or named list containing at a minimum these slots: 'path' 'filename' and named category
levels
}
\examples{
path <- '/Users/schmidm/Documents/other_people_to_and_from/ClaudiaI/bw'
files <- create_bw_file_set(path, c('rep3', '.bw$'), c('_N20', '_3D12'), c('siRNA', 'ab', 'rep'), '_')
region <- list(chr='chr1', start=1000000, end=1001000, strand='+')
set <- load_bw_set(files, region)
ggplot(set, aes(x=starts, y=scores, color=rep)) + geom_line() + facet_grid(siRNA~ab)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AutoGeneratedDefinitions.R
\name{getIncidenceRateResults}
\alias{getIncidenceRateResults}
\title{Get results for a IncidenceRate Id.}
\usage{
getIncidenceRateResults(incidenceRateId, baseUrl)
}
\arguments{
\item{incidenceRateId}{An integer id representing the id that uniquely identifies an incidence rate analysis definition
in a WebApi instance.}
\item{baseUrl}{The base URL for the WebApi instance, for example:
"http://server.org:80/WebAPI".}
}
\value{
An R object with results.
}
\description{
Get results for a IncidenceRate Id.
}
\details{
Get the results for IncidenceRate id.
}
\examples{
\dontrun{
getIncidenceRateResults(incidenceRateId = 342, baseUrl = "http://server.org:80/WebAPI")
}
}
| /man/getIncidenceRateResults.Rd | permissive | OHDSI/ROhdsiWebApi | R | false | true | 778 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AutoGeneratedDefinitions.R
\name{getIncidenceRateResults}
\alias{getIncidenceRateResults}
\title{Get results for a IncidenceRate Id.}
\usage{
getIncidenceRateResults(incidenceRateId, baseUrl)
}
\arguments{
\item{incidenceRateId}{An integer id representing the id that uniquely identifies an incidence rate analysis definition
in a WebApi instance.}
\item{baseUrl}{The base URL for the WebApi instance, for example:
"http://server.org:80/WebAPI".}
}
\value{
An R object with results.
}
\description{
Get results for a IncidenceRate Id.
}
\details{
Get the results for IncidenceRate id.
}
\examples{
\dontrun{
getIncidenceRateResults(incidenceRateId = 342, baseUrl = "http://server.org:80/WebAPI")
}
}
|
# Generate a large right-skewed sample (scaled Beta(5, 1.5)) together with
# reference summary statistics, and dump everything under data/ for the
# t-digest accuracy tests. NOTE: no set.seed(), so every run produces a
# different sample (same as the original script).
samples <- rbeta(100000, 5, 1.5) * 100
# hist(samples)  # optional visual check
# probabilities at which reference percentiles are recorded
probs <- c(0.5, 1, 5, 10, 25, 50, 75, 90, 95, 99, 99.5) / 100
# 16 statistic names followed by the 16 matching values; written as a
# header row plus a value row (16 columns each)
sta <- c("PCTL0.5", "PCTL1", "PCTL5", "PCTL10", "PCTL25", "PCTL50",
         "PCTL75", "PCTL90", "PCTL95", "PCTL99", "PCTL99.5",
         "mean", "var", "stdev", "count", "sum",
         quantile(samples, probs),
         mean(samples), var(samples), sd(samples),
         length(samples), sum(samples))
# full sample, then the two 50k halves (for chunked-merge tests)
write(samples, file = "data/large-skew.dat", ncolumns = 1)
write(samples[1:50000], file = "data/large-skew-chunk1.dat", ncolumns = 1)
write(samples[50001:100000], file = "data/large-skew-chunk2.dat", ncolumns = 1)
write(sta, file = "data/large-skew.sta", ncolumns = 16)
# pre-sorted variants: ascending, then descending
write(sort(samples, decreasing = FALSE), file = "data/large-skew-asc.dat", ncolumns = 1)
write(sort(samples, decreasing = TRUE), file = "data/large-skew-desc.dat", ncolumns = 1)
| /r/large-skew.r | permissive | JimCooke/t-digest | R | false | false | 729 | r | u <- rbeta(100000,5,1.5)*100
#hist(u)
p <- c(0.5, 1, 5, 10, 25, 50, 75, 90, 95, 99, 99.5)/100
sta <- c("PCTL0.5","PCTL1","PCTL5","PCTL10","PCTL25","PCTL50",
"PCTL75","PCTL90","PCTL95","PCTL99","PCTL99.5",
"mean","var","stdev","count","sum",
quantile(u,p),
mean(u),var(u),sd(u),length(u),sum(u))
write(u, file="data/large-skew.dat", ncolumns=1)
write(u[1:50000], file="data/large-skew-chunk1.dat", ncolumns=1)
write(u[50001:100000], file="data/large-skew-chunk2.dat", ncolumns=1)
write(sta, file="data/large-skew.sta", ncolumns=16)
u<-sort(u,decreasing=FALSE)
write(u, file="data/large-skew-asc.dat", ncolumns=1)
u<-sort(u,decreasing=TRUE)
write(u, file="data/large-skew-desc.dat", ncolumns=1)
|
context("ANOVAs: replicating published results")

test_that("purely within ANOVA, return='univ': Maxell & Delaney (2004), Table 12.5 and 12.6, p. 578", {
  ### replicate results from Table 12.6
  data(md_12.1)
  # reference values from the published table:
  f <- c(40.72, 33.77, 45.31)
  ss_num <- c(289920, 285660, 105120)
  ss_error <- c(64080, 76140, 20880)
  num_df <- c(2, 1, 2)
  den_df <- c(18, 9, 18)
  # fit the same model through the three user interfaces
  md_ez_r <- aov_ez("id", "rt", md_12.1, within = c("angle", "noise"))
  md_car_r <- aov_car(rt ~ 1 + Error(id/angle*noise), md_12.1)
  md_aov_4_r <- aov_4(rt ~ 1 + (angle*noise|id), md_12.1)
  # expect_that()/is_equivalent_to() is deprecated in testthat;
  # expect_equivalent() performs the identical check
  expect_equivalent(md_ez_r, md_car_r)
  expect_equivalent(md_ez_r, md_aov_4_r)
  # F values, sums of squares and dfs must match the table
  expect_equivalent(round(md_ez_r$anova_table[,"F"], 2), f)
  expect_equivalent(suppressWarnings(summary(md_ez_r$Anova)$univariate.tests[,"SS"][-1]), ss_num)
  expect_equivalent(suppressWarnings(summary(md_ez_r$Anova)$univariate.tests[,"Error SS"])[-1], ss_error)
  expect_equivalent(anova(md_ez_r, correction = "none")[,"num Df"], num_df)
  expect_equivalent(anova(md_ez_r, correction = "none")[,"den Df"], den_df)
})
test_that("Analysis of Singmann & Klauer (2011, Exp. 1)", {
  data(sk2011.1, package = "afex")
  out1 <- aov_ez("id", "response", sk2011.1[ sk2011.1$what == "affirmation",], within = c("inference", "type"), between = "instruction", anova_table=(es = "pes"), fun_aggregate = mean, return = "afex_aov")
  # reference values as reported in the paper
  df_num <- rep(1, 7)
  df_den <- rep(38, 7)
  MSE <- c(1072.42, 1007.21, 1007.21, 187.9, 187.9, 498.48, 498.48)
  # renamed from `F`, which masked the FALSE shorthand
  f_vals <- c(0.13, 13.01, 12.44, 0.06, 3.09, 29.62, 10.73)
  pes <- c(0, 0.26, 0.25, 0, 0.08, 0.44, 0.22)
  p <- c(0.72, 0.0009, 0.001, 0.81, 0.09, 0.001, 0.002)
  # expect_that()/is_equivalent_to()/equals() are deprecated; use the
  # direct expectation functions with the same tolerances
  expect_equivalent(out1$anova_table[["num Df"]], df_num)
  expect_equivalent(out1$anova_table[["den Df"]], df_den)
  expect_equal(out1$anova_table[["MSE"]], MSE, tolerance = 0.001)
  expect_equal(out1$anova_table[["F"]], f_vals, tolerance = 0.001)
  expect_equal(out1$anova_table[["pes"]], pes, tolerance = 0.02)
  expect_equal(out1$anova_table[["Pr(>F)"]], p, tolerance = 0.01)
})
test_that("Data from O'Brien & Kaiser replicates their paper (p. 328, Table 8, column 'average'", {
  data(obk.long, package = "afex")
  # mixed between (treatment, gender) x within (phase, hour) design,
  # no sphericity correction so values can be compared to the paper
  out1 <- aov_car(value ~ treatment * gender + Error(id/(phase*hour)), data = obk.long, observed = "gender", return = "afex_aov", anova_table = list(correction = "none"))
  # df and F values for the between effects as printed in Table 8
  expect_that(unname(unlist(out1[["anova_table"]]["treatment", c("num Df", "den Df", "F")])), equals(c(2, 10, 3.94), tolerance = 0.001))
  expect_that(unname(unlist(out1[["anova_table"]]["gender", c("num Df", "den Df", "F")])), equals(c(1, 10, 3.66), tolerance = 0.001))
  expect_that(round(unname(unlist(out1[["anova_table"]]["treatment:gender", c("num Df", "den Df", "F")])), 2), equals(c(2, 10, 2.86), tolerance = 0.001))
  ## check against own results:
  # regression snapshot of the full anova table (pasted dput() output)
  anova_tab <- structure(list(`num Df` = c(2, 1, 2, 2, 4, 2, 4, 4, 8, 4, 8,
8, 16, 8, 16), `den Df` = c(10, 10, 10, 20, 20, 20, 20, 40, 40,
40, 40, 80, 80, 80, 80), MSE = c(22.8055555555555, 22.8055555555555,
22.8055555555555, 4.01388888888889, 4.01388888888889, 4.01388888888889,
4.01388888888889, 1.5625, 1.5625, 1.5625, 1.5625, 1.20208333333333,
1.20208333333333, 1.20208333333333, 1.20208333333333), F = c(3.940494501098,
3.65912050065102, 2.85547267441343, 16.1329196993199, 4.85098375975551,
0.282782484190432, 0.636602429722426, 16.6856704980843, 0.0933333333333336,
0.450268199233716, 0.620437956204379, 1.17990398215104, 0.345292160558641,
0.931293452060798, 0.735935938468544), ges = c(0.198248507309966,
0.114806410630587, 0.179183259116394, 0.151232705544895, 0.0967823866181358,
0.00312317714869712, 0.0140618480455475, 0.12547183572154, 0.00160250371109459,
0.0038716854273722, 0.010669821220833, 0.0153706689696344, 0.00905399063368842,
0.012321395080303, 0.0194734697889242), `Pr(>F)` = c(0.0547069269265198,
0.0848002538616402, 0.104469234023772, 6.73163655770545e-05,
0.00672273209545241, 0.756647338927411, 0.642369488905348, 4.02664339633774e-08,
0.999244623719389, 0.771559070589063, 0.755484449904079, 0.32158661418337,
0.990124565656718, 0.495611922963992, 0.749561639456282)), .Names = c("num Df",
"den Df", "MSE", "F", "ges", "Pr(>F)"), heading = c("Anova Table (Type 3 tests)\n",
"Response: value"), row.names = c("treatment", "gender", "treatment:gender",
"phase", "treatment:phase", "gender:phase", "treatment:gender:phase",
"hour", "treatment:hour", "gender:hour", "treatment:gender:hour",
"phase:hour", "treatment:phase:hour", "gender:phase:hour", "treatment:gender:phase:hour"
), class = c("data.frame"))
  expect_equal(out1[["anova_table"]], anova_tab, check.attributes = FALSE)
})
test_that("Data from O'Brien & Kaiser adjusted for familywise error rate (p. 328, Table 8, column 'average'", {
  data(obk.long, package = "afex")
  # same model as the previous test, but with Bonferroni-adjusted p-values
  out1 <- aov_car(value ~ treatment * gender + Error(id/(phase*hour)), data = obk.long, observed = "gender", return = "afex_aov", anova_table = list(correction = "none", p_adjust_method = "bonferroni"))
  # df and F values are unaffected by the p-value adjustment
  expect_that(unname(unlist(out1[["anova_table"]]["treatment", c("num Df", "den Df", "F")])), equals(c(2, 10, 3.94), tolerance = 0.001))
  expect_that(unname(unlist(out1[["anova_table"]]["gender", c("num Df", "den Df", "F")])), equals(c(1, 10, 3.66), tolerance = 0.001))
  expect_that(round(unname(unlist(out1[["anova_table"]]["treatment:gender", c("num Df", "den Df", "F")])), 2), equals(c(2, 10, 2.86), tolerance = 0.001))
  ## check against own results:
  # regression snapshot of the unadjusted anova table (pasted dput() output)
  anova_tab <- structure(list(`num Df` = c(2, 1, 2, 2, 4, 2, 4, 4, 8, 4, 8,
8, 16, 8, 16), `den Df` = c(10, 10, 10, 20, 20, 20, 20, 40, 40,
40, 40, 80, 80, 80, 80), MSE = c(22.8055555555555, 22.8055555555555,
22.8055555555555, 4.01388888888889, 4.01388888888889, 4.01388888888889,
4.01388888888889, 1.5625, 1.5625, 1.5625, 1.5625, 1.20208333333333,
1.20208333333333, 1.20208333333333, 1.20208333333333), F = c(3.940494501098,
3.65912050065102, 2.85547267441343, 16.1329196993199, 4.85098375975551,
0.282782484190432, 0.636602429722426, 16.6856704980843, 0.0933333333333336,
0.450268199233716, 0.620437956204379, 1.17990398215104, 0.345292160558641,
0.931293452060798, 0.735935938468544), ges = c(0.198248507309966,
0.114806410630587, 0.179183259116394, 0.151232705544895, 0.0967823866181358,
0.00312317714869712, 0.0140618480455475, 0.12547183572154, 0.00160250371109459,
0.0038716854273722, 0.010669821220833, 0.0153706689696344, 0.00905399063368842,
0.012321395080303, 0.0194734697889242), `Pr(>F)` = c(0.0547069269265198,
0.0848002538616402, 0.104469234023772, 6.73163655770545e-05,
0.00672273209545241, 0.756647338927411, 0.642369488905348, 4.02664339633774e-08,
0.999244623719389, 0.771559070589063, 0.755484449904079, 0.32158661418337,
0.990124565656718, 0.495611922963992, 0.749561639456282)), .Names = c("num Df",
"den Df", "MSE", "F", "ges", "Pr(>F)"), heading = c("Anova Table (Type 3 tests)\n",
"Response: value"), row.names = c("treatment", "gender", "treatment:gender",
"phase", "treatment:phase", "gender:phase", "treatment:gender:phase",
"hour", "treatment:hour", "gender:hour", "treatment:gender:hour",
"phase:hour", "treatment:phase:hour", "gender:phase:hour", "treatment:gender:phase:hour"
), class = c("data.frame"))
  # apply the same adjustment to the snapshot before comparing
  anova_tab$`Pr(>F)` <- p.adjust(anova_tab$`Pr(>F)`, method = "bonferroni")
  expect_equal(out1[["anova_table"]], anova_tab, check.attributes = FALSE)
})
test_that("afex_aov printing", {
  # fit a fresh afex_aov object and check its print methods
  data(sk2011.1, package = "afex")
  fit_current <- aov_ez("id", "response",
                        sk2011.1[ sk2011.1$what == "affirmation",],
                        within = c("inference", "type"),
                        between = "instruction",
                        anova_table=(es = "pes"),
                        fun_aggregate = mean, return = "afex_aov")
  expect_output(print(fit_current), "Signif. codes")
  expect_output(print(anova(fit_current)), "Signif. codes")
  expect_output(print(nice(fit_current)), "Anova")
  # object fitted with an older afex version; the .rda defines `out1`
  load("afex_aov_16_1.rda")
  expect_output(print(out1), "Anova")
  expect_output(print(anova(out1)), "Signif. codes")
  expect_output(print(nice(out1)), "Anova")
})
| /tests/testthat/test-aov_car-basic.R | no_license | crsh/afex | R | false | false | 8,222 | r |
context("ANOVAs: replicating published results")

test_that("purely within ANOVA, return='univ': Maxell & Delaney (2004), Table 12.5 and 12.6, p. 578", {
  ### replicate results from Table 12.6
  data(md_12.1)
  # reference values from the published table:
  f <- c(40.72, 33.77, 45.31)
  ss_num <- c(289920, 285660, 105120)
  ss_error <- c(64080, 76140, 20880)
  num_df <- c(2, 1, 2)
  den_df <- c(18, 9, 18)
  # fit the same model through the three user interfaces
  md_ez_r <- aov_ez("id", "rt", md_12.1, within = c("angle", "noise"))
  md_car_r <- aov_car(rt ~ 1 + Error(id/angle*noise), md_12.1)
  md_aov_4_r <- aov_4(rt ~ 1 + (angle*noise|id), md_12.1)
  # all three interfaces must return equivalent objects
  expect_that(md_ez_r, is_equivalent_to(md_car_r))
  expect_that(md_ez_r, is_equivalent_to(md_aov_4_r))
  # F values, sums of squares and dfs must match the table
  expect_that(round(md_ez_r$anova_table[,"F"], 2), is_equivalent_to(f))
  expect_that(suppressWarnings(summary(md_ez_r$Anova)$univariate.tests[,"SS"][-1]), is_equivalent_to(ss_num))
  expect_that(suppressWarnings(summary(md_ez_r$Anova)$univariate.tests[,"Error SS"])[-1], is_equivalent_to(ss_error))
  expect_that(anova(md_ez_r, correction = "none")[,"num Df"], is_equivalent_to(num_df))
  expect_that(anova(md_ez_r, correction = "none")[,"den Df"], is_equivalent_to(den_df))
})
test_that("Analysis of Singmann & Klauer (2011, Exp. 1)", {
  data(sk2011.1, package = "afex")
  out1 <- aov_ez("id", "response", sk2011.1[ sk2011.1$what == "affirmation",], within = c("inference", "type"), between = "instruction", anova_table=(es = "pes"), fun_aggregate = mean, return = "afex_aov")
  # reference values as reported in the paper
  df_num <- rep(1, 7)
  df_den <- rep(38, 7)
  MSE <- c(1072.42, 1007.21, 1007.21, 187.9, 187.9, 498.48, 498.48)
  # NOTE(review): the name `F` masks the FALSE shorthand; a rename would be safer
  F <- c(0.13, 13.01, 12.44, 0.06, 3.09, 29.62, 10.73)
  pes <- c(0, 0.26, 0.25, 0, 0.08, 0.44, 0.22)
  p <- c(0.72, 0.0009, 0.001, 0.81, 0.09, 0.001, 0.002)
  expect_that(out1$anova_table[["num Df"]], is_equivalent_to(df_num))
  expect_that(out1$anova_table[["den Df"]], is_equivalent_to(df_den))
  expect_that(out1$anova_table[["MSE"]], equals(MSE, tolerance = 0.001))
  expect_that(out1$anova_table[["F"]], equals(F, tolerance = 0.001))
  expect_that(out1$anova_table[["pes"]], equals(pes, tolerance = 0.02))
  expect_that(out1$anova_table[["Pr(>F)"]], equals(p, tolerance = 0.01))
})
test_that("Data from O'Brien & Kaiser replicates their paper (p. 328, Table 8, column 'average'", {
  data(obk.long, package = "afex")
  # mixed between (treatment, gender) x within (phase, hour) design,
  # no sphericity correction so values can be compared to the paper
  out1 <- aov_car(value ~ treatment * gender + Error(id/(phase*hour)), data = obk.long, observed = "gender", return = "afex_aov", anova_table = list(correction = "none"))
  # df and F values for the between effects as printed in Table 8
  expect_that(unname(unlist(out1[["anova_table"]]["treatment", c("num Df", "den Df", "F")])), equals(c(2, 10, 3.94), tolerance = 0.001))
  expect_that(unname(unlist(out1[["anova_table"]]["gender", c("num Df", "den Df", "F")])), equals(c(1, 10, 3.66), tolerance = 0.001))
  expect_that(round(unname(unlist(out1[["anova_table"]]["treatment:gender", c("num Df", "den Df", "F")])), 2), equals(c(2, 10, 2.86), tolerance = 0.001))
  ## check against own results:
  # regression snapshot of the full anova table (pasted dput() output)
  anova_tab <- structure(list(`num Df` = c(2, 1, 2, 2, 4, 2, 4, 4, 8, 4, 8,
8, 16, 8, 16), `den Df` = c(10, 10, 10, 20, 20, 20, 20, 40, 40,
40, 40, 80, 80, 80, 80), MSE = c(22.8055555555555, 22.8055555555555,
22.8055555555555, 4.01388888888889, 4.01388888888889, 4.01388888888889,
4.01388888888889, 1.5625, 1.5625, 1.5625, 1.5625, 1.20208333333333,
1.20208333333333, 1.20208333333333, 1.20208333333333), F = c(3.940494501098,
3.65912050065102, 2.85547267441343, 16.1329196993199, 4.85098375975551,
0.282782484190432, 0.636602429722426, 16.6856704980843, 0.0933333333333336,
0.450268199233716, 0.620437956204379, 1.17990398215104, 0.345292160558641,
0.931293452060798, 0.735935938468544), ges = c(0.198248507309966,
0.114806410630587, 0.179183259116394, 0.151232705544895, 0.0967823866181358,
0.00312317714869712, 0.0140618480455475, 0.12547183572154, 0.00160250371109459,
0.0038716854273722, 0.010669821220833, 0.0153706689696344, 0.00905399063368842,
0.012321395080303, 0.0194734697889242), `Pr(>F)` = c(0.0547069269265198,
0.0848002538616402, 0.104469234023772, 6.73163655770545e-05,
0.00672273209545241, 0.756647338927411, 0.642369488905348, 4.02664339633774e-08,
0.999244623719389, 0.771559070589063, 0.755484449904079, 0.32158661418337,
0.990124565656718, 0.495611922963992, 0.749561639456282)), .Names = c("num Df",
"den Df", "MSE", "F", "ges", "Pr(>F)"), heading = c("Anova Table (Type 3 tests)\n",
"Response: value"), row.names = c("treatment", "gender", "treatment:gender",
"phase", "treatment:phase", "gender:phase", "treatment:gender:phase",
"hour", "treatment:hour", "gender:hour", "treatment:gender:hour",
"phase:hour", "treatment:phase:hour", "gender:phase:hour", "treatment:gender:phase:hour"
), class = c("data.frame"))
  expect_equal(out1[["anova_table"]], anova_tab, check.attributes = FALSE)
})
test_that("Data from O'Brien & Kaiser adjusted for familywise error rate (p. 328, Table 8, column 'average'", {
  data(obk.long, package = "afex")
  # same model as the previous test, but with Bonferroni-adjusted p-values
  out1 <- aov_car(value ~ treatment * gender + Error(id/(phase*hour)), data = obk.long, observed = "gender", return = "afex_aov", anova_table = list(correction = "none", p_adjust_method = "bonferroni"))
  # df and F values are unaffected by the p-value adjustment
  expect_that(unname(unlist(out1[["anova_table"]]["treatment", c("num Df", "den Df", "F")])), equals(c(2, 10, 3.94), tolerance = 0.001))
  expect_that(unname(unlist(out1[["anova_table"]]["gender", c("num Df", "den Df", "F")])), equals(c(1, 10, 3.66), tolerance = 0.001))
  expect_that(round(unname(unlist(out1[["anova_table"]]["treatment:gender", c("num Df", "den Df", "F")])), 2), equals(c(2, 10, 2.86), tolerance = 0.001))
  ## check against own results:
  # regression snapshot of the unadjusted anova table (pasted dput() output)
  anova_tab <- structure(list(`num Df` = c(2, 1, 2, 2, 4, 2, 4, 4, 8, 4, 8,
8, 16, 8, 16), `den Df` = c(10, 10, 10, 20, 20, 20, 20, 40, 40,
40, 40, 80, 80, 80, 80), MSE = c(22.8055555555555, 22.8055555555555,
22.8055555555555, 4.01388888888889, 4.01388888888889, 4.01388888888889,
4.01388888888889, 1.5625, 1.5625, 1.5625, 1.5625, 1.20208333333333,
1.20208333333333, 1.20208333333333, 1.20208333333333), F = c(3.940494501098,
3.65912050065102, 2.85547267441343, 16.1329196993199, 4.85098375975551,
0.282782484190432, 0.636602429722426, 16.6856704980843, 0.0933333333333336,
0.450268199233716, 0.620437956204379, 1.17990398215104, 0.345292160558641,
0.931293452060798, 0.735935938468544), ges = c(0.198248507309966,
0.114806410630587, 0.179183259116394, 0.151232705544895, 0.0967823866181358,
0.00312317714869712, 0.0140618480455475, 0.12547183572154, 0.00160250371109459,
0.0038716854273722, 0.010669821220833, 0.0153706689696344, 0.00905399063368842,
0.012321395080303, 0.0194734697889242), `Pr(>F)` = c(0.0547069269265198,
0.0848002538616402, 0.104469234023772, 6.73163655770545e-05,
0.00672273209545241, 0.756647338927411, 0.642369488905348, 4.02664339633774e-08,
0.999244623719389, 0.771559070589063, 0.755484449904079, 0.32158661418337,
0.990124565656718, 0.495611922963992, 0.749561639456282)), .Names = c("num Df",
"den Df", "MSE", "F", "ges", "Pr(>F)"), heading = c("Anova Table (Type 3 tests)\n",
"Response: value"), row.names = c("treatment", "gender", "treatment:gender",
"phase", "treatment:phase", "gender:phase", "treatment:gender:phase",
"hour", "treatment:hour", "gender:hour", "treatment:gender:hour",
"phase:hour", "treatment:phase:hour", "gender:phase:hour", "treatment:gender:phase:hour"
), class = c("data.frame"))
  # apply the same adjustment to the snapshot before comparing
  anova_tab$`Pr(>F)` <- p.adjust(anova_tab$`Pr(>F)`, method = "bonferroni")
  expect_equal(out1[["anova_table"]], anova_tab, check.attributes = FALSE)
})
test_that("afex_aov printing", {
  data(sk2011.1, package = "afex")
  # freshly fitted afex_aov object; its print methods must show the
  # significance legend / Anova header
  out_new <- aov_ez("id", "response", sk2011.1[ sk2011.1$what == "affirmation",], within = c("inference", "type"), between = "instruction", anova_table=(es = "pes"), fun_aggregate = mean, return = "afex_aov")
  expect_output(print(out_new), "Signif. codes")
  expect_output(print(anova(out_new)), "Signif. codes")
  expect_output(print(nice(out_new)), "Anova")
  # object saved with an older afex version; the .rda defines `out1`
  load("afex_aov_16_1.rda")
  expect_output(print(out1), "Anova")
  expect_output(print(anova(out1)), "Signif. codes")
  expect_output(print(nice(out1)), "Anova")
})
|
#----------------------------------------------#
# Author: Laurent Berge
# Date creation: Tue Apr 23 16:41:47 2019
# Purpose: All estimation functions
#----------------------------------------------#
#' Fixed-effects OLS estimation
#'
#' Estimates OLS with any number of fixed-effects.
#'
#' @inheritParams femlm
#'
#' @param fml A formula representing the relation to be estimated. For example: \code{fml = z~x+y}. To include fixed-effects, insert them in this formula using a pipe: e.g. \code{fml = z~x+y | fe_1+fe_2}. You can combine two fixed-effects with \code{^}: e.g. \code{fml = z~x+y|fe_1^fe_2}, see details. You can also use variables with varying slopes using square brackets: e.g. in \code{fml = z~y|fe_1[x] + fe_2}, see details. To add IVs, insert the endogenous vars./instruments after a pipe, like in \code{y ~ x | c(x_endo1, x_endo2) ~ x_inst1 + x_inst2}. Note that it should always be the last element, see details. Multiple estimations can be performed at once: for multiple dep. vars, wrap them in \code{c()}: ex \code{c(y1, y2)}. For multiple indep. vars, use the stepwise functions: ex \code{x1 + csw(x2, x3)}. The formula \code{fml = c(y1, y2) ~ x1 + cw0(x2, x3)} leads to 6 estimation, see details.
#' @param weights A formula or a numeric vector. Each observation can be weighted, the weights must be greater than 0. If equal to a formula, it should be one-sided: for example \code{~ var_weight}.
#' @param verbose Integer. Higher values give more information. In particular, it can detail the number of iterations in the demeaning algorithm (the first number is the left-hand-side, the other numbers are the right-hand-side variables).
#' @param demeaned Logical, default is \code{FALSE}. Only used in the presence of fixed-effects: should the centered variables be returned? If \code{TRUE}, it creates the items \code{y_demeaned} and \code{X_demeaned}.
#' @param notes Logical. By default, two notes are displayed: when NAs are removed (to show additional information) and when some observations are removed because of collinearity. To avoid displaying these messages, you can set \code{notes = FALSE}. You can remove these messages permanently by using \code{setFixest_notes(FALSE)}.
#' @param collin.tol Numeric scalar, default is \code{1e-10}. Threshold deciding when variables should be considered collinear and subsequently removed from the estimation. Higher values means more variables will be removed (if there is presence of collinearity). One signal of presence of collinearity is t-stats that are extremely low (for instance when t-stats < 1e-3).
#' @param y Numeric vector/matrix/data.frame of the dependent variable(s). Multiple dependent variables will return a \code{fixest_multi} object.
#' @param X Numeric matrix of the regressors.
#' @param fixef_df Matrix/data.frame of the fixed-effects.
#'
#' @details
#' The method used to demean each variable along the fixed-effects is based on Berge (2018), since this is the same problem to solve as for the Gaussian case in a ML setup.
#'
#' @section Combining the fixed-effects:
#' You can combine two variables to make it a new fixed-effect using \code{^}. The syntax is as follows: \code{fe_1^fe_2}. Here you created a new variable which is the combination of the two variables fe_1 and fe_2. This is identical to doing \code{paste0(fe_1, "_", fe_2)} but more convenient.
#'
#' Note that pasting is a costly operation, especially for large data sets. Thus, the internal algorithm uses a numerical trick which is fast, but the drawback is that the identity of each observation is lost (i.e. they are now equal to a meaningless number instead of being equal to \code{paste0(fe_1, "_", fe_2)}). These \dQuote{identities} are useful only if you're interested in the value of the fixed-effects (that you can extract with \code{\link[fixest]{fixef.fixest}}). If you're only interested in coefficients of the variables, it doesn't matter. Anyway, you can use \code{combine.quick = FALSE} to tell the internal algorithm to use \code{paste} instead of the numerical trick. By default, the numerical trick is performed only for large data sets.
#'
#' @section Varying slopes:
#' You can add variables with varying slopes in the fixed-effect part of the formula. The syntax is as follows: fixef_var[var1, var2]. Here the variables var1 and var2 will be with varying slopes (one slope per value in fixef_var) and the fixed-effect fixef_var will also be added.
#'
#' To add only the variables with varying slopes and not the fixed-effect, use double square brackets: fixef_var[[var1, var2]].
#'
#' In other words:
#' \itemize{
#' \item fixef_var[var1, var2] is equivalent to fixef_var + fixef_var[[var1]] + fixef_var[[var2]]
#' \item fixef_var[[var1, var2]] is equivalent to fixef_var[[var1]] + fixef_var[[var2]]
#' }
#'
#' In general, for convergence reasons, it is recommended to always add the fixed-effect and avoid using only the variable with varying slope (i.e. use single square brackets).
#'
#' @section Lagging variables:
#'
#' To use leads/lags of variables in the estimation, you can: i) provide the argument \code{panel.id}, or ii) set your data set as a panel with the function \code{\link[fixest]{panel}}. Doing either of the two will give you access to the lagging functions \code{\link[fixest]{l}}, \code{\link[fixest:l]{f}} and \code{\link[fixest:l]{d}}.
#'
#' You can provide several leads/lags/differences at once: e.g. if your formula is equal to \code{f(y) ~ l(x, -1:1)}, it means that the dependent variable is equal to the lead of \code{y}, and you will have as explanatory variables the lead of \code{x1}, \code{x1} and the lag of \code{x1}. See the examples in function \code{\link[fixest]{l}} for more details.
#'
#' @section Interactions:
#'
#' You can interact a numeric variable with a "factor-like" variable by using \code{interact(var, fe, ref)}, where \code{fe} is the variable to be interacted with and the argument \code{ref} is a value of \code{fe} taken as a reference (optional). Instead of using the function \code{\link[fixest:i]{interact}}, you can use the alias \code{i(var, fe, ref)}.
#'
#' Using this specific way to create interactions leads to a different display of the interacted values in \code{\link[fixest]{etable}} and offers a special representation of the interacted coefficients in the function \code{\link[fixest]{coefplot}}. See examples.
#'
#' It is important to note that *if you do not care about the standard-errors of the interactions*, then you can add interactions in the fixed-effects part of the formula (using the syntax fe[[var]], as explained in the section \dQuote{Varying slopes}).
#'
#' The function \code{\link[fixest:i]{interact}} has in fact more arguments, please see details in its associated help page.
#'
#' @section On standard-errors:
#'
#' Standard-errors can be computed in different ways, you can use the arguments \code{se} and \code{dof} in \code{\link[fixest]{summary.fixest}} to define how to compute them. By default, in the presence of fixed-effects, standard-errors are automatically clustered.
#'
#' The following vignette: \href{https://cran.r-project.org/package=fixest/vignettes/standard_errors.html}{On standard-errors} describes in details how the standard-errors are computed in \code{fixest} and how you can replicate standard-errors from other software.
#'
#' You can use the functions \code{\link[fixest]{setFixest_se}} and \code{\link[fixest:dof]{setFixest_dof}} to permanently set the way the standard-errors are computed.
#'
#' @section Instrumental variables:
#'
#' To estimate two stage least square regressions, insert the relationship between the endogenous regressor(s) and the instruments in a formula, after a pipe.
#'
#' For example, \code{fml = y ~ x1 | x_endo ~ x_inst} will use the variables \code{x1} and \code{x_inst} in the first stage to explain \code{x_endo}. Then will use the fitted value of \code{x_endo} (which will be named \code{fit_x_endo}) and \code{x1} to explain \code{y}.
#' To include several endogenous regressors, just use "+", like in: \code{fml = y ~ x1 | x_endo1 + x_end2 ~ x_inst1 + x_inst2}.
#'
#' Of course you can still add the fixed-effects, but the IV formula must always come last, like in \code{fml = y ~ x1 | fe1 + fe2 | x_endo ~ x_inst}.
#'
#' By default, the second stage regression is returned. You can access the first stage(s) regressions either directly in the slot \code{iv_first_stage} (not recommended), or using the argument \code{stage = 1} from the function \code{\link[fixest]{summary.fixest}}. For example \code{summary(iv_est, stage = 1)} will give the first stage(s). Note that using summary you can display both the second and first stages at the same time using, e.g., \code{stage = 1:2} (using \code{2:1} would reverse the order).
#'
#'
#' @section Multiple estimations:
#'
#' Multiple estimations can be performed at once, they just have to be specified in the formula. Multiple estimations yield a \code{fixest_multi} object which is \sQuote{kind of} a list of all the results but includes specific methods to access the results in a handy way.
#'
#' To include multiple dependent variables, wrap them in \code{c()} (\code{list()} also works). For instance \code{fml = c(y1, y2) ~ x1} would estimate the model \code{fml = y1 ~ x1} and then the model \code{fml = y2 ~ x1}.
#'
#' To include multiple independent variables, you need to use the stepwise functions. There are 4 stepwise functions associated with 4 short aliases. These are a) stepwise, stepwise0, cstepwise, cstepwise0, and b) sw, sw0, csw, csw0. Let's explain that.
#' Assume you have the following formula: \code{fml = y ~ x1 + sw(x2, x3)}. The stepwise function \code{sw} will estimate the following two models: \code{y ~ x1 + x2} and \code{y ~ x1 + x3}. That is, each element in \code{sw()} is sequentially, and separately, added to the formula. Had you used \code{sw0} in lieu of \code{sw}, then the model \code{y ~ x1} would also have been estimated. The \code{0} in the name means that the model without any stepwise element also needs to be estimated.
#' Finally, the prefix \code{c} means cumulative: each stepwise element is added to the next. That is, \code{fml = y ~ x1 + csw(x2, x3)} would lead to the following models \code{y ~ x1 + x2} and \code{y ~ x1 + x2 + x3}. The \code{0} has the same meaning and would also lead to the model without the stepwise elements to be estimated: in other words, \code{fml = y ~ x1 + csw0(x2, x3)} leads to the following three models: \code{y ~ x1}, \code{y ~ x1 + x2} and \code{y ~ x1 + x2 + x3}.
#'
#' Multiple independent variables can be combined with multiple dependent variables, as in \code{fml = c(y1, y2) ~ csw(x1, x2, x3)} which would lead to 6 estimations. Multiple estimations can also be combined to split samples (with the arguments \code{split}, \code{fsplit}).
#'
#' Fixed-effects cannot be included in a stepwise fashion: they are there or not and stay the same for all estimations.
#'
#' A note on performance. The feature of multiple estimations has been highly optimized for \code{feols}, in particular in the presence of fixed-effects. It is faster to estimate multiple models using the formula rather than with a loop. For non-\code{feols} models using the formula is roughly similar to using a loop performance-wise.
#'
#'
#' @return
#' A \code{fixest} object. Note that \code{fixest} objects contain many elements and most of them are for internal use, they are presented here only for information. To access them, it is safer to use the user-level methods (e.g. \code{\link[fixest]{vcov.fixest}}, \code{\link[fixest]{resid.fixest}}, etc) or functions (like for instance \code{\link[fixest]{fitstat}} to access any fit statistic).
#' \item{nobs}{The number of observations.}
#' \item{fml}{The linear formula of the call.}
#' \item{call}{The call of the function.}
#' \item{method}{The method used to estimate the model.}
#' \item{family}{The family used to estimate the model.}
#' \item{fml_all}{A list containing different parts of the formula. Always contain the linear formula. Then depending on the cases: \code{fixef}: the fixed-effects, \code{iv}: the IV part of the formula.}
#' \item{fixef_vars}{The names of each fixed-effect dimension.}
#' \item{fixef_id}{The list (of length the number of fixed-effects) of the fixed-effects identifiers for each observation.}
#' \item{fixef_sizes}{The size of each fixed-effect (i.e. the number of unique identifiers for each fixed-effect dimension).}
#' \item{coefficients}{The named vector of estimated coefficients.}
#' \item{multicol}{Logical, if multicollinearity was found.}
#' \item{coeftable}{The table of the coefficients with their standard errors, z-values and p-values.}
#' \item{loglik}{The loglikelihood.}
#' \item{ssr_null}{Sum of the squared residuals of the null model (containing only the intercept).}
#' \item{ssr_fe_only}{Sum of the squared residuals of the model estimated with fixed-effects only.}
#' \item{ll_null}{The log-likelihood of the null model (containing only the intercept).}
#' \item{ll_fe_only}{The log-likelihood of the model estimated with fixed-effects only.}
#' \item{fitted.values}{The fitted values.}
#' \item{linear.predictors}{The linear predictors.}
#' \item{residuals}{The residuals (y minus the fitted values).}
#' \item{sq.cor}{Squared correlation between the dependent variable and the expected predictor (i.e. fitted.values) obtained by the estimation.}
#' \item{hessian}{The Hessian of the parameters.}
#' \item{cov.unscaled}{The variance-covariance matrix of the parameters.}
#' \item{se}{The standard-error of the parameters.}
#' \item{scores}{The matrix of the scores (first derivative for each observation).}
#' \item{residuals}{The difference between the dependent variable and the expected predictor.}
#' \item{sumFE}{The sum of the fixed-effects coefficients for each observation.}
#' \item{offset}{(When relevant.) The offset formula.}
#' \item{weights}{(When relevant.) The weights formula.}
#' \item{obs_selection}{(When relevant.) List containing vectors of integers. It represents the sequential selection of observations relative to the original data set.}
#' \item{collin.var}{(When relevant.) Vector containing the variables removed because of collinearity.}
#' \item{collin.coef}{(When relevant.) Vector of coefficients, where the values of the variables removed because of collinearity are NA.}
#' \item{collin.min_norm}{The minimal diagonal value of the Cholesky decomposition. Small values indicate the possible presence of collinearity.}
#' \item{y_demeaned}{Only when \code{demeaned = TRUE}: the centered dependent variable.}
#' \item{X_demeaned}{Only when \code{demeaned = TRUE}: the centered explanatory variable.}
#'
#'
#' @seealso
#' See also \code{\link[fixest]{summary.fixest}} to see the results with the appropriate standard-errors, \code{\link[fixest]{fixef.fixest}} to extract the fixed-effects coefficients, and the function \code{\link[fixest]{etable}} to visualize the results of multiple estimations. For plotting coefficients: see \code{\link[fixest]{coefplot}}.
#'
#' And other estimation methods: \code{\link[fixest]{femlm}}, \code{\link[fixest]{feglm}}, \code{\link[fixest:feglm]{fepois}}, \code{\link[fixest:femlm]{fenegbin}}, \code{\link[fixest]{feNmlm}}.
#'
#' @author
#' Laurent Berge
#'
#' @references
#'
#' Berge, Laurent, 2018, "Efficient estimation of maximum likelihood models with multiple fixed-effects: the R package FENmlm." CREA Discussion Papers, 13 (\url{https://wwwen.uni.lu/content/download/110162/1299525/file/2018_13}).
#'
#' For models with multiple fixed-effects:
#'
#' Gaure, Simen, 2013, "OLS with multiple high dimensional category variables", Computational Statistics & Data Analysis 66 pp. 8--18
#'
#' @examples
#'
#' #
#' # Basic estimation
#' #
#'
#' res = feols(Sepal.Length ~ Sepal.Width + Petal.Length, iris)
#' # You can specify clustered standard-errors in summary:
#' summary(res, cluster = ~Species)
#'
#' #
#' # Just one set of fixed-effects:
#' #
#'
#' res = feols(Sepal.Length ~ Sepal.Width + Petal.Length | Species, iris)
#' # By default, the SEs are clustered according to the first fixed-effect
#' summary(res)
#'
#' #
#' # Varying slopes:
#' #
#'
#' res = feols(Sepal.Length ~ Petal.Length | Species[Sepal.Width], iris)
#' summary(res)
#'
#' #
#' # Combining the FEs:
#' #
#'
#' base = iris
#' base$fe_2 = rep(1:10, 15)
#' res_comb = feols(Sepal.Length ~ Petal.Length | Species^fe_2, base)
#' summary(res_comb)
#' fixef(res_comb)[[1]]
#'
#' #
#' # Using leads/lags:
#' #
#'
#' data(base_did)
#' # We need to set up the panel with the arg. panel.id
#' est1 = feols(y ~ l(x1, 0:1), base_did, panel.id = ~id+period)
#' est2 = feols(f(y) ~ l(x1, -1:1), base_did, panel.id = ~id+period)
#' etable(est1, est2, order = "f", drop="Int")
#'
#' #
#' # Using interactions:
#' #
#'
#' data(base_did)
#' # We interact the variable 'period' with the variable 'treat'
#' est_did = feols(y ~ x1 + i(treat, period, 5) | id+period, base_did)
#'
#' # Now we can plot the result of the interaction with coefplot
#' coefplot(est_did)
#' # You have many more examples in the coefplot help
#'
#' #
#' # Instrumental variables
#' #
#'
#' # To estimate Two stage least squares,
#' # insert a formula describing the endo. vars./instr. relation after a pipe:
#'
#' base = iris
#' names(base) = c("y", "x1", "x2", "x3", "fe1")
#' base$x_inst1 = 0.2 * base$x1 + 0.7 * base$x2 + rpois(150, 2)
#' base$x_inst2 = 0.2 * base$x2 + 0.7 * base$x3 + rpois(150, 3)
#' base$x_endo1 = 0.5 * base$y + 0.5 * base$x3 + rnorm(150, sd = 2)
#' base$x_endo2 = 1.5 * base$y + 0.5 * base$x3 + 3 * base$x_inst1 + rnorm(150, sd = 5)
#'
#' # Using 2 controls, 1 endogenous var. and 1 instrument
#' res_iv = feols(y ~ x1 + x2 | x_endo1 ~ x_inst1, base)
#'
#' # The second stage is the default
#' summary(res_iv)
#'
#' # To show the first stage:
#' summary(res_iv, stage = 1)
#'
#' # To show both the first and second stages:
#' summary(res_iv, stage = 1:2)
#'
#' # Adding a fixed-effect => IV formula always last!
#' res_iv_fe = feols(y ~ x1 + x2 | fe1 | x_endo1 ~ x_inst1, base)
#'
#' # With two endogenous regressors
#' res_iv2 = feols(y ~ x1 + x2 | x_endo1 + x_endo2 ~ x_inst1 + x_inst2, base)
#'
#' # Now there are two first stages => a fixest_multi object is returned
#' sum_res_iv2 = summary(res_iv2, stage = 1)
#'
#' # You can navigate through it by subsetting:
#' sum_res_iv2[iv = 1]
#'
#' # The stage argument also works in etable:
#' etable(res_iv, res_iv_fe, res_iv2, order = "endo")
#'
#' etable(res_iv, res_iv_fe, res_iv2, stage = 1:2, order = c("endo", "inst"),
#' group = list(control = "!endo|inst"))
#'
#' #
#' # Multiple estimations:
#' #
#'
#' # 6 estimations
#' est_mult = feols(c(Ozone, Solar.R) ~ Wind + Temp + csw0(Wind:Temp, Day), airquality)
#'
#' # We can display the results for the first lhs:
#' etable(est_mult[lhs = 1])
#'
#' # And now the second (access can be made by name)
#' etable(est_mult[lhs = "Solar.R"])
#'
#' # Now we focus on the two last right hand sides
#' # (note that .N can be used to specify the last item)
#' etable(est_mult[rhs = 2:.N])
#'
#' # Combining with split
#' est_split = feols(c(Ozone, Solar.R) ~ sw(poly(Wind, 2), poly(Temp, 2)),
#' airquality, split = ~ Month)
#'
#' # You can display everything at once with the print method
#' est_split
#'
#' # Different way of displaying the results with "compact"
#' summary(est_split, "compact")
#'
#' # You can still select which sample/LHS/RHS to display
#' est_split[sample = 1:2, lhs = 1, rhs = 1]
#'
feols = function(fml, data, weights, offset, subset, split, fsplit, cluster, se, dof, panel.id, fixef, fixef.rm = "none", fixef.tol = 1e-6,
fixef.iter = 10000, collin.tol = 1e-10, nthreads = getFixest_nthreads(), lean = FALSE, verbose = 0, warn = TRUE,
notes = getFixest_notes(), combine.quick, demeaned = FALSE, mem.clean = FALSE, only.env = FALSE, env, ...){
dots = list(...)
# 1st: is the call coming from feglm?
fromGLM = FALSE
skip_fixef = FALSE
if("X" %in% names(dots)){
fromGLM = TRUE
# env is provided by feglm
X = dots$X
y = as.vector(dots$y)
init = dots$means
correct_0w = dots$correct_0w
if(verbose){
time_start = proc.time()
gt = function(x, nl = TRUE) cat(sfill(x, 20), ": ", -(t0 - (t0<<-proc.time()))[3], "s", ifelse(nl, "\n", ""), sep = "")
t0 = proc.time()
}
} else {
time_start = proc.time()
gt = function(x, nl = TRUE) cat(sfill(x, 20), ": ", -(t0 - (t0<<-proc.time()))[3], "s", ifelse(nl, "\n", ""), sep = "")
t0 = proc.time()
# we use fixest_env for appropriate controls and data handling
if(missing(env)){
set_defaults("fixest_estimation")
call_env = new.env(parent = parent.frame())
env = try(fixest_env(fml = fml, data = data, weights = weights, offset = offset, subset = subset, split = split, fsplit = fsplit, cluster = cluster, se = se, dof = dof, panel.id = panel.id, fixef = fixef, fixef.rm = fixef.rm, fixef.tol = fixef.tol, fixef.iter = fixef.iter, collin.tol = collin.tol, nthreads = nthreads, lean = lean, verbose = verbose, warn = warn, notes = notes, combine.quick = combine.quick, demeaned = demeaned, mem.clean = mem.clean, origin = "feols", mc_origin = match.call(), call_env = call_env, ...), silent = TRUE)
} else if((r <- !is.environment(env)) || !isTRUE(env$fixest_env)) {
stop("Argument 'env' must be an environment created by a fixest estimation. Currently it is not ", ifelse(r, "an", "a 'fixest'"), " environment.")
}
if("try-error" %in% class(env)){
stop(format_error_msg(env, "feols"))
}
check_arg(only.env, "logical scalar")
if(only.env){
return(env)
}
y = get("lhs", env)
X = get("linear.mat", env)
nthreads = get("nthreads", env)
init = 0
# demeaned variables
if(!is.null(dots$X_demean)){
skip_fixef = TRUE
X_demean = dots$X_demean
y_demean = dots$y_demean
}
# offset
offset = get("offset.value", env)
isOffset = length(offset) > 1
if(isOffset){
y = y - offset
}
# weights
weights = get("weights.value", env)
isWeight = length(weights) > 1
correct_0w = FALSE
mem.clean = get("mem.clean", env)
demeaned = get("demeaned", env)
verbose = get("verbose", env)
if(verbose >= 2) gt("Setup")
}
isFixef = get("isFixef", env)
# Used to solve with the reduced model
xwx = dots$xwx
xwy = dots$xwy
#
# Split ####
#
do_split = get("do_split", env)
if(do_split){
res = multi_split(env, feols)
return(res)
}
#
# Multi fixef ####
#
do_multi_fixef = get("do_multi_fixef", env)
if(do_multi_fixef){
res = multi_fixef(env, feols)
return(res)
}
#
# Multi LHS and RHS ####
#
do_multi_lhs = get("do_multi_lhs", env)
do_multi_rhs = get("do_multi_rhs", env)
if(do_multi_lhs || do_multi_rhs){
assign("do_multi_lhs", FALSE, env)
assign("do_multi_rhs", FALSE, env)
do_iv = get("do_iv", env)
fml = get("fml", env)
lhs_names = get("lhs_names", env)
lhs = y
if(do_multi_lhs){
# We find out which LHS have the same NA patterns => saves a lot of computation
n_lhs = length(lhs)
lhs_group_is_na = list()
lhs_group_id = c()
lhs_group_n_na = c()
for(i in 1:n_lhs){
is_na_current = !is.finite(lhs[[i]])
n_na_current = sum(is_na_current)
if(i == 1){
lhs_group_id = 1
lhs_group_is_na[[1]] = is_na_current
lhs_group_n_na[1] = n_na_current
} else {
qui = which(lhs_group_n_na == n_na_current)
if(length(qui) > 0){
if(n_na_current == 0){
# no need to check the pattern
lhs_group_id[i] = lhs_group_id[qui[1]]
next
}
for(j in qui){
if(all(is_na_current == lhs_group_is_na[[j]])){
lhs_group_id[i] = lhs_group_id[j]
next
}
}
}
# if here => new group because couldn't be matched
id = max(lhs_group_id) + 1
lhs_group_id[i] = id
lhs_group_is_na[[id]] = is_na_current
lhs_group_n_na[id] = n_na_current
}
}
# we make groups
lhs_group = list()
for(i in 1:max(lhs_group_id)){
lhs_group[[i]] = which(lhs_group_id == i)
}
} else if(do_multi_lhs == FALSE){
lhs_group_is_na = list(FALSE)
lhs_group_n_na = 0
lhs_group = list(1)
lhs = list(lhs) # I really abuse R shallow copy system...
names(lhs) = deparse_long(fml[[2]])
}
if(do_multi_rhs){
rhs_info_stepwise = get("rhs_info_stepwise", env)
multi_rhs_fml_full = rhs_info_stepwise$fml_all_full
multi_rhs_fml_sw = rhs_info_stepwise$fml_all_sw
multi_rhs_cumul = rhs_info_stepwise$is_cumul
linear_core = get("linear_core", env)
rhs = get("rhs_sw", env)
# Two schemes:
# - if cumulative: we take advantage of it => both in demeaning and in estimation
# - if regular stepwise => only in demeaning
# => of course this is dependent on the pattern of NAs
#
n_core_left = ifelse(length(linear_core$left) == 1, 0, ncol(linear_core$left))
n_core_right = ifelse(length(linear_core$right) == 1, 0, ncol(linear_core$right))
# rnc: running number of columns
rnc = n_core_left
if(rnc == 0){
col_start = integer(0)
} else {
col_start = 1:rnc
}
rhs_group_is_na = list()
rhs_group_id = c()
rhs_group_n_na = c()
rhs_n_vars = c()
rhs_col_id = list()
any_na_rhs = FALSE
for(i in seq_along(multi_rhs_fml_sw)){
# We evaluate the extra data and check the NA pattern
my_fml = multi_rhs_fml_sw[[i]]
if(i == 1 && (multi_rhs_cumul || identical(my_fml[[3]], 1))){
# That case is already in the main linear.mat => no NA
rhs_group_id = 1
rhs_group_is_na[[1]] = FALSE
rhs_group_n_na[1] = 0
rhs_n_vars[1] = 0
rhs[[1]] = 0
if(rnc == 0){
rhs_col_id[[1]] = integer(0)
} else {
rhs_col_id[[1]] = 1:rnc
}
next
}
rhs_current = rhs[[i]]
rhs_n_vars[i] = ncol(rhs_current)
info = cpppar_which_na_inf_mat(rhs_current, nthreads)
is_na_current = info$is_na_inf
if(multi_rhs_cumul && any_na_rhs){
# we cumulate the NAs
is_na_current = is_na_current | rhs_group_is_na[[rhs_group_id[i - 1]]]
info$any_na_inf = any(is_na_current)
}
n_na_current = 0
if(info$any_na_inf){
any_na_rhs = TRUE
n_na_current = sum(is_na_current)
} else {
# NULL would lead to problems down the road
is_na_current = FALSE
}
if(i == 1){
rhs_group_id = 1
rhs_group_is_na[[1]] = is_na_current
rhs_group_n_na[1] = n_na_current
} else {
qui = which(rhs_group_n_na == n_na_current)
if(length(qui) > 0){
if(n_na_current == 0){
# no need to check the pattern
rhs_group_id[i] = rhs_group_id[qui[1]]
next
}
go_next = FALSE
for(j in qui){
if(all(is_na_current == rhs_group_is_na[[j]])){
rhs_group_id[i] = rhs_group_id[j]
go_next = TRUE
break
}
}
if(go_next) next
}
# if here => new group because couldn't be matched
id = max(rhs_group_id) + 1
rhs_group_id[i] = id
rhs_group_is_na[[id]] = is_na_current
rhs_group_n_na[id] = n_na_current
}
}
# we make groups
rhs_group = list()
for(i in 1:max(rhs_group_id)){
rhs_group[[i]] = which(rhs_group_id == i)
}
# Finding the right column IDs to select
rhs_group_n_vars = rep(0, length(rhs_group)) # To get the total nber of cols per group
for(i in seq_along(multi_rhs_fml_sw)){
if(multi_rhs_cumul){
rnc = rnc + rhs_n_vars[i]
if(rnc == 0){
rhs_col_id[[i]] = integer(0)
} else {
rhs_col_id[[i]] = 1:rnc
}
} else {
id = rhs_group_id[i]
rhs_col_id[[i]] = c(col_start, seq(rnc + rhs_group_n_vars[id] + 1, length.out = rhs_n_vars[i]))
rhs_group_n_vars[id] = rhs_group_n_vars[id] + rhs_n_vars[i]
}
}
if(n_core_right > 0){
# We adjust
if(multi_rhs_cumul){
for(i in seq_along(multi_rhs_fml_sw)){
id = rhs_group_id[i]
gmax = max(rhs_group[[id]])
rhs_col_id[[i]] = c(rhs_col_id[[i]], n_core_left + sum(rhs_n_vars[1:gmax]) + 1:n_core_right)
}
} else {
for(i in seq_along(multi_rhs_fml_sw)){
id = rhs_group_id[i]
rhs_col_id[[i]] = c(rhs_col_id[[i]], n_core_left + rhs_group_n_vars[id] + 1:n_core_right)
}
}
}
} else if(do_multi_rhs == FALSE){
multi_rhs_fml_full = list(.xpd(rhs = fml[[3]]))
multi_rhs_cumul = FALSE
rhs_group_is_na = list(FALSE)
rhs_group_n_na = 0
rhs_n_vars = 0
rhs_group = list(1)
rhs = list(0)
rhs_col_id = list(1:NCOL(X))
linear_core = list(left = X, right = 1)
}
isLinear_right = length(linear_core$right) > 1
isLinear = length(linear_core$left) > 1 || isLinear_right
n_lhs = length(lhs)
n_rhs = length(rhs)
res = vector("list", n_lhs * n_rhs)
rhs_names = sapply(multi_rhs_fml_full, function(x) as.character(x)[[2]])
for(i in seq_along(lhs_group)){
for(j in seq_along(rhs_group)){
# NA removal
no_na = FALSE
if(lhs_group_n_na[i] > 0){
if(rhs_group_n_na[j] > 0){
is_na_current = lhs_group_is_na[[i]] | rhs_group_is_na[[j]]
} else {
is_na_current = lhs_group_is_na[[i]]
}
} else if(rhs_group_n_na[j] > 0){
is_na_current = rhs_group_is_na[[j]]
} else {
no_na = TRUE
}
# Here it depends on whether there are FEs or not, whether it's cumul or not
my_lhs = lhs[lhs_group[[i]]]
if(isLinear){
my_rhs = linear_core[1]
if(multi_rhs_cumul){
gmax = max(rhs_group[[j]])
my_rhs[1 + (1:gmax)] = rhs[1:gmax]
} else {
for(u in rhs_group[[j]]){
if(length(rhs[[u]]) > 1){
my_rhs[[length(my_rhs) + 1]] = rhs[[u]]
}
}
}
if(isLinear_right){
my_rhs[[length(my_rhs) + 1]] = linear_core$right
}
} else{
rhs_len = lengths(rhs)
if(multi_rhs_cumul){
gmax = max(rhs_group[[j]])
my_rhs = rhs[rhs_len > 1 & seq_along(rhs) <= gmax]
} else {
my_rhs = rhs[rhs_len > 1 & seq_along(rhs) %in% rhs_group[[j]]]
}
if(isLinear_right){
my_rhs[[length(my_rhs) + 1]] = linear_core$right
}
}
len_all = lengths(my_rhs)
if(any(len_all == 1)){
my_rhs = my_rhs[len_all > 1]
}
if(!no_na){
# NA removal
for(u in seq_along(my_lhs)){
my_lhs[[u]] = my_lhs[[u]][!is_na_current]
}
for(u in seq_along(my_rhs)){
if(length(my_rhs[[u]]) > 1) my_rhs[[u]] = my_rhs[[u]][!is_na_current, , drop = FALSE]
}
my_env = reshape_env(env, obs2keep = which(!is_na_current), assign_lhs = FALSE, assign_rhs = FALSE)
} else {
my_env = reshape_env(env)
}
isLinear_current = TRUE
if(length(my_rhs) == 0){
X_all = 0
isLinear_current = FALSE
} else {
X_all = do.call("cbind", my_rhs)
}
if(do_iv){
# We need to GET them => they have been modified in my_env
iv_lhs = get("iv_lhs", my_env)
iv.mat = get("iv.mat", my_env)
n_inst = ncol(iv.mat)
}
if(isFixef){
# We batch demean
n_vars_X = ifelse(is.null(ncol(X_all)), 0, ncol(X_all))
# fixef information
fixef_sizes = get("fixef_sizes", my_env)
fixef_table_vector = get("fixef_table_vector", my_env)
fixef_id_list = get("fixef_id_list", my_env)
slope_flag = get("slope_flag", my_env)
slope_vars = get("slope_variables", my_env)
if(mem.clean) gc()
vars_demean = cpp_demean(my_lhs, X_all, weights, iterMax = fixef.iter,
diffMax = fixef.tol, r_nb_id_Q = fixef_sizes,
fe_id_list = fixef_id_list, table_id_I = fixef_table_vector,
slope_flag_Q = slope_flag, slope_vars_list = slope_vars,
r_init = init, nthreads = nthreads)
X_demean = vars_demean$X_demean
y_demean = vars_demean$y_demean
if(do_iv){
iv_vars_demean = cpp_demean(iv_lhs, iv.mat, weights, iterMax = fixef.iter,
diffMax = fixef.tol, r_nb_id_Q = fixef_sizes,
fe_id_list = fixef_id_list, table_id_I = fixef_table_vector,
slope_flag_Q = slope_flag, slope_vars_list = slope_vars,
r_init = init, nthreads = nthreads)
iv.mat_demean = iv_vars_demean$X_demean
iv_lhs_demean = iv_vars_demean$y_demean
}
}
# We precompute the solution
if(do_iv){
if(isFixef){
iv_products = cpp_iv_products(X = X_demean, y = y_demean,
Z = iv.mat_demean, u = iv_lhs_demean,
w = weights, nthreads = nthreads)
} else {
iv_products = cpp_iv_products(X = X_all, y = my_lhs, Z = iv.mat,
u = iv_lhs, w = weights, nthreads = nthreads)
}
} else {
if(isFixef){
my_products = cpp_sparse_products(X_demean, weights, y_demean, nthreads = nthreads)
} else {
my_products = cpp_sparse_products(X_all, weights, my_lhs, nthreads = nthreads)
}
xwx = my_products$XtX
xwy = my_products$Xty
}
for(ii in seq_along(my_lhs)){
i_lhs = lhs_group[[i]][ii]
for(jj in rhs_group[[j]]){
qui = rhs_col_id[[jj]]
if(isLinear_current){
my_X = X_all[, qui, drop = FALSE]
} else {
my_X = 0
}
my_fml = .xpd(lhs = lhs_names[i_lhs], rhs = multi_rhs_fml_full[[jj]])
current_env = reshape_env(my_env, lhs = my_lhs[[ii]], rhs = my_X, fml_linear = my_fml)
if(do_iv){
if(isLinear_current){
qui_iv = c(1:n_inst, n_inst + qui)
XtX = iv_products$XtX[qui, qui, drop = FALSE]
Xty = iv_products$Xty[[ii]][qui]
} else {
qui_iv = 1:n_inst
XtX = matrix(0, 1, 1)
Xty = matrix(0, 1, 1)
}
my_iv_products = list(XtX = XtX,
Xty = Xty,
ZXtZX = iv_products$ZXtZX[qui_iv, qui_iv, drop = FALSE],
ZXtu = lapply(iv_products$ZXtu, function(x) x[qui_iv]))
if(isFixef){
my_res = feols(env = current_env, iv_products = my_iv_products,
X_demean = X_demean[ , qui, drop = FALSE],
y_demean = y_demean[[ii]],
iv.mat_demean = iv.mat_demean, iv_lhs_demean = iv_lhs_demean)
} else {
my_res = feols(env = current_env, iv_products = my_iv_products)
}
} else {
if(isFixef){
my_res = feols(env = current_env, xwx = xwx[qui, qui, drop = FALSE], xwy = xwy[[ii]][qui],
X_demean = X_demean[ , qui, drop = FALSE],
y_demean = y_demean[[ii]])
} else {
my_res = feols(env = current_env, xwx = xwx[qui, qui, drop = FALSE], xwy = xwy[[ii]][qui])
}
}
res[[index_2D_to_1D(i_lhs, jj, n_rhs)]] = my_res
}
}
}
}
# Meta information for fixest_multi
index = list(lhs = n_lhs, rhs = n_rhs)
all_names = list(lhs = lhs_names, rhs = rhs_names)
# result
res_multi = setup_multi(index, all_names, res)
return(res_multi)
}
#
# IV ####
#
do_iv = get("do_iv", env)
if(do_iv){
assign("do_iv", FALSE, env)
assign("verbose", 0, env)
# Loaded already
# y: lhs
# X: linear.mat
iv_lhs = get("iv_lhs", env)
iv_lhs_names = get("iv_lhs_names", env)
iv.mat = get("iv.mat", env) # we enforce (before) at least one variable in iv.mat
K = ncol(iv.mat)
n_endo = length(iv_lhs)
lean = get("lean", env)
# Simple check that the function is not misused
pblm = intersect(iv_lhs_names, colnames(X))
if(length(pblm) > 0){
any_exo = length(setdiff(colnames(X), iv_lhs_names)) > 0
msg = if(any_exo) "" else " If there is no exogenous variable, just use '1' in the first part of the formula."
stop("Endogenous variables should not be used as exogenous regressors. The variable", enumerate_items(pblm, "s.quote.were"), " found in the first part of the multipart formula: ", ifsingle(pblm, "it", "they"), " should not be there.", msg)
}
if(isFixef){
# we batch demean first
n_vars_X = ifelse(is.null(ncol(X)), 0, ncol(X))
if(mem.clean) gc()
if(!is.null(dots$iv_products)){
# means this is a call from multiple LHS/RHS
X_demean = dots$X_demean
y_demean = dots$y_demean
iv.mat_demean = dots$iv.mat_demean
iv_lhs_demean = dots$iv_lhs_demean
iv_products = dots$iv_products
} else {
# fixef information
fixef_sizes = get("fixef_sizes", env)
fixef_table_vector = get("fixef_table_vector", env)
fixef_id_list = get("fixef_id_list", env)
slope_flag = get("slope_flag", env)
slope_vars = get("slope_variables", env)
vars_demean = cpp_demean(y, X, weights, iterMax = fixef.iter,
diffMax = fixef.tol, r_nb_id_Q = fixef_sizes,
fe_id_list = fixef_id_list, table_id_I = fixef_table_vector,
slope_flag_Q = slope_flag, slope_vars_list = slope_vars,
r_init = init, nthreads = nthreads)
iv_vars_demean = cpp_demean(iv_lhs, iv.mat, weights, iterMax = fixef.iter,
diffMax = fixef.tol, r_nb_id_Q = fixef_sizes,
fe_id_list = fixef_id_list, table_id_I = fixef_table_vector,
slope_flag_Q = slope_flag, slope_vars_list = slope_vars,
r_init = init, nthreads = nthreads)
X_demean = vars_demean$X_demean
y_demean = vars_demean$y_demean
iv.mat_demean = iv_vars_demean$X_demean
iv_lhs_demean = iv_vars_demean$y_demean
# We precompute the solution
iv_products = cpp_iv_products(X = X_demean, y = y_demean, Z = iv.mat_demean,
u = iv_lhs_demean, w = weights, nthreads = nthreads)
}
if(n_vars_X == 0){
ZX_demean = iv.mat_demean
ZX = iv.mat
} else {
ZX_demean = cbind(iv.mat_demean, X_demean)
ZX = cbind(iv.mat, X)
}
# First stage(s)
ZXtZX = iv_products$ZXtZX
ZXtu = iv_products$ZXtu
res_first_stage = list()
for(i in 1:n_endo){
current_env = reshape_env(env, lhs = iv_lhs[[i]], rhs = ZX, fml_iv_endo = iv_lhs_names[i])
my_res = feols(env = current_env, xwx = ZXtZX, xwy = ZXtu[[i]],
X_demean = ZX_demean, y_demean = iv_lhs_demean[[i]],
add_fitted_demean = TRUE, iv_call = TRUE)
# For the F-stats
if(n_vars_X == 0){
my_res$ssr_no_inst = cpp_ssq(iv_lhs_demean[[i]], weights)
} else {
fit_no_inst = ols_fit(iv_lhs_demean[[i]], X_demean, w = weights, correct_0w = FALSE,
collin.tol = collin.tol, nthreads = nthreads,
xwx = iv_products$XtX, xwy = ZXtu[[i]][-(1:K)])
my_res$ssr_no_inst = cpp_ssq(fit_no_inst$residuals, weights)
}
my_res$iv_stage = 1
my_res$iv_inst_names_xpd = colnames(iv.mat)
res_first_stage[[iv_lhs_names[i]]] = my_res
}
if(verbose >= 2) gt("1st stage(s)")
# Second stage
if(n_endo == 1){
res_FS = res_first_stage[[1]]
U = as.matrix(res_FS$fitted.values)
U_demean = as.matrix(res_FS$fitted.values_demean)
} else {
U_list = list()
U_dm_list = list()
for(i in 1:n_endo){
res_FS = res_first_stage[[i]]
U_list[[i]] = res_FS$fitted.values
U_dm_list[[i]] = res_FS$fitted.values_demean
}
U = do.call("cbind", U_list)
U_demean = do.call("cbind", U_dm_list)
}
colnames(U) = colnames(U_demean) = paste0("fit_", iv_lhs_names)
if(n_vars_X == 0){
UX = as.matrix(U)
UX_demean = as.matrix(U_demean)
} else {
UX = cbind(U, X)
UX_demean = cbind(U_demean, X_demean)
}
XtX = iv_products$XtX
Xty = iv_products$Xty
iv_prod_second = cpp_iv_product_completion(XtX = XtX, Xty = Xty, X = X_demean,
y = y_demean, U = U_demean, w = weights, nthreads = nthreads)
UXtUX = iv_prod_second$UXtUX
UXty = iv_prod_second$UXty
resid_s1 = lapply(res_first_stage, function(x) x$residuals)
current_env = reshape_env(env, rhs = UX)
res_second_stage = feols(env = current_env, xwx = UXtUX, xwy = UXty,
X_demean = UX_demean, y_demean = y_demean,
resid_1st_stage = resid_s1, iv_call = TRUE)
# For the F-stats
if(n_vars_X == 0){
res_second_stage$ssr_no_endo = cpp_ssq(y_demean, weights)
} else {
fit_no_endo = ols_fit(y_demean, X_demean, w = weights, correct_0w = FALSE,
collin.tol = collin.tol, nthreads = nthreads,
xwx = XtX, xwy = Xty)
res_second_stage$ssr_no_endo = cpp_ssq(fit_no_endo$residuals, weights)
}
} else {
# fixef == FALSE
# We precompute the solution
if(!is.null(dots$iv_products)){
# means this is a call from multiple LHS/RHS
iv_products = dots$iv_products
} else {
iv_products = cpp_iv_products(X = X, y = y, Z = iv.mat,
u = iv_lhs, w = weights, nthreads = nthreads)
}
if(verbose >= 2) gt("IV products")
ZX = cbind(iv.mat, X)
# First stage(s)
ZXtZX = iv_products$ZXtZX
ZXtu = iv_products$ZXtu
# Let's put the intercept first => I know it's not really elegant, but that's life
is_int = "(Intercept)" %in% colnames(X)
if(is_int){
nz = ncol(iv.mat)
nzx = ncol(ZX)
qui = c(nz + 1, (1:nzx)[-(nz + 1)])
ZX = ZX[, qui, drop = FALSE]
ZXtZX = ZXtZX[qui, qui, drop = FALSE]
for(i in seq_along(ZXtu)){
ZXtu[[i]] = ZXtu[[i]][qui]
}
}
res_first_stage = list()
for(i in 1:n_endo){
current_env = reshape_env(env, lhs = iv_lhs[[i]], rhs = ZX, fml_iv_endo = iv_lhs_names[i])
my_res = feols(env = current_env, xwx = ZXtZX, xwy = ZXtu[[i]], iv_call = TRUE)
# For the F-stats
fit_no_inst = ols_fit(iv_lhs[[i]], X, w = weights, correct_0w = FALSE, collin.tol = collin.tol, nthreads = nthreads,
xwx = ZXtZX[-(1:K + is_int), -(1:K + is_int), drop = FALSE], xwy = ZXtu[[i]][-(1:K + is_int)])
my_res$ssr_no_inst = cpp_ssq(fit_no_inst$residuals, weights)
my_res$iv_stage = 1
my_res$iv_inst_names_xpd = colnames(iv.mat)
res_first_stage[[iv_lhs_names[i]]] = my_res
}
if(verbose >= 2) gt("1st stage(s)")
# Second stage
if(n_endo == 1){
res_FS = res_first_stage[[1]]
U = as.matrix(res_FS$fitted.values)
} else {
U_list = list()
U_dm_list = list()
for(i in 1:n_endo){
res_FS = res_first_stage[[i]]
U_list[[i]] = res_FS$fitted.values
}
U = do.call("cbind", U_list)
}
colnames(U) = paste0("fit_", iv_lhs_names)
UX = cbind(U, X)
XtX = iv_products$XtX
Xty = iv_products$Xty
iv_prod_second = cpp_iv_product_completion(XtX = XtX, Xty = Xty, X = X,
y = y, U = U, w = weights, nthreads = nthreads)
UXtUX = iv_prod_second$UXtUX
UXty = iv_prod_second$UXty
if(is_int){
nu = ncol(U)
nux = ncol(UX)
qui = c(nu + 1, (1:nux)[-(nu + 1)])
UX = UX[, qui, drop = FALSE]
UXtUX = UXtUX[qui, qui, drop = FALSE]
UXty = UXty[qui]
}
resid_s1 = lapply(res_first_stage, function(x) x$residuals)
current_env = reshape_env(env, rhs = UX)
res_second_stage = feols(env = current_env, xwx = UXtUX, xwy = UXty,
resid_1st_stage = resid_s1, iv_call = TRUE)
# For the F-stats
fit_no_endo = ols_fit(y, X, w = weights, correct_0w = FALSE,
collin.tol = collin.tol, nthreads = nthreads,
xwx = XtX, xwy = Xty)
res_second_stage$ssr_no_endo = cpp_ssq(fit_no_endo$residuals, weights)
}
if(verbose >= 2) gt("2nd stage")
#
# Wu-Hausman endogeneity test
#
# Current limitation => only standard vcov => later add argument (which would yield the full est.)?
# The problem of the full est. is that it takes memory very likely needlessly
if(isFixef){
ENDO_demean = do.call(cbind, iv_lhs_demean)
iv_prod_wh = cpp_iv_product_completion(XtX = UXtUX, Xty = UXty,
X = UX_demean, y = y_demean, U = ENDO_demean,
w = weights, nthreads = nthreads)
RHS_wh = cbind(ENDO_demean, UX_demean)
fit_wh = ols_fit(y_demean, RHS_wh, w = weights, correct_0w = FALSE, collin.tol = collin.tol,
nthreads = nthreads, xwx = iv_prod_wh$UXtUX, xwy = iv_prod_wh$UXty)
} else {
ENDO = do.call(cbind, iv_lhs)
iv_prod_wh = cpp_iv_product_completion(XtX = UXtUX, Xty = UXty,
X = UX, y = y, U = ENDO,
w = weights, nthreads = nthreads)
RHS_wh = cbind(ENDO, UX)
fit_wh = ols_fit(y, RHS_wh, w = weights, correct_0w = FALSE, collin.tol = collin.tol,
nthreads = nthreads, xwx = iv_prod_wh$UXtUX, xwy = iv_prod_wh$UXty)
}
df1 = n_endo
df2 = length(y) - (res_second_stage$nparams + df1)
if(any(fit_wh$is_excluded)){
stat = p = NA
} else {
qui = df1 + 1:df1 + ("(Intercept)" %in% names(res_second_stage$coefficients))
my_coef = fit_wh$coefficients[qui]
vcov_wh = fit_wh$xwx_inv[qui, qui] * cpp_ssq(fit_wh$residuals, weights) / df2
stat = drop(my_coef %*% solve(vcov_wh) %*% my_coef) / df1
p = pf(stat, df1, df2, lower.tail = FALSE)
}
res_second_stage$iv_wh = list(stat = stat, p = p, df1 = df1, df2 = df2)
#
# Sargan
#
if(n_endo < ncol(iv.mat)){
df = ncol(iv.mat) - n_endo
resid_2nd = res_second_stage$residuals
if(isFixef){
xwy = cpppar_xwy(ZX_demean, resid_2nd, weights, nthreads)
fit_sargan = ols_fit(resid_2nd, ZX_demean, w = weights, correct_0w = FALSE, collin.tol = collin.tol,
nthreads = nthreads, xwx = ZXtZX, xwy = xwy)
} else {
xwy = cpppar_xwy(ZX, resid_2nd, weights, nthreads)
fit_sargan = ols_fit(resid_2nd, ZX, w = weights, correct_0w = FALSE, collin.tol = collin.tol,
nthreads = nthreads, xwx = ZXtZX, xwy = xwy)
}
r = fit_sargan$residuals
stat = length(r) * (1 - cpp_ssq(r, weights) / cpp_ssr_null(resid_2nd))
p = pchisq(stat, df, lower.tail = FALSE)
res_second_stage$iv_sargan = list(stat = stat, p = p, df = df)
}
# extra information
res_second_stage$iv_inst_names_xpd = res_first_stage[[1]]$iv_inst_names_xpd
res_second_stage$iv_endo_names_fit = paste0("fit_", res_second_stage$iv_endo_names)
# if lean = TRUE: we clean the IV residuals (which were needed so far)
if(lean){
for(i in 1:n_endo){
res_first_stage[[i]]$residuals = NULL
res_first_stage[[i]]$fitted.values = NULL
res_first_stage[[i]]$fitted.values_demean = NULL
}
res_second_stage$residuals = NULL
res_second_stage$fitted.values = NULL
res_second_stage$fitted.values_demean = NULL
}
res_second_stage$iv_first_stage = res_first_stage
# meta info
res_second_stage$iv_stage = 2
return(res_second_stage)
}
#
# Regular estimation ####
#
onlyFixef = length(X) == 1
if(fromGLM){
res = list(coefficients = NA)
} else {
res = get("res", env)
}
if(skip_fixef){
# Variables were already demeaned
} else if(!isFixef){
# No Fixed-effects
y_demean = y
X_demean = X
res$means = 0
} else {
time_demean = proc.time()
# Number of nthreads
n_vars_X = ifelse(is.null(ncol(X)), 0, ncol(X))
# fixef information
fixef_sizes = get("fixef_sizes", env)
fixef_table_vector = get("fixef_table_vector", env)
fixef_id_list = get("fixef_id_list", env)
slope_flag = get("slope_flag", env)
slope_vars = get("slope_variables", env)
if(mem.clean){
# we can't really rm many variables... but gc can be enough
# cpp_demean is the most mem intensive bit
gc()
}
vars_demean = cpp_demean(y, X, weights, iterMax = fixef.iter,
diffMax = fixef.tol, r_nb_id_Q = fixef_sizes,
fe_id_list = fixef_id_list, table_id_I = fixef_table_vector,
slope_flag_Q = slope_flag, slope_vars_list = slope_vars,
r_init = init, nthreads = nthreads)
y_demean = vars_demean$y_demean
if(onlyFixef){
X_demean = matrix(1, nrow = length(y_demean))
} else {
X_demean = vars_demean$X_demean
}
res$iterations = vars_demean$iterations
if(fromGLM){
res$means = vars_demean$means
}
if(mem.clean){
rm(vars_demean)
}
if(any(abs(slope_flag) > 0) && any(res$iterations > 300)){
# Maybe we have a convergence problem
# This is poorly coded, but it's a temporary fix
opt_fe <- check_conv(y_demean, X_demean, fixef_id_list, slope_flag, slope_vars, weights)
# This is a bit too rough a check but it should catch the most problematic cases
if(any(opt_fe > 1e-4)){
msg = "There seems to be a convergence problem due to the presence of variables with varying slopes. The precision of the estimates may not be great."
if(any(slope_flag < 0)){
sugg = "This convergence problem mostly arises when there are varying slopes without their associated fixed-effect, as is the case in your estimation. Why not try to include the fixed-effect (i.e. use '[' instead of '[[')?"
} else {
sugg = "As a workaround, and if there are not too many slopes, you can use the variables with varying slopes as regular variables using the function interact (see ?interact)."
}
msg = paste(msg, sugg)
res$convStatus = FALSE
res$message = paste0("tol: ", signif_plus(fixef.tol), ", iter: ", max(res$iterations))
if(fromGLM){
res$warn_varying_slope = msg
} else {
warning(msg)
}
}
} else if(any(res$iterations >= fixef.iter)){
msg = paste0("Demeaning algorithm: Absence of convergence after reaching the maximum number of iterations (", fixef.iter, ").")
res$convStatus = FALSE
res$message = paste0("Maximum of ", fixef.iter, " iterations reached.")
if(fromGLM){
res$warn_varying_slope = msg
} else {
warning(msg)
}
}
if(verbose >= 1){
if(length(fixef_sizes) > 1){
gt("Demeaning", FALSE)
cat(" (iter: ", paste0(c(tail(res$iterations, 1), res$iterations[-length(res$iterations)]), collapse = ", "), ")\n", sep="")
} else {
gt("Demeaning")
}
}
}
#
# Estimation
#
if(mem.clean){
gc()
}
if(!onlyFixef){
est = ols_fit(y_demean, X_demean, weights, correct_0w, collin.tol, nthreads, xwx, xwy)
if(mem.clean){
gc()
}
# Corner case: not any relevant variable
if(!is.null(est$all_removed)){
all_vars = colnames(X)
IN_MULTI = get("IN_MULTI", env)
if(isFixef){
msg = paste0(ifsingle(all_vars, "The only variable ", "All variables"), enumerate_items(all_vars, "quote.is", nmax = 3), " collinear with the fixed effects. In such circumstances, the estimation is void.")
} else {
msg = paste0(ifsingle(all_vars, "The only variable ", "All variables"), enumerate_items(all_vars, "quote.is", nmax = 3), " virtually constant and equal to 0. In such circumstances, the estimation is void.")
}
if(IN_MULTI || !warn){
if(warn) warning(msg)
return(fixest_NA_results(env))
} else {
stop_up(msg, up = fromGLM)
}
}
# Formatting the result
coef = est$coefficients
names(coef) = colnames(X)[!est$is_excluded]
res$coefficients = coef
# Additional stuff
res$residuals = est$residuals
res$multicol = est$multicol
res$collin.min_norm = est$collin.min_norm
if(fromGLM) res$is_excluded = est$is_excluded
if(demeaned){
res$y_demeaned = y_demean
res$X_demeaned = X_demean
colnames(res$X_demeaned) = colnames(X)
}
} else {
res$residuals = y_demean
res$coefficients = coef = NULL
res$onlyFixef = TRUE
res$multicol = FALSE
if(demeaned){
res$y_demeaned = y_demean
}
}
time_post = proc.time()
if(verbose >= 1){
gt("Estimation")
}
if(mem.clean){
gc()
}
if(fromGLM){
res$fitted.values = y - res$residuals
if(!onlyFixef){
res$X_demean = X_demean
}
return(res)
}
#
# Post processing
#
# Collinearity message
collin.adj = 0
if(res$multicol){
var_collinear = colnames(X)[est$is_excluded]
if(notes){
message(ifsingle(var_collinear, "The variable ", "Variables "), enumerate_items(var_collinear, "quote.has", nmax = 3), " been removed because of collinearity (see $collin.var).")
}
res$collin.var = var_collinear
# full set of coeffficients with NAs
collin.coef = setNames(rep(NA, ncol(X)), colnames(X))
collin.coef[!est$is_excluded] = res$coefficients
res$collin.coef = collin.coef
if(isFixef){
X = X[, !est$is_excluded, drop = FALSE]
}
X_demean = X_demean[, !est$is_excluded, drop = FALSE]
collin.adj = sum(est$is_excluded)
}
n = length(y)
res$nparams = res$nparams - collin.adj
df_k = res$nparams
res$nobs = n
if(isWeight) res$weights = weights
#
# IV correction
#
if(!is.null(dots$resid_1st_stage)){
# We correct the residual
is_int = "(Intercept)" %in% names(res$coefficients)
resid_new = cpp_iv_resid(res$residuals, res$coefficients, dots$resid_1st_stage, is_int, nthreads)
res$iv_residuals = res$residuals
res$residuals = resid_new
}
#
# Hessian, score, etc
#
if(onlyFixef){
res$fitted.values = res$sumFE = y - res$residuals
} else {
if(mem.clean){
gc()
}
# X_beta / fitted / sumFE
if(isFixef){
x_beta = cpppar_xbeta(X, coef, nthreads)
res$sumFE = y - x_beta - res$residuals
res$fitted.values = x_beta + res$sumFE
if(isTRUE(dots$add_fitted_demean)){
res$fitted.values_demean = est$fitted.values
}
} else {
res$fitted.values = est$fitted.values
}
if(isOffset){
res$fitted.values = res$fitted.values + offset
}
#
# score + hessian + vcov
if(isWeight){
res$scores = (res$residuals * weights) * X_demean
} else {
res$scores = res$residuals * X_demean
}
res$hessian = est$xwx
if(mem.clean){
gc()
}
res$sigma2 = cpp_ssq(res$residuals, weights) / (length(y) - df_k)
res$cov.unscaled = est$xwx_inv * res$sigma2
rownames(res$cov.unscaled) = colnames(res$cov.unscaled) = names(coef)
# se
se = diag(res$cov.unscaled)
se[se < 0] = NA
se = sqrt(se)
# coeftable
zvalue <- coef/se
pvalue <- 2*pt(-abs(zvalue), max(n - df_k, 1))
coeftable <- data.frame("Estimate"=coef, "Std. Error"=se, "t value"=zvalue, "Pr(>|t|)"=pvalue)
names(coeftable) <- c("Estimate", "Std. Error", "t value", "Pr(>|t|)")
row.names(coeftable) <- names(coef)
attr(se, "type") = attr(coeftable, "type") = "Standard"
res$coeftable = coeftable
res$se = se
}
# fit stats
if(!cpp_isConstant(res$fitted.values)){
res$sq.cor = stats::cor(y, res$fitted.values)**2
} else {
res$sq.cor = NA
}
if(mem.clean){
gc()
}
res$ssr_null = cpp_ssr_null(y, weights)
res$ssr = cpp_ssq(res$residuals, weights)
sigma_null = sqrt(res$ssr_null / ifelse(isWeight, sum(weights), n))
res$ll_null = -1/2/sigma_null^2*res$ssr_null - (log(sigma_null) + log(2*pi)/2) * ifelse(isWeight, sum(weights), n)
# fixef info
if(isFixef){
# For the within R2
if(!onlyFixef){
res$ssr_fe_only = cpp_ssq(y_demean, weights)
sigma = sqrt(res$ssr_fe_only / ifelse(isWeight, sum(weights), n))
res$ll_fe_only = -1/2/sigma^2*res$ssr_fe_only - (log(sigma) + log(2*pi)/2) * ifelse(isWeight, sum(weights), n)
}
}
if(verbose >= 3) gt("Post-processing")
class(res) = "fixest"
do_summary = get("do_summary", env)
if(do_summary){
se = get("se", env)
cluster = get("cluster", env)
lean = get("lean", env)
dof = get("dof", env)
summary_flags = get("summary_flags", env)
# If lean = TRUE, 1st stage residuals are still needed for the 2nd stage
if(isTRUE(dots$iv_call) && lean){
r = res$residuals
fv = res$fitted.values
fvd = res$fitted.values_demean
}
res = summary(res, se = se, cluster = cluster, dof = dof, lean = lean, summary_flags = summary_flags)
if(isTRUE(dots$iv_call) && lean){
res$residuals = r
res$fitted.values = fv
res$fitted.values_demean = fvd
}
}
res
}
ols_fit = function(y, X, w, correct_0w = FALSE, collin.tol, nthreads, xwx = NULL, xwy = NULL){
    # Weighted least-squares fit via Cholesky, with collinearity detection.
    # Arguments are assumed to have been validated by the caller.
    #
    # y, X, w: dependent variable, design matrix, weights
    # xwx/xwy: optionally precomputed X'WX and X'Wy (skips the product step)
    #
    # Returns a list with: xwx, coefficients, fitted.values, xwx_inv,
    # multicol (flag), residuals, is_excluded (per-column collinearity flags),
    # collin.min_norm -- or list(all_removed = TRUE) if every column is collinear.

    # compute the cross-products only when they were not supplied
    if(is.null(xwx)){
        prods = cpp_sparse_products(X, w, y, correct_0w, nthreads)
        xwx = prods$XtX
        xwy = prods$Xty
    }

    # Cholesky factorization -- also flags collinear columns
    chol_info = cpp_cholesky(xwx, collin.tol, nthreads)

    # corner case: all variables collinear (can happen with fixed-effects)
    if(!is.null(chol_info$all_removed)){
        return(list(all_removed = TRUE))
    }

    xwx_inv = chol_info$XtX_inv
    is_excluded = chol_info$id_excl
    multicol = any(is_excluded)

    if(multicol){
        # drop the collinear columns before computing beta and the fit
        beta = as.vector(xwx_inv %*% xwy[!is_excluded])
        fitted.values = cpppar_xbeta(X[, !is_excluded, drop = FALSE], beta, nthreads)
    } else {
        # no exclusion: use X as is (avoids copies)
        beta = as.vector(xwx_inv %*% xwy)
        fitted.values = cpppar_xbeta(X, beta, nthreads)
    }

    list(xwx = xwx, coefficients = beta, fitted.values = fitted.values,
         xwx_inv = xwx_inv, multicol = multicol, residuals = y - fitted.values,
         is_excluded = is_excluded, collin.min_norm = chol_info$min_norm)
}
check_conv = function(y, X, fixef_id_list, slope_flag, slope_vars, weights){
    # Convergence check of the demeaning algorithm when varying slopes are
    # present. For each variable (the demeaned y first, then each demeaned
    # column of X) and each fixed-effect/slope coefficient, it computes the
    # "optimal" coefficient update: if the algorithm converged, all ~ 0.
    #
    # Args:
    #   y, X: demeaned dependent variable and design matrix
    #   fixef_id_list: list of fixed-effect identifiers (one per FE dimension)
    #   slope_flag: per-dimension flag; >= 0 means the plain fixed-effect is
    #               included, abs(slope_flag) is the number of varying slopes
    #   slope_vars: list of the variables with varying slopes, in order
    #   weights: observation weights
    #
    # Returns: a K x C matrix (K = 1 + ncol(X), C = total number of FE/slope
    # coefficients) of maximum absolute optimal coefficients.
    #
    # NOTE: still slow (R-level tapply) -- to be ported to C++ if it lasts.

    Q = length(slope_flag)

    # number of variables to check: y alone, or y plus the columns of X
    if(length(X) == 1){
        K = 1
    } else {
        K = NCOL(X) + 1
    }

    # total number of coefficients per variable => preallocation
    # (previously the vector was grown with c(), which is O(C^2))
    n_coefs = sum(slope_flag >= 0) + sum(abs(slope_flag))

    res = vector("list", K)
    for(k in seq_len(K)){
        x = if(k == 1) y else X[, k - 1]

        res_tmp = numeric(n_coefs)
        pos = 0            # current position in res_tmp
        index_slope = 1    # current position in slope_vars
        for(q in seq_len(Q)){
            fixef_id = fixef_id_list[[q]]

            if(slope_flag[q] >= 0){
                # optimal fixed-effect coefficient: per-group mean of w*x
                pos = pos + 1
                res_tmp[pos] = max(abs(tapply(weights * x, fixef_id, mean)))
            }

            n_slopes = abs(slope_flag[q])
            if(n_slopes > 0){
                for(i in seq_len(n_slopes)){
                    var = slope_vars[[index_slope]]
                    # optimal slope coefficient: sum(w*x*v) / sum(w*v^2) per group
                    num = tapply(weights * x * var, fixef_id, sum)
                    denom = tapply(weights * var^2, fixef_id, sum)
                    pos = pos + 1
                    res_tmp[pos] = max(abs(num / denom))
                    index_slope = index_slope + 1
                }
            }
        }

        res[[k]] = res_tmp
    }

    do.call("rbind", res)
}
#' @rdname feols
feols.fit = function(y, X, fixef_df, offset, split, fsplit, cluster, se, dof, weights, subset,
fixef.rm = "perfect", fixef.tol = 1e-6, fixef.iter = 10000,
collin.tol = 1e-10, nthreads = getFixest_nthreads(), lean = FALSE, warn = TRUE,
notes = getFixest_notes(), mem.clean = FALSE, verbose = 0, only.env = FALSE, env, ...){
# Matrix interface to feols: takes the dependent variable y, the matrix of
# regressors X and (optionally) the fixed-effects fixef_df directly. It builds
# the estimation environment with fixest_env() and delegates to feols().
# normalize a missing 'weights' argument to NULL so it can be passed along
if(missing(weights)) weights = NULL
time_start = proc.time()
if(missing(env)){
# no pre-built environment: create one via fixest_env(); the call is wrapped
# in try() so errors can be re-thrown with the proper origin below
set_defaults("fixest_estimation")
call_env = new.env(parent = parent.frame())
env = try(fixest_env(y = y, X = X, fixef_df = fixef_df, offset = offset, weights = weights, subset = subset, split = split, fsplit = fsplit, cluster = cluster, se = se, dof = dof, fixef.rm = fixef.rm, fixef.tol=fixef.tol, fixef.iter=fixef.iter, collin.tol = collin.tol, nthreads = nthreads, lean = lean, warn=warn, notes=notes, verbose = verbose, mem.clean = mem.clean, origin = "feols.fit", mc_origin = match.call(), call_env = call_env, ...), silent = TRUE)
} else if((r <- !is.environment(env)) || !isTRUE(env$fixest_env)){
# a user-provided env must be a genuine fixest estimation environment
stop("Argument 'env' must be an environment created by a fixest estimation. Currently it is not ", ifelse(r, "an", "a 'fixest'"), " environment.")
}
if("try-error" %in% class(env)){
# environment creation failed: re-throw with the caller's origin when provided
mc = match.call()
origin = ifelse(is.null(mc$origin), "feols.fit", mc$origin)
stop(format_error_msg(env, origin))
}
check_arg(only.env, "logical scalar")
if(only.env){
# return the estimation environment without performing the estimation
return(env)
}
verbose = get("verbose", env)
if(verbose >= 2) cat("Setup in ", (proc.time() - time_start)[3], "s\n", sep="")
# workhorse is feols (OK if error msg leads to feols [clear enough])
res = feols(env = env)
res
}
#' Fixed-effects GLM estimations
#'
#' Estimates GLM models with any number of fixed-effects.
#'
#' @inheritParams feols
#' @inheritParams femlm
#' @inheritSection feols Combining the fixed-effects
#' @inheritSection feols Varying slopes
#' @inheritSection feols Lagging variables
#' @inheritSection feols Interactions
#' @inheritSection feols On standard-errors
#' @inheritSection feols Multiple estimations
#'
#' @param family Family to be used for the estimation. Defaults to \code{poisson()}. See \code{\link[stats]{family}} for details of family functions.
#' @param start Starting values for the coefficients. Can be: i) a numeric of length 1 (e.g. \code{start = 0}), ii) a numeric vector of the exact same length as the number of variables, or iii) a named vector of any length (the names will be used to initialize the appropriate coefficients). Default is missing.
#' @param etastart Numeric vector of the same length as the data. Starting values for the linear predictor. Default is missing.
#' @param mustart Numeric vector of the same length as the data. Starting values for the vector of means. Default is missing.
#' @param fixef.tol Precision used to obtain the fixed-effects. Defaults to \code{1e-6}. It corresponds to the maximum absolute difference allowed between two coefficients of successive iterations.
#' @param glm.iter Number of iterations of the glm algorithm. Default is 25.
#' @param glm.tol Tolerance level for the glm algorithm. Default is \code{1e-8}.
#' @param verbose Integer. Higher values give more information. In particular, it can detail the number of iterations in the demeaning algorithm (the first number is the left-hand-side, the other numbers are the right-hand-side variables). It can also detail the step-halving algorithm.
#' @param notes Logical. By default, three notes are displayed: when NAs are removed, when some fixed-effects are removed because of only 0 (or 0/1) outcomes, or when a variable is dropped because of collinearity. To avoid displaying these messages, you can set \code{notes = FALSE}. You can remove these messages permanently by using \code{setFixest_notes(FALSE)}.
#'
#' @details
#' The core of the GLM are the weighted OLS estimations. These estimations are performed with \code{\link[fixest]{feols}}. The method used to demean each variable along the fixed-effects is based on Berge (2018), since this is the same problem to solve as for the Gaussian case in a ML setup.
#'
#' @return
#' A \code{fixest} object. Note that \code{fixest} objects contain many elements and most of them are for internal use, they are presented here only for information. To access them, it is safer to use the user-level methods (e.g. \code{\link[fixest]{vcov.fixest}}, \code{\link[fixest]{resid.fixest}}, etc) or functions (like for instance \code{\link[fixest]{fitstat}} to access any fit statistic).
#' \item{nobs}{The number of observations.}
#' \item{fml}{The linear formula of the call.}
#' \item{call}{The call of the function.}
#' \item{method}{The method used to estimate the model.}
#' \item{family}{The family used to estimate the model.}
#' \item{fml_all}{A list containing different parts of the formula. Always contain the linear formula. Then, if relevant: \code{fixef}: the fixed-effects.}
#' \item{nparams}{The number of parameters of the model.}
#' \item{fixef_vars}{The names of each fixed-effect dimension.}
#' \item{fixef_id}{The list (of length the number of fixed-effects) of the fixed-effects identifiers for each observation.}
#' \item{fixef_sizes}{The size of each fixed-effect (i.e. the number of unique identifiers for each fixed-effect dimension).}
#' \item{y}{(When relevant.) The dependent variable (used to compute the within-R2 when fixed-effects are present).}
#' \item{convStatus}{Logical, convergence status of the IRWLS algorithm.}
#' \item{irls_weights}{The weights of the last iteration of the IRWLS algorithm.}
#' \item{obs_selection}{(When relevant.) List containing vectors of integers. It represents the sequential selection of observation vis a vis the original data set.}
#' \item{fixef_removed}{(When relevant.) In the case there were fixed-effects and some observations were removed because of only 0/1 outcome within a fixed-effect, it gives the list (for each fixed-effect dimension) of the fixed-effect identifiers that were removed.}
#' \item{coefficients}{The named vector of estimated coefficients.}
#' \item{coeftable}{The table of the coefficients with their standard errors, z-values and p-values.}
#' \item{loglik}{The loglikelihood.}
#' \item{deviance}{Deviance of the fitted model.}
#' \item{iterations}{Number of iterations of the algorithm.}
#' \item{ll_null}{Log-likelihood of the null model (i.e. with the intercept only).}
#' \item{ssr_null}{Sum of the squared residuals of the null model (containing only the intercept).}
#' \item{pseudo_r2}{The adjusted pseudo R2.}
#' \item{fitted.values}{The fitted values are the expected value of the dependent variable for the fitted model: that is \eqn{E(Y|X)}.}
#' \item{linear.predictors}{The linear predictors.}
#' \item{residuals}{The residuals (y minus the fitted values).}
#' \item{sq.cor}{Squared correlation between the dependent variable and the expected predictor (i.e. fitted.values) obtained by the estimation.}
#' \item{hessian}{The Hessian of the parameters.}
#' \item{cov.unscaled}{The variance-covariance matrix of the parameters.}
#' \item{se}{The standard-error of the parameters.}
#' \item{scores}{The matrix of the scores (first derivative for each observation).}
#' \item{residuals}{The difference between the dependent variable and the expected predictor.}
#' \item{sumFE}{The sum of the fixed-effects coefficients for each observation.}
#' \item{offset}{(When relevant.) The offset formula.}
#' \item{weights}{(When relevant.) The weights formula.}
#' \item{collin.var}{(When relevant.) Vector containing the variables removed because of collinearity.}
#' \item{collin.coef}{(When relevant.) Vector of coefficients, where the values of the variables removed because of collinearity are NA.}
#'
#'
#'
#'
#' @seealso
#' See also \code{\link[fixest]{summary.fixest}} to see the results with the appropriate standard-errors, \code{\link[fixest]{fixef.fixest}} to extract the fixed-effects coefficients, and the function \code{\link[fixest]{etable}} to visualize the results of multiple estimations.
#' And other estimation methods: \code{\link[fixest]{feols}}, \code{\link[fixest]{femlm}}, \code{\link[fixest:femlm]{fenegbin}}, \code{\link[fixest]{feNmlm}}.
#'
#' @author
#' Laurent Berge
#'
#' @references
#'
#' Berge, Laurent, 2018, "Efficient estimation of maximum likelihood models with multiple fixed-effects: the R package FENmlm." CREA Discussion Papers, 13 (\url{https://wwwen.uni.lu/content/download/110162/1299525/file/2018_13}).
#'
#' For models with multiple fixed-effects:
#'
#' Gaure, Simen, 2013, "OLS with multiple high dimensional category variables", Computational Statistics & Data Analysis 66 pp. 8--18
#'
#'
#' @examples
#'
#' # Default is a poisson model
#' res = feglm(Sepal.Length ~ Sepal.Width + Petal.Length | Species, iris)
#'
#' # You could also use fepois
#' res_pois = fepois(Sepal.Length ~ Sepal.Width + Petal.Length | Species, iris)
#'
#' # With the fit method:
#' res_fit = feglm.fit(iris$Sepal.Length, iris[, 2:3], iris$Species)
#'
#' # All results are identical:
#' etable(res, res_pois, res_fit)
#'
#' # Note that you have more examples in feols
#'
#' #
#' # Multiple estimations:
#' #
#'
#' # 6 estimations
#' est_mult = fepois(c(Ozone, Solar.R) ~ Wind + Temp + csw0(Wind:Temp, Day), airquality)
#'
#' # We can display the results for the first lhs:
#' etable(est_mult[lhs = 1])
#'
#' # And now the second (access can be made by name)
#' etable(est_mult[lhs = "Solar.R"])
#'
#' # Now we focus on the two last right hand sides
#' # (note that .N can be used to specify the last item)
#' etable(est_mult[rhs = 2:.N])
#'
#' # Combining with split
#' est_split = fepois(c(Ozone, Solar.R) ~ sw(poly(Wind, 2), poly(Temp, 2)),
#' airquality, split = ~ Month)
#'
#' # You can display everything at once with the print method
#' est_split
#'
#' # Different way of displaying the results with "compact"
#' summary(est_split, "compact")
#'
#' # You can still select which sample/LHS/RHS to display
#' est_split[sample = 1:2, lhs = 1, rhs = 1]
#'
#'
feglm = function(fml, data, family = "poisson", offset, weights, subset, split, fsplit, cluster, se, dof, panel.id, start = NULL,
etastart = NULL, mustart = NULL, fixef, fixef.rm = "perfect", fixef.tol = 1e-6, fixef.iter = 10000, collin.tol = 1e-10,
glm.iter = 25, glm.tol = 1e-8, nthreads = getFixest_nthreads(), lean = FALSE,
warn = TRUE, notes = getFixest_notes(), verbose = 0, combine.quick, mem.clean = FALSE, only.env = FALSE, env, ...){
# Formula interface for fixed-effects GLM estimation. It builds the
# estimation environment with fixest_env() and delegates to feglm.fit(),
# which performs the actual IRWLS estimation.
# normalize a missing 'weights' argument to NULL so it can be passed along
if(missing(weights)) weights = NULL
time_start = proc.time()
if(missing(env)){
# no pre-built environment: create one via fixest_env(); the call is wrapped
# in try() so errors can be re-thrown with the proper origin below
set_defaults("fixest_estimation")
call_env = new.env(parent = parent.frame())
env = try(fixest_env(fml=fml, data=data, family = family, offset = offset, weights = weights, subset = subset, split = split, fsplit = fsplit, cluster = cluster, se = se, dof = dof, panel.id = panel.id, linear.start = start, etastart=etastart, mustart=mustart, fixef = fixef, fixef.rm = fixef.rm, fixef.tol=fixef.tol, fixef.iter=fixef.iter, collin.tol = collin.tol, glm.iter = glm.iter, glm.tol = glm.tol, nthreads = nthreads, lean = lean, warn=warn, notes=notes, verbose = verbose, combine.quick = combine.quick, mem.clean = mem.clean, origin = "feglm", mc_origin = match.call(), call_env = call_env, ...), silent = TRUE)
} else if((r <- !is.environment(env)) || !isTRUE(env$fixest_env)){
# a user-provided env must be a genuine fixest estimation environment
stop("Argument 'env' must be an environment created by a fixest estimation. Currently it is not ", ifelse(r, "an", "a 'fixest'"), " environment.")
}
if("try-error" %in% class(env)){
# environment creation failed: re-throw with the caller's origin when provided
mc = match.call()
origin = ifelse(is.null(mc$origin), "feglm", mc$origin)
stop(format_error_msg(env, origin))
}
check_arg(only.env, "logical scalar")
if(only.env){
# return the estimation environment without performing the estimation
return(env)
}
verbose = get("verbose", env)
if(verbose >= 2) cat("Setup in ", (proc.time() - time_start)[3], "s\n", sep="")
# workhorse is feglm.fit (OK if error msg leads to feglm.fit [clear enough])
res = feglm.fit(env = env)
res
}
#' @rdname feglm
feglm.fit = function(y, X, fixef_df, family = "poisson", offset, split, fsplit, cluster, se, dof, weights, subset, start = NULL,
etastart = NULL, mustart = NULL, fixef.rm = "perfect", fixef.tol = 1e-6, fixef.iter = 10000,
collin.tol = 1e-10, glm.iter = 25, glm.tol = 1e-8, nthreads = getFixest_nthreads(), lean = FALSE, warn = TRUE,
notes = getFixest_notes(), mem.clean = FALSE, verbose = 0, only.env = FALSE, env, ...){
dots = list(...)
lean_internal = isTRUE(dots$lean_internal)
means = 1
if(!missing(env)){
# This is an internal call from the function feglm
# no need to further check the arguments
# we extract them from the env
if((r <- !is.environment(env)) || !isTRUE(env$fixest_env)) {
stop("Argument 'env' must be an environment created by a fixest estimation. Currently it is not ", ifelse(r, "an", "a 'fixest'"), " environment.")
}
# main variables
if(missing(y)) y = get("lhs", env)
if(missing(X)) X = get("linear.mat", env)
if(!missing(fixef_df) && is.null(fixef_df)){
assign("isFixef", FALSE, env)
}
if(missing(offset)) offset = get("offset.value", env)
if(missing(weights)) weights = get("weights.value", env)
# other params
if(missing(fixef.tol)) fixef.tol = get("fixef.tol", env)
if(missing(fixef.iter)) fixef.iter = get("fixef.iter", env)
if(missing(collin.tol)) collin.tol = get("collin.tol", env)
if(missing(glm.iter)) glm.iter = get("glm.iter", env)
if(missing(glm.tol)) glm.tol = get("glm.tol", env)
if(missing(warn)) warn = get("warn", env)
if(missing(verbose)) verbose = get("verbose", env)
# starting point of the fixed-effects
if(!is.null(dots$means)) means = dots$means
# init
init.type = get("init.type", env)
starting_values = get("starting_values", env)
if(lean_internal){
# Call within here => either null model or fe only
init.type = "default"
if(!is.null(etastart)){
init.type = "eta"
starting_values = etastart
}
}
} else {
if(missing(weights)) weights = NULL
time_start = proc.time()
set_defaults("fixest_estimation")
call_env = new.env(parent = parent.frame())
env = try(fixest_env(y = y, X = X, fixef_df = fixef_df, family = family, nthreads = nthreads, lean = lean, offset = offset, weights = weights, subset = subset, split = split, fsplit = fsplit, cluster = cluster, se = se, dof = dof, linear.start = start, etastart=etastart, mustart=mustart, fixef.rm = fixef.rm, fixef.tol = fixef.tol, fixef.iter = fixef.iter, collin.tol = collin.tol, glm.iter = glm.iter, glm.tol = glm.tol, notes=notes, mem.clean = mem.clean, warn=warn, verbose = verbose, origin = "feglm.fit", mc_origin = match.call(), call_env = call_env, ...), silent = TRUE)
if("try-error" %in% class(env)){
stop(format_error_msg(env, "feglm.fit"))
}
check_arg(only.env, "logical scalar")
if(only.env){
return(env)
}
verbose = get("verbose", env)
if(verbose >= 2) cat("Setup in ", (proc.time() - time_start)[3], "s\n", sep="")
# y/X
y = get("lhs", env)
X = get("linear.mat", env)
# offset
offset = get("offset.value", env)
# weights
weights = get("weights.value", env)
# init
init.type = get("init.type", env)
starting_values = get("starting_values", env)
}
#
# Split ####
#
do_split = get("do_split", env)
if(do_split){
res = multi_split(env, feglm.fit)
return(res)
}
#
# Multi fixef ####
#
do_multi_fixef = get("do_multi_fixef", env)
if(do_multi_fixef){
res = multi_fixef(env, feglm.fit)
return(res)
}
#
# Multi LHS and RHS ####
#
do_multi_lhs = get("do_multi_lhs", env)
do_multi_rhs = get("do_multi_rhs", env)
if(do_multi_lhs || do_multi_rhs){
res = multi_LHS_RHS(env, feglm.fit)
return(res)
}
#
# Regular estimation ####
#
# Setup:
family = get("family_funs", env)
isFixef = get("isFixef", env)
nthreads = get("nthreads", env)
isWeight = length(weights) > 1
isOffset = length(offset) > 1
nobs <- length(y)
onlyFixef = length(X) == 1
# the preformatted results
res = get("res", env)
# glm functions:
variance = family$variance
linkfun = family$linkfun
linkinv = family$linkinv
sum_dev.resids = family$sum_dev.resids
valideta = family$valideta
validmu = family$validmu
mu.eta = family$mu.eta
family_equiv = family$family_equiv
#
# Init
#
if(mem.clean){
gc()
}
if(init.type == "mu"){
mu = starting_values
if(!valideta(mu)){
stop("In 'mustart' the values provided are not valid.")
}
eta = linkfun(mu)
} else if(init.type == "eta"){
eta = starting_values
if(!valideta(eta)){
stop("In 'etastart' the values provided are not valid.")
}
mu = linkinv(eta)
} else if(init.type == "coef"){
# If there are fixed-effects we MUST first compute the FE model with starting values as offset
# otherwise we are too far away from the solution and starting values may lead to divergence
# (hence step halving would be required)
# This means that initializing with coefficients incurs large computational costs
# with fixed-effects
start = get("start", env)
offset_fe = offset + cpppar_xbeta(X, start, nthreads)
if(isFixef){
mustart = 0
eval(family$initialize)
eta = linkfun(mustart)
# just a rough estimate (=> high tol values) [no benefit in high precision]
model_fe = try(feglm.fit(X = 0, etastart = eta, offset = offset_fe, glm.tol = 1e-2, fixef.tol = 1e-2, env = env, lean_internal = TRUE))
if("try-error" %in% class(model_fe)){
stop("Estimation failed during initialization when getting the fixed-effects, maybe change the values of 'start'? \n", model_fe)
}
eta = model_fe$linear.predictors
mu = model_fe$fitted.values
devold = model_fe$deviance
} else {
eta = offset_fe
mu = linkinv(eta)
devold = sum_dev.resids(y, mu, eta, wt = weights)
}
wols_old = list(fitted.values = eta - offset)
} else {
mustart = 0
eval(family$initialize)
eta = linkfun(mustart)
mu = linkinv(eta)
# NOTA: FE only => ADDS LOTS OF COMPUTATIONAL COSTS without convergence benefit
}
if(mem.clean){
gc()
}
if(init.type != "coef"){
# starting deviance with constant equal to 1e-5
# this is important for getting in step halving early (when deviance goes awry right from the start)
devold = sum_dev.resids(y, rep(linkinv(1e-5), nobs), rep(1e-5, nobs), wt = weights)
wols_old = list(fitted.values = rep(1e-5, nobs))
}
if(!validmu(mu) || !valideta(eta)){
stop("Current starting values are not valid.")
}
assign("nb_sh", 0, env)
on.exit(warn_step_halving(env))
if((init.type == "coef" && verbose >= 1) || verbose >= 4) {
cat("Deviance at initializat. = ", numberFormatNormal(devold), "\n", sep = "")
}
#
# The main loop
#
wols_means = 1
conv = FALSE
warning_msg = div_message = ""
for (iter in 1:glm.iter) {
if(mem.clean){
gc()
}
mu.eta.val = mu.eta(mu, eta)
var_mu = variance(mu)
# controls
any_pblm_mu = cpp_any_na_null(var_mu)
if(any_pblm_mu){
if (anyNA(var_mu)){
stop("NAs in V(mu), at iteration ", iter, ".")
} else if (any(var_mu == 0)){
stop("0s in V(mu), at iteration ", iter, ".")
}
}
if(anyNA(mu.eta.val)){
stop("NAs in d(mu)/d(eta), at iteration ", iter, ".")
}
if(isOffset){
z = (eta - offset) + (y - mu)/mu.eta.val
} else {
z = eta + (y - mu)/mu.eta.val
}
w = as.vector(weights * mu.eta.val**2 / var_mu)
is_0w = w == 0
any_0w = any(is_0w)
if(any_0w && all(is_0w)){
warning_msg = paste0("No informative observation at iteration ", iter, ".")
div_message = "No informative observation."
break
}
if(mem.clean && iter > 1){
rm(wols)
gc()
}
wols = feols(y = z, X = X, weights = w, means = wols_means, correct_0w = any_0w, env = env, fixef.tol = fixef.tol * 10**(iter==1), fixef.iter = fixef.iter, collin.tol = collin.tol, nthreads = nthreads, mem.clean = mem.clean, verbose = verbose - 1)
if(isTRUE(wols$NA_model)){
return(wols)
}
# In theory OLS estimation is guaranteed to exist
# yet, NA coef may happen with non-infinite very large values of z/w (e.g. values > 1e100)
if(anyNA(wols$coefficients)){
if(iter == 1){
stop("Weighted-OLS returns NA coefficients at first iteration, step halving cannot be performed. Try other starting values?")
}
warning_msg = paste0("Divergence at iteration ", iter, ": ", msg, ". Weighted-OLS returns NA coefficients. Last evaluated coefficients with finite deviance are returned for information purposes.")
div_message = "Weighted-OLS returned NA coefficients."
wols = wols_old
break
} else {
wols_means = wols$means
}
eta = wols$fitted.values
if(isOffset){
eta = eta + offset
}
if(mem.clean){
gc()
}
mu = linkinv(eta)
dev = sum_dev.resids(y, mu, eta, wt = weights)
dev_evol = dev - devold
if(verbose >= 1) cat("Iteration: ", sprintf("%02i", iter), " -- Deviance = ", numberFormatNormal(dev), " -- Evol. = ", dev_evol, "\n", sep = "")
#
# STEP HALVING
#
if(!is.finite(dev) || dev_evol > 0 || !valideta(eta) || !validmu(mu)){
if(!is.finite(dev)){
# we report step-halving but only for non-finite deviances
# other situations are OK (it just happens)
nb_sh = get("nb_sh", env)
assign("nb_sh", nb_sh + 1, env)
}
eta_new = wols$fitted.values
eta_old = wols_old$fitted.values
iter_sh = 0
do_exit = FALSE
while(!is.finite(dev) || dev_evol > 0 || !valideta(eta_new) || !validmu(mu)){
if(iter == 1 && (is.finite(dev) && valideta(eta_new) && validmu(mu)) && iter_sh >= 2){
# BEWARE FIRST ITERATION:
# at first iteration, the deviance can be higher than the init, and SH may not help
# we need to make sure we get out of SH before it's messed up
break
} else if(iter_sh == glm.iter){
# if first iteration => means algo did not find viable solution
if(iter == 1){
stop("Algorithm failed at first iteration. Step-halving could not find a valid set of parameters.")
}
# Problem only if the deviance is non-finite or eta/mu not valid
# Otherwise, it means that we're at a maximum
if(!is.finite(dev) || !valideta(eta_new) || !validmu(mu)){
# message
msg = ifelse(!is.finite(dev), "non-finite deviance", "no valid eta/mu")
warning_msg = paste0("Divergence at iteration ", iter, ": ", msg, ". Step halving: no valid correction found. Last evaluated coefficients with finite deviance are returned for information purposes.")
div_message = paste0(msg, " despite step-halving")
wols = wols_old
do_exit = TRUE
}
break
}
iter_sh = iter_sh + 1
eta_new = (eta_old + eta_new) / 2
if(mem.clean){
gc()
}
mu = linkinv(eta_new + offset)
dev = sum_dev.resids(y, mu, eta_new + offset, wt = weights)
dev_evol = dev - devold
if(verbose >= 3) cat("Step-halving: iter =", iter_sh, "-- dev:", numberFormatNormal(dev), "-- evol:", numberFormatNormal(dev_evol), "\n")
}
if(do_exit) break
# it worked: update
eta = eta_new + offset
wols$fitted.values = eta_new
# NOTA: we must NOT end with a step halving => we need a proper weighted-ols estimation
# we force the algorithm to continue
dev_evol = Inf
if(verbose >= 2){
cat("Step-halving: new deviance = ", numberFormatNormal(dev), "\n", sep = "")
}
}
if(abs(dev_evol)/(0.1 + abs(dev)) < glm.tol){
conv = TRUE
break
} else {
devold = dev
wols_old = wols
}
}
# Convergence flag
if(!conv){
if(iter == glm.iter){
warning_msg = paste0("Absence of convergence: Maximum number of iterations reached (", glm.iter, "). Final deviance: ", numberFormatNormal(dev), ".")
div_message = "no convergence: Maximum number of iterations reached"
}
res$convStatus = FALSE
res$message = div_message
} else {
res$convStatus = TRUE
}
#
# post processing
#
# Collinearity message
collin.adj = 0
if(wols$multicol){
var_collinear = colnames(X)[wols$is_excluded]
if(notes) message(ifsingle(var_collinear, "The variable ", "Variables "), enumerate_items(var_collinear, "quote.has"), " been removed because of collinearity (see $collin.var).")
res$collin.var = var_collinear
# full set of coeffficients with NAs
collin.coef = setNames(rep(NA, ncol(X)), colnames(X))
collin.coef[!wols$is_excluded] = wols$coefficients
res$collin.coef = collin.coef
wols$X_demean = wols$X_demean[, !wols$is_excluded, drop = FALSE]
X = X[, !wols$is_excluded, drop = FALSE]
collin.adj = sum(wols$is_excluded)
}
res$irls_weights = w # weights from the iteratively reweighted least square
res$coefficients = coef = wols$coefficients
res$collin.min_norm = wols$collin.min_norm
if(!is.null(wols$warn_varying_slope)){
warning(wols$warn_varying_slope)
}
res$linear.predictors = wols$fitted.values
if(isOffset){
res$linear.predictors = res$linear.predictors + offset
}
res$fitted.values = linkinv(res$linear.predictors)
res$residuals = y - res$fitted.values
if(onlyFixef) res$onlyFixef = onlyFixef
# dispersion + scores
if(family$family %in% c("poisson", "binomial")){
res$dispersion = 1
} else {
weighted_resids = wols$residuals * res$irls_weights
# res$dispersion = sum(weighted_resids ** 2) / sum(res$irls_weights)
# I use the second line to fit GLM's
res$dispersion = sum(weighted_resids * wols$residuals) / (res$nobs - res$nparams)
}
res$working_residuals = wols$residuals
if(!onlyFixef && !lean_internal){
# score + hessian + vcov
if(mem.clean){
gc()
}
# dispersion + scores
if(family$family %in% c("poisson", "binomial")){
res$scores = (wols$residuals * res$irls_weights) * wols$X_demean
res$hessian = cpppar_crossprod(wols$X_demean, res$irls_weights, nthreads)
} else {
res$scores = (weighted_resids / res$dispersion) * wols$X_demean
res$hessian = cpppar_crossprod(wols$X_demean, res$irls_weights, nthreads) / res$dispersion
}
info_inv = cpp_cholesky(res$hessian, collin.tol, nthreads)
if(!is.null(info_inv$all_removed)){
# This should not occur, but I prefer to be safe
stop("Not any single variable with a positive variance was found after the weighted-OLS stage. (If possible, could you send a replicable example to fixest's author? He's curious about when that actually happens, since in theory it should never happen.)")
}
var = info_inv$XtX_inv
is_excluded = info_inv$id_excl
if(any(is_excluded)){
# There should be no remaining collinearity
warning_msg = paste(warning_msg, "Residual collinearity was found after the weighted-OLS stage. The covariance is not defined. (This should not happen. If possible, could you send a replicable example to fixest's author? He's curious about when that actually happen.)")
var = matrix(NA, length(is_excluded), length(is_excluded))
}
res$cov.unscaled = var
rownames(res$cov.unscaled) = colnames(res$cov.unscaled) = names(coef)
# se
se = diag(res$cov.unscaled)
se[se < 0] = NA
se = sqrt(se)
# coeftable
zvalue <- coef/se
use_t = !family$family %in% c("poisson", "binomial")
if(use_t){
pvalue <- 2*pt(-abs(zvalue), max(res$nobs - res$nparams, 1))
ctable_names = c("Estimate", "Std. Error", "t value", "Pr(>|t|)")
} else {
pvalue <- 2*pnorm(-abs(zvalue))
ctable_names = c("Estimate", "Std. Error", "z value", "Pr(>|z|)")
}
coeftable <- data.frame("Estimate"=coef, "Std. Error"=se, "z value"=zvalue, "Pr(>|z|)"=pvalue)
names(coeftable) <- ctable_names
row.names(coeftable) <- names(coef)
attr(se, "type") = attr(coeftable, "type") = "Standard"
res$coeftable = coeftable
res$se = se
}
if(nchar(warning_msg) > 0){
if(warn){
warning(warning_msg, call. = FALSE)
options("fixest_last_warning" = proc.time())
}
}
n = length(y)
res$nobs = n
res$nparams = res$nparams - collin.adj
df_k = res$nparams
# r2s
if(!cpp_isConstant(res$fitted.values)){
res$sq.cor = stats::cor(y, res$fitted.values)**2
} else {
res$sq.cor = NA
}
# deviance
res$deviance = dev
# simpler form for poisson
if(family_equiv == "poisson"){
if(isWeight){
if(mem.clean){
gc()
}
res$loglik = sum( (y * eta - mu - cpppar_lgamma(y + 1, nthreads)) * weights)
} else {
# lfact is later used in model0 and is costly to compute
lfact = sum(rpar_lgamma(y + 1, env))
assign("lfactorial", lfact, env)
res$loglik = sum(y * eta - mu) - lfact
}
} else {
res$loglik = family$aic(y = y, n = rep.int(1, n), mu = res$fitted.values, wt = weights, dev = dev) / -2
}
if(lean_internal){
return(res)
}
# The pseudo_r2
if(family_equiv %in% c("poisson", "logit")){
model0 = get_model_null(env, theta.init = NULL)
ll_null = model0$loglik
fitted_null = linkinv(model0$constant)
} else {
if(verbose >= 1) cat("Null model:\n")
if(mem.clean){
gc()
}
model_null = feglm.fit(X = matrix(1, nrow = n, ncol = 1), fixef_df = NULL, env = env, lean_internal = TRUE)
ll_null = model_null$loglik
fitted_null = model_null$fitted.values
}
res$ll_null = ll_null
res$pseudo_r2 = 1 - (res$loglik - df_k)/(ll_null - 1)
# fixef info
if(isFixef){
if(onlyFixef){
res$sumFE = res$linear.predictors
} else {
res$sumFE = res$linear.predictors - cpppar_xbeta(X, res$coefficients, nthreads)
}
if(isOffset){
res$sumFE = res$sumFE - offset
}
}
# other
res$iterations = iter
res$family = family
class(res) = "fixest"
do_summary = get("do_summary", env)
if(do_summary){
se = get("se", env)
cluster = get("cluster", env)
lean = get("lean", env)
dof = get("dof", env)
summary_flags = get("summary_flags", env)
# To compute the RMSE and lean = TRUE
if(lean) res$ssr = cpp_ssq(res$residuals, weights)
res = summary(res, se = se, cluster = cluster, dof = dof, lean = lean, summary_flags = summary_flags)
}
return(res)
}
#' Fixed-effects maximum likelihood model
#'
#' This function estimates maximum likelihood models with any number of fixed-effects.
#'
#' @inheritParams feNmlm
#' @inherit feNmlm return details
#' @inheritSection feols Combining the fixed-effects
#' @inheritSection feols Lagging variables
#' @inheritSection feols Interactions
#' @inheritSection feols On standard-errors
#' @inheritSection feols Multiple estimations
#'
#' @param fml A formula representing the relation to be estimated. For example: \code{fml = z~x+y}. To include fixed-effects, insert them in this formula using a pipe: e.g. \code{fml = z~x+y|fixef_1+fixef_2}. Multiple estimations can be performed at once: for multiple dep. vars, wrap them in \code{c()}: ex \code{c(y1, y2)}. For multiple indep. vars, use the stepwise functions: ex \code{x1 + csw(x2, x3)}. The formula \code{fml = c(y1, y2) ~ x1 + cw0(x2, x3)} leads to 6 estimations, see details.
#' @param start Starting values for the coefficients. Can be: i) a numeric of length 1 (e.g. \code{start = 0}, the default), ii) a numeric vector of the exact same length as the number of variables, or iii) a named vector of any length (the names will be used to initialize the appropriate coefficients).
#'
#' @details
#' Note that the functions \code{\link[fixest]{feglm}} and \code{\link[fixest]{femlm}} provide the same results when using the same families but differ in that the latter is a direct maximum likelihood optimization (so the two can really have different convergence rates).
#'
#' @return
#' A \code{fixest} object. Note that \code{fixest} objects contain many elements and most of them are for internal use, they are presented here only for information. To access them, it is safer to use the user-level methods (e.g. \code{\link[fixest]{vcov.fixest}}, \code{\link[fixest]{resid.fixest}}, etc) or functions (like for instance \code{\link[fixest]{fitstat}} to access any fit statistic).
#' \item{nobs}{The number of observations.}
#' \item{fml}{The linear formula of the call.}
#' \item{call}{The call of the function.}
#' \item{method}{The method used to estimate the model.}
#' \item{family}{The family used to estimate the model.}
#' \item{fml_all}{A list containing different parts of the formula. Always contain the linear formula. Then, if relevant: \code{fixef}: the fixed-effects; \code{NL}: the non linear part of the formula.}
#' \item{nparams}{The number of parameters of the model.}
#' \item{fixef_vars}{The names of each fixed-effect dimension.}
#' \item{fixef_id}{The list (of length the number of fixed-effects) of the fixed-effects identifiers for each observation.}
#' \item{fixef_sizes}{The size of each fixed-effect (i.e. the number of unique identifiers for each fixed-effect dimension).}
#' \item{convStatus}{Logical, convergence status.}
#' \item{message}{The convergence message from the optimization procedures.}
#' \item{obs_selection}{(When relevant.) List containing vectors of integers. It represents the sequential selection of observation vis a vis the original data set.}
#' \item{fixef_removed}{(When relevant.) In the case there were fixed-effects and some observations were removed because of only 0/1 outcome within a fixed-effect, it gives the list (for each fixed-effect dimension) of the fixed-effect identifiers that were removed.}
#' \item{coefficients}{The named vector of estimated coefficients.}
#' \item{coeftable}{The table of the coefficients with their standard errors, z-values and p-values.}
#' \item{loglik}{The log-likelihood.}
#' \item{iterations}{Number of iterations of the algorithm.}
#' \item{ll_null}{Log-likelihood of the null model (i.e. with the intercept only).}
#' \item{ll_fe_only}{Log-likelihood of the model with only the fixed-effects.}
#' \item{ssr_null}{Sum of the squared residuals of the null model (containing only the intercept).}
#' \item{pseudo_r2}{The adjusted pseudo R2.}
#' \item{fitted.values}{The fitted values are the expected value of the dependent variable for the fitted model: that is \eqn{E(Y|X)}.}
#' \item{residuals}{The residuals (y minus the fitted values).}
#' \item{sq.cor}{Squared correlation between the dependent variable and the expected predictor (i.e. fitted.values) obtained by the estimation.}
#' \item{hessian}{The Hessian of the parameters.}
#' \item{cov.unscaled}{The variance-covariance matrix of the parameters.}
#' \item{se}{The standard-error of the parameters.}
#' \item{scores}{The matrix of the scores (first derivative for each observation).}
#' \item{residuals}{The difference between the dependent variable and the expected predictor.}
#' \item{sumFE}{The sum of the fixed-effects coefficients for each observation.}
#' \item{offset}{(When relevant.) The offset formula.}
#' \item{weights}{(When relevant.) The weights formula.}
#'
#'
#' @seealso
#' See also \code{\link[fixest]{summary.fixest}} to see the results with the appropriate standard-errors, \code{\link[fixest]{fixef.fixest}} to extract the fixed-effects coefficients, and the function \code{\link[fixest]{etable}} to visualize the results of multiple estimations.
#' And other estimation methods: \code{\link[fixest]{feols}}, \code{\link[fixest]{feglm}}, \code{\link[fixest:feglm]{fepois}}, \code{\link[fixest]{feNmlm}}.
#'
#' @author
#' Laurent Berge
#'
#' @references
#'
#' Berge, Laurent, 2018, "Efficient estimation of maximum likelihood models with multiple fixed-effects: the R package FENmlm." CREA Discussion Papers, 13 (\url{https://wwwen.uni.lu/content/download/110162/1299525/file/2018_13}).
#'
#' For models with multiple fixed-effects:
#'
#' Gaure, Simen, 2013, "OLS with multiple high dimensional category variables", Computational Statistics & Data Analysis 66 pp. 8--18
#'
#' On the unconditional Negative Binomial model:
#'
#' Allison, Paul D and Waterman, Richard P, 2002, "Fixed-Effects Negative Binomial Regression Models", Sociological Methodology 32(1) pp. 247--265
#'
#' @examples
#'
#' # Load trade data
#' data(trade)
#'
#' # We estimate the effect of distance on trade => we account for 3 fixed-effects
#' # 1) Poisson estimation
#' est_pois = femlm(Euros ~ log(dist_km) | Origin + Destination + Product, trade)
#'
#' # 2) Log-Log Gaussian estimation (with same FEs)
#' est_gaus = update(est_pois, log(Euros+1) ~ ., family = "gaussian")
#'
#' # Comparison of the results using the function etable
#' etable(est_pois, est_gaus)
#' # Now using two way clustered standard-errors
#' etable(est_pois, est_gaus, se = "twoway")
#'
#' # Comparing different types of standard errors
#' sum_hetero = summary(est_pois, se = "hetero")
#' sum_oneway = summary(est_pois, se = "cluster")
#' sum_twoway = summary(est_pois, se = "twoway")
#' sum_threeway = summary(est_pois, se = "threeway")
#'
#' etable(sum_hetero, sum_oneway, sum_twoway, sum_threeway)
#'
#'
#' #
#' # Multiple estimations:
#' #
#'
#' # 6 estimations
#' est_mult = femlm(c(Ozone, Solar.R) ~ Wind + Temp + csw0(Wind:Temp, Day), airquality)
#'
#' # We can display the results for the first lhs:
#' etable(est_mult[lhs = 1])
#'
#' # And now the second (access can be made by name)
#' etable(est_mult[lhs = "Solar.R"])
#'
#' # Now we focus on the two last right hand sides
#' # (note that .N can be used to specify the last item)
#' etable(est_mult[rhs = 2:.N])
#'
#' # Combining with split
#' est_split = fepois(c(Ozone, Solar.R) ~ sw(poly(Wind, 2), poly(Temp, 2)),
#' airquality, split = ~ Month)
#'
#' # You can display everything at once with the print method
#' est_split
#'
#' # Different way of displaying the results with "compact"
#' summary(est_split, "compact")
#'
#' # You can still select which sample/LHS/RHS to display
#' est_split[sample = 1:2, lhs = 1, rhs = 1]
#'
#'
#'
#'
femlm <- function(fml, data, family = c("poisson", "negbin", "logit", "gaussian"), start = 0, fixef, fixef.rm = "perfect",
                  offset, subset, split, fsplit, cluster, se, dof, panel.id, fixef.tol = 1e-5, fixef.iter = 10000,
                  nthreads = getFixest_nthreads(), lean = FALSE, verbose = 0, warn = TRUE,
                  notes = getFixest_notes(), theta.init, combine.quick, mem.clean = FALSE, only.env = FALSE, env, ...){
    # femlm is a pure alias of feNmlm: every user argument is forwarded as-is.
    # The only additions are the bookkeeping elements feNmlm needs to report
    # the call/errors as coming from 'femlm'.

    # Environment of the caller's frame: kept so that the original call can be
    # re-evaluated later (e.g. by update()-like methods).
    call_env_bis = new.env(parent = parent.frame())

    # NOTE: match.call() is passed as a promise and evaluated in this frame,
    # so it records the user's call to femlm (and not a call to feNmlm).
    # Missing arguments (fixef, offset, etc.) propagate their missingness.
    res = try(feNmlm(fml = fml, data = data, family = family, fixef = fixef,
                     fixef.rm = fixef.rm, offset = offset, subset = subset,
                     split = split, fsplit = fsplit, cluster = cluster, se = se,
                     dof = dof, panel.id = panel.id, start = start,
                     fixef.tol = fixef.tol, fixef.iter = fixef.iter,
                     nthreads = nthreads, lean = lean, verbose = verbose,
                     warn = warn, notes = notes, theta.init = theta.init,
                     combine.quick = combine.quick, mem.clean = mem.clean,
                     origin = "femlm", mc_origin_bis = match.call(),
                     call_env_bis = call_env_bis, only.env = only.env,
                     env = env, ...), silent = TRUE)

    # Errors raised in feNmlm are re-thrown with a message pointing to femlm
    if(inherits(res, "try-error")){
        stop(format_error_msg(res, "femlm"))
    }

    return(res)
}
#' @rdname femlm
fenegbin = function(fml, data, theta.init, start = 0, fixef, fixef.rm = "perfect", offset, subset, split, fsplit, cluster, se, dof, panel.id,
                    fixef.tol = 1e-5, fixef.iter = 10000, nthreads = getFixest_nthreads(), lean = FALSE,
                    verbose = 0, warn = TRUE, notes = getFixest_notes(), combine.quick, mem.clean = FALSE, only.env = FALSE, env, ...){
    # fenegbin is an alias of feNmlm with the family hard-set to "negbin".
    # Since the family is fixed, passing one explicitly is a user error:
    # we catch it via the unevaluated call before doing anything else.
    if("family" %in% names(match.call())){
        stop("Function fenegbin does not accept the argument 'family'.")
    }

    # Environment of the caller's frame: kept so that the original call can be
    # re-evaluated later (e.g. by update()-like methods).
    call_env_bis = new.env(parent = parent.frame())

    # match.call() is evaluated lazily in this frame => records the fenegbin call
    res = try(feNmlm(fml = fml, data = data, family = "negbin", theta.init = theta.init,
                     start = start, fixef = fixef, fixef.rm = fixef.rm, offset = offset,
                     subset = subset, split = split, fsplit = fsplit, cluster = cluster,
                     se = se, dof = dof, panel.id = panel.id, fixef.tol = fixef.tol,
                     fixef.iter = fixef.iter, nthreads = nthreads, lean = lean,
                     verbose = verbose, warn = warn, notes = notes,
                     combine.quick = combine.quick, mem.clean = mem.clean,
                     origin = "fenegbin", mc_origin_bis = match.call(),
                     call_env_bis = call_env_bis, only.env = only.env,
                     env = env, ...), silent = TRUE)

    # Errors raised in feNmlm are re-thrown with a message pointing to fenegbin
    if(inherits(res, "try-error")){
        stop(format_error_msg(res, "fenegbin"))
    }

    return(res)
}
#' @rdname feglm
fepois = function(fml, data, offset, weights, subset, split, fsplit, cluster, se, dof, panel.id,
                  start = NULL, etastart = NULL, mustart = NULL,
                  fixef, fixef.rm = "perfect", fixef.tol = 1e-6, fixef.iter = 10000, collin.tol = 1e-10,
                  glm.iter = 25, glm.tol = 1e-8, nthreads = getFixest_nthreads(), lean = FALSE, warn = TRUE, notes = getFixest_notes(),
                  verbose = 0, combine.quick, mem.clean = FALSE, only.env = FALSE, env, ...){
    # fepois is an alias of feglm with the family hard-set to "poisson".
    # Since the family is fixed, passing one explicitly is a user error:
    # we catch it via the unevaluated call before doing anything else.
    if("family" %in% names(match.call())){
        stop("Function fepois does not accept the argument 'family'.")
    }

    # Environment of the caller's frame: kept so that the original call can be
    # re-evaluated later (e.g. by update()-like methods).
    call_env_bis = new.env(parent = parent.frame())

    # match.call() is evaluated lazily in this frame => records the fepois call
    res = try(feglm(fml = fml, data = data, family = "poisson", offset = offset,
                    weights = weights, subset = subset, split = split, fsplit = fsplit,
                    cluster = cluster, se = se, dof = dof, panel.id = panel.id,
                    start = start, etastart = etastart, mustart = mustart,
                    fixef = fixef, fixef.rm = fixef.rm, fixef.tol = fixef.tol,
                    fixef.iter = fixef.iter, collin.tol = collin.tol,
                    glm.iter = glm.iter, glm.tol = glm.tol, nthreads = nthreads,
                    lean = lean, warn = warn, notes = notes, verbose = verbose,
                    combine.quick = combine.quick, mem.clean = mem.clean,
                    origin_bis = "fepois", mc_origin_bis = match.call(),
                    call_env_bis = call_env_bis, only.env = only.env,
                    env = env, ...), silent = TRUE)

    # Errors raised in feglm are re-thrown with a message pointing to fepois
    if(inherits(res, "try-error")){
        stop(format_error_msg(res, "fepois"))
    }

    return(res)
}
#' Fixed effects nonlinear maximum likelihood models
#'
#' This function estimates maximum likelihood models (e.g., Poisson or Logit) with non-linear in parameters right-hand-sides and is efficient to handle any number of fixed effects. If you do not use non-linear in parameters right-hand-side, use \code{\link[fixest]{femlm}} or \code{\link[fixest]{feglm}} instead (their design is simpler).
#'
#' @inheritParams summary.fixest
#' @inheritParams panel
#' @inheritSection feols Lagging variables
#' @inheritSection feols Interactions
#' @inheritSection feols On standard-errors
#' @inheritSection feols Multiple estimations
#'
#' @param fml A formula. This formula gives the linear formula to be estimated (it is similar to a \code{lm} formula), for example: \code{fml = z~x+y}. To include fixed-effects variables, insert them in this formula using a pipe (e.g. \code{fml = z~x+y|fixef_1+fixef_2}). To include a non-linear in parameters element, you must use the argument \code{NL.fml}. Multiple estimations can be performed at once: for multiple dep. vars, wrap them in \code{c()}: ex \code{c(y1, y2)}. For multiple indep. vars, use the stepwise functions: ex \code{x1 + csw(x2, x3)}. This leads to 6 estimations \code{fml = c(y1, y2) ~ x1 + cw0(x2, x3)}. See details.
#' @param start Starting values for the coefficients in the linear part (for the non-linear part, use NL.start). Can be: i) a numeric of length 1 (e.g. \code{start = 0}, the default), ii) a numeric vector of the exact same length as the number of variables, or iii) a named vector of any length (the names will be used to initialize the appropriate coefficients).
#' @param NL.fml A formula. If provided, this formula represents the non-linear part of the right hand side (RHS). Note that contrary to the \code{fml} argument, the coefficients must explicitly appear in this formula. For instance, it can be \code{~a*log(b*x + c*x^3)}, where \code{a}, \code{b}, and \code{c} are the coefficients to be estimated. Note that only the RHS of the formula is to be provided, and NOT the left hand side.
#' @param split A one sided formula representing a variable (eg \code{split = ~var}) or a vector. If provided, the sample is split according to the variable and one estimation is performed for each value of that variable. If you also want to include the estimation for the full sample, use the argument \code{fsplit} instead.
#' @param fsplit A one sided formula representing a variable (eg \code{split = ~var}) or a vector. If provided, the sample is split according to the variable and one estimation is performed for each value of that variable. This argument is the same as split but also includes the full sample as the first estimation.
#' @param data A data.frame containing the necessary variables to run the model. The variables of the non-linear right hand side of the formula are identified with this \code{data.frame} names. Can also be a matrix.
#' @param family Character scalar. It should provide the family. The possible values are "poisson" (Poisson model with log-link, the default), "negbin" (Negative Binomial model with log-link), "logit" (LOGIT model with log-link), "gaussian" (Gaussian model).
#' @param fixef Character vector. The names of variables to be used as fixed-effects. These variables should contain the identifier of each observation (e.g., think of it as a panel identifier). Note that the recommended way to include fixed-effects is to insert them directly in the formula.
#' @param subset A vector (logical or numeric) or a one-sided formula. If provided, then the estimation will be performed only on the observations defined by this argument.
#' @param NL.start (For NL models only) A list of starting values for the non-linear parameters. ALL the parameters are to be named and given a starting value. Example: \code{NL.start=list(a=1,b=5,c=0)}. Though, there is an exception: if all parameters are to be given the same starting value, you can use a numeric scalar.
#' @param lower (For NL models only) A list. The lower bound for each of the non-linear parameters that requires one. Example: \code{lower=list(b=0,c=0)}. Beware, if the estimated parameter is at its lower bound, then asymptotic theory cannot be applied and the standard-error of the parameter cannot be estimated because the gradient will not be null. In other words, when at its upper/lower bound, the parameter is considered as 'fixed'.
#' @param upper (For NL models only) A list. The upper bound for each of the non-linear parameters that requires one. Example: \code{upper=list(a=10,c=50)}. Beware, if the estimated parameter is at its upper bound, then asymptotic theory cannot be applied and the standard-error of the parameter cannot be estimated because the gradient will not be null. In other words, when at its upper/lower bound, the parameter is considered as 'fixed'.
#' @param NL.start.init (For NL models only) Numeric scalar. If the argument \code{NL.start} is not provided, or only partially filled (i.e. there remain non-linear parameters with no starting value), then the starting value of all remaining non-linear parameters is set to \code{NL.start.init}.
#' @param offset A formula or a numeric vector. An offset can be added to the estimation. If equal to a formula, it should be of the form (for example) \code{~0.5*x**2}. This offset is linearly added to the elements of the main formula 'fml'.
#' @param jacobian.method (For NL models only) Character scalar. Provides the method used to numerically compute the Jacobian of the non-linear part. Can be either \code{"simple"} or \code{"Richardson"}. Default is \code{"simple"}. See the help of \code{\link[numDeriv]{jacobian}} for more information.
#' @param useHessian Logical. Should the Hessian be computed in the optimization stage? Default is \code{TRUE}.
#' @param hessian.args List of arguments to be passed to function \code{\link[numDeriv]{genD}}. Defaults is missing. Only used with the presence of \code{NL.fml}.
#' @param opt.control List of elements to be passed to the optimization method \code{\link[stats]{nlminb}}. See the help page of \code{\link[stats]{nlminb}} for more information.
#' @param nthreads The number of threads. Can be: a) an integer lower than, or equal to, the maximum number of threads; b) 0: meaning all available threads will be used; c) a number strictly between 0 and 1 which represents the fraction of all threads to use. The default is to use 50\% of all threads. You can set permanently the number of threads used within this package using the function \code{\link[fixest]{setFixest_nthreads}}.
#' @param verbose Integer, default is 0. It represents the level of information that should be reported during the optimisation process. If \code{verbose=0}: nothing is reported. If \code{verbose=1}: the value of the coefficients and the likelihood are reported. If \code{verbose=2}: \code{1} + information on the computing time of the null model, the fixed-effects coefficients and the hessian are reported.
#' @param theta.init Positive numeric scalar. The starting value of the dispersion parameter if \code{family="negbin"}. By default, the algorithm uses as a starting value the theta obtained from the model with only the intercept.
#' @param fixef.rm Can be equal to "perfect" (default), "singleton", "both" or "none". Controls which observations are to be removed. If "perfect", then observations having a fixed-effect with perfect fit (e.g. only 0 outcomes in Poisson estimations) will be removed. If "singleton", all observations for which a fixed-effect appears only once will be removed. The meaning of "both" and "none" is direct.
#' @param fixef.tol Precision used to obtain the fixed-effects. Defaults to \code{1e-5}. It corresponds to the maximum absolute difference allowed between two coefficients of successive iterations. Argument \code{fixef.tol} cannot be lower than \code{10000*.Machine$double.eps}. Note that this parameter is dynamically controlled by the algorithm.
#' @param fixef.iter Maximum number of iterations in fixed-effects algorithm (only in use for 2+ fixed-effects). Default is 10000.
#' @param deriv.iter Maximum number of iterations in the algorithm to obtain the derivative of the fixed-effects (only in use for 2+ fixed-effects). Default is 1000.
#' @param deriv.tol Precision used to obtain the fixed-effects derivatives. Defaults to \code{1e-4}. It corresponds to the maximum absolute difference allowed between two coefficients of successive iterations. Argument \code{deriv.tol} cannot be lower than \code{10000*.Machine$double.eps}.
#' @param warn Logical, default is \code{TRUE}. Whether warnings should be displayed (concerns warnings relating to convergence state).
#' @param notes Logical. By default, two notes are displayed: when NAs are removed (to show additional information) and when some observations are removed because of only 0 (or 0/1) outcomes in a fixed-effect setup (in Poisson/Neg. Bin./Logit models). To avoid displaying these messages, you can set \code{notes = FALSE}. You can remove these messages permanently by using \code{setFixest_notes(FALSE)}.
#' @param combine.quick Logical. When you combine different variables to transform them into a single fixed-effects you can do e.g. \code{y ~ x | paste(var1, var2)}. The algorithm provides a shorthand to do the same operation: \code{y ~ x | var1^var2}. Because pasting variables is a costly operation, the internal algorithm may use a numerical trick to hasten the process. The cost of doing so is that you lose the labels. If you are interested in getting the value of the fixed-effects coefficients after the estimation, you should use \code{combine.quick = FALSE}. By default it is equal to \code{FALSE} if the number of observations is lower than 50,000, and to \code{TRUE} otherwise.
#' @param only.env (Advanced users.) Logical, default is \code{FALSE}. If \code{TRUE}, then only the environment used to make the estimation is returned.
#' @param mem.clean Logical, default is \code{FALSE}. Only to be used if the data set is large compared to the available RAM. If \code{TRUE} then intermediary objects are removed as much as possible and \code{\link[base]{gc}} is run before each substantial C++ section in the internal code to avoid memory issues.
#' @param lean Logical, default is \code{FALSE}. If \code{TRUE} then all large objects are removed from the returned result: this will save memory but will block the possibility to use many methods. It is recommended to use the arguments \code{se} or \code{cluster} to obtain the appropriate standard-errors at estimation time, since obtaining different SEs won't be possible afterwards.
#' @param env (Advanced users.) A \code{fixest} environment created by a \code{fixest} estimation with \code{only.env = TRUE}. Default is missing. If provided, the data from this environment will be used to perform the estimation.
#' @param ... Not currently used.
#'
#' @details
#' This function estimates maximum likelihood models where the conditional expectations are as follows:
#'
#' Gaussian likelihood:
#' \deqn{E(Y|X)=X\beta}{E(Y|X) = X*beta}
#' Poisson and Negative Binomial likelihoods:
#' \deqn{E(Y|X)=\exp(X\beta)}{E(Y|X) = exp(X*beta)}
#' where in the Negative Binomial there is the parameter \eqn{\theta}{theta} used to model the variance as \eqn{\mu+\mu^2/\theta}{mu+mu^2/theta}, with \eqn{\mu}{mu} the conditional expectation.
#' Logit likelihood:
#' \deqn{E(Y|X)=\frac{\exp(X\beta)}{1+\exp(X\beta)}}{E(Y|X) = exp(X*beta) / (1 + exp(X*beta))}
#'
#' When there are one or more fixed-effects, the conditional expectation can be written as:
#' \deqn{E(Y|X) = h(X\beta+\sum_{k}\sum_{m}\gamma_{m}^{k}\times C_{im}^{k}),}
#' where \eqn{h(.)} is the function corresponding to the likelihood function as shown before. \eqn{C^k} is the matrix associated to fixed-effect dimension \eqn{k} such that \eqn{C^k_{im}} is equal to 1 if observation \eqn{i} is of category \eqn{m} in the fixed-effect dimension \eqn{k} and 0 otherwise.
#'
#' When there are non linear in parameters functions, we can schematically split the set of regressors in two:
#' \deqn{f(X,\beta)=X^1\beta^1 + g(X^2,\beta^2)}
#' with first a linear term and then a non linear part expressed by the function g. That is, we add a non-linear term to the linear terms (which are \eqn{X*beta} and the fixed-effects coefficients). It is always better (more efficient) to put into the argument \code{NL.fml} only the non-linear in parameter terms, and add all linear terms in the \code{fml} argument.
#'
#' To estimate only a non-linear formula without even the intercept, you must exclude the intercept from the linear formula by using, e.g., \code{fml = z~0}.
#'
#' The over-dispersion parameter of the Negative Binomial family, theta, is capped at 10,000. If theta reaches this high value, it means that there is no overdispersion.
#'
#' @return
#' A \code{fixest} object. Note that \code{fixest} objects contain many elements and most of them are for internal use, they are presented here only for information. To access them, it is safer to use the user-level methods (e.g. \code{\link[fixest]{vcov.fixest}}, \code{\link[fixest]{resid.fixest}}, etc) or functions (like for instance \code{\link[fixest]{fitstat}} to access any fit statistic).
#' \item{coefficients}{The named vector of coefficients.}
#' \item{coeftable}{The table of the coefficients with their standard errors, z-values and p-values.}
#' \item{loglik}{The loglikelihood.}
#' \item{iterations}{Number of iterations of the algorithm.}
#' \item{nobs}{The number of observations.}
#' \item{nparams}{The number of parameters of the model.}
#' \item{call}{The call.}
#' \item{fml}{The linear formula of the call.}
#' \item{fml_all}{A list containing different parts of the formula. Always contain the linear formula. Then, if relevant: \code{fixef}: the fixed-effects; \code{NL}: the non linear part of the formula.}
#' \item{ll_null}{Log-likelihood of the null model (i.e. with the intercept only).}
#' \item{pseudo_r2}{The adjusted pseudo R2.}
#' \item{message}{The convergence message from the optimization procedures.}
#' \item{sq.cor}{Squared correlation between the dependent variable and the expected predictor (i.e. fitted.values) obtained by the estimation.}
#' \item{hessian}{The Hessian of the parameters.}
#' \item{fitted.values}{The fitted values are the expected value of the dependent variable for the fitted model: that is \eqn{E(Y|X)}.}
#' \item{cov.unscaled}{The variance-covariance matrix of the parameters.}
#' \item{se}{The standard-error of the parameters.}
#' \item{scores}{The matrix of the scores (first derivative for each observation).}
#' \item{family}{The ML family that was used for the estimation.}
#' \item{residuals}{The difference between the dependent variable and the expected predictor.}
#' \item{sumFE}{The sum of the fixed-effects for each observation.}
#' \item{offset}{The offset formula.}
#' \item{NL.fml}{The nonlinear formula of the call.}
#' \item{bounds}{Whether the coefficients were upper or lower bounded. -- This can only be the case when a non-linear formula is included and the arguments 'lower' or 'upper' are provided.}
#' \item{isBounded}{The logical vector that gives for each coefficient whether it was bounded or not. This can only be the case when a non-linear formula is included and the arguments 'lower' or 'upper' are provided.}
#' \item{fixef_vars}{The names of each fixed-effect dimension.}
#' \item{fixef_id}{The list (of length the number of fixed-effects) of the fixed-effects identifiers for each observation.}
#' \item{fixef_sizes}{The size of each fixed-effect (i.e. the number of unique identifiers for each fixed-effect dimension).}
#' \item{obs_selection}{(When relevant.) List containing vectors of integers. It represents the sequential selection of observation vis a vis the original data set.}
#' \item{fixef_removed}{In the case there were fixed-effects and some observations were removed because of only 0/1 outcome within a fixed-effect, it gives the list (for each fixed-effect dimension) of the fixed-effect identifiers that were removed.}
#' \item{theta}{In the case of a negative binomial estimation: the overdispersion parameter.}
#'
#' @seealso
#' See also \code{\link[fixest]{summary.fixest}} to see the results with the appropriate standard-errors, \code{\link[fixest]{fixef.fixest}} to extract the fixed-effects coefficients, and the function \code{\link[fixest]{etable}} to visualize the results of multiple estimations.
#'
#' And other estimation methods: \code{\link[fixest]{feols}}, \code{\link[fixest]{femlm}}, \code{\link[fixest]{feglm}}, \code{\link[fixest:feglm]{fepois}}, \code{\link[fixest:femlm]{fenegbin}}.
#'
#' @author
#' Laurent Berge
#'
#' @references
#'
#' Berge, Laurent, 2018, "Efficient estimation of maximum likelihood models with multiple fixed-effects: the R package FENmlm." CREA Discussion Papers, 13 (\url{https://wwwen.uni.lu/content/download/110162/1299525/file/2018_13}).
#'
#' For models with multiple fixed-effects:
#'
#' Gaure, Simen, 2013, "OLS with multiple high dimensional category variables", Computational Statistics & Data Analysis 66 pp. 8--18
#'
#' On the unconditional Negative Binomial model:
#'
#' Allison, Paul D and Waterman, Richard P, 2002, "Fixed-Effects Negative Binomial Regression Models", Sociological Methodology 32(1) pp. 247--265
#'
#' @examples
#'
#' # This section covers only non-linear in parameters examples
#' # For linear relationships: use femlm or feglm instead
#'
#' # Generating data for a simple example
#' set.seed(1)
#' n = 100
#' x = rnorm(n, 1, 5)**2
#' y = rnorm(n, -1, 5)**2
#' z1 = rpois(n, x*y) + rpois(n, 2)
#' base = data.frame(x, y, z1)
#'
#' # Estimating a 'linear' relation:
#' est1_L = femlm(z1 ~ log(x) + log(y), base)
#' # Estimating the same 'linear' relation using a 'non-linear' call
#' est1_NL = feNmlm(z1 ~ 1, base, NL.fml = ~a*log(x)+b*log(y), NL.start = list(a=0, b=0))
#' # we compare the estimates with the function esttable (they are identical)
#' etable(est1_L, est1_NL)
#'
#' # Now generating a non-linear relation (E(z2) = x + y + 1):
#' z2 = rpois(n, x + y) + rpois(n, 1)
#' base$z2 = z2
#'
#' # Estimation using this non-linear form
#' est2_NL = feNmlm(z2 ~ 0, base, NL.fml = ~log(a*x + b*y),
#' NL.start = 2, lower = list(a=0, b=0))
#' # we can't estimate this relation linearly
#' # => closest we can do:
#' est2_L = femlm(z2 ~ log(x) + log(y), base)
#'
#' # Difference between the two models:
#' etable(est2_L, est2_NL)
#'
#' # Plotting the fits:
#' plot(x, z2, pch = 18)
#' points(x, fitted(est2_L), col = 2, pch = 1)
#' points(x, fitted(est2_NL), col = 4, pch = 2)
#'
#'
feNmlm = function(fml, data, family=c("poisson", "negbin", "logit", "gaussian"), NL.fml, fixef, fixef.rm = "perfect", NL.start, lower, upper, NL.start.init, offset, subset, split, fsplit, cluster, se, dof, panel.id, start = 0, jacobian.method="simple", useHessian = TRUE, hessian.args = NULL, opt.control = list(), nthreads = getFixest_nthreads(), lean = FALSE, verbose = 0, theta.init, fixef.tol = 1e-5, fixef.iter = 10000, deriv.tol = 1e-4, deriv.iter = 1000, warn = TRUE, notes = getFixest_notes(), combine.quick, mem.clean = FALSE, only.env = FALSE, env, ...){
    # Fixed-effects maximum likelihood estimation, possibly with a non-linear
    # in parameters right-hand-side (see the roxygen documentation above).
    #
    # Overall flow:
    #   1) build (or reuse) the estimation environment 'env' holding all data
    #   2) dispatch multiple estimations (split / multi-fixef / multi-LHS-RHS)
    #   3) maximize the log-likelihood with stats::nlminb
    #   4) assemble the 'fixest' result object (coef table, vcov, fit stats, FEs)

    time_start = proc.time()

    if(missing(env)){
        # Regular entry point: create the estimation environment from the arguments.
        # All argument checking and data pre-processing happens in fixest_env().
        set_defaults("fixest_estimation")
        call_env = new.env(parent = parent.frame())
        env = try(fixest_env(fml = fml, data = data, family = family, NL.fml = NL.fml, fixef = fixef, fixef.rm = fixef.rm, NL.start = NL.start, lower = lower, upper = upper, NL.start.init = NL.start.init, offset = offset, subset = subset, split = split, fsplit = fsplit, cluster = cluster, se = se, dof = dof, panel.id = panel.id, linear.start = start, jacobian.method = jacobian.method, useHessian = useHessian, opt.control = opt.control, nthreads = nthreads, lean = lean, verbose = verbose, theta.init = theta.init, fixef.tol = fixef.tol, fixef.iter = fixef.iter, deriv.iter = deriv.iter, warn = warn, notes = notes, combine.quick = combine.quick, mem.clean = mem.clean, mc_origin = match.call(), call_env = call_env, computeModel0 = TRUE, ...), silent = TRUE)
    } else if((r <- !is.environment(env)) || !isTRUE(env$fixest_env)) {
        # Advanced entry point: 'env' must be a fixest environment (only.env = TRUE)
        stop("Argument 'env' must be an environment created by a fixest estimation. Currently it is not ", ifelse(r, "an", "a 'fixest'"), " environment.")
    }

    check_arg(only.env, "logical scalar")
    if(only.env){
        # The user only wants the environment (for later re-use), no estimation
        return(env)
    }

    if("try-error" %in% class(env)){
        # Error during environment creation: re-throw with the calling function's name
        mc = match.call()
        origin = ifelse(is.null(mc$origin), "feNmlm", mc$origin)
        stop(format_error_msg(env, origin))
    }

    verbose = get("verbose", env)
    if(verbose >= 2) cat("Setup in ", (proc.time() - time_start)[3], "s\n", sep="")

    #
    # Split ####
    #

    # Sample splits (split/fsplit arguments): one estimation per sub-sample
    do_split = get("do_split", env)
    if(do_split){
        res = multi_split(env, feNmlm)
        return(res)
    }

    #
    # Multi fixef ####
    #

    # Stepwise fixed-effects: one estimation per fixed-effects specification
    do_multi_fixef = get("do_multi_fixef", env)
    if(do_multi_fixef){
        res = multi_fixef(env, feNmlm)
        return(res)
    }

    #
    # Multi LHS and RHS ####
    #

    # Multiple dependent variables and/or stepwise right-hand-sides
    do_multi_lhs = get("do_multi_lhs", env)
    do_multi_rhs = get("do_multi_rhs", env)
    if(do_multi_lhs || do_multi_rhs){
        res = multi_LHS_RHS(env, feNmlm)
        return(res)
    }

    #
    # Regular estimation ####
    #

    # Objects needed for optimization + misc
    start = get("start", env)
    lower = get("lower", env)
    upper = get("upper", env)
    gradient = get("gradient", env)
    hessian = get("hessian", env)
    family = get("family", env)
    isLinear = get("isLinear", env)
    isNonLinear = get("isNL", env)
    opt.control = get("opt.control", env)
    lhs = get("lhs", env)
    family = get("family", env)
    famFuns = get("famFuns", env)
    params = get("params", env)
    isFixef = get("isFixef", env)

    # TRUE when the model contains fixed-effects and nothing else
    onlyFixef = !isLinear && !isNonLinear && isFixef

    #
    # Model 0 + theta init
    #

    # Null model (intercept only): used for the pseudo-R2 and theta starting value
    theta.init = get("theta.init", env)
    model0 = get_model_null(env, theta.init)

    # For the negative binomial:
    if(family == "negbin"){
        # theta (overdispersion) is estimated jointly: append it to the parameters
        theta.init = get("theta.init", env)
        if(is.null(theta.init)){
            theta.init = model0$theta
        }

        params = c(params, ".theta")
        start = c(start, theta.init)
        names(start) = params
        # theta is capped at 10,000 (see details) and bounded away from 0
        upper = c(upper, 10000)
        lower = c(lower, 1e-3)

        assign("params", params, env)
    }

    assign("model0", model0, env)

    # the result
    res = get("res", env)

    # NO VARIABLE -- ONLY FIXED-EFFECTS
    if(onlyFixef){
        if(family == "negbin"){
            stop("To estimate the negative binomial model, you need at least one variable. (The estimation of the model with only the fixed-effects is not implemented.)")
        }

        res = femlm_only_clusters(env)
        res$onlyFixef = TRUE

        return(res)
    }

    # warnings => to avoid accumulation, but should appear even if the user stops the algorithm
    on.exit(warn_fixef_iter(env))

    #
    # Maximizing the likelihood
    #

    opt <- try(stats::nlminb(start=start, objective=femlm_ll, env=env, lower=lower, upper=upper, gradient=gradient, hessian=hessian, control=opt.control), silent = TRUE)

    if("try-error" %in% class(opt)){
        # We return the coefficients (can be interesting for debugging)
        iter = get("iter", env)
        origin = get("origin", env)
        warning_msg = paste0("[", origin, "] Optimization failed at iteration ", iter, ". Reason: ", gsub("^[^\n]+\n *(.+\n)", "\\1", opt))
        if(!"coef_evaluated" %in% names(env)){
            # big problem right from the start
            stop(warning_msg)
        } else {
            coef = get("coef_evaluated", env)
            warning(warning_msg, " Last evaluated coefficients returned.", call. = FALSE)
            return(coef)
        }
    } else {
        # Only these nlminb messages indicate proper convergence
        convStatus = TRUE
        warning_msg = ""
        if(!opt$message %in% c("X-convergence (3)", "relative convergence (4)", "both X-convergence and relative convergence (5)")){
            warning_msg = " The optimization algorithm did not converge, the results are not reliable."
            convStatus = FALSE
        }

        coef <- opt$par
    }

    # The Hessian (at the optimum, used for the vcov)
    hessian = femlm_hessian(coef, env = env)
    # we add the names of the non linear variables in the hessian
    if(isNonLinear || family == "negbin"){
        dimnames(hessian) = list(params, params)
    }

    # we create the Hessian without the bounded parameters
    hessian_noBounded = hessian

    # Handling the bounds
    if(!isNonLinear){
        NL.fml = NULL
        bounds = NULL
        isBounded = NULL
    } else {
        nonlinear.params = get("nonlinear.params", env)

        # we report the bounds & if the estimated parameters are bounded
        upper_bound = upper[nonlinear.params]
        lower_bound = lower[nonlinear.params]

        # 1: are the estimated parameters at their bounds?
        coef_NL = coef[nonlinear.params]
        isBounded = rep(FALSE, length(params))
        isBounded[1:length(coef_NL)] = (coef_NL == lower_bound) | (coef_NL == upper_bound)

        # 2: we save the bounds (only the finite, user-provided ones)
        upper_bound_small = upper_bound[is.finite(upper_bound)]
        lower_bound_small = lower_bound[is.finite(lower_bound)]
        bounds = list()
        if(length(upper_bound_small) > 0) bounds$upper = upper_bound_small
        if(length(lower_bound_small) > 0) bounds$lower = lower_bound_small

        if(length(bounds) == 0){
            bounds = NULL
        }

        # 3: we update the Hessian (basically, we drop the bounded element)
        if(any(isBounded)){
            hessian_noBounded = hessian[-which(isBounded), -which(isBounded), drop = FALSE]

            boundText = ifelse(coef_NL == upper_bound, "Upper bounded", "Lower bounded")[isBounded]

            attr(isBounded, "type") = boundText
        }
    }

    # Variance: inverse of the (unbounded) Hessian; NULL if singular
    var <- NULL
    try(var <- solve(hessian_noBounded), silent = TRUE)
    if(is.null(var)){
        warning_msg = paste(warning_msg, "The information matrix is singular: presence of collinearity. Use function collinearity() to pinpoint the problems.")
        var = hessian_noBounded * NA
        se = diag(var)
    } else {
        se = diag(var)
        # negative diagonal values can happen numerically => SE set to NA
        se[se < 0] = NA
        se = sqrt(se)
    }

    # Warning message
    if(nchar(warning_msg) > 0){
        if(warn){
            warning("[femlm]:", warning_msg, call. = FALSE)
            options("fixest_last_warning" = proc.time())
        }
    }

    # To handle the bounded coefficient, we set its SE to NA
    if(any(isBounded)){
        se = se[params]
        names(se) = params
    }

    zvalue <- coef/se
    pvalue <- 2*pnorm(-abs(zvalue))

    # We add the information on the bound for the se & update the var to drop the bounded vars
    se_format = se
    if(any(isBounded)){
        se_format[!isBounded] = decimalFormat(se_format[!isBounded])
        se_format[isBounded] = boundText
    }

    coeftable <- data.frame("Estimate"=coef, "Std. Error"=se_format, "z value"=zvalue, "Pr(>|z|)"=pvalue, stringsAsFactors = FALSE)
    names(coeftable) <- c("Estimate", "Std. Error", "z value", "Pr(>|z|)")
    row.names(coeftable) <- params

    attr(se, "type") = attr(coeftable, "type") = "Standard"

    # mu is the linear predictor (exp_mu its exponential when relevant)
    mu_both = get_mu(coef, env, final = TRUE)
    mu = mu_both$mu
    exp_mu = mu_both$exp_mu

    # Pseudo-R2 computation
    loglik <- -opt$objective # minus sign: nlminb minimizes, the objective is -loglik
    ll_null <- model0$loglik

    # dummies are constrained, they don't have full dof (cause you need to take one value off for unicity)
    # this is an approximation, in some cases there can be more than one ref. But good approx.
    nparams = res$nparams
    pseudo_r2 <- 1 - (loglik - nparams + 1) / ll_null

    # Residuals computation
    expected.predictor = famFuns$expected.predictor(mu, exp_mu, env)
    residuals = lhs - expected.predictor

    # Squared correlation between observed and fitted values
    if(cpp_isConstant(expected.predictor)){
        # correlation undefined for a constant predictor
        sq.cor = NA
    } else {
        sq.cor = stats::cor(lhs, expected.predictor)**2
    }

    ssr_null = cpp_ssr_null(lhs)

    # The scores (observation-level first derivatives, used for clustered SEs)
    scores = femlm_scores(coef, env)
    if(isNonLinear){
        # we add the names of the non linear params in the score
        colnames(scores) = params
    }

    n = length(lhs)

    # Saving: fill the result object (see the roxygen @return section)
    res$coefficients = coef
    res$coeftable = coeftable
    res$loglik = loglik
    res$iterations = opt$iterations
    res$ll_null = ll_null
    res$ssr_null = ssr_null
    res$pseudo_r2 = pseudo_r2
    res$message = opt$message
    res$convStatus = convStatus
    res$sq.cor = sq.cor
    res$fitted.values = expected.predictor
    res$hessian = hessian
    res$cov.unscaled = var
    res$se = se
    res$scores = scores
    res$family = family
    res$residuals = residuals

    # The value of mu (if cannot be recovered from fitted())
    if(family == "logit"){
        # fitted values of exactly 0/1 lose the information on mu
        qui_01 = expected.predictor %in% c(0, 1)
        if(any(qui_01)){
            res$mu = mu
        }
    } else if(family %in% c("poisson", "negbin")){
        # fitted values of exactly 0 lose the information on mu
        qui_0 = expected.predictor == 0
        if(any(qui_0)){
            res$mu = mu
        }
    }

    if(!is.null(bounds)){
        res$bounds = bounds
        res$isBounded = isBounded
    }

    # Fixed-effects
    if(isFixef){
        # For the Poisson family, fixed-effect coefficients live in exp space
        useExp_fixefCoef = family %in% c("poisson")

        sumFE = attr(mu, "sumFE")
        if(useExp_fixefCoef){
            sumFE = rpar_log(sumFE, env)
        }

        res$sumFE = sumFE

        # The LL and SSR with FE only
        if("ll_fe_only" %in% names(env)){
            res$ll_fe_only = get("ll_fe_only", env)
            res$ssr_fe_only = get("ssr_fe_only", env)
        } else {
            # we need to compute it

            # indicator of whether we compute the exp(mu)
            useExp = family %in% c("poisson", "logit", "negbin")

            # mu, using the offset
            if(!is.null(res$offset)){
                mu_noDum = res$offset
            } else {
                mu_noDum = 0
            }

            if(length(mu_noDum) == 1) mu_noDum = rep(mu_noDum, n)

            exp_mu_noDum = NULL
            if(useExp_fixefCoef){
                exp_mu_noDum = rpar_exp(mu_noDum, env)
            }

            assign("fixef.tol", 1e-4, env) # high precision is not needed here

            dummies = getDummies(mu_noDum, exp_mu_noDum, env, coef)

            exp_mu = NULL
            if(useExp_fixefCoef){
                # despite being called mu, it is in fact exp(mu)!!!
                exp_mu = exp_mu_noDum*dummies
                mu = rpar_log(exp_mu, env)
            } else {
                mu = mu_noDum + dummies
                if(useExp){
                    exp_mu = rpar_exp(mu, env)
                }
            }

            res$ll_fe_only = famFuns$ll(lhs, mu, exp_mu, env, coef)
            ep = famFuns$expected.predictor(mu, exp_mu, env)
            res$ssr_fe_only = cpp_ssq(lhs - ep)
        }

    }

    if(family == "negbin"){
        # report the estimated overdispersion parameter
        theta = coef[".theta"]
        res$theta = theta

        if(notes && theta > 1000){
            message("Very high value of theta (", theta, "). There is no sign of overdispersion, you may consider a Poisson model.")
        }

    }

    class(res) <- "fixest"

    if(verbose > 0){
        cat("\n")
    }

    # do_summary is TRUE when se/cluster were provided at estimation time
    do_summary = get("do_summary", env)
    if(do_summary){
        se = get("se", env)
        cluster = get("cluster", env)
        lean = get("lean", env)
        dof = get("dof", env)
        summary_flags = get("summary_flags", env)

        # To compute the RMSE and lean = TRUE
        if(lean) res$ssr = cpp_ssq(res$residuals)

        res = summary(res, se = se, cluster = cluster, dof = dof, lean = lean, summary_flags = summary_flags)
    }

    return(res)
}
####
#### Delayed Warnings ####
####
warn_fixef_iter = function(env){
    # Delayed warnings reporting how many times the iteration limit was hit,
    # both for the fixed-effects algorithm and for the computation of the
    # fixed-effects derivatives. Silent when warnings are disabled.

    max_iter_fe = get("fixef.iter", env)
    n_limit_fe = get("fixef.iter.limit_reached", env)
    origin = get("origin", env)
    warn = get("warn", env)

    if(!warn) return(invisible(NULL))

    any_warning = FALSE
    warning_msg = ""

    if(n_limit_fe > 0){
        any_warning = TRUE
        times_txt = if(n_limit_fe > 1) paste0(" (", n_limit_fe, " times.)") else " (Once.)"
        warning_msg = paste0(origin, ": [Getting the fixed-effects] iteration limit reached (", max_iter_fe, ").", times_txt)
    }

    # Same check for the fixed-effects derivatives
    max_iter_deriv = get("deriv.iter", env)
    n_limit_deriv = get("deriv.iter.limit_reached", env)

    if(n_limit_deriv > 0){
        # when both messages are shown, align the second one under the first
        prefix = if(any_warning) paste0("\n", sprintf("% *s", nchar(origin) + 2, " ")) else paste0(origin, ": ")
        times_txt = if(n_limit_deriv > 1) paste0(" (", n_limit_deriv, " times.)") else " (Once.)"
        warning_msg = paste0(warning_msg, prefix, "[Getting fixed-effects derivatives] iteration limit reached (", max_iter_deriv, ").", times_txt)
        any_warning = TRUE
    }

    if(any_warning){
        warning(warning_msg, call. = FALSE, immediate. = TRUE)
    }
}
warn_step_halving = function(env){
    # Delayed warning for feglm: reports how many step-halvings were needed
    # because of a non-finite deviance. Silent when warnings are disabled.

    n_halvings = get("nb_sh", env)
    warn = get("warn", env)

    if(!warn) return(invisible(NULL))

    if(n_halvings > 0){
        times_txt = if(n_halvings > 1) paste0(n_halvings, " times") else "once"
        warning("feglm: Step halving due to non-finite deviance (", times_txt, ").", call. = FALSE, immediate. = TRUE)
    }
}
format_error_msg = function(x, origin){
    # Formats a captured try() error message before re-throwing it.
    # Three cases:
    #   - error raised on purpose by a fixest internal: strip the call banner
    #   - common user errors (missing object, memory): pass through unchanged
    #   - anything else: append a note asking the user to report the issue
    #
    # LATER:
    # - for object not found: provide a better error msg by calling the name of the missing
    # argument => likely I'll need a match.call argument

    msg = gsub("\n+$", "", x)

    if(grepl("^Error (in|:|: in) (fe|fixest|fun)[^\n]+\n", msg)){
        # deliberate internal error: keep only the message body
        res = gsub("^Error (in|:|: in) (fe|fixest|fun)[^\n]+\n *(.+)", "\\3", msg)

    } else if(grepl("[Oo]bject '.+' not found", msg) || grepl("memory|cannot allocate", msg)) {
        res = msg

    } else {
        res = paste0(msg, "\nThis error was unforeseen by the author of the function ", origin, ". If you think your call to the function is legitimate, could you report?")
    }

    res
}
####
#### Multiple estimation tools ####
####
multi_split = function(env, fun){
    # Dispatches a split-sample estimation: runs the estimation function 'fun'
    # once per sub-sample defined by the 'split' vector (plus, optionally, once
    # on the full sample), and bundles everything into a 'fixest_multi' object.
    #
    # Args:
    #   env: a fixest estimation environment (with the split information set up)
    #   fun: the estimation function to call on each sub-sample (e.g. feNmlm)

    split = get("split", env)              # integer vector: sub-sample id per observation
    split.full = get("split.full", env)    # logical: also estimate on the full sample?
    split.items = get("split.items", env)  # labels of the sub-samples
    split.name = get("split.name", env)    # name of the splitting variable

    # avoid infinite recursion when 'fun' is called back on this environment
    assign("do_split", FALSE, env)

    res_all = list()
    n_split = length(split.items)
    index = NULL
    all_names = NULL
    is_multi = FALSE
    for(i in 0:n_split){
        # i == 0 is the (optional) full-sample estimation
        if(i == 0){
            if(split.full){
                my_env = reshape_env(env)
                my_res = fun(env = my_env)
            } else {
                next
            }
        } else {
            # keep only the observations of sub-sample i
            my_res = fun(env = reshape_env(env, obs2keep = which(split == i)))
        }

        res_all[[length(res_all) + 1]] = my_res
    }

    if(split.full){
        split.items = c("Full sample", split.items)
    }

    # meta information for the fixest_multi container
    index = list(sample = length(res_all))
    all_names = list(sample = split.items, split.name = split.name)

    # result
    res_multi = setup_multi(index, all_names, res_all)

    return(res_multi)
}
multi_LHS_RHS = function(env, fun){
    # Dispatches estimations with multiple left-hand-sides and/or stepwise
    # right-hand-sides: runs 'fun' for each (LHS, RHS) combination and returns
    # a 'fixest_multi' object indexed by lhs and rhs.
    #
    # Args:
    #   env: a fixest estimation environment (multi LHS/RHS info set up)
    #   fun: the estimation function to call for each combination (e.g. feNmlm)

    do_multi_lhs = get("do_multi_lhs", env)
    do_multi_rhs = get("do_multi_rhs", env)

    # avoid infinite recursion when 'fun' is called back on this environment
    assign("do_multi_lhs", FALSE, env)
    assign("do_multi_rhs", FALSE, env)

    nthreads = get("nthreads", env)

    # IMPORTANT NOTE:
    # contrary to feols, the preprocessing is only a small fraction of the
    # computing time in ML models
    # Therefore we don't need to optimize processing as hard as in FEOLS
    # because the gains are only marginal

    fml = get("fml", env)

    # LHS: normalized to a list of numeric vectors (one per dependent variable)
    lhs_names = get("lhs_names", env)
    lhs = get("lhs", env)
    if(do_multi_lhs == FALSE){
        lhs = list(lhs)
    }

    # RHS: 'linear_core' holds the fixed left/right parts, 'rhs_sw' the
    # stepwise blocks inserted in-between
    if(do_multi_rhs){
        rhs_info_stepwise = get("rhs_info_stepwise", env)

        multi_rhs_fml_full = rhs_info_stepwise$fml_all_full   # full formula of each step
        multi_rhs_fml_sw = rhs_info_stepwise$fml_all_sw       # formula of each stepwise block
        multi_rhs_cumul = rhs_info_stepwise$is_cumul          # cumulative stepwise (csw)?

        linear_core = get("linear_core", env)
        rhs_sw = get("rhs_sw", env)

    } else {
        # single RHS: set up degenerate one-element structures
        multi_rhs_fml_full = list(.xpd(rhs = fml[[3]]))
        multi_rhs_cumul = FALSE

        linear.mat = get("linear.mat", env)
        linear_core = list(left = linear.mat, right = 1)
        rhs_sw = list(1)
    }

    # length 1 is the placeholder for "no variables" on that side
    isLinear_left = length(linear_core$left) > 1
    isLinear_right = length(linear_core$right) > 1

    n_lhs = length(lhs)
    n_rhs = length(rhs_sw)
    res = vector("list", n_lhs * n_rhs)

    rhs_names = sapply(multi_rhs_fml_full, function(x) as.character(x)[[2]])

    for(i in seq_along(lhs)){
        for(j in seq_along(rhs_sw)){
            # reshaping the env => taking care of the NAs

            # Forming the RHS: left core + stepwise block(s) + right core
            my_rhs = linear_core[1]
            if(multi_rhs_cumul){
                # cumulative stepwise: all blocks up to j are included
                my_rhs[1 + 1:j] = rhs_sw[1:j]
            } else {
                my_rhs[2] = rhs_sw[j]
            }

            if(isLinear_right){
                my_rhs[[length(my_rhs) + 1]] = linear_core$right
            }

            # drop the length-1 placeholders before binding the columns
            n_all = lengths(my_rhs)
            if(any(n_all == 1)){
                my_rhs = my_rhs[n_all > 1]
            }

            if(length(my_rhs) == 0){
                my_rhs = 1
            } else {
                my_rhs = do.call("cbind", my_rhs)
            }

            # NA/Inf flags for the current (LHS, RHS) pair
            if(length(my_rhs) == 1){
                is_na_current = !is.finite(lhs[[i]])
            } else {
                is_na_current = !is.finite(lhs[[i]]) | cpppar_which_na_inf_mat(my_rhs, nthreads)$is_na_inf
            }

            my_fml = .xpd(lhs = lhs_names[i], rhs = multi_rhs_fml_full[[j]])

            if(any(is_na_current)){
                my_env = reshape_env(env, which(!is_na_current), lhs = lhs[[i]], rhs = my_rhs, fml_linear = my_fml)
            } else {
                # We still need to check the RHS (only 0/1)
                my_env = reshape_env(env, lhs = lhs[[i]], rhs = my_rhs, fml_linear = my_fml, check_lhs = TRUE)
            }

            my_res = fun(env = my_env)

            # store at the flattened (i, j) position
            res[[index_2D_to_1D(i, j, n_rhs)]] = my_res
        }
    }

    # Meta information for fixest_multi
    index = list(lhs = n_lhs, rhs = n_rhs)
    all_names = list(lhs = lhs_names, rhs = rhs_names)

    # result
    res_multi = setup_multi(index, all_names, res)

    return(res_multi)
}
multi_fixef = function(env, estfun){
    # Dispatches estimations with stepwise fixed-effects: runs 'estfun' once per
    # fixed-effects specification and returns a 'fixest_multi' object.
    # For each specification, the fixed-effects are evaluated, NAs removed, the
    # intercept dropped when fixed-effects are present, and the environment is
    # reshaped before the estimation is launched.
    #
    # Args:
    #   env: a fixest estimation environment (multi-fixef info set up)
    #   estfun: the estimation function to call (e.g. feNmlm)
    #
    # Honestly had I known it was so painful, I wouldn't have done it...

    # avoid infinite recursion when 'estfun' is called back on this environment
    assign("do_multi_fixef", FALSE, env)

    multi_fixef_fml_full = get("multi_fixef_fml_full", env)

    combine.quick = get("combine.quick", env)
    fixef.rm = get("fixef.rm", env)
    family = get("family", env)
    origin_type = get("origin_type", env)
    nthreads = get("nthreads", env)

    data = get("data", env)

    n_fixef = length(multi_fixef_fml_full)

    data_results = list()
    for(i in 1:n_fixef){
        fml_fixef = multi_fixef_fml_full[[i]]

        if(length(all.vars(fml_fixef)) > 0){
            #
            # Evaluation of the fixed-effects
            #

            fixef_terms_full = fixef_terms(fml_fixef)

            # fixef_terms_full computed in the formula section
            fixef_terms = fixef_terms_full$fml_terms

            # FEs
            fixef_df = error_sender(prepare_df(fixef_terms_full$fe_vars, data, combine.quick),
                                    "Problem evaluating the fixed-effects part of the formula:\n")
            fixef_vars = names(fixef_df)

            # Slopes (varying slopes within fixed-effects, e.g. fe[x])
            isSlope = any(fixef_terms_full$slope_flag != 0)
            slope_vars_list = list(0)
            if(isSlope){
                slope_df = error_sender(prepare_df(fixef_terms_full$slope_vars, data),
                                        "Problem evaluating the variables with varying slopes in the fixed-effects part of the formula:\n")
                slope_flag = fixef_terms_full$slope_flag
                slope_vars = fixef_terms_full$slope_vars
                slope_vars_list = fixef_terms_full$slope_vars_list

                # Further controls
                not_numeric = !sapply(slope_df, is.numeric)
                if(any(not_numeric)){
                    stop("In the fixed-effects part of the formula (i.e. in ", as.character(fml_fixef[2]), "), variables with varying slopes must be numeric. Currently variable", enumerate_items(names(slope_df)[not_numeric], "s.is.quote"), " not.")
                }

                # slope_flag: 0: no Varying slope // > 0: varying slope AND fixed-effect // < 0: varying slope WITHOUT fixed-effect
                onlySlope = all(slope_flag < 0)
            }

            # fml update
            fml_fixef = .xpd(rhs = fixef_terms)

            #
            # NA handling
            #

            # non-numeric/non-character FEs (e.g. factors) are converted so that
            # complete.cases and the key-creation work uniformly
            for(j in seq_along(fixef_df)){
                if(!is.numeric(fixef_df[[j]]) && !is.character(fixef_df[[j]])){
                    fixef_df[[j]] = as.character(fixef_df[[j]])
                }
            }

            is_NA = !complete.cases(fixef_df)

            if(isSlope){
                # Convert to double (C++ routines expect doubles)
                who_not_double = which(sapply(slope_df, is.integer))
                for(j in who_not_double){
                    slope_df[[j]] = as.numeric(slope_df[[j]])
                }

                info = cpppar_which_na_inf_df(slope_df, nthreads)
                if(info$any_na_inf){
                    is_NA = is_NA | info$is_na_inf
                }
            }

            if(any(is_NA)){
                # Remember that isFixef is FALSE so far => so we only change the reg vars
                my_env = reshape_env(env = env, obs2keep = which(!is_NA))

                # NA removal in fixef
                fixef_df = fixef_df[!is_NA, , drop = FALSE]

                if(isSlope){
                    slope_df = slope_df[!is_NA, , drop = FALSE]
                }

            } else {
                my_env = new.env(parent = env)
            }

            # We remove the intercept from the linear part if needed
            # (with fixed-effects, the intercept is not identified)
            if(get("do_multi_rhs", env)){
                linear_core = get("linear_core", my_env)
                if("(Intercept)" %in% colnames(linear_core$left)){
                    # BUG FIX: was which("(Intercept)" %in% colnames(...)), which
                    # always returns 1 and thus dropped the FIRST column instead
                    # of the intercept's actual column
                    int_col = which(colnames(linear_core$left) == "(Intercept)")
                    if(ncol(linear_core$left) == 1){
                        linear_core$left = 1
                    } else {
                        linear_core$left = linear_core$left[, -int_col, drop = FALSE]
                    }
                    assign("linear_core", linear_core, my_env)
                }
            } else {
                linear.mat = get("linear.mat", my_env)
                if("(Intercept)" %in% colnames(linear.mat)){
                    # BUG FIX: same as above, locate the intercept column by name
                    int_col = which(colnames(linear.mat) == "(Intercept)")
                    if(ncol(linear.mat) == 1){
                        assign("linear.mat", 1, my_env)
                    } else {
                        assign("linear.mat", linear.mat[, -int_col, drop = FALSE], my_env)
                    }
                }
            }

            # We assign the fixed-effects
            lhs = get("lhs", my_env)

            # We delay the computation by using isSplit = TRUE and split.full = FALSE
            # Real QUF will be done in the last reshape env
            info_fe = setup_fixef(fixef_df = fixef_df, lhs = lhs, fixef_vars = fixef_vars, fixef.rm = fixef.rm, family = family, isSplit = TRUE, split.full = FALSE, origin_type = origin_type, isSlope = isSlope, slope_flag = slope_flag, slope_df = slope_df, slope_vars_list = slope_vars_list, nthreads = nthreads)

        fixef_id = info_fe$fixef_id
        fixef_names = info_fe$fixef_names
        fixef_sizes = info_fe$fixef_sizes
        fixef_table = info_fe$fixef_table
        sum_y_all = info_fe$sum_y_all
        lhs = info_fe$lhs
        obs2remove = info_fe$obs2remove
        fixef_removed = info_fe$fixef_removed
        message_fixef = info_fe$message_fixef

        slope_variables = info_fe$slope_variables
        slope_flag = info_fe$slope_flag

        fixef_id_res = info_fe$fixef_id_res
        fixef_sizes_res = info_fe$fixef_sizes_res

        new_order = info_fe$new_order

            assign("isFixef", TRUE, my_env)
            assign("new_order_original", new_order, my_env)
            assign("fixef_names", fixef_names, my_env)
            assign("fixef_vars", fixef_vars, my_env)

            assign_fixef_env(env, family, origin_type, fixef_id, fixef_sizes, fixef_table, sum_y_all, slope_flag, slope_variables, slope_vars_list)

            #
            # Formatting the fixef stuff from res
            #

            # fml & fixef_vars => other stuff will be taken care of in reshape
            res = get("res", my_env)

            res$fml_all$fixef = fml_fixef
            res$fixef_vars = fixef_vars

            if(isSlope){
                res$fixef_terms = fixef_terms
            }

            assign("res", res, my_env)

            #
            # Last reshape
            #

            my_env_est = reshape_env(my_env, assign_fixef = TRUE)

        } else {
            # No fixed-effect // new.env is indispensable => otherwise multi RHS/LHS not possible
            my_env_est = reshape_env(env)
        }

        data_results[[i]] = estfun(env = my_env_est)
    }

    # meta information for the fixest_multi container
    index = list(fixef = n_fixef)
    fixef_names = sapply(multi_fixef_fml_full, function(x) as.character(x)[[2]])
    all_names = list(fixef = fixef_names)

    res_multi = setup_multi(index, all_names, data_results)

    if("lhs" %in% names(attr(res_multi, "meta")$index)){
        res_multi = res_multi[lhs = TRUE]
    }

    return(res_multi)
}
| /R/ESTIMATION_FUNS.R | no_license | LvTolsmall/fixest | R | false | false | 154,584 | r | #----------------------------------------------#
# Author: Laurent Berge
# Date creation: Tue Apr 23 16:41:47 2019
# Purpose: All estimation functions
#----------------------------------------------#
#' Fixed-effects OLS estimation
#'
#' Estimates OLS with any number of fixed-effects.
#'
#' @inheritParams femlm
#'
#' @param fml A formula representing the relation to be estimated. For example: \code{fml = z~x+y}. To include fixed-effects, insert them in this formula using a pipe: e.g. \code{fml = z~x+y | fe_1+fe_2}. You can combine two fixed-effects with \code{^}: e.g. \code{fml = z~x+y|fe_1^fe_2}, see details. You can also use variables with varying slopes using square brackets: e.g. in \code{fml = z~y|fe_1[x] + fe_2}, see details. To add IVs, insert the endogenous vars./instruments after a pipe, like in \code{y ~ x | c(x_endo1, x_endo2) ~ x_inst1 + x_inst2}. Note that it should always be the last element, see details. Multiple estimations can be performed at once: for multiple dep. vars, wrap them in \code{c()}: ex \code{c(y1, y2)}. For multiple indep. vars, use the stepwise functions: ex \code{x1 + csw(x2, x3)}. The formula \code{fml = c(y1, y2) ~ x1 + cw0(x2, x3)} leads to 6 estimation, see details.
#' @param weights A formula or a numeric vector. Each observation can be weighted, the weights must be greater than 0. If equal to a formula, it should be one-sided: for example \code{~ var_weight}.
#' @param verbose Integer. Higher values give more information. In particular, it can detail the number of iterations in the demeaning algorithm (the first number is the left-hand-side, the other numbers are the right-hand-side variables).
#' @param demeaned Logical, default is \code{FALSE}. Only used in the presence of fixed-effects: should the centered variables be returned? If \code{TRUE}, it creates the items \code{y_demeaned} and \code{X_demeaned}.
#' @param notes Logical. By default, two notes are displayed: when NAs are removed (to show additional information) and when some observations are removed because of collinearity. To avoid displaying these messages, you can set \code{notes = FALSE}. You can remove these messages permanently by using \code{setFixest_notes(FALSE)}.
#' @param collin.tol Numeric scalar, default is \code{1e-10}. Threshold deciding when variables should be considered collinear and subsequently removed from the estimation. Higher values means more variables will be removed (if there is presence of collinearity). One signal of presence of collinearity is t-stats that are extremely low (for instance when t-stats < 1e-3).
#' @param y Numeric vector/matrix/data.frame of the dependent variable(s). Multiple dependent variables will return a \code{fixest_multi} object.
#' @param X Numeric matrix of the regressors.
#' @param fixef_df Matrix/data.frame of the fixed-effects.
#'
#' @details
#' The method used to demean each variable along the fixed-effects is based on Berge (2018), since this is the same problem to solve as for the Gaussian case in a ML setup.
#'
#' @section Combining the fixed-effects:
#' You can combine two variables to make it a new fixed-effect using \code{^}. The syntax is as follows: \code{fe_1^fe_2}. Here you created a new variable which is the combination of the two variables fe_1 and fe_2. This is identical to doing \code{paste0(fe_1, "_", fe_2)} but more convenient.
#'
#' Note that pasting is a costly operation, especially for large data sets. Thus, the internal algorithm uses a numerical trick which is fast, but the drawback is that the identity of each observation is lost (i.e. they are now equal to a meaningless number instead of being equal to \code{paste0(fe_1, "_", fe_2)}). These \dQuote{identities} are useful only if you're interested in the value of the fixed-effects (that you can extract with \code{\link[fixest]{fixef.fixest}}). If you're only interested in coefficients of the variables, it doesn't matter. Anyway, you can use \code{combine.quick = FALSE} to tell the internal algorithm to use \code{paste} instead of the numerical trick. By default, the numerical trick is performed only for large data sets.
#'
#' @section Varying slopes:
#' You can add variables with varying slopes in the fixed-effect part of the formula. The syntax is as follows: fixef_var[var1, var2]. Here the variables var1 and var2 will be with varying slopes (one slope per value in fixef_var) and the fixed-effect fixef_var will also be added.
#'
#' To add only the variables with varying slopes and not the fixed-effect, use double square brackets: fixef_var[[var1, var2]].
#'
#' In other words:
#' \itemize{
#' \item fixef_var[var1, var2] is equivalent to fixef_var + fixef_var[[var1]] + fixef_var[[var2]]
#' \item fixef_var[[var1, var2]] is equivalent to fixef_var[[var1]] + fixef_var[[var2]]
#' }
#'
#' In general, for convergence reasons, it is recommended to always add the fixed-effect and avoid using only the variable with varying slope (i.e. use single square brackets).
#'
#' @section Lagging variables:
#'
#' To use leads/lags of variables in the estimation, you can: i) provide the argument \code{panel.id}, or ii) set your data set as a panel with the function \code{\link[fixest]{panel}}. Doing either of the two will give you access to the lagging functions \code{\link[fixest]{l}}, \code{\link[fixest:l]{f}} and \code{\link[fixest:l]{d}}.
#'
#' You can provide several leads/lags/differences at once: e.g. if your formula is equal to \code{f(y) ~ l(x1, -1:1)}, it means that the dependent variable is equal to the lead of \code{y}, and you will have as explanatory variables the lead of \code{x1}, \code{x1} and the lag of \code{x1}. See the examples in function \code{\link[fixest]{l}} for more details.
#'
#' @section Interactions:
#'
#' You can interact a numeric variable with a "factor-like" variable by using \code{interact(var, fe, ref)}, where \code{fe} is the variable to be interacted with and the argument \code{ref} is a value of \code{fe} taken as a reference (optional). Instead of using the function \code{\link[fixest:i]{interact}}, you can use the alias \code{i(var, fe, ref)}.
#'
#' Using this specific way to create interactions leads to a different display of the interacted values in \code{\link[fixest]{etable}} and offers a special representation of the interacted coefficients in the function \code{\link[fixest]{coefplot}}. See examples.
#'
#' It is important to note that *if you do not care about the standard-errors of the interactions*, then you can add interactions in the fixed-effects part of the formula (using the syntax fe[[var]], as explained in the section \dQuote{Varying slopes}).
#'
#' The function \code{\link[fixest:i]{interact}} has in fact more arguments, please see details in its associated help page.
#'
#' @section On standard-errors:
#'
#' Standard-errors can be computed in different ways, you can use the arguments \code{se} and \code{dof} in \code{\link[fixest]{summary.fixest}} to define how to compute them. By default, in the presence of fixed-effects, standard-errors are automatically clustered.
#'
#' The following vignette: \href{https://cran.r-project.org/package=fixest/vignettes/standard_errors.html}{On standard-errors} describes in details how the standard-errors are computed in \code{fixest} and how you can replicate standard-errors from other software.
#'
#' You can use the functions \code{\link[fixest]{setFixest_se}} and \code{\link[fixest:dof]{setFixest_dof}} to permanently set the way the standard-errors are computed.
#'
#' @section Instrumental variables:
#'
#' To estimate two stage least square regressions, insert the relationship between the endogenous regressor(s) and the instruments in a formula, after a pipe.
#'
#' For example, \code{fml = y ~ x1 | x_endo ~ x_inst} will use the variables \code{x1} and \code{x_inst} in the first stage to explain \code{x_endo}. Then will use the fitted value of \code{x_endo} (which will be named \code{fit_x_endo}) and \code{x1} to explain \code{y}.
#' To include several endogenous regressors, just use "+", like in: \code{fml = y ~ x1 | x_endo1 + x_endo2 ~ x_inst1 + x_inst2}.
#'
#' Of course you can still add the fixed-effects, but the IV formula must always come last, like in \code{fml = y ~ x1 | fe1 + fe2 | x_endo ~ x_inst}.
#'
#' By default, the second stage regression is returned. You can access the first stage(s) regressions either directly in the slot \code{iv_first_stage} (not recommended), or using the argument \code{stage = 1} from the function \code{\link[fixest]{summary.fixest}}. For example \code{summary(iv_est, stage = 1)} will give the first stage(s). Note that using summary you can display both the second and first stages at the same time using, e.g., \code{stage = 1:2} (using \code{2:1} would reverse the order).
#'
#'
#' @section Multiple estimations:
#'
#' Multiple estimations can be performed at once, they just have to be specified in the formula. Multiple estimations yield a \code{fixest_multi} object which is \sQuote{kind of} a list of all the results but includes specific methods to access the results in a handy way.
#'
#' To include multiple dependent variables, wrap them in \code{c()} (\code{list()} also works). For instance \code{fml = c(y1, y2) ~ x1} would estimate the model \code{fml = y1 ~ x1} and then the model \code{fml = y2 ~ x1}.
#'
#' To include multiple independent variables, you need to use the stepwise functions. There are 4 stepwise functions associated to 4 short aliases. These are a) stepwise, stepwise0, cstepwise, cstepwise0, and b) sw, sw0, csw, csw0. Let's explain that.
#' Assume you have the following formula: \code{fml = y ~ x1 + sw(x2, x3)}. The stepwise function \code{sw} will estimate the following two models: \code{y ~ x1 + x2} and \code{y ~ x1 + x3}. That is, each element in \code{sw()} is sequentially, and separately, added to the formula. Had you used \code{sw0} in lieu of \code{sw}, the model \code{y ~ x1} would also have been estimated. The \code{0} in the name means that the model without any stepwise element also needs to be estimated.
#' Finally, the prefix \code{c} means cumulative: each stepwise element is added to the next. That is, \code{fml = y ~ x1 + csw(x2, x3)} would lead to the following models \code{y ~ x1 + x2} and \code{y ~ x1 + x2 + x3}. The \code{0} has the same meaning and would also lead to the model without the stepwise elements to be estimated: in other words, \code{fml = y ~ x1 + csw0(x2, x3)} leads to the following three models: \code{y ~ x1}, \code{y ~ x1 + x2} and \code{y ~ x1 + x2 + x3}.
#'
#' Multiple independent variables can be combined with multiple dependent variables, as in \code{fml = c(y1, y2) ~ csw(x1, x2, x3)} which would lead to 6 estimations. Multiple estimations can also be combined to split samples (with the arguments \code{split}, \code{fsplit}).
#'
#' Fixed-effects cannot be included in a stepwise fashion: they are there or not and stay the same for all estimations.
#'
#' A note on performance. The feature of multiple estimations has been highly optimized for \code{feols}, in particular in the presence of fixed-effects. It is faster to estimate multiple models using the formula rather than with a loop. For non-\code{feols} models using the formula is roughly similar to using a loop performance-wise.
#'
#'
#' @return
#' A \code{fixest} object. Note that \code{fixest} objects contain many elements and most of them are for internal use, they are presented here only for information. To access them, it is safer to use the user-level methods (e.g. \code{\link[fixest]{vcov.fixest}}, \code{\link[fixest]{resid.fixest}}, etc) or functions (like for instance \code{\link[fixest]{fitstat}} to access any fit statistic).
#' \item{nobs}{The number of observations.}
#' \item{fml}{The linear formula of the call.}
#' \item{call}{The call of the function.}
#' \item{method}{The method used to estimate the model.}
#' \item{family}{The family used to estimate the model.}
#' \item{fml_all}{A list containing different parts of the formula. Always contain the linear formula. Then depending on the cases: \code{fixef}: the fixed-effects, \code{iv}: the IV part of the formula.}
#' \item{fixef_vars}{The names of each fixed-effect dimension.}
#' \item{fixef_id}{The list (of length the number of fixed-effects) of the fixed-effects identifiers for each observation.}
#' \item{fixef_sizes}{The size of each fixed-effect (i.e. the number of unique identifiers for each fixed-effect dimension).}
#' \item{coefficients}{The named vector of estimated coefficients.}
#' \item{multicol}{Logical, if multicollinearity was found.}
#' \item{coeftable}{The table of the coefficients with their standard errors, z-values and p-values.}
#' \item{loglik}{The loglikelihood.}
#' \item{ssr_null}{Sum of the squared residuals of the null model (containing only the intercept).}
#' \item{ssr_fe_only}{Sum of the squared residuals of the model estimated with fixed-effects only.}
#' \item{ll_null}{The log-likelihood of the null model (containing only the intercept).}
#' \item{ll_fe_only}{The log-likelihood of the model estimated with fixed-effects only.}
#' \item{fitted.values}{The fitted values.}
#' \item{linear.predictors}{The linear predictors.}
#' \item{residuals}{The residuals (y minus the fitted values).}
#' \item{sq.cor}{Squared correlation between the dependent variable and the expected predictor (i.e. fitted.values) obtained by the estimation.}
#' \item{hessian}{The Hessian of the parameters.}
#' \item{cov.unscaled}{The variance-covariance matrix of the parameters.}
#' \item{se}{The standard-error of the parameters.}
#' \item{scores}{The matrix of the scores (first derivative for each observation).}
#' \item{residuals}{The difference between the dependent variable and the expected predictor.}
#' \item{sumFE}{The sum of the fixed-effects coefficients for each observation.}
#' \item{offset}{(When relevant.) The offset formula.}
#' \item{weights}{(When relevant.) The weights formula.}
#' \item{obs_selection}{(When relevant.) List containing vectors of integers. It represents the sequential selection of observation vis a vis the original data set.}
#' \item{collin.var}{(When relevant.) Vector containing the variables removed because of collinearity.}
#' \item{collin.coef}{(When relevant.) Vector of coefficients, where the values of the variables removed because of collinearity are NA.}
#' \item{collin.min_norm}{The minimal diagonal value of the Cholesky decomposition. Small values indicate possible presence of collinearity.}
#' \item{y_demeaned}{Only when \code{demeaned = TRUE}: the centered dependent variable.}
#' \item{X_demeaned}{Only when \code{demeaned = TRUE}: the centered explanatory variable.}
#'
#'
#' @seealso
#' See also \code{\link[fixest]{summary.fixest}} to see the results with the appropriate standard-errors, \code{\link[fixest]{fixef.fixest}} to extract the fixed-effects coefficients, and the function \code{\link[fixest]{etable}} to visualize the results of multiple estimations. For plotting coefficients: see \code{\link[fixest]{coefplot}}.
#'
#' And other estimation methods: \code{\link[fixest]{femlm}}, \code{\link[fixest]{feglm}}, \code{\link[fixest:feglm]{fepois}}, \code{\link[fixest:femlm]{fenegbin}}, \code{\link[fixest]{feNmlm}}.
#'
#' @author
#' Laurent Berge
#'
#' @references
#'
#' Berge, Laurent, 2018, "Efficient estimation of maximum likelihood models with multiple fixed-effects: the R package FENmlm." CREA Discussion Papers, 13 (\url{https://wwwen.uni.lu/content/download/110162/1299525/file/2018_13}).
#'
#' For models with multiple fixed-effects:
#'
#' Gaure, Simen, 2013, "OLS with multiple high dimensional category variables", Computational Statistics & Data Analysis 66 pp. 8--18
#'
#' @examples
#'
#' #
#' # Basic estimation
#' #
#'
#' res = feols(Sepal.Length ~ Sepal.Width + Petal.Length, iris)
#' # You can specify clustered standard-errors in summary:
#' summary(res, cluster = ~Species)
#'
#' #
#' # Just one set of fixed-effects:
#' #
#'
#' res = feols(Sepal.Length ~ Sepal.Width + Petal.Length | Species, iris)
#' # By default, the SEs are clustered according to the first fixed-effect
#' summary(res)
#'
#' #
#' # Varying slopes:
#' #
#'
#' res = feols(Sepal.Length ~ Petal.Length | Species[Sepal.Width], iris)
#' summary(res)
#'
#' #
#' # Combining the FEs:
#' #
#'
#' base = iris
#' base$fe_2 = rep(1:10, 15)
#' res_comb = feols(Sepal.Length ~ Petal.Length | Species^fe_2, base)
#' summary(res_comb)
#' fixef(res_comb)[[1]]
#'
#' #
#' # Using leads/lags:
#' #
#'
#' data(base_did)
#' # We need to set up the panel with the arg. panel.id
#' est1 = feols(y ~ l(x1, 0:1), base_did, panel.id = ~id+period)
#' est2 = feols(f(y) ~ l(x1, -1:1), base_did, panel.id = ~id+period)
#' etable(est1, est2, order = "f", drop="Int")
#'
#' #
#' # Using interactions:
#' #
#'
#' data(base_did)
#' # We interact the variable 'period' with the variable 'treat'
#' est_did = feols(y ~ x1 + i(treat, period, 5) | id+period, base_did)
#'
#' # Now we can plot the result of the interaction with coefplot
#' coefplot(est_did)
#' # You have many more example in coefplot help
#'
#' #
#' # Instrumental variables
#' #
#'
#' # To estimate Two stage least squares,
#' # insert a formula describing the endo. vars./instr. relation after a pipe:
#'
#' base = iris
#' names(base) = c("y", "x1", "x2", "x3", "fe1")
#' base$x_inst1 = 0.2 * base$x1 + 0.7 * base$x2 + rpois(150, 2)
#' base$x_inst2 = 0.2 * base$x2 + 0.7 * base$x3 + rpois(150, 3)
#' base$x_endo1 = 0.5 * base$y + 0.5 * base$x3 + rnorm(150, sd = 2)
#' base$x_endo2 = 1.5 * base$y + 0.5 * base$x3 + 3 * base$x_inst1 + rnorm(150, sd = 5)
#'
#' # Using 2 controls, 1 endogenous var. and 1 instrument
#' res_iv = feols(y ~ x1 + x2 | x_endo1 ~ x_inst1, base)
#'
#' # The second stage is the default
#' summary(res_iv)
#'
#' # To show the first stage:
#' summary(res_iv, stage = 1)
#'
#' # To show both the first and second stages:
#' summary(res_iv, stage = 1:2)
#'
#' # Adding a fixed-effect => IV formula always last!
#' res_iv_fe = feols(y ~ x1 + x2 | fe1 | x_endo1 ~ x_inst1, base)
#'
#' # With two endogenous regressors
#' res_iv2 = feols(y ~ x1 + x2 | x_endo1 + x_endo2 ~ x_inst1 + x_inst2, base)
#'
#' # Now there's two first stages => a fixest_multi object is returned
#' sum_res_iv2 = summary(res_iv2, stage = 1)
#'
#' # You can navigate through it by subsetting:
#' sum_res_iv2[iv = 1]
#'
#' # The stage argument also works in etable:
#' etable(res_iv, res_iv_fe, res_iv2, order = "endo")
#'
#' etable(res_iv, res_iv_fe, res_iv2, stage = 1:2, order = c("endo", "inst"),
#' group = list(control = "!endo|inst"))
#'
#' #
#' # Multiple estimations:
#' #
#'
#' # 6 estimations
#' est_mult = feols(c(Ozone, Solar.R) ~ Wind + Temp + csw0(Wind:Temp, Day), airquality)
#'
#' # We can display the results for the first lhs:
#' etable(est_mult[lhs = 1])
#'
#' # And now the second (access can be made by name)
#' etable(est_mult[lhs = "Solar.R"])
#'
#' # Now we focus on the two last right hand sides
#' # (note that .N can be used to specify the last item)
#' etable(est_mult[rhs = 2:.N])
#'
#' # Combining with split
#' est_split = feols(c(Ozone, Solar.R) ~ sw(poly(Wind, 2), poly(Temp, 2)),
#' airquality, split = ~ Month)
#'
#' # You can display everything at once with the print method
#' est_split
#'
#' # Different way of displaying the results with "compact"
#' summary(est_split, "compact")
#'
#' # You can still select which sample/LHS/RHS to display
#' est_split[sample = 1:2, lhs = 1, rhs = 1]
#'
feols = function(fml, data, weights, offset, subset, split, fsplit, cluster, se, dof, panel.id, fixef, fixef.rm = "none", fixef.tol = 1e-6,
fixef.iter = 10000, collin.tol = 1e-10, nthreads = getFixest_nthreads(), lean = FALSE, verbose = 0, warn = TRUE,
notes = getFixest_notes(), combine.quick, demeaned = FALSE, mem.clean = FALSE, only.env = FALSE, env, ...){
dots = list(...)
# 1st: is the call coming from feglm?
fromGLM = FALSE
skip_fixef = FALSE
if("X" %in% names(dots)){
fromGLM = TRUE
# env is provided by feglm
X = dots$X
y = as.vector(dots$y)
init = dots$means
correct_0w = dots$correct_0w
if(verbose){
time_start = proc.time()
gt = function(x, nl = TRUE) cat(sfill(x, 20), ": ", -(t0 - (t0<<-proc.time()))[3], "s", ifelse(nl, "\n", ""), sep = "")
t0 = proc.time()
}
} else {
time_start = proc.time()
gt = function(x, nl = TRUE) cat(sfill(x, 20), ": ", -(t0 - (t0<<-proc.time()))[3], "s", ifelse(nl, "\n", ""), sep = "")
t0 = proc.time()
# we use fixest_env for appropriate controls and data handling
if(missing(env)){
set_defaults("fixest_estimation")
call_env = new.env(parent = parent.frame())
env = try(fixest_env(fml = fml, data = data, weights = weights, offset = offset, subset = subset, split = split, fsplit = fsplit, cluster = cluster, se = se, dof = dof, panel.id = panel.id, fixef = fixef, fixef.rm = fixef.rm, fixef.tol = fixef.tol, fixef.iter = fixef.iter, collin.tol = collin.tol, nthreads = nthreads, lean = lean, verbose = verbose, warn = warn, notes = notes, combine.quick = combine.quick, demeaned = demeaned, mem.clean = mem.clean, origin = "feols", mc_origin = match.call(), call_env = call_env, ...), silent = TRUE)
} else if((r <- !is.environment(env)) || !isTRUE(env$fixest_env)) {
stop("Argument 'env' must be an environment created by a fixest estimation. Currently it is not ", ifelse(r, "an", "a 'fixest'"), " environment.")
}
if("try-error" %in% class(env)){
stop(format_error_msg(env, "feols"))
}
check_arg(only.env, "logical scalar")
if(only.env){
return(env)
}
y = get("lhs", env)
X = get("linear.mat", env)
nthreads = get("nthreads", env)
init = 0
# demeaned variables
if(!is.null(dots$X_demean)){
skip_fixef = TRUE
X_demean = dots$X_demean
y_demean = dots$y_demean
}
# offset
offset = get("offset.value", env)
isOffset = length(offset) > 1
if(isOffset){
y = y - offset
}
# weights
weights = get("weights.value", env)
isWeight = length(weights) > 1
correct_0w = FALSE
mem.clean = get("mem.clean", env)
demeaned = get("demeaned", env)
verbose = get("verbose", env)
if(verbose >= 2) gt("Setup")
}
isFixef = get("isFixef", env)
# Used to solve with the reduced model
xwx = dots$xwx
xwy = dots$xwy
#
# Split ####
#
do_split = get("do_split", env)
if(do_split){
res = multi_split(env, feols)
return(res)
}
#
# Multi fixef ####
#
do_multi_fixef = get("do_multi_fixef", env)
if(do_multi_fixef){
res = multi_fixef(env, feols)
return(res)
}
#
# Multi LHS and RHS ####
#
do_multi_lhs = get("do_multi_lhs", env)
do_multi_rhs = get("do_multi_rhs", env)
if(do_multi_lhs || do_multi_rhs){
assign("do_multi_lhs", FALSE, env)
assign("do_multi_rhs", FALSE, env)
do_iv = get("do_iv", env)
fml = get("fml", env)
lhs_names = get("lhs_names", env)
lhs = y
if(do_multi_lhs){
# We find out which LHS have the same NA patterns => saves a lot of computation
n_lhs = length(lhs)
lhs_group_is_na = list()
lhs_group_id = c()
lhs_group_n_na = c()
for(i in 1:n_lhs){
is_na_current = !is.finite(lhs[[i]])
n_na_current = sum(is_na_current)
if(i == 1){
lhs_group_id = 1
lhs_group_is_na[[1]] = is_na_current
lhs_group_n_na[1] = n_na_current
} else {
qui = which(lhs_group_n_na == n_na_current)
if(length(qui) > 0){
if(n_na_current == 0){
# no need to check the pattern
lhs_group_id[i] = lhs_group_id[qui[1]]
next
}
for(j in qui){
if(all(is_na_current == lhs_group_is_na[[j]])){
lhs_group_id[i] = lhs_group_id[j]
next
}
}
}
# if here => new group because couldn't be matched
id = max(lhs_group_id) + 1
lhs_group_id[i] = id
lhs_group_is_na[[id]] = is_na_current
lhs_group_n_na[id] = n_na_current
}
}
# we make groups
lhs_group = list()
for(i in 1:max(lhs_group_id)){
lhs_group[[i]] = which(lhs_group_id == i)
}
} else if(do_multi_lhs == FALSE){
lhs_group_is_na = list(FALSE)
lhs_group_n_na = 0
lhs_group = list(1)
lhs = list(lhs) # I really abuse R shallow copy system...
names(lhs) = deparse_long(fml[[2]])
}
if(do_multi_rhs){
rhs_info_stepwise = get("rhs_info_stepwise", env)
multi_rhs_fml_full = rhs_info_stepwise$fml_all_full
multi_rhs_fml_sw = rhs_info_stepwise$fml_all_sw
multi_rhs_cumul = rhs_info_stepwise$is_cumul
linear_core = get("linear_core", env)
rhs = get("rhs_sw", env)
# Two schemes:
# - if cumulative: we take advantage of it => both in demeaning and in estimation
# - if regular stepwise => only in demeaning
# => of course this is dependent on the pattern of NAs
#
n_core_left = ifelse(length(linear_core$left) == 1, 0, ncol(linear_core$left))
n_core_right = ifelse(length(linear_core$right) == 1, 0, ncol(linear_core$right))
# rnc: running number of columns
rnc = n_core_left
if(rnc == 0){
col_start = integer(0)
} else {
col_start = 1:rnc
}
rhs_group_is_na = list()
rhs_group_id = c()
rhs_group_n_na = c()
rhs_n_vars = c()
rhs_col_id = list()
any_na_rhs = FALSE
for(i in seq_along(multi_rhs_fml_sw)){
# We evaluate the extra data and check the NA pattern
my_fml = multi_rhs_fml_sw[[i]]
if(i == 1 && (multi_rhs_cumul || identical(my_fml[[3]], 1))){
# That case is already in the main linear.mat => no NA
rhs_group_id = 1
rhs_group_is_na[[1]] = FALSE
rhs_group_n_na[1] = 0
rhs_n_vars[1] = 0
rhs[[1]] = 0
if(rnc == 0){
rhs_col_id[[1]] = integer(0)
} else {
rhs_col_id[[1]] = 1:rnc
}
next
}
rhs_current = rhs[[i]]
rhs_n_vars[i] = ncol(rhs_current)
info = cpppar_which_na_inf_mat(rhs_current, nthreads)
is_na_current = info$is_na_inf
if(multi_rhs_cumul && any_na_rhs){
# we cumulate the NAs
is_na_current = is_na_current | rhs_group_is_na[[rhs_group_id[i - 1]]]
info$any_na_inf = any(is_na_current)
}
n_na_current = 0
if(info$any_na_inf){
any_na_rhs = TRUE
n_na_current = sum(is_na_current)
} else {
# NULL would lead to problems down the road
is_na_current = FALSE
}
if(i == 1){
rhs_group_id = 1
rhs_group_is_na[[1]] = is_na_current
rhs_group_n_na[1] = n_na_current
} else {
qui = which(rhs_group_n_na == n_na_current)
if(length(qui) > 0){
if(n_na_current == 0){
# no need to check the pattern
rhs_group_id[i] = rhs_group_id[qui[1]]
next
}
go_next = FALSE
for(j in qui){
if(all(is_na_current == rhs_group_is_na[[j]])){
rhs_group_id[i] = rhs_group_id[j]
go_next = TRUE
break
}
}
if(go_next) next
}
# if here => new group because couldn't be matched
id = max(rhs_group_id) + 1
rhs_group_id[i] = id
rhs_group_is_na[[id]] = is_na_current
rhs_group_n_na[id] = n_na_current
}
}
# we make groups
rhs_group = list()
for(i in 1:max(rhs_group_id)){
rhs_group[[i]] = which(rhs_group_id == i)
}
# Finding the right column IDs to select
rhs_group_n_vars = rep(0, length(rhs_group)) # To get the total nber of cols per group
for(i in seq_along(multi_rhs_fml_sw)){
if(multi_rhs_cumul){
rnc = rnc + rhs_n_vars[i]
if(rnc == 0){
rhs_col_id[[i]] = integer(0)
} else {
rhs_col_id[[i]] = 1:rnc
}
} else {
id = rhs_group_id[i]
rhs_col_id[[i]] = c(col_start, seq(rnc + rhs_group_n_vars[id] + 1, length.out = rhs_n_vars[i]))
rhs_group_n_vars[id] = rhs_group_n_vars[id] + rhs_n_vars[i]
}
}
if(n_core_right > 0){
# We adjust
if(multi_rhs_cumul){
for(i in seq_along(multi_rhs_fml_sw)){
id = rhs_group_id[i]
gmax = max(rhs_group[[id]])
rhs_col_id[[i]] = c(rhs_col_id[[i]], n_core_left + sum(rhs_n_vars[1:gmax]) + 1:n_core_right)
}
} else {
for(i in seq_along(multi_rhs_fml_sw)){
id = rhs_group_id[i]
rhs_col_id[[i]] = c(rhs_col_id[[i]], n_core_left + rhs_group_n_vars[id] + 1:n_core_right)
}
}
}
} else if(do_multi_rhs == FALSE){
multi_rhs_fml_full = list(.xpd(rhs = fml[[3]]))
multi_rhs_cumul = FALSE
rhs_group_is_na = list(FALSE)
rhs_group_n_na = 0
rhs_n_vars = 0
rhs_group = list(1)
rhs = list(0)
rhs_col_id = list(1:NCOL(X))
linear_core = list(left = X, right = 1)
}
isLinear_right = length(linear_core$right) > 1
isLinear = length(linear_core$left) > 1 || isLinear_right
n_lhs = length(lhs)
n_rhs = length(rhs)
res = vector("list", n_lhs * n_rhs)
rhs_names = sapply(multi_rhs_fml_full, function(x) as.character(x)[[2]])
for(i in seq_along(lhs_group)){
for(j in seq_along(rhs_group)){
# NA removal
no_na = FALSE
if(lhs_group_n_na[i] > 0){
if(rhs_group_n_na[j] > 0){
is_na_current = lhs_group_is_na[[i]] | rhs_group_is_na[[j]]
} else {
is_na_current = lhs_group_is_na[[i]]
}
} else if(rhs_group_n_na[j] > 0){
is_na_current = rhs_group_is_na[[j]]
} else {
no_na = TRUE
}
# Here it depends on whether there are FEs or not, whether it's cumul or not
my_lhs = lhs[lhs_group[[i]]]
if(isLinear){
my_rhs = linear_core[1]
if(multi_rhs_cumul){
gmax = max(rhs_group[[j]])
my_rhs[1 + (1:gmax)] = rhs[1:gmax]
} else {
for(u in rhs_group[[j]]){
if(length(rhs[[u]]) > 1){
my_rhs[[length(my_rhs) + 1]] = rhs[[u]]
}
}
}
if(isLinear_right){
my_rhs[[length(my_rhs) + 1]] = linear_core$right
}
} else{
rhs_len = lengths(rhs)
if(multi_rhs_cumul){
gmax = max(rhs_group[[j]])
my_rhs = rhs[rhs_len > 1 & seq_along(rhs) <= gmax]
} else {
my_rhs = rhs[rhs_len > 1 & seq_along(rhs) %in% rhs_group[[j]]]
}
if(isLinear_right){
my_rhs[[length(my_rhs) + 1]] = linear_core$right
}
}
len_all = lengths(my_rhs)
if(any(len_all == 1)){
my_rhs = my_rhs[len_all > 1]
}
if(!no_na){
# NA removal
for(u in seq_along(my_lhs)){
my_lhs[[u]] = my_lhs[[u]][!is_na_current]
}
for(u in seq_along(my_rhs)){
if(length(my_rhs[[u]]) > 1) my_rhs[[u]] = my_rhs[[u]][!is_na_current, , drop = FALSE]
}
my_env = reshape_env(env, obs2keep = which(!is_na_current), assign_lhs = FALSE, assign_rhs = FALSE)
} else {
my_env = reshape_env(env)
}
isLinear_current = TRUE
if(length(my_rhs) == 0){
X_all = 0
isLinear_current = FALSE
} else {
X_all = do.call("cbind", my_rhs)
}
if(do_iv){
# We need to GET them => they have been modified in my_env
iv_lhs = get("iv_lhs", my_env)
iv.mat = get("iv.mat", my_env)
n_inst = ncol(iv.mat)
}
if(isFixef){
# We batch demean
n_vars_X = ifelse(is.null(ncol(X_all)), 0, ncol(X_all))
# fixef information
fixef_sizes = get("fixef_sizes", my_env)
fixef_table_vector = get("fixef_table_vector", my_env)
fixef_id_list = get("fixef_id_list", my_env)
slope_flag = get("slope_flag", my_env)
slope_vars = get("slope_variables", my_env)
if(mem.clean) gc()
vars_demean = cpp_demean(my_lhs, X_all, weights, iterMax = fixef.iter,
diffMax = fixef.tol, r_nb_id_Q = fixef_sizes,
fe_id_list = fixef_id_list, table_id_I = fixef_table_vector,
slope_flag_Q = slope_flag, slope_vars_list = slope_vars,
r_init = init, nthreads = nthreads)
X_demean = vars_demean$X_demean
y_demean = vars_demean$y_demean
if(do_iv){
iv_vars_demean = cpp_demean(iv_lhs, iv.mat, weights, iterMax = fixef.iter,
diffMax = fixef.tol, r_nb_id_Q = fixef_sizes,
fe_id_list = fixef_id_list, table_id_I = fixef_table_vector,
slope_flag_Q = slope_flag, slope_vars_list = slope_vars,
r_init = init, nthreads = nthreads)
iv.mat_demean = iv_vars_demean$X_demean
iv_lhs_demean = iv_vars_demean$y_demean
}
}
# We precompute the solution
if(do_iv){
if(isFixef){
iv_products = cpp_iv_products(X = X_demean, y = y_demean,
Z = iv.mat_demean, u = iv_lhs_demean,
w = weights, nthreads = nthreads)
} else {
iv_products = cpp_iv_products(X = X_all, y = my_lhs, Z = iv.mat,
u = iv_lhs, w = weights, nthreads = nthreads)
}
} else {
if(isFixef){
my_products = cpp_sparse_products(X_demean, weights, y_demean, nthreads = nthreads)
} else {
my_products = cpp_sparse_products(X_all, weights, my_lhs, nthreads = nthreads)
}
xwx = my_products$XtX
xwy = my_products$Xty
}
for(ii in seq_along(my_lhs)){
i_lhs = lhs_group[[i]][ii]
for(jj in rhs_group[[j]]){
qui = rhs_col_id[[jj]]
if(isLinear_current){
my_X = X_all[, qui, drop = FALSE]
} else {
my_X = 0
}
my_fml = .xpd(lhs = lhs_names[i_lhs], rhs = multi_rhs_fml_full[[jj]])
current_env = reshape_env(my_env, lhs = my_lhs[[ii]], rhs = my_X, fml_linear = my_fml)
if(do_iv){
if(isLinear_current){
qui_iv = c(1:n_inst, n_inst + qui)
XtX = iv_products$XtX[qui, qui, drop = FALSE]
Xty = iv_products$Xty[[ii]][qui]
} else {
qui_iv = 1:n_inst
XtX = matrix(0, 1, 1)
Xty = matrix(0, 1, 1)
}
my_iv_products = list(XtX = XtX,
Xty = Xty,
ZXtZX = iv_products$ZXtZX[qui_iv, qui_iv, drop = FALSE],
ZXtu = lapply(iv_products$ZXtu, function(x) x[qui_iv]))
if(isFixef){
my_res = feols(env = current_env, iv_products = my_iv_products,
X_demean = X_demean[ , qui, drop = FALSE],
y_demean = y_demean[[ii]],
iv.mat_demean = iv.mat_demean, iv_lhs_demean = iv_lhs_demean)
} else {
my_res = feols(env = current_env, iv_products = my_iv_products)
}
} else {
if(isFixef){
my_res = feols(env = current_env, xwx = xwx[qui, qui, drop = FALSE], xwy = xwy[[ii]][qui],
X_demean = X_demean[ , qui, drop = FALSE],
y_demean = y_demean[[ii]])
} else {
my_res = feols(env = current_env, xwx = xwx[qui, qui, drop = FALSE], xwy = xwy[[ii]][qui])
}
}
res[[index_2D_to_1D(i_lhs, jj, n_rhs)]] = my_res
}
}
}
}
# Meta information for fixest_multi
index = list(lhs = n_lhs, rhs = n_rhs)
all_names = list(lhs = lhs_names, rhs = rhs_names)
# result
res_multi = setup_multi(index, all_names, res)
return(res_multi)
}
#
# IV ####
#
do_iv = get("do_iv", env)
if(do_iv){
assign("do_iv", FALSE, env)
assign("verbose", 0, env)
# Loaded already
# y: lhs
# X: linear.mat
iv_lhs = get("iv_lhs", env)
iv_lhs_names = get("iv_lhs_names", env)
iv.mat = get("iv.mat", env) # we enforce (before) at least one variable in iv.mat
K = ncol(iv.mat)
n_endo = length(iv_lhs)
lean = get("lean", env)
# Simple check that the function is not misused
pblm = intersect(iv_lhs_names, colnames(X))
if(length(pblm) > 0){
any_exo = length(setdiff(colnames(X), iv_lhs_names)) > 0
msg = if(any_exo) "" else " If there is no exogenous variable, just use '1' in the first part of the formula."
stop("Endogenous variables should not be used as exogenous regressors. The variable", enumerate_items(pblm, "s.quote.were"), " found in the first part of the multipart formula: ", ifsingle(pblm, "it", "they"), " should not be there.", msg)
}
if(isFixef){
# we batch demean first
n_vars_X = ifelse(is.null(ncol(X)), 0, ncol(X))
if(mem.clean) gc()
if(!is.null(dots$iv_products)){
# means this is a call from multiple LHS/RHS
X_demean = dots$X_demean
y_demean = dots$y_demean
iv.mat_demean = dots$iv.mat_demean
iv_lhs_demean = dots$iv_lhs_demean
iv_products = dots$iv_products
} else {
# fixef information
fixef_sizes = get("fixef_sizes", env)
fixef_table_vector = get("fixef_table_vector", env)
fixef_id_list = get("fixef_id_list", env)
slope_flag = get("slope_flag", env)
slope_vars = get("slope_variables", env)
vars_demean = cpp_demean(y, X, weights, iterMax = fixef.iter,
diffMax = fixef.tol, r_nb_id_Q = fixef_sizes,
fe_id_list = fixef_id_list, table_id_I = fixef_table_vector,
slope_flag_Q = slope_flag, slope_vars_list = slope_vars,
r_init = init, nthreads = nthreads)
iv_vars_demean = cpp_demean(iv_lhs, iv.mat, weights, iterMax = fixef.iter,
diffMax = fixef.tol, r_nb_id_Q = fixef_sizes,
fe_id_list = fixef_id_list, table_id_I = fixef_table_vector,
slope_flag_Q = slope_flag, slope_vars_list = slope_vars,
r_init = init, nthreads = nthreads)
X_demean = vars_demean$X_demean
y_demean = vars_demean$y_demean
iv.mat_demean = iv_vars_demean$X_demean
iv_lhs_demean = iv_vars_demean$y_demean
# We precompute the solution
iv_products = cpp_iv_products(X = X_demean, y = y_demean, Z = iv.mat_demean,
u = iv_lhs_demean, w = weights, nthreads = nthreads)
}
if(n_vars_X == 0){
ZX_demean = iv.mat_demean
ZX = iv.mat
} else {
ZX_demean = cbind(iv.mat_demean, X_demean)
ZX = cbind(iv.mat, X)
}
# First stage(s)
ZXtZX = iv_products$ZXtZX
ZXtu = iv_products$ZXtu
res_first_stage = list()
for(i in 1:n_endo){
current_env = reshape_env(env, lhs = iv_lhs[[i]], rhs = ZX, fml_iv_endo = iv_lhs_names[i])
my_res = feols(env = current_env, xwx = ZXtZX, xwy = ZXtu[[i]],
X_demean = ZX_demean, y_demean = iv_lhs_demean[[i]],
add_fitted_demean = TRUE, iv_call = TRUE)
# For the F-stats
if(n_vars_X == 0){
my_res$ssr_no_inst = cpp_ssq(iv_lhs_demean[[i]], weights)
} else {
fit_no_inst = ols_fit(iv_lhs_demean[[i]], X_demean, w = weights, correct_0w = FALSE,
collin.tol = collin.tol, nthreads = nthreads,
xwx = iv_products$XtX, xwy = ZXtu[[i]][-(1:K)])
my_res$ssr_no_inst = cpp_ssq(fit_no_inst$residuals, weights)
}
my_res$iv_stage = 1
my_res$iv_inst_names_xpd = colnames(iv.mat)
res_first_stage[[iv_lhs_names[i]]] = my_res
}
if(verbose >= 2) gt("1st stage(s)")
# Second stage
if(n_endo == 1){
res_FS = res_first_stage[[1]]
U = as.matrix(res_FS$fitted.values)
U_demean = as.matrix(res_FS$fitted.values_demean)
} else {
U_list = list()
U_dm_list = list()
for(i in 1:n_endo){
res_FS = res_first_stage[[i]]
U_list[[i]] = res_FS$fitted.values
U_dm_list[[i]] = res_FS$fitted.values_demean
}
U = do.call("cbind", U_list)
U_demean = do.call("cbind", U_dm_list)
}
colnames(U) = colnames(U_demean) = paste0("fit_", iv_lhs_names)
if(n_vars_X == 0){
UX = as.matrix(U)
UX_demean = as.matrix(U_demean)
} else {
UX = cbind(U, X)
UX_demean = cbind(U_demean, X_demean)
}
XtX = iv_products$XtX
Xty = iv_products$Xty
iv_prod_second = cpp_iv_product_completion(XtX = XtX, Xty = Xty, X = X_demean,
y = y_demean, U = U_demean, w = weights, nthreads = nthreads)
UXtUX = iv_prod_second$UXtUX
UXty = iv_prod_second$UXty
resid_s1 = lapply(res_first_stage, function(x) x$residuals)
current_env = reshape_env(env, rhs = UX)
res_second_stage = feols(env = current_env, xwx = UXtUX, xwy = UXty,
X_demean = UX_demean, y_demean = y_demean,
resid_1st_stage = resid_s1, iv_call = TRUE)
# For the F-stats
if(n_vars_X == 0){
res_second_stage$ssr_no_endo = cpp_ssq(y_demean, weights)
} else {
fit_no_endo = ols_fit(y_demean, X_demean, w = weights, correct_0w = FALSE,
collin.tol = collin.tol, nthreads = nthreads,
xwx = XtX, xwy = Xty)
res_second_stage$ssr_no_endo = cpp_ssq(fit_no_endo$residuals, weights)
}
} else {
# fixef == FALSE
# We precompute the solution
if(!is.null(dots$iv_products)){
# means this is a call from multiple LHS/RHS
iv_products = dots$iv_products
} else {
iv_products = cpp_iv_products(X = X, y = y, Z = iv.mat,
u = iv_lhs, w = weights, nthreads = nthreads)
}
if(verbose >= 2) gt("IV products")
ZX = cbind(iv.mat, X)
# First stage(s)
ZXtZX = iv_products$ZXtZX
ZXtu = iv_products$ZXtu
# Let's put the intercept first => I know it's not really elegant, but that's life
is_int = "(Intercept)" %in% colnames(X)
if(is_int){
nz = ncol(iv.mat)
nzx = ncol(ZX)
qui = c(nz + 1, (1:nzx)[-(nz + 1)])
ZX = ZX[, qui, drop = FALSE]
ZXtZX = ZXtZX[qui, qui, drop = FALSE]
for(i in seq_along(ZXtu)){
ZXtu[[i]] = ZXtu[[i]][qui]
}
}
res_first_stage = list()
for(i in 1:n_endo){
current_env = reshape_env(env, lhs = iv_lhs[[i]], rhs = ZX, fml_iv_endo = iv_lhs_names[i])
my_res = feols(env = current_env, xwx = ZXtZX, xwy = ZXtu[[i]], iv_call = TRUE)
# For the F-stats
fit_no_inst = ols_fit(iv_lhs[[i]], X, w = weights, correct_0w = FALSE, collin.tol = collin.tol, nthreads = nthreads,
xwx = ZXtZX[-(1:K + is_int), -(1:K + is_int), drop = FALSE], xwy = ZXtu[[i]][-(1:K + is_int)])
my_res$ssr_no_inst = cpp_ssq(fit_no_inst$residuals, weights)
my_res$iv_stage = 1
my_res$iv_inst_names_xpd = colnames(iv.mat)
res_first_stage[[iv_lhs_names[i]]] = my_res
}
if(verbose >= 2) gt("1st stage(s)")
# Second stage
if(n_endo == 1){
res_FS = res_first_stage[[1]]
U = as.matrix(res_FS$fitted.values)
} else {
U_list = list()
U_dm_list = list()
for(i in 1:n_endo){
res_FS = res_first_stage[[i]]
U_list[[i]] = res_FS$fitted.values
}
U = do.call("cbind", U_list)
}
colnames(U) = paste0("fit_", iv_lhs_names)
UX = cbind(U, X)
XtX = iv_products$XtX
Xty = iv_products$Xty
iv_prod_second = cpp_iv_product_completion(XtX = XtX, Xty = Xty, X = X,
y = y, U = U, w = weights, nthreads = nthreads)
UXtUX = iv_prod_second$UXtUX
UXty = iv_prod_second$UXty
if(is_int){
nu = ncol(U)
nux = ncol(UX)
qui = c(nu + 1, (1:nux)[-(nu + 1)])
UX = UX[, qui, drop = FALSE]
UXtUX = UXtUX[qui, qui, drop = FALSE]
UXty = UXty[qui]
}
resid_s1 = lapply(res_first_stage, function(x) x$residuals)
current_env = reshape_env(env, rhs = UX)
res_second_stage = feols(env = current_env, xwx = UXtUX, xwy = UXty,
resid_1st_stage = resid_s1, iv_call = TRUE)
# For the F-stats
fit_no_endo = ols_fit(y, X, w = weights, correct_0w = FALSE,
collin.tol = collin.tol, nthreads = nthreads,
xwx = XtX, xwy = Xty)
res_second_stage$ssr_no_endo = cpp_ssq(fit_no_endo$residuals, weights)
}
if(verbose >= 2) gt("2nd stage")
#
# Wu-Hausman endogeneity test
#
# Current limitation => only standard vcov => later add argument (which would yield the full est.)?
# The problem of the full est. is that it takes memory very likely needlessly
if(isFixef){
ENDO_demean = do.call(cbind, iv_lhs_demean)
iv_prod_wh = cpp_iv_product_completion(XtX = UXtUX, Xty = UXty,
X = UX_demean, y = y_demean, U = ENDO_demean,
w = weights, nthreads = nthreads)
RHS_wh = cbind(ENDO_demean, UX_demean)
fit_wh = ols_fit(y_demean, RHS_wh, w = weights, correct_0w = FALSE, collin.tol = collin.tol,
nthreads = nthreads, xwx = iv_prod_wh$UXtUX, xwy = iv_prod_wh$UXty)
} else {
ENDO = do.call(cbind, iv_lhs)
iv_prod_wh = cpp_iv_product_completion(XtX = UXtUX, Xty = UXty,
X = UX, y = y, U = ENDO,
w = weights, nthreads = nthreads)
RHS_wh = cbind(ENDO, UX)
fit_wh = ols_fit(y, RHS_wh, w = weights, correct_0w = FALSE, collin.tol = collin.tol,
nthreads = nthreads, xwx = iv_prod_wh$UXtUX, xwy = iv_prod_wh$UXty)
}
df1 = n_endo
df2 = length(y) - (res_second_stage$nparams + df1)
if(any(fit_wh$is_excluded)){
stat = p = NA
} else {
qui = df1 + 1:df1 + ("(Intercept)" %in% names(res_second_stage$coefficients))
my_coef = fit_wh$coefficients[qui]
vcov_wh = fit_wh$xwx_inv[qui, qui] * cpp_ssq(fit_wh$residuals, weights) / df2
stat = drop(my_coef %*% solve(vcov_wh) %*% my_coef) / df1
p = pf(stat, df1, df2, lower.tail = FALSE)
}
res_second_stage$iv_wh = list(stat = stat, p = p, df1 = df1, df2 = df2)
#
# Sargan
#
if(n_endo < ncol(iv.mat)){
df = ncol(iv.mat) - n_endo
resid_2nd = res_second_stage$residuals
if(isFixef){
xwy = cpppar_xwy(ZX_demean, resid_2nd, weights, nthreads)
fit_sargan = ols_fit(resid_2nd, ZX_demean, w = weights, correct_0w = FALSE, collin.tol = collin.tol,
nthreads = nthreads, xwx = ZXtZX, xwy = xwy)
} else {
xwy = cpppar_xwy(ZX, resid_2nd, weights, nthreads)
fit_sargan = ols_fit(resid_2nd, ZX, w = weights, correct_0w = FALSE, collin.tol = collin.tol,
nthreads = nthreads, xwx = ZXtZX, xwy = xwy)
}
r = fit_sargan$residuals
stat = length(r) * (1 - cpp_ssq(r, weights) / cpp_ssr_null(resid_2nd))
p = pchisq(stat, df, lower.tail = FALSE)
res_second_stage$iv_sargan = list(stat = stat, p = p, df = df)
}
# extra information
res_second_stage$iv_inst_names_xpd = res_first_stage[[1]]$iv_inst_names_xpd
res_second_stage$iv_endo_names_fit = paste0("fit_", res_second_stage$iv_endo_names)
# if lean = TRUE: we clean the IV residuals (which were needed so far)
if(lean){
for(i in 1:n_endo){
res_first_stage[[i]]$residuals = NULL
res_first_stage[[i]]$fitted.values = NULL
res_first_stage[[i]]$fitted.values_demean = NULL
}
res_second_stage$residuals = NULL
res_second_stage$fitted.values = NULL
res_second_stage$fitted.values_demean = NULL
}
res_second_stage$iv_first_stage = res_first_stage
# meta info
res_second_stage$iv_stage = 2
return(res_second_stage)
}
#
# Regular estimation ####
#
onlyFixef = length(X) == 1
if(fromGLM){
res = list(coefficients = NA)
} else {
res = get("res", env)
}
if(skip_fixef){
# Variables were already demeaned
} else if(!isFixef){
# No Fixed-effects
y_demean = y
X_demean = X
res$means = 0
} else {
time_demean = proc.time()
# Number of nthreads
n_vars_X = ifelse(is.null(ncol(X)), 0, ncol(X))
# fixef information
fixef_sizes = get("fixef_sizes", env)
fixef_table_vector = get("fixef_table_vector", env)
fixef_id_list = get("fixef_id_list", env)
slope_flag = get("slope_flag", env)
slope_vars = get("slope_variables", env)
if(mem.clean){
# we can't really rm many variables... but gc can be enough
# cpp_demean is the most mem intensive bit
gc()
}
vars_demean = cpp_demean(y, X, weights, iterMax = fixef.iter,
diffMax = fixef.tol, r_nb_id_Q = fixef_sizes,
fe_id_list = fixef_id_list, table_id_I = fixef_table_vector,
slope_flag_Q = slope_flag, slope_vars_list = slope_vars,
r_init = init, nthreads = nthreads)
y_demean = vars_demean$y_demean
if(onlyFixef){
X_demean = matrix(1, nrow = length(y_demean))
} else {
X_demean = vars_demean$X_demean
}
res$iterations = vars_demean$iterations
if(fromGLM){
res$means = vars_demean$means
}
if(mem.clean){
rm(vars_demean)
}
if(any(abs(slope_flag) > 0) && any(res$iterations > 300)){
# Maybe we have a convergence problem
# This is poorly coded, but it's a temporary fix
opt_fe <- check_conv(y_demean, X_demean, fixef_id_list, slope_flag, slope_vars, weights)
# This is a bit too rough a check but it should catch the most problematic cases
if(any(opt_fe > 1e-4)){
msg = "There seems to be a convergence problem due to the presence of variables with varying slopes. The precision of the estimates may not be great."
if(any(slope_flag < 0)){
sugg = "This convergence problem mostly arises when there are varying slopes without their associated fixed-effect, as is the case in your estimation. Why not try to include the fixed-effect (i.e. use '[' instead of '[[')?"
} else {
sugg = "As a workaround, and if there are not too many slopes, you can use the variables with varying slopes as regular variables using the function interact (see ?interact)."
}
msg = paste(msg, sugg)
res$convStatus = FALSE
res$message = paste0("tol: ", signif_plus(fixef.tol), ", iter: ", max(res$iterations))
if(fromGLM){
res$warn_varying_slope = msg
} else {
warning(msg)
}
}
} else if(any(res$iterations >= fixef.iter)){
msg = paste0("Demeaning algorithm: Absence of convergence after reaching the maximum number of iterations (", fixef.iter, ").")
res$convStatus = FALSE
res$message = paste0("Maximum of ", fixef.iter, " iterations reached.")
if(fromGLM){
res$warn_varying_slope = msg
} else {
warning(msg)
}
}
if(verbose >= 1){
if(length(fixef_sizes) > 1){
gt("Demeaning", FALSE)
cat(" (iter: ", paste0(c(tail(res$iterations, 1), res$iterations[-length(res$iterations)]), collapse = ", "), ")\n", sep="")
} else {
gt("Demeaning")
}
}
}
#
# Estimation
#
if(mem.clean){
gc()
}
if(!onlyFixef){
est = ols_fit(y_demean, X_demean, weights, correct_0w, collin.tol, nthreads, xwx, xwy)
if(mem.clean){
gc()
}
# Corner case: not any relevant variable
if(!is.null(est$all_removed)){
all_vars = colnames(X)
IN_MULTI = get("IN_MULTI", env)
if(isFixef){
msg = paste0(ifsingle(all_vars, "The only variable ", "All variables"), enumerate_items(all_vars, "quote.is", nmax = 3), " collinear with the fixed effects. In such circumstances, the estimation is void.")
} else {
msg = paste0(ifsingle(all_vars, "The only variable ", "All variables"), enumerate_items(all_vars, "quote.is", nmax = 3), " virtually constant and equal to 0. In such circumstances, the estimation is void.")
}
if(IN_MULTI || !warn){
if(warn) warning(msg)
return(fixest_NA_results(env))
} else {
stop_up(msg, up = fromGLM)
}
}
# Formatting the result
coef = est$coefficients
names(coef) = colnames(X)[!est$is_excluded]
res$coefficients = coef
# Additional stuff
res$residuals = est$residuals
res$multicol = est$multicol
res$collin.min_norm = est$collin.min_norm
if(fromGLM) res$is_excluded = est$is_excluded
if(demeaned){
res$y_demeaned = y_demean
res$X_demeaned = X_demean
colnames(res$X_demeaned) = colnames(X)
}
} else {
res$residuals = y_demean
res$coefficients = coef = NULL
res$onlyFixef = TRUE
res$multicol = FALSE
if(demeaned){
res$y_demeaned = y_demean
}
}
time_post = proc.time()
if(verbose >= 1){
gt("Estimation")
}
if(mem.clean){
gc()
}
if(fromGLM){
res$fitted.values = y - res$residuals
if(!onlyFixef){
res$X_demean = X_demean
}
return(res)
}
#
# Post processing
#
# Collinearity message
collin.adj = 0
if(res$multicol){
var_collinear = colnames(X)[est$is_excluded]
if(notes){
message(ifsingle(var_collinear, "The variable ", "Variables "), enumerate_items(var_collinear, "quote.has", nmax = 3), " been removed because of collinearity (see $collin.var).")
}
res$collin.var = var_collinear
# full set of coeffficients with NAs
collin.coef = setNames(rep(NA, ncol(X)), colnames(X))
collin.coef[!est$is_excluded] = res$coefficients
res$collin.coef = collin.coef
if(isFixef){
X = X[, !est$is_excluded, drop = FALSE]
}
X_demean = X_demean[, !est$is_excluded, drop = FALSE]
collin.adj = sum(est$is_excluded)
}
n = length(y)
res$nparams = res$nparams - collin.adj
df_k = res$nparams
res$nobs = n
if(isWeight) res$weights = weights
#
# IV correction
#
if(!is.null(dots$resid_1st_stage)){
# We correct the residual
is_int = "(Intercept)" %in% names(res$coefficients)
resid_new = cpp_iv_resid(res$residuals, res$coefficients, dots$resid_1st_stage, is_int, nthreads)
res$iv_residuals = res$residuals
res$residuals = resid_new
}
#
# Hessian, score, etc
#
if(onlyFixef){
res$fitted.values = res$sumFE = y - res$residuals
} else {
if(mem.clean){
gc()
}
# X_beta / fitted / sumFE
if(isFixef){
x_beta = cpppar_xbeta(X, coef, nthreads)
res$sumFE = y - x_beta - res$residuals
res$fitted.values = x_beta + res$sumFE
if(isTRUE(dots$add_fitted_demean)){
res$fitted.values_demean = est$fitted.values
}
} else {
res$fitted.values = est$fitted.values
}
if(isOffset){
res$fitted.values = res$fitted.values + offset
}
#
# score + hessian + vcov
if(isWeight){
res$scores = (res$residuals * weights) * X_demean
} else {
res$scores = res$residuals * X_demean
}
res$hessian = est$xwx
if(mem.clean){
gc()
}
res$sigma2 = cpp_ssq(res$residuals, weights) / (length(y) - df_k)
res$cov.unscaled = est$xwx_inv * res$sigma2
rownames(res$cov.unscaled) = colnames(res$cov.unscaled) = names(coef)
# se
se = diag(res$cov.unscaled)
se[se < 0] = NA
se = sqrt(se)
# coeftable
zvalue <- coef/se
pvalue <- 2*pt(-abs(zvalue), max(n - df_k, 1))
coeftable <- data.frame("Estimate"=coef, "Std. Error"=se, "t value"=zvalue, "Pr(>|t|)"=pvalue)
names(coeftable) <- c("Estimate", "Std. Error", "t value", "Pr(>|t|)")
row.names(coeftable) <- names(coef)
attr(se, "type") = attr(coeftable, "type") = "Standard"
res$coeftable = coeftable
res$se = se
}
# fit stats
if(!cpp_isConstant(res$fitted.values)){
res$sq.cor = stats::cor(y, res$fitted.values)**2
} else {
res$sq.cor = NA
}
if(mem.clean){
gc()
}
res$ssr_null = cpp_ssr_null(y, weights)
res$ssr = cpp_ssq(res$residuals, weights)
sigma_null = sqrt(res$ssr_null / ifelse(isWeight, sum(weights), n))
res$ll_null = -1/2/sigma_null^2*res$ssr_null - (log(sigma_null) + log(2*pi)/2) * ifelse(isWeight, sum(weights), n)
# fixef info
if(isFixef){
# For the within R2
if(!onlyFixef){
res$ssr_fe_only = cpp_ssq(y_demean, weights)
sigma = sqrt(res$ssr_fe_only / ifelse(isWeight, sum(weights), n))
res$ll_fe_only = -1/2/sigma^2*res$ssr_fe_only - (log(sigma) + log(2*pi)/2) * ifelse(isWeight, sum(weights), n)
}
}
if(verbose >= 3) gt("Post-processing")
class(res) = "fixest"
do_summary = get("do_summary", env)
if(do_summary){
se = get("se", env)
cluster = get("cluster", env)
lean = get("lean", env)
dof = get("dof", env)
summary_flags = get("summary_flags", env)
# If lean = TRUE, 1st stage residuals are still needed for the 2nd stage
if(isTRUE(dots$iv_call) && lean){
r = res$residuals
fv = res$fitted.values
fvd = res$fitted.values_demean
}
res = summary(res, se = se, cluster = cluster, dof = dof, lean = lean, summary_flags = summary_flags)
if(isTRUE(dots$iv_call) && lean){
res$residuals = r
res$fitted.values = fv
res$fitted.values_demean = fvd
}
}
res
}
ols_fit = function(y, X, w, correct_0w = FALSE, collin.tol, nthreads, xwx = NULL, xwy = NULL){
    # Weighted least-squares fit of y on X.
    # No argument control here -- it is performed upstream.
    #
    # Returns a list with: the cross-products (xwx) and their inverse (xwx_inv),
    # the coefficients, fitted values, residuals, and collinearity information
    # (multicol, is_excluded, collin.min_norm). If every regressor is removed
    # because of collinearity, returns list(all_removed = TRUE) instead.

    # The cross-products are only computed when not supplied by the caller
    if(is.null(xwx)){
        prods = cpp_sparse_products(X, w, y, correct_0w, nthreads)
        xwx = prods$XtX
        xwy = prods$Xty
    }

    chol_info = cpp_cholesky(xwx, collin.tol, nthreads)

    # Corner case: all the variables are collinear => can happen when using FEs
    if(!is.null(chol_info$all_removed)){
        return(list(all_removed = TRUE))
    }

    xwx_inv = chol_info$XtX_inv
    is_excluded = chol_info$id_excl
    multicol = any(is_excluded)

    if(!multicol){
        # regular case first => avoids copies of X and xwy
        beta = as.vector(xwx_inv %*% xwy)
        fitted.values = cpppar_xbeta(X, beta, nthreads)
    } else {
        # collinear variables are dropped before computing the fit
        beta = as.vector(xwx_inv %*% xwy[!is_excluded])
        fitted.values = cpppar_xbeta(X[, !is_excluded, drop = FALSE], beta, nthreads)
    }

    list(xwx = xwx, coefficients = beta, fitted.values = fitted.values,
         xwx_inv = xwx_inv, multicol = multicol, residuals = y - fitted.values,
         is_excluded = is_excluded, collin.min_norm = chol_info$min_norm)
}
check_conv = function(y, X, fixef_id_list, slope_flag, slope_vars, weights){
    # Convergence check of the demeaning algorithm when varying slopes are present.
    # VERY SLOW!!!! => to be ported to C++ if this function lasts.
    #
    # y, X: variables that were already demeaned.
    # For each demeaned variable and each fixed-effect (and slope), we compute the
    # optimal FE/slope coefficient: it should be 0 if the algorithm converged.
    #
    # Returns a matrix with one row per variable (y first, then the columns of X)
    # and one column per FE/slope coefficient checked.

    Q = length(slope_flag)

    # K = number of variables to check: y, plus the columns of X (if any).
    # length(X) == 1 flags the "no linear variables" case (X is a scalar placeholder).
    if(length(X) == 1){
        K = 1
    } else {
        K = NCOL(X) + 1
    }

    # preallocation (avoids growing the list in the loop)
    res = vector("list", K)
    for(k in seq_len(K)){
        if(k == 1){
            x = y
        } else {
            x = X[, k - 1]
        }

        res_tmp = c()
        index_slope = 1
        for(q in seq_len(Q)){
            fixef_id = fixef_id_list[[q]]

            # slope_flag[q] >= 0 means the fixed-effect itself is included
            if(slope_flag[q] >= 0){
                res_tmp = c(res_tmp, max(abs(tapply(weights * x, fixef_id, mean))))
            }

            # abs(slope_flag[q]) gives the number of varying slopes for this FE
            # (seq_len handles the 0-slope case without the 1:0 pitfall)
            n_slopes = abs(slope_flag[q])
            for(i in seq_len(n_slopes)){
                var = slope_vars[[index_slope]]
                # optimal slope coef: weighted projection of x on var within each FE
                num = tapply(weights * x * var, fixef_id, sum)
                denom = tapply(weights * var^2, fixef_id, sum)
                res_tmp = c(res_tmp, max(abs(num / denom)))
                index_slope = index_slope + 1
            }
        }
        res[[k]] = res_tmp
    }

    do.call("rbind", res)
}
#' @rdname feols
feols.fit = function(y, X, fixef_df, offset, split, fsplit, cluster, se, dof, weights, subset,
                    fixef.rm = "perfect", fixef.tol = 1e-6, fixef.iter = 10000,
                    collin.tol = 1e-10, nthreads = getFixest_nthreads(), lean = FALSE, warn = TRUE,
                    notes = getFixest_notes(), mem.clean = FALSE, verbose = 0, only.env = FALSE, env, ...){
    # Matrix interface to feols: builds (or validates) the estimation
    # environment, then delegates the estimation itself to feols().
    # Returns a 'fixest' object, or the environment itself if only.env = TRUE.

    if(missing(weights)) weights = NULL

    time_start = proc.time()

    if(missing(env)){
        # Build the estimation environment from scratch
        set_defaults("fixest_estimation")
        call_env = new.env(parent = parent.frame())
        env = try(fixest_env(y = y, X = X, fixef_df = fixef_df, offset = offset,
                             weights = weights, subset = subset, split = split,
                             fsplit = fsplit, cluster = cluster, se = se, dof = dof,
                             fixef.rm = fixef.rm, fixef.tol = fixef.tol,
                             fixef.iter = fixef.iter, collin.tol = collin.tol,
                             nthreads = nthreads, lean = lean, warn = warn, notes = notes,
                             verbose = verbose, mem.clean = mem.clean,
                             origin = "feols.fit", mc_origin = match.call(),
                             call_env = call_env, ...),
                  silent = TRUE)
    } else if((r <- !is.environment(env)) || !isTRUE(env$fixest_env)){
        stop("Argument 'env' must be an environment created by a fixest estimation. Currently it is not ", if(r) "an" else "a 'fixest'", " environment.")
    }

    if(inherits(env, "try-error")){
        # inherits() instead of class() comparison; if/else instead of scalar
        # ifelse() (ifelse() errors when mc$origin is a symbol)
        mc = match.call()
        origin = if(is.null(mc$origin)) "feols.fit" else mc$origin
        stop(format_error_msg(env, origin))
    }

    check_arg(only.env, "logical scalar")
    if(only.env){
        return(env)
    }

    verbose = get("verbose", env)
    if(verbose >= 2) cat("Setup in ", (proc.time() - time_start)[3], "s\n", sep = "")

    # workhorse is feols (OK if error msg leads to feols [clear enough])
    res = feols(env = env)

    res
}
#' Fixed-effects GLM estimations
#'
#' Estimates GLM models with any number of fixed-effects.
#'
#' @inheritParams feols
#' @inheritParams femlm
#' @inheritSection feols Combining the fixed-effects
#' @inheritSection feols Varying slopes
#' @inheritSection feols Lagging variables
#' @inheritSection feols Interactions
#' @inheritSection feols On standard-errors
#' @inheritSection feols Multiple estimations
#'
#' @param family Family to be used for the estimation. Defaults to \code{poisson()}. See \code{\link[stats]{family}} for details of family functions.
#' @param start Starting values for the coefficients. Can be: i) a numeric of length 1 (e.g. \code{start = 0}), ii) a numeric vector of the exact same length as the number of variables, or iii) a named vector of any length (the names will be used to initialize the appropriate coefficients). Default is missing.
#' @param etastart Numeric vector of the same length as the data. Starting values for the linear predictor. Default is missing.
#' @param mustart Numeric vector of the same length as the data. Starting values for the vector of means. Default is missing.
#' @param fixef.tol Precision used to obtain the fixed-effects. Defaults to \code{1e-6}. It corresponds to the maximum absolute difference allowed between two coefficients of successive iterations.
#' @param glm.iter Number of iterations of the glm algorithm. Default is 25.
#' @param glm.tol Tolerance level for the glm algorithm. Default is \code{1e-8}.
#' @param verbose Integer. Higher values give more information. In particular, it can detail the number of iterations in the demeaning algorithm (the first number is the left-hand-side, the other numbers are the right-hand-side variables). It can also detail the step-halving algorithm.
#' @param notes Logical. By default, three notes are displayed: when NAs are removed, when some fixed-effects are removed because of only 0 (or 0/1) outcomes, or when a variable is dropped because of collinearity. To avoid displaying these messages, you can set \code{notes = FALSE}. You can remove these messages permanently by using \code{setFixest_notes(FALSE)}.
#'
#' @details
#' The core of the GLM are the weighted OLS estimations. These estimations are performed with \code{\link[fixest]{feols}}. The method used to demean each variable along the fixed-effects is based on Berge (2018), since this is the same problem to solve as for the Gaussian case in a ML setup.
#'
#' @return
#' A \code{fixest} object. Note that \code{fixest} objects contain many elements and most of them are for internal use, they are presented here only for information. To access them, it is safer to use the user-level methods (e.g. \code{\link[fixest]{vcov.fixest}}, \code{\link[fixest]{resid.fixest}}, etc) or functions (like for instance \code{\link[fixest]{fitstat}} to access any fit statistic).
#' \item{nobs}{The number of observations.}
#' \item{fml}{The linear formula of the call.}
#' \item{call}{The call of the function.}
#' \item{method}{The method used to estimate the model.}
#' \item{family}{The family used to estimate the model.}
#' \item{fml_all}{A list containing different parts of the formula. Always contain the linear formula. Then, if relevant: \code{fixef}: the fixed-effects.}
#' \item{nparams}{The number of parameters of the model.}
#' \item{fixef_vars}{The names of each fixed-effect dimension.}
#' \item{fixef_id}{The list (of length the number of fixed-effects) of the fixed-effects identifiers for each observation.}
#' \item{fixef_sizes}{The size of each fixed-effect (i.e. the number of unique identifiers for each fixed-effect dimension).}
#' \item{y}{(When relevant.) The dependent variable (used to compute the within-R2 when fixed-effects are present).}
#' \item{convStatus}{Logical, convergence status of the IRWLS algorithm.}
#' \item{irls_weights}{The weights of the last iteration of the IRWLS algorithm.}
#' \item{obs_selection}{(When relevant.) List containing vectors of integers. It represents the sequential selection of observation vis a vis the original data set.}
#' \item{fixef_removed}{(When relevant.) In the case there were fixed-effects and some observations were removed because of only 0/1 outcome within a fixed-effect, it gives the list (for each fixed-effect dimension) of the fixed-effect identifiers that were removed.}
#' \item{coefficients}{The named vector of estimated coefficients.}
#' \item{coeftable}{The table of the coefficients with their standard errors, z-values and p-values.}
#' \item{loglik}{The loglikelihood.}
#' \item{deviance}{Deviance of the fitted model.}
#' \item{iterations}{Number of iterations of the algorithm.}
#' \item{ll_null}{Log-likelihood of the null model (i.e. with the intercept only).}
#' \item{ssr_null}{Sum of the squared residuals of the null model (containing only the intercept).}
#' \item{pseudo_r2}{The adjusted pseudo R2.}
#' \item{fitted.values}{The fitted values are the expected value of the dependent variable for the fitted model: that is \eqn{E(Y|X)}.}
#' \item{linear.predictors}{The linear predictors.}
#' \item{residuals}{The residuals (y minus the fitted values).}
#' \item{sq.cor}{Squared correlation between the dependent variable and the expected predictor (i.e. fitted.values) obtained by the estimation.}
#' \item{hessian}{The Hessian of the parameters.}
#' \item{cov.unscaled}{The variance-covariance matrix of the parameters.}
#' \item{se}{The standard-error of the parameters.}
#' \item{scores}{The matrix of the scores (first derivative for each observation).}
#' \item{residuals}{The difference between the dependent variable and the expected predictor.}
#' \item{sumFE}{The sum of the fixed-effects coefficients for each observation.}
#' \item{offset}{(When relevant.) The offset formula.}
#' \item{weights}{(When relevant.) The weights formula.}
#' \item{collin.var}{(When relevant.) Vector containing the variables removed because of collinearity.}
#' \item{collin.coef}{(When relevant.) Vector of coefficients, where the values of the variables removed because of collinearity are NA.}
#'
#'
#'
#'
#' @seealso
#' See also \code{\link[fixest]{summary.fixest}} to see the results with the appropriate standard-errors, \code{\link[fixest]{fixef.fixest}} to extract the fixed-effects coefficients, and the function \code{\link[fixest]{etable}} to visualize the results of multiple estimations.
#' And other estimation methods: \code{\link[fixest]{feols}}, \code{\link[fixest]{femlm}}, \code{\link[fixest:femlm]{fenegbin}}, \code{\link[fixest]{feNmlm}}.
#'
#' @author
#' Laurent Berge
#'
#' @references
#'
#' Berge, Laurent, 2018, "Efficient estimation of maximum likelihood models with multiple fixed-effects: the R package FENmlm." CREA Discussion Papers, 13 (\url{https://wwwen.uni.lu/content/download/110162/1299525/file/2018_13}).
#'
#' For models with multiple fixed-effects:
#'
#' Gaure, Simen, 2013, "OLS with multiple high dimensional category variables", Computational Statistics & Data Analysis 66 pp. 8--18
#'
#'
#' @examples
#'
#' # Default is a poisson model
#' res = feglm(Sepal.Length ~ Sepal.Width + Petal.Length | Species, iris)
#'
#' # You could also use fepois
#' res_pois = fepois(Sepal.Length ~ Sepal.Width + Petal.Length | Species, iris)
#'
#' # With the fit method:
#' res_fit = feglm.fit(iris$Sepal.Length, iris[, 2:3], iris$Species)
#'
#' # All results are identical:
#' etable(res, res_pois, res_fit)
#'
#' # Note that you have more examples in feols
#'
#' #
#' # Multiple estimations:
#' #
#'
#' # 6 estimations
#' est_mult = fepois(c(Ozone, Solar.R) ~ Wind + Temp + csw0(Wind:Temp, Day), airquality)
#'
#' # We can display the results for the first lhs:
#' etable(est_mult[lhs = 1])
#'
#' # And now the second (access can be made by name)
#' etable(est_mult[lhs = "Solar.R"])
#'
#' # Now we focus on the two last right hand sides
#' # (note that .N can be used to specify the last item)
#' etable(est_mult[rhs = 2:.N])
#'
#' # Combining with split
#' est_split = fepois(c(Ozone, Solar.R) ~ sw(poly(Wind, 2), poly(Temp, 2)),
#' airquality, split = ~ Month)
#'
#' # You can display everything at once with the print method
#' est_split
#'
#' # Different way of displaying the results with "compact"
#' summary(est_split, "compact")
#'
#' # You can still select which sample/LHS/RHS to display
#' est_split[sample = 1:2, lhs = 1, rhs = 1]
#'
#'
feglm = function(fml, data, family = "poisson", offset, weights, subset, split, fsplit, cluster, se, dof, panel.id, start = NULL,
                etastart = NULL, mustart = NULL, fixef, fixef.rm = "perfect", fixef.tol = 1e-6, fixef.iter = 10000, collin.tol = 1e-10,
                glm.iter = 25, glm.tol = 1e-8, nthreads = getFixest_nthreads(), lean = FALSE,
                warn = TRUE, notes = getFixest_notes(), verbose = 0, combine.quick, mem.clean = FALSE, only.env = FALSE, env, ...){
    # User-level GLM estimation with fixed-effects: builds (or validates) the
    # estimation environment, then delegates the estimation to feglm.fit().
    # Returns a 'fixest' object, or the environment itself if only.env = TRUE.

    if(missing(weights)) weights = NULL

    time_start = proc.time()

    if(missing(env)){
        # Build the estimation environment from scratch
        set_defaults("fixest_estimation")
        call_env = new.env(parent = parent.frame())
        env = try(fixest_env(fml = fml, data = data, family = family, offset = offset,
                             weights = weights, subset = subset, split = split,
                             fsplit = fsplit, cluster = cluster, se = se, dof = dof,
                             panel.id = panel.id, linear.start = start,
                             etastart = etastart, mustart = mustart, fixef = fixef,
                             fixef.rm = fixef.rm, fixef.tol = fixef.tol,
                             fixef.iter = fixef.iter, collin.tol = collin.tol,
                             glm.iter = glm.iter, glm.tol = glm.tol,
                             nthreads = nthreads, lean = lean, warn = warn, notes = notes,
                             verbose = verbose, combine.quick = combine.quick,
                             mem.clean = mem.clean, origin = "feglm",
                             mc_origin = match.call(), call_env = call_env, ...),
                  silent = TRUE)
    } else if((r <- !is.environment(env)) || !isTRUE(env$fixest_env)){
        stop("Argument 'env' must be an environment created by a fixest estimation. Currently it is not ", if(r) "an" else "a 'fixest'", " environment.")
    }

    if(inherits(env, "try-error")){
        # inherits() instead of class() comparison; if/else instead of scalar
        # ifelse() (ifelse() errors when mc$origin is a symbol)
        mc = match.call()
        origin = if(is.null(mc$origin)) "feglm" else mc$origin
        stop(format_error_msg(env, origin))
    }

    check_arg(only.env, "logical scalar")
    if(only.env){
        return(env)
    }

    verbose = get("verbose", env)
    if(verbose >= 2) cat("Setup in ", (proc.time() - time_start)[3], "s\n", sep = "")

    # workhorse is feglm.fit (OK if error msg leads to feglm.fit [clear enough])
    res = feglm.fit(env = env)

    res
}
#' @rdname feglm
feglm.fit = function(y, X, fixef_df, family = "poisson", offset, split, fsplit, cluster, se, dof, weights, subset, start = NULL,
etastart = NULL, mustart = NULL, fixef.rm = "perfect", fixef.tol = 1e-6, fixef.iter = 10000,
collin.tol = 1e-10, glm.iter = 25, glm.tol = 1e-8, nthreads = getFixest_nthreads(), lean = FALSE, warn = TRUE,
notes = getFixest_notes(), mem.clean = FALSE, verbose = 0, only.env = FALSE, env, ...){
dots = list(...)
lean_internal = isTRUE(dots$lean_internal)
means = 1
if(!missing(env)){
# This is an internal call from the function feglm
# no need to further check the arguments
# we extract them from the env
if((r <- !is.environment(env)) || !isTRUE(env$fixest_env)) {
stop("Argument 'env' must be an environment created by a fixest estimation. Currently it is not ", ifelse(r, "an", "a 'fixest'"), " environment.")
}
# main variables
if(missing(y)) y = get("lhs", env)
if(missing(X)) X = get("linear.mat", env)
if(!missing(fixef_df) && is.null(fixef_df)){
assign("isFixef", FALSE, env)
}
if(missing(offset)) offset = get("offset.value", env)
if(missing(weights)) weights = get("weights.value", env)
# other params
if(missing(fixef.tol)) fixef.tol = get("fixef.tol", env)
if(missing(fixef.iter)) fixef.iter = get("fixef.iter", env)
if(missing(collin.tol)) collin.tol = get("collin.tol", env)
if(missing(glm.iter)) glm.iter = get("glm.iter", env)
if(missing(glm.tol)) glm.tol = get("glm.tol", env)
if(missing(warn)) warn = get("warn", env)
if(missing(verbose)) verbose = get("verbose", env)
# starting point of the fixed-effects
if(!is.null(dots$means)) means = dots$means
# init
init.type = get("init.type", env)
starting_values = get("starting_values", env)
if(lean_internal){
# Call within here => either null model or fe only
init.type = "default"
if(!is.null(etastart)){
init.type = "eta"
starting_values = etastart
}
}
} else {
if(missing(weights)) weights = NULL
time_start = proc.time()
set_defaults("fixest_estimation")
call_env = new.env(parent = parent.frame())
env = try(fixest_env(y = y, X = X, fixef_df = fixef_df, family = family, nthreads = nthreads, lean = lean, offset = offset, weights = weights, subset = subset, split = split, fsplit = fsplit, cluster = cluster, se = se, dof = dof, linear.start = start, etastart=etastart, mustart=mustart, fixef.rm = fixef.rm, fixef.tol = fixef.tol, fixef.iter = fixef.iter, collin.tol = collin.tol, glm.iter = glm.iter, glm.tol = glm.tol, notes=notes, mem.clean = mem.clean, warn=warn, verbose = verbose, origin = "feglm.fit", mc_origin = match.call(), call_env = call_env, ...), silent = TRUE)
if("try-error" %in% class(env)){
stop(format_error_msg(env, "feglm.fit"))
}
check_arg(only.env, "logical scalar")
if(only.env){
return(env)
}
verbose = get("verbose", env)
if(verbose >= 2) cat("Setup in ", (proc.time() - time_start)[3], "s\n", sep="")
# y/X
y = get("lhs", env)
X = get("linear.mat", env)
# offset
offset = get("offset.value", env)
# weights
weights = get("weights.value", env)
# init
init.type = get("init.type", env)
starting_values = get("starting_values", env)
}
#
# Split ####
#
do_split = get("do_split", env)
if(do_split){
res = multi_split(env, feglm.fit)
return(res)
}
#
# Multi fixef ####
#
do_multi_fixef = get("do_multi_fixef", env)
if(do_multi_fixef){
res = multi_fixef(env, feglm.fit)
return(res)
}
#
# Multi LHS and RHS ####
#
do_multi_lhs = get("do_multi_lhs", env)
do_multi_rhs = get("do_multi_rhs", env)
if(do_multi_lhs || do_multi_rhs){
res = multi_LHS_RHS(env, feglm.fit)
return(res)
}
#
# Regular estimation ####
#
# Setup:
family = get("family_funs", env)
isFixef = get("isFixef", env)
nthreads = get("nthreads", env)
isWeight = length(weights) > 1
isOffset = length(offset) > 1
nobs <- length(y)
onlyFixef = length(X) == 1
# the preformatted results
res = get("res", env)
# glm functions:
variance = family$variance
linkfun = family$linkfun
linkinv = family$linkinv
sum_dev.resids = family$sum_dev.resids
valideta = family$valideta
validmu = family$validmu
mu.eta = family$mu.eta
family_equiv = family$family_equiv
#
# Init
#
if(mem.clean){
gc()
}
if(init.type == "mu"){
mu = starting_values
if(!valideta(mu)){
stop("In 'mustart' the values provided are not valid.")
}
eta = linkfun(mu)
} else if(init.type == "eta"){
eta = starting_values
if(!valideta(eta)){
stop("In 'etastart' the values provided are not valid.")
}
mu = linkinv(eta)
} else if(init.type == "coef"){
# If there are fixed-effects we MUST first compute the FE model with starting values as offset
# otherwise we are too far away from the solution and starting values may lead to divergence
# (hence step halving would be required)
# This means that initializing with coefficients incurs large computational costs
# with fixed-effects
start = get("start", env)
offset_fe = offset + cpppar_xbeta(X, start, nthreads)
if(isFixef){
mustart = 0
eval(family$initialize)
eta = linkfun(mustart)
# just a rough estimate (=> high tol values) [no benefit in high precision]
model_fe = try(feglm.fit(X = 0, etastart = eta, offset = offset_fe, glm.tol = 1e-2, fixef.tol = 1e-2, env = env, lean_internal = TRUE))
if("try-error" %in% class(model_fe)){
stop("Estimation failed during initialization when getting the fixed-effects, maybe change the values of 'start'? \n", model_fe)
}
eta = model_fe$linear.predictors
mu = model_fe$fitted.values
devold = model_fe$deviance
} else {
eta = offset_fe
mu = linkinv(eta)
devold = sum_dev.resids(y, mu, eta, wt = weights)
}
wols_old = list(fitted.values = eta - offset)
} else {
mustart = 0
eval(family$initialize)
eta = linkfun(mustart)
mu = linkinv(eta)
# NOTA: FE only => ADDS LOTS OF COMPUTATIONAL COSTS without convergence benefit
}
if(mem.clean){
gc()
}
if(init.type != "coef"){
# starting deviance with constant equal to 1e-5
# this is important for getting in step halving early (when deviance goes awry right from the start)
devold = sum_dev.resids(y, rep(linkinv(1e-5), nobs), rep(1e-5, nobs), wt = weights)
wols_old = list(fitted.values = rep(1e-5, nobs))
}
if(!validmu(mu) || !valideta(eta)){
stop("Current starting values are not valid.")
}
assign("nb_sh", 0, env)
on.exit(warn_step_halving(env))
if((init.type == "coef" && verbose >= 1) || verbose >= 4) {
cat("Deviance at initializat. = ", numberFormatNormal(devold), "\n", sep = "")
}
#
# The main loop
#
wols_means = 1
conv = FALSE
warning_msg = div_message = ""
for (iter in 1:glm.iter) {
if(mem.clean){
gc()
}
mu.eta.val = mu.eta(mu, eta)
var_mu = variance(mu)
# controls
any_pblm_mu = cpp_any_na_null(var_mu)
if(any_pblm_mu){
if (anyNA(var_mu)){
stop("NAs in V(mu), at iteration ", iter, ".")
} else if (any(var_mu == 0)){
stop("0s in V(mu), at iteration ", iter, ".")
}
}
if(anyNA(mu.eta.val)){
stop("NAs in d(mu)/d(eta), at iteration ", iter, ".")
}
if(isOffset){
z = (eta - offset) + (y - mu)/mu.eta.val
} else {
z = eta + (y - mu)/mu.eta.val
}
w = as.vector(weights * mu.eta.val**2 / var_mu)
is_0w = w == 0
any_0w = any(is_0w)
if(any_0w && all(is_0w)){
warning_msg = paste0("No informative observation at iteration ", iter, ".")
div_message = "No informative observation."
break
}
if(mem.clean && iter > 1){
rm(wols)
gc()
}
wols = feols(y = z, X = X, weights = w, means = wols_means, correct_0w = any_0w, env = env, fixef.tol = fixef.tol * 10**(iter==1), fixef.iter = fixef.iter, collin.tol = collin.tol, nthreads = nthreads, mem.clean = mem.clean, verbose = verbose - 1)
if(isTRUE(wols$NA_model)){
return(wols)
}
# In theory OLS estimation is guaranteed to exist
# yet, NA coef may happen with non-infinite very large values of z/w (e.g. values > 1e100)
if(anyNA(wols$coefficients)){
if(iter == 1){
stop("Weighted-OLS returns NA coefficients at first iteration, step halving cannot be performed. Try other starting values?")
}
warning_msg = paste0("Divergence at iteration ", iter, ": ", msg, ". Weighted-OLS returns NA coefficients. Last evaluated coefficients with finite deviance are returned for information purposes.")
div_message = "Weighted-OLS returned NA coefficients."
wols = wols_old
break
} else {
wols_means = wols$means
}
eta = wols$fitted.values
if(isOffset){
eta = eta + offset
}
if(mem.clean){
gc()
}
mu = linkinv(eta)
dev = sum_dev.resids(y, mu, eta, wt = weights)
dev_evol = dev - devold
if(verbose >= 1) cat("Iteration: ", sprintf("%02i", iter), " -- Deviance = ", numberFormatNormal(dev), " -- Evol. = ", dev_evol, "\n", sep = "")
#
# STEP HALVING
#
if(!is.finite(dev) || dev_evol > 0 || !valideta(eta) || !validmu(mu)){
if(!is.finite(dev)){
# we report step-halving but only for non-finite deviances
# other situations are OK (it just happens)
nb_sh = get("nb_sh", env)
assign("nb_sh", nb_sh + 1, env)
}
eta_new = wols$fitted.values
eta_old = wols_old$fitted.values
iter_sh = 0
do_exit = FALSE
while(!is.finite(dev) || dev_evol > 0 || !valideta(eta_new) || !validmu(mu)){
if(iter == 1 && (is.finite(dev) && valideta(eta_new) && validmu(mu)) && iter_sh >= 2){
# BEWARE FIRST ITERATION:
# at first iteration, the deviance can be higher than the init, and SH may not help
# we need to make sure we get out of SH before it's messed up
break
} else if(iter_sh == glm.iter){
# if first iteration => means algo did not find viable solution
if(iter == 1){
stop("Algorithm failed at first iteration. Step-halving could not find a valid set of parameters.")
}
# Problem only if the deviance is non-finite or eta/mu not valid
# Otherwise, it means that we're at a maximum
if(!is.finite(dev) || !valideta(eta_new) || !validmu(mu)){
# message
msg = ifelse(!is.finite(dev), "non-finite deviance", "no valid eta/mu")
warning_msg = paste0("Divergence at iteration ", iter, ": ", msg, ". Step halving: no valid correction found. Last evaluated coefficients with finite deviance are returned for information purposes.")
div_message = paste0(msg, " despite step-halving")
wols = wols_old
do_exit = TRUE
}
break
}
iter_sh = iter_sh + 1
eta_new = (eta_old + eta_new) / 2
if(mem.clean){
gc()
}
mu = linkinv(eta_new + offset)
dev = sum_dev.resids(y, mu, eta_new + offset, wt = weights)
dev_evol = dev - devold
if(verbose >= 3) cat("Step-halving: iter =", iter_sh, "-- dev:", numberFormatNormal(dev), "-- evol:", numberFormatNormal(dev_evol), "\n")
}
if(do_exit) break
# it worked: update
eta = eta_new + offset
wols$fitted.values = eta_new
# NOTA: we must NOT end with a step halving => we need a proper weighted-ols estimation
# we force the algorithm to continue
dev_evol = Inf
if(verbose >= 2){
cat("Step-halving: new deviance = ", numberFormatNormal(dev), "\n", sep = "")
}
}
if(abs(dev_evol)/(0.1 + abs(dev)) < glm.tol){
conv = TRUE
break
} else {
devold = dev
wols_old = wols
}
}
# Convergence flag
if(!conv){
if(iter == glm.iter){
warning_msg = paste0("Absence of convergence: Maximum number of iterations reached (", glm.iter, "). Final deviance: ", numberFormatNormal(dev), ".")
div_message = "no convergence: Maximum number of iterations reached"
}
res$convStatus = FALSE
res$message = div_message
} else {
res$convStatus = TRUE
}
#
# post processing
#
# Collinearity message
collin.adj = 0
if(wols$multicol){
var_collinear = colnames(X)[wols$is_excluded]
if(notes) message(ifsingle(var_collinear, "The variable ", "Variables "), enumerate_items(var_collinear, "quote.has"), " been removed because of collinearity (see $collin.var).")
res$collin.var = var_collinear
# full set of coeffficients with NAs
collin.coef = setNames(rep(NA, ncol(X)), colnames(X))
collin.coef[!wols$is_excluded] = wols$coefficients
res$collin.coef = collin.coef
wols$X_demean = wols$X_demean[, !wols$is_excluded, drop = FALSE]
X = X[, !wols$is_excluded, drop = FALSE]
collin.adj = sum(wols$is_excluded)
}
res$irls_weights = w # weights from the iteratively reweighted least square
res$coefficients = coef = wols$coefficients
res$collin.min_norm = wols$collin.min_norm
if(!is.null(wols$warn_varying_slope)){
warning(wols$warn_varying_slope)
}
res$linear.predictors = wols$fitted.values
if(isOffset){
res$linear.predictors = res$linear.predictors + offset
}
res$fitted.values = linkinv(res$linear.predictors)
res$residuals = y - res$fitted.values
if(onlyFixef) res$onlyFixef = onlyFixef
# dispersion + scores
if(family$family %in% c("poisson", "binomial")){
res$dispersion = 1
} else {
weighted_resids = wols$residuals * res$irls_weights
# res$dispersion = sum(weighted_resids ** 2) / sum(res$irls_weights)
# I use the second line to fit GLM's
res$dispersion = sum(weighted_resids * wols$residuals) / (res$nobs - res$nparams)
}
res$working_residuals = wols$residuals
if(!onlyFixef && !lean_internal){
# score + hessian + vcov
if(mem.clean){
gc()
}
# dispersion + scores
if(family$family %in% c("poisson", "binomial")){
res$scores = (wols$residuals * res$irls_weights) * wols$X_demean
res$hessian = cpppar_crossprod(wols$X_demean, res$irls_weights, nthreads)
} else {
res$scores = (weighted_resids / res$dispersion) * wols$X_demean
res$hessian = cpppar_crossprod(wols$X_demean, res$irls_weights, nthreads) / res$dispersion
}
info_inv = cpp_cholesky(res$hessian, collin.tol, nthreads)
if(!is.null(info_inv$all_removed)){
# This should not occur, but I prefer to be safe
stop("Not any single variable with a positive variance was found after the weighted-OLS stage. (If possible, could you send a replicable example to fixest's author? He's curious about when that actually happens, since in theory it should never happen.)")
}
var = info_inv$XtX_inv
is_excluded = info_inv$id_excl
if(any(is_excluded)){
# There should be no remaining collinearity
warning_msg = paste(warning_msg, "Residual collinearity was found after the weighted-OLS stage. The covariance is not defined. (This should not happen. If possible, could you send a replicable example to fixest's author? He's curious about when that actually happen.)")
var = matrix(NA, length(is_excluded), length(is_excluded))
}
res$cov.unscaled = var
rownames(res$cov.unscaled) = colnames(res$cov.unscaled) = names(coef)
# se
se = diag(res$cov.unscaled)
se[se < 0] = NA
se = sqrt(se)
# coeftable
zvalue <- coef/se
use_t = !family$family %in% c("poisson", "binomial")
if(use_t){
pvalue <- 2*pt(-abs(zvalue), max(res$nobs - res$nparams, 1))
ctable_names = c("Estimate", "Std. Error", "t value", "Pr(>|t|)")
} else {
pvalue <- 2*pnorm(-abs(zvalue))
ctable_names = c("Estimate", "Std. Error", "z value", "Pr(>|z|)")
}
coeftable <- data.frame("Estimate"=coef, "Std. Error"=se, "z value"=zvalue, "Pr(>|z|)"=pvalue)
names(coeftable) <- ctable_names
row.names(coeftable) <- names(coef)
attr(se, "type") = attr(coeftable, "type") = "Standard"
res$coeftable = coeftable
res$se = se
}
if(nchar(warning_msg) > 0){
if(warn){
warning(warning_msg, call. = FALSE)
options("fixest_last_warning" = proc.time())
}
}
n = length(y)
res$nobs = n
res$nparams = res$nparams - collin.adj
df_k = res$nparams
# r2s
if(!cpp_isConstant(res$fitted.values)){
res$sq.cor = stats::cor(y, res$fitted.values)**2
} else {
res$sq.cor = NA
}
# deviance
res$deviance = dev
# simpler form for poisson
if(family_equiv == "poisson"){
if(isWeight){
if(mem.clean){
gc()
}
res$loglik = sum( (y * eta - mu - cpppar_lgamma(y + 1, nthreads)) * weights)
} else {
# lfact is later used in model0 and is costly to compute
lfact = sum(rpar_lgamma(y + 1, env))
assign("lfactorial", lfact, env)
res$loglik = sum(y * eta - mu) - lfact
}
} else {
res$loglik = family$aic(y = y, n = rep.int(1, n), mu = res$fitted.values, wt = weights, dev = dev) / -2
}
if(lean_internal){
return(res)
}
# The pseudo_r2
if(family_equiv %in% c("poisson", "logit")){
model0 = get_model_null(env, theta.init = NULL)
ll_null = model0$loglik
fitted_null = linkinv(model0$constant)
} else {
if(verbose >= 1) cat("Null model:\n")
if(mem.clean){
gc()
}
model_null = feglm.fit(X = matrix(1, nrow = n, ncol = 1), fixef_df = NULL, env = env, lean_internal = TRUE)
ll_null = model_null$loglik
fitted_null = model_null$fitted.values
}
res$ll_null = ll_null
res$pseudo_r2 = 1 - (res$loglik - df_k)/(ll_null - 1)
# fixef info
if(isFixef){
if(onlyFixef){
res$sumFE = res$linear.predictors
} else {
res$sumFE = res$linear.predictors - cpppar_xbeta(X, res$coefficients, nthreads)
}
if(isOffset){
res$sumFE = res$sumFE - offset
}
}
# other
res$iterations = iter
res$family = family
class(res) = "fixest"
do_summary = get("do_summary", env)
if(do_summary){
se = get("se", env)
cluster = get("cluster", env)
lean = get("lean", env)
dof = get("dof", env)
summary_flags = get("summary_flags", env)
# To compute the RMSE and lean = TRUE
if(lean) res$ssr = cpp_ssq(res$residuals, weights)
res = summary(res, se = se, cluster = cluster, dof = dof, lean = lean, summary_flags = summary_flags)
}
return(res)
}
#' Fixed-effects maximum likelihood model
#'
#' This function estimates maximum likelihood models with any number of fixed-effects.
#'
#' @inheritParams feNmlm
#' @inherit feNmlm return details
#' @inheritSection feols Combining the fixed-effects
#' @inheritSection feols Lagging variables
#' @inheritSection feols Interactions
#' @inheritSection feols On standard-errors
#' @inheritSection feols Multiple estimations
#'
#' @param fml A formula representing the relation to be estimated. For example: \code{fml = z~x+y}. To include fixed-effects, insert them in this formula using a pipe: e.g. \code{fml = z~x+y|fixef_1+fixef_2}. Multiple estimations can be performed at once: for multiple dep. vars, wrap them in \code{c()}: ex \code{c(y1, y2)}. For multiple indep. vars, use the stepwise functions: ex \code{x1 + csw(x2, x3)}. The formula \code{fml = c(y1, y2) ~ x1 + cw0(x2, x3)} leads to 6 estimations, see details.
#' @param start Starting values for the coefficients. Can be: i) a numeric of length 1 (e.g. \code{start = 0}, the default), ii) a numeric vector of the exact same length as the number of variables, or iii) a named vector of any length (the names will be used to initialize the appropriate coefficients).
#'
#' @details
#' Note that the functions \code{\link[fixest]{feglm}} and \code{\link[fixest]{femlm}} provide the same results when using the same families but differ in that the latter is a direct maximum likelihood optimization (so the two can really have different convergence rates).
#'
#' @return
#' A \code{fixest} object. Note that \code{fixest} objects contain many elements and most of them are for internal use, they are presented here only for information. To access them, it is safer to use the user-level methods (e.g. \code{\link[fixest]{vcov.fixest}}, \code{\link[fixest]{resid.fixest}}, etc) or functions (like for instance \code{\link[fixest]{fitstat}} to access any fit statistic).
#' \item{nobs}{The number of observations.}
#' \item{fml}{The linear formula of the call.}
#' \item{call}{The call of the function.}
#' \item{method}{The method used to estimate the model.}
#' \item{family}{The family used to estimate the model.}
#' \item{fml_all}{A list containing different parts of the formula. Always contain the linear formula. Then, if relevant: \code{fixef}: the fixed-effects; \code{NL}: the non linear part of the formula.}
#' \item{nparams}{The number of parameters of the model.}
#' \item{fixef_vars}{The names of each fixed-effect dimension.}
#' \item{fixef_id}{The list (of length the number of fixed-effects) of the fixed-effects identifiers for each observation.}
#' \item{fixef_sizes}{The size of each fixed-effect (i.e. the number of unique identifiers for each fixed-effect dimension).}
#' \item{convStatus}{Logical, convergence status.}
#' \item{message}{The convergence message from the optimization procedures.}
#' \item{obs_selection}{(When relevant.) List containing vectors of integers. It represents the sequential selection of observation vis a vis the original data set.}
#' \item{fixef_removed}{(When relevant.) In the case there were fixed-effects and some observations were removed because of only 0/1 outcome within a fixed-effect, it gives the list (for each fixed-effect dimension) of the fixed-effect identifiers that were removed.}
#' \item{coefficients}{The named vector of estimated coefficients.}
#' \item{coeftable}{The table of the coefficients with their standard errors, z-values and p-values.}
#' \item{loglik}{The log-likelihood.}
#' \item{iterations}{Number of iterations of the algorithm.}
#' \item{ll_null}{Log-likelihood of the null model (i.e. with the intercept only).}
#' \item{ll_fe_only}{Log-likelihood of the model with only the fixed-effects.}
#' \item{ssr_null}{Sum of the squared residuals of the null model (containing only the intercept).}
#' \item{pseudo_r2}{The adjusted pseudo R2.}
#' \item{fitted.values}{The fitted values are the expected value of the dependent variable for the fitted model: that is \eqn{E(Y|X)}.}
#' \item{residuals}{The residuals (y minus the fitted values).}
#' \item{sq.cor}{Squared correlation between the dependent variable and the expected predictor (i.e. fitted.values) obtained by the estimation.}
#' \item{hessian}{The Hessian of the parameters.}
#' \item{cov.unscaled}{The variance-covariance matrix of the parameters.}
#' \item{se}{The standard-error of the parameters.}
#' \item{scores}{The matrix of the scores (first derivative for each observation).}
#' \item{residuals}{The difference between the dependent variable and the expected predictor.}
#' \item{sumFE}{The sum of the fixed-effects coefficients for each observation.}
#' \item{offset}{(When relevant.) The offset formula.}
#' \item{weights}{(When relevant.) The weights formula.}
#'
#'
#' @seealso
#' See also \code{\link[fixest]{summary.fixest}} to see the results with the appropriate standard-errors, \code{\link[fixest]{fixef.fixest}} to extract the fixed-effects coefficients, and the function \code{\link[fixest]{etable}} to visualize the results of multiple estimations.
#' And other estimation methods: \code{\link[fixest]{feols}}, \code{\link[fixest]{feglm}}, \code{\link[fixest:feglm]{fepois}}, \code{\link[fixest]{feNmlm}}.
#'
#' @author
#' Laurent Berge
#'
#' @references
#'
#' Berge, Laurent, 2018, "Efficient estimation of maximum likelihood models with multiple fixed-effects: the R package FENmlm." CREA Discussion Papers, 13 (\url{https://wwwen.uni.lu/content/download/110162/1299525/file/2018_13}).
#'
#' For models with multiple fixed-effects:
#'
#' Gaure, Simen, 2013, "OLS with multiple high dimensional category variables", Computational Statistics & Data Analysis 66 pp. 8--18
#'
#' On the unconditional Negative Binomial model:
#'
#' Allison, Paul D and Waterman, Richard P, 2002, "Fixed-Effects Negative Binomial Regression Models", Sociological Methodology 32(1) pp. 247--265
#'
#' @examples
#'
#' # Load trade data
#' data(trade)
#'
#' # We estimate the effect of distance on trade => we account for 3 fixed-effects
#' # 1) Poisson estimation
#' est_pois = femlm(Euros ~ log(dist_km) | Origin + Destination + Product, trade)
#'
#' # 2) Log-Log Gaussian estimation (with same FEs)
#' est_gaus = update(est_pois, log(Euros+1) ~ ., family = "gaussian")
#'
#' # Comparison of the results using the function etable
#' etable(est_pois, est_gaus)
#' # Now using two way clustered standard-errors
#' etable(est_pois, est_gaus, se = "twoway")
#'
#' # Comparing different types of standard errors
#' sum_hetero = summary(est_pois, se = "hetero")
#' sum_oneway = summary(est_pois, se = "cluster")
#' sum_twoway = summary(est_pois, se = "twoway")
#' sum_threeway = summary(est_pois, se = "threeway")
#'
#' etable(sum_hetero, sum_oneway, sum_twoway, sum_threeway)
#'
#'
#' #
#' # Multiple estimations:
#' #
#'
#' # 6 estimations
#' est_mult = femlm(c(Ozone, Solar.R) ~ Wind + Temp + csw0(Wind:Temp, Day), airquality)
#'
#' # We can display the results for the first lhs:
#' etable(est_mult[lhs = 1])
#'
#' # And now the second (access can be made by name)
#' etable(est_mult[lhs = "Solar.R"])
#'
#' # Now we focus on the two last right hand sides
#' # (note that .N can be used to specify the last item)
#' etable(est_mult[rhs = 2:.N])
#'
#' # Combining with split
#' est_split = fepois(c(Ozone, Solar.R) ~ sw(poly(Wind, 2), poly(Temp, 2)),
#' airquality, split = ~ Month)
#'
#' # You can display everything at once with the print method
#' est_split
#'
#' # Different way of displaying the results with "compact"
#' summary(est_split, "compact")
#'
#' # You can still select which sample/LHS/RHS to display
#' est_split[sample = 1:2, lhs = 1, rhs = 1]
#'
#'
#'
#'
femlm <- function(fml, data, family=c("poisson", "negbin", "logit", "gaussian"), start = 0, fixef, fixef.rm = "perfect",
offset, subset, split, fsplit, cluster, se, dof, panel.id, fixef.tol = 1e-5, fixef.iter = 10000,
nthreads = getFixest_nthreads(), lean = FALSE, verbose = 0, warn = TRUE,
notes = getFixest_notes(), theta.init, combine.quick, mem.clean = FALSE, only.env = FALSE, env, ...){
	# femlm is an alias of feNmlm without a non-linear part: all the work is
	# delegated to feNmlm. We only capture the calling environment and the
	# original call so that errors/messages are reported as coming from femlm.

	# Environment in which the arguments of the user's call are to be evaluated
	call_env_bis = new.env(parent = parent.frame())

	# The estimation is wrapped in try() so that we can relabel the error
	# message with the proper function name before stopping.
	res = try(feNmlm(fml = fml, data = data, family = family, fixef = fixef, fixef.rm = fixef.rm, offset = offset, subset = subset, split = split, fsplit = fsplit, cluster = cluster, se = se, dof = dof, panel.id = panel.id, start = start, fixef.tol=fixef.tol, fixef.iter=fixef.iter, nthreads=nthreads, lean = lean, verbose=verbose, warn=warn, notes=notes, theta.init = theta.init, combine.quick = combine.quick, mem.clean = mem.clean, origin = "femlm", mc_origin_bis = match.call(), call_env_bis = call_env_bis, only.env = only.env, env = env, ...), silent = TRUE)

	# inherits() is the robust way to detect a try-error (objects may carry
	# several classes, so comparing class() directly is fragile)
	if(inherits(res, "try-error")){
		stop(format_error_msg(res, "femlm"))
	}

	return(res)
}
#' @rdname femlm
fenegbin = function(fml, data, theta.init, start = 0, fixef, fixef.rm = "perfect", offset, subset, split, fsplit, cluster, se, dof, panel.id,
fixef.tol = 1e-5, fixef.iter = 10000, nthreads = getFixest_nthreads(), lean = FALSE,
verbose = 0, warn = TRUE, notes = getFixest_notes(), combine.quick, mem.clean = FALSE, only.env = FALSE, env, ...){
	# fenegbin is an alias of feNmlm with family fixed to "negbin".

	# The 'family' argument is hard-coded, so we forbid the user from passing
	# it (it would otherwise be silently swallowed by '...')
	if("family" %in% names(match.call())){
		stop("Function fenegbin does not accept the argument 'family'.")
	}

	# Environment in which the arguments of the user's call are to be evaluated
	call_env_bis = new.env(parent = parent.frame())

	# Wrapped in try() so the error message can be relabeled as from fenegbin
	res = try(feNmlm(fml = fml, data=data, family = "negbin", theta.init = theta.init, start = start, fixef = fixef, fixef.rm = fixef.rm, offset = offset, subset = subset, split = split, fsplit = fsplit, cluster = cluster, se = se, dof = dof, panel.id = panel.id, fixef.tol = fixef.tol, fixef.iter = fixef.iter, nthreads = nthreads, lean = lean, verbose = verbose, warn = warn, notes = notes, combine.quick = combine.quick, mem.clean = mem.clean, origin = "fenegbin", mc_origin_bis = match.call(), call_env_bis = call_env_bis, only.env = only.env, env = env, ...), silent = TRUE)

	# inherits() is the robust way to detect a try-error
	if(inherits(res, "try-error")){
		stop(format_error_msg(res, "fenegbin"))
	}

	return(res)
}
#' @rdname feglm
fepois = function(fml, data, offset, weights, subset, split, fsplit, cluster, se, dof, panel.id,
start = NULL, etastart = NULL, mustart = NULL,
fixef, fixef.rm = "perfect", fixef.tol = 1e-6, fixef.iter = 10000, collin.tol = 1e-10,
glm.iter = 25, glm.tol = 1e-8, nthreads = getFixest_nthreads(), lean = FALSE, warn = TRUE, notes = getFixest_notes(),
verbose = 0, combine.quick, mem.clean = FALSE, only.env = FALSE, env, ...){
	# fepois is an alias of feglm with family fixed to "poisson".

	# The 'family' argument is hard-coded, so we forbid the user from passing
	# it (it would otherwise be silently swallowed by '...')
	if("family" %in% names(match.call())){
		stop("Function fepois does not accept the argument 'family'.")
	}

	# Environment in which the arguments of the user's call are to be evaluated
	call_env_bis = new.env(parent = parent.frame())

	# Wrapped in try() so the error message can be relabeled as from fepois
	res = try(feglm(fml = fml, data = data, family = "poisson", offset = offset, weights = weights, subset = subset, split = split, fsplit = fsplit, cluster = cluster, se = se, dof = dof, panel.id = panel.id, start = start, etastart = etastart, mustart = mustart, fixef = fixef, fixef.rm = fixef.rm, fixef.tol = fixef.tol, fixef.iter = fixef.iter, collin.tol = collin.tol, glm.iter = glm.iter, glm.tol = glm.tol, nthreads = nthreads, lean = lean, warn = warn, notes = notes, verbose = verbose, combine.quick = combine.quick, mem.clean = mem.clean, origin_bis = "fepois", mc_origin_bis = match.call(), call_env_bis = call_env_bis, only.env=only.env, env=env, ...), silent = TRUE)

	# inherits() is the robust way to detect a try-error
	if(inherits(res, "try-error")){
		stop(format_error_msg(res, "fepois"))
	}

	return(res)
}
#' Fixed effects nonlinear maximum likelihood models
#'
#' This function estimates maximum likelihood models (e.g., Poisson or Logit) with non-linear in parameters right-hand-sides and is efficient to handle any number of fixed effects. If you do not use non-linear in parameters right-hand-side, use \code{\link[fixest]{femlm}} or \code{\link[fixest]{feglm}} instead (their design is simpler).
#'
#' @inheritParams summary.fixest
#' @inheritParams panel
#' @inheritSection feols Lagging variables
#' @inheritSection feols Interactions
#' @inheritSection feols On standard-errors
#' @inheritSection feols Multiple estimations
#'
#' @param fml A formula. This formula gives the linear formula to be estimated (it is similar to a \code{lm} formula), for example: \code{fml = z~x+y}. To include fixed-effects variables, insert them in this formula using a pipe (e.g. \code{fml = z~x+y|fixef_1+fixef_2}). To include a non-linear in parameters element, you must use the argument \code{NL.fml}. Multiple estimations can be performed at once: for multiple dep. vars, wrap them in \code{c()}: ex \code{c(y1, y2)}. For multiple indep. vars, use the stepwise functions: ex \code{x1 + csw(x2, x3)}. This leads to 6 estimations \code{fml = c(y1, y2) ~ x1 + cw0(x2, x3)}. See details.
#' @param start Starting values for the coefficients in the linear part (for the non-linear part, use NL.start). Can be: i) a numeric of length 1 (e.g. \code{start = 0}, the default), ii) a numeric vector of the exact same length as the number of variables, or iii) a named vector of any length (the names will be used to initialize the appropriate coefficients).
#' @param NL.fml A formula. If provided, this formula represents the non-linear part of the right hand side (RHS). Note that contrary to the \code{fml} argument, the coefficients must explicitly appear in this formula. For instance, it can be \code{~a*log(b*x + c*x^3)}, where \code{a}, \code{b}, and \code{c} are the coefficients to be estimated. Note that only the RHS of the formula is to be provided, and NOT the left hand side.
#' @param split A one sided formula representing a variable (eg \code{split = ~var}) or a vector. If provided, the sample is split according to the variable and one estimation is performed for each value of that variable. If you also want to include the estimation for the full sample, use the argument \code{fsplit} instead.
#' @param fsplit A one sided formula representing a variable (eg \code{split = ~var}) or a vector. If provided, the sample is split according to the variable and one estimation is performed for each value of that variable. This argument is the same as split but also includes the full sample as the first estimation.
#' @param data A data.frame containing the necessary variables to run the model. The variables of the non-linear right hand side of the formula are identified with this \code{data.frame} names. Can also be a matrix.
#' @param family Character scalar. It should provide the family. The possible values are "poisson" (Poisson model with log-link, the default), "negbin" (Negative Binomial model with log-link), "logit" (LOGIT model with log-link), "gaussian" (Gaussian model).
#' @param fixef Character vector. The names of variables to be used as fixed-effects. These variables should contain the identifier of each observation (e.g., think of it as a panel identifier). Note that the recommended way to include fixed-effects is to insert them directly in the formula.
#' @param subset A vector (logical or numeric) or a one-sided formula. If provided, then the estimation will be performed only on the observations defined by this argument.
#' @param NL.start (For NL models only) A list of starting values for the non-linear parameters. ALL the parameters are to be named and given a starting value. Example: \code{NL.start=list(a=1,b=5,c=0)}. Though, there is an exception: if all parameters are to be given the same starting value, you can use a numeric scalar.
#' @param lower (For NL models only) A list. The lower bound for each of the non-linear parameters that requires one. Example: \code{lower=list(b=0,c=0)}. Beware, if the estimated parameter is at his lower bound, then asymptotic theory cannot be applied and the standard-error of the parameter cannot be estimated because the gradient will not be null. In other words, when at its upper/lower bound, the parameter is considered as 'fixed'.
#' @param upper (For NL models only) A list. The upper bound for each of the non-linear parameters that requires one. Example: \code{upper=list(a=10,c=50)}. Beware, if the estimated parameter is at his upper bound, then asymptotic theory cannot be applied and the standard-error of the parameter cannot be estimated because the gradient will not be null. In other words, when at its upper/lower bound, the parameter is considered as 'fixed'.
#' @param NL.start.init (For NL models only) Numeric scalar. If the argument \code{NL.start} is not provided, or only partially filled (i.e. there remain non-linear parameters with no starting value), then the starting value of all remaining non-linear parameters is set to \code{NL.start.init}.
#' @param offset A formula or a numeric vector. An offset can be added to the estimation. If equal to a formula, it should be of the form (for example) \code{~0.5*x**2}. This offset is linearly added to the elements of the main formula 'fml'.
#' @param jacobian.method (For NL models only) Character scalar. Provides the method used to numerically compute the Jacobian of the non-linear part. Can be either \code{"simple"} or \code{"Richardson"}. Default is \code{"simple"}. See the help of \code{\link[numDeriv]{jacobian}} for more information.
#' @param useHessian Logical. Should the Hessian be computed in the optimization stage? Default is \code{TRUE}.
#' @param hessian.args List of arguments to be passed to function \code{\link[numDeriv]{genD}}. Defaults is missing. Only used with the presence of \code{NL.fml}.
#' @param opt.control List of elements to be passed to the optimization method \code{\link[stats]{nlminb}}. See the help page of \code{\link[stats]{nlminb}} for more information.
#' @param nthreads The number of threads. Can be: a) an integer lower than, or equal to, the maximum number of threads; b) 0: meaning all available threads will be used; c) a number strictly between 0 and 1 which represents the fraction of all threads to use. The default is to use 50\% of all threads. You can set permanently the number of threads used within this package using the function \code{\link[fixest]{setFixest_nthreads}}.
#' @param verbose Integer, default is 0. It represents the level of information that should be reported during the optimisation process. If \code{verbose=0}: nothing is reported. If \code{verbose=1}: the value of the coefficients and the likelihood are reported. If \code{verbose=2}: \code{1} + information on the computing time of the null model, the fixed-effects coefficients and the hessian are reported.
#' @param theta.init Positive numeric scalar. The starting value of the dispersion parameter if \code{family="negbin"}. By default, the algorithm uses as a starting value the theta obtained from the model with only the intercept.
#' @param fixef.rm Can be equal to "perfect" (default), "singleton", "both" or "none". Controls which observations are to be removed. If "perfect", then observations having a fixed-effect with perfect fit (e.g. only 0 outcomes in Poisson estimations) will be removed. If "singleton", all observations for which a fixed-effect appears only once will be removed. The meaning of "both" and "none" is direct.
#' @param fixef.tol Precision used to obtain the fixed-effects. Defaults to \code{1e-5}. It corresponds to the maximum absolute difference allowed between two coefficients of successive iterations. Argument \code{fixef.tol} cannot be lower than \code{10000*.Machine$double.eps}. Note that this parameter is dynamically controlled by the algorithm.
#' @param fixef.iter Maximum number of iterations in fixed-effects algorithm (only in use for 2+ fixed-effects). Default is 10000.
#' @param deriv.iter Maximum number of iterations in the algorithm to obtain the derivative of the fixed-effects (only in use for 2+ fixed-effects). Default is 1000.
#' @param deriv.tol Precision used to obtain the fixed-effects derivatives. Defaults to \code{1e-4}. It corresponds to the maximum absolute difference allowed between two coefficients of successive iterations. Argument \code{deriv.tol} cannot be lower than \code{10000*.Machine$double.eps}.
#' @param warn Logical, default is \code{TRUE}. Whether warnings should be displayed (concerns warnings relating to convergence state).
#' @param notes Logical. By default, two notes are displayed: when NAs are removed (to show additional information) and when some observations are removed because of only 0 (or 0/1) outcomes in a fixed-effect setup (in Poisson/Neg. Bin./Logit models). To avoid displaying these messages, you can set \code{notes = FALSE}. You can remove these messages permanently by using \code{setFixest_notes(FALSE)}.
#' @param combine.quick Logical. When you combine different variables to transform them into a single fixed-effects you can do e.g. \code{y ~ x | paste(var1, var2)}. The algorithm provides a shorthand to do the same operation: \code{y ~ x | var1^var2}. Because pasting variables is a costly operation, the internal algorithm may use a numerical trick to hasten the process. The cost of doing so is that you lose the labels. If you are interested in getting the value of the fixed-effects coefficients after the estimation, you should use \code{combine.quick = FALSE}. By default it is equal to \code{FALSE} if the number of observations is lower than 50,000, and to \code{TRUE} otherwise.
#' @param only.env (Advanced users.) Logical, default is \code{FALSE}. If \code{TRUE}, then only the environment used to make the estimation is returned.
#' @param mem.clean Logical, default is \code{FALSE}. Only to be used if the data set is large compared to the available RAM. If \code{TRUE} then intermediary objects are removed as much as possible and \code{\link[base]{gc}} is run before each substantial C++ section in the internal code to avoid memory issues.
#' @param lean Logical, default is \code{FALSE}. If \code{TRUE} then all large objects are removed from the returned result: this will save memory but will block the possibility to use many methods. It is recommended to use the arguments \code{se} or \code{cluster} to obtain the appropriate standard-errors at estimation time, since obtaining different SEs won't be possible afterwards.
#' @param env (Advanced users.) A \code{fixest} environment created by a \code{fixest} estimation with \code{only.env = TRUE}. Default is missing. If provided, the data from this environment will be used to perform the estimation.
#' @param ... Not currently used.
#'
#' @details
#' This function estimates maximum likelihood models where the conditional expectations are as follows:
#'
#' Gaussian likelihood:
#' \deqn{E(Y|X)=X\beta}{E(Y|X) = X*beta}
#' Poisson and Negative Binomial likelihoods:
#' \deqn{E(Y|X)=\exp(X\beta)}{E(Y|X) = exp(X*beta)}
#' where in the Negative Binomial there is the parameter \eqn{\theta}{theta} used to model the variance as \eqn{\mu+\mu^2/\theta}{mu+mu^2/theta}, with \eqn{\mu}{mu} the conditional expectation.
#' Logit likelihood:
#' \deqn{E(Y|X)=\frac{\exp(X\beta)}{1+\exp(X\beta)}}{E(Y|X) = exp(X*beta) / (1 + exp(X*beta))}
#'
#' When there are one or more fixed-effects, the conditional expectation can be written as:
#' \deqn{E(Y|X) = h(X\beta+\sum_{k}\sum_{m}\gamma_{m}^{k}\times C_{im}^{k}),}
#' where \eqn{h(.)} is the function corresponding to the likelihood function as shown before. \eqn{C^k} is the matrix associated to fixed-effect dimension \eqn{k} such that \eqn{C^k_{im}} is equal to 1 if observation \eqn{i} is of category \eqn{m} in the fixed-effect dimension \eqn{k} and 0 otherwise.
#'
#' When there are non linear in parameters functions, we can schematically split the set of regressors in two:
#' \deqn{f(X,\beta)=X^1\beta^1 + g(X^2,\beta^2)}
#' with first a linear term and then a non linear part expressed by the function g. That is, we add a non-linear term to the linear terms (which are \eqn{X*beta} and the fixed-effects coefficients). It is always better (more efficient) to put into the argument \code{NL.fml} only the non-linear in parameter terms, and add all linear terms in the \code{fml} argument.
#'
#' To estimate only a non-linear formula without even the intercept, you must exclude the intercept from the linear formula by using, e.g., \code{fml = z~0}.
#'
#' The over-dispersion parameter of the Negative Binomial family, theta, is capped at 10,000. If theta reaches this high value, it means that there is no overdispersion.
#'
#' @return
#' A \code{fixest} object. Note that \code{fixest} objects contain many elements and most of them are for internal use, they are presented here only for information. To access them, it is safer to use the user-level methods (e.g. \code{\link[fixest]{vcov.fixest}}, \code{\link[fixest]{resid.fixest}}, etc) or functions (like for instance \code{\link[fixest]{fitstat}} to access any fit statistic).
#' \item{coefficients}{The named vector of coefficients.}
#' \item{coeftable}{The table of the coefficients with their standard errors, z-values and p-values.}
#' \item{loglik}{The loglikelihood.}
#' \item{iterations}{Number of iterations of the algorithm.}
#' \item{nobs}{The number of observations.}
#' \item{nparams}{The number of parameters of the model.}
#' \item{call}{The call.}
#' \item{fml}{The linear formula of the call.}
#' \item{fml_all}{A list containing different parts of the formula. Always contain the linear formula. Then, if relevant: \code{fixef}: the fixed-effects; \code{NL}: the non linear part of the formula.}
#' \item{ll_null}{Log-likelihood of the null model (i.e. with the intercept only).}
#' \item{pseudo_r2}{The adjusted pseudo R2.}
#' \item{message}{The convergence message from the optimization procedures.}
#' \item{sq.cor}{Squared correlation between the dependent variable and the expected predictor (i.e. fitted.values) obtained by the estimation.}
#' \item{hessian}{The Hessian of the parameters.}
#' \item{fitted.values}{The fitted values are the expected value of the dependent variable for the fitted model: that is \eqn{E(Y|X)}.}
#' \item{cov.unscaled}{The variance-covariance matrix of the parameters.}
#' \item{se}{The standard-error of the parameters.}
#' \item{scores}{The matrix of the scores (first derivative for each observation).}
#' \item{family}{The ML family that was used for the estimation.}
#' \item{residuals}{The difference between the dependent variable and the expected predictor.}
#' \item{sumFE}{The sum of the fixed-effects for each observation.}
#' \item{offset}{The offset formula.}
#' \item{NL.fml}{The nonlinear formula of the call.}
#' \item{bounds}{Whether the coefficients were upper or lower bounded. -- This can only be the case when a non-linear formula is included and the arguments 'lower' or 'upper' are provided.}
#' \item{isBounded}{The logical vector that gives for each coefficient whether it was bounded or not. This can only be the case when a non-linear formula is included and the arguments 'lower' or 'upper' are provided.}
#' \item{fixef_vars}{The names of each fixed-effect dimension.}
#' \item{fixef_id}{The list (of length the number of fixed-effects) of the fixed-effects identifiers for each observation.}
#' \item{fixef_sizes}{The size of each fixed-effect (i.e. the number of unique identifiers for each fixed-effect dimension).}
#' \item{obs_selection}{(When relevant.) List containing vectors of integers. It represents the sequential selection of observation vis a vis the original data set.}
#' \item{fixef_removed}{In the case there were fixed-effects and some observations were removed because of only 0/1 outcome within a fixed-effect, it gives the list (for each fixed-effect dimension) of the fixed-effect identifiers that were removed.}
#' \item{theta}{In the case of a negative binomial estimation: the overdispersion parameter.}
#'
#' @seealso
#' See also \code{\link[fixest]{summary.fixest}} to see the results with the appropriate standard-errors, \code{\link[fixest]{fixef.fixest}} to extract the fixed-effects coefficients, and the function \code{\link[fixest]{etable}} to visualize the results of multiple estimations.
#'
#' And other estimation methods: \code{\link[fixest]{feols}}, \code{\link[fixest]{femlm}}, \code{\link[fixest]{feglm}}, \code{\link[fixest:feglm]{fepois}}, \code{\link[fixest:femlm]{fenegbin}}.
#'
#' @author
#' Laurent Berge
#'
#' @references
#'
#' Berge, Laurent, 2018, "Efficient estimation of maximum likelihood models with multiple fixed-effects: the R package FENmlm." CREA Discussion Papers, 13 (\url{https://wwwen.uni.lu/content/download/110162/1299525/file/2018_13}).
#'
#' For models with multiple fixed-effects:
#'
#' Gaure, Simen, 2013, "OLS with multiple high dimensional category variables", Computational Statistics & Data Analysis 66 pp. 8--18
#'
#' On the unconditional Negative Binomial model:
#'
#' Allison, Paul D and Waterman, Richard P, 2002, "Fixed-Effects Negative Binomial Regression Models", Sociological Methodology 32(1) pp. 247--265
#'
#' @examples
#'
#' # This section covers only non-linear in parameters examples
#' # For linear relationships: use femlm or feglm instead
#'
#' # Generating data for a simple example
#' set.seed(1)
#' n = 100
#' x = rnorm(n, 1, 5)**2
#' y = rnorm(n, -1, 5)**2
#' z1 = rpois(n, x*y) + rpois(n, 2)
#' base = data.frame(x, y, z1)
#'
#' # Estimating a 'linear' relation:
#' est1_L = femlm(z1 ~ log(x) + log(y), base)
#' # Estimating the same 'linear' relation using a 'non-linear' call
#' est1_NL = feNmlm(z1 ~ 1, base, NL.fml = ~a*log(x)+b*log(y), NL.start = list(a=0, b=0))
#' # we compare the estimates with the function etable (they are identical)
#' etable(est1_L, est1_NL)
#'
#' # Now generating a non-linear relation (E(z2) = x + y + 1):
#' z2 = rpois(n, x + y) + rpois(n, 1)
#' base$z2 = z2
#'
#' # Estimation using this non-linear form
#' est2_NL = feNmlm(z2 ~ 0, base, NL.fml = ~log(a*x + b*y),
#' NL.start = 2, lower = list(a=0, b=0))
#' # we can't estimate this relation linearly
#' # => closest we can do:
#' est2_L = femlm(z2 ~ log(x) + log(y), base)
#'
#' # Difference between the two models:
#' etable(est2_L, est2_NL)
#'
#' # Plotting the fits:
#' plot(x, z2, pch = 18)
#' points(x, fitted(est2_L), col = 2, pch = 1)
#' points(x, fitted(est2_NL), col = 4, pch = 2)
#'
#'
feNmlm = function(fml, data, family=c("poisson", "negbin", "logit", "gaussian"), NL.fml, fixef, fixef.rm = "perfect", NL.start, lower, upper, NL.start.init, offset, subset, split, fsplit, cluster, se, dof, panel.id, start = 0, jacobian.method="simple", useHessian = TRUE, hessian.args = NULL, opt.control = list(), nthreads = getFixest_nthreads(), lean = FALSE, verbose = 0, theta.init, fixef.tol = 1e-5, fixef.iter = 10000, deriv.tol = 1e-4, deriv.iter = 1000, warn = TRUE, notes = getFixest_notes(), combine.quick, mem.clean = FALSE, only.env = FALSE, env, ...){
# Main estimation routine for ML models (Poisson/NegBin/Logit/Gaussian) that
# may contain a non-linear-in-parameters part and fixed-effects.
# Overall flow:
#   1) build the estimation environment via fixest_env() (all argument
#      checking / data pre-processing happens there), or reuse the one
#      passed through 'env'
#   2) dispatch to the multiple-estimation drivers when relevant
#      (split samples, multiple fixed-effects, multiple LHS/RHS)
#   3) otherwise maximize the likelihood with stats::nlminb(), then compute
#      the Hessian, variance, fit statistics, and fill the 'fixest' object
time_start = proc.time()
if(missing(env)){
set_defaults("fixest_estimation")
call_env = new.env(parent = parent.frame())
# Errors thrown during the setup are caught here and reformatted below
env = try(fixest_env(fml = fml, data = data, family = family, NL.fml = NL.fml, fixef = fixef, fixef.rm = fixef.rm, NL.start = NL.start, lower = lower, upper = upper, NL.start.init = NL.start.init, offset = offset, subset = subset, split = split, fsplit = fsplit, cluster = cluster, se = se, dof = dof, panel.id = panel.id, linear.start = start, jacobian.method = jacobian.method, useHessian = useHessian, opt.control = opt.control, nthreads = nthreads, lean = lean, verbose = verbose, theta.init = theta.init, fixef.tol = fixef.tol, fixef.iter = fixef.iter, deriv.iter = deriv.iter, warn = warn, notes = notes, combine.quick = combine.quick, mem.clean = mem.clean, mc_origin = match.call(), call_env = call_env, computeModel0 = TRUE, ...), silent = TRUE)
} else if((r <- !is.environment(env)) || !isTRUE(env$fixest_env)) {
stop("Argument 'env' must be an environment created by a fixest estimation. Currently it is not ", ifelse(r, "an", "a 'fixest'"), " environment.")
}
check_arg(only.env, "logical scalar")
if(only.env){
return(env)
}
# Setup errors are rethrown with a cleaner message, attributed to the
# original user-facing function (feNmlm, femlm, fenegbin, ...)
if("try-error" %in% class(env)){
mc = match.call()
origin = ifelse(is.null(mc$origin), "feNmlm", mc$origin)
stop(format_error_msg(env, origin))
}
verbose = get("verbose", env)
if(verbose >= 2) cat("Setup in ", (proc.time() - time_start)[3], "s\n", sep="")
#
# Split ####
#
# One estimation per sub-sample => delegated to multi_split()
do_split = get("do_split", env)
if(do_split){
res = multi_split(env, feNmlm)
return(res)
}
#
# Multi fixef ####
#
# One estimation per fixed-effects specification => multi_fixef()
do_multi_fixef = get("do_multi_fixef", env)
if(do_multi_fixef){
res = multi_fixef(env, feNmlm)
return(res)
}
#
# Multi LHS and RHS ####
#
# One estimation per LHS/RHS combination => multi_LHS_RHS()
do_multi_lhs = get("do_multi_lhs", env)
do_multi_rhs = get("do_multi_rhs", env)
if(do_multi_lhs || do_multi_rhs){
res = multi_LHS_RHS(env, feNmlm)
return(res)
}
#
# Regular estimation ####
#
# Objects needed for optimization + misc
start = get("start", env)
lower = get("lower", env)
upper = get("upper", env)
gradient = get("gradient", env)
hessian = get("hessian", env)
family = get("family", env)
isLinear = get("isLinear", env)
isNonLinear = get("isNL", env)
opt.control = get("opt.control", env)
lhs = get("lhs", env)
family = get("family", env)
famFuns = get("famFuns", env)
params = get("params", env)
isFixef = get("isFixef", env)
# TRUE when the model contains fixed-effects and nothing else
onlyFixef = !isLinear && !isNonLinear && isFixef
#
# Model 0 + theta init
#
# Null model (intercept only): used for the pseudo-R2 and, for the negative
# binomial, to initialize the dispersion parameter theta
theta.init = get("theta.init", env)
model0 = get_model_null(env, theta.init)
# For the negative binomial:
if(family == "negbin"){
theta.init = get("theta.init", env)
if(is.null(theta.init)){
theta.init = model0$theta
}
# theta is estimated jointly with the other coefficients, under the
# name ".theta"; it is capped at 10,000 (no overdispersion beyond that)
params = c(params, ".theta")
start = c(start, theta.init)
names(start) = params
upper = c(upper, 10000)
lower = c(lower, 1e-3)
assign("params", params, env)
}
assign("model0", model0, env)
# the result
res = get("res", env)
# NO VARIABLE -- ONLY FIXED-EFFECTS
if(onlyFixef){
if(family == "negbin"){
stop("To estimate the negative binomial model, you need at least one variable. (The estimation of the model with only the fixed-effects is not implemented.)")
}
res = femlm_only_clusters(env)
res$onlyFixef = TRUE
return(res)
}
# warnings => to avoid accumulation, but should appear even if the user stops the algorithm
on.exit(warn_fixef_iter(env))
#
# Maximizing the likelihood
#
# femlm_ll is the negative log-likelihood => nlminb minimizes it
opt <- try(stats::nlminb(start=start, objective=femlm_ll, env=env, lower=lower, upper=upper, gradient=gradient, hessian=hessian, control=opt.control), silent = TRUE)
if("try-error" %in% class(opt)){
# We return the coefficients (can be interesting for debugging)
iter = get("iter", env)
origin = get("origin", env)
warning_msg = paste0("[", origin, "] Optimization failed at iteration ", iter, ". Reason: ", gsub("^[^\n]+\n *(.+\n)", "\\1", opt))
if(!"coef_evaluated" %in% names(env)){
# big problem right from the start
stop(warning_msg)
} else {
coef = get("coef_evaluated", env)
warning(warning_msg, " Last evaluated coefficients returned.", call. = FALSE)
return(coef)
}
} else {
# Only nlminb's codes 3/4/5 are considered proper convergence
convStatus = TRUE
warning_msg = ""
if(!opt$message %in% c("X-convergence (3)", "relative convergence (4)", "both X-convergence and relative convergence (5)")){
warning_msg = " The optimization algorithm did not converge, the results are not reliable."
convStatus = FALSE
}
coef <- opt$par
}
# The Hessian
hessian = femlm_hessian(coef, env = env)
# we add the names of the non linear variables in the hessian
if(isNonLinear || family == "negbin"){
dimnames(hessian) = list(params, params)
}
# we create the Hessian without the bounded parameters
hessian_noBounded = hessian
# Handling the bounds
if(!isNonLinear){
NL.fml = NULL
bounds = NULL
isBounded = NULL
} else {
nonlinear.params = get("nonlinear.params", env)
# we report the bounds & if the estimated parameters are bounded
upper_bound = upper[nonlinear.params]
lower_bound = lower[nonlinear.params]
# 1: are the estimated parameters at their bounds?
coef_NL = coef[nonlinear.params]
isBounded = rep(FALSE, length(params))
isBounded[1:length(coef_NL)] = (coef_NL == lower_bound) | (coef_NL == upper_bound)
# 2: we save the bounds
upper_bound_small = upper_bound[is.finite(upper_bound)]
lower_bound_small = lower_bound[is.finite(lower_bound)]
bounds = list()
if(length(upper_bound_small) > 0) bounds$upper = upper_bound_small
if(length(lower_bound_small) > 0) bounds$lower = lower_bound_small
if(length(bounds) == 0){
bounds = NULL
}
# 3: we update the Hessian (basically, we drop the bounded element)
if(any(isBounded)){
hessian_noBounded = hessian[-which(isBounded), -which(isBounded), drop = FALSE]
boundText = ifelse(coef_NL == upper_bound, "Upper bounded", "Lower bounded")[isBounded]
attr(isBounded, "type") = boundText
}
}
# Variance
# A singular information matrix signals collinearity => NA variance
var <- NULL
try(var <- solve(hessian_noBounded), silent = TRUE)
if(is.null(var)){
warning_msg = paste(warning_msg, "The information matrix is singular: presence of collinearity. Use function collinearity() to pinpoint the problems.")
var = hessian_noBounded * NA
se = diag(var)
} else {
se = diag(var)
se[se < 0] = NA
se = sqrt(se)
}
# Warning message
if(nchar(warning_msg) > 0){
if(warn){
warning("[femlm]:", warning_msg, call. = FALSE)
options("fixest_last_warning" = proc.time())
}
}
# To handle the bounded coefficient, we set its SE to NA
if(any(isBounded)){
se = se[params]
names(se) = params
}
zvalue <- coef/se
pvalue <- 2*pnorm(-abs(zvalue))
# We add the information on the bound for the se & update the var to drop the bounded vars
se_format = se
if(any(isBounded)){
se_format[!isBounded] = decimalFormat(se_format[!isBounded])
se_format[isBounded] = boundText
}
coeftable <- data.frame("Estimate"=coef, "Std. Error"=se_format, "z value"=zvalue, "Pr(>|z|)"=pvalue, stringsAsFactors = FALSE)
names(coeftable) <- c("Estimate", "Std. Error", "z value", "Pr(>|z|)")
row.names(coeftable) <- params
attr(se, "type") = attr(coeftable, "type") = "Standard"
mu_both = get_mu(coef, env, final = TRUE)
mu = mu_both$mu
exp_mu = mu_both$exp_mu
# Computing the pseudo R2
loglik <- -opt$objective # minus sign: nlminb minimizes the negative log-likelihood
ll_null <- model0$loglik
# dummies are constrained, they don't have full dof (cause you need to take one value off for unicity)
# this is an approximation, in some cases there can be more than one ref. But good approx.
nparams = res$nparams
pseudo_r2 <- 1 - (loglik - nparams + 1) / ll_null
# Computing the residuals
expected.predictor = famFuns$expected.predictor(mu, exp_mu, env)
residuals = lhs - expected.predictor
# Computing the squared correlation
if(cpp_isConstant(expected.predictor)){
sq.cor = NA
} else {
sq.cor = stats::cor(lhs, expected.predictor)**2
}
ssr_null = cpp_ssr_null(lhs)
# The scores
scores = femlm_scores(coef, env)
if(isNonLinear){
# we add the names of the non linear params in the score
colnames(scores) = params
}
n = length(lhs)
# Saving: fill the 'fixest' result object
res$coefficients = coef
res$coeftable = coeftable
res$loglik = loglik
res$iterations = opt$iterations
res$ll_null = ll_null
res$ssr_null = ssr_null
res$pseudo_r2 = pseudo_r2
res$message = opt$message
res$convStatus = convStatus
res$sq.cor = sq.cor
res$fitted.values = expected.predictor
res$hessian = hessian
res$cov.unscaled = var
res$se = se
res$scores = scores
res$family = family
res$residuals = residuals
# The value of mu (if cannot be recovered from fitted())
if(family == "logit"){
qui_01 = expected.predictor %in% c(0, 1)
if(any(qui_01)){
res$mu = mu
}
} else if(family %in% c("poisson", "negbin")){
qui_0 = expected.predictor == 0
if(any(qui_0)){
res$mu = mu
}
}
if(!is.null(bounds)){
res$bounds = bounds
res$isBounded = isBounded
}
# Fixed-effects
if(isFixef){
# For Poisson the fixed-effects coefficients live in exp() space
useExp_fixefCoef = family %in% c("poisson")
sumFE = attr(mu, "sumFE")
if(useExp_fixefCoef){
sumFE = rpar_log(sumFE, env)
}
res$sumFE = sumFE
# The LL and SSR with FE only
if("ll_fe_only" %in% names(env)){
res$ll_fe_only = get("ll_fe_only", env)
res$ssr_fe_only = get("ssr_fe_only", env)
} else {
# we need to compute it
# indicator of whether we compute the exp(mu)
useExp = family %in% c("poisson", "logit", "negbin")
# mu, using the offset
if(!is.null(res$offset)){
mu_noDum = res$offset
} else {
mu_noDum = 0
}
if(length(mu_noDum) == 1) mu_noDum = rep(mu_noDum, n)
exp_mu_noDum = NULL
if(useExp_fixefCoef){
exp_mu_noDum = rpar_exp(mu_noDum, env)
}
assign("fixef.tol", 1e-4, env) # no need of high precision here
dummies = getDummies(mu_noDum, exp_mu_noDum, env, coef)
exp_mu = NULL
if(useExp_fixefCoef){
# despite being called mu, it is in fact exp(mu)!!!
exp_mu = exp_mu_noDum*dummies
mu = rpar_log(exp_mu, env)
} else {
mu = mu_noDum + dummies
if(useExp){
exp_mu = rpar_exp(mu, env)
}
}
res$ll_fe_only = famFuns$ll(lhs, mu, exp_mu, env, coef)
ep = famFuns$expected.predictor(mu, exp_mu, env)
res$ssr_fe_only = cpp_ssq(lhs - ep)
}
}
if(family == "negbin"){
theta = coef[".theta"]
res$theta = theta
if(notes && theta > 1000){
message("Very high value of theta (", theta, "). There is no sign of overdispersion, you may consider a Poisson model.")
}
}
class(res) <- "fixest"
if(verbose > 0){
cat("\n")
}
# On-the-fly summary (requested at estimation time via se/cluster/dof args)
do_summary = get("do_summary", env)
if(do_summary){
se = get("se", env)
cluster = get("cluster", env)
lean = get("lean", env)
dof = get("dof", env)
summary_flags = get("summary_flags", env)
# To compute the RMSE and lean = TRUE
if(lean) res$ssr = cpp_ssq(res$residuals)
res = summary(res, se = se, cluster = cluster, dof = dof, lean = lean, summary_flags = summary_flags)
}
return(res)
}
####
#### Delayed Warnings ####
####
warn_fixef_iter = function(env){
# Delayed warning: reports how many times the iteration limit was hit
# while computing the fixed-effects and their derivatives. Silently does
# nothing when the user disabled warnings (warn = FALSE).
max_iter_fe = get("fixef.iter", env)
n_hit_fe = get("fixef.iter.limit_reached", env)
fun_origin = get("origin", env)
do_warn = get("warn", env)
if(!do_warn) return(invisible(NULL))
any_hit = FALSE
full_msg = ""
if(n_hit_fe > 0){
any_hit = TRUE
count_txt = ifelse(n_hit_fe > 1, paste0(" (", n_hit_fe, " times.)"), " (Once.)")
full_msg = paste0(fun_origin, ": [Getting the fixed-effects] iteration limit reached (", max_iter_fe, ").", count_txt)
}
# Same report for the fixed-effects derivatives
max_iter_deriv = get("deriv.iter", env)
n_hit_deriv = get("deriv.iter.limit_reached", env)
if(n_hit_deriv > 0){
# if a first message exists, align the second one under it
lead = ifelse(any_hit, paste0("\n", sprintf("% *s", nchar(fun_origin) + 2, " ")), paste0(fun_origin, ": "))
count_txt = ifelse(n_hit_deriv > 1, paste0(" (", n_hit_deriv, " times.)"), " (Once.)")
full_msg = paste0(full_msg, lead, "[Getting fixed-effects derivatives] iteration limit reached (", max_iter_deriv, ").", count_txt)
any_hit = TRUE
}
if(any_hit){
warning(full_msg, call. = FALSE, immediate. = TRUE)
}
}
warn_step_halving = function(env){
# Delayed warning: tells the user how many times step halving was
# triggered in feglm because the deviance was not finite.
# Does nothing when warnings are disabled (warn = FALSE).
n_halving = get("nb_sh", env)
do_warn = get("warn", env)
if(!do_warn) return(invisible(NULL))
if(n_halving > 0){
count_txt = ifelse(n_halving > 1, paste0(n_halving, " times"), "once")
warning("feglm: Step halving due to non-finite deviance (", count_txt, ").", call. = FALSE, immediate. = TRUE)
}
}
format_error_msg = function(x, origin){
# Lightly reformat an error message caught during the estimation setup.
# - internal fixest errors: strip the "Error in ..." header line
# - standard R errors (object not found / memory): pass through untouched
# - anything else: append a request to report the problem
# LATER:
# - for object not found: provide a better error msg by calling the name of the missing
#   argument => likely I'll need a match.call argument
msg = gsub("\n+$", "", x)
header_pattern = "^Error (in|:|: in) (fe|fixest|fun)[^\n]+\n"
if(grepl(header_pattern, msg)){
# drop the header, keep only the core message (3rd capture group)
out = gsub(paste0(header_pattern, " *(.+)"), "\\3", msg)
} else if(grepl("[Oo]bject '.+' not found", msg) || grepl("memory|cannot allocate", msg)) {
out = msg
} else {
out = paste0(msg, "\nThis error was unforeseen by the author of the function ", origin, ". If you think your call to the function is legitimate, could you report?")
}
out
}
####
#### Multiple estimation tools ####
####
multi_split = function(env, fun){
# Runs the estimation function `fun` separately on each sub-sample defined
# by the `split` variable stored in the environment (plus, when split.full
# is TRUE, a first estimation on the full sample), and gathers the results
# into a fixest_multi object via setup_multi().
#
# Fix vs. previous version: removed the dead locals `index`, `all_names`
# (initialized to NULL then unconditionally reassigned below) and `is_multi`
# (never used).
split = get("split", env)
split.full = get("split.full", env)
split.items = get("split.items", env)
split.name = get("split.name", env)
# avoid re-dispatching to the split driver when fun() re-enters
assign("do_split", FALSE, env)
res_all = list()
n_split = length(split.items)
for(i in 0:n_split){
# i == 0 is reserved for the full-sample estimation
if(i == 0){
if(split.full){
my_env = reshape_env(env)
my_res = fun(env = my_env)
} else {
next
}
} else {
# keep only the observations of the i-th sub-sample
my_res = fun(env = reshape_env(env, obs2keep = which(split == i)))
}
res_all[[length(res_all) + 1]] = my_res
}
if(split.full){
split.items = c("Full sample", split.items)
}
index = list(sample = length(res_all))
all_names = list(sample = split.items, split.name = split.name)
# result
res_multi = setup_multi(index, all_names, res_all)
return(res_multi)
}
multi_LHS_RHS = function(env, fun){
# Driver for estimations with multiple left-hand sides and/or stepwise
# right-hand sides. For each (LHS, RHS) pair it rebuilds the design matrix,
# removes the NAs specific to that pair, reshapes the environment and calls
# the estimation function `fun`. Results are gathered in a fixest_multi.
do_multi_lhs = get("do_multi_lhs", env)
do_multi_rhs = get("do_multi_rhs", env)
# avoid re-dispatching here when fun() re-enters
assign("do_multi_lhs", FALSE, env)
assign("do_multi_rhs", FALSE, env)
nthreads = get("nthreads", env)
# IMPORTANT NOTE:
# contrary to feols, the preprocessing is only a small fraction of the
# computing time in ML models
# Therefore we don't need to optimize processing as hard as in FEOLS
# because the gains are only marginal
fml = get("fml", env)
# LHS
lhs_names = get("lhs_names", env)
lhs = get("lhs", env)
if(do_multi_lhs == FALSE){
# normalize to a list so the loop below handles both cases uniformly
lhs = list(lhs)
}
# RHS
if(do_multi_rhs){
# stepwise information: full formulas, stepwise parts, cumulative flag
rhs_info_stepwise = get("rhs_info_stepwise", env)
multi_rhs_fml_full = rhs_info_stepwise$fml_all_full
multi_rhs_fml_sw = rhs_info_stepwise$fml_all_sw
multi_rhs_cumul = rhs_info_stepwise$is_cumul
linear_core = get("linear_core", env)
rhs_sw = get("rhs_sw", env)
} else {
# single RHS: wrap it in one-element structures (1 == "no variables")
multi_rhs_fml_full = list(.xpd(rhs = fml[[3]]))
multi_rhs_cumul = FALSE
linear.mat = get("linear.mat", env)
linear_core = list(left = linear.mat, right = 1)
rhs_sw = list(1)
}
isLinear_left = length(linear_core$left) > 1
isLinear_right = length(linear_core$right) > 1
n_lhs = length(lhs)
n_rhs = length(rhs_sw)
res = vector("list", n_lhs * n_rhs)
rhs_names = sapply(multi_rhs_fml_full, function(x) as.character(x)[[2]])
for(i in seq_along(lhs)){
for(j in seq_along(rhs_sw)){
# reshaping the env => taking care of the NAs
# Forming the RHS
my_rhs = linear_core[1]
if(multi_rhs_cumul){
# cumulative stepwise: include all stepwise parts up to j
my_rhs[1 + 1:j] = rhs_sw[1:j]
} else {
my_rhs[2] = rhs_sw[j]
}
if(isLinear_right){
my_rhs[[length(my_rhs) + 1]] = linear_core$right
}
# drop the placeholder "1" elements (they carry no variable)
n_all = lengths(my_rhs)
if(any(n_all == 1)){
my_rhs = my_rhs[n_all > 1]
}
if(length(my_rhs) == 0){
my_rhs = 1
} else {
my_rhs = do.call("cbind", my_rhs)
}
# NA/Inf flags for this specific (LHS, RHS) combination
if(length(my_rhs) == 1){
is_na_current = !is.finite(lhs[[i]])
} else {
is_na_current = !is.finite(lhs[[i]]) | cpppar_which_na_inf_mat(my_rhs, nthreads)$is_na_inf
}
my_fml = .xpd(lhs = lhs_names[i], rhs = multi_rhs_fml_full[[j]])
if(any(is_na_current)){
my_env = reshape_env(env, which(!is_na_current), lhs = lhs[[i]], rhs = my_rhs, fml_linear = my_fml)
} else {
# We still need to check the RHS (only 0/1)
my_env = reshape_env(env, lhs = lhs[[i]], rhs = my_rhs, fml_linear = my_fml, check_lhs = TRUE)
}
my_res = fun(env = my_env)
# results are stored row-major: one row per LHS, one column per RHS
res[[index_2D_to_1D(i, j, n_rhs)]] = my_res
}
}
# Meta information for fixest_multi
index = list(lhs = n_lhs, rhs = n_rhs)
all_names = list(lhs = lhs_names, rhs = rhs_names)
# result
res_multi = setup_multi(index, all_names, res)
return(res_multi)
}
multi_fixef = function(env, estfun){
# Driver for estimations with multiple (stepwise) fixed-effects
# specifications. For each specification it evaluates the fixed-effects
# variables, removes the NAs, drops the intercept from the linear part when
# fixed-effects are present, sets up the fixed-effects structures, and then
# calls the estimation function `estfun`. Results go into a fixest_multi.
#
# Fix vs. previous version: the intercept column index was computed with
# which("(Intercept)" %in% colnames(m)) which always returns 1 (since %in%
# yields a single logical here), so the FIRST column was dropped instead of
# the intercept whenever the intercept was not in first position. Replaced
# with which(colnames(m) == "(Intercept)").
# Honestly had I known it was so painful, I wouldn't have done it...
assign("do_multi_fixef", FALSE, env)
multi_fixef_fml_full = get("multi_fixef_fml_full", env)
combine.quick = get("combine.quick", env)
fixef.rm = get("fixef.rm", env)
family = get("family", env)
origin_type = get("origin_type", env)
nthreads = get("nthreads", env)
data = get("data", env)
n_fixef = length(multi_fixef_fml_full)
data_results = list()
for(i in 1:n_fixef){
fml_fixef = multi_fixef_fml_full[[i]]
if(length(all.vars(fml_fixef)) > 0){
#
# Evaluation of the fixed-effects
#
fixef_terms_full = fixef_terms(fml_fixef)
# fixef_terms_full computed in the formula section
fixef_terms = fixef_terms_full$fml_terms
# FEs
fixef_df = error_sender(prepare_df(fixef_terms_full$fe_vars, data, combine.quick),
"Problem evaluating the fixed-effects part of the formula:\n")
fixef_vars = names(fixef_df)
# Slopes
isSlope = any(fixef_terms_full$slope_flag != 0)
slope_vars_list = list(0)
if(isSlope){
slope_df = error_sender(prepare_df(fixef_terms_full$slope_vars, data),
"Problem evaluating the variables with varying slopes in the fixed-effects part of the formula:\n")
slope_flag = fixef_terms_full$slope_flag
slope_vars = fixef_terms_full$slope_vars
slope_vars_list = fixef_terms_full$slope_vars_list
# Further controls
not_numeric = !sapply(slope_df, is.numeric)
if(any(not_numeric)){
stop("In the fixed-effects part of the formula (i.e. in ", as.character(fml_fixef[2]), "), variables with varying slopes must be numeric. Currently variable", enumerate_items(names(slope_df)[not_numeric], "s.is.quote"), " not.")
}
# slope_flag: 0: no Varying slope // > 0: varying slope AND fixed-effect // < 0: varying slope WITHOUT fixed-effect
onlySlope = all(slope_flag < 0)
}
# fml update
fml_fixef = .xpd(rhs = fixef_terms)
#
# NA
#
# non-numeric/non-character FE variables (e.g. factors) are converted
# to character so NA detection via complete.cases() is reliable
for(j in seq_along(fixef_df)){
if(!is.numeric(fixef_df[[j]]) && !is.character(fixef_df[[j]])){
fixef_df[[j]] = as.character(fixef_df[[j]])
}
}
is_NA = !complete.cases(fixef_df)
if(isSlope){
# Convert to double
who_not_double = which(sapply(slope_df, is.integer))
for(j in who_not_double){
slope_df[[j]] = as.numeric(slope_df[[j]])
}
info = cpppar_which_na_inf_df(slope_df, nthreads)
if(info$any_na_inf){
is_NA = is_NA | info$is_na_inf
}
}
if(any(is_NA)){
# Remember that isFixef is FALSE so far => so we only change the reg vars
my_env = reshape_env(env = env, obs2keep = which(!is_NA))
# NA removal in fixef
fixef_df = fixef_df[!is_NA, , drop = FALSE]
if(isSlope){
slope_df = slope_df[!is_NA, , drop = FALSE]
}
} else {
my_env = new.env(parent = env)
}
# We remove the intercept from the linear part if needed
# (fixed-effects make an explicit intercept redundant)
if(get("do_multi_rhs", env)){
linear_core = get("linear_core", my_env)
if("(Intercept)" %in% colnames(linear_core$left)){
int_col = which(colnames(linear_core$left) == "(Intercept)")
if(ncol(linear_core$left) == 1){
linear_core$left = 1
} else {
linear_core$left = linear_core$left[, -int_col, drop = FALSE]
}
assign("linear_core", linear_core, my_env)
}
} else {
linear.mat = get("linear.mat", my_env)
if("(Intercept)" %in% colnames(linear.mat)){
int_col = which(colnames(linear.mat) == "(Intercept)")
if(ncol(linear.mat) == 1){
assign("linear.mat", 1, my_env)
} else {
assign("linear.mat", linear.mat[, -int_col, drop = FALSE], my_env)
}
}
}
# We assign the fixed-effects
lhs = get("lhs", my_env)
# We delay the computation by using isSplit = TRUE and split.full = FALSE
# Real QUF will be done in the last reshape env
# NOTE(review): slope_flag / slope_df are only defined when isSlope is
# TRUE; this relies on setup_fixef() not evaluating them otherwise.
info_fe = setup_fixef(fixef_df = fixef_df, lhs = lhs, fixef_vars = fixef_vars, fixef.rm = fixef.rm, family = family, isSplit = TRUE, split.full = FALSE, origin_type = origin_type, isSlope = isSlope, slope_flag = slope_flag, slope_df = slope_df, slope_vars_list = slope_vars_list, nthreads = nthreads)
fixef_id = info_fe$fixef_id
fixef_names = info_fe$fixef_names
fixef_sizes = info_fe$fixef_sizes
fixef_table = info_fe$fixef_table
sum_y_all = info_fe$sum_y_all
lhs = info_fe$lhs
obs2remove = info_fe$obs2remove
fixef_removed = info_fe$fixef_removed
message_fixef = info_fe$message_fixef
slope_variables = info_fe$slope_variables
slope_flag = info_fe$slope_flag
fixef_id_res = info_fe$fixef_id_res
fixef_sizes_res = info_fe$fixef_sizes_res
new_order = info_fe$new_order
assign("isFixef", TRUE, my_env)
assign("new_order_original", new_order, my_env)
assign("fixef_names", fixef_names, my_env)
assign("fixef_vars", fixef_vars, my_env)
assign_fixef_env(env, family, origin_type, fixef_id, fixef_sizes, fixef_table, sum_y_all, slope_flag, slope_variables, slope_vars_list)
#
# Formatting the fixef stuff from res
#
# fml & fixef_vars => other stuff will be taken care of in reshape
res = get("res", my_env)
res$fml_all$fixef = fml_fixef
res$fixef_vars = fixef_vars
if(isSlope){
res$fixef_terms = fixef_terms
}
assign("res", res, my_env)
#
# Last reshape
#
my_env_est = reshape_env(my_env, assign_fixef = TRUE)
} else {
# No fixed-effect // new.env is indispensable => otherwise multi RHS/LHS not possible
my_env_est = reshape_env(env)
}
data_results[[i]] = estfun(env = my_env_est)
}
# Meta information for fixest_multi
index = list(fixef = n_fixef)
fixef_names = sapply(multi_fixef_fml_full, function(x) as.character(x)[[2]])
all_names = list(fixef = fixef_names)
res_multi = setup_multi(index, all_names, data_results)
if("lhs" %in% names(attr(res_multi, "meta")$index)){
res_multi = res_multi[lhs = TRUE]
}
return(res_multi)
}
|
# Bubble plots: simulated absentee-ballot rejection shares vs. submission
# shares by state, by race. Requires the tidyverse (readr, dplyr, ggplot2)
# and scales (for `comma`).

# Load simulation output and aggregate the counts to the State x sim level.
df_sim <- read_rds(path = "model_fits/fit_simulated_70percent_turnout_by_state_allVBM_requested.Rds")
df_sim <- df_sim %>% group_by(State, sim) %>%
  summarise_all(list(sum = sum)) %>%
  ungroup()

# Per-simulation racial shares of voters / requests / submissions /
# rejections, then averaged (mean and sd) across simulations by state.
df_shares <- df_sim %>%
  transmute(
    State = State,
    sim = sim,
    voters_sum = voters_white_sum + voters_black_sum + voters_hispanic_sum + voters_asian_sum + voters_other_sum,
    share_voters_white = voters_white_sum / voters_sum,
    share_voters_black = voters_black_sum / voters_sum,
    share_voters_hispanic = voters_hispanic_sum / voters_sum,
    share_voters_asian = voters_asian_sum / voters_sum,
    share_voters_other = voters_other_sum / voters_sum,
    share_requested_white = n_requested_white_sum / n_requested_sum,
    share_requested_black = n_requested_black_sum / n_requested_sum,
    share_requested_hispanic = n_requested_hispanic_sum / n_requested_sum,
    share_requested_asian = n_requested_asian_sum / n_requested_sum,
    share_requested_other = n_requested_other_sum / n_requested_sum,
    share_submitted_white = n_submitted_white_sum / n_submitted_sum,
    share_submitted_black = n_submitted_black_sum / n_submitted_sum,
    share_submitted_hispanic = n_submitted_hispanic_sum / n_submitted_sum,
    share_submitted_asian = n_submitted_asian_sum / n_submitted_sum,
    share_submitted_other = n_submitted_other_sum / n_submitted_sum,
    share_rejected_white = n_rejected_white_sum / n_rejected_sum,
    share_rejected_black = n_rejected_black_sum / n_rejected_sum,
    share_rejected_hispanic = n_rejected_hispanic_sum / n_rejected_sum,
    share_rejected_asian = n_rejected_asian_sum / n_rejected_sum,
    share_rejected_other = n_rejected_other_sum / n_rejected_sum
  ) %>%
  group_by(State) %>%  # fixed: removed stray empty argument in group_by(State, )
  dplyr::select(-sim) %>%
  summarize_all(list(~ mean(.), ~ sd(.))) %>%
  ungroup()

# Hispanic: rejection share vs. submission share; bubble size = mean number
# of voters. States far from (or below) the 45-degree line are labeled.
ggplot(data = df_shares, aes(x = share_submitted_hispanic_mean, y = share_rejected_hispanic_mean)) +
  geom_point(aes(size = voters_sum_mean), fill = "blue", color = "black", alpha = 0.3) +
  geom_text(aes(label = State), vjust = -0.25, hjust = -0.25,
            data = df_shares %>%
              filter(abs(share_rejected_hispanic_mean - share_submitted_hispanic_mean) > 0.075 |
                       (share_rejected_hispanic_mean - share_submitted_hispanic_mean) < 0)) +
  lims(x = c(0, 0.5), y = c(0, 0.5)) +
  geom_abline() +
  theme_bw()

# Black: same plot; only states with above-average voter counts are labeled.
ggplot(data = df_shares, aes(x = share_submitted_black_mean, y = share_rejected_black_mean)) +
  geom_point(aes(size = voters_sum_mean), fill = "blue", color = "black", alpha = 0.3) +
  geom_text(aes(label = State), vjust = 0, hjust = -0.25,
            data = df_shares %>%
              filter(voters_sum_mean > mean(voters_sum_mean))) +
  lims(x = c(0, 0.5), y = c(0, 0.5)) +
  geom_abline() +
  scale_size_continuous(labels = comma) +
  theme_bw()
| /code/data_summary/bubble_plot.R | no_license | jamesthesnake/absentee_ballot_rejection_rates | R | false | false | 2,872 | r | # bubble plot
# Bubble plots: simulated absentee-ballot rejection shares vs. submission
# shares by state, by race. Requires the tidyverse (readr, dplyr, ggplot2)
# and scales (for `comma`).

# Load simulation output and aggregate the counts to the State x sim level.
df_sim <- read_rds(path = "model_fits/fit_simulated_70percent_turnout_by_state_allVBM_requested.Rds")
df_sim <- df_sim %>% group_by(State, sim) %>%
  summarise_all(list(sum = sum)) %>%
  ungroup()

# Per-simulation racial shares of voters / requests / submissions /
# rejections, then averaged (mean and sd) across simulations by state.
df_shares <- df_sim %>%
  transmute(
    State = State,
    sim = sim,
    voters_sum = voters_white_sum + voters_black_sum + voters_hispanic_sum + voters_asian_sum + voters_other_sum,
    share_voters_white = voters_white_sum / voters_sum,
    share_voters_black = voters_black_sum / voters_sum,
    share_voters_hispanic = voters_hispanic_sum / voters_sum,
    share_voters_asian = voters_asian_sum / voters_sum,
    share_voters_other = voters_other_sum / voters_sum,
    share_requested_white = n_requested_white_sum / n_requested_sum,
    share_requested_black = n_requested_black_sum / n_requested_sum,
    share_requested_hispanic = n_requested_hispanic_sum / n_requested_sum,
    share_requested_asian = n_requested_asian_sum / n_requested_sum,
    share_requested_other = n_requested_other_sum / n_requested_sum,
    share_submitted_white = n_submitted_white_sum / n_submitted_sum,
    share_submitted_black = n_submitted_black_sum / n_submitted_sum,
    share_submitted_hispanic = n_submitted_hispanic_sum / n_submitted_sum,
    share_submitted_asian = n_submitted_asian_sum / n_submitted_sum,
    share_submitted_other = n_submitted_other_sum / n_submitted_sum,
    share_rejected_white = n_rejected_white_sum / n_rejected_sum,
    share_rejected_black = n_rejected_black_sum / n_rejected_sum,
    share_rejected_hispanic = n_rejected_hispanic_sum / n_rejected_sum,
    share_rejected_asian = n_rejected_asian_sum / n_rejected_sum,
    share_rejected_other = n_rejected_other_sum / n_rejected_sum
  ) %>%
  group_by(State) %>%  # fixed: removed stray empty argument in group_by(State, )
  dplyr::select(-sim) %>%
  summarize_all(list(~ mean(.), ~ sd(.))) %>%
  ungroup()

# Hispanic: rejection share vs. submission share; bubble size = mean number
# of voters. States far from (or below) the 45-degree line are labeled.
ggplot(data = df_shares, aes(x = share_submitted_hispanic_mean, y = share_rejected_hispanic_mean)) +
  geom_point(aes(size = voters_sum_mean), fill = "blue", color = "black", alpha = 0.3) +
  geom_text(aes(label = State), vjust = -0.25, hjust = -0.25,
            data = df_shares %>%
              filter(abs(share_rejected_hispanic_mean - share_submitted_hispanic_mean) > 0.075 |
                       (share_rejected_hispanic_mean - share_submitted_hispanic_mean) < 0)) +
  lims(x = c(0, 0.5), y = c(0, 0.5)) +
  geom_abline() +
  theme_bw()

# Black: same plot; only states with above-average voter counts are labeled.
ggplot(data = df_shares, aes(x = share_submitted_black_mean, y = share_rejected_black_mean)) +
  geom_point(aes(size = voters_sum_mean), fill = "blue", color = "black", alpha = 0.3) +
  geom_text(aes(label = State), vjust = 0, hjust = -0.25,
            data = df_shares %>%
              filter(voters_sum_mean > mean(voters_sum_mean))) +
  lims(x = c(0, 0.5), y = c(0, 0.5)) +
  geom_abline() +
  scale_size_continuous(labels = comma) +
  theme_bw()
|
# Routine to read in image files three data frames.
# The files will be loaded as:
# image.file1
# image.file2
# image.file3
# Import the data
image.file1 <- read.table("data/image1.txt")
image.file2 <- read.table("data/image2.txt")
image.file3 <- read.table("data/image3.txt")
# Reconfigure data frames
name.cols <- c("y.coord", "x.coord", "exp.label",
"ndai", "sd", "corr", "df", "cf", "bf","af", "an")
colnames(image.file1) <- name.cols
colnames(image.file2) <- name.cols
colnames(image.file3) <- name.cols
image.file1 <- data.frame(image.file1)
image.file2 <- data.frame(image.file2)
image.file3 <- data.frame(image.file3)
# Convert expert label column to factor for easier plotting
image.file1 <- image.file1 %>%
mutate(exp.label = as.factor(exp.label))
image.file2 <- image.file2 %>%
mutate(exp.label = as.factor(exp.label))
image.file3 <- image.file3 %>%
mutate(exp.label = as.factor(exp.label)) | /satellite-data-classification/R/load_data.R | no_license | WaverlyWei/Side-Projects | R | false | false | 928 | r | # Routine to read in image files three data frames.
# Load the three satellite image files as data frames:
#   image.file1, image.file2, image.file3
# Requires dplyr (for `%>%` / mutate), assumed attached by the caller.

# Column names shared by all three image files.
name.cols <- c("y.coord", "x.coord", "exp.label",
               "ndai", "sd", "corr", "df", "cf", "bf", "af", "an")

# Read one image file, label its columns, and convert the expert label to
# a factor for easier plotting. read.table() already returns a data.frame,
# so the previous extra data.frame() conversion was redundant and is gone.
load_image <- function(path) {
  img <- read.table(path)
  colnames(img) <- name.cols
  img %>%
    mutate(exp.label = as.factor(exp.label))
}

image.file1 <- load_image("data/image1.txt")
image.file2 <- load_image("data/image2.txt")
image.file3 <- load_image("data/image3.txt")
#script para copiar arquivo de dados DadosRT.csv da pasta do curso para atual diretório de trabalho
lesson_dir <- file.path(path.package("swirl"), "Courses",
"Introducao_a_Estatistica_para_Linguistas", "data")
origem <- file.path(lesson_dir, "DadosRT.csv")
new_dir<-getwd()
destino <- file.path(new_dir, "DadosRT.csv")
file.copy(origem, destino, overwrite = T)
rm(destino)
rm(lesson_dir)
rm(new_dir)
rm(origem) | /Fundamentos/copiarDadosRT.R | permissive | oushiro/Introducao_a_Estatistica_para_Linguistas | R | false | false | 440 | r | #script para copiar arquivo de dados DadosRT.csv da pasta do curso para atual diretório de trabalho
# Copy the DadosRT.csv data file from the swirl course folder into the
# current working directory, then remove the helper variables so the
# user's workspace is left clean.
lesson_dir <- file.path(path.package("swirl"), "Courses",
                        "Introducao_a_Estatistica_para_Linguistas", "data")
origem <- file.path(lesson_dir, "DadosRT.csv")
destino <- file.path(getwd(), "DadosRT.csv")
file.copy(origem, destino, overwrite = TRUE)
rm(destino, lesson_dir, origem)
library(tcR)
### Name: vis.logo
### Title: Logo - plots for amino acid and nucletide profiles.
### Aliases: vis.logo
### ** Examples
## Not run:
##D d <- kmer_profile(c('CASLL', 'CASSQ', 'CASGL'))
##D vis.logo(d)
## End(Not run)
| /data/genthat_extracted_code/tcR/examples/vis.logo.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 237 | r | library(tcR)
### Name: vis.logo
### Title: Logo - plots for amino acid and nucletide profiles.
### Aliases: vis.logo
### ** Examples
## Not run:
##D d <- kmer_profile(c('CASLL', 'CASSQ', 'CASGL'))
##D vis.logo(d)
## End(Not run)
|
# Side-by-side bar plot of mean wilt score (+ SE) per isolate, exported to
# a new PowerPoint slide via COM (Windows only). The data are read from the
# clipboard, so copy the tab-separated table before sourcing this script.
# Removed anti-patterns from the original: rm(list = ls()) and an
# attach(aps) that was never detached (all later calls pass data
# explicitly, so attach() was unnecessary).
library(ggplot2)
library(Rmisc)
library(gridExtra)
library(reshape2)
library(plyr)

# Read the clipboard table (Windows only); one row per observation.
aps <- read.table(file = "clipboard", sep = "\t", header = TRUE)
str(aps)

# Mean, SD and SE of `sympt` for each Isolates x wilt combination.
tgc <- summarySE(aps, measurevar = "sympt",
                 groupvars = c("Isolates", "wilt"), na.rm = TRUE)
tgc

# Dodged bars of the mean symptom score with one-sided SE error bars.
o <- ggplot(tgc, aes(Isolates, sympt, fill = wilt)) +
  geom_bar(stat = "identity", color = "black", position = position_dodge())
p <- o + geom_errorbar(aes(ymin = sympt, ymax = sympt + se), width = .2,
                       position = position_dodge(.9)) +
  labs(x = "Isolates", y = "Wilt score")
p

# Export the plot to PowerPoint through the RDCOMClient bridge.
library(RDCOMClient)
library(R2PPT)
#devtools::install_github("dkyleward/RDCOMClient")
temp_file <- paste0(tempfile(), ".wmf")
ggsave(temp_file, plot = p)
mkppt <- PPT.Init(method = "RDCOMClient")
mkppt <- PPT.AddBlankSlide(mkppt)
mkppt <- PPT.AddGraphicstoSlide(mkppt, file = temp_file)
unlink(temp_file)
| /sidebysideboxplot.R | no_license | Ramkh/side_by-side-box-plot_adjusted-mean-value-and-multiple-regression-line-in-one-graph-plate_timur_data | R | false | false | 956 | r | rm(list =ls())
aps<-read.table(file="clipboard",sep="\t", header = TRUE)
aps
head(aps)
attach(aps)
names(aps)
library(ggplot2)
library(Rmisc)
library(gridExtra)
library(reshape2)
library(plyr)
tgc <- summarySE(aps, measurevar="sympt", groupvars=c("Isolates","wilt"),na.rm = T)
tgc
o <- ggplot(tgc, aes(Isolates, sympt, fill= wilt)) + geom_bar(stat="identity", color="black",
position=position_dodge())
p <-o + geom_errorbar(aes(ymin=sympt, ymax=sympt+se), width=.2,
position=position_dodge(.9)) +labs(x="Isolates", y="Wilt score")
p
library(RDCOMClient)
library(R2PPT)
#devtools::install_github("dkyleward/RDCOMClient")
temp_file<-paste(tempfile(),".wmf", sep="")
ggsave(temp_file, plot=p)
mkppt <- PPT.Init(method="RDCOMClient")
mkppt<-PPT.AddBlankSlide(mkppt)
mkppt<-PPT.AddGraphicstoSlide(mkppt, file=temp_file)
unlink(temp_file)
|
library(rmarkdown)
render('how_many_clusters/how_many_clusters.Rmd',
output_file = 'how_many_clusters.html')
render('where_are_the_roads/where_are_the_roads.Rmd',
output_file = 'where_are_the_roads.html')
render('preliminary_logistics/preliminary_logistics.Rmd',
output_file = 'preliminary_logistics.html') | /reports/generate_all_reports.R | no_license | joebrew/ilha_josina | R | false | false | 331 | r | library(rmarkdown)
# Render each analysis report from its Rmd source to a standalone HTML
# file of the same name (rmarkdown is attached earlier in the script).
for (report_name in c('how_many_clusters',
                      'where_are_the_roads',
                      'preliminary_logistics')) {
  render(file.path(report_name, paste0(report_name, '.Rmd')),
         output_file = paste0(report_name, '.html'))
}
require(pracma)
f <- function(x,y) {
exp(-x*y) *(sin(6*pi*x)+cos(8*pi*y))
}
dblquad(f = f,xa = 0,xb = 1,ya = 0,yb = 1)
n <- seq(0,1,0.01)
multiarray = list();
multiarray <- meshgrid(n,n)
Z<-f(multiarray$X,multiarray$Y)
persp(multiarray$X[1,],multiarray$Y[,1],Z,theta=30, phi=30, expand=0.6,col='lightblue', shade=0.75, ltheta=120,ticktype='detailed')
set.seed(4837)
mean(f(runif(10000),runif(10000)))
mean(f(runif(10000),runif(10000)))
mean(f(runif(10000),runif(10000)))
# Generate the first HowMany points of the Halton low-discrepancy
# sequence in base `Base`, returned as a HowMany x 1 matrix.
# WorkVet holds the base-`Base` digits of a running counter i
# (least-significant digit first); each point is the radical inverse
# of i, i.e. sum over digits of digit_k * Base^(-k).
GetHalton <- function(HowMany, Base) {
  Seq = matrix(0,HowMany,1)
  # Enough digit positions to represent HowMany in base `Base`.
  NumBits = 1+round(log(HowMany)/log(Base));
  VetBase = Base^(-(1:NumBits));   # negative powers of the base
  WorkVet = matrix(0,1,NumBits);   # little-endian digit counter
  for (i in 1:HowMany){
    j = 1;
    ok = 0;
    # Increment the base-`Base` counter, propagating carries.
    while (ok == 0){
      WorkVet[j] = WorkVet[j]+1;
      if (WorkVet[j] < Base){
        ok = 1;
      }
      else{
        WorkVet[j] = 0;
        j = j+1;
      }
    }
    Seq[i] = sum(WorkVet * VetBase)   # radical inverse of i
  }
  return(Seq)
}
seq2 = GetHalton(10000,2)
seq4 = GetHalton(10000,4)
seq5 = GetHalton(10000,5)
seq7 = GetHalton(10000,7)
mean(f(seq2,seq5))
mean(f(seq2,seq4))
mean(f(seq2,seq7))
mean(f(seq5,seq7))
set.seed(327439)
mean(f(runif(100),runif(100)))
mean(f(runif(500),runif(500)))
mean(f(runif(1000),runif(1000)))
mean(f(runif(1500),runif(1500)))
mean(f(runif(2000),runif(2000)))
mean(f(seq2[1:100],seq7[1:100]))
mean(f(seq2[1:500],seq7[1:500]))
mean(f(seq2[1:1000],seq7[1:1000]))
mean(f(seq2[1:1500],seq7[1:1500]))
mean(f(seq2[1:2000],seq7[1:2000])) | /Numerical_Methods_In_Finance_And_Economics:_A_Matlab-Based_Introduction_by_Paolo_Brandimarte/CH4/EX4.16/Ex4_16.R | permissive | FOSSEE/R_TBC_Uploads | R | false | false | 1,530 | r | require(pracma)
# Test integrand on the unit square:
#   f(x, y) = exp(-x*y) * (sin(6*pi*x) + cos(8*pi*y))
# Vectorized in both arguments (all operations are elementwise).
f <- function(x, y) {
  damp <- exp(-x * y)
  wave <- sin(6 * pi * x) + cos(8 * pi * y)
  damp * wave
}
dblquad(f = f,xa = 0,xb = 1,ya = 0,yb = 1)
n <- seq(0,1,0.01)
multiarray = list();
multiarray <- meshgrid(n,n)
Z<-f(multiarray$X,multiarray$Y)
persp(multiarray$X[1,],multiarray$Y[,1],Z,theta=30, phi=30, expand=0.6,col='lightblue', shade=0.75, ltheta=120,ticktype='detailed')
set.seed(4837)
mean(f(runif(10000),runif(10000)))
mean(f(runif(10000),runif(10000)))
mean(f(runif(10000),runif(10000)))
# Generate the first HowMany points of the Halton low-discrepancy
# sequence in base `Base`, as a HowMany x 1 column matrix.
# Point i is the radical inverse of i: write i in base `Base` and
# mirror its digits about the radix point.
GetHalton <- function(HowMany, Base) {
  Seq <- matrix(0, HowMany, 1)
  for (i in seq_len(HowMany)) {
    value <- 0
    weight <- 1 / Base
    rem <- i
    while (rem > 0) {
      value <- value + weight * (rem %% Base)
      rem <- rem %/% Base
      weight <- weight / Base
    }
    Seq[i] <- value
  }
  return(Seq)
}
seq2 = GetHalton(10000,2)
seq4 = GetHalton(10000,4)
seq5 = GetHalton(10000,5)
seq7 = GetHalton(10000,7)
mean(f(seq2,seq5))
mean(f(seq2,seq4))
mean(f(seq2,seq7))
mean(f(seq5,seq7))
set.seed(327439)
mean(f(runif(100),runif(100)))
mean(f(runif(500),runif(500)))
mean(f(runif(1000),runif(1000)))
mean(f(runif(1500),runif(1500)))
mean(f(runif(2000),runif(2000)))
mean(f(seq2[1:100],seq7[1:100]))
mean(f(seq2[1:500],seq7[1:500]))
mean(f(seq2[1:1000],seq7[1:1000]))
mean(f(seq2[1:1500],seq7[1:1500]))
mean(f(seq2[1:2000],seq7[1:2000])) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/main.R
\name{main}
\alias{main}
\title{Create bubble plots and density plots}
\usage{
main(
PatientSummary,
PatientObservations,
PheCodes,
loinc_mapping,
digits = NULL,
windows.size.bubble = 30,
windows.size.density = 30,
windows.min = 0,
windows.max = 120,
topn = NULL
)
}
\arguments{
\item{PatientSummary}{Dataframe; provided in the 4CE 2.1 data.}
\item{PatientObservations}{Dataframe; provided in the 4CE 2.1 data.}
\item{PheCodes}{Dataframe; a mapping file to roll up ICD codes to the Phecode level.}
\item{loinc_mapping}{Dataframe; connecting loinc codes to detailed description.}
\item{digits}{Integer; the digit of ICD code. If default = NULL, set to
be the largest digit of numbers in the \code{concept_code} column in \code{PatientObservations}.}
\item{windows.size.bubble}{Integer; the size of each window in the bubble plot, default=30.}
\item{windows.size.density}{Integer; the size of each window in the density plot, default=30.}
\item{windows.min}{Integer; the minimum time point in the bubble plot, default = 0.}
\item{windows.max}{Integer; the maximum time point in the bubble plot, default = 120.}
\item{topn}{Integer; number of the most frequently diagnosed diseases to display in the bubble plot (default=NULL).}
}
\value{
A list with the following components:
\tabular{ll}{
\code{data} \tab Processed \code{PatientObservations} with rollup information and input data for bubbleplot. \cr
\code{bubble} \tab Bubble plots for ICD and LAB count data with 4 cases. \cr
\code{density} \tab Density plots for continuous LAB data. \cr
}
}
\description{
Generate data frames that count the number of patients diagnosed with different diseases
under different cases within time windows. Create bubble plots and density plots for rollup ICD data and Lab data.
}
| /man/main.Rd | permissive | xinxiong0238/PostSequelae | R | false | true | 1,880 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/main.R
\name{main}
\alias{main}
\title{Create bubble plots and density plots}
\usage{
main(
PatientSummary,
PatientObservations,
PheCodes,
loinc_mapping,
digits = NULL,
windows.size.bubble = 30,
windows.size.density = 30,
windows.min = 0,
windows.max = 120,
topn = NULL
)
}
\arguments{
\item{PatientSummary}{Dataframe; provided in the 4CE 2.1 data.}
\item{PatientObservations}{Dataframe; provided in the 4CE 2.1 data.}
\item{PheCodes}{Dataframe; a mapping file to roll up ICD codes to the Phecode level.}
\item{loinc_mapping}{Dataframe; connecting loinc codes to detailed description.}
\item{digits}{Integer; the digit of ICD code. If default = NULL, set to
be the largest digit of numbers in the \code{concept_code} column in \code{PatientObservations}.}
\item{windows.size.bubble}{Integer; the size of each window in the bubble plot, default=30.}
\item{windows.size.density}{Integer; the size of each window in the density plot, default=30.}
\item{windows.min}{Integer; the minimum time point in the bubble plot, default = 0.}
\item{windows.max}{Integer; the maximum time point in the bubble plot, default = 120.}
\item{topn}{Integer; number of the most frequently diagnosed diseases to display in the bubble plot (default=NULL).}
}
\value{
A list with the following components:
\tabular{ll}{
\code{data} \tab Processed \code{PatientObservations} with rollup information and input data for bubbleplot. \cr
\code{bubble} \tab Bubble plots for ICD and LAB count data with 4 cases. \cr
\code{density} \tab Density plots for continuous LAB data. \cr
}
}
\description{
Generate data frames that count the number of patients diagnosed with different diseases
under different cases within time windows. Create bubble plots and density plots for rollup ICD data and Lab data.
}
|
# Module UI: a container div holding the forecast plot (plotly) and the
# model-accuracy table (DT), both namespaced under the module id.
#
# @param id Module id; must match the id passed to server_time_series().
ui_time_series <- function(id) {
  namespace <- shiny::NS(id)
  plot_out <- plotly::plotlyOutput(outputId = namespace("plotly"), height = "600px")
  table_out <- DT::DTOutput(outputId = namespace("data"))
  shiny::tagList(
    div(class = "container", id = "time_series_out", plot_out, table_out)
  )
}
# Module server: builds a reactive that (1) aggregates the incoming data to
# daily page-view totals, (2) joins the actuals onto the `validation` frame,
# (3) computes model accuracy when actuals are present, and (4) renders the
# modeltime forecast plot and the accuracy table.
#
# @param id Module id (must match ui_time_series()).
# @param df A reactive returning a data frame with at least `date` and
#   `pageviews` columns (evident from the group_by/summarise below).
#
# NOTE(review): `validation` and `refit_tbl` are free variables -- they must
# exist in the enclosing/global environment. Confirm where they are defined
# and that they stay in sync with this module.
server_time_series <- function(id, df) {
  shiny::moduleServer(
    id,
    function(input, output, session) {
      time_series <- shiny::reactive({
        # Daily page-view totals from the module's input data.
        actual_df <- df() %>%
          dplyr::group_by(date) %>%
          dplyr::summarise(page_views = sum(pageviews))
        # Attach actuals to the validation window; page_views is NA for
        # dates with no observed data yet.
        new_dat <- validation %>%
          dplyr::select(-page_views) %>%
          dplyr::left_join(actual_df, by = "date")
        if (!is.na(new_dat$page_views[1])) {
          # Actuals available: calibrate the first three model columns on
          # the observed rows and compute accuracy metrics.
          data_dt <- refit_tbl[, 1:3] %>%
            modeltime::modeltime_calibrate(
              new_data = new_dat %>%
                dplyr::filter(!is.na(page_views))
            ) %>%
            modeltime::modeltime_accuracy()
        } else {
          # No actuals yet: skip the accuracy table.
          data_dt <- NULL
        }
        # Forecast over the validation window plus the observed history.
        plot_plotly <- refit_tbl %>%
          modeltime::modeltime_forecast(
            new_data = new_dat,
            actual_data = actual_df
          ) %>%
          modeltime::plot_modeltime_forecast(
            .legend_show = FALSE,
            .conf_interval_show = FALSE
          )
        # Expose both outputs from a single reactive so the join and
        # aggregation run only once per invalidation.
        return(
          list(
            dt = data_dt,
            plot = plot_plotly
          )
        )
      })
      output$plotly <- plotly::renderPlotly({
        time_series()$plot
      })
      output$data <- DT::renderDT({
        time_series()$dt
      })
    }
  )
}
| /R/time_series_module.R | no_license | muzairaslam/GoogleAnalyticsDashboard | R | false | false | 1,598 | r | ui_time_series <- function(id) {
ns <- shiny::NS(id)
shiny::tagList(
div(
class = "container",
id = "time_series_out",
plotly::plotlyOutput(outputId = ns("plotly"), height = "600px"),
DT::DTOutput(outputId = ns("data"))
)
)
}
server_time_series <- function(id, df) {
shiny::moduleServer(
id,
function(input, output, session) {
time_series <- shiny::reactive({
actual_df <- df() %>%
dplyr::group_by(date) %>%
dplyr::summarise(page_views = sum(pageviews))
new_dat <- validation %>%
dplyr::select(-page_views) %>%
dplyr::left_join(actual_df, by = "date")
if (!is.na(new_dat$page_views[1])) {
data_dt <- refit_tbl[, 1:3] %>%
modeltime::modeltime_calibrate(
new_data = new_dat %>%
dplyr::filter(!is.na(page_views))
) %>%
modeltime::modeltime_accuracy()
} else {
data_dt <- NULL
}
plot_plotly <- refit_tbl %>%
modeltime::modeltime_forecast(
new_data = new_dat,
actual_data = actual_df
) %>%
modeltime::plot_modeltime_forecast(
.legend_show = FALSE,
.conf_interval_show = FALSE
)
return(
list(
dt = data_dt,
plot = plot_plotly
)
)
})
output$plotly <- plotly::renderPlotly({
time_series()$plot
})
output$data <- DT::renderDT({
DT::datatable(time_series()$dt)
})
}
)
}
|
.libPaths(new = "/work/statsgeneral/vcdim/Code/packages")
.libPaths() #Check to see it is #1 in the search path
install.packages(c('ncvreg', 'doParallel', 'polynom', 'parallel'), repos="http://cran.r-project.org")
#library(polynom)
library(MASS)
library(doParallel)
library(parallel)
#library(MASS)
#library(doParallel)
#library(parallel)
# n-fold cross-validation of nested linear models for YIELD.
# Column 1 of `data` must be YIELD; model j uses the first j predictor
# columns (so columns must already be ordered by desired inclusion).
# Returns an n_folds x n_models matrix of held-out MSEs.
#
# NOTE(review): the data are centered/scaled on the FULL sample before
# splitting into folds, which leaks test information into training --
# confirm this is acceptable for this analysis.
# NOTE(review): set.seed(10) fixes the fold assignment across calls.
Model_cv = function(data, n_folds){
  set.seed(10)
  data1 = data.frame(scale(data, scale = TRUE, center = TRUE))
  # Nested model sizes: columns 1..degf for degf in 2..ncol.
  df = 2:ncol(data1)
  # Random fold label for each row.
  folds_i <- sample(rep(1:n_folds, length.out = dim(data)[1]))
  cv_tmp <- matrix(NA, nrow = n_folds, ncol = length(df))
  for (k in 1:n_folds) {
    test_i <- which(folds_i == k)
    train_xy <- data1[-test_i, ]
    test_x <- data1[test_i, ]
    y = data1[test_i, ][,"YIELD"]
    # One lm fit per nested model size on the training fold.
    fitted_models <- apply(t(df), 2, function(degf) lm(YIELD ~ ., data = train_xy[,1:degf]))
    # Held-out predictions for every model size.
    pred <- mapply(function(obj, degf) predict(obj, test_x[, 1:degf]),
                   fitted_models, df)
    # Held-out MSE per model size for this fold.
    cv_tmp[k, ] <- sapply(as.list(data.frame(pred)), function(y_hat) mean((y - y_hat)^2, na.rm = TRUE))
  }
  return(cv_tmp)
}
# Penalized empirical-risk bound: empirical loss plus a capacity penalty
# driven by candidate VC dimension h, sample size n, number of loss bins
# m, and confidence parameter eta.
ERM <- function(Loss, eta, n, h, m) {
  log_term <- log((2 * m / eta) * ((2 * n * exp(1) / h)^h))
  penalty <- (m^2) / (2 * n) * log_term
  Loss + penalty * (1 + sqrt(1 + (4 * n * Loss) / ((m^2) * log_term)))
}
# Additive risk bound: empirical loss plus a square-root capacity
# penalty (no loss-dependent term, unlike ERM()).
Gaby <- function(Loss, eta, n, h, m) {
  penalty <- m * sqrt(log((2 * m / eta) * ((2 * n * exp(1) / h)^h)) / n)
  Loss + penalty
}
# Penalty curve phi(n, x) with coefficients c1 and (nominally) c2.
# NOTE(review): c2 is unconditionally reset to 0 on entry, so the second
# (linear-log) term always vanishes and the c2 argument is ignored --
# confirm this is intentional (C1C2() applies the same override).
phiTheo5 = function(n,x, c1, c2){
  c2 = 0
  c1*sqrt((x/n)*log(2*n*exp(1)/x)) + c2*(x/n)*log(2*n*exp(1)/x)
  #0.5*((m^2)/n)*log(2*n*exp(1)/(x))*(1 + sqrt(1 + (x/n)*log((2*n*exp(1))/x)))
}
# Penalty curve with the sample size fixed at n = 250 and coefficient
# c1 = 0.2; only the square-root-log term is used (the linear-log term
# is disabled, matching phiTheo5 with c2 = 0).
phiTheo <- function(x) {
  ratio <- x / 250
  0.2 * sqrt(ratio * log(2 * 250 * exp(1) / x))
}
# Companion penalty curve as a function of the sample size x, with the
# VC dimension fixed at h = 10 and coefficients 0.33 / 0.01 hard-coded.
phiTheo51 = function(x){
  0.33*sqrt((10/x)*log(2*x*exp(1)/10)) + 0.01*(10/x)*log(2*x*exp(1)/10)
  #0.5*((m^2)/n)*log(2*n*exp(1)/(x))*(1 + sqrt(1 + (x/n)*log((2*n*exp(1))/x)))
}
# Least-squares objective for calibrating coefficient c1 (and nominally
# c2) of the penalty curve against the observed deviations MatChixi at
# sample sizes NL, for a candidate VC dimension h.
# NOTE(review): x2 is computed from c2 and then immediately overwritten
# with 0, so c2 never influences the fit -- confirm this is intentional
# (phiTheo5() has the same override).
# The final assignment's value is returned invisibly.
C1C2 = function(MatChixi, h, NL, c1, c2){
  x1 = c1*sqrt((h/NL)*log(2*NL*exp(1)/h))
  x2 = c2*(h/NL)*log(2*NL*exp(1)/h)
  x2 = 0
  out = (1/length(NL))*sum((MatChixi - x1 - x2)^2)
}
# Ratio-form least-squares objective for calibrating c1 and c2:
# penalizes the squared deviation of MatChixi / penalty from 1,
# normalized by the number of sample sizes in NL.
C1C2ratio <- function(MatChixi, h, NL, c1, c2) {
  log_scale <- (h / NL) * log(2 * NL * exp(1) / h)
  penalty <- c1 * sqrt(log_scale) + c2 * log_scale
  sum((MatChixi / penalty - 1)^2) / length(NL)
}
# Ratio-form fitting objective for the VC-dimension grid search: average
# squared relative deviation of MatChixi[row] from the penalty curve
# phiTheo5(NL[row], x, c1, c2), normalized by length(NL).
# `m` is accepted for signature symmetry with vcfunct() but is unused.
vcfunctratio = function(MatChixi,NL,x,m,c1,c2){
  row = 1
  Sum = 0
  while(row <= length(MatChixi)){
    Sum = Sum + (MatChixi[row]/phiTheo5(n=NL[row],x,c1,c2) - 1 )^2
    row = row + 1
  }
  Sum = (1/length(NL))*Sum
  return(Sum)
}
# Absolute-form fitting objective for the VC-dimension grid search:
# average squared deviation of MatChixi[i] from the penalty curve
# phiTheo5(NL[i], x, c1, c2), normalized by length(NL).
# `m` is accepted for signature symmetry with vcfunctratio() but unused.
vcfunct <- function(MatChixi, NL, x, m, c1, c2) {
  sq_dev <- vapply(
    seq_along(MatChixi),
    function(i) (MatChixi[i] - phiTheo5(n = NL[i], x, c1, c2))^2,
    numeric(1)
  )
  sum(sq_dev) / length(NL)
}
# Vapnik-style empirical bound evaluated at VC dimension x and sample
# size NL; depends on the data only through the ratio r = NL / x.
Vapbound <- function(x, NL) {
  r <- NL / x
  a <- log(2 * r) + 1
  0.16 * (a / (r - 0.15)) * (1 + sqrt(1 + 1.2 * (r - 0.15) / a))
}
# Least-squares objective for fitting Vapbound() to the observed
# deviations MatChixi at sample sizes NL, for candidate VC dimension x.
# Note the average divides by length(MatChixi), unlike vcfunct() /
# vcfunctratio(), which divide by length(NL).
# No explicit return: the final assignment's value is returned invisibly.
vapfunct = function(MatChixi,NL,x){
  row = 1
  Sum = 0
  while(row <= length(MatChixi)){
    Sum = Sum + (MatChixi[row] - Vapbound(x,NL[row]))^2
    row = row + 1
  }
  Sum = (1/length(MatChixi))*Sum
}
#data = read.csv("C:/Users/merli/OneDrive/Documents/DataSet/SNPWheatData.csv", header = T)
data = read.csv("C:/Users/merli/OneDrive/Documents/DataSet/FullWheatData.csv", header = T)
str(data)
names(data)
#data = read.csv(file = "/work/statsgeneral/vcdim/Code/FullWheatData.csv", header = T)
head(data)
B=50
#data = read.csv(file = "/work/statsgeneral/vcdim/Code/WheatData.csv", header = T)
#data = as.data.frame(data, center = TRUE, scale = TRUE)
NL = c(450, 500, 550, 600, 650, 700, 750)
Loc = levels(data$LOCATION)
k = 2
data237 = subset(data, data$LOCATION == Loc[k])
var = c('YIELD','HT', 'TSTWT', 'TKWT', 'SPSM', 'KPS', 'KPSM',
'barc67', 'cmwg680bcd366', 'bcd141', 'barc86', 'gwm155',
'barc12','IBLK')
data2 = data237[,var]
BigModel = lm(YIELD ~ TKWT + TSTWT + SPSM + KPS + KPSM + HT +
I(TKWT^2)+ I(TKWT*TSTWT) + I(TKWT*SPSM) + I(TKWT*KPS) + I(TKWT*KPSM) + I(TKWT*SPSM) + I(TKWT*HT) +
I(TSTWT^2) + I(TSTWT*SPSM) + I(TSTWT*KPS) + I(TSTWT*KPSM) + I(TSTWT*HT) +
I(SPSM^2) + I(SPSM*KPS) + I(SPSM*KPSM) + I(SPSM*HT) +
I(KPS^2) + I(KPS*KPSM) + I(KPS*HT) +
I(KPSM^2) + I(KPSM*HT) +
I(HT^2) + barc67 + cmwg680bcd366 + bcd141 + barc86 + gwm155 +
barc12, data = data2, x=TRUE, y=TRUE)
Xdat = BigModel$x[,-1]
Ydat = BigModel$y
cor(Ydat, Xdat)
ddd = as.matrix(cbind(Ydat,Xdat))
cor(ddd)[1,]
Name = c('TKWT', 'TSTWT', 'SPSM', 'KPS', 'KPSM', 'HT',
'I(TKWT^2)', 'I(TKWT*TSTWT)', 'I(TKWT*SPSM)', 'I(TKWT*KPS)', 'I(TKWT*KPSM)', 'I(TKWT*HT)',
'I(TSTWT^2)', 'I(TSTWT*SPSM)', 'I(TSTWT*KPS)', 'I(TSTWT*KPSM)', 'I(TSTWT*HT)',
'I(SPSM^2)', 'I(SPSM*KPS)', 'I(SPSM*KPSM)', 'I(SPSM*HT)',
'I(KPS^2)', 'I(KPS*KPSM)', 'I(KPS*HT)',
'I(KPSM^2)', 'I(KPSM*HT)',
'I(HT^2)',
'barc67', 'cmwg680bcd366', 'bcd141', 'barc86', 'gwm155', 'barc12')
Cor = as.matrix(round(abs(cor(Ydat, Xdat)),4))
Name[order(cor(ddd)[1,][-1], decreasing = TRUE)]
######################################################################################################
# Order of inclusion of covariates using SNP data in Licoln 01
######################################################################################################
AA = Name[order(cor(ddd)[1,][-1], decreasing = TRUE)]
Big_order = lm(YIELD ~ I(TKWT*KPSM) + (TSTWT*KPS) + KPSM + I(SPSM*KPS) + I(KPSM^2) + I(SPSM*KPSM) +
I(TKWT*SPSM) + I(TSTWT*SPSM) + SPSM + I(KPSM*HT) + I(SPSM^2) + I(KPS*KPSM) +
I(SPSM*HT) + TSTWT + I(TSTWT^2) +
barc67 + I(TKWT*TSTWT)+ barc86 +
TKWT+ I(TKWT^2) + cmwg680bcd366 +
bcd141 + I(TKWT*KPS) + gwm155 +
I(TSTWT*KPS) + barc12 + I(KPS^2) +
KPS + I(TKWT*HT) + I(KPS*HT) +
I(TSTWT*HT) + HT + I(HT^2),
data = data2, x = TRUE, y = TRUE)
X_big = Big_order$x[,-1]
Y_big = Big_order$y
X_data = data.frame(YIELD = Y_big, X_big)
X_data = data.frame(scale(X_data, center = TRUE, scale = TRUE))
#######################################################################################################
# Double-bootstrap estimate of the binned deviation statistic ("xi")
# between two independent halves of a resampled dataset, at each sample
# size in NL. For each n in NL: draw 2n rows with replacement, split into
# halves G1/G2, fit lm(YIELD ~ .) on each, cross-predict, bin the squared
# prediction errors into m equal-width bins, and compare the resulting
# binned frequency measures (nustar1 vs nustar2). Returns the per-NL mean
# of MatChixi over the outer bootstrap (via the final invisible assignment).
#
# NOTE(review): inside the second-half binning loop the row index is `kk`,
# but `SecondHalfN[k, ...]` / `SecondHalfQ[k, ...]` use `k` left over from
# the first-half loop -- this looks like a bug (should be `kk`).
# NOTE(review): the second half's qstarj uses max(Pred1) while its bin
# bounds use max(Pred2); the first half uses max(Pred1) for both --
# confirm which is intended.
# NOTE(review): Pred1/Pred2 are data.frames (predict() minus a data.frame),
# so length(Pred1) is the COLUMN count (1), not the number of
# observations; the inner loops therefore iterate once, and
# `if (Pred1[k] >= ...)` tests a whole column (length > 1 condition).
Chxi = function(data, NL, B, m){
  MatChixi = matrix(NA, nrow = B, ncol = length(NL))
  # Bin j's midpoint, lower bound and upper bound over [0, B] split into m bins.
  Qstar = function(j,B,m){
    (2*j+1)*B/(2*m)
  }
  Lower = function(j,B,m){
    j*B/m
  }
  Upper = function(j,B,m){
    (j+1)*B/m
  }
  for(i in 1:length(NL)){
    # step one: we need to generate 2n data points
    n = NL[i]
    bprim = 1
    while(bprim<(B+1)){
      b = 1
      #sumdiff = 0
      Matxi = matrix(NA, nrow = B, ncol = m)
      while(b < (B+1)){
        cat('Bootstrap #', bprim, 'second boot', b, '\n')
        # Seed depends on (i, bprim, b) so each resample is reproducible.
        set.seed(i*bprim*b+1)
        Index1 = sample(nrow(data),size = 2*n,replace = TRUE)
        Mydata = data[Index1,]
        # Step two: split the data into two groups
        Index = sample(nrow(Mydata),size = n,replace = FALSE)
        SampleData = Mydata
        G1 = SampleData[Index,]
        G2 = SampleData[-Index,]
        # Now lets fit a model using the modify dataset.
        Model1 = lm(YIELD ~ ., x = TRUE, y = TRUE, data = G1)
        Model2 = lm(YIELD ~ ., x = TRUE, y = TRUE, data = G2)
        # Bin-membership indicators (N) and midpoints (Q) for half 1.
        FirstHalfN = matrix(NA, nrow =length(Index), ncol =m)
        FirstHalfQ = matrix(NA, nrow =length(Index), ncol =m)
        colnames(FirstHalfN) =paste("N",1:m,sep="")
        colnames(FirstHalfQ) = paste("Q",1:m,sep="")
        X1 = data.frame(Model1$x)
        Y1 = data.frame(Model1$y)
        X2 = data.frame(Model2$x)
        Y2 = data.frame(Model2$y)
        #Pred1 = PredSqrt[Index]
        qstarj = matrix(NA, nrow = 1, ncol = m)
        # Squared cross-prediction errors: model 1 evaluated on half 2.
        Pred1 = (predict(Model1, X2)-Y2)^2
        for(ind in 0:(m-1)){
          qstarj[1,(1+ind)]= Qstar(ind,max(Pred1),m)
          for(k in 1:length(Pred1)){
            if(Pred1[k]>=Lower(ind,max(Pred1),m) & Pred1[k]<Upper(ind,max(Pred1),m)){
              FirstHalfN[k,(ind+1)] = 1
              FirstHalfQ[k,(ind+1)] = Qstar(ind,max(Pred1),m)
            }
          }
        }
        N1 = apply(FirstHalfN, 2, sum, na.rm = TRUE)
        Q1 = apply(FirstHalfQ, 2, sum, na.rm = TRUE)
        # Binned frequency measure for half 1: count x midpoint / n.
        nustar1 = N1*qstarj/(n)
        #nustar1 = N1*Q1/(n)
        # Same construction for half 2 (model 2 evaluated on half 1).
        SecondHalfN = matrix(NA, nrow =length(Index), ncol =m)
        SecondHalfQ = matrix(NA, nrow =length(Index), ncol =m)
        colnames(SecondHalfN) =paste("N",1:m,sep="")
        colnames(SecondHalfQ) =paste("Q",1:m,sep="")
        qstarj = matrix(NA, nrow = 1, ncol = m)
        Pred2 = (predict(Model2 ,X1)-Y1)^2
        for(indx in 0:(m-1)){
          qstarj[1,(1+indx)]= Qstar(indx,max(Pred1),m)
          for(kk in 1:length(Pred2)){
            if(Pred2[kk]>=Lower(indx,max(Pred2),m) & Pred2[kk]<Upper(indx,max(Pred2),m)){
              SecondHalfN[k,(indx+1)] = 1
              SecondHalfQ[k,(indx+1)] = Qstar(indx,max(Pred2),m)
            }
          }
        }
        N2 = apply(SecondHalfN, 2, sum, na.rm = TRUE)
        Q2 = apply(SecondHalfQ, 2, sum, na.rm = TRUE)
        nustar2 = N2*qstarj/(n)
        #nustar2 = N2*Q2/(n)
        # Per-bin absolute deviation between the two halves' measures.
        diff = abs(nustar2 - nustar1)
        #sumdiff = sumdiff + diff
        Matxi[b,] = diff
        b = b + 1
      }
      # Aggregate inner-bootstrap deviations per bin (mean, not max --
      # the max variant is kept commented out above).
      #MatChixi[bprim,i] = sum (apply(Matxi, 2, max, na.rm = T))
      MatChixi[bprim,i] = sum (apply(Matxi, 2, mean, na.rm = T))
      bprim = bprim + 1
    }
  }
  # Column means over the outer bootstrap: one value per sample size in NL.
  MeanChi = apply(MatChixi, MARGIN =2, FUN = mean)
}
Output = matrix(NA, ncol = 6, nrow = length(AA))
colnames(Output) = c('h', 'ERM1', 'ERM2', 'AIC', 'BIC', 'C')
for(l in 2:ncol(X_data)){
dat = X_data[, 1:l]
X = data.frame(cbind(scale(dat, center = TRUE, scale = TRUE)))
#est = Chxi(data = X, NL = NL, B=5, m=10)
Model1 = lm(YIELD ~ ., data = X, x = TRUE, y = TRUE)
# estimate Chixi
est = Chxi(data = X, NL = NL, B=5, m=10)
#####################################################################
# Estimate c1 and c2
#####################################################################
c1 = seq(from = 0.01, to = 10, by = 0.01)
c2 = 0
Mertt = numeric()
# for (t in 1:length(hh)) {
ourestMat = matrix(NA, nrow = length(c1), ncol = length(c2))
ourestMat2 = matrix(NA, nrow = length(c1), ncol = length(c2))
for (j in 1:length(c1)) {
for (g in 1:length(c2)) {
ourestMat[j,g] = C1C2(est,ncol(data)-1, NL, c1 = c1[j], c2 = c2[g])
ourestMat2[j,g] = C1C2ratio(est,ncol(data)-1, NL, c1 = c1[j], c2 = c2[g])
}
}
Indexx = which(ourestMat == min(ourestMat), arr.ind = TRUE)
Indexx2 = which(ourestMat2 == min(ourestMat2), arr.ind = TRUE)
c111 = c1[Indexx[1,1]]
c222 = c2[Indexx[1,2]]
c11r = c1[Indexx2[1,1]]
c22r = c2[Indexx2[1,2]]
cat("C1 is ", c111, "c2 is ", c222, 'the number of col in data is is', ncol(data)-1, "\n")
# estimate vc dimension using grid search
range2 = seq(from = 1, to = 100, by = 1)
MerlinMat1 = numeric(length(range2))
MerlinMat1r = numeric(length(range2))
for (kk in 1:length(range2)) {
MerlinMat1[kk] = vcfunct(est,NL=NL,x=range2[kk], m=10, c1=c111, c2=c222)
MerlinMat1r[kk] = vcfunctratio(est,NL=NL,x=range2[kk], m=10, c1=c11r, c2=c22r)
}
cat('The estimate vcdim is: ', range2[which.min(MerlinMat1)], " for Loc ", Loc[k], "\n")
cat('The estimate vcdim is: ', range2[which.min(MerlinMat1r)], " for Loc ", Loc[k], "\n")
Risk2 = sum(Model1$residuals^2)
BIC = BIC(Model1)
AIC = AIC(Model1)
cat("The BIC is:", round(BIC), '\n')
ERM1 = Gaby(Loss = Risk2,eta = 0.05,
n = nrow(data), h = range2[which.min(MerlinMat1)], m=10)
round(ERM1)
ERM2 = ERM(Loss = Risk2,eta = 0.05,
n = nrow(data), h = range2[which.min(MerlinMat1)], m=10)
round(ERM2)
dataa = data.frame(h = range2[which.min(MerlinMat1)], ERM1 = round(ERM1), ERM2 = round(ERM2), BIC = round(BIC), AIC = round(AIC))
Output[l-1,] = c(range2[which.min(MerlinMat1)], round(ERM1), round(ERM2), round(AIC), round(BIC), c111
)
#dataa
}
Output
cv_tmp = Model_cv(dat, n_folds <- 10)
cv = colMeans(cv_tmp)
data.frame(Output[,-6], cv)
X_big = Big_order$x[,-1]
Y_big = Big_order$y
X1Scaled = scale(X_big, center = TRUE, scale = TRUE)
Anal_data = data.frame(X1Scaled, IBLK = data2$IBLK)
#Anal_data = data.frame(X1Scaled)
library(ncvreg)
Model_SCAD = ncvreg(Anal_data, Y_big, penalty = 'SCAD')
plot(Model_SCAD)
cvfit2 <- cv.ncvreg(Anal_data, Y_big, penalty = "SCAD")
round(coef(cvfit2),2)
plot(cvfit2)
cvfit2$lambda.min
Param = t(Model_SCAD$beta)
source("C:/Users/merli/OneDrive/Documents/Code/LSA.r")
source("C:/Users/merli/OneDrive/Documents/Code/Lasso.r")
Ada_lasso = lasso.adapt.bic2(Anal_data, Y_big)
round(Ada_lasso$coeff,2)
round(Ada_lasso$intercept,2)
ada = lsa(Model1)
#######################################################################################
# model dev, impl , valida, risk model, forcasting credit handle
# risk model enterprisewise data
| /Wheat_Pheno_SNP_2001.R | no_license | poudas1981/Wheat_data_set | R | false | false | 13,594 | r | .libPaths(new = "/work/statsgeneral/vcdim/Code/packages")
.libPaths() #Check to see it is #1 in the search path
install.packages(c('ncvreg', 'doParallel', 'polynom', 'parallel'), repos="http://cran.r-project.org")
#library(polynom)
library(MASS)
library(doParallel)
library(parallel)
#library(MASS)
#library(doParallel)
#library(parallel)
# K-fold cross-validation over a sequence of nested linear models: for each
# candidate size degf in 2:ncol(data), fit lm(YIELD ~ .) on the first degf
# columns of the (scaled) data and record the held-out mean squared error.
# NOTE(review): assumes the data.frame has a numeric "YIELD" column (scale()
# fails on non-numerics) and that column order encodes inclusion order --
# confirm with the caller that builds X_data.
# Returns an n_folds x (ncol(data) - 1) matrix of test MSEs.
Model_cv = function(data, n_folds){
  set.seed(10)  # fixed seed so fold assignment is reproducible across calls
  data1 = data.frame(scale(data, scale = TRUE, center = TRUE))
  df = 2:ncol(data1)  # candidate model sizes (number of leading columns used)
  folds_i <- sample(rep(1:n_folds, length.out = dim(data)[1]))
  cv_tmp <- matrix(NA, nrow = n_folds, ncol = length(df))
  for (k in 1:n_folds) {
    test_i <- which(folds_i == k)
    train_xy <- data1[-test_i, ]
    test_x <- data1[test_i, ]
    y = data1[test_i, ][,"YIELD"]
    # One fitted lm per candidate model size
    fitted_models <- apply(t(df), 2, function(degf) lm(YIELD ~ ., data = train_xy[,1:degf]))
    pred <- mapply(function(obj, degf) predict(obj, test_x[, 1:degf]),
                   fitted_models, df)
    # Held-out MSE per candidate size for this fold
    cv_tmp[k, ] <- sapply(as.list(data.frame(pred)), function(y_hat) mean((y - y_hat)^2, na.rm = TRUE))
  }
  return(cv_tmp)
}
# Structural-risk bound: adds to the empirical loss a capacity penalty that
# grows with the VC-dimension estimate `h` and shrinks with sample size `n`.
#
# Args:
#   Loss: empirical loss (e.g. sum of squared residuals).
#   eta:  confidence parameter (e.g. 0.05).
#   n:    sample size.
#   h:    (estimated) VC dimension.
#   m:    number of histogram bins used in the Chi estimate.
# Returns: the penalized risk bound (numeric scalar).
ERM = function(Loss, eta, n, h, m){
  log_term <- log((2 * m / eta) * ((2 * n * exp(1) / h)^h))
  penalty <- (m^2) / (2 * n) * log_term
  inflation <- 1 + sqrt(1 + (4 * n * Loss) / ((m^2) * log_term))
  Loss + penalty * inflation
}
# Alternative risk bound: empirical loss plus a single square-root capacity
# penalty (same arguments as ERM, simpler penalty shape).
Gaby = function(Loss, eta, n, h, m){
  inner <- log((2 * m / eta) * ((2 * n * exp(1) / h)^h))
  Loss + m * sqrt(inner / n)
}
# Theoretical penalty curve phi(n, x) used when fitting c1/c2 against the
# bootstrap Chi estimates; `x` plays the role of the candidate VC dimension.
# NOTE(review): the `c2` argument is overwritten with 0 on the next line, so
# the linear term always contributes zero (the caller's c2 is ignored).
# Presumably a deliberate experiment toggle, like the commented-out
# alternative formula below -- confirm before relying on c2.
phiTheo5 = function(n,x, c1, c2){
  c2 = 0  # forcibly disables the linear term regardless of the caller's c2
  c1*sqrt((x/n)*log(2*n*exp(1)/x)) + c2*(x/n)*log(2*n*exp(1)/x)
  #0.5*((m^2)/n)*log(2*n*exp(1)/(x))*(1 + sqrt(1 + (x/n)*log((2*n*exp(1))/x)))
}
# Fixed-coefficient penalty curve for n = 250 and c1 = 0.2 (the linear c2
# term was already commented out in the original).
phiTheo = function(x){
  ratio <- x / 250
  0.2 * sqrt(ratio * log(2 * 250 * exp(1) / x))
}
# Penalty curve as a function of the sample size x, for fixed h = 10,
# c1 = 0.33 and c2 = 0.01 (both terms active here).
phiTheo51 = function(x){
  core <- (10 / x) * log(2 * x * exp(1) / 10)
  0.33 * sqrt(core) + 0.01 * core
}
# Mean squared error between the bootstrap Chi estimates and the theoretical
# penalty c1 * sqrt((h/NL) * log(2*NL*e/h)), evaluated over sample sizes NL.
# Used in the grid search for c1.
#
# The c2 (linear) term is intentionally disabled: the original computed
#   x2 = c2*(h/NL)*log(2*NL*exp(1)/h)
# and then immediately overwrote it with 0, so the computation was dead code
# (and could emit spurious NaN warnings). It has been removed; `c2` stays in
# the signature for caller compatibility but is ignored.
#
# Args:
#   MatChixi: vector of Chi estimates, one per entry of NL.
#   h:        candidate VC dimension.
#   NL:       vector of sample sizes.
#   c1, c2:   penalty coefficients (c2 ignored, see above).
# Returns: mean squared deviation (numeric scalar).
C1C2 = function(MatChixi, h, NL, c1, c2){
  x1 = c1 * sqrt((h / NL) * log(2 * NL * exp(1) / h))
  out = (1 / length(NL)) * sum((MatChixi - x1)^2)
  return(out)
}
# Relative (ratio) analogue of C1C2: mean squared deviation of the ratio
# MatChixi / phi from 1, where phi = c1*sqrt(t) + c2*t and
# t = (h/NL)*log(2*NL*e/h). Both penalty terms are active here.
C1C2ratio = function(MatChixi, h, NL, c1, c2){
  t_term <- (h / NL) * log(2 * NL * exp(1) / h)
  phi <- c1 * sqrt(t_term) + c2 * t_term
  out <- sum((MatChixi / phi - 1)^2) / length(NL)
  invisible(out)  # returned invisibly, matching the original assignment-last form
}
# Ratio-based objective for the VC-dimension grid search: mean squared
# deviation of MatChixi[i] / phiTheo5(NL[i], x, c1, c2) from 1.
# Note: the sum runs over the entries of MatChixi but is normalized by
# length(NL), exactly as in the original implementation. `m` is accepted for
# interface compatibility but unused.
vcfunctratio = function(MatChixi,NL,x,m,c1,c2){
  acc <- 0
  for (idx in seq_along(MatChixi)) {
    acc <- acc + (MatChixi[idx] / phiTheo5(n = NL[idx], x, c1, c2) - 1)^2
  }
  acc / length(NL)
}
# Absolute-difference objective for the VC-dimension grid search: mean
# squared difference between MatChixi[i] and phiTheo5(NL[i], x, c1, c2).
# The sum runs over MatChixi but is normalized by length(NL), as in the
# original. `m` is accepted for interface compatibility but unused.
vcfunct = function(MatChixi,NL,x,m,c1,c2){
  acc <- 0
  for (idx in seq_along(MatChixi)) {
    acc <- acc + (MatChixi[idx] - phiTheo5(n = NL[idx], x, c1, c2))^2
  }
  acc / length(NL)
}
# Vapnik's empirical penalty bound as a function of the ratio r = NL / x
# (sample size over candidate VC dimension). Same closed form as the
# original, written in terms of the named ratio.
Vapbound = function(x,NL){
  r <- NL / x
  lead <- 0.16 * (log(2 * r) + 1) / (r - 0.15)
  lead * (1 + sqrt(1 + 1.2 * (r - 0.15) / (log(2 * r) + 1)))
}
# Grid-search objective against Vapnik's bound: mean squared difference
# between the Chi estimates and Vapbound(x, NL[i]). Unlike vcfunct, this one
# is normalized by length(MatChixi).
vapfunct = function(MatChixi,NL,x){
  acc <- 0
  for (idx in seq_along(MatChixi)) {
    acc <- acc + (MatChixi[idx] - Vapbound(x, NL[idx]))^2
  }
  out <- acc / length(MatChixi)
  invisible(out)  # returned invisibly, matching the original assignment-last form
}
# ---- Data loading and subsetting -----------------------------------------
# Read the full wheat phenotype + SNP dataset (local Windows path; the
# commented alternatives are the cluster / reduced versions of the file).
#data = read.csv("C:/Users/merli/OneDrive/Documents/DataSet/SNPWheatData.csv", header = T)
data = read.csv("C:/Users/merli/OneDrive/Documents/DataSet/FullWheatData.csv", header = T)
str(data)
names(data)
#data = read.csv(file = "/work/statsgeneral/vcdim/Code/FullWheatData.csv", header = T)
head(data)
B=50  # number of bootstrap replicates (used later, e.g. by Chxi)
#data = read.csv(file = "/work/statsgeneral/vcdim/Code/WheatData.csv", header = T)
#data = as.data.frame(data, center = TRUE, scale = TRUE)
NL = c(450, 500, 550, 600, 650, 700, 750)  # bootstrap sample sizes to evaluate
Loc = levels(data$LOCATION)
k = 2  # analyse the second location only
data237 = subset(data, data$LOCATION == Loc[k])
# Response (YIELD), agronomic traits and SNP markers kept for the analysis
var = c('YIELD','HT', 'TSTWT', 'TKWT', 'SPSM', 'KPS', 'KPSM',
        'barc67', 'cmwg680bcd366', 'bcd141', 'barc86', 'gwm155',
        'barc12','IBLK')
data2 = data237[,var]
# ---- Full second-order model and covariate ordering ----------------------
# Quadratic + pairwise-interaction model of yield on the agronomic traits,
# plus the SNP markers as main effects.
# NOTE(review): I(TKWT*SPSM) appears twice in the formula below (second
# occurrence is aliased/redundant) -- confirm whether one was meant to be a
# different interaction.
BigModel = lm(YIELD ~ TKWT + TSTWT + SPSM + KPS + KPSM + HT +
                I(TKWT^2)+ I(TKWT*TSTWT) + I(TKWT*SPSM) + I(TKWT*KPS) + I(TKWT*KPSM) + I(TKWT*SPSM) + I(TKWT*HT) +
                I(TSTWT^2) + I(TSTWT*SPSM) + I(TSTWT*KPS) + I(TSTWT*KPSM) + I(TSTWT*HT) +
                I(SPSM^2) + I(SPSM*KPS) + I(SPSM*KPSM) + I(SPSM*HT) +
                I(KPS^2) + I(KPS*KPSM) + I(KPS*HT) +
                I(KPSM^2) + I(KPSM*HT) +
                I(HT^2) + barc67 + cmwg680bcd366 + bcd141 + barc86 + gwm155 +
                barc12, data = data2, x=TRUE, y=TRUE)
Xdat = BigModel$x[,-1]  # model matrix without the intercept column
Ydat = BigModel$y
cor(Ydat, Xdat)
ddd = as.matrix(cbind(Ydat,Xdat))
cor(ddd)[1,]  # correlation of each term with the response
# Human-readable names matching the columns of Xdat, in formula order
Name = c('TKWT', 'TSTWT', 'SPSM', 'KPS', 'KPSM', 'HT',
         'I(TKWT^2)', 'I(TKWT*TSTWT)', 'I(TKWT*SPSM)', 'I(TKWT*KPS)', 'I(TKWT*KPSM)', 'I(TKWT*HT)',
         'I(TSTWT^2)', 'I(TSTWT*SPSM)', 'I(TSTWT*KPS)', 'I(TSTWT*KPSM)', 'I(TSTWT*HT)',
         'I(SPSM^2)', 'I(SPSM*KPS)', 'I(SPSM*KPSM)', 'I(SPSM*HT)',
         'I(KPS^2)', 'I(KPS*KPSM)', 'I(KPS*HT)',
         'I(KPSM^2)', 'I(KPSM*HT)',
         'I(HT^2)',
         'barc67', 'cmwg680bcd366', 'bcd141', 'barc86', 'gwm155', 'barc12')
Cor = as.matrix(round(abs(cor(Ydat, Xdat)),4))
Name[order(cor(ddd)[1,][-1], decreasing = TRUE)]
#####################################################################################################
# Order of inclusion of covariates using SNP data in Licoln 01
#####################################################################################################
AA = Name[order(cor(ddd)[1,][-1], decreasing = TRUE)]  # terms sorted by |correlation| with yield
# Re-fit with terms listed in decreasing-correlation order (column order of
# Big_order$x then encodes the inclusion order used by the main loop below).
# NOTE(review): (TSTWT*KPS) is not wrapped in I(), so it expands to the main
# effects plus the TSTWT:KPS interaction, and I(TSTWT*KPS) also appears later
# in the formula -- confirm the intended term set.
Big_order = lm(YIELD ~ I(TKWT*KPSM) + (TSTWT*KPS) + KPSM + I(SPSM*KPS) + I(KPSM^2) + I(SPSM*KPSM) +
                 I(TKWT*SPSM) + I(TSTWT*SPSM) + SPSM + I(KPSM*HT) + I(SPSM^2) + I(KPS*KPSM) +
                 I(SPSM*HT) + TSTWT + I(TSTWT^2) +
                 barc67 + I(TKWT*TSTWT)+ barc86 +
                 TKWT+ I(TKWT^2) + cmwg680bcd366 +
                 bcd141 + I(TKWT*KPS) + gwm155 +
                 I(TSTWT*KPS) + barc12 + I(KPS^2) +
                 KPS + I(TKWT*HT) + I(KPS*HT) +
                 I(TSTWT*HT) + HT + I(HT^2),
               data = data2, x = TRUE, y = TRUE)
X_big = Big_order$x[,-1]
Y_big = Big_order$y
X_data = data.frame(YIELD = Y_big, X_big)
X_data = data.frame(scale(X_data, center = TRUE, scale = TRUE))
#######################################################################################################
# Double-bootstrap estimate of the "Chi" deviation statistic used to fit the
# VC-dimension penalty curve. For each sample size in NL, B outer replicates
# of B inner split-half bootstraps are run; each inner run fits lm(YIELD ~ .)
# on both halves, histograms the cross-prediction squared errors into m bins,
# and accumulates the absolute difference of the two binned nu* profiles.
#
# Args:
#   data: data.frame with a YIELD response column plus predictors.
#   NL:   vector of bootstrap sample sizes to evaluate.
#   B:    number of bootstrap replicates (outer and inner loops).
#   m:    number of histogram bins for the nu* construction.
# Returns (invisibly): vector of mean Chi estimates, one per entry of NL.
Chxi = function(data, NL, B, m){
  MatChixi = matrix(NA, nrow = B, ncol = length(NL))
  # Bin mid-point, lower and upper edges for bin j of [0, B] split into m
  # bins. NOTE: the `B` parameter here is the bin range (max squared error),
  # shadowing the replicate count of the enclosing function.
  Qstar = function(j,B,m){
    (2*j+1)*B/(2*m)
  }
  Lower = function(j,B,m){
    j*B/m
  }
  Upper = function(j,B,m){
    (j+1)*B/m
  }
  for(i in 1:length(NL)){
    # step one: we need to generate 2n data points
    n = NL[i]
    bprim = 1
    while(bprim<(B+1)){
      b = 1
      #sumdiff = 0
      Matxi = matrix(NA, nrow = B, ncol = m)
      while(b < (B+1)){
        cat('Bootstrap #', bprim, 'second boot', b, '\n')
        set.seed(i*bprim*b+1)  # deterministic seed per (NL, outer, inner) triple
        Index1 = sample(nrow(data),size = 2*n,replace = TRUE)
        Mydata = data[Index1,]
        # Step two: split the data into two groups
        Index = sample(nrow(Mydata),size = n,replace = FALSE)
        SampleData = Mydata
        G1 = SampleData[Index,]
        G2 = SampleData[-Index,]
        # Now let's fit a model on each half of the modified dataset.
        Model1 = lm(YIELD ~ ., x = TRUE, y = TRUE, data = G1)
        Model2 = lm(YIELD ~ ., x = TRUE, y = TRUE, data = G2)
        FirstHalfN = matrix(NA, nrow =length(Index), ncol =m)
        FirstHalfQ = matrix(NA, nrow =length(Index), ncol =m)
        colnames(FirstHalfN) =paste("N",1:m,sep="")
        colnames(FirstHalfQ) = paste("Q",1:m,sep="")
        X1 = data.frame(Model1$x)
        Y1 = data.frame(Model1$y)
        X2 = data.frame(Model2$x)
        Y2 = data.frame(Model2$y)
        #Pred1 = PredSqrt[Index]
        qstarj = matrix(NA, nrow = 1, ncol = m)
        # NOTE(review): Y2 is a 1-column data.frame, so Pred1 is a data.frame
        # and length(Pred1) is its COLUMN count (1), not the number of rows --
        # the inner k-loop below then visits only k = 1. Confirm intent.
        Pred1 = (predict(Model1, X2)-Y2)^2
        for(ind in 0:(m-1)){
          qstarj[1,(1+ind)]= Qstar(ind,max(Pred1),m)
          for(k in 1:length(Pred1)){
            if(Pred1[k]>=Lower(ind,max(Pred1),m) & Pred1[k]<Upper(ind,max(Pred1),m)){
              FirstHalfN[k,(ind+1)] = 1
              FirstHalfQ[k,(ind+1)] = Qstar(ind,max(Pred1),m)
            }
          }
        }
        N1 = apply(FirstHalfN, 2, sum, na.rm = TRUE)
        Q1 = apply(FirstHalfQ, 2, sum, na.rm = TRUE)
        nustar1 = N1*qstarj/(n)
        #nustar1 = N1*Q1/(n)
        SecondHalfN = matrix(NA, nrow =length(Index), ncol =m)
        SecondHalfQ = matrix(NA, nrow =length(Index), ncol =m)
        colnames(SecondHalfN) =paste("N",1:m,sep="")
        colnames(SecondHalfQ) =paste("Q",1:m,sep="")
        qstarj = matrix(NA, nrow = 1, ncol = m)
        Pred2 = (predict(Model2 ,X1)-Y1)^2
        for(indx in 0:(m-1)){
          # NOTE(review): uses max(Pred1) here although this half is based on
          # Pred2 -- looks like a copy-paste slip; compare the first half.
          qstarj[1,(1+indx)]= Qstar(indx,max(Pred1),m)
          for(kk in 1:length(Pred2)){
            if(Pred2[kk]>=Lower(indx,max(Pred2),m) & Pred2[kk]<Upper(indx,max(Pred2),m)){
              # NOTE(review): indexes row `k` (stale value left over from the
              # first-half loop) instead of `kk` -- almost certainly a bug.
              SecondHalfN[k,(indx+1)] = 1
              SecondHalfQ[k,(indx+1)] = Qstar(indx,max(Pred2),m)
            }
          }
        }
        N2 = apply(SecondHalfN, 2, sum, na.rm = TRUE)
        Q2 = apply(SecondHalfQ, 2, sum, na.rm = TRUE)
        nustar2 = N2*qstarj/(n)
        #nustar2 = N2*Q2/(n)
        diff = abs(nustar2 - nustar1)
        #sumdiff = sumdiff + diff
        Matxi[b,] = diff
        b = b + 1
      }
      #MatChixi[bprim,i] = sum (apply(Matxi, 2, max, na.rm = T))
      MatChixi[bprim,i] = sum (apply(Matxi, 2, mean, na.rm = T))
      bprim = bprim + 1
    }
  }
  # Average the outer bootstrap replicates per sample size (returned value)
  MeanChi = apply(MatChixi, MARGIN =2, FUN = mean)
}
# ---- Main analysis loop: one row of Output per nested model size ---------
# For each model size l, estimate Chi, fit c1 (c2 fixed at 0), grid-search
# the VC dimension, and record the two risk bounds plus AIC/BIC.
Output = matrix(NA, ncol = 6, nrow = length(AA))
colnames(Output) = c('h', 'ERM1', 'ERM2', 'AIC', 'BIC', 'C')
for(l in 2:ncol(X_data)){
  dat = X_data[, 1:l]  # first l columns (inclusion order from Big_order)
  X = data.frame(cbind(scale(dat, center = TRUE, scale = TRUE)))
  #est = Chxi(data = X, NL = NL, B=5, m=10)
  Model1 = lm(YIELD ~ ., data = X, x = TRUE, y = TRUE)
  # estimate Chixi
  est = Chxi(data = X, NL = NL, B=5, m=10)
  #####################################################################
  # Estimate c1 and c2
  #####################################################################
  c1 = seq(from = 0.01, to = 10, by = 0.01)  # grid for c1
  c2 = 0  # c2 held at 0 (scalar, so the g-loop below runs exactly once)
  Mertt = numeric()
  # for (t in 1:length(hh)) {
  ourestMat = matrix(NA, nrow = length(c1), ncol = length(c2))
  ourestMat2 = matrix(NA, nrow = length(c1), ncol = length(c2))
  for (j in 1:length(c1)) {
    for (g in 1:length(c2)) {
      # NOTE(review): h is passed as ncol(data)-1, i.e. based on the full raw
      # `data`, not the current model X -- confirm this is intended.
      ourestMat[j,g] = C1C2(est,ncol(data)-1, NL, c1 = c1[j], c2 = c2[g])
      ourestMat2[j,g] = C1C2ratio(est,ncol(data)-1, NL, c1 = c1[j], c2 = c2[g])
    }
  }
  # Pick the (c1, c2) pair minimizing each objective
  Indexx = which(ourestMat == min(ourestMat), arr.ind = TRUE)
  Indexx2 = which(ourestMat2 == min(ourestMat2), arr.ind = TRUE)
  c111 = c1[Indexx[1,1]]
  c222 = c2[Indexx[1,2]]
  c11r = c1[Indexx2[1,1]]
  c22r = c2[Indexx2[1,2]]
  cat("C1 is ", c111, "c2 is ", c222, 'the number of col in data is is', ncol(data)-1, "\n")
  # estimate vc dimension using grid search
  range2 = seq(from = 1, to = 100, by = 1)  # candidate VC dimensions
  MerlinMat1 = numeric(length(range2))
  MerlinMat1r = numeric(length(range2))
  for (kk in 1:length(range2)) {
    MerlinMat1[kk] = vcfunct(est,NL=NL,x=range2[kk], m=10, c1=c111, c2=c222)
    MerlinMat1r[kk] = vcfunctratio(est,NL=NL,x=range2[kk], m=10, c1=c11r, c2=c22r)
  }
  cat('The estimate vcdim is: ', range2[which.min(MerlinMat1)], " for Loc ", Loc[k], "\n")
  cat('The estimate vcdim is: ', range2[which.min(MerlinMat1r)], " for Loc ", Loc[k], "\n")
  Risk2 = sum(Model1$residuals^2)  # empirical loss of the current model
  # These numeric bindings shadow the names of stats::BIC/stats::AIC, but the
  # function lookups below still resolve to the stats functions.
  BIC = BIC(Model1)
  AIC = AIC(Model1)
  cat("The BIC is:", round(BIC), '\n')
  # Risk bounds at the selected VC dimension (note n = nrow(data), full data)
  ERM1 = Gaby(Loss = Risk2,eta = 0.05,
              n = nrow(data), h = range2[which.min(MerlinMat1)], m=10)
  round(ERM1)
  ERM2 = ERM(Loss = Risk2,eta = 0.05,
             n = nrow(data), h = range2[which.min(MerlinMat1)], m=10)
  round(ERM2)
  dataa = data.frame(h = range2[which.min(MerlinMat1)], ERM1 = round(ERM1), ERM2 = round(ERM2), BIC = round(BIC), AIC = round(AIC))
  Output[l-1,] = c(range2[which.min(MerlinMat1)], round(ERM1), round(ERM2), round(AIC), round(BIC), c111)
  #dataa
}
Output  # print the collected per-size results
# Cross-validated MSE for comparison.
# NOTE(review): `dat` is whatever the final loop iteration left behind (the
# full X_data), and `n_folds <- 10` inside the call also creates a global
# n_folds binding -- confirm both are intended.
cv_tmp = Model_cv(dat, n_folds <- 10)
cv = colMeans(cv_tmp)
data.frame(Output[,-6], cv)
# ---- Penalized regression (SCAD) and adaptive lasso ----------------------
X_big = Big_order$x[,-1]
Y_big = Big_order$y
X1Scaled = scale(X_big, center = TRUE, scale = TRUE)
Anal_data = data.frame(X1Scaled, IBLK = data2$IBLK)
#Anal_data = data.frame(X1Scaled)
library(ncvreg)
Model_SCAD = ncvreg(Anal_data, Y_big, penalty = 'SCAD')
plot(Model_SCAD)  # coefficient paths
cvfit2 <- cv.ncvreg(Anal_data, Y_big, penalty = "SCAD")
round(coef(cvfit2),2)
plot(cvfit2)
cvfit2$lambda.min
Param = t(Model_SCAD$beta)  # base::t -- transpose of the coefficient matrix
# Adaptive lasso / least-squares approximation helpers (local Windows paths)
source("C:/Users/merli/OneDrive/Documents/Code/LSA.r")
source("C:/Users/merli/OneDrive/Documents/Code/Lasso.r")
Ada_lasso = lasso.adapt.bic2(Anal_data, Y_big)
round(Ada_lasso$coeff,2)
round(Ada_lasso$intercept,2)
# NOTE(review): Model1 here is the last model fitted inside the main loop.
ada = lsa(Model1)
#######################################################################################
# model dev, impl , valida, risk model, forcasting credit handle
# risk model enterprisewise data
# Build extractive summaries for every plain-text document under `input_path`
# using three pre-trained scoring models, writing each summary to fixed
# Windows output directories (summary_t / summary_q / summary_p).
# NOTE(review): `t`, `q` and `p` are expected to be model objects created by
# loading qt.Rdata below (otherwise `t` would resolve to base::t) -- confirm
# the .Rdata actually defines all three.
# Side effects only: sources helper scripts, reads the corpus, writes files.
create_summary <- function(input_path) {
  print("create_summary")
  # Load pre-trained scoring objects and helper functions (hard-coded paths).
  load("C:\\Users\\Atul\\Desktop\\summ\\qt.Rdata")
  ##load("C:\\Users\\Atul\\Desktop\\summ\\p.Rdata")
  source("C:\\Users\\Atul\\Desktop\\summ\\lexical_based_scoring.R")
  source("C:\\Users\\Atul\\Desktop\\summ\\parano_based_scoring.R")
  source("C:\\Users\\Atul\\Desktop\\summ\\paraoffset_based_scoring.R")
  #source("C:\\Users\\Atul\\Desktop\\summ\\titlewords_based_scoring.R")
  source("C:\\Users\\Atul\\Desktop\\summ\\propernoun_based_scoring.R")
  # NOTE(review): the filename below contains a space before ".R" -- verify it
  # matches the actual file on disk.
  source("C:\\Users\\Atul\\Desktop\\summ\\termfrequency_based_scoring .R")
  source("C:\\Users\\Atul\\Desktop\\summ\\tf-idf_based_scoring.R")
  source("C:\\Users\\Atul\\Desktop\\summ\\cue_based_scoring.R")
  source("C:\\Users\\Atul\\Desktop\\summ\\makesummary.R")
  source("C:\\Users\\Atul\\Desktop\\summ\\classifier.R")
  # Read every plain-text document in the input directory
  corpus <-VCorpus(DirSource(input_path), readerControl = list(reader = readPlain))
  for(qwerty in 1 : length(corpus)){
    # One summary per scoring model for document number `qwerty`
    sumt <- makesummary(corpus, qwerty, t)
    sumq <- makesummary(corpus, qwerty, q)
    sump <- makesummary(corpus, qwerty, p)
    strt <- as.String(sprintf("C:\\Users\\Atul\\Desktop\\summ\\summary_t\\%d.txt", qwerty))
    fileConn<-file(strt)
    writeLines(sumt, fileConn)
    close(fileConn)
    strq <- as.String(sprintf("C:\\Users\\Atul\\Desktop\\summ\\summary_q\\%d.txt", qwerty))
    fileConn<-file(strq)
    writeLines(sumq, fileConn)
    close(fileConn)
    strp <- as.String(sprintf("C:\\Users\\Atul\\Desktop\\summ\\summary_p\\%d.txt", qwerty))
    fileConn<-file(strp)
    writeLines(sump, fileConn)
    close(fileConn)
  }
}
print("create_summary")
load("C:\\Users\\Atul\\Desktop\\summ\\qt.Rdata")
##load("C:\\Users\\Atul\\Desktop\\summ\\p.Rdata")
source("C:\\Users\\Atul\\Desktop\\summ\\lexical_based_scoring.R")
source("C:\\Users\\Atul\\Desktop\\summ\\parano_based_scoring.R")
source("C:\\Users\\Atul\\Desktop\\summ\\paraoffset_based_scoring.R")
#source("C:\\Users\\Atul\\Desktop\\summ\\titlewords_based_scoring.R")
source("C:\\Users\\Atul\\Desktop\\summ\\propernoun_based_scoring.R")
source("C:\\Users\\Atul\\Desktop\\summ\\termfrequency_based_scoring .R")
source("C:\\Users\\Atul\\Desktop\\summ\\tf-idf_based_scoring.R")
source("C:\\Users\\Atul\\Desktop\\summ\\cue_based_scoring.R")
source("C:\\Users\\Atul\\Desktop\\summ\\makesummary.R")
source("C:\\Users\\Atul\\Desktop\\summ\\classifier.R")
corpus <-VCorpus(DirSource(input_path), readerControl = list(reader = readPlain))
for(qwerty in 1 : length(corpus)){
sumt <- makesummary(corpus, qwerty, t)
sumq <- makesummary(corpus, qwerty, q)
sump <- makesummary(corpus, qwerty, p)
strt <- as.String(sprintf("C:\\Users\\Atul\\Desktop\\summ\\summary_t\\%d.txt", qwerty))
fileConn<-file(strt)
writeLines(sumt, fileConn)
close(fileConn)
strq <- as.String(sprintf("C:\\Users\\Atul\\Desktop\\summ\\summary_q\\%d.txt", qwerty))
fileConn<-file(strq)
writeLines(sumq, fileConn)
close(fileConn)
strp <- as.String(sprintf("C:\\Users\\Atul\\Desktop\\summ\\summary_p\\%d.txt", qwerty))
fileConn<-file(strp)
writeLines(sump, fileConn)
close(fileConn)
}
} |
\name{logknots}
\alias{logknots}
\title{ Define Knots for Lag Space at Equally-Spaced Log-Values }
\description{
This function defines the position of knot or cut-off values at equally-spaced log-values for spline or strata functions, respectively. It is expressly created for lag-response functions to set the knots or cut-offs placements in accordance with the default of versions of \pkg{dlnm} earlier than 2.0.0.
}
\usage{
logknots(x, nk=NULL, fun="ns", df=1, degree=3, intercept=TRUE)
}
\arguments{
\item{x }{ an integer scalar or vector of length 2, defining the maximum lag or the lag range, respectively, or a vector variable.}
\item{nk }{ number of knots or cut-offs.}
\item{fun }{ character scalar with the name of the function for which the knots or cut-offs must be created. See Details below.}
\item{df }{ degree of freedom.}
\item{degree }{ degree of the piecewise polynomial. Only for \code{fun="bs"}.}
\item{intercept }{ logical. If an intercept is included in the basis function.}
}
\details{
This function has been included for consistency with versions of \pkg{dlnm} earlier than 2.0.0, where the default knots or cut-off placements in the lag space for functions \code{ns}, \code{bs} and \code{strata} used to be at equally-spaced values in the log scale. From version 2.0.0 on, the default is equally-spaced quantiles, similarly to functions defined for the space of predictor. This function can be used to replicate the results obtained with old versions.
The argument \code{x} is usually assumed to represent the maximum lag (if a scalar) or the lag range (if a vector of length 2). Otherwise is interpreted as a vector variable for which the range is computed internally.
The number of knots is set with the argument \code{nk}, or otherwise determined by the choice of function and number of degrees of freedom through the arguments \code{fun} and \code{df}. Specifically, the number of knots is set to \code{df-1-intercept} for \code{"ns"}, \code{df-degree-intercept} for \code{"bs"}, or \code{df-intercept} for \code{"strata"}.
An intercept is included by default (\code{intercept=TRUE}), consistently with the default for the lag space.
}
\value{
A numeric vector of knot or cut-off values, to be used in the \code{arglag} list argument of \code{\link{crossbasis}} for reproducing the default of versions of \pkg{dlnm} earlier than 2.0.0.
}
\author{Antonio Gasparrini <\email{antonio.gasparrini@lshtm.ac.uk}>}
\seealso{
\code{\link{equalknots}} for placing the knots at equally-spaced values. \code{\link{crossbasis}} to generate cross-basis matrices.
See \code{\link{dlnm-package}} for an introduction to the package and for links to package vignettes providing more detailed information.
}
\examples{
### setting 3 knots for lag 0-20
logknots(20, 3)
logknots(c(0,20), 3)
### setting knots and cut-offs for different functions
logknots(20, fun="ns", df=4)
logknots(20, fun="bs", df=4, degree=2)
logknots(20, fun="strata", df=4)
### with and without intercept
logknots(20, fun="ns", df=4)
logknots(20, fun="ns", df=4, intercept=FALSE)
### replicating an old example in time series analysis
lagknots <- logknots(30, 3)
cb <- crossbasis(chicagoNMMAPS$temp, lag=30, argvar=list(fun="bs",df=5,
degree=2), arglag=list(knots=lagknots))
summary(cb)
library(splines)
model <- glm(death ~ cb + ns(time, 7*14) + dow,
family=quasipoisson(), chicagoNMMAPS)
pred <- crosspred(cb, model, cen=21, by=1)
plot(pred, xlab="Temperature", col="red", zlab="RR", shade=0.6,
main="3D graph of temperature effect")
}
\keyword{smooth}
| /man/logknots.Rd | no_license | mbexhrs3/dlnm | R | false | false | 3,588 | rd | \name{logknots}
\alias{logknots}
\title{ Define Knots for Lag Space at Equally-Spaced Log-Values }
\description{
This function defines the position of knot or cut-off values at equally-spaced log-values for spline or strata functions, respectively. It is expressly created for lag-response functions to set the knots or cut-offs placements in accordance with the default of versions of \pkg{dlnm} earlier than 2.0.0.
}
\usage{
logknots(x, nk=NULL, fun="ns", df=1, degree=3, intercept=TRUE)
}
\arguments{
\item{x }{ an integer scalar or vector of length 2, defining the maximum lag or the lag range, respectively, or a vector variable.}
\item{nk }{ number of knots or cut-offs.}
\item{fun }{ character scalar with the name of the function for which the knots or cut-offs must be created. See Details below.}
\item{df }{ degree of freedom.}
\item{degree }{ degree of the piecewise polynomial. Only for \code{fun="bs"}.}
\item{intercept }{ logical. If an intercept is included in the basis function.}
}
\details{
This function has been included for consistency with versions of \pkg{dlnm} earlier than 2.0.0, where the default knots or cut-off placements in the lag space for functions \code{ns}, \code{bs} and \code{strata} used to be at equally-spaced values in the log scale. From version 2.0.0 on, the default is equally-spaced quantiles, similarly to functions defined for the space of predictor. This function can be used to replicate the results obtained with old versions.
The argument \code{x} is usually assumed to represent the maximum lag (if a scalar) or the lag range (if a vector of length 2). Otherwise is interpreted as a vector variable for which the range is computed internally.
The number of knots is set with the argument \code{nk}, or otherwise determined by the choice of function and number of degrees of freedom through the arguments \code{fun} and \code{df}. Specifically, the number of knots is set to \code{df-1-intercept} for \code{"ns"}, \code{df-degree-intercept} for \code{"bs"}, or \code{df-intercept} for \code{"strata"}.
An intercept is included by default (\code{intercept=TRUE}), consistently with the default for the lag space.
}
\value{
A numeric vector of knot or cut-off values, to be used in the \code{arglag} list argument of \code{\link{crossbasis}} for reproducing the default of versions of \pkg{dlnm} earlier than 2.0.0.
}
\author{Antonio Gasparrini <\email{antonio.gasparrini@lshtm.ac.uk}>}
\seealso{
\code{\link{equalknots}} for placing the knots at equally-spaced values. \code{\link{crossbasis}} to generate cross-basis matrices.
See \code{\link{dlnm-package}} for an introduction to the package and for links to package vignettes providing more detailed information.
}
\examples{
### setting 3 knots for lag 0-20
logknots(20, 3)
logknots(c(0,20), 3)
### setting knots and cut-offs for different functions
logknots(20, fun="ns", df=4)
logknots(20, fun="bs", df=4, degree=2)
logknots(20, fun="strata", df=4)
### with and without intercept
logknots(20, fun="ns", df=4)
logknots(20, fun="ns", df=4, intercept=FALSE)
### replicating an old example in time series analysis
lagknots <- logknots(30, 3)
cb <- crossbasis(chicagoNMMAPS$temp, lag=30, argvar=list(fun="bs",df=5,
degree=2), arglag=list(knots=lagknots))
summary(cb)
library(splines)
model <- glm(death ~ cb + ns(time, 7*14) + dow,
family=quasipoisson(), chicagoNMMAPS)
pred <- crosspred(cb, model, cen=21, by=1)
plot(pred, xlab="Temperature", col="red", zlab="RR", shade=0.6,
main="3D graph of temperature effect")
}
\keyword{smooth}
|
# Selected tickers (Alpha Vantage symbols)
tickers <- c("AAPL")
# Fetch intraday quotes (1-minute bars, full output) -- algo trading, last 7 days
av_api_key("api-key")  # NOTE(review): placeholder -- supply a real Alpha Vantage key
stocks_data <- tq_get(tickers,
                      get = "alphavantage",
                      av_fun = "TIME_SERIES_INTRADAY",
                      interval = "1min",
                      outputsize = "full")
| /daytrade.R | permissive | manhaes346/nerdzao214_r_portfolioanalytics | R | false | false | 344 | r | #acoes selecionadas
tickers <- c("AAPL")
#acessando os dados de cotacoes intraday - Algo Trading - 7 dias
av_api_key("api-key")
stocks_data <- tq_get(tickers,
get = "alphavantage",
av_fun = "TIME_SERIES_INTRADAY",
interval = "1min",
outputsize = "full")
|
#' Generate Static Data Stream
#'
#' Generate a new synthetic multidimensional static data stream having the
#' desired properties.
#'
#' @param n A vector containing \code{x} values, where the values corresponds
#' to the number of points for each step and \code{x} to the number of
#' steps.
#' @param prop Proportion of outliers in the hidden space.
#' @param proptype Type of the proportion of outliers. Value "proportional":
#' depend on the size of the empty space. Value "absolute": same absolute
#' proportion per subspace.
#' @param stream.config A stream configuration object. Should have been
#' generated with \code{nstep = 1}.
#' @param verbose Prints the number of the currently generated element if TRUE.
#' @param method Choose method of point generation. Can be "Rejection" or "Construction"
#'
#' @return An object of class stream, which is a List of 5 elements.
#' - \code{data} contains the stream generated
#' - \code{labels} contains the description of each point (\code{0} if the point
#' is not an outlier, or the subspace in which it is outlying as a string)
#' - \code{n} the number of points at each step
#' - \code{prop} the proportion of outliers in the hidden space
#' - \code{stream.config} the associated stream configuration object (which is
#' valid only for static streams)
#'
#' @details
#' The data is generated uniformly, except in certain subspaces where the data
#' is concentrated in particular dependencies (i.e. in the "Wall" dependency,
#' data concentrates on the axes, in a L-like shape). This should create spaces
#' with high dependency and space to observe hidden outliers. Note that the
#' proportion of outlier \code{prop} does not relate directly to the percentage
#' of outliers in the output stream. Since it corresponds to the probability of
#' a point, being ALREADY in the hidden space to stay where it is, the overall
#' proportion of outliers depends on the hidden space volume, which depends
#' on the number of subspaces and their margins. The greater the margin, the
#' bigger the hidden space.
#'
#' @examples
#' # Generate a stream with default parameters
#' stream <- generate.static.stream()
#' # Generate a stream with custom configuration
#' stream.config <- generate.stream.config(dim=50, nstep=1) # nstep should be 1
#' stream <- generate.static.stream(n=1000, prop=0.05,
#' stream.config=stream.config)
#' # Output stream results (to uncomment)
#' # output.stream(stream, "example")
#'
#' @author Edouard Fouché, \email{edouard.fouche@kit.edu}
#'
#' @seealso
#' * \code{\link{generate.stream.config}} : generate a stream.config file for a
#' dynamic or static stream
#'
#' @md
#' @export
generate.static.stream <- function(n=1000, prop=0.01, proptype="proportional",
                                   stream.config=NULL, verbose=FALSE, method="Rejection") {
  # Generate n points with dim dimensions where the list of subspaces are
  # generated wall-like with the size of the wall taken from margins list as
  # 1-margin. In the hidden space, a proportion prop of the points is taken
  # as outliers. Returns an object of class "stream".
  sanitycheck.generate(n=n, prop=prop, stream.config=stream.config)
  if(is.null(stream.config)) {
    stream.config <- generate.stream.config(nstep=1)
  } else {
    if(stream.config$nstep != 1) {
      # BUG FIX: the original used `"..." + "..."`; R has no `+` operator for
      # character strings, so reaching this branch raised "non-numeric
      # argument to binary operator" instead of the intended message.
      stop("The stream.config file is not compatible with static streams: ",
           "nstep should be = 1")
    }
  }
  # Unpack the configuration.
  dim <- stream.config$dim
  subspaces <- stream.config$subspaces
  margins <- stream.config$margins
  dependency <- stream.config$dependency
  discretize <- stream.config$discretize
  allowOverlap <- stream.config$allowOverlap
  # Delegate the actual point generation.
  meta <- generate.multiple.rows(n, dim, subspaces, margins, prop,
                                 proptype=proptype, dependency=dependency,
                                 discretize=discretize, verbose=verbose, method=method)
  res <- list("data"=meta$data,"labels"=meta$labels, "n"=n, "prop"=prop,
              "proptype"=proptype, "allowOverlap"=allowOverlap,
              "stream.config"=stream.config)
  attr(res, "class") <- "stream"
  return(res)
}
#' Generate Dynamic Data Stream
#'
#' Generate a new synthetic multidimensional dynamic data stream having the
#' desired properties.
#'
#' @param n A vector containing \code{x} values, where the values corresponds to
#' the number of points for each step and \code{x} to the number of steps.
#' @param prop Proportion of outliers in the hidden space.
#' @param proptype Type of the proportion of outliers. Value "proportional":
#' depend on the size of the empty space. Value "absolute": same absolute
#' proportion per subspace.
#' @param stream.config A stream configuration object. Should have been
#' generated with \code{nstep > 1}.
#' @param verbose If TRUE, then the state of the stream will be printed as
#' output for every 100 points.
#' @param coldstart If TRUE (default) all subspaces will start with a margin
#' value of 0.
#' @param transition A string indication what kind of transition should occur.
#' Can be "Linear" (default) or "Abrupt".
#' @param method Defines the point generation method. "Rejection" creates points
#' randomly until they fit into the dependency. "Construction" creates points
#' that are close to the relation with respect to the margin. If proptype is
#' "proportional" then first a random point is generated to check, whether the
#' point is in the hidden space and may become an outlier. If proptype is
#' "absolute" the decision whether the point becomes an outlier is made prior
#' to its generation.
#'
#' @return An object of class \code{stream}, which is a \code{List} of 5
#' elements.
#' - \code{data} contains the stream generated
#' - \code{labels} contains the description of each point (\code{0} if the point
#' is not an outlier, or the subspace in which it is outlying as a string)
#' - \code{n} the number of points at each step
#' - \code{prop} the proportion of outliers in the hidden space
#' - \code{stream.config} the associated stream configuration object (which is
#' valid only for dynamic streams)
#'
#' @details
#' The data is generated uniformly, except in certain subspaces where the data
#' is concentrated in particular dependencies (i.e. in the "Wall" dependency,
#' data concentrates on the axes, in a L-like shape). This should create spaces
#' with high dependency and space to observe hidden outliers. Note that the
#' proportion of outlier \code{prop} does not relate directly to the percentage
#' of outliers in the output stream. Since it corresponds to the probability of
#' a point, being ALREADY in the hidden space to stay where it is, the overall
#' proportion of outliers depends on the hidden space volume, which depends
#' on the number of subspaces and their margins. The greater the margin, the
#' bigger the hidden space.
#'
#' @examples
#' # Generate a stream with default parameters
#' stream <- generate.dynamic.stream()
#' # Generate a stream with custom configuration
#' stream.config <- generate.stream.config(dim=50, nstep=10, volatility=0.5)
#' stream <- generate.dynamic.stream(n=100, prop=0.05,
#' stream.config=stream.config)
#' # Output stream results (to uncomment)
#' # output.stream(stream, "example")
#'
#' @author Edouard Fouché, \email{edouard.fouche@kit.edu}
#'
#' @seealso
#' * \code{\link{generate.stream.config}} : generate a stream.config file for a
#' dynamic or static stream
#'
#' @md
#' @export
generate.dynamic.stream <- function(n=100, prop=0.01, proptype="proportional",
                                    stream.config=NULL, verbose=FALSE,
                                    coldstart=TRUE, transition="Linear", method="Rejection") {
  sanitycheck.generate(n=n, prop=prop, stream.config=stream.config,
                       verbose=verbose)
  if(is.null(stream.config)) {
    stream.config <- generate.stream.config()
  } else {
    if(stream.config$nstep <= 1) {
      # BUG FIX: the original used `"..." + "..."`; R has no `+` operator for
      # character strings, so reaching this branch raised "non-numeric
      # argument to binary operator" instead of the intended message.
      stop("The stream.config file is not compatible with dynamic streams: ",
           "nstep should be > 1")
    }
  }
  if(length(n) == 1) {
    n <- rep(n, stream.config$nstep)
  } # else assume that n has the good size, was checked by the sanity check
  # Unpack the configuration.
  dim <- stream.config$dim
  subspaceslist <- stream.config$subspaceslist
  marginslist <- stream.config$marginslist
  dependency <- stream.config$dependency
  discretize <- stream.config$discretize
  allowOverlap <- stream.config$allowOverlap
  data <- data.frame()
  labels <- c()
  # Generate some data for each time step description
  for(seq in 1:length(n)) {
    if(verbose) print(paste("Step", seq, "of", length(n), ". Size", n[[seq]],
                            "elements."))
    # Determine for the current state step the start and end margins values
    # for each subspace.
    subspaces_state <- list() # Indicates if the subspace has a dependency.
    currentmargins <- list() # The start margin-value of a step.
    nextmargins <- list() # The end margin-value of a step.
    if(seq == 1) {
      # If we want a coldstart, the starting margins values will be 0 for all
      # subspaces. Otherwise, the provided value is used.
      # TODO @apoth: Check, if this influences whether a drift is possible in /
      # from the first to the second step.
      subspaces_state <- subspaceslist[[seq]]
      if(coldstart) {
        currentmargins <- c(rep(0, length(subspaceslist[[seq]])))
      } else {
        currentmargins <- marginslist[[seq]]
      }
      nextmargins <- marginslist[[seq]]
    } else {
      # We shall consider subspaces from the previous and the next state.
      subspaces_state <- unique(c(subspaceslist[[seq - 1]],
                                  subspaceslist[[seq]]))
      for(sub in 1:length(subspaces_state)) {
        # If the subspace is contained in the next step, its target value is
        # its margin in the next step; otherwise 0.
        if(any(sapply(subspaceslist[[seq]],
                      function(x) setequal(x, subspaces_state[[sub]])))) {
          nextmargins <- c(nextmargins,
                           marginslist[[seq]][sapply(subspaceslist[[seq]],
                                                     function(x) setequal(x, subspaces_state[[sub]]))])
        } else {
          nextmargins <- c(nextmargins, 0)
        }
        # If the subspace is contained in the current step, its start value is
        # its margin in the current step; otherwise 0.
        if(any(sapply(subspaceslist[[seq - 1]],
                      function(x) setequal(x,subspaces_state[[sub]])))) {
          currentmargins <- c(currentmargins,
                              marginslist[[seq - 1]][sapply(subspaceslist[[seq - 1]],
                                                            function(x) setequal(x,subspaces_state[[sub]]))])
        } else {
          currentmargins <- c(currentmargins, 0)
        }
      }
    }
    currentmargins <- as.list(currentmargins)
    nextmargins <- as.list(nextmargins)
    i <- 0
    for(x in 1:n[[seq]]) {
      # TODO @apoth: Add new transition types here!
      #
      # Update the current margins (transitioning uniformly between
      # currentmargins and nextmargins)
      if(transition == "Linear") {
        margins_state <- as.list(unlist(currentmargins) -
                                   (unlist(currentmargins) -
                                      unlist(nextmargins)) * (x - 1) / n[[seq]])
      } else if(transition == "Abrupt") { # Leave the margins as they are
        margins_state <- currentmargins
      } else {
        stop("Unknown transition type specified.")
      }
      # Periodic progress output in verbose mode.
      if(i %% 100 == 0 & verbose) {
        print(c("subspaces_state:", paste(subspaces_state)), collapse=" ")
        #print(c("currentmargins:", paste(currentmargins)), collapse=" ")
        print(c("margins_state:", paste(margins_state)), collapse=" ")
        #print(c("nextmargins:", paste(nextmargins)), collapse=" ")
      }
      i <- i + 1
      # Generate a row
      # NOTE: rbind inside the loop is O(n^2); consider collecting rows in a
      # list and binding once if generation time becomes an issue.
      res <- generate.row(dim=dim, subspaces=subspaces_state,
                          margins=margins_state, prop=prop, proptype=proptype,
                          dependency=dependency, discretize=discretize,
                          method=method)
      data <- rbind(data, t(res$data))
      labels <- c(labels, res$label)
    }
  }
  # Put adequate names on the columns
  attributes(data)$names <- c(c(1:dim),"class")
  res <- list("data"=data,"labels"=labels, "n"=n, "prop"=prop,
              "proptype"=proptype, "allowOverlap" = allowOverlap,
              "stream.config"=stream.config)
  attr(res, "class") <- "stream"
  return(res)
}
| /R/generateStream.R | permissive | allekai/R-streamgenerator | R | false | false | 12,920 | r | #' Generate Static Data Stream
#'
#' Generate a new synthetic multidimensional static data stream having the
#' desired properties.
#'
#' @param n A vector containing \code{x} values, where the values correspond
#' to the number of points for each step and \code{x} to the number of
#' steps.
#' @param prop Proportion of outliers in the hidden space.
#' @param proptype Type of the proportion of outliers. Value "proportional":
#' depend on the size of the empty space. Value "absolute": same absolute
#' proportion per subspace.
#' @param stream.config A stream configuration object. Should have been
#' generated with \code{nstep = 1}.
#' @param verbose Prints the number of the currently generated element if TRUE.
#' @param method Choose method of point generation. Can be "Rejection" or "Construction"
#'
#' @return An object of class stream, which is a List of 5 elements.
#' - \code{data} contains the stream generated
#' - \code{labels} contains the description of each point (\code{0} if the point
#' is not an outlier, or the subspace in which it is outlying as a string)
#' - \code{n} the number of points at each step
#' - \code{prop} the proportion of outliers in the hidden space
#' - \code{stream.config} the associated stream configuration object (which is
#' valid only for static streams)
#'
#' @details
#' The data is generated uniformly, except in certain subspaces where the data
#' is concentrated in particular dependencies (i.e. in the "Wall" dependency,
#' data concentrates on the axes, in a L-like shape). This should create spaces
#' with high dependency and space to observe hidden outliers. Note that the
#' proportion of outlier \code{prop} does not relate directly to the percentage
#' of outliers in the output stream. Since it corresponds to the probability of
#' a point, being ALREADY in the hidden space to stay where it is, the overall
#' proportion of outliers depends on the hidden space volume, which depends
#' on the number of subspaces and their margins. The greater the margin, the
#' bigger the hidden space.
#'
#' @examples
#' # Generate a stream with default parameters
#' stream <- generate.static.stream()
#' # Generate a stream with custom configuration
#' stream.config <- generate.stream.config(dim=50, nstep=1) # nstep should be 1
#' stream <- generate.static.stream(n=1000, prop=0.05,
#' stream.config=stream.config)
#' # Output stream results (to uncomment)
#' # output.stream(stream, "example")
#'
#' @author Edouard Fouché, \email{edouard.fouche@kit.edu}
#'
#' @seealso
#' * \code{\link{generate.stream.config}} : generate a stream.config file for a
#' dynamic or static stream
#'
#' @md
#' @export
generate.static.stream <- function(n=1000, prop=0.01, proptype="proportional",
                                   stream.config=NULL, verbose=FALSE, method="Rejection") {
  # Generate n points with dim dimensions where the list of subspaces are
  # generated wall-like with the size of the wall taken from margins list as
  # 1-margin. In the hidden space, a proportion prop of the points is taken as
  # outliers.
  sanitycheck.generate(n=n, prop=prop, stream.config=stream.config)
  if(is.null(stream.config)) {
    # No configuration supplied: fall back to a default single-step config.
    stream.config <- generate.stream.config(nstep=1)
  } else {
    if(stream.config$nstep != 1) {
      # BUGFIX: the original concatenated the two message fragments with `+`,
      # which is a runtime error for character operands in R and masked the
      # intended message.  stop() concatenates its ... arguments itself.
      stop("The stream.config file is not compatible with static streams:",
           " nstep should be = 1")
    }
  }
  # Unpack the configuration.
  dim <- stream.config$dim
  subspaces <- stream.config$subspaces
  margins <- stream.config$margins
  dependency <- stream.config$dependency
  discretize <- stream.config$discretize
  allowOverlap <- stream.config$allowOverlap
  # Generate all rows for the single (static) step.
  meta <- generate.multiple.rows(n, dim, subspaces, margins, prop,
                                 proptype=proptype, dependency=dependency,
                                 discretize=discretize, verbose=verbose, method=method)
  # Package the data, labels and settings as a "stream" object.
  res <- list("data"=meta$data,"labels"=meta$labels, "n"=n, "prop"=prop,
              "proptype"=proptype, "allowOverlap"=allowOverlap,
              "stream.config"=stream.config)
  attr(res, "class") <- "stream"
  return(res)
}
#' Generate Dynamic Data Stream
#'
#' Generate a new synthetic multidimensional dynamic data stream having the
#' desired properties.
#'
#' @param n A vector containing \code{x} values, where the values corresponds to
#' the number of points for each step and \code{x} to the number of steps.
#' @param prop Proportion of outliers in the hidden space.
#' @param proptype Type of the proportion of outliers. Value "proportional":
#' depend on the size of the empty space. Value "absolute": same absolute
#' proportion per subspace.
#' @param stream.config A stream configuration object. Should have been
#' generated with \code{nstep > 1}.
#' @param verbose If TRUE, then the state of the stream will be printed as
#' output for every 100 points.
#' @param coldstart If TRUE (default) all subspaces will start with a margin
#' value of 0.
#' @param transition A string indication what kind of transition should occur.
#' Can be "Linear" (default) or "Abrupt".
#' @param method Defines the point generation method. "Rejection" creates points
#' randomly until they fit into the dependency. "Construction" creates points
#' that are close to the relation with respect to the margin. If proptype is
#' "proportional" then first a random point is generated to check, whether the
#' point is in the hidden space and may become an outlier. If proptype is
#'   "absolute" the decision whether the point becomes an outlier is made prior
#'   to its generation.
#'
#' @return An object of class \code{stream}, which is a \code{List} of 5
#' elements.
#' - \code{data} contains the stream generated
#' - \code{labels} contains the description of each point (\code{0} if the point
#' is not an outlier, or the subspace in which it is outlying as a string)
#' - \code{n} the number of points at each step
#' - \code{prop} the proportion of outliers in the hidden space
#' - \code{stream.config} the associated stream configuration object (which is
#' valid only for dynamic streams)
#'
#' @details
#' The data is generated uniformly, except in certain subspaces where the data
#' is concentrated in particular dependencies (i.e. in the "Wall" dependency,
#' data concentrates on the axes, in a L-like shape). This should create spaces
#' with high dependency and space to observe hidden outliers. Note that the
#' proportion of outlier \code{prop} does not relate directly to the percentage
#' of outliers in the output stream. Since it corresponds to the probability of
#' a point, being ALREADY in the hidden space to stay where it is, the overall
#' proportion of outliers depends on the hidden space volume, which depends
#' on the number of subspaces and their margins. The greater the margin, the
#' bigger the hidden space.
#'
#' @examples
#' # Generate a stream with default parameters
#' stream <- generate.dynamic.stream()
#' # Generate a stream with custom configuration
#' stream.config <- generate.stream.config(dim=50, nstep=10, volatility=0.5)
#' stream <- generate.dynamic.stream(n=100, prop=0.05,
#' stream.config=stream.config)
#' # Output stream results (to uncomment)
#' # output.stream(stream, "example")
#'
#' @author Edouard Fouché, \email{edouard.fouche@kit.edu}
#'
#' @seealso
#' * \code{\link{generate.stream.config}} : generate a stream.config file for a
#' dynamic or static stream
#'
#' @md
#' @export
generate.dynamic.stream <- function(n=100, prop=0.01, proptype="proportional",
                                    stream.config=NULL, verbose=FALSE,
                                    coldstart=TRUE, transition="Linear", method="Rejection") {
  sanitycheck.generate(n=n, prop=prop, stream.config=stream.config,
                       verbose=verbose)
  if(is.null(stream.config)) {
    stream.config <- generate.stream.config()
  } else {
    if(stream.config$nstep <= 1) {
      # BUGFIX: the original concatenated the message fragments with `+`,
      # which is a runtime error for character operands in R (also fixes the
      # "in not" typo).  stop() concatenates its ... arguments itself.
      stop("The stream.config file is not compatible with dynamic streams:",
           " nstep should be > 1")
    }
  }
  if(length(n) == 1) {
    # Recycle a scalar n into one point count per step.
    n <- rep(n, stream.config$nstep)
  } # else assume that n has the good size, was checked by the sanity check
  # Unpack the configuration.
  dim <- stream.config$dim
  subspaceslist <- stream.config$subspaceslist
  marginslist <- stream.config$marginslist
  dependency <- stream.config$dependency
  discretize <- stream.config$discretize
  allowOverlap <- stream.config$allowOverlap
  data <- data.frame()
  labels <- c()
  # Generate some data for each time step description
  for(seq in 1:length(n)) {
    if(verbose) print(paste("Step", seq, "of", length(n), ". Size", n[[seq]],
                            "elements."))
    # Determine for the current state step the start and end margins values for
    # each subspaces
    subspaces_state <- list() # Indicates if the subspace has a dependency.
    currentmargins <- list() # The start margin-value of a step.
    nextmargins <- list() # The end margin-value of a step.
    if(seq == 1) {
      # If we want a coldstart, the starting margins values will be 0 for all
      # subspaces. Otherwise, the provided value is used.
      # TODO @apoth: Check, if this influences whether a drift is possible in /
      # from the first to the second step.
      subspaces_state <- subspaceslist[[seq]]
      if(coldstart) {
        currentmargins <- rep(0, length(subspaceslist[[seq]]))
      } else {
        currentmargins <- marginslist[[seq]]
      }
      nextmargins <- marginslist[[seq]]
    } else {
      # We shall consider subspace from the previous and the next state
      subspaces_state <- unique(c(subspaceslist[[seq - 1]],
                                  subspaceslist[[seq]]))
      for(sub in 1:length(subspaces_state)) {
        # In the case a subspace is contained in the next step, its intended
        # value should be equal to its margins in the next step.
        # Otherwise, it should be 0.
        if(any(sapply(subspaceslist[[seq]],
                      function(x) setequal(x, subspaces_state[[sub]])))) {
          nextmargins <- c(nextmargins,
                           marginslist[[seq]][sapply(subspaceslist[[seq]],
                                                     function(x) setequal(x, subspaces_state[[sub]]))])
        } else {
          nextmargins <- c(nextmargins, 0)
        }
        # In the case a subspace is contained in the current step, its start
        # value should be equal to its margins in the current step.
        # Otherwise, it should be 0.
        if(any(sapply(subspaceslist[[seq - 1]],
                      function(x) setequal(x,subspaces_state[[sub]])))) {
          currentmargins <- c(currentmargins,
                              marginslist[[seq - 1]][sapply(subspaceslist[[seq - 1]],
                                                            function(x) setequal(x,subspaces_state[[sub]]))])
        } else {
          currentmargins <- c(currentmargins, 0)
        }
      }
    }
    currentmargins <- as.list(currentmargins)
    nextmargins <- as.list(nextmargins)
    i <- 0
    for(x in 1:n[[seq]]) {
      # TODO @apoth: Add new transition types here!
      #
      # Update the current margins (transitioning uniformly between
      # currentmargins and nextmargins)
      if(transition == "Linear") {
        margins_state <- as.list(unlist(currentmargins) -
                                   (unlist(currentmargins) -
                                      unlist(nextmargins)) * (x - 1) / n[[seq]])
      } else if(transition == "Abrupt") { # Leave the margins as they are
        margins_state <- currentmargins
      } else {
        stop("Unknown transition type specified.")
      }
      # BUGFIX: the condition used the vectorized `&` for a scalar test, and
      # `collapse=" "` was being passed to print(), which silently ignores it;
      # collapse the vectors explicitly with paste() instead.
      if(verbose && i %% 100 == 0) {
        print(paste("subspaces_state:", paste(subspaces_state, collapse=" ")))
        #print(paste("currentmargins:", paste(currentmargins, collapse=" ")))
        print(paste("margins_state:", paste(margins_state, collapse=" ")))
        #print(paste("nextmargins:", paste(nextmargins, collapse=" ")))
      }
      i <- i + 1
      # Generate a row
      res <- generate.row(dim=dim, subspaces=subspaces_state,
                          margins=margins_state, prop=prop, proptype=proptype,
                          dependency=dependency, discretize=discretize,
                          method=method)
      data <- rbind(data, t(res$data))
      labels <- c(labels, res$label)
    }
  }
  # Put adequate names on the columns
  attributes(data)$names <- c(c(1:dim),"class")
  res <- list("data"=data,"labels"=labels, "n"=n, "prop"=prop,
              "proptype"=proptype, "allowOverlap" = allowOverlap,
              "stream.config"=stream.config)
  attr(res, "class") <- "stream"
  return(res)
}
|
## Matrix inversion is usually a costly computation, so it can pay off to cache
## the inverse of a matrix instead of recomputing it.  makeCacheMatrix() builds
## a cache-aware "matrix" object; cacheSolve() computes or retrieves its inverse.
makeCacheMatrix <- function(x = matrix()) {
  ## Create a special "matrix" object that can cache its inverse.  Returns a
  ## list of four accessor functions closing over the matrix `x` and its
  ## (initially unknown) cached inverse.
  cached_inverse <- NULL
  # Replace the stored matrix and invalidate any cached inverse.
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL
  }
  # Return the stored matrix.
  get <- function() x
  # Store a freshly computed inverse.
  setInverse <- function(mInv) cached_inverse <<- mInv
  # Return the cached inverse (NULL when not yet computed).
  getInverse <- function() cached_inverse
  list(
    set = set,
    get = get,
    setInverse = setInverse,
    getInverse = getInverse
  )
}
cacheSolve <- function(x, ...) {
  ## Compute the inverse of the special "matrix" returned by makeCacheMatrix().
  ## If the inverse has already been calculated (and the matrix has not
  ## changed), the cached value is returned instead of being recomputed.
  ## Extra arguments in `...` are forwarded to solve().
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getInverse()
  if (!is.null(cached)) {
    # Cache hit: skip the expensive solve() call.
    message("getting cached data")
    return(cached)
  }
  # Cache miss: invert the underlying matrix and remember the result.
  mat <- x$get()
  inverse <- solve(mat, ...)
  x$setInverse(inverse)
  inverse
}
| /cachematrix.R | no_license | vierageorge/ProgrammingAssignment2 | R | false | false | 1,340 | r | ## Matrix inversion is usually a costly computation and there may be some benefit to caching the inverse
## of a matrix rather than compute it repeatedly. This pair of functions, calculate the inverse of a matrix or
## retrieves it from cache if it has already been computed.
makeCacheMatrix <- function(x = matrix()) {
  ## This function creates a special "matrix" object that can cache its inverse.
  ## It returns a list of four closures that share the matrix `x` and the
  ## cached inverse `matrixInverse` (NULL until setInverse() is called).
  matrixInverse <- NULL
  set <- function(y){
    # Replace the stored matrix; the previously cached inverse is now stale,
    # so it is reset to NULL.
    x <<- y
    matrixInverse <<- NULL
  }
  # Return the currently stored matrix.
  get <- function() x
  # Store / retrieve the cached inverse (used by cacheSolve()).
  setInverse <- function(mInv) matrixInverse <<- mInv
  getInverse <- function() matrixInverse
  list(set = set, get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}
cacheSolve <- function(x, ...) {
  ## This function computes the inverse of the special "matrix" returned by makeCacheMatrix above.
  ##If the inverse has already been calculated (and the matrix has not changed), then the cachesolve should
  ##retrieve the inverse from the cache.
  ## Return a matrix that is the inverse of 'x'
  ## Extra arguments in `...` are forwarded to solve().
  matrixInverse <- x$getInverse()
  if(!is.null(matrixInverse)) {
    # Cache hit: reuse the stored inverse instead of recomputing it.
    message("getting cached data")
    return(matrixInverse)
  }
  # Cache miss: invert the stored matrix and remember the result for next time.
  data <- x$get()
  matrixInverse <- solve(data, ...)
  x$setInverse(matrixInverse)
  matrixInverse
}
|
# Instalación de paquetes
# Install `pkg` only when it is not already present.
# BUGFIX: compare against the package-name row names of installed.packages();
# the original matched `pkg` against every cell of the matrix (versions,
# library paths, ...), which could false-positive and skip a needed install.
instalar <- function (pkg) if (!pkg %in% rownames(installed.packages())) install.packages(pkg)
# Ensure each required package is available, installing it if missing.
instalar("ggplot2")
instalar("shiny")
instalar("shinythemes")
instalar("DT")
instalar("plotly") | /001.introduccion/install.R | no_license | AMM53/Tecnicas-de-visualizacion | R | false | false | 209 | r | # Instalación de paquetes
# Install `pkg` only if it is not already installed.  Note: membership is
# tested against the whole installed.packages() matrix (every column), not
# just its package-name row names.
instalar <- function (pkg) if (!pkg %in% installed.packages()) install.packages(pkg)
instalar("ggplot2")
instalar("shiny")
instalar("shinythemes")
instalar("DT")
instalar("plotly") |
### this function generates priors for attack and defense parameters
### in the classical model of ladder prediction
### on the basis of all games played in the previous season ('15-'16)
### note: this really just is a tailored function...
generate_abilities <- function(...){
  #### Analyze all games played in the JPL season 14-15
  # Fits the classical attack/defense model (JAGS model file 'jplclassic.txt')
  # on all games of the loaded JPL2015 season, and returns the posterior means
  # and standard deviations of each team's attack, defense and home-advantage
  # (gamma) parameters, for use as priors in the next season.
  # NOTE(review): inside a function, rm(list=ls()) clears only the function's
  # local environment, not the global workspace -- confirm the intent.
  rm(list=ls())
  #source('get_data_easy_tryout.R')
  # Loads the JPL2015 object (teams and all games) used below.
  source('pullDataJPL2015_2016.R')
  ## I. analyze
  require(rjags)
  require(runjags)
  # Data handed to the JAGS model: goal counts and team IDs per game.
  dataList <- list(
    nGames = dim(JPL2015$allGames)[1],
    nTeams = dim(JPL2015$teams)[1],
    X1 = JPL2015$allGames$homegoals,
    X2 = JPL2015$allGames$awaygoals,
    T1 = JPL2015$allGames$homeID,
    T2 = JPL2015$allGames$awayID
  )
  # Random starting values, drawn fresh for each chain.
  initsList <- function(){
    Tattack = rgamma(dataList$nTeams,1,1) #attack parameter
    Tdefense= rgamma(dataList$nTeams,1,1) # defense parameter
    gamma = rgamma(dataList$nTeams,1,1) # home advantage parameter
    return(list(Tattack=Tattack,Tdefense=Tdefense,gamma=gamma))
  }
  runJagsout <- run.jags( method = "parallel",
                          model = "jplclassic.txt",
                          monitor = c("Tattack","Tdefense", "gamma", "delta"),
                          data = dataList,
                          inits = initsList,
                          n.chains = 3,
                          thin = 10,
                          adapt = 10000,
                          burnin = 10000,
                          sample = 10000,
                          summarise=FALSE
  )
  #summary(runJagsout)
  codaSamples = as.mcmc.list(runJagsout)
  #gelman.diag(codaSamples)
  allSamples<-combine.mcmc(codaSamples)
  # First 48 monitored columns = 16 teams x 3 parameters (attack, defense, gamma),
  # filled column-major into a 16 x 3 matrix.
  abilities <- matrix(colMeans(allSamples)[1:48],16,3)
  sds <- matrix(apply(allSamples[,1:48],2,sd),16,3)
  abilities <- data.frame(cbind(seq(1,16),abilities,sds))
  colnames(abilities) <- c("teamID","attack","defense", "gamma", "sd.attack","sd.defense", "sd.gamma")
  # Attach the team code (IDCODE) from the season data and order the columns.
  abilities <- left_join(abilities, JPL2015$teams, by = c('teamID' = 'ID')) %>%
    select(teamID,IDCODE,defense,attack,gamma, sd.attack, sd.defense, sd.gamma)
  abilities
  return(abilities)
} | /generate_priors_from2015-2016.R | no_license | woutervoorspoels/JPL-predictions | R | false | false | 2,026 | r | ### this function generates priors for attack and defense parameters
### in the classical model of ladder prediction
### on the basis of all games played in the previous season ('15-'16)
### note: this really just is a tailored function...
generate_abilities <- function(...){
#### Analyze all games played in the JPL season 14-15
rm(list=ls())
#source('get_data_easy_tryout.R')
source('pullDataJPL2015_2016.R')
## I. analyze
require(rjags)
require(runjags)
dataList <- list(
nGames = dim(JPL2015$allGames)[1],
nTeams = dim(JPL2015$teams)[1],
X1 = JPL2015$allGames$homegoals,
X2 = JPL2015$allGames$awaygoals,
T1 = JPL2015$allGames$homeID,
T2 = JPL2015$allGames$awayID
)
initsList <- function(){
Tattack = rgamma(dataList$nTeams,1,1) #attack parameter
Tdefense= rgamma(dataList$nTeams,1,1) # defense paramter
gamma = rgamma(dataList$nTeams,1,1) # home advantage parameter
return(list(Tattack=Tattack,Tdefense=Tdefense,gamma=gamma))
}
runJagsout <- run.jags( method = "parallel",
model = "jplclassic.txt",
monitor = c("Tattack","Tdefense", "gamma", "delta"),
data = dataList,
inits = initsList,
n.chains = 3,
thin = 10,
adapt = 10000,
burnin = 10000,
sample = 10000,
summarise=FALSE
)
#summary(runJagsout)
codaSamples = as.mcmc.list(runJagsout)
#gelman.diag(codaSamples)
allSamples<-combine.mcmc(codaSamples)
abilities <- matrix(colMeans(allSamples)[1:48],16,3)
sds <- matrix(apply(allSamples[,1:48],2,sd),16,3)
abilities <- data.frame(cbind(seq(1,16),abilities,sds))
colnames(abilities) <- c("teamID","attack","defense", "gamma", "sd.attack","sd.defense", "sd.gamma")
abilities <- left_join(abilities, JPL2015$teams, by = c('teamID' = 'ID')) %>%
select(teamID,IDCODE,defense,attack,gamma, sd.attack, sd.defense, sd.gamma)
abilities
return(abilities)
} |
# pipe.GatherGeneAlignments.R -- collect up the reads that align to some genes, and
# optionally repackage in their original FASTQ format
`pipe.GatherGeneAlignments` <- function( sampleID, genes,
				annotationFile="Annotation.txt", optionsFile="Options.txt",
				results.path=NULL, tail.width=0,
				stages=c("genomic", "splice"),
				asFASTQ=FALSE, fastq.keyword="Genes", verbose=TRUE) {

	# Gather every alignment that lands on the given genes for one sample,
	# visiting the BAM files of the requested pipeline stages.  Returns a data
	# frame of alignments in chromosomal order, or with 'asFASTQ=TRUE' writes
	# the unique raw reads out as a compressed FASTQ file and returns NULL.

	# get needed paths, etc. from the options file
	optT <- readOptionsTable( optionsFile)
	if ( is.null( results.path)) {
		results.path <- getOptionValue( optT, "results.path", notfound=".", verbose=F)
	}
	annT <- readAnnotationTable( annotationFile)
	isPaired <- getAnnotationTrue( annT, sampleID, "PairedEnd", notfound=FALSE, verbose=F)
	isStranded <- getAnnotationTrue( annT, sampleID, "StrandSpecific", notfound=FALSE, verbose=F)
	# paired & stranded samples keep mate 1 / mate 2 alignments in separate BAM files
	doPairs <- ( isPaired && isStranded)

	NG <- length( genes)
	gmap <- getCurrentGeneMap()
	where <- match( genes, gmap$GENE_ID, nomatch=0)
	if ( any( where == 0)) {
		cat( "\nSome genes not found in current species: ", genes[ where == 0])
		# BUGFIX:  the original shrank 'where' first and then did
		# 'genes <- genes[ where]', thereby indexing 'genes' by gene map row
		# numbers.  Subset both vectors by the 'found' mask instead.
		genes <- genes[ where > 0]
		where <- where[ where > 0]
		NG <- length( genes)
	}
	gptrs <- where

	# determine the set of BAM files to visit
	Stages <- c( "riboClear", "genomic", "splice")
	if ( ! all( stages %in% Stages)) {
		cat( "\nAllowed pipeline stages: ", Stages)
		stop()
	}
	bamFiles <- vector()
	if (doPairs) {
		if ( "riboClear" %in% stages) {
			bamFiles <- c( bamFiles, file.path( results.path, "riboClear",
					paste( sampleID, "_", 1:2, ".ribo.converted.bam", sep="")))
		}
		if ( "genomic" %in% stages) {
			bamFiles <- c( bamFiles, file.path( results.path, "align",
					paste( sampleID, "_", 1:2, ".genomic.bam", sep="")))
		}
		if ( "splice" %in% stages) {
			bamFiles <- c( bamFiles, file.path( results.path, "splicing",
					paste( sampleID, "_", 1:2, ".splice.converted.bam", sep="")))
		}
	} else {
		if ( "riboClear" %in% stages) {
			bamFiles <- c( bamFiles, file.path( results.path, "riboClear",
					paste( sampleID, ".ribo.converted.bam", sep="")))
		}
		if ( "genomic" %in% stages) {
			bamFiles <- c( bamFiles, file.path( results.path, "align",
					paste( sampleID, ".genomic.bam", sep="")))
		}
		if ( "splice" %in% stages) {
			bamFiles <- c( bamFiles, file.path( results.path, "splicing",
					paste( sampleID, ".splice.converted.bam", sep="")))
		}
	}

	# accumulate the alignment fields in parallel vectors -- far faster than
	# growing a data frame with rbind() on every gene
	outrefid <- outpos <- outncig <- outcig <- outflag <- outseq <- outqual <- vector()
	outname <- outrev <- outsize <- outgid <- outstage <- vector()
	nout <- 0

	for ( f in bamFiles) {
		# make sure we have that BAM file sorted and indexed
		cat( "\nFile: ", basename(f))
		bamf <- BAM.verifySorted( f)
		if ( is.null( bamf)) next
		bamidx <- paste( bamf, "bai", sep=".")
		reader <- bamReader( bamf, indexname=bamidx)
		refData <- getRefData( reader)
		thisStage <- "genomic"
		if ( regexpr( "ribo", bamf) > 0) thisStage <- "riboClear"
		if ( regexpr( "splice", bamf) > 0) thisStage <- "splice"

		# visit every gene we were given
		for ( ig in 1:NG) {
			sml <- gmap[ gptrs[ ig], ]

			# extract the chunk of reads for this gene's loci, padded by 'tail.width'
			refid <- seqID2refID( sml$SEQ_ID, refData=refData)
			start <- sml$POSITION - tail.width
			end <- sml$END + tail.width
			chunk <- bamRange( reader, coords=c(refid, start, end))
			if ( size(chunk) < 1) next
			smallDF <- as.data.frame( chunk)
			if (asFASTQ) {
				# decode the raw read sequence and quality strings too
				smallDF$seq <- readSeq( chunk)
				smallDF$qual <- readQual( chunk)
			}
			smallDF$geneid <- sml$GENE_ID
			smallDF$stage <- thisStage

			# splices have the reads broken by the splice junction, and a modified readID
			# we need to rebuild the originals
			# (BUGFIX:  removed a leftover debug statement 'saveDF <<- smallDF'
			# that silently wrote into the global environment on every gene)
			if ( thisStage == "splice") {
				smallDF <- rejoinSplicedReads( smallDF)
			}

			# if we want the raw reads, don't keep MARs
			if (asFASTQ) {
				dups <- which( duplicated( smallDF$name))
				if ( length(dups) > 0) {
					smallDF <- smallDF[ -dups, ]
				}
			}
			# BUGFIX:  guard against genes whose chunk ends up empty after
			# splice rejoining / deduplication; '(nout+1):(nout+0)' would
			# count backwards and corrupt the accumulators
			if ( nrow(smallDF) < 1) next
			if ( verbose) cat( "\n", sml$GENE_ID, "\tN_Alignments: ", nrow(smallDF))

			# append this gene's alignments to the accumulators
			# NOTE(review): assumes as.data.frame(bamRange) provides the
			# 'seq'/'qual' columns when asFASTQ=FALSE -- confirm with rbamtools
			now <- (nout + 1) : (nout + nrow(smallDF))
			outrefid[now] <- smallDF$refid
			outpos[now] <- smallDF$position
			outncig[now] <- smallDF$nCigar
			outcig[now] <- smallDF$cigar
			outflag[now] <- smallDF$flag
			outseq[now] <- smallDF$seq
			outqual[now] <- smallDF$qual
			outname[now] <- smallDF$name
			outrev[now] <- smallDF$revstrand
			outsize[now] <- smallDF$insertsize
			outgid[now] <- smallDF$geneid
			outstage[now] <- smallDF$stage
			nout <- max( now)
		}
		bamClose( reader)
	}
	if ( verbose) cat( "\nTotal Alignments: ", nout, "\n")

	# put into chromosomal order
	cat( "\nSorting..")
	ord <- order( outrefid, outpos)
	outrefid <- outrefid[ ord]
	outpos <- outpos[ ord]
	outncig <- outncig[ ord]
	outcig <- outcig[ ord]
	outflag <- outflag[ ord]
	outseq <- outseq[ ord]
	outqual <- outqual[ ord]
	outname <- outname[ ord]
	outrev <- outrev[ ord]
	outsize <- outsize[ ord]
	outgid <- outgid[ ord]
	outstage <- outstage[ ord]
	out <- data.frame( "refid"=outrefid, "position"=outpos, "nCigar"=outncig, "cigar"=outcig,
			"flag"=outflag, "seq"=outseq, "qual"=outqual, "name"=outname,
			"revstrand"=outrev, "insertsize"=outsize, "geneid"=outgid,
			"stage"=outstage, stringsAsFactors=FALSE)
	# seq_len() (unlike 1:nrow) is safe when no alignments were found
	rownames(out) <- seq_len( nrow(out))
	cat( "  Done.\n")

	if ( asFASTQ) {
		if (verbose) cat( "\nConverting Alignments back to FASTQ..")
		outfile <- paste( sampleID, fastq.keyword, "fastq.gz", sep=".")
		outfile <- file.path( results.path, "fastq", outfile)
		fqDF <- data.frame( "READ_ID"=out$name, "READ_SEQ"=out$seq, "SCORE"=out$qual,
				stringsAsFactors=FALSE)
		# there may be duplicate readIDs, that mapped to more than one location in the genome
		# don't let them be written out more than once...
		dups <- which( duplicated( fqDF$READ_ID))
		if ( length(dups) > 0) {
			if (verbose) cat( "\nDropping redundant MAR alignments from FASTQ: ", length(dups))
			fqDF <- fqDF[ -dups, ]
		}
		writeFastqFile( fqDF, outfile, compress=T)
		cat( "\nWrote file: ", outfile, "\n")
		return(NULL)
	} else {
		return( out)
	}
}
`pipe.GatherRegionAlignments` <- function( sampleID, seqids, starts, stops,
				annotationFile="Annotation.txt", optionsFile="Options.txt",
				results.path=NULL, stages=c("genomic", "splice"),
				asFASTQ=FALSE, fastq.keyword="Region", verbose=TRUE) {

	# Gather every alignment inside one or more chromosomal regions for one
	# sample, visiting the BAM files of the requested pipeline stages.
	# Returns a data frame of alignments in chromosomal order, or with
	# 'asFASTQ=TRUE' writes the unique raw reads out as a compressed FASTQ
	# file and returns NULL.

	# get needed paths, etc. from the options file
	optT <- readOptionsTable( optionsFile)
	if ( is.null( results.path)) {
		results.path <- getOptionValue( optT, "results.path", notfound=".", verbose=F)
	}
	annT <- readAnnotationTable( annotationFile)
	isPaired <- getAnnotationTrue( annT, sampleID, "PairedEnd", notfound=FALSE, verbose=F)
	isStranded <- getAnnotationTrue( annT, sampleID, "StrandSpecific", notfound=FALSE, verbose=F)
	# paired & stranded samples keep mate 1 / mate 2 alignments in separate BAM files
	doPairs <- ( isPaired && isStranded)

	#gmap <- subset( getCurrentGeneMap(), SEQ_ID == seqid & POSITION < stop & END > start)
	#if ( nrow(gmap) < 1) {
	#	cat( "\nRegion specifies less than 1 gene:  Chr=", seqid, " ", start, "to", stop, "\n")
	#	return( data.frame())
	#} else {
	#	cat( "\nRegion:  Chr=", seqid, " ", start, "to", stop, "\nN_Genes: ", sum( gmap$REAL_G), "\n")
	#}

	# determine the set of BAM files to visit
	Stages <- c( "riboClear", "genomic", "splice")
	if ( ! all( stages %in% Stages)) {
		cat( "\nAllowed pipeline stages: ", Stages)
		stop()
	}
	bamFiles <- vector()
	if (doPairs) {
		if ( "riboClear" %in% stages) {
			bamFiles <- c( bamFiles, file.path( results.path, "riboClear",
					paste( sampleID, "_", 1:2, ".ribo.converted.bam", sep="")))
		}
		if ( "genomic" %in% stages) {
			bamFiles <- c( bamFiles, file.path( results.path, "align",
					paste( sampleID, "_", 1:2, ".genomic.bam", sep="")))
		}
		if ( "splice" %in% stages) {
			bamFiles <- c( bamFiles, file.path( results.path, "splicing",
					paste( sampleID, "_", 1:2, ".splice.converted.bam", sep="")))
		}
	} else {
		if ( "riboClear" %in% stages) {
			bamFiles <- c( bamFiles, file.path( results.path, "riboClear",
					paste( sampleID, ".ribo.converted.bam", sep="")))
		}
		if ( "genomic" %in% stages) {
			bamFiles <- c( bamFiles, file.path( results.path, "align",
					paste( sampleID, ".genomic.bam", sep="")))
		}
		if ( "splice" %in% stages) {
			bamFiles <- c( bamFiles, file.path( results.path, "splicing",
					paste( sampleID, ".splice.converted.bam", sep="")))
		}
	}

	# can have more than one region...
	nRegions <- length( starts)
	if (length(stops) != nRegions) stop( "'starts' and 'stops' must be of same length")
	if (length(seqids) < nRegions) seqids <- rep( seqids, length.out=nRegions)

	out <- data.frame()
	for ( f in bamFiles) {
		# make sure we have that BAM file sorted and indexed
		bamf <- BAM.verifySorted( f)
		if ( is.null( bamf)) next
		bamidx <- paste( bamf, "bai", sep=".")
		reader <- bamReader( bamf, indexname=bamidx)
		refData <- getRefData( reader)
		thisStage <- "genomic"
		if ( regexpr( "ribo", bamf) > 0) thisStage <- "riboClear"
		if ( regexpr( "splice", bamf) > 0) thisStage <- "splice"

		# visit this region
		for ( iregion in 1:nRegions) {
			seqid <- seqids[iregion]
			start <- starts[iregion]
			stop <- stops[iregion]

			# extract the chunk of reads for this region
			refid <- seqID2refID( seqid, refData=refData)
			chunk <- bamRange( reader, coords=c(refid, start, stop))
			if ( size(chunk) < 1) next
			smallDF <- as.data.frame( chunk)
			if ( asFASTQ) {
				# decode the raw read sequence and quality strings too
				smallDF$seq <- readSeq( chunk)
				smallDF$qual <- readQual( chunk)
			}
			smallDF$stage <- thisStage

			# splices have the reads broken by the splice junction, and a modified readID
			# we need to rebuild the originals
			if ( thisStage == "splice") {
				smallDF <- rejoinSplicedReads( smallDF)
			}

			# if we want the raw reads, don't keep MARs
			if (asFASTQ) {
				dups <- which( duplicated( smallDF$name))
				if ( length(dups) > 0) {
					smallDF <- smallDF[ -dups, ]
				}
			}
			if ( verbose) cat( "\n", basename(f), "\nSeqID, Start, Stop: ", seqid, start, stop, "\tN_Alignments: ", nrow(smallDF))
			out <- rbind( out, smallDF)
		}
		bamClose( reader)
	}
	if ( verbose) cat( "\nTotal Alignments: ", nrow(out), "\n")

	# put into chromosomal order
	# BUGFIX:  the original sorted by 'out$seq' (the read sequence text), not
	# the reference sequence; order by refid & position, as in
	# 'pipe.GatherGeneAlignments' above.  Also guard the empty-result case,
	# where 'rownames(out) <- 1:nrow(out)' would fail.
	if ( nrow(out) > 0) {
		ord <- order( out$refid, out$position)
		out <- out[ ord, ]
		rownames(out) <- seq_len( nrow(out))
	}

	if ( asFASTQ) {
		if (verbose) cat( "\nConverting Alignments back to FASTQ..")
		outfile <- paste( sampleID, fastq.keyword, "fastq.gz", sep=".")
		outfile <- file.path( results.path, "fastq", outfile)
		fqDF <- data.frame( "READ_ID"=out$name, "READ_SEQ"=out$seq, "SCORE"=out$qual,
				stringsAsFactors=FALSE)
		# there may be duplicate readIDs, that mapped to more than one location in the genome
		# don't let them be written out more than once...
		dups <- which( duplicated( fqDF$READ_ID))
		if ( length(dups) > 0) {
			if (verbose) cat( "\nDropping redundant MAR alignments from FASTQ: ", length(dups))
			fqDF <- fqDF[ -dups, ]
		}
		writeFastqFile( fqDF, outfile, compress=T)
		cat( "\nWrote file: ", outfile, "\n")
		return(NULL)
	} else {
		return( out)
	}
}
`rejoinSplicedReads` <- function( tbl) {

	# Given a data frame of alignments from a splice BAM file, glue the two
	# halves of each spliced read back together into a single alignment row.
	if ( ! all( c( "position", "seq", "name", "qual") %in% colnames(tbl))) stop( "Not given a splice BAM alignment data frame")

	readNames <- tbl$name
	readSeqs <- tbl$seq
	readQuals <- tbl$qual

	# first halves are tagged '::splice1', second halves '::splice2'
	frontRows <- grep( "::splice1", readNames, fixed=TRUE)
	backRows <- grep( "::splice2", readNames, fixed=TRUE)

	# strip the splice tags to recover the original read IDs
	baseNames <- sub( "::splice[12]", "", readNames)

	# a read is usable only when both halves of the same read ID are present
	hits <- match( baseNames[ frontRows], baseNames[ backRows], nomatch=0)
	frontKeep <- frontRows[ hits > 0]
	backKeep <- backRows[ hits]

	# start from the front-half rows, then patch in the rebuilt fields
	ans <- tbl[ frontKeep, ]
	ans$name <- baseNames[ frontKeep]
	ans$seq <- paste( readSeqs[ frontKeep], readSeqs[ backKeep], sep="")
	ans$qual <- paste( readQuals[ frontKeep], readQuals[ backKeep], sep="")

	# all done, just these resolved pairs go back
	ans
}
| /R/pipe.GatherGeneAlignments.R | no_license | sturkarslan/DuffyNGS | R | false | false | 12,208 | r | # pipe.GatherGeneAlignments.R -- collect up the reads that align to some genes, and
# optionally repackage in their original FASTQ format
`pipe.GatherGeneAlignments` <- function( sampleID, genes,
annotationFile="Annotation.txt", optionsFile="Options.txt",
results.path=NULL, tail.width=0,
stages=c("genomic", "splice"),
asFASTQ=FALSE, fastq.keyword="Genes", verbose=TRUE) {
# get needed paths, etc. from the options file
optT <- readOptionsTable( optionsFile)
if ( is.null( results.path)) {
results.path <- getOptionValue( optT, "results.path", notfound=".", verbose=F)
}
annT <- readAnnotationTable( annotationFile)
isPaired <- getAnnotationTrue( annT, sampleID, "PairedEnd", notfound=FALSE, verbose=F)
isStranded <- getAnnotationTrue( annT, sampleID, "StrandSpecific", notfound=FALSE, verbose=F)
doPairs <- ( isPaired && isStranded)
NG <- length( genes)
gmap <- getCurrentGeneMap()
where <- match( genes, gmap$GENE_ID, nomatch=0)
if ( any( where == 0)) {
cat( "\nSome genes not found in current species: ", genes[ where == 0])
where <- where[ where > 0]
genes <- genes[ where]
NG <- length( genes)
}
gptrs <- where
# determine the set of BAM files to visit
Stages <- c( "riboClear", "genomic", "splice")
if ( ! all( stages %in% Stages)) {
cat( "\nAllowed pipeline stages: ", Stages)
stop()
}
bamFiles <- vector()
if (doPairs) {
if ( "riboClear" %in% stages) {
bamFiles <- c( bamFiles, file.path( results.path, "riboClear",
paste( sampleID, "_", 1:2, ".ribo.converted.bam", sep="")))
}
if ( "genomic" %in% stages) {
bamFiles <- c( bamFiles, file.path( results.path, "align",
paste( sampleID, "_", 1:2, ".genomic.bam", sep="")))
}
if ( "splice" %in% stages) {
bamFiles <- c( bamFiles, file.path( results.path, "splicing",
paste( sampleID, "_", 1:2, ".splice.converted.bam", sep="")))
}
} else {
if ( "riboClear" %in% stages) {
bamFiles <- c( bamFiles, file.path( results.path, "riboClear",
paste( sampleID, ".ribo.converted.bam", sep="")))
}
if ( "genomic" %in% stages) {
bamFiles <- c( bamFiles, file.path( results.path, "align",
paste( sampleID, ".genomic.bam", sep="")))
}
if ( "splice" %in% stages) {
bamFiles <- c( bamFiles, file.path( results.path, "splicing",
paste( sampleID, ".splice.converted.bam", sep="")))
}
}
#out <- data.frame()
outrefid <- outpos <- outncig <- outcig <- outflag <- outseq <- outqual <- vector()
outname <- outrev <- outsize <- outgid <- outstage <- vector()
nout <- 0
for ( f in bamFiles) {
# make sure we have that BAM file sorted and indexed
cat( "\nFile: ", basename(f))
bamf <- BAM.verifySorted( f)
if ( is.null( bamf)) next
bamidx <- paste( bamf, "bai", sep=".")
reader <- bamReader( bamf, indexname=bamidx)
refData <- getRefData( reader)
thisStage <- "genomic"
if ( regexpr( "ribo", bamf) > 0) thisStage <- "riboClear"
if ( regexpr( "splice", bamf) > 0) thisStage <- "splice"
# visit every gene we were given
for ( ig in 1:NG) {
sml <- gmap[ gptrs[ ig], ]
# extract the chunk of reads for this gene's loci
refid <- seqID2refID( sml$SEQ_ID, refData=refData)
start <- sml$POSITION - tail.width
end <- sml$END + tail.width
chunk <- bamRange( reader, coords=c(refid, start, end))
if ( size(chunk) < 1) next
smallDF <- as.data.frame( chunk)
if (asFASTQ) {
smallDF$seq <- readSeq( chunk)
smallDF$qual <- readQual( chunk)
}
smallDF$geneid <- sml$GENE_ID
smallDF$stage <- thisStage
# splices have the reads broken by the splice junction, and a modified readID
# we need to rebuild the originals
saveDF <<- smallDF
if ( thisStage == "splice") {
smallDF <- rejoinSplicedReads( smallDF)
}
# if we want the raw reads, don't keep MARs
if (asFASTQ) {
dups <- which( duplicated( smallDF$name))
if ( length(dups) > 0) {
smallDF <- smallDF[ -dups, ]
}
}
if ( verbose) cat( "\n", sml$GENE_ID, "\tN_Alignments: ", nrow(smallDF))
#out <- rbind( out, smallDF)
now <- (nout + 1) : (nout + nrow(smallDF))
outrefid[now] <- smallDF$refid
outpos[now] <- smallDF$position
outncig[now] <- smallDF$nCigar
outcig[now] <- smallDF$cigar
outflag[now] <- smallDF$flag
outseq[now] <- smallDF$seq
outqual[now] <- smallDF$qual
outname[now] <- smallDF$name
outrev[now] <- smallDF$revstrand
outsize[now] <- smallDF$insertsize
outgid[now] <- smallDF$geneid
outstage[now] <- smallDF$stage
nout <- max( now)
}
bamClose( reader)
}
if ( verbose) cat( "\nTotal Aignments: ", nout, "\n")
# put into chromosomal order
cat( "\nSorting..")
ord <- order( outrefid, outpos)
outrefid <- outrefid[ ord]
outpos <- outpos[ ord]
outncig <- outncig[ ord]
outcig <- outcig[ ord]
outflag <- outflag[ ord]
outseq <- outseq[ ord]
outqual <- outqual[ ord]
outname <- outname[ ord]
outrev <- outrev[ ord]
outsize <- outsize[ ord]
outgid <- outgid[ ord]
outstage <- outstage[ ord]
out <- data.frame( "refid"=outrefid, "position"=outpos, "nCigar"=outncig, "cigar"=outcig,
"flag"=outflag, "seq"=outseq, "qual"=outqual, "name"=outname,
"revstrand"=outrev, "insertsize"=outsize, "geneid"=outgid,
"stage"=outstage, stringsAsFactors=FALSE)
rownames(out) <- 1:nrow(out)
cat( " Done.\n")
if ( asFASTQ) {
if (verbose) cat( "\nConverting Alignments back to FASTQ..")
outfile <- paste( sampleID, fastq.keyword, "fastq.gz", sep=".")
outfile <- file.path( results.path, "fastq", outfile)
fqDF <- data.frame( "READ_ID"=out$name, "READ_SEQ"=out$seq, "SCORE"=out$qual,
stringsAsFactors=FALSE)
# there may be duplicate readIDs, that mapped to more than one location in the genome
# don't let them be written out more than once...
dups <- which( duplicated( fqDF$READ_ID))
if ( length(dups) > 0) {
if (verbose) cat( "\nDropping redundant MAR alignments from FASTQ: ", length(dups))
fqDF <- fqDF[ -dups, ]
}
writeFastqFile( fqDF, outfile, compress=T)
cat( "\nWrote file: ", outfile, "\n")
return(NULL)
} else {
return( out)
}
}
`pipe.GatherRegionAlignments` <- function( sampleID, seqids, starts, stops,
				annotationFile="Annotation.txt", optionsFile="Options.txt",
				results.path=NULL, stages=c("genomic", "splice"),
				asFASTQ=FALSE, fastq.keyword="Region", verbose=TRUE) {

	# Gather all BAM alignments that land inside one or more chromosomal regions,
	# visiting the BAM file of each requested pipeline stage for one sample.
	#
	# sampleID:       one SampleID from the annotation table
	# seqids:         chromosome/sequence names; recycled to the length of 'starts'
	# starts, stops:  region boundary vectors (must be the same length)
	# stages:         subset of c("riboClear", "genomic", "splice") whose BAMs to visit
	# asFASTQ:        if TRUE, also capture read sequence/quality, write the
	#                 de-duplicated reads to a FASTQ file and return NULL;
	#                 otherwise return the data.frame of alignments
	# Returns:        a data.frame of alignments sorted by (refid, position),
	#                 or NULL in FASTQ mode.

	# get needed paths, etc. from the options file
	optT <- readOptionsTable( optionsFile)
	if ( is.null( results.path)) {
		results.path <- getOptionValue( optT, "results.path", notfound=".", verbose=F)
	}
	# paired-end strand-specific samples keep two mate files per stage
	annT <- readAnnotationTable( annotationFile)
	isPaired <- getAnnotationTrue( annT, sampleID, "PairedEnd", notfound=FALSE, verbose=F)
	isStranded <- getAnnotationTrue( annT, sampleID, "StrandSpecific", notfound=FALSE, verbose=F)
	doPairs <- ( isPaired && isStranded)

	# determine the set of BAM files to visit
	Stages <- c( "riboClear", "genomic", "splice")
	if ( ! all( stages %in% Stages)) {
		cat( "\nAllowed pipeline stages: ", Stages)
		stop()
	}
	# for paired strand-specific data every stage has a '_1' and '_2' mate file;
	# building the name stub once removes the duplicated paired/unpaired branches
	sampleTag <- if (doPairs) paste( sampleID, "_", 1:2, sep="") else sampleID
	bamFiles <- vector()
	if ( "riboClear" %in% stages) {
		bamFiles <- c( bamFiles, file.path( results.path, "riboClear",
				paste( sampleTag, ".ribo.converted.bam", sep="")))
	}
	if ( "genomic" %in% stages) {
		bamFiles <- c( bamFiles, file.path( results.path, "align",
				paste( sampleTag, ".genomic.bam", sep="")))
	}
	if ( "splice" %in% stages) {
		bamFiles <- c( bamFiles, file.path( results.path, "splicing",
				paste( sampleTag, ".splice.converted.bam", sep="")))
	}

	# can have more than one region...
	nRegions <- length( starts)
	if (length(stops) != nRegions) stop( "'starts' and 'stops' must be of same length")
	if (length(seqids) < nRegions) seqids <- rep( seqids, length.out=nRegions)

	out <- data.frame()
	for ( f in bamFiles) {
		# make sure we have that BAM file sorted and indexed
		bamf <- BAM.verifySorted( f)
		if ( is.null( bamf)) next
		bamidx <- paste( bamf, "bai", sep=".")
		reader <- bamReader( bamf, indexname=bamidx)
		refData <- getRefData( reader)
		# deduce which pipeline stage this BAM file came from, by filename
		thisStage <- "genomic"
		if ( regexpr( "ribo", bamf) > 0) thisStage <- "riboClear"
		if ( regexpr( "splice", bamf) > 0) thisStage <- "splice"
		# visit every requested region
		for ( iregion in seq_len( nRegions)) {
			seqid <- seqids[iregion]
			start <- starts[iregion]
			stop <- stops[iregion]
			# extract the chunk of reads that overlap this region
			refid <- seqID2refID( seqid, refData=refData)
			chunk <- bamRange( reader, coords=c(refid, start, stop))
			if ( size(chunk) < 1) next
			smallDF <- as.data.frame( chunk)
			if ( asFASTQ) {
				smallDF$seq <- readSeq( chunk)
				smallDF$qual <- readQual( chunk)
			}
			smallDF$stage <- thisStage
			# splices have the reads broken by the splice junction, and a modified readID
			# we need to rebuild the originals
			if ( thisStage == "splice") {
				smallDF <- rejoinSplicedReads( smallDF)
			}
			# if we want the raw reads, don't keep multiply-aligned copies (MARs)
			if (asFASTQ) {
				dups <- which( duplicated( smallDF$name))
				if ( length(dups) > 0) {
					smallDF <- smallDF[ -dups, ]
				}
			}
			if ( verbose) cat( "\n", basename(f), "\nSeqID, Start, Stop: ", seqid, start, stop, "\tN_Alignments: ", nrow(smallDF))
			out <- rbind( out, smallDF)
		}
		bamClose( reader)
	}
	if ( verbose) cat( "\nTotal Alignments: ", nrow(out), "\n")

	# nothing found?  don't try to sort, label, or write an empty FASTQ file
	if ( nrow(out) < 1) {
		if (asFASTQ) return(NULL)
		return( out)
	}

	# put into chromosomal order:  order on reference ID then position,
	# matching the gene-based gather tool (previously this ordered on the
	# 'seq' base-call strings, which is not chromosomal order)
	ord <- order( out$refid, out$position)
	out <- out[ ord, ]
	rownames(out) <- seq_len( nrow(out))

	if ( asFASTQ) {
		if (verbose) cat( "\nConverting Alignments back to FASTQ..")
		outfile <- paste( sampleID, fastq.keyword, "fastq.gz", sep=".")
		outfile <- file.path( results.path, "fastq", outfile)
		fqDF <- data.frame( "READ_ID"=out$name, "READ_SEQ"=out$seq, "SCORE"=out$qual,
				stringsAsFactors=FALSE)
		# there may be duplicate readIDs, that mapped to more than one location in the genome
		# don't let them be written out more than once...
		dups <- which( duplicated( fqDF$READ_ID))
		if ( length(dups) > 0) {
			if (verbose) cat( "\nDropping redundant MAR alignments from FASTQ: ", length(dups))
			fqDF <- fqDF[ -dups, ]
		}
		writeFastqFile( fqDF, outfile, compress=T)
		cat( "\nWrote file: ", outfile, "\n")
		return(NULL)
	} else {
		return( out)
	}
}
`rejoinSplicedReads` <- function( tbl) {

	# Given a data frame of alignments from a splice BAM file, where each read
	# was broken at the splice junction into a '::splice1' and a '::splice2'
	# half, rebuild the original reads.  Only reads with BOTH halves present
	# are kept; their name, seq, and qual fields are restored/concatenated.
	needed <- c( "position", "seq", "name", "qual")
	if ( ! all( needed %in% colnames(tbl))) stop( "Not given a splice BAM alignment data frame")

	# strip the splice-half suffix to recover the original read IDs
	baseName <- sub( "::splice[12]", "", tbl$name)
	# row indices of the front and back halves
	frontRows <- grep( "::splice1", tbl$name, fixed=TRUE)
	backRows <- grep( "::splice2", tbl$name, fixed=TRUE)

	# a read is usable only when both halves share the same base read ID
	hit <- match( baseName[frontRows], baseName[backRows], nomatch=0)
	useFront <- frontRows[ hit > 0]
	# zero (no-match) indices silently drop out, keeping alignment with 'useFront'
	useBack <- backRows[ hit]

	# the front-half rows become the result; restore ID and rejoin seq/qual
	out <- tbl[ useFront, ]
	out$name <- baseName[ useFront]
	out$seq <- paste0( tbl$seq[useFront], tbl$seq[useBack])
	out$qual <- paste0( tbl$qual[useFront], tbl$qual[useBack])
	out
}
|
#######################################################################################
# #
# Code for "A ggplot2 Tutorial for Beautiful Plotting in R" #
# cedricscherer.netlify.app/2019/08/05/a-ggplot2-tutorial-for-beautiful-plotting-in-r #
# #
# Cédric Scherer (@CedScherer | cedricphilippscherer@gmail.com) #
# Last Update: 2020-12-02 #
# #
#######################################################################################
## install CRAN packages
## install.packages(c("tidyverse", "colorspace", "corrr", "cowplot",
## "ggdark", "ggforce", "ggrepel", "ggridges", "ggsci",
## "ggtext", "ggthemes", "grid", "gridExtra", "patchwork",
## "rcartocolor", "scico", "showtext", "shiny",
## "plotly", "highcharter", "echarts4r"))
##
## install from GitHub since not on CRAN
## devtools::install_github("JohnCoene/charter")
## Load the Chicago NMMAPS weather data set used throughout the tutorial
chic <- readr::read_csv("https://raw.githubusercontent.com/Z3tt/R-Tutorials/master/ggplot2/chicago-nmmaps.csv")
tibble::glimpse(chic)
head(chic, 10)
#library(ggplot2)
library(tidyverse)
## A bare ggplot object: mapping only, no geom yet (renders an empty panel)
(g <- ggplot(chic, aes(x = date, y = temp)))
## Add geoms to the saved base plot
g + geom_point()
g + geom_line()
g + geom_line() + geom_point()
## Constant (non-mapped) aesthetics are set inside the geom call
g + geom_point(color = "firebrick", shape = "diamond", size = 2)
g + geom_point(color = "firebrick", shape = "diamond", size = 2) +
  geom_line(color = "firebrick", linetype = "dotted", size = .3)
## Set a global default theme for all following plots
theme_set(theme_bw())
g + geom_point(color = "firebrick")
## Axis titles via labs() ...
ggplot(chic, aes(x = date, y = temp)) +
  geom_point(color = "firebrick") +
  labs(x = "Year", y = "Temperature (°F)")
## ... or equivalently via xlab()/ylab()
ggplot(chic, aes(x = date, y = temp)) +
  geom_point(color = "firebrick") +
  xlab("Year") +
  ylab("Temperature (°F)")
## expression() allows plotmath (degree symbol, superscripts) in labels
ggplot(chic, aes(x = date, y = temp)) +
  geom_point(color = "firebrick") +
  labs(x = "Year", y = expression(paste("Temperature (", degree ~ F, ")"^"(Hey, why should we use metric units?!)")))
## Move axis titles with vjust ...
ggplot(chic, aes(x = date, y = temp)) +
  geom_point(color = "firebrick") +
  labs(x = "Year", y = "Temperature (°F)") +
  theme(axis.title.x = element_text(vjust = 0, size = 15),
        axis.title.y = element_text(vjust = 2, size = 15))
## ... or with margins, which gives finer control
ggplot(chic, aes(x = date, y = temp)) +
  geom_point(color = "firebrick") +
  labs(x = "Year", y = "Temperature (°F)") +
  theme(axis.title.x = element_text(margin = margin(t = 10), size = 15),
        axis.title.y = element_text(margin = margin(r = 10), size = 15))
## axis.title styles both axes at once
ggplot(chic, aes(x = date, y = temp)) +
  geom_point(color = "firebrick") +
  labs(x = "Year", y = "Temperature (°F)") +
  theme(axis.title = element_text(size = 15, color = "firebrick",
                                  face = "italic"))
## Per-axis overrides
ggplot(chic, aes(x = date, y = temp)) +
  geom_point(color = "firebrick") +
  labs(x = "Year", y = "Temperature (°F)") +
  theme(axis.title.x = element_text(color = "sienna", size = 15),
        axis.title.y = element_text(color = "orangered", size = 15))
## The more specific axis.title.y wins over the general axis.title
ggplot(chic, aes(x = date, y = temp)) +
  geom_point(color = "firebrick") +
  labs(x = "Year", y = "Temperature (°F)") +
  theme(axis.title = element_text(color = "sienna", size = 15),
        axis.title.y = element_text(color = "orangered", size = 15))
## Properties not overridden (here: color, size) are inherited
ggplot(chic, aes(x = date, y = temp)) +
  geom_point(color = "firebrick") +
  labs(x = "Year", y = "Temperature (°F)") +
  theme(axis.title = element_text(color = "sienna", size = 15, face = "bold"),
        axis.title.y = element_text(face = "bold.italic"))
## Style the tick labels (axis.text) the same way
ggplot(chic, aes(x = date, y = temp)) +
  geom_point(color = "firebrick") +
  labs(x = "Year", y = "Temperature (°F)") +
  theme(axis.text = element_text(color = "dodgerblue", size = 12),
        axis.text.x = element_text(face = "italic"))
## Rotate tick labels; hjust/vjust keep them anchored to the ticks
ggplot(chic, aes(x = date, y = temp)) +
  geom_point(color = "firebrick") +
  labs(x = "Year", y = "Temperature (°F)") +
  theme(axis.text.x = element_text(angle = 50, vjust = 1, hjust = 1, size = 12))
## Remove ticks and labels entirely with element_blank()
ggplot(chic, aes(x = date, y = temp)) +
  geom_point(color = "firebrick") +
  labs(x = "Year", y = "Temperature (°F)") +
  theme(axis.ticks.y = element_blank(),
        axis.text.y = element_blank())
## NULL removes the title; "" keeps the (empty) title space
ggplot(chic, aes(x = date, y = temp)) +
  geom_point(color = "firebrick") +
  labs(x = NULL, y = "")
## ylim() discards points outside the range (unlike coord_cartesian)
ggplot(chic, aes(x = date, y = temp)) +
  geom_point(color = "firebrick") +
  labs(x = "Year", y = "Temperature (°F)") +
  ylim(c(0, 50))
library(tidyverse)
## Subset used for the limit examples
chic_high <- dplyr::filter(chic, temp > 25, o3 > 20)
## Force the origin into view with expand_limits()
ggplot(chic_high, aes(x = temp, y = o3)) +
  geom_point(color = "darkcyan") +
  labs(x = "Temperature higher than 25°F",
       y = "Ozone higher than 20 ppb") +
  expand_limits(x = 0, y = 0)
library(tidyverse)
chic_high <- dplyr::filter(chic, temp > 25, o3 > 20)
## coord_cartesian zooms without dropping data (NA = keep computed limit)
ggplot(chic_high, aes(x = temp, y = o3)) +
  geom_point(color = "darkcyan") +
  labs(x = "Temperature higher than 25°F",
       y = "Ozone higher than 20 ppb") +
  coord_cartesian(xlim = c(0, NA), ylim = c(0, NA))
## Start exactly at the origin: no expansion padding, no clipping
ggplot(chic_high, aes(x = temp, y = o3)) +
  geom_point(color = "darkcyan") +
  labs(x = "Temperature higher than 25°F",
       y = "Ozone higher than 20 ppb") +
  expand_limits(x = 0, y = 0) +
  scale_x_continuous(expand = c(0, 0)) +
  scale_y_continuous(expand = c(0, 0)) +
  coord_cartesian(clip = "off")
## coord_fixed(): one x unit equals one y unit on screen
ggplot(chic, aes(x = temp, y = temp + rnorm(nrow(chic), sd = 20))) +
  geom_point(color = "sienna") +
  labs(x = "Temperature (°F)", y = "Temperature (°F) + random noise") +
  xlim(c(0, 100)) + ylim(c(0, 150)) +
  coord_fixed()
## ratio changes the unit aspect (here 1 x unit = 5 y units tall)
ggplot(chic, aes(x = temp, y = temp + rnorm(nrow(chic), sd = 20))) +
  geom_point(color = "sienna") +
  labs(x = "Temperature (°F)", y = "Temperature (°F) + random noise") +
  xlim(c(0, 100)) + ylim(c(0, 150)) +
  coord_fixed(ratio = 1/5)
## A labeling function turns each break value into custom text
ggplot(chic, aes(x = date, y = temp)) +
  geom_point(color = "firebrick") +
  labs(x = "Year", y = NULL) +
  scale_y_continuous(label = function(x) {return(paste(x, "Degrees Fahrenheit"))})
## Plot title via ggtitle() ...
ggplot(chic, aes(x = date, y = temp)) +
  geom_point(color = "firebrick") +
  labs(x = "Year", y = "Temperature (°F)") +
  ggtitle("Temperatures in Chicago")
## ... or via labs(), which also takes subtitle, caption, and tag
ggplot(chic, aes(x = date, y = temp)) +
  geom_point(color = "firebrick") +
  labs(x = "Year", y = "Temperature (°F)",
       title = "Temperatures in Chicago",
       subtitle = "Seasonal pattern of daily temperatures from 1997 to 2001",
       caption = "Data: NMMAPS",
       tag = "Fig. 1")
## Style the title text via theme(plot.title = ...)
ggplot(chic, aes(x = date, y = temp)) +
  geom_point(color = "firebrick") +
  labs(x = "Year", y = "Temperature (°F)",
       title = "Temperatures in Chicago") +
  theme(plot.title = element_text(face = "bold",
                                  margin = margin(10, 0, 10, 0),
                                  size = 14))
## hjust = 1 right-aligns the title
ggplot(chic, aes(x = date, y = temp)) +
  geom_point(color = "firebrick") +
  labs(x = "Year", y = NULL,
       title = "Temperatures in Chicago",
       caption = "Data: NMMAPS") +
  theme(plot.title = element_text(hjust = 1, size = 16, face = "bold.italic"))
(g <- ggplot(chic, aes(x = date, y = temp)) +
  geom_point(color = "firebrick") +
  scale_y_continuous(label = function(x) {return(paste(x, "Degrees Fahrenheit"))}) +
  labs(x = "Year", y = NULL,
       title = "Temperatures in Chicago between 1997 and 2001 in Degrees Fahrenheit",
       caption = "Data: NMMAPS") +
  theme(plot.title = element_text(size = 14, face = "bold.italic"),
        plot.caption = element_text(hjust = 0)))
## Align title/caption to the whole plot area instead of the panel
g + theme(plot.title.position = "plot",
          plot.caption.position = "plot")
## Custom fonts via the showtext package
library(showtext)
font_add_google("Playfair Display", ## name of Google font
                "Playfair") ## name that will be used in R
font_add_google("Bangers", "Bangers")
ggplot(chic, aes(x = date, y = temp)) +
  geom_point(color = "firebrick") +
  labs(x = "Year", y = "Temperature (°F)",
       title = "Temperatures in Chicago",
       subtitle = "Daily temperatures in °F from 1997 to 2001") +
  theme(plot.title = element_text(family = "Bangers", hjust = .5, size = 25),
        plot.subtitle = element_text(family = "Playfair", hjust = .5, size = 15))
## Use a custom font as the theme-wide default
font_add_google("Roboto Condensed", "Roboto Condensed")
theme_set(theme_bw(base_size = 12, base_family = "Roboto Condensed"))
## Multi-line title: "\n" breaks the line; lineheight controls spacing
ggplot(chic, aes(x = date, y = temp)) +
  geom_point(color = "firebrick") +
  labs(x = "Year", y = "Temperature (°F)") +
  ggtitle("Temperatures in Chicago\nfrom 1997 to 2001") +
  theme(plot.title = element_text(lineheight = .8, size = 16))
## Mapping a variable to color creates a legend automatically
ggplot(chic,
       aes(x = date, y = temp, color = season)) +
  geom_point() +
  labs(x = "Year", y = "Temperature (°F)")
## Turn off ALL legends at once
ggplot(chic,
       aes(x = date, y = temp, color = season)) +
  geom_point() +
  labs(x = "Year", y = "Temperature (°F)") +
  theme(legend.position = "none")
## Turn off a single legend with guides(); the shape legend remains
ggplot(chic,
       aes(x = date, y = temp,
           color = season, shape = season)) +
  geom_point() +
  labs(x = "Year", y = "Temperature (°F)") +
  guides(color = "none")
## Remove just the legend title -- three equivalent approaches:
ggplot(chic, aes(x = date, y = temp, color = season)) +
  geom_point() +
  labs(x = "Year", y = "Temperature (°F)") +
  theme(legend.title = element_blank())
ggplot(chic, aes(x = date, y = temp, color = season)) +
  geom_point() +
  labs(x = "Year", y = "Temperature (°F)") +
  scale_color_discrete(name = NULL)
ggplot(chic, aes(x = date, y = temp, color = season)) +
  geom_point() +
  labs(x = "Year", y = "Temperature (°F)") +
  labs(color = NULL)
## Reposition the legend: keyword positions ...
ggplot(chic, aes(x = date, y = temp, color = season)) +
  geom_point() +
  labs(x = "Year", y = "Temperature (°F)") +
  theme(legend.position = "top")
## ... or relative coordinates inside the panel (0-1 range)
ggplot(chic, aes(x = date, y = temp, color = season)) +
  geom_point() +
  labs(x = "Year", y = "Temperature (°F)") +
  theme(legend.position = c(.2, .1),
        legend.background = element_rect(fill = "transparent"))
## Horizontal legend placed inside the plot, near the top
ggplot(chic, aes(x = date, y = temp, color = season)) +
  geom_point() +
  labs(x = "Year", y = "Temperature (°F)") +
  theme(legend.position = c(.5, .97),
        legend.background = element_rect(fill = "transparent")) +
  guides(color = guide_legend(direction = "horizontal"))
## Style the legend title like any other text element
ggplot(chic, aes(x = date, y = temp, color = season)) +
  geom_point() +
  labs(x = "Year", y = "Temperature (°F)") +
  theme(legend.title = element_text(family = "Playfair",
                                    color = "chocolate",
                                    size = 14, face = "bold"))
## Change the legend title text via labs(color = ...)
ggplot(chic, aes(x = date, y = temp, color = season)) +
  geom_point() +
  labs(x = "Year", y = "Temperature (°F)",
       color = "Seasons\nindicated\nby colors:") +
  theme(legend.title = element_text(family = "Playfair",
                                    color = "chocolate",
                                    size = 14, face = "bold"))
## ggplot(chic, aes(x = date, y = temp, color = season))) +
##   geom_point() +
##   labs(x = "Year", y = "Temperature (°F)") +
##   theme(legend.title = element_text(family = "Playfair",
##                                     color = "chocolate",
##                                     size = 14, face = "bold")) +
##   scale_color_discrete(name = "Seasons\nindicated\nby colors:")
## Reorder legend keys by re-leveling the factor itself
chic$season <-
  factor(chic$season,
         levels = c("Winter", "Spring", "Summer", "Autumn"))
ggplot(chic, aes(x = date, y = temp, color = season)) +
  geom_point() +
  labs(x = "Year", y = "Temperature (°F)")
## Relabel legend keys; order must match the factor levels
ggplot(chic, aes(x = date, y = temp, color = season)) +
  geom_point() +
  labs(x = "Year", y = "Temperature (°F)") +
  scale_color_discrete("Seasons:", labels = c("Mar—May", "Jun—Aug",
                                              "Sep—Nov", "Dec—Feb")) +
  theme(legend.title = element_text(family = "Playfair",
                                    color = "chocolate",
                                    size = 14, face = 2))
## legend.key styles the background box behind each key glyph
ggplot(chic, aes(x = date, y = temp, color = season)) +
  geom_point() +
  labs(x = "Year", y = "Temperature (°F)") +
  theme(legend.key = element_rect(fill = "darkgoldenrod1"),
        legend.title = element_text(family = "Playfair",
                                    color = "chocolate",
                                    size = 14, face = 2)) +
  scale_color_discrete("Seasons:")
## override.aes enlarges the key symbols without touching the plot
ggplot(chic, aes(x = date, y = temp, color = season)) +
  geom_point() +
  labs(x = "Year", y = "Temperature (°F)") +
  theme(legend.key = element_rect(fill = NA),
        legend.title = element_text(color = "chocolate",
                                    size = 14, face = 2)) +
  scale_color_discrete("Seasons:") +
  guides(color = guide_legend(override.aes = list(size = 6)))
## geom_rug adds marginal tick marks; it joins the legend by default ...
ggplot(chic, aes(x = date, y = temp, color = season)) +
  geom_point() +
  labs(x = "Year", y = "Temperature (°F)") +
  geom_rug()
## ... unless show.legend = FALSE excludes that layer
ggplot(chic, aes(x = date, y = temp, color = season)) +
  geom_point() +
  labs(x = "Year", y = "Temperature (°F)") +
  geom_rug(show.legend = FALSE)
## Two layers with fixed colors: no legend is produced
ggplot(chic, aes(x = date, y = o3)) +
  geom_line(color = "gray") +
  geom_point(color = "darkorange2") +
  labs(x = "Year", y = "Ozone")
## Mapping a constant STRING inside aes() fabricates a legend entry per layer
ggplot(chic, aes(x = date, y = o3)) +
  geom_line(aes(color = "line")) +
  geom_point(aes(color = "points")) +
  labs(x = "Year", y = "Ozone") +
  scale_color_discrete("Type:")
## scale_color_manual assigns the real colors to those fake levels;
## override.aes makes each key show only its own glyph (line vs point)
ggplot(chic, aes(x = date, y = o3)) +
  geom_line(aes(color = "line")) +
  geom_point(aes(color = "points")) +
  labs(x = "Year", y = "Ozone") +
  scale_color_manual(name = NULL,
                     guide = "legend",
                     values = c("points" = "darkorange2",
                                "line" = "gray")) +
  guides(color = guide_legend(override.aes = list(linetype = c(1, 0),
                                                  shape = c(NA, 16))))
## A continuous color mapping yields a colorbar by default ...
ggplot(chic,
       aes(x = date, y = temp, color = temp)) +
  geom_point() +
  labs(x = "Year", y = "Temperature (°F)", color = "Temperature (°F)")
## ... which can be displayed as discrete keys instead
ggplot(chic,
       aes(x = date, y = temp, color = temp)) +
  geom_point() +
  labs(x = "Year", y = "Temperature (°F)", color = "Temperature (°F)") +
  guides(color = guide_legend())
## ... or as binned keys along an axis
ggplot(chic,
       aes(x = date, y = temp, color = temp)) +
  geom_point() +
  labs(x = "Year", y = "Temperature (°F)", color = "Temperature (°F)") +
  guides(color = guide_bins())
## ... or as a stepped (discretized) colorbar
ggplot(chic,
       aes(x = date, y = temp, color = temp)) +
  geom_point() +
  labs(x = "Year", y = "Temperature (°F)", color = "Temperature (°F)") +
  guides(color = guide_colorsteps())
## Style the panel background and the major/minor grid lines
ggplot(chic, aes(x = date, y = temp)) +
  geom_point(color = "firebrick") +
  labs(x = "Year", y = "Temperature (°F)") +
  theme(panel.background = element_rect(fill = "gray90"),
        panel.grid.major = element_line(color = "gray10", size = .5),
        panel.grid.minor = element_line(color = "gray70", size = .25))
## Grid lines can be styled per axis and per major/minor level
ggplot(chic, aes(x = date, y = temp)) +
  geom_point(color = "firebrick") +
  labs(x = "Year", y = "Temperature (°F)") +
  theme(panel.background = element_rect(fill = "gray90"),
        panel.grid.major = element_line(size = .5, linetype = "dashed"),
        panel.grid.minor = element_line(size = .25, linetype = "dotted"),
        panel.grid.major.x = element_line(color = "red1"),
        panel.grid.major.y = element_line(color = "blue1"),
        panel.grid.minor.x = element_line(color = "red4"),
        panel.grid.minor.y = element_line(color = "blue4"))
## Remove only the minor grid ...
ggplot(chic, aes(x = date, y = temp)) +
  geom_point(color = "firebrick") +
  labs(x = "Year", y = "Temperature (°F)") +
  theme(panel.grid.minor = element_blank())
## ... or the whole grid
ggplot(chic, aes(x = date, y = temp)) +
  geom_point(color = "firebrick") +
  labs(x = "Year", y = "Temperature (°F)") +
  theme(panel.grid = element_blank())
## Control where grid lines fall via the scale's breaks
ggplot(chic, aes(x = date, y = temp)) +
  geom_point(color = "firebrick") +
  labs(x = "Year", y = "Temperature (°F)") +
  scale_y_continuous(breaks = seq(0, 100, 10),
                     minor_breaks = seq(0, 100, 2.5))
## Filled panel rectangle (drawn BEHIND the data)
ggplot(chic, aes(x = date, y = temp)) +
  geom_point(color = "#1D8565", size = 2) +
  labs(x = "Year", y = "Temperature (°F)") +
  theme(panel.background = element_rect(fill = "#64D2AA",
                                        color = "#64D2AA", size = 2))
## panel.border is drawn ON TOP; the 8-digit hex adds transparency
ggplot(chic, aes(x = date, y = temp)) +
  geom_point(color = "#1D8565", size = 2) +
  labs(x = "Year", y = "Temperature (°F)") +
  theme(panel.border = element_rect(fill = "#64D2AA99",
                                    color = "#64D2AA", size = 2))
## Style the background of the whole plot area
ggplot(chic, aes(x = date, y = temp)) +
  geom_point(color = "firebrick") +
  labs(x = "Year", y = "Temperature (°F)") +
  theme(plot.background = element_rect(fill = "gray60",
                                       color = "gray30", size = 2))
## Transparent panel lets the plot background show through
ggplot(chic, aes(x = date, y = temp)) +
  geom_point(color = "firebrick") +
  labs(x = "Year", y = "Temperature (°F)") +
  theme(panel.background = element_rect(fill = NA),
        plot.background = element_rect(fill = "gray60",
                                       color = "gray30", size = 2))
## plot.margin order is top, right, bottom, left
ggplot(chic, aes(x = date, y = temp)) +
  geom_point(color = "firebrick") +
  labs(x = "Year", y = "Temperature (°F)") +
  theme(plot.background = element_rect(fill = "gray60"),
        plot.margin = unit(c(1, 3, 1, 8), "cm"))
## Base plot reused for the faceting examples below
g <- ggplot(chic, aes(x = date, y = temp)) +
  geom_point(color = "chartreuse4", alpha = .3) +
  labs(x = "Year", y = "Temperature (°F)") +
  theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1))
## facet_wrap: small multiples of one variable, arranged by nrow/ncol
g + facet_wrap(~ year, nrow = 1)
g + facet_wrap(~ year, nrow = 2)
g + facet_wrap(~ year, ncol = 3) + theme(axis.title.x = element_text(hjust = .15))
## scales = "free" gives each facet its own axis ranges
g + facet_wrap(~ year, nrow = 2, scales = "free")
## facet_grid: a matrix of panels from two variables (rows ~ columns)
ggplot(chic, aes(x = date, y = temp)) +
  geom_point(color = "orangered", alpha = .3) +
  theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1)) +
  labs(x = "Year", y = "Temperature (°F)") +
  facet_grid(year ~ season)
## facet_wrap on two variables combines their levels into one strip
g + facet_wrap(year ~ season, nrow = 4, scales = "free_x")
## Style the facet strip text and background
g + facet_wrap(~ year, nrow = 1, scales = "free_x") +
  theme(strip.text = element_text(face = "bold", color = "chartreuse4",
                                  hjust = 0, size = 20),
        strip.background = element_rect(fill = "chartreuse3", linetype = "dotted"))
library(ggtext)
library(rlang)
element_textbox_highlight <- function(..., hi.labels = NULL, hi.fill = NULL,
                                      hi.col = NULL, hi.box.col = NULL, hi.family = NULL) {
  # A drop-in replacement for ggtext::element_textbox() that carries extra
  # "highlight" settings: strip labels listed in 'hi.labels' are drawn with
  # the hi.* fill/text/box/family overrides (applied by the matching
  # element_grob() S3 method).  All other arguments pass through to
  # element_textbox() unchanged.
  base <- element_textbox(...)
  extras <- list(
    hi.labels = hi.labels,
    hi.fill = hi.fill,
    hi.col = hi.col,
    hi.box.col = hi.box.col,
    hi.family = hi.family
  )
  out <- c(base, extras)
  # prepend our subclass so dispatch falls through to the textbox behavior
  class(out) <- c("element_textbox_highlight", "element_textbox", "element_text", "element")
  out
}
# S3 draw method for element_textbox_highlight: if the strip label being
# drawn is one of the designated highlight labels, swap in the highlight
# styling before delegating the actual drawing to the parent class.
element_grob.element_textbox_highlight <- function(element, label = "", ...) {
  if (label %in% element$hi.labels) {
    # `%||%` (rlang): use the hi.* override when supplied, else keep the default
    element$fill <- element$hi.fill %||% element$fill
    element$colour <- element$hi.col %||% element$colour
    element$box.colour <- element$hi.box.col %||% element$box.colour
    element$family <- element$hi.family %||% element$family
  }
  # continue with element_grob.element_textbox on the (possibly modified) element
  NextMethod()
}
## Use the custom strip element: year strips get the highlight styling
g + facet_wrap(year ~ season, nrow = 4, scales = "free_x") +
  theme(
    strip.background = element_blank(),
    strip.text = element_textbox_highlight(
      family = "Playfair", size = 12, face = "bold",
      fill = "white", box.color = "chartreuse4", color = "chartreuse4",
      halign = .5, linetype = 1, r = unit(5, "pt"), width = unit(1, "npc"),
      padding = margin(5, 0, 3, 0), margin = margin(0, 1, 3, 1),
      hi.labels = c("1997", "1998", "1999", "2000"),
      hi.fill = "chartreuse4", hi.box.col = "black", hi.col = "white"
    )
  )
## Highlight one facet ("Summer") in both the points and the strip
ggplot(chic, aes(x = date, y = temp)) +
  geom_point(aes(color = season == "Summer"), alpha = .3) +
  labs(x = "Year", y = "Temperature (°F)") +
  facet_wrap(~ season, nrow = 1) +
  scale_color_manual(values = c("gray40", "firebrick"), guide = "none") +
  theme(
    axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1),
    strip.background = element_blank(),
    strip.text = element_textbox_highlight(
      size = 12, face = "bold",
      fill = "white", box.color = "white", color = "gray40",
      halign = .5, linetype = 1, r = unit(0, "pt"), width = unit(1, "npc"),
      padding = margin(2, 0, 1, 0), margin = margin(0, 1, 3, 1),
      hi.labels = "Summer", hi.family = "Bangers",
      hi.fill = "firebrick", hi.box.col = "firebrick", hi.col = "white"
    )
  )
## Two plots reused in the multi-panel layout examples below
p1 <- ggplot(chic, aes(x = date, y = temp,
                       color = season)) +
  geom_point() +
  geom_rug() +
  labs(x = "Year", y = "Temperature (°F)")
p2 <- ggplot(chic, aes(x = date, y = o3)) +
  geom_line(color = "gray") +
  geom_point(color = "darkorange2") +
  labs(x = "Year", y = "Ozone")
## patchwork: compose plots with + (side by side) and / (stacked)
library(patchwork)
p1 + p2
p1 / p2
(g + p2) / p1
## cowplot: nestable grid layouts
library(cowplot)
plot_grid(plot_grid(g, p1), p2, ncol = 1)
## gridExtra: explicit layout matrix (plot 3 spans the full bottom row)
library(gridExtra)
grid.arrange(g, p1, p2,
             layout_matrix = rbind(c(1, 2), c(3, 3)))
## patchwork also accepts an ASCII design string (# = empty cell)
layout <- "
AABBBB#
AACCDDE
##CCDD#
##CC###
"
p2 + p1 + p1 + g + p2 +
  plot_layout(design = layout)
## color styles the outline, fill the interior of bar geoms
ggplot(chic, aes(year)) +
  geom_bar(aes(fill = season), color = "grey", size = 2) +
  labs(x = "Year", y = "Observations", fill = "Season:")
## A single fixed color, set outside aes()
ggplot(chic, aes(x = date, y = temp)) +
  geom_point(color = "steelblue", size = 2) +
  labs(x = "Year", y = "Temperature (°F)")
## Shapes 21-25 take both a color (stroke) and a fill
ggplot(chic, aes(x = date, y = temp)) +
  geom_point(shape = 21, size = 2, stroke = 1,
             color = "#3cc08f", fill = "#c08f3c") +
  labs(x = "Year", y = "Temperature (°F)")
## Base plot for the qualitative palette examples
(ga <- ggplot(chic, aes(x = date, y = temp, color = season)) +
  geom_point() +
  labs(x = "Year", y = "Temperature (°F)", color = NULL))
## Hand-picked colors, one per factor level
ga + scale_color_manual(values = c("dodgerblue4",
                                   "darkolivegreen4",
                                   "darkorchid3",
                                   "goldenrod1"))
## Predefined qualitative palettes
ga + scale_color_brewer(palette = "Set1")
library(ggthemes)
ga + scale_color_tableau()
## Journal-style palettes from ggsci
library(ggsci)
g1 <- ga + scale_color_aaas()
g2 <- ga + scale_color_npg()
library(patchwork)
## patchwork's * applies the theme change to every panel in the composition
(g1 + g2) * theme(legend.position = "top")
## Base plot for the continuous palette examples
gb <- ggplot(chic, aes(x = date, y = temp, color = temp)) +
  geom_point() +
  labs(x = "Year", y = "Temperature (°F)", color = "Temperature (°F):")
gb + scale_color_continuous()
## gb + scale_color_gradient()
mid <- mean(chic$temp) ## midpoint
## Diverging gradient centered on the mean temperature
gb + scale_color_gradient2(midpoint = mid)
## Sequential two-color gradient
gb + scale_color_gradient(low = "darkkhaki",
                          high = "darkgreen")
gb + scale_color_gradient2(midpoint = mid, low = "#dd8a0b",
                           mid = "grey92", high = "#32a676")
## The four classic viridis color maps, side by side
p1 <- gb + scale_color_viridis_c() + ggtitle("'viridis' (default)")
p2 <- gb + scale_color_viridis_c(option = "inferno") + ggtitle("'inferno'")
p3 <- gb + scale_color_viridis_c(option = "plasma") + ggtitle("'plasma'")
p4 <- gb + scale_color_viridis_c(option = "cividis") + ggtitle("'cividis'")
library(patchwork)
(p1 + p2 + p3 + p4) * theme(legend.position = "bottom")
## Discrete viridis variant for factor mappings
ga + scale_color_viridis_d(guide = "none")
## CARTOcolors palettes
library(rcartocolor)
g1 <- gb + scale_color_carto_c(palette = "BurgYl")
g2 <- gb + scale_color_carto_c(palette = "Earth")
(g1 + g2) * theme(legend.position = "bottom")
## Scientific (perceptually uniform) scico palettes
library(scico)
g1 <- gb + scale_color_scico(palette = "berlin")
g2 <- gb + scale_color_scico(palette = "hawaii", direction = -1)
(g1 + g2) * theme(legend.position = "bottom")
library(ggdark)
## NOTE(review): aes() lists `color` twice here; the after_scale() entry
## overrides the first so each inner point gets the inverted color -- confirm
## this matches the intended ggdark demo
ggplot(chic, aes(date, temp, color = temp)) +
  geom_point(size = 5) +
  geom_point(aes(color = temp,
                 color = after_scale(invert_color(color))),
             size = 2) +
  scale_color_scico(palette = "hawaii", guide = "none") +
  labs(x = "Year", y = "Temperature (°F)")
## after_scale() derives the fill from the already-mapped color
library(colorspace)
ggplot(chic, aes(date, temp)) +
  geom_boxplot(aes(color = season,
                   fill = after_scale(desaturate(lighten(color, .6), .6))),
               size = 1) +
  scale_color_brewer(palette = "Dark2", guide = "none") +
  labs(x = "Year", y = "Temperature (°F)")
## Complete prebuilt themes from ggthemes / hrbrthemes
library(ggthemes)
ggplot(chic, aes(x = date, y = temp, color = season)) +
  geom_point() +
  labs(x = "Year", y = "Temperature (°F)") +
  ggtitle("Ups and Downs of Chicago's Daily Temperatures") +
  theme_economist() +
  scale_color_economist(name = NULL)
library(dplyr)
chic_2000 <- filter(chic, year == 2000)
ggplot(chic_2000, aes(x = temp, y = o3)) +
  geom_point() +
  labs(x = "Temperature (°F)", y = "Ozone") +
  ggtitle("Temperature and Ozone Levels During the Year 2000 in Chicago") +
  theme_tufte()
library(hrbrthemes)
ggplot(chic, aes(x = temp, y = o3)) +
  geom_point(aes(color = dewpoint), show.legend = FALSE) +
  labs(x = "Temperature (°F)", y = "Ozone") +
  ggtitle("Temperature and Ozone Levels in Chicago") +
  theme_modern_rc()
## Theme base_* arguments adjust font, size, and line/rect defaults
g <- ggplot(chic, aes(x = date, y = temp)) +
  geom_point(color = "firebrick") +
  labs(x = "Year", y = "Temperature (°F)",
       title = "Temperatures in Chicago")
g + theme_bw(base_family = "Playfair")
g + theme_bw(base_size = 30, base_family = "Roboto Condensed")
g + theme_bw(base_line_size = 1, base_rect_size = 1)
## Print a built-in theme's source as a template for a custom theme
theme_gray
## Fully specified custom theme, written from scratch in the style of
## ggplot2's theme_gray(). Every theme element is set explicitly and
## `complete = TRUE` marks it as a standalone theme (unset elements do
## NOT inherit from the current theme).
##
## Arguments:
##   base_size   — base font size in pt; most sizes/margins scale off it.
##   base_family — base font family for all text elements.
theme_custom <- function (base_size = 12, base_family = "Roboto Condensed") {
  ## common margin/spacing unit derived from the base font size
  half_line <- base_size/2
  theme(
    ## global element defaults that the specific elements inherit from
    line = element_line(color = "black", size = .5,
                        linetype = 1, lineend = "butt"),
    rect = element_rect(fill = "white", color = "black",
                        size = .5, linetype = 1),
    text = element_text(family = base_family, face = "plain",
                        color = "black", size = base_size,
                        lineheight = .9, hjust = .5, vjust = .5,
                        angle = 0, margin = margin(), debug = FALSE),
    ## axis lines, ticks, labels and titles
    axis.line = element_blank(),
    axis.line.x = NULL,
    axis.line.y = NULL,
    axis.text = element_text(size = base_size * 1.1, color = "gray30"),
    axis.text.x = element_text(margin = margin(t = .8 * half_line/2),
                               vjust = 1),
    axis.text.x.top = element_text(margin = margin(b = .8 * half_line/2),
                                   vjust = 0),
    axis.text.y = element_text(margin = margin(r = .8 * half_line/2),
                               hjust = 1),
    axis.text.y.right = element_text(margin = margin(l = .8 * half_line/2),
                                     hjust = 0),
    axis.ticks = element_line(color = "gray30", size = .7),
    axis.ticks.length = unit(half_line / 1.5, "pt"),
    axis.ticks.length.x = NULL,
    axis.ticks.length.x.top = NULL,
    axis.ticks.length.x.bottom = NULL,
    axis.ticks.length.y = NULL,
    axis.ticks.length.y.left = NULL,
    axis.ticks.length.y.right = NULL,
    axis.title.x = element_text(margin = margin(t = half_line),
                                vjust = 1, size = base_size * 1.3,
                                face = "bold"),
    axis.title.x.top = element_text(margin = margin(b = half_line),
                                    vjust = 0),
    axis.title.y = element_text(angle = 90, vjust = 1,
                                margin = margin(r = half_line),
                                size = base_size * 1.3, face = "bold"),
    axis.title.y.right = element_text(angle = -90, vjust = 0,
                                      margin = margin(l = half_line)),
    ## legend box, keys and text
    legend.background = element_rect(color = NA),
    legend.spacing = unit(.4, "cm"),
    legend.spacing.x = NULL,
    legend.spacing.y = NULL,
    legend.margin = margin(.2, .2, .2, .2, "cm"),
    legend.key = element_rect(fill = "gray95", color = "white"),
    legend.key.size = unit(1.2, "lines"),
    legend.key.height = NULL,
    legend.key.width = NULL,
    legend.text = element_text(size = rel(.8)),
    legend.text.align = NULL,
    legend.title = element_text(hjust = 0),
    legend.title.align = NULL,
    legend.position = "right",
    legend.direction = NULL,
    legend.justification = "center",
    legend.box = NULL,
    legend.box.margin = margin(0, 0, 0, 0, "cm"),
    legend.box.background = element_blank(),
    legend.box.spacing = unit(.4, "cm"),
    ## panel: background, border, grid lines and facet spacing
    panel.background = element_rect(fill = "white", color = NA),
    panel.border = element_rect(color = "gray30",
                                fill = NA, size = .7),
    panel.grid.major = element_line(color = "gray90", size = 1),
    panel.grid.minor = element_line(color = "gray90", size = .5,
                                    linetype = "dashed"),
    panel.spacing = unit(base_size, "pt"),
    panel.spacing.x = NULL,
    panel.spacing.y = NULL,
    panel.ontop = FALSE,
    ## facet strips
    strip.background = element_rect(fill = "white", color = "gray30"),
    strip.text = element_text(color = "black", size = base_size),
    strip.text.x = element_text(margin = margin(t = half_line,
                                                b = half_line)),
    strip.text.y = element_text(angle = -90,
                                margin = margin(l = half_line,
                                                r = half_line)),
    strip.text.y.left = element_text(angle = 90),
    strip.placement = "inside",
    strip.placement.x = NULL,
    strip.placement.y = NULL,
    strip.switch.pad.grid = unit(0.1, "cm"),
    strip.switch.pad.wrap = unit(0.1, "cm"),
    ## whole-plot elements: title, subtitle, caption, tag, outer margin
    plot.background = element_rect(color = NA),
    plot.title = element_text(size = base_size * 1.8, hjust = .5,
                              vjust = 1, face = "bold",
                              margin = margin(b = half_line * 1.2)),
    plot.title.position = "panel",
    plot.subtitle = element_text(size = base_size * 1.3,
                                 hjust = .5, vjust = 1,
                                 margin = margin(b = half_line * .9)),
    plot.caption = element_text(size = rel(0.9), hjust = 1, vjust = 1,
                                margin = margin(t = half_line * .9)),
    plot.caption.position = "panel",
    plot.tag = element_text(size = rel(1.2), hjust = .5, vjust = .5),
    plot.tag.position = "topleft",
    plot.margin = margin(base_size, base_size, base_size, base_size),
    ## mark as a complete theme (no inheritance from the active theme)
    complete = TRUE
  )
}
## Register the hand-built theme as the session default, then tweak it
## in place with theme_update().
## Fixed: `guides(color = FALSE)` is deprecated since ggplot2 3.3.4 in
## favor of `guides(color = "none")` (consistent with the `guide = "none"`
## usage elsewhere in this file).
theme_set(theme_custom())
ggplot(chic, aes(x = date, y = temp, color = season)) +
  geom_point() + labs(x = "Year", y = "Temperature (°F)") + guides(color = "none")
## NOTE(review): theme_update() calls theme_set(), which returns the
## *previous* theme invisibly — so `theme_custom` here likely captures the
## pre-update theme, not the updated one. Kept as in the original tutorial.
theme_custom <- theme_update(panel.background = element_rect(fill = "gray60"))
ggplot(chic, aes(x = date, y = temp, color = season)) +
  geom_point() + labs(x = "Year", y = "Temperature (°F)") + guides(color = "none")
theme_custom <- theme_update(panel.background = element_rect(fill = "white"),
                             panel.grid.major = element_line(size = .5),
                             panel.grid.minor = element_blank())
## Horizontal reference lines at fixed y values (vectorized yintercept).
ggplot(chic, aes(x = date, y = temp, color = o3)) +
  geom_point() +
  geom_hline(yintercept = c(0, 73)) +
  labs(x = "Year", y = "Temperature (°F)")
## Reference-line gallery on a temp vs dewpoint scatter: vline/hline at
## the medians, a fitted regression line via geom_abline, bounded line
## ranges, segments, curves, and arrows.
g <- ggplot(chic, aes(x = temp, y = dewpoint)) +
  geom_point(alpha = .5) +
  labs(x = "Temperature (°F)", y = "Dewpoint")
g +
  geom_vline(aes(xintercept = median(temp)), size = 1.5,
             color = "firebrick", linetype = "dashed") +
  geom_hline(aes(yintercept = median(dewpoint)), size = 1.5,
             color = "firebrick", linetype = "dashed")
## linear fit; intercept/slope are fed to geom_abline and the title
reg <- lm(dewpoint ~ temp, data = chic)
g +
  geom_abline(intercept = coefficients(reg)[1],
              slope = coefficients(reg)[2],
              color = "darkorange2", size = 1.5) +
  labs(title = paste0("y = ", round(coefficients(reg)[2], 2),
                      " * x + ", round(coefficients(reg)[1], 2)))
g +
  ## vertical line
  geom_linerange(aes(x = 50, ymin = 20, ymax = 55),
                 color = "steelblue", size = 2) +
  ## horizontal line
  geom_linerange(aes(xmin = -Inf, xmax = 25, y = 0),
                 color = "red", size = 1)
g +
  geom_segment(aes(x = 50, xend = 75,
                   y = 20, yend = 45),
               color = "purple", size = 2)
## same endpoints, different curvature settings
g +
  geom_curve(aes(x = 0, y = 60, xend = 75, yend = 0),
             size = 2, color = "tan") +
  geom_curve(aes(x = 0, y = 60, xend = 75, yend = 0),
             curvature = -0.7, angle = 45,
             color = "darkgoldenrod1", size = 1) +
  geom_curve(aes(x = 0, y = 60, xend = 75, yend = 0),
             curvature = 0, size = 1.5)
## curves with arrowheads (open, and closed on both ends)
g +
  geom_curve(aes(x = 0, y = 60, xend = 75, yend = 0),
             size = 2, color = "tan",
             arrow = arrow(length = unit(0.07, "npc"))) +
  geom_curve(aes(x = 5, y = 55, xend = 70, yend = 5),
             curvature = -0.7, angle = 45,
             color = "darkgoldenrod1", size = 1,
             arrow = arrow(length = unit(0.03, "npc"),
                           type = "closed",
                           ends = "both"))
set.seed(2020)
library(dplyr)
## Draw a reproducible 1% sample per season for the labelling examples.
## Renamed from `sample` to `chic_sample`: a variable called `sample`
## masks base::sample() for the rest of the session.
chic_sample <- chic %>%
  dplyr::group_by(season) %>%
  dplyr::sample_frac(0.01)
## code without pipes:
## chic_sample <- sample_frac(group_by(chic, season), .01)
## text labels placed next to each point
ggplot(chic_sample, aes(x = date, y = temp, label = season)) +
  geom_point() +
  geom_text(aes(color = factor(temp)), hjust = .5, vjust = -.5) +
  labs(x = "Year", y = "Temperature (°F)") +
  xlim(as.Date(c('1997-01-01', '2000-12-31'))) +
  ylim(c(0, 90)) +
  theme(legend.position = "none")
## boxed labels instead of plain text
ggplot(chic_sample, aes(x = date, y = temp, label = season)) +
  geom_point() +
  geom_label(aes(fill = factor(temp)), color = "white",
             fontface = "bold", hjust = .5, vjust = -.25) +
  labs(x = "Year", y = "Temperature (°F)") +
  xlim(as.Date(c('1997-01-01', '2000-12-31'))) +
  ylim(c(0, 90)) +
  theme(legend.position = "none")
## ggrepel automatically moves labels so they do not overlap
library(ggrepel)
ggplot(chic_sample, aes(x = date, y = temp, label = season)) +
  geom_point() +
  geom_label_repel(aes(fill = factor(temp)),
                   color = "white", fontface = "bold") +
  labs(x = "Year", y = "Temperature (°F)") +
  theme(legend.position = "none")
## Text annotations via geom_text(): without stat = "unique" the label is
## drawn once per row (overplotted); "unique" draws it exactly once.
g <-
  ggplot(chic, aes(x = temp, y = dewpoint)) +
  geom_point(alpha = .5) +
  labs(x = "Temperature (°F)", y = "Dewpoint")
g +
  geom_text(aes(x = 25, y = 60,
                label = "This is an useful annotation"))
g +
  geom_text(aes(x = 25, y = 60,
                label = "This is an useful annotation"),
            stat = "unique")
g +
  geom_text(aes(x = 25, y = 60,
                label = "This is an useful annotation"),
            stat = "unique", family = "Bangers",
            size = 7, color = "darkcyan")
## Annotation data frame with one row: because `season` is set, the label
## appears only in the matching facet.
ann <- data.frame(
  o3 = 30,
  temp = 20,
  season = factor("Summer", levels = levels(chic$season)),
  label = "Here is enough space\nfor some annotations."
)
g <-
  ggplot(chic, aes(x = o3, y = temp)) +
  geom_point() +
  labs(x = "Ozone", y = "Temperature (°F)")
g +
  geom_text(data = ann, aes(label = label),
            size = 7, fontface = "bold",
            family = "Roboto Condensed") +
  facet_wrap(~season)
## With free x scales a fixed x position lands differently per facet —
## shown here as a counter-example.
g +
  geom_text(aes(x = 23, y = 97,
                label = "This is not an useful annotation"),
            size = 5, fontface = "bold") +
  scale_y_continuous(limits = c(NA, 100)) +
  facet_wrap(~season, scales = "free_x")
library(tidyverse)
## per-season x midpoint, so the label is centered in each facet
## (outer parentheses print the result immediately)
(ann <-
  chic %>%
  group_by(season) %>%
  summarize(o3 = min(o3, na.rm = TRUE) +
              (max(o3, na.rm = TRUE) - min(o3, na.rm = TRUE)) / 2))
## NOTE(review): redundant — `ann` was already printed by the
## parenthesized assignment above.
ann
g +
  geom_text(data = ann,
            aes(x = o3, y = 97,
                label = "This is an useful annotation"),
            size = 5, fontface = "bold") +
  scale_y_continuous(limits = c(NA, 100)) +
  facet_wrap(~season, scales = "free_x")
## grid grob in npc units: same relative position in every facet
library(grid)
my_grob <- grobTree(textGrob("This text stays in place!",
                             x = .1, y = .9, hjust = 0,
                             gp = gpar(col = "black",
                                       fontsize = 15,
                                       fontface = "bold")))
g +
  annotation_custom(my_grob) +
  facet_wrap(~season, scales = "free_x") +
  scale_y_continuous(limits = c(NA, 100))
## Rich-text annotations via ggtext: Markdown and a subset of HTML/CSS
## are rendered by geom_richtext(); geom_textbox() adds word wrapping.
library(ggtext)
lab_md <- "This plot shows **temperature** in *°F* versus **ozone level** in *ppm*"
g +
  geom_richtext(aes(x = 35, y = 3, label = lab_md),
                stat = "unique")
lab_html <- "★ This plot shows <b style='color:red;'>temperature</b> in <i>°F</i> versus <b style='color:blue;'>ozone level</b>in <i>ppm</i> ★"
g +
  geom_richtext(aes(x = 33, y = 3, label = lab_html),
                stat = "unique")
## richtext labels can be rotated and styled like geom_label
g +
  geom_richtext(aes(x = 10, y = 25, label = lab_md),
                stat = "unique", angle = 30,
                color = "white", fill = "steelblue",
                label.color = NA, hjust = 0, vjust = 0,
                family = "Playfair Display")
lab_long <- "**Lorem ipsum dolor**<br><i style='font-size:8pt;color:red;'>Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.<br>Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.</i>"
g +
  geom_textbox(aes(x = 40, y = 10, label = lab_long),
               width = unit(15, "lines"), stat = "unique")
## Coordinate-system examples: coord_flip() vs mapping to y directly,
## fixed aspect ratios, and a reversed axis.
ggplot(chic, aes(x = season, y = o3)) +
  geom_boxplot(fill = "indianred") +
  labs(x = "Season", y = "Ozone") +
  coord_flip()
## modern alternative to coord_flip(): map the categorical variable to y
ggplot(chic, aes(x = o3, y = season)) +
  geom_boxplot(fill = "indianred", orientation = "y") +
  labs(x = "Ozone", y = "Season")
ggplot(chic, aes(x = temp, y = o3)) +
  geom_point() +
  labs(x = "Temperature (°F)", y = "Ozone Level") +
  scale_x_continuous(breaks = seq(0, 80, by = 20)) +
  coord_fixed(ratio = 1)
## ratio < 1 stretches the x axis relative to y
ggplot(chic, aes(x = temp, y = o3)) +
  geom_point() +
  labs(x = "Temperature (°F)", y = "Ozone Level") +
  scale_x_continuous(breaks = seq(0, 80, by = 20)) +
  coord_fixed(ratio = 1/3) +
  theme(plot.background = element_rect(fill = "grey80"))
ggplot(chic, aes(x = date, y = temp, color = o3)) +
  geom_point() +
  labs(x = "Year", y = "Temperature (°F)") +
  scale_y_reverse()
## the default
ggplot(chic, aes(x = temp, y = season)) +
  geom_jitter(aes(color = season),
              orientation = "y", show.legend = FALSE) +
  labs(x = "Temperature (°F)", y = NULL)
## fct_rev() flips the factor-level order on the y axis
library(forcats)
ggplot(chic, aes(x = temp, y = fct_rev(season))) +
  geom_jitter(aes(color = season),
              orientation = "y", show.legend = FALSE) +
  labs(x = "Temperature (°F)", y = NULL)
## Log10-scaled y axis.
## Fixed: spell out `limits` instead of relying on partial argument
## matching of the abbreviated `lim`.
ggplot(chic, aes(x = date, y = temp, color = o3)) +
  geom_point() +
  labs(x = "Year", y = "Temperature (°F)") +
  scale_y_log10(limits = c(0.1, 100))
## Median ozone per season on polar coordinates (circular bar chart).
## Fixed: `guides(fill = FALSE)` is deprecated since ggplot2 3.3.4 in
## favor of `guides(fill = "none")`.
library(tidyverse)
chic %>%
  dplyr::group_by(season) %>%
  dplyr::summarize(o3 = median(o3)) %>%
  ggplot(aes(x = season, y = o3)) +
  geom_col(aes(fill = season), color = NA) +
  labs(x = "", y = "Median Ozone Level") +
  coord_polar() +
  guides(fill = "none")
## Proportion of above-median-ozone days per season; drawn as a pie chart
## via a single stacked bar on coord_polar(theta = "y").
chic_sum <-
  chic %>%
  dplyr::mutate(o3_avg = median(o3)) %>%
  dplyr::filter(o3 > o3_avg) %>%
  dplyr::mutate(n_all = n()) %>%
  dplyr::group_by(season) %>%
  dplyr::summarize(rel = n() / unique(n_all))
ggplot(chic_sum, aes(x = "", y = rel)) +
  geom_col(aes(fill = season), width = 1, color = NA) +
  labs(x = "", y = "Proportion of Days Exceeding\nthe Median Ozone Level") +
  coord_polar(theta = "y") +
  scale_fill_brewer(palette = "Set1", name = "Season:") +
  theme(axis.ticks = element_blank(),
        panel.grid = element_blank())
## same data without the polar transform: a plain stacked bar
ggplot(chic_sum, aes(x = "", y = rel)) +
  geom_col(aes(fill = season), width = 1, color = NA) +
  labs(x = "", y = "Proportion of Days Exceeding\nthe Median Ozone Level") +
  #coord_polar(theta = "y") +
  scale_fill_brewer(palette = "Set1", name = "Season:") +
  theme(axis.ticks = element_blank(),
        panel.grid = element_blank())
## Distribution-plot gallery on a shared base: boxplot, raw points,
## jitter, violins, and sina plots (ggforce).
g <-
  ggplot(chic, aes(x = season, y = o3,
                   color = season)) +
    labs(x = "Season", y = "Ozone") +
    scale_color_brewer(palette = "Dark2", guide = "none")
g + geom_boxplot()
g + geom_point()
g + geom_point(alpha = .1)
g + geom_jitter(width = .3, alpha = .5)
g + geom_violin(fill = "gray80", size = 1, alpha = .5)
g + geom_violin(fill = "gray80", size = 1, alpha = .5) +
  geom_jitter(alpha = .25, width = .3) +
  coord_flip()
## geom_sina() jitters points within the violin outline
library(ggforce)
g + geom_violin(fill = "gray80", size = 1, alpha = .5) +
  geom_sina(alpha = .25) +
  coord_flip()
## violin + slim boxplot overlay (whiskers and outliers suppressed)
g + geom_violin(aes(fill = season), size = 1, alpha = .5) +
  geom_boxplot(outlier.alpha = 0, coef = 0,
               color = "gray40", width = .2) +
  scale_fill_brewer(palette = "Dark2", guide = "none") +
  coord_flip()
## Marginal rug plots: default (both axes) and right side only.
ggplot(chic, aes(x = date, y = temp,
                 color = season)) +
  geom_point(show.legend = FALSE) +
  geom_rug(show.legend = FALSE) +
  labs(x = "Year", y = "Temperature (°F)")
ggplot(chic, aes(x = date, y = temp, color = season)) +
  geom_point(show.legend = FALSE) +
  geom_rug(sides = "r", alpha = .3, show.legend = FALSE) +
  labs(x = "Year", y = "Temperature (°F)")
## Correlation heatmap: corrr computes the (lower-triangle) correlation
## matrix, which is pivoted to long format and drawn with geom_tile().
library(tidyverse)
corm <-
  chic %>%
  select(death, temp, dewpoint, pm10, o3) %>%
  corrr::correlate(diagonal = 1) %>%
  corrr::shave(upper = FALSE)
corm
## long format; fct_inorder() preserves the variable order on both axes
corm <- corm %>%
  pivot_longer(
    cols = -rowname,
    names_to = "colname",
    values_to = "corr"
  ) %>%
  mutate(rowname = fct_inorder(rowname),
         colname = fct_inorder(colname))
corm
ggplot(corm, aes(rowname, fct_rev(colname),
                 fill = corr)) +
  geom_tile() +
  geom_text(aes(label = round(corr, 2))) +
  coord_fixed() +
  labs(x = NULL, y = NULL)
## polished version: text color switches to white on dark tiles
## (mapped from |corr| < .75), diverging fill fixed to [-1, 1]
ggplot(corm, aes(rowname, fct_rev(colname),
                 fill = corr)) +
  geom_tile() +
  geom_text(aes(
    label = format(round(corr, 2), nsmall = 2),
    color = abs(corr) < .75
  )) +
  coord_fixed(expand = FALSE) +
  scale_color_manual(values = c("white", "black"),
                     guide = "none") +
  scale_fill_distiller(
    palette = "PuOr", na.value = "white",
    direction = 1, limits = c(-1, 1)
  ) +
  labs(x = NULL, y = NULL) +
  theme(panel.border = element_rect(color = NA, fill = NA),
        legend.position = c(.85, .8))
## 2D density estimates of temperature vs ozone: contour lines and
## filled contour bands.
## Fixed: both labs() calls passed `x` twice — the second argument is the
## y-axis label and must be `y = "Ozone Level"`.
ggplot(chic, aes(temp, o3)) +
  geom_density_2d() +
  labs(x = "Temperature (°F)", y = "Ozone Level")
ggplot(chic, aes(temp, o3)) +
  geom_density_2d_filled(show.legend = FALSE) +
  coord_cartesian(expand = FALSE) +
  labs(x = "Temperature (°F)", y = "Ozone Level")
## interpolate data
library(akima)
fld <- with(chic, interp(x = temp, y = o3, z = dewpoint))
## prepare data in long format
library(reshape2)
df <- melt(fld$z, na.rm = TRUE)
names(df) <- c("x", "y", "Dewpoint")
## contour/tile plots of the interpolated dewpoint surface
g <- ggplot(data = df, aes(x = x, y = y, z = Dewpoint)) +
  labs(x = "Temperature (°F)", y = "Ozone Level",
       color = "Dewpoint")
## Fixed: `..level..` is deprecated — use after_stat(level) (ggplot2 >= 3.3,
## same API level as the after_scale() used earlier in this file).
g + stat_contour(aes(color = after_stat(level), fill = Dewpoint))
g + geom_tile(aes(fill = Dewpoint)) +
  scale_fill_viridis_c(option = "inferno")
g + geom_tile(aes(fill = Dewpoint)) +
  stat_contour(color = "white", size = .7, bins = 5) +
  scale_fill_viridis_c()
## Hexagonal and rectangular 2D binning of temperature vs ozone.
## Fixed: deprecated `..count..` / `..density..` replaced with the
## after_stat() notation (ggplot2 >= 3.3).
ggplot(chic, aes(temp, o3)) +
  geom_hex() +
  scale_fill_distiller(palette = "YlOrRd", direction = 1) +
  labs(x = "Temperature (°F)", y = "Ozone Level")
## outline color mapped to the same statistic as the fill
ggplot(chic, aes(temp, o3)) +
  geom_hex(aes(color = after_stat(count))) +
  scale_fill_distiller(palette = "YlOrRd", direction = 1) +
  scale_color_distiller(palette = "YlOrRd", direction = 1) +
  labs(x = "Temperature (°F)", y = "Ozone Level")
ggplot(chic, aes(temp, o3)) +
  geom_hex(color = "grey") +
  scale_fill_distiller(palette = "YlOrRd", direction = 1) +
  labs(x = "Temperature (°F)", y = "Ozone Level")
ggplot(chic, aes(temp, o3, fill = after_stat(density))) +
  geom_hex(bins = 50, color = "grey") +
  scale_fill_distiller(palette = "YlOrRd", direction = 1) +
  labs(x = "Temperature (°F)", y = "Ozone Level")
ggplot(chic, aes(temp, o3, fill = after_stat(density))) +
  geom_bin2d(bins = 15, color = "grey") +
  scale_fill_distiller(palette = "YlOrRd", direction = 1) +
  labs(x = "Temperature (°F)", y = "Ozone Level")
## Ridgeline plots (ggridges): yearly and seasonal temperature
## distributions.
## Fixed: deprecated `..x..` -> after_stat(x) and
## `guides(fill = FALSE)` -> `guides(fill = "none")`.
library(ggridges)
ggplot(chic, aes(x = temp, y = factor(year))) +
  geom_density_ridges(fill = "gray90") +
  labs(x = "Temperature (°F)", y = "Year")
ggplot(chic, aes(x = temp, y = factor(year), fill = year)) +
  geom_density_ridges(alpha = .8, color = "white",
                      scale = 2.5, rel_min_height = .01) +
  labs(x = "Temperature (°F)", y = "Year") +
  guides(fill = "none") +
  theme_ridges()
## gradient fill along the x axis within each ridge
ggplot(chic, aes(x = temp, y = season, fill = after_stat(x))) +
  geom_density_ridges_gradient(scale = .9, gradient_lwd = .5,
                               color = "black") +
  scale_fill_viridis_c(option = "plasma", name = "") +
  labs(x = "Temperature (°F)", y = "Season") +
  theme_ridges(font_family = "Roboto Condensed", grid = FALSE)
library(tidyverse)
## only plot extreme season using dplyr from the tidyverse
ggplot(data = filter(chic, season %in% c("Summer", "Winter")),
       aes(x = temp, y = year, fill = paste(year, season))) +
  geom_density_ridges(alpha = .7, rel_min_height = .01,
                      color = "white", from = -5, to = 95) +
  scale_fill_cyclical(breaks = c("1997 Summer", "1997 Winter"),
                      labels = c(`1997 Summer` = "Summer",
                                 `1997 Winter` = "Winter"),
                      values = c("tomato", "dodgerblue"),
                      name = "Season:", guide = "legend") +
  theme_ridges(grid = FALSE) +
  labs(x = "Temperature (°F)", y = "Year")
## histogram-style ridges instead of smoothed densities
ggplot(chic, aes(x = temp, y = factor(year), fill = year)) +
  geom_density_ridges(stat = "binline", bins = 25, scale = .9,
                      draw_baseline = FALSE, show.legend = FALSE) +
  theme_minimal() +
  labs(x = "Temperature (°F)", y = "Season")
## 30-day running mean of ozone: stats::filter() (namespaced to avoid the
## dplyr::filter clash) applies a centered moving-average filter.
chic$o3run <- as.numeric(stats::filter(chic$o3, rep(1/30, 30), sides = 2))
ggplot(chic, aes(x = date, y = o3run)) +
  geom_line(color = "chocolate", lwd = .8) +
  labs(x = "Year", y = "Ozone")
## shaded area under the running mean via geom_ribbon
ggplot(chic, aes(x = date, y = o3run)) +
  geom_ribbon(aes(ymin = 0, ymax = o3run),
              fill = "orange", alpha = .4) +
  geom_line(color = "chocolate", lwd = .8) +
  labs(x = "Year", y = "Ozone")
## geom_area is the shortcut for a ribbon with ymin = 0
ggplot(chic, aes(x = date, y = o3run)) +
  geom_area(color = "chocolate", lwd = .8,
            fill = "orange", alpha = .4) +
  labs(x = "Year", y = "Ozone")
## +/- one standard deviation band around the running mean
chic$mino3 <- chic$o3run - sd(chic$o3run, na.rm = TRUE)
chic$maxo3 <- chic$o3run + sd(chic$o3run, na.rm = TRUE)
ggplot(chic, aes(x = date, y = o3run)) +
  geom_ribbon(aes(ymin = mino3, ymax = maxo3), alpha = .5,
              fill = "darkseagreen3", color = "transparent") +
  geom_line(color = "aquamarine4", lwd = .7) +
  labs(x = "Year", y = "Ozone")
## Smoothing examples: default loess/GAM fit, linear fit, polynomial fit,
## and GAM fits with increasing basis dimension k.
ggplot(chic, aes(x = date, y = temp)) +
  labs(x = "Year", y = "Temperature (°F)") +
  stat_smooth() +
  geom_point(color = "gray40", alpha = .5)
ggplot(chic, aes(x = temp, y = death)) +
  labs(x = "Temperature (°F)", y = "Deaths") +
  stat_smooth(method = "lm", se = FALSE,
              color = "firebrick", size = 1.3) +
  geom_point(color = "gray40", alpha = .5)
## 5th-order polynomial fit via an explicit lm formula
ggplot(chic, aes(x = o3, y = temp))+
  labs(x = "Ozone Level", y = "Temperature (°F)") +
  geom_smooth(
    method = "lm",
    formula = y ~ x + I(x^2) + I(x^3) + I(x^4) + I(x^5),
    color = "black",
    fill = "firebrick"
  ) +
  geom_point(color = "gray40", alpha = .3)
## geom_smooth and stat_smooth are interchangeable: each defaults to the other
ggplot(chic, aes(x = o3, y = temp))+
  labs(x = "Ozone Level", y = "Temperature (°F)") +
  geom_smooth(stat = "smooth") + ## the default
  geom_point(color = "gray40", alpha = .3)
ggplot(chic, aes(x = o3, y = temp))+
  labs(x = "Ozone Level", y = "Temperature (°F)") +
  stat_smooth(geom = "smooth") + ## the default
  geom_point(color = "gray40", alpha = .3)
## three GAM fits; mapping `col` to a constant string creates legend entries
cols <- c("darkorange2", "firebrick", "dodgerblue3")
ggplot(chic, aes(x = date, y = temp)) +
  geom_point(color = "gray40", alpha = .3) +
  labs(x = "Year", y = "Temperature (°F)") +
  stat_smooth(aes(col = "1000"),
              method = "gam",
              formula = y ~ s(x, k = 1000),
              se = FALSE, size = 1.3) +
  stat_smooth(aes(col = "100"),
              method = "gam",
              formula = y ~ s(x, k = 100),
              se = FALSE, size = 1) +
  stat_smooth(aes(col = "10"),
              method = "gam",
              formula = y ~ s(x, k = 10),
              se = FALSE, size = .8) +
  scale_color_manual(name = "k", values = cols)
## Interactive-chart gallery: the same data rendered with several
## htmlwidgets-based packages.
## library(shiny)
## runExample("01_hello")
## runExample("04_mpg")
## plotly: wrap an existing ggplot with ggplotly()
library(plotly)
g <- ggplot(chic, aes(date, temp)) +
  geom_line(color = "grey") +
  geom_point(aes(color = season)) +
  scale_color_brewer(palette = "Dark2", guide = "none") +
  labs(x = NULL, y = "Temperature (°F)") +
  theme_bw()
g
ggplotly(g)
## ggiraph: interactive geoms with tooltip/hover ids, rendered by girafe()
library(ggiraph)
g <- ggplot(chic, aes(date, temp)) +
  geom_line(color = "grey") +
  geom_point_interactive(
    aes(color = season, tooltip = season, data_id = season)
  ) +
  scale_color_brewer(palette = "Dark2", guide = "none") +
  labs(x = NULL, y = "Temperature (°F)") +
  theme_bw()
girafe(ggobj = g)
## highcharter: Highcharts wrapper with its own hcaes() mapping
library(highcharter)
hchart(chic, "scatter", hcaes(x = date, y = temp, group = season))
## echarts4r: pipe-based ECharts interface
library(echarts4r)
chic %>%
  e_charts(date) %>%
  e_scatter(temp, symbol_size = 7) %>%
  e_visual_map(temp) %>%
  e_y_axis(name = "Temperature (°F)") %>%
  e_legend(FALSE)
## charter: Chart.js wrapper; dates passed as numeric values
library(charter)
chic$date_num <- as.numeric(chic$date)
chart(data = chic, caes(date_num, temp)) %>%
  c_scatter(caes(color = season, group = season)) %>%
  c_colors(RColorBrewer::brewer.pal(4, name = "Dark2"))
| /ggplot2/ggplot-tutorial-cedric-raw.R | no_license | kiangfc/tulous | R | false | false | 49,019 | r | #######################################################################################
# #
# Code for "A ggplot2 Tutorial for Beautiful Plotting in R" #
# cedricscherer.netlify.app/2019/08/05/a-ggplot2-tutorial-for-beautiful-plotting-in-r #
# #
# Cédric Scherer (@CedScherer | cedricphilippscherer@gmail.com) #
# Last Update: 2020-12-02 #
# #
#######################################################################################
## install CRAN packages
## install.packages(c("tidyverse", "colorspace", "corrr", "cowplot",
## "ggdark", "ggforce", "ggrepel", "ggridges", "ggsci",
## "ggtext", "ggthemes", "grid", "gridExtra", "patchwork",
## "rcartocolor", "scico", "showtext", "shiny",
## "plotly", "highcharter", "echarts4r"))
##
## install from GitHub since not on CRAN
## devtools::install_github("JohnCoene/charter")
chic <- readr::read_csv("https://raw.githubusercontent.com/Z3tt/R-Tutorials/master/ggplot2/chicago-nmmaps.csv")
tibble::glimpse(chic)
head(chic, 10)
#library(ggplot2)
library(tidyverse)
(g <- ggplot(chic, aes(x = date, y = temp)))
g + geom_point()
g + geom_line()
g + geom_line() + geom_point()
g + geom_point(color = "firebrick", shape = "diamond", size = 2)
g + geom_point(color = "firebrick", shape = "diamond", size = 2) +
geom_line(color = "firebrick", linetype = "dotted", size = .3)
theme_set(theme_bw())
g + geom_point(color = "firebrick")
ggplot(chic, aes(x = date, y = temp)) +
geom_point(color = "firebrick") +
labs(x = "Year", y = "Temperature (°F)")
ggplot(chic, aes(x = date, y = temp)) +
geom_point(color = "firebrick") +
xlab("Year") +
ylab("Temperature (°F)")
ggplot(chic, aes(x = date, y = temp)) +
geom_point(color = "firebrick") +
labs(x = "Year", y = expression(paste("Temperature (", degree ~ F, ")"^"(Hey, why should we use metric units?!)")))
ggplot(chic, aes(x = date, y = temp)) +
geom_point(color = "firebrick") +
labs(x = "Year", y = "Temperature (°F)") +
theme(axis.title.x = element_text(vjust = 0, size = 15),
axis.title.y = element_text(vjust = 2, size = 15))
ggplot(chic, aes(x = date, y = temp)) +
geom_point(color = "firebrick") +
labs(x = "Year", y = "Temperature (°F)") +
theme(axis.title.x = element_text(margin = margin(t = 10), size = 15),
axis.title.y = element_text(margin = margin(r = 10), size = 15))
ggplot(chic, aes(x = date, y = temp)) +
geom_point(color = "firebrick") +
labs(x = "Year", y = "Temperature (°F)") +
theme(axis.title = element_text(size = 15, color = "firebrick",
face = "italic"))
ggplot(chic, aes(x = date, y = temp)) +
geom_point(color = "firebrick") +
labs(x = "Year", y = "Temperature (°F)") +
theme(axis.title.x = element_text(color = "sienna", size = 15),
axis.title.y = element_text(color = "orangered", size = 15))
ggplot(chic, aes(x = date, y = temp)) +
geom_point(color = "firebrick") +
labs(x = "Year", y = "Temperature (°F)") +
theme(axis.title = element_text(color = "sienna", size = 15),
axis.title.y = element_text(color = "orangered", size = 15))
ggplot(chic, aes(x = date, y = temp)) +
geom_point(color = "firebrick") +
labs(x = "Year", y = "Temperature (°F)") +
theme(axis.title = element_text(color = "sienna", size = 15, face = "bold"),
axis.title.y = element_text(face = "bold.italic"))
ggplot(chic, aes(x = date, y = temp)) +
geom_point(color = "firebrick") +
labs(x = "Year", y = "Temperature (°F)") +
theme(axis.text = element_text(color = "dodgerblue", size = 12),
axis.text.x = element_text(face = "italic"))
ggplot(chic, aes(x = date, y = temp)) +
geom_point(color = "firebrick") +
labs(x = "Year", y = "Temperature (°F)") +
theme(axis.text.x = element_text(angle = 50, vjust = 1, hjust = 1, size = 12))
ggplot(chic, aes(x = date, y = temp)) +
geom_point(color = "firebrick") +
labs(x = "Year", y = "Temperature (°F)") +
theme(axis.ticks.y = element_blank(),
axis.text.y = element_blank())
ggplot(chic, aes(x = date, y = temp)) +
geom_point(color = "firebrick") +
labs(x = NULL, y = "")
ggplot(chic, aes(x = date, y = temp)) +
geom_point(color = "firebrick") +
labs(x = "Year", y = "Temperature (°F)") +
ylim(c(0, 50))
library(tidyverse)
chic_high <- dplyr::filter(chic, temp > 25, o3 > 20)
ggplot(chic_high, aes(x = temp, y = o3)) +
geom_point(color = "darkcyan") +
labs(x = "Temperature higher than 25°F",
y = "Ozone higher than 20 ppb") +
expand_limits(x = 0, y = 0)
library(tidyverse)
chic_high <- dplyr::filter(chic, temp > 25, o3 > 20)
ggplot(chic_high, aes(x = temp, y = o3)) +
geom_point(color = "darkcyan") +
labs(x = "Temperature higher than 25°F",
y = "Ozone higher than 20 ppb") +
coord_cartesian(xlim = c(0, NA), ylim = c(0, NA))
ggplot(chic_high, aes(x = temp, y = o3)) +
geom_point(color = "darkcyan") +
labs(x = "Temperature higher than 25°F",
y = "Ozone higher than 20 ppb") +
expand_limits(x = 0, y = 0) +
scale_x_continuous(expand = c(0, 0)) +
scale_y_continuous(expand = c(0, 0)) +
coord_cartesian(clip = "off")
ggplot(chic, aes(x = temp, y = temp + rnorm(nrow(chic), sd = 20))) +
geom_point(color = "sienna") +
labs(x = "Temperature (°F)", y = "Temperature (°F) + random noise") +
xlim(c(0, 100)) + ylim(c(0, 150)) +
coord_fixed()
ggplot(chic, aes(x = temp, y = temp + rnorm(nrow(chic), sd = 20))) +
geom_point(color = "sienna") +
labs(x = "Temperature (°F)", y = "Temperature (°F) + random noise") +
xlim(c(0, 100)) + ylim(c(0, 150)) +
coord_fixed(ratio = 1/5)
ggplot(chic, aes(x = date, y = temp)) +
geom_point(color = "firebrick") +
labs(x = "Year", y = NULL) +
scale_y_continuous(label = function(x) {return(paste(x, "Degrees Fahrenheit"))})
ggplot(chic, aes(x = date, y = temp)) +
geom_point(color = "firebrick") +
labs(x = "Year", y = "Temperature (°F)") +
ggtitle("Temperatures in Chicago")
ggplot(chic, aes(x = date, y = temp)) +
geom_point(color = "firebrick") +
labs(x = "Year", y = "Temperature (°F)",
title = "Temperatures in Chicago",
subtitle = "Seasonal pattern of daily temperatures from 1997 to 2001",
caption = "Data: NMMAPS",
tag = "Fig. 1")
ggplot(chic, aes(x = date, y = temp)) +
geom_point(color = "firebrick") +
labs(x = "Year", y = "Temperature (°F)",
title = "Temperatures in Chicago") +
theme(plot.title = element_text(face = "bold",
margin = margin(10, 0, 10, 0),
size = 14))
ggplot(chic, aes(x = date, y = temp)) +
geom_point(color = "firebrick") +
labs(x = "Year", y = NULL,
title = "Temperatures in Chicago",
caption = "Data: NMMAPS") +
theme(plot.title = element_text(hjust = 1, size = 16, face = "bold.italic"))
(g <- ggplot(chic, aes(x = date, y = temp)) +
geom_point(color = "firebrick") +
scale_y_continuous(label = function(x) {return(paste(x, "Degrees Fahrenheit"))}) +
labs(x = "Year", y = NULL,
title = "Temperatures in Chicago between 1997 and 2001 in Degrees Fahrenheit",
caption = "Data: NMMAPS") +
theme(plot.title = element_text(size = 14, face = "bold.italic"),
plot.caption = element_text(hjust = 0)))
g + theme(plot.title.position = "plot",
plot.caption.position = "plot")
library(showtext)
font_add_google("Playfair Display", ## name of Google font
"Playfair") ## name that will be used in R
font_add_google("Bangers", "Bangers")
ggplot(chic, aes(x = date, y = temp)) +
geom_point(color = "firebrick") +
labs(x = "Year", y = "Temperature (°F)",
title = "Temperatures in Chicago",
subtitle = "Daily temperatures in °F from 1997 to 2001") +
theme(plot.title = element_text(family = "Bangers", hjust = .5, size = 25),
plot.subtitle = element_text(family = "Playfair", hjust = .5, size = 15))
font_add_google("Roboto Condensed", "Roboto Condensed")
theme_set(theme_bw(base_size = 12, base_family = "Roboto Condensed"))
ggplot(chic, aes(x = date, y = temp)) +
geom_point(color = "firebrick") +
labs(x = "Year", y = "Temperature (°F)") +
ggtitle("Temperatures in Chicago\nfrom 1997 to 2001") +
theme(plot.title = element_text(lineheight = .8, size = 16))
ggplot(chic,
aes(x = date, y = temp, color = season)) +
geom_point() +
labs(x = "Year", y = "Temperature (°F)")
ggplot(chic,
aes(x = date, y = temp, color = season)) +
geom_point() +
labs(x = "Year", y = "Temperature (°F)") +
theme(legend.position = "none")
ggplot(chic,
aes(x = date, y = temp,
color = season, shape = season)) +
geom_point() +
labs(x = "Year", y = "Temperature (°F)") +
guides(color = "none")
ggplot(chic, aes(x = date, y = temp, color = season)) +
geom_point() +
labs(x = "Year", y = "Temperature (°F)") +
theme(legend.title = element_blank())
ggplot(chic, aes(x = date, y = temp, color = season)) +
geom_point() +
labs(x = "Year", y = "Temperature (°F)") +
scale_color_discrete(name = NULL)
ggplot(chic, aes(x = date, y = temp, color = season)) +
geom_point() +
labs(x = "Year", y = "Temperature (°F)") +
labs(color = NULL)
ggplot(chic, aes(x = date, y = temp, color = season)) +
geom_point() +
labs(x = "Year", y = "Temperature (°F)") +
theme(legend.position = "top")
ggplot(chic, aes(x = date, y = temp, color = season)) +
geom_point() +
labs(x = "Year", y = "Temperature (°F)") +
theme(legend.position = c(.2, .1),
legend.background = element_rect(fill = "transparent"))
ggplot(chic, aes(x = date, y = temp, color = season)) +
geom_point() +
labs(x = "Year", y = "Temperature (°F)") +
theme(legend.position = c(.5, .97),
legend.background = element_rect(fill = "transparent")) +
guides(color = guide_legend(direction = "horizontal"))
ggplot(chic, aes(x = date, y = temp, color = season)) +
geom_point() +
labs(x = "Year", y = "Temperature (°F)") +
theme(legend.title = element_text(family = "Playfair",
color = "chocolate",
size = 14, face = "bold"))
ggplot(chic, aes(x = date, y = temp, color = season)) +
geom_point() +
labs(x = "Year", y = "Temperature (°F)",
color = "Seasons\nindicated\nby colors:") +
theme(legend.title = element_text(family = "Playfair",
color = "chocolate",
size = 14, face = "bold"))
## ggplot(chic, aes(x = date, y = temp, color = season))) +
## geom_point() +
## labs(x = "Year", y = "Temperature (°F)") +
## theme(legend.title = element_text(family = "Playfair",
## color = "chocolate",
## size = 14, face = "bold")) +
## scale_color_discrete(name = "Seasons\nindicated\nby colors:")
## Order the seasons chronologically (factor levels default to
## alphabetical order) so legends and facets read Winter -> Autumn.
chic$season <-
  factor(chic$season,
         levels = c("Winter", "Spring", "Summer", "Autumn"))

## ---- Legends ----
## Default discrete legend for a categorical color mapping.
ggplot(chic, aes(x = date, y = temp, color = season)) +
  geom_point() +
  labs(x = "Year", y = "Temperature (°F)")

## Rename the legend title and its labels; style the title via theme().
ggplot(chic, aes(x = date, y = temp, color = season)) +
  geom_point() +
  labs(x = "Year", y = "Temperature (°F)") +
  scale_color_discrete("Seasons:", labels = c("Mar—May", "Jun—Aug",
                                              "Sep—Nov", "Dec—Feb")) +
  theme(legend.title = element_text(family = "Playfair",
                                    color = "chocolate",
                                    size = 14, face = 2))

## Color the background boxes of the legend keys.
ggplot(chic, aes(x = date, y = temp, color = season)) +
  geom_point() +
  labs(x = "Year", y = "Temperature (°F)") +
  theme(legend.key = element_rect(fill = "darkgoldenrod1"),
        legend.title = element_text(family = "Playfair",
                                    color = "chocolate",
                                    size = 14, face = 2)) +
  scale_color_discrete("Seasons:")

## Remove the key background and enlarge only the key symbols
## (override.aes changes the legend glyphs, not the plotted points).
ggplot(chic, aes(x = date, y = temp, color = season)) +
  geom_point() +
  labs(x = "Year", y = "Temperature (°F)") +
  theme(legend.key = element_rect(fill = NA),
        legend.title = element_text(color = "chocolate",
                                    size = 14, face = 2)) +
  scale_color_discrete("Seasons:") +
  guides(color = guide_legend(override.aes = list(size = 6)))

## geom_rug() inherits the color mapping and appears in the legend ...
ggplot(chic, aes(x = date, y = temp, color = season)) +
  geom_point() +
  labs(x = "Year", y = "Temperature (°F)") +
  geom_rug()

## ... unless the layer is excluded from the legend.
ggplot(chic, aes(x = date, y = temp, color = season)) +
  geom_point() +
  labs(x = "Year", y = "Temperature (°F)") +
  geom_rug(show.legend = FALSE)

## Fixed (unmapped) layer colors draw no legend.
ggplot(chic, aes(x = date, y = o3)) +
  geom_line(color = "gray") +
  geom_point(color = "darkorange2") +
  labs(x = "Year", y = "Ozone")

## Mapping constant strings inside aes() creates one legend entry per layer.
ggplot(chic, aes(x = date, y = o3)) +
  geom_line(aes(color = "line")) +
  geom_point(aes(color = "points")) +
  labs(x = "Year", y = "Ozone") +
  scale_color_discrete("Type:")

## Assign colors to those pseudo-levels manually and fix the key glyphs
## so "line" shows only a line and "points" only a point.
ggplot(chic, aes(x = date, y = o3)) +
  geom_line(aes(color = "line")) +
  geom_point(aes(color = "points")) +
  labs(x = "Year", y = "Ozone") +
  scale_color_manual(name = NULL,
                     guide = "legend",
                     values = c("points" = "darkorange2",
                                "line" = "gray")) +
  guides(color = guide_legend(override.aes = list(linetype = c(1, 0),
                                                  shape = c(NA, 16))))

## Continuous color mapping draws a colorbar guide by default.
ggplot(chic,
       aes(x = date, y = temp, color = temp)) +
  geom_point() +
  labs(x = "Year", y = "Temperature (°F)", color = "Temperature (°F)")

## Force a discrete-style legend for the continuous scale ...
ggplot(chic,
       aes(x = date, y = temp, color = temp)) +
  geom_point() +
  labs(x = "Year", y = "Temperature (°F)", color = "Temperature (°F)") +
  guides(color = guide_legend())

## ... or binned keys ...
ggplot(chic,
       aes(x = date, y = temp, color = temp)) +
  geom_point() +
  labs(x = "Year", y = "Temperature (°F)", color = "Temperature (°F)") +
  guides(color = guide_bins())

## ... or a stepped colorbar.
ggplot(chic,
       aes(x = date, y = temp, color = temp)) +
  geom_point() +
  labs(x = "Year", y = "Temperature (°F)", color = "Temperature (°F)") +
  guides(color = guide_colorsteps())

## ---- Panel background and grid lines ----
ggplot(chic, aes(x = date, y = temp)) +
  geom_point(color = "firebrick") +
  labs(x = "Year", y = "Temperature (°F)") +
  theme(panel.background = element_rect(fill = "gray90"),
        panel.grid.major = element_line(color = "gray10", size = .5),
        panel.grid.minor = element_line(color = "gray70", size = .25))

## Major/minor grid lines can be styled per axis.
ggplot(chic, aes(x = date, y = temp)) +
  geom_point(color = "firebrick") +
  labs(x = "Year", y = "Temperature (°F)") +
  theme(panel.background = element_rect(fill = "gray90"),
        panel.grid.major = element_line(size = .5, linetype = "dashed"),
        panel.grid.minor = element_line(size = .25, linetype = "dotted"),
        panel.grid.major.x = element_line(color = "red1"),
        panel.grid.major.y = element_line(color = "blue1"),
        panel.grid.minor.x = element_line(color = "red4"),
        panel.grid.minor.y = element_line(color = "blue4"))

## Drop the minor grid only ...
ggplot(chic, aes(x = date, y = temp)) +
  geom_point(color = "firebrick") +
  labs(x = "Year", y = "Temperature (°F)") +
  theme(panel.grid.minor = element_blank())

## ... or all grid lines.
ggplot(chic, aes(x = date, y = temp)) +
  geom_point(color = "firebrick") +
  labs(x = "Year", y = "Temperature (°F)") +
  theme(panel.grid = element_blank())

## Control where the major/minor grid breaks fall.
ggplot(chic, aes(x = date, y = temp)) +
  geom_point(color = "firebrick") +
  labs(x = "Year", y = "Temperature (°F)") +
  scale_y_continuous(breaks = seq(0, 100, 10),
                     minor_breaks = seq(0, 100, 2.5))

## Panel background fill and outline.
ggplot(chic, aes(x = date, y = temp)) +
  geom_point(color = "#1D8565", size = 2) +
  labs(x = "Year", y = "Temperature (°F)") +
  theme(panel.background = element_rect(fill = "#64D2AA",
                                        color = "#64D2AA", size = 2))

## panel.border is drawn on top of the data (semi-transparent fill here).
ggplot(chic, aes(x = date, y = temp)) +
  geom_point(color = "#1D8565", size = 2) +
  labs(x = "Year", y = "Temperature (°F)") +
  theme(panel.border = element_rect(fill = "#64D2AA99",
                                    color = "#64D2AA", size = 2))

## ---- Plot background and margins ----
ggplot(chic, aes(x = date, y = temp)) +
  geom_point(color = "firebrick") +
  labs(x = "Year", y = "Temperature (°F)") +
  theme(plot.background = element_rect(fill = "gray60",
                                       color = "gray30", size = 2))

## Transparent panel so the plot background shows through.
ggplot(chic, aes(x = date, y = temp)) +
  geom_point(color = "firebrick") +
  labs(x = "Year", y = "Temperature (°F)") +
  theme(panel.background = element_rect(fill = NA),
        plot.background = element_rect(fill = "gray60",
                                       color = "gray30", size = 2))

## plot.margin takes top, right, bottom, left.
ggplot(chic, aes(x = date, y = temp)) +
  geom_point(color = "firebrick") +
  labs(x = "Year", y = "Temperature (°F)") +
  theme(plot.background = element_rect(fill = "gray60"),
        plot.margin = unit(c(1, 3, 1, 8), "cm"))

## ---- Faceting ----
## Base plot reused by the facet examples below.
g <- ggplot(chic, aes(x = date, y = temp)) +
  geom_point(color = "chartreuse4", alpha = .3) +
  labs(x = "Year", y = "Temperature (°F)") +
  theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1))
g + facet_wrap(~ year, nrow = 1)
g + facet_wrap(~ year, nrow = 2)
g + facet_wrap(~ year, ncol = 3) + theme(axis.title.x = element_text(hjust = .15))
g + facet_wrap(~ year, nrow = 2, scales = "free")

## facet_grid() builds a panel matrix from two variables.
ggplot(chic, aes(x = date, y = temp)) +
  geom_point(color = "orangered", alpha = .3) +
  theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1)) +
  labs(x = "Year", y = "Temperature (°F)") +
  facet_grid(year ~ season)

## facet_wrap() can also combine two variables.
g + facet_wrap(year ~ season, nrow = 4, scales = "free_x")

## Style the facet strips.
g + facet_wrap(~ year, nrow = 1, scales = "free_x") +
  theme(strip.text = element_text(face = "bold", color = "chartreuse4",
                                  hjust = 0, size = 20),
        strip.background = element_rect(fill = "chartreuse3", linetype = "dotted"))
library(ggtext)
library(rlang)
element_textbox_highlight <- function(..., hi.labels = NULL, hi.fill = NULL,
                                      hi.col = NULL, hi.box.col = NULL, hi.family = NULL) {
  ## Drop-in variant of ggtext::element_textbox() that carries extra
  ## "hi.*" settings consumed by the matching element_grob() method:
  ## strip labels listed in `hi.labels` get the hi.fill / hi.col /
  ## hi.box.col / hi.family overrides; all other labels keep the
  ## regular textbox styling passed through `...`.
  box <- element_textbox(...)
  highlight_opts <- list(
    hi.labels = hi.labels,
    hi.fill = hi.fill,
    hi.col = hi.col,
    hi.box.col = hi.box.col,
    hi.family = hi.family
  )
  out <- c(box, highlight_opts)
  class(out) <- c("element_textbox_highlight", "element_textbox",
                  "element_text", "element")
  out
}
element_grob.element_textbox_highlight <- function(element, label = "", ...) {
  ## S3 grob builder: when the label being drawn is one of the flagged
  ## `hi.labels`, swap in the highlight styling before delegating to the
  ## regular element_textbox drawing method. Each hi.* value only takes
  ## effect when it was actually supplied (non-NULL), matching the
  ## `%||%` fallback semantics.
  if (label %in% element$hi.labels) {
    if (!is.null(element$hi.fill))    element$fill       <- element$hi.fill
    if (!is.null(element$hi.col))     element$colour     <- element$hi.col
    if (!is.null(element$hi.box.col)) element$box.colour <- element$hi.box.col
    if (!is.null(element$hi.family))  element$family     <- element$hi.family
  }
  NextMethod()
}
## Highlighted facet strips: panels listed in hi.labels get the inverse
## color scheme defined by hi.fill / hi.box.col / hi.col.
g + facet_wrap(year ~ season, nrow = 4, scales = "free_x") +
  theme(
    strip.background = element_blank(),
    strip.text = element_textbox_highlight(
      family = "Playfair", size = 12, face = "bold",
      fill = "white", box.color = "chartreuse4", color = "chartreuse4",
      halign = .5, linetype = 1, r = unit(5, "pt"), width = unit(1, "npc"),
      padding = margin(5, 0, 3, 0), margin = margin(0, 1, 3, 1),
      hi.labels = c("1997", "1998", "1999", "2000"),
      hi.fill = "chartreuse4", hi.box.col = "black", hi.col = "white"
    )
  )

## Highlight a single panel ("Summer") in both the points and the strip.
ggplot(chic, aes(x = date, y = temp)) +
  geom_point(aes(color = season == "Summer"), alpha = .3) +
  labs(x = "Year", y = "Temperature (°F)") +
  facet_wrap(~ season, nrow = 1) +
  scale_color_manual(values = c("gray40", "firebrick"), guide = "none") +
  theme(
    axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1),
    strip.background = element_blank(),
    strip.text = element_textbox_highlight(
      size = 12, face = "bold",
      fill = "white", box.color = "white", color = "gray40",
      halign = .5, linetype = 1, r = unit(0, "pt"), width = unit(1, "npc"),
      padding = margin(2, 0, 1, 0), margin = margin(0, 1, 3, 1),
      hi.labels = "Summer", hi.family = "Bangers",
      hi.fill = "firebrick", hi.box.col = "firebrick", hi.col = "white"
    )
  )

## ---- Multi-panel layouts ----
p1 <- ggplot(chic, aes(x = date, y = temp,
                       color = season)) +
  geom_point() +
  geom_rug() +
  labs(x = "Year", y = "Temperature (°F)")
p2 <- ggplot(chic, aes(x = date, y = o3)) +
  geom_line(color = "gray") +
  geom_point(color = "darkorange2") +
  labs(x = "Year", y = "Ozone")

## patchwork: `+` places plots side by side, `/` stacks them.
library(patchwork)
p1 + p2
p1 / p2
(g + p2) / p1

## cowplot alternative.
library(cowplot)
plot_grid(plot_grid(g, p1), p2, ncol = 1)

## gridExtra alternative with an explicit layout matrix.
library(gridExtra)
grid.arrange(g, p1, p2,
             layout_matrix = rbind(c(1, 2), c(3, 3)))

## patchwork "design" string: letters = plot slots, `#` = empty cells.
layout <- "
AABBBB#
AACCDDE
##CCDD#
##CC###
"
p2 + p1 + p1 + g + p2 +
  plot_layout(design = layout)

## ---- Working with colors ----
## Mapped fill plus a fixed outline color.
ggplot(chic, aes(year)) +
  geom_bar(aes(fill = season), color = "grey", size = 2) +
  labs(x = "Year", y = "Observations", fill = "Season:")
## A fixed point color.
ggplot(chic, aes(x = date, y = temp)) +
  geom_point(color = "steelblue", size = 2) +
  labs(x = "Year", y = "Temperature (°F)")

## Shapes 21-25 have both an outline color and a fill.
ggplot(chic, aes(x = date, y = temp)) +
  geom_point(shape = 21, size = 2, stroke = 1,
             color = "#3cc08f", fill = "#c08f3c") +
  labs(x = "Year", y = "Temperature (°F)")

## Base plot for the qualitative palette examples (outer parentheses
## assign and print in one step).
(ga <- ggplot(chic, aes(x = date, y = temp, color = season)) +
  geom_point() +
  labs(x = "Year", y = "Temperature (°F)", color = NULL))

## Manual palette.
ga + scale_color_manual(values = c("dodgerblue4",
                                   "darkolivegreen4",
                                   "darkorchid3",
                                   "goldenrod1"))
## ColorBrewer palette.
ga + scale_color_brewer(palette = "Set1")
## Palettes from ggthemes and ggsci.
library(ggthemes)
ga + scale_color_tableau()
library(ggsci)
g1 <- ga + scale_color_aaas()
g2 <- ga + scale_color_npg()
library(patchwork)
(g1 + g2) * theme(legend.position = "top")

## Base plot for the continuous palette examples.
gb <- ggplot(chic, aes(x = date, y = temp, color = temp)) +
  geom_point() +
  labs(x = "Year", y = "Temperature (°F)", color = "Temperature (°F):")
gb + scale_color_continuous()
## gb + scale_color_gradient()
mid <- mean(chic$temp) ## midpoint
gb + scale_color_gradient2(midpoint = mid)
gb + scale_color_gradient(low = "darkkhaki",
                          high = "darkgreen")
gb + scale_color_gradient2(midpoint = mid, low = "#dd8a0b",
                           mid = "grey92", high = "#32a676")

## The four viridis color map variants.
p1 <- gb + scale_color_viridis_c() + ggtitle("'viridis' (default)")
p2 <- gb + scale_color_viridis_c(option = "inferno") + ggtitle("'inferno'")
p3 <- gb + scale_color_viridis_c(option = "plasma") + ggtitle("'plasma'")
p4 <- gb + scale_color_viridis_c(option = "cividis") + ggtitle("'cividis'")
library(patchwork)
(p1 + p2 + p3 + p4) * theme(legend.position = "bottom")
ga + scale_color_viridis_d(guide = "none")

## CARTOColors and scico (scientific color map) palettes.
library(rcartocolor)
g1 <- gb + scale_color_carto_c(palette = "BurgYl")
g2 <- gb + scale_color_carto_c(palette = "Earth")
(g1 + g2) * theme(legend.position = "bottom")
library(scico)
g1 <- gb + scale_color_scico(palette = "berlin")
g2 <- gb + scale_color_scico(palette = "hawaii", direction = -1)
(g1 + g2) * theme(legend.position = "bottom")

## after_scale(): reuse the scaled color, inverted, for an inner dot.
library(ggdark)
ggplot(chic, aes(date, temp, color = temp)) +
  geom_point(size = 5) +
  geom_point(aes(color = temp,
                 color = after_scale(invert_color(color))),
             size = 2) +
  scale_color_scico(palette = "hawaii", guide = "none") +
  labs(x = "Year", y = "Temperature (°F)")

## after_scale() again: derive the box fill from the outline color.
library(colorspace)
ggplot(chic, aes(date, temp)) +
  geom_boxplot(aes(color = season,
                   fill = after_scale(desaturate(lighten(color, .6), .6))),
               size = 1) +
  scale_color_brewer(palette = "Dark2", guide = "none") +
  labs(x = "Year", y = "Temperature (°F)")

## ---- Complete themes from extension packages ----
library(ggthemes)
ggplot(chic, aes(x = date, y = temp, color = season)) +
  geom_point() +
  labs(x = "Year", y = "Temperature (°F)") +
  ggtitle("Ups and Downs of Chicago's Daily Temperatures") +
  theme_economist() +
  scale_color_economist(name = NULL)
library(dplyr)
chic_2000 <- filter(chic, year == 2000)
ggplot(chic_2000, aes(x = temp, y = o3)) +
  geom_point() +
  labs(x = "Temperature (°F)", y = "Ozone") +
  ggtitle("Temperature and Ozone Levels During the Year 2000 in Chicago") +
  theme_tufte()
library(hrbrthemes)
ggplot(chic, aes(x = temp, y = o3)) +
  geom_point(aes(color = dewpoint), show.legend = FALSE) +
  labs(x = "Temperature (°F)", y = "Ozone") +
  ggtitle("Temperature and Ozone Levels in Chicago") +
  theme_modern_rc()

## Base-theme arguments: font family/size and line/rect sizes.
g <- ggplot(chic, aes(x = date, y = temp)) +
  geom_point(color = "firebrick") +
  labs(x = "Year", y = "Temperature (°F)",
       title = "Temperatures in Chicago")
g + theme_bw(base_family = "Playfair")
g + theme_bw(base_size = 30, base_family = "Roboto Condensed")
g + theme_bw(base_line_size = 1, base_rect_size = 1)
## Print the source of theme_gray() as a template for custom themes.
theme_gray
## A complete custom theme modeled on theme_gray(). `complete = TRUE`
## marks it as standalone: unset elements inherit from the base elements
## defined here rather than from the previously active theme.
theme_custom <- function (base_size = 12, base_family = "Roboto Condensed") {
  ## Common spacing unit derived from the base font size.
  half_line <- base_size/2
  theme(
    ## Base elements every more specific element inherits from.
    line = element_line(color = "black", size = .5,
                        linetype = 1, lineend = "butt"),
    rect = element_rect(fill = "white", color = "black",
                        size = .5, linetype = 1),
    text = element_text(family = base_family, face = "plain",
                        color = "black", size = base_size,
                        lineheight = .9, hjust = .5, vjust = .5,
                        angle = 0, margin = margin(), debug = FALSE),
    ## Axis lines, tick labels, ticks and titles.
    axis.line = element_blank(),
    axis.line.x = NULL,
    axis.line.y = NULL,
    axis.text = element_text(size = base_size * 1.1, color = "gray30"),
    axis.text.x = element_text(margin = margin(t = .8 * half_line/2),
                               vjust = 1),
    axis.text.x.top = element_text(margin = margin(b = .8 * half_line/2),
                                   vjust = 0),
    axis.text.y = element_text(margin = margin(r = .8 * half_line/2),
                               hjust = 1),
    axis.text.y.right = element_text(margin = margin(l = .8 * half_line/2),
                                     hjust = 0),
    axis.ticks = element_line(color = "gray30", size = .7),
    axis.ticks.length = unit(half_line / 1.5, "pt"),
    axis.ticks.length.x = NULL,
    axis.ticks.length.x.top = NULL,
    axis.ticks.length.x.bottom = NULL,
    axis.ticks.length.y = NULL,
    axis.ticks.length.y.left = NULL,
    axis.ticks.length.y.right = NULL,
    axis.title.x = element_text(margin = margin(t = half_line),
                                vjust = 1, size = base_size * 1.3,
                                face = "bold"),
    axis.title.x.top = element_text(margin = margin(b = half_line),
                                    vjust = 0),
    axis.title.y = element_text(angle = 90, vjust = 1,
                                margin = margin(r = half_line),
                                size = base_size * 1.3, face = "bold"),
    axis.title.y.right = element_text(angle = -90, vjust = 0,
                                      margin = margin(l = half_line)),
    ## Legend layout and styling.
    legend.background = element_rect(color = NA),
    legend.spacing = unit(.4, "cm"),
    legend.spacing.x = NULL,
    legend.spacing.y = NULL,
    legend.margin = margin(.2, .2, .2, .2, "cm"),
    legend.key = element_rect(fill = "gray95", color = "white"),
    legend.key.size = unit(1.2, "lines"),
    legend.key.height = NULL,
    legend.key.width = NULL,
    legend.text = element_text(size = rel(.8)),
    legend.text.align = NULL,
    legend.title = element_text(hjust = 0),
    legend.title.align = NULL,
    legend.position = "right",
    legend.direction = NULL,
    legend.justification = "center",
    legend.box = NULL,
    legend.box.margin = margin(0, 0, 0, 0, "cm"),
    legend.box.background = element_blank(),
    legend.box.spacing = unit(.4, "cm"),
    ## Panel (plotting area) background, border and grid.
    panel.background = element_rect(fill = "white", color = NA),
    panel.border = element_rect(color = "gray30",
                                fill = NA, size = .7),
    panel.grid.major = element_line(color = "gray90", size = 1),
    panel.grid.minor = element_line(color = "gray90", size = .5,
                                    linetype = "dashed"),
    panel.spacing = unit(base_size, "pt"),
    panel.spacing.x = NULL,
    panel.spacing.y = NULL,
    panel.ontop = FALSE,
    ## Facet strips.
    strip.background = element_rect(fill = "white", color = "gray30"),
    strip.text = element_text(color = "black", size = base_size),
    strip.text.x = element_text(margin = margin(t = half_line,
                                                b = half_line)),
    strip.text.y = element_text(angle = -90,
                                margin = margin(l = half_line,
                                                r = half_line)),
    strip.text.y.left = element_text(angle = 90),
    strip.placement = "inside",
    strip.placement.x = NULL,
    strip.placement.y = NULL,
    strip.switch.pad.grid = unit(0.1, "cm"),
    strip.switch.pad.wrap = unit(0.1, "cm"),
    ## Whole-plot elements: titles, caption, tag and outer margin.
    plot.background = element_rect(color = NA),
    plot.title = element_text(size = base_size * 1.8, hjust = .5,
                              vjust = 1, face = "bold",
                              margin = margin(b = half_line * 1.2)),
    plot.title.position = "panel",
    plot.subtitle = element_text(size = base_size * 1.3,
                                 hjust = .5, vjust = 1,
                                 margin = margin(b = half_line * .9)),
    plot.caption = element_text(size = rel(0.9), hjust = 1, vjust = 1,
                                margin = margin(t = half_line * .9)),
    plot.caption.position = "panel",
    plot.tag = element_text(size = rel(1.2), hjust = .5, vjust = .5),
    plot.tag.position = "topleft",
    plot.margin = margin(base_size, base_size, base_size, base_size),
    complete = TRUE
  )
}
## Register the custom theme as the session default for all later plots.
theme_set(theme_custom())
## `guides(color = "none")` suppresses the legend; the logical form
## `guides(color = FALSE)` is deprecated (ggplot2 >= 3.3.4) and the rest
## of this script already uses the "none" spelling.
ggplot(chic, aes(x = date, y = temp, color = season)) +
  geom_point() + labs(x = "Year", y = "Temperature (°F)") + guides(color = "none")
## Tweak the active theme in place with theme_update().
## NOTE(review): this assignment overwrites the `theme_custom` *function*
## defined above with the returned theme object — rename the target if
## the function is still needed afterwards.
theme_custom <- theme_update(panel.background = element_rect(fill = "gray60"))
ggplot(chic, aes(x = date, y = temp, color = season)) +
  geom_point() + labs(x = "Year", y = "Temperature (°F)") + guides(color = "none")
theme_custom <- theme_update(panel.background = element_rect(fill = "white"),
                             panel.grid.major = element_line(size = .5),
                             panel.grid.minor = element_blank())
## ---- Reference lines ----
## Fixed horizontal reference lines.
ggplot(chic, aes(x = date, y = temp, color = o3)) +
  geom_point() +
  geom_hline(yintercept = c(0, 73)) +
  labs(x = "Year", y = "Temperature (°F)")
## Scatter reused by the line-annotation examples.
g <- ggplot(chic, aes(x = temp, y = dewpoint)) +
  geom_point(alpha = .5) +
  labs(x = "Temperature (°F)", y = "Dewpoint")
## Data-driven reference lines at the medians.
g +
  geom_vline(aes(xintercept = median(temp)), size = 1.5,
             color = "firebrick", linetype = "dashed") +
  geom_hline(aes(yintercept = median(dewpoint)), size = 1.5,
             color = "firebrick", linetype = "dashed")
## Regression line drawn from the fitted intercept/slope, with the
## fitted equation pasted into the title.
reg <- lm(dewpoint ~ temp, data = chic)
g +
  geom_abline(intercept = coefficients(reg)[1],
              slope = coefficients(reg)[2],
              color = "darkorange2", size = 1.5) +
  labs(title = paste0("y = ", round(coefficients(reg)[2], 2),
                      " * x + ", round(coefficients(reg)[1], 2)))
## Line segments of limited extent.
g +
  ## vertical line
  geom_linerange(aes(x = 50, ymin = 20, ymax = 55),
                 color = "steelblue", size = 2) +
  ## horizontal line
  geom_linerange(aes(xmin = -Inf, xmax = 25, y = 0),
                 color = "red", size = 1)
## Arbitrary straight segment between two points.
g +
  geom_segment(aes(x = 50, xend = 75,
                   y = 20, yend = 45),
               color = "purple", size = 2)
## Curved connectors; curvature = 0 yields a straight line.
g +
  geom_curve(aes(x = 0, y = 60, xend = 75, yend = 0),
             size = 2, color = "tan") +
  geom_curve(aes(x = 0, y = 60, xend = 75, yend = 0),
             curvature = -0.7, angle = 45,
             color = "darkgoldenrod1", size = 1) +
  geom_curve(aes(x = 0, y = 60, xend = 75, yend = 0),
             curvature = 0, size = 1.5)
## Arrowheads on curves (one-sided open, two-sided closed).
g +
  geom_curve(aes(x = 0, y = 60, xend = 75, yend = 0),
             size = 2, color = "tan",
             arrow = arrow(length = unit(0.07, "npc"))) +
  geom_curve(aes(x = 5, y = 55, xend = 70, yend = 5),
             curvature = -0.7, angle = 45,
             color = "darkgoldenrod1", size = 1,
             arrow = arrow(length = unit(0.03, "npc"),
                           type = "closed",
                           ends = "both"))
## ---- Text labels ----
## Reproducible 1% sample per season to keep the labels readable.
set.seed(2020)
library(dplyr)
sample <- chic %>%
  dplyr::group_by(season) %>%
  dplyr::sample_frac(0.01)
## code without pipes:
## sample <- sample_frac(group_by(chic, season), .01)
ggplot(sample, aes(x = date, y = temp, label = season)) +
  geom_point() +
  geom_text(aes(color = factor(temp)), hjust = .5, vjust = -.5) +
  labs(x = "Year", y = "Temperature (°F)") +
  xlim(as.Date(c('1997-01-01', '2000-12-31'))) +
  ylim(c(0, 90)) +
  theme(legend.position = "none")
## geom_label() draws the text on a rounded background box.
ggplot(sample, aes(x = date, y = temp, label = season)) +
  geom_point() +
  geom_label(aes(fill = factor(temp)), color = "white",
             fontface = "bold", hjust = .5, vjust = -.25) +
  labs(x = "Year", y = "Temperature (°F)") +
  xlim(as.Date(c('1997-01-01', '2000-12-31'))) +
  ylim(c(0, 90)) +
  theme(legend.position = "none")
## ggrepel repositions labels to avoid overlaps.
library(ggrepel)
ggplot(sample, aes(x = date, y = temp, label = season)) +
  geom_point() +
  geom_label_repel(aes(fill = factor(temp)),
                   color = "white", fontface = "bold") +
  labs(x = "Year", y = "Temperature (°F)") +
  theme(legend.position = "none")
## ---- Single text annotations ----
g <-
  ggplot(chic, aes(x = temp, y = dewpoint)) +
  geom_point(alpha = .5) +
  labs(x = "Temperature (°F)", y = "Dewpoint")
## Naive version over-plots one label per row of the data ...
g +
  geom_text(aes(x = 25, y = 60,
                label = "This is an useful annotation"))
## ... stat = "unique" draws it exactly once.
g +
  geom_text(aes(x = 25, y = 60,
                label = "This is an useful annotation"),
            stat = "unique")
g +
  geom_text(aes(x = 25, y = 60,
                label = "This is an useful annotation"),
            stat = "unique", family = "Bangers",
            size = 7, color = "darkcyan")
## Annotation placed in a single facet only, via a one-row data frame
## whose `season` value picks the target panel.
ann <- data.frame(
  o3 = 30,
  temp = 20,
  season = factor("Summer", levels = levels(chic$season)),
  label = "Here is enough space\nfor some annotations."
)
g <-
  ggplot(chic, aes(x = o3, y = temp)) +
  geom_point() +
  labs(x = "Ozone", y = "Temperature (°F)")
g +
  geom_text(data = ann, aes(label = label),
            size = 7, fontface = "bold",
            family = "Roboto Condensed") +
  facet_wrap(~season)
## A fixed x position misplaces the label once panel scales are free ...
g +
  geom_text(aes(x = 23, y = 97,
                label = "This is not an useful annotation"),
            size = 5, fontface = "bold") +
  scale_y_continuous(limits = c(NA, 100)) +
  facet_wrap(~season, scales = "free_x")
## ... so compute a per-panel x midpoint to center the label.
library(tidyverse)
(ann <-
  chic %>%
  group_by(season) %>%
  summarize(o3 = min(o3, na.rm = TRUE) +
              (max(o3, na.rm = TRUE) - min(o3, na.rm = TRUE)) / 2))
ann
g +
  geom_text(data = ann,
            aes(x = o3, y = 97,
                label = "This is an useful annotation"),
            size = 5, fontface = "bold") +
  scale_y_continuous(limits = c(NA, 100)) +
  facet_wrap(~season, scales = "free_x")
## A grid grob positioned in npc units keeps its place in every panel.
library(grid)
my_grob <- grobTree(textGrob("This text stays in place!",
                             x = .1, y = .9, hjust = 0,
                             gp = gpar(col = "black",
                                       fontsize = 15,
                                       fontface = "bold")))
g +
  annotation_custom(my_grob) +
  facet_wrap(~season, scales = "free_x") +
  scale_y_continuous(limits = c(NA, 100))
## ---- Rich text (Markdown/HTML) annotations via ggtext ----
library(ggtext)
lab_md <- "This plot shows **temperature** in *°F* versus **ozone level** in *ppm*"
g +
  geom_richtext(aes(x = 35, y = 3, label = lab_md),
                stat = "unique")
lab_html <- "★ This plot shows <b style='color:red;'>temperature</b> in <i>°F</i> versus <b style='color:blue;'>ozone level</b>in <i>ppm</i> ★"
g +
  geom_richtext(aes(x = 33, y = 3, label = lab_html),
                stat = "unique")
## Rotated, box-styled rich text.
g +
  geom_richtext(aes(x = 10, y = 25, label = lab_md),
                stat = "unique", angle = 30,
                color = "white", fill = "steelblue",
                label.color = NA, hjust = 0, vjust = 0,
                family = "Playfair Display")
## Word-wrapped text box of fixed width.
lab_long <- "**Lorem ipsum dolor**<br><i style='font-size:8pt;color:red;'>Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.<br>Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.</i>"
g +
  geom_textbox(aes(x = 40, y = 10, label = lab_long),
               width = unit(15, "lines"), stat = "unique")
## ---- Coordinate systems ----
## Flip x and y ...
ggplot(chic, aes(x = season, y = o3)) +
  geom_boxplot(fill = "indianred") +
  labs(x = "Season", y = "Ozone") +
  coord_flip()
## ... or map the categorical variable to y directly.
ggplot(chic, aes(x = o3, y = season)) +
  geom_boxplot(fill = "indianred", orientation = "y") +
  labs(x = "Ozone", y = "Season")
## Fix the aspect ratio between axis units.
ggplot(chic, aes(x = temp, y = o3)) +
  geom_point() +
  labs(x = "Temperature (°F)", y = "Ozone Level") +
  scale_x_continuous(breaks = seq(0, 80, by = 20)) +
  coord_fixed(ratio = 1)
ggplot(chic, aes(x = temp, y = o3)) +
  geom_point() +
  labs(x = "Temperature (°F)", y = "Ozone Level") +
  scale_x_continuous(breaks = seq(0, 80, by = 20)) +
  coord_fixed(ratio = 1/3) +
  theme(plot.background = element_rect(fill = "grey80"))
## Reverse an axis.
ggplot(chic, aes(x = date, y = temp, color = o3)) +
  geom_point() +
  labs(x = "Year", y = "Temperature (°F)") +
  scale_y_reverse()
## the default
ggplot(chic, aes(x = temp, y = season)) +
  geom_jitter(aes(color = season),
              orientation = "y", show.legend = FALSE) +
  labs(x = "Temperature (°F)", y = NULL)
## Reverse the discrete axis order with forcats.
library(forcats)
ggplot(chic, aes(x = temp, y = fct_rev(season))) +
  geom_jitter(aes(color = season),
              orientation = "y", show.legend = FALSE) +
  labs(x = "Temperature (°F)", y = NULL)
## Log-scaled axis.
ggplot(chic, aes(x = date, y = temp, color = o3)) +
  geom_point() +
  labs(x = "Year", y = "Temperature (°F)") +
  scale_y_log10(lim = c(0.1, 100))
library(tidyverse)
## Median ozone per season on polar coordinates (a "bullseye" chart).
## `guides(fill = "none")` replaces the deprecated logical form
## `guides(fill = FALSE)` (ggplot2 >= 3.3.4), matching the "none"
## spelling already used elsewhere in this script.
chic %>%
  dplyr::group_by(season) %>%
  dplyr::summarize(o3 = median(o3)) %>%
  ggplot(aes(x = season, y = o3)) +
  geom_col(aes(fill = season), color = NA) +
  labs(x = "", y = "Median Ozone Level") +
  coord_polar() +
  guides(fill = "none")
## Share of days exceeding the overall median ozone level, by season.
chic_sum <-
  chic %>%
  dplyr::mutate(o3_avg = median(o3)) %>%
  dplyr::filter(o3 > o3_avg) %>%
  dplyr::mutate(n_all = n()) %>%
  dplyr::group_by(season) %>%
  dplyr::summarize(rel = n() / unique(n_all))
## Pie chart: one stacked bar on polar (theta = "y") coordinates.
ggplot(chic_sum, aes(x = "", y = rel)) +
  geom_col(aes(fill = season), width = 1, color = NA) +
  labs(x = "", y = "Proportion of Days Exceeding\nthe Median Ozone Level") +
  coord_polar(theta = "y") +
  scale_fill_brewer(palette = "Set1", name = "Season:") +
  theme(axis.ticks = element_blank(),
        panel.grid = element_blank())
## Same chart without the polar transform, for comparison.
ggplot(chic_sum, aes(x = "", y = rel)) +
  geom_col(aes(fill = season), width = 1, color = NA) +
  labs(x = "", y = "Proportion of Days Exceeding\nthe Median Ozone Level") +
  #coord_polar(theta = "y") +
  scale_fill_brewer(palette = "Set1", name = "Season:") +
  theme(axis.ticks = element_blank(),
        panel.grid = element_blank())
## ---- Distributions per group ----
g <-
  ggplot(chic, aes(x = season, y = o3,
                   color = season)) +
  labs(x = "Season", y = "Ozone") +
  scale_color_brewer(palette = "Dark2", guide = "none")
g + geom_boxplot()
g + geom_point()
g + geom_point(alpha = .1)
g + geom_jitter(width = .3, alpha = .5)
g + geom_violin(fill = "gray80", size = 1, alpha = .5)
g + geom_violin(fill = "gray80", size = 1, alpha = .5) +
  geom_jitter(alpha = .25, width = .3) +
  coord_flip()
## ggforce::geom_sina() jitters the points inside the violin outline.
library(ggforce)
g + geom_violin(fill = "gray80", size = 1, alpha = .5) +
  geom_sina(alpha = .25) +
  coord_flip()
## Violin plus a slim inner boxplot (whiskers and outliers hidden).
g + geom_violin(aes(fill = season), size = 1, alpha = .5) +
  geom_boxplot(outlier.alpha = 0, coef = 0,
               color = "gray40", width = .2) +
  scale_fill_brewer(palette = "Dark2", guide = "none") +
  coord_flip()
## Marginal rug plots on both default sides ...
ggplot(chic, aes(x = date, y = temp,
                 color = season)) +
  geom_point(show.legend = FALSE) +
  geom_rug(show.legend = FALSE) +
  labs(x = "Year", y = "Temperature (°F)")
## ... or only on the right-hand side.
ggplot(chic, aes(x = date, y = temp, color = season)) +
  geom_point(show.legend = FALSE) +
  geom_rug(sides = "r", alpha = .3, show.legend = FALSE) +
  labs(x = "Year", y = "Temperature (°F)")
## ---- Correlation heatmap ----
## Lower-triangular correlation matrix via the corrr package.
library(tidyverse)
corm <-
  chic %>%
  select(death, temp, dewpoint, pm10, o3) %>%
  corrr::correlate(diagonal = 1) %>%
  corrr::shave(upper = FALSE)
corm
## Reshape to long format; fct_inorder() preserves the variable order.
corm <- corm %>%
  pivot_longer(
    cols = -rowname,
    names_to = "colname",
    values_to = "corr"
  ) %>%
  mutate(rowname = fct_inorder(rowname),
         colname = fct_inorder(colname))
corm
## Basic tile heatmap with rounded correlation labels.
ggplot(corm, aes(rowname, fct_rev(colname),
                 fill = corr)) +
  geom_tile() +
  geom_text(aes(label = round(corr, 2))) +
  coord_fixed() +
  labs(x = NULL, y = NULL)
## Polished version: white labels on strong correlations, diverging
## palette, legend placed inside the empty upper triangle.
ggplot(corm, aes(rowname, fct_rev(colname),
                 fill = corr)) +
  geom_tile() +
  geom_text(aes(
    label = format(round(corr, 2), nsmall = 2),
    color = abs(corr) < .75
  )) +
  coord_fixed(expand = FALSE) +
  scale_color_manual(values = c("white", "black"),
                     guide = "none") +
  scale_fill_distiller(
    palette = "PuOr", na.value = "white",
    direction = 1, limits = c(-1, 1)
  ) +
  labs(x = NULL, y = NULL) +
  theme(panel.border = element_rect(color = NA, fill = NA),
        legend.position = c(.85, .8))
## 2D density contours of temperature vs. ozone.
## Fix: the original passed `x` twice to labs(), so the y axis was never
## labeled — the second label belongs to `y`.
ggplot(chic, aes(temp, o3)) +
  geom_density_2d() +
  labs(x = "Temperature (°F)", y = "Ozone Level")
## Filled contour bands (legend suppressed).
ggplot(chic, aes(temp, o3)) +
  geom_density_2d_filled(show.legend = FALSE) +
  coord_cartesian(expand = FALSE) +
  labs(x = "Temperature (°F)", y = "Ozone Level")
## interpolate data
library(akima)
fld <- with(chic, interp(x = temp, y = o3, z = dewpoint))
## prepare data in long format
library(reshape2)
df <- melt(fld$z, na.rm = TRUE)
names(df) <- c("x", "y", "Dewpoint")
## Base plot for the interpolated dewpoint surface.
g <- ggplot(data = df, aes(x = x, y = y, z = Dewpoint)) +
  labs(x = "Temperature (°F)", y = "Ozone Level",
       color = "Dewpoint")
## Contour lines colored by contour level. Modernized: after_stat()
## replaces the deprecated `..var..` notation for computed aesthetics.
g + stat_contour(aes(color = after_stat(level), fill = Dewpoint))
## Raster of the interpolated values ...
g + geom_tile(aes(fill = Dewpoint)) +
  scale_fill_viridis_c(option = "inferno")
## ... with contour lines overlaid.
g + geom_tile(aes(fill = Dewpoint)) +
  stat_contour(color = "white", size = .7, bins = 5) +
  scale_fill_viridis_c()
## ---- Hexagonal binning ----
ggplot(chic, aes(temp, o3)) +
  geom_hex() +
  scale_fill_distiller(palette = "YlOrRd", direction = 1) +
  labs(x = "Temperature (°F)", y = "Ozone Level")
## Match the hex outline color to the fill by mapping the same
## computed count to both scales.
ggplot(chic, aes(temp, o3)) +
  geom_hex(aes(color = after_stat(count))) +
  scale_fill_distiller(palette = "YlOrRd", direction = 1) +
  scale_color_distiller(palette = "YlOrRd", direction = 1) +
  labs(x = "Temperature (°F)", y = "Ozone Level")
## Fixed grey outlines instead.
ggplot(chic, aes(temp, o3)) +
  geom_hex(color = "grey") +
  scale_fill_distiller(palette = "YlOrRd", direction = 1) +
  labs(x = "Temperature (°F)", y = "Ozone Level")
## Map the computed density instead of raw counts.
ggplot(chic, aes(temp, o3, fill = after_stat(density))) +
  geom_hex(bins = 50, color = "grey") +
  scale_fill_distiller(palette = "YlOrRd", direction = 1) +
  labs(x = "Temperature (°F)", y = "Ozone Level")
## Rectangular-bin alternative.
ggplot(chic, aes(temp, o3, fill = after_stat(density))) +
  geom_bin2d(bins = 15, color = "grey") +
  scale_fill_distiller(palette = "YlOrRd", direction = 1) +
  labs(x = "Temperature (°F)", y = "Ozone Level")
## ---- Ridge plots (ggridges) ----
library(ggridges)
ggplot(chic, aes(x = temp, y = factor(year))) +
  geom_density_ridges(fill = "gray90") +
  labs(x = "Temperature (°F)", y = "Year")
## Overlapping, year-colored ridges. Modernized: `guides(fill = "none")`
## replaces the deprecated logical `guides(fill = FALSE)` form.
ggplot(chic, aes(x = temp, y = factor(year), fill = year)) +
  geom_density_ridges(alpha = .8, color = "white",
                      scale = 2.5, rel_min_height = .01) +
  labs(x = "Temperature (°F)", y = "Year") +
  guides(fill = "none") +
  theme_ridges()
## Gradient fill along the x direction; after_stat(x) replaces the
## deprecated `..x..` notation for computed aesthetics.
ggplot(chic, aes(x = temp, y = season, fill = after_stat(x))) +
  geom_density_ridges_gradient(scale = .9, gradient_lwd = .5,
                               color = "black") +
  scale_fill_viridis_c(option = "plasma", name = "") +
  labs(x = "Temperature (°F)", y = "Season") +
  theme_ridges(font_family = "Roboto Condensed", grid = FALSE)
library(tidyverse)
## only plot extreme season using dplyr from the tidyverse
ggplot(data = filter(chic, season %in% c("Summer", "Winter")),
       aes(x = temp, y = year, fill = paste(year, season))) +
  geom_density_ridges(alpha = .7, rel_min_height = .01,
                      color = "white", from = -5, to = 95) +
  scale_fill_cyclical(breaks = c("1997 Summer", "1997 Winter"),
                      labels = c(`1997 Summer` = "Summer",
                                 `1997 Winter` = "Winter"),
                      values = c("tomato", "dodgerblue"),
                      name = "Season:", guide = "legend") +
  theme_ridges(grid = FALSE) +
  labs(x = "Temperature (°F)", y = "Year")
## Histogram-style ridges.
ggplot(chic, aes(x = temp, y = factor(year), fill = year)) +
  geom_density_ridges(stat = "binline", bins = 25, scale = .9,
                      draw_baseline = FALSE, show.legend = FALSE) +
  theme_minimal() +
  labs(x = "Temperature (°F)", y = "Season")
## 30-day running mean of ozone (note: stats::filter, not dplyr::filter).
chic$o3run <- as.numeric(stats::filter(chic$o3, rep(1/30, 30), sides = 2))
ggplot(chic, aes(x = date, y = o3run)) +
  geom_line(color = "chocolate", lwd = .8) +
  labs(x = "Year", y = "Ozone")
## Fill the area below the line with a ribbon ...
ggplot(chic, aes(x = date, y = o3run)) +
  geom_ribbon(aes(ymin = 0, ymax = o3run),
              fill = "orange", alpha = .4) +
  geom_line(color = "chocolate", lwd = .8) +
  labs(x = "Year", y = "Ozone")
## ... or simply use geom_area().
ggplot(chic, aes(x = date, y = o3run)) +
  geom_area(color = "chocolate", lwd = .8,
            fill = "orange", alpha = .4) +
  labs(x = "Year", y = "Ozone")
## +/- one standard deviation band around the running mean.
chic$mino3 <- chic$o3run - sd(chic$o3run, na.rm = TRUE)
chic$maxo3 <- chic$o3run + sd(chic$o3run, na.rm = TRUE)
ggplot(chic, aes(x = date, y = o3run)) +
  geom_ribbon(aes(ymin = mino3, ymax = maxo3), alpha = .5,
              fill = "darkseagreen3", color = "transparent") +
  geom_line(color = "aquamarine4", lwd = .7) +
  labs(x = "Year", y = "Ozone")
## ---- Smoothings ----
## Default smoother with confidence band.
ggplot(chic, aes(x = date, y = temp)) +
  labs(x = "Year", y = "Temperature (°F)") +
  stat_smooth() +
  geom_point(color = "gray40", alpha = .5)
## Linear fit without the standard-error band.
ggplot(chic, aes(x = temp, y = death)) +
  labs(x = "Temperature (°F)", y = "Deaths") +
  stat_smooth(method = "lm", se = FALSE,
              color = "firebrick", size = 1.3) +
  geom_point(color = "gray40", alpha = .5)
## Degree-5 polynomial fit via the formula argument.
ggplot(chic, aes(x = o3, y = temp))+
  labs(x = "Ozone Level", y = "Temperature (°F)") +
  geom_smooth(
    method = "lm",
    formula = y ~ x + I(x^2) + I(x^3) + I(x^4) + I(x^5),
    color = "black",
    fill = "firebrick"
  ) +
  geom_point(color = "gray40", alpha = .3)
## geom_smooth() and stat_smooth() are two ways to spell the same layer.
ggplot(chic, aes(x = o3, y = temp))+
  labs(x = "Ozone Level", y = "Temperature (°F)") +
  geom_smooth(stat = "smooth") + ## the default
  geom_point(color = "gray40", alpha = .3)
ggplot(chic, aes(x = o3, y = temp))+
  labs(x = "Ozone Level", y = "Temperature (°F)") +
  stat_smooth(geom = "smooth") + ## the default
  geom_point(color = "gray40", alpha = .3)
## GAM smoothers with increasing basis dimension k (more wiggly).
cols <- c("darkorange2", "firebrick", "dodgerblue3")
ggplot(chic, aes(x = date, y = temp)) +
  geom_point(color = "gray40", alpha = .3) +
  labs(x = "Year", y = "Temperature (°F)") +
  stat_smooth(aes(col = "1000"),
              method = "gam",
              formula = y ~ s(x, k = 1000),
              se = FALSE, size = 1.3) +
  stat_smooth(aes(col = "100"),
              method = "gam",
              formula = y ~ s(x, k = 100),
              se = FALSE, size = 1) +
  stat_smooth(aes(col = "10"),
              method = "gam",
              formula = y ~ s(x, k = 10),
              se = FALSE, size = .8) +
  scale_color_manual(name = "k", values = cols)
## ---- Interactive versions ----
## library(shiny)
## runExample("01_hello")
## runExample("04_mpg")
## plotly: convert an existing ggplot to an interactive widget.
library(plotly)
g <- ggplot(chic, aes(date, temp)) +
  geom_line(color = "grey") +
  geom_point(aes(color = season)) +
  scale_color_brewer(palette = "Dark2", guide = "none") +
  labs(x = NULL, y = "Temperature (°F)") +
  theme_bw()
g
ggplotly(g)
## ggiraph: interactive geoms with tooltips and hover ids.
library(ggiraph)
g <- ggplot(chic, aes(date, temp)) +
  geom_line(color = "grey") +
  geom_point_interactive(
    aes(color = season, tooltip = season, data_id = season)
  ) +
  scale_color_brewer(palette = "Dark2", guide = "none") +
  labs(x = NULL, y = "Temperature (°F)") +
  theme_bw()
girafe(ggobj = g)
## highcharter, echarts4r and charter htmlwidget alternatives.
library(highcharter)
hchart(chic, "scatter", hcaes(x = date, y = temp, group = season))
library(echarts4r)
chic %>%
  e_charts(date) %>%
  e_scatter(temp, symbol_size = 7) %>%
  e_visual_map(temp) %>%
  e_y_axis(name = "Temperature (°F)") %>%
  e_legend(FALSE)
library(charter)
## charter expects a numeric x axis, so convert the Date column first.
chic$date_num <- as.numeric(chic$date)
chart(data = chic, caes(date_num, temp)) %>%
  c_scatter(caes(color = season, group = season)) %>%
  c_colors(RColorBrewer::brewer.pal(4, name = "Dark2"))
|
###########################################################################################################
# Name: Pradeep Sathyamurthy
# Date of submission: 16 - Oct - 2016
# Problem statement: Working with Hospitality data
############################################################################################################
# NOTE(review): a hard-coded setwd() ties the script to one machine;
# prefer running from the project directory or using relative paths.
setwd("D:/Courses/Coursera/R")
# Read the outcome data once at the top level. "Not Available" entries
# become NA so the mortality-rate columns can be handled as numeric.
data <- read.csv("outcome-of-care-measures.csv",na.strings="Not Available",stringsAsFactors=FALSE )
# defining the function for hospital ranking
# Rank hospitals in every state for a given 30-day mortality outcome.
#
# Args:
#   outcome: one of "heart attack", "heart failure", "pneumonia".
#   num:     "best", "worst", or a numeric rank within each state.
#
# Returns:
#   A data frame with columns `hospital` and `state`, one row per state,
#   giving the hospital at the requested rank (NA when the state has fewer
#   usable hospitals than `num`). Ties are broken alphabetically by name.
#   This matches the usage below: subset(r, state == "HI")$hospital.
#
# Note: reads the data frame `data` loaded by read.csv() at the top of
# this script.
rankall <- function (outcome,num="best"){
  # Keep only hospital name, state, and the three mortality-rate columns.
  validating_col <- c(2,7,11,17,23)
  data.needed <- data[,validating_col]
  names(data.needed) <- c("Name","State","DR_Heart_Attack","DR_Heart_Failure","DR_Pneumonia")
  # Map each valid outcome to its rate column; anything else is an error.
  # (The original looped over an undefined `valid_outcome` vector.)
  rate_col <- switch(outcome,
                     "heart attack"  = "DR_Heart_Attack",
                     "heart failure" = "DR_Heart_Failure",
                     "pneumonia"     = "DR_Pneumonia",
                     stop("invalid outcome"))
  # Drop hospitals with a missing rate for this outcome only (na.omit on
  # the whole frame would also drop rows missing *other* outcomes).
  usable <- data.needed[!is.na(data.needed[[rate_col]]), ]
  # Select the hospital at the requested rank within one state's rows.
  pick_one <- function(grp) {
    # Rank by mortality rate, breaking ties alphabetically by name.
    grp <- grp[order(grp[[rate_col]], grp$Name), ]
    idx <- if (identical(num, "best")) {
      1L
    } else if (identical(num, "worst")) {
      nrow(grp)
    } else if (is.numeric(num)) {
      num
    } else {
      stop("Type a valid value for num: [Number]/worst/best")
    }
    hospital <- if (idx >= 1 && idx <= nrow(grp)) grp$Name[idx] else NA_character_
    data.frame(hospital = hospital, state = grp$State[1],
               stringsAsFactors = FALSE)
  }
  # One row per state, in alphabetical state order (split() sorts levels).
  result <- do.call(rbind, lapply(split(usable, usable$State), pick_one))
  rownames(result) <- NULL
  result
}
# train
r <- rankall("heart attack", 4)
as.character(subset(r, state == "HI")$hospital) # Castle medical centre
r <- rankall("pneumonia", "worst")
as.character(subset(r, state == "NJ")$hospital) # BERGEN REGIONAL MEDICAL CENTER
r <- rankall("heart failure", 10)
as.character(subset(r, state == "NV")$hospital) # RENOWN SOUTH MEADOWS MEDICAL CENTER
| /R Programming/rankall.R | no_license | pradeepsathyamurthy/Data_Science_in_R | R | false | false | 6,080 | r | ###########################################################################################################
# Name: Pradeep Sathyamurthy
# Date of submission: 16 - Oct - 2016
# Problem statement: Working with Hospitality data
############################################################################################################
setwd("D:/Courses/Coursera/R")
data <- read.csv("outcome-of-care-measures.csv",na.strings="Not Available",stringsAsFactors=FALSE )
# defining the function for hospital ranking
# Rank hospitals in every state for a given 30-day mortality outcome.
#
# Args:
#   outcome: one of "heart attack", "heart failure", "pneumonia".
#   num:     "best", "worst", or a numeric rank within each state.
#
# Returns:
#   A data frame with columns `hospital` and `state`, one row per state,
#   giving the hospital at the requested rank (NA when the state has fewer
#   usable hospitals than `num`). Ties are broken alphabetically by name.
#   This matches the usage below: subset(r, state == "HI")$hospital.
#
# Note: reads the data frame `data` loaded by read.csv() at the top of
# this script.
rankall <- function (outcome,num="best"){
  # Keep only hospital name, state, and the three mortality-rate columns.
  validating_col <- c(2,7,11,17,23)
  data.needed <- data[,validating_col]
  names(data.needed) <- c("Name","State","DR_Heart_Attack","DR_Heart_Failure","DR_Pneumonia")
  # Map each valid outcome to its rate column; anything else is an error.
  # (The original looped over an undefined `valid_outcome` vector.)
  rate_col <- switch(outcome,
                     "heart attack"  = "DR_Heart_Attack",
                     "heart failure" = "DR_Heart_Failure",
                     "pneumonia"     = "DR_Pneumonia",
                     stop("invalid outcome"))
  # Drop hospitals with a missing rate for this outcome only (na.omit on
  # the whole frame would also drop rows missing *other* outcomes).
  usable <- data.needed[!is.na(data.needed[[rate_col]]), ]
  # Select the hospital at the requested rank within one state's rows.
  pick_one <- function(grp) {
    # Rank by mortality rate, breaking ties alphabetically by name.
    grp <- grp[order(grp[[rate_col]], grp$Name), ]
    idx <- if (identical(num, "best")) {
      1L
    } else if (identical(num, "worst")) {
      nrow(grp)
    } else if (is.numeric(num)) {
      num
    } else {
      stop("Type a valid value for num: [Number]/worst/best")
    }
    hospital <- if (idx >= 1 && idx <= nrow(grp)) grp$Name[idx] else NA_character_
    data.frame(hospital = hospital, state = grp$State[1],
               stringsAsFactors = FALSE)
  }
  # One row per state, in alphabetical state order (split() sorts levels).
  result <- do.call(rbind, lapply(split(usable, usable$State), pick_one))
  rownames(result) <- NULL
  result
}
# train
r <- rankall("heart attack", 4)
as.character(subset(r, state == "HI")$hospital) # Castle medical centre
r <- rankall("pneumonia", "worst")
as.character(subset(r, state == "NJ")$hospital) # BERGEN REGIONAL MEDICAL CENTER
r <- rankall("heart failure", 10)
as.character(subset(r, state == "NV")$hospital) # RENOWN SOUTH MEADOWS MEDICAL CENTER
|
/R/5_modelagem_do_many.R | no_license | inma-mcti/MNE_BHRD | R | false | false | 5,961 | r | ||
# Statistical analysis in R
# 17 January 2021
# EMD
#--------------------------
library(tidyverse)
# data frame construction for regression analysis
n <- 50 # number of observations (rows)
var_A <- runif(n) # random uniform (independent)
var_B <- runif(n) # random uniform (dependent)
var_C <- 5.5 + var_A*10 # a noise linear relationship with var_A
ID <- 1:n
reg_data <- data.frame(ID, var_A, var_B, var_C)
head(reg_data)
str(reg_data)
# regression analysis in R
reg_model <- lm(var_B~var_A, data=reg_data)
print(reg_model)
str(reg_model)
head(reg_model$residuals) # contains residuals
# summary has the elements that we need
summary(reg_model)
z <- unlist(summary(reg_model))
reg_stats <- list(intercept = z$coefficients1,
slope = z$coefficients2,
intercept_p = z$coefficients7,
slope_p=z$coefficients8,
r2=z$r.squared)
print(reg_stats)
reg_stats$r2
reg_stats[[5]]
reg_stats[5] # no this is just a list item
# Scatter plot of the regression data with a fitted line and a 99%
# confidence band.
# Fix: `se` is a logical flag (draw the band or not); the width of the
# band is controlled by `level`, whose default is 0.95.
reg_plot <- ggplot(reg_data) +
  aes(x = var_A, y = var_B) +
  geom_point() +
  stat_smooth(method = lm, se = TRUE, level = 0.99)
print(reg_plot)
ggsave(filename="RegressionPlot.pdf",
plot=reg_plot,
device="pdf")
# data frame
n_groups <- 3 # number of treatment groups
n_name <- c('control', 'Treat1', 'Treat2') # names of treatment groups
n_size <- c(12, 17, 9) # sample sizes
n_mean <- c(40, 41, 60) # mean responses
n_sd <- c(5,5,5) # Standard deviation of each group
ID <- 1:sum(n_size) # create unique id
res_var <- c(rnorm(n=n_size[1],mean=n_mean[1], sd=n_sd[1]),
rnorm(n=n_size[2],mean=n_mean[2], sd=n_sd[2]),
rnorm(n=n_size[3],mean=n_mean[3], sd=n_sd[3]))
trt_group <- rep(n_name, n_size)
ano_data <- data.frame(ID,trt_group,res_var)
head(ano_data)
# analysis of variance in R
#(one way so it could be a t test if there were two groups)
ano_model <- aov(res_var~trt_group,data=ano_data)
print(ano_model)
z <- summary(ano_model)
print(z)
flat_out <- unlist(z)
# Extract the F ratio and its p-value from the flattened ANOVA summary.
# Fix: use `=` (not `<-`) inside list() so the elements are actually
# named f_ratio/f_pval instead of creating stray global variables, and
# reuse flat_out instead of unlisting the summary twice.
ano_stats <- list(f_ratio = flat_out[7],
                  f_pval  = flat_out[9])
print(ano_stats)
# basic ggplot of anova data
ano_plot <- ggplot(ano_data) + aes(x=trt_group, y=res_var) +
geom_boxplot()
print(ano_plot)
ggsave(filename='ANOVAPlot.pdf',
plot=ano_plot,
device = 'pdf')
# NOTE(review): this plot appeared *before* `lreg_data` is created below
# (and mapped columns `x`/`y`, which never exist in lreg_data), so the
# script errored here when run top-to-bottom. It duplicates `lreg_plot`
# further down and is therefore disabled.
# ggplot(lreg_data) +
#   aes(x = x, y = y) +
#   geom_point() +
#   stat_smooth(method = glm,
#               method.args = list(family = binomial))
# Logistic Regression
# construct data frame for logistic regression
x_var <- sort(rgamma(n=200,shape=5,scale=5))
y_var <- sample(rep(c(1,0),each=100),prob=seq_len(200))
lreg_data <- data.frame(ID=1:200, xVar=x_var, yVar=y_var)
head(lreg_data)
# logistic regression analysis
lreg_model <- glm(yVar ~ xVar,
data=lreg_data,
family=binomial(link=logit))
summary(lreg_model)
summary(lreg_model)$coefficients
# logistical regression plot
lreg_plot <- ggplot(lreg_data) +
aes(x=xVar, y=yVar) +
geom_point() +
stat_smooth(method=glm,
method.args=list(family=binomial))
print(lreg_plot)
# contingency data are counts for different classifications
vec_1 <- c(50,66,22)
vec_2 <- c(120,22,30)
data_matrix <- rbind(vec_1,vec_2)
rownames(data_matrix) <- c('Cold', 'Warm')
colnames(data_matrix) <- c('Species1', 'Species2', 'Species3')
print(data_matrix)
# statistical analysis of contingency data
print(chisq.test(data_matrix))
# plotting contingency data
mosaicplot(x=data_matrix,
col=c('goldenrod', 'grey', 'black'),
shade = FALSE)
barplot(height=data_matrix,
beside=TRUE,
col=c("cornflowerblue","tomato"))
d_frame <- as.data.frame(data_matrix)
head(d_frame)
# Fix: the original assigned to a typo name `d_frae`, so the Treatment
# column never made it into d_frame and the final plot's fill=Treatment
# aesthetic had nothing to map.
d_frame <- cbind(d_frame, list(Treatment = c('Cold', 'Warm')))
head(d_frame)
# Reshape wide -> long; Treatment is kept as an id column because only
# Species1:Species3 are gathered.
d_frame <- gather(d_frame,
                  key = Species,
                  Species1:Species3,
                  value = Counts)
head(d_frame)
contingency_graph <- ggplot(d_frame,aes(x=Species, y=Counts, fill=Treatment)) +
geom_bar(stat='identity',
position = 'dodge',
color = I('black')) +
scale_fill_manual(values=c('cornflowerblue','tomato'))
| /StatsAnalysis.R | no_license | emdean99/Bio381Scripting | R | false | false | 4,201 | r | # Statistical analysis in R
# 17 January 2021
# EMD
#--------------------------
library(tidyverse)
# data frame construction for regression analysis
n <- 50 # number of observations (rows)
var_A <- runif(n) # random uniform (independent)
var_B <- runif(n) # random uniform (dependent)
var_C <- 5.5 + var_A*10 # a noise linear relationship with var_A
ID <- 1:n
reg_data <- data.frame(ID, var_A, var_B, var_C)
head(reg_data)
str(reg_data)
# regression analysis in R
reg_model <- lm(var_B~var_A, data=reg_data)
print(reg_model)
str(reg_model)
head(reg_model$residuals) # contains residuals
# summary has the elements that we need
summary(reg_model)
z <- unlist(summary(reg_model))
reg_stats <- list(intercept = z$coefficients1,
slope = z$coefficients2,
intercept_p = z$coefficients7,
slope_p=z$coefficients8,
r2=z$r.squared)
print(reg_stats)
reg_stats$r2
reg_stats[[5]]
reg_stats[5] # no this is just a list item
# Scatter plot of the regression data with a fitted line and a 99%
# confidence band.
# Fix: `se` is a logical flag (draw the band or not); the width of the
# band is controlled by `level`, whose default is 0.95.
reg_plot <- ggplot(reg_data) +
  aes(x = var_A, y = var_B) +
  geom_point() +
  stat_smooth(method = lm, se = TRUE, level = 0.99)
print(reg_plot)
ggsave(filename="RegressionPlot.pdf",
plot=reg_plot,
device="pdf")
# data frame
n_groups <- 3 # number of treatment groups
n_name <- c('control', 'Treat1', 'Treat2') # names of treatment groups
n_size <- c(12, 17, 9) # sample sizes
n_mean <- c(40, 41, 60) # mean responses
n_sd <- c(5,5,5) # Standard deviation of each group
ID <- 1:sum(n_size) # create unique id
res_var <- c(rnorm(n=n_size[1],mean=n_mean[1], sd=n_sd[1]),
rnorm(n=n_size[2],mean=n_mean[2], sd=n_sd[2]),
rnorm(n=n_size[3],mean=n_mean[3], sd=n_sd[3]))
trt_group <- rep(n_name, n_size)
ano_data <- data.frame(ID,trt_group,res_var)
head(ano_data)
# analysis of variance in R
#(one way so it could be a t test if there were two groups)
ano_model <- aov(res_var~trt_group,data=ano_data)
print(ano_model)
z <- summary(ano_model)
print(z)
flat_out <- unlist(z)
# Extract the F ratio and its p-value from the flattened ANOVA summary.
# Fix: use `=` (not `<-`) inside list() so the elements are actually
# named f_ratio/f_pval instead of creating stray global variables, and
# reuse flat_out instead of unlisting the summary twice.
ano_stats <- list(f_ratio = flat_out[7],
                  f_pval  = flat_out[9])
print(ano_stats)
# basic ggplot of anova data
ano_plot <- ggplot(ano_data) + aes(x=trt_group, y=res_var) +
geom_boxplot()
print(ano_plot)
ggsave(filename='ANOVAPlot.pdf',
plot=ano_plot,
device = 'pdf')
# NOTE(review): this plot appeared *before* `lreg_data` is created below
# (and mapped columns `x`/`y`, which never exist in lreg_data), so the
# script errored here when run top-to-bottom. It duplicates `lreg_plot`
# further down and is therefore disabled.
# ggplot(lreg_data) +
#   aes(x = x, y = y) +
#   geom_point() +
#   stat_smooth(method = glm,
#               method.args = list(family = binomial))
# Logistic Regression
# construct data frame for logistic regression
x_var <- sort(rgamma(n=200,shape=5,scale=5))
y_var <- sample(rep(c(1,0),each=100),prob=seq_len(200))
lreg_data <- data.frame(ID=1:200, xVar=x_var, yVar=y_var)
head(lreg_data)
# logistic regression analysis
lreg_model <- glm(yVar ~ xVar,
data=lreg_data,
family=binomial(link=logit))
summary(lreg_model)
summary(lreg_model)$coefficients
# logistical regression plot
lreg_plot <- ggplot(lreg_data) +
aes(x=xVar, y=yVar) +
geom_point() +
stat_smooth(method=glm,
method.args=list(family=binomial))
print(lreg_plot)
# contingency data are counts for different classifications
vec_1 <- c(50,66,22)
vec_2 <- c(120,22,30)
data_matrix <- rbind(vec_1,vec_2)
rownames(data_matrix) <- c('Cold', 'Warm')
colnames(data_matrix) <- c('Species1', 'Species2', 'Species3')
print(data_matrix)
# statistical analysis of contingency data
print(chisq.test(data_matrix))
# plotting contingency data
mosaicplot(x=data_matrix,
col=c('goldenrod', 'grey', 'black'),
shade = FALSE)
barplot(height=data_matrix,
beside=TRUE,
col=c("cornflowerblue","tomato"))
d_frame <- as.data.frame(data_matrix)
head(d_frame)
# Fix: the original assigned to a typo name `d_frae`, so the Treatment
# column never made it into d_frame and the final plot's fill=Treatment
# aesthetic had nothing to map.
d_frame <- cbind(d_frame, list(Treatment = c('Cold', 'Warm')))
head(d_frame)
# Reshape wide -> long; Treatment is kept as an id column because only
# Species1:Species3 are gathered.
d_frame <- gather(d_frame,
                  key = Species,
                  Species1:Species3,
                  value = Counts)
head(d_frame)
contingency_graph <- ggplot(d_frame,aes(x=Species, y=Counts, fill=Treatment)) +
geom_bar(stat='identity',
position = 'dodge',
color = I('black')) +
scale_fill_manual(values=c('cornflowerblue','tomato'))
|
# 更改变量中的数值 ####
ID<-c(1:13)
Col<-c('a','a','a','ac','ac','ac','a','a','ac','a','ac','a','a')
new_data<-data.frame(ID,Col)
new_data[,2]<-as.character(new_data[,2])
new_data$Col[which(new_data$Col=='a')]<-'b'
new_data
# 任务2-20171128-云课堂2017年收入数据统计与分析 ####
# 下载并导入数据 ####
library(rJava)
library(xlsx)
library(xlsxjars)
setwd('D:/Rstudy/02Study163/data')
ss01<-read.xlsx(file = '01.xlsx',sheetName = 'sheet1',header = TRUE,encoding = 'UTF-8')
ss02<-read.xlsx(file = '02.xlsx',sheetName = 'sheet1',header = TRUE,encoding = 'UTF-8')
ss03<-read.xlsx(file = '03.xlsx',sheetName = 'sheet1',header = TRUE,encoding = 'UTF-8')
ss04<-read.xlsx(file = '04.xlsx',sheetName = 'sheet1',header = TRUE,encoding = 'UTF-8')
ss_data<-rbind(ss01,ss02,ss03,ss04)
# 用sqldf函数对数据进行透视分析 ####
library(sqldf)
# 变量重命名
names(ss_data)
names(ss_data)<-c('ID','time','type','name','describe',
'original cost','dealing_price','customer','promotion','promotion_name',
'plat red','user_paid','paid_way','third_paid','promotion_paid',
'sever_paid','actual_income','ex_state','ex_time','arrival_state',
'arrival_time')
# Extract the orders whose status is "transaction successful" (交易成功)
ss_s<-sqldf("select * from ss_data where ex_state in ('交易成功')")
# TODO: Chinese column values display garbled - check the file is read as UTF-8
# Data splitting with data.table's tstrsplit.
# install.packages('data.table')  # run once interactively, not in the script
library(data.table)
# FIXME: tstrsplit() was called with no arguments, which errors at runtime;
# supply the character vector (and separator) to split, e.g.:
# ss_spl <- tstrsplit(ss_s$time, " ")
# Extract the 2017 successfully completed orders from the full data.
# Fix: the original nested single quotes ('... like '2017%'') did not parse.
ss_s11 <- sqldf("select * from ss_s where time like '2017%'")
# Extract course name and actual income.
# FIXME: the WHERE clause below was left unfinished and errors at runtime;
# disabled until the condition is written (ss_inc1 is rebuilt with subset()
# later in the script).
# ss_inc1 <- sqldf("select * from ss_s where ")
# 用subset函数进行数据透视表 ####
# 选取交易成功的订单
ss_d<-subset(ss_data,ex_state=='交易成功',select=c(1:21))
# 提取交易成功的订单课程和实际收入
ss_inc1<-subset(ss_d,select = c('name','actual_income'))
# 用长宽表转换对ss_inc1做数据透视表 ####
library(reshape2)
ss_re<-dcast(ss_inc1,name~actual_income)
| /02Study163/code/20171129第十八课:R语言_练习.R | permissive | foouer/Rstudy | R | false | false | 2,083 | r |
# 更改变量中的数值 ####
ID<-c(1:13)
Col<-c('a','a','a','ac','ac','ac','a','a','ac','a','ac','a','a')
new_data<-data.frame(ID,Col)
new_data[,2]<-as.character(new_data[,2])
new_data$Col[which(new_data$Col=='a')]<-'b'
new_data
# 任务2-20171128-云课堂2017年收入数据统计与分析 ####
# 下载并导入数据 ####
library(rJava)
library(xlsx)
library(xlsxjars)
setwd('D:/Rstudy/02Study163/data')
ss01<-read.xlsx(file = '01.xlsx',sheetName = 'sheet1',header = TRUE,encoding = 'UTF-8')
ss02<-read.xlsx(file = '02.xlsx',sheetName = 'sheet1',header = TRUE,encoding = 'UTF-8')
ss03<-read.xlsx(file = '03.xlsx',sheetName = 'sheet1',header = TRUE,encoding = 'UTF-8')
ss04<-read.xlsx(file = '04.xlsx',sheetName = 'sheet1',header = TRUE,encoding = 'UTF-8')
ss_data<-rbind(ss01,ss02,ss03,ss04)
# 用sqldf函数对数据进行透视分析 ####
library(sqldf)
# 变量重命名
names(ss_data)
names(ss_data)<-c('ID','time','type','name','describe',
'original cost','dealing_price','customer','promotion','promotion_name',
'plat red','user_paid','paid_way','third_paid','promotion_paid',
'sever_paid','actual_income','ex_state','ex_time','arrival_state',
'arrival_time')
# Extract the orders whose status is "transaction successful" (交易成功)
ss_s<-sqldf("select * from ss_data where ex_state in ('交易成功')")
# TODO: Chinese column values display garbled - check the file is read as UTF-8
# Data splitting with data.table's tstrsplit.
# install.packages('data.table')  # run once interactively, not in the script
library(data.table)
# FIXME: tstrsplit() was called with no arguments, which errors at runtime;
# supply the character vector (and separator) to split, e.g.:
# ss_spl <- tstrsplit(ss_s$time, " ")
# Extract the 2017 successfully completed orders from the full data.
# Fix: the original nested single quotes ('... like '2017%'') did not parse.
ss_s11 <- sqldf("select * from ss_s where time like '2017%'")
# Extract course name and actual income.
# FIXME: the WHERE clause below was left unfinished and errors at runtime;
# disabled until the condition is written (ss_inc1 is rebuilt with subset()
# later in the script).
# ss_inc1 <- sqldf("select * from ss_s where ")
# 用subset函数进行数据透视表 ####
# 选取交易成功的订单
ss_d<-subset(ss_data,ex_state=='交易成功',select=c(1:21))
# 提取交易成功的订单课程和实际收入
ss_inc1<-subset(ss_d,select = c('name','actual_income'))
# 用长宽表转换对ss_inc1做数据透视表 ####
library(reshape2)
ss_re<-dcast(ss_inc1,name~actual_income)
|
grafico <- IDEB %>%
filter(!is.na(Valor) & Localidade == LocRef$Localidade & Ano >= "2011-01-01") %>%
select(Localidade, Ano, Anos, Rede, Valor) %>%
ggplot(aes(x = Ano, y = Valor)) +
geom_line(aes(color = Rede), size = 1) +
scale_color_manual(values = mypallete) +
theme_minimal() +
scale_x_date(date_breaks = "1 years",labels = date_format("%Y")) +
scale_y_continuous(limits = c(0,NA)) +
theme(legend.position="bottom",
axis.text.x = element_text(angle = 90),
legend.title = element_blank())+
facet_wrap(~Anos,ncol = 1,scales = "free_y") | /Lines/IDEB.R | no_license | supervedovatto/AnexoA | R | false | false | 596 | r | grafico <- IDEB %>%
filter(!is.na(Valor) & Localidade == LocRef$Localidade & Ano >= "2011-01-01") %>%
select(Localidade, Ano, Anos, Rede, Valor) %>%
ggplot(aes(x = Ano, y = Valor)) +
geom_line(aes(color = Rede), size = 1) +
scale_color_manual(values = mypallete) +
theme_minimal() +
scale_x_date(date_breaks = "1 years",labels = date_format("%Y")) +
scale_y_continuous(limits = c(0,NA)) +
theme(legend.position="bottom",
axis.text.x = element_text(angle = 90),
legend.title = element_blank())+
facet_wrap(~Anos,ncol = 1,scales = "free_y") |
% Generated by roxygen2 (4.0.0): do not edit by hand
\name{gsisimRepUnits2List}
\alias{gsisimRepUnits2List}
\title{reads a gsi_sim reporting units file and returns and list of reporting units}
\usage{
gsisimRepUnits2List(ru.file = "/Users/eriq/Documents/xp_dev_svn_checkouts/gsi_sim/snpset/2010_SNPset_GSI_TOOLS/Baseline/snpset_ReportingUnits.txt")
}
\description{
They are ordered as they are ordered in the rep-units file. Names of the components
are the names of the reporting units and the constituent populations are the
contained character vector, in the order they appear in the reporting units file
}
| /man/gsisimRepUnits2List.Rd | permissive | mackerman44/lowergranite | R | false | false | 614 | rd | % Generated by roxygen2 (4.0.0): do not edit by hand
\name{gsisimRepUnits2List}
\alias{gsisimRepUnits2List}
\title{reads a gsi_sim reporting units file and returns and list of reporting units}
\usage{
gsisimRepUnits2List(ru.file = "/Users/eriq/Documents/xp_dev_svn_checkouts/gsi_sim/snpset/2010_SNPset_GSI_TOOLS/Baseline/snpset_ReportingUnits.txt")
}
\description{
They are ordered as they are ordered in the rep-units file. Names of the components
are the names of the reporting units and the constituent populations are the
contained character vector, in the order they appear in the reporting units file
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/EPFR.r
\name{fcn.indent.else}
\alias{fcn.indent.else}
\title{fcn.indent.else}
\usage{
fcn.indent.else(x, y)
}
\arguments{
\item{x}{= a line of code in a function}
\item{y}{= number of tabs}
}
\description{
T/F depending on whether line has an else statement
}
\seealso{
Other fcn: \code{\link{fcn.all.canonical}},
\code{\link{fcn.all.roxygenize}},
\code{\link{fcn.all.sub}}, \code{\link{fcn.all.super}},
\code{\link{fcn.args.actual}},
\code{\link{fcn.canonical}}, \code{\link{fcn.clean}},
\code{\link{fcn.comments.parse}},
\code{\link{fcn.dates.parse}}, \code{\link{fcn.date}},
\code{\link{fcn.direct.sub}},
\code{\link{fcn.direct.super}}, \code{\link{fcn.dir}},
\code{\link{fcn.expressions.count}},
\code{\link{fcn.extract.args}},
\code{\link{fcn.extract.out}}, \code{\link{fcn.has}},
\code{\link{fcn.indent.decrease}},
\code{\link{fcn.indent.ignore}},
\code{\link{fcn.indent.increase}},
\code{\link{fcn.indent.proper}},
\code{\link{fcn.indirect}}, \code{\link{fcn.lines.code}},
\code{\link{fcn.lines.count}}, \code{\link{fcn.list}},
\code{\link{fcn.lite}}, \code{\link{fcn.mat.col}},
\code{\link{fcn.mat.num}}, \code{\link{fcn.mat.vec}},
\code{\link{fcn.nonNA}}, \code{\link{fcn.num.nonNA}},
\code{\link{fcn.order}}, \code{\link{fcn.path}},
\code{\link{fcn.roxygenize}}, \code{\link{fcn.sho}},
\code{\link{fcn.simple}}, \code{\link{fcn.to.comments}},
\code{\link{fcn.to.txt}}, \code{\link{fcn.vec.grp}},
\code{\link{fcn.vec.num}}
}
\keyword{fcn.indent.else}
| /man/fcn.indent.else.Rd | no_license | vsrimurthy/EPFR | R | false | true | 1,589 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/EPFR.r
\name{fcn.indent.else}
\alias{fcn.indent.else}
\title{fcn.indent.else}
\usage{
fcn.indent.else(x, y)
}
\arguments{
\item{x}{= a line of code in a function}
\item{y}{= number of tabs}
}
\description{
T/F depending on whether line has an else statement
}
\seealso{
Other fcn: \code{\link{fcn.all.canonical}},
\code{\link{fcn.all.roxygenize}},
\code{\link{fcn.all.sub}}, \code{\link{fcn.all.super}},
\code{\link{fcn.args.actual}},
\code{\link{fcn.canonical}}, \code{\link{fcn.clean}},
\code{\link{fcn.comments.parse}},
\code{\link{fcn.dates.parse}}, \code{\link{fcn.date}},
\code{\link{fcn.direct.sub}},
\code{\link{fcn.direct.super}}, \code{\link{fcn.dir}},
\code{\link{fcn.expressions.count}},
\code{\link{fcn.extract.args}},
\code{\link{fcn.extract.out}}, \code{\link{fcn.has}},
\code{\link{fcn.indent.decrease}},
\code{\link{fcn.indent.ignore}},
\code{\link{fcn.indent.increase}},
\code{\link{fcn.indent.proper}},
\code{\link{fcn.indirect}}, \code{\link{fcn.lines.code}},
\code{\link{fcn.lines.count}}, \code{\link{fcn.list}},
\code{\link{fcn.lite}}, \code{\link{fcn.mat.col}},
\code{\link{fcn.mat.num}}, \code{\link{fcn.mat.vec}},
\code{\link{fcn.nonNA}}, \code{\link{fcn.num.nonNA}},
\code{\link{fcn.order}}, \code{\link{fcn.path}},
\code{\link{fcn.roxygenize}}, \code{\link{fcn.sho}},
\code{\link{fcn.simple}}, \code{\link{fcn.to.comments}},
\code{\link{fcn.to.txt}}, \code{\link{fcn.vec.grp}},
\code{\link{fcn.vec.num}}
}
\keyword{fcn.indent.else}
|
#' Is the input the empty model?
#'
#' Checks to see if the input is the empty model.
#'
#' @param x Input to check.
#' @param .xname Not intended to be used directly.
#' @param severity How severe should the consequences of the assertion be?
#' Either \code{"stop"}, \code{"warning"}, \code{"message"}, or \code{"none"}.
#' @return \code{is_[non_]empty_model} returns \code{TRUE} if the input is an
#' [non] empty model. (\code{has_terms} is used to determine that a variable
#' is a model object.) The model is considered empty if there are no
#' factors and no intercept. The \code{assert_*} functions return nothing
#' but throw an error if the corresponding \code{is_*} function returns
#' \code{FALSE}.
#' @seealso \code{\link[stats]{is.empty.model}} and \code{is_empty}.
#' @examples
#' assert_is_empty_model(lm(uptake ~ 0, CO2))
#' assert_is_non_empty_model(lm(uptake ~ conc, CO2))
#' assert_is_non_empty_model(lm(uptake ~ 1, CO2))
#' @importFrom stats terms
#' @export
is_empty_model <- function(x, .xname = get_name_in_parent(x))
{
  # A variable without terms is almost certainly not a model at all.
  if(!has_terms(x))
  {
    return(
      false(
        gettext("%s has no terms; is probably not a model."),
        .xname
      )
    )
  }
  model_terms <- terms(x)
  # Empty means: no factors in the terms object *and* no intercept.
  if(length(attr(model_terms, "factors")) > 0L)
  {
    return(false(gettext("%s has factors."), .xname))
  }
  if(attr(model_terms, "intercept") != 0L)
  {
    return(false(gettext("%s has an intercept."), .xname))
  }
  TRUE
}
#' @rdname is_empty_model
#' @importFrom stats terms
#' @export
is_non_empty_model <- function(x, .xname = get_name_in_parent(x))
{
  # A variable without terms is almost certainly not a model at all.
  if(!has_terms(x))
  {
    return(
      false(
        gettext("%s has no terms; is probably not a model."),
        .xname
      )
    )
  }
  model_terms <- terms(x)
  no_factors   <- length(attr(model_terms, "factors")) == 0L
  no_intercept <- attr(model_terms, "intercept") == 0L
  # Non-empty requires at least one factor or an intercept.
  if(no_factors && no_intercept)
  {
    return(false(gettext("%s is an empty model."), .xname))
  }
  TRUE
}
| /assertive.models/R/is-empty-model.R | no_license | ingted/R-Examples | R | false | false | 1,938 | r | #' Is the input the empty model?
#'
#' Checks to see if the input is the empty model.
#'
#' @param x Input to check.
#' @param .xname Not intended to be used directly.
#' @param severity How severe should the consequences of the assertion be?
#' Either \code{"stop"}, \code{"warning"}, \code{"message"}, or \code{"none"}.
#' @return \code{is_[non_]empty_model} returns \code{TRUE} if the input is an
#' [non] empty model. (\code{has_terms} is used to determine that a variable
#' is a model object.) The model is considered empty if there are no
#' factors and no intercept. The \code{assert_*} functions return nothing
#' but throw an error if the corresponding \code{is_*} function returns
#' \code{FALSE}.
#' @seealso \code{\link[stats]{is.empty.model}} and \code{is_empty}.
#' @examples
#' assert_is_empty_model(lm(uptake ~ 0, CO2))
#' assert_is_non_empty_model(lm(uptake ~ conc, CO2))
#' assert_is_non_empty_model(lm(uptake ~ 1, CO2))
#' @importFrom stats terms
#' @export
is_empty_model <- function(x, .xname = get_name_in_parent(x))
{
  # A variable without terms is almost certainly not a model at all.
  if(!has_terms(x))
  {
    return(
      false(
        gettext("%s has no terms; is probably not a model."),
        .xname
      )
    )
  }
  model_terms <- terms(x)
  # Empty means: no factors in the terms object *and* no intercept.
  if(length(attr(model_terms, "factors")) > 0L)
  {
    return(false(gettext("%s has factors."), .xname))
  }
  if(attr(model_terms, "intercept") != 0L)
  {
    return(false(gettext("%s has an intercept."), .xname))
  }
  TRUE
}
#' @rdname is_empty_model
#' @importFrom stats terms
#' @export
is_non_empty_model <- function(x, .xname = get_name_in_parent(x))
{
  # A variable without terms is almost certainly not a model at all.
  if(!has_terms(x))
  {
    return(
      false(
        gettext("%s has no terms; is probably not a model."),
        .xname
      )
    )
  }
  model_terms <- terms(x)
  no_factors   <- length(attr(model_terms, "factors")) == 0L
  no_intercept <- attr(model_terms, "intercept") == 0L
  # Non-empty requires at least one factor or an intercept.
  if(no_factors && no_intercept)
  {
    return(false(gettext("%s is an empty model."), .xname))
  }
  TRUE
}
|
## Predicting who will win the Men's 2019 Australian Tennis Open based on data from 2000 to 2019
rm(list=ls()) # clear workspace (kept from the original; avoid in shared/sourced scripts)
# Load packages
library(dplyr)
library(caret)
library(ggplot2)
library(readr)
library(tidyverse)
library(plyr)
library(dummies) # NOTE(review): 'dummies' is archived on CRAN; caret::dummyVars() or model.matrix() are maintained alternatives
# Set the working directory (machine-specific path; adjust as needed)
setwd("~/Desktop/ATP")
# Load the training data set
tennis_data <- read.csv("merged.csv",stringsAsFactors = FALSE,header = TRUE)
####################################################################
# Exploring raw data
# Understand data
####################################################################
# View its dimensions
dim(tennis_data)
# View its class
class(tennis_data) # dataframe
# Review the first 5 observations
head(tennis_data)
# Explore the structure of the data
str(tennis_data)
glimpse(tennis_data)
# Explore dimensions of the data - 52383 rows and 83 columns
# Check for missing values (anyNA avoids printing the full logical matrix,
# which is what the original is.na(tennis_data) call did)
anyNA(tennis_data)
#################################################################
#Subsetting data
################################################################
# Take only the first 26 columns of results data
names(tennis_data)[1:26]
# We want to filter data for the Autralian Tennis Open tournaments so that we can work with a subset of data
aust_open <- tennis_data[tennis_data$Tournament=="Australian Open", 1:26]
# View structure of training data the dplyr way.
glimpse(aust_open)
# Save the dataframe to a csv file to write the csv file into R working folder:
write.csv(aust_open,file = "aust_open.csv", row.names = FALSE)
##############################################################
# Pre-Processing the Training Data (Data Cleaning)
##############################################################
# Exported aust_open.csv file was exported and a new column was created in Excel to extract the year from the data with non-standardised formatting
# NOTE(review): the file read below ("aus_open.csv") differs from the one
# written above ("aust_open.csv") -- confirm the Excel-edited copy really is
# saved under this name.
a <- read.csv("aus_open.csv",stringsAsFactors = FALSE,header = TRUE)
#############################################################
# Exploratory Data Analysis
#############################################################
glimpse(a) # view the structure of the training data
summary(a) # descriptive statistics
# Transform character variables into numeric variables
a$W1 <- as.numeric(a$W1)
a$L1 <- as.numeric(a$L1)
a$WRank <- as.numeric(a$WRank)
a$LRank <- as.numeric(a$LRank)
##########################################################
# encoding categorical features
##########################################################
# Convert categorical variables into factors to represent their levels
a$Location <- factor(a$Location)
a$Tournament <- factor(a$Tournament)
a$Series <- factor(a$Series)
a$Court <- factor(a$Court)
a$Surface <- factor(a$Surface)
a$Best.of <- factor(a$Best.of)
a$Round <- factor(a$Round)
a$Winner <- factor(a$Winner)
a$Loser <- factor(a$Loser)
a$Comment <- factor(a$Comment)
glimpse(a) # check that structure of categorical variables have converted with levels
#######################################
# Detect Missing values
######################################
complete.cases(a) # view missing values
which(complete.cases(a)) # view which row has full row values are located in
which(!complete.cases(a)) # view which row has 'full 'NA' row values are located in
na_vec <- which(!complete.cases(a)) # row indices containing at least one NA (the redundant first assignment was removed)
# a[-na_vec] # vector with NA rows removed. We do not want to remove all rows as it will impact on observations
sum(is.na(a)) # Check for any missing values. There are 6810 missing values
mean(is.na(a)) # 10.4 % of data is missing values (5% is the acceptable threshold) hence I will remove the columns 'Series, Court ' which have the most missing values
colSums(is.na(a)) # Number of missing per column/variable
# Proportion of missing values per column
# (vectorised; returns the same named numeric vector as the original sapply loop)
colMeans(is.na(a))
# install.packages("Amelia") in the console
library(Amelia) # (redundant require(Amelia) removed; library() already attaches it)
# plot the missing value map
missmap(a, main = "Missing Map")
## missing values are significant in Lsets, Wsets, L5, W5, L4, W4 and L1)
###########################################
# Impute missing values
############################################
# Techniques from the blog post https://datascienceplus.com/imputing-missing-data-with-r-mice-package/
# remove categorical variables
a.mis <- subset(a, select = -c(Location, Tournament,Date, Series, Court,Surface, Round, Best.of, Winner, Loser, Comment))
summary(a.mis)
# install.packages("mice") into the console
library(mice)
md.pattern(a.mis) # plot of the pattern of the missing values
# install.packages("VIM") into the console
library(VIM) # plot the missing values
# BUG FIX: the original called aggr(data, ...); no object named 'data' is
# defined in this script (the name resolves to utils::data), and
# labels=names(a.mis) shows the plot is meant for a.mis.
aggr_plot <- aggr(a.mis, col=c('navyblue','red'), numbers=TRUE, sortVars=TRUE, labels=names(a.mis), cex.axis=.7, gap=3, ylab=c("Histogram of missing data","Pattern"))
# Impute missing values with "pmm" - predicted mean matching. m=5 imputed data sets is default
imputed_Data <- mice(a.mis, m=5, maxit = 50, method = 'pmm', seed = 500)
summary(imputed_Data)
# inspect that missing data has been imputed
imputed_Data$imp$Lsets
# check imputed method
imputed_Data$meth
# Inspecting the distribution of the original and plotted data
xyplot(imputed_Data,WRank ~ W1+L1+W2+L2+W3+L3+W4+L4+L5+W5+LRank,pch=18,cex=1)
# density plot
densityplot(imputed_Data)
# View the data as individual points
stripplot(imputed_Data, pch = 20, cex = 1.2)
# Create dummy variables for the categorical variables with more than 2 levels
library(dummies)
Round <- dummy(a$Round)
Best.of <- dummy(a$Best.of)
Winner <- dummy(a$Winner)
Loser <- dummy(a$Loser)
Comment <- dummy(a$Comment)
head(a) # check that the values are been converted to dummy variables
str(a)
summary(a) # Descriptive statistics
#####################################################
# Explore and visualise the data in r
####################################################
# Descriptive statistics for each attribute
library(ggplot2)
# Scatterplot of a subset of data - non-linear
#pairs(imputed_Data[, c("WRank","LRank","W1","L1","W2","L2","L3","W3","L4","W4","L5","W5#","Wsets","Lsets")], main = "tennis results data")
# Density plot of numeric variables
# (columns are referenced bare inside aes(); the original aes(x = a$Year) form
# works here but ignores the data argument and breaks facetting/grouping)
p1 <- ggplot(a, aes(x = Year)) + geom_histogram() + ggtitle(" Histogram of Year")
p1
p2 <- ggplot(a, aes(x = WRank)) + geom_histogram()+ ggtitle(" Histogram of Winner's Ranking")
p2
p3 <- ggplot(a, aes(x = LRank)) + geom_histogram()+ ggtitle(" Histogram of Loser's Ranking")
p3
p4 <- ggplot(a, aes(x = W1)) + geom_histogram()+ ggtitle(" Histogram of Winner in the first set")
p4
p5 <- ggplot(a, aes(x = L1)) + geom_histogram()+ ggtitle(" Histogram of Loser in the first set")
p5
p6 <- ggplot(a, aes(x = W2)) + geom_histogram()+ ggtitle(" Histogram of Winner in the second set")
p6
p7 <- ggplot(a, aes(x = L2)) + geom_histogram()+ ggtitle(" Histogram of Loser in the second set")
p7
p8 <- ggplot(a, aes(x = W3)) + geom_histogram()+ ggtitle(" Histogram of Winner in the third set")
p8
p9 <- ggplot(a, aes(x = L3)) + geom_histogram()+ ggtitle(" Histogram of Loser in the third set")
p9
p10 <- ggplot(a, aes(x = W4)) + geom_histogram()+ ggtitle(" Histogram of Winner in the fourth set")
p10
p11 <- ggplot(a, aes(x = L4)) + geom_histogram()+ ggtitle(" Histogram of Loser in the fourth set")
p11
p12 <- ggplot(a, aes(x = W5)) + geom_histogram()+ ggtitle(" Histogram of Winner in the fifth set")
p12
p13 <- ggplot(a, aes(x = L5)) + geom_histogram()+ ggtitle(" Histogram of Loser in the fifth set")
p13
p14 <- ggplot(a, aes(x = Wsets)) + geom_histogram()+ ggtitle(" Histogram of Winner set")
p14
p15 <- ggplot(a, aes(x = Lsets)) + geom_histogram()+ ggtitle(" Histogram of Loser set")
p15
# visualise categorical variables that have been dummy coded or one-hot encoded
p16 <- plot(x = a$Comment,
main = "Distribution of Comment", xlab = "Comment",
ylab = "count")
p16
p17 <- plot(x= a$Winner,main = "Distribution of Winner", xlab = "Winner",
ylab = "count")
p17
p18 <- plot( x = a$Loser, main = "Distribution of Loser", xlab = "Loser",
ylab = "Count")
p18
p19 <- plot( x = a$Best.of, main = "Distribution of Best.of", xlab = "Best Of",
ylab = "Count")
p19
p20 <- plot( x = a$Round, main = "Distribution of Tennis Round", xlab = "Round",
ylab = "Count")
p20
p21 <- barplot(table(a$Winner), main="Name of Tennis Winners")
xyplot(imputed_Data,WRank ~ W1+L1+W2+L2+W3+L3+W4+L4+L5+W5+LRank,pch=18,cex=1)
| /AustOpen.R | no_license | wendy-wong/wendywong.github.io | R | false | false | 8,583 | r | ## Predicting who will win the Men's 2019 Australian Tennis Open based on data from 2000 to 2019
rm(list=ls()) # clear workspace (kept from the original; avoid in shared/sourced scripts)
# Load packages
library(dplyr)
library(caret)
library(ggplot2)
library(readr)
library(tidyverse)
library(plyr)
library(dummies) # NOTE(review): 'dummies' is archived on CRAN; caret::dummyVars() or model.matrix() are maintained alternatives
# Set the working directory (machine-specific path; adjust as needed)
setwd("~/Desktop/ATP")
# Load the training data set
tennis_data <- read.csv("merged.csv",stringsAsFactors = FALSE,header = TRUE)
####################################################################
# Exploring raw data
# Understand data
####################################################################
# View its dimensions
dim(tennis_data)
# View its class
class(tennis_data) # dataframe
# Review the first 5 observations
head(tennis_data)
# Explore the structure of the data
str(tennis_data)
glimpse(tennis_data)
# Explore dimensions of the data - 52383 rows and 83 columns
# Check for missing values (anyNA avoids printing the full logical matrix,
# which is what the original is.na(tennis_data) call did)
anyNA(tennis_data)
#################################################################
#Subsetting data
################################################################
# Take only the first 26 columns of results data
names(tennis_data)[1:26]
# We want to filter data for the Autralian Tennis Open tournaments so that we can work with a subset of data
aust_open <- tennis_data[tennis_data$Tournament=="Australian Open", 1:26]
# View structure of training data the dplyr way.
glimpse(aust_open)
# Save the dataframe to a csv file to write the csv file into R working folder:
write.csv(aust_open,file = "aust_open.csv", row.names = FALSE)
##############################################################
# Pre-Processing the Training Data (Data Cleaning)
##############################################################
# Exported aust_open.csv file was exported and a new column was created in Excel to extract the year from the data with non-standardised formatting
# NOTE(review): the file read below ("aus_open.csv") differs from the one
# written above ("aust_open.csv") -- confirm the Excel-edited copy really is
# saved under this name.
a <- read.csv("aus_open.csv",stringsAsFactors = FALSE,header = TRUE)
#############################################################
# Exploratory Data Analysis
#############################################################
glimpse(a) # view the structure of the training data
summary(a) # descriptive statistics
# Transform character variables into numeric variables
a$W1 <- as.numeric(a$W1)
a$L1 <- as.numeric(a$L1)
a$WRank <- as.numeric(a$WRank)
a$LRank <- as.numeric(a$LRank)
##########################################################
# encoding categorical features
##########################################################
# Convert categorical variables into factors to represent their levels
a$Location <- factor(a$Location)
a$Tournament <- factor(a$Tournament)
a$Series <- factor(a$Series)
a$Court <- factor(a$Court)
a$Surface <- factor(a$Surface)
a$Best.of <- factor(a$Best.of)
a$Round <- factor(a$Round)
a$Winner <- factor(a$Winner)
a$Loser <- factor(a$Loser)
a$Comment <- factor(a$Comment)
glimpse(a) # check that structure of categorical variables have converted with levels
#######################################
# Detect Missing values
######################################
complete.cases(a) # view missing values
which(complete.cases(a)) # view which row has full row values are located in
which(!complete.cases(a)) # view which row has 'full 'NA' row values are located in
na_vec <- which(!complete.cases(a)) # row indices containing at least one NA (the redundant first assignment was removed)
# a[-na_vec] # vector with NA rows removed. We do not want to remove all rows as it will impact on observations
sum(is.na(a)) # Check for any missing values. There are 6810 missing values
mean(is.na(a)) # 10.4 % of data is missing values (5% is the acceptable threshold) hence I will remove the columns 'Series, Court ' which have the most missing values
colSums(is.na(a)) # Number of missing per column/variable
# Proportion of missing values per column
# (vectorised; returns the same named numeric vector as the original sapply loop)
colMeans(is.na(a))
# install.packages("Amelia") in the console
library(Amelia) # (redundant require(Amelia) removed; library() already attaches it)
# plot the missing value map
missmap(a, main = "Missing Map")
## missing values are significant in Lsets, Wsets, L5, W5, L4, W4 and L1)
###########################################
# Impute missing values
############################################
# Techniques from the blog post https://datascienceplus.com/imputing-missing-data-with-r-mice-package/
# remove categorical variables
a.mis <- subset(a, select = -c(Location, Tournament,Date, Series, Court,Surface, Round, Best.of, Winner, Loser, Comment))
summary(a.mis)
# install.packages("mice") into the console
library(mice)
md.pattern(a.mis) # plot of the pattern of the missing values
# install.packages("VIM") into the console
library(VIM) # plot the missing values
# BUG FIX: the original called aggr(data, ...); no object named 'data' is
# defined in this script (the name resolves to utils::data), and
# labels=names(a.mis) shows the plot is meant for a.mis.
aggr_plot <- aggr(a.mis, col=c('navyblue','red'), numbers=TRUE, sortVars=TRUE, labels=names(a.mis), cex.axis=.7, gap=3, ylab=c("Histogram of missing data","Pattern"))
# Impute missing values with "pmm" - predicted mean matching. m=5 imputed data sets is default
imputed_Data <- mice(a.mis, m=5, maxit = 50, method = 'pmm', seed = 500)
summary(imputed_Data)
# inspect that missing data has been imputed
imputed_Data$imp$Lsets
# check imputed method
imputed_Data$meth
# Inspecting the distribution of the original and plotted data
xyplot(imputed_Data,WRank ~ W1+L1+W2+L2+W3+L3+W4+L4+L5+W5+LRank,pch=18,cex=1)
# density plot
densityplot(imputed_Data)
# View the data as individual points
stripplot(imputed_Data, pch = 20, cex = 1.2)
# Create dummy variables for the categorical variables with more than 2 levels
library(dummies)
Round <- dummy(a$Round)
Best.of <- dummy(a$Best.of)
Winner <- dummy(a$Winner)
Loser <- dummy(a$Loser)
Comment <- dummy(a$Comment)
head(a) # check that the values are been converted to dummy variables
str(a)
summary(a) # Descriptive statistics
#####################################################
# Explore and visualise the data in r
####################################################
# Descriptive statistics for each attribute
library(ggplot2)
# Scatterplot of a subset of data - non-linear
#pairs(imputed_Data[, c("WRank","LRank","W1","L1","W2","L2","L3","W3","L4","W4","L5","W5#","Wsets","Lsets")], main = "tennis results data")
# Density plot of numeric variables
# (columns are referenced bare inside aes(); the original aes(x = a$Year) form
# works here but ignores the data argument and breaks facetting/grouping)
p1 <- ggplot(a, aes(x = Year)) + geom_histogram() + ggtitle(" Histogram of Year")
p1
p2 <- ggplot(a, aes(x = WRank)) + geom_histogram()+ ggtitle(" Histogram of Winner's Ranking")
p2
p3 <- ggplot(a, aes(x = LRank)) + geom_histogram()+ ggtitle(" Histogram of Loser's Ranking")
p3
p4 <- ggplot(a, aes(x = W1)) + geom_histogram()+ ggtitle(" Histogram of Winner in the first set")
p4
p5 <- ggplot(a, aes(x = L1)) + geom_histogram()+ ggtitle(" Histogram of Loser in the first set")
p5
p6 <- ggplot(a, aes(x = W2)) + geom_histogram()+ ggtitle(" Histogram of Winner in the second set")
p6
p7 <- ggplot(a, aes(x = L2)) + geom_histogram()+ ggtitle(" Histogram of Loser in the second set")
p7
p8 <- ggplot(a, aes(x = W3)) + geom_histogram()+ ggtitle(" Histogram of Winner in the third set")
p8
p9 <- ggplot(a, aes(x = L3)) + geom_histogram()+ ggtitle(" Histogram of Loser in the third set")
p9
p10 <- ggplot(a, aes(x = W4)) + geom_histogram()+ ggtitle(" Histogram of Winner in the fourth set")
p10
p11 <- ggplot(a, aes(x = L4)) + geom_histogram()+ ggtitle(" Histogram of Loser in the fourth set")
p11
p12 <- ggplot(a, aes(x = W5)) + geom_histogram()+ ggtitle(" Histogram of Winner in the fifth set")
p12
p13 <- ggplot(a, aes(x = L5)) + geom_histogram()+ ggtitle(" Histogram of Loser in the fifth set")
p13
p14 <- ggplot(a, aes(x = Wsets)) + geom_histogram()+ ggtitle(" Histogram of Winner set")
p14
p15 <- ggplot(a, aes(x = Lsets)) + geom_histogram()+ ggtitle(" Histogram of Loser set")
p15
# visualise categorical variables that have been dummy coded or one-hot encoded
p16 <- plot(x = a$Comment,
main = "Distribution of Comment", xlab = "Comment",
ylab = "count")
p16
p17 <- plot(x= a$Winner,main = "Distribution of Winner", xlab = "Winner",
ylab = "count")
p17
p18 <- plot( x = a$Loser, main = "Distribution of Loser", xlab = "Loser",
ylab = "Count")
p18
p19 <- plot( x = a$Best.of, main = "Distribution of Best.of", xlab = "Best Of",
ylab = "Count")
p19
p20 <- plot( x = a$Round, main = "Distribution of Tennis Round", xlab = "Round",
ylab = "Count")
p20
p21 <- barplot(table(a$Winner), main="Name of Tennis Winners")
xyplot(imputed_Data,WRank ~ W1+L1+W2+L2+W3+L3+W4+L4+L5+W5+LRank,pch=18,cex=1)
|
\name{SimuChemPC}
\alias{SimuChemPC}
\alias{SimuChemPC, character list, character list, character list, character list,integer}
\title{SimuChemPC}
\description{ This function executes a simulation to compare 4 methods for predicting potent compounds. These methods are EI, GP, NN and RA.}
\usage{
SimuChemPC(dataX, dataY, method="RA", experiment=1)
}
\arguments{
\item{dataX}{ m * n matrix of data (features/descriptors).}
\item{dataY}{ m * 1 matrix of target values consist of potencies, pIC50 or other measurements of compound affinities that are desired to be maximized.}
\item{method}{ One of "EI", "GP", "NN" or "RA". }
\item{experiment}{ An integer value that indicates a number by which experiment repeats. In our published experiment it was set to 25. }
}
\details{
This function provides 4 simulation methods to predict potent compounds.
\code{method} can be RA, NN , EI or GP. The explanation of the abbreviations is listed below.
\code{RA selection:} One compound will be selected randomly and added to train data each time.
\code{NN selection:} The compound which is nearest (based on Tanimoto Coefficient) to the most potent compound in training data is selected and added to train data.
\code{EI selection} A compound for which maximum expected potency improvement is reached, is selected and then it is added to train data.
\code{GP selection} A compound holding maximum potency in test data is selected.
\code{Feature selection}
Feature selection employed in this package is based on Spearman Rank Correlation such that
before each training step those attributes in which revealed a significant Spearman rank correlation
with the logarithmic potency values (q-value < 5%) of the training data are selected. Q-values
are computed from original p-values via the multiple testing correction method by Benjamini and Hochberg.
\code{The purpose of simulation step}
Simulation step is employed to select the compound(in the case where input files are chemical compounds)
in which maximal expected potency improvement is met. Subsequently, this compound is added to train data
and simulation continues until all test data are consumed. Finally, the number of simulation steps is determined which
the algorithm used to select the most potent compound in the "original" test set.
\code{In this code, given our data sets (chemical compounds), we do the followings:}
1. We split our data into two distinct parts, namely Train and Test data
2. We do normalization on both parts
3. We employ a specific feature selection algorithm (i.e. Multiple Testing Correction) to overcome high dimensionality
4. Then we benefit Gaussian Process Regression in order to learn our model iteratively such that in each iteration training data are trained, the model is learnt and prediction is done for test data. One compound holding specific property will be added to train data and the progress will repeat until no test data is left.
Result of this work is accepted in the Journal of Chemical Information and Modeling within the subject "Predicting Potent Compounds via Model-Based Global Optimization".
}
\value{returns a matrix (m * experiment) of original potencies in test set.
}
\references{
\code{1.}Predicting Potent Compounds via Model-Based Global Optimization, Journal of Chemical Information and Modeling, 2013, 53 (3), pp 553-559, M Ahmadi, M Vogt, P Iyer, J Bajorath, H Froehlich.
\code{2.}Software MOE is used to calculate the numerical descriptors in data sets. Ref: http://www.chemcomp.com/MOE-Molecular_Operating_Environment.htm
\code{3.}ChEMBL was the source of the compound data and potency annotations in data sets. Ref: https://www.ebi.ac.uk/chembl/
}
\author{Mohsen Ahmadi}
\examples{
x = as.data.frame(array(1:100, dim=c(20,5)))
y = as.matrix(as.numeric(array(1:20, dim=c(20,1))))
SimuChemPC(x, y, "RA", 5)
}
\keyword{chemical, potent compounds, constraint global optimization, expected potency improvement, gaussian process}
| /man/SimuChemPC.Rd | no_license | cran/SimuChemPC | R | false | false | 3,991 | rd | \name{SimuChemPC}
\alias{SimuChemPC}
\alias{SimuChemPC, character list, character list, character list, character list,integer}
\title{SimuChemPC}
\description{ This function executes a simulation to compare 4 methods for predicting potent compounds. These methods are EI, GP, NN and RA.}
\usage{
SimuChemPC(dataX, dataY, method="RA", experiment=1)
}
\arguments{
\item{dataX}{ m * n matrix of data (features/descriptors).}
\item{dataY}{ m * 1 matrix of target values consist of potencies, pIC50 or other measurements of compound affinities that are desired to be maximized.}
\item{method}{ One of "EI", "GP", "NN" or "RA". }
\item{experiment}{ An integer value that indicates a number by which experiment repeats. In our published experiment it was set to 25. }
}
\details{
This function provides 4 simulation methods to predict potent compounds.
\code{method} can be RA, NN , EI or GP. The explanation of the abbreviations is listed below.
\code{RA selection:} One compound will be selected randomly and added to train data each time.
\code{NN selection:} The compound which is nearest (based on Tanimoto Coefficient) to the most potent compound in training data is selected and added to train data.
\code{EI selection} A compound for which maximum expected potency improvement is reached, is selected and then it is added to train data.
\code{GP selection} A compound holding maximum potency in test data is selected.
\code{Feature selection}
Feature selection employed in this package is based on Spearman Rank Correlation such that
before each training step those attributes in which revealed a significant Spearman rank correlation
with the logarithmic potency values (q-value < 5%) of the training data are selected. Q-values
are computed from original p-values via the multiple testing correction method by Benjamini and Hochberg.
\code{The purpose of simulation step}
Simulation step is employed to select the compound(in the case where input files are chemical compounds)
in which maximal expected potency improvement is met. Subsequently, this compound is added to train data
and simulation continues until all test data are consumed. Finally, the number of simulation steps is determined which
the algorithm used to select the most potent compound in the "original" test set.
\code{In this code, given our data sets (chemical compounds), we do the followings:}
1. We split our data into two distinct parts, namely Train and Test data
2. We do normalization on both parts
3. We employ a specific feature selection algorithm (i.e. Multiple Testing Correction) to overcome high dimensionality
4. Then we benefit Gaussian Process Regression in order to learn our model iteratively such that in each iteration training data are trained, the model is learnt and prediction is done for test data. One compound holding specific property will be added to train data and the progress will repeat until no test data is left.
Result of this work is accepted in the Journal of Chemical Information and Modeling within the subject "Predicting Potent Compounds via Model-Based Global Optimization".
}
\value{returns a matrix (m * experiment) of original potencies in test set.
}
\references{
\code{1.}Predicting Potent Compounds via Model-Based Global Optimization, Journal of Chemical Information and Modeling, 2013, 53 (3), pp 553-559, M Ahmadi, M Vogt, P Iyer, J Bajorath, H Froehlich.
\code{2.}Software MOE is used to calculate the numerical descriptors in data sets. Ref: http://www.chemcomp.com/MOE-Molecular_Operating_Environment.htm
\code{3.}ChEMBL was the source of the compound data and potency annotations in data sets. Ref: https://www.ebi.ac.uk/chembl/
}
\author{Mohsen Ahmadi}
\examples{
x = as.data.frame(array(1:100, dim=c(20,5)))
y = as.matrix(as.numeric(array(1:20, dim=c(20,1))))
SimuChemPC(x, y, "RA", 5)
}
\keyword{chemical, potent compounds, constraint global optimization, expected potency improvement, gaussian process}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/itan.R
\name{pBis}
\alias{pBis}
\title{Correlación biserial puntual.}
\usage{
pBis(respuestas, clave, alternativas, correccionPje = TRUE, digitos = 2)
}
\arguments{
\item{respuestas}{Un data frame con las alternativas seleccionadas por los
estudiantes a cada ítem de la prueba.}
\item{clave}{Un data frame con la alternativa correcta para cada ítem.}
\item{alternativas}{Un vector con las alternativas posibles para cada ítem.}
\item{correccionPje}{Un valor lógico para usar o no la corrección de puntaje.
La corrección de puntaje consiste en restar del puntaje total el punto obtenido
por el ítem analizado.}
\item{digitos}{La cantidad de dígitos significativos que tendrá el resultado.}
}
\value{
Un data frame con la correlación biserial puntual para cada alternativa
en cada ítem.
}
\description{
Calcula la correlación biserial puntual para cada alternativa en cada ítem con
respecto al puntaje obtenido en la prueba.
}
\details{
Para su cálculo se utiliza la siguiente ecuación:
\deqn{
r_{bp} = \frac{\overline{X_{p}}-\overline{X_{q}}}{\sigma_{X}}\sqrt{p \cdot q}
}
}
\examples{
respuestas <- datos[, -1]
alternativas <- LETTERS[1:5]
pBis(respuestas, clave, alternativas)
}
\references{
Attorresi, H, Galibert, M. y Aguerri, M. (1999). Valoración de los
ejercicios en las pruebas de rendimiento escolar. Educación Matemática. Vol. 11 No. 3, pp. 104-119.
Recuperado de \url{http://www.revista-educacion-matematica.org.mx/descargas/Vol11/3/10Attorresi.pdf}
}
\seealso{
\code{\link{analizarAlternativas}}, \code{\link{calcularFrecuenciaAlternativas}}
\code{\link{datos}} y \code{\link{clave}}.
}
| /man/pBis.Rd | no_license | cran/itan | R | false | true | 1,705 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/itan.R
\name{pBis}
\alias{pBis}
\title{Correlación biserial puntual.}
\usage{
pBis(respuestas, clave, alternativas, correccionPje = TRUE, digitos = 2)
}
\arguments{
\item{respuestas}{Un data frame con las alternativas seleccionadas por los
estudiantes a cada ítem de la prueba.}
\item{clave}{Un data frame con la alternativa correcta para cada ítem.}
\item{alternativas}{Un vector con las alternativas posibles para cada ítem.}
\item{correccionPje}{Un valor lógico para usar o no la corrección de puntaje.
La corrección de puntaje consiste en restar del puntaje total el punto obtenido
por el ítem analizado.}
\item{digitos}{La cantidad de dígitos significativos que tendrá el resultado.}
}
\value{
Un data frame con la correlación biserial puntual para cada alternativa
en cada ítem.
}
\description{
Calcula la correlación biserial puntual para cada alternativa en cada ítem con
respecto al puntaje obtenido en la prueba.
}
\details{
Para su cálculo se utiliza la siguiente ecuación:
\deqn{
r_{bp} = \frac{\overline{X_{p}}-\overline{X_{q}}}{\sigma_{X}}\sqrt{p \cdot q}
}
}
\examples{
respuestas <- datos[, -1]
alternativas <- LETTERS[1:5]
pBis(respuestas, clave, alternativas)
}
\references{
Attorresi, H, Galibert, M. y Aguerri, M. (1999). Valoración de los
ejercicios en las pruebas de rendimiento escolar. Educación Matemática. Vol. 11 No. 3, pp. 104-119.
Recuperado de \url{http://www.revista-educacion-matematica.org.mx/descargas/Vol11/3/10Attorresi.pdf}
}
\seealso{
\code{\link{analizarAlternativas}}, \code{\link{calcularFrecuenciaAlternativas}}
\code{\link{datos}} y \code{\link{clave}}.
}
|
#' Scales for smiling and frowning
#'
#' \code{scale_smile} lets you customise how smiles are generated from your data.
#' It also lets you tweak the appearance of legends and so on.
#'
#' Use \code{range} to vary how happily/sadly your maximum/minimum values are represented.
#' Minima smaller than -1 and maxima greater than +1 are possible but might look odd!
#' You can use \code{midpoint} to set a specific 'zero' value in your data or to have smiles represented as relative to average.
#'
#' The function \code{scale_smile} is an alias of \code{scale_smile_continuous}.
#' At some point we might also want to design a \code{scale_smile_discrete}, \code{scale_smile_manual} and so on.
#'
#' Legends are a work in progress. In particular, \code{size} mappings might produce odd results.
#'
#' @param ... Other arguments passed onto \code{\link[ggplot2]{continuous_scale}} to control name, limits, breaks, labels and so forth.
#' @param range Output range of smiles. +1 corresponds to a full smile and -1 corresponds to a full frown.
#' @param midpoint A value or function of your data that will return a neutral/straight face, i.e. \code{:-|}
#'
#' @seealso \code{\link{geom_chernoff}}, \code{\link{scale_brow}}
#'
#' @importFrom scales rescale_mid
#'
#' @examples
#' library(ggplot2)
#' p <- ggplot(iris) +
#' aes(Sepal.Width, Sepal.Length, fill = Species, smile = Sepal.Length) +
#' geom_chernoff()
#' p
#' p + scale_smile_continuous(midpoint = min)
#' p + scale_smile_continuous(range = c(-.5, 2))
#'
#' @rdname scale_smile
#'
#' @return
#' A \code{\link[ggplot2:ggplot2-ggproto]{Scale}} layer object for use with \code{ggplot2}.
#'
#' @export
scale_smile_continuous <- function(..., range = c(-1, 1), midpoint = mean) {
  # Resolve `midpoint` into a function: a numeric constant becomes a function
  # that ignores its input; anything else is looked up as a function by name
  # or used directly.
  neutral_fun <- if (is.numeric(midpoint)) {
    function(...) midpoint
  } else {
    match.fun(midpoint)
  }
  # Rescaler mapping data onto `range`, centred on the neutral value.
  rescaler <- function(x) {
    scales::rescale_mid(x, to = range, mid = neutral_fun(x, na.rm = TRUE))
  }
  continuous_scale('smile', 'smile_c', rescaler, ..., na.value = 1)
}
#' @rdname scale_smile
#' @export
scale_smile <- scale_smile_continuous
| /R/scale_smile.R | no_license | Selbosh/ggChernoff | R | false | false | 2,114 | r | #' Scales for smiling and frowning
#'
#' \code{scale_smile} lets you customise how smiles are generated from your data.
#' It also lets you tweak the appearance of legends and so on.
#'
#' Use \code{range} to vary how happily/sadly your maximum/minimum values are represented.
#' Minima smaller than -1 and maxima greater than +1 are possible but might look odd!
#' You can use \code{midpoint} to set a specific 'zero' value in your data or to have smiles represented as relative to average.
#'
#' The function \code{scale_smile} is an alias of \code{scale_smile_continuous}.
#' At some point we might also want to design a \code{scale_smile_discrete}, \code{scale_smile_manual} and so on.
#'
#' Legends are a work in progress. In particular, \code{size} mappings might produce odd results.
#'
#' @param ... Other arguments passed onto \code{\link[ggplot2]{continuous_scale}} to control name, limits, breaks, labels and so forth.
#' @param range Output range of smiles. +1 corresponds to a full smile and -1 corresponds to a full frown.
#' @param midpoint A value or function of your data that will return a neutral/straight face, i.e. \code{:-|}
#'
#' @seealso \code{\link{geom_chernoff}}, \code{\link{scale_brow}}
#'
#' @importFrom scales rescale_mid
#'
#' @examples
#' library(ggplot2)
#' p <- ggplot(iris) +
#' aes(Sepal.Width, Sepal.Length, fill = Species, smile = Sepal.Length) +
#' geom_chernoff()
#' p
#' p + scale_smile_continuous(midpoint = min)
#' p + scale_smile_continuous(range = c(-.5, 2))
#'
#' @rdname scale_smile
#'
#' @return
#' A \code{\link[ggplot2:ggplot2-ggproto]{Scale}} layer object for use with \code{ggplot2}.
#'
#' @export
scale_smile_continuous <- function(..., range = c(-1, 1), midpoint = mean) {
  # Resolve `midpoint` into a function: a numeric constant becomes a function
  # that ignores its input; anything else is looked up as a function by name
  # or used directly.
  neutral_fun <- if (is.numeric(midpoint)) {
    function(...) midpoint
  } else {
    match.fun(midpoint)
  }
  # Rescaler mapping data onto `range`, centred on the neutral value.
  rescaler <- function(x) {
    scales::rescale_mid(x, to = range, mid = neutral_fun(x, na.rm = TRUE))
  }
  continuous_scale('smile', 'smile_c', rescaler, ..., na.value = 1)
}
#' @rdname scale_smile
#' @export
scale_smile <- scale_smile_continuous
|
---
title: "Incredible Journey of Tesla Candle stick analysis"
author: "Biswa Pujarini"
date: "26/07/2020"
output:
html_document
---
library(plotly)
library(quantmod)
# Download TSLA daily price data from Yahoo Finance; getSymbols creates an
# object named TSLA in the workspace.
getSymbols("TSLA",src='yahoo')
df <- data.frame(Date=index(TSLA),coredata(TSLA))
# create Bollinger Bands from high/low/close; the dn/mavg/up columns are
# plotted further below
bbands <- BBands(TSLA[,c("TSLA.High","TSLA.Low","TSLA.Close")])
# join and subset data: bind the first three band columns onto the price
# frame and keep observations from 2018-02-14 onwards
df <- subset(cbind(df, data.frame(bbands[,1:3])), Date >= "2018-02-14")
# colors column for increasing and decreasing
# Vectorised replacement for the original row-by-row for loop: classify each
# day by whether it closed at or above its open. Produces the identical
# 'Increasing'/'Decreasing' labels.
df$direction <- ifelse(df$TSLA.Close >= df$TSLA.Open, 'Increasing', 'Decreasing')
i <- list(line = list(color = '#17BECF'))
d <- list(line = list(color = '#7F7F7F'))
# plot candlestick chart
fig <- df %>% plot_ly(x = ~Date, type="candlestick",
open = ~TSLA.Open, close = ~TSLA.Close,
high = ~TSLA.High, low = ~TSLA.Low, name = "TSLA",
increasing = i, decreasing = d)
fig <- fig %>% add_lines(x = ~Date, y = ~up , name = "B Bands",
line = list(color = '#ccc', width = 0.5),
legendgroup = "Bollinger Bands",
hoverinfo = "none", inherit = F)
fig <- fig %>% add_lines(x = ~Date, y = ~dn, name = "B Bands",
line = list(color = '#ccc', width = 0.5),
legendgroup = "Bollinger Bands", inherit = F,
showlegend = FALSE, hoverinfo = "none")
fig <- fig %>% add_lines(x = ~Date, y = ~mavg, name = "Mv Avg",
line = list(color = '#E377C2', width = 0.5),
hoverinfo = "none", inherit = F)
fig <- fig %>% layout(yaxis = list(title = "Price"))
# plot volume bar chart
fig2 <- df
fig2 <- fig2 %>% plot_ly(x=~Date, y=~TSLA.Volume, type='bar', name = "TSLA Volume",
color = ~direction, colors = c('#17BECF','#7F7F7F'))
fig2 <- fig2 %>% layout(yaxis = list(title = "Volume"))
# create rangeselector buttons
rs <- list(visible = TRUE, x = 0.5, y = -0.055,
xanchor = 'center', yref = 'paper',
font = list(size = 9),
buttons = list(
list(count=1,
label='RESET',
step='all'),
list(count=1,
label='1 YR',
step='year',
stepmode='backward'),
list(count=3,
label='3 MO',
step='month',
stepmode='backward'),
list(count=1,
label='1 MO',
step='month',
stepmode='backward')
))
# subplot with shared x axis
fig <- subplot(fig, fig2, heights = c(0.7,0.2), nrows=2,
shareX = TRUE, titleY = TRUE)
fig <- fig %>% layout(title = paste("Tesla: 2018-02-14 -",Sys.Date()),
xaxis = list(rangeselector = rs),
legend = list(orientation = 'h', x = 0.5, y = 1,
xanchor = 'center', yref = 'paper',
font = list(size = 10),
bgcolor = 'transparent'))
fig | /TSLACandlestickpattern.R | no_license | biswapm/ChartAnalysis | R | false | false | 3,235 | r |
---
title: "Incredible Journey of Tesla Candle stick analysis"
author: "Biswa Pujarini"
date: "26/07/2020"
output:
html_document
---
library(plotly)
library(quantmod)
getSymbols("TSLA",src='yahoo')
df <- data.frame(Date=index(TSLA),coredata(TSLA))
# create Bollinger Bands
bbands <- BBands(TSLA[,c("TSLA.High","TSLA.Low","TSLA.Close")])
# join and subset data
df <- subset(cbind(df, data.frame(bbands[,1:3])), Date >= "2018-02-14")
# colors column for increasing and decreasing
# Label each trading day by candle direction: 'Increasing' when the close is
# at or above the open, otherwise 'Decreasing'. A single vectorized ifelse()
# replaces the original element-by-element loop (same result, and it no
# longer leaves a loop index `i` lying around to be shadowed later).
df$direction <- ifelse(df$TSLA.Close >= df$TSLA.Open, 'Increasing', 'Decreasing')
i <- list(line = list(color = '#17BECF'))
d <- list(line = list(color = '#7F7F7F'))
# plot candlestick chart
fig <- df %>% plot_ly(x = ~Date, type="candlestick",
open = ~TSLA.Open, close = ~TSLA.Close,
high = ~TSLA.High, low = ~TSLA.Low, name = "TSLA",
increasing = i, decreasing = d)
fig <- fig %>% add_lines(x = ~Date, y = ~up , name = "B Bands",
line = list(color = '#ccc', width = 0.5),
legendgroup = "Bollinger Bands",
hoverinfo = "none", inherit = F)
fig <- fig %>% add_lines(x = ~Date, y = ~dn, name = "B Bands",
line = list(color = '#ccc', width = 0.5),
legendgroup = "Bollinger Bands", inherit = F,
showlegend = FALSE, hoverinfo = "none")
fig <- fig %>% add_lines(x = ~Date, y = ~mavg, name = "Mv Avg",
line = list(color = '#E377C2', width = 0.5),
hoverinfo = "none", inherit = F)
fig <- fig %>% layout(yaxis = list(title = "Price"))
# plot volume bar chart
fig2 <- df
fig2 <- fig2 %>% plot_ly(x=~Date, y=~TSLA.Volume, type='bar', name = "TSLA Volume",
color = ~direction, colors = c('#17BECF','#7F7F7F'))
fig2 <- fig2 %>% layout(yaxis = list(title = "Volume"))
# create rangeselector buttons
rs <- list(visible = TRUE, x = 0.5, y = -0.055,
xanchor = 'center', yref = 'paper',
font = list(size = 9),
buttons = list(
list(count=1,
label='RESET',
step='all'),
list(count=1,
label='1 YR',
step='year',
stepmode='backward'),
list(count=3,
label='3 MO',
step='month',
stepmode='backward'),
list(count=1,
label='1 MO',
step='month',
stepmode='backward')
))
# subplot with shared x axis
fig <- subplot(fig, fig2, heights = c(0.7,0.2), nrows=2,
shareX = TRUE, titleY = TRUE)
fig <- fig %>% layout(title = paste("Tesla: 2018-02-14 -",Sys.Date()),
xaxis = list(rangeselector = rs),
legend = list(orientation = 'h', x = 0.5, y = 1,
xanchor = 'center', yref = 'paper',
font = list(size = 10),
bgcolor = 'transparent'))
fig |
#' Delayed Release of a Resource
#'
#' This brick encapsulates a delayed release: the arrival releases the resource
#' and continues its way immediately, but the resource is busy for an additional
#' period of time.
#'
#' @inheritParams simmer::release
#' @inheritParams simmer::timeout
#' @inheritParams simmer::get_capacity
#' @inheritParams simmer::add_resource
#' @inheritParams simmer::clone
#'
#' @return Returns the following chain of activities: \code{\link[simmer]{clone}}
#' > \code{\link[simmer:clone]{synchronize}} (see examples below).
#' @export
#'
#' @examples
#' ## These are equivalent for a non-preemptive resource:
#' trajectory() %>%
#' delayed_release("res1", 5, 1)
#'
#' trajectory() %>%
#' clone(
#' 2,
#' trajectory() %>%
#' set_capacity("res1", -1, mod="+") %>%
#' release("res1", 1),
#' trajectory() %>%
#' timeout(5) %>%
#' set_capacity("res1", 1, mod="+")
#' ) %>%
#' synchronize(wait=FALSE)
#'
#' ## These are equivalent for a preemptive resource:
#' trajectory() %>%
#' delayed_release("res2", 5, 1, preemptive=TRUE)
#'
#' trajectory() %>%
#' clone(
#' 2,
#' trajectory() %>%
#' release("res2", 1),
#' trajectory() %>%
#' set_prioritization(c(rep(.Machine$integer.max, 2), 0)) %>%
#' seize("res2", 1) %>%
#' timeout(5) %>%
#' release("res2", 1)
#' ) %>%
#' synchronize(wait=FALSE)
#'
delayed_release <- function(.trj, resource, task, amount=1, preemptive=FALSE, mon_all=FALSE) {
if (!preemptive) {
.clone <- clone(
.trj, 2,
trajectory() %>%
set_capacity(resource, Minus(amount), mod="+") %>%
release(resource, amount),
trajectory() %>%
timeout(task) %>%
set_capacity(resource, amount, mod="+")
)
} else {
.clone <- clone(
.trj, 2,
trajectory() %>%
release(resource, amount),
trajectory() %>%
set_prioritization(c(rep(.Machine$integer.max, 2), 0)) %>%
seize(resource, amount) %>%
timeout(task) %>%
release(resource, amount)
)
}
.clone %>% synchronize(wait=FALSE, mon_all=mon_all)
}
#' @rdname delayed_release
#' @export
delayed_release_selected <- function(.trj, task, amount=1, preemptive=FALSE, mon_all=FALSE) {
if (!preemptive) {
.clone <- clone(
.trj, 2,
trajectory() %>%
set_capacity_selected(Minus(amount), mod="+") %>%
release_selected(amount),
trajectory() %>%
timeout(task) %>%
set_capacity_selected(amount, mod="+")
)
} else {
.clone <- clone(
.trj, 2,
trajectory() %>%
release_selected(amount),
trajectory() %>%
set_prioritization(c(rep(.Machine$integer.max, 2), 0)) %>%
seize_selected(amount) %>%
timeout(task) %>%
release_selected(amount)
)
}
.clone %>% synchronize(wait=FALSE, mon_all=mon_all)
}
| /R/delayed_release.R | no_license | r-simmer/simmer.bricks | R | false | false | 2,900 | r | #' Delayed Release of a Resource
#'
#' This brick encapsulates a delayed release: the arrival releases the resource
#' and continues its way immediately, but the resource is busy for an additional
#' period of time.
#'
#' @inheritParams simmer::release
#' @inheritParams simmer::timeout
#' @inheritParams simmer::get_capacity
#' @inheritParams simmer::add_resource
#' @inheritParams simmer::clone
#'
#' @return Returns the following chain of activities: \code{\link[simmer]{clone}}
#' > \code{\link[simmer:clone]{synchronize}} (see examples below).
#' @export
#'
#' @examples
#' ## These are equivalent for a non-preemptive resource:
#' trajectory() %>%
#' delayed_release("res1", 5, 1)
#'
#' trajectory() %>%
#' clone(
#' 2,
#' trajectory() %>%
#' set_capacity("res1", -1, mod="+") %>%
#' release("res1", 1),
#' trajectory() %>%
#' timeout(5) %>%
#' set_capacity("res1", 1, mod="+")
#' ) %>%
#' synchronize(wait=FALSE)
#'
#' ## These are equivalent for a preemptive resource:
#' trajectory() %>%
#' delayed_release("res2", 5, 1, preemptive=TRUE)
#'
#' trajectory() %>%
#' clone(
#' 2,
#' trajectory() %>%
#' release("res2", 1),
#' trajectory() %>%
#' set_prioritization(c(rep(.Machine$integer.max, 2), 0)) %>%
#' seize("res2", 1) %>%
#' timeout(5) %>%
#' release("res2", 1)
#' ) %>%
#' synchronize(wait=FALSE)
#'
delayed_release <- function(.trj, resource, task, amount=1, preemptive=FALSE, mon_all=FALSE) {
  # Split the arrival into two clones: `free_now` lets the arrival continue
  # immediately, while `hold_on` keeps the resource busy for `task` more
  # time units. The pair is then re-joined without waiting.
  if (!preemptive) {
    # Non-preemptive: shrink capacity before releasing, then restore it
    # after the extra delay. (Minus() is a project helper, presumably
    # negating `amount` -- confirm against the package internals.)
    free_now <- trajectory() %>%
      set_capacity(resource, Minus(amount), mod="+") %>%
      release(resource, amount)
    hold_on <- trajectory() %>%
      timeout(task) %>%
      set_capacity(resource, amount, mod="+")
  } else {
    # Preemptive: the holding clone re-seizes at maximum priority so it
    # cannot be displaced while it occupies the resource for the delay.
    free_now <- trajectory() %>%
      release(resource, amount)
    hold_on <- trajectory() %>%
      set_prioritization(c(rep(.Machine$integer.max, 2), 0)) %>%
      seize(resource, amount) %>%
      timeout(task) %>%
      release(resource, amount)
  }
  clone(.trj, 2, free_now, hold_on) %>%
    synchronize(wait=FALSE, mon_all=mon_all)
}
#' @rdname delayed_release
#' @export
delayed_release_selected <- function(.trj, task, amount=1, preemptive=FALSE, mon_all=FALSE) {
  # Variant of delayed_release() operating on the previously selected
  # resource (the *_selected family of simmer verbs); the two-clone
  # structure is otherwise identical.
  if (!preemptive) {
    free_now <- trajectory() %>%
      set_capacity_selected(Minus(amount), mod="+") %>%
      release_selected(amount)
    hold_on <- trajectory() %>%
      timeout(task) %>%
      set_capacity_selected(amount, mod="+")
  } else {
    # Re-seize at maximum priority so the holding clone is not preempted
    # during the extra busy period.
    free_now <- trajectory() %>%
      release_selected(amount)
    hold_on <- trajectory() %>%
      set_prioritization(c(rep(.Machine$integer.max, 2), 0)) %>%
      seize_selected(amount) %>%
      timeout(task) %>%
      release_selected(amount)
  }
  clone(.trj, 2, free_now, hold_on) %>%
    synchronize(wait=FALSE, mon_all=mon_all)
}
|
# Stub for a tidy k-means wrapper; no implementation yet.
#' Tidy k-means (placeholder)
#'
#' Not yet implemented: the body is empty, so calling this function
#' currently returns \code{NULL}.
#'
#' @return \code{NULL} until an implementation is written.
#' @export
#'
#' @examples
#' tidy_kmeans()
tidy_kmeans <- function() {
}
| /R/sptidy.R | permissive | JacobMcFarlane/sptidy | R | false | false | 92 | r | # test fcn
#' Title
#'
#' @return
#' @export
#'
#' @examples
tidy_kmeans <- function() {
}
|
#' Plot housekeeping expression levels
#'
#' @param eset An ExpressionSet object
#' @param actin Acting gene id
#' @param gapdh GAPDH gene id
#' @param id Column name for the gene identifier used in eset object
#'
#' @return
#' @export
#'
#' @examples
plot_hk <- function(eset, id, actin, gapdh, filename=NULL, ...) {
if (!is.null(filename)) {
pdf(filename, ...)
exp_gapdh <- exprs(eset)[which(fData(eset)[, id] %in% gapdh)[1],]
exp_actin <- exprs(eset)[which(fData(eset)[, id] %in% actin)[1],]
exp_df <- data.frame(samples = names(exp_actin),
exp_actin, exp_gapdh, row.names = NULL, stringsAsFactors = F)
if (grepl(".CEL.gz", exp_df$samples[1])) {
exp_df$samples <- gsub(".CEL.gz", "", exp_df$samples)
}
exp_df$samples <- factor(exp_df$samples, levels = exp_df$samples)
lim <- range(c(range(exp_actin), range(exp_gapdh)))
lim <- lim + c(-0.5, 0.5)
print(ggplot2::ggplot(exp_df) +
ggplot2::geom_line(aes(x = samples, y = exp_actin, group = 1, col = "Actin"), lwd = 1) +
ggplot2::geom_point(aes(x = samples, y = exp_actin, group = 1), col = "blue") +
ggplot2::geom_line(aes(x = samples, y = exp_gapdh, group = 2, col = "GAPDH"), lwd = 1) +
ggplot2::geom_point(aes(x = samples, y = exp_gapdh, group = 2), col = "red") +
ggplot2::scale_y_continuous(limits = lim) +
ggplot2::scale_color_manual(values = c("Actin" = "blue", "GAPDH" = "red")) +
ggplot2::labs(title = "Housekeeping genes expression levels",
x = "Samples", y = "log2-normalized expression level", col = "") +
ggplot2::theme_bw() +
ggplot2::theme(axis.text.x = element_text(angle = 90, vjust = 0.5)))
dev.off()
} else {
exp_gapdh <- exprs(eset)[which(fData(eset)[, id] %in% gapdh)[1],]
exp_actin <- exprs(eset)[which(fData(eset)[, id] %in% actin)[1],]
exp_df <- data.frame(samples = names(exp_actin),
exp_actin, exp_gapdh, row.names = NULL, stringsAsFactors = F)
if (grepl(".CEL.gz", exp_df$samples[1])) {
exp_df$samples <- gsub(".CEL.gz", "", exp_df$samples)
}
exp_df$samples <- factor(exp_df$samples, levels = exp_df$samples)
lim <- range(c(range(exp_actin), range(exp_gapdh)))
lim <- lim + c(-0.5, 0.5)
print(ggplot2::ggplot(exp_df) +
ggplot2::geom_line(aes(x = samples, y = exp_actin, group = 1, col = "Actin"), lwd = 1) +
ggplot2::geom_point(aes(x = samples, y = exp_actin, group = 1), col = "blue") +
ggplot2::geom_line(aes(x = samples, y = exp_gapdh, group = 2, col = "GAPDH"), lwd = 1) +
ggplot2::geom_point(aes(x = samples, y = exp_gapdh, group = 2), col = "red") +
ggplot2::scale_y_continuous(limits = lim) +
ggplot2::scale_color_manual(values = c("Actin" = "blue", "GAPDH" = "red")) +
ggplot2::labs(title = "Housekeeping genes expression levels",
x = "Samples", y = "log2-normalized expression level", col = "") +
ggplot2::theme_bw() +
ggplot2::theme(axis.text.x = element_text(angle = 90, vjust = 0.5)))
}
}
| /R/plot_hk.R | no_license | cfreis/MicroarrayMethods | R | false | false | 3,138 | r | #' Plot housekeeping expression levels
#'
#' @param eset An ExpressionSet object
#' @param actin Acting gene id
#' @param gapdh GAPDH gene id
#' @param id Column name for the gene identifier used in eset object
#'
#' @return
#' @export
#'
#' @examples
plot_hk <- function(eset, id, actin, gapdh, filename=NULL, ...) {
  # Plot expression levels of the two housekeeping genes (actin, GAPDH)
  # across all samples of `eset`. If `filename` is given the plot goes to a
  # PDF; otherwise it is drawn on the current device.
  #
  # The original duplicated the entire plotting code in both branches and
  # leaked the PDF device if plotting errored; on.exit() closes it reliably.
  if (!is.null(filename)) {
    pdf(filename, ...)
    on.exit(dev.off(), add = TRUE)
  }
  # Use the first probe matching each housekeeping gene identifier.
  exp_gapdh <- exprs(eset)[which(fData(eset)[, id] %in% gapdh)[1],]
  exp_actin <- exprs(eset)[which(fData(eset)[, id] %in% actin)[1],]
  exp_df <- data.frame(samples = names(exp_actin),
                       exp_actin, exp_gapdh, row.names = NULL, stringsAsFactors = F)
  # Strip the CEL-file suffix from sample names when present.
  # NOTE(review): pattern is a regex ("." matches any char); fixed = TRUE
  # would be stricter -- confirm intent.
  if (grepl(".CEL.gz", exp_df$samples[1])) {
    exp_df$samples <- gsub(".CEL.gz", "", exp_df$samples)
  }
  # Freeze the sample order on the x axis.
  exp_df$samples <- factor(exp_df$samples, levels = exp_df$samples)
  # Common y limits covering both genes, padded by half a log2 unit.
  lim <- range(c(range(exp_actin), range(exp_gapdh)))
  lim <- lim + c(-0.5, 0.5)
  print(ggplot2::ggplot(exp_df) +
          ggplot2::geom_line(ggplot2::aes(x = samples, y = exp_actin, group = 1, col = "Actin"), lwd = 1) +
          ggplot2::geom_point(ggplot2::aes(x = samples, y = exp_actin, group = 1), col = "blue") +
          ggplot2::geom_line(ggplot2::aes(x = samples, y = exp_gapdh, group = 2, col = "GAPDH"), lwd = 1) +
          ggplot2::geom_point(ggplot2::aes(x = samples, y = exp_gapdh, group = 2), col = "red") +
          ggplot2::scale_y_continuous(limits = lim) +
          ggplot2::scale_color_manual(values = c("Actin" = "blue", "GAPDH" = "red")) +
          ggplot2::labs(title = "Housekeeping genes expression levels",
                        x = "Samples", y = "log2-normalized expression level", col = "") +
          ggplot2::theme_bw() +
          ggplot2::theme(axis.text.x = ggplot2::element_text(angle = 90, vjust = 0.5)))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/metaX.R
\docType{methods}
\name{validation<-}
\alias{validation<-}
\title{validation}
\usage{
validation(para) <- value
}
\arguments{
\item{para}{An object of plsDAPara}
\item{value}{value}
}
\value{
An object of plsDAPara
}
\description{
validation
}
\examples{
para <- new("plsDAPara")
validation(para) <- "CV"
}
\author{
Bo Wen \email{wenbo@genomics.cn}
}
| /man/validation.Rd | no_license | jaspershen/metaX | R | false | true | 467 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/metaX.R
\docType{methods}
\name{validation<-}
\alias{validation<-}
\title{validation}
\usage{
validation(para) <- value
}
\arguments{
\item{para}{An object of plsDAPara}
\item{value}{value}
}
\value{
An object of plsDAPara
}
\description{
validation
}
\examples{
para <- new("plsDAPara")
validation(para) <- "CV"
}
\author{
Bo Wen \email{wenbo@genomics.cn}
}
|
# Restore the three correlogram ggplot objects saved by earlier scripts.
load("Figures/resubmission/onset_correlogram_plot.Rdata")
load("Figures/resubmission/offset_correlogram_plot.Rdata")
load("Figures/resubmission/duration_correlogram_plot.Rdata")

# Stack the three panels vertically, one label per panel.
panel_labels <- c("Emergence", "Termination", "Duration")
cp <- cowplot::plot_grid(
  onset_correlogram_plot,
  offset_correlogram_plot,
  duration_correlogram_plot,
  ncol = 1,
  labels = panel_labels,
  label_x = 0.099
)
cp

library(ggplot2)
# ggsave() writes the most recently displayed plot (cp) to disk.
ggsave("Figures/resubmission/MoransI.png", width = 8, height = 6)
| /scripts/figures/MoransI_Fig.R | no_license | mbelitz/InsectDuration | R | false | false | 613 | r | load("Figures/resubmission/onset_correlogram_plot.Rdata")
load("Figures/resubmission/offset_correlogram_plot.Rdata")
load("Figures/resubmission/duration_correlogram_plot.Rdata")
cp <- cowplot::plot_grid(onset_correlogram_plot, offset_correlogram_plot,
duration_correlogram_plot,
ncol = 1, labels = c("Emergence",
"Termination",
"Duration"),
label_x = 0.099)
cp
library(ggplot2)
ggsave("Figures/resubmission/MoransI.png", width = 8, height = 6)
|
\name{NMixPlugDensMarg}
\alias{NMixPlugDensMarg}
\alias{NMixPlugDensMarg.default}
\alias{NMixPlugDensMarg.NMixMCMC}
\alias{NMixPlugDensMarg.GLMM_MCMC}
\title{
Marginal (univariate) densities: plug-in estimate
}
\description{
This function serves as an inference tool for the MCMC output
obtained using the function \code{\link{NMixMCMC}}. It computes
marginal (univariate) plug-in densities obtained by using posterior
summary statistics (e.g., posterior means) of mixture weights, means
and variances.
}
\usage{
NMixPlugDensMarg(x, \dots)
\method{NMixPlugDensMarg}{default}(x, scale, w, mu, Sigma, \dots)
\method{NMixPlugDensMarg}{NMixMCMC}(x, grid, lgrid=500, scaled=FALSE, \dots)
\method{NMixPlugDensMarg}{GLMM_MCMC}(x, grid, lgrid=500, scaled=FALSE, \dots)
}
\arguments{
\item{x}{a list with the grid values (see below) for
\code{NMixPlugDensMarg.default} function.
An object of class \code{NMixMCMC} for
\code{NMixPlugDensMarg.NMixMCMC} function.
An object of class \code{GLMM_MCMC} for
\code{NMixPlugDensMarg.GLMM_MCMC} function.
}
\item{scale}{a two component list giving the \code{shift} and the
\code{scale}. If not given, shift is equal to zero and scale is
equal to one.
}
\item{w}{a numeric vector with posterior summary statistics for the
mixture weights. The length of this vector determines the number of
mixture components.
}
\item{mu}{a matrix with posterior summary statistics for
mixture means in rows. That is, \code{mu} has
\eqn{K} rows and \eqn{p} columns, where \eqn{K} denotes the number
of mixture components and \eqn{p} is dimension of the mixture
distribution.
}
\item{Sigma}{a list with posterior summary statistics for mixture covariance matrices.
}
\item{grid}{a list with the grid values for each margin in which
the density should be evaluated.
If \code{grid} is not specified, it is created automatically using
the information from the posterior summary statistics stored in \code{x}.
}
  \item{lgrid}{the length of the grid used to create the \code{grid} if
    that is not specified.
  }
\item{scaled}{if \code{TRUE}, the density of shifted and scaled data is
summarized. The shift and scale vector are taken from the
\code{scale} component of the object \code{x}.
}
\item{\dots}{optional additional arguments.}
}
\value{
An object of class \code{NMixPlugDensMarg} which has the following components:
\item{x}{a list with the grid values for each margin. The components
of the list are named \code{x1}, \ldots or take names from
\code{grid} argument.}
\item{dens}{a list with the computed densities for each
margin. The components of the list are named \code{1}, \ldots, i.e.,
\code{dens[[1]]}\eqn{=}\code{dens[["1"]]} is the predictive
density for margin 1 etc.}
There is also a \code{plot} method implemented for the resulting object.
}
\seealso{
\code{\link{plot.NMixPlugDensMarg}}, \code{\link{NMixMCMC}},
\code{\link{GLMM_MCMC}}, \code{\link{NMixPredDensMarg}}.
}
\author{
Arnošt Komárek \email{arnost.komarek@mff.cuni.cz}
}
\keyword{multivariate}
\keyword{dplot}
\keyword{smooth}
| /man/NMixPlugDensMarg.Rd | no_license | cran/mixAK | R | false | false | 3,187 | rd | \name{NMixPlugDensMarg}
\alias{NMixPlugDensMarg}
\alias{NMixPlugDensMarg.default}
\alias{NMixPlugDensMarg.NMixMCMC}
\alias{NMixPlugDensMarg.GLMM_MCMC}
\title{
Marginal (univariate) densities: plug-in estimate
}
\description{
This function serves as an inference tool for the MCMC output
obtained using the function \code{\link{NMixMCMC}}. It computes
marginal (univariate) plug-in densities obtained by using posterior
summary statistics (e.g., posterior means) of mixture weights, means
and variances.
}
\usage{
NMixPlugDensMarg(x, \dots)
\method{NMixPlugDensMarg}{default}(x, scale, w, mu, Sigma, \dots)
\method{NMixPlugDensMarg}{NMixMCMC}(x, grid, lgrid=500, scaled=FALSE, \dots)
\method{NMixPlugDensMarg}{GLMM_MCMC}(x, grid, lgrid=500, scaled=FALSE, \dots)
}
\arguments{
\item{x}{a list with the grid values (see below) for
\code{NMixPlugDensMarg.default} function.
An object of class \code{NMixMCMC} for
\code{NMixPlugDensMarg.NMixMCMC} function.
An object of class \code{GLMM_MCMC} for
\code{NMixPlugDensMarg.GLMM_MCMC} function.
}
\item{scale}{a two component list giving the \code{shift} and the
\code{scale}. If not given, shift is equal to zero and scale is
equal to one.
}
\item{w}{a numeric vector with posterior summary statistics for the
mixture weights. The length of this vector determines the number of
mixture components.
}
\item{mu}{a matrix with posterior summary statistics for
mixture means in rows. That is, \code{mu} has
\eqn{K} rows and \eqn{p} columns, where \eqn{K} denotes the number
of mixture components and \eqn{p} is dimension of the mixture
distribution.
}
\item{Sigma}{a list with posterior summary statistics for mixture covariance matrices.
}
\item{grid}{a list with the grid values for each margin in which
the density should be evaluated.
If \code{grid} is not specified, it is created automatically using
the information from the posterior summary statistics stored in \code{x}.
}
\item{lgrid}{a length of the grid used to create the \code{grid} if
that is not specified.
}
\item{scaled}{if \code{TRUE}, the density of shifted and scaled data is
summarized. The shift and scale vector are taken from the
\code{scale} component of the object \code{x}.
}
\item{\dots}{optional additional arguments.}
}
\value{
An object of class \code{NMixPlugDensMarg} which has the following components:
\item{x}{a list with the grid values for each margin. The components
of the list are named \code{x1}, \ldots or take names from
\code{grid} argument.}
\item{dens}{a list with the computed densities for each
margin. The components of the list are named \code{1}, \ldots, i.e.,
\code{dens[[1]]}\eqn{=}\code{dens[["1"]]} is the predictive
density for margin 1 etc.}
There is also a \code{plot} method implemented for the resulting object.
}
\seealso{
\code{\link{plot.NMixPlugDensMarg}}, \code{\link{NMixMCMC}},
\code{\link{GLMM_MCMC}}, \code{\link{NMixPredDensMarg}}.
}
\author{
Arnošt Komárek \email{arnost.komarek@mff.cuni.cz}
}
\keyword{multivariate}
\keyword{dplot}
\keyword{smooth}
|
library(glmnet)
mydata = read.table("../../../../TrainingSet/FullSet/AvgRank/autonomic_ganglia.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.95,family="gaussian",standardize=TRUE)
sink('./autonomic_ganglia_093.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
| /Model/EN/AvgRank/autonomic_ganglia/autonomic_ganglia_093.R | no_license | esbgkannan/QSMART | R | false | false | 368 | r | library(glmnet)
# Load the training matrix: column 1 holds the response, columns 4 onward
# hold the predictor features.
mydata = read.table("../../../../TrainingSet/FullSet/AvgRank/autonomic_ganglia.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
# Fix the RNG so the 10-fold cross-validation split is reproducible.
set.seed(123)
# Elastic net (alpha = 0.95, i.e. mostly lasso) with 10-fold CV and MSE loss.
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.95,family="gaussian",standardize=TRUE)
# Append the fitted regularization-path summary to a log file;
# the final sink() restores console output.
sink('./autonomic_ganglia_093.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
# Read the full dataset. Per the UCI "Individual household electric power
# consumption" description, missing readings are coded as "?", so map them
# to NA at read time.
data_main <- read.table("./household_power_consumption.txt", header = TRUE, sep = ";",
                        stringsAsFactors = FALSE, dec = ".", na.strings = "?")
# Dates in this file are day/month/year; as.Date() needs the format spelled
# out -- with the default it errors on strings such as "16/12/2006".
data_main$Date <- as.Date(data_main$Date, format = "%d/%m/%Y")
# Keep only the two target days: 2007-02-01 and 2007-02-02.
data_filtered <- data_main[data_main$Date %in% as.Date(c("2007-02-01", "2007-02-02")), ]
# variables
Global_active_power <- as.numeric(data_filtered$Global_active_power)
# plot
png("plot1.png", width=480, height=480)
hist(Global_active_power, main="Global Active Power", xlab="Global Active Power (kilowatts)", col="red")
dev.off()
| /plot1.R | no_license | aormenoa/ExData_Plotting1 | R | false | false | 492 | r | data_main <- read.table("./household_power_consumption.txt", header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".")
# Dates in this file are day/month/year; as.Date() needs the format spelled
# out -- with the default it errors on strings such as "16/12/2006".
data_main$Date <- as.Date(data_main$Date, format = "%d/%m/%Y")
# Keep only the two target days: 2007-02-01 and 2007-02-02.
data_filtered <- data_main[data_main$Date %in% as.Date(c("2007-02-01", "2007-02-02")), ]
# variables
Global_active_power <- as.numeric(data_filtered$Global_active_power)
# plot
png("plot1.png", width=480, height=480)
hist(Global_active_power, main="Global Active Power", xlab="Global Active Power (kilowatts)", col="red")
dev.off()
|
setwd("~/Documents/Julius")
####################################################################
#Library
####################################################################
library(DESeq2)
library(ggplot2)
library(gplots)
library(reshape2)
library(pheatmap)
library(VennDiagram)
library(ggrepel)
library(ggforce)
sampleDataFilename <- 'sampleTable.txt'
sampleTable = read.table(sampleDataFilename,header=TRUE)
head(sampleTable)
htseqDir<-getwd()
## Read in the results from the LibiNorm analysis (the counts files)
ddsHTSeq<-DESeqDataSetFromHTSeqCount(sampleTable=sampleTable,directory = htseqDir,design = ~ condition)
## design<- you say to the test to do everything in relations to condition
## if you have more than one conditions you want to differentiate (for example different genotypes) you change design = ~ condition + genotype
## And perform the analysis (details in the manual)
## And perform the analysis (details in the manual)
dds<-DESeq(ddsHTSeq)
gene_name<-read.delim("~/Downloads/Gene_name_locus.txt")
rownames(dds) <- gene_name[,2]
####################################################################
# Do PCA
####################################################################
#principal component analysis
vst = vst(dds)
v <- plotPCA(vst, intgroup=c("condition"))
v<- v+ geom_label_repel(aes(label = name))
v
pcaData <- DESeq2::plotPCA(vst, intgroup=c("condition"), returnData=TRUE)
percentVar <- round(100 * attr(pcaData, "percentVar"))
#pdf("PCA_parents.pdf", height = 6, width = 6)
ggplot(pcaData, aes(PC1, PC2, color=condition, shape=condition)) +
geom_point(size=3) + geom_mark_ellipse(aes(fill=condition))+
#scale_colour_manual(name="",values = c("a12"="goldenrod2", "gd33"="darkslateblue", "f1"="saddlebrown"))+
xlab(paste0("PC1: ",percentVar[1],"% variance")) +
ylab(paste0("PC2: ",percentVar[2],"% variance")) +
geom_label_repel(aes(label = name)) +
coord_fixed()+theme_classic()
####################################################################
#Plotting Reps
####################################################################
plot_reps <- function(dds, x = 1, y = 2, cond_choice = 1, cond = "condition") {
  # Scatter-compare two replicates of one condition on a log2 scale.
  # `x` and `y` index the replicates within the chosen condition.
  dds <- estimateSizeFactors(dds)
  # Normalised counts restricted to samples of the requested condition.
  norm_counts <- counts(dds, normalized = TRUE)[, dds[[cond]] == cond_choice]
  # log2 with a 0.5 pseudo-count so zero counts stay finite.
  log_vals <- log2(norm_counts[, c(x, y)] + 0.5)
  plot(log_vals, pch = 16, cex = 0.4,
       xlab = paste("rep", x), ylab = paste("rep", y))
  grid(col = "darkgray", lty = "solid", lwd = par("lwd"), equilogs = TRUE)
  title(paste("Comparison of", cond_choice, "replicates"))
}
par(mfrow = c(3,1))
plot_reps(dds, x=1, y=2, cond_choice="E")
plot_reps(dds, x=1, y=3, cond_choice="E")
plot_reps(dds, x=2, y=3, cond_choice="E")
plot_reps(dds, x=1, y=2, cond_choice="M")
plot_reps(dds, x=1, y=3, cond_choice="M")
plot_reps(dds, x=2, y=3, cond_choice="M")
####################################################################
#DEGs
####################################################################
filter_degs <- function(res) {
  # Print the DESeq2-style summary of the unfiltered results (side effect
  # kept from the original), then keep rows with a defined, significant
  # adjusted p-value (padj < 0.05).
  summary(res)
  tested <- res[!is.na(res$padj), ]
  tested[tested$padj < 0.05, ]
}
resultsNames(dds)
E_M_DEGs = results(dds, contrast= c("condition", "E", "M"), alpha = 0.05, pAdjustMethod = "BH")
E_M_DEG = filter_degs(E_M_DEGs)
summary(E_M_DEG)
head(E_M_DEG)
write.table(rownames(E_M_DEG),"E_M_DEG",quote=F,row.names = F,col.names = F)
####################################################################
#Up and Downregulation
####################################################################
E_M_DEG_up <- E_M_DEG[E_M_DEG[,2]>0,]
E_M_DEG_down <- E_M_DEG[E_M_DEG[,2]<0,]
####################################################################
#MA Plots
####################################################################
par(mfrow = c(1,1))
DESeq2::plotMA(E_M_DEGs, ylim=c(-10,15), main='E_M_DEGs')
####################################################################
#Volcano Plots
####################################################################
library(EnhancedVolcano)
EnhancedVolcano(E_M_DEGs,
lab = rownames(E_M_DEGs),
x = 'log2FoldChange',
y = 'pvalue',
xlim = c(-5, 8),
ylim = c(0,60))
####################################################################
#Heatmap
####################################################################
counts = counts(dds, normalized = TRUE)
counts <- counts[apply(counts, MARGIN = 1, FUN = function(x) sd(x) != 0 ),]#it removes genes that are not express and have no variance
colnames(counts) <- c("E1","E2","E3","M1","M2","M3")
counts <- counts[rownames(counts) %in% rownames(E_M_DEG),]
pheatmap((log2(counts+1)), scale = "row",border_color=NA,show_rownames = F,
color = colorRampPalette(rev(brewer.pal(n = 7, name = "RdBu")))(100),main = 'DEGs expression across samples',cluster_rows = T, cluster_cols = T)
###################################################################
#TF terms
###################################################################
library(goseq)
library(tidyr)
library(dplyr)
TFs<- read.delim("./families_data.txt")
###################################################################
#Gage and list construction
###################################################################
library(gage)
ddsHTSeq<-DESeqDataSetFromHTSeqCount(sampleTable=sampleTable,directory = htseqDir,design = ~ condition)
dds<-DESeq(ddsHTSeq)
#Exclude lowly expressed genes for GSEA
DESeq2_negative_gene_IDs <- is.na(as.data.frame(E_M_DEGs$log2FoldChange))
###################################################################
# Build one gene set per TF class for GSEA: gene IDs (column 1 of TFs)
# whose TF annotation matches the class name.
# Fixes: the class vector was recomputed inside every iteration (it is
# loop-invariant), and the loop/name bounds were hard-coded to 52 instead
# of following the data. paste() with a single character argument was a
# no-op and is dropped.
TF_class <- as.character(unique(TFs$TF))
list <- vector("list", length(TF_class))
for (i in seq_along(TF_class)) {
  # NOTE(review): grep() does substring/regex matching, so a class name
  # that is a prefix of another (or contains regex metacharacters) pulls
  # in extra genes; TFs$TF == TF_class[i] would match exactly -- confirm
  # which behaviour is intended before changing it.
  list[[i]] <- TFs[grep(TF_class[i], TFs$TF), 1]
}
names(list) <- TF_class
#Run GAGE command for all leaky and induced expressed transgenics
Enriched <- gage(counts(dds)[!DESeq2_negative_gene_IDs,],list,ref=c(4:6),samp=c(1:3),
rank.test = T,
set.size=c(1,800), compare="unpaired",same.dir = T)
Enriched_greater <- Enriched$greater[1:51,1:5]
Enriched_lesser <- Enriched$less[1:51,1:5]
q.val <- -log10(Enriched_greater[,4])
data<-data.frame(rownames(Enriched_greater), q.val)
colnames(data) <- c("TF","q.val")
library(ggplot2)
ggplot(data[1:51,], aes(x=TF, y=q.val)) +geom_bar(stat="identity") +
geom_col(aes(fill = q.val)) +
scale_fill_gradient2(low = "blue",
high = "red",
mid ="yellow",
midpoint = median(data$q.val)) +
xlab("TF class") +
ylab("-log10(q.val)") +
ggtitle("Gene set enrichment analysis of TF classes upregulated") +
theme_bw(base_size=10) +
theme(
legend.position='none',
legend.background=element_rect(),
plot.title=element_text(angle=0, size=16, face="bold", vjust=1),
axis.text.x=element_text(angle=0, size=10, face="bold", hjust=1.10),
axis.text.y=element_text(angle=0, size=10, face="bold", vjust=0.5),
axis.title=element_text(size=12, face="bold"),
legend.key=element_blank(), #removes the border
legend.key.size=unit(1, "cm"), #Sets overall area/size of the legend
legend.text=element_text(size=14), #Text size
title=element_text(size=14)) +
guides(colour=guide_legend(override.aes=list(size=2.5)))+
geom_hline(yintercept=1.3,linetype="dashed", color = "red") +
ylim(0,1.5)+
coord_flip()
q.val <- -log10(Enriched_lesser[,4])
data<-data.frame(rownames(Enriched_lesser), q.val)
colnames(data) <- c("TF","q.val")
library(ggplot2)
ggplot(data[1:51,], aes(x=TF, y=q.val)) +geom_bar(stat="identity") +
geom_col(aes(fill = q.val)) +
scale_fill_gradient2(low = "blue",
high = "red",
mid ="yellow",
midpoint = median(data$q.val)) +
xlab("TF class") +
ylab("-log10(q.val)") +
ggtitle("Gene set enrichment analysis of TF classes downregulated") +
theme_bw(base_size=10) +
theme(
legend.position='none',
legend.background=element_rect(),
plot.title=element_text(angle=0, size=16, face="bold", vjust=1),
axis.text.x=element_text(angle=0, size=10, face="bold", hjust=1.10),
axis.text.y=element_text(angle=0, size=10, face="bold", vjust=0.5),
axis.title=element_text(size=12, face="bold"),
legend.key=element_blank(), #removes the border
legend.key.size=unit(1, "cm"), #Sets overall area/size of the legend
legend.text=element_text(size=14), #Text size
title=element_text(size=14)) +
guides(colour=guide_legend(override.aes=list(size=2.5)))+
geom_hline(yintercept=1.3,linetype="dashed", color = "red") +
ylim(0,5)+
coord_flip()
###################################################################
library("biomaRt")
library(topGO)
#collect gene names from biomart
mart <- biomaRt::useMart(biomart = "plants_mart",
dataset = "athaliana_eg_gene",
host = 'plants.ensembl.org')
# Get ensembl gene ids and GO terms
GTOGO <- biomaRt::getBM(attributes = c( "ensembl_gene_id",
"go_id"), mart = mart)
#examine result
head (GTOGO)
#Remove blank entries
GTOGO <- GTOGO[GTOGO$go_id != '',]
# convert from table format to list format
geneID2GO <- by(GTOGO$go_id,
GTOGO$ensembl_gene_id,
function(x) as.character(x))
#examine result
head (geneID2GO)
all.genes <- sort(unique(as.character(GTOGO$ensembl_gene_id)))
int.genes <- rownames(E_M_DEG) # some random genes
int.genes <- factor(as.integer(all.genes %in% int.genes))
names(int.genes) = all.genes
go.obj <- new("topGOdata", ontology='BP'
, allGenes = int.genes
, annot = annFUN.gene2GO
, gene2GO = geneID2GO
)
resultsFisher <- runTest(go.obj, algorithm = "elim", statistic = "fisher")
allRes <- GenTable(go.obj, classic = resultsFisher,
orderBy = "Fisher", ranksOf = "classic", topNodes = 17)
plot_go <- function(goterms, name) {
  # Dodged bar plot of significant GO terms: query-list percentage vs
  # background percentage for each term, flipped so terms read horizontally.
  #
  # BUG FIX: the original body read the global `allRes` instead of the
  # `goterms` argument, so any table passed in was silently ignored.
  #
  # goterms: a topGO::GenTable-style data.frame with columns Significant,
  #          Expected, classic, Term, GO_acc.
  # name:    plot title.
  # Returns the ggplot object (also drawn as a side effect).
  goterms$percquery <- goterms$Significant * 100
  goterms$percback <- goterms$Expected * 100
  # FIX: `classic` from GenTable is a *character* column of p-values; the
  # original `goterms$classic < 0.05` compared lexicographically. Coerce to
  # numeric; which() drops rows whose p-value could not be parsed (e.g.
  # "< 1e-30") instead of producing all-NA rows.
  filtered_go <- goterms[which(as.numeric(goterms$classic) < 0.05), ]
  #filtered_go = filtered_go[filtered_go$term_type == "P",]
  filtered_go_perc <- cbind(filtered_go$percquery, filtered_go$percback)
  colnames(filtered_go_perc) <- c("query", "background")
  row.names(filtered_go_perc) <- paste(filtered_go$Term, filtered_go$GO_acc, sep = "-->")
  meled <- melt(filtered_go_perc)
  x <- ggplot(meled, aes(Var1, value, fill = Var2)) +
    geom_bar(stat = "identity", position = "dodge") +
    theme_bw() +
    theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
    xlab("sig GO Term") +
    ylab("Ratio genes with term in list") +
    ggtitle(name) +
    coord_flip()
  plot(x)
  return(x)
}
plot_go(allRes, "E vs M DEG GO terms")
showSigOfNodes(go.obj, score(resultsFisher), firstSigNodes = 20, useInfo = 'pval')
printGraph(go.obj, resultsFisher, firstSigNodes = 17, fn.prefix = "tGO", useInfo = "def", pdfSW = TRUE)
AgriGo <- read.delim('AgriGOv2_table.txt')
###################################################################
#go
####################################################################
plot_go <- function(goterms, name) {
  # Dodged bar plot of FDR-significant GO terms, comparing the percentage of
  # query genes vs background genes annotated with each term.
  #
  # goterms: AgriGO-style table with columns queryitem, querytotal, bgitem,
  #          bgtotal, FDR, Term, GO_acc.
  # name:    plot title.
  # Returns the ggplot object (also drawn as a side effect).
  goterms$percquery <- 100 * goterms$queryitem / goterms$querytotal
  goterms$percback <- 100 * goterms$bgitem / goterms$bgtotal
  sig_terms <- goterms[goterms$FDR < 0.05, ]
  # Two-column matrix (query %, background %), one row per significant term.
  perc_mat <- cbind(query = sig_terms$percquery, background = sig_terms$percback)
  rownames(perc_mat) <- paste(sig_terms$Term, sig_terms$GO_acc, sep = "-->")
  long_form <- melt(perc_mat)
  p <- ggplot(long_form, aes(Var1, value, fill = Var2)) +
    geom_bar(stat = "identity", position = "dodge") +
    theme_bw() +
    theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
    xlab("sig GO Term") +
    ylab("Ratio genes with term in list") +
    ggtitle(name) +
    coord_flip()
  plot(p)
  p
}
v<-plot_go(AgriGo, "0")
plot_go(over_allRes[1:50,],"Top50 over-represented microspore GO term analysis 0.05")
plot_go(under_allRes[1:50,],"Top50 under-represented microspore GO term analysis 0.05")
| /ESF1 peptides.R | no_license | meehanca/ESB1-Peptide---Casparian-Strip | R | false | false | 12,307 | r |
setwd("~/Documents/Julius")
####################################################################
#Library
####################################################################
library(DESeq2)
library(ggplot2)
library(gplots)
library(reshape2)
library(pheatmap)
library(VennDiagram)
library(ggrepel)
library(ggforce)
sampleDataFilename <- 'sampleTable.txt'
sampleTable = read.table(sampleDataFilename,header=TRUE)
head(sampleTable)
htseqDir<-getwd()
## Read in the results from the LibiNorm analysis (the counts files)
ddsHTSeq<-DESeqDataSetFromHTSeqCount(sampleTable=sampleTable,directory = htseqDir,design = ~ condition)
## design<- you say to the test to do everything in relations to condition
## if you have more than one conditions you want to differentiate (for example different genotypes) you change design = ~ condition + genotype
## And perform the analysis (details in the manual)
## And perform the analysis (details in the manual)
dds<-DESeq(ddsHTSeq)
gene_name<-read.delim("~/Downloads/Gene_name_locus.txt")
rownames(dds) <- gene_name[,2]
####################################################################
# Do PCA
####################################################################
#principal component analysis
vst = vst(dds)
v <- plotPCA(vst, intgroup=c("condition"))
v<- v+ geom_label_repel(aes(label = name))
v
pcaData <- DESeq2::plotPCA(vst, intgroup=c("condition"), returnData=TRUE)
percentVar <- round(100 * attr(pcaData, "percentVar"))
#pdf("PCA_parents.pdf", height = 6, width = 6)
ggplot(pcaData, aes(PC1, PC2, color=condition, shape=condition)) +
geom_point(size=3) + geom_mark_ellipse(aes(fill=condition))+
#scale_colour_manual(name="",values = c("a12"="goldenrod2", "gd33"="darkslateblue", "f1"="saddlebrown"))+
xlab(paste0("PC1: ",percentVar[1],"% variance")) +
ylab(paste0("PC2: ",percentVar[2],"% variance")) +
geom_label_repel(aes(label = name)) +
coord_fixed()+theme_classic()
####################################################################
#Plotting Reps
####################################################################
plot_reps <- function(dds, x = 1, y = 2, cond_choice = 1, cond = 'condition') {
  # Scatter plot comparing two replicates of one condition on the log2 scale,
  # as a quick visual check of replicate agreement.
  #
  # dds:         a DESeqDataSet.
  # x, y:        which replicates (column indices within the condition) to plot.
  # cond_choice: the level of `cond` to subset to.
  # cond:        name of the colData column holding the grouping factor.
  ## Estimate the size factors for normalisation
  dds <- estimateSizeFactors(dds)
  ## Extract the normalised counts for the condition you want
  in_group <- dds[[cond]] == cond_choice
  norm_counts <- counts(dds, normalized = TRUE)[, in_group]
  # Take logs of these values (+0.5 avoids log2(0))
  log_vals <- log2(norm_counts[, c(x, y)] + 0.5)
  # And plot
  plot(log_vals, pch = 16, cex = 0.4, xlab = paste('rep', x), ylab = paste('rep', y))
  grid(col = "darkgray", lty = "solid", lwd = par("lwd"), equilogs = TRUE)
  title(paste("Comparison of", cond_choice, "replicates"))
}
par(mfrow = c(3,1))
plot_reps(dds, x=1, y=2, cond_choice="E")
plot_reps(dds, x=1, y=3, cond_choice="E")
plot_reps(dds, x=2, y=3, cond_choice="E")
plot_reps(dds, x=1, y=2, cond_choice="M")
plot_reps(dds, x=1, y=3, cond_choice="M")
plot_reps(dds, x=2, y=3, cond_choice="M")
####################################################################
#DEGs
####################################################################
filter_degs <- function(res) {
  # Keep only rows with a non-missing adjusted p-value below 0.05.
  # `res` is a DESeq2 results table (anything with a `padj` column works).
  summary(res)  # DESeq2's summary method prints a console report as a side effect
  keep <- !is.na(res$padj) & res$padj < 0.05
  res[keep, ]
}
resultsNames(dds)
E_M_DEGs = results(dds, contrast= c("condition", "E", "M"), alpha = 0.05, pAdjustMethod = "BH")
E_M_DEG = filter_degs(E_M_DEGs)
summary(E_M_DEG)
head(E_M_DEG)
write.table(rownames(E_M_DEG),"E_M_DEG",quote=F,row.names = F,col.names = F)
####################################################################
#Up and Downregulation
####################################################################
E_M_DEG_up <- E_M_DEG[E_M_DEG[,2]>0,]
E_M_DEG_down <- E_M_DEG[E_M_DEG[,2]<0,]
####################################################################
#MA Plots
####################################################################
par(mfrow = c(1,1))
DESeq2::plotMA(E_M_DEGs, ylim=c(-10,15), main='E_M_DEGs')
####################################################################
#Volcano Plots
####################################################################
library(EnhancedVolcano)
EnhancedVolcano(E_M_DEGs,
lab = rownames(E_M_DEGs),
x = 'log2FoldChange',
y = 'pvalue',
xlim = c(-5, 8),
ylim = c(0,60))
####################################################################
#Heatmap
####################################################################
counts = counts(dds, normalized = TRUE)
counts <- counts[apply(counts, MARGIN = 1, FUN = function(x) sd(x) != 0 ),]#it removes genes that are not express and have no variance
colnames(counts) <- c("E1","E2","E3","M1","M2","M3")
counts <- counts[rownames(counts) %in% rownames(E_M_DEG),]
pheatmap((log2(counts+1)), scale = "row",border_color=NA,show_rownames = F,
color = colorRampPalette(rev(brewer.pal(n = 7, name = "RdBu")))(100),main = 'DEGs expression across samples',cluster_rows = T, cluster_cols = T)
###################################################################
#TF terms
###################################################################
library(goseq)
library(tidyr)
library(dplyr)
TFs<- read.delim("./families_data.txt")
###################################################################
#Gage and list construction
###################################################################
library(gage)
ddsHTSeq<-DESeqDataSetFromHTSeqCount(sampleTable=sampleTable,directory = htseqDir,design = ~ condition)
dds<-DESeq(ddsHTSeq)
#Exclude lowly expressed genes for GSEA
DESeq2_negative_gene_IDs <- is.na(as.data.frame(E_M_DEGs$log2FoldChange))
###################################################################
# Build a gene-set list for GAGE: one element per TF class, holding the gene
# IDs (column 1 of TFs) annotated with that class.
# FIX: the invariant `as.character(unique(TFs$TF))` was recomputed on every
# loop iteration; it is hoisted out. `paste()` on a single string was a no-op
# and is dropped. Behavior is otherwise unchanged.
TF_class <- as.character(unique(TFs$TF))
list <- list()  # NOTE(review): shadows base::list(); a name like tf_sets would be safer
for (i in seq_len(52)) {
  TF_class_name <- TF_class[i]
  # NOTE(review): grep() is pattern matching, so a class name that is a
  # substring of another class will match both -- confirm this is intended
  # (fixed = TRUE or `TFs$TF == TF_class_name` would match exactly).
  list[[i]] <- TFs[grep(TF_class_name, TFs$TF), 1]
}
names(list) <- TF_class[1:52]
# Run GAGE command for all leaky and induced expressed transgenics.
# ref = columns 4:6 (the M samples) vs samp = columns 1:3 (the E samples),
# per the colnames assigned earlier ("E1".."E3", "M1".."M3").
Enriched <- gage(counts(dds)[!DESeq2_negative_gene_IDs, ], list, ref = c(4:6), samp = c(1:3),
                 rank.test = T,
                 set.size = c(1, 800), compare = "unpaired", same.dir = T)
Enriched_greater <- Enriched$greater[1:51,1:5]
Enriched_lesser <- Enriched$less[1:51,1:5]
q.val <- -log10(Enriched_greater[,4])
data<-data.frame(rownames(Enriched_greater), q.val)
colnames(data) <- c("TF","q.val")
library(ggplot2)
ggplot(data[1:51,], aes(x=TF, y=q.val)) +geom_bar(stat="identity") +
geom_col(aes(fill = q.val)) +
scale_fill_gradient2(low = "blue",
high = "red",
mid ="yellow",
midpoint = median(data$q.val)) +
xlab("TF class") +
ylab("-log10(q.val)") +
ggtitle("Gene set enrichment analysis of TF classes upregulated") +
theme_bw(base_size=10) +
theme(
legend.position='none',
legend.background=element_rect(),
plot.title=element_text(angle=0, size=16, face="bold", vjust=1),
axis.text.x=element_text(angle=0, size=10, face="bold", hjust=1.10),
axis.text.y=element_text(angle=0, size=10, face="bold", vjust=0.5),
axis.title=element_text(size=12, face="bold"),
legend.key=element_blank(), #removes the border
legend.key.size=unit(1, "cm"), #Sets overall area/size of the legend
legend.text=element_text(size=14), #Text size
title=element_text(size=14)) +
guides(colour=guide_legend(override.aes=list(size=2.5)))+
geom_hline(yintercept=1.3,linetype="dashed", color = "red") +
ylim(0,1.5)+
coord_flip()
q.val <- -log10(Enriched_lesser[,4])
data<-data.frame(rownames(Enriched_lesser), q.val)
colnames(data) <- c("TF","q.val")
library(ggplot2)
ggplot(data[1:51,], aes(x=TF, y=q.val)) +geom_bar(stat="identity") +
geom_col(aes(fill = q.val)) +
scale_fill_gradient2(low = "blue",
high = "red",
mid ="yellow",
midpoint = median(data$q.val)) +
xlab("TF class") +
ylab("-log10(q.val)") +
ggtitle("Gene set enrichment analysis of TF classes downregulated") +
theme_bw(base_size=10) +
theme(
legend.position='none',
legend.background=element_rect(),
plot.title=element_text(angle=0, size=16, face="bold", vjust=1),
axis.text.x=element_text(angle=0, size=10, face="bold", hjust=1.10),
axis.text.y=element_text(angle=0, size=10, face="bold", vjust=0.5),
axis.title=element_text(size=12, face="bold"),
legend.key=element_blank(), #removes the border
legend.key.size=unit(1, "cm"), #Sets overall area/size of the legend
legend.text=element_text(size=14), #Text size
title=element_text(size=14)) +
guides(colour=guide_legend(override.aes=list(size=2.5)))+
geom_hline(yintercept=1.3,linetype="dashed", color = "red") +
ylim(0,5)+
coord_flip()
###################################################################
library("biomaRt")
library(topGO)
#collect gene names from biomart
mart <- biomaRt::useMart(biomart = "plants_mart",
dataset = "athaliana_eg_gene",
host = 'plants.ensembl.org')
# Get ensembl gene ids and GO terms
GTOGO <- biomaRt::getBM(attributes = c( "ensembl_gene_id",
"go_id"), mart = mart)
#examine result
head (GTOGO)
#Remove blank entries
GTOGO <- GTOGO[GTOGO$go_id != '',]
# convert from table format to list format
geneID2GO <- by(GTOGO$go_id,
GTOGO$ensembl_gene_id,
function(x) as.character(x))
#examine result
head (geneID2GO)
all.genes <- sort(unique(as.character(GTOGO$ensembl_gene_id)))
int.genes <- rownames(E_M_DEG) # some random genes
int.genes <- factor(as.integer(all.genes %in% int.genes))
names(int.genes) = all.genes
go.obj <- new("topGOdata", ontology='BP'
, allGenes = int.genes
, annot = annFUN.gene2GO
, gene2GO = geneID2GO
)
resultsFisher <- runTest(go.obj, algorithm = "elim", statistic = "fisher")
allRes <- GenTable(go.obj, classic = resultsFisher,
orderBy = "Fisher", ranksOf = "classic", topNodes = 17)
plot_go <- function(goterms, name) {
  # Dodged bar plot of significant GO terms: query-list percentage vs
  # background percentage for each term, flipped so terms read horizontally.
  #
  # BUG FIX: the original body read the global `allRes` instead of the
  # `goterms` argument, so any table passed in was silently ignored.
  #
  # goterms: a topGO::GenTable-style data.frame with columns Significant,
  #          Expected, classic, Term, GO_acc.
  # name:    plot title.
  # Returns the ggplot object (also drawn as a side effect).
  goterms$percquery <- goterms$Significant * 100
  goterms$percback <- goterms$Expected * 100
  # FIX: `classic` from GenTable is a *character* column of p-values; the
  # original `goterms$classic < 0.05` compared lexicographically. Coerce to
  # numeric; which() drops rows whose p-value could not be parsed (e.g.
  # "< 1e-30") instead of producing all-NA rows.
  filtered_go <- goterms[which(as.numeric(goterms$classic) < 0.05), ]
  #filtered_go = filtered_go[filtered_go$term_type == "P",]
  filtered_go_perc <- cbind(filtered_go$percquery, filtered_go$percback)
  colnames(filtered_go_perc) <- c("query", "background")
  row.names(filtered_go_perc) <- paste(filtered_go$Term, filtered_go$GO_acc, sep = "-->")
  meled <- melt(filtered_go_perc)
  x <- ggplot(meled, aes(Var1, value, fill = Var2)) +
    geom_bar(stat = "identity", position = "dodge") +
    theme_bw() +
    theme(axis.text.x = element_text(angle = 90, hjust = 1)) +
    xlab("sig GO Term") +
    ylab("Ratio genes with term in list") +
    ggtitle(name) +
    coord_flip()
  plot(x)
  return(x)
}
plot_go(allRes, "E vs M DEG GO terms")
showSigOfNodes(go.obj, score(resultsFisher), firstSigNodes = 20, useInfo = 'pval')
printGraph(go.obj, resultsFisher, firstSigNodes = 17, fn.prefix = "tGO", useInfo = "def", pdfSW = TRUE)
AgriGo <- read.delim('AgriGOv2_table.txt')
###################################################################
#go
####################################################################
# Dodged bar plot of FDR-significant GO terms: % of query genes vs % of
# background genes annotated with each term (AgriGO-style input with columns
# queryitem, querytotal, bgitem, bgtotal, FDR, Term, GO_acc).
# Returns the ggplot object; also draws it as a side effect.
plot_go = function(goterms,name){
# Per-term percentages in the query list and in the background.
goterms$percquery = goterms$queryitem/goterms$querytotal*100
goterms$percback = goterms$bgitem/goterms$bgtotal*100
# Keep only terms significant at FDR < 0.05.
filtered_go = goterms[goterms$FDR < 0.05,]
#filtered_go = filtered_go[filtered_go$term_type == "P",]
# Two-column matrix (query, background), one row per significant term.
filtered_go_perc = cbind(filtered_go$percquery, filtered_go$percback)
colnames(filtered_go_perc) = c("query","background")
row.names(filtered_go_perc) = paste(filtered_go$Term,filtered_go$GO_acc,sep ="-->")
# Long format for ggplot: Var1 = term label, Var2 = query/background.
meled = melt(filtered_go_perc)
x = ggplot(meled, aes(Var1, value, fill=Var2)) +
geom_bar(stat="identity", position="dodge")+
theme_bw() +
theme(axis.text.x = element_text(angle = 90, hjust = 1))+
xlab("sig GO Term") +
ylab("Ratio genes with term in list") +
ggtitle(name)+
coord_flip()
plot(x)
return(x)
}
v<-plot_go(AgriGo, "0")
plot_go(over_allRes[1:50,],"Top50 over-represented microspore GO term analysis 0.05")
plot_go(under_allRes[1:50,],"Top50 under-represented microspore GO term analysis 0.05")
|
# R script to pressure correct the sea level rates which have previously been
# PGR corrected.
# NOTE(review): assumes stns177Rates (>= 56 rows x 177 cols), s177SlpRates
# (56 x 177), dataArray, and dataList177 already exist from an earlier
# script -- TODO confirm where they are defined.
s177PressureCorrectedRates <- array(NA,dim=c(56,177))
s177PressureCorrectedMean <- array(0,dim=c(56,1))
# Apply the pressure correction station by station (could be vectorised as a
# single matrix addition, but the loop mirrors the data layout).
for (i in 1:177){
# Remember that a positive rate of pressure increase is equivalent to a
# negative rate of sea level rise (i.e. falling sea level). High pressure
# pushes sea level down.
s177PressureCorrectedRates[,i] <-
stns177Rates[1:56,i] + s177SlpRates[,i]
}
# Per-row (per-epoch) mean across stations, and one data list entry per row
# with the corrected rates substituted into column 5.
for (i in 1:56){
s177PressureCorrectedMean[i] <- mean(s177PressureCorrectedRates[i,],
na.rm=TRUE)
dataArrayTmp <- dataArray
dataArrayTmp[,5] <- s177PressureCorrectedRates[i,]
# NOTE(review): `x[i] <- list(data=y)` stores y at position i but drops the
# "data" name; `dataList177[[i]] <- dataArrayTmp` would be the clearer form.
dataList177[i] <- list(data=dataArrayTmp)
}
| /global_tgs/decadal/pressureCorrectedRates177_2.R | no_license | simonholgate/R-Scripts | R | false | false | 703 | r | # R script to pressure correct the sea level rates which have previously been
# PGR corrected
s177PressureCorrectedRates <- array(NA,dim=c(56,177))
s177PressureCorrectedMean <- array(0,dim=c(56,1))
for (i in 1:177){
# Remember that a positive rate of pressure increase is equivalent to a
# negative rate of sea level rise (i.e. falling sea level). High pressure
# pushes sea level down.
s177PressureCorrectedRates[,i] <-
stns177Rates[1:56,i] + s177SlpRates[,i]
}
for (i in 1:56){
s177PressureCorrectedMean[i] <- mean(s177PressureCorrectedRates[i,],
na.rm=TRUE)
dataArrayTmp <- dataArray
dataArrayTmp[,5] <- s177PressureCorrectedRates[i,]
dataList177[i] <- list(data=dataArrayTmp)
}
|
library(bitops)
library(shiny)
runBitAnd <- function(i1, i2) {
  # Bitwise AND of two positive numbers, returning a friendly message for
  # invalid input instead of raising an error (this feeds a Shiny renderPrint).
  #
  # FIXES: the original used the vectorized `&` in a scalar `if` (use `&&`),
  # and only handled the NA produced by an empty numericInput by accident
  # (NA condition -> error -> tryCatch). Both cases are now explicit.
  # Uses base::bitwAnd() (available since R 3.0) instead of bitops::bitAnd(),
  # so the helper has no package dependency; non-integral input now falls
  # through to the error handler rather than being silently truncated.
  tryCatch(
    {
      if (!is.numeric(i1) || !is.numeric(i2) || anyNA(i1) || anyNA(i2)) {
        # Covers NULL/character input and the NA an empty Shiny field yields.
        "Please enter numeric Value"
      } else if (i1 > 0 && i2 > 0) {
        bitwAnd(i1, i2)
      } else {
        "No negative Values"
      }
    },
    error = function(cond) {
      "Please enter numeric Value"
    }
  )
}
# Shiny server: render the bitwise-AND of the two numeric inputs (id1, id2)
# into the `oid1` output as printed text.
shinyServer(
function(input,output){
output$oid1 <- renderPrint({runBitAnd(input$id1,input$id2)})
}
)
| /server.R | no_license | TermiJAG/DevDataProd | R | false | false | 388 | r | library(bitops)
library(shiny)
runBitAnd <- function(i1,i2){
tryCatch(
{
if (i1>0 &i2>0)
{
bitAnd(i1,i2)
}
else{
"No negative Values"
}
},
error = function(cond){
"Please enter numeric Value"
}
)
}
shinyServer(
function(input,output){
output$oid1 <- renderPrint({runBitAnd(input$id1,input$id2)})
}
)
|
#' Performs PLS-MGA to report significance of path differences between two subgroups of data
#'
#' @param pls_model SEMinR PLS model estimated on the full sample
#' @param condition logical vector of TRUE/FALSE indicating which rows of sample data are in group 1
#' @param nboot number of bootstrap resamples to use in PLS-MGA
#' @param ... any further parameters for bootstrapping (e.g., cores)
#'
#' @examples
#' mobi <- mobi
#'
#' #seminr syntax for creating measurement model
#' mobi_mm <- constructs(
#' composite("Image", multi_items("IMAG", 1:5)),
#' composite("Expectation", multi_items("CUEX", 1:3)),
#' composite("Quality", multi_items("PERQ", 1:7)),
#' composite("Value", multi_items("PERV", 1:2)),
#' composite("Satisfaction", multi_items("CUSA", 1:3)),
#' composite("Complaints", single_item("CUSCO")),
#' composite("Loyalty", multi_items("CUSL", 1:3))
#' )
#'
#' #seminr syntax for creating structural model
#' mobi_sm <- relationships(
#' paths(from = "Image", to = c("Expectation", "Satisfaction", "Loyalty")),
#' paths(from = "Expectation", to = c("Quality", "Value", "Satisfaction")),
#' paths(from = "Quality", to = c("Value", "Satisfaction")),
#' paths(from = "Value", to = c("Satisfaction")),
#' paths(from = "Satisfaction", to = c("Complaints", "Loyalty")),
#' paths(from = "Complaints", to = "Loyalty")
#' )
#'
#' mobi_pls <- estimate_pls(data = mobi,
#' measurement_model = mobi_mm,
#' structural_model = mobi_sm,
#' missing = mean_replacement,
#' missing_value = NA)
#'
#' # Should usually use nboot ~2000 and don't specify cores for full parallel processing
#'
#' mobi_mga <- estimate_pls_mga(mobi_pls, mobi$CUEX1 < 8, nboot=50, cores = 2)
#'
#' @references Henseler, J., Ringle, C. M. & Sinkovics, R. R. New Challenges to International Marketing. Adv Int Marketing 277–319 (2009) doi:10.1108/s1474-7979(2009)0000020014
#'
#' @export
estimate_pls_mga <- function(pls_model, condition, nboot = 2000, ...) {
pls_data <- pls_model$rawdata
# Given a beta report matrix (paths as rows) get estimates from a path_coef matrix.
# `path` arrives as a character vector with named entries "source"/"target"
# because apply() coerces the data.frame row to character.
path_estimate <- function(path, path_coef) {
path_coef[path["source"], path["target"]]
}
# Allocate and Estimate Two Alternative Datasets + Models:
# rows where `condition` is TRUE form group 1, the rest form group 2.
group1_data <- pls_data[condition, ]
group2_data <- pls_data[!condition, ]
message("Estimating and bootstrapping groups...")
group1_model <- rerun(pls_model, data = group1_data)
group2_model <- rerun(pls_model, data = group2_data)
group1_boot <- bootstrap_model(seminr_model = group1_model, nboot = nboot, ...)
group2_boot <- bootstrap_model(seminr_model = group2_model, nboot = nboot, ...)
message("Computing similarity of groups")
# Produce beta report matrix on all paths (as rows): one row per structural
# path, with pooled and per-group estimates added column by column below.
beta <- as.data.frame(pls_model$smMatrix[,c("source", "target"), drop = F])
path_names <- do.call(paste0, cbind(beta["source"], " -> ", beta["target"]))
rownames(beta) <- path_names
beta$estimate <- apply(beta, MARGIN = 1, FUN=path_estimate, path_coef = pls_model$path_coef)
beta$group1_beta <- apply(beta, MARGIN = 1, FUN=path_estimate, path_coef = group1_model$path_coef)
beta$group2_beta <- apply(beta, MARGIN = 1, FUN=path_estimate, path_coef = group2_model$path_coef)
beta_diff <- group1_model$path_coef - group2_model$path_coef
beta$diff <- apply(beta, MARGIN = 1, FUN=path_estimate, path_coef = beta_diff)
# Get bootstrapped paths for both groups (one row per bootstrap resample,
# one column per path).
boot1_betas <- boot_paths_df(group1_boot)
boot2_betas <- boot_paths_df(group2_boot)
# PLSc may not resolve in some bootstrap runs - limit bootstrap paths to resolved number of boots
J <- min(dim(boot1_betas)[1], dim(boot2_betas)[1])
if (J < nboot) {
message(paste("NOTE: Using", J, "bootstrapped results of each group after removing inadmissible runs"))
}
boot1_betas <- boot1_betas[1:J,]
boot2_betas <- boot2_betas[1:J,]
# Insert bootstrap descriptives into beta matrix
beta$group1_beta_mean <- apply(boot1_betas, MARGIN=2, FUN=mean)
beta$group2_beta_mean <- apply(boot2_betas, MARGIN=2, FUN=mean)
# beta$group1_beta_sd <- apply(boot1_betas, MARGIN=2, FUN=sd)
# beta$group2_beta_sd <- apply(boot2_betas, MARGIN=2, FUN=sd)
# Compute PLS-MGA p-value
# see: Henseler, J., Ringle, C. M., & Sinkovics, R. R. (2009). The use of partial least squares path modeling in international marketing. In New challenges to international marketing. Emerald Group Publishing Limited.
# Indicator function for the Henseler comparison: 1 when the bias-corrected
# group-1 draw exceeds the group-2 draw.
Theta <- function(s) {
ifelse(s > 0, 1, 0)
}
# All J^2 pairwise comparisons of bias-corrected bootstrap draws for path i:
# (2*mean1 - b1j) - (2*mean2 - b2k), vectorised via expand.grid.
beta_comparison <- function(i, beta, beta1_boots, beta2_boots) {
for_all <- expand.grid(beta1_boots[,i], beta2_boots[,i])
2*beta$group1_beta_mean[i] - for_all[,1] - 2*beta$group2_beta_mean[i] + for_all[,2]
}
# One-sided PLS-MGA p-value; note this closes over J from the enclosing scope.
pls_mga_p <- function(i, beta, beta1_boots, beta2_boots) {
1 - (sum(Theta(beta_comparison(i, beta, beta1_boots, beta2_boots))) / J^2)
}
beta$pls_mga_p <- sapply(1:nrow(beta), FUN=pls_mga_p, beta=beta, beta1_boots=boot1_betas, beta2_boots=boot2_betas)
class(beta) <- c("seminr_pls_mga", class(beta))
beta
}
| /R/estimate_pls_mga.R | no_license | sem-in-r/seminr | R | false | false | 5,171 | r | #' Performs PLS-MGA to report significance of path differences between two subgroups of data
#'
#' @param pls_model SEMinR PLS model estimated on the full sample
#' @param condition logical vector of TRUE/FALSE indicating which rows of sample data are in group 1
#' @param nboot number of bootstrap resamples to use in PLS-MGA
#' @param ... any further parameters for bootstrapping (e.g., cores)
#'
#' @examples
#' mobi <- mobi
#'
#' #seminr syntax for creating measurement model
#' mobi_mm <- constructs(
#' composite("Image", multi_items("IMAG", 1:5)),
#' composite("Expectation", multi_items("CUEX", 1:3)),
#' composite("Quality", multi_items("PERQ", 1:7)),
#' composite("Value", multi_items("PERV", 1:2)),
#' composite("Satisfaction", multi_items("CUSA", 1:3)),
#' composite("Complaints", single_item("CUSCO")),
#' composite("Loyalty", multi_items("CUSL", 1:3))
#' )
#'
#' #seminr syntax for creating structural model
#' mobi_sm <- relationships(
#' paths(from = "Image", to = c("Expectation", "Satisfaction", "Loyalty")),
#' paths(from = "Expectation", to = c("Quality", "Value", "Satisfaction")),
#' paths(from = "Quality", to = c("Value", "Satisfaction")),
#' paths(from = "Value", to = c("Satisfaction")),
#' paths(from = "Satisfaction", to = c("Complaints", "Loyalty")),
#' paths(from = "Complaints", to = "Loyalty")
#' )
#'
#' mobi_pls <- estimate_pls(data = mobi,
#' measurement_model = mobi_mm,
#' structural_model = mobi_sm,
#' missing = mean_replacement,
#' missing_value = NA)
#'
#' # Should usually use nboot ~2000 and don't specify cores for full parallel processing
#'
#' mobi_mga <- estimate_pls_mga(mobi_pls, mobi$CUEX1 < 8, nboot=50, cores = 2)
#'
#' @references Henseler, J., Ringle, C. M. & Sinkovics, R. R. New Challenges to International Marketing. Adv Int Marketing 277–319 (2009) doi:10.1108/s1474-7979(2009)0000020014
#'
#' @export
estimate_pls_mga <- function(pls_model, condition, nboot = 2000, ...) {
pls_data <- pls_model$rawdata
# Given a beta report matrix (paths as rows) get estimates from a path_coef matrix.
# `path` arrives as a character vector with named entries "source"/"target"
# because apply() coerces the data.frame row to character.
path_estimate <- function(path, path_coef) {
path_coef[path["source"], path["target"]]
}
# Allocate and Estimate Two Alternative Datasets + Models:
# rows where `condition` is TRUE form group 1, the rest form group 2.
group1_data <- pls_data[condition, ]
group2_data <- pls_data[!condition, ]
message("Estimating and bootstrapping groups...")
group1_model <- rerun(pls_model, data = group1_data)
group2_model <- rerun(pls_model, data = group2_data)
group1_boot <- bootstrap_model(seminr_model = group1_model, nboot = nboot, ...)
group2_boot <- bootstrap_model(seminr_model = group2_model, nboot = nboot, ...)
message("Computing similarity of groups")
# Produce beta report matrix on all paths (as rows): one row per structural
# path, with pooled and per-group estimates added column by column below.
beta <- as.data.frame(pls_model$smMatrix[,c("source", "target"), drop = F])
path_names <- do.call(paste0, cbind(beta["source"], " -> ", beta["target"]))
rownames(beta) <- path_names
beta$estimate <- apply(beta, MARGIN = 1, FUN=path_estimate, path_coef = pls_model$path_coef)
beta$group1_beta <- apply(beta, MARGIN = 1, FUN=path_estimate, path_coef = group1_model$path_coef)
beta$group2_beta <- apply(beta, MARGIN = 1, FUN=path_estimate, path_coef = group2_model$path_coef)
beta_diff <- group1_model$path_coef - group2_model$path_coef
beta$diff <- apply(beta, MARGIN = 1, FUN=path_estimate, path_coef = beta_diff)
# Get bootstrapped paths for both groups (one row per bootstrap resample,
# one column per path).
boot1_betas <- boot_paths_df(group1_boot)
boot2_betas <- boot_paths_df(group2_boot)
# PLSc may not resolve in some bootstrap runs - limit bootstrap paths to resolved number of boots
J <- min(dim(boot1_betas)[1], dim(boot2_betas)[1])
if (J < nboot) {
message(paste("NOTE: Using", J, "bootstrapped results of each group after removing inadmissible runs"))
}
boot1_betas <- boot1_betas[1:J,]
boot2_betas <- boot2_betas[1:J,]
# Insert bootstrap descriptives into beta matrix
beta$group1_beta_mean <- apply(boot1_betas, MARGIN=2, FUN=mean)
beta$group2_beta_mean <- apply(boot2_betas, MARGIN=2, FUN=mean)
# beta$group1_beta_sd <- apply(boot1_betas, MARGIN=2, FUN=sd)
# beta$group2_beta_sd <- apply(boot2_betas, MARGIN=2, FUN=sd)
# Compute PLS-MGA p-value
# see: Henseler, J., Ringle, C. M., & Sinkovics, R. R. (2009). The use of partial least squares path modeling in international marketing. In New challenges to international marketing. Emerald Group Publishing Limited.
# Indicator function for the Henseler comparison: 1 when the bias-corrected
# group-1 draw exceeds the group-2 draw.
Theta <- function(s) {
ifelse(s > 0, 1, 0)
}
# All J^2 pairwise comparisons of bias-corrected bootstrap draws for path i:
# (2*mean1 - b1j) - (2*mean2 - b2k), vectorised via expand.grid.
beta_comparison <- function(i, beta, beta1_boots, beta2_boots) {
for_all <- expand.grid(beta1_boots[,i], beta2_boots[,i])
2*beta$group1_beta_mean[i] - for_all[,1] - 2*beta$group2_beta_mean[i] + for_all[,2]
}
# One-sided PLS-MGA p-value; note this closes over J from the enclosing scope.
pls_mga_p <- function(i, beta, beta1_boots, beta2_boots) {
1 - (sum(Theta(beta_comparison(i, beta, beta1_boots, beta2_boots))) / J^2)
}
beta$pls_mga_p <- sapply(1:nrow(beta), FUN=pls_mga_p, beta=beta, beta1_boots=boot1_betas, beta2_boots=boot2_betas)
class(beta) <- c("seminr_pls_mga", class(beta))
beta
}
|
## Read the datafile.
svm_data <- read.csv('Companiesnew.csv')
# Drop the first column (row ids) and what is then the 10th column.
svm_data <- svm_data[,-1]
svm_data <- svm_data[,-10]
str(svm_data)
table(svm_data$status)
#now converting data to numeric variables
# NOTE(review): the hard-coded label counts (42, 12, 49, 299, 891, 4) assume
# the CSV has exactly that many levels per column; factor() errors otherwise.
# `factor(x, labels = 1:n)` relabels the levels as "1".."n" in level order.
levels(svm_data$category_code)
svm_data$category_code <- factor(svm_data$category_code, labels = c(1:42))
levels(svm_data$country_code)
svm_data$country_code <- factor(svm_data$country_code, labels = c(1:12))
str(svm_data)
svm_data$state_code <- factor(svm_data$state_code,labels = c(1:49))
svm_data$region <- factor(svm_data$region, labels = c(1:299))
svm_data$city <- factor(svm_data$city, labels = c(1:891))
svm_data$status <-factor(svm_data$status,labels = c(1:4))
#levels(svm_data$status)
#svm_data$status <- ifelse(svm_data$status == 'acquired', 'acquired','not acquired')
#svm_data$status <- ifelse(svm_data$status=='acquired',1,0)
#histogram(svm_data$status)
svm_data <- svm_data[,c(3,1,2,4,5,6,7,8,9)] #rearrarranging column
table(svm_data$status)
str(svm_data)
# Convert factors to their integer level codes (status stays a factor for ksvm
# classification; the predictors become plain numerics).
svm_data$status <- as.numeric(svm_data$status)
svm_data$status <- as.factor(svm_data$status)
svm_data$category_code <- as.numeric(svm_data$category_code)
svm_data$country_code <- as.numeric(svm_data$country_code)
svm_data$region <- as.numeric(svm_data$region)
svm_data$state_code <-as.numeric(svm_data$state_code)
svm_data$city <- as.numeric(svm_data$city)
# NOTE(review): install.packages() calls inside an analysis script re-install
# on every run; better done once interactively.
install.packages("tabplot")
library(tabplot)
tableplot(svm_data)
str(svm_data)
library(ggplot2)
# NOTE(review): `svm_data$funding` is referenced here but not visible in the
# transformations above -- confirm the column exists after the drops/reorder.
ggplot(data=svm_data,aes(x=svm_data$status,y=svm_data$funding)) + geom_point(shape=1)
## Since the measurement of data is different it needs to be normalized
install.packages("tabplot")
library(tabplot)
tableplot(svm_data)
install.packages("PerformanceAnalytics")
library(PerformanceAnalytics)
# NOTE(review): `svm_datan` and `svm_data$f` are undefined -- likely typos for
# `svm_data` and some real column; this line errors as written.
chart.Correlation(svm_datan[,2:9],col=svm_data$f)
## for running support vector machine
## for this input is soil data which has 9 variable and 1 class variable
## SVM learner require all features to be numneric
## Generally we need to normalize the data but this svm package will perform this activity.
## Now we need to divide the data into testing and training phase
set.seed(1337)
n <- nrow(svm_data)
shuffled_data <- svm_data[sample(n),]
svm_data <- shuffled_data
#library(caret)
table(svm_data$status)
# NOTE(review): caret::upSample(x, y) appends the class as a new `Class`
# column; passing the full frame (which still contains `status`) duplicates
# the label -- TODO confirm this is intended before training.
svm_data <- upSample(svm_data,svm_data$status)
sample <- createDataPartition(svm_data$status, p = .70, list = FALSE)
# NOTE(review): the 70% partition is assigned to svm_test and the 30%
# remainder to svm_train -- the names appear swapped relative to the usual
# 70/30 train/test split.
svm_test <- svm_data[sample, ]
svm_train <- svm_data[-sample, ]
install.packages("kernlab")
install.packages("e1071")
library(e1071)
library(kernlab)
# Polynomial-kernel SVM, then confusion matrix on the held-out set.
model<-ksvm(status ~.,data=svm_train, kernel="polydot")
model
status_predict<-predict(model,svm_test)
conf=table(status_predict,svm_test$status)
conf
#library(caret)
confusionMatrix(status_predict,svm_test$status)
# NOTE(review): status_predict2 is only created further below (Radial model),
# so this line errors when the script runs top to bottom; it also lacks the
# as.numeric() coercion used for the other roc() calls.
roc_obj2 <- roc(svm_test$status, status_predict2)
auc(roc_obj2)
plot(roc_obj2)
# Hyperbolic-tangent kernel SVM.
Hyperbolic<-ksvm(status ~ .,data=svm_train, kernel="tanhdot")
Hyperbolic
status_predict1<-predict(Hyperbolic,svm_test)
conf=table(status_predict1,svm_test$status)
conf
confusionMatrix(status_predict1,svm_test$status)
# as.numeric() on factors yields level codes (1..4), which pROC treats as an
# ordered predictor.
roc_obj1 <- roc(as.numeric(svm_test$status),as.numeric(status_predict1))
auc(roc_obj1)
plot(roc_obj1)
# Radial-basis (Gaussian) kernel SVM.
Radial<-ksvm(status ~ .,data=svm_train, kernel="rbfdot")
Radial
status_predict2<-predict(Radial,svm_test)
table(status_predict2,svm_test$status)
confusionMatrix(status_predict2,svm_test$status)
status_predict2
library(pROC)
roc_obj <- roc(as.numeric(svm_test$status), as.numeric(status_predict2))
auc(roc_obj)
plot(roc_obj)
# NOTE(review): `predicted_class` and `ann_test` are undefined in this script,
# and ROCR::performance() expects the prediction *object* (nn.pred), not the
# raw predictions -- these four lines are broken as written.
nn.pred = prediction(predicted_class, ann_test$status)
pref <- performance(status_predict2, "tpr", "fpr")
plot(pref,colorize=T)
abline(a=0,b=1)
library(pROC)
| /svm.R | no_license | enthkunal/Startup-Analysis | R | false | false | 3,632 | r | ## Read the datafile.
svm_data <- read.csv('Companiesnew.csv')
svm_data <- svm_data[,-1]
svm_data <- svm_data[,-10]
str(svm_data)
table(svm_data$status)
#now converting data to numeric variables
levels(svm_data$category_code)
svm_data$category_code <- factor(svm_data$category_code, labels = c(1:42))
levels(svm_data$country_code)
svm_data$country_code <- factor(svm_data$country_code, labels = c(1:12))
str(svm_data)
svm_data$state_code <- factor(svm_data$state_code,labels = c(1:49))
svm_data$region <- factor(svm_data$region, labels = c(1:299))
svm_data$city <- factor(svm_data$city, labels = c(1:891))
svm_data$status <-factor(svm_data$status,labels = c(1:4))
#levels(svm_data$status)
#svm_data$status <- ifelse(svm_data$status == 'acquired', 'acquired','not acquired')
#svm_data$status <- ifelse(svm_data$status=='acquired',1,0)
#histogram(svm_data$status)
svm_data <- svm_data[,c(3,1,2,4,5,6,7,8,9)] #rearrarranging column
table(svm_data$status)
str(svm_data)
svm_data$status <- as.numeric(svm_data$status)
svm_data$status <- as.factor(svm_data$status)
svm_data$category_code <- as.numeric(svm_data$category_code)
svm_data$country_code <- as.numeric(svm_data$country_code)
svm_data$region <- as.numeric(svm_data$region)
svm_data$state_code <-as.numeric(svm_data$state_code)
svm_data$city <- as.numeric(svm_data$city)
install.packages("tabplot")
library(tabplot)
tableplot(svm_data)
str(svm_data)
library(ggplot2)
ggplot(data=svm_data,aes(x=svm_data$status,y=svm_data$funding)) + geom_point(shape=1)
## Since the measurement of data is different it needs to be normalized
install.packages("tabplot")
library(tabplot)
tableplot(svm_data)
install.packages("PerformanceAnalytics")
library(PerformanceAnalytics)
chart.Correlation(svm_datan[,2:9],col=svm_data$f)
## for running support vector machine
## for this input is soil data which has 9 variable and 1 class variable
## SVM learner require all features to be numneric
## Generally we need to normalize the data but this svm package will perform this activity.
## Now we need to divide the data into testing and training phase
set.seed(1337)
n <- nrow(svm_data)
shuffled_data <- svm_data[sample(n),]
svm_data <- shuffled_data
#library(caret)
table(svm_data$status)
svm_data <- upSample(svm_data,svm_data$status)
sample <- createDataPartition(svm_data$status, p = .70, list = FALSE)
svm_test <- svm_data[sample, ]
svm_train <- svm_data[-sample, ]
install.packages("kernlab")
install.packages("e1071")
library(e1071)
library(kernlab)
model<-ksvm(status ~.,data=svm_train, kernel="polydot")
model
status_predict<-predict(model,svm_test)
conf=table(status_predict,svm_test$status)
conf
#library(caret)
confusionMatrix(status_predict,svm_test$status)
roc_obj2 <- roc(svm_test$status, status_predict2)
auc(roc_obj2)
plot(roc_obj2)
Hyperbolic<-ksvm(status ~ .,data=svm_train, kernel="tanhdot")
Hyperbolic
status_predict1<-predict(Hyperbolic,svm_test)
conf=table(status_predict1,svm_test$status)
conf
confusionMatrix(status_predict1,svm_test$status)
roc_obj1 <- roc(as.numeric(svm_test$status),as.numeric(status_predict1))
auc(roc_obj1)
plot(roc_obj1)
Radial<-ksvm(status ~ .,data=svm_train, kernel="rbfdot")
Radial
status_predict2<-predict(Radial,svm_test)
table(status_predict2,svm_test$status)
confusionMatrix(status_predict2,svm_test$status)
status_predict2
library(pROC)
roc_obj <- roc(as.numeric(svm_test$status), as.numeric(status_predict2))
auc(roc_obj)
plot(roc_obj)
nn.pred = prediction(predicted_class, ann_test$status)
pref <- performance(status_predict2, "tpr", "fpr")
plot(pref,colorize=T)
abline(a=0,b=1)
library(pROC)
|
rm(list=ls())
library("bdt")
library(parallel)
library("ROCR")
library(latticeExtra)
thisScriptDir = getScriptDir()
source(paste0(thisScriptDir, '/../../../config/bdt_path.R'))
## some ultility functions
# Return the 1-based position of the first configuration in the parallel
# vectors (xs, ys) that matches the pair (x, y), or 0 when none matches.
#
# x, y   : scalar coordinates of the configuration to look up
#          (e.g. unwanted-factor count and known-factor flag)
# xs, ys : parallel vectors describing the available configurations
pairInConfiguration <- function(x, y, xs, ys) {
  # Vectorized equality replaces the original scalar loop; which() yields
  # match positions in order, so hits[1] is the first match. This also
  # returns 0 cleanly for empty xs, where the old 1:length(xs) loop errored.
  hits <- which(xs == x & ys == y)
  if (length(hits) > 0) hits[1] else 0
}
# Return the 1-based index m at which BOTH configuration pairs match
# simultaneously: (x1, y1) == (xs1[m], ys1[m]) and (x2, y2) == (xs2[m], ys2[m]).
# Returns 0 when no index satisfies all four equalities.
pairInTwoConfig <- function(x1, y1, xs1, ys1, x2, y2, xs2, ys2) {
  # All four parallel vectors must agree at the same position; the single
  # vectorized test mirrors the original element-by-element loop and avoids
  # the 1:length(xs1) failure mode on empty input.
  hits <- which(xs1 == x1 & ys1 == y1 & xs2 == x2 & ys2 == y2)
  if (length(hits) > 0) hits[1] else 0
}
# Count the distinct (unwanted-factor, known-factor) configurations.
# Each (k, n) pair is encoded as the single key k + n * 1000, which places
# known-factor configurations after all plain unwanted-factor counts.
getConfigCnt <- function(ks, ns) {
  length(unique(ks + ns * 1000))
}
# Return the rank (1-based) of configuration (k, n) among the distinct
# configurations present in (ks, ns), ordered by the encoded key
# k + n * 1000 (so known-factor configurations rank last).
# Returns 0 when the configuration is not present.
getConfigOrder <- function(ks, ns, k, n) {
  # Sorted distinct keys; sort() replaces the oval[order(oval)] idiom.
  oval <- sort(unique(ks + ns * 1000))
  # match() replaces the original linear search; nomatch = 0 reproduces
  # the old loop's fall-through sentinel exactly.
  match(k + n * 1000, oval, nomatch = 0)
}
# Build display labels for the distinct configurations in (ks, ns), in
# encoded-key order: known-factor configurations (key >= 1000) are
# labelled "KF", all others by their unwanted-factor count.
getConfigTexts <- function(ks, ns) {
  oval <- sort(unique(ks + ns * 1000))
  # Vectorized replacement for the original element loop; %% 1000 strips
  # the known-factor offset before formatting the count as text.
  ifelse(oval >= 1000, "KF", as.character(oval %% 1000))
}
# Pearson correlation between the i-th paired rows of two matrices:
# row rowIDs1[i] of mat1 against row rowIDs2[i] of mat2.
twoMatRowCor <- function(i, mat1, mat2, rowIDs1, rowIDs2) {
  row1 <- mat1[rowIDs1[i], ]
  row2 <- mat2[rowIDs2[i], ]
  cor(row1, row2)
}
# Read a tab-separated text file and return its first column as a plain
# vector (the data.frame wrapper from read.table is dropped).
readVectorFromTxt <- function(txtFile) {
  read.table(txtFile, sep = "\t")[[1]]
}
need_export = FALSE
num_threads = 24
## 132 cell types both in DNase and Exon dataset
## export DNase data
## use the first sample in each cell type as column id and obtain cell type level measurement
dnaseSampleIds = c(1, 3, 9, 17, 19, 22, 28, 36, 39, 42, 49, 55, 60, 62, 68, 71,
73, 75, 79, 81, 84, 88, 90, 93, 95, 97, 100, 102, 104, 109,
112, 115, 117, 119, 121, 125, 127, 130, 137, 146, 154, 157,
159, 161, 164, 168, 171, 175, 177, 181, 185, 190, 201, 209,
215, 217, 221, 223, 225, 227, 229, 231, 233, 235, 237, 244,
249, 251, 252, 259, 261, 262, 264, 265, 267, 269, 271, 273,
275, 276, 278, 280, 282, 284, 286, 288, 290, 296, 298, 300,
302, 304, 308, 310, 312, 314, 316, 318, 320, 322, 324, 326,
328, 329, 331, 333, 335, 337, 339, 341, 351, 353, 371, 374,
376, 378, 380, 382, 386, 388, 390, 392, 396, 398, 400, 402,
404, 409, 414, 420, 422, 424)
unwanted_factors_dnase = c(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 20, 30, 0)
known_factors_dnase = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)
config_names_dnase = as.character(unwanted_factors_dnase)
# last one is for know factor only
config_names_dnase[length(config_names_dnase)] = 'KF'
if (need_export) {
exportDNaseRet = bdvdExport(
bdt_home = bdtHome,
thread_num = num_threads,
mem_size = 16000,
column_ids = dnaseSampleIds,
bdvd_dir = paste0(thisScriptDir, '/../s02-bdvd/out'),
component = 'signal', #cell type level measurement
artifact_detection = 'conservative',
unwanted_factors = unwanted_factors_dnase,
known_factors = known_factors_dnase,
rowidxs_input = paste0("text-rowids@", bdtDatasetsDir, "/DNaseExonCorrelation/100bp/s01-TSS-PairIdxs/DNase_UniqueFeatureIdxs.txt"),
rowidxs_index_base = 0,
out = paste0(thisScriptDir,"/Dnase"))
} else {
exportDNaseRet = readBdvdExportOutput(paste0(thisScriptDir,"/Dnase"))
}
## export Exon data
## use the first sample in each cell type as column id and obtain cell type level measurement
exonSampleIds = c(78, 63, 55, 54, 80, 99, 43, 121, 124, 127, 72, 5, 22, 24, 40,
26, 28, 30, 18, 101, 51, 119, 11, 37, 86, 8, 76, 84, 45, 104,
48, 82, 92, 90, 14, 115, 117, 109, 1, 111, 67, 65, 39, 130, 98,
133, 32, 107, 57, 61, 16, 69, 35, 88, 94, 96, 204, 206, 208, 210,
212, 237, 277, 166, 164, 214, 153, 279, 233, 317, 319, 321, 328,
297, 217, 295, 280, 271, 323, 324, 219, 243, 231, 245, 221, 239,
223, 299, 301, 202, 282, 171, 247, 315, 251, 253, 255, 257, 259,
241, 249, 225, 310, 261, 265, 263, 180, 182, 303, 227, 267, 187,
305, 189, 273, 326, 269, 229, 235, 311, 193, 313, 290, 195, 173,
292, 161, 157, 176, 294, 306, 308)
unwanted_factors_exon = c(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 20, 30, 0)
known_factors_exon = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)
config_names_exon = as.character(unwanted_factors_exon)
# last one is for know factor only
config_names_exon[length(config_names_exon)] = 'KF'
# export randomly selected rows from Exon dataset
if (need_export) {
exportExonNoiseRet = bdvdExport(
bdt_home = bdtHome,
thread_num = num_threads,
mem_size = 16000,
column_ids = exonSampleIds,
bdvd_dir = paste0(thisScriptDir, '/../../DukeUwExonArray/s01-bdvd/out'),
component = 'signal', #cell type level measurement
artifact_detection = 'conservative',
unwanted_factors = unwanted_factors_exon,
known_factors = known_factors_exon,
rowidxs_input = paste0("text-rowids@", bdtDatasetsDir, "/DNaseExonCorrelation/100bp/s02-Random-PairIdxs/Exon_UniqueFeatureIdxs.txt"),
rowidxs_index_base = 0,
out = paste0(thisScriptDir,"/ExonNoise"))
} else {
exportExonNoiseRet = readBdvdExportOutput(paste0(thisScriptDir,"/ExonNoise"))
}
# export associated rows (via TSS) from Exon dataset
if (need_export) {
exportExonSignalRet = bdvdExport(
bdt_home = bdtHome,
thread_num = num_threads,
mem_size = 16000,
column_ids = exonSampleIds,
bdvd_dir = paste0(thisScriptDir, '/../../DukeUwExonArray/s01-bdvd/out'),
component = 'signal', #cell type level measurement
artifact_detection = 'conservative',
unwanted_factors = unwanted_factors_exon,
known_factors = known_factors_exon,
rowidxs_input = paste0("text-rowids@", bdtDatasetsDir, "/DNaseExonCorrelation/100bp/s01-TSS-PairIdxs/Exon_UniqueFeatureIdxs.txt"),
rowidxs_index_base = 0,
out = paste0(thisScriptDir,"/ExonSignal"))
} else {
exportExonSignalRet = readBdvdExportOutput(paste0(thisScriptDir,"/ExonSignal"))
}
# 1-based row ids
rowIDs_s1 = readVectorFromTxt(paste0(bdtDatasetsDir, "/DNaseExonCorrelation/100bp/s01-TSS-PairIdxs/DNase_RowIDs.txt"))
rowIDs_s2 = readVectorFromTxt(paste0(bdtDatasetsDir, "/DNaseExonCorrelation/100bp/s01-TSS-PairIdxs/Exon_RowIDs.txt"))
rowIDs_n1 = readVectorFromTxt(paste0(bdtDatasetsDir, "/DNaseExonCorrelation/100bp/s01-TSS-PairIdxs/DNase_RowIDs.txt"))
rowIDs_n2 = readVectorFromTxt(paste0(bdtDatasetsDir, "/DNaseExonCorrelation/100bp/s02-Random-PairIdxs/Exon_RowIDs.txt"))
rowIDs = 1:length(rowIDs_s1)
# a subset of configs are to be used for analysis
KsMate1 = c(0, 0, 1, 2, 2, 3, 3, 10)
NsMate1 = c(0, 1, 0, 0, 0, 0, 0, 0)
KsMate2 = c(0, 0, 1, 2, 3, 2, 3, 10)
NsMate2 = c(0, 1, 0, 0, 0, 0, 0, 0)
OnewayConfig = TRUE
#KsMate1 = c(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0)
#NsMate1 = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)
#KsMate2 = c(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0)
#NsMate2 = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)
#OnewayConfig = FALSE
N = length(KsMate1) * length(KsMate2)
if(OnewayConfig){
N = length(KsMate1)
}
corSignals = vector(mode="list", length = N)
corNoises = vector(mode="list", length = N)
runInfos = data.frame(
name = rep("", N),
k1 = rep(0, N),
n1 = rep(0, N),
k2 = rep(0, N),
n2 = rep(0, N),
stringsAsFactors = FALSE)
H1H0Ratio = 1
MAX_FDR = 0.05
##
## compute correlations for signal pairs
##
n = 0
print("compute correlations for signal pairs")
for (i in 1:length(unwanted_factors_dnase)) {
k_1 = unwanted_factors_dnase[i]
extW_1 = known_factors_dnase[i]
if (pairInConfiguration(k_1, extW_1, KsMate1, NsMate1 )== 0) {
next
}
mat1 = readMat(exportDNaseRet$mats[[i]])
for (j in 1:length(unwanted_factors_exon)) {
k_2 = unwanted_factors_exon[j]
extW_2 = known_factors_exon[j]
cfg2=0
if (OnewayConfig) {
cfg2 = pairInTwoConfig(k_1, extW_1, KsMate1, NsMate1, k_2, extW_2, KsMate2, NsMate2)
} else {
cfg2 = pairInConfiguration(k_2, extW_2, KsMate2, NsMate2)
}
if (cfg2 == 0) {
next
}
mat2 = readMat(exportExonSignalRet$mats[[j]])
n = n + 1
runInfos[n,"k1"] = k_1
runInfos[n,"n1"] = extW_1
runInfos[n,"k2"] = k_2
runInfos[n,"n2"] = extW_2
runInfos[n,"name"] = paste(config_names_dnase[i], config_names_exon[j], sep = ",")
print(runInfos[n, "name"])
corSignals[[n]] = mclapply(rowIDs, twoMatRowCor, mat1, mat2, rowIDs_s1, rowIDs_s2, mc.cores=num_threads)
}
}
##
## compute correlations for background pairs
##
n = 0
print("compute correlations for background pairs")
for (i in 1:length(unwanted_factors_dnase)) {
k_1 = unwanted_factors_dnase[i]
extW_1 = known_factors_dnase[i]
if (pairInConfiguration(k_1, extW_1, KsMate1, NsMate1 )== 0) {
next
}
mat1 = readMat(exportDNaseRet$mats[[i]])
for (j in 1:length(unwanted_factors_exon)) {
k_2 = unwanted_factors_exon[j]
extW_2 = known_factors_exon[j]
cfg2=0
if (OnewayConfig) {
cfg2 = pairInTwoConfig(k_1, extW_1, KsMate1, NsMate1, k_2, extW_2, KsMate2, NsMate2)
} else {
cfg2 = pairInConfiguration(k_2, extW_2, KsMate2, NsMate2)
}
if (cfg2 == 0) {
next
}
mat2 = readMat(exportExonNoiseRet$mats[[j]])
n = n + 1
print(runInfos[n, "name"])
corNoises[[n]] = mclapply(rowIDs, twoMatRowCor, mat1, mat2, rowIDs_n1, rowIDs_n2, mc.cores=num_threads)
}
}
plotOutDir = paste0(thisScriptDir, "/Dnase")
##
## AUC Table
##
runRowCnt = getConfigCnt(runInfos[,"k1"], runInfos[,"n1"])
runColCnt = getConfigCnt(runInfos[,"k2"], runInfos[,"n2"])
runAUCs = matrix(0, runRowCnt, runColCnt)
#max TPR within given FDR level
runTPRs = matrix(0, runRowCnt, runColCnt)
#max TPR within given FDR level
runSensitivity = matrix(0, runRowCnt, runColCnt)
FullRowCnt = length(corSignals[[1]])
TopCnt = min(50000, FullRowCnt)
##
## Signal and Noise density
##
for(n in 1:N) {
signalScores = unlist(corSignals[[n]])
noiseScores = unlist(corNoises[[n]])
pdf(file = paste0(plotOutDir, "/sn_density_", runInfos[n,"name"], ".pdf"))
plot(density(noiseScores, na.rm = TRUE, bw = 0.01), lwd = 3, col = "deepskyblue", xlim = c(-1, 1), ylim = c(0, 3))
lines(density(signalScores, na.rm = TRUE), lwd = 3, col = "red")
dev.off()
}
##
## Sensitivity plot
##
pdf(file = paste0(plotOutDir, "/accuracy.pdf"))
plot(c(0, TopCnt), c(80, 100), type = "n", xlab = "top # of pairs", ylab = "% of signal pairs", xlim = c(0, TopCnt))
colors = c("salmon4", "red2", "dodgerblue3", "darkorange1", "green2", "black")
colors = rep(colors, as.integer(N/length(colors))+1)
linetype <- rep(1, N)
plotchar <- rep(19, N)
sens = rep(0, N)
legendTxts = rep("", N)
for(i in 1:N) {
scores = c(unlist(corSignals[[i]]), unlist(corNoises[[i]]))
lbs = c(rep(1,FullRowCnt), rep(0,FullRowCnt))
oRowIDs = order(scores, decreasing = TRUE)
scores = scores[oRowIDs[1:TopCnt]]
lbs = lbs[oRowIDs[1:TopCnt]]
xs = seq(from=100, to=TopCnt, by=100)
ys = rep(1, length(xs))
for( j in 1:length(xs)) {
ys[j] = sum(lbs[1:xs[j]])/xs[j]
}
print(xs)
print(ys)
sens[i] = ys[length(xs)]
lines(xs, ys*100, type = "l", lwd = 2, lty = linetype[i], col = colors[i], pch = plotchar[i])
legendTxts[i] = paste0("[", runInfos[i,"name"],"], accuracy ", sprintf("%.3f",sens[i]))
}
# add a legend
legend(200, 94, legend = legendTxts, cex = 1, col = colors, pch = plotchar, lty = linetype, bty = "n")
dev.off()
q(save="no")
##
## AUC Table
##
print(N)
for(n in 1:N) {
scores=c(unlist(corSignals[[n]]), unlist(corNoises[[n]]))
lbs = c(rep(1,FullRowCnt), rep(0,FullRowCnt))
oRowIDs = order(scores, decreasing = TRUE)
scores = scores[oRowIDs[1:TopCnt]]
lbs = lbs[oRowIDs[1:TopCnt]]
pred <- prediction(scores, lbs)
perf <- performance(pred,"auc")
runRowID = getConfigOrder(
runInfos[,"k1"], runInfos[,"n1"],
runInfos[n,"k1"], runInfos[n,"n1"])
runColID = getConfigOrder(
runInfos[,"k2"], runInfos[,"n2"],
runInfos[n,"k2"], runInfos[n,"n2"])
runAUCs[runRowID, runColID] = as.numeric(perf@y.values)
perf <- performance(pred,"tpr","fpr")
xs = as.numeric(unlist(perf@x.values)) #fpr
ys = as.numeric(unlist(perf@y.values)) #tpr
fdr = xs/(xs+ys*H1H0Ratio)
runTPRs[runRowID, runColID] = max(ys[fdr<MAX_FDR], na.rm = TRUE)
}
xlabls = getConfigTexts(runInfos[,"k1"], runInfos[,"n1"])
xats=1:length(xlabls)
ylabls = getConfigTexts(runInfos[,"k2"], runInfos[,"n2"])
yats=1:length(ylabls)
##
## AUC matrix plot
##
minAUC = min(runAUCs)
maxAUC = max(runAUCs)
pdf(file = paste0(plotOutDir,"/aucs.pdf"))
levelplot(runAUCs,
scales = list(x = list(at=xats, labels=xlabls), y = list(at=yats, labels=ylabls),tck = c(1,0)),
main="AUC",
colorkey = FALSE,
xlab="DNase",
ylab="Exon",
at=unique(c(seq(minAUC-0.01, maxAUC+0.01,length=100))),
col.regions = colorRampPalette(c("white", "red"))(1e2),
panel=function(x,y,z,...) {
panel.levelplot(x,y,z,...)
panel.text(x, y, round(z,2))})
dev.off()
##
## TPR matrix plot
##
minTPR = min(runTPRs)
maxTPR = max(runTPRs)
pdf(file = paste0(plotOutDir, "/tprs.pdf"))
levelplot(runTPRs,
scales = list(x = list(at=xats, labels=xlabls), y = list(at=yats, labels=ylabls),tck = c(1,0)),
main=paste("Max TPR with FDR <",MAX_FDR,sep=""),
colorkey = FALSE,
xlab="DNase",
ylab="Exon",
at = unique(c(seq(minTPR-0.01, maxTPR+0.01,length=100))),
col.regions = colorRampPalette(c("white", "red"))(1e2),
panel=function(x,y,z,...) {
panel.levelplot(x,y,z,...)
panel.text(x, y, round(z,2))})
dev.off()
##
## ROC plot
##
pdf(file = paste0(plotOutDir,"/roc.pdf"))
plot(c(0,1), c(0,1), type="n", xlab="False positive rate", ylab="True positive rate", xlim=c(0,1))
abline(a=0, b=1, col="gray", lwd=1, lty = 2)
colors =c("salmon4", "red2", "dodgerblue3", "darkorange1", "green2", "black")
colors=rep(colors,as.integer(N/length(colors))+1)
linetype <- rep(1,N)
plotchar <- rep(19,N)
aucs = rep(0,N)
legendTxts = rep("",N)
FullRowCnt = length(corSignals[[1]])
TopCnt = min(50000, FullRowCnt)
for(i in 1:N) {
scores=c(unlist(corSignals[[i]]), unlist(corNoises[[i]]))
lbs=c(rep(1,FullRowCnt), rep(0,FullRowCnt))
oRowIDs = order(scores,decreasing = TRUE)
scores=scores[oRowIDs[1:TopCnt]]
lbs=lbs[oRowIDs[1:TopCnt]]
pred <- prediction( scores, lbs)
perf <- performance(pred,"tpr","fpr")
xs=as.numeric(unlist(perf@x.values))
ys=as.numeric(unlist(perf@y.values))
perf <- performance(pred,"auc")
aucs[i]=perf@y.values
lines(xs, ys, type="l", lwd=2, lty=linetype[i], col=colors[i], pch=plotchar[i])
legendTxts[i]=paste("[",runInfos[i,"name"],"], auc ",sprintf("%.2f",aucs[i]),sep="")
}
# add a legend
legend(0.6,0.6, legend=legendTxts,
cex=1, col=colors, pch=plotchar, lty=linetype, bty ="n")
dev.off()
##
## FDR plot
##
pdf(file = paste0(plotOutDir, "/fdr.pdf", sep=""))
plot(c(0,1), c(0,1), type="n", xlab="False discovery rate", ylab="True positive rate", xlim=c(0,1))
abline(a=0,b=1, col="gray", lwd=1, lty = 2)
#colors <- 1:N
linetype <- rep(1,N)
plotchar <- rep(19,N)
aucs = rep(0,N)
legendTxts=rep("",N)
for(i in 1:N) {
scores = c(unlist(corSignals[[i]]), unlist(corNoises[[i]]))
lbs=c(rep(1,FullRowCnt), rep(0,FullRowCnt))
oRowIDs = order(scores,decreasing = TRUE)
scores=scores[oRowIDs[1:TopCnt]]
lbs=lbs[oRowIDs[1:TopCnt]]
pred <- prediction( scores, lbs)
perf <- performance(pred,"tpr","fpr")
xs=as.numeric(unlist(perf@x.values)) #fpr
ys=as.numeric(unlist(perf@y.values)) #tpr
fdr=xs/(xs+ys*H1H0Ratio)
maxTPR=max(ys[fdr<MAX_FDR],na.rm = TRUE)
aucs[i]=maxTPR
xs=fdr
#perf <- performance(pred,"auc")
#aucs[i]=perf@y.values
lines(xs, ys, type="l", lwd=2, lty=linetype[i], col=colors[i], pch=plotchar[i])
legendTxts[i]=paste("[",runInfos[i,"name"],"], TPR ",sprintf("%.2f",aucs[i]),sep="")
}
# add a legend
legend(0.6,1.05, legend=legendTxts,
cex=1, col=colors, pch=plotchar, lty=linetype, bty ="n")
dev.off() | /examples/analysis/DukeUwDnase/s12-correlation-conservative/bdvd-correlation.R | no_license | ecto/BDT | R | false | false | 16,706 | r | rm(list=ls())
library("bdt")
library(parallel)
library("ROCR")
library(latticeExtra)
thisScriptDir = getScriptDir()
source(paste0(thisScriptDir, '/../../../config/bdt_path.R'))
## some ultility functions
# Return the 1-based index of the first position where (x, y) matches the
# parallel vectors (xs, ys), or 0 when no position matches both values.
pairInConfiguration <- function(x, y, xs, ys) {
matched = 0
# Linear scan; stops at the first index where both coordinates agree.
# NOTE(review): 1:length(xs) misbehaves when xs is empty — TODO confirm
# callers never pass zero-length vectors.
for (m in 1:length(xs)) {
if (x == xs[m] && y == ys[m]) {
matched = m
break
}
}
return (matched)
}
# Return the 1-based index m at which BOTH configuration pairs match at the
# same position: (x1, y1) == (xs1[m], ys1[m]) and (x2, y2) == (xs2[m], ys2[m]).
# Returns 0 when no index satisfies all four equalities.
pairInTwoConfig <- function(x1, y1, xs1, ys1, x2, y2, xs2, ys2) {
matched=0
for (m in 1:length(xs1)) {
if(x1 == xs1[m] && y1 == ys1[m] && x2 == xs2[m] && y2 == ys2[m]) {
matched = m
break
}
}
return (matched)
}
# Count distinct configurations; each (k, n) pair is encoded as the single
# key k + n * 1000 so known-factor configurations get keys above 1000.
getConfigCnt <- function(ks, ns) {
oval = unique(ks+ns*1000) #put known factors to rightmost
return (length(oval))
}
# Rank (1-based) of configuration (k, n) among the sorted distinct encoded
# keys of (ks, ns); returns 0 when the configuration is absent.
getConfigOrder <- function(ks, ns, k, n) {
oval = unique(ks+ns*1000) #put known factors to rightmost
oval = oval[order(oval)]
# Encode the query pair with the same key scheme, then linear-search
# the sorted keys for its position.
v = k + n*1000
for( i in 1:length(oval)) {
if(v==oval[i])
return (i)
}
return (0)
}
# Display labels for the sorted distinct configurations: "KF" for
# known-factor entries (encoded key >= 1000), otherwise the
# unwanted-factor count rendered as a string.
getConfigTexts <- function(ks, ns) {
oval=unique(ks+ns*1000) #put known factors to rightmost
oval=oval[order(oval)]
txts=rep("", length(oval))
for( i in 1:length(oval)) {
if(oval[i] >= 1000) {
txts[i]="KF"
} else {
# %% 1000 strips the known-factor offset before formatting.
txts[i] = as.character(oval[i]%%1000)
}
}
return (txts)
}
# Pearson correlation between row rowIDs1[i] of mat1 and row rowIDs2[i]
# of mat2 (used via mclapply over pair indices i).
twoMatRowCor <- function(i, mat1, mat2, rowIDs1, rowIDs2) {
r = cor(mat1[rowIDs1[i],], mat2[rowIDs2[i],])
return (r)
}
# Read a tab-separated text file and return its first column as a plain
# vector ([, 1] drops the data.frame wrapper).
readVectorFromTxt <- function(txtFile) {
vec = read.table(txtFile, sep = "\t")
vec = vec[,1]
return (vec)
}
need_export = FALSE
num_threads = 24
## 132 cell types both in DNase and Exon dataset
## export DNase data
## use the first sample in each cell type as column id and obtain cell type level measurement
dnaseSampleIds = c(1, 3, 9, 17, 19, 22, 28, 36, 39, 42, 49, 55, 60, 62, 68, 71,
73, 75, 79, 81, 84, 88, 90, 93, 95, 97, 100, 102, 104, 109,
112, 115, 117, 119, 121, 125, 127, 130, 137, 146, 154, 157,
159, 161, 164, 168, 171, 175, 177, 181, 185, 190, 201, 209,
215, 217, 221, 223, 225, 227, 229, 231, 233, 235, 237, 244,
249, 251, 252, 259, 261, 262, 264, 265, 267, 269, 271, 273,
275, 276, 278, 280, 282, 284, 286, 288, 290, 296, 298, 300,
302, 304, 308, 310, 312, 314, 316, 318, 320, 322, 324, 326,
328, 329, 331, 333, 335, 337, 339, 341, 351, 353, 371, 374,
376, 378, 380, 382, 386, 388, 390, 392, 396, 398, 400, 402,
404, 409, 414, 420, 422, 424)
unwanted_factors_dnase = c(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 20, 30, 0)
known_factors_dnase = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)
config_names_dnase = as.character(unwanted_factors_dnase)
# last one is for know factor only
config_names_dnase[length(config_names_dnase)] = 'KF'
if (need_export) {
exportDNaseRet = bdvdExport(
bdt_home = bdtHome,
thread_num = num_threads,
mem_size = 16000,
column_ids = dnaseSampleIds,
bdvd_dir = paste0(thisScriptDir, '/../s02-bdvd/out'),
component = 'signal', #cell type level measurement
artifact_detection = 'conservative',
unwanted_factors = unwanted_factors_dnase,
known_factors = known_factors_dnase,
rowidxs_input = paste0("text-rowids@", bdtDatasetsDir, "/DNaseExonCorrelation/100bp/s01-TSS-PairIdxs/DNase_UniqueFeatureIdxs.txt"),
rowidxs_index_base = 0,
out = paste0(thisScriptDir,"/Dnase"))
} else {
exportDNaseRet = readBdvdExportOutput(paste0(thisScriptDir,"/Dnase"))
}
## export Exon data
## use the first sample in each cell type as column id and obtain cell type level measurement
exonSampleIds = c(78, 63, 55, 54, 80, 99, 43, 121, 124, 127, 72, 5, 22, 24, 40,
26, 28, 30, 18, 101, 51, 119, 11, 37, 86, 8, 76, 84, 45, 104,
48, 82, 92, 90, 14, 115, 117, 109, 1, 111, 67, 65, 39, 130, 98,
133, 32, 107, 57, 61, 16, 69, 35, 88, 94, 96, 204, 206, 208, 210,
212, 237, 277, 166, 164, 214, 153, 279, 233, 317, 319, 321, 328,
297, 217, 295, 280, 271, 323, 324, 219, 243, 231, 245, 221, 239,
223, 299, 301, 202, 282, 171, 247, 315, 251, 253, 255, 257, 259,
241, 249, 225, 310, 261, 265, 263, 180, 182, 303, 227, 267, 187,
305, 189, 273, 326, 269, 229, 235, 311, 193, 313, 290, 195, 173,
292, 161, 157, 176, 294, 306, 308)
unwanted_factors_exon = c(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 20, 30, 0)
known_factors_exon = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)
config_names_exon = as.character(unwanted_factors_exon)
# last one is for know factor only
config_names_exon[length(config_names_exon)] = 'KF'
# export randomly selected rows from Exon dataset
if (need_export) {
exportExonNoiseRet = bdvdExport(
bdt_home = bdtHome,
thread_num = num_threads,
mem_size = 16000,
column_ids = exonSampleIds,
bdvd_dir = paste0(thisScriptDir, '/../../DukeUwExonArray/s01-bdvd/out'),
component = 'signal', #cell type level measurement
artifact_detection = 'conservative',
unwanted_factors = unwanted_factors_exon,
known_factors = known_factors_exon,
rowidxs_input = paste0("text-rowids@", bdtDatasetsDir, "/DNaseExonCorrelation/100bp/s02-Random-PairIdxs/Exon_UniqueFeatureIdxs.txt"),
rowidxs_index_base = 0,
out = paste0(thisScriptDir,"/ExonNoise"))
} else {
exportExonNoiseRet = readBdvdExportOutput(paste0(thisScriptDir,"/ExonNoise"))
}
# export associated rows (via TSS) from Exon dataset
if (need_export) {
exportExonSignalRet = bdvdExport(
bdt_home = bdtHome,
thread_num = num_threads,
mem_size = 16000,
column_ids = exonSampleIds,
bdvd_dir = paste0(thisScriptDir, '/../../DukeUwExonArray/s01-bdvd/out'),
component = 'signal', #cell type level measurement
artifact_detection = 'conservative',
unwanted_factors = unwanted_factors_exon,
known_factors = known_factors_exon,
rowidxs_input = paste0("text-rowids@", bdtDatasetsDir, "/DNaseExonCorrelation/100bp/s01-TSS-PairIdxs/Exon_UniqueFeatureIdxs.txt"),
rowidxs_index_base = 0,
out = paste0(thisScriptDir,"/ExonSignal"))
} else {
exportExonSignalRet = readBdvdExportOutput(paste0(thisScriptDir,"/ExonSignal"))
}
# 1-based row ids
rowIDs_s1 = readVectorFromTxt(paste0(bdtDatasetsDir, "/DNaseExonCorrelation/100bp/s01-TSS-PairIdxs/DNase_RowIDs.txt"))
rowIDs_s2 = readVectorFromTxt(paste0(bdtDatasetsDir, "/DNaseExonCorrelation/100bp/s01-TSS-PairIdxs/Exon_RowIDs.txt"))
rowIDs_n1 = readVectorFromTxt(paste0(bdtDatasetsDir, "/DNaseExonCorrelation/100bp/s01-TSS-PairIdxs/DNase_RowIDs.txt"))
rowIDs_n2 = readVectorFromTxt(paste0(bdtDatasetsDir, "/DNaseExonCorrelation/100bp/s02-Random-PairIdxs/Exon_RowIDs.txt"))
rowIDs = 1:length(rowIDs_s1)
# a subset of configs are to be used for analysis
KsMate1 = c(0, 0, 1, 2, 2, 3, 3, 10)
NsMate1 = c(0, 1, 0, 0, 0, 0, 0, 0)
KsMate2 = c(0, 0, 1, 2, 3, 2, 3, 10)
NsMate2 = c(0, 1, 0, 0, 0, 0, 0, 0)
OnewayConfig = TRUE
#KsMate1 = c(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0)
#NsMate1 = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)
#KsMate2 = c(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 0)
#NsMate2 = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1)
#OnewayConfig = FALSE
N = length(KsMate1) * length(KsMate2)
if(OnewayConfig){
N = length(KsMate1)
}
corSignals = vector(mode="list", length = N)
corNoises = vector(mode="list", length = N)
runInfos = data.frame(
name = rep("", N),
k1 = rep(0, N),
n1 = rep(0, N),
k2 = rep(0, N),
n2 = rep(0, N),
stringsAsFactors = FALSE)
H1H0Ratio = 1
MAX_FDR = 0.05
##
## compute correlations for signal pairs
##
n = 0
print("compute correlations for signal pairs")
for (i in 1:length(unwanted_factors_dnase)) {
k_1 = unwanted_factors_dnase[i]
extW_1 = known_factors_dnase[i]
if (pairInConfiguration(k_1, extW_1, KsMate1, NsMate1 )== 0) {
next
}
mat1 = readMat(exportDNaseRet$mats[[i]])
for (j in 1:length(unwanted_factors_exon)) {
k_2 = unwanted_factors_exon[j]
extW_2 = known_factors_exon[j]
cfg2=0
if (OnewayConfig) {
cfg2 = pairInTwoConfig(k_1, extW_1, KsMate1, NsMate1, k_2, extW_2, KsMate2, NsMate2)
} else {
cfg2 = pairInConfiguration(k_2, extW_2, KsMate2, NsMate2)
}
if (cfg2 == 0) {
next
}
mat2 = readMat(exportExonSignalRet$mats[[j]])
n = n + 1
runInfos[n,"k1"] = k_1
runInfos[n,"n1"] = extW_1
runInfos[n,"k2"] = k_2
runInfos[n,"n2"] = extW_2
runInfos[n,"name"] = paste(config_names_dnase[i], config_names_exon[j], sep = ",")
print(runInfos[n, "name"])
corSignals[[n]] = mclapply(rowIDs, twoMatRowCor, mat1, mat2, rowIDs_s1, rowIDs_s2, mc.cores=num_threads)
}
}
##
## compute correlations for background pairs
##
n = 0
print("compute correlations for background pairs")
for (i in 1:length(unwanted_factors_dnase)) {
k_1 = unwanted_factors_dnase[i]
extW_1 = known_factors_dnase[i]
if (pairInConfiguration(k_1, extW_1, KsMate1, NsMate1 )== 0) {
next
}
mat1 = readMat(exportDNaseRet$mats[[i]])
for (j in 1:length(unwanted_factors_exon)) {
k_2 = unwanted_factors_exon[j]
extW_2 = known_factors_exon[j]
cfg2=0
if (OnewayConfig) {
cfg2 = pairInTwoConfig(k_1, extW_1, KsMate1, NsMate1, k_2, extW_2, KsMate2, NsMate2)
} else {
cfg2 = pairInConfiguration(k_2, extW_2, KsMate2, NsMate2)
}
if (cfg2 == 0) {
next
}
mat2 = readMat(exportExonNoiseRet$mats[[j]])
n = n + 1
print(runInfos[n, "name"])
corNoises[[n]] = mclapply(rowIDs, twoMatRowCor, mat1, mat2, rowIDs_n1, rowIDs_n2, mc.cores=num_threads)
}
}
plotOutDir = paste0(thisScriptDir, "/Dnase")
##
## AUC Table
##
runRowCnt = getConfigCnt(runInfos[,"k1"], runInfos[,"n1"])
runColCnt = getConfigCnt(runInfos[,"k2"], runInfos[,"n2"])
runAUCs = matrix(0, runRowCnt, runColCnt)
#max TPR within given FDR level
runTPRs = matrix(0, runRowCnt, runColCnt)
#max TPR within given FDR level
runSensitivity = matrix(0, runRowCnt, runColCnt)
FullRowCnt = length(corSignals[[1]])
TopCnt = min(50000, FullRowCnt)
##
## Signal and Noise density
##
for(n in 1:N) {
signalScores = unlist(corSignals[[n]])
noiseScores = unlist(corNoises[[n]])
pdf(file = paste0(plotOutDir, "/sn_density_", runInfos[n,"name"], ".pdf"))
plot(density(noiseScores, na.rm = TRUE, bw = 0.01), lwd = 3, col = "deepskyblue", xlim = c(-1, 1), ylim = c(0, 3))
lines(density(signalScores, na.rm = TRUE), lwd = 3, col = "red")
dev.off()
}
##
## Sensitivity plot
##
pdf(file = paste0(plotOutDir, "/accuracy.pdf"))
plot(c(0, TopCnt), c(80, 100), type = "n", xlab = "top # of pairs", ylab = "% of signal pairs", xlim = c(0, TopCnt))
colors = c("salmon4", "red2", "dodgerblue3", "darkorange1", "green2", "black")
colors = rep(colors, as.integer(N/length(colors))+1)
linetype <- rep(1, N)
plotchar <- rep(19, N)
sens = rep(0, N)
legendTxts = rep("", N)
for(i in 1:N) {
scores = c(unlist(corSignals[[i]]), unlist(corNoises[[i]]))
lbs = c(rep(1,FullRowCnt), rep(0,FullRowCnt))
oRowIDs = order(scores, decreasing = TRUE)
scores = scores[oRowIDs[1:TopCnt]]
lbs = lbs[oRowIDs[1:TopCnt]]
xs = seq(from=100, to=TopCnt, by=100)
ys = rep(1, length(xs))
for( j in 1:length(xs)) {
ys[j] = sum(lbs[1:xs[j]])/xs[j]
}
print(xs)
print(ys)
sens[i] = ys[length(xs)]
lines(xs, ys*100, type = "l", lwd = 2, lty = linetype[i], col = colors[i], pch = plotchar[i])
legendTxts[i] = paste0("[", runInfos[i,"name"],"], accuracy ", sprintf("%.3f",sens[i]))
}
# add a legend
legend(200, 94, legend = legendTxts, cex = 1, col = colors, pch = plotchar, lty = linetype, bty = "n")
dev.off()
q(save="no")
##
## AUC Table
##
print(N)
for(n in 1:N) {
scores=c(unlist(corSignals[[n]]), unlist(corNoises[[n]]))
lbs = c(rep(1,FullRowCnt), rep(0,FullRowCnt))
oRowIDs = order(scores, decreasing = TRUE)
scores = scores[oRowIDs[1:TopCnt]]
lbs = lbs[oRowIDs[1:TopCnt]]
pred <- prediction(scores, lbs)
perf <- performance(pred,"auc")
runRowID = getConfigOrder(
runInfos[,"k1"], runInfos[,"n1"],
runInfos[n,"k1"], runInfos[n,"n1"])
runColID = getConfigOrder(
runInfos[,"k2"], runInfos[,"n2"],
runInfos[n,"k2"], runInfos[n,"n2"])
runAUCs[runRowID, runColID] = as.numeric(perf@y.values)
perf <- performance(pred,"tpr","fpr")
xs = as.numeric(unlist(perf@x.values)) #fpr
ys = as.numeric(unlist(perf@y.values)) #tpr
fdr = xs/(xs+ys*H1H0Ratio)
runTPRs[runRowID, runColID] = max(ys[fdr<MAX_FDR], na.rm = TRUE)
}
xlabls = getConfigTexts(runInfos[,"k1"], runInfos[,"n1"])
xats=1:length(xlabls)
ylabls = getConfigTexts(runInfos[,"k2"], runInfos[,"n2"])
yats=1:length(ylabls)
##
## AUC matrix plot
##
minAUC = min(runAUCs)
maxAUC = max(runAUCs)
pdf(file = paste0(plotOutDir,"/aucs.pdf"))
levelplot(runAUCs,
scales = list(x = list(at=xats, labels=xlabls), y = list(at=yats, labels=ylabls),tck = c(1,0)),
main="AUC",
colorkey = FALSE,
xlab="DNase",
ylab="Exon",
at=unique(c(seq(minAUC-0.01, maxAUC+0.01,length=100))),
col.regions = colorRampPalette(c("white", "red"))(1e2),
panel=function(x,y,z,...) {
panel.levelplot(x,y,z,...)
panel.text(x, y, round(z,2))})
dev.off()
##
## TPR matrix plot
##
minTPR = min(runTPRs)
maxTPR = max(runTPRs)
pdf(file = paste0(plotOutDir, "/tprs.pdf"))
levelplot(runTPRs,
scales = list(x = list(at=xats, labels=xlabls), y = list(at=yats, labels=ylabls),tck = c(1,0)),
main=paste("Max TPR with FDR <",MAX_FDR,sep=""),
colorkey = FALSE,
xlab="DNase",
ylab="Exon",
at = unique(c(seq(minTPR-0.01, maxTPR+0.01,length=100))),
col.regions = colorRampPalette(c("white", "red"))(1e2),
panel=function(x,y,z,...) {
panel.levelplot(x,y,z,...)
panel.text(x, y, round(z,2))})
dev.off()
##
## ROC plot
##
pdf(file = paste0(plotOutDir,"/roc.pdf"))
plot(c(0,1), c(0,1), type="n", xlab="False positive rate", ylab="True positive rate", xlim=c(0,1))
abline(a=0, b=1, col="gray", lwd=1, lty = 2)
colors =c("salmon4", "red2", "dodgerblue3", "darkorange1", "green2", "black")
colors=rep(colors,as.integer(N/length(colors))+1)
linetype <- rep(1,N)
plotchar <- rep(19,N)
aucs = rep(0,N)
legendTxts = rep("",N)
FullRowCnt = length(corSignals[[1]])
TopCnt = min(50000, FullRowCnt)
for(i in 1:N) {
scores=c(unlist(corSignals[[i]]), unlist(corNoises[[i]]))
lbs=c(rep(1,FullRowCnt), rep(0,FullRowCnt))
oRowIDs = order(scores,decreasing = TRUE)
scores=scores[oRowIDs[1:TopCnt]]
lbs=lbs[oRowIDs[1:TopCnt]]
pred <- prediction( scores, lbs)
perf <- performance(pred,"tpr","fpr")
xs=as.numeric(unlist(perf@x.values))
ys=as.numeric(unlist(perf@y.values))
perf <- performance(pred,"auc")
aucs[i]=perf@y.values
lines(xs, ys, type="l", lwd=2, lty=linetype[i], col=colors[i], pch=plotchar[i])
legendTxts[i]=paste("[",runInfos[i,"name"],"], auc ",sprintf("%.2f",aucs[i]),sep="")
}
# add a legend
legend(0.6,0.6, legend=legendTxts,
cex=1, col=colors, pch=plotchar, lty=linetype, bty ="n")
dev.off()
##
## FDR plot: TPR as a function of false discovery rate, where
## FDR = FP / (FP + TP * H1H0Ratio) is derived from the ROC curve.
##
# FIX: paste0() has no `sep` parameter; the former sep="" was silently
# pasted into the path as an extra empty string
pdf(file = paste0(plotOutDir, "/fdr.pdf"))
plot(c(0,1), c(0,1), type="n", xlab="False discovery rate", ylab="True positive rate", xlim=c(0,1))
abline(a=0,b=1, col="gray", lwd=1, lty = 2)
#colors <- 1:N
linetype <- rep(1,N)
plotchar <- rep(19,N)
aucs = rep(0,N)
legendTxts=rep("",N)
for(i in 1:N) {
  scores = c(unlist(corSignals[[i]]), unlist(corNoises[[i]]))
  lbs=c(rep(1,FullRowCnt), rep(0,FullRowCnt))
  oRowIDs = order(scores,decreasing = TRUE)
  scores=scores[oRowIDs[1:TopCnt]]
  lbs=lbs[oRowIDs[1:TopCnt]]
  pred <- prediction( scores, lbs)
  perf <- performance(pred,"tpr","fpr")
  xs=as.numeric(unlist(perf@x.values)) #fpr
  ys=as.numeric(unlist(perf@y.values)) #tpr
  # convert FPR to FDR; the origin yields 0/0 = NaN, handled by na.rm below
  fdr=xs/(xs+ys*H1H0Ratio)
  # NOTE: this maxTPR shadows the heatmap's maxTPR defined earlier
  maxTPR=max(ys[fdr<MAX_FDR],na.rm = TRUE)
  aucs[i]=maxTPR
  xs=fdr
  #perf <- performance(pred,"auc")
  #aucs[i]=perf@y.values
  lines(xs, ys, type="l", lwd=2, lty=linetype[i], col=colors[i], pch=plotchar[i])
  legendTxts[i]=paste("[",runInfos[i,"name"],"], TPR ",sprintf("%.2f",aucs[i]),sep="")
}
# add a legend
legend(0.6,1.05, legend=legendTxts,
       cex=1, col=colors, pch=plotchar, lty=linetype, bty ="n")
dev.off() |
context('View taxonomic authorities')
library(taxonomyCleanr)
# View available authorities ---------------------------------------------------
# (the former "Trim white space" header was copy-pasted from another test file)
testthat::test_that('View available authorities', {
  authorities <- view_taxa_authorities()
  # the summary of supported authorities is returned as a plain data.frame
  expect_equal(
    class(authorities),
    'data.frame'
  )
  # ... containing only the documented columns
  # (expect_true() reads better than expect_equal(..., TRUE))
  expect_true(
    all(
      colnames(authorities) %in%
        c('id', 'authority', 'resolve_sci_taxa', 'resolve_comm_taxa'))
  )
})
| /tests/testthat/test_view_taxa_authorities.R | permissive | EDIorg/taxonomyCleanr | R | false | false | 448 | r | context('View taxonomic authorities')
library(taxonomyCleanr)
# View available authorities ---------------------------------------------------
# (the former "Trim white space" header was copy-pasted from another test file)
testthat::test_that('View available authorities', {
  authorities <- view_taxa_authorities()
  # the summary of supported authorities is returned as a plain data.frame
  expect_equal(
    class(authorities),
    'data.frame'
  )
  # ... containing only the documented columns
  # (expect_true() reads better than expect_equal(..., TRUE))
  expect_true(
    all(
      colnames(authorities) %in%
        c('id', 'authority', 'resolve_sci_taxa', 'resolve_comm_taxa'))
  )
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{summarise_per_minute}
\alias{summarise_per_minute}
\title{Summarize data per minute}
\usage{
summarise_per_minute(
data,
id_columns = c("idPlayerSeason"),
scale_columns = c("pts", "fg", "ast", "tov", "blk", "stl", "drb", "trb", "orb", "ft",
"pf", "countLayupsShooting", "countDunks", "hlf")
)
}
\arguments{
\item{data}{a data frame}
\item{id_columns}{vector of id columns}
\item{scale_columns}{vector of columns to scale}
}
\value{
a \code{tibble}
}
\description{
Summarize data per minute
}
| /man/summarise_per_minute.Rd | no_license | abresler/nbastatR | R | false | true | 596 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{summarise_per_minute}
\alias{summarise_per_minute}
\title{Summarize data per minute}
\usage{
summarise_per_minute(
data,
id_columns = c("idPlayerSeason"),
scale_columns = c("pts", "fg", "ast", "tov", "blk", "stl", "drb", "trb", "orb", "ft",
"pf", "countLayupsShooting", "countDunks", "hlf")
)
}
\arguments{
\item{data}{a data frame}
\item{id_columns}{vector of id columns}
\item{scale_columns}{vector of columns to scale}
}
\value{
a \code{tibble}
}
\description{
Summarize data per minute
}
|
library(vmstools)
data(tacsat)
#Sort the VMS data
tacsat <- sortTacsat(tacsat)
# keep a small subset so the example runs quickly
tacsat <- tacsat[1:1000,]
#Filter the Tacsat data (speed 4-8 knots, no heading filter, drop duplicates)
# use TRUE rather than T: T is an ordinary variable and can be reassigned
tacsat <- filterTacsat(tacsat,c(4,8),hd=NULL,remDup=TRUE)
#Interpolate the VMS data
interpolation <- interpolateTacsat(tacsat,interval=120,margin=10,res=100,method="cHs",params=list(fm=0.5,distscale=20,sigline=0.2,st=c(2,6)),headingAdjustment=0)
# overall bounding box across all interpolated tracks; the first row of each
# track is skipped throughout ([-1,]) -- presumably a header/metadata row
xrange <- range(unlist(lapply(interpolation,function(x){return(range(x[-1,1]))})),na.rm=TRUE)
yrange <- range(unlist(lapply(interpolation,function(x){return(range(x[-1,2]))})),na.rm=TRUE)
plot(interpolation[[1]][-1,1],interpolation[[1]][-1,2],type="l",pch=19,lwd=1,asp=1/lonLatRatio(interpolation[[1]][2,1],interpolation[[1]][2,2])[1],xlim=xrange,ylim=yrange,xlab="Longitude",ylab="Latitude")
for(i in 2:length(interpolation)){
  lines(interpolation[[i]][-1,1],interpolation[[i]][-1,2],type="l",pch=19,lwd=1,asp=1/lonLatRatio(interpolation[[i]][2,1],interpolation[[i]][2,2])[1])
}
# add a 0.5km gear width around every track and plot the swept area
interpolationGearWidth <- addWidth(interpolation,gearWidth=0.5)
plot(interpolationGearWidth,border="grey",col="grey",asp=1/lonLatRatio(interpolation[[1]][2,1],interpolation[[1]][2,2])[1],xlim=xrange,ylim=yrange,xlab="Longitude",ylab="Latitude")
box(); axis(1); axis(2); mtext(side=1,"Longitude",line=3); mtext(side=2,"Latitude",line=3)
#Plot the interpolation
# NOTE(review): int <- 112 assumes at least 112 interpolated tracks exist
# after the 1000-row subset above -- confirm before running.
int <- 112
plot(interpolation[[int]][-1,1],interpolation[[int]][-1,2],type="l",pch=19,lwd=2,asp=1/lonLatRatio(interpolation[[int]][2,1],interpolation[[int]][2,2])[1])
xs <- interpolation[[int]][-1,1]
ys <- interpolation[[int]][-1,2]
#Calculate the bearing towards and away from each point
bear1 <- bearing(xs[1:(length(xs)-2)],ys[1:(length(xs)-2)],xs[2:(length(xs)-1)],ys[2:(length(xs)-1)])
bear2 <- bearing(xs[2:(length(xs)-1)],ys[2:(length(xs)-1)],xs[3:length(xs)],ys[3:length(xs)])
# circular mean of the incoming and outgoing bearings, in degrees
avbear<- atan2(mapply(sum,sin(bear1*(pi/180))+sin(bear2*(pi/180))),mapply(sum,cos(bear1*(pi/180))+cos(bear2*(pi/180))))*(180/pi)
#Take the average of the two
# duplicate the first and last bearing so avbear has one entry per point
avbear<- c(avbear[1],avbear,avbear[length(avbear)])
#Calculate the destinated point taking a begin point, a bearing and a certain distance to travel
# offset each point 0.5km to the right (+90) and left (-90) of the heading
outpointr <- destFromBearing(xs,ys,(avbear+90+360)%%360,0.5)
outpointl <- destFromBearing(xs,ys,(avbear-90+360)%%360,0.5)
#Plot these lines
lines(outpointr[,1],outpointr[,2],col="red",lty=2)
lines(outpointl[,1],outpointl[,2],col="red",lty=2)
#Create polygons from it
# NOTE(review): 1:(nrow(outpointr)-1) misbehaves when outpointr has a single
# row (1:0 iterates over c(1,0)); seq_len(nrow(outpointr)-1) would be safer.
for(i in 1:(nrow(outpointr)-1)){
  polygon(x=c(outpointr[i,1],outpointl[i,1],outpointl[i+1,1],outpointr[i+1,1]),y=c(outpointr[i,2],outpointl[i,2],outpointl[i+1,2],outpointr[i+1,2]),col="black")
}
# build closed sp::Polygon objects (first vertex repeated at the end)
pols <- list()
for(i in 1:(nrow(outpointr)-1)){
  pols[[i]] <- Polygon(cbind(c(outpointr[i,1],outpointl[i,1],outpointl[i+1,1],outpointr[i+1,1],outpointr[i,1]),
                        c(outpointr[i,2],outpointl[i,2],outpointl[i+1,2],outpointr[i+1,2],outpointr[i,2])))
}
polys <- list()
# NOTE(review): this creates 10 identical Polygons objects differing only in
# ID -- presumably placeholder/demo code; verify the intent.
for(i in 1:10){
  polys[[i]] <- Polygons(pols,ID=ac(i))
}
spPolys <- SpatialPolygons(polys)
plot(spPolys,col="black")
#Destination:
#This function takes a starting x,y position, a bearing and a distance to follow the initial bearing.
#To get this bearing, you have to compute the bearing from an towards the x,y position. +/- 90 degrees
#as this the most outer point taken from the point you are heading in. Then travel along that heading
#for a certain km's and you'll get to your endpoint.
| /vmstools/inst/scripts/#destFromBearing.r | no_license | nielshintzen/vmstools | R | false | false | 3,498 | r | library(vmstools)
data(tacsat)
#Sort the VMS data
tacsat <- sortTacsat(tacsat)
tacsat <- tacsat[1:1000,]
#Filter the Tacsat data
tacsat <- filterTacsat(tacsat,c(4,8),hd=NULL,remDup=T)
#Interpolate the VMS data
interpolation <- interpolateTacsat(tacsat,interval=120,margin=10,res=100,method="cHs",params=list(fm=0.5,distscale=20,sigline=0.2,st=c(2,6)),headingAdjustment=0)
xrange <- range(unlist(lapply(interpolation,function(x){return(range(x[-1,1]))})),na.rm=T)
yrange <- range(unlist(lapply(interpolation,function(x){return(range(x[-1,2]))})),na.rm=T)
plot(interpolation[[1]][-1,1],interpolation[[1]][-1,2],type="l",pch=19,lwd=1,asp=1/lonLatRatio(interpolation[[1]][2,1],interpolation[[1]][2,2])[1],xlim=xrange,ylim=yrange,xlab="Longitude",ylab="Latitude")
for(i in 2:length(interpolation)){
lines(interpolation[[i]][-1,1],interpolation[[i]][-1,2],type="l",pch=19,lwd=1,asp=1/lonLatRatio(interpolation[[i]][2,1],interpolation[[i]][2,2])[1])
}
interpolationGearWidth <- addWidth(interpolation,gearWidth=0.5)
plot(interpolationGearWidth,border="grey",col="grey",asp=1/lonLatRatio(interpolation[[1]][2,1],interpolation[[1]][2,2])[1],xlim=xrange,ylim=yrange,xlab="Longitude",ylab="Latitude")
box(); axis(1); axis(2); mtext(side=1,"Longitude",line=3); mtext(side=2,"Latitude",line=3)
#Plot the interpolation
int <- 112
plot(interpolation[[int]][-1,1],interpolation[[int]][-1,2],type="l",pch=19,lwd=2,asp=1/lonLatRatio(interpolation[[int]][2,1],interpolation[[int]][2,2])[1])
xs <- interpolation[[int]][-1,1]
ys <- interpolation[[int]][-1,2]
#Calculate the bearing towards and away from each point
bear1 <- bearing(xs[1:(length(xs)-2)],ys[1:(length(xs)-2)],xs[2:(length(xs)-1)],ys[2:(length(xs)-1)])
bear2 <- bearing(xs[2:(length(xs)-1)],ys[2:(length(xs)-1)],xs[3:length(xs)],ys[3:length(xs)])
avbear<- atan2(mapply(sum,sin(bear1*(pi/180))+sin(bear2*(pi/180))),mapply(sum,cos(bear1*(pi/180))+cos(bear2*(pi/180))))*(180/pi)
#Take the average of the two
avbear<- c(avbear[1],avbear,avbear[length(avbear)])
#Calculate the destinated point taking a begin point, a bearing and a certain distance to travel
outpointr <- destFromBearing(xs,ys,(avbear+90+360)%%360,0.5)
outpointl <- destFromBearing(xs,ys,(avbear-90+360)%%360,0.5)
#Plot these lines
lines(outpointr[,1],outpointr[,2],col="red",lty=2)
lines(outpointl[,1],outpointl[,2],col="red",lty=2)
#Create polygons from it
for(i in 1:(nrow(outpointr)-1)){
polygon(x=c(outpointr[i,1],outpointl[i,1],outpointl[i+1,1],outpointr[i+1,1]),y=c(outpointr[i,2],outpointl[i,2],outpointl[i+1,2],outpointr[i+1,2]),col="black")
}
pols <- list()
for(i in 1:(nrow(outpointr)-1)){
pols[[i]] <- Polygon(cbind(c(outpointr[i,1],outpointl[i,1],outpointl[i+1,1],outpointr[i+1,1],outpointr[i,1]),
c(outpointr[i,2],outpointl[i,2],outpointl[i+1,2],outpointr[i+1,2],outpointr[i,2])))
}
polys <- list()
for(i in 1:10){
polys[[i]] <- Polygons(pols,ID=ac(i))
}
spPolys <- SpatialPolygons(polys)
plot(spPolys,col="black")
#Destination:
#This function takes a starting x,y position, a bearing and a distance to follow the initial bearing.
#To get this bearing, you have to compute the bearing from an towards the x,y position. +/- 90 degrees
#as this the most outer point taken from the point you are heading in. Then travel along that heading
#for a certain km's and you'll get to your endpoint.
|
#' Group Generic Functions for annmatrix Class
#'
#' The functions listed here work under the hood and are almost never called by the user.
#'
#' @param e1,e2 annmatrix objects.
#' @param x,y The objects being dispatched on by the group generic.
#' @param mx,my The methods found for objects 'x' and 'y'.
#' @param cl The call to the group generic.
#' @param reverse A logical value indicating whether 'x' and 'y' are reversed from the way they were supplied to the generic.
#'
#' @return An object of class 'annmatrix'.
#'
#' @author Karolis Koncevičius
#' @name groupgenerics
#' @export
Ops.annmatrix <- function(e1, e2) {
  # Remember the annotations and remaining classes of the annmatrix operand,
  # then strip it down to a plain matrix so callGeneric() below does not
  # dispatch straight back into this method.
  if (is.annmatrix(e1)) {
    myclass <- setdiff(class(e1), "annmatrix")
    pairclass <- oldClass(e2)
    rann <- attr(e1, ".annmatrix.rann")
    cann <- attr(e1, ".annmatrix.cann")
    e1 <- as.matrix(e1)
  } else if (is.annmatrix(e2)) {
    # NOTE(review): when both operands are annmatrix, only e1 is unwrapped
    # above; e2 is then handled by a nested dispatch and the outer (e1)
    # annotations win in the final result -- confirm this precedence is
    # intended.
    myclass <- setdiff(class(e2), "annmatrix")
    pairclass <- oldClass(e1)
    rann <- attr(e2, ".annmatrix.rann")
    cann <- attr(e2, ".annmatrix.cann")
    e2 <- as.matrix(e2)
  }
  result <- callGeneric(e1, e2)
  # Only return annmatrix if there is no specific method defined for this operations from the pair class
  # With help from Mikael Jagan on Stack Overflow: https://stackoverflow.com/a/75953638/1953718
  if (is.null(pairclass) ||
      (all(is.na(match(paste0("Ops.", pairclass), .S3methods("Ops")))) &&
       all(is.na(match(paste0(.Generic, ".", pairclass), .S3methods(.Generic)))))) {
    # reattach the annmatrix class and the saved row/column annotations
    result <- structure(result, class = c("annmatrix", myclass), .annmatrix.rann = rann, .annmatrix.cann = cann)
  }
  result
}
#' @rdname groupgenerics
#' @export
chooseOpsMethod.annmatrix <- function(x, y, mx, my, cl, reverse) {
  # Always prefer the annmatrix Ops method when R finds competing Ops methods
  # for both operands; Ops.annmatrix then decides how to treat the pair.
  TRUE
}
| /R/groupgenerics.r | no_license | karoliskoncevicius/annmatrix | R | false | false | 1,724 | r | #' Group Generic Functions for annmatrix Class
#'
#' The functions listed here work under the hood and are almost never called by the user.
#'
#' @param e1,e2 annmatrix objects.
#' @param x,y The objects being dispatched on by the group generic.
#' @param mx,my The methods found for objects 'x' and 'y'.
#' @param cl The call to the group generic.
#' @param reverse A logical value indicating whether 'x' and 'y' are reversed from the way they were supplied to the generic.
#'
#' @return An object of class 'annmatrix'.
#'
#' @author Karolis Koncevičius
#' @name groupgenerics
#' @export
Ops.annmatrix <- function(e1, e2) {
if (is.annmatrix(e1)) {
myclass <- setdiff(class(e1), "annmatrix")
pairclass <- oldClass(e2)
rann <- attr(e1, ".annmatrix.rann")
cann <- attr(e1, ".annmatrix.cann")
e1 <- as.matrix(e1)
} else if (is.annmatrix(e2)) {
myclass <- setdiff(class(e2), "annmatrix")
pairclass <- oldClass(e1)
rann <- attr(e2, ".annmatrix.rann")
cann <- attr(e2, ".annmatrix.cann")
e2 <- as.matrix(e2)
}
result <- callGeneric(e1, e2)
# Only return annmatrix if there is no specific method defined for this operations from the pair class
# With help from Mikael Jagan on Stack Overflow: https://stackoverflow.com/a/75953638/1953718
if (is.null(pairclass) ||
(all(is.na(match(paste0("Ops.", pairclass), .S3methods("Ops")))) &&
all(is.na(match(paste0(.Generic, ".", pairclass), .S3methods(.Generic)))))) {
result <- structure(result, class = c("annmatrix", myclass), .annmatrix.rann = rann, .annmatrix.cann = cann)
}
result
}
#' @rdname groupgenerics
#' @export
chooseOpsMethod.annmatrix <- function(x, y, mx, my, cl, reverse) {
TRUE
}
|
# ANCOVA walk-through: effect of viagra dose on libido, controlling for the
# partner's libido as a covariate (Field-style example data).
library(car)
library(pastecs)
library(WRS)
library(multcomp)
library(compute.es)
library(effects)
library(ggplot2)
library(dplyr)
df<- read.delim('/home/atrides/Desktop/R/statistics_with_R/11_GLM2_ANCOVA/Data_Files/ViagraCovariate.dat', header=TRUE)
head(df)
# summary basic means
by(df$libido ,df$dose, mean)
by(df$partnerLibido ,df$dose, mean)
is.factor(df$dose)
df$dose<- factor(df$dose, levels=c(1,2,3))
# boxplots
box<- ggplot(df, aes(dose, libido))+
  geom_boxplot()+
  scale_y_continuous(limits = c(0, 10))
box
# checking homogeneity of variances
leveneTest(df$libido, df$dose)
# also , could use Hartley F max test in addition , done in python notebook :)
# Checking assumption 1, independence of covariate and experimental manipulator
aov1<- aov(partnerLibido~dose, data=df)
summary(aov1)
# from summary we can see the relationship between groups and covariate is non-significant
# hence, our assumption is followed
# the ANCOVA model: dose effect adjusted for partnerLibido
m01<- aov(libido~dose+partnerLibido, data=df)
Anova(m01, type='III') # defaults to type="II"
# Planned Contrasts
# con1: placebo vs the two dose groups; con2: low vs high dose
con1<- c(-2,1, 1)
con2<- c(0, 1,-1)
contrasts(df$dose)<- cbind(con1, con2)
contrast_model<- aov(libido~ dose+partnerLibido, data=df)
Anova(contrast_model, type="III")
summary.lm(contrast_model)
# adjusting for the effect of covariate
adjustedMeans<- effect("dose", m01)
summary(adjustedMeans)
adjustedMeans$se
# Interpreting the covariate
dotplot<- ggplot(df, aes(partnerLibido, libido))+
  geom_point()+
  geom_smooth(method='lm')+
  scale_y_continuous(breaks=pretty(df$libido,n=5))
dotplot
# post hoc test in Ancova
# we can use only the glht() function; the pairwise.t.test() function will not test the adjusted means.
postHocs<- glht(m01, linfct=mcp(dose='Tukey'))
summary(postHocs)
confint(postHocs)
# plots in ancova
# (produces the four standard aov diagnostic plots)
plot(m01)
# Final Remarks
model_justAnova<- aov(libido~dose, data=df)
summary(model_justAnova)
# so , if we hadn't taken covariate in our calculation , the resulting
# summary would be incorrect and misleading
# Checking assumption of homegeniety of regression slopes
hoRS<- aov(libido~ dose*partnerLibido, data=df)
summary(hoRS)
# since the interaction term is significant , the assumption is broken
# also, we can plot this
# NOTE(review): the axes here are (libido, partnerLibido), the reverse of
# dotplot above -- confirm which orientation was intended.
hoRS_plot<- ggplot(df, aes(libido,partnerLibido))+
  geom_point(color='black')
hoRS_plot
# fit one regression line per dose group and overlay their layers
q1 <- ggplot() +
  geom_smooth(data = filter(df, dose==1), aes(libido,partnerLibido, color = "blue"), method = "lm")
q2 <- ggplot() +
  geom_smooth(data = filter(df, dose==2), aes(libido,partnerLibido, color = "orange"), method = "lm")
q3 <- ggplot() +
  geom_smooth(data = filter(df, dose==3), aes(libido,partnerLibido, color = "green"), method = "lm")
hoRS_plot<- hoRS_plot+q1$layers[[1]]+q2$layers[[1]]+q3$layers[[1]]
hoRS_plot
# Effect Sizes
# partial R2 (partial eta-squared): SS_effect / (SS_effect + SS_residual)
Anova(m01)
partial_R2_dose<- 25.185/(25.185+79.047)
partial_R2_partner<- 15.076/(15.076+79.047)
partial_R2_dose
# FIX: partial_R2_dose was printed twice; show the covariate's effect size
partial_R2_partner
# R_Contrast: convert a contrast t-statistic and its df into an effect size r
r_contrast<- function(t, dof){
  cat("r : ",sqrt(t^2/(t^2+dof)))
}
summary.lm(contrast_model)
r_contrast(2.785, 26)
r_contrast(-0.541, 26)
r_contrast(2.227, 26)
# an alternative of getting effect size of contrasts,
# is to get all pairwise effect sizes
summary(adjustedMeans)
# group sizes for placebo / low / high; sd recovered from se * sqrt(n)
n<- c(9,8,13)
adjustedMeans$sd<- adjustedMeans$se*sqrt(n)
adjustedMeans$sd
# placebo-low
mes(2.92, 4.71, 1.79, 1.46, 9,8)
# high-low
mes(5.15, 4.71, 2.11, 1.46, 13,8)
# high-placebo
mes(5.15, 2.92, 2.11, 1.79, 13,9)
| /R/statistics_with_R/11_GLM2_ANCOVA/Script_Files/01_Ancova.R | permissive | snehilk1312/AppliedStatistics | R | false | false | 3,427 | r | library(car)
library(pastecs)
library(WRS)
library(multcomp)
library(compute.es)
library(effects)
library(ggplot2)
library(dplyr)
df<- read.delim('/home/atrides/Desktop/R/statistics_with_R/11_GLM2_ANCOVA/Data_Files/ViagraCovariate.dat', header=TRUE)
head(df)
# summary basic means
by(df$libido ,df$dose, mean)
by(df$partnerLibido ,df$dose, mean)
is.factor(df$dose)
df$dose<- factor(df$dose, levels=c(1,2,3))
# boxplots
box<- ggplot(df, aes(dose, libido))+
geom_boxplot()+
scale_y_continuous(limits = c(0, 10))
box
# checking homogeneity of variances
leveneTest(df$libido, df$dose)
# also , could use Hartley F max test in addition , done in python notebook :)
# Checking assumption 1, independence of covariate and experimental manipulator
aov1<- aov(partnerLibido~dose, data=df)
summary(aov1)
# from summary we can see the relationship between groups and covariate is non-significant
# hence, our assumption is followed
m01<- aov(libido~dose+partnerLibido, data=df)
Anova(m01, type='III') # defaults to type="II"
# Planned Contrasts
con1<- c(-2,1, 1)
con2<- c(0, 1,-1)
contrasts(df$dose)<- cbind(con1, con2)
contrast_model<- aov(libido~ dose+partnerLibido, data=df)
Anova(contrast_model, type="III")
summary.lm(contrast_model)
# adjusting for the effect of covariate
adjustedMeans<- effect("dose", m01)
summary(adjustedMeans)
adjustedMeans$se
# Interpreting the covariate
dotplot<- ggplot(df, aes(partnerLibido, libido))+
geom_point()+
geom_smooth(method='lm')+
scale_y_continuous(breaks=pretty(df$libido,n=5))
dotplot
# post hoc test in Ancova
# we can use only the glht() function; the pairwise.t.test() function will not test the adjusted means.
postHocs<- glht(m01, linfct=mcp(dose='Tukey'))
summary(postHocs)
confint(postHocs)
# plots in ancova
plot(m01)
# Final Remarks
model_justAnova<- aov(libido~dose, data=df)
summary(model_justAnova)
# so , if we hadn't taken covariate in our calculation , the resulting
# summary would be incorrect and misleading
# Checking assumption of homegeniety of regression slopes
hoRS<- aov(libido~ dose*partnerLibido, data=df)
summary(hoRS)
# since the interaction term is significant , the assumption is broken
# also, we can plot this
hoRS_plot<- ggplot(df, aes(libido,partnerLibido))+
geom_point(color='black')
hoRS_plot
q1 <- ggplot() +
geom_smooth(data = filter(df, dose==1), aes(libido,partnerLibido, color = "blue"), method = "lm")
q2 <- ggplot() +
geom_smooth(data = filter(df, dose==2), aes(libido,partnerLibido, color = "orange"), method = "lm")
q3 <- ggplot() +
geom_smooth(data = filter(df, dose==3), aes(libido,partnerLibido, color = "green"), method = "lm")
hoRS_plot<- hoRS_plot+q1$layers[[1]]+q2$layers[[1]]+q3$layers[[1]]
hoRS_plot
# Effect Sizes
# partial R2
Anova(m01)
partial_R2_dose<- 25.185/(25.185+79.047)
partial_R2_partner<- 15.076/(15.076+79.047)
partial_R2_dose
# FIX: partial_R2_dose was printed twice; show the covariate's effect size
partial_R2_partner
# R_Contrast
r_contrast<- function(t, dof){
cat("r : ",sqrt(t^2/(t^2+dof)))
}
summary.lm(contrast_model)
r_contrast(2.785, 26)
r_contrast(-0.541, 26)
r_contrast(2.227, 26)
# an alternative of getting effect size of contrasts,
# is to get all pairwise effect sizes
summary(adjustedMeans)
n<- c(9,8,13)
adjustedMeans$sd<- adjustedMeans$se*sqrt(n)
adjustedMeans$sd
# placebo-low
mes(2.92, 4.71, 1.79, 1.46, 9,8)
# high-low
mes(5.15, 4.71, 2.11, 1.46, 13,8)
# high-placebo
mes(5.15, 2.92, 2.11, 1.79, 13,9)
|
setwd ("F:/Exploratory Data Analysis")
fileUrl<- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileUrl,destfile="F:/Exploratory Data Analysis/household_power_consumption.zip")
unzip(zipfile="./household_power_consumption.zip",exdir="./tempfile")
# skip=1 drops the header line, so every column is read as character/factor
housepower <- read.table("F:/Exploratory Data Analysis/tempfile/household_power_consumption.txt",skip=1,sep=";")
names(housepower)<- c("Date","Time","Global_active_power","Global_reactive_power","Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3")
# keep only the two target days
plotdata<-subset(housepower,housepower$Date=="1/2/2007" | housepower$Date =="2/2/2007")
# convert the measurement columns from factor back to numeric
plotdata$Global_active_power <- as.numeric(as.character(plotdata$Global_active_power))
plotdata$datetime <-paste(plotdata$Date, plotdata$Time)
plotdata$Sub_metering_1 <- as.numeric(as.character(plotdata$Sub_metering_1))
plotdata$Sub_metering_2 <- as.numeric(as.character(plotdata$Sub_metering_2))
plotdata$Sub_metering_3 <- as.numeric(as.character(plotdata$Sub_metering_3))
plotdata$Voltage <- as.numeric(as.character(plotdata$Voltage))
plotdata$Global_reactive_power <- as.numeric(as.character(plotdata$Global_reactive_power))
# 2x2 panel layout, filled column-wise
par(mfcol = c(2,2))
plot(strptime(plotdata$datetime, "%d/%m/%Y %H:%M:%S"), plotdata$Global_active_power, type = "l", xlab = "", ylab = "Global Active Power(kilowatts)")
plot(strptime(plotdata$datetime, "%d/%m/%Y %H:%M:%S"), plotdata$Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering")
lines(strptime(plotdata$datetime, "%d/%m/%Y %H:%M:%S"), plotdata$Sub_metering_2, type = "l", col = "red" )
lines(strptime(plotdata$datetime, "%d/%m/%Y %H:%M:%S"), plotdata$Sub_metering_3, type = "l", col = "blue" )
legend("topright", lty= 1, col = c("Black", "red", "blue"), legend = c( "Sub_meter_1", "Sub_meter_2", "Sub_meter_3"), cex=.65)
plot(strptime(plotdata$datetime, "%d/%m/%Y %H:%M:%S"), plotdata$Voltage, type = "l", xlab = "datetime", ylab = "Voltage")
# FIX: the bottom-right panel was plotting Global_active_power while labeled
# "Global_reactive_power"; plot the reactive power it claims to show
plot(strptime(plotdata$datetime, "%d/%m/%Y %H:%M:%S"), plotdata$Global_reactive_power, type = "l", xlab = "datetime", ylab = "Global_reactive_power")
dev.copy(png, file = "plot4.png")
dev.off()
png(height=450, width=450 | /plot4.R | no_license | edfaynor/Explore-Graphs | R | false | false | 2,090 | r | setwd ("F:/Exploratory Data Analysis")
fileUrl<- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileUrl,destfile="F:/Exploratory Data Analysis/household_power_consumption.zip")
unzip(zipfile="./household_power_consumption.zip",exdir="./tempfile")
housepower <- read.table("F:/Exploratory Data Analysis/tempfile/household_power_consumption.txt",skip=1,sep=";")
names(housepower)<- c("Date","Time","Global_active_power","Global_reactive_power","Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3")
plotdata<-subset(housepower,housepower$Date=="1/2/2007" | housepower$Date =="2/2/2007")
plotdata$Global_active_power <- as.numeric(as.character(plotdata$Global_active_power))
plotdata$datetime <-paste(plotdata$Date, plotdata$Time)
plotdata$Sub_metering_1 <- as.numeric(as.character(plotdata$Sub_metering_1))
plotdata$Sub_metering_2 <- as.numeric(as.character(plotdata$Sub_metering_2))
plotdata$Sub_metering_3 <- as.numeric(as.character(plotdata$Sub_metering_3))
plotdata$Voltage <- as.numeric(as.character(plotdata$Voltage))
par(mfcol = c(2,2))
plot(strptime(plotdata$datetime, "%d/%m/%Y %H:%M:%S"), plotdata$Global_active_power, type = "l", xlab = "", ylab = "Global Active Power(kilowatts)")
plot(strptime(plotdata$datetime, "%d/%m/%Y %H:%M:%S"), plotdata$Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering")
lines(strptime(plotdata$datetime, "%d/%m/%Y %H:%M:%S"), plotdata$Sub_metering_2, type = "l", col = "red" )
lines(strptime(plotdata$datetime, "%d/%m/%Y %H:%M:%S"), plotdata$Sub_metering_3, type = "l", col = "blue" )
legend("topright", lty= 1, col = c("Black", "red", "blue"), legend = c( "Sub_meter_1", "Sub_meter_2", "Sub_meter_3"), cex=.65)
plot(strptime(plotdata$datetime, "%d/%m/%Y %H:%M:%S"), plotdata$Voltage, type = "l", xlab = "datetime", ylab = "Voltage")
# FIX: this panel was plotting Global_active_power while labeled
# "Global_reactive_power"; the reactive column was never converted to
# numeric earlier, hence the inline conversion
plot(strptime(plotdata$datetime, "%d/%m/%Y %H:%M:%S"), as.numeric(as.character(plotdata$Global_reactive_power)), type = "l", xlab = "datetime", ylab = "Global_reactive_power")
dev.copy(png, file = "plot4.png")
dev.off()
png(height=450, width=450 |
#india Corona
# Scrape the MoHFW COVID-19 state-wise table and reshape it for plotting.
pacman::p_load(ggplot2, dplyr, rvest, xml2, gridExtra, reshape2,wesanderson)
#india gov site-----
(caption2 = paste('Compiled from https://www.mohfw.gov.in/', ' @Dhiraj ', ' :Status on', Sys.time()))
indcorona <- xml2::read_html("https://www.mohfw.gov.in/")
#table1 - today-----
#table no changed----
indcovid <- indcorona %>% html_nodes("table") %>% .[[1]] %>% html_table()
head(indcovid)
tail(indcovid)
dim(indcovid)
tail(indcovid,2)
# drop the two footer rows; the former `slice(1 : 1:(n()-2))` parsed as
# (1:1):(n()-2) and was just an obfuscated 1:(n()-2)
indcovid1 <- indcovid %>% slice(1:(n()-2))
indcovid1
tail(indcovid1)
newcolsIndia = c('ser','state', 'Confirmed','Recovered','Death')
var = c('Indians', 'Foreign', 'recoveredAll', 'death')
head(indcovid1)
names(indcovid1) = newcolsIndia
names(indcovid1)
head(indcovid1)
indcovid1$state = factor(indcovid1$state)
# FIX: keep rows matching NEITHER 'India' nor 'Total'; with `|` the filter
# only removed rows containing both words (De Morgan)
indcovid1B <- indcovid1 %>% select(-'ser') %>% filter(!grepl('India', state) & !grepl('Total', state))
#indcovid1B$compileDate = Sys.Date()
# long format: one row per state x {Confirmed, Recovered, Death}
indcovid1Melt1 <- indcovid1B %>% melt(id.vars='state')
head(indcovid1Melt1)
table(indcovid1Melt1$state)
table(indcovid1Melt1$variable)
#indcovid1Melt1$variable = factor(indcovid1Melt1$variable, levels=c('Ind','For','Rec','Death'), labels= c('Indians', 'Foreign', 'RecoveredAll', 'Death'))
str(indcovid1Melt1)
indcovid1Melt1$value = as.integer(indcovid1Melt1$value)
#+ scale_fill_discrete(name='status', labels=var)
gbarIndia1A <- ggplot(indcovid1Melt1, aes(x=variable, y=value, fill=variable)) + geom_bar(stat='identity', position=position_dodge2(.7), width=.7) + facet_wrap(state ~., scale='free') + theme(axis.text.x = element_text(angle=0, size=rel(.2)), legend.position = 'top', plot.title = element_text(hjust = 0.5, color = "#666666"), axis.text.y = element_text(size=rel(.7))) + geom_text(aes(label=value, y=value), position=position_dodge2(.7), size=rel(2.5), vjust=0) + labs(title=paste('gbarIndia1A:', ' Corona Status', 'My Country(India) : Free Scaling'), caption = caption2, x='State/Cases', y='Numbers') + expand_limits(y = 0) + scale_y_continuous(name = "Numbers", breaks=c(5,10,15,20,50))
gbarIndia1A
str(indcovid1Melt1)
indcovid1Melt1$value = as.integer(indcovid1Melt1$value)  # already integer above; harmless repeat
gbarIndia1B <- ggplot(indcovid1Melt1, aes(x=state, y=value, fill=state)) + geom_bar(stat='identity', position=position_dodge2(.7), width=.7) + facet_grid(variable ~., scale='free') + theme(legend.position = 'top', plot.title = element_text(hjust = 0.5, color = "#666666"),axis.text.x = element_text(angle=90, size=rel(.9)), axis.text.y = element_text(size=rel(.7))) + geom_text(aes(label=value, y=value), size=rel(2.5), nudge_y = 0, nudge_x = 0.1, lineheight = 0.9) + scale_fill_discrete(name='status', labels=var) + labs(title=paste('gbarIndia1B:', ' Corona Status', 'My Country(India) : Free Scaling'), caption = caption2, x='State/Cases', y='Numbers') + expand_limits(y = 0) + scale_y_continuous(name = "Numbers", breaks=c(5,10,15,20,50)) + guides(fill=FALSE)
gbarIndia1B
str(indcovid1Melt1)
#var = c('Indians', 'Foreign', 'recoveredAll', 'death')
variable_names <- list('Ind'='Indian', 'For'='Foreign','Rec'='Recovered','Death'='Deaths')
# labeller for facet strips (only used by the commented-out facet_grid calls)
variable_labeller <- function(variable, value){
  return(variable_names[value])
}
str(indcovid1Melt1)
gbarIndia2 <- ggplot(indcovid1Melt1, aes(x=state, y=value, fill=state)) + geom_bar(stat='identity', position=position_dodge2(.7)) + theme(legend.position = 'top', plot.title = element_text(hjust = 0.5, color = "#666666")) + geom_text(aes(label=value, y=value), position=position_dodge2(.7), size=rel(2.5))+ labs(title=paste('gbarIndia2: ', 'Corona Status', 'My Country(India) : Free Scaling'), caption = caption2, x='State/Cases', y='Numbers') + guides(fill=FALSE) + coord_flip() + facet_grid(. ~ variable , scale='free')
#+ facet_grid(. ~ variable , scale='free', labeller=variable_labeller)
gbarIndia2
indcovid1
gbarIndia2
gbarIndia3 <- ggplot(indcovid1Melt1, aes(x=state, y=value, fill=state)) + geom_bar(stat='identity', position=position_dodge2(.7)) + theme(legend.position = 'top', plot.title = element_text(hjust = 0.5, color = "#666666")) + geom_text(aes(label=value, y=value), position=position_dodge2(.7), size=rel(2.5))+ labs(title=paste('gbarIndia3: ', 'Corona Status', 'My Country(India) : Free Scaling'), caption =caption2, x='State/Cases', y='Numbers') + guides(fill=FALSE) + coord_flip() + facet_grid(. ~ variable , scale='free')
#+ facet_grid(. ~ variable , scale='free', labeller=variable_labeller)
gbarIndia3
#all graphs -----
gbarIndia2
# FIX: `gbarIndia1` was never defined (only gbarIndia1A/gbarIndia1B exist)
# and stopped the script with an error here; print gbarIndia1A instead
gbarIndia1A
gbarIndia3 #use this instead of gbarIndia2
#pie
table(indcovid1Melt1$variable)
str(indcovid1Melt1)
# FIX: after the column rename the melted `variable` levels are
# Confirmed/Recovered/Death, so filtering on the old level 'Ind' matched
# zero rows and drew an empty pie; use 'Confirmed' instead
indcovid1Melt1 %>% filter(variable=='Confirmed') %>% select(state, value) %>% mutate(level = ifelse(value >5, 'G10', 'L10')) %>% group_by(level) %>% summarise(n=length(level)) %>% ggpubr::ggpie(., "n", label = 'level', lab.pos = 'in', fill = "level", color='white') + theme_classic() + guides(fill=FALSE)
#scale_fill_gradient(low="blue", high="red")
#+ scale_fill_grey(start=0.8, end=0.2)
# FIX: paste() inserted spaces into the file name; paste0() builds a clean path
write.csv(indcovid1B, paste0('E:/data/indcovid', Sys.Date(), '.csv'), na='', row.names=FALSE)
| /corona/coronaINDIA.R | no_license | loxavia/rphd | R | false | false | 4,986 | r | #india Corona
pacman::p_load(ggplot2, dplyr, rvest, xml2, gridExtra, reshape2,wesanderson)
#india gov site-----
(caption2 = paste('Compiled from https://www.mohfw.gov.in/', ' @Dhiraj ', ' :Status on', Sys.time()))
indcorona <- xml2::read_html("https://www.mohfw.gov.in/")
#table1 - today-----
#table no changed----
indcovid <- indcorona %>% html_nodes("table") %>% .[[1]] %>% html_table()
head(indcovid)
tail(indcovid)
dim(indcovid)
tail(indcovid,2)
indcovid1 <- indcovid %>% slice(1 : 1:(n()-2))
indcovid1
tail(indcovid1)
newcolsIndia = c('ser','state', 'Confirmed','Recovered','Death')
var = c('Indians', 'Foreign', 'recoveredAll', 'death')
head(indcovid1)
names(indcovid1) = newcolsIndia
names(indcovid1)
head(indcovid1)
indcovid1$state = factor(indcovid1$state)
# FIX: keep rows matching NEITHER 'India' nor 'Total'; with `|` the filter
# only removed rows containing both words (De Morgan)
indcovid1B <- indcovid1 %>% select(-'ser') %>% filter(!grepl('India', state) & !grepl('Total', state))
#indcovid1B$compileDate = Sys.Date()
indcovid1Melt1 <- indcovid1B %>% melt(id.vars='state')
head(indcovid1Melt1)
table(indcovid1Melt1$state)
table(indcovid1Melt1$variable)
#indcovid1Melt1$variable = factor(indcovid1Melt1$variable, levels=c('Ind','For','Rec','Death'), labels= c('Indians', 'Foreign', 'RecoveredAll', 'Death'))
str(indcovid1Melt1)
indcovid1Melt1$value = as.integer(indcovid1Melt1$value)
#+ scale_fill_discrete(name='status', labels=var)
gbarIndia1A <- ggplot(indcovid1Melt1, aes(x=variable, y=value, fill=variable)) + geom_bar(stat='identity', position=position_dodge2(.7), width=.7) + facet_wrap(state ~., scale='free') + theme(axis.text.x = element_text(angle=0, size=rel(.2)), legend.position = 'top', plot.title = element_text(hjust = 0.5, color = "#666666"), axis.text.y = element_text(size=rel(.7))) + geom_text(aes(label=value, y=value), position=position_dodge2(.7), size=rel(2.5), vjust=0) + labs(title=paste('gbarIndia1A:', ' Corona Status', 'My Country(India) : Free Scaling'), caption = caption2, x='State/Cases', y='Numbers') + expand_limits(y = 0) + scale_y_continuous(name = "Numbers", breaks=c(5,10,15,20,50))
gbarIndia1A
str(indcovid1Melt1)
indcovid1Melt1$value = as.integer(indcovid1Melt1$value)
gbarIndia1B <- ggplot(indcovid1Melt1, aes(x=state, y=value, fill=state)) + geom_bar(stat='identity', position=position_dodge2(.7), width=.7) + facet_grid(variable ~., scale='free') + theme(legend.position = 'top', plot.title = element_text(hjust = 0.5, color = "#666666"),axis.text.x = element_text(angle=90, size=rel(.9)), axis.text.y = element_text(size=rel(.7))) + geom_text(aes(label=value, y=value), size=rel(2.5), nudge_y = 0, nudge_x = 0.1, lineheight = 0.9) + scale_fill_discrete(name='status', labels=var) + labs(title=paste('gbarIndia1B:', ' Corona Status', 'My Country(India) : Free Scaling'), caption = caption2, x='State/Cases', y='Numbers') + expand_limits(y = 0) + scale_y_continuous(name = "Numbers", breaks=c(5,10,15,20,50)) + guides(fill=F)
gbarIndia1B
str(indcovid1Melt1)
#var = c('Indians', 'Foreign', 'recoveredAll', 'death')
variable_names <- list('Ind'='Indian', 'For'='Foreign','Rec'='Recovered','Death'='Deaths')
variable_labeller <- function(variable, value){
return(variable_names[value])
}
str(indcovid1Melt1)
gbarIndia2 <- ggplot(indcovid1Melt1, aes(x=state, y=value, fill=state)) + geom_bar(stat='identity', position=position_dodge2(.7)) + theme(legend.position = 'top', plot.title = element_text(hjust = 0.5, color = "#666666")) + geom_text(aes(label=value, y=value), position=position_dodge2(.7), size=rel(2.5))+ labs(title=paste('gbarIndia2: ', 'Corona Status', 'My Country(India) : Free Scaling'), caption = caption2, x='State/Cases', y='Numbers') + guides(fill=F) + coord_flip() + facet_grid(. ~ variable , scale='free')
#+ facet_grid(. ~ variable , scale='free', labeller=variable_labeller)
gbarIndia2
indcovid1
gbarIndia2
gbarIndia3 <- ggplot(indcovid1Melt1, aes(x=state, y=value, fill=state)) + geom_bar(stat='identity', position=position_dodge2(.7)) + theme(legend.position = 'top', plot.title = element_text(hjust = 0.5, color = "#666666")) + geom_text(aes(label=value, y=value), position=position_dodge2(.7), size=rel(2.5))+ labs(title=paste('gbarIndia3: ', 'Corona Status', 'My Country(India) : Free Scaling'), caption =caption2, x='State/Cases', y='Numbers') + guides(fill=F) + coord_flip() + facet_grid(. ~ variable , scale='free')
#+ facet_grid(. ~ variable , scale='free', labeller=variable_labeller)
gbarIndia3
#all graphs -----
gbarIndia2
gbarIndia1
gbarIndia3 #use this instead of gbarIndia2
#pie
table(indcovid1Melt1$variable)
str(indcovid1Melt1)
indcovid1Melt1 %>% filter(variable=='Ind') %>% select(state, value) %>% mutate(level = ifelse(value >5, 'G10', 'L10')) %>% group_by(level) %>% summarise(n=length(level)) %>% ggpubr::ggpie(., "n", label = 'level', lab.pos = 'in', fill = "level", color='white') + theme_classic() + guides(fill=F)
#scale_fill_gradient(low="blue", high="red")
#+ scale_fill_grey(start=0.8, end=0.2)
write.csv(indcovid1B,paste('E:/data/indcovid',Sys.Date(),'.csv'), na='', row.names=F)
|
### April 2016
## match list of SNPs for read depth, allele freq and chromosome
## Can match on recombination rate
## (only for mel, also matching on inversion status)
## USAGE: Rscript ../match_SNPs/match_snps_dp_ch_SNPbySNP_recomb.R means_dpfreq.2L.Rdata mel_2L.bootstrap_fmean_dp.txt 100
## changed to only take one file
# 1) number of matched sets to produce
# 2) the Rdata object with the mean and dps
# 2) the output filename
# 3) the file to match
### OUTPUT:
# 1) chrom (focal SNP)
# 2) pos (focal SNP)
# 3) pos of matched SNP (one column per matched set)
## must have run export R_LIBS=/hsgs/projects/petrov/hmachado/software/R_libs/
## install packages like this:
#install.packages("foreach", repos="http://cran.r-project.org", lib="/hsgs/projects/petrov/hmachado/software/R_libs/")
#export R_LIBS=/home/hmachado/R/x86_64-unknown-linux-gnu-library/
#install.packages("foreach", repos="http://cran.r-project.org", lib="/home/hmachado/R/x86_64-unknown-linux-gnu-library/")
#install.packages("doParallel", repos="http://cran.r-project.org", lib="/home/hmachado/R/x86_64-unknown-linux-gnu-library/")
#install.packages("data.table", repos="http://cran.r-project.org", lib="/home/hmachado/R/x86_64-unknown-linux-gnu-library/")
#install.packages("doMC", repos="http://cran.r-project.org", lib="/home/hmachado/R/x86_64-unknown-linux-gnu-library/")
library(foreach)
library(iterators)
library(data.table)
library(doMC)
library(doParallel)
cl <- makeCluster(1)
registerDoParallel(cl)
args <- commandArgs(trailingOnly = TRUE)
#args = c("means_dpfreq_otherFreqBin.X.Rdata", "bootstrap_otherfmean_dp.mel_X.txt", 100)
load(args[1])
snpinfo = means_dpfreq
n = as.numeric(args[3])
fileout = args[2]
snpinfo$RRbin = trunc(snpinfo$RR) #### only if including recombination rate
snpinfo$RRbin[snpinfo$RRbin>5] = 6 #### 7 bins of recombination rate
snpinfo$chrom = as.character(snpinfo[,1])
snpinfo$pos = as.numeric(snpinfo[,2])
inv <- data.frame(#inv = c("In2Lt", "In2RNS", "In3RK", "In3RMo", "In3RP", "In3LP", "InXA", "InXBe"), # combined the overlapping inversions on 3R and X
chr=c("2L", "2R", "3R", "3L", "X" ),
prox = c(2225744,11278659,7576289,3173046,13519769),
dist = c(13154180,16163839,24857019,16301941,19487744))
if (length(args)==4){
focalA = read.table(args[4], stringsAsFactors=FALSE, header=FALSE) # read in file of SNPs that need to be matched to a control. File contains header
focal = na.omit(focalA[,1:2]) ## there should be no NA's for chrom or pos
focalinfo = merge(snpinfo, focal, by=c(1,2) )
} else if (length(args)==3){
focalinfo = snpinfo
} else warning("input arguments not of length 3 or 4")
snpinfo = focalinfo
registerDoMC(12)
out1 = foreach(s=1:nrow(focalinfo), .combine=rbind) %dopar% {
# can only match major chromosomal arms, so exlude other arms
focalsnp = focalinfo[s,]
if ( (focalsnp[1] %in% inv$chr) == FALSE ){ # if the chromosome is not among 2L 2R 3L 3R or X, skip
return()
}
# same chrom, dp quantile, freq quantile, and rec rate
potentialmatchesa = snpinfo[ which(as.character(snpinfo$chrom)==as.character(focalsnp$chrom) & as.numeric(snpinfo$NeQuant10)==as.numeric(focalsnp$NeQuant10) & as.numeric(snpinfo$FfreqQuant10)==as.numeric(focalsnp$FfreqQuant10) & as.numeric(snpinfo$RRbin)==as.numeric(focalsnp$RRbin) ),]
### exclude the focal snp
potentialmatches = potentialmatchesa[ which(as.numeric(potentialmatchesa[,2]) != as.numeric(focalsnp[2]) ),]
# extract the inversion coordinates
invCh = inv[inv[,1]==focalsnp$chrom, ]
start = as.numeric(invCh[2]) # inversion start
end = as.numeric(invCh[3]) # inversion end
# if focal SNP is in the inversion, use inversion SNPs, if not, use SNPs before or after inversion
if (focalsnp$pos >= start & focalsnp$pos <= end){
potentialmatches2 = potentialmatches[ which(potentialmatches$pos >= start & potentialmatches$pos <= end), ]
} else potentialmatches2 = potentialmatches[ which(potentialmatches$pos < start | potentialmatches$pos > end), ]
if (nrow(potentialmatches2) < n/10){ # there has to be at least 10 different matches if doing 100 sets
bychromSNP = c(focalsnp[1], rep(NA, times=n) ) } else {
bychromSNP = c(focalsnp[1:2], sample(potentialmatches2[,2], size=n, replace=TRUE) )
}
unlist(bychromSNP)
}
write.table(out1, file=fileout, quote=FALSE, row.names=FALSE, col.names=FALSE)
#stopCluster(cl)
| /create_input_files/match_snps_dp_ch_SNPbySNP_recomb_chromposFilter.R | no_license | machadoheather/dmel_seasonal_RTEC | R | false | false | 4,495 | r | ### April 2016
## match list of SNPs for read depth, allele freq and chromosome
## Can match on recombination rate
## (only for mel, also matching on inversion status)
## USAGE: Rscript ../match_SNPs/match_snps_dp_ch_SNPbySNP_recomb.R means_dpfreq.2L.Rdata mel_2L.bootstrap_fmean_dp.txt 100
## changed to only take one file
# 1) number of matched sets to produce
# 2) the Rdata object with the mean and dps
# 2) the output filename
# 3) the file to match
### OUTPUT:
# 1) chrom (focal SNP)
# 2) pos (focal SNP)
# 3) pos of matched SNP (one column per matched set)
## must have run export R_LIBS=/hsgs/projects/petrov/hmachado/software/R_libs/
## install packages like this:
#install.packages("foreach", repos="http://cran.r-project.org", lib="/hsgs/projects/petrov/hmachado/software/R_libs/")
#export R_LIBS=/home/hmachado/R/x86_64-unknown-linux-gnu-library/
#install.packages("foreach", repos="http://cran.r-project.org", lib="/home/hmachado/R/x86_64-unknown-linux-gnu-library/")
#install.packages("doParallel", repos="http://cran.r-project.org", lib="/home/hmachado/R/x86_64-unknown-linux-gnu-library/")
#install.packages("data.table", repos="http://cran.r-project.org", lib="/home/hmachado/R/x86_64-unknown-linux-gnu-library/")
#install.packages("doMC", repos="http://cran.r-project.org", lib="/home/hmachado/R/x86_64-unknown-linux-gnu-library/")
library(foreach)
library(iterators)
library(data.table)
library(doMC)
library(doParallel)
cl <- makeCluster(1)
registerDoParallel(cl)
args <- commandArgs(trailingOnly = TRUE)
#args = c("means_dpfreq_otherFreqBin.X.Rdata", "bootstrap_otherfmean_dp.mel_X.txt", 100)
load(args[1])
snpinfo = means_dpfreq
n = as.numeric(args[3])
fileout = args[2]
snpinfo$RRbin = trunc(snpinfo$RR) #### only if including recombination rate
snpinfo$RRbin[snpinfo$RRbin>5] = 6 #### 7 bins of recombination rate
snpinfo$chrom = as.character(snpinfo[,1])
snpinfo$pos = as.numeric(snpinfo[,2])
inv <- data.frame(#inv = c("In2Lt", "In2RNS", "In3RK", "In3RMo", "In3RP", "In3LP", "InXA", "InXBe"), # combined the overlapping inversions on 3R and X
chr=c("2L", "2R", "3R", "3L", "X" ),
prox = c(2225744,11278659,7576289,3173046,13519769),
dist = c(13154180,16163839,24857019,16301941,19487744))
if (length(args)==4){
focalA = read.table(args[4], stringsAsFactors=FALSE, header=FALSE) # read in file of SNPs that need to be matched to a control. File contains header
focal = na.omit(focalA[,1:2]) ## there should be no NA's for chrom or pos
focalinfo = merge(snpinfo, focal, by=c(1,2) )
} else if (length(args)==3){
focalinfo = snpinfo
} else warning("input arguments not of length 3 or 4")
snpinfo = focalinfo
registerDoMC(12)
out1 = foreach(s=1:nrow(focalinfo), .combine=rbind) %dopar% {
# can only match major chromosomal arms, so exlude other arms
focalsnp = focalinfo[s,]
if ( (focalsnp[1] %in% inv$chr) == FALSE ){ # if the chromosome is not among 2L 2R 3L 3R or X, skip
return()
}
# same chrom, dp quantile, freq quantile, and rec rate
potentialmatchesa = snpinfo[ which(as.character(snpinfo$chrom)==as.character(focalsnp$chrom) & as.numeric(snpinfo$NeQuant10)==as.numeric(focalsnp$NeQuant10) & as.numeric(snpinfo$FfreqQuant10)==as.numeric(focalsnp$FfreqQuant10) & as.numeric(snpinfo$RRbin)==as.numeric(focalsnp$RRbin) ),]
### exclude the focal snp
potentialmatches = potentialmatchesa[ which(as.numeric(potentialmatchesa[,2]) != as.numeric(focalsnp[2]) ),]
# extract the inversion coordinates
invCh = inv[inv[,1]==focalsnp$chrom, ]
start = as.numeric(invCh[2]) # inversion start
end = as.numeric(invCh[3]) # inversion end
# if focal SNP is in the inversion, use inversion SNPs, if not, use SNPs before or after inversion
if (focalsnp$pos >= start & focalsnp$pos <= end){
potentialmatches2 = potentialmatches[ which(potentialmatches$pos >= start & potentialmatches$pos <= end), ]
} else potentialmatches2 = potentialmatches[ which(potentialmatches$pos < start | potentialmatches$pos > end), ]
if (nrow(potentialmatches2) < n/10){ # there has to be at least 10 different matches if doing 100 sets
bychromSNP = c(focalsnp[1], rep(NA, times=n) ) } else {
bychromSNP = c(focalsnp[1:2], sample(potentialmatches2[,2], size=n, replace=TRUE) )
}
unlist(bychromSNP)
}
write.table(out1, file=fileout, quote=FALSE, row.names=FALSE, col.names=FALSE)
#stopCluster(cl)
|
library(phyloseq)
library(paleotree)
library(picante)
library(phytools)
library(MCMCglmm)
library(reshape2)
library(RColorBrewer)
library(ggplot2)
library(MASS)
tissue.fams <- c('c__Chloroplast','f__Flammeovirgaceae', 'f__[Amoebophilaceae]', 'f__Cryomorphaceae', 'f__Flavobacteriaceae', 'f__Hyphomicrobiaceae', 'f__Methylobacteriaceae', 'f__Phyllobacteriaceae', 'f__Rhodobacteraceae', 'f__Rhodospirillaceae', 'f__Pelagibacteraceae', 'f__Alteromonadaceae', 'f__OM60', 'f__Endozoicimonaceae', 'f__Moraxellaceae', 'f__Piscirickettsiaceae', 'f__Vibrionaceae', 'Unassigned', 'c__Alphaproteobacteria', 'o__Kiloniellales')
skeleton.fams <- c('c__Chloroplast','f__Flammeovirgaceae', 'f__[Amoebophilaceae]', 'f__Flavobacteriaceae', 'f__Clostridiaceae', 'f__Pirellulaceae', 'f__Hyphomicrobiaceae', 'f__Methylobacteriaceae', 'f__Phyllobacteriaceae', 'f__Rhodobacteraceae', 'f__Rhodospirillaceae', 'f__Alteromonadaceae', 'f__Endozoicimonaceae', 'f__Piscirickettsiaceae', 'f__Spirochaetaceae', 'Unassigned', 'c__Alphaproteobacteria', 'o__Myxococcales')
mucus.fams <- c('c__Chloroplast','f__Flammeovirgaceae', 'f__Cryomorphaceae', 'f__Flavobacteriaceae', 'f__Synechococcaceae', 'f__Methylobacteriaceae', 'f__Rhodobacteraceae', 'f__Pelagibacteraceae', 'f__Alteromonadaceae', 'f__OM60', 'f__Endozoicimonaceae', 'f__Halomonadaceae', 'f__Moraxellaceae', 'f__Piscirickettsiaceae', 'f__Pseudoalteromonadaceae', 'Unassigned', 'c__Alphaproteobacteria')
famlist <- list(T=tissue.fams,S=skeleton.fams,M=mucus.fams)
compartments <- list(T='tissue',S='skeleton',M='mucus')
map <- import_qiime_sample_data('/Users/Ryan/Dropbox/Selectively_Shared_Vega_Lab_Stuff/GCMP/Projects/Australia_Coevolution_Paper/16S_analysis/1_canonical_starting_files/gcmp16S_map_r22_with_mitochondrial_data.txt')
map[map=='Unknown'] <- NA
biom_object <- import_biom('/Users/Ryan/Dropbox/Selectively_Shared_Vega_Lab_Stuff/GCMP/Projects/Australia_Coevolution_Paper/16S_analysis/4_coevolution/output/MED_otu_table.biom')
colnames(tax_table(biom_object)) <- c('Kingdom','Phylum','Class','Order','Family','Genus','Species')
otu_data_full <- merge_phyloseq(biom_object,map)
otu_data_pruned <- prune_samples(sample_sums(otu_data_full) >= 1000, otu_data_full)
otu_data <- subset_samples(otu_data_pruned, !is.na(colony_name))
rm(list=c('biom_object','otu_data_full','otu_data_pruned'))
gc()
hosttree <- read.tree('/Users/Ryan/Dropbox/Selectively_Shared_Vega_Lab_Stuff/GCMP/Projects/Australia_Coevolution_Paper/16S_analysis/1_canonical_starting_files/host_tree_from_step_11.newick')
for(compart in c('T','S','M')) {
comp.pruned <- subset_samples(otu_data, tissue_compartment==compart)
for(taxon in famlist[[compart]]) {
dir.create(paste0('/Volumes/Moorea/4-coevolution/coevolution/',compartments[[compart]],'/',taxon,'/'), recursive=T)
tre <- read.nexus(paste0('/Volumes/Moorea/acoev/output/filtered/',compartments[[compart]],'/',taxon,'/beast/',taxon,'_final_tree.tree'))
taxon_data <- merge_phyloseq(comp.pruned,tre)
sample_data(taxon_data)$sample_sum <- sample_sums(taxon_data)
n.pruned <- prune_samples(sample_sums(taxon_data) >= 10, taxon_data)
pruned.hosttree <- drop.tip(hosttree,hosttree$tip.label[!hosttree$tip.label %in% sample_data(n.pruned)$X16S_tree_name])
sample_data(n.pruned)$X16S_tree_name[!sample_data(n.pruned)$X16S_tree_name %in% pruned.hosttree$tip.label] <- NA
sample_data(n.pruned)$X16S_tree_name <- droplevels(sample_data(n.pruned)$X16S_tree_name)
c.pruned <- prune_samples(!is.na(sample_data(n.pruned)$X16S_tree_name), n.pruned)
pruned <- filter_taxa(c.pruned, function(x) any(x>0),TRUE)
otutable <- as.matrix(as.data.frame(otu_table(pruned)))
assocs <- melt(otutable,as.is=T)
assocs <- data.frame(count=assocs$value,otu=assocs$Var1,sample=assocs$Var2)
assocs <- merge(assocs,sample_data(pruned)[,c('X16S_tree_name','geographic_area','sample_sum','colony_name')],by.x='sample',by.y=0,all=F)
inv.host.full <- inverseA(pruned.hosttree)
inv.host <- inv.host.full$Ainv
host.ancests <- vector()
for(tip in pruned.hosttree$tip.label) {
temp <- list()
check <- 1
counter <- tip
while(check==1) {
temp[counter] <- inv.host.full$pedigree[inv.host.full$pedigree[,'node.names']==counter,][[2]]
counter <- temp[[length(temp)]]
if(is.na(inv.host.full$pedigree[inv.host.full$pedigree[,'node.names']==counter,][[2]])) {check <- 0}
}
host.ancests[tip] <- paste(temp, collapse=',')
}
pedigree_hosts <- unique(merge(as(map,'data.frame')[,c('X16S_tree_name','field_host_name')],host.ancests,by.x='X16S_tree_name',by.y=0))
write.table(pedigree_hosts,file=paste0('/Volumes/Moorea/4-coevolution/coevolution/',compartments[[compart]],'/',taxon,'/',taxon,'_host_pedigree.txt'),sep='\t',quote=F,row.names=F)
pruned.bacttree <- phy_tree(pruned)
pruned.bacttree$node.label <- NULL
inv.bact.full <- inverseA(pruned.bacttree)
inv.bact <- inv.bact.full$Ainv
bact.ancests <- vector()
for(tip in pruned.bacttree$tip.label) {
temp <- list()
check <- 1
counter <- tip
while(check==1) {
temp[counter] <- inv.bact.full$pedigree[inv.bact.full$pedigree[,'node.names']==counter,][[2]]
counter <- temp[[length(temp)]]
if(is.na(inv.bact.full$pedigree[inv.bact.full$pedigree[,'node.names']==counter,][[2]])) {check <- 0}
}
bact.ancests[tip] <- paste(temp, collapse=',')
}
pedigree_bacts <- unique(merge(as(tax_table(pruned),'matrix'),bact.ancests,by=0))
write.table(pedigree_bacts,file=paste0('/Volumes/Moorea/4-coevolution/coevolution/',compartments[[compart]],'/',taxon,'/',taxon,'_bact_pedigree.txt'),sep='\t',quote=F,row.names=F)
host.otuA<-as(kronecker(inv.host, inv.bact), "dgCMatrix") # coevolutionary effect
host.otuAS<-as(kronecker(inv.host, Diagonal(nrow(inv.bact))), "dgCMatrix") # host evolutionary effect
host.otuSA<-as(kronecker(Diagonal(nrow(inv.host)), inv.bact), "dgCMatrix") # parasite evolutionary effect
rownames(host.otuA)<-apply(expand.grid(rownames(inv.bact), rownames(inv.host)), 1, function(x){paste(x[2],x[1], sep=".")})
rownames(host.otuAS)<-rownames(host.otuSA)<-rownames(host.otuA)
##assocs$otu # non-phylogenetic main effect for bacteria
##assocs$X16S_tree_name # non-phylogenetic main effect for hosts
assocs$otu.phy<-assocs$otu # phylogenetic main effect for bacteria
assocs$X16S_tree_name.phy<-assocs$X16S_tree_name # phylogenetic main effect for hosts
assocs$Host.otu<-paste(assocs$X16S_tree_name, assocs$otu, sep=".") # non-phylogentic interaction effect
assocs$Host.otu[is.na(assocs$X16S_tree_name)] <- NA
assocs$Host.otu.cophy<-paste(assocs$X16S_tree_name, assocs$otu, sep=".") # phylogentic coevolution effect
assocs$Host.otu.cophy[is.na(assocs$X16S_tree_name)] <- NA
assocs$Host.otu.hostphy<-paste(assocs$X16S_tree_name, assocs$otu, sep=".") # phylogentic host evolutionary effect (specifies whether abundance is determined by an interaction between non-phylogenetic otu and the phylogenetic position of the host)
assocs$Host.otu.hostphy[is.na(assocs$X16S_tree_name)] <- NA
assocs$Host.otu.otuphy<-paste(assocs$X16S_tree_name, assocs$otu, sep=".") # phylogentic parasite evolutionary effect (specifies whether abundance is determined by an interaction between non-phylogenetic host species and the phylogenetic position of the otu)
assocs$Host.otu.otuphy[is.na(assocs$X16S_tree_name)] <- NA
assocs$colony.otu.phy <- paste(assocs$colony_name, assocs$otu, sep=".")
assocs$geo.otu <- paste(assocs$geographic_area, assocs$otu, sep=".")
otu.colonySA <- as(kronecker(Diagonal(length(unique(assocs$colony_name[!is.na(assocs$colony_name)]))), inv.bact), "dgCMatrix")
rownames(otu.colonySA)<-apply(expand.grid(rownames(inv.bact), unique(assocs$colony_name[!is.na(assocs$colony_name)])), 1, function(x){paste(x[2],x[1], sep=".")})
randfacts <- c('otu.phy','otu','geo.otu','Host.otu.hostphy','Host.otu.otuphy','Host.otu','Host.otu.cophy')
rand <- as.formula(paste0('~ ',paste(randfacts, collapse=' + ')))
priorC <- list(B=list(mu=c(0,1), V=diag(c(1e+8,1e-6))), R=list(V=1, nu=0))
## priors for the random evolutionary effects (from Hadfield):
phypri<-lapply(1:length(randfacts), function(x){list(V=1, nu=1, alpha.mu=0, alpha.V=1000)})
## combine priors:
priorC$G<-phypri
names(priorC$G)<-paste("G", 1:length(randfacts), sep="")
save.image(file=paste0('/Volumes/Moorea/4-coevolution/coevolution/',compartments[[compart]],'/',taxon,'/',taxon,'_mcmc_setup.RData'))
}
} | /4_coevolution/procedure/5a_setup_mcmcglmm_coev_fixed_rel.r | no_license | xushifen/GCMP_Australia_Coevolution | R | false | false | 9,162 | r | library(phyloseq)
library(paleotree)
library(picante)
library(phytools)
library(MCMCglmm)
library(reshape2)
library(RColorBrewer)
library(ggplot2)
library(MASS)
tissue.fams <- c('c__Chloroplast','f__Flammeovirgaceae', 'f__[Amoebophilaceae]', 'f__Cryomorphaceae', 'f__Flavobacteriaceae', 'f__Hyphomicrobiaceae', 'f__Methylobacteriaceae', 'f__Phyllobacteriaceae', 'f__Rhodobacteraceae', 'f__Rhodospirillaceae', 'f__Pelagibacteraceae', 'f__Alteromonadaceae', 'f__OM60', 'f__Endozoicimonaceae', 'f__Moraxellaceae', 'f__Piscirickettsiaceae', 'f__Vibrionaceae', 'Unassigned', 'c__Alphaproteobacteria', 'o__Kiloniellales')
skeleton.fams <- c('c__Chloroplast','f__Flammeovirgaceae', 'f__[Amoebophilaceae]', 'f__Flavobacteriaceae', 'f__Clostridiaceae', 'f__Pirellulaceae', 'f__Hyphomicrobiaceae', 'f__Methylobacteriaceae', 'f__Phyllobacteriaceae', 'f__Rhodobacteraceae', 'f__Rhodospirillaceae', 'f__Alteromonadaceae', 'f__Endozoicimonaceae', 'f__Piscirickettsiaceae', 'f__Spirochaetaceae', 'Unassigned', 'c__Alphaproteobacteria', 'o__Myxococcales')
mucus.fams <- c('c__Chloroplast','f__Flammeovirgaceae', 'f__Cryomorphaceae', 'f__Flavobacteriaceae', 'f__Synechococcaceae', 'f__Methylobacteriaceae', 'f__Rhodobacteraceae', 'f__Pelagibacteraceae', 'f__Alteromonadaceae', 'f__OM60', 'f__Endozoicimonaceae', 'f__Halomonadaceae', 'f__Moraxellaceae', 'f__Piscirickettsiaceae', 'f__Pseudoalteromonadaceae', 'Unassigned', 'c__Alphaproteobacteria')
famlist <- list(T=tissue.fams,S=skeleton.fams,M=mucus.fams)
compartments <- list(T='tissue',S='skeleton',M='mucus')
map <- import_qiime_sample_data('/Users/Ryan/Dropbox/Selectively_Shared_Vega_Lab_Stuff/GCMP/Projects/Australia_Coevolution_Paper/16S_analysis/1_canonical_starting_files/gcmp16S_map_r22_with_mitochondrial_data.txt')
map[map=='Unknown'] <- NA
biom_object <- import_biom('/Users/Ryan/Dropbox/Selectively_Shared_Vega_Lab_Stuff/GCMP/Projects/Australia_Coevolution_Paper/16S_analysis/4_coevolution/output/MED_otu_table.biom')
colnames(tax_table(biom_object)) <- c('Kingdom','Phylum','Class','Order','Family','Genus','Species')
otu_data_full <- merge_phyloseq(biom_object,map)
otu_data_pruned <- prune_samples(sample_sums(otu_data_full) >= 1000, otu_data_full)
otu_data <- subset_samples(otu_data_pruned, !is.na(colony_name))
rm(list=c('biom_object','otu_data_full','otu_data_pruned'))
gc()
hosttree <- read.tree('/Users/Ryan/Dropbox/Selectively_Shared_Vega_Lab_Stuff/GCMP/Projects/Australia_Coevolution_Paper/16S_analysis/1_canonical_starting_files/host_tree_from_step_11.newick')
for(compart in c('T','S','M')) {
comp.pruned <- subset_samples(otu_data, tissue_compartment==compart)
for(taxon in famlist[[compart]]) {
dir.create(paste0('/Volumes/Moorea/4-coevolution/coevolution/',compartments[[compart]],'/',taxon,'/'), recursive=T)
tre <- read.nexus(paste0('/Volumes/Moorea/acoev/output/filtered/',compartments[[compart]],'/',taxon,'/beast/',taxon,'_final_tree.tree'))
taxon_data <- merge_phyloseq(comp.pruned,tre)
sample_data(taxon_data)$sample_sum <- sample_sums(taxon_data)
n.pruned <- prune_samples(sample_sums(taxon_data) >= 10, taxon_data)
pruned.hosttree <- drop.tip(hosttree,hosttree$tip.label[!hosttree$tip.label %in% sample_data(n.pruned)$X16S_tree_name])
sample_data(n.pruned)$X16S_tree_name[!sample_data(n.pruned)$X16S_tree_name %in% pruned.hosttree$tip.label] <- NA
sample_data(n.pruned)$X16S_tree_name <- droplevels(sample_data(n.pruned)$X16S_tree_name)
c.pruned <- prune_samples(!is.na(sample_data(n.pruned)$X16S_tree_name), n.pruned)
pruned <- filter_taxa(c.pruned, function(x) any(x>0),TRUE)
otutable <- as.matrix(as.data.frame(otu_table(pruned)))
assocs <- melt(otutable,as.is=T)
assocs <- data.frame(count=assocs$value,otu=assocs$Var1,sample=assocs$Var2)
assocs <- merge(assocs,sample_data(pruned)[,c('X16S_tree_name','geographic_area','sample_sum','colony_name')],by.x='sample',by.y=0,all=F)
inv.host.full <- inverseA(pruned.hosttree)
inv.host <- inv.host.full$Ainv
host.ancests <- vector()
for(tip in pruned.hosttree$tip.label) {
temp <- list()
check <- 1
counter <- tip
while(check==1) {
temp[counter] <- inv.host.full$pedigree[inv.host.full$pedigree[,'node.names']==counter,][[2]]
counter <- temp[[length(temp)]]
if(is.na(inv.host.full$pedigree[inv.host.full$pedigree[,'node.names']==counter,][[2]])) {check <- 0}
}
host.ancests[tip] <- paste(temp, collapse=',')
}
pedigree_hosts <- unique(merge(as(map,'data.frame')[,c('X16S_tree_name','field_host_name')],host.ancests,by.x='X16S_tree_name',by.y=0))
write.table(pedigree_hosts,file=paste0('/Volumes/Moorea/4-coevolution/coevolution/',compartments[[compart]],'/',taxon,'/',taxon,'_host_pedigree.txt'),sep='\t',quote=F,row.names=F)
pruned.bacttree <- phy_tree(pruned)
pruned.bacttree$node.label <- NULL
inv.bact.full <- inverseA(pruned.bacttree)
inv.bact <- inv.bact.full$Ainv
bact.ancests <- vector()
for(tip in pruned.bacttree$tip.label) {
temp <- list()
check <- 1
counter <- tip
while(check==1) {
temp[counter] <- inv.bact.full$pedigree[inv.bact.full$pedigree[,'node.names']==counter,][[2]]
counter <- temp[[length(temp)]]
if(is.na(inv.bact.full$pedigree[inv.bact.full$pedigree[,'node.names']==counter,][[2]])) {check <- 0}
}
bact.ancests[tip] <- paste(temp, collapse=',')
}
pedigree_bacts <- unique(merge(as(tax_table(pruned),'matrix'),bact.ancests,by=0))
write.table(pedigree_bacts,file=paste0('/Volumes/Moorea/4-coevolution/coevolution/',compartments[[compart]],'/',taxon,'/',taxon,'_bact_pedigree.txt'),sep='\t',quote=F,row.names=F)
host.otuA<-as(kronecker(inv.host, inv.bact), "dgCMatrix") # coevolutionary effect
host.otuAS<-as(kronecker(inv.host, Diagonal(nrow(inv.bact))), "dgCMatrix") # host evolutionary effect
host.otuSA<-as(kronecker(Diagonal(nrow(inv.host)), inv.bact), "dgCMatrix") # parasite evolutionary effect
rownames(host.otuA)<-apply(expand.grid(rownames(inv.bact), rownames(inv.host)), 1, function(x){paste(x[2],x[1], sep=".")})
rownames(host.otuAS)<-rownames(host.otuSA)<-rownames(host.otuA)
##assocs$otu # non-phylogenetic main effect for bacteria
##assocs$X16S_tree_name # non-phylogenetic main effect for hosts
assocs$otu.phy<-assocs$otu # phylogenetic main effect for bacteria
assocs$X16S_tree_name.phy<-assocs$X16S_tree_name # phylogenetic main effect for hosts
assocs$Host.otu<-paste(assocs$X16S_tree_name, assocs$otu, sep=".") # non-phylogentic interaction effect
assocs$Host.otu[is.na(assocs$X16S_tree_name)] <- NA
assocs$Host.otu.cophy<-paste(assocs$X16S_tree_name, assocs$otu, sep=".") # phylogentic coevolution effect
assocs$Host.otu.cophy[is.na(assocs$X16S_tree_name)] <- NA
assocs$Host.otu.hostphy<-paste(assocs$X16S_tree_name, assocs$otu, sep=".") # phylogentic host evolutionary effect (specifies whether abundance is determined by an interaction between non-phylogenetic otu and the phylogenetic position of the host)
assocs$Host.otu.hostphy[is.na(assocs$X16S_tree_name)] <- NA
assocs$Host.otu.otuphy<-paste(assocs$X16S_tree_name, assocs$otu, sep=".") # phylogentic parasite evolutionary effect (specifies whether abundance is determined by an interaction between non-phylogenetic host species and the phylogenetic position of the otu)
assocs$Host.otu.otuphy[is.na(assocs$X16S_tree_name)] <- NA
assocs$colony.otu.phy <- paste(assocs$colony_name, assocs$otu, sep=".")
assocs$geo.otu <- paste(assocs$geographic_area, assocs$otu, sep=".")
otu.colonySA <- as(kronecker(Diagonal(length(unique(assocs$colony_name[!is.na(assocs$colony_name)]))), inv.bact), "dgCMatrix")
rownames(otu.colonySA)<-apply(expand.grid(rownames(inv.bact), unique(assocs$colony_name[!is.na(assocs$colony_name)])), 1, function(x){paste(x[2],x[1], sep=".")})
randfacts <- c('otu.phy','otu','geo.otu','Host.otu.hostphy','Host.otu.otuphy','Host.otu','Host.otu.cophy')
rand <- as.formula(paste0('~ ',paste(randfacts, collapse=' + ')))
priorC <- list(B=list(mu=c(0,1), V=diag(c(1e+8,1e-6))), R=list(V=1, nu=0))
## priors for the random evolutionary effects (from Hadfield):
phypri<-lapply(1:length(randfacts), function(x){list(V=1, nu=1, alpha.mu=0, alpha.V=1000)})
## combine priors:
priorC$G<-phypri
names(priorC$G)<-paste("G", 1:length(randfacts), sep="")
save.image(file=paste0('/Volumes/Moorea/4-coevolution/coevolution/',compartments[[compart]],'/',taxon,'/',taxon,'_mcmc_setup.RData'))
}
} |
library(GetHFData)
### Name: process.lob.from.df
### Title: Process LOB from asset dataframe
### Aliases: process.lob.from.df
### ** Examples
# no example (internal)
| /data/genthat_extracted_code/GetHFData/examples/process.lob.from.df.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 173 | r | library(GetHFData)
### Name: process.lob.from.df
### Title: Process LOB from asset dataframe
### Aliases: process.lob.from.df
### ** Examples
# no example (internal)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.