content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
par(cex=1.5)
| /cex.R | no_license | dushoff/Generation_distributions | R | false | false | 14 | r | par(cex=1.5)
|
# Bubble sort (translated from Vietnamese: "sap xep noi bot").
# Sample unsorted vector used to exercise bubble() below.
vec = c(1,-1,3,2,10,9)
bubble <- function(x){
  # Bubble sort: repeatedly sweep the vector, swapping adjacent
  # out-of-order pairs; after pass j the j largest values are in place.
  #
  # Args:
  #   x: vector of mutually comparable values (e.g. numeric).
  # Returns:
  #   x sorted in increasing order.
  n <- length(x)
  if (n < 2) {
    # BUG FIX: the original `for (j in 1:(n-1))` breaks for n < 2 because
    # 1:(n-1) counts DOWN (1, 0), leading to a comparison against the
    # out-of-bounds element x[n + 1] (NA) and an error in `if`.
    return(x)
  }
  for (j in seq_len(n - 1)) {
    for (i in seq_len(n - j)) {
      if (x[i] > x[i + 1]) {
        # Vectorised swap of the adjacent pair; no temp variable needed.
        x[c(i, i + 1)] <- x[c(i + 1, i)]
      }
    }
  }
  return(x)
}
# Demo: sort the sample vector (autoprints the sorted result).
bubble(vec)
| /Bài tập 16.R | no_license | LBMH-lang/CTDTGT_R | R | false | false | 270 | r | #sap xep noi bot
# Sample unsorted vector used to exercise bubble() below.
vec = c(1,-1,3,2,10,9)
bubble <- function(x){
  # Bubble sort: repeatedly sweep the vector, swapping adjacent
  # out-of-order pairs; after pass j the j largest values are in place.
  #
  # Args:
  #   x: vector of mutually comparable values (e.g. numeric).
  # Returns:
  #   x sorted in increasing order.
  n <- length(x)
  if (n < 2) {
    # BUG FIX: the original `for (j in 1:(n-1))` breaks for n < 2 because
    # 1:(n-1) counts DOWN (1, 0), leading to a comparison against the
    # out-of-bounds element x[n + 1] (NA) and an error in `if`.
    return(x)
  }
  for (j in seq_len(n - 1)) {
    for (i in seq_len(n - j)) {
      if (x[i] > x[i + 1]) {
        # Vectorised swap of the adjacent pair; no temp variable needed.
        x[c(i, i + 1)] <- x[c(i + 1, i)]
      }
    }
  }
  return(x)
}
# Demo: sort the sample vector (autoprints the sorted result).
bubble(vec)
|
##
# The script takes a list of movie names, does a google search and retrieves the URLs
# which contain 'imdb' in it. The goal is to eventually get the imdb IDs of the movies
# Some of the code is taken from this stackoverflow answer: https://stackoverflow.com/questions/32889136/how-to-get-google-search-results/40181564
##
# Code for getting the IMDb Url for the movies
library(urltools)
library(rvest)
library(curl)
#load data
# Read the semicolon-separated list of nominated movies; expects at least
# the columns Film and Year.
df<-read.csv("nominated_movies.csv", header = TRUE, sep = ";", stringsAsFactors = FALSE)
# Build one Google query string per movie, of the form "Film(Year)".
movie_list<-as.character(paste0(df$Film, "(", df$Year, ")"))
# Function for getting website.
# Google-search the given movie name and return the result URLs that look
# like IMDb links (used downstream to recover IMDb IDs).
#
# Args:
#   name: search string, e.g. "Movie Title(1999)".
# Returns:
#   character vector of <cite> snippets matching "imdb" (possibly empty).
# NOTE(review): scraping Google results is brittle and rate-limited; the
# caller's comment reports HTTP 503 under heavy use -- confirm throttling.
getWebsite <- function(name){
url = URLencode(paste0("https://www.google.com/search?q=",name))
#page <- read_html(url)
# Fetch with an explicit User-Agent; Google tends to block the default one.
page<-read_html(curl(url, handle = curl::new_handle("useragent"="Chrome")))
results <- page %>%
html_nodes("cite") %>% # Get all nodes of type cite. You can change this to grab other node types.
html_text()
# Keep entries whose 5th character onward starts "imdb" (e.g. "www.imdb...").
result<-results[grep("^....imdb.*", results)]
# Return results if you want to see them all.
return(result)
}
# Look up every movie; each element of URL is the character vector of
# IMDb-looking hits for that title.
URL<-sapply(movie_list, getWebsite) # depending on the network you are on, you will most likely face a HTTP error 503
# add to dataframe
df$URL<-URL
# Write to CSV
# NOTE(review): apply() coerces the data frame to a character matrix here,
# which flattens any list column produced by sapply() above.
df<-apply(df, 2, as.character)
write.csv(df, file = "MovieSite.csv", sep = ',', row.names = FALSE, append = T) # The resulting data frame will need further preprocessing | /get-movie-url.R | no_license | mdoucem/oscar-movie-appearance-analysis | R | false | false | 1,426 | r | ##
# The script takes a list of movie names, does a google search and retrieves the URLs
# which contain 'imdb' in it. The goal is to eventually get the imdb IDs of the movies
# Some of the code is taken from this stackoverflow answer: https://stackoverflow.com/questions/32889136/how-to-get-google-search-results/40181564
##
# Code for getting the IMDb Url for the movies
library(urltools)
library(rvest)
library(curl)
#load data
# Read the semicolon-separated list of nominated movies; expects at least
# the columns Film and Year.
df<-read.csv("nominated_movies.csv", header = TRUE, sep = ";", stringsAsFactors = FALSE)
# Build one Google query string per movie, of the form "Film(Year)".
movie_list<-as.character(paste0(df$Film, "(", df$Year, ")"))
# Function for getting website.
# Google-search the given movie name and return the result URLs that look
# like IMDb links (used downstream to recover IMDb IDs).
#
# Args:
#   name: search string, e.g. "Movie Title(1999)".
# Returns:
#   character vector of <cite> snippets matching "imdb" (possibly empty).
# NOTE(review): scraping Google results is brittle and rate-limited; the
# caller's comment reports HTTP 503 under heavy use -- confirm throttling.
getWebsite <- function(name){
url = URLencode(paste0("https://www.google.com/search?q=",name))
#page <- read_html(url)
# Fetch with an explicit User-Agent; Google tends to block the default one.
page<-read_html(curl(url, handle = curl::new_handle("useragent"="Chrome")))
results <- page %>%
html_nodes("cite") %>% # Get all nodes of type cite. You can change this to grab other node types.
html_text()
# Keep entries whose 5th character onward starts "imdb" (e.g. "www.imdb...").
result<-results[grep("^....imdb.*", results)]
# Return results if you want to see them all.
return(result)
}
# Look up every movie; each element of URL is the character vector of
# IMDb-looking hits for that title.
URL<-sapply(movie_list, getWebsite) # depending on the network you are on, you will most likely face a HTTP error 503
# add to dataframe
df$URL<-URL
# Write to CSV
# NOTE(review): apply() coerces the data frame to a character matrix here,
# which flattens any list column produced by sapply() above.
df<-apply(df, 2, as.character)
write.csv(df, file = "MovieSite.csv", sep = ',', row.names = FALSE, append = T) # The resulting data frame will need further preprocessing |
\alias{gtkDrawSlider}
\name{gtkDrawSlider}
\title{gtkDrawSlider}
\description{
Draws a slider in the given rectangle on \code{window} using the
given style and orientation.
\strong{WARNING: \code{gtk_draw_slider} is deprecated and should not be used in newly-written code.}
}
\usage{gtkDrawSlider(object, window, state.type, shadow.type, x, y,
width, height, orientation)}
\arguments{
\item{\verb{object}}{a \code{\link{GtkStyle}}}
\item{\verb{window}}{a \code{\link{GdkWindow}}}
\item{\verb{state.type}}{a state}
\item{\verb{shadow.type}}{a shadow}
\item{\verb{x}}{the x origin of the rectangle in which to draw a slider}
\item{\verb{y}}{the y origin of the rectangle in which to draw a slider}
\item{\verb{width}}{the width of the rectangle in which to draw a slider}
\item{\verb{height}}{the height of the rectangle in which to draw a slider}
\item{\verb{orientation}}{the orientation to be used}
}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
| /RGtk2/man/gtkDrawSlider.Rd | no_license | lawremi/RGtk2 | R | false | false | 978 | rd | \alias{gtkDrawSlider}
\name{gtkDrawSlider}
\title{gtkDrawSlider}
\description{
Draws a slider in the given rectangle on \code{window} using the
given style and orientation.
\strong{WARNING: \code{gtk_draw_slider} is deprecated and should not be used in newly-written code.}
}
\usage{gtkDrawSlider(object, window, state.type, shadow.type, x, y,
width, height, orientation)}
\arguments{
\item{\verb{object}}{a \code{\link{GtkStyle}}}
\item{\verb{window}}{a \code{\link{GdkWindow}}}
\item{\verb{state.type}}{a state}
\item{\verb{shadow.type}}{a shadow}
\item{\verb{x}}{the x origin of the rectangle in which to draw a slider}
\item{\verb{y}}{the y origin of the rectangle in which to draw a slider}
\item{\verb{width}}{the width of the rectangle in which to draw a slider}
\item{\verb{height}}{the height of the rectangle in which to draw a slider}
\item{\verb{orientation}}{the orientation to be used}
}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
# Convert R colour names/hex codes to hex strings carrying an alpha channel.
#
# Args:
#   col.pal:   character vector of colours understood by col2rgb().
#   alpha.val: opacity in [0, 1] applied to every colour (default 0.5).
# Returns:
#   character vector of "#RRGGBBAA" strings, one per input colour.
col.2.rgb.fun<-function(col.pal, alpha.val=.5) {
  # col2rgb() yields a 3 x n matrix of 0-255 values; rescale to [0, 1]
  # because rgb() expects that range by default.
  col.rgb <- col2rgb(col.pal) / 255
  # IMPROVEMENT: rgb() is vectorised over its channel arguments, so build
  # the whole palette at once instead of growing a vector inside a loop.
  rgb(col.rgb[1, ], col.rgb[2, ], col.rgb[3, ], alpha = alpha.val)
}
##### Function to process output of stan model and plot fit
# Plots the observed age-stratified counts (points) with an ensemble of
# posterior-predicted rate curves (lines), one panel per column of
# `count.data`, arranged three panels per row.
#
# Args:
#   model:       fitted Stan model exposing a "poi_rate" parameter
#                (assumed to hold ncol(count.data) * length(mid_age)
#                values per draw -- TODO confirm against the Stan code).
#   count.data:  matrix/data frame of counts, one column per year/stratum.
#   mid_age:     mid-points of the age categories (x axis).
#   col.plot:    base colour for the posterior draw curves.
#   title:       overall title written in the outer margin.
#   year.counts: optional per-column labels (e.g. years) for the panels.
#   ind.keep:    optional chain indices to retain from the draws;
#                NA (default) keeps everything.
# Side effects: changes par(mfrow/mar/oma) without restoring it.
plot.stan.fun<-function(model,
                        count.data, # Count data used for the fit
                        mid_age, # mid_points of age categories in data, used for fit
                        col.plot="royalblue",
                        title=NULL,
                        year.counts=NA,
                        ind.keep=NA
                        ) {
  pred.rate<-extract(model, pars="poi_rate", inc_warmup=FALSE, permute=F)
  # BUG FIX: the original `if(!is.na(ind.keep))` is an error in R >= 4.2
  # whenever ind.keep is a vector of indices (length > 1 condition).
  # Subset only when at least one non-NA index was supplied.
  if(!all(is.na(ind.keep))){
    pred.rate<-pred.rate[,ind.keep,]
  }
  # Keep the first (remaining) chain only.
  pred.rate<-pred.rate[,1,]
  #### Calculate number of plots needed (3 panels per row)
  nrow_plots<-ceiling(ncol(count.data)/3)
  par(mfrow=c(nrow_plots, 3), mar=c(0.5, 0.5, 0.5, 0.5), oma=c(5,5,5,3 ))
  for(j in 1:ncol(count.data)) {
    plot(mid_age, count.data[,j], pch=19, cex=.3, ylim=c(0, max(count.data)), ylab="", xlab="", yaxt="n", xaxt="n")
    # Overlay up to 200 randomly sampled posterior draws for this column.
    for(i in 1:min(nrow(pred.rate), 200)) {
      lines(mid_age, pred.rate[floor(runif(1, 1, dim(pred.rate)[[1]])),((j*length(mid_age)-(length(mid_age)-1))):(j*length(mid_age))], col=col.2.rgb.fun(col.plot, .2), lwd=.05)
    }
    # Re-draw the observed points on top of the curves.
    points(mid_age, count.data[,j], pch=19, cex=.3)
    if(c(j-1)%%3==0) {
      axis(2)  # y-axis labels only on the leftmost column of panels
    }
    if(j > c(ncol(count.data)-ncol(count.data)%%3)) {
      axis(1)  # x-axis labels only on the bottom row of panels
    }
    axis(1, labels=F, tick=T)
    axis(2, labels=F, tick=T)
    if(!is.na(year.counts[j])) {legend("topright", legend=c(year.counts[j]), bty="n", cex=.7)}
  }
  mtext("Age", 1, line=3, cex=1, outer=T)
  mtext("Dengue counts", 2, line=3, cex=1, outer=T)
  mtext(title, 3, line=2, cex=1.2, outer=T)
}
| /Code/figure_plot_stan_output.R | permissive | isabelrodbar/dengue_foi | R | false | false | 1,659 | r |
# Convert R colour names/hex codes to hex strings carrying an alpha channel.
#
# Args:
#   col.pal:   character vector of colours understood by col2rgb().
#   alpha.val: opacity in [0, 1] applied to every colour (default 0.5).
# Returns:
#   character vector of "#RRGGBBAA" strings, one per input colour.
col.2.rgb.fun<-function(col.pal, alpha.val=.5) {
  # col2rgb() yields a 3 x n matrix of 0-255 values; rescale to [0, 1]
  # because rgb() expects that range by default.
  col.rgb <- col2rgb(col.pal) / 255
  # IMPROVEMENT: rgb() is vectorised over its channel arguments, so build
  # the whole palette at once instead of growing a vector inside a loop.
  rgb(col.rgb[1, ], col.rgb[2, ], col.rgb[3, ], alpha = alpha.val)
}
##### Function to process output of stan model and plot fit
# Plots the observed age-stratified counts (points) with an ensemble of
# posterior-predicted rate curves (lines), one panel per column of
# `count.data`, arranged three panels per row.
#
# Args:
#   model:       fitted Stan model exposing a "poi_rate" parameter
#                (assumed to hold ncol(count.data) * length(mid_age)
#                values per draw -- TODO confirm against the Stan code).
#   count.data:  matrix/data frame of counts, one column per year/stratum.
#   mid_age:     mid-points of the age categories (x axis).
#   col.plot:    base colour for the posterior draw curves.
#   title:       overall title written in the outer margin.
#   year.counts: optional per-column labels (e.g. years) for the panels.
#   ind.keep:    optional chain indices to retain from the draws;
#                NA (default) keeps everything.
# Side effects: changes par(mfrow/mar/oma) without restoring it.
plot.stan.fun<-function(model,
                        count.data, # Count data used for the fit
                        mid_age, # mid_points of age categories in data, used for fit
                        col.plot="royalblue",
                        title=NULL,
                        year.counts=NA,
                        ind.keep=NA
                        ) {
  pred.rate<-extract(model, pars="poi_rate", inc_warmup=FALSE, permute=F)
  # BUG FIX: the original `if(!is.na(ind.keep))` is an error in R >= 4.2
  # whenever ind.keep is a vector of indices (length > 1 condition).
  # Subset only when at least one non-NA index was supplied.
  if(!all(is.na(ind.keep))){
    pred.rate<-pred.rate[,ind.keep,]
  }
  # Keep the first (remaining) chain only.
  pred.rate<-pred.rate[,1,]
  #### Calculate number of plots needed (3 panels per row)
  nrow_plots<-ceiling(ncol(count.data)/3)
  par(mfrow=c(nrow_plots, 3), mar=c(0.5, 0.5, 0.5, 0.5), oma=c(5,5,5,3 ))
  for(j in 1:ncol(count.data)) {
    plot(mid_age, count.data[,j], pch=19, cex=.3, ylim=c(0, max(count.data)), ylab="", xlab="", yaxt="n", xaxt="n")
    # Overlay up to 200 randomly sampled posterior draws for this column.
    for(i in 1:min(nrow(pred.rate), 200)) {
      lines(mid_age, pred.rate[floor(runif(1, 1, dim(pred.rate)[[1]])),((j*length(mid_age)-(length(mid_age)-1))):(j*length(mid_age))], col=col.2.rgb.fun(col.plot, .2), lwd=.05)
    }
    # Re-draw the observed points on top of the curves.
    points(mid_age, count.data[,j], pch=19, cex=.3)
    if(c(j-1)%%3==0) {
      axis(2)  # y-axis labels only on the leftmost column of panels
    }
    if(j > c(ncol(count.data)-ncol(count.data)%%3)) {
      axis(1)  # x-axis labels only on the bottom row of panels
    }
    axis(1, labels=F, tick=T)
    axis(2, labels=F, tick=T)
    if(!is.na(year.counts[j])) {legend("topright", legend=c(year.counts[j]), bty="n", cex=.7)}
  }
  mtext("Age", 1, line=3, cex=1, outer=T)
  mtext("Dengue counts", 2, line=3, cex=1, outer=T)
  mtext(title, 3, line=2, cex=1.2, outer=T)
}
|
% Generated by roxygen2 (4.1.0.9001): do not edit by hand
% Please edit documentation in R/pedigree.format.R
\name{pedigree.format}
\alias{pedigree.format}
\title{Format Pedigree
This function formats the pedigree for downstream analysis.}
\usage{
pedigree.format(ped, pedigree.type = "simple")
}
\arguments{
\item{ped}{Pedigree object in "simple" format (Three columns for ANIMAL,
MOTHER and FATHER) or in "plink" format (Five to Six columns for FAMILY,
ANIMAL, FATHER, MOTHER, SEX and Phenotype, where the phenotype column is
optional). The simple argument can recognise the order of parents if
they are named sensibly. Run simple.ped.name.rules() for an example.}
\item{pedigree.type}{Defaults to "simple", can also accept "plink" which
is equivalent to the first 5 to 6 columns of a PLINK .ped file.}
}
\description{
Format Pedigree
This function formats the pedigree for downstream analysis.
}
| /man/pedigree.format.Rd | no_license | susjoh/simperSNP | R | false | false | 902 | rd | % Generated by roxygen2 (4.1.0.9001): do not edit by hand
% Please edit documentation in R/pedigree.format.R
\name{pedigree.format}
\alias{pedigree.format}
\title{Format Pedigree
This function formats the pedigree for downstream analysis.}
\usage{
pedigree.format(ped, pedigree.type = "simple")
}
\arguments{
\item{ped}{Pedigree object in "simple" format (Three columns for ANIMAL,
MOTHER and FATHER) or in "plink" format (Five to Six columns for FAMILY,
ANIMAL, FATHER, MOTHER, SEX and Phenotype, where the phenotype column is
optional). The simple argument can recognise the order of parents if
they are named sensibly. Run simple.ped.name.rules() for an example.}
\item{pedigree.type}{Defaults to "simple", can also accept "plink" which
is equivalent to the first 5 to 6 columns of a PLINK .ped file.}
}
\description{
Format Pedigree
This function formats the pedigree for downstream analysis.
}
|
data(iris)
str(iris)
head(iris)
plot(iris)
| /dadas_iris.R | no_license | Marcos001/R_Projects | R | false | false | 44 | r |
data(iris)
str(iris)
head(iris)
plot(iris)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/preprocessing.R
\name{load_data}
\alias{load_data}
\title{User friendly data loader}
\usage{
load_data(input, sep, cols_to_read = NULL)
}
\arguments{
\item{input}{can be a file name or a data frame}
\item{sep}{delimiter if input is a file name}
\item{cols_to_read}{cols_only object indicating which columns to read if input is a file path, default = NULL}
}
\value{
loaded data
}
\description{
User friendly data loader
}
\examples{
load_data(data.frame(x = c(1,2,3),
y = c(4,5,6)))
}
| /man/load_data.Rd | permissive | keviny2/methylationfun | R | false | true | 586 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/preprocessing.R
\name{load_data}
\alias{load_data}
\title{User friendly data loader}
\usage{
load_data(input, sep, cols_to_read = NULL)
}
\arguments{
\item{input}{can be a file name or a data frame}
\item{sep}{delimiter if input is a file name}
\item{cols_to_read}{cols_only object indicating which columns to read if input is a file path, default = NULL}
}
\value{
loaded data
}
\description{
User friendly data loader
}
\examples{
load_data(data.frame(x = c(1,2,3),
y = c(4,5,6)))
}
|
library(sqldf)
# Compute, for every (activity, subject) pair, the average of every
# mean/std feature column of the combined training + test data.
# Returns a data frame with ActivityId, ActivityName, SubjectId plus one
# "AVG-<feature>" column per feature.  Grouping is done with an on-the-fly
# SQL query (sqldf) against the merged data frame.
getTheAveragesOfTheTrainingAndTestDataByActivityAndSubject <-
function()
{
theTrainingAndTestData <-
getTheTrainingAndTestData()
# Columns 1-3 are the id/label columns; the rest are the numeric
# feature columns to be averaged.
theNamesOfTheMeansAndStandardDeviationColumns <-
names(theTrainingAndTestData)[c(-1, -2, -3)]
# Build the select-list fragment: avg("col") `AVG-col` for each feature.
theAverageMeansAndStandardDeviationsSqlStatement <-
paste(
'avg("',
theNamesOfTheMeansAndStandardDeviationColumns,
'") `AVG-',
theNamesOfTheMeansAndStandardDeviationColumns,
'`',
sep = "",
collapse = ", "
)
# Full statement: select ids + averages, grouped by activity and subject.
theSqlStatementToAverageBySubjectAndActivity <-
paste(
"select ActivityId, ActivityName, SubjectId, ",
theAverageMeansAndStandardDeviationsSqlStatement,
"from theTrainingAndTestData",
"group by ActivityId, ActivityName, SubjectId"
)
theAveragesBySubjectAndActivity <-
sqldf(
theSqlStatementToAverageBySubjectAndActivity
)
theAveragesBySubjectAndActivity
}
# Stack the mean/std summaries of the training and test splits into one
# data frame (all training rows first, then all test rows).
getTheTrainingAndTestData <-
    function()
    {
        rbind(
            getTheMeansAndStandardDeviationsOfTheSpecifiedData("train"),
            getTheMeansAndStandardDeviationsOfTheSpecifiedData("test")
        )
    }
# Root directory of the UCI HAR dataset, relative to the working directory.
basePathOfTheData <-
"UCI HAR Dataset/"
# Load one data split ("train" or "test") and return a data frame of
# SubjectId, ActivityId, ActivityName followed by only those feature
# columns whose names mention mean/std.
# Relies on the three input files having matching row order.
getTheMeansAndStandardDeviationsOfTheSpecifiedData <-
function(
theNameOfTheData # either test or train
)
{
theSubjectIds <-
getTheSubjectsOfTheSpecifiedData(
theNameOfTheData
)
theActivities <-
getTheActivitiesOfTheSpecifiedData(
theNameOfTheData
)
theData <-
getTheSpecifiedData(
theNameOfTheData
)
# add the activity id and name columns
theData <-
cbind(
theActivities,
theData
)
# add the subject id column
theData <-
cbind(
theSubjectIds,
theData
)
theIndexesOfTheFeaturesRegardingMeansAndStandardDeviations <-
getTheIndexesOfTheFeaturesRegardingMeansAndStandardDeviations()
# take only the id/label columns plus the mean/std feature columns;
# the +3 offset skips SubjectId, ActivityId and ActivityName.
theData <-
theData[
,
c(
1, # subject id
2, # activity id
3, # activity name
(theIndexesOfTheFeaturesRegardingMeansAndStandardDeviations + 3)
)
]
theData
}
# Read the subject ids (one per observation) for the requested split,
# e.g. "UCI HAR Dataset/train/subject_train.txt".
getTheSubjectsOfTheSpecifiedData <-
    function(
        theNameOfTheData # either test or train
    )
    {
        theSubjectFileName <-
            paste0(
                basePathOfTheData,
                theNameOfTheData,
                "/subject_",
                theNameOfTheData,
                ".txt"
            )
        read.table(
            theSubjectFileName,
            header = FALSE,
            col.names = c("SubjectId")
        )
    }
# Return, for the requested split, a data frame with one row per
# observation holding ActivityId and the matching ActivityName.
getTheActivitiesOfTheSpecifiedData <-
    function(
        theNameOfTheData # either test or train
    )
    {
        theActivityIds <-
            getTheActivityIdsOfTheSpecifiedData(
                theNameOfTheData
            )
        theActivityMasterData <-
            getTheActivityMasterData()
        # BUG FIX: the previous merge() sorted the result by ActivityId,
        # destroying the original row order and therefore misaligning the
        # activity labels with the subject ids and feature rows that are
        # cbind()-ed to them later.  match() keeps the row order intact.
        theActivities <-
            data.frame(
                ActivityId = theActivityIds$ActivityId,
                ActivityName =
                    theActivityMasterData$Name[
                        match(
                            theActivityIds$ActivityId,
                            theActivityMasterData$Id
                        )
                    ]
            )
        theActivities
    }
# Read the activity codes (one per observation) for the requested split
# from y_<split>.txt.
getTheActivityIdsOfTheSpecifiedData <-
    function(
        theNameOfTheData # either test or train
    )
    {
        theLabelFileName <-
            paste0(
                basePathOfTheData,
                theNameOfTheData,
                "/y_",
                theNameOfTheData,
                ".txt"
            )
        read.table(
            theLabelFileName,
            header = FALSE,
            col.names = c("ActivityId")
        )
    }
# Read the raw feature matrix X_<split>.txt for the requested split,
# naming the columns after the entries of features.txt.
getTheSpecifiedData <-
    function(
        theNameOfTheData # either test or train
    )
    {
        theFeatureIndexesAndNames <-
            getTheFeatureIndexesAndNames()
        theDataFileName <-
            paste0(
                basePathOfTheData,
                theNameOfTheData,
                "/X_",
                theNameOfTheData,
                ".txt"
            )
        read.table(
            theDataFileName,
            header = FALSE,
            col.names = theFeatureIndexesAndNames$Name
        )
    }
# Indexes (positions within features.txt) of every feature whose name
# mentions "mean" or "std", matched case-insensitively.
getTheIndexesOfTheFeaturesRegardingMeansAndStandardDeviations <-
    function()
    {
        theFeatures <-
            getTheFeatureIndexesAndNames()
        isMeanOrStd <-
            grepl(
                "mean|std",
                theFeatures$Name,
                ignore.case = TRUE
            )
        theFeatures$Index[isMeanOrStd]
    }
# Read features.txt and return a data frame with columns Index and Name.
# The "()" suffixes are stripped from the names so they survive the
# name-mangling that read.table applies via col.names elsewhere.
getTheFeatureIndexesAndNames <-
    function()
    {
        theFeatureIndexesAndNames <-
            read.table(
                paste0(
                    basePathOfTheData,
                    "features.txt"
                ),
                header = FALSE,
                col.names =
                    c(
                        "Index",
                        "Name"
                    )
            )
        # FIX: the original gsub() call ended with a stray trailing comma
        # (an empty argument silently matched to ignore.case); removed.
        theFeatureIndexesAndNames$Name <-
            gsub(
                "\\(\\)",
                "",
                theFeatureIndexesAndNames$Name
            )
        theFeatureIndexesAndNames
    }
# Master lookup table for activity codes: reads activity_labels.txt into
# a data frame with columns Id (numeric code) and Name (activity label).
getTheActivityMasterData <-
function()
{
theActivities <-
read.table(
paste(
basePathOfTheData,
"activity_labels.txt",
sep = ""
),
header = FALSE,
col.names =
c(
"Id",
"Name"
)
)
theActivities
} | /run_analysis.R | no_license | marcolisi/Coursera_GettingAndCleaningData_CourseProject | R | false | false | 7,247 | r | library(sqldf)
# Compute, for every (activity, subject) pair, the average of every
# mean/std feature column of the combined training + test data.
# Returns a data frame with ActivityId, ActivityName, SubjectId plus one
# "AVG-<feature>" column per feature.  Grouping is done with an on-the-fly
# SQL query (sqldf) against the merged data frame.
getTheAveragesOfTheTrainingAndTestDataByActivityAndSubject <-
function()
{
theTrainingAndTestData <-
getTheTrainingAndTestData()
# Columns 1-3 are the id/label columns; the rest are the numeric
# feature columns to be averaged.
theNamesOfTheMeansAndStandardDeviationColumns <-
names(theTrainingAndTestData)[c(-1, -2, -3)]
# Build the select-list fragment: avg("col") `AVG-col` for each feature.
theAverageMeansAndStandardDeviationsSqlStatement <-
paste(
'avg("',
theNamesOfTheMeansAndStandardDeviationColumns,
'") `AVG-',
theNamesOfTheMeansAndStandardDeviationColumns,
'`',
sep = "",
collapse = ", "
)
# Full statement: select ids + averages, grouped by activity and subject.
theSqlStatementToAverageBySubjectAndActivity <-
paste(
"select ActivityId, ActivityName, SubjectId, ",
theAverageMeansAndStandardDeviationsSqlStatement,
"from theTrainingAndTestData",
"group by ActivityId, ActivityName, SubjectId"
)
theAveragesBySubjectAndActivity <-
sqldf(
theSqlStatementToAverageBySubjectAndActivity
)
theAveragesBySubjectAndActivity
}
# Stack the mean/std summaries of the training and test splits into one
# data frame (all training rows first, then all test rows).
getTheTrainingAndTestData <-
    function()
    {
        rbind(
            getTheMeansAndStandardDeviationsOfTheSpecifiedData("train"),
            getTheMeansAndStandardDeviationsOfTheSpecifiedData("test")
        )
    }
# Root directory of the UCI HAR dataset, relative to the working directory.
basePathOfTheData <-
"UCI HAR Dataset/"
# Load one data split ("train" or "test") and return a data frame of
# SubjectId, ActivityId, ActivityName followed by only those feature
# columns whose names mention mean/std.
# Relies on the three input files having matching row order.
getTheMeansAndStandardDeviationsOfTheSpecifiedData <-
function(
theNameOfTheData # either test or train
)
{
theSubjectIds <-
getTheSubjectsOfTheSpecifiedData(
theNameOfTheData
)
theActivities <-
getTheActivitiesOfTheSpecifiedData(
theNameOfTheData
)
theData <-
getTheSpecifiedData(
theNameOfTheData
)
# add the activity id and name columns
theData <-
cbind(
theActivities,
theData
)
# add the subject id column
theData <-
cbind(
theSubjectIds,
theData
)
theIndexesOfTheFeaturesRegardingMeansAndStandardDeviations <-
getTheIndexesOfTheFeaturesRegardingMeansAndStandardDeviations()
# take only the id/label columns plus the mean/std feature columns;
# the +3 offset skips SubjectId, ActivityId and ActivityName.
theData <-
theData[
,
c(
1, # subject id
2, # activity id
3, # activity name
(theIndexesOfTheFeaturesRegardingMeansAndStandardDeviations + 3)
)
]
theData
}
# Read the subject ids (one per observation) for the requested split,
# e.g. "UCI HAR Dataset/train/subject_train.txt".
getTheSubjectsOfTheSpecifiedData <-
    function(
        theNameOfTheData # either test or train
    )
    {
        theSubjectFileName <-
            paste0(
                basePathOfTheData,
                theNameOfTheData,
                "/subject_",
                theNameOfTheData,
                ".txt"
            )
        read.table(
            theSubjectFileName,
            header = FALSE,
            col.names = c("SubjectId")
        )
    }
# Return, for the requested split, a data frame with one row per
# observation holding ActivityId and the matching ActivityName.
getTheActivitiesOfTheSpecifiedData <-
    function(
        theNameOfTheData # either test or train
    )
    {
        theActivityIds <-
            getTheActivityIdsOfTheSpecifiedData(
                theNameOfTheData
            )
        theActivityMasterData <-
            getTheActivityMasterData()
        # BUG FIX: the previous merge() sorted the result by ActivityId,
        # destroying the original row order and therefore misaligning the
        # activity labels with the subject ids and feature rows that are
        # cbind()-ed to them later.  match() keeps the row order intact.
        theActivities <-
            data.frame(
                ActivityId = theActivityIds$ActivityId,
                ActivityName =
                    theActivityMasterData$Name[
                        match(
                            theActivityIds$ActivityId,
                            theActivityMasterData$Id
                        )
                    ]
            )
        theActivities
    }
# Read the activity codes (one per observation) for the requested split
# from y_<split>.txt.
getTheActivityIdsOfTheSpecifiedData <-
    function(
        theNameOfTheData # either test or train
    )
    {
        theLabelFileName <-
            paste0(
                basePathOfTheData,
                theNameOfTheData,
                "/y_",
                theNameOfTheData,
                ".txt"
            )
        read.table(
            theLabelFileName,
            header = FALSE,
            col.names = c("ActivityId")
        )
    }
# Read the raw feature matrix X_<split>.txt for the requested split,
# naming the columns after the entries of features.txt.
getTheSpecifiedData <-
    function(
        theNameOfTheData # either test or train
    )
    {
        theFeatureIndexesAndNames <-
            getTheFeatureIndexesAndNames()
        theDataFileName <-
            paste0(
                basePathOfTheData,
                theNameOfTheData,
                "/X_",
                theNameOfTheData,
                ".txt"
            )
        read.table(
            theDataFileName,
            header = FALSE,
            col.names = theFeatureIndexesAndNames$Name
        )
    }
# Indexes (positions within features.txt) of every feature whose name
# mentions "mean" or "std", matched case-insensitively.
getTheIndexesOfTheFeaturesRegardingMeansAndStandardDeviations <-
    function()
    {
        theFeatures <-
            getTheFeatureIndexesAndNames()
        isMeanOrStd <-
            grepl(
                "mean|std",
                theFeatures$Name,
                ignore.case = TRUE
            )
        theFeatures$Index[isMeanOrStd]
    }
# Read features.txt and return a data frame with columns Index and Name.
# The "()" suffixes are stripped from the names so they survive the
# name-mangling that read.table applies via col.names elsewhere.
getTheFeatureIndexesAndNames <-
    function()
    {
        theFeatureIndexesAndNames <-
            read.table(
                paste0(
                    basePathOfTheData,
                    "features.txt"
                ),
                header = FALSE,
                col.names =
                    c(
                        "Index",
                        "Name"
                    )
            )
        # FIX: the original gsub() call ended with a stray trailing comma
        # (an empty argument silently matched to ignore.case); removed.
        theFeatureIndexesAndNames$Name <-
            gsub(
                "\\(\\)",
                "",
                theFeatureIndexesAndNames$Name
            )
        theFeatureIndexesAndNames
    }
# Master lookup table for activity codes: reads activity_labels.txt into
# a data frame with columns Id (numeric code) and Name (activity label).
getTheActivityMasterData <-
function()
{
theActivities <-
read.table(
paste(
basePathOfTheData,
"activity_labels.txt",
sep = ""
),
header = FALSE,
col.names =
c(
"Id",
"Name"
)
)
theActivities
} |
\name{d_german}
\alias{d_german}
\docType{data}
\title{German Sample Data (complete orderings)}
\description{The German Sample dataset (\code{d_german}) is part of a comparative cross-sectional study on political actions and mass participation involving five Western countries. The dataset regards a sample of \eqn{N=2262} German respondents who were asked to rank \eqn{K=4} political goals in order of desirability, namely: 1 = maintaining order in the nation, 2 = giving people more say in the decisions of government, 3 = fighting rising prices and 4 = protecting freedom of speech. The dataset is composed of complete orderings.
}
\usage{data(d_german)}
\format{
Object of S3 class \code{c("top_ordering","matrix")} gathering a matrix of complete orderings with \eqn{N=2262} rows and \eqn{K=4} columns. Each row lists the political goals from the most desiderable (\code{Rank_1}) to the least desiderable (\code{Rank_4}) for a given respondent.
}
\references{
Croon, M. A. (1989). Latent class models for the analysis of rankings. In De Soete, G., Feger, H. and Klauer, K. C. (eds), \emph{New Developments in Psychological Choice Modeling}, pages 99--121. North-Holland: Amsterdam.
Barnes, S. H. et al. (1979). Political action. Mass participation in five Western democracies. London: Sage.
}
\examples{
data(d_german)
head(d_german)
}
\keyword{datasets}
| /PLMIX/man/d_german.Rd | no_license | akhikolla/InformationHouse | R | false | false | 1,361 | rd | \name{d_german}
\alias{d_german}
\docType{data}
\title{German Sample Data (complete orderings)}
\description{The German Sample dataset (\code{d_german}) is part of a comparative cross-sectional study on political actions and mass participation involving five Western countries. The dataset regards a sample of \eqn{N=2262} German respondents who were asked to rank \eqn{K=4} political goals in order of desirability, namely: 1 = maintaining order in the nation, 2 = giving people more say in the decisions of government, 3 = fighting rising prices and 4 = protecting freedom of speech. The dataset is composed of complete orderings.
}
\usage{data(d_german)}
\format{
Object of S3 class \code{c("top_ordering","matrix")} gathering a matrix of complete orderings with \eqn{N=2262} rows and \eqn{K=4} columns. Each row lists the political goals from the most desiderable (\code{Rank_1}) to the least desiderable (\code{Rank_4}) for a given respondent.
}
\references{
Croon, M. A. (1989). Latent class models for the analysis of rankings. In De Soete, G., Feger, H. and Klauer, K. C. (eds), \emph{New Developments in Psychological Choice Modeling}, pages 99--121. North-Holland: Amsterdam.
Barnes, S. H. et al. (1979). Political action. Mass participation in five Western democracies. London: Sage.
}
\examples{
data(d_german)
head(d_german)
}
\keyword{datasets}
|
rm(list=ls())
require(optimx)
## Optimization test function HOBBS
## ?? refs (put in .doc??)
## Nash and Walker-Smith (1987, 1989) ...
# Hobbs weeds problem -- sum-of-squares objective.
# Returns a huge sentinel (.Machine$double.xmax) when the decay parameter
# x[3] is outside the computable region, otherwise the residual sum of
# squares from hobbs.res().
hobbs.f <- function(x) {
  if (abs(12 * x[3]) > 500) { # check computability (cf. hobbs.res guard)
    return(.Machine$double.xmax)
  }
  r <- hobbs.res(x)
  sum(r * r)
}
# Hobbs weeds problem -- residual vector.
# Model x[1] / (1 + x[2] * exp(-x[3] * t)) fitted to 12 observed weed
# densities; returns model - data at t = 1..12, or a vector of +Inf
# residuals when |12 * x[3]| exceeds the computability guard.
hobbs.res <- function(x) {
  if (length(x) != 3) stop("hobbs.res -- parameter vector n!=3")
  weed <- c(5.308, 7.24, 9.638, 12.866, 17.069, 23.192, 31.443, 38.558,
            50.156, 62.948, 75.995, 91.972)
  tt <- seq_len(12)
  if (abs(12 * x[3]) > 50) {
    rep(Inf, 12)
  } else {
    x[1] / (1 + x[2] * exp(-x[3] * tt)) - weed
  }
}
# Jacobian of the Hobbs weeds residuals: a 12 x 3 matrix whose columns
# are d res_t / d x_j for j = 1..3, evaluated at parameter vector x.
hobbs.jac <- function(x) {
  tt <- 1:12
  decay <- exp(-x[3] * tt)          # exp(-x3 * t)
  denom <- 1 / (1 + x[2] * decay)   # 1 / (1 + x2 * exp(-x3 * t))
  cbind(
    denom,                                     # d/dx1
    -x[1] * denom * denom * decay,             # d/dx2
    x[1] * denom * denom * decay * x[2] * tt,  # d/dx3
    deparse.level = 0
  )
}
# Gradient of hobbs.f: g = 2 * J' r with J = hobbs.jac(x), r = hobbs.res(x).
# Re-evaluates both (as the original noted, not efficient, but simple).
hobbs.g <- function(x) {
  jacobian <- hobbs.jac(x)
  residual <- hobbs.res(x)
  as.vector(2. * crossprod(jacobian, residual))
}
# Second-derivative tensor of the Hobbs residuals: a 12 x 3 x 3 array
# with rsd[i, j, k] = d^2 res_i / (d x_j d x_k); symmetric in (j, k).
hobbs.rsd<-function(x) { # Jacobian second derivative
rsd<-array(0.0, c(12,3,3))
t<-1:12
yy<-exp(-x[3]*t) # exp(-x3 * t)
zz<-1.0/(1+x[2]*yy) # 1 / (1 + x2 * exp(-x3 * t))
rsd[t,1,1]<- 0.0
rsd[t,2,1]<- -yy*zz*zz
rsd[t,1,2]<- -yy*zz*zz
rsd[t,2,2]<- 2.0*x[1]*yy*yy*zz*zz*zz
rsd[t,3,1]<- t*x[2]*yy*zz*zz
rsd[t,1,3]<- t*x[2]*yy*zz*zz
rsd[t,3,2]<- t*x[1]*yy*zz*zz*(1-2*x[2]*yy*zz)
rsd[t,2,3]<- t*x[1]*yy*zz*zz*(1-2*x[2]*yy*zz)
## rsd[t,3,3]<- 2*t*t*x[1]*x[2]*x[2]*yy*yy*zz*zz*zz
# (commented line above is a superseded formula kept for reference)
rsd[t,3,3]<- -t*t*x[1]*x[2]*yy*zz*zz*(1-2*yy*zz*x[2])
return(rsd)
}
# Hessian of hobbs.f: H = 2 * (sum_i r_i * rsd[i, , ] + J' J), combining
# the residual curvature term with the Gauss-Newton term.
hobbs.h <- function(x) {
  residual <- hobbs.res(x)
  jacobian <- hobbs.jac(x)
  curvature <- hobbs.rsd(x)
  H <- matrix(0, 3, 3)
  # Accumulate the residual * second-derivative contribution element by
  # element, keeping the original summation order.
  for (jdx in 1:3) {
    for (kdx in 1:3) {
      for (idx in 1:12) {
        H[jdx, kdx] <- H[jdx, kdx] + residual[idx] * curvature[idx, jdx, kdx]
      }
    }
  }
  2 * (H + t(jacobian) %*% jacobian)
}
# Optimizer method names to be exercised via opm() below.
allm <- c("BFGS", "CG", "Nelder-Mead", "nlm", "nlminb",
"lbfgsb3c", "Rcgmin", "Rtnmin", "Rvmmin",
"spg", "ucminf", "bobyqa", "hjkb", "hjn",
"subplex")
# Dropped "L-BFGS-B", "newuoa", "nmkb", "snewton", "snewtonm","lbfgs", as they give trouble
# Not an optimx problem, but one in underlying methods
# Three starting points of increasing difficulty for the Hobbs problem;
# each run tries every optimizer in `allm` and prints a ranked summary.
x0 <- c(200, 50, .3)
# This start seems to be OK for all methods
cat("Start for Hobbs:")
print(x0)
cat("Initial value of hobbs.f = ",hobbs.f(x0),"\n")
## Following revealed typo in optimr() for lbfgsb3c
# ahobb01 <- opm(x0, hobbs.f, hobbs.g, hess=hobbs.h, method="lbfgsb3c")
# ahobb01
ahobb0 <- opm(x0, hobbs.f, hobbs.g, hess=hobbs.h, method=allm)
print(summary(ahobb0, order=value))
x1 <- c(1, 1, 1)
# Several methods fail because f or g becomes Inf.
cat("Start for Hobbs:")
print(x1)
cat("Initial value of hobbs.f = ",hobbs.f(x1),"\n")
ahobb1 <- opm(x1, hobbs.f, hobbs.g, hess=hobbs.h, method=allm)
print(summary(ahobb1, order=value))
# ahobb1lbfgsb<- optim(x1, hobbs.f, hobbs.g, method="L-BFGS-B", control=list(trace=3))
# Note that optim alone fails in the above
x1s <- c(100, 10, .1)
# L-BFGS-B and lbfgb3 both fail because f or g becomes Inf.
cat("Start for Hobbs:")
print(x1s)
cat("Initial value of hobbs.f = ",hobbs.f(x1s),"\n")
ahobb1s <- opm(x1s, hobbs.f, hobbs.g, hess=hobbs.h, method=allm)
print(summary(ahobb1s, order=value))
| /tests/hobbs.R | no_license | Giatomo/optimx | R | false | false | 3,536 | r | rm(list=ls())
require(optimx)
## Optimization test function HOBBS
## ?? refs (put in .doc??)
## Nash and Walker-Smith (1987, 1989) ...
# Hobbs weeds problem -- sum-of-squares objective.
# Returns a huge sentinel (.Machine$double.xmax) when the decay parameter
# x[3] is outside the computable region, otherwise the residual sum of
# squares from hobbs.res().
hobbs.f <- function(x) {
  if (abs(12 * x[3]) > 500) { # check computability (cf. hobbs.res guard)
    return(.Machine$double.xmax)
  }
  r <- hobbs.res(x)
  sum(r * r)
}
# Hobbs weeds problem -- residual vector.
# Model x[1] / (1 + x[2] * exp(-x[3] * t)) fitted to 12 observed weed
# densities; returns model - data at t = 1..12, or a vector of +Inf
# residuals when |12 * x[3]| exceeds the computability guard.
hobbs.res <- function(x) {
  if (length(x) != 3) stop("hobbs.res -- parameter vector n!=3")
  weed <- c(5.308, 7.24, 9.638, 12.866, 17.069, 23.192, 31.443, 38.558,
            50.156, 62.948, 75.995, 91.972)
  tt <- seq_len(12)
  if (abs(12 * x[3]) > 50) {
    rep(Inf, 12)
  } else {
    x[1] / (1 + x[2] * exp(-x[3] * tt)) - weed
  }
}
# Jacobian of the Hobbs weeds residuals: a 12 x 3 matrix whose columns
# are d res_t / d x_j for j = 1..3, evaluated at parameter vector x.
hobbs.jac <- function(x) {
  tt <- 1:12
  decay <- exp(-x[3] * tt)          # exp(-x3 * t)
  denom <- 1 / (1 + x[2] * decay)   # 1 / (1 + x2 * exp(-x3 * t))
  cbind(
    denom,                                     # d/dx1
    -x[1] * denom * denom * decay,             # d/dx2
    x[1] * denom * denom * decay * x[2] * tt,  # d/dx3
    deparse.level = 0
  )
}
# Gradient of hobbs.f: g = 2 * J' r with J = hobbs.jac(x), r = hobbs.res(x).
# Re-evaluates both (as the original noted, not efficient, but simple).
hobbs.g <- function(x) {
  jacobian <- hobbs.jac(x)
  residual <- hobbs.res(x)
  as.vector(2. * crossprod(jacobian, residual))
}
# Second-derivative tensor of the Hobbs residuals: a 12 x 3 x 3 array
# with rsd[i, j, k] = d^2 res_i / (d x_j d x_k); symmetric in (j, k).
hobbs.rsd<-function(x) { # Jacobian second derivative
rsd<-array(0.0, c(12,3,3))
t<-1:12
yy<-exp(-x[3]*t) # exp(-x3 * t)
zz<-1.0/(1+x[2]*yy) # 1 / (1 + x2 * exp(-x3 * t))
rsd[t,1,1]<- 0.0
rsd[t,2,1]<- -yy*zz*zz
rsd[t,1,2]<- -yy*zz*zz
rsd[t,2,2]<- 2.0*x[1]*yy*yy*zz*zz*zz
rsd[t,3,1]<- t*x[2]*yy*zz*zz
rsd[t,1,3]<- t*x[2]*yy*zz*zz
rsd[t,3,2]<- t*x[1]*yy*zz*zz*(1-2*x[2]*yy*zz)
rsd[t,2,3]<- t*x[1]*yy*zz*zz*(1-2*x[2]*yy*zz)
## rsd[t,3,3]<- 2*t*t*x[1]*x[2]*x[2]*yy*yy*zz*zz*zz
# (commented line above is a superseded formula kept for reference)
rsd[t,3,3]<- -t*t*x[1]*x[2]*yy*zz*zz*(1-2*yy*zz*x[2])
return(rsd)
}
# Hessian of hobbs.f: H = 2 * (sum_i r_i * rsd[i, , ] + J' J), combining
# the residual curvature term with the Gauss-Newton term.
hobbs.h <- function(x) {
  residual <- hobbs.res(x)
  jacobian <- hobbs.jac(x)
  curvature <- hobbs.rsd(x)
  H <- matrix(0, 3, 3)
  # Accumulate the residual * second-derivative contribution element by
  # element, keeping the original summation order.
  for (jdx in 1:3) {
    for (kdx in 1:3) {
      for (idx in 1:12) {
        H[jdx, kdx] <- H[jdx, kdx] + residual[idx] * curvature[idx, jdx, kdx]
      }
    }
  }
  2 * (H + t(jacobian) %*% jacobian)
}
# Optimizer method names to be exercised via opm() below.
allm <- c("BFGS", "CG", "Nelder-Mead", "nlm", "nlminb",
"lbfgsb3c", "Rcgmin", "Rtnmin", "Rvmmin",
"spg", "ucminf", "bobyqa", "hjkb", "hjn",
"subplex")
# Dropped "L-BFGS-B", "newuoa", "nmkb", "snewton", "snewtonm","lbfgs", as they give trouble
# Not an optimx problem, but one in underlying methods
# Three starting points of increasing difficulty for the Hobbs problem;
# each run tries every optimizer in `allm` and prints a ranked summary.
x0 <- c(200, 50, .3)
# This start seems to be OK for all methods
cat("Start for Hobbs:")
print(x0)
cat("Initial value of hobbs.f = ",hobbs.f(x0),"\n")
## Following revealed typo in optimr() for lbfgsb3c
# ahobb01 <- opm(x0, hobbs.f, hobbs.g, hess=hobbs.h, method="lbfgsb3c")
# ahobb01
ahobb0 <- opm(x0, hobbs.f, hobbs.g, hess=hobbs.h, method=allm)
print(summary(ahobb0, order=value))
x1 <- c(1, 1, 1)
# Several methods fail because f or g becomes Inf.
cat("Start for Hobbs:")
print(x1)
cat("Initial value of hobbs.f = ",hobbs.f(x1),"\n")
ahobb1 <- opm(x1, hobbs.f, hobbs.g, hess=hobbs.h, method=allm)
print(summary(ahobb1, order=value))
# ahobb1lbfgsb<- optim(x1, hobbs.f, hobbs.g, method="L-BFGS-B", control=list(trace=3))
# Note that optim alone fails in the above
x1s <- c(100, 10, .1)
# L-BFGS-B and lbfgb3 both fail because f or g becomes Inf.
cat("Start for Hobbs:")
print(x1s)
cat("Initial value of hobbs.f = ",hobbs.f(x1s),"\n")
ahobb1s <- opm(x1s, hobbs.f, hobbs.g, hess=hobbs.h, method=allm)
print(summary(ahobb1s, order=value))
|
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
#@@@@@@ Based on 2004 Efron's paper @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
#@@@ Efron B. (2004) The estimation of prediction error: covariance
#@@@ penalties and cross-validation,
#@@@ Journal of the American Statistical Association, Vol. 99(467), 619-632
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
library(reshape2)
library(tidyverse)
#Create a sample dataset of X
# Number of simulated observations.
n<-1000
#x1<-sample(1:n,n,replace=T)/n*10
#x2<-sample(1:n,n,replace=T)/n*10
#x3<-x1^2
#x4<-x2^2
#x5<-x1 + x4
#x6<-sample(1:n,n,replace=T)/n*10
# Independent normal covariates with standard deviations 1, 2 and 3.
x1<-rnorm(n)
x2<-rnorm(n)*2
x3<-rnorm(n)*3
# Squared terms, giving the design strong nonlinear dependence.
x4<-x1^2
x5<-x2^2
# x6 is a combination of x1 and x4
x6<-x1 + x4
# Discrete covariate on (0, 10]; note `replace=T` should ideally be TRUE.
x7<-sample(1:n,n,replace=T)/n*10
# True coefficient vector (intercept first), all ones.
b<-c(1,1,1,1,1,1,1,1)
#@@@@@ TRUE MODEL @@@@@@@@@@@@@@@@@@@@@@@@@@@
#@@@@@ Create logodds of probabilities
#Empty model
#Model1
#ytoprob<-(b[1])/5
#clean model with independent variables
#Model2
#ytoprob<-(b[1]+b[2]*x1+b[3]*x2+b[4]*x3)/5
#Excessive models
#Model3
# Linear predictor of the TRUE model (Model 3: all covariates); the /5
# shrinks the log-odds so probabilities are not pushed to 0/1.
ytoprob<-(b[1]+b[2]*x1+b[3]*x2+b[4]*x3+b[5]*x4+b[6]*x5+b[7]*x6+b[8]*x7)/5
#Model4
#ytoprob<-b[1]+b[2]*x1+b[3]*x2+b[4]*x3+b[5]*x4
hist(ytoprob)
#Create TRUE probabilities (inverse-logit of the linear predictor)
Probs<-exp(ytoprob)/(1+exp(ytoprob))
hist(Probs)
mean(Probs)
#@@@@@@@@@@ BINARY OUTCOMES FROM THE MODELS @@@@@@@
#create binary outcomes
Zbin<-rbinom(n,1,Probs)
table(Zbin)
#hist(ytoprob)
#This is the true value of binomial deviance
# (Lambdai is the true log-odds; by construction logit(Probs) == ytoprob.)
Lambdai<-log(Probs/(1-Probs))
hist(Lambdai)
sum(Lambdai)
#Run an estimation model
#ZmodBin<-glm(Zbin~x1+x2+x3+x4+x5+x6,family="binomial")
# Logistic fit using ALL covariates (matches the generating model above).
ZmodBin<-glm(Zbin~x1+x2+x3+x4+x5+x6+x7,family="binomial")
#ZmodBin<-glm(Zbin~x1+x2,family="binomial")
#ZmodBin<-lm(Zbin~x1+x2+x6)
summary(ZmodBin)
names(ZmodBin)
# Fitted probabilities mu-hat from the estimated model.
muhat<-ZmodBin$fitted.values
hist(muhat)
totmean<-mean(muhat)
VarTot<-totmean*(1-totmean)
#Estimate Total Variance
# Per-observation binomial variance mu(1 - mu).
VarEst<-muhat*(1-muhat)
hist(VarEst)
sum(VarEst)
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
#@@@ Estimation of DFs by perturbing one observation at a time (conditional covariance) @
#@@@@ Based on equation 3.22 @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
#@@@@ Discussion in the simulation section shows that df=Omega/2. We have DF=Omega @@@@@@
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# Omega<-rep(0,n)
# DFi<-rep(0,n)
#
# for (k in 1:n)
# {
# Pert1<-Zbin
# Pert2<-Zbin
# Pert1[k]<-1
# Pert2[k]<-0
# Muhat1<-glm(Pert1~x1+x2+x3+x4+x5+x6+x7,family="binomial")$fitted.values
# Muhat2<-glm(Pert2~x1+x2+x3+x4+x5+x6+x7,family="binomial")$fitted.values
# # Muhat1<-glm(Pert1~x1+x2,family="binomial")$fitted.values
# # Muhat2<-glm(Pert2~x1+x2,family="binomial")$fitted.values
# LambdaEst1<-log(Muhat1/(1-Muhat1))
# LambdaEst2<-log(Muhat2/(1-Muhat2))
#
# #Below is not Omega but rather omega/2. Don't need to divide by 2 later.
# Omega[k]<-VarEst[k]*(LambdaEst1[k]-LambdaEst2[k])
# # Should be this, if following logic of equation 2.12, but doesn't work as expected
# DFi[k]<-VarEst[k]*(LambdaEst1[k]-LambdaEst2[k])/2/VarTot
# }
#
# hist(Omega)
# #hist(DFi)
# #sum(DFi)
# #DFraw seems to be working OK
# Dfraw<-sum(Omega)
# Dfraw
#
# #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# #@@@@ Parametric (unconditional) Bootstrap @@@@@@@@@@@@@
# #@@@@ Use the idea of linear regression of y on muhat @@
# #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
#
# mm<-1000
# Simout<-matrix(0,ncol=mm,nrow=n)
# ZbinSim<-matrix(0,ncol=mm,nrow=n)
#
# for (k in 1:mm)
# {
# ZbinSim[,k]<-rbinom(n,1,muhat)
#
# #True values of mu the correct result
# #ZbinSim[,k]<-rbinom(n,1,Probs)
#
# #ZmodBin1<-glm(ZbinSim[,k]~x1+x2+x3+x4+x5+x7,family="binomial")
# ZmodBin1<-glm(ZbinSim[,k]~x1+x2,family="binomial")
# #ZmodBin1<-glm(ZbinSim[,k]~x1+x2+x3+x4+x5,family="binomial")
# Simout[,k]<-ZmodBin1$fitted.values
#
# }
#
#
# CoefReg<-rep(0,n)
# for (k in 1:n)
# {
# CoefReg[k]<-lm(Simout[k,]~ZbinSim[k,])$coefficients[2]
# print(CoefReg[k])
#
# # if (is.na(CoefReg[k])) {print(Simout[k,])
# # plot(ZbinSim[k,],Simout[k,])
# # print(summary(lm(Simout[k,]~ZbinSim[k,])))}
# }
#some bootsrtrap samplescan lead to degenerate samples and geenrate NA in regression
#should go away with very large samples that will have enough variability
#Here we ignore the NA runs and rather than directly calculating the sum
#we calclulate the mean fo non-NA values and multuply by the n
mean(CoefReg,na.rm=T)*n
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
#@@@@@@@ NEURAL NETWORKS @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
#@@@@ Parametric (conditional) Bootstrap @@@@@@@@@@@@@@
#@@@@ Use the calculations in Efron formula 3.17 @@@@@@
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
library(nnet)
targets<-Zbin
indep<-cbind(x1,x2,x3,x4,x5,x6,x7)
trainInd <- sample(1:n,2*n/3)
testInd <- (1:n)[-trainInd]
table(targets[trainInd])
NodeList<-1:10
CorVec<-1:length(NodeList)
#@@@@@@@@@@@@@ Estimating which model performs the best @@@@@@
#@@@@@@@@@@@@@ bestNode is the model @@@@@@@@@@@@@@@@@@@@@@@@@@
# for (k in 1:length(NodeList))
# {
# hidNodes<-NodeList[k]
# NetOut<- nnet(indep[trainInd,], targets[trainInd], size = hidNodes, rang = 0.01,
# decay = 5e-4, maxit = 200,trace=F)
#
# #plot(Probs,NetOut$fitted.values)
# print(c(hidNodes,cor(targets[testInd],predict(NetOut,indep[testInd,]))))
# CorVec[k]<-cor(targets[testInd],predict(NetOut,indep[testInd,]))
# }
# CorVec
# bestNode<-NodeList[order(CorVec)[length(CorVec)]]
# bestNode
#@@@@@@@@@@@@@ Calculating the DF using conditional perturbation @@@@@@@@@@@@@@@@@@@@@@
pert_nnet <- function(sizes, maxits, reps = 1){
df_list = list()
iter = 1
Omega_df = data.frame(matrix(nrow = n*length(maxits)*length(sizes)*reps, ncol = 5))
colnames(Omega_df) = c("rep", "size", "maxit", "omega_num", "omega")
for (r in 1:reps){
Omega<-rep(0,n)
OmegaLarge <- rep(0,n)
DFi<-rep(0,n)
for (k in 1:n)
{
Pert1<-Zbin
Pert2<-Zbin
Pert1[k]<-1
Pert2[k]<-0
col = 1
for (i in 1:length(maxits)){
for (j in 1:length(sizes)){
Muhat1<- nnet(indep, Pert1, size = sizes[j], rang = 0.1,
decay = 5e-4, maxit = maxits[i],trace=F)$fitted.values
Muhat2<- nnet(indep, Pert2, size = sizes[j], rang = 0.1,
decay = 5e-4, maxit = maxits[i],trace=F)$fitted.values
Muhat1<-ifelse(Muhat1>0.9999,0.9999,Muhat1)
Muhat1<-ifelse(Muhat1<0.0001,0.0001,Muhat1)
Muhat2<-ifelse(Muhat2>0.9999,0.9999,Muhat2)
Muhat2<-ifelse(Muhat2<0.0001,0.0001,Muhat2)
LambdaEst1<-log(Muhat1/(1-Muhat1))
LambdaEst2<-log(Muhat2/(1-Muhat2))
Omega_df[iter,]<-list(r, sizes[j], maxits[i], k, VarEst[k]*(LambdaEst1[k]-LambdaEst2[k]))
iter = iter+1
#print(Muhat1[k]-Muhat2[k])
}
}
print(paste(k, "finished"))
# Should be this, if following logic of equation 2.12, but doesn't work as expected
#DFi[k]<-VarEst[k]*(LambdaEst1[k]-LambdaEst2[k])/2/VarTot
}
}
return(Omega_df)
}
# Put in Number of nodes you want to see here
library(reshape2)
library(dplyr)
omega_df = pert_nnet(c(1,2,3), maxits = c(20, 50, 100, 150, 200, 300, 400, 500), reps = 2)
omega_df$lambda_diff = ifelse(omega_df$omega > 0, "pos", "neg")
omega_sum = omega_df %>% group_by(rep, size, maxit) %>% summarise(GDF = sum(omega)) %>%group_by(size,maxit) %>%
summarise(mean_over_reps = mean(GDF), sd_over_reps = sd(GDF))
sum_df = spread(omega_df %>% group_by(size, maxit) %>% count(lambda_diff), lambda_diff, n) %>%
mutate('prop_neg' = neg/(neg + pos))
sum_df$size = as.factor(sum_df$size)
ggplot(sum_df, aes(maxit, prop_neg, col = size)) +
geom_point() +
labs(x = "Max Iterations", y = "Proportion of Negative Omegas", title = paste("N = ", n, ", Partitions = ", n))
omega_df$lambda_diff = ifelse(omega_df$omega > 0, 1, 0)
pos_df = omega_df %>% group_by(size, maxit, omega_num) %>% summarise(prop_pos = sum(lambda_diff), avg_omega = mean(omega))
csums = sapply(omega_list, colSums)
print(csums)
rnames = rownames(csums)
df = as.data.frame(csums) %>% tibble::rownames_to_column("num_nodes") %>%
melt() %>% rename(repetition = variable, GDF = value)
ggplot(df, aes(num_nodes, GDF, colour = repetition)) +
geom_point() +
labs(x = "Num Hidden Nodes", y = "GDF", title = paste("N = ", n))
# ----------------------MULTIPLE POINT PERTURBATION WITH MAXITS ------------
targets<-Zbin
indep<-cbind(x1,x2,x3,x4,x5,x6,x7)
trainInd <- sample(1:n,2*n/3)
testInd <- (1:n)[-trainInd]
table(targets[trainInd])
NodeList<-1:10
CorVec<-1:length(NodeList)
parts = 50
split_data <- function(n, partitions = 100){
samp = seq(1,n)
rand = sample(samp, n, replace = F)
spl = split(rand, ceiling(seq_along(samp) / (n/partitions)))
return(spl)
}
VarParts = list()
split = split_data(n, partitions = parts)
for (p in 1:length(split)){
VarParts[[p]] = VarEst[split[[p]]]
}
pert_nnet_mult <- function(sizes, maxits, reps = 1){
iter = 1
df_list = list()
Omega_df = data.frame(matrix(nrow = parts*length(maxits)*length(sizes)*reps, ncol = 5))
colnames(Omega_df) = c("rep", "size", "maxit", "partition", "omega")
for (r in 1:reps){
Omega<-rep(0,parts)
DFi<-rep(0,parts)
for (k in 1:parts)
{
Pert1<-Zbin
Pert2<-Zbin
inds = split[[k]]
Pert1[inds]<-1
Pert2[inds]<-0
for (i in 1:length(maxits)){
for (j in 1:length(sizes)){
Muhat1<- nnet(indep, Pert1, size = sizes[j], rang = 0.1,
decay = 5e-4, maxit =maxits[i],trace=F)$fitted.values
Muhat2<- nnet(indep, Pert2, size = sizes[j], rang = 0.1,
decay = 5e-4, maxit = maxits[i],trace=F)$fitted.values
Muhat1<-ifelse(Muhat1>0.9999,0.9999,Muhat1)
Muhat1<-ifelse(Muhat1<0.0001,0.0001,Muhat1)
Muhat2<-ifelse(Muhat2>0.9999,0.9999,Muhat2)
Muhat2<-ifelse(Muhat2<0.0001,0.0001,Muhat2)
LambdaEst1<-log(Muhat1/(1-Muhat1))
LambdaEst2<-log(Muhat2/(1-Muhat2))
Omega_df[iter,]<-list(r, sizes[j], maxits[i], k, sum(VarEst[inds]*(LambdaEst1[inds]-
LambdaEst2[inds])))
iter = iter+1
#print(Muhat1[k]-Muhat2[k])
}
}
print(paste(k, "finished"))
# Should be this, if following logic of equation 2.12, but doesn't work as expected
#DFi[k]<-VarEst[k]*(LambdaEst1[k]-LambdaEst2[k])/2/VarTot
}
Omega_df = as.data.frame(Omega_df)
}
return(Omega_df)
}
# Put in Number of nodes you want to see here
omega_list = pert_nnet_mult(1, maxits = c(200), reps = 50)
omega_sum = omega_list %>% group_by(rep, size, maxit) %>% summarise(GDF = sum(omega)) %>%group_by(size,maxit) %>%
summarise(mean_over_reps = mean(GDF), sd_over_reps = sd(GDF))
omega_list$lambda_diff = ifelse(omega_list$omega > 0, "pos", "neg")
sum_df = spread(omega_list %>% group_by(size, maxit) %>% count(lambda_diff), lambda_diff, n) %>%
mutate('prop_neg' = neg/(neg + pos))
sum_df$size = as.factor(sum_df$size)
ggplot(sum_df, aes(maxit, prop_neg, col = size)) +
geom_point() +
labs(x = "Max Iterations", y = "Proportion of Negative Omegas", title = paste("N = ", n, ", Partitions = ", parts))
by_partition = spread(omega_list %>% select(-lambda_diff), rep, omega)
by_partition = by_partition %>% select(-size, -maxit, -partition)
by_partition = as.data.frame(t(by_partition))
ggplot(data = melt(by_partition), aes(variable, value)) +
geom_boxplot() +
coord_flip() +
labs(y = 'Omega Value', x = 'Partition', title = "1 Hidden Node, Maxit = 200")
partition_quantiles = as.data.frame(t(apply
(by_partition, 1, quantile, probs = c(0, 0.1, 0.25, 0.5, 0.75, 0.9, 1))))
csums = sapply(omega_list, colSums)
print(csums)
rnames = rownames(csums)
df = as.data.frame(csums) %>% tibble::rownames_to_column("num_nodes") %>%
melt() %>% rename(repetition = variable, GDF = value)
ggplot(df, aes(num_nodes, GDF, colour = repetition)) +
geom_point() +
labs(x = "Num Hidden Nodes", y = "GDF", title = paste("N = ", n))
# ------------------- Using neuralnet ---------------------------
pert_neuralnet <- function(sizes, reps = 1){
df_list = list()
for (r in 1:reps){
Omega<-rep(0,n)
OmegaLarge <- rep(0,n)
DFi<-rep(0,n)
Omega_df <- matrix(0, nrow = n, ncol = length(sizes))
for (k in 1:n)
{
Pert1<-Zbin
Pert2<-Zbin
Pert1[k]<-1
Pert2[k]<-0
AllSet<-data.frame(Pert1, Pert2, x1,x2,x3,x4,x5,x6,x7)
for (j in 1:length(sizes)){
Muhat1<- neuralnet(Pert1~x1+x2+x3+x4+x5+x6+x7, data=AllSet, stepmax = 200,
hidden= sizes[j], linear.output = FALSE)$net.result[[1]][,1]
Muhat2<- neuralnet(Pert2~x1+x2+x3+x4+x5+x6+x7, data=AllSet, stepmax = 200,
hidden= sizes[j], linear.output = FALSE)$net.result[[1]][,1]
Muhat1<-ifelse(Muhat1>0.9999,0.9999,Muhat1)
Muhat1<-ifelse(Muhat1<0.0001,0.0001,Muhat1)
Muhat2<-ifelse(Muhat2>0.9999,0.9999,Muhat2)
Muhat2<-ifelse(Muhat2<0.0001,0.0001,Muhat2)
LambdaEst1<-log(Muhat1/(1-Muhat1))
LambdaEst2<-log(Muhat2/(1-Muhat2))
Omega_df[k,j]<-VarEst[k]*(LambdaEst1[k]-LambdaEst2[k])
#print(Muhat1[k]-Muhat2[k])
}
print(paste(k, "finished"))
# Should be this, if following logic of equation 2.12, but doesn't work as expected
#DFi[k]<-VarEst[k]*(LambdaEst1[k]-LambdaEst2[k])/2/VarTot
}
Omega_df = as.data.frame(Omega_df)
colnames(Omega_df) = sizes
df_list[[r]] = Omega_df
}
return(df_list)
}
# Put in Number of nodes you want to see here
library(reshape2)
library(dplyr)
omega_list = pert_neuralnet(c(1,2,3), reps = 3)
csums = sapply(omega_list, colSums)
print(csums)
rnames = rownames(csums)
df = as.data.frame(csums) %>% tibble::rownames_to_column("num_nodes") %>%
melt() %>% rename(repetition = variable, GDF = value)
ggplot(df, aes(num_nodes, GDF, colour = repetition)) +
geom_point() +
labs(x = "Num Hidden Nodes", y = "GDF", title = paste("N = ", n))
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ms
#@@@@@@@@@@@@@@ Another package for neural network @@@@@@@@@@@@@@@@
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# library(neuralnet)
# AllSet<-data.frame(Zbin,x1,x2,x3,x4,x5,x6,x7)
# trainInd <- sample(1:n,2*n/3)
# testInd <- (1:n)[-trainInd]
# NodeList<-1:10
# CorVec<-1:length(NodeList)
#
# #@@@@@@@@@@@@@ Estimating which model performs the best @@@@@@
# #@@@@@@@@@@@@@ bestNode is the model @@@@@@@@@@@@@@@@@@@@@@@@@@
#
# for (k in 1:length(NodeList))
# {
# hidNodes<-NodeList[k]
# NetOut<- neuralnet(Zbin~x1+x2+x3+x4+x5+x6+x7, data=AllSet[trainInd,], hidden= hidNodes, linear.output = FALSE)
#
# plot(Probs[trainInd],NetOut$net.result[[1]])
# print(c(hidNodes,cor(targets[testInd],predict(NetOut,AllSet[testInd,]))))
# CorVec[k]<-cor(targets[testInd],predict(NetOut,AllSet[testInd,]))
# }
# CorVec
# bestNode<-NodeList[order(CorVec)[length(CorVec)]]
# bestNode
#
# #@@@@@@@@@@@@@ Calculating the DF using conditional perturbation @@@@@@@@@@@@@@@@@@@@@@
# Omega<-rep(0,n)
# DFi<-rep(0,n)
#
# for (k in 1:n)
# {
#
# AllSet$Pert1<-Zbin
# AllSet$Pert2<-Zbin
# AllSet$Pert1[k]<-1
# AllSet$Pert2[k]<-0
#
# # Muhat1<- neuralnet(Pert1~x1+x2+x3+x4+x5+x6+x7, data=AllSet, hidden=bestNode, linear.output = FALSE)$net.result[[1]]
# # Muhat2<- neuralnet(Pert2~x1+x2+x3+x4+x5+x6+x7, data=AllSet, hidden=bestNode, linear.output = FALSE)$net.result[[1]]
# Muhat1<- neuralnet(Pert1~x1+x2+x3+x4+x5+x6+x7, data=AllSet, hidden=1, linear.output = FALSE)$net.result[[1]]
# Muhat2<- neuralnet(Pert2~x1+x2+x3+x4+x5+x6+x7, data=AllSet, hidden=1, linear.output = FALSE)$net.result[[1]]
# print(Muhat1[k]-Muhat2[k])
#
# #Guard against extreme values
# Muhat1<-ifelse(Muhat1>0.9999,0.9999,Muhat1)
# Muhat1<-ifelse(Muhat1<0.0001,0.0001,Muhat1)
# Muhat2<-ifelse(Muhat2>0.9999,0.9999,Muhat2)
# Muhat2<-ifelse(Muhat2<0.0001,0.0001,Muhat2)
#
# LambdaEst1<-log(Muhat1/(1-Muhat1))
# LambdaEst2<-log(Muhat2/(1-Muhat2))
# Omega[k]<-VarEst[k]*(LambdaEst1[k]-LambdaEst2[k])
# # Should be this, if following logic of equation 2.12, but doesn't work as expected
# DFi[k]<-VarEst[k]*(LambdaEst1[k]-LambdaEst2[k])/2/VarTot
# }
#
# hist(Omega)
# hist(DFi)
# sum(DFi)
#
# #DFraw seems to be working OK
# Dfraw<-sum(Omega)
# mean(Omega,na.rm=T)*n
#
# Dfraw
#
#
#
#
# #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# #@@@@ Parametric (unconditional) Bootstrap @@@@@@@@@@@@@
# #@@@@ Use the calculations in Efron formula 3.17 @@@@@@
# #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# #@@ Just started, haven't implemented yet
#
# mm<-1000
# Simout<-matrix(0,ncol=mm,nrow=n)
# ZbinSim<-matrix(0,ncol=mm,nrow=n)
#
#
#
#
# #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# #@@@@ Trying a new package dglars and gdf function @@@@@@@@@@@@@@@@@
# #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# #@ dglars is using least angle regress (LARS) which is still a regression model
# #install.packages("dglars")
# library(dglars)
# fit <- dglars(Zbin~x1+x2+x3+x4+x5+x6+x7, binomial)
# gdf(fit)
| /RTI/Model_Complexity/nnet_pert.r | no_license | ishanUNC/Internship_Work | R | false | false | 17,570 | r | #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
#@@@@@@ Based on 2004 Efron's paper @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
#@@@ Efron B. (2004) The estimation of prediction error: covariance
#@@@ penalties and cross-validation,
#@@@ Journal of the American Statistical Association, Vol. 99(467), 619-632
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
library(reshape2)
library(tidyverse)
#Create a sample dataset of X
n<-1000
#x1<-sample(1:n,n,replace=T)/n*10
#x2<-sample(1:n,n,replace=T)/n*10
#x3<-x1^2
#x4<-x2^2
#x5<-x1 + x4
#x6<-sample(1:n,n,replace=T)/n*10
x1<-rnorm(n)
x2<-rnorm(n)*2
x3<-rnorm(n)*3
x4<-x1^2
x5<-x2^2
# x6 is a combination of x1 and x4
x6<-x1 + x4
x7<-sample(1:n,n,replace=T)/n*10
b<-c(1,1,1,1,1,1,1,1)
#@@@@@ TRUE MODEL @@@@@@@@@@@@@@@@@@@@@@@@@@@
#@@@@@ Create logodds of probabilities
#Empty model
#Model1
#ytoprob<-(b[1])/5
#clean model with independent variables
#Model2
#ytoprob<-(b[1]+b[2]*x1+b[3]*x2+b[4]*x3)/5
#Excessive models
#Model3
ytoprob<-(b[1]+b[2]*x1+b[3]*x2+b[4]*x3+b[5]*x4+b[6]*x5+b[7]*x6+b[8]*x7)/5
#Model4
#ytoprob<-b[1]+b[2]*x1+b[3]*x2+b[4]*x3+b[5]*x4
hist(ytoprob)
#Create TRUE probabilities
Probs<-exp(ytoprob)/(1+exp(ytoprob))
hist(Probs)
mean(Probs)
#@@@@@@@@@@ BINARY OUTCOMES FROM THE MODELS @@@@@@@
#create binary outcomes
Zbin<-rbinom(n,1,Probs)
table(Zbin)
#hist(ytoprob)
#This is the true value of binomial deviance
Lambdai<-log(Probs/(1-Probs))
hist(Lambdai)
sum(Lambdai)
#Run an estimation model
#ZmodBin<-glm(Zbin~x1+x2+x3+x4+x5+x6,family="binomial")
ZmodBin<-glm(Zbin~x1+x2+x3+x4+x5+x6+x7,family="binomial")
#ZmodBin<-glm(Zbin~x1+x2,family="binomial")
#ZmodBin<-lm(Zbin~x1+x2+x6)
summary(ZmodBin)
names(ZmodBin)
muhat<-ZmodBin$fitted.values
hist(muhat)
totmean<-mean(muhat)
VarTot<-totmean*(1-totmean)
#Estimate Total Variance
VarEst<-muhat*(1-muhat)
hist(VarEst)
sum(VarEst)
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
#@@@ Estimation of DFs by perturbing one observation at a time (conditional covariance) @
#@@@@ Based on equation 3.22 @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
#@@@@ Discussion in the simulation section shows that df=Omega/2. We have DF=Omega @@@@@@
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# Omega<-rep(0,n)
# DFi<-rep(0,n)
#
# for (k in 1:n)
# {
# Pert1<-Zbin
# Pert2<-Zbin
# Pert1[k]<-1
# Pert2[k]<-0
# Muhat1<-glm(Pert1~x1+x2+x3+x4+x5+x6+x7,family="binomial")$fitted.values
# Muhat2<-glm(Pert2~x1+x2+x3+x4+x5+x6+x7,family="binomial")$fitted.values
# # Muhat1<-glm(Pert1~x1+x2,family="binomial")$fitted.values
# # Muhat2<-glm(Pert2~x1+x2,family="binomial")$fitted.values
# LambdaEst1<-log(Muhat1/(1-Muhat1))
# LambdaEst2<-log(Muhat2/(1-Muhat2))
#
# #Below is not Omega but rather omega/2. Don't need to divide by 2 later.
# Omega[k]<-VarEst[k]*(LambdaEst1[k]-LambdaEst2[k])
# # Should be this, if following logic of equation 2.12, but doesn't work as expected
# DFi[k]<-VarEst[k]*(LambdaEst1[k]-LambdaEst2[k])/2/VarTot
# }
#
# hist(Omega)
# #hist(DFi)
# #sum(DFi)
# #DFraw seems to be working OK
# Dfraw<-sum(Omega)
# Dfraw
#
# #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# #@@@@ Parametric (unconditional) Bootstrap @@@@@@@@@@@@@
# #@@@@ Use the idea of linear regression of y on muhat @@
# #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
#
# mm<-1000
# Simout<-matrix(0,ncol=mm,nrow=n)
# ZbinSim<-matrix(0,ncol=mm,nrow=n)
#
# for (k in 1:mm)
# {
# ZbinSim[,k]<-rbinom(n,1,muhat)
#
# #True values of mu the correct result
# #ZbinSim[,k]<-rbinom(n,1,Probs)
#
# #ZmodBin1<-glm(ZbinSim[,k]~x1+x2+x3+x4+x5+x7,family="binomial")
# ZmodBin1<-glm(ZbinSim[,k]~x1+x2,family="binomial")
# #ZmodBin1<-glm(ZbinSim[,k]~x1+x2+x3+x4+x5,family="binomial")
# Simout[,k]<-ZmodBin1$fitted.values
#
# }
#
#
# CoefReg<-rep(0,n)
# for (k in 1:n)
# {
# CoefReg[k]<-lm(Simout[k,]~ZbinSim[k,])$coefficients[2]
# print(CoefReg[k])
#
# # if (is.na(CoefReg[k])) {print(Simout[k,])
# # plot(ZbinSim[k,],Simout[k,])
# # print(summary(lm(Simout[k,]~ZbinSim[k,])))}
# }
#some bootsrtrap samplescan lead to degenerate samples and geenrate NA in regression
#should go away with very large samples that will have enough variability
#Here we ignore the NA runs and rather than directly calculating the sum
#we calclulate the mean fo non-NA values and multuply by the n
mean(CoefReg,na.rm=T)*n
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
#@@@@@@@ NEURAL NETWORKS @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
#@@@@ Parametric (conditional) Bootstrap @@@@@@@@@@@@@@
#@@@@ Use the calculations in Efron formula 3.17 @@@@@@
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
library(nnet)
targets<-Zbin
indep<-cbind(x1,x2,x3,x4,x5,x6,x7)
trainInd <- sample(1:n,2*n/3)
testInd <- (1:n)[-trainInd]
table(targets[trainInd])
NodeList<-1:10
CorVec<-1:length(NodeList)
#@@@@@@@@@@@@@ Estimating which model performs the best @@@@@@
#@@@@@@@@@@@@@ bestNode is the model @@@@@@@@@@@@@@@@@@@@@@@@@@
# for (k in 1:length(NodeList))
# {
# hidNodes<-NodeList[k]
# NetOut<- nnet(indep[trainInd,], targets[trainInd], size = hidNodes, rang = 0.01,
# decay = 5e-4, maxit = 200,trace=F)
#
# #plot(Probs,NetOut$fitted.values)
# print(c(hidNodes,cor(targets[testInd],predict(NetOut,indep[testInd,]))))
# CorVec[k]<-cor(targets[testInd],predict(NetOut,indep[testInd,]))
# }
# CorVec
# bestNode<-NodeList[order(CorVec)[length(CorVec)]]
# bestNode
#@@@@@@@@@@@@@ Calculating the DF using conditional perturbation @@@@@@@@@@@@@@@@@@@@@@
pert_nnet <- function(sizes, maxits, reps = 1){
df_list = list()
iter = 1
Omega_df = data.frame(matrix(nrow = n*length(maxits)*length(sizes)*reps, ncol = 5))
colnames(Omega_df) = c("rep", "size", "maxit", "omega_num", "omega")
for (r in 1:reps){
Omega<-rep(0,n)
OmegaLarge <- rep(0,n)
DFi<-rep(0,n)
for (k in 1:n)
{
Pert1<-Zbin
Pert2<-Zbin
Pert1[k]<-1
Pert2[k]<-0
col = 1
for (i in 1:length(maxits)){
for (j in 1:length(sizes)){
Muhat1<- nnet(indep, Pert1, size = sizes[j], rang = 0.1,
decay = 5e-4, maxit = maxits[i],trace=F)$fitted.values
Muhat2<- nnet(indep, Pert2, size = sizes[j], rang = 0.1,
decay = 5e-4, maxit = maxits[i],trace=F)$fitted.values
Muhat1<-ifelse(Muhat1>0.9999,0.9999,Muhat1)
Muhat1<-ifelse(Muhat1<0.0001,0.0001,Muhat1)
Muhat2<-ifelse(Muhat2>0.9999,0.9999,Muhat2)
Muhat2<-ifelse(Muhat2<0.0001,0.0001,Muhat2)
LambdaEst1<-log(Muhat1/(1-Muhat1))
LambdaEst2<-log(Muhat2/(1-Muhat2))
Omega_df[iter,]<-list(r, sizes[j], maxits[i], k, VarEst[k]*(LambdaEst1[k]-LambdaEst2[k]))
iter = iter+1
#print(Muhat1[k]-Muhat2[k])
}
}
print(paste(k, "finished"))
# Should be this, if following logic of equation 2.12, but doesn't work as expected
#DFi[k]<-VarEst[k]*(LambdaEst1[k]-LambdaEst2[k])/2/VarTot
}
}
return(Omega_df)
}
# Put in Number of nodes you want to see here
library(reshape2)
library(dplyr)
omega_df = pert_nnet(c(1,2,3), maxits = c(20, 50, 100, 150, 200, 300, 400, 500), reps = 2)
omega_df$lambda_diff = ifelse(omega_df$omega > 0, "pos", "neg")
omega_sum = omega_df %>% group_by(rep, size, maxit) %>% summarise(GDF = sum(omega)) %>%group_by(size,maxit) %>%
summarise(mean_over_reps = mean(GDF), sd_over_reps = sd(GDF))
sum_df = spread(omega_df %>% group_by(size, maxit) %>% count(lambda_diff), lambda_diff, n) %>%
mutate('prop_neg' = neg/(neg + pos))
sum_df$size = as.factor(sum_df$size)
ggplot(sum_df, aes(maxit, prop_neg, col = size)) +
geom_point() +
labs(x = "Max Iterations", y = "Proportion of Negative Omegas", title = paste("N = ", n, ", Partitions = ", n))
omega_df$lambda_diff = ifelse(omega_df$omega > 0, 1, 0)
pos_df = omega_df %>% group_by(size, maxit, omega_num) %>% summarise(prop_pos = sum(lambda_diff), avg_omega = mean(omega))
csums = sapply(omega_list, colSums)
print(csums)
rnames = rownames(csums)
df = as.data.frame(csums) %>% tibble::rownames_to_column("num_nodes") %>%
melt() %>% rename(repetition = variable, GDF = value)
ggplot(df, aes(num_nodes, GDF, colour = repetition)) +
geom_point() +
labs(x = "Num Hidden Nodes", y = "GDF", title = paste("N = ", n))
# ----------------------MULTIPLE POINT PERTURBATION WITH MAXITS ------------
targets<-Zbin
indep<-cbind(x1,x2,x3,x4,x5,x6,x7)
trainInd <- sample(1:n,2*n/3)
testInd <- (1:n)[-trainInd]
table(targets[trainInd])
NodeList<-1:10
CorVec<-1:length(NodeList)
parts = 50
split_data <- function(n, partitions = 100){
samp = seq(1,n)
rand = sample(samp, n, replace = F)
spl = split(rand, ceiling(seq_along(samp) / (n/partitions)))
return(spl)
}
VarParts = list()
split = split_data(n, partitions = parts)
for (p in 1:length(split)){
VarParts[[p]] = VarEst[split[[p]]]
}
pert_nnet_mult <- function(sizes, maxits, reps = 1){
iter = 1
df_list = list()
Omega_df = data.frame(matrix(nrow = parts*length(maxits)*length(sizes)*reps, ncol = 5))
colnames(Omega_df) = c("rep", "size", "maxit", "partition", "omega")
for (r in 1:reps){
Omega<-rep(0,parts)
DFi<-rep(0,parts)
for (k in 1:parts)
{
Pert1<-Zbin
Pert2<-Zbin
inds = split[[k]]
Pert1[inds]<-1
Pert2[inds]<-0
for (i in 1:length(maxits)){
for (j in 1:length(sizes)){
Muhat1<- nnet(indep, Pert1, size = sizes[j], rang = 0.1,
decay = 5e-4, maxit =maxits[i],trace=F)$fitted.values
Muhat2<- nnet(indep, Pert2, size = sizes[j], rang = 0.1,
decay = 5e-4, maxit = maxits[i],trace=F)$fitted.values
Muhat1<-ifelse(Muhat1>0.9999,0.9999,Muhat1)
Muhat1<-ifelse(Muhat1<0.0001,0.0001,Muhat1)
Muhat2<-ifelse(Muhat2>0.9999,0.9999,Muhat2)
Muhat2<-ifelse(Muhat2<0.0001,0.0001,Muhat2)
LambdaEst1<-log(Muhat1/(1-Muhat1))
LambdaEst2<-log(Muhat2/(1-Muhat2))
Omega_df[iter,]<-list(r, sizes[j], maxits[i], k, sum(VarEst[inds]*(LambdaEst1[inds]-
LambdaEst2[inds])))
iter = iter+1
#print(Muhat1[k]-Muhat2[k])
}
}
print(paste(k, "finished"))
# Should be this, if following logic of equation 2.12, but doesn't work as expected
#DFi[k]<-VarEst[k]*(LambdaEst1[k]-LambdaEst2[k])/2/VarTot
}
Omega_df = as.data.frame(Omega_df)
}
return(Omega_df)
}
# Put in Number of nodes you want to see here
omega_list = pert_nnet_mult(1, maxits = c(200), reps = 50)
omega_sum = omega_list %>% group_by(rep, size, maxit) %>% summarise(GDF = sum(omega)) %>%group_by(size,maxit) %>%
summarise(mean_over_reps = mean(GDF), sd_over_reps = sd(GDF))
omega_list$lambda_diff = ifelse(omega_list$omega > 0, "pos", "neg")
sum_df = spread(omega_list %>% group_by(size, maxit) %>% count(lambda_diff), lambda_diff, n) %>%
mutate('prop_neg' = neg/(neg + pos))
sum_df$size = as.factor(sum_df$size)
ggplot(sum_df, aes(maxit, prop_neg, col = size)) +
geom_point() +
labs(x = "Max Iterations", y = "Proportion of Negative Omegas", title = paste("N = ", n, ", Partitions = ", parts))
by_partition = spread(omega_list %>% select(-lambda_diff), rep, omega)
by_partition = by_partition %>% select(-size, -maxit, -partition)
by_partition = as.data.frame(t(by_partition))
ggplot(data = melt(by_partition), aes(variable, value)) +
geom_boxplot() +
coord_flip() +
labs(y = 'Omega Value', x = 'Partition', title = "1 Hidden Node, Maxit = 200")
partition_quantiles = as.data.frame(t(apply
(by_partition, 1, quantile, probs = c(0, 0.1, 0.25, 0.5, 0.75, 0.9, 1))))
csums = sapply(omega_list, colSums)
print(csums)
rnames = rownames(csums)
df = as.data.frame(csums) %>% tibble::rownames_to_column("num_nodes") %>%
melt() %>% rename(repetition = variable, GDF = value)
ggplot(df, aes(num_nodes, GDF, colour = repetition)) +
geom_point() +
labs(x = "Num Hidden Nodes", y = "GDF", title = paste("N = ", n))
# ------------------- Using neuralnet ---------------------------
pert_neuralnet <- function(sizes, reps = 1){
df_list = list()
for (r in 1:reps){
Omega<-rep(0,n)
OmegaLarge <- rep(0,n)
DFi<-rep(0,n)
Omega_df <- matrix(0, nrow = n, ncol = length(sizes))
for (k in 1:n)
{
Pert1<-Zbin
Pert2<-Zbin
Pert1[k]<-1
Pert2[k]<-0
AllSet<-data.frame(Pert1, Pert2, x1,x2,x3,x4,x5,x6,x7)
for (j in 1:length(sizes)){
Muhat1<- neuralnet(Pert1~x1+x2+x3+x4+x5+x6+x7, data=AllSet, stepmax = 200,
hidden= sizes[j], linear.output = FALSE)$net.result[[1]][,1]
Muhat2<- neuralnet(Pert2~x1+x2+x3+x4+x5+x6+x7, data=AllSet, stepmax = 200,
hidden= sizes[j], linear.output = FALSE)$net.result[[1]][,1]
Muhat1<-ifelse(Muhat1>0.9999,0.9999,Muhat1)
Muhat1<-ifelse(Muhat1<0.0001,0.0001,Muhat1)
Muhat2<-ifelse(Muhat2>0.9999,0.9999,Muhat2)
Muhat2<-ifelse(Muhat2<0.0001,0.0001,Muhat2)
LambdaEst1<-log(Muhat1/(1-Muhat1))
LambdaEst2<-log(Muhat2/(1-Muhat2))
Omega_df[k,j]<-VarEst[k]*(LambdaEst1[k]-LambdaEst2[k])
#print(Muhat1[k]-Muhat2[k])
}
print(paste(k, "finished"))
# Should be this, if following logic of equation 2.12, but doesn't work as expected
#DFi[k]<-VarEst[k]*(LambdaEst1[k]-LambdaEst2[k])/2/VarTot
}
Omega_df = as.data.frame(Omega_df)
colnames(Omega_df) = sizes
df_list[[r]] = Omega_df
}
return(df_list)
}
# Put in Number of nodes you want to see here
library(reshape2)
library(dplyr)
omega_list = pert_neuralnet(c(1,2,3), reps = 3)
csums = sapply(omega_list, colSums)
print(csums)
rnames = rownames(csums)
df = as.data.frame(csums) %>% tibble::rownames_to_column("num_nodes") %>%
melt() %>% rename(repetition = variable, GDF = value)
ggplot(df, aes(num_nodes, GDF, colour = repetition)) +
geom_point() +
labs(x = "Num Hidden Nodes", y = "GDF", title = paste("N = ", n))
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@ms
#@@@@@@@@@@@@@@ Another package for neural network @@@@@@@@@@@@@@@@
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# library(neuralnet)
# AllSet<-data.frame(Zbin,x1,x2,x3,x4,x5,x6,x7)
# trainInd <- sample(1:n,2*n/3)
# testInd <- (1:n)[-trainInd]
# NodeList<-1:10
# CorVec<-1:length(NodeList)
#
# #@@@@@@@@@@@@@ Estimating which model performs the best @@@@@@
# #@@@@@@@@@@@@@ bestNode is the model @@@@@@@@@@@@@@@@@@@@@@@@@@
#
# for (k in 1:length(NodeList))
# {
# hidNodes<-NodeList[k]
# NetOut<- neuralnet(Zbin~x1+x2+x3+x4+x5+x6+x7, data=AllSet[trainInd,], hidden= hidNodes, linear.output = FALSE)
#
# plot(Probs[trainInd],NetOut$net.result[[1]])
# print(c(hidNodes,cor(targets[testInd],predict(NetOut,AllSet[testInd,]))))
# CorVec[k]<-cor(targets[testInd],predict(NetOut,AllSet[testInd,]))
# }
# CorVec
# bestNode<-NodeList[order(CorVec)[length(CorVec)]]
# bestNode
#
# #@@@@@@@@@@@@@ Calculating the DF using conditional perturbation @@@@@@@@@@@@@@@@@@@@@@
# Omega<-rep(0,n)
# DFi<-rep(0,n)
#
# for (k in 1:n)
# {
#
# AllSet$Pert1<-Zbin
# AllSet$Pert2<-Zbin
# AllSet$Pert1[k]<-1
# AllSet$Pert2[k]<-0
#
# # Muhat1<- neuralnet(Pert1~x1+x2+x3+x4+x5+x6+x7, data=AllSet, hidden=bestNode, linear.output = FALSE)$net.result[[1]]
# # Muhat2<- neuralnet(Pert2~x1+x2+x3+x4+x5+x6+x7, data=AllSet, hidden=bestNode, linear.output = FALSE)$net.result[[1]]
# Muhat1<- neuralnet(Pert1~x1+x2+x3+x4+x5+x6+x7, data=AllSet, hidden=1, linear.output = FALSE)$net.result[[1]]
# Muhat2<- neuralnet(Pert2~x1+x2+x3+x4+x5+x6+x7, data=AllSet, hidden=1, linear.output = FALSE)$net.result[[1]]
# print(Muhat1[k]-Muhat2[k])
#
# #Guard against extreme values
# Muhat1<-ifelse(Muhat1>0.9999,0.9999,Muhat1)
# Muhat1<-ifelse(Muhat1<0.0001,0.0001,Muhat1)
# Muhat2<-ifelse(Muhat2>0.9999,0.9999,Muhat2)
# Muhat2<-ifelse(Muhat2<0.0001,0.0001,Muhat2)
#
# LambdaEst1<-log(Muhat1/(1-Muhat1))
# LambdaEst2<-log(Muhat2/(1-Muhat2))
# Omega[k]<-VarEst[k]*(LambdaEst1[k]-LambdaEst2[k])
# # Should be this, if following logic of equation 2.12, but doesn't work as expected
# DFi[k]<-VarEst[k]*(LambdaEst1[k]-LambdaEst2[k])/2/VarTot
# }
#
# hist(Omega)
# hist(DFi)
# sum(DFi)
#
# #DFraw seems to be working OK
# Dfraw<-sum(Omega)
# mean(Omega,na.rm=T)*n
#
# Dfraw
#
#
#
#
# #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# #@@@@ Parametric (unconditional) Bootstrap @@@@@@@@@@@@@
# #@@@@ Use the calculations in Efron formula 3.17 @@@@@@
# #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# #@@ Just started, haven't implemented yet
#
# mm<-1000
# Simout<-matrix(0,ncol=mm,nrow=n)
# ZbinSim<-matrix(0,ncol=mm,nrow=n)
#
#
#
#
# #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# #@@@@ Trying a new package dglars and gdf function @@@@@@@@@@@@@@@@@
# #@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# #@ dglars is using least angle regress (LARS) which is still a regression model
# #install.packages("dglars")
# library(dglars)
# fit <- dglars(Zbin~x1+x2+x3+x4+x5+x6+x7, binomial)
# gdf(fit)
|
library(EpiDynamics)
### Name: sir2AgeClasses
### Title: SIR model with 2 age classes (P 3.3).
### Aliases: sir2AgeClasses
### ** Examples
# Parameters and initial conditions.
parameters <- c(betaCC = 100, betaCA = 10, betaAC = 10, betaAA = 20,
gamma = 10, lC = 0.0666667, muC = 0.0, muA = 0.016667)
initials <- c(SC = 0.1, IC = 0.0001, SA = 0.1, IA = 0.0001)
# Solve and plot.
sir2AgeClasses <- sir2AgeClasses(pars = parameters,
init = initials, time = seq(0, 100, 0.01))
PlotMods(sir2AgeClasses, variables = c('IA', 'IC'), grid = FALSE)
| /data/genthat_extracted_code/EpiDynamics/examples/sir2AgeClasses.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 591 | r | library(EpiDynamics)
### Name: sir2AgeClasses
### Title: SIR model with 2 age classes (P 3.3).
### Aliases: sir2AgeClasses
### ** Examples
# Parameters and initial conditions.
parameters <- c(betaCC = 100, betaCA = 10, betaAC = 10, betaAA = 20,
gamma = 10, lC = 0.0666667, muC = 0.0, muA = 0.016667)
initials <- c(SC = 0.1, IC = 0.0001, SA = 0.1, IA = 0.0001)
# Solve and plot.
sir2AgeClasses <- sir2AgeClasses(pars = parameters,
init = initials, time = seq(0, 100, 0.01))
PlotMods(sir2AgeClasses, variables = c('IA', 'IC'), grid = FALSE)
|
# RipRAM Random Forest Draft
# January 29, 2021
# Heili Lowman
# The following script will walk through a random forest created to predict state-wide RipRAM scores, with datasets from Kevin O'Connor, SMC, and StreamCat databases. The dependent variable in this case will be the Riparian Rapid Assessment Method (RipRAM) index state-wide.
# Step One - Load In ------------------------------------------------------
# Load packages.
library(quantregForest)
library(caret)
library(tidyverse)
library(tidymodels)
library(skimr)
library(sf)
library(ggspatial)
library(nhdplusTools)
library(patchwork)
library(Metrics)
library(gt)
library(sp)
library(maptools)
library(rgdal)
library(dataRetrieval) # New USGS package used to extract COMIDs from lat/long sf points.
# Load datasets.
# Need to bind RipRAM data to NHD CA dataset to get COMIDs.
# Load in cleaned dataset from Kevin O'Connor's spreadsheet.
ripram_df <- read_csv("RipRAM_clean_012621.csv")
# Load in NHD_Plus_CA dataset from Annie as well as watersheds from Jeff.
# Full state of California
nhd_ca <- read_sf("/Users/heilil/Desktop/hw_datasets/NHD_Plus_CA/NHDPlus_V2_FLowline_CA.shp") %>%
mutate(COMID = as.numeric(COMID))
# Spatial Join Method #1:
# ripram_sf <- st_as_sf(ripram_df, # create sf compatible dataframe
# coords = c("D_long", "D_lat"), # identify lon & lat
# remove = F, # do not remove lat/lon columns
# crs = 4269) # use NAD83 projection
#
# nhd_z <- st_zm(nhd_ca)
#
# EPSG <- make_EPSG() # create data frame of available EPSG codes
# EPSG[grepl("NAD83$", EPSG$note), ] # search for NAD 83 code
# Units are in meters
#
# Join NHD dataset to RipRAM dataset.
# Using this method, provided by O. Liu, since it's closest to the ArcGIS workflow.
# stream_samples_join <- st_join(nhd_ca, ripram_sf,
# join = st_is_within_distance, # the predicate to st_join
# dist = 40) # joins samples within 40 m distance
# stream_remaining <- stream_samples_join %>%
# filter(SiteTag != "NA")
# Spatial Join Method #2:
# Another option is to make polygon buffer from lines.
# This yielded a lot of duplicate matches (150+) so I chose to go with the above workflow.
#streams_buff <- st_buffer(nhd_z, dist = 0.001) # width/thickness = 0.001 degrees
#streams_samples_join <- st_join(streams_buff, ripram_sf) # Joins points to polygons made in the line above.
#streams_remaining <- streams_samples_join %>%
# filter(SiteTag != "NA")
#
# Exporting dataset for Annie to double-check in ArcGIS. It matched pretty well but went with method #3 to avoid issues in small catchments that may arise from snapping to flowlines.
# as_tibble(stream_remaining) %>% # remove geometry
# select(c(COMID, D_lat, D_long)) %>% # select necessary columns
# write_csv("ripram_sites.csv") # export as .csv
# Spatial Join Method #3:
# I'll be using the USGS' dataRetrieval package function findNLDI(), which uses the NHD catchment boundaries instead of flowlines.
test_comid <- findNLDI(location = c(-89.362239, 43.090266)) # testing to be sure this generates COMID 13293750
test <- findNLDI(location = c(ripram_df$D_long[1], ripram_df$D_lat[1])) # this works
# After a lot of testing, it seems the findNLDI function can only take one input at a time, so I need to figure out a way to map all of the rows into it one by one.
# Extract site coordinate vectors from the RipRAM dataset.
long <- ripram_df$D_long # vector of longitudes
lat <- ripram_df$D_lat   # vector of latitudes
# Number of sites drives the loop below. Previously hard-coded as 360, which
# silently breaks (or drops sites) if the input spreadsheet gains or loses rows.
n_sites <- nrow(ripram_df)
# One-row, wide data frame to receive one COMID per site (auto-named X1..Xn,
# which the pivot_longer(starts_with("X")) step below relies on).
new_df <- data.frame(matrix(ncol = n_sites, nrow = 1))
# findNLDI() only accepts a single location per call, so query sites one at a time.
for (k in seq_len(n_sites)) {
  # Result must be unlisted because of findNLDI's nested output structure;
  # pivot_wider() collapses the sourceName/comid pair into a single value.
  new_df[k] <- as.data.frame.list(unlist(findNLDI(location = c(long[k], lat[k]), no_sf = TRUE))) %>%
    pivot_wider(names_from = sourceName, values_from = comid)
}
# I couldn't quite figure out how to get the dataset to format properly, so I'm creating a WIDE dataframe in the for loop above. I QAQC'ed a few by hand, and they are indeed printing out the COMIDs.
ripram_comids_beta <- new_df %>% # take the wide dataset
pivot_longer(cols = starts_with("X"),
names_to = "index",
values_to = "comid") # pivot this into a column
ripram_comid <- ripram_df %>%
mutate(COMID = as.numeric(ripram_comids_beta$comid), # bind said column to a new dataframe that will be used below
RIPRAM = Idx_Score) # and create newly named score column
# Watershed characteristics' data available from StreamCat.
ca <- read_csv("streamcat_params.csv")
skim(ca)
str(ca) # Checking to be sure COMID is numeric in both datasets.
# Perennial stream assessment data available from SCCWRP server.
ps6 <- read_csv("ps6_params.csv")
# In the ps6_rf_data script, if there are multiple Length_Fin measures for a given COMID, I have chosen the maximum of them and the associated PSA6 designation with that maximum.
# Bind the datasets together.
mydf <- ripram_comid %>%
select(SiteTag, COMID, RIPRAM) %>%
inner_join(ca) %>% # Join with StreamCat watershed characteristics.
inner_join(ps6) %>% # Join with PSA region dataset.
select(-c(PctOpCat, PctOpWs, PctOpCatRp100, PctOpWsRp100, NPDESDensCat,
NPDESDensWs, TRIDensCat, TRIDensWs, SuperfundDensCat, SuperfundDensWs)) # Remove "open" land use and discharge site columns.
skim(mydf) # Examing completeness of this joined dataset.
length(unique(mydf$COMID)) # Checking for duplicates. 219 unique COMIDs. Not many...
# Pull out only one instance of each COMID.
set.seed(1) # Every time I run the code below, it's based on the same random pull of data.
# Where a COMID appears more than once, keep a single randomly chosen record so
# each stream reach contributes exactly one observation to the model.
mydf2 <- mydf %>%
group_by(COMID) %>%
sample_n(size = 1) %>%
ungroup()
skim(mydf2) # Checking to make sure the dataset is complete.
# Important to have complete datasets for training data. For testing data, it's less critical.
# Step Two - Training Data ------------------------------------------------
# Create calibration and validation splits with tidymodels initial_split() function.
set.seed(4)
mydf2_split <- mydf2 %>%
initial_split(prop = 0.75, strata = PSA6) # splits data into training and testing set.
# default is 3/4ths split (but 75% training, 25% testing).
# Stratification (strata) = grouping training/testing sets by region, state, etc.
# Using the "strata" call ensures the number of data points in the training data is equivalent to the proportions in the original data set. (Strata below 10% of the total are pooled together.)
# Create a training data set with the training() function
# Pulls from training and testing sets created by initial_split()
mydf2_train <- training(mydf2_split)
mydf2_test <- testing(mydf2_split)
# Examine the environment to be sure # of observations looks like the 75/25 split. 165:54.
# Create a separate dataset of available COMIDS that were not used in the training dataset.
nottrain <- ca %>% # all COMIDS from StreamCat data, sampled or not
filter(!COMID %in% mydf2_train$COMID) # Removing sites used to train the model. n = 140,545
# Step Three - Kitchen Sink model -----------------------------------------
# Create finalized training dataset and include all possible variables.
rf_dat <- mydf2_train %>%
select(-SiteTag, -COMID, -PSA6, -Length_Fin)
# Random forest --
# a decision tree model, using predictors to answer dichotomous questions to create nested splits.
# no pruning happens - rather, multiple trees are built (the forest) and then you are looking for consensus across trees
# training data goes down the tree and ends up in a terminal node.
# if testing data goes down the same route, then this upholds our conclusions. Or, if it goes awry, this allows us to look for patterns in how it goes awry.
set.seed(2) # assures the data pulled is random, but sets it for the run below (makes outcome stable)
# Kitchen-sink random forest: RIPRAM score vs. all candidate StreamCat predictors.
myrf <- randomForest(y = rf_dat$RIPRAM,   # dependent variable
                     x = rf_dat %>%
                       select(-RIPRAM),   # selecting all predictor variables
                     importance = TRUE,   # how useful is a predictor in predicting values (nothing causal)
                     proximity = TRUE,
                     # BUG FIX: randomForest's argument is 'ntree' (singular).
                     # The original 'ntrees = 500' was silently swallowed by '...'
                     # and the default (500) used, so results are unchanged here,
                     # but any other value would have been silently ignored.
                     ntree = 500)         # 500 trees.
myrf # examine the results.
# 37.69% variance explained.
summary(myrf)
# mtry allows you to parameterize the number of splits
plot(myrf)
# model performance appears to improve most at ~300 trees
varImpPlot(myrf)
# displays which variables are most important
# helps to winnow down list of predictors
# recommended to weigh left pane more
# right pane also shows how evenly things split based on the list of predictors
# values close to 0 can be dropped, but don't have to be
# road, mine, and dam density appear to have the greatest impact
importance <- myrf$importance
View(importance)
# displays the data plotted in the plot above
# predict()
# returns out of bag predictions for training data
# in the bag: every time a tree is built, it uses ~80% of the original 75% we set aside from the original dataset used to create a tree to assure random data selection
# out of bag: looking at the remaining 20% of the training data to predict, when you want to know what your model does at the training location sites
# Predict RIPRAM scores state-wide for all COMIDs.
nottrain_prediction <- nottrain %>% # taking all available COMIDS, that haven't been used in training
na.omit() %>% # remove NAs
mutate(ripram_predicted = predict(myrf, newdata = nottrain %>% na.omit())) # using developed model (myrf), inputting predictor variables (nottrain - which contains COMIDs and associated StreamCat data) to predict output/dependent variable (ripram_predicted a.k.a. RIPRAM).
# rePredict RIPRAM scores for training data.
mydf2_train$ripram_predicted <- predict(myrf) # Add column of predicted RIPRAM values to training dataset.
# Creates new dataset of bound rows for both ...
ca_predictions <- bind_rows(nottrain_prediction %>%
mutate(Set = "Non-training"), # statewide COMIDs (not used for training)
mydf2_train %>%
mutate(Set = "Training")) # COMIDS from training dataset
# This creates the dataset that will be plotted to create a state-wide plot of predicted CRAM scores.
# Plot the data.
rf_plot1 <- ggplot(ca_predictions, aes(x = PctImp2011CatRp100, y = ripram_predicted)) +
geom_point(alpha = 0.1) +
labs(x = "Mean % imperviousness within catchment and within a 100-m buffer of NHD stream lines",
y = "Predicted RIPRAM Score") +
theme_classic() +
facet_wrap(.~Set)
rf_plot1
# Step Four - Predictor Selection -----------------------------------------
# Using caret to select the best predictors
# What are the parameters you want to use to run recursive feature elimination (rfe)?
my_ctrl <- rfeControl(functions = rfFuncs,
method = "cv",
verbose = FALSE,
returnResamp = "all")
# rfe = recursive feature elimination
# THIS STEP TAKES FOR-EV-ER!!!
set.seed(22)
# Recursive feature elimination over candidate predictor-subset sizes.
my_rfe <- rfe(y = rf_dat$RIPRAM,                # set dependent variable
              x = rf_dat %>% select(-RIPRAM),   # set predictor variables
              # BUG FIX: caret::rfe's argument is 'sizes' (plural). The original
              # 'size =' was absorbed by '...' and ignored, so rfe silently fell
              # back to its default sizes of 2^(2:4) = c(4, 8, 16).
              sizes = c(3:10, 15, 20, 25, 30),  # sets how many variables are in the overall model
              # I have 34 total possible variables, so I've chosen increments of 5 to look at.
              rfeControl = my_ctrl)             # pull in control from above
# can you make your model even simpler?
# the following will pick a model with the smallest number of predictor variables based on the tolerance ("tol") that you specify (how much less than the best are you willing to tolerate?)
my_size <- pickSizeTolerance(my_rfe$results, metric = "RMSE", tol = 1, maximize = F)
# higher tol (~10) gives you less variables
# lower tol (~1) gives you more variables - "I'd like the simplest model within 1% of the best model."
pickVars(my_rfe$variables, size = my_size)
# pickVars (25): PctImp2011WsRp100, PctImp2011CatRp100, PctAgCat, FertWs, CanalDensWs
# PctAgWs, PctImp2011Ws, PctImp2011Cat, FertCat, PctUrbCatRp100
# PctAgCatRp100, PctAgWsRp100, CBNFCat, RdCrsWs, ManureWs
# AgKffactCat, AgKffactWs, PctUrbWsRp100, CBNFWs, PctUrbCat
# Proceed with a regular RF that yields mean weighted values and fit those into the following classification scheme:
#Likely condition approach: Compare mean to three RIPRAM thresholds (??, ??, ??) based on condition classes.
# Very likely altered: mean < ??
# Likely altered: mean < ??
# Possibly altered: mean < ??
# Likely unaltered: mean >= ??
# Predict scores using the above 20 variables:
# Create re-finalized training dataset and include all possible variables.
rf_dat2 <- mydf2_train %>%
select(RIPRAM, PctImp2011WsRp100, PctImp2011CatRp100, PctAgCat, FertWs, CanalDensWs, PctAgWs, PctImp2011Ws, PctImp2011Cat, FertCat, PctUrbCatRp100, PctAgCatRp100, PctAgWsRp100, CBNFCat, RdCrsWs, ManureWs, AgKffactCat, AgKffactWs, PctUrbWsRp100, CBNFWs, PctUrbCat)
set.seed(4) # assures the data pulled is random, but sets it for the run below (makes outcome stable)
# Refit the random forest using only the predictors selected by rfe above.
myrf2 <- randomForest(y = rf_dat2$RIPRAM,  # dependent variable
                      x = rf_dat2 %>%
                        select(-RIPRAM),   # the rfe-selected predictor set
                      importance = TRUE,
                      proximity = TRUE,
                      # BUG FIX: argument is 'ntree', not 'ntrees'; the original
                      # spelling was silently ignored via '...' (default 500 used,
                      # so behavior here is unchanged).
                      ntree = 500)
myrf2 # examine the results.
# 38.97% variance explained.
summary(myrf2)
plot(myrf2) # need min of 200 trees.
varImpPlot(myrf2)
importance2 <- as.data.frame(as.table(myrf2$importance))
View(importance2) # displays the data plotted in the plot above
# Nicer ggplot variable importance plot.
vip_plot_a <- importance2 %>%
filter(Var2 == "%IncMSE") %>%
mutate(Var1 = factor(Var1)) %>%
mutate(Var1_f = fct_reorder(Var1, Freq)) %>%
ggplot(aes(x = Freq, y = Var1_f)) +
geom_point(size = 3, alpha = 0.75) +
labs(x = "% Importance (MSE)",
y = "Variables") +
theme_bw()
vip_plot_b <- importance2 %>%
filter(Var2 == "IncNodePurity") %>%
mutate(Var1 = factor(Var1)) %>%
mutate(Var1_f = fct_reorder(Var1, Freq)) %>%
ggplot(aes(x = Freq, y = Var1_f)) +
geom_point(size = 3, alpha = 0.75) +
labs(x = "Node Purity",
y = "Variables") +
theme_bw()
vip_plot <- vip_plot_a + vip_plot_b
vip_plot
# ggsave("ripram_vip_plot.png",
# path = "/Users/heilil/Desktop/R_figures",
# width = 25,
# height = 10,
# units = "cm"
# )
# predict(myrf2) # returns out of bag predictions for training data
# Predict RIPRAM scores state-wide.
nottrain_prediction2 <- nottrain %>% # taking all COMIDS that haven't been used in training
na.omit() %>% # remove NAs
mutate(ripram_predicted = predict(myrf2, newdata = nottrain %>% na.omit())) # using developed model (myrf2), inputting predictor variables (nottrain - COMIDs and associated StreamCat data) to predict output/dependent variable (ripram_predicted a.k.a. RIPRAM).
# rePredict RIPRAM scores for training and testing data (to be used in validation below).
mydf2_train2 <- mydf2_train
mydf2_train2$ripram_predicted <- predict(myrf2) # Add column of predicted RIPRAM scores to training dataset.
mydf2_test2 <- mydf2_test %>%
mutate(ripram_predicted = predict(myrf2, newdata = mydf2_test %>% select(-c(SiteTag, RIPRAM, PSA6, Length_Fin)))) # Adds column of predicted RIPRAM values to testing dataset.
# Creates new dataset of bound rows for both ...
ca_predictions2 <- bind_rows(nottrain_prediction2 %>%
mutate(Set = "Non-training"), # statewide COMIDs (not used for training data)
mydf2_train2 %>%
mutate(Set = "Training")) # COMIDS from training dataset (used for training the model).
# This creates the dataset that will be plotted.
# Create table of number of sites that fall into each category.
# NEEDS TO BE REDONE WITH ACTUAL CLASSIFICATIONS!
# Add classification column.
# Bin predicted RIPRAM scores into likely-condition classes.
# NOTE(review): the 60/75/90 cutoffs are placeholders per the script's own
# earlier comment ("NEEDS TO BE REDONE WITH ACTUAL CLASSIFICATIONS!") —
# confirm final thresholds before reporting results.
ca_predictions2 <- ca_predictions2 %>%
mutate(classification = case_when(ripram_predicted < 60 ~"Very Likely Altered",
ripram_predicted >= 60 & ripram_predicted < 75 ~"Likely Altered",
ripram_predicted >= 75 & ripram_predicted < 90 ~"Possibly Altered",
ripram_predicted >= 90 ~"Likely Unaltered")) %>%
mutate(class_f = factor(classification, levels = c("Very Likely Altered", "Likely Altered", "Possibly Altered", "Likely Unaltered"))) # relevel classifications
#### Results .csv ####
# Export results.
#write_csv(ca_predictions2, "ripram_rf_results.csv")
# Summary table by site #.
ca_summary <- ca_predictions2 %>%
count(class_f) # count sites statewide by classification
# The numbering is greatly skewed to the "possibly altered" classification, so perhaps other thresholds are necessary.
# Summary table by stream length (m)
ca_summary_length <- ca_predictions2 %>%
group_by(class_f) %>% # group by classification
summarize(length = sum(Length_Fin, na.rm=TRUE)) # sum stream lengths
# Join and export.
ca_sum <- full_join(ca_summary, ca_summary_length)
#write_csv(ca_sum, "ripram_rf_results_summary.csv")
# Step Five - Quantile Regression model -----------------------------------
# Note - for the Healthy Watersheds Project, I did not pursue this structure, but I've kept some example code below in case future iterations call for it.
# Quantile random forest regression mode, instead of looking at the mode of trees, can compare to 10th, 50th, 90th percentiles etc.
# Need to make a new dataset taking the above results of pickVars into account.
# Create finalized training dataset and include all possible variables.
# qrf_dat <- mydf2_train %>%
# select(asci, RdCrsWs, PctAgWs, PctUrbWsRp100, PctOpWsRp100, PctOpWs, DamDensWs, RdDensWs, NABD_DensWs, PctUrbWs, PctUrbCatRp100, RdDensWsRp100, PctOpCat, PctUrbCat, RdDensCat, CBNFWs, PctOpCatRp100, PctAgWsRp100, TRIDensWs, AgKffactWs, FertWs)
# set.seed(20)
# myqrf <- quantregForest(y = qrf_dat$asci, # dependent variable
# x = qrf_dat %>%
# select(-asci),
# importance = T,
# proximity = T,
# keep.inbag=T,
# ntrees = 500)
#predict(myqrf) # automatically presents 10th %tile, median, and 90th %tile
#predict(myqrf, what=c(0.2, 0.3, 0.999)) # to print specific quantiles
#plot(myqrf) # plots the results.
# Again appears to improve after ~100 trees.
# Step Six - Model validation ---------------------------------------------
# Compare predicted vs. actual results, including by PSA region.
# Adding lines of slope=1 and linear models to each plot.
val1 <- ggplot(mydf2_train2, aes(x = ripram_predicted, y = RIPRAM)) +
geom_point(color = "#2A3927", alpha = 0.5) +
geom_smooth(method = "lm", se = FALSE, color = "#2A3927") +
labs(x = "RIPRAM predicted",
y = "RIPRAM measured",
title = "Training Data\nn=165") +
geom_abline(intercept = 0, slope = 1) +
theme_bw()
val1
lm1 <- lm(RIPRAM~ripram_predicted, data = mydf2_train2)
summary(lm1)
val2 <- ggplot(mydf2_test2, aes(x = ripram_predicted, y = RIPRAM)) +
geom_point(color = "#3793EC", alpha = 0.5) +
geom_smooth(method = "lm", se = FALSE, color = "#3793EC") +
scale_x_continuous(breaks = c(0.5, 0.7, 0.9)) +
labs(x = "RIPRAM predicted",
y = "RIPRAM measured",
title = "Testing Data\nn=54") +
geom_abline(intercept = 0, slope = 1) +
theme_bw()
val2
lm2 <- lm(RIPRAM~ripram_predicted, data = mydf2_test2)
summary(lm2)
# Create the full testing + training dataset to plot together.
mydf2_test2$set <- "Testing"
mydf2_train2$set <- "Training"
full_train_test <- bind_rows(mydf2_test2, mydf2_train2) %>%
mutate(set_f = factor(set, levels = c("Training", "Testing")))
val3 <- ggplot(full_train_test, aes(x = ripram_predicted, y = RIPRAM, color = set_f)) +
geom_point(alpha = 0.5) +
geom_smooth(method = "lm", se = FALSE) +
scale_color_manual(name = "Set", values = c("#2A3927", "#3793EC"), drop = FALSE) +
labs(x = "RIPRAM predicted",
y = "RIPRAM measured",
title = "All Data\nn=219") +
geom_abline(intercept = 0, slope = 1, color = "black") +
facet_wrap(~PSA6) +
theme_bw()
val3
val_fig <- (val1 + val2) /
(val3)
val_fig + plot_annotation(
title = 'RIPRAM Random Forest Results',
subtitle = 'All modeling performed using StreamCAT datasets.',
caption = 'Linear models are colored according to dataset. Lines of slope = 1 are denoted in black.'
)
# Save figure.
# ggsave("ripram_rfmodel_validation.png",
# path = "/Users/heilil/Desktop",
# width = 35,
# height = 25,
# units = "cm"
# )
# Chose not to compute a confusion matrix / accuracy score since those are more applicable to categorical outputs from random forest models -
# Instead, calculated Root Mean Squared Error (RMSE) of both training and test datasets.
# If test RMSE values are much greater than training, then possible the model has been over fit.
predtest <- predict(myrf2, mydf2_test2)
rmse(mydf2_test2$RIPRAM, predtest)
# 17.63
predtrain <- predict(myrf2, mydf2_train2)
rmse(mydf2_train2$RIPRAM, predtrain)
# 8.07
# Double checking using the original random forest dataset (rf_dat) with all 35 possible variables included to see where the error in number of predictors starts to increase dramatically (to help double check our decision to include 25 parameters).
dc <- rfcv(rf_dat %>%
select(-RIPRAM),
rf_dat$RIPRAM,
step = 0.7, # default is 0.5
scale="log")
dc$error.cv
# 34 24 17 12 8 6 4 3 2 1
# 370.0889 366.7509 386.6936 409.5337 441.2518 460.4203 513.3549 500.2896 536.4003 666.5010
# Appears between 24 and 17 variables, there is a significant increase in error. This model is roughly the same size as the CSCI (20) model.
# Step Seven - Map results state-wide -------------------------------------
# Using ca_predictions2 dataset generated above. But need to first associate lat/lon with each COMID.
# Load in NHD_Plus_CA dataset from Annie as well as watersheds from Jeff.
# Full state of California
nhd_ca <- read_sf("/Users/heilil/Desktop/hw_datasets/NHD_Plus_CA/NHDPlus_V2_FLowline_CA.shp") %>%
mutate(COMID = as.numeric(COMID))
# South Coast watersheds - Ventura River, San Juan Creek, San Diego River
nhd_vr <- read_sf("/Users/heilil/Desktop/hw_datasets/NHD_Watersheds/VenturaRiver_NHD_Clip.shp") %>%
mutate(COMID = as.numeric(COMID))
nhd_sjc <- read_sf("/Users/heilil/Desktop/hw_datasets/NHD_Watersheds/SanJuanCreek_NHD_Clip.shp") %>%
mutate(COMID = as.numeric(COMID))
nhd_sdr <- read_sf("/Users/heilil/Desktop/hw_datasets/NHD_Watersheds/SanDiegoRiver_NHD_Clip.shp") %>%
mutate(COMID = as.numeric(COMID))
# Assign modeled COMIDs to mcomid.
mcomid <- ca_predictions2$COMID
# Filter by and plot only modeled stream reaches.
modeled_ripram_map <- nhd_ca %>%
filter(COMID %in% mcomid) %>%
inner_join(ca_predictions2) %>%
ggplot() +
geom_sf(aes(color = class_f)) +
scale_color_manual(name = "Condition", values = c("red2", "lightpink", "lightskyblue2", "steelblue"), drop = FALSE) +
theme_bw()
modeled_ripram_map
# Note, sometimes this takes forever to render in the "plot" pane.
# Best to just save to your machine (below) and then take a look.
# ggsave("ripram_modeled_CA.png",
# path = "/Users/heilil/Desktop",
# width = 35,
# height = 35,
# units = "cm"
# )
# Additional Notes - Healthy Watersheds project ---------------------------
# Condition approach favored by Anna per meeting on 11/3/2020.
# Works Cited:
# Hill, Ryan A., Marc H. Weber, Scott G. Leibowitz, Anthony R. Olsen, and Darren J. Thornbrugh, 2016. The Stream-Catchment (StreamCat) Dataset: A Database of Watershed Metrics for the Conterminous United States. Journal of the American Water Resources Association (JAWRA) 52:120-128. DOI: 10.1111/1752-1688.12372.
# End of R script.
| /ripram_rf.R | no_license | SCCWRP/healthy_watershed_random_forest | R | false | false | 23,268 | r | # RipRAM Random Forest Draft
# January 29, 2021
# Heili Lowman
# The following script will walk through a random forest created to predict state-wide RipRAM scores, with datasets from Kevin O'Connor, SMC, and StreamCat databases. The dependent variable in this case will be the Riparian Rapid Assessment Method (RipRAM) index state-wide.
# Step One - Load In ------------------------------------------------------
# Load packages.
library(quantregForest)
library(caret)
library(tidyverse)
library(tidymodels)
library(skimr)
library(sf)
library(ggspatial)
library(nhdplusTools)
library(patchwork)
library(Metrics)
library(gt)
library(sp)
library(maptools)
library(rgdal)
library(dataRetrieval) # New USGS package used to extract COMIDs from lat/long sf points.
# Load datasets.
# Need to bind RipRAM data to NHD CA dataset to get COMIDs.
# Load in cleaned dataset from Kevin O'Connor's spreadsheet.
ripram_df <- read_csv("RipRAM_clean_012621.csv")
# Load in NHD_Plus_CA dataset from Annie as well as watersheds from Jeff.
# Full state of California
nhd_ca <- read_sf("/Users/heilil/Desktop/hw_datasets/NHD_Plus_CA/NHDPlus_V2_FLowline_CA.shp") %>%
mutate(COMID = as.numeric(COMID))
# Spatial Join Method #1:
# ripram_sf <- st_as_sf(ripram_df, # create sf compatible dataframe
# coords = c("D_long", "D_lat"), # identify lon & lat
# remove = F, # do not remove lat/lon columns
# crs = 4269) # use NAD83 projection
#
# nhd_z <- st_zm(nhd_ca)
#
# EPSG <- make_EPSG() # create data frame of available EPSG codes
# EPSG[grepl("NAD83$", EPSG$note), ] # search for NAD 83 code
# Units are in meters
#
# Join NHD dataset to RipRAM dataset.
# Using this method, provided by O. Liu, since it's closest to the ArcGIS workflow.
# stream_samples_join <- st_join(nhd_ca, ripram_sf,
# join = st_is_within_distance, # the predicate to st_join
# dist = 40) # joins samples within 40 m distance
# stream_remaining <- stream_samples_join %>%
# filter(SiteTag != "NA")
# Spatial Join Method #2:
# Another option is to make polygon buffer from lines.
# This yielded a lot of duplicate matches (150+) so I chose to go with the above workflow.
#streams_buff <- st_buffer(nhd_z, dist = 0.001) # width/thickness = 0.001 degrees
#streams_samples_join <- st_join(streams_buff, ripram_sf) # Joins points to polygons made in the line above.
#streams_remaining <- streams_samples_join %>%
# filter(SiteTag != "NA")
#
# Exporting dataset for Annie to double-check in ArcGIS. It matched pretty well but went with method #3 to avoid issues in small catchments that may arise from snapping to flowlines.
# as_tibble(stream_remaining) %>% # remove geometry
# select(c(COMID, D_lat, D_long)) %>% # select necessary columns
# write_csv("ripram_sites.csv") # export as .csv
# Spatial Join Method #3:
# I'll be using the USGS' dataRetrieval package function findNLDI(), which uses the NHD catchment boundaries instead of flowlines.
test_comid <- findNLDI(location = c(-89.362239, 43.090266)) # testing to be sure this generates COMID 13293750
test <- findNLDI(location = c(ripram_df$D_long[1], ripram_df$D_lat[1])) # this works
# After a lot of testing, it seems the findNLDI function can only take one input at a time, so I need to figure out a way to map all of the rows into it one by one.
# Extract site coordinate vectors from the RipRAM dataset.
long <- ripram_df$D_long # vector of longitudes
lat <- ripram_df$D_lat   # vector of latitudes
# Number of sites drives the loop below. Previously hard-coded as 360, which
# silently breaks (or drops sites) if the input spreadsheet gains or loses rows.
n_sites <- nrow(ripram_df)
# One-row, wide data frame to receive one COMID per site (auto-named X1..Xn,
# which the pivot_longer(starts_with("X")) step below relies on).
new_df <- data.frame(matrix(ncol = n_sites, nrow = 1))
# findNLDI() only accepts a single location per call, so query sites one at a time.
for (k in seq_len(n_sites)) {
  # Result must be unlisted because of findNLDI's nested output structure;
  # pivot_wider() collapses the sourceName/comid pair into a single value.
  new_df[k] <- as.data.frame.list(unlist(findNLDI(location = c(long[k], lat[k]), no_sf = TRUE))) %>%
    pivot_wider(names_from = sourceName, values_from = comid)
}
# I couldn't quite figure out how to get the dataset to format properly, so I'm creating a WIDE dataframe in the for loop above. I QAQC'ed a few by hand, and they are indeed printing out the COMIDs.
ripram_comids_beta <- new_df %>% # take the wide dataset
pivot_longer(cols = starts_with("X"),
names_to = "index",
values_to = "comid") # pivot this into a column
ripram_comid <- ripram_df %>%
mutate(COMID = as.numeric(ripram_comids_beta$comid), # bind said column to a new dataframe that will be used below
RIPRAM = Idx_Score) # and create newly named score column
# Watershed characteristics' data available from StreamCat.
ca <- read_csv("streamcat_params.csv")
skim(ca)
str(ca) # Checking to be sure COMID is numeric in both datasets.
# Perennial stream assessment data available from SCCWRP server.
ps6 <- read_csv("ps6_params.csv")
# In the ps6_rf_data script, if there are multiple Length_Fin measures for a given COMID, I have chosen the maximum of them and the associated PSA6 designation with that maximum.
# Bind the datasets together.
mydf <- ripram_comid %>%
select(SiteTag, COMID, RIPRAM) %>%
inner_join(ca) %>% # Join with StreamCat watershed characteristics.
inner_join(ps6) %>% # Join with PSA region dataset.
select(-c(PctOpCat, PctOpWs, PctOpCatRp100, PctOpWsRp100, NPDESDensCat,
NPDESDensWs, TRIDensCat, TRIDensWs, SuperfundDensCat, SuperfundDensWs)) # Remove "open" land use and discharge site columns.
skim(mydf) # Examing completeness of this joined dataset.
length(unique(mydf$COMID)) # Checking for duplicates. 219 unique COMIDs. Not many...
# Pull out only one instance of each COMID.
set.seed(1) # Every time I run the code below, it's based on the same random pull of data.
mydf2 <- mydf %>%
group_by(COMID) %>%
sample_n(size = 1) %>%
ungroup()
skim(mydf2) # Checking to make sure the dataset is complete.
# Important to have complete datasets for training data. For testing data, it's less critical.
# Step Two - Training Data ------------------------------------------------
# Create calibration and validation splits with tidymodels initial_split() function.
set.seed(4)
mydf2_split <- mydf2 %>%
initial_split(prop = 0.75, strata = PSA6) # splits data into training and testing set.
# default is 3/4ths split (but 75% training, 25% testing).
# Stratification (strata) = grouping training/testing sets by region, state, etc.
# Using the "strata" call ensures the number of data points in the training data is equivalent to the proportions in the original data set. (Strata below 10% of the total are pooled together.)
# Create a training data set with the training() function
# Pulls from training and testing sets created by initial_split()
mydf2_train <- training(mydf2_split)
mydf2_test <- testing(mydf2_split)
# Examine the environment to be sure # of observations looks like the 75/25 split. 165:54.
# Create a separate dataset of available COMIDS that were not used in the training dataset.
nottrain <- ca %>% # all COMIDS from StreamCat data, sampled or not
filter(!COMID %in% mydf2_train$COMID) # Removing sites used to train the model. n = 140,545
# Step Three - Kitchen Sink model -----------------------------------------
# Create finalized training dataset and include all possible variables.
rf_dat <- mydf2_train %>%
select(-SiteTag, -COMID, -PSA6, -Length_Fin)
# Random forest --
# a decision tree model, using predictors to answer dichotomous questions to create nested splits.
# no pruning happens - rather, multiple trees are built (the forest) and then you are looking for consensus across trees
# training data goes down the tree and ends up in a terminal node.
# if testing data goes down the same route, then this upholds our conclusions. Or, if it goes awry, this allows us to look for patterns in how it goes awry.
set.seed(2) # assures the data pulled is random, but sets it for the run below (makes outcome stable)
# Kitchen-sink random forest: RIPRAM score vs. all candidate StreamCat predictors.
myrf <- randomForest(y = rf_dat$RIPRAM,   # dependent variable
                     x = rf_dat %>%
                       select(-RIPRAM),   # selecting all predictor variables
                     importance = TRUE,   # how useful is a predictor in predicting values (nothing causal)
                     proximity = TRUE,
                     # BUG FIX: randomForest's argument is 'ntree' (singular).
                     # The original 'ntrees = 500' was silently swallowed by '...'
                     # and the default (500) used, so results are unchanged here,
                     # but any other value would have been silently ignored.
                     ntree = 500)         # 500 trees.
myrf # examine the results.
# 37.69% variance explained.
summary(myrf)
# mtry allows you to parameterize the number of splits
plot(myrf)
# model performance appears to improve most at ~300 trees
varImpPlot(myrf)
# displays which variables are most important
# helps to winnow down list of predictors
# recommended to weigh left pane more
# right pane also shows how evenly things split based on the list of predictors
# values close to 0 can be dropped, but don't have to be
# road, mine, and dam density appear to have the greatest impact
importance <- myrf$importance
View(importance)
# displays the data plotted in the plot above
# predict()
# returns out of bag predictions for training data
# in the bag: every time a tree is built, it uses ~80% of the original 75% we set aside from the original dataset used to create a tree to assure random data selection
# out of bag: looking at the remaining 20% of the training data to predict, when you want to know what your model does at the training location sites
# Predict RIPRAM scores state-wide for all COMIDs.
nottrain_prediction <- nottrain %>% # taking all available COMIDS, that haven't been used in training
na.omit() %>% # remove NAs
mutate(ripram_predicted = predict(myrf, newdata = nottrain %>% na.omit())) # using developed model (myrf), inputting predictor variables (nottrain - which contains COMIDs and associated StreamCat data) to predict output/dependent variable (ripram_predicted a.k.a. RIPRAM).
# rePredict RIPRAM scores for training data.
mydf2_train$ripram_predicted <- predict(myrf) # Add column of predicted RIPRAM values to training dataset.
# Creates new dataset of bound rows for both ...
ca_predictions <- bind_rows(nottrain_prediction %>%
mutate(Set = "Non-training"), # statewide COMIDs (not used for training)
mydf2_train %>%
mutate(Set = "Training")) # COMIDS from training dataset
# This creates the dataset that will be plotted to create a state-wide plot of predicted CRAM scores.
# Plot the data.
rf_plot1 <- ggplot(ca_predictions, aes(x = PctImp2011CatRp100, y = ripram_predicted)) +
geom_point(alpha = 0.1) +
labs(x = "Mean % imperviousness within catchment and within a 100-m buffer of NHD stream lines",
y = "Predicted RIPRAM Score") +
theme_classic() +
facet_wrap(.~Set)
rf_plot1
# Step Four - Predictor Selection -----------------------------------------
# Using caret to select the best predictors
# What are the parameters you want to use to run recursive feature elimination (rfe)?
my_ctrl <- rfeControl(functions = rfFuncs,
method = "cv",
verbose = FALSE,
returnResamp = "all")
# rfe = recursive feature elimination
# THIS STEP TAKES FOR-EV-ER!!!
set.seed(22)
# Recursive feature elimination over candidate predictor-subset sizes.
my_rfe <- rfe(y = rf_dat$RIPRAM,                # set dependent variable
              x = rf_dat %>% select(-RIPRAM),   # set predictor variables
              # BUG FIX: caret::rfe's argument is 'sizes' (plural). The original
              # 'size =' was absorbed by '...' and ignored, so rfe silently fell
              # back to its default sizes of 2^(2:4) = c(4, 8, 16).
              sizes = c(3:10, 15, 20, 25, 30),  # sets how many variables are in the overall model
              # I have 34 total possible variables, so I've chosen increments of 5 to look at.
              rfeControl = my_ctrl)             # pull in control from above
# can you make your model even simpler?
# the following will pick a model with the smallest number of predictor variables based on the tolerance ("tol") that you specify (how much less than the best are you willing to tolerate?)
my_size <- pickSizeTolerance(my_rfe$results, metric = "RMSE", tol = 1, maximize = F)
# higher tol (~10) gives you less variables
# lower tol (~1) gives you more variables - "I'd like the simplest model within 1% of the best model."
pickVars(my_rfe$variables, size = my_size)
# pickVars (25): PctImp2011WsRp100, PctImp2011CatRp100, PctAgCat, FertWs, CanalDensWs
# PctAgWs, PctImp2011Ws, PctImp2011Cat, FertCat, PctUrbCatRp100
# PctAgCatRp100, PctAgWsRp100, CBNFCat, RdCrsWs, ManureWs
# AgKffactCat, AgKffactWs, PctUrbWsRp100, CBNFWs, PctUrbCat
# Proceed with a regular RF that yields mean weighted values and fit those into the following classification scheme:
#Likely condition approach: Compare mean to three RIPRAM thresholds (60, 75, 90) based on condition classes.
# Very likely altered: mean < 60
# Likely altered: 60 <= mean < 75
# Possibly altered: 75 <= mean < 90
# Likely unaltered: mean >= 90
# Predict scores using the above 20 variables:
# Create re-finalized training dataset and include all possible variables.
rf_dat2 <- mydf2_train %>%
select(RIPRAM, PctImp2011WsRp100, PctImp2011CatRp100, PctAgCat, FertWs, CanalDensWs, PctAgWs, PctImp2011Ws, PctImp2011Cat, FertCat, PctUrbCatRp100, PctAgCatRp100, PctAgWsRp100, CBNFCat, RdCrsWs, ManureWs, AgKffactCat, AgKffactWs, PctUrbWsRp100, CBNFWs, PctUrbCat)
set.seed(4) # assures the data pulled is random, but sets it for the run below (makes outcome stable)
myrf2 <- randomForest(y = rf_dat2$RIPRAM, # dependent variable
x = rf_dat2 %>%
select(-RIPRAM),
importance = T,
proximity = T,
ntrees = 500)
myrf2 # examine the results.
# 38.97% variance explained.
summary(myrf2)
plot(myrf2) # need min of 200 trees.
varImpPlot(myrf2)
importance2 <- as.data.frame(as.table(myrf2$importance))
View(importance2) # displays the data plotted in the plot above
# Nicer ggplot variable importance plot.
vip_plot_a <- importance2 %>%
filter(Var2 == "%IncMSE") %>%
mutate(Var1 = factor(Var1)) %>%
mutate(Var1_f = fct_reorder(Var1, Freq)) %>%
ggplot(aes(x = Freq, y = Var1_f)) +
geom_point(size = 3, alpha = 0.75) +
labs(x = "% Importance (MSE)",
y = "Variables") +
theme_bw()
vip_plot_b <- importance2 %>%
filter(Var2 == "IncNodePurity") %>%
mutate(Var1 = factor(Var1)) %>%
mutate(Var1_f = fct_reorder(Var1, Freq)) %>%
ggplot(aes(x = Freq, y = Var1_f)) +
geom_point(size = 3, alpha = 0.75) +
labs(x = "Node Purity",
y = "Variables") +
theme_bw()
vip_plot <- vip_plot_a + vip_plot_b
vip_plot
# ggsave("ripram_vip_plot.png",
# path = "/Users/heilil/Desktop/R_figures",
# width = 25,
# height = 10,
# units = "cm"
# )
# predict(myrf2) # returns out of bag predictions for training data
# Predict RIPRAM scores state-wide.
nottrain_prediction2 <- nottrain %>% # taking all COMIDS that haven't been used in training
na.omit() %>% # remove NAs
mutate(ripram_predicted = predict(myrf2, newdata = nottrain %>% na.omit())) # using developed model (myrf2), inputting predictor variables (nottrain - COMIDs and associated StreamCat data) to predict output/dependent variable (ripram_predicted a.k.a. RIPRAM).
# rePredict RIPRAM scores for training and testing data (to be used in validation below).
mydf2_train2 <- mydf2_train
mydf2_train2$ripram_predicted <- predict(myrf2) # Add column of predicted RIPRAM scores to training dataset.
mydf2_test2 <- mydf2_test %>%
mutate(ripram_predicted = predict(myrf2, newdata = mydf2_test %>% select(-c(SiteTag, RIPRAM, PSA6, Length_Fin)))) # Adds column of predicted RIPRAM values to testing dataset.
# Creates new dataset of bound rows for both ...
ca_predictions2 <- bind_rows(nottrain_prediction2 %>%
mutate(Set = "Non-training"), # statewide COMIDs (not used for training data)
mydf2_train2 %>%
mutate(Set = "Training")) # COMIDS from training dataset (used for training the model).
# This creates the dataset that will be plotted.
# Create table of number of sites that fall into each category.
# NEEDS TO BE REDONE WITH ACTUAL CLASSIFICATIONS!
# Add classification column.
ca_predictions2 <- ca_predictions2 %>%
mutate(classification = case_when(ripram_predicted < 60 ~"Very Likely Altered",
ripram_predicted >= 60 & ripram_predicted < 75 ~"Likely Altered",
ripram_predicted >= 75 & ripram_predicted < 90 ~"Possibly Altered",
ripram_predicted >= 90 ~"Likely Unaltered")) %>%
mutate(class_f = factor(classification, levels = c("Very Likely Altered", "Likely Altered", "Possibly Altered", "Likely Unaltered"))) # relevel classifications
#### Results .csv ####
# Export results.
#write_csv(ca_predictions2, "ripram_rf_results.csv")
# Summary table by site #.
ca_summary <- ca_predictions2 %>%
count(class_f) # count sites statewide by classification
# The numbering is greatly skewed to the "possibly altered" classification, so perhaps other thresholds are necessary.
# Summary table by stream length (m)
ca_summary_length <- ca_predictions2 %>%
group_by(class_f) %>% # group by classification
summarize(length = sum(Length_Fin, na.rm=TRUE)) # sum stream lengths
# Join and export.
ca_sum <- full_join(ca_summary, ca_summary_length)
#write_csv(ca_sum, "ripram_rf_results_summary.csv")
# Step Five - Quantile Regression model -----------------------------------
# Note - for the Healthy Watersheds Project, I did not pursue this structure, but I've kept some example code below in case future iterations call for it.
# Quantile random forest regression mode, instead of looking at the mode of trees, can compare to 10th, 50th, 90th percentiles etc.
# Need to make a new dataset taking the above results of pickVars into account.
# Create finalized training dataset and include all possible variables.
# qrf_dat <- mydf2_train %>%
# select(asci, RdCrsWs, PctAgWs, PctUrbWsRp100, PctOpWsRp100, PctOpWs, DamDensWs, RdDensWs, NABD_DensWs, PctUrbWs, PctUrbCatRp100, RdDensWsRp100, PctOpCat, PctUrbCat, RdDensCat, CBNFWs, PctOpCatRp100, PctAgWsRp100, TRIDensWs, AgKffactWs, FertWs)
# set.seed(20)
# myqrf <- quantregForest(y = qrf_dat$asci, # dependent variable
# x = qrf_dat %>%
# select(-asci),
# importance = T,
# proximity = T,
# keep.inbag=T,
# ntrees = 500)
#predict(myqrf) # automatically presents 10th %tile, median, and 90th %tile
#predict(myqrf, what=c(0.2, 0.3, 0.999)) # to print specific quantiles
#plot(myqrf) # plots the results.
# Again appears to improve after ~100 trees.
# Step Six - Model validation ---------------------------------------------
# Compare predicted vs. actual results, including by PSA region.
# Adding lines of slope=1 and linear models to each plot.
val1 <- ggplot(mydf2_train2, aes(x = ripram_predicted, y = RIPRAM)) +
geom_point(color = "#2A3927", alpha = 0.5) +
geom_smooth(method = "lm", se = FALSE, color = "#2A3927") +
labs(x = "RIPRAM predicted",
y = "RIPRAM measured",
title = "Training Data\nn=165") +
geom_abline(intercept = 0, slope = 1) +
theme_bw()
val1
lm1 <- lm(RIPRAM~ripram_predicted, data = mydf2_train2)
summary(lm1)
val2 <- ggplot(mydf2_test2, aes(x = ripram_predicted, y = RIPRAM)) +
geom_point(color = "#3793EC", alpha = 0.5) +
geom_smooth(method = "lm", se = FALSE, color = "#3793EC") +
scale_x_continuous(breaks = c(0.5, 0.7, 0.9)) +
labs(x = "RIPRAM predicted",
y = "RIPRAM measured",
title = "Testing Data\nn=54") +
geom_abline(intercept = 0, slope = 1) +
theme_bw()
val2
lm2 <- lm(RIPRAM~ripram_predicted, data = mydf2_test2)
summary(lm2)
# Create the full testing + training dataset to plot together.
mydf2_test2$set <- "Testing"
mydf2_train2$set <- "Training"
full_train_test <- bind_rows(mydf2_test2, mydf2_train2) %>%
mutate(set_f = factor(set, levels = c("Training", "Testing")))
val3 <- ggplot(full_train_test, aes(x = ripram_predicted, y = RIPRAM, color = set_f)) +
geom_point(alpha = 0.5) +
geom_smooth(method = "lm", se = FALSE) +
scale_color_manual(name = "Set", values = c("#2A3927", "#3793EC"), drop = FALSE) +
labs(x = "RIPRAM predicted",
y = "RIPRAM measured",
title = "All Data\nn=219") +
geom_abline(intercept = 0, slope = 1, color = "black") +
facet_wrap(~PSA6) +
theme_bw()
val3
val_fig <- (val1 + val2) /
(val3)
val_fig + plot_annotation(
title = 'RIPRAM Random Forest Results',
subtitle = 'All modeling performed using StreamCAT datasets.',
caption = 'Linear models are colored according to dataset. Lines of slope = 1 are denoted in black.'
)
# Save figure.
# ggsave("ripram_rfmodel_validation.png",
# path = "/Users/heilil/Desktop",
# width = 35,
# height = 25,
# units = "cm"
# )
# Chose not to compute confusion matrix / accuracy score since this is more applicable to categorical outputs from random forest models -
# Instead, calculated Root Mean Squared Error (RMSE) of both training and test datasets.
# If test RMSE values are much greater than training, then possible the model has been over fit.
# Predict on the held-out test set and compute its RMSE; a test RMSE much
# larger than the training RMSE computed below would suggest overfitting.
predtest <- predict(myrf2, mydf2_test2)
rmse(mydf2_test2$RIPRAM, predtest)
# 17.63 (test RMSE observed on a previous run)
# Same check on the training data for comparison.
predtrain <- predict(myrf2, mydf2_train2)
rmse(mydf2_train2$RIPRAM, predtrain)
# 8.07 (training RMSE observed on a previous run)
# Double checking using the original random forest dataset (rf_dat) with all 35 possible variables included to see where the error in number of predictors starts to increase dramatically (to help double check our decision to include 25 parameters).
dc <- rfcv(rf_dat %>%
select(-RIPRAM),
rf_dat$RIPRAM,
step = 0.7, # default is 0.5
scale="log")
dc$error.cv
# 34 24 17 12 8 6 4 3 2 1
# 370.0889 366.7509 386.6936 409.5337 441.2518 460.4203 513.3549 500.2896 536.4003 666.5010
# Appears between 24 and 17 variables, there is a significant increase in error. This model is roughly the same size as the CSCI (20) model.
# Step Seven - Map results state-wide -------------------------------------
# Using ca_predictions2 dataset generated above. But need to first associate lat/lon with each COMID.
# Load in NHD_Plus_CA dataset from Annie as well as watersheds from Jeff.
# Full state of California
nhd_ca <- read_sf("/Users/heilil/Desktop/hw_datasets/NHD_Plus_CA/NHDPlus_V2_FLowline_CA.shp") %>%
mutate(COMID = as.numeric(COMID))
# South Coast watersheds - Ventura River, San Juan Creek, San Diego River
nhd_vr <- read_sf("/Users/heilil/Desktop/hw_datasets/NHD_Watersheds/VenturaRiver_NHD_Clip.shp") %>%
mutate(COMID = as.numeric(COMID))
nhd_sjc <- read_sf("/Users/heilil/Desktop/hw_datasets/NHD_Watersheds/SanJuanCreek_NHD_Clip.shp") %>%
mutate(COMID = as.numeric(COMID))
nhd_sdr <- read_sf("/Users/heilil/Desktop/hw_datasets/NHD_Watersheds/SanDiegoRiver_NHD_Clip.shp") %>%
mutate(COMID = as.numeric(COMID))
# Assign modeled COMIDs to mcomid.
mcomid <- ca_predictions2$COMID
# Filter by and plot only modeled stream reaches.
modeled_ripram_map <- nhd_ca %>%
filter(COMID %in% mcomid) %>%
inner_join(ca_predictions2) %>%
ggplot() +
geom_sf(aes(color = class_f)) +
scale_color_manual(name = "Condition", values = c("red2", "lightpink", "lightskyblue2", "steelblue"), drop = FALSE) +
theme_bw()
modeled_ripram_map
# Note, sometimes this takes forever to render in the "plot" pane.
# Best to just save to your machine (below) and then take a look.
# ggsave("ripram_modeled_CA.png",
# path = "/Users/heilil/Desktop",
# width = 35,
# height = 35,
# units = "cm"
# )
# Additional Notes - Healthy Watersheds project ---------------------------
# Condition approach favored by Anna per meeting on 11/3/2020.
# Works Cited:
# Hill, Ryan A., Marc H. Weber, Scott G. Leibowitz, Anthony R. Olsen, and Darren J. Thornbrugh, 2016. The Stream-Catchment (StreamCat) Dataset: A Database of Watershed Metrics for the Conterminous United States. Journal of the American Water Resources Association (JAWRA) 52:120-128. DOI: 10.1111/1752-1688.12372.
# End of R script.
|
# Extracted example for MatchLinReg::mlr.bias.constructor (from the package Rd examples).
library(MatchLinReg)
### Name: mlr.bias.constructor
### Title: Generating the treatment effect bias constructor vector
### Aliases: mlr.bias.constructor
### ** Examples
# number of included adjustment covariates
K <- 10
# number of observations in treatment group
Nt <- 100
# number of observations in control group
Nc <- 100
N <- Nt + Nc
# treatment indicator variable (first Nt observations treated, remaining Nc control)
tr <- c(rep(1, Nt), rep(0, Nc))
# matrix of included (adjustment) covariates, drawn uniformly at random
Z.i <- matrix(runif(K*N), ncol = K)
ret <- mlr.bias.constructor(tr = tr, Z.i = Z.i)
# comparing with brute-force approach: the constructor should equal the first
# row of (X'X)^{-1} X' for the design matrix X = [tr, 1, Z.i]
X.i <- cbind(tr, 1, Z.i)
ret2 <- (solve(t(X.i) %*% X.i, t(X.i)))[1, ]
cat("check 1:", all.equal(ret2, ret), "\n")
# sampling with replacement (a resample of ~75% of N, duplicates allowed)
idx <- sample(1:N, size = round(0.75*N), replace = TRUE)
# the 'idx' argument should reproduce the brute-force result on the subsample
ret3 <- mlr.bias.constructor(tr = tr, Z.i = Z.i, idx = idx)
ret4 <- (solve(t(X.i[idx, ]) %*% X.i[idx, ], t(X.i[idx, ])))[1, ]
cat("check 2:", all.equal(ret3, ret4), "\n")
| /data/genthat_extracted_code/MatchLinReg/examples/mlr.bias.constructor.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 954 | r | library(MatchLinReg)
### Name: mlr.bias.constructor
### Title: Generating the treatment effect bias constructor vector
### Aliases: mlr.bias.constructor
### ** Examples
# number of included adjustment covariates
K <- 10
# number of observations in treatment group
Nt <- 100
# number of observations in control group
Nc <- 100
N <- Nt + Nc
# treatment indicator variable (first Nt observations treated, remaining Nc control)
tr <- c(rep(1, Nt), rep(0, Nc))
# matrix of included (adjustment) covariates, drawn uniformly at random
Z.i <- matrix(runif(K*N), ncol = K)
ret <- mlr.bias.constructor(tr = tr, Z.i = Z.i)
# comparing with brute-force approach: the constructor should equal the first
# row of (X'X)^{-1} X' for the design matrix X = [tr, 1, Z.i]
X.i <- cbind(tr, 1, Z.i)
ret2 <- (solve(t(X.i) %*% X.i, t(X.i)))[1, ]
cat("check 1:", all.equal(ret2, ret), "\n")
# sampling with replacement (a resample of ~75% of N, duplicates allowed)
idx <- sample(1:N, size = round(0.75*N), replace = TRUE)
# the 'idx' argument should reproduce the brute-force result on the subsample
ret3 <- mlr.bias.constructor(tr = tr, Z.i = Z.i, idx = idx)
ret4 <- (solve(t(X.i[idx, ]) %*% X.i[idx, ], t(X.i[idx, ])))[1, ]
cat("check 2:", all.equal(ret3, ret4), "\n")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.functions.R
\name{timeplot}
\alias{timeplot}
\title{Plot raw responses over time by treatment or class}
\usage{
timeplot(network, level = "treatment", plotby = "arm", link = "identity", ...)
}
\arguments{
\item{network}{An object of class \code{"mb.network"}.}
\item{level}{A string indicating whether nodes/facets should represent \code{treatment}
or \code{class} in the plot. Can be used to examine the expected impact of modelling
class/agent effects.}
\item{plotby}{A character object that can take either \code{"arm"} to indicate that raw responses
should be plotted separately for each study arm, or \code{"rel"} to indicate that the within-study
relative effects/treatment differences should be plotted. In this way the time-course of both the absolute
effects and the relative effects can be examined.}
\item{link}{Can take either \code{"identity"} (the default),
\code{"log"} (for modelling Ratios of Means \insertCite{friedrich2011}{MBNMAtime}) or
\code{"smd"} (for modelling Standardised Mean Differences - although this also corresponds to an identity link function).}
\item{...}{Arguments to be sent to \code{ggplot()}}
}
\value{
The function returns an object of \verb{class(c("gg", "ggplot"))}. Characteristics
of the object can therefore be amended as with other plots generated by \code{ggplot()}.
}
\description{
Plot raw responses over time by treatment or class
}
\details{
Plots can be faceted by either treatment (\code{level="treatment"}) or class
(\code{level="class"}) to investigate similarity of treatment responses within classes/agents.
Points represent observed responses and lines connect between observations within the
same study and arm.
}
\examples{
\donttest{
# Make network
goutnet <- mb.network(goutSUA_CFB)
# Use timeplot to plot responses grouped by treatment
timeplot(goutnet)
# Use timeplot to plot responses grouped by class
timeplot(goutnet, level="class")
# Plot matrix of relative effects
timeplot(goutnet, level="class", plotby="rel")
# Plot using Standardised Mean Differences
copdnet <- mb.network(copd)
timeplot(copdnet, plotby="rel", link="smd")
}
}
| /man/timeplot.Rd | no_license | hugaped/MBNMAtime | R | false | true | 2,252 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot.functions.R
\name{timeplot}
\alias{timeplot}
\title{Plot raw responses over time by treatment or class}
\usage{
timeplot(network, level = "treatment", plotby = "arm", link = "identity", ...)
}
\arguments{
\item{network}{An object of class \code{"mb.network"}.}
\item{level}{A string indicating whether nodes/facets should represent \code{treatment}
or \code{class} in the plot. Can be used to examine the expected impact of modelling
class/agent effects.}
\item{plotby}{A character object that can take either \code{"arm"} to indicate that raw responses
should be plotted separately for each study arm, or \code{"rel"} to indicate that the within-study
relative effects/treatment differences should be plotted. In this way the time-course of both the absolute
effects and the relative effects can be examined.}
\item{link}{Can take either \code{"identity"} (the default),
\code{"log"} (for modelling Ratios of Means \insertCite{friedrich2011}{MBNMAtime}) or
\code{"smd"} (for modelling Standardised Mean Differences - although this also corresponds to an identity link function).}
\item{...}{Arguments to be sent to \code{ggplot()}}
}
\value{
The function returns an object of \verb{class(c("gg", "ggplot"))}. Characteristics
of the object can therefore be amended as with other plots generated by \code{ggplot()}.
}
\description{
Plot raw responses over time by treatment or class
}
\details{
Plots can be faceted by either treatment (\code{level="treatment"}) or class
(\code{level="class"}) to investigate similarity of treatment responses within classes/agents.
Points represent observed responses and lines connect between observations within the
same study and arm.
}
\examples{
\donttest{
# Make network
goutnet <- mb.network(goutSUA_CFB)
# Use timeplot to plot responses grouped by treatment
timeplot(goutnet)
# Use timeplot to plot responses grouped by class
timeplot(goutnet, level="class")
# Plot matrix of relative effects
timeplot(goutnet, level="class", plotby="rel")
# Plot using Standardised Mean Differences
copdnet <- mb.network(copd)
timeplot(copdnet, plotby="rel", link="smd")
}
}
|
rna_seq<-read.table("GSE60424_GEOSubmit_FC1to11_normalized_counts.txt",header =TRUE)
column_names<-c("whole_blood","Neutrophil","Monocytes","Bcells","CD4T","CD8T","NK")
healthy_1<-cbind(rna_seq$lib221,rna_seq[,7:12])
healthy_2<-cbind(rna_seq[,74],rna_seq[,68:73])
healthy_3<-cbind(rna_seq[,81],rna_seq[,75:80])
healthy_4<-cbind(rna_seq[,95],rna_seq[,89:94])
sepsis_1<-cbind(rna_seq[,40],rna_seq[,34:39])
sepsis_2<-cbind(rna_seq[,46],rna_seq[,41:45])
sepsis_3<-cbind(rna_seq[,53],rna_seq[,47:52])
summerized<-cbind(sepsis_1,sepsis_2,sepsis_3)
summerized<-cbind(summerized,healthy_1,healthy_2,healthy_3,healthy_4)
#colnames(sepsis_1)<-column_names
colnames(summerized)<- c("sepsis1,whole_blood","sepsis1,Neutrophil","sepsis1,Monocytes","sepsis1,Bcells","sepsis1,CD4T",
"sepsis1,CD8T","sepsis1,NK","sepsis2,whole_blood","sepsis2,Neutrophil","sepsis2,Monocytes","sepsis2,Bcells","sepsis2,CD4T",
"sepsis2,CD8T","sepsis3,whole_blood","sepsis3,Neutrophil","sepsis3,Monocytes","sepsis3,Bcells","sepsis3,CD4T",
"sepsis3,CD8T","sepsis3,NK","healthy1,whole_blood","healthy1,Neutrophil","healthy1,Monocytes","healthy1,Bcells","healthy1,CD4T",
"healthy1,CD8T","healthy1,NK","healthy2,whole_blood","healthy2,Neutrophil","healthy2,Monocytes","healthy2,Bcells","healthy2,CD4T",
"healthy2,CD8T","healthy2,NK","healthy3,whole_blood","healthy3,Neutrophil","healthy3,Monocytes","healthy3,Bcells","healthy3,CD4T",
"healthy3,CD8T","healthy3,NK","healthy4,whole_blood","healthy4,Neutrophil","healthy4,Monocytes","healthy4,Bcells","healthy4,CD4T",
"healthy4,CD8T","healthy4,NK")
row.names(summerized)<-rna_seq[,1]
result <- as.data.frame(t(log2(1+summerized)))
# Annotate each sample (row of `result`) with disease status and cell type.
# Column order in `summerized` was: sepsis1 (7 cell types), sepsis2 (6 types,
# its NK sample is absent), sepsis3 (7 types), then healthy donors 1-4
# (7 types each) -- i.e. 20 sepsis samples followed by 28 healthy samples.
result$disease <- c(rep("sepsis", 20), rep("healthy", 28))
# Cell-type labels in the fixed per-donor order; sepsis2 lacks the NK sample.
cell_types <- c("whole_blood", "Neutrophil", "Monocytes", "Bcells", "CD4T",
                "CD8T", "NK")
result$cell_type <- c(cell_types,          # sepsis1
                      cell_types[1:6],     # sepsis2 (no NK)
                      cell_types,          # sepsis3
                      rep(cell_types, 4))  # healthy1-4
PCA_all<- prcomp(result[,1:50045])
library(ggplot2)
ggplot(data=NULL, aes(PCA_all$x[,1],PCA_all$x[,2])) + geom_point(aes(color=result$cell_type,shape=result$disease))+
xlab("First Dimension") + ylab("Second Dimension") + labs(shape="Health_Status",color="Cell_type")
PoV <- PCA_all$sdev^2/sum(PCA_all$sdev^2)*100
barplot(PoV, xlab= "Dimensions", ylab="Proportion of explained variance (%)")
PoV[1]+PoV[2]
#####Generate the result using Neutrophil (sepsis and healthy, or only healthy)#########
#####Add the cells back to the PCA to see their spatial location on the PCA-plot ########
result_healthy_PC <- result[result$disease=="healthy"&result$cell_type=="whole_blood",]
result_sepsis_PC <- result[result$disease=="sepsis"&result$cell_type=="whole_blood",]
s_Neutr <- result[result$cell_type=="Neutrophil"&result$disease=="sepsis",]
h_Neutr <- result[result$cell_type=="Neutrophil"&result$disease=="healthy",]
all_Neutr <- as.data.frame(rbind(s_Neutr,h_Neutr))
all_Neutr$type <- c(rep("sepsis",3),rep("healthy",4))
PCA_m1 <- prcomp(all_Neutr[,1:50045],scale=F, center = T)
score_h <- predict(PCA_m1,result_healthy_PC)
score_s <- predict(PCA_m1,result_sepsis_PC)
table_n<-as.data.frame(rbind(score_h,score_s,PCA_m1$x))
table_n$ disease <- c(rep("healthy",4),rep("sepsis",6),rep("healthy",4))
table_n$ cell_type <- c(rep("whole_blood",7),rep("Neutrophil",7))
library(ggplot2)
ggplot(NULL, mapping = aes(table_n[,1],table_n[,2]) )+
geom_point(aes(color=table_n$cell_type,shape=table_n$disease)) +
xlab("First Dimension") + ylab("Second Dimension") + labs(color="Cell_type", shape="health_status")
########use all the components to predict the model of whole blood ##########
result_healthy_PC <- result[result$disease=="healthy"&result$cell_type=="whole_blood",]
result_sepsis_PC <- result[result$disease=="sepsis"&result$cell_type=="whole_blood",]
s_all <- result[result$cell_type!="whole_blood"&result$disease=="sepsis",]
h_all <- result[result$cell_type!="whole_blood"&result$disease=="healthy",]
all_allPC <- as.data.frame(rbind(s_all,h_all))
PCA_m1 <- prcomp(all_allPC[,1:50045],scale=F, center = T)
score_h <- predict(PCA_m1,result_healthy_PC)
score_s <- predict(PCA_m1,result_sepsis_PC)
table_n<-as.data.frame(rbind(score_h,score_s,PCA_m1$x))
healthy <- grep("healthy",rownames(table_n))
table_n$disease[healthy] <- "healthy"
table_n$disease[-healthy] <- "sepsis"
table_n$cell_type <- "blood"
WB <- grep("whole_blood",rownames(table_n))
table_n$cell_type[WB] <- "whole_blood"
Mono<- grep("Monocytes",rownames(table_n))
table_n$cell_type[Mono] <- "Monocytes"
BCells<- grep("Bcells",rownames(table_n))
table_n$cell_type[BCells] <- "Bcells"
table_n$cell_type[grep("Neutrophil",rownames(table_n))] <- "Neutrophil"
table_n$cell_type[grep("CD4T",rownames(table_n))] <- "CD4T"
table_n$cell_type[grep("CD8T",rownames(table_n))] <- "CD8T"
table_n$cell_type[grep("NK",rownames(table_n))] <- "NK"
library(ggplot2)
ggplot(NULL, mapping = aes(table_n[,1],table_n[,2]) )+
geom_point(aes(color=table_n$cell_type,shape=table_n$disease)) +
xlab("First Dimension") + ylab("Second Dimension") + labs(color="Cell_type", shape="health_status")
###########When using multiple components to generate the model
result_healthy_PC <- result[result$disease=="healthy"&result$cell_type=="whole_blood",]
result_sepsis_PC <- result[result$disease=="sepsis"&result$cell_type=="whole_blood",]
s_Neutr <- result[result$cell_type=="Neutrophil"&result$disease=="sepsis",]
h_Neutr <- result[result$cell_type=="Neutrophil"&result$disease=="healthy",]
s_Bcells <- result[result$disease=="sepsis"&result$cell_type=="Bcells",]
h_Bcells <- result[result$disease=="healthy"&result$cell_type=="Bcells",]
all_Neutr <- as.data.frame(rbind(s_Neutr,h_Neutr))
all_Bcells <- as.data.frame(rbind(s_Bcells,h_Bcells))
all_Neutr$type <- c(rep("sepsis",3),rep("healthy",4))
all_Bcells$type <- c(rep("sepsis",3),rep("healthy",4))
all_cells <- as.data.frame(rbind(all_Neutr,all_Bcells))
PCA_m1 <- prcomp(all_Neutr[,1:50045],scale=F, center = T)
score_h <- predict(PCA_m1,result_healthy_PC)
score_s <- predict(PCA_m1,result_sepsis_PC)
table_n<-as.data.frame(rbind(score_h,score_s,PCA_m1$x))
table_n$ disease <- c(rep("healthy",4),rep("sepsis",6),rep("healthy",4))
table_n$ cell_type <- c(rep("whole_blood",7),rep("Neutrophil",7))
library(ggplot2)
ggplot(NULL, mapping = aes(table_n[,1],table_n[,2]) )+
geom_point(aes(color=table_n$cell_type,shape=table_n$disease)) +
xlab("First Dimension") + ylab("Second Dimension") + labs(color="Cell_type", shape="health_status")
| /A few trials on the signature/Trial_2_PCA_Whole_blood.R | no_license | BinLiu9205/Master_thesis_RNA-seq | R | false | false | 7,640 | r | rna_seq<-read.table("GSE60424_GEOSubmit_FC1to11_normalized_counts.txt",header =TRUE)
column_names<-c("whole_blood","Neutrophil","Monocytes","Bcells","CD4T","CD8T","NK")
healthy_1<-cbind(rna_seq$lib221,rna_seq[,7:12])
healthy_2<-cbind(rna_seq[,74],rna_seq[,68:73])
healthy_3<-cbind(rna_seq[,81],rna_seq[,75:80])
healthy_4<-cbind(rna_seq[,95],rna_seq[,89:94])
sepsis_1<-cbind(rna_seq[,40],rna_seq[,34:39])
sepsis_2<-cbind(rna_seq[,46],rna_seq[,41:45])
sepsis_3<-cbind(rna_seq[,53],rna_seq[,47:52])
summerized<-cbind(sepsis_1,sepsis_2,sepsis_3)
summerized<-cbind(summerized,healthy_1,healthy_2,healthy_3,healthy_4)
#colnames(sepsis_1)<-column_names
colnames(summerized)<- c("sepsis1,whole_blood","sepsis1,Neutrophil","sepsis1,Monocytes","sepsis1,Bcells","sepsis1,CD4T",
"sepsis1,CD8T","sepsis1,NK","sepsis2,whole_blood","sepsis2,Neutrophil","sepsis2,Monocytes","sepsis2,Bcells","sepsis2,CD4T",
"sepsis2,CD8T","sepsis3,whole_blood","sepsis3,Neutrophil","sepsis3,Monocytes","sepsis3,Bcells","sepsis3,CD4T",
"sepsis3,CD8T","sepsis3,NK","healthy1,whole_blood","healthy1,Neutrophil","healthy1,Monocytes","healthy1,Bcells","healthy1,CD4T",
"healthy1,CD8T","healthy1,NK","healthy2,whole_blood","healthy2,Neutrophil","healthy2,Monocytes","healthy2,Bcells","healthy2,CD4T",
"healthy2,CD8T","healthy2,NK","healthy3,whole_blood","healthy3,Neutrophil","healthy3,Monocytes","healthy3,Bcells","healthy3,CD4T",
"healthy3,CD8T","healthy3,NK","healthy4,whole_blood","healthy4,Neutrophil","healthy4,Monocytes","healthy4,Bcells","healthy4,CD4T",
"healthy4,CD8T","healthy4,NK")
row.names(summerized)<-rna_seq[,1]
result <- as.data.frame(t(log2(1+summerized)))
result$disease <- c("sepsis","sepsis","sepsis","sepsis","sepsis",
"sepsis","sepsis","sepsis","sepsis","sepsis","sepsis","sepsis",
"sepsis","sepsis","sepsis","sepsis","sepsis","sepsis",
"sepsis","sepsis","healthy","healthy","healthy","healthy","healthy",
"healthy","healthy","healthy","healthy","healthy","healthy","healthy",
"healthy","healthy","healthy","healthy","healthy","healthy","healthy",
"healthy","healthy","healthy","healthy","healthy","healthy","healthy",
"healthy","healthy")
result$cell_type <- c("whole_blood","Neutrophil","Monocytes","Bcells","CD4T",
"CD8T","NK","whole_blood","Neutrophil","Monocytes","Bcells","CD4T",
"CD8T","whole_blood","Neutrophil","Monocytes","Bcells","CD4T",
"CD8T","NK","whole_blood","Neutrophil","Monocytes","Bcells","CD4T",
"CD8T","NK","whole_blood","Neutrophil","Monocytes","Bcells","CD4T",
"CD8T","NK","whole_blood","Neutrophil","Monocytes","Bcells","CD4T",
"CD8T","NK","whole_blood","Neutrophil","Monocytes","Bcells","CD4T",
"CD8T","NK")
PCA_all<- prcomp(result[,1:50045])
library(ggplot2)
ggplot(data=NULL, aes(PCA_all$x[,1],PCA_all$x[,2])) + geom_point(aes(color=result$cell_type,shape=result$disease))+
xlab("First Dimension") + ylab("Second Dimension") + labs(shape="Health_Status",color="Cell_type")
PoV <- PCA_all$sdev^2/sum(PCA_all$sdev^2)*100
barplot(PoV, xlab= "Dimensions", ylab="Proportion of explained variance (%)")
PoV[1]+PoV[2]
#####Generate the result using Neutrophil (sepsis and healthy, or only healthy)#########
#####Add the cells back to the PCA to see their spatial location on the PCA-plot ########
result_healthy_PC <- result[result$disease=="healthy"&result$cell_type=="whole_blood",]
result_sepsis_PC <- result[result$disease=="sepsis"&result$cell_type=="whole_blood",]
s_Neutr <- result[result$cell_type=="Neutrophil"&result$disease=="sepsis",]
h_Neutr <- result[result$cell_type=="Neutrophil"&result$disease=="healthy",]
all_Neutr <- as.data.frame(rbind(s_Neutr,h_Neutr))
all_Neutr$type <- c(rep("sepsis",3),rep("healthy",4))
PCA_m1 <- prcomp(all_Neutr[,1:50045],scale=F, center = T)
score_h <- predict(PCA_m1,result_healthy_PC)
score_s <- predict(PCA_m1,result_sepsis_PC)
table_n<-as.data.frame(rbind(score_h,score_s,PCA_m1$x))
table_n$ disease <- c(rep("healthy",4),rep("sepsis",6),rep("healthy",4))
table_n$ cell_type <- c(rep("whole_blood",7),rep("Neutrophil",7))
library(ggplot2)
ggplot(NULL, mapping = aes(table_n[,1],table_n[,2]) )+
geom_point(aes(color=table_n$cell_type,shape=table_n$disease)) +
xlab("First Dimension") + ylab("Second Dimension") + labs(color="Cell_type", shape="health_status")
########use all the components to predict the model of whole blood ##########
result_healthy_PC <- result[result$disease=="healthy"&result$cell_type=="whole_blood",]
result_sepsis_PC <- result[result$disease=="sepsis"&result$cell_type=="whole_blood",]
s_all <- result[result$cell_type!="whole_blood"&result$disease=="sepsis",]
h_all <- result[result$cell_type!="whole_blood"&result$disease=="healthy",]
all_allPC <- as.data.frame(rbind(s_all,h_all))
PCA_m1 <- prcomp(all_allPC[,1:50045],scale=F, center = T)
score_h <- predict(PCA_m1,result_healthy_PC)
score_s <- predict(PCA_m1,result_sepsis_PC)
table_n<-as.data.frame(rbind(score_h,score_s,PCA_m1$x))
healthy <- grep("healthy",rownames(table_n))
table_n$disease[healthy] <- "healthy"
table_n$disease[-healthy] <- "sepsis"
table_n$cell_type <- "blood"
WB <- grep("whole_blood",rownames(table_n))
table_n$cell_type[WB] <- "whole_blood"
Mono<- grep("Monocytes",rownames(table_n))
table_n$cell_type[Mono] <- "Monocytes"
BCells<- grep("Bcells",rownames(table_n))
table_n$cell_type[BCells] <- "Bcells"
table_n$cell_type[grep("Neutrophil",rownames(table_n))] <- "Neutrophil"
table_n$cell_type[grep("CD4T",rownames(table_n))] <- "CD4T"
table_n$cell_type[grep("CD8T",rownames(table_n))] <- "CD8T"
table_n$cell_type[grep("NK",rownames(table_n))] <- "NK"
library(ggplot2)
ggplot(NULL, mapping = aes(table_n[,1],table_n[,2]) )+
geom_point(aes(color=table_n$cell_type,shape=table_n$disease)) +
xlab("First Dimension") + ylab("Second Dimension") + labs(color="Cell_type", shape="health_status")
###########When using multiple components to generate the model
result_healthy_PC <- result[result$disease=="healthy"&result$cell_type=="whole_blood",]
result_sepsis_PC <- result[result$disease=="sepsis"&result$cell_type=="whole_blood",]
s_Neutr <- result[result$cell_type=="Neutrophil"&result$disease=="sepsis",]
h_Neutr <- result[result$cell_type=="Neutrophil"&result$disease=="healthy",]
s_Bcells <- result[result$disease=="sepsis"&result$cell_type=="Bcells",]
h_Bcells <- result[result$disease=="healthy"&result$cell_type=="Bcells",]
all_Neutr <- as.data.frame(rbind(s_Neutr,h_Neutr))
all_Bcells <- as.data.frame(rbind(s_Bcells,h_Bcells))
all_Neutr$type <- c(rep("sepsis",3),rep("healthy",4))
all_Bcells$type <- c(rep("sepsis",3),rep("healthy",4))
all_cells <- as.data.frame(rbind(all_Neutr,all_Bcells))
PCA_m1 <- prcomp(all_Neutr[,1:50045],scale=F, center = T)
score_h <- predict(PCA_m1,result_healthy_PC)
score_s <- predict(PCA_m1,result_sepsis_PC)
table_n<-as.data.frame(rbind(score_h,score_s,PCA_m1$x))
table_n$ disease <- c(rep("healthy",4),rep("sepsis",6),rep("healthy",4))
table_n$ cell_type <- c(rep("whole_blood",7),rep("Neutrophil",7))
library(ggplot2)
ggplot(NULL, mapping = aes(table_n[,1],table_n[,2]) )+
geom_point(aes(color=table_n$cell_type,shape=table_n$disease)) +
xlab("First Dimension") + ylab("Second Dimension") + labs(color="Cell_type", shape="health_status")
|
#radial scatter plot published in plotly using plotly R API
library(plotly)
library(ggplot2)
Sys.setenv("plotly_username"="caluchko")
Sys.setenv("plotly_api_key"="cqdk1g1p7k")
df$Activity <- as.factor(df$Activity)
p <- plot_ly(plotly::hobbs, r = df$Elevation..m., t = df$degjit, color = df$activity,
opacity = .8, mode = "markers") %>%
layout(title = "Fatal Swiss avalanches 1995 - 2016: Slope aspect, elevation, and activity",
orientation = 270, radialaxis = list(ticksuffix = "m", orientation = -40))
p
plotly_POST(p, filename = "av_scatter")
| /scatter.R | no_license | caluchko/avalanche | R | false | false | 568 | r | #radial scatter plot published in plotly using plotly R API
library(plotly)
library(ggplot2)
Sys.setenv("plotly_username"="caluchko")
Sys.setenv("plotly_api_key"="cqdk1g1p7k")
df$Activity <- as.factor(df$Activity)
p <- plot_ly(plotly::hobbs, r = df$Elevation..m., t = df$degjit, color = df$activity,
opacity = .8, mode = "markers") %>%
layout(title = "Fatal Swiss avalanches 1995 - 2016: Slope aspect, elevation, and activity",
orientation = 270, radialaxis = list(ticksuffix = "m", orientation = -40))
p
plotly_POST(p, filename = "av_scatter")
|
setMethodS3("getStateColorMap", "SegmentedGenomicSignalsInterface", function(this, ...) {
colorMap <- getBasicField(this, ".stateColorMap")
if (is.null(colorMap)) {
this <- setStateColorMap(this, colorMap="default")
colorMap <- getBasicField(this, ".stateColorMap")
}
colorMap
})
setMethodS3("setStateColorMap", "SegmentedGenomicSignalsInterface", function(this, colorMap="default", ...) {
# Argument 'colorMap':
names <- names(colorMap)
if (is.null(names)) {
colorMap <- match.arg(colorMap)
if (colorMap == "default") {
colorMap <- c(
"NA" = "#999999", # missing values
"0" = "#000000", # neutral
"-" = "blue", # losses
"+" = "red", # gains
"*" = "#000000" # default
)
}
} else {
colorMap <- Arguments$getCharacters(colorMap)
names(colorMap) <- names
}
this <- setBasicField(this, ".stateColorMap", colorMap)
invisible(this)
})
setMethodS3("getStateColors", "SegmentedGenomicSignalsInterface", function(this, na.rm=FALSE, ...) {
colorMap <- getStateColorMap(this)
if (na.rm) {
colorMap["NA"] <- NA_character_
}
hasDefColor <- is.element("*", names(colorMap))
if (hasDefColor) {
for (type in c("0", "-", "+")) {
if (!is.element(type, names(colorMap))) {
colorMap[type] <- colorMap["*"]
}
}
}
states <- getStates(this)
# print(table(states, exclude=NULL))
uStates <- sort(unique(states), na.last=TRUE)
uStates <- na.omit(uStates)
# Default missing-value colors
naColor <- as.character(colorMap["NA"])
cols <- rep(naColor, times=length(states))
# Neutral states
if (is.element("0", names(colorMap))) {
idxs <- which(states == 0)
cols[idxs] <- colorMap["0"]
}
# Negative states
if (is.element("-", names(colorMap))) {
idxs <- which(states < 0)
cols[idxs] <- colorMap["-"]
}
# Positive states
if (is.element("+", names(colorMap))) {
idxs <- which(states > 0)
cols[idxs] <- colorMap["+"]
}
for (kk in seq_along(uStates)) {
state <- uStates[kk]
key <- sprintf("%s", state)
if (is.element(key, names(colorMap))) {
idxs <- which(states == state)
cols[idxs] <- colorMap[key]
}
} # for (kk ...)
cols
})
| /R/SegmentedGenomicSignalsInterface.COLS.R | no_license | HenrikBengtsson/aroma.core | R | false | false | 2,259 | r | setMethodS3("getStateColorMap", "SegmentedGenomicSignalsInterface", function(this, ...) {
colorMap <- getBasicField(this, ".stateColorMap")
if (is.null(colorMap)) {
this <- setStateColorMap(this, colorMap="default")
colorMap <- getBasicField(this, ".stateColorMap")
}
colorMap
})
setMethodS3("setStateColorMap", "SegmentedGenomicSignalsInterface", function(this, colorMap="default", ...) {
# Argument 'colorMap':
names <- names(colorMap)
if (is.null(names)) {
colorMap <- match.arg(colorMap)
if (colorMap == "default") {
colorMap <- c(
"NA" = "#999999", # missing values
"0" = "#000000", # neutral
"-" = "blue", # losses
"+" = "red", # gains
"*" = "#000000" # default
)
}
} else {
colorMap <- Arguments$getCharacters(colorMap)
names(colorMap) <- names
}
this <- setBasicField(this, ".stateColorMap", colorMap)
invisible(this)
})
setMethodS3("getStateColors", "SegmentedGenomicSignalsInterface", function(this, na.rm=FALSE, ...) {
colorMap <- getStateColorMap(this)
if (na.rm) {
colorMap["NA"] <- NA_character_
}
hasDefColor <- is.element("*", names(colorMap))
if (hasDefColor) {
for (type in c("0", "-", "+")) {
if (!is.element(type, names(colorMap))) {
colorMap[type] <- colorMap["*"]
}
}
}
states <- getStates(this)
# print(table(states, exclude=NULL))
uStates <- sort(unique(states), na.last=TRUE)
uStates <- na.omit(uStates)
# Default missing-value colors
naColor <- as.character(colorMap["NA"])
cols <- rep(naColor, times=length(states))
# Neutral states
if (is.element("0", names(colorMap))) {
idxs <- which(states == 0)
cols[idxs] <- colorMap["0"]
}
# Negative states
if (is.element("-", names(colorMap))) {
idxs <- which(states < 0)
cols[idxs] <- colorMap["-"]
}
# Positive states
if (is.element("+", names(colorMap))) {
idxs <- which(states > 0)
cols[idxs] <- colorMap["+"]
}
for (kk in seq_along(uStates)) {
state <- uStates[kk]
key <- sprintf("%s", state)
if (is.element(key, names(colorMap))) {
idxs <- which(states == state)
cols[idxs] <- colorMap[key]
}
} # for (kk ...)
cols
})
|
#' dxsmall
#'
#' A SummarizedExperiment with covariates and mRNA counts from 501 AML cases.
#'
#' @importFrom utils data
#'
#' @usage data(dxsmall)
#' @format A \code{SummarizedExperiment} object
#' @source This data was created using inst/extdata/mRNA/dxsmall.reassemble.R
#'
#' @examples
#'
#' data(dxsmall)
#' show(dxsmall)
#'
#' # if `iSEE` is installed:
#' if (require("iSEE")) {
#' rownames(dxsmall)
#' names(colData(dxsmall))
#' table(dxsmall$FusionGroup)
#' table(dxsmall$AgeGroup)
#' iSEEapp(dxsmall)
#' }
#'
#' # if `mclust` is installed:
#' if (require("mclust")) {
#'
#' fit1 <- Mclust(logcounts(dxsmall)["MECOM", dxsmall$FusionGroup == "MLL"])
#' plot(fit1, what="density", xlab="log(MECOM read counts + 1) in KMT2Ar")
#'
#' fit2 <- Mclust(t(logcounts(dxsmall)[ c("MECOM", "PRDM16"), ]), G=1:3)
#' plot(fit2, what="classification")
#' plot(fit2, what="uncertainty")
#'
#' }
#'
#'
"dxsmall"
#' Ziru
#'
#' A tibble with weight measurements from C57BL/6 male mice over 10 weeks.
#'
#' @importFrom utils data
#'
#' @usage data(Ziru)
#' @format A \code{tibble}
#' @source This data was created using inst/extdata/processZiru.R
#'
#' @examples
#'
#' data(Ziru)
#' show(Ziru)
#'
#' # if `ggplot2` is installed:
#' if (require("ggplot2") & require("reshape2")) {
#'
#' Ziru$mouse <- seq_len(nrow(Ziru))
#' melted <- melt(Ziru,
#' id.vars="mouse",
#' variable.name="week",
#' value.name="weight")
#' melted$week <- as.ordered(melted$week)
#'
#' ggplot(melted, aes(group=mouse, x=week, y=weight, color=mouse)) +
#' geom_point() +
#' geom_line() +
#' theme_minimal()
#'
#' }
#'
#'
"Ziru"
| /R/data.R | no_license | VanAndelInstitute/bifurcatoR | R | false | false | 1,728 | r | #' dxsmall
#'
#' A SummarizedExperiment with covariates and mRNA counts from 501 AML cases.
#'
#' @importFrom utils data
#'
#' @usage data(dxsmall)
#' @format A \code{SummarizedExperiment} object
#' @source This data was created using inst/extdata/mRNA/dxsmall.reassemble.R
#'
#' @examples
#'
#' data(dxsmall)
#' show(dxsmall)
#'
#' # if `iSEE` is installed:
#' if (require("iSEE")) {
#' rownames(dxsmall)
#' names(colData(dxsmall))
#' table(dxsmall$FusionGroup)
#' table(dxsmall$AgeGroup)
#' iSEEapp(dxsmall)
#' }
#'
#' # if `mclust` is installed:
#' if (require("mclust")) {
#'
#' fit1 <- Mclust(logcounts(dxsmall)["MECOM", dxsmall$FusionGroup == "MLL"])
#' plot(fit1, what="density", xlab="log(MECOM read counts + 1) in KMT2Ar")
#'
#' fit2 <- Mclust(t(logcounts(dxsmall)[ c("MECOM", "PRDM16"), ]), G=1:3)
#' plot(fit2, what="classification")
#' plot(fit2, what="uncertainty")
#'
#' }
#'
#'
"dxsmall"
#' Ziru
#'
#' A tibble with weight measurements from C57BL/6 male mice over 10 weeks.
#'
#' @importFrom utils data
#'
#' @usage data(Ziru)
#' @format A \code{tibble}
#' @source This data was created using inst/extdata/processZiru.R
#'
#' @examples
#'
#' data(Ziru)
#' show(Ziru)
#'
#' # if `ggplot2` is installed:
#' if (require("ggplot2") & require("reshape2")) {
#'
#' Ziru$mouse <- seq_len(nrow(Ziru))
#' melted <- melt(Ziru,
#' id.vars="mouse",
#' variable.name="week",
#' value.name="weight")
#' melted$week <- as.ordered(melted$week)
#'
#' ggplot(melted, aes(group=mouse, x=week, y=weight, color=mouse)) +
#' geom_point() +
#' geom_line() +
#' theme_minimal()
#'
#' }
#'
#'
"Ziru"
|
data.set <- "C:/Users/User/Documents/exer/knnwithR/dataset/setofdata.csv"
print("read dataset")
df <- read.csv(data.set, header=TRUE)
df
#random number
ran <- sample(1:nrow(df), 0.9 * nrow(df))
#normalization
norm <- function(x) {(x-min(x))/(max(x)-min(x))}
data_norm <- as.data.frame(lapply(df[,c(1,2,3,4)],norm))
summary(data_norm)
#extract training set
data_train <- data_norm[ran,]
#extract testing set
data_test <- data_norm[-ran,]
data_target_cat <- df[ran,5]
data_test_cat <- df[-ran,5]
##load the package class
library(class)
##run knn function
pr <- knn(data_train,data_test,cl=data_target_cat,k=2)
##create confusion matrix
tab <- table(pr,data_test_cat)
##this function divides the correct predictions by total number of predictions that tell us how accurate teh model is.
accuracy <- function(x){sum(diag(x)/(sum(rowSums(x)))) * 100}
accuracy(tab)
| /knn.R | permissive | dittrash/KNN-Accuracy-With-R | R | false | false | 875 | r | data.set <- "C:/Users/User/Documents/exer/knnwithR/dataset/setofdata.csv"
print("read dataset")
df <- read.csv(data.set, header=TRUE)
df
#random number
ran <- sample(1:nrow(df), 0.9 * nrow(df))
#normalization
norm <- function(x) {(x-min(x))/(max(x)-min(x))}
data_norm <- as.data.frame(lapply(df[,c(1,2,3,4)],norm))
summary(data_norm)
#extract training set
data_train <- data_norm[ran,]
#extract testing set
data_test <- data_norm[-ran,]
data_target_cat <- df[ran,5]
data_test_cat <- df[-ran,5]
##load the package class
library(class)
##run knn function
pr <- knn(data_train,data_test,cl=data_target_cat,k=2)
##create confusion matrix
tab <- table(pr,data_test_cat)
##this function divides the correct predictions by total number of predictions that tell us how accurate teh model is.
accuracy <- function(x){sum(diag(x)/(sum(rowSums(x)))) * 100}
accuracy(tab)
|
library(rasterWidget)
library(raster)
library(classInt)
library(devtools)
meuseFile <- system.file("external/test.grd", package="raster")
meuseRaster <- raster(meuseFile)
plot(meuseRaster)
# execute again after changes in rasterWidget code
setwd("/Users/christopherstephan/Documents/git/rasterWidget/rasterWidget")
devtools::install()
library(rasterWidget)
rasterWidget(meuseRaster, nclass=5, style='fisher', colors=c('yellow', 'orange', 'darkorange', 'red', 'darkred'))
rasterWidget(meuseRaster, 500, 800, nclass=5, colors=c("yellow", "orange", "darkorange", "red", "darkred"))
rasterWidget(meuseRaster, nclass=15, style='fisher', colors=c("#FF0000", "#EC1200", "#DA2400", "#C83600", "#B64800", "#A35B00", "#916D00", "#7F7F00", "#6D9100", "#5BA300", "#48B600", "#36C800", "#24DA00", "#12EC00", "#00FF00"))
rasterWidget(meuseRaster)
rasterWidget(meuseRaster, nclass=7)
r <- raster()
r
rasterWidget(r, nclass=5, style='fisher', colors=c('yellow', 'orange', 'darkorange', 'red', 'darkred'))
# medium sized scene (300 x 300)
r1 <- raster(ncol=300, nrow=300)
r1[] <- rnorm(ncell(r1), 0, 50)
Sys.time()
# [1] "2016-07-17 20:54:10 CEST"
rasterWidget(r1, nclass=5, style="fisher", colors=c("yellow", "orange", "darkorange", "red", "darkred"))
Sys.time()
# [1] "2016-07-17 20:54:10 CEST"
#Large Landsat7 scene (593 x 653)
download.file(url = 'https://github.com/GeoScripting-WUR/IntroToRaster/raw/gh-pages/data/gewata.zip', destfile = 'gewata.zip', method = 'auto')
unzip('gewata.zip')
gewata <- raster('LE71700552001036SGS00_SR_Gewata_INT1U.tif')
Sys.time()
# [1] "2016-07-17 20:41:25 CEST"
rasterWidget(gewata, nclass=5, style="fisher", colors=c("yellow", "orange", "darkorange", "red", "darkred"))
Sys.time()
# [1] "2016-07-17 20:49:56 CEST"
# debug
r = na.omit(values(meuseRaster))
classIntervals(r)
nclass.Sturges(r)
nclass.scott(r)
nclass.FD(r)
# tests
| /demoRasterWidget.R | no_license | ChristopherStephan/rasterWidget | R | false | false | 1,863 | r | library(rasterWidget)
library(raster)
library(classInt)
library(devtools)
meuseFile <- system.file("external/test.grd", package="raster")
meuseRaster <- raster(meuseFile)
plot(meuseRaster)
# execute again after changes in rasterWidget code
setwd("/Users/christopherstephan/Documents/git/rasterWidget/rasterWidget")
devtools::install()
library(rasterWidget)
rasterWidget(meuseRaster, nclass=5, style='fisher', colors=c('yellow', 'orange', 'darkorange', 'red', 'darkred'))
rasterWidget(meuseRaster, 500, 800, nclass=5, colors=c("yellow", "orange", "darkorange", "red", "darkred"))
rasterWidget(meuseRaster, nclass=15, style='fisher', colors=c("#FF0000", "#EC1200", "#DA2400", "#C83600", "#B64800", "#A35B00", "#916D00", "#7F7F00", "#6D9100", "#5BA300", "#48B600", "#36C800", "#24DA00", "#12EC00", "#00FF00"))
rasterWidget(meuseRaster)
rasterWidget(meuseRaster, nclass=7)
r <- raster()
r
rasterWidget(r, nclass=5, style='fisher', colors=c('yellow', 'orange', 'darkorange', 'red', 'darkred'))
# medium sized scene (300 x 300)
r1 <- raster(ncol=300, nrow=300)
r1[] <- rnorm(ncell(r1), 0, 50)
Sys.time()
# [1] "2016-07-17 20:54:10 CEST"
rasterWidget(r1, nclass=5, style="fisher", colors=c("yellow", "orange", "darkorange", "red", "darkred"))
Sys.time()
# [1] "2016-07-17 20:54:10 CEST"
#Large Landsat7 scene (593 x 653)
download.file(url = 'https://github.com/GeoScripting-WUR/IntroToRaster/raw/gh-pages/data/gewata.zip', destfile = 'gewata.zip', method = 'auto')
unzip('gewata.zip')
gewata <- raster('LE71700552001036SGS00_SR_Gewata_INT1U.tif')
Sys.time()
# [1] "2016-07-17 20:41:25 CEST"
rasterWidget(gewata, nclass=5, style="fisher", colors=c("yellow", "orange", "darkorange", "red", "darkred"))
Sys.time()
# [1] "2016-07-17 20:49:56 CEST"
# debug
r = na.omit(values(meuseRaster))
classIntervals(r)
nclass.Sturges(r)
nclass.scott(r)
nclass.FD(r)
# tests
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/EPFR.r
\name{combinations.next}
\alias{combinations.next}
\title{combinations.next}
\usage{
combinations.next(x)
}
\arguments{
\item{x}{= a logical vector}
}
\description{
returns the next combination in dictionary order
}
\seealso{
Other combinations: \code{\link{combinations.ex.int}},
\code{\link{combinations.to.int}},
\code{\link{combinations}}
}
\keyword{combinations.next}
| /man/combinations.next.Rd | no_license | vsrimurthy/EPFR | R | false | true | 462 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/EPFR.r
\name{combinations.next}
\alias{combinations.next}
\title{combinations.next}
\usage{
combinations.next(x)
}
\arguments{
\item{x}{= a logical vector}
}
\description{
returns the next combination in dictionary order
}
\seealso{
Other combinations: \code{\link{combinations.ex.int}},
\code{\link{combinations.to.int}},
\code{\link{combinations}}
}
\keyword{combinations.next}
|
########################################################################
# Radial Support Vector Machine (SVM) Classification
# Author: Arielle S. Selya
# Date posted: 2/28/2021
########################################################################
load("Managed Sanford Data.RData")
require(e1071)
# Set parameters to loop over
params <- list(cost = c(0.1, 0.5, 1, 5, 10, 25, 50), gamma=c(0.0001, 0.001, 0.003, 0.007, 0.1, 0.5, 1))
params <- do.call(expand.grid, params)
# Set variables to record outer-loop scores and best parms
outer.scores.train <- NULL
outer.scores.test <- NULL
outer.bestparams <- NULL
# Create empty tables/arrays to accumulate results across CV iterations
outer.train.table.all <- array(0, dim=c(2,2))
outer.test.table.all <- array(0, dim=c(2,2))
train.cell00 <- NULL
train.cell01 <- NULL
train.cell10 <- NULL
train.cell11 <- NULL
test.cell00 <- NULL
test.cell01 <- NULL
test.cell10 <- NULL
test.cell11 <- NULL
cv.scores <- NULL
# Split into 5 outer folds
outer.ind <- sample(1:dim(Sanford.data.diabetes)[1])
n.outer.folds <- length(outer.ind)/5
outer.ind <- split(outer.ind, ceiling(seq_along(outer.ind)/n.outer.folds))
# Loop over CV iterations
for(o in 1:5){ # Outer loop CV
# Create empty arrays for best parameters
best.parms <- NULL
best.score <- -100
# Create empty tables for accumulation
train.table.all <- array(0, dim=c(2,2))
test.table.all <- array(0, dim=c(2,2))
# Define train/test samples for outer loop
Sanford.data.diabetes.outer <- Sanford.data.diabetes[-outer.ind[[o]],]
for(p in 1:nrow(params)){ # Parameter loop
# Split into 5 inner folds
inner.ind <- sample(1:dim(Sanford.data.diabetes.outer)[1])
n.inner.folds <- length(inner.ind)/5
inner.ind <- split(inner.ind, ceiling(seq_along(inner.ind)/n.inner.folds))
for(i in 1:5){ # Inner loop
# Train model
svm.result <- svm(Unplanned.Visits.bin ~ ., data=Sanford.data.diabetes.outer[-inner.ind[[i]],], type = "C-classification",
kernel="radial", cost=params$cost[p], gamma=params$gamma[p])
# Get training and testing results
train.result <- predict(svm.result)
test.result <- predict(svm.result, newdata=Sanford.data.diabetes.outer[inner.ind[[i]],])
train.table <- prop.table(table(Sanford.data.diabetes.outer$Unplanned.Visits.bin[-inner.ind[[i]]], train.result),1)
test.table <- prop.table(table(Sanford.data.diabetes.outer$Unplanned.Visits.bin[inner.ind[[i]]], test.result),1)
# Prevent single-column tables
train.result <- as.factor(train.result)
test.result <- as.factor(test.result)
if(length(levels(train.result))==1){
if(levels(train.result)=="1") levels(train.result) <- c("1","0")
else if(levels(train.result)=="0") levels(train.result) <- c("0","1")
}
if(length(levels(test.result))==1){
if(levels(test.result)=="1") levels(test.result) <- c("1","0")
else if(levels(test.result)=="0") levels(test.result) <- c("0","1")
}
train.result <- factor(train.result, levels=c("0","1"))
test.result <- factor(test.result, levels=c("0","1"))
# Get tables of real vs. predicted classes
train.table <- prop.table(table(Sanford.data.diabetes.outer$Unplanned.Visits.bin[-inner.ind[[i]]], train.result),1)
test.table <- prop.table(table(Sanford.data.diabetes.outer$Unplanned.Visits.bin[inner.ind[[i]]], test.result),1)
# Increment tables
for(j in 1:dim(train.table)[1])
for(k in 1:dim(train.table)[2])
train.table.all[j,k] <- train.table.all[j,k] + train.table[j,k]
for(j in 1:dim(test.table)[1])
for(k in 1:dim(test.table)[2])
test.table.all[j,k] <- test.table.all[j,k] + test.table[j,k]
# Increment CV scores (i.e. sensitivity + specificity)
cv.scores <- c(cv.scores, (test.table[1,1] + test.table[2,2])/2)
}
# Compute mean CV scores over inner folds
mean.score <- mean(cv.scores)
if(mean.score > best.score){
best.score <- mean.score
best.params <- params[p,]
}
}
# Train and test classifier using best parameters from inner loop, on outer data set
svm.result <- svm(Unplanned.Visits.bin ~ ., data=Sanford.data.diabetes[-outer.ind[[o]],], type = "C-classification",
kernel="radial", cost=best.params$cost, gamma=best.params$gamma)
train.result <- predict(svm.result)
test.result <- predict(svm.result, newdata=Sanford.data.diabetes[outer.ind[[o]],])
train.table <- prop.table(table(Sanford.data.diabetes$Unplanned.Visits.bin[-outer.ind[[o]]], train.result),1)
test.table <- prop.table(table(Sanford.data.diabetes$Unplanned.Visits.bin[outer.ind[[o]]], test.result),1)
# Increment tables
for(j in 1:dim(train.table)[1])
for(k in 1:dim(train.table)[2])
outer.train.table.all[j,k] <- outer.train.table.all[j,k] + train.table[j,k]
for(j in 1:dim(test.table)[1])
for(k in 1:dim(test.table)[2])
outer.test.table.all[j,k] <- outer.test.table.all[j,k] + test.table[j,k]
print(prop.table(outer.train.table.all, 1))
print(prop.table(outer.test.table.all, 1))
# Increment outer scores
outer.scores.train <- c(outer.scores.train, (train.table[1,1] + train.table[2,2])/2)
outer.scores.test <- c(outer.scores.test, (test.table[1,1] + test.table[2,2])/2)
outer.bestparams <- rbind(outer.bestparams, best.params)
# Increment arrays for variance
train.cell00 <- c(train.cell00, train.table[1,1])
train.cell01 <- c(train.cell01, train.table[1,2])
train.cell10 <- c(train.cell10, train.table[2,1])
train.cell11 <- c(train.cell11, train.table[2,2])
test.cell00 <- c(test.cell00, test.table[1,1])
test.cell01 <- c(test.cell01, test.table[1,2])
test.cell10 <- c(test.cell10, test.table[2,1])
test.cell11 <- c(test.cell11, test.table[2,2])
print(o)
}
# Examine outer-loop CV scores and best parameters
mean(outer.scores.train)
mean(outer.scores.test)
sd(outer.scores.test)
apply(outer.bestparams,2,"mean")
# Create averaged confusion matrix across all CV iterations
train.table.all <- train.table.all/i
test.table.all <- test.table.all/i
SVM.R.train <- train.table.all
SVM.R.test <- test.table.all
SVM.R.train.CR <- train.cell00 # CR = correct rejections
SVM.R.train.FA <- train.cell01 # FA = false alarms
SVM.R.train.M <- train.cell10 # M = misses
SVM.R.train.H <- train.cell11 # H = hits
SVM.R.test.CR <- test.cell00
SVM.R.test.FA <- test.cell01
SVM.R.test.M <- test.cell10
SVM.R.test.H <- test.cell11
# Get means and sd's of each cell
# Training set
summary(SVM.R.train.CR)
sd(SVM.R.train.CR)
summary(SVM.R.train.FA)
sd(SVM.R.train.FA)
summary(SVM.R.train.M)
sd(SVM.R.train.M)
summary(SVM.R.train.H)
sd(SVM.R.train.H)
# Test set
summary(SVM.R.test.CR)
sd(SVM.R.test.CR)
summary(SVM.R.test.FA)
sd(SVM.R.test.FA)
summary(SVM.R.test.M)
sd(SVM.R.test.M)
summary(SVM.R.test.H)
sd(SVM.R.test.H)
| /SVM Radial Diabetes Predictive Model R2 final.R | no_license | ArielleSelya/Diabetes-Predictive-Model | R | false | false | 7,144 | r | ########################################################################
# Radial Support Vector Machine (SVM) Classification
# Author: Arielle S. Selya
# Date posted: 2/28/2021
########################################################################
load("Managed Sanford Data.RData")
require(e1071)
# Set parameters to loop over
params <- list(cost = c(0.1, 0.5, 1, 5, 10, 25, 50), gamma=c(0.0001, 0.001, 0.003, 0.007, 0.1, 0.5, 1))
params <- do.call(expand.grid, params)
# Set variables to record outer-loop scores and best parms
outer.scores.train <- NULL
outer.scores.test <- NULL
outer.bestparams <- NULL
# Create empty tables/arrays to accumulate results across CV iterations
outer.train.table.all <- array(0, dim=c(2,2))
outer.test.table.all <- array(0, dim=c(2,2))
train.cell00 <- NULL
train.cell01 <- NULL
train.cell10 <- NULL
train.cell11 <- NULL
test.cell00 <- NULL
test.cell01 <- NULL
test.cell10 <- NULL
test.cell11 <- NULL
cv.scores <- NULL
# Split into 5 outer folds
outer.ind <- sample(1:dim(Sanford.data.diabetes)[1])
n.outer.folds <- length(outer.ind)/5
outer.ind <- split(outer.ind, ceiling(seq_along(outer.ind)/n.outer.folds))
# Loop over CV iterations
for(o in 1:5){ # Outer loop CV
# Create empty arrays for best parameters
best.parms <- NULL
best.score <- -100
# Create empty tables for accumulation
train.table.all <- array(0, dim=c(2,2))
test.table.all <- array(0, dim=c(2,2))
# Define train/test samples for outer loop
Sanford.data.diabetes.outer <- Sanford.data.diabetes[-outer.ind[[o]],]
for(p in 1:nrow(params)){ # Parameter loop
# Split into 5 inner folds
inner.ind <- sample(1:dim(Sanford.data.diabetes.outer)[1])
n.inner.folds <- length(inner.ind)/5
inner.ind <- split(inner.ind, ceiling(seq_along(inner.ind)/n.inner.folds))
for(i in 1:5){ # Inner loop
# Train model
svm.result <- svm(Unplanned.Visits.bin ~ ., data=Sanford.data.diabetes.outer[-inner.ind[[i]],], type = "C-classification",
kernel="radial", cost=params$cost[p], gamma=params$gamma[p])
# Get training and testing results
train.result <- predict(svm.result)
test.result <- predict(svm.result, newdata=Sanford.data.diabetes.outer[inner.ind[[i]],])
train.table <- prop.table(table(Sanford.data.diabetes.outer$Unplanned.Visits.bin[-inner.ind[[i]]], train.result),1)
test.table <- prop.table(table(Sanford.data.diabetes.outer$Unplanned.Visits.bin[inner.ind[[i]]], test.result),1)
# Prevent single-column tables
train.result <- as.factor(train.result)
test.result <- as.factor(test.result)
if(length(levels(train.result))==1){
if(levels(train.result)=="1") levels(train.result) <- c("1","0")
else if(levels(train.result)=="0") levels(train.result) <- c("0","1")
}
if(length(levels(test.result))==1){
if(levels(test.result)=="1") levels(test.result) <- c("1","0")
else if(levels(test.result)=="0") levels(test.result) <- c("0","1")
}
train.result <- factor(train.result, levels=c("0","1"))
test.result <- factor(test.result, levels=c("0","1"))
# Get tables of real vs. predicted classes
train.table <- prop.table(table(Sanford.data.diabetes.outer$Unplanned.Visits.bin[-inner.ind[[i]]], train.result),1)
test.table <- prop.table(table(Sanford.data.diabetes.outer$Unplanned.Visits.bin[inner.ind[[i]]], test.result),1)
# Increment tables
for(j in 1:dim(train.table)[1])
for(k in 1:dim(train.table)[2])
train.table.all[j,k] <- train.table.all[j,k] + train.table[j,k]
for(j in 1:dim(test.table)[1])
for(k in 1:dim(test.table)[2])
test.table.all[j,k] <- test.table.all[j,k] + test.table[j,k]
# Increment CV scores (i.e. sensitivity + specificity)
cv.scores <- c(cv.scores, (test.table[1,1] + test.table[2,2])/2)
}
# Compute mean CV scores over inner folds
mean.score <- mean(cv.scores)
if(mean.score > best.score){
best.score <- mean.score
best.params <- params[p,]
}
}
# Train and test classifier using best parameters from inner loop, on outer data set
svm.result <- svm(Unplanned.Visits.bin ~ ., data=Sanford.data.diabetes[-outer.ind[[o]],], type = "C-classification",
kernel="radial", cost=best.params$cost, gamma=best.params$gamma)
train.result <- predict(svm.result)
test.result <- predict(svm.result, newdata=Sanford.data.diabetes[outer.ind[[o]],])
train.table <- prop.table(table(Sanford.data.diabetes$Unplanned.Visits.bin[-outer.ind[[o]]], train.result),1)
test.table <- prop.table(table(Sanford.data.diabetes$Unplanned.Visits.bin[outer.ind[[o]]], test.result),1)
# Increment tables
for(j in 1:dim(train.table)[1])
for(k in 1:dim(train.table)[2])
outer.train.table.all[j,k] <- outer.train.table.all[j,k] + train.table[j,k]
for(j in 1:dim(test.table)[1])
for(k in 1:dim(test.table)[2])
outer.test.table.all[j,k] <- outer.test.table.all[j,k] + test.table[j,k]
print(prop.table(outer.train.table.all, 1))
print(prop.table(outer.test.table.all, 1))
# Increment outer scores
outer.scores.train <- c(outer.scores.train, (train.table[1,1] + train.table[2,2])/2)
outer.scores.test <- c(outer.scores.test, (test.table[1,1] + test.table[2,2])/2)
outer.bestparams <- rbind(outer.bestparams, best.params)
# Increment arrays for variance
train.cell00 <- c(train.cell00, train.table[1,1])
train.cell01 <- c(train.cell01, train.table[1,2])
train.cell10 <- c(train.cell10, train.table[2,1])
train.cell11 <- c(train.cell11, train.table[2,2])
test.cell00 <- c(test.cell00, test.table[1,1])
test.cell01 <- c(test.cell01, test.table[1,2])
test.cell10 <- c(test.cell10, test.table[2,1])
test.cell11 <- c(test.cell11, test.table[2,2])
print(o)
}
# Examine outer-loop CV scores and best parameters
mean(outer.scores.train)
mean(outer.scores.test)
sd(outer.scores.test)
apply(outer.bestparams,2,"mean")
# Create averaged confusion matrix across all CV iterations
train.table.all <- train.table.all/i
test.table.all <- test.table.all/i
SVM.R.train <- train.table.all
SVM.R.test <- test.table.all
SVM.R.train.CR <- train.cell00 # CR = correct rejections
SVM.R.train.FA <- train.cell01 # FA = false alarms
SVM.R.train.M <- train.cell10 # M = misses
SVM.R.train.H <- train.cell11 # H = hits
SVM.R.test.CR <- test.cell00
SVM.R.test.FA <- test.cell01
SVM.R.test.M <- test.cell10
SVM.R.test.H <- test.cell11
# Get means and sd's of each cell
# Training set
summary(SVM.R.train.CR)
sd(SVM.R.train.CR)
summary(SVM.R.train.FA)
sd(SVM.R.train.FA)
summary(SVM.R.train.M)
sd(SVM.R.train.M)
summary(SVM.R.train.H)
sd(SVM.R.train.H)
# Test set
summary(SVM.R.test.CR)
sd(SVM.R.test.CR)
summary(SVM.R.test.FA)
sd(SVM.R.test.FA)
summary(SVM.R.test.M)
sd(SVM.R.test.M)
summary(SVM.R.test.H)
sd(SVM.R.test.H)
|
boots = 500
DGP = 1
load("func_forms1.RData")
if (DGP == 1) {
func_list = func_formsYM$func_listYMmis
forms = func_formsYM
}
if (DGP == 2) {
func_list = func_formsYZ$func_listYZmis
forms = func_formsYZ
}
if (DGP == 3) {
func_list = func_formsYS$func_listYSmis
forms = func_formsYS
}
form_name = paste0("forms", type)
forms = forms[[form_name]]
sim_kara = function(n, forms, truth, B = 500) {
data = gendata.SDEtransport(n,
f_W = truth$f_W,
f_S = truth$f_S,
f_A = truth$f_A,
f_Z = truth$f_Z,
f_M = truth$f_M,
f_Y = truth$f_Y)
res = SDE_glm_seq(data, forms, RCT = 0.5, transport = TRUE,
pooled = FALSE, gstar_S = 1, truth, B = 500)
res_eff = SDE_glm_eff_seq(data, forms, RCT = 0.5, transport = TRUE,
pooled = FALSE, gstar_S = 1, truth, B = 500)
return(list(res= res, res_eff = res_eff))
}
B = 1000
n=100
res100 = mclapply(1:B, FUN = function(x) sim_kara(n=100, forms=forms, truth=func_list, B = boots),
mc.cores = getOption("mc.cores", cores))
B = 1000
n=500
res500 = mclapply(1:B, FUN = function(x) sim_kara(n=500, forms=forms, truth=func_list, B = boots),
mc.cores = getOption("mc.cores", cores))
B = 1000
n=5000
res5000 = mclapply(1:B, FUN = function(x) sim_kara(n=5000, forms=forms, truth=func_list, B = boots),
mc.cores = getOption("mc.cores", cores))
| /scripts_new_eff/master_eff.R | no_license | jlstiles/SDEtransportsim | R | false | false | 1,654 | r |
boots = 500
DGP = 1
load("func_forms1.RData")
if (DGP == 1) {
func_list = func_formsYM$func_listYMmis
forms = func_formsYM
}
if (DGP == 2) {
func_list = func_formsYZ$func_listYZmis
forms = func_formsYZ
}
if (DGP == 3) {
func_list = func_formsYS$func_listYSmis
forms = func_formsYS
}
form_name = paste0("forms", type)
forms = forms[[form_name]]
sim_kara = function(n, forms, truth, B = 500) {
data = gendata.SDEtransport(n,
f_W = truth$f_W,
f_S = truth$f_S,
f_A = truth$f_A,
f_Z = truth$f_Z,
f_M = truth$f_M,
f_Y = truth$f_Y)
res = SDE_glm_seq(data, forms, RCT = 0.5, transport = TRUE,
pooled = FALSE, gstar_S = 1, truth, B = 500)
res_eff = SDE_glm_eff_seq(data, forms, RCT = 0.5, transport = TRUE,
pooled = FALSE, gstar_S = 1, truth, B = 500)
return(list(res= res, res_eff = res_eff))
}
B = 1000
n=100
res100 = mclapply(1:B, FUN = function(x) sim_kara(n=100, forms=forms, truth=func_list, B = boots),
mc.cores = getOption("mc.cores", cores))
B = 1000
n=500
res500 = mclapply(1:B, FUN = function(x) sim_kara(n=500, forms=forms, truth=func_list, B = boots),
mc.cores = getOption("mc.cores", cores))
B = 1000
n=5000
res5000 = mclapply(1:B, FUN = function(x) sim_kara(n=5000, forms=forms, truth=func_list, B = boots),
mc.cores = getOption("mc.cores", cores))
|
#' @title Get the currently open exercise
#'
#' @description If the currently open exercise (the exercise which is located at the current
#' working directory) is a valid TMC R exercise project, return it as a \code{c(name = path)}
#' vector.
#'
#' @usage exercisePathFromWd()
#'
#' @details The exercise is a valid TMC R exercise project if it contains the \code{R} and
#' \code{tests/testthat} folders and is located in the TMC R projects directory.
#'
#' @return Exercise as a named vector: \code{c(name = path)}. If the current working directory
#' isn't a valid exercise project folder, returns \code{c(" ", " ")}.
#'
#' @seealso \code{\link[base]{basename}}, \code{\link[base]{getwd}}, \code{\link{get_projects_folder}}
#' \code{\link{pathIsExercise}}, \code{\link{getExerciseName}}, \code{\link[stats]{setNames}}
# Returns the exercise that is selected in wd.
# If wd doesn't contain a valid exercise returns c(" ", " ").
# Returns exercise as a named vector: c(name = path)
exercisePathFromWd <- function() {
  # Removed unused locals `dirname`/`basename` which shadowed the base
  # functions of the same names; getwd() is now called once.
  wd <- getwd()
  # The projects folder is matched as a regex prefix; note it is not
  # regex-escaped, so regex metacharacters in the path could mis-match.
  wdIsInProjectsDir <- grepl(paste0("^", get_projects_folder()), wd)
  # Current wd is not an exercise (a folder in exercises_path)
  if (!pathIsExercise(wd) || !wdIsInProjectsDir) {
    return(c(" " = " "))
  }
  # Named vector: c(<exercise name> = <exercise path>)
  setNames(wd, getExerciseName(wd))
}
#' @title Get a list of downloaded exercises
#'
#' @description Get a list of downloaded exercises in named vector \code{c(name = path)} format.
#'
#' @usage downloadedExercisesPaths()
#'
#' @details Searches for downloaded exercise projects at the TMC R project folder.
#'
#' @return List of downloaded exercises in named vector \code{c(name = path)} format.
#'
#' @seealso \code{\link{findExercisesFromPath}}, \code{\link{get_projects_folder}},
#' \code{\link{getExerciseName}}, \code{\link[stats]{setNames}}
# Returns a list of downloaded exercises as named vector format.
# For example: c(name = path, name1 = path1, name2 = path2)
downloadedExercisesPaths <- function() {
  exercisePaths <- findExercisesFromPath(get_projects_folder())
  # vapply avoids growing a vector inside a loop and guarantees a character
  # result even when no exercises have been downloaded.
  exerciseNames <- vapply(exercisePaths, getExerciseName, character(1),
                          USE.NAMES = FALSE)
  # Named vector format: c(name = path, name1 = path1, ...)
  setNames(exercisePaths, exerciseNames)
}
#' @title Source all exercise R files.
#'
#' @description Source all exercise \code{R} files.
#'
#' @usage sourceExercise(exercisePath)
#'
#' @param exercisePath File path to the exercise project directory.
#'
#' @details The \code{R} files contained in the exercise
#' directory's \code{R} folder are sourced with \code{print.eval} on.
#'
#' @seealso \code{\link[base]{environment}}, \code{\link[base]{list.files}},
#' \code{\link[base]{file.path}}, \code{\link[base]{cat}}, \code{\link[base]{source}}
# Sources all files in exercise with print.eval on.
sourceExercise <- function(exercisePath) {
  # One shared environment so later files can see earlier definitions,
  # without polluting the caller's global environment.
  eval_env <- new.env()
  r_files <- list.files(path = file.path(exercisePath, "R"),
                        pattern = "[.]R$", full.names = TRUE)
  for (r_file in r_files) {
    cat("Sourcing file: ", r_file, "\n\n")
    # print.eval = TRUE echoes the value of each top-level expression.
    source(r_file, local = eval_env, print.eval = TRUE)
  }
}
#' @title Get paths to exercises
#'
#' @description Recursively searches for exercise projects in the given file path.
#'
#' @usage findExercisesFromPath(path)
#'
#' @param path Path to the directory where the exercises are searched from.
#'
#' @return A vector of full file paths to the found exercises. Always contains an
#' empty \code{""} path.
#'
#' @seealso \code{\link[base]{list.dirs}}, \code{\link{pathIsExercise}}
# Finds exercises from a path recursively. Returns the full path of found exercises
# as a vector. Contains path empty path "" always.
findExercisesFromPath <- function(path) {
  candidate_dirs <- list.dirs(path = path, full.names = TRUE, recursive = TRUE)
  # Keep only directories that look like TMC exercise projects.
  is_exercise <- vapply(candidate_dirs, pathIsExercise, logical(1),
                        USE.NAMES = FALSE)
  # The leading "" element is part of the documented return contract.
  c("", candidate_dirs[is_exercise])
}
#' @title Determine if the given file path leads to an exercise project directory
#'
#' @description Determine if the given file path leads to an exercise project
#' directory.
#'
#' @usage pathIsExercise(path)
#'
#' @param path File path to be checked.
#'
#' @details Determines if the given file path leads to an exercise project path by
#' checking if the path leads to a directory which contains the \code{R} and
#' \code{tests/testthat} folders.
#'
#' @return \code{TRUE} if the file path leads to an exercise project directory.
#' \code{FALSE otherwise}.
#'
#' @seealso \code{\link[base]{file.path}}, \code{\link[base]{file.info}}
# Determines if a path is an exercise
pathIsExercise <- function(path) {
  # An exercise project must contain both an R/ folder and tests/testthat/.
  R_dir <- file.path(path, "R")
  testthat_dir <- file.path(path, "tests", "testthat")
  # isTRUE() guards against the NA that file.info()$isdir yields for missing
  # paths; && is the correct scalar, short-circuiting operator here (the
  # original used the elementwise &).
  isTRUE(file.info(R_dir)$isdir) && isTRUE(file.info(testthat_dir)$isdir)
}
#' @title Get the exercise's name
#'
#' @description Get the name of the exercise located at the given file path.
#'
#' @usage getExerciseName(exercisePath)
#'
#' @param exercisePath File path to the exercise project directory.
#'
#' @details Reads the \code{.metadata.json} file for the exercise name.
#'
#' @return Exercise's name read from \code{.metadata.json}. If the file doesn't
#' exist or if the file doesn't have the exercise name, returns the name of the
#' path's basename (the final directory/file in the file path).
#'
#' @seealso \code{\link{get_exercise_metadata}}, \code{\link[base]{basename}}
# Read's exercises name from metadata file. If metadata file doesn't exist
# returns the name of the path's basename.
getExerciseName <- function(exercisePath) {
  metadata <- get_exercise_metadata(exercisePath)
  # NULL-safe extraction: NULL$exercise_name is NULL in R, so this covers
  # both "no metadata" and "metadata without a name".
  name <- metadata$exercise_name
  if (is.null(name)) {
    # Fall back to the directory's basename.
    basename(exercisePath)
  } else {
    name
  }
}
| /tmcrstudioaddin/R/Exercises.R | no_license | RTMC/tmc-rstudio | R | false | false | 5,885 | r | #' @title Get the currently open exercise
#'
#' @description If the currently open exercise (the exercise which is located at the current
#' working directory) is a valid TMC R exercise project, return it as a \code{c(name = path)}
#' vector.
#'
#' @usage exercisePathFromWd()
#'
#' @details The exercise is a valid TMC R exercise project if it contains the \code{R} and
#' \code{tests/testthat} folders and is located in the TMC R projects directory.
#'
#' @return Exercise as a named vector: \code{c(name = path)}. If the current working directory
#' isn't a valid exercise project folder, returns \code{c(" ", " ")}.
#'
#' @seealso \code{\link[base]{basename}}, \code{\link[base]{getwd}}, \code{\link{get_projects_folder}}
#' \code{\link{pathIsExercise}}, \code{\link{getExerciseName}}, \code{\link[stats]{setNames}}
# Returns the exercise that is selected in wd.
# If wd doesn't contain a valid exercise returns c(" ", " ").
# Returns exercise as a named vector: c(name = path)
exercisePathFromWd <- function() {
  # Removed unused locals `dirname`/`basename` which shadowed the base
  # functions of the same names; getwd() is now called once.
  wd <- getwd()
  # The projects folder is matched as a regex prefix; note it is not
  # regex-escaped, so regex metacharacters in the path could mis-match.
  wdIsInProjectsDir <- grepl(paste0("^", get_projects_folder()), wd)
  # Current wd is not an exercise (a folder in exercises_path)
  if (!pathIsExercise(wd) || !wdIsInProjectsDir) {
    return(c(" " = " "))
  }
  # Named vector: c(<exercise name> = <exercise path>)
  setNames(wd, getExerciseName(wd))
}
#' @title Get a list of downloaded exercises
#'
#' @description Get a list of downloaded exercises in named vector \code{c(name = path)} format.
#'
#' @usage downloadedExercisesPaths()
#'
#' @details Searches for downloaded exercise projects at the TMC R project folder.
#'
#' @return List of downloaded exercises in named vector \code{c(name = path)} format.
#'
#' @seealso \code{\link{findExercisesFromPath}}, \code{\link{get_projects_folder}},
#' \code{\link{getExerciseName}}, \code{\link[stats]{setNames}}
# Returns a list of downloaded exercises as named vector format.
# For example: c(name = path, name1 = path1, name2 = path2)
downloadedExercisesPaths <- function() {
  exercisePaths <- findExercisesFromPath(get_projects_folder())
  # vapply avoids growing a vector inside a loop and guarantees a character
  # result even when no exercises have been downloaded.
  exerciseNames <- vapply(exercisePaths, getExerciseName, character(1),
                          USE.NAMES = FALSE)
  # Named vector format: c(name = path, name1 = path1, ...)
  setNames(exercisePaths, exerciseNames)
}
#' @title Source all exercise R files.
#'
#' @description Source all exercise \code{R} files.
#'
#' @usage sourceExercise(exercisePath)
#'
#' @param exercisePath File path to the exercise project directory.
#'
#' @details The \code{R} files contained in the exercise
#' directory's \code{R} folder are sourced with \code{print.eval} on.
#'
#' @seealso \code{\link[base]{environment}}, \code{\link[base]{list.files}},
#' \code{\link[base]{file.path}}, \code{\link[base]{cat}}, \code{\link[base]{source}}
# Sources all files in exercise with print.eval on.
sourceExercise <- function(exercisePath) {
  # One shared environment so later files can see earlier definitions,
  # without polluting the caller's global environment.
  eval_env <- new.env()
  r_files <- list.files(path = file.path(exercisePath, "R"),
                        pattern = "[.]R$", full.names = TRUE)
  for (r_file in r_files) {
    cat("Sourcing file: ", r_file, "\n\n")
    # print.eval = TRUE echoes the value of each top-level expression.
    source(r_file, local = eval_env, print.eval = TRUE)
  }
}
#' @title Get paths to exercises
#'
#' @description Recursively searches for exercise projects in the given file path.
#'
#' @usage findExercisesFromPath(path)
#'
#' @param path Path to the directory where the exercises are searched from.
#'
#' @return A vector of full file paths to the found exercises. Always contains an
#' empty \code{""} path.
#'
#' @seealso \code{\link[base]{list.dirs}}, \code{\link{pathIsExercise}}
# Finds exercises from a path recursively. Returns the full path of found exercises
# as a vector. Contains path empty path "" always.
findExercisesFromPath <- function(path) {
  candidate_dirs <- list.dirs(path = path, full.names = TRUE, recursive = TRUE)
  # Keep only directories that look like TMC exercise projects.
  is_exercise <- vapply(candidate_dirs, pathIsExercise, logical(1),
                        USE.NAMES = FALSE)
  # The leading "" element is part of the documented return contract.
  c("", candidate_dirs[is_exercise])
}
#' @title Determine if the given file path leads to an exercise project directory
#'
#' @description Determine if the given file path leads to an exercise project
#' directory.
#'
#' @usage pathIsExercise(path)
#'
#' @param path File path to be checked.
#'
#' @details Determines if the given file path leads to an exercise project path by
#' checking if the path leads to a directory which contains the \code{R} and
#' \code{tests/testthat} folders.
#'
#' @return \code{TRUE} if the file path leads to an exercise project directory.
#' \code{FALSE otherwise}.
#'
#' @seealso \code{\link[base]{file.path}}, \code{\link[base]{file.info}}
# Determines if a path is an exercise
pathIsExercise <- function(path) {
  # An exercise project must contain both an R/ folder and tests/testthat/.
  R_dir <- file.path(path, "R")
  testthat_dir <- file.path(path, "tests", "testthat")
  # isTRUE() guards against the NA that file.info()$isdir yields for missing
  # paths; && is the correct scalar, short-circuiting operator here (the
  # original used the elementwise &).
  isTRUE(file.info(R_dir)$isdir) && isTRUE(file.info(testthat_dir)$isdir)
}
#' @title Get the exercise's name
#'
#' @description Get the name of the exercise located at the given file path.
#'
#' @usage getExerciseName(exercisePath)
#'
#' @param exercisePath File path to the exercise project directory.
#'
#' @details Reads the \code{.metadata.json} file for the exercise name.
#'
#' @return Exercise's name read from \code{.metadata.json}. If the file doesn't
#' exist or if the file doesn't have the exercise name, returns the name of the
#' path's basename (the final directory/file in the file path).
#'
#' @seealso \code{\link{get_exercise_metadata}}, \code{\link[base]{basename}}
# Read's exercises name from metadata file. If metadata file doesn't exist
# returns the name of the path's basename.
getExerciseName <- function(exercisePath) {
  metadata <- get_exercise_metadata(exercisePath)
  # NULL-safe extraction: NULL$exercise_name is NULL in R, so this covers
  # both "no metadata" and "metadata without a name".
  name <- metadata$exercise_name
  if (is.null(name)) {
    # Fall back to the directory's basename.
    basename(exercisePath)
  } else {
    name
  }
}
|
library(soilDB)
### Name: fetchNASISWebReport
### Title: Extract component tables from a the NASIS Web Reports
### Aliases: fetchNASISWebReport get_project_from_NASISWebReport
### get_progress_from_NASISWebReport
### get_project_correlation_from_NASISWebReport
### get_mapunit_from_NASISWebReport
### get_projectmapunit_from_NASISWebReport
### get_projectmapunit2_from_NASISWebReport
### get_component_from_NASISWebReport get_chorizon_from_NASISWebReport
### get_cosoilmoist_from_NASISWebReport
### get_sitesoilmoist_from_NASISWebReport
### Keywords: manip
### ** Examples
## Not run:
##D library(soilDB)
##D library(ggplot2)
##D library(gridExtra)
##D
##D # query soil components by projectname
##D test = fetchNASISWebReport_component(
##D "EVAL - MLRA 111A - Ross silt loam, 0 to 2 percent slopes, frequently flooded"
##D )
##D test = test$spc
##D
##D # profile plot
##D plot(test)
##D
##D # convert the data for depth plot
##D clay_slice = horizons(slice(test, 0:200 ~ claytotal_l + claytotal_r + claytotal_h))
##D names(clay_slice) <- gsub("claytotal_", "", names(clay_slice))
##D
##D om_slice = horizons(slice(test, 0:200 ~ om_l + om_r + om_h))
##D names(om_slice) = gsub("om_", "", names(om_slice))
##D
##D test2 = rbind(data.frame(clay_slice, var = "clay"),
##D data.frame(om_slice, var = "om")
##D )
##D
##D h = merge(test2, site(test)[c("dmuiid", "coiid", "compname", "comppct_r")],
##D by = "coiid",
##D all.x = TRUE
##D )
##D
##D # depth plot of clay content by soil component
##D gg_comp <- function(x) {
##D ggplot(x) +
##D geom_line(aes(y = r, x = hzdept_r)) +
##D geom_line(aes(y = r, x = hzdept_r)) +
##D geom_ribbon(aes(ymin = l, ymax = h, x = hzdept_r), alpha = 0.2) +
##D xlim(200, 0) +
##D xlab("depth (cm)") +
##D facet_grid(var ~ dmuiid + paste(compname, comppct_r)) +
##D coord_flip()
##D }
##D g1 <- gg_comp(subset(h, var == "clay"))
##D g2 <- gg_comp(subset(h, var == "om"))
##D
##D grid.arrange(g1, g2)
##D
##D
##D # query cosoilmoist (e.g. water table data) by mukey
##D # NA depths are interpreted as (???) with impute=TRUE argument
##D x <- get_cosoilmoist_from_NASISWebReport(
##D "EVAL - MLRA 111A - Ross silt loam, 0 to 2 percent slopes, frequently flooded"
##D )
##D
##D ggplot(x, aes(x = as.integer(month), y = dept_r, lty = status)) +
##D geom_rect(aes(xmin = as.integer(month), xmax = as.integer(month) + 1,
##D ymin = 0, ymax = max(x$depb_r),
##D fill = flodfreqcl)) +
##D geom_line(cex = 1) +
##D geom_point() +
##D geom_ribbon(aes(ymin = dept_l, ymax = dept_h), alpha = 0.2) +
##D ylim(max(x$depb_r), 0) +
##D xlab("month") + ylab("depth (cm)") +
##D scale_x_continuous(breaks = 1:12, labels = month.abb, name="Month") +
##D facet_wrap(~ paste0(compname, ' (', comppct_r , ')')) +
##D ggtitle(paste0(x$nationalmusym[1],
##D ': Water Table Levels from Component Soil Moisture Month Data'))
##D
##D
## End(Not run)
| /data/genthat_extracted_code/soilDB/examples/fetchLIMS_component.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 3,022 | r | library(soilDB)
### Name: fetchNASISWebReport
### Title: Extract component tables from a the NASIS Web Reports
### Aliases: fetchNASISWebReport get_project_from_NASISWebReport
### get_progress_from_NASISWebReport
### get_project_correlation_from_NASISWebReport
### get_mapunit_from_NASISWebReport
### get_projectmapunit_from_NASISWebReport
### get_projectmapunit2_from_NASISWebReport
### get_component_from_NASISWebReport get_chorizon_from_NASISWebReport
### get_cosoilmoist_from_NASISWebReport
### get_sitesoilmoist_from_NASISWebReport
### Keywords: manip
### ** Examples
## Not run:
##D library(soilDB)
##D library(ggplot2)
##D library(gridExtra)
##D
##D # query soil components by projectname
##D test = fetchNASISWebReport_component(
##D "EVAL - MLRA 111A - Ross silt loam, 0 to 2 percent slopes, frequently flooded"
##D )
##D test = test$spc
##D
##D # profile plot
##D plot(test)
##D
##D # convert the data for depth plot
##D clay_slice = horizons(slice(test, 0:200 ~ claytotal_l + claytotal_r + claytotal_h))
##D names(clay_slice) <- gsub("claytotal_", "", names(clay_slice))
##D
##D om_slice = horizons(slice(test, 0:200 ~ om_l + om_r + om_h))
##D names(om_slice) = gsub("om_", "", names(om_slice))
##D
##D test2 = rbind(data.frame(clay_slice, var = "clay"),
##D data.frame(om_slice, var = "om")
##D )
##D
##D h = merge(test2, site(test)[c("dmuiid", "coiid", "compname", "comppct_r")],
##D by = "coiid",
##D all.x = TRUE
##D )
##D
##D # depth plot of clay content by soil component
##D gg_comp <- function(x) {
##D ggplot(x) +
##D geom_line(aes(y = r, x = hzdept_r)) +
##D geom_line(aes(y = r, x = hzdept_r)) +
##D geom_ribbon(aes(ymin = l, ymax = h, x = hzdept_r), alpha = 0.2) +
##D xlim(200, 0) +
##D xlab("depth (cm)") +
##D facet_grid(var ~ dmuiid + paste(compname, comppct_r)) +
##D coord_flip()
##D }
##D g1 <- gg_comp(subset(h, var == "clay"))
##D g2 <- gg_comp(subset(h, var == "om"))
##D
##D grid.arrange(g1, g2)
##D
##D
##D # query cosoilmoist (e.g. water table data) by mukey
##D # NA depths are interpreted as (???) with impute=TRUE argument
##D x <- get_cosoilmoist_from_NASISWebReport(
##D "EVAL - MLRA 111A - Ross silt loam, 0 to 2 percent slopes, frequently flooded"
##D )
##D
##D ggplot(x, aes(x = as.integer(month), y = dept_r, lty = status)) +
##D geom_rect(aes(xmin = as.integer(month), xmax = as.integer(month) + 1,
##D ymin = 0, ymax = max(x$depb_r),
##D fill = flodfreqcl)) +
##D geom_line(cex = 1) +
##D geom_point() +
##D geom_ribbon(aes(ymin = dept_l, ymax = dept_h), alpha = 0.2) +
##D ylim(max(x$depb_r), 0) +
##D xlab("month") + ylab("depth (cm)") +
##D scale_x_continuous(breaks = 1:12, labels = month.abb, name="Month") +
##D facet_wrap(~ paste0(compname, ' (', comppct_r , ')')) +
##D ggtitle(paste0(x$nationalmusym[1],
##D ': Water Table Levels from Component Soil Moisture Month Data'))
##D
##D
## End(Not run)
|
# getting data: full household power consumption data set
input_file <- "./Documents/comp/eda/household_power_consumption.txt"
power_data <- read.table(input_file, header = TRUE, sep = ";",
                         stringsAsFactors = FALSE, dec = ".")
# Restrict to the two days of interest: 1 and 2 February 2007.
two_days <- subset(power_data, Date == "1/2/2007" | Date == "2/2/2007")
# plot graph: histogram of global active power, written to a PNG device
global_active <- as.numeric(two_days$Global_active_power)
png("./Documents/comp/eda/plot1.png")
hist(global_active, col = "red", main = "global active power",
     xlab = "global active power (kilowatts)")
dev.off()
| /plot1.R | no_license | vespyrs/eda | R | false | false | 441 | r | # getting data
# Load the full household power consumption data set.
input_file <- "./Documents/comp/eda/household_power_consumption.txt"
power_data <- read.table(input_file, header = TRUE, sep = ";",
                         stringsAsFactors = FALSE, dec = ".")
# Restrict to the two days of interest: 1 and 2 February 2007.
two_days <- subset(power_data, Date == "1/2/2007" | Date == "2/2/2007")
# Histogram of global active power, written to a PNG device.
global_active <- as.numeric(two_days$Global_active_power)
png("./Documents/comp/eda/plot1.png")
hist(global_active, col = "red", main = "global active power",
     xlab = "global active power (kilowatts)")
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/celltrackR-package.R
\name{cheatsheet}
\alias{cheatsheet}
\title{Open the package cheat sheet}
\usage{
cheatsheet(opencmd = NULL)
}
\arguments{
\item{opencmd}{The command used to open pdfs from the command line.}
}
\value{
None
}
\description{
Running this function will open the package cheat sheet (a pdf) via a call to
\code{system()}.
}
| /man/cheatsheet.Rd | no_license | ingewortel/celltrackR | R | false | true | 419 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/celltrackR-package.R
\name{cheatsheet}
\alias{cheatsheet}
\title{Open the package cheat sheet}
\usage{
cheatsheet(opencmd = NULL)
}
\arguments{
\item{opencmd}{The command used to open pdfs from the command line.}
}
\value{
None
}
\description{
Running this function will open the package cheat sheet (a pdf) via a call to
\code{system()}.
}
|
library(quanteda)
# Read the negative and positive movie-review texts from disk.
neg_texts <- getTextDir("~/Dropbox/QUANTESS/corpora/movieReviews/smaller/neg/")
pos_texts <- getTextDir("~/Dropbox/QUANTESS/corpora/movieReviews/smaller/pos/")
texts <- c(neg_texts, pos_texts)
# Label the first 1000 documents "neg" and the next 1000 "pos".
vals <- rep(c("neg", "pos"), each = 1000)
atts <- data.frame(label = vals)
movies <- corpusCreate(texts, attribs = atts)
text <- "Keanu is really excellent in this movie, and Arnold is great too."
# kwic() called on a single text
oneContext <- kwic(text, "great", window = 2)
# kwic() called on a corpus
allContext <- kwic(movies, "great", window=8) | /working_files/examples/kwicExample.R | no_license | saldaihani/quanteda | R | false | false | 575 | r | library(quanteda)
# Read the negative and positive movie-review texts from disk.
neg_texts <- getTextDir("~/Dropbox/QUANTESS/corpora/movieReviews/smaller/neg/")
pos_texts <- getTextDir("~/Dropbox/QUANTESS/corpora/movieReviews/smaller/pos/")
texts <- c(neg_texts, pos_texts)
# Label the first 1000 documents "neg" and the next 1000 "pos".
vals <- rep(c("neg", "pos"), each = 1000)
atts <- data.frame(label = vals)
movies <- corpusCreate(texts, attribs = atts)
text <- "Keanu is really excellent in this movie, and Arnold is great too."
# kwic() called on a single text
oneContext <- kwic(text, "great", window = 2)
# kwic() called on a corpus
allContext <- kwic(movies, "great", window = 8)
# Detailed description of the tree data structure and related functions
# ---------------------------------------------------------------------
#
# We will use structures to implement the tree datatype. The tree is formed by
# nodes. The code of a node
#
# node <- new.node(name="Temperature", branches=c("Good", "Bad"))
#
# creates a node called 'Temperature' which has the branches: "Good" and
# "Bad". Before we will build the tree we create other node
#
# node2 <- new.node("Rain", c("True", "False"))
#
# Note: You can omit the names of the parameters 'name' and 'branches'
#
# A tree is a list with the following components:
# - nodesCount: numbers of nodes
# - nodes: is a list of nodes, whose first element is the root node of tree
# Each node are connected to his father by the 'parentId' value, except the
# root node whose value parentId = NULL
#
# Now we can build a tree, for this we need choose which node will be the root
# of tree. The code
#
# tree <- new.tree(node)
#
# Here we created a tree whose root is the node with attribute
# "Temperature". For definition branches of a tree we use the function
# add.subtree(). For example, we suppose there is a branch between node to node2
# for the value "Good" of node. The code to represent that is
#
# tree <- add.subtree(tree=tree, subtree=node2, parentId=node$branches[["Good"]])
#
# Note: You can also add a node a leaf or a tree.
#
# There are two kinds of nodes in a decision tree: decision nodes and leaf
# nodes. The former are used to decide how to classify an example. The
# latter are used to assign a class (or label) to an example. A leaf node
# differs from a decision node in two ways: (1) a leaf node does not have a
# 'name' or 'branches'; (2) a leaf node has a 'label'. The code to create a
# leaf node is:
#
# leaf <- new.leaf("Yes")
#
#
# Now we show how to use the tree data structure. For this, we are going to
# build the decision tree of figure 3.1 in the book "Machine Learning" by Tom
# Mitchell. But first, a bit of setup to use the data structure:
source("tree.R")
# The tree has three attributes. for each of them are representing by a node.
#
# The attribute Outlook
n0 <- new.node("Outlook", c("Sunny", "Overcast", "Rain"))
# The attribute Humidity
n1 <- new.node("Humidity", c("High", "Normal"))
# The attribute Wind
n2 <- new.node("Wind", c("Strong", "Weak"))
# Moreover, There also are two labels. These are 'leaf node'.
l.yes <- new.leaf("Yes")
l.no <- new.leaf("No")
# Now we build the tree. For that, we call the function new.tree() pass it as
# parameter the root of the tree
tree <- new.tree(n0)
# The following lines show how to build the branches of the tree
tree <- add.subtree(tree, n1, n0$branches[["Sunny"]])
tree <- add.subtree(tree, n2, n0$branches[["Rain"]])
tree <- add.subtree(tree, l.yes, n0$branches[["Overcast"]])
tree <- add.subtree(tree, l.no, n1$branches[["High"]])
tree <- add.subtree(tree, l.yes, n1$branches[["Normal"]])
tree <- add.subtree(tree, l.no, n2$branches[["Strong"]])
tree <- add.subtree(tree, l.yes, n2$branches[["Weak"]])
# Finally, we print the tree for screen
plot.tree(tree)
cat("_________________________________\n")
# There is other way to build a tree. This way, first is added nodes to a subtree
# and later is added this subtree to an incomplete tree. Follow the previous example
subtree <- new.tree(n2)
subtree <- add.subtree(subtree,l.no,n2$branches[["Strong"]])
subtree <- add.subtree(subtree,l.yes,n2$branches[["Weak"]])
plot.tree(subtree)
cat("_________________________________\n")
# Now create an incomplete tree adding nodes
incomplete.tree <- new.tree(n0)
# The following lines show how to build the branches of the tree
incomplete.tree <- add.subtree(incomplete.tree, n1, n0$branches[["Sunny"]])
incomplete.tree <- add.subtree(incomplete.tree, n2, n0$branches[["Rain"]])
incomplete.tree <- add.subtree(incomplete.tree, l.yes, n0$branches[["Overcast"]])
incomplete.tree <- add.subtree(incomplete.tree, l.no, n1$branches[["High"]])
incomplete.tree <- add.subtree(incomplete.tree, l.yes, n1$branches[["Normal"]])
plot.tree(incomplete.tree)
cat("_________________________________\n")
#Finally add the subtree to the incomplete tree
final.tree <- add.subtree(incomplete.tree,subtree,n0$branches[["Rain"]])
plot.tree(final.tree)
cat("_________________________________\n") | /src/dt.tutorial.R | no_license | mmunozpatino/l1-AM | R | false | false | 4,339 | r | # Detailed description of the tree data structure and related functions
# ---------------------------------------------------------------------
#
# We will use structures to implement the tree datatype. The tree is formed by
# nodes. The code of a node
#
# node <- new.node(name="Temperature", branches=c("Good", "Bad"))
#
# creates a node called 'Temperature' which has the branches: "Good" and
# "Bad". Before we will build the tree we create other node
#
# node2 <- new.node("Rain", c("True", "False"))
#
# Note: You can omit the names of the parameters 'name' and 'branches'
#
# A tree is a list with the following components:
# - nodesCount: numbers of nodes
# - nodes: is a list of nodes, whose first element is the root node of tree
# Each node are connected to his father by the 'parentId' value, except the
# root node whose value parentId = NULL
#
# Now we can build a tree, for this we need choose which node will be the root
# of tree. The code
#
# tree <- new.tree(node)
#
# Here we created a tree whose root is the node with attribute
# "Temperature". For definition branches of a tree we use the function
# add.subtree(). For example, we suppose there is a branch between node to node2
# for the value "Good" of node. The code to represent that is
#
# tree <- add.subtree(tree=tree, subtree=node2, parentId=node$branches[["Good"]])
#
# Note: You can also add a node a leaf or a tree.
#
# There are two nodes in a decision tree: decision node and leaf node. The
# first one are used to decide how classify a example. The last one are used
# to assign a class (or label) a example. A leaf node is different a decision
# node by two thing: (1) the leaf node does not have a 'name' and
# 'branches'. (2) The leaf node has a 'label'. The code to create a leaf node
# is:
#
# leaf <- new.leaf("Yes")
#
#
# Now we show as to use the tree data structure. For this, we are going to
# build the decision tree at figure 3.1 in the book "Machine Learning" by Tom
# Mitchell. But before, a bit of staff to use the data structure:
source("tree.R")
# The tree has three attributes. for each of them are representing by a node.
#
# The attribute Outlook
n0 <- new.node("Outlook", c("Sunny", "Overcast", "Rain"))
# The attribute Humidity
n1 <- new.node("Humidity", c("High", "Normal"))
# The attribute Wind
n2 <- new.node("Wind", c("Strong", "Weak"))
# Moreover, There also are two labels. These are 'leaf node'.
l.yes <- new.leaf("Yes")
l.no <- new.leaf("No")
# Now we build the tree. For that, we call the function new.tree() pass it as
# parameter the root of the tree
tree <- new.tree(n0)
# The following lines show how to build the branches of the tree
tree <- add.subtree(tree, n1, n0$branches[["Sunny"]])
tree <- add.subtree(tree, n2, n0$branches[["Rain"]])
tree <- add.subtree(tree, l.yes, n0$branches[["Overcast"]])
tree <- add.subtree(tree, l.no, n1$branches[["High"]])
tree <- add.subtree(tree, l.yes, n1$branches[["Normal"]])
tree <- add.subtree(tree, l.no, n2$branches[["Strong"]])
tree <- add.subtree(tree, l.yes, n2$branches[["Weak"]])
# Finally, we print the tree for screen
plot.tree(tree)
cat("_________________________________\n")
# There is other way to build a tree. This way, first is added nodes to a subtree
# and later is added this subtree to an incomplete tree. Follow the previous example
subtree <- new.tree(n2)
subtree <- add.subtree(subtree,l.no,n2$branches[["Strong"]])
subtree <- add.subtree(subtree,l.yes,n2$branches[["Weak"]])
plot.tree(subtree)
cat("_________________________________\n")
# Now create an incomplete tree adding nodes
incomplete.tree <- new.tree(n0)
# The following lines show how to build the branches of the tree
incomplete.tree <- add.subtree(incomplete.tree, n1, n0$branches[["Sunny"]])
incomplete.tree <- add.subtree(incomplete.tree, n2, n0$branches[["Rain"]])
incomplete.tree <- add.subtree(incomplete.tree, l.yes, n0$branches[["Overcast"]])
incomplete.tree <- add.subtree(incomplete.tree, l.no, n1$branches[["High"]])
incomplete.tree <- add.subtree(incomplete.tree, l.yes, n1$branches[["Normal"]])
plot.tree(incomplete.tree)
cat("_________________________________\n")
#Finally add the subtree to the incomplete tree
final.tree <- add.subtree(incomplete.tree,subtree,n0$branches[["Rain"]])
plot.tree(final.tree)
cat("_________________________________\n") |
library(tidyverse)
library(lme4)
library(lmerTest)
library(sjPlot)
## read in spp list from google drive
# Species trait table; empty strings are converted to NA so trait-complete
# rows can be selected later with na.omit().
spp_traits <- read.csv('data/traits/insect_traits.csv', stringsAsFactors = FALSE) %>%
  na_if("")
## read in phenesse outputs
# Phenology estimates joined with climate and population covariates.
model_df <- read.csv(file = "data/model_dfs/duration_climate_population_data.csv",
                     stringsAsFactors = FALSE)
# Assign each unique lon/lat grid cell a sequential id (id_cells), used
# below as a random-effect grouping factor.
id_cells <- model_df %>%
  group_by(lon, lat) %>%
  summarise(count = n()) %>%
  tibble::rownames_to_column() %>%
  rename(id_cells = rowname)
model_df <- left_join(model_df, id_cells)
# Drop incomplete rows and z-scale the predictors (pop on a log10 scale)
# so model coefficients are comparable across predictors.
model_df2 <- model_df %>%
  na.omit() %>%
  mutate(temp = scale(temp),
         prec = scale(prec),
         pop = scale(log10(pop)),
         prec_seas = scale(bio15),
         temp_seas = scale(bio4))
# Per-species data density: number of records per species.
datadens <- model_df2 %>%
  group_by(scientificName, Order) %>%
  summarise(count = n())
# Keep species with at least 10 records; exclude the managed honey bee.
has_10_cells <- filter(datadens, count >= 10) %>%
  filter (scientificName != "Apis mellifera") # 145
model_df2 <- filter(model_df2, scientificName %in% has_10_cells$scientificName)
unique(model_df2$scientificName) %>% length() # 217 spp
## remove migratory species
# NOTE(review): these names must match `scientificName` in the data exactly.
# Fixed "Ertythrodiplax umbrata" -> "Erythrodiplax umbrata" (the genus is
# Erythrodiplax); the misspelled form could never match, so that species was
# silently never removed. "Pantala flavenscens" (sic) is retained alongside
# the correctly spelled "Pantala flavescens" in case either form occurs in
# the data; "Ponoquina ocola" may be "Panoquina ocola" -- confirm against
# the data before changing. Duplicate entries are harmless with %in%.
mig_spp <- c("Anax junius", "Pantala flavenscens", "Pantala hymenaea", "Tramea lacerata",
             "Sympetrum corruptum", "Sympetrum vicinum", "Libellula pulchella", "Libellula vibrans",
             "Tramea lacerata", "Tramea onusta", "Pantala flavescens", "Libellula quadrimaculata",
             "Erythrodiplax umbrata", "Epiaeschna heros", "Tramea carolina",
             "Libellula semifasciata", "Pantala hymenaea",
             "Spoladea recurvalis", "Ponoquina ocola", "Plutella xylostella",
             "Chrysodeixis includens", "Phoebis sennae", "Abaeis nicippe",
             "Libytheana carinenta", "Agraulis vanillae", "Junonia coenia",
             "Danaus plexippus", "Vanessa virginiensis", "Vanessa cardui",
             "Vanessa atalanta", "Danaus gilippus", "Nymphalis antiopa",
             "Polygonia interrogationis", "Lerema accius")
# Drop all migratory species from the modelling data.
model_df2 <- filter(model_df2, !scientificName %in% mig_spp)
unique(model_df2$scientificName) %>% length() # left with 194 spp
# make season trait
# Average onset day-of-year per species across all its records.
spp_seas <- model_df2 %>%
  group_by(scientificName) %>%
  summarise(ave_on = mean(onset))
# Quick look at the distribution of average onsets (informs the cutpoints).
hist(spp_seas$ave_on)
# Cutpoints at 125 and 175 split species into Spring/Summer/Fall groups.
spp_seas2 <- spp_seas %>%
  mutate(seas = case_when(ave_on <= 125 ~ "Spring",
                          ave_on > 125 & ave_on <= 175 ~ "Summer",
                          ave_on > 175 ~ "Fall"))
# combine traits and results
# No `by` given: joins use all shared columns by default -- presumably
# scientificName; confirm no other columns are shared between the tables.
model_df3 <- left_join(model_df2, spp_traits)
model_df3 <- left_join(model_df3, spp_seas2) %>%
  select(-ave_on)
# Onset model
# Full candidate model: climate main effects plus the temp:prec interaction,
# random intercepts for grid cell and species, and uncorrelated per-species
# random slopes for each climate predictor. REML = FALSE so the candidates
# can be compared by AIC.
mon <- lmer(onset ~ temp + prec + prec_seas + temp_seas + temp:prec +
              (1|id_cells) + (1|scientificName) +
              (0 + temp | scientificName) +
              (0 + prec | scientificName) +
              (0 + temp_seas | scientificName) +
              (0 + prec_seas | scientificName) +
              (0 + temp:prec | scientificName),
            data = model_df3, REML = FALSE,
            lmerControl(optimizer = "bobyqa"))
# Reduced candidate: same structure without the temp:prec interaction.
mon2 <- lmer(onset ~ temp + prec + prec_seas + temp_seas +
               (1|id_cells) + (1|scientificName) +
               (0 + temp | scientificName) +
               (0 + prec | scientificName) +
               (0 + temp_seas | scientificName) +
               (0 + prec_seas | scientificName),
             data = model_df3, REML = FALSE,
             lmerControl(optimizer = "bobyqa"))
# Multicollinearity check of the fixed effects in both candidates.
car::vif(mon)
car::vif(mon2)
# Akaike weights comparing the two candidate models.
MuMIn::Weights(AIC(mon, mon2))
# Backward elimination of model terms (lmerTest is attached, so this
# dispatches to lmerTest's step method for merMod objects).
step(mon)
#Model found:
# onset ~ temp + prec + temp_seas + (1 | id_cells) + (1 | scientificName) +
#  (0 + temp | scientificName) + (0 + prec | scientificName) +
#  (0 + temp_seas | scientificName) + (0 + prec_seas | scientificName) +
#  (0 + temp:prec | scientificName) + temp:prec
## final model
# Model retained after selection: prec_seas dropped from the fixed effects;
# note the prec_seas and temp_seas random slopes are also omitted here.
m_on_final <- lmer(onset ~ temp + prec + temp_seas +
                     (1|id_cells) + (1|scientificName) +
                     (0 + temp | scientificName) +
                     (0 + prec | scientificName) +
                     (0 + temp:prec | scientificName) +
                     temp:prec,
                   data = model_df3, REML = FALSE,
                   lmerControl(optimizer = "bobyqa"))
summary(m_on_final)
car::vif(m_on_final)
# Pseudo-R^2 for the final model; warnings are suppressed deliberately.
suppressWarnings(MuMIn::r.squaredGLMM(m_on_final))
# Predicted onset across temp at different prec levels (sjPlot).
plot_model(m_on_final, type = "pred", terms = c("temp", "prec"))
### Extract Random term estimates
# Per-species coefficients (fixed effect + BLUP) from the final onset model,
# joined to the trait table and the derived season class.
on_coef <- coef(m_on_final)$scientificName %>%
  tibble::rownames_to_column("scientificName") %>%
  rename(intercept_ave_onset = "(Intercept)") %>%
  left_join(spp_traits, by = "scientificName") %>%
  left_join(spp_seas2, by = "scientificName")
# clean on_coef
# Drop species with incomplete traits and the uninformative "None" diapause level
on_coef <- on_coef %>%
  na.omit %>%
  filter(diapause.stage != "None")
# Exploratory plots: species-level intercepts and climate sensitivities,
# faceted by each categorical trait.
pivot_longer(on_coef, cols = c("higher_taxon", "larval.diet", "flights",
                               "immature.habitat", "development",
                               "diapause.stage", "seas")) %>%
  ggplot(aes(x = value, y = intercept_ave_onset)) +
  geom_boxplot() + geom_jitter() +
  facet_wrap(~name, scales = "free")
pivot_longer(on_coef, cols = c("higher_taxon", "larval.diet", "flights",
                               "immature.habitat", "development",
                               "diapause.stage", "seas")) %>%
  ggplot(aes(x = value, y = temp)) +
  geom_boxplot() + geom_jitter() +
  facet_wrap(~name, scales = "free") +
  labs(y = "Sensentivity to temp")
pivot_longer(on_coef, cols = c("higher_taxon", "larval.diet", "flights",
                               "immature.habitat", "development",
                               "diapause.stage", "seas")) %>%
  ggplot(aes(x = value, y = prec)) +
  geom_boxplot() + geom_jitter() +
  facet_wrap(~name, scales = "free") +
  labs(y = "Sensentivity to prec")
pivot_longer(on_coef, cols = c("higher_taxon", "larval.diet", "flights",
                               "immature.habitat", "development",
                               "diapause.stage", "seas")) %>%
  ggplot(aes(x = value, y = temp_seas)) +
  geom_boxplot() + geom_jitter() +
  facet_wrap(~name, scales = "free") +
  labs(y = "Sensentivity to temp_seas")
######## Add traits to model
# Trait-model data: complete cases only, and drop the "None" diapause level so
# every categorical trait has informative contrasts.
model_df4 <- na.omit(model_df3) %>%
  filter(diapause.stage != "None")
# Full onset x traits model: each trait enters as a main effect and in
# interaction with each climate predictor.
m_on_traits <- lmer(onset ~ temp + prec + temp_seas + temp:prec +
                    development + temp:development + prec:development + temp_seas:development +
                    diapause.stage + temp:diapause.stage + prec:diapause.stage + temp_seas:diapause.stage +
                    flights + temp:flights + prec:flights + temp_seas:flights +
                    immature.habitat + temp:immature.habitat + prec:immature.habitat + temp_seas:immature.habitat +
                    larval.diet + temp:larval.diet + prec:larval.diet + temp_seas:larval.diet +
                    (1|id_cells) + (1|scientificName) +
                    (0 + temp | scientificName) +
                    (0 + prec | scientificName) +
                    (0 + temp_seas | scientificName) +
                    (0 + temp:prec | scientificName),
                    data = model_df4, REML = FALSE,
                    lmerControl(optimizer = "bobyqa"))
# Backward elimination, allowing random terms to be dropped as well
m_on_traits_s = step(m_on_traits, reduce.random = T)
m_on_traits_s
# Model found:
# onset ~ temp + prec + temp_seas + diapause.stage + immature.habitat +
#   (1 | id_cells) + (1 | scientificName) + (0 + temp | scientificName) +
#   (0 + prec | scientificName) + (0 + temp_seas | scientificName) +
#   (0 + temp:prec | scientificName) + temp:prec + temp_seas:diapause.stage +
#   temp:immature.habitat + prec:immature.habitat
# Refit of the step()-selected structure; temp:flights is added here as an
# extra candidate interaction and is tested again below.
m_on_traits_final <- lmer(onset ~ temp + prec + temp_seas + temp:prec +
                          diapause.stage + immature.habitat +
                          (1 | id_cells) + (1 | scientificName) +
                          (0 + temp | scientificName) +
                          (0 + prec | scientificName) +
                          (0 + temp_seas | scientificName) +
                          (0 + temp:prec | scientificName) +
                          temp_seas:diapause.stage +
                          temp:flights +
                          temp:immature.habitat +
                          prec:immature.habitat,
                          data = model_df4, REML = FALSE,
                          lmerControl(optimizer = "bobyqa"))
# First check vifs to see if final model will stand
car::vif(m_on_traits_final) ## Generalized Collinearity Diagnostics (Fox and Monette 1992)
# looks good so now make sure we have the top model & no more interactions are needed
m_on_traits_s2 = step(m_on_traits_final, reduce.random = T)
m_on_traits_s2 # looks like temp:flights is not needed interaction remove and test again
# Same model with temp:flights removed.
m_on_traits_final <- lmer(onset ~ temp + prec + temp_seas + temp:prec +
                          diapause.stage + immature.habitat +
                          (1 | id_cells) + (1 | scientificName) +
                          (0 + temp | scientificName) +
                          (0 + prec | scientificName) +
                          (0 + temp_seas | scientificName) +
                          (0 + temp:prec | scientificName) +
                          temp_seas:diapause.stage +
                          temp:immature.habitat +
                          prec:immature.habitat,
                          data = model_df4, REML = FALSE,
                          lmerControl(optimizer = "bobyqa"))
# Confirm step() no longer removes any term from this refit
m_on_traits_s3 = step(m_on_traits_final, reduce.random = T)
m_on_traits_s3 # looks like we found the top model
summary(m_on_traits_final)
car::Anova(m_on_traits_final) # temp_seas:diapuase stage & temp:diapause stage are sig interactions
car::vif(m_on_traits_final) ## Generalized Collinearity Diagnostics (Fox and Monette 1992)
MuMIn::r.squaredGLMM(m_on_traits_final)
# Marginal predictions for the retained climate x trait interactions
plot_model(m_on_traits_final, type = "pred", terms = c("temp_seas", "diapause.stage"), ci.lvl = NA)
plot_model(m_on_traits_final, type = "pred", terms = c("temp", "immature.habitat"), ci.lvl = NA)
plot_model(m_on_traits_final, type = "pred", terms = c("prec", "immature.habitat"), ci.lvl = NA)
## Explore traits for offset data
# Climate-only offset model, same random-effects structure as the onset model.
m_off <- lmer(offset ~ temp + prec + temp_seas + prec_seas + temp:prec +
              (1|id_cells) + (1|scientificName) +
              (0 + temp | scientificName) +
              (0 + prec | scientificName) +
              (0 + prec_seas | scientificName) +
              (0 + temp_seas | scientificName) +
              (0 + temp:prec | scientificName),
              data = model_df3, REML = FALSE,
              lmerControl(optimizer = "bobyqa"))
summary(m_off)
# Backward elimination of non-significant terms
m_off_s <- step(m_off)
m_off_s
#Model found:
# offset ~ prec + temp_seas + (1 | id_cells) + (1 | scientificName) +
#   (0 + temp | scientificName) + (0 + prec_seas | scientificName) +
#   (0 + temp_seas | scientificName) + (0 + temp:prec | scientificName)
# Final model without traits!
# NOTE(review): step() retained (0 + temp | ...), (0 + prec_seas | ...) and
# (0 + temp:prec | ...) random slopes, but this refit keeps only prec and
# temp_seas random slopes -- confirm the discrepancy is intentional.
m_off_final <-lmer(offset ~ prec + temp_seas +
                   (1|id_cells) + (1|scientificName) +
                   (0 + prec | scientificName) +
                   (0 + temp_seas | scientificName),
                   data = model_df3, REML = FALSE,
                   lmerControl(optimizer = "bobyqa"))
summary(m_off_final)
car::vif(m_off_final)
# Marginal / conditional R2 (random-slope warnings suppressed)
suppressWarnings(MuMIn::r.squaredGLMM(m_off_final))
### Extract Random term estimates
# Per-species coefficients from the final offset model, joined to traits.
off_coef <- coef(m_off_final)$scientificName %>%
  rownames_to_column("scientificName") %>%
  rename(intercept_ave_offset = "(Intercept)") %>%
  left_join(spp_traits, by = "scientificName") %>%
  left_join(spp_seas2, by = "scientificName")
# clean off_coef
# Drop incomplete-trait species and the uninformative "None" diapause level
off_coef <- off_coef %>%
  na.omit %>%
  filter(diapause.stage != "None")
# Exploratory plots: intercepts and climate sensitivities by trait level
pivot_longer(off_coef, cols = c("higher_taxon", "larval.diet", "flights",
                                "immature.habitat", "development",
                                "diapause.stage", "seas")) %>%
  ggplot(aes(x = value, y = intercept_ave_offset)) +
  geom_boxplot() + geom_jitter() +
  facet_wrap(~name, scales = "free")
pivot_longer(off_coef, cols = c("higher_taxon", "larval.diet", "flights",
                                "immature.habitat", "development",
                                "diapause.stage", "seas")) %>%
  ggplot(aes(x = value, y = prec)) +
  geom_boxplot() + geom_jitter() +
  facet_wrap(~name, scales = "free") +
  labs(y = "Sensentivity to prec")
pivot_longer(off_coef, cols = c("higher_taxon", "larval.diet", "flights",
                                "immature.habitat", "development",
                                "diapause.stage", "seas")) %>%
  ggplot(aes(x = value, y = temp_seas)) +
  geom_boxplot() + geom_jitter() +
  facet_wrap(~name, scales = "free") +
  labs(y = "Sensentivity to temp_seas")
######## Add traits to model
# Full offset x traits model: each trait as a main effect and in interaction
# with the two climate predictors retained by the climate-only offset model.
m_off_traits <- lmer(offset ~ prec + temp_seas +
                     seas + prec:seas + temp_seas:seas +
                     development + prec:development + temp_seas:development +
                     diapause.stage + prec:diapause.stage + temp_seas:diapause.stage +
                     flights + prec:flights + temp_seas:flights +
                     immature.habitat + prec:immature.habitat + temp_seas:immature.habitat +
                     larval.diet + prec:larval.diet + temp_seas:larval.diet +
                     (1|id_cells) + (1|scientificName) +
                     (0 + prec | scientificName) +
                     (0 + temp_seas | scientificName),
                     data = model_df4, REML = FALSE,
                     lmerControl(optimizer = "bobyqa"))
# stepwise regression to select model
m_off_traits_s = step(m_off_traits, reduce.random = T)
m_off_traits_s
# Model found:
# offset ~ prec + temp_seas + development + diapause.stage + flights +
#   immature.habitat + larval.diet + (1 | id_cells) + (1 | scientificName) +
#   (0 + prec | scientificName) + (0 + temp_seas | scientificName) +
#   temp_seas:development + prec:diapause.stage + prec:immature.habitat +
#   temp_seas:immature.habitat + prec:larval.diet
# Final offset model with traits
# Candidate refit (note: seas terms added beyond the step() selection above).
m_off_traits_final <- lmer(offset ~ prec + temp_seas +
                           seas + development + flights + immature.habitat +
                           (1|id_cells) + (1|scientificName) +
                           (0 + temp_seas | scientificName) +
                           (0 + prec | scientificName) +
                           prec:seas +
                           temp_seas:seas +
                           temp_seas:development +
                           prec:immature.habitat +
                           temp_seas:immature.habitat,
                           data = model_df4, REML = FALSE,
                           lmerControl(optimizer = "bobyqa"))
#first see if VIFs are okay
car::vif(m_off_traits_final) # they are not. temp_seas:development is inflated, so removed
# Final offset model with traits
# Same refit with the collinear temp_seas:development interaction removed.
m_off_traits_final <- lmer(offset ~ prec + temp_seas +
                           seas + development + flights + immature.habitat +
                           (1|id_cells) + (1|scientificName) +
                           (0 + temp_seas | scientificName) +
                           (0 + prec | scientificName) +
                           prec:seas +
                           temp_seas:seas +
                           prec:immature.habitat +
                           temp_seas:immature.habitat,
                           data = model_df4, REML = FALSE,
                           lmerControl(optimizer = "bobyqa"))
# check vifs again
car::vif(m_off_traits_final) # we're good. Double check best model
# stepwise regression to select model
m_off_traits_s2 = step(m_off_traits_final, reduce.random = T)
m_off_traits_s2
#Model found:
# offset ~ prec + temp_seas + seas + flights + immature.habitat +
#   (1 | id_cells) + (1 | scientificName) +
#   (0 + temp_seas | scientificName) + (0 + prec | scientificName) + prec:seas +
#   temp_seas:seas + temp_seas:immature.habitat
# Final refit: development main effect and prec:immature.habitat dropped by step().
m_off_traits_final <- lmer(offset ~ prec + temp_seas +
                           seas + flights + immature.habitat +
                           (1|id_cells) + (1|scientificName) +
                           (0 + temp_seas | scientificName) +
                           (0 + prec | scientificName) +
                           prec:seas +
                           temp_seas:seas +
                           temp_seas:immature.habitat,
                           data = model_df4, REML = FALSE,
                           lmerControl(optimizer = "bobyqa"))
# check vifs again
car::vif(m_off_traits_final)
summary(m_off_traits_final)
car::vif(m_off_traits_final)
car::Anova(m_off_traits_final)
MuMIn::r.squaredGLMM(m_off_traits_final)
# Marginal predictions for the retained climate x trait interactions
plot_model(m_off_traits_final, type = "pred", terms = c("temp_seas", "seas"), ci.lvl = NA)
plot_model(m_off_traits_final, type = "pred", terms = c("temp_seas", "immature.habitat"), ci.lvl = NA)
plot_model(m_off_traits_final, type = "pred", terms = c("prec", "seas"),ci.lvl = NA)
############ Duration
# Climate-only duration model; note this uses the trait-complete data
# (model_df4), unlike the onset/offset climate-only models above (model_df3).
m_dur <- lmer(duration ~ temp + prec + temp_seas + prec_seas + temp:prec +
              (1|id_cells) + (1|scientificName) +
              (0 + temp | scientificName) +
              (0 + prec | scientificName) +
              (0 + prec_seas | scientificName) +
              (0 + temp_seas | scientificName) +
              (0 + temp:prec | scientificName),
              data = model_df4, REML = FALSE,
              lmerControl(optimizer = "bobyqa"))
# Backward elimination of non-significant terms
m_dur_s <- step(m_dur)
m_dur_s
# Model found:
# duration ~ temp + prec + temp_seas + (1 | id_cells) + (1 | scientificName) +
#   (0 + temp | scientificName) + (0 + prec_seas | scientificName) +
#   (0 + temp_seas | scientificName) + (0 + temp:prec | scientificName) +
#   temp:prec
## Final Duration Model No Traits ##
# NOTE(review): step() retained (0 + prec_seas | scientificName) rather than
# (0 + prec | scientificName); this refit swaps them -- confirm intentional.
m_dur_final <- lmer(duration ~ temp + prec + temp_seas + temp:prec +
                    (1|id_cells) + (1|scientificName) +
                    (0 + temp | scientificName) +
                    (0 + prec | scientificName) +
                    (0 + temp_seas | scientificName) +
                    (0 + temp:prec | scientificName),
                    data = model_df4, REML = FALSE,
                    lmerControl(optimizer = "bobyqa"))
summary(m_dur_final)
car::vif(m_dur_final)
# Marginal / conditional R2 (random-slope warnings suppressed)
suppressWarnings(MuMIn::r.squaredGLMM(m_dur_final))
### Extract Random term estimates
# Per-species coefficients from the final duration model, joined to traits.
dur_coef <- coef(m_dur_final)$scientificName %>%
  rownames_to_column("scientificName") %>%
  rename(intercept_ave_duration = "(Intercept)") %>%
  left_join(spp_traits, by = "scientificName") %>%
  left_join(spp_seas2, by = "scientificName")
# Exploratory plot: species duration intercepts by trait level
pivot_longer(dur_coef, cols = c("higher_taxon", "larval.diet", "flights",
                                "immature.habitat", "development",
                                "diapause.stage", "seas")) %>%
  ggplot(aes(x = value, y = intercept_ave_duration)) +
  geom_boxplot() + geom_jitter() +
  facet_wrap(~name, scales = "free")
## Add traits to duration model
# Full duration x traits model: every trait as a main effect and in
# interaction with temp, prec and temp_seas.
m_dur_traits <- lmer(duration ~ temp + prec + temp_seas + temp:prec +
                     seas + prec:seas + temp_seas:seas + temp:seas +
                     development + prec:development + temp_seas:development + temp:development +
                     diapause.stage + prec:diapause.stage + temp_seas:diapause.stage + temp:diapause.stage +
                     flights + prec:flights + temp_seas:flights + temp:flights +
                     immature.habitat + prec:immature.habitat + temp_seas:immature.habitat + temp:immature.habitat +
                     larval.diet + prec:larval.diet + temp_seas:larval.diet + temp:larval.diet +
                     (1|id_cells) + (1|scientificName) +
                     (0 + prec | scientificName) +
                     (0 + temp | scientificName) +
                     (0 + temp_seas | scientificName) +
                     (0 + temp:prec | scientificName),
                     data = model_df4, REML = FALSE,
                     lmerControl(optimizer = "bobyqa"))
# Backward elimination, allowing random terms to be dropped as well
m_dur_traits_s = step(m_dur_traits, reduce.random = T)
m_dur_traits_s
# Refit of the step()-selected structure (VIF-checked and pruned below).
m_dur_traits_final <- lmer(duration ~ temp + prec + temp_seas +
                           seas + development + diapause.stage + flights + immature.habitat + larval.diet +
                           (1 | id_cells) + (1 | scientificName) +
                           (0 + prec | scientificName) +
                           (0 + temp | scientificName) +
                           (0 + temp_seas | scientificName) +
                           (0 + temp:prec | scientificName) +
                           temp_seas:seas + temp:development +
                           prec:diapause.stage + temp:flights +
                           temp_seas:immature.habitat + temp:immature.habitat +
                           prec:larval.diet,
                           data = model_df4, REML = FALSE,
                           lmerControl(optimizer = "bobyqa"))
#check vifs
car::vif(m_dur_traits_final) # temp:development is inflated remove
# Refit without the collinear temp:development interaction.
m_dur_traits_final <- lmer(duration ~ temp + prec + temp_seas +
                           seas + development + diapause.stage + flights + immature.habitat + larval.diet +
                           (1 | id_cells) + (1 | scientificName) +
                           (0 + prec | scientificName) +
                           (0 + temp | scientificName) +
                           (0 + temp_seas | scientificName) +
                           (0 + temp:prec | scientificName) +
                           temp_seas:seas +
                           prec:diapause.stage + temp:flights +
                           temp_seas:immature.habitat + temp:immature.habitat +
                           prec:larval.diet,
                           data = model_df4, REML = FALSE,
                           lmerControl(optimizer = "bobyqa"))
#check vifs again
car::vif(m_dur_traits_final) # development is inflated remove
# Refit without the collinear development main effect (and without the
# temp:prec random slope).
m_dur_traits_final <- lmer(duration ~ temp + prec + temp_seas +
                           seas + diapause.stage + flights + immature.habitat + larval.diet +
                           (1 | id_cells) + (1 | scientificName) +
                           (0 + prec | scientificName) +
                           (0 + temp | scientificName) +
                           (0 + temp_seas | scientificName) +
                           temp_seas:seas +
                           prec:diapause.stage + temp:flights +
                           temp_seas:immature.habitat + temp:immature.habitat +
                           prec:larval.diet,
                           data = model_df4, REML = FALSE,
                           lmerControl(optimizer = "bobyqa"))
#check vifs again
car::vif(m_dur_traits_final) # good now
## see if final model remains the same
m_dur_traits_s2 = step(m_dur_traits_final, reduce.random = T)
m_dur_traits_s2
#Model found:
# duration ~ temp + prec + temp_seas + seas + diapause.stage +
#   flights + immature.habitat + larval.diet +
#   (1 | id_cells) + (1 | scientificName) +
#   (0 + prec | scientificName) + (0 + temp | scientificName) + (0 + temp_seas | scientificName) +
#   temp_seas:seas + prec:diapause.stage + temp:flights + temp_seas:immature.habitat +
#   temp:immature.habitat + prec:larval.diet
# Final duration model with traits (same terms as above, reordered).
m_dur_traits_final <- lmer(duration ~ temp + prec + temp_seas +
                           seas + diapause.stage + flights + immature.habitat + larval.diet +
                           (1 | id_cells) + (1 | scientificName) +
                           (0 + temp | scientificName) +
                           (0 + temp_seas | scientificName) +
                           (0 + prec | scientificName) +
                           temp_seas:seas + temp_seas:immature.habitat +
                           temp:flights + temp:immature.habitat +
                           prec:larval.diet + prec:diapause.stage,
                           data = model_df4, REML = FALSE,
                           lmerControl(optimizer = "bobyqa"))
summary(m_dur_traits_final)
car::vif(m_dur_traits_final)
car::Anova(m_dur_traits_final)
MuMIn::r.squaredGLMM(m_dur_traits_final)
# Marginal predictions for the retained climate x trait interactions
plot_model(m_dur_traits_final, type = "pred", terms = c("temp_seas", "seas"), ci.lvl = NA)
plot_model(m_dur_traits_final, type = "pred", terms = c("temp_seas", "immature.habitat"), ci.lvl = NA)
plot_model(m_dur_traits_final, type = "pred", terms = c("temp", "flights"),ci.lvl = NA)
plot_model(m_dur_traits_final, type = "pred", terms = c("temp", "immature.habitat"),ci.lvl = NA)
plot_model(m_dur_traits_final, type = "pred", terms = c("prec", "diapause.stage"), ci.lvl = NA)
plot_model(m_dur_traits_final, type = "pred", terms = c("prec", "larval.diet"),ci.lvl = NA)
### NOW ADD PHYLOGENY ########
## try the ROTL package
# Build a phylogeny for the modelled species from the Open Tree of Life.
library(rotl)
# Match scientific names to Open Tree taxonomy identifiers
nmz <- tnrs_match_names(names = unique(model_df4$scientificName), context_name = "Animals")
# Drop one problematic taxon by its ott_id before requesting the subtree
nmz2 <- filter(nmz, ott_id != 7146697)
insect_tree <- tol_induced_subtree(ott_ids = nmz2$ott_id)
# Tip labels come back as "Genus_species_ottNNN"; keep the first two words and
# replace the underscore so labels match scientificName ("Genus species")
insect_tree$tip.label <- word(insect_tree$tip.label, start = 1, end = 2, sep = "_") %>%
  sub(pattern = "_", replacement = " ")
plot(insect_tree, type = "fan")
plot(insect_tree)
# The OTL subtree has no branch lengths; assign them (Grafen's method by default)
insect_tree_bl <- ape::compute.brlen(insect_tree)
insect_tree_bl
on_coef
# Phylogenetic signal (Blomberg's K and Pagel's lambda) for each numeric
# column of a per-species coefficient table.
#
# inte: data frame whose first column is scientificName and whose remaining
#       columns are per-species estimates (intercepts or climate slopes).
# tree: phylogeny with branch lengths whose tip labels match
#       inte$scientificName. Defaults to the global insect_tree_bl, matching
#       the original hard-coded behavior while allowing other trees.
#
# Returns a tibble with one row per (column x test): the test statistic and
# its p-value.
physig_col <- function(inte, tree = insect_tree_bl) {
  col_to <- names(inte)[-1]
  out <- vector("list", length = length(col_to))
  names(out) <- col_to
  for (i in col_to) {
    x <- inte[[i]]
    names(x) <- inte$scientificName
    xp1 <- phytools::phylosig(tree, x, method = "K", test = TRUE)
    xp2 <- phytools::phylosig(tree, x, method = "lambda", test = TRUE)
    # NOTE(review): the K p-value is folded to the smaller tail
    # (min(P, 1 - P)); confirm this two-tailed correction is intended.
    out[[i]] <- tibble::tibble(statistic = c(xp1$K, xp2$lambda),
                               P = c(ifelse(xp1$P < 0.5, xp1$P, 1 - xp1$P), xp2$P),
                               test = c("K", "Lambda"))
  }
  bind_rows(out, .id = "terms")
}
# Assemble the per-phenometric species intercepts and test them for
# phylogenetic signal.
inte <- left_join(dplyr::select(on_coef, scientificName, intercept_ave_onset),
                  dplyr::select(off_coef, scientificName, intercept_ave_offset)) %>%
  left_join(dplyr::select(dur_coef, scientificName, intercept_ave_duration))
physig_intercept <- physig_col(inte) %>%
  arrange(test) %>%
  mutate(statistic = round(statistic, 4),
         P = round(P, 3))
physig_intercept
# Species-level climate sensitivities from each final model.
# FIX(review): the interaction column is literally named "temp:prec"; an
# unquoted temp:prec in select() is a tidyselect *range* (columns temp through
# prec), which silently renamed the wrong columns as tempprecint_*1/2.
# Backticks select the interaction column itself. Stray trailing commas in the
# select() calls also removed.
physig_slopes <- left_join(
  dplyr::select(on_coef, scientificName, temp_onset = temp, prec_onset = prec,
                temp_seas_onset = temp_seas, tempprecint_onset = `temp:prec`),
  dplyr::select(off_coef, scientificName, prec_offset = prec,
                temp_seas_offset = temp_seas)) %>%
  left_join(dplyr::select(dur_coef, scientificName, temp_dur = temp, prec_dur = prec,
                          temp_seas_dur = temp_seas, tempprecint_dur = `temp:prec`)) %>%
  physig_col() %>%
  arrange(test) %>%
  mutate(statistic = round(statistic, 4),
         P = round(P, 3))
as.data.frame(physig_slopes)
####### PGLMM
# onset
# Phylogenetic GLMMs (phyr::pglmm with the INLA backend): refit the final
# trait models with phylogenetically-correlated species random effects
# (the "scientificName__" double-underscore syntax).
library(phyr)
library(INLA)
# Burnsius communis not in phylogeny, remove
model_df5 <- filter(model_df4, scientificName %in% insect_tree_bl$tip.label)
# Ad-hoc inspection of which tree tips lack model data
a <- insect_tree_bl$tip.label %in% model_df5$scientificName
a[140]
a[32]
insect_tree_bl$tip.label[32]
insect_tree_bl$tip.label[140]
# Drop the two data-less tips found above.
# NOTE(review): insect_tree_bl2 is never used below -- pglmm calls pass
# insect_tree_bl; confirm which tree is intended.
insect_tree_bl2 <- ape::drop.tip(insect_tree_bl, c("Enodia anthedon", "Aeshna cyanea")) # = (C:1,(D:1,E:1):1);
# Bayesian phylogenetic version of the final onset trait model
pm_onset <- pglmm(onset ~ temp + prec + temp_seas + temp:prec +
                  diapause.stage + immature.habitat +
                  (1 | id_cells) + (1 | scientificName__) +
                  (0 + temp | scientificName__) +
                  (0 + prec | scientificName__) +
                  (0 + temp_seas | scientificName__) +
                  temp_seas:diapause.stage +
                  temp:immature.habitat +
                  prec:immature.habitat,
                  data = model_df5,
                  cov_ranef = list(scientificName = insect_tree_bl),
                  bayes = TRUE)
ranef(pm_onset)
fixef(pm_onset) %>% knitr::kable()
# Inspect the underlying INLA fit directly
inla_onset = pm_onset$inla.model
summary(inla_onset)
inla_onset$summary.fixed # kld are small, good
# Posterior marginals of selected fixed effects
plot(inla_onset$marginals.fixed$`(Intercept)`)
plot(inla_onset$marginals.fixed$temp)
plot(inla_onset$marginals.fixed$`temp:prec`)
#
names(inla_onset$marginals.fixed)
names(inla_onset$marginals.hyperpar)
length(inla_onset$summary.random)
inla_onset$marginals.random
# NOTE(review): installing packages from inside an analysis script is a side
# effect (network + library mutation) -- move these to setup documentation.
install.packages("remotes")
remotes::install_github("julianfaraway/brinla")
library(brinla)
# bri.hyperpar.plot(inla_onset, F)
# bri.lmresid.plot(inla_onset)
inla_onset$marginals.fitted.values
# Inverse square root: converts an INLA precision (1 / variance) into a
# standard deviation. Vectorized over x.
invsqrt <- function(x) {
  1 / sqrt(x)
}
# Convert precision hyperparameters to SDs (column 2, the sd of the
# precision posterior, is dropped before the transform)
invsqrt(inla_onset$summary.hyperpar[, -2]) # SD of random terms
# bri.hyperpar.summary(inla_onset)
inla_onset$marginals.random # species level random term
# bri.random.plot(inla_onset)
#
# PGLMM OFFSET #
# Bayesian phylogenetic version of the final offset trait model
pm_offset <- pglmm(offset ~ prec + temp_seas + seas + flights + immature.habitat +
                   (1 | id_cells) + (1 | scientificName__) +
                   (0 + prec | scientificName__) +
                   (0 + temp_seas | scientificName__) +
                   prec:seas +
                   temp_seas:seas +
                   temp_seas:immature.habitat,
                   data = model_df5,
                   cov_ranef = list(scientificName = insect_tree_bl),
                   bayes = TRUE)
ranef(pm_offset)
fixef(pm_offset) %>% knitr::kable()
# PGLMM Duration #
# Bayesian phylogenetic version of the final duration trait model
pm_dur <- pglmm(duration ~ temp + prec + temp_seas +
                seas + diapause.stage + flights + immature.habitat + larval.diet +
                (1 | id_cells) + (1 | scientificName__) +
                (0 + temp | scientificName__) +
                (0 + prec | scientificName__) +
                (0 + temp_seas | scientificName__) +
                temp_seas:seas +
                temp_seas:immature.habitat +
                temp:flights +
                temp:immature.habitat +
                prec:larval.diet +
                prec:diapause.stage,
                data = model_df5,
                cov_ranef = list(scientificName = insect_tree_bl),
                bayes = TRUE)
ranef(pm_dur)
fixef(pm_dur) %>% knitr::kable()
| /scripts/08_trait_analyses2.R | no_license | mbelitz/insect-pheno-duration | R | false | false | 30,047 | r | library(tidyverse)
library(lme4)
library(lmerTest)
library(sjPlot)
## read in spp list from google drive
spp_traits <- read.csv('data/traits/insect_traits.csv', stringsAsFactors = FALSE) %>%
na_if("")
## read in phenesse outputs
model_df <- read.csv(file = "data/model_dfs/duration_climate_population_data.csv",
stringsAsFactors = FALSE)
id_cells <- model_df %>%
group_by(lon, lat) %>%
summarise(count = n()) %>%
tibble::rownames_to_column() %>%
rename(id_cells = rowname)
model_df <- left_join(model_df, id_cells)
model_df2 <- model_df %>%
na.omit() %>%
mutate(temp = scale(temp),
prec = scale(prec),
pop = scale(log10(pop)),
prec_seas = scale(bio15),
temp_seas = scale(bio4))
datadens <- model_df2 %>%
group_by(scientificName, Order) %>%
summarise(count = n())
has_10_cells <- filter(datadens, count >= 10) %>%
filter (scientificName != "Apis mellifera") # 145
model_df2 <- filter(model_df2, scientificName %in% has_10_cells$scientificName)
unique(model_df2$scientificName) %>% length() # 217 spp
## remove migratory species
mig_spp <- c("Anax junius", "Pantala flavenscens", "Pantala hymenaea", "Tramea lacerata",
"Sympetrum corruptum", "Sympetrum vicinum", "Libellula pulchella", "Libellula vibrans",
"Tramea lacerata", "Tramea onusta", "Pantala flavescens", "Libellula quadrimaculata",
"Ertythrodiplax umbrata", "Epiaeschna heros", "Tramea carolina",
"Libellula semifasciata", "Pantala hymenaea",
"Spoladea recurvalis", "Ponoquina ocola", "Plutella xylostella",
"Chrysodeixis includens", "Phoebis sennae", "Abaeis nicippe",
"Libytheana carinenta", "Agraulis vanillae", "Junonia coenia",
"Danaus plexippus", "Vanessa virginiensis", "Vanessa cardui",
"Vanessa atalanta", "Danaus gilippus", "Nymphalis antiopa",
"Polygonia interrogationis", "Lerema accius")
model_df2 <- filter(model_df2, !scientificName %in% mig_spp)
unique(model_df2$scientificName) %>% length() # left with 194 spp
# make season trait
spp_seas <- model_df2 %>%
group_by(scientificName) %>%
summarise(ave_on = mean(onset))
hist(spp_seas$ave_on)
spp_seas2 <- spp_seas %>%
mutate(seas = case_when(ave_on <= 125 ~ "Spring",
ave_on > 125 & ave_on <= 175 ~ "Summer",
ave_on > 175 ~ "Fall"))
# combine traits and results
model_df3 <- left_join(model_df2, spp_traits)
model_df3 <- left_join(model_df3, spp_seas2) %>%
select(-ave_on)
# Onset model
mon <- lmer(onset ~ temp + prec + prec_seas + temp_seas + temp:prec +
(1|id_cells) + (1|scientificName) +
(0 + temp | scientificName) +
(0 + prec | scientificName) +
(0 + temp_seas | scientificName) +
(0 + prec_seas | scientificName) +
(0 + temp:prec | scientificName),
data = model_df3, REML = FALSE,
lmerControl(optimizer = "bobyqa"))
mon2 <- lmer(onset ~ temp + prec + prec_seas + temp_seas +
(1|id_cells) + (1|scientificName) +
(0 + temp | scientificName) +
(0 + prec | scientificName) +
(0 + temp_seas | scientificName) +
(0 + prec_seas | scientificName),
data = model_df3, REML = FALSE,
lmerControl(optimizer = "bobyqa"))
car::vif(mon)
car::vif(mon2)
MuMIn::Weights(AIC(mon, mon2))
step(mon)
#Model found:
# onset ~ temp + prec + temp_seas + (1 | id_cells) + (1 | scientificName) +
# (0 + temp | scientificName) + (0 + prec | scientificName) +
# (0 + temp_seas | scientificName) + (0 + prec_seas | scientificName) +
# (0 + temp:prec | scientificName) + temp:prec
## final model
m_on_final <- lmer(onset ~ temp + prec + temp_seas +
(1|id_cells) + (1|scientificName) +
(0 + temp | scientificName) +
(0 + prec | scientificName) +
(0 + temp:prec | scientificName) +
temp:prec,
data = model_df3, REML = FALSE,
lmerControl(optimizer = "bobyqa"))
summary(m_on_final)
car::vif(m_on_final)
suppressWarnings(MuMIn::r.squaredGLMM(m_on_final))
plot_model(m_on_final, type = "pred", terms = c("temp", "prec"))
### Extract Random term estimates
on_coef <- coef(m_on_final)$scientificName %>%
tibble::rownames_to_column("scientificName") %>%
rename(intercept_ave_onset = "(Intercept)") %>%
left_join(spp_traits, by = "scientificName") %>%
left_join(spp_seas2, by = "scientificName")
# clean on_coef
on_coef <- on_coef %>%
na.omit %>%
filter(diapause.stage != "None")
pivot_longer(on_coef, cols = c("higher_taxon", "larval.diet", "flights",
"immature.habitat", "development",
"diapause.stage", "seas")) %>%
ggplot(aes(x = value, y = intercept_ave_onset)) +
geom_boxplot() + geom_jitter() +
facet_wrap(~name, scales = "free")
pivot_longer(on_coef, cols = c("higher_taxon", "larval.diet", "flights",
"immature.habitat", "development",
"diapause.stage", "seas")) %>%
ggplot(aes(x = value, y = temp)) +
geom_boxplot() + geom_jitter() +
facet_wrap(~name, scales = "free") +
labs(y = "Sensentivity to temp")
pivot_longer(on_coef, cols = c("higher_taxon", "larval.diet", "flights",
"immature.habitat", "development",
"diapause.stage", "seas")) %>%
ggplot(aes(x = value, y = prec)) +
geom_boxplot() + geom_jitter() +
facet_wrap(~name, scales = "free") +
labs(y = "Sensentivity to prec")
pivot_longer(on_coef, cols = c("higher_taxon", "larval.diet", "flights",
"immature.habitat", "development",
"diapause.stage", "seas")) %>%
ggplot(aes(x = value, y = temp_seas)) +
geom_boxplot() + geom_jitter() +
facet_wrap(~name, scales = "free") +
labs(y = "Sensentivity to temp_seas")
######## Add traits to model
model_df4 <- na.omit(model_df3) %>%
filter(diapause.stage != "None")
m_on_traits <- lmer(onset ~ temp + prec + temp_seas + temp:prec +
development + temp:development + prec:development + temp_seas:development +
diapause.stage + temp:diapause.stage + prec:diapause.stage + temp_seas:diapause.stage +
flights + temp:flights + prec:flights + temp_seas:flights +
immature.habitat + temp:immature.habitat + prec:immature.habitat + temp_seas:immature.habitat +
larval.diet + temp:larval.diet + prec:larval.diet + temp_seas:larval.diet +
(1|id_cells) + (1|scientificName) +
(0 + temp | scientificName) +
(0 + prec | scientificName) +
(0 + temp_seas | scientificName) +
(0 + temp:prec | scientificName),
data = model_df4, REML = FALSE,
lmerControl(optimizer = "bobyqa"))
m_on_traits_s = step(m_on_traits, reduce.random = T)
m_on_traits_s
# Model found:
# onset ~ temp + prec + temp_seas + diapause.stage + immature.habitat +
# (1 | id_cells) + (1 | scientificName) + (0 + temp | scientificName) +
# (0 + prec | scientificName) + (0 + temp_seas | scientificName) +
# (0 + temp:prec | scientificName) + temp:prec + temp_seas:diapause.stage +
# temp:immature.habitat + prec:immature.habitat
m_on_traits_final <- lmer(onset ~ temp + prec + temp_seas + temp:prec +
diapause.stage + immature.habitat +
(1 | id_cells) + (1 | scientificName) +
(0 + temp | scientificName) +
(0 + prec | scientificName) +
(0 + temp_seas | scientificName) +
(0 + temp:prec | scientificName) +
temp_seas:diapause.stage +
temp:flights +
temp:immature.habitat +
prec:immature.habitat,
data = model_df4, REML = FALSE,
lmerControl(optimizer = "bobyqa"))
# First check vifs to see if final model will stand
car::vif(m_on_traits_final) ## Generalized Collinearity Diagnostics (Fox and Monette 1992)
# looks good so now make sure we have the top model & no more interactions are needed
m_on_traits_s2 = step(m_on_traits_final, reduce.random = T)
m_on_traits_s2 # looks like temp:flights is not needed interaction remove and test again
m_on_traits_final <- lmer(onset ~ temp + prec + temp_seas + temp:prec +
diapause.stage + immature.habitat +
(1 | id_cells) + (1 | scientificName) +
(0 + temp | scientificName) +
(0 + prec | scientificName) +
(0 + temp_seas | scientificName) +
(0 + temp:prec | scientificName) +
temp_seas:diapause.stage +
temp:immature.habitat +
prec:immature.habitat,
data = model_df4, REML = FALSE,
lmerControl(optimizer = "bobyqa"))
m_on_traits_s3 = step(m_on_traits_final, reduce.random = T)
m_on_traits_s3 # looks like we found the top model
summary(m_on_traits_final)
car::Anova(m_on_traits_final) # temp_seas:diapuase stage & temp:diapause stage are sig interactions
car::vif(m_on_traits_final) ## Generalized Collinearity Diagnostics (Fox and Monette 1992)
MuMIn::r.squaredGLMM(m_on_traits_final)
plot_model(m_on_traits_final, type = "pred", terms = c("temp_seas", "diapause.stage"), ci.lvl = NA)
plot_model(m_on_traits_final, type = "pred", terms = c("temp", "immature.habitat"), ci.lvl = NA)
plot_model(m_on_traits_final, type = "pred", terms = c("prec", "immature.habitat"), ci.lvl = NA)
## Explore traits for offset data
m_off <- lmer(offset ~ temp + prec + temp_seas + prec_seas + temp:prec +
(1|id_cells) + (1|scientificName) +
(0 + temp | scientificName) +
(0 + prec | scientificName) +
(0 + prec_seas | scientificName) +
(0 + temp_seas | scientificName) +
(0 + temp:prec | scientificName),
data = model_df3, REML = FALSE,
lmerControl(optimizer = "bobyqa"))
summary(m_off)
m_off_s <- step(m_off)
m_off_s
#Model found:
# offset ~ prec + temp_seas + (1 | id_cells) + (1 | scientificName) +
# (0 + temp | scientificName) + (0 + prec_seas | scientificName) +
# (0 + temp_seas | scientificName) + (0 + temp:prec | scientificName)
# Final model without traits!
m_off_final <-lmer(offset ~ prec + temp_seas +
(1|id_cells) + (1|scientificName) +
(0 + prec | scientificName) +
(0 + temp_seas | scientificName),
data = model_df3, REML = FALSE,
lmerControl(optimizer = "bobyqa"))
summary(m_off_final)
car::vif(m_off_final)
suppressWarnings(MuMIn::r.squaredGLMM(m_off_final))
### Extract Random term estimates
off_coef <- coef(m_off_final)$scientificName %>%
rownames_to_column("scientificName") %>%
rename(intercept_ave_offset = "(Intercept)") %>%
left_join(spp_traits, by = "scientificName") %>%
left_join(spp_seas2, by = "scientificName")
# clean off_coef
off_coef <- off_coef %>%
na.omit %>%
filter(diapause.stage != "None")
pivot_longer(off_coef, cols = c("higher_taxon", "larval.diet", "flights",
"immature.habitat", "development",
"diapause.stage", "seas")) %>%
ggplot(aes(x = value, y = intercept_ave_offset)) +
geom_boxplot() + geom_jitter() +
facet_wrap(~name, scales = "free")
pivot_longer(off_coef, cols = c("higher_taxon", "larval.diet", "flights",
"immature.habitat", "development",
"diapause.stage", "seas")) %>%
ggplot(aes(x = value, y = prec)) +
geom_boxplot() + geom_jitter() +
facet_wrap(~name, scales = "free") +
labs(y = "Sensentivity to prec")
pivot_longer(off_coef, cols = c("higher_taxon", "larval.diet", "flights",
"immature.habitat", "development",
"diapause.stage", "seas")) %>%
ggplot(aes(x = value, y = temp_seas)) +
geom_boxplot() + geom_jitter() +
facet_wrap(~name, scales = "free") +
labs(y = "Sensentivity to temp_seas")
######## Add traits to model
# Offset (season end) model with species traits: every trait main effect plus
# its interactions with prec and temp_seas, random intercepts for grid cell
# and species, and species-specific random slopes for prec and temp_seas.
# NOTE(review): assumes model_df4 and the trait columns were assembled earlier
# in this script (not visible in this chunk).
m_off_traits <- lmer(offset ~ prec + temp_seas +
                       seas + prec:seas + temp_seas:seas +
                       development + prec:development + temp_seas:development +
                       diapause.stage + prec:diapause.stage + temp_seas:diapause.stage +
                       flights + prec:flights + temp_seas:flights +
                       immature.habitat + prec:immature.habitat + temp_seas:immature.habitat +
                       larval.diet + prec:larval.diet + temp_seas:larval.diet +
                       (1|id_cells) + (1|scientificName) +
                       (0 + prec | scientificName) +
                       (0 + temp_seas | scientificName),
                     data = model_df4, REML = FALSE,
                     lmerControl(optimizer = "bobyqa"))
# stepwise regression to select model
# (lmerTest::step; reduce.random = T also drops non-significant random terms)
m_off_traits_s = step(m_off_traits, reduce.random = T)
m_off_traits_s
# Model found:
# offset ~ prec + temp_seas + development + diapause.stage + flights +
#   immature.habitat + larval.diet + (1 | id_cells) + (1 | scientificName) +
#   (0 + prec | scientificName) + (0 + temp_seas | scientificName) +
#   temp_seas:development + prec:diapause.stage + prec:immature.habitat +
#   temp_seas:immature.habitat + prec:larval.diet
# Final offset model with traits
# NOTE(review): this refit does not exactly match the step() output above
# (seas terms kept, diapause.stage dropped) — presumably a deliberate choice;
# confirm against the analysis notes.
m_off_traits_final <- lmer(offset ~ prec + temp_seas +
                             seas + development + flights + immature.habitat +
                             (1|id_cells) + (1|scientificName) +
                             (0 + temp_seas | scientificName) +
                             (0 + prec | scientificName) +
                             prec:seas +
                             temp_seas:seas +
                             temp_seas:development +
                             prec:immature.habitat +
                             temp_seas:immature.habitat,
                           data = model_df4, REML = FALSE,
                           lmerControl(optimizer = "bobyqa"))
#first see if VIFs are okay
car::vif(m_off_traits_final) # they are not. temp_seas:development is inflated, so removed
# Final offset model with traits
# Refit without the collinear temp_seas:development interaction.
m_off_traits_final <- lmer(offset ~ prec + temp_seas +
                             seas + development + flights + immature.habitat +
                             (1|id_cells) + (1|scientificName) +
                             (0 + temp_seas | scientificName) +
                             (0 + prec | scientificName) +
                             prec:seas +
                             temp_seas:seas +
                             prec:immature.habitat +
                             temp_seas:immature.habitat,
                           data = model_df4, REML = FALSE,
                           lmerControl(optimizer = "bobyqa"))
# check vifs again
car::vif(m_off_traits_final) # we're good. Double check best model
# stepwise regression to select model
m_off_traits_s2 = step(m_off_traits_final, reduce.random = T)
m_off_traits_s2
#Model found:
# offset ~ prec + temp_seas + seas + flights + immature.habitat +
#  (1 | id_cells) + (1 | scientificName) +
#  (0 + temp_seas | scientificName) + (0 + prec | scientificName) + prec:seas +
#  temp_seas:seas + temp_seas:immature.habitat
# Final refit matching the second step() selection (development dropped).
m_off_traits_final <- lmer(offset ~ prec + temp_seas +
                             seas + flights + immature.habitat +
                             (1|id_cells) + (1|scientificName) +
                             (0 + temp_seas | scientificName) +
                             (0 + prec | scientificName) +
                             prec:seas +
                             temp_seas:seas +
                             temp_seas:immature.habitat,
                           data = model_df4, REML = FALSE,
                           lmerControl(optimizer = "bobyqa"))
# check vifs again
car::vif(m_off_traits_final)
# Inference and fit summaries for the final offset model.
summary(m_off_traits_final)
car::vif(m_off_traits_final)
car::Anova(m_off_traits_final)
MuMIn::r.squaredGLMM(m_off_traits_final)
# Marginal-effect plots for the retained interactions.
plot_model(m_off_traits_final, type = "pred", terms = c("temp_seas", "seas"), ci.lvl = NA)
plot_model(m_off_traits_final, type = "pred", terms = c("temp_seas", "immature.habitat"), ci.lvl = NA)
plot_model(m_off_traits_final, type = "pred", terms = c("prec", "seas"),ci.lvl = NA)
############ Duration
# Duration model without traits: climate main effects, temp x prec
# interaction, and species-specific random slopes for each climate term.
m_dur <- lmer(duration ~ temp + prec + temp_seas + prec_seas + temp:prec +
                (1|id_cells) + (1|scientificName) +
                (0 + temp | scientificName) +
                (0 + prec | scientificName) +
                (0 + prec_seas | scientificName) +
                (0 + temp_seas | scientificName) +
                (0 + temp:prec | scientificName),
              data = model_df4, REML = FALSE,
              lmerControl(optimizer = "bobyqa"))
# Stepwise selection (lmerTest::step) on fixed and random terms.
m_dur_s <- step(m_dur)
m_dur_s
# Model found:
# duration ~ temp + prec + temp_seas + (1 | id_cells) + (1 | scientificName) +
#   (0 + temp | scientificName) + (0 + prec_seas | scientificName) +
#   (0 + temp_seas | scientificName) + (0 + temp:prec | scientificName) +
#   temp:prec
## Final Duration Model No Traits ##
# NOTE(review): this refit keeps (0 + prec | scientificName) and drops
# (0 + prec_seas | scientificName), which differs from the step() output
# above — confirm this was intentional.
m_dur_final <- lmer(duration ~ temp + prec + temp_seas + temp:prec +
                      (1|id_cells) + (1|scientificName) +
                      (0 + temp | scientificName) +
                      (0 + prec | scientificName) +
                      (0 + temp_seas | scientificName) +
                      (0 + temp:prec | scientificName),
                    data = model_df4, REML = FALSE,
                    lmerControl(optimizer = "bobyqa"))
summary(m_dur_final)
car::vif(m_dur_final)
suppressWarnings(MuMIn::r.squaredGLMM(m_dur_final))
### Extract Random term estimates
# Per-species coefficients (fixed + random), joined to traits and seasonality.
dur_coef <- coef(m_dur_final)$scientificName %>%
  rownames_to_column("scientificName") %>%
  rename(intercept_ave_duration = "(Intercept)") %>%
  left_join(spp_traits, by = "scientificName") %>%
  left_join(spp_seas2, by = "scientificName")
# Exploratory boxplots: per-species duration intercepts across each trait.
pivot_longer(dur_coef, cols = c("higher_taxon", "larval.diet", "flights",
                                "immature.habitat", "development",
                                "diapause.stage", "seas")) %>%
  ggplot(aes(x = value, y = intercept_ave_duration)) +
  geom_boxplot() + geom_jitter() +
  facet_wrap(~name, scales = "free")
## Add traits to duration model
# Full duration model: each trait plus its interactions with prec,
# temp_seas, and temp; same random structure as the no-trait model.
m_dur_traits <- lmer(duration ~ temp + prec + temp_seas + temp:prec +
                       seas + prec:seas + temp_seas:seas + temp:seas +
                       development + prec:development + temp_seas:development + temp:development +
                       diapause.stage + prec:diapause.stage + temp_seas:diapause.stage + temp:diapause.stage +
                       flights + prec:flights + temp_seas:flights + temp:flights +
                       immature.habitat + prec:immature.habitat + temp_seas:immature.habitat + temp:immature.habitat +
                       larval.diet + prec:larval.diet + temp_seas:larval.diet + temp:larval.diet +
                       (1|id_cells) + (1|scientificName) +
                       (0 + prec | scientificName) +
                       (0 + temp | scientificName) +
                       (0 + temp_seas | scientificName) +
                       (0 + temp:prec | scientificName),
                     data = model_df4, REML = FALSE,
                     lmerControl(optimizer = "bobyqa"))
# Stepwise selection on fixed and random terms.
m_dur_traits_s = step(m_dur_traits, reduce.random = T)
m_dur_traits_s
# Refit of the step()-selected model (first pass).
m_dur_traits_final <- lmer(duration ~ temp + prec + temp_seas +
                             seas + development + diapause.stage + flights + immature.habitat + larval.diet +
                             (1 | id_cells) + (1 | scientificName) +
                             (0 + prec | scientificName) +
                             (0 + temp | scientificName) +
                             (0 + temp_seas | scientificName) +
                             (0 + temp:prec | scientificName) +
                             temp_seas:seas + temp:development +
                             prec:diapause.stage + temp:flights +
                             temp_seas:immature.habitat + temp:immature.habitat +
                             prec:larval.diet,
                           data = model_df4, REML = FALSE,
                           lmerControl(optimizer = "bobyqa"))
#check vifs
car::vif(m_dur_traits_final) # temp:development is inflated remove
# Second pass: drop the collinear temp:development interaction.
m_dur_traits_final <- lmer(duration ~ temp + prec + temp_seas +
                             seas + development + diapause.stage + flights + immature.habitat + larval.diet +
                             (1 | id_cells) + (1 | scientificName) +
                             (0 + prec | scientificName) +
                             (0 + temp | scientificName) +
                             (0 + temp_seas | scientificName) +
                             (0 + temp:prec | scientificName) +
                             temp_seas:seas +
                             prec:diapause.stage + temp:flights +
                             temp_seas:immature.habitat + temp:immature.habitat +
                             prec:larval.diet,
                           data = model_df4, REML = FALSE,
                           lmerControl(optimizer = "bobyqa"))
#check vifs again
car::vif(m_dur_traits_final) # development is inflated remove
# Third pass: drop the development main effect (and the temp:prec random
# slope, which is also removed here).
m_dur_traits_final <- lmer(duration ~ temp + prec + temp_seas +
                             seas + diapause.stage + flights + immature.habitat + larval.diet +
                             (1 | id_cells) + (1 | scientificName) +
                             (0 + prec | scientificName) +
                             (0 + temp | scientificName) +
                             (0 + temp_seas | scientificName) +
                             temp_seas:seas +
                             prec:diapause.stage + temp:flights +
                             temp_seas:immature.habitat + temp:immature.habitat +
                             prec:larval.diet,
                           data = model_df4, REML = FALSE,
                           lmerControl(optimizer = "bobyqa"))
#check vifs again
car::vif(m_dur_traits_final) # good now
## see if final model remains the same
m_dur_traits_s2 = step(m_dur_traits_final, reduce.random = T)
m_dur_traits_s2
#Model found:
# duration ~ temp + prec + temp_seas + seas + diapause.stage +
#   flights + immature.habitat + larval.diet +
#   (1 | id_cells) + (1 | scientificName) +
#   (0 + prec | scientificName) + (0 + temp | scientificName) + (0 + temp_seas | scientificName) +
#   temp_seas:seas + prec:diapause.stage + temp:flights + temp_seas:immature.habitat +
#   temp:immature.habitat + prec:larval.diet
# Final duration model with traits, matching the step() output above.
m_dur_traits_final <- lmer(duration ~ temp + prec + temp_seas +
                             seas + diapause.stage + flights + immature.habitat + larval.diet +
                             (1 | id_cells) + (1 | scientificName) +
                             (0 + temp | scientificName) +
                             (0 + temp_seas | scientificName) +
                             (0 + prec | scientificName) +
                             temp_seas:seas + temp_seas:immature.habitat +
                             temp:flights + temp:immature.habitat +
                             prec:larval.diet + prec:diapause.stage,
                           data = model_df4, REML = FALSE,
                           lmerControl(optimizer = "bobyqa"))
# Inference and fit summaries for the final duration model.
summary(m_dur_traits_final)
car::vif(m_dur_traits_final)
car::Anova(m_dur_traits_final)
MuMIn::r.squaredGLMM(m_dur_traits_final)
# Marginal-effect plots for each retained interaction.
plot_model(m_dur_traits_final, type = "pred", terms = c("temp_seas", "seas"), ci.lvl = NA)
plot_model(m_dur_traits_final, type = "pred", terms = c("temp_seas", "immature.habitat"), ci.lvl = NA)
plot_model(m_dur_traits_final, type = "pred", terms = c("temp", "flights"),ci.lvl = NA)
plot_model(m_dur_traits_final, type = "pred", terms = c("temp", "immature.habitat"),ci.lvl = NA)
plot_model(m_dur_traits_final, type = "pred", terms = c("prec", "diapause.stage"), ci.lvl = NA)
plot_model(m_dur_traits_final, type = "pred", terms = c("prec", "larval.diet"),ci.lvl = NA)
### NOW ADD PHYLOGENY ########
## try the ROTL package
# Build a phylogeny for the modeled species from the Open Tree of Life.
library(rotl)
# Match species names to OTT ids; requires network access.
nmz <- tnrs_match_names(names = unique(model_df4$scientificName), context_name = "Animals")
# NOTE(review): ott_id 7146697 is excluded here — presumably a bad match;
# confirm which species this is.
nmz2 <- filter(nmz, ott_id != 7146697)
insect_tree <- tol_induced_subtree(ott_ids = nmz2$ott_id)
# Tip labels come back as Genus_species_ottXXXX; keep "Genus species".
insect_tree$tip.label <- word(insect_tree$tip.label, start = 1, end = 2, sep = "_") %>%
  sub(pattern = "_", replacement = " ")
plot(insect_tree, type = "fan")
plot(insect_tree)
# OTL trees have no branch lengths; assign them (Grafen's method by default).
insect_tree_bl <- ape::compute.brlen(insect_tree)
insect_tree_bl
on_coef
# Phylogenetic signal (Blomberg's K and Pagel's lambda, via phytools) for
# every column of `inte` except the first, which is assumed to hold
# scientificName. Uses the global tree `insect_tree_bl`. Returns a tibble
# with one row per term x test.
physig_col = function(inte){
  trait_cols = names(inte)[-1]
  per_trait = lapply(trait_cols, function(trait){
    vals = inte[[trait]]
    names(vals) = inte$scientificName
    sig_k = phytools::phylosig(insect_tree_bl, vals, method = "K", test = T)
    sig_lam = phytools::phylosig(insect_tree_bl, vals, method = "lambda", test = T)
    # Mirror K's permutation P about 0.5, as in the original implementation
    # (presumably a two-sidedness correction — confirm).
    p_k = if(sig_k$P < 0.5) sig_k$P else 1 - sig_k$P
    tibble::tibble(statistic = c(sig_k$K, sig_lam$lambda),
                   P = c(p_k, sig_lam$P),
                   test = c("K", "Lambda"))
  })
  names(per_trait) = trait_cols
  bind_rows(per_trait, .id = "terms")
}
# Table of per-species intercepts (onset, offset, duration) for the
# phylogenetic-signal tests.
inte = left_join(dplyr::select(on_coef, scientificName, intercept_ave_onset),
                 dplyr::select(off_coef, scientificName, intercept_ave_offset)) %>%
  left_join(dplyr::select(dur_coef, scientificName, intercept_ave_duration))
# Phylogenetic signal of the intercepts (K and lambda per phenometric).
physig_intercept = physig_col(inte) %>%
  arrange(test) %>%
  mutate(statistic = round(statistic, 4),
         P = round(P, 3))
physig_intercept
# Same test for the climate random slopes from each model.
# NOTE(review): `tempprecint_onset = temp:prec` uses tidyselect, where an
# unquoted temp:prec is a column *range* (temp through prec), not the
# interaction column named "temp:prec" — if the literal column is intended
# it should be backticked; verify the resulting columns.
physig_slopes = left_join(dplyr::select(on_coef, scientificName, temp_onset = temp, prec_onset = prec,
                                        temp_seas_onset = temp_seas, tempprecint_onset = temp:prec),
                          dplyr::select(off_coef, scientificName, prec_offset = prec, temp_seas_offset = temp_seas,
                          )) %>%
  left_join(dplyr::select(dur_coef, scientificName, temp_dur = temp, prec_dur = prec,
                          temp_seas_dur = temp_seas, tempprecint_dur = temp:prec,
                          )) %>%
  physig_col() %>%
  arrange(test) %>%
  mutate(statistic = round(statistic, 4),
         P = round(P, 3))
as.data.frame(physig_slopes)
####### PGLMM
# onset
# Phylogenetic mixed models (phyr::pglmm with INLA backend); the double
# underscore (scientificName__) adds a phylogenetically structured copy of
# each species random term.
library(phyr)
library(INLA)
# Burnsius communis not in phylogeny, remove
model_df5 <- filter(model_df4, scientificName %in% insect_tree_bl$tip.label)
# Interactive checks of which tips lack data rows.
a <- insect_tree_bl$tip.label %in% model_df5$scientificName
a[140]
a[32]
insect_tree_bl$tip.label[32]
insect_tree_bl$tip.label[140]
# NOTE(review): insect_tree_bl2 (tree minus the two unmatched tips) is built
# here but the pglmm calls below still pass insect_tree_bl — confirm which
# tree is intended.
insect_tree_bl2 <- ape::drop.tip(insect_tree_bl, c("Enodia anthedon", "Aeshna cyanea")) # = (C:1,(D:1,E:1):1);
# Onset PGLMM: fixed structure from the final lmer onset model.
pm_onset <- pglmm(onset ~ temp + prec + temp_seas + temp:prec +
                    diapause.stage + immature.habitat +
                    (1 | id_cells) + (1 | scientificName__) +
                    (0 + temp | scientificName__) +
                    (0 + prec | scientificName__) +
                    (0 + temp_seas | scientificName__) +
                    temp_seas:diapause.stage +
                    temp:immature.habitat +
                    prec:immature.habitat,
                  data = model_df5,
                  cov_ranef = list(scientificName = insect_tree_bl),
                  bayes = TRUE)
ranef(pm_onset)
fixef(pm_onset) %>% knitr::kable()
# Inspect the underlying INLA fit.
inla_onset = pm_onset$inla.model
summary(inla_onset)
inla_onset$summary.fixed # kld are small, good
# Posterior marginals for selected fixed effects.
plot(inla_onset$marginals.fixed$`(Intercept)`)
plot(inla_onset$marginals.fixed$temp)
plot(inla_onset$marginals.fixed$`temp:prec`)
#
names(inla_onset$marginals.fixed)
names(inla_onset$marginals.hyperpar)
length(inla_onset$summary.random)
inla_onset$marginals.random
# NOTE(review): installing packages mid-script is a side effect on the
# library; consider guarding with requireNamespace() or moving to setup.
install.packages("remotes")
remotes::install_github("julianfaraway/brinla")
library(brinla)
# bri.hyperpar.plot(inla_onset, F)
# bri.lmresid.plot(inla_onset)
inla_onset$marginals.fitted.values
# Inverse square root; used below to convert INLA precision hyperparameters
# into standard deviations.
invsqrt <- function(x) {
  1 / sqrt(x)
}
# Convert precision hyperparameters to SDs (column 2, the SD column of the
# summary, is excluded from the transform).
invsqrt(inla_onset$summary.hyperpar[, -2]) # SD of random terms
# bri.hyperpar.summary(inla_onset)
inla_onset$marginals.random # species level random term
# bri.random.plot(inla_onset)
#
# PGLMM OFFSET #
# Offset PGLMM: fixed structure from the final lmer offset model.
pm_offset <- pglmm(offset ~ prec + temp_seas + seas + flights + immature.habitat +
                     (1 | id_cells) + (1 | scientificName__) +
                     (0 + prec | scientificName__) +
                     (0 + temp_seas | scientificName__) +
                     prec:seas +
                     temp_seas:seas +
                     temp_seas:immature.habitat,
                   data = model_df5,
                   cov_ranef = list(scientificName = insect_tree_bl),
                   bayes = TRUE)
ranef(pm_offset)
fixef(pm_offset) %>% knitr::kable()
# PGLMM Duration #
# Duration PGLMM: fixed structure from the final lmer duration model.
pm_dur <- pglmm(duration ~ temp + prec + temp_seas +
                  seas + diapause.stage + flights + immature.habitat + larval.diet +
                  (1 | id_cells) + (1 | scientificName__) +
                  (0 + temp | scientificName__) +
                  (0 + prec | scientificName__) +
                  (0 + temp_seas | scientificName__) +
                  temp_seas:seas +
                  temp_seas:immature.habitat +
                  temp:flights +
                  temp:immature.habitat +
                  prec:larval.diet +
                  prec:diapause.stage,
                data = model_df5,
                cov_ranef = list(scientificName = insect_tree_bl),
                bayes = TRUE)
ranef(pm_dur)
fixef(pm_dur) %>% knitr::kable()
|
# Plot average MNase-seq fragment coverage around motif centers for six
# example TFs (per-TF facets, +/-200 bp from the motif center).
# NOTE(review): depends on the non-CRAN helper package fjComm (clear_, qw,
# gg_theme_Publication, gg_save_pdf) and on magrittr's %<>% pipe.
fjComm::clear_()
df=read_tsv("K562_merged_rep1_20.6U_79.2U_304U_MNase.sorted.rmdup.autosomes.over140fragments.fragment_coverage.All_motifs_overlap_K562_ChIP.autosomes_top500.v2.200bp_flank_from_center.average_per_TF.txt")
# Keep six TFs and name the columns: TF, position, coverage, n sites.
df %<>% dplyr::filter(TF %in% qw("YBX1 HMBOX1 PKNOX1 ATF2 ELF1 ATF3")) %>% set_colnames(qw("TF pos cvg sites"))
# pos is 0-based from the window start; pos-199 re-centers on the motif.
p=ggplot(df)+geom_line(aes(pos-199,cvg))+facet_wrap(~TF,2,3,scales = "free_y")+
  scale_x_continuous(limits = c(-200,200),expand = c(0,0))+xlab("(bp)")+ylab("MNase-seq coverage")+gg_theme_Publication()
print(p)
gg_save_pdf(p,14,6,filename = "corr_examples_not_used")
| /!ADD_Nat_revise1/6_ENCODE_corr/MNase_cvg_v3/examples.R | no_license | aquaflakes/individual_analyses | R | false | false | 605 | r | fjComm::clear_()
df=read_tsv("K562_merged_rep1_20.6U_79.2U_304U_MNase.sorted.rmdup.autosomes.over140fragments.fragment_coverage.All_motifs_overlap_K562_ChIP.autosomes_top500.v2.200bp_flank_from_center.average_per_TF.txt")
df %<>% dplyr::filter(TF %in% qw("YBX1 HMBOX1 PKNOX1 ATF2 ELF1 ATF3")) %>% set_colnames(qw("TF pos cvg sites"))
p=ggplot(df)+geom_line(aes(pos-199,cvg))+facet_wrap(~TF,2,3,scales = "free_y")+
scale_x_continuous(limits = c(-200,200),expand = c(0,0))+xlab("(bp)")+ylab("MNase-seq coverage")+gg_theme_Publication()
print(p)
gg_save_pdf(p,14,6,filename = "corr_examples_not_used")
|
#' @title plot hits as a xy dotplot
#' @description
#' \code{plot_hits} routines to make visually appealing dotplots
#' @name plot_hits
#'
#' @param gsParam A list of genespace parameters. This should be created
#' by init_genespace.
#' @param type character string of "all", "raw", or "syntenic" specifying which
#' type of dotplot to generate.
#' @param verbose logical, should updates be printed to the console?
#' @param hits data.table containing hits. See read_allBlast.
#' @param outDir file.path where pdf should be written
#' @param minGenes2plot integer specifying the minimum number of genes that can
#' be plotted
#' @param appendName character with text to append to the file name
#' @param dotsPerIn integer specifying how fine-scaled the heatmap is
#' @param quantileThresh integer specifying the top quantile to be thresholded
#' @param plotSize numeric smalled dimension of the plot
#' @param minScore numeric the minimum score to be permitted in the first plot
#' @param maxFacets integer the maximum number of facets to plot (doesn't plot
#' for genomes with lots of small chrs/scaffolds)
#' @param colorByBlks logical, should blocks be colored?
#' @param alpha numeric [0-1], specifying the transparency of the points
#' @param useOrder logical, should gene order or bp position be used?
#' @param minScore numeric, the minimum scoring hit to plot
#' @param minGenes2plot integer, the minimum number of hits to plot a chromosome
#' combination.
#'
#' \cr
#' If called, \code{plot_hits} returns its own arguments.
#'
#' @details Dotplots here aggregate across proximate positions to reduce file
#' size, especially for very large genomes. XY positions are always in gene-rank
#' order positions. Graphics are built with ggplot2.
#' @title Plot syntenic hits
#' @description
#' \code{plot_hits} The pipeline to plot syntenic hits in parallel
#' @rdname plot_hits
#' @import data.table
#' @import R.utils
#' @importFrom dbscan dbscan frNN
#' @importFrom parallel mclapply
#' @export
plot_hits <- function(gsParam,
                      verbose = TRUE,
                      type,
                      dotsPerIn = 256,
                      quantileThresh = .5,
                      plotSize = 12,
                      minScore = 50){

  ##############################################################################
  # 1. setup
  # -- 1.1 bind NSE column names to NULL to quiet R CMD check
  query <- target <- lab <- nRegionHits <- nRegions <- nAnchorHits <- nBlks <-
    nSVs <- selfOnly <- queryPloidy <- targetPloidy <- nGlobOgHits <- synHits <-
    nTotalHits <- chunk <- inBuffer <- NULL

  if(!"synteny" %in% names(gsParam))
    gsParam <- set_syntenyParams(gsParam)
  nCores <- gsParam$params$nCores

  # -- 1.2 check that the required blast files exist for the requested type
  if(!"synteny" %in% names(gsParam))
    stop("must run set_syntenyParams prior to synteny")
  if(!all(file.exists(gsParam$synteny$blast$allBlast)) && type %in% c("all", "raw"))
    stop("some annotated blast files dont exist, run annotate_blast() first\n")
  if(!all(file.exists(gsParam$synteny$blast$synHits)) && type %in% c("all", "syntenic"))
    stop("some syntenic blast files dont exist, run synteny() first\n")

  # -- 1.3 split the blast metadata into nCores-sized chunks, largest
  # comparisons first so the parallel workers stay balanced
  blMd <- data.table(gsParam$synteny$blast)
  blMd[,lab := align_charLeft(sprintf("%s v. %s:", query, target))]
  blMd[,selfOnly := query == target & queryPloidy == 1 & targetPloidy == 1]
  if(!"nGlobOgHits" %in% colnames(blMd))
    blMd[,nGlobOgHits := file.size(synHits)]
  if(!"nTotalHits" %in% colnames(blMd))
    blMd[,nTotalHits := file.size(synHits)]
  setorder(blMd, selfOnly, -nGlobOgHits, -nTotalHits)
  blMd[,chunk := rep(1:.N, each = nCores)[1:.N]]
  synMdSpl <- split(blMd, by = "chunk")

  ##############################################################################
  # 2. loop over chunks; within each chunk, plot the query/target pairs in
  # parallel (side effects only: pdfs written by ggdotplot)
  lapply(seq_along(synMdSpl), function(chnki){
    chnk <- data.table(synMdSpl[[chnki]])
    mclapply(seq_len(nrow(chnk)), mc.cores = nCores, function(i){
      # -- 2.1 read the annotated blast hits for this genome pair
      x <- data.table(chnk[i,])
      rawHits <- read_allBlast(x$allBlast)
      # minimum genes/chr to plot: 10% of the mean same-orthogroup hits per
      # chromosome on either genome, floored at 5
      l1 <- mean(table(rawHits$chr1[rawHits$sameOG]))/10
      l2 <- mean(table(rawHits$chr2[rawHits$sameOG]))/10
      minGenes <- max(min(c(l1, l2)), 5)
      # -- 2.2 dispatch on the dotplot setting: "check" caps the facet
      # count (skips very fragmented assemblies), "always" never skips
      dps <- gsParam$params$dotplots
      if(dps == "check"){
        ggdotplot(
          hits = data.table(rawHits),
          outDir = gsParam$paths$dotplots,
          minGenes2plot = minGenes,
          maxFacets = 10000,
          type = type,
          dotsPerIn = dotsPerIn,
          quantileThresh = quantileThresh,
          plotSize = plotSize,
          minScore = minScore)
      }else if(dps == "always"){
        ggdotplot(
          hits = data.table(rawHits),
          outDir = gsParam$paths$dotplots,
          minGenes2plot = minGenes,
          maxFacets = Inf,
          type = type,
          dotsPerIn = dotsPerIn,
          quantileThresh = quantileThresh,
          plotSize = plotSize,
          minScore = minScore)
      }
    })
  })
  return(gsParam)
}
#' @title make dotplots of syntenic hits
#' @description
#' \code{ggdotplot} ggplot2 integrated graphics to produce dotplots
#' @rdname plot_hits
#' @import data.table
#' @import ggplot2
#' @importFrom grDevices pdf dev.off rgb
#' @importFrom dbscan dbscan frNN
#' @export
ggdotplot <- function(hits,
                      type,
                      outDir = NULL,
                      minGenes2plot = 100,
                      appendName = "synHits",
                      dotsPerIn = 256,
                      quantileThresh = .5,
                      plotSize = 12,
                      minScore = 50,
                      maxFacets = 10000,
                      verbose = is.null(outDir)){
  # bind NSE column names to NULL to quiet R CMD check
  # (appendName is currently unused but kept for interface stability)
  ofID1 <- ofID2 <- sameOg <- ngene1 <- ngene2 <- ord1 <- ord2 <- blkID <-
    inBuffer <- rnd2 <- rnd1 <- n <- isArrayRep2 <- isArrayRep1 <- chr1 <-
    noAnchor <- bitScore <- quantile <- chr2 <- sameOG <- isAnchor <- NULL

  ##############################################################################
  # 1. Get the plot size figured out: the smaller genome gets plotSize inches,
  # the other axis scales with its relative gene count
  tp <- data.table(hits)
  un1 <- uniqueN(tp$ofID1)
  un2 <- uniqueN(tp$ofID2)
  if(un1 > un2){
    ht <- plotSize
    wd <- ht * (un1/un2)
  }else{
    wd <- plotSize
    ht <- wd * (un2/un1)
  }
  # bin width in gene-rank units so each axis carries ~dotsPerIn dots/inch
  x <- max(tp$ord1, na.rm = TRUE)
  y <- max(tp$ord2, na.rm = TRUE)
  xrnd2 <- floor(x / (wd * dotsPerIn)) + 1
  yrnd2 <- floor(y / (ht * dotsPerIn)) + 1
  tp[,`:=`(rnd1 = round_toInteger(ord1, xrnd2),
           rnd2 = round_toInteger(ord2, yrnd2))]
  tp <- subset(tp, complete.cases(tp[,c("rnd1", "rnd2", "chr1", "chr2")]))

  ##############################################################################
  # 2. Heatmap of all hits above minScore, regardless of orthogroup (p0),
  # then the same restricted to same-orthogroup hits (p1)
  # makeCrappyDotplots holds raw hit tables for the base-graphics fallback
  # used when there are too many chromosome facets
  makeCrappyDotplots <- list(p0 = NULL, p1 = NULL, p2 = NULL)
  if(type %in% c("all", "raw")){
    # -- 2.1 subset the hits to those with high enough score
    hc <- subset(tp, bitScore > minScore)
    ng1 <- as.integer(uniqueN(hc$ofID1))
    ng2 <- as.integer(uniqueN(hc$ofID2))
    # -- 2.2 get axis labels
    xlab <- sprintf(
      "%s: gene rank order position (%s genes w/ blast hits), grids every 1000 genes",
      hits$genome1[1], ng1)
    ylab <- sprintf(
      "%s: gene rank order position (%s genes w/ blast hits), grids every 1000 genes",
      hits$genome2[1], ng2)
    # -- 2.3 subset to chrs with enough genes on them
    hc[,ngene1 := uniqueN(ofID1[!noAnchor & isArrayRep1], na.rm = T), by = "chr1"]
    hc[,ngene2 := uniqueN(ofID2[!noAnchor & isArrayRep2], na.rm = T), by = "chr2"]
    hc <- subset(hc, ngene1 > minGenes2plot & ngene2 > minGenes2plot)
    # -- 2.4 count n hits in each aggregated (binned) position
    hc <- hc[,c("chr1", "chr2", "rnd1", "rnd2")]
    hc <- subset(hc, complete.cases(hc))
    hc <- hc[,list(n = .N), by = c("chr1", "chr2", "rnd1", "rnd2")]
    setorder(hc, -n)
    hc <- subset(hc, !is.na(n))
    # -- 2.5 threshold n (clamped to [5, 20]) so dense regions don't
    # dominate the color scale
    qthresh <- quantile(hc$n, quantileThresh)
    if(qthresh > 20)
      qthresh <- 20
    if(qthresh < 5)
      qthresh <- 5
    hc$n[hc$n > qthresh] <- qthresh
    # -- 2.6 get plot title
    titlab <- sprintf(
      "All blast hits with score > %s, %s/%s-gene x/y windows (heatmap range: 2-%s+ hits/window)",
      minScore, xrnd2, yrnd2, round(qthresh))
    # -- 2.7 make the plot, unless the chr x chr facet grid is too large
    setorder(hc, n)
    hc <- subset(hc, n > 1)
    nfacets <- nrow(with(hc, expand.grid(unique(chr1), unique(chr2))))
    if(nfacets < maxFacets){
      chrOrd1 <- unique(tp$chr1[order(tp$rnd1)])
      chrOrd2 <- unique(tp$chr2[order(tp$rnd2)])
      hc[,`:=`(chr1 = factor(chr1, levels = chrOrd1),
               chr2 = factor(chr2, levels = chrOrd2))]
      p0 <- ggplot(hc, aes(rnd1, rnd2, col = n)) +
        geom_point(pch = ".") +
        scale_color_viridis_c(begin = .1, trans = "log10", guide = "none") +
        scale_x_continuous(expand = c(0,0),
                           breaks = seq(from = 1e3, to = max(hc$rnd1), by = 1e3))+
        scale_y_continuous(expand = c(0,0),
                           breaks = seq(from = 1e3, to = max(hc$rnd2), by = 1e3))+
        theme_genespace()+
        facet_grid(chr2 ~ chr1, scales = "free",
                   space = "free", as.table = F, switch = "both")+
        labs(x = xlab, y = ylab, title = titlab)
    }else{
      # too many facets: keep the raw hits for a base-graphics fallback
      p0 <- NULL
      makeCrappyDotplots[["p0"]] <- subset(tp, bitScore > minScore)
    }

    ############################################################################
    # 3. Same heatmap restricted to same-orthogroup hits
    # -- 3.1 subset the hits
    hc <- subset(tp, sameOG)
    ng1 <- as.integer(uniqueN(hc$ofID1))
    ng2 <- as.integer(uniqueN(hc$ofID2))
    # -- 3.2 get axis labels
    xlab <- sprintf(
      "%s: gene rank order position (%s genes w/ blast hits), grids every 1000 genes",
      hits$genome1[1], ng1)
    ylab <- sprintf(
      "%s: gene rank order position (%s genes w/ blast hits), grids every 1000 genes",
      hits$genome2[1], ng2)
    # -- 3.3 subset to chrs with enough genes on them
    hc[,ngene1 := uniqueN(ofID1[!noAnchor & isArrayRep1]), by = "chr1"]
    hc[,ngene2 := uniqueN(ofID2[!noAnchor & isArrayRep2]), by = "chr2"]
    hc <- subset(hc, ngene1 > minGenes2plot & ngene2 > minGenes2plot)
    # -- 3.4 count n hits in each aggregated position
    hc <- hc[,list(n = .N), by = c("chr1", "chr2", "rnd1", "rnd2")]
    setorder(hc, -n)
    hc <- subset(hc, !is.na(n))
    # -- 3.5 threshold n as above
    qthresh <- quantile(hc$n, quantileThresh)
    if(qthresh > 20)
      qthresh <- 20
    if(qthresh < 5)
      qthresh <- 5
    hc$n[hc$n > qthresh] <- qthresh
    # -- 3.6 get plot title
    titlab <- sprintf(
      "Blast hits where query and target are in the same orthogroup, %s/%s-gene x/y windows (heatmap range: 1-%s+ hits/window)",
      xrnd2, yrnd2, round(qthresh))
    # -- 3.7 make the plot
    setorder(hc, n)
    nfacets <- nrow(with(hc, expand.grid(unique(chr1), unique(chr2))))
    if(nfacets < maxFacets){
      chrOrd1 <- unique(tp$chr1[order(tp$rnd1)])
      chrOrd2 <- unique(tp$chr2[order(tp$rnd2)])
      hc[,`:=`(chr1 = factor(chr1, levels = chrOrd1),
               chr2 = factor(chr2, levels = chrOrd2))]
      p1 <- ggplot(hc, aes(rnd1, rnd2, col = n)) +
        geom_point(pch = ".") +
        scale_color_viridis_c(begin = .1, trans = "log10", guide = "none") +
        scale_x_continuous(expand = c(0,0),
                           breaks = seq(from = 1e3, to = max(hc$rnd1), by = 1e3))+
        scale_y_continuous(expand = c(0,0),
                           breaks = seq(from = 1e3, to = max(hc$rnd2), by = 1e3))+
        theme_genespace()+
        facet_grid(chr2 ~ chr1, scales = "free",
                   space = "free", as.table = F, switch = "both")+
        labs(x = xlab, y = ylab, title = titlab)
    }else{
      p1 <- NULL
      makeCrappyDotplots[["p1"]] <- subset(tp, sameOG)
    }
  }else{
    p1 <- p0 <- NULL
  }

  ##############################################################################
  # 4. Syntenic anchor hits, colored by block ID (p2)
  if(type %in% c("all", "syntenic")){
    hcBlk <- subset(tp, isAnchor)
    hcBlk[,ngene1 := uniqueN(ofID1[!noAnchor & isArrayRep1]), by = "chr1"]
    hcBlk[,ngene2 := uniqueN(ofID2[!noAnchor & isArrayRep2]), by = "chr2"]
    hcBlk <- subset(hcBlk, ngene1 > minGenes2plot & ngene2 > minGenes2plot)
    hcBlk <- hcBlk[,list(n = .N), by = c("chr1", "chr2", "rnd1", "rnd2", "blkID")]
    blkCols <- sample(gs_colors(uniqueN(hcBlk$blkID)))
    # (previously ng1/ng2 were recomputed from hcBlk$ofID1/2 here; those
    # columns were dropped by the aggregation above, so the values were
    # always 0 and never used -- removed)
    nfacets <- nrow(with(hcBlk, expand.grid(unique(chr1), unique(chr2))))
    if(nfacets < maxFacets){
      chrOrd1 <- unique(hcBlk$chr1[order(hcBlk$rnd1)])
      chrOrd2 <- unique(hcBlk$chr2[order(hcBlk$rnd2)])
      hcBlk[,`:=`(chr1 = factor(chr1, levels = chrOrd1),
                  chr2 = factor(chr2, levels = chrOrd2))]
      hcBlk <- subset(hcBlk, !is.na(rnd1) & !is.na(rnd2))
      if(nrow(hcBlk) < 1){
        warning(sprintf("no syntenic hits found for %s vs. %s",
                        hits$genome1[1], hits$genome2[1]))
        p2 <- NULL
      }else{
        p2 <- ggplot(hcBlk, aes(x = rnd1, y = rnd2, col = blkID)) +
          geom_point(pch = ".") +
          scale_color_manual(values = blkCols, guide = "none") +
          scale_x_continuous(expand = c(0,0), breaks = seq(from = 1e3, to = max(hcBlk$rnd1), by = 1e3))+
          scale_y_continuous(expand = c(0,0), breaks = seq(from = 1e3, to = max(hcBlk$rnd2), by = 1e3))+
          theme_genespace() +
          facet_grid(chr2 ~ chr1, scales = "free", space = "free", as.table = F, switch = "both")+
          labs(x = sprintf("%s: gene rank order position (%s genes with blast hits), gridlines every 1000 genes",
                           hits$genome1[1], uniqueN(hits$ofID1[hits$isAnchor])),
               y = sprintf("%s: gene rank order position (%s genes with blast hits), gridlines every 1000 genes",
                           hits$genome2[1], uniqueN(hits$ofID2[hits$isAnchor])),
               title = sprintf("Syntenic anchor blast hits, colored by block ID"))
      }
    }else{
      p2 <- NULL
      makeCrappyDotplots[["p2"]] <- subset(tp, isAnchor)
    }
  }else{
    p2 <- NULL
  }

  ##############################################################################
  # 5. Output: print to the active device, or write a pdf; for each plot that
  # hit the maxFacets cap, draw a simple base-graphics fallback instead
  if(is.null(outDir)){
    if(verbose)
      cat("writing to the present graphics device")
    if(!is.null(p0))
      print(p0)
    if(!is.null(p1))
      print(p1)
    if(!is.null(p2))
      print(p2)
  }else{
    dpFile <- file.path(outDir,
                        sprintf("%s_vs_%s.%sHits.pdf",
                                tp$genome1[1], tp$genome2[1], type))
    pdf(dpFile, height = ht, width = wd)
    if(verbose)
      cat(sprintf("writing to file: %s", dpFile))
    if(!is.null(makeCrappyDotplots[["p0"]])){
      with(makeCrappyDotplots[["p0"]], plot(
        rnd1, rnd2, pch = ".",
        xlab = "all gene rank order (genome1)",
        ylab = "all gene rank order (genome2)",
        main = "these genomes have too many chromosomes to plot nicely."))
    }else{
      if(!is.null(p0))
        print(p0)
    }
    # bug fix: this branch previously checked and plotted "p0" again (and
    # tested is.null(p0) before printing p1), so the orthogroup plot was
    # never written correctly -- it now uses p1 throughout
    if(!is.null(makeCrappyDotplots[["p1"]])){
      with(makeCrappyDotplots[["p1"]], plot(
        rnd1, rnd2, pch = ".",
        xlab = "orthogroup hit gene rank order (genome1)",
        ylab = "orthogroup gene rank order (genome2)",
        main = "these genomes have too many chromosomes to plot nicely."))
    }else{
      if(!is.null(p1))
        print(p1)
    }
    if(!is.null(makeCrappyDotplots[["p2"]])){
      with(makeCrappyDotplots[["p2"]], plot(
        rnd1, rnd2, pch = ".",
        xlab = "syntenic hit gene rank order (genome1)",
        ylab = "syntenic hit gene rank order (genome2)",
        main = "these genomes have too many chromosomes to plot nicely."))
    }else{
      if(!is.null(p2))
        print(p2)
    }
    de <- dev.off()
  }
}
#' @title simple dotplots from a hits data.table
#' @description
#' \code{gghits} ggplot2 integrated graphics to produce dotplots
#' @rdname plot_hits
#' @import data.table
#' @import ggplot2
#' @export
gghits <- function(hits,
                   colorByBlks = TRUE,
                   alpha = ifelse(colorByBlks, 1, .25),
                   useOrder = TRUE,
                   minScore = 0,
                   minGenes2plot = 0){
  # bind NSE column names to NULL to quiet R CMD check
  ofID1 <- ofID2 <- sameOg <- ngene1 <- ngene2 <- ord1 <- ord2 <- blkID <-
    inBuffer <- rnd2 <- rnd1 <- n <- isArrayRep2 <- isArrayRep1 <- chr1 <-
    noAnchor <- bitScore <- quantile <- chr2 <- sameOG <- isAnchor <-
    start1 <- start2 <- x <- y <- NULL

  tp <- data.table(hits)
  # -- which hits to show: syntenic block anchors (colored by block), or
  # every hit above minScore
  if(colorByBlks){
    hc <- subset(tp, !is.na(blkID) & isAnchor)
  }else{
    hc <- subset(tp, bitScore > minScore)
  }
  # -- drop chromosomes with too few plottable genes
  hc[,ngene1 := uniqueN(ofID1[!noAnchor & isArrayRep1], na.rm = T), by = "chr1"]
  hc[,ngene2 := uniqueN(ofID2[!noAnchor & isArrayRep2], na.rm = T), by = "chr2"]
  hc <- subset(hc, ngene1 > minGenes2plot & ngene2 > minGenes2plot)
  # -- x/y coordinates: gene-rank order, or physical position in Mb
  if(useOrder){
    hc[,`:=`(x = ord1, y = ord2)]
    xlab <- "query gene rank order position"
    ylab <- "target gene rank order position"
  }else{
    hc[,`:=`(x = start1/1e6, y = start2/1e6)]
    xlab <- "query physical (Mb) gene position"
    ylab <- "target physical (Mb) gene position"
  }
  # -- shared theme (was duplicated verbatim in both branches)
  dpTheme <- theme(panel.background = element_rect(fill = "black"),
                   panel.grid.minor = element_blank(),
                   panel.grid.major = element_line(
                     color = rgb(1, 1, 1, .2), size = .2, linetype = 2),
                   panel.spacing = unit(.1, "mm"),
                   axis.ticks = element_blank(),
                   strip.background = element_blank(),
                   axis.text = element_text(family = "Helvetica", size = 5),
                   axis.title = element_text(family = "Helvetica", size = 6),
                   plot.title = element_text(family = "Helvetica", size = 7))
  # -- base layers differ only in the block coloring
  if(colorByBlks){
    blkCols <- sample(gs_colors(uniqueN(hc$blkID)))
    p <- ggplot(hc, aes(x = x, y = y, col = blkID)) +
      geom_point(pch = ".", alpha = alpha) +
      scale_color_manual(values = blkCols, guide = "none")
  }else{
    p <- ggplot(hc, aes(x = x, y = y)) +
      geom_point(pch = ".", alpha = alpha)
  }
  p <- p +
    scale_x_continuous(expand = c(0,0), breaks = pretty(hc$x, n = 10))+
    scale_y_continuous(expand = c(0,0), breaks = pretty(hc$y, n = 10))+
    facet_grid(genome2 + chr2 ~ genome1 + chr1, scales = "free",
               space = "free", as.table = F)+
    labs(x = xlab, y = ylab)+
    dpTheme
  print(p)
}
| /R/plot_hits.R | permissive | jtlovell/GENESPACE | R | false | false | 19,784 | r | #' @title plot hits as a xy dotplot
#' @description
#' \code{plot_hits} routines to make visually appealing dotplots
#' @name plot_hits
#'
#' @param gsParam A list of genespace parameters. This should be created
#' by init_genespace.
#' @param type character string of "all", "raw", or "syntenic" specifying which
#' type of dotplot to generate.
#' @param verbose logical, should updates be printed to the console?
#' @param hits data.table containing hits. See read_allBlast.
#' @param outDir file.path where pdf should be written
#' @param minGenes2plot integer specifying the minimum number of genes that can
#' be plotted
#' @param appendName character with text to append to the file name
#' @param dotsPerIn integer specifying how fine-scaled the heatmap is
#' @param quantileThresh integer specifying the top quantile to be thresholded
#' @param plotSize numeric smalled dimension of the plot
#' @param minScore numeric the minimum score to be permitted in the first plot
#' @param maxFacets integer the maximum number of facets to plot (doesn't plot
#' for genomes with lots of small chrs/scaffolds)
#' @param colorByBlks logical, should blocks be colored?
#' @param alpha numeric [0-1], specifying the transparency of the points
#' @param useOrder logical, should gene order or bp position be used?
#' @param minScore numeric, the minimum scoring hit to plot
#' @param minGenes2plot integer, the minimum number of hits to plot a chromosome
#' combination.
#'
#' \cr
#' If called, \code{plot_hits} returns its own arguments.
#'
#' @details Dotplots here aggregate across proximate positions to reduce file
#' size, especially for very large genomes. XY positions are always in gene-rank
#' order positions. Graphics are built with ggplot2.
#' @title Plot syntenic hits
#' @description
#' \code{plot_hits} The pipeline to plot syntenic hits in parallel
#' @rdname plot_hits
#' @import data.table
#' @import R.utils
#' @importFrom dbscan dbscan frNN
#' @importFrom parallel mclapply
#' @export
plot_hits <- function(gsParam,
                      verbose = TRUE,
                      type,
                      dotsPerIn = 256,
                      quantileThresh = .5,
                      plotSize = 12,
                      minScore = 50){

  ##############################################################################
  # 1. setup
  # -- 1.1 bind NSE column names to NULL to quiet R CMD check
  query <- target <- lab <- nRegionHits <- nRegions <- nAnchorHits <- nBlks <-
    nSVs <- selfOnly <- queryPloidy <- targetPloidy <- nGlobOgHits <- synHits <-
    nTotalHits <- chunk <- inBuffer <- NULL

  if(!"synteny" %in% names(gsParam))
    gsParam <- set_syntenyParams(gsParam)
  nCores <- gsParam$params$nCores

  # -- 1.2 check that the required blast files exist for the requested type
  if(!"synteny" %in% names(gsParam))
    stop("must run set_syntenyParams prior to synteny")
  if(!all(file.exists(gsParam$synteny$blast$allBlast)) && type %in% c("all", "raw"))
    stop("some annotated blast files dont exist, run annotate_blast() first\n")
  if(!all(file.exists(gsParam$synteny$blast$synHits)) && type %in% c("all", "syntenic"))
    stop("some syntenic blast files dont exist, run synteny() first\n")

  # -- 1.3 split the blast metadata into nCores-sized chunks, largest
  # comparisons first so the parallel workers stay balanced
  blMd <- data.table(gsParam$synteny$blast)
  blMd[,lab := align_charLeft(sprintf("%s v. %s:", query, target))]
  blMd[,selfOnly := query == target & queryPloidy == 1 & targetPloidy == 1]
  if(!"nGlobOgHits" %in% colnames(blMd))
    blMd[,nGlobOgHits := file.size(synHits)]
  if(!"nTotalHits" %in% colnames(blMd))
    blMd[,nTotalHits := file.size(synHits)]
  setorder(blMd, selfOnly, -nGlobOgHits, -nTotalHits)
  blMd[,chunk := rep(1:.N, each = nCores)[1:.N]]
  synMdSpl <- split(blMd, by = "chunk")

  ##############################################################################
  # 2. loop over chunks; within each chunk, plot the query/target pairs in
  # parallel (side effects only: pdfs written by ggdotplot)
  lapply(seq_along(synMdSpl), function(chnki){
    chnk <- data.table(synMdSpl[[chnki]])
    mclapply(seq_len(nrow(chnk)), mc.cores = nCores, function(i){
      # -- 2.1 read the annotated blast hits for this genome pair
      x <- data.table(chnk[i,])
      rawHits <- read_allBlast(x$allBlast)
      # minimum genes/chr to plot: 10% of the mean same-orthogroup hits per
      # chromosome on either genome, floored at 5
      l1 <- mean(table(rawHits$chr1[rawHits$sameOG]))/10
      l2 <- mean(table(rawHits$chr2[rawHits$sameOG]))/10
      minGenes <- max(min(c(l1, l2)), 5)
      # -- 2.2 dispatch on the dotplot setting: "check" caps the facet
      # count (skips very fragmented assemblies), "always" never skips
      dps <- gsParam$params$dotplots
      if(dps == "check"){
        ggdotplot(
          hits = data.table(rawHits),
          outDir = gsParam$paths$dotplots,
          minGenes2plot = minGenes,
          maxFacets = 10000,
          type = type,
          dotsPerIn = dotsPerIn,
          quantileThresh = quantileThresh,
          plotSize = plotSize,
          minScore = minScore)
      }else if(dps == "always"){
        ggdotplot(
          hits = data.table(rawHits),
          outDir = gsParam$paths$dotplots,
          minGenes2plot = minGenes,
          maxFacets = Inf,
          type = type,
          dotsPerIn = dotsPerIn,
          quantileThresh = quantileThresh,
          plotSize = plotSize,
          minScore = minScore)
      }
    })
  })
  return(gsParam)
}
#' @title make dotplots of syntenic hits
#' @description
#' \code{ggdotplot} ggplot2 integrated graphics to produce dotplots
#' @rdname plot_hits
#' @import data.table
#' @import ggplot2
#' @importFrom grDevices pdf dev.off rgb
#' @importFrom dbscan dbscan frNN
#' @export
ggdotplot <- function(hits,
                      type,
                      outDir = NULL,
                      minGenes2plot = 100,
                      appendName = "synHits",
                      dotsPerIn = 256,
                      quantileThresh = .5,
                      plotSize = 12,
                      minScore = 50,
                      maxFacets = 10000,
                      verbose = is.null(outDir)){
  # Silence R CMD check NOTEs for data.table / ggplot2 NSE column names.
  ofID1 <- ofID2 <- sameOg <- ngene1 <- ngene2 <- ord1 <- ord2 <- blkID <-
    inBuffer <- rnd2 <- rnd1 <- n <- isArrayRep2 <- isArrayRep1 <- chr1 <-
    noAnchor <- bitScore <- quantile <- chr2 <- sameOG <- isAnchor <- NULL
  ##############################################################################
  # 1. Get the plot size figured out
  tp <- data.table(hits)
  un1 <- uniqueN(tp$ofID1)
  un2 <- uniqueN(tp$ofID2)
  # scale the long axis so the dot density is comparable on both axes
  if(un1 > un2){
    ht <- plotSize
    wd <- ht * (un1/un2)
  }else{
    wd <- plotSize
    ht <- wd * (un2/un1)
  }
  x <- max(tp$ord1, na.rm = T)
  y <- max(tp$ord2, na.rm = T)
  # window sizes (in genes) so that each window covers ~1/dotsPerIn inch
  ordPerIn <- x / dotsPerIn
  totDots <- wd * dotsPerIn
  xrnd2 <- floor(x / totDots)+1
  ordPerIn <- y / dotsPerIn
  totDots <- ht * dotsPerIn
  yrnd2 <- floor(y / totDots)+1
  # aggregate gene rank-order positions into xrnd2 x yrnd2 windows
  tp[,`:=`(rnd1 = round_toInteger(ord1, xrnd2),
           rnd2 = round_toInteger(ord2, yrnd2))]
  tp <- subset(tp, complete.cases(tp[,c("rnd1", "rnd2", "chr1", "chr2")]))
  ##############################################################################
  # 2. Make the plot with all hits, regardless of og
  # -- 2.1 subset the hits to those with high enough score
  # Fallback raw data used when there are too many chr facets to draw a
  # faceted ggplot; keyed by the plot it replaces (p0/p1/p2).
  makeCrappyDotplots <- list(p0 = NULL, p1 = NULL, p2 = NULL)
  if(type %in% c("all", "raw")){
    hc <- subset(tp, bitScore > minScore)
    ng1 <- as.integer(uniqueN(hc$ofID1))
    ng2 <- as.integer(uniqueN(hc$ofID2))
    # -- 2.2 get axis labels
    xlab <- sprintf(
      "%s: gene rank order position (%s genes w/ blast hits), grids every 1000 genes",
      hits$genome1[1], ng1)
    ylab <- sprintf(
      "%s: gene rank order position (%s genes w/ blast hits), grids every 1000 genes",
      hits$genome2[1], ng2)
    # -- 2.3 subset to chrs with enough genes on them
    hc[,ngene1 := uniqueN(ofID1[!noAnchor & isArrayRep1], na.rm = T), by = "chr1"]
    hc[,ngene2 := uniqueN(ofID2[!noAnchor & isArrayRep2], na.rm = T), by = "chr2"]
    hc <- subset(hc, ngene1 > minGenes2plot & ngene2 > minGenes2plot)
    # -- 2.4 count n hits in each aggregated position
    hc <- hc[,c("chr1", "chr2", "rnd1", "rnd2")]
    hc <- subset(hc, complete.cases(hc))
    hc <- hc[,list(n = .N), by = c("chr1", "chr2", "rnd1", "rnd2")]
    setorder(hc, -n)
    hc <- subset(hc, !is.na(n))
    # -- 2.5 threshold n to not highlight super strong regions
    qthresh <- quantile(hc$n, quantileThresh)
    if(qthresh > 20)
      qthresh <- 20
    if(qthresh < 5)
      qthresh <- 5
    hc$n[hc$n > qthresh] <- qthresh
    # -- 2.6 get plot title
    titlab <- sprintf(
      "All blast hits with score > %s, %s/%s-gene x/y windows (heatmap range: 2-%s+ hits/window)",
      minScore, xrnd2, yrnd2, round(qthresh))
    # -- 2.7 make the plot
    setorder(hc, n)
    hc <- subset(hc, n > 1)
    nfacets <- nrow(with(hc, expand.grid(unique(chr1), unique(chr2))))
    if(nfacets < maxFacets){
      # order chromosome facets by their position along each axis
      chrOrd1 <- unique(tp$chr1[order(tp$rnd1)])
      chrOrd2 <- unique(tp$chr2[order(tp$rnd2)])
      hc[,`:=`(chr1 = factor(chr1, levels = chrOrd1),
               chr2 = factor(chr2, levels = chrOrd2))]
      p0 <- ggplot(hc, aes(rnd1, rnd2, col = n)) +
        geom_point(pch = ".") +
        scale_color_viridis_c(begin = .1, trans = "log10", guide = "none") +
        scale_x_continuous(expand = c(0,0),
                           breaks = seq(from = 1e3, to = max(hc$rnd1), by = 1e3))+
        scale_y_continuous(expand = c(0,0),
                           breaks = seq(from = 1e3, to = max(hc$rnd2), by = 1e3))+
        theme_genespace()+
        facet_grid(chr2 ~ chr1, scales = "free",
                   space = "free", as.table = F, switch = "both")+
        labs(x = xlab, y = ylab, title = titlab)
    }else{
      p0 <- NULL
      makeCrappyDotplots[["p0"]] <- subset(tp, bitScore > minScore)
    }
  ##############################################################################
  # 3. Make the plot with just OG hits
  # -- 2.1 subset the hits to those with high enough score
    hc <- subset(tp, sameOG)
    ng1 <- as.integer(uniqueN(hc$ofID1))
    ng2 <- as.integer(uniqueN(hc$ofID2))
    # -- 2.2 get axis labels
    xlab <- sprintf(
      "%s: gene rank order position (%s genes w/ blast hits), grids every 1000 genes",
      hits$genome1[1], ng1)
    ylab <- sprintf(
      "%s: gene rank order position (%s genes w/ blast hits), grids every 1000 genes",
      hits$genome2[1], ng2)
    # -- 2.3 subset to chrs with enough genes on them
    hc[,ngene1 := uniqueN(ofID1[!noAnchor & isArrayRep1]), by = "chr1"]
    hc[,ngene2 := uniqueN(ofID2[!noAnchor & isArrayRep2]), by = "chr2"]
    hc <- subset(hc, ngene1 > minGenes2plot & ngene2 > minGenes2plot)
    # -- 2.4 count n hits in each aggregated position
    hc <- hc[,list(n = .N), by = c("chr1", "chr2", "rnd1", "rnd2")]
    setorder(hc, -n)
    hc <- subset(hc, !is.na(n))
    # -- 2.5 threshold n to not highlight super strong regions
    qthresh <- quantile(hc$n, quantileThresh)
    if(qthresh > 20)
      qthresh <- 20
    if(qthresh < 5)
      qthresh <- 5
    hc$n[hc$n > qthresh] <- qthresh
    # -- 2.6 get plot title
    titlab <- sprintf(
      "Blast hits where query and target are in the same orthogroup, %s/%s-gene x/y windows (heatmap range: 1-%s+ hits/window)",
      xrnd2, yrnd2, round(qthresh))
    # -- 2.7 make the plot
    setorder(hc, n)
    nfacets <- nrow(with(hc, expand.grid(unique(chr1), unique(chr2))))
    if(nfacets < maxFacets){
      chrOrd1 <- unique(tp$chr1[order(tp$rnd1)])
      chrOrd2 <- unique(tp$chr2[order(tp$rnd2)])
      hc[,`:=`(chr1 = factor(chr1, levels = chrOrd1),
               chr2 = factor(chr2, levels = chrOrd2))]
      p1 <- ggplot(hc, aes(rnd1, rnd2, col = n)) +
        geom_point(pch = ".") +
        scale_color_viridis_c(begin = .1, trans = "log10", guide = "none") +
        scale_x_continuous(expand = c(0,0),
                           breaks = seq(from = 1e3, to = max(hc$rnd1), by = 1e3))+
        scale_y_continuous(expand = c(0,0),
                           breaks = seq(from = 1e3, to = max(hc$rnd2), by = 1e3))+
        theme_genespace()+
        facet_grid(chr2 ~ chr1, scales = "free",
                   space = "free", as.table = F, switch = "both")+
        labs(x = xlab, y = ylab, title = titlab)
    }else{
      p1 <- NULL
      makeCrappyDotplots[["p1"]] <- subset(tp, sameOG)
    }
  }else{
    p1 <- p0 <- NULL
  }
  if(type %in% c("all", "syntenic")){
    ##############################################################################
    # 4. Make the plot with just anchors
    hcBlk <- subset(tp, isAnchor)
    hcBlk[,ngene1 := uniqueN(ofID1[!noAnchor & isArrayRep1]), by = "chr1"]
    hcBlk[,ngene2 := uniqueN(ofID2[!noAnchor & isArrayRep2]), by = "chr2"]
    hcBlk <- subset(hcBlk, ngene1 > minGenes2plot & ngene2 > minGenes2plot)
    hcBlk <- hcBlk[,list(n = .N), by = c("chr1", "chr2", "rnd1", "rnd2", "blkID")]
    blkCols <- sample(gs_colors(uniqueN(hcBlk$blkID)))
    # NOTE(review): after the aggregation above hcBlk has no ofID1/ofID2
    # columns, so ng1/ng2 are 0 here; they are unused downstream.
    ng1 <- as.integer(uniqueN(hcBlk$ofID1))
    ng2 <- as.integer(uniqueN(hcBlk$ofID2))
    nfacets <- nrow(with(hcBlk, expand.grid(unique(chr1), unique(chr2))))
    if(nfacets < maxFacets){
      chrOrd1 <- unique(hcBlk$chr1[order(hcBlk$rnd1)])
      chrOrd2 <- unique(hcBlk$chr2[order(hcBlk$rnd2)])
      hcBlk[,`:=`(chr1 = factor(chr1, levels = chrOrd1),
                  chr2 = factor(chr2, levels = chrOrd2))]
      hcBlk <- subset(hcBlk, !is.na(rnd1) & !is.na(rnd2))
      if(nrow(hcBlk) < 1){
        warning(sprintf("no syntenic hits found for %s vs. %s",
                        hits$genome1[1], hits$genome2[1]))
        p2 <- NULL
      }else{
        p2 <- ggplot(hcBlk, aes(x = rnd1, y = rnd2, col = blkID)) +
          geom_point(pch = ".") +
          scale_color_manual(values = blkCols, guide = "none") +
          scale_x_continuous(expand = c(0,0), breaks = seq(from = 1e3, to = max(hcBlk$rnd1), by = 1e3))+
          scale_y_continuous(expand = c(0,0), breaks = seq(from = 1e3, to = max(hcBlk$rnd2), by = 1e3))+
          theme_genespace() +
          facet_grid(chr2 ~ chr1, scales = "free", space = "free", as.table = F, switch = "both")+
          labs(x = sprintf("%s: gene rank order position (%s genes with blast hits), gridlines every 1000 genes",
                           hits$genome1[1], uniqueN(hits$ofID1[hits$isAnchor])),
               y = sprintf("%s: gene rank order position (%s genes with blast hits), gridlines every 1000 genes",
                           hits$genome2[1], uniqueN(hits$ofID2[hits$isAnchor])),
               title = sprintf("Syntenic anchor blast hits, colored by block ID"))
      }
    }else{
      p2 <- NULL
      makeCrappyDotplots[["p2"]] <- subset(tp, isAnchor)
    }
  }else{
    p2 <- NULL
  }
  if(is.null(outDir)){
    if(verbose)
      cat("writing to the present graphics device")
    if(!is.null(p0))
      print(p0)
    if(!is.null(p1))
      print(p1)
    if(!is.null(p2))
      print(p2)
  }else{
    dpFile <- file.path(outDir,
                        sprintf("%s_vs_%s.%sHits.pdf",
                                tp$genome1[1], tp$genome2[1], type))
    pdf(dpFile, height = ht, width = wd)
    if(verbose)
      cat(sprintf("writing to file: %s", dpFile))
    if(!is.null(makeCrappyDotplots[["p0"]])){
      with(makeCrappyDotplots[["p0"]], plot(
        rnd1, rnd2, pch = ".",
        xlab = "all gene rank order (genome1)",
        ylab = "all gene rank order (genome2)",
        main = "these genomes have too many chromosomes to plot nicely."))
    }else{
      if(!is.null(p0))
        print(p0)
    }
    # BUG FIX: this fallback previously tested and plotted
    # makeCrappyDotplots[["p0"]] and guarded print(p1) with
    # !is.null(p0); it must use the orthogroup-hit data/plot (p1),
    # as its axis labels already indicate.
    if(!is.null(makeCrappyDotplots[["p1"]])){
      with(makeCrappyDotplots[["p1"]], plot(
        rnd1, rnd2, pch = ".",
        xlab = "orthogroup hit gene rank order (genome1)",
        ylab = "orthogroup gene rank order (genome2)",
        main = "these genomes have too many chromosomes to plot nicely."))
    }else{
      if(!is.null(p1))
        print(p1)
    }
    if(!is.null(makeCrappyDotplots[["p2"]])){
      with(makeCrappyDotplots[["p2"]], plot(
        rnd1, rnd2, pch = ".",
        xlab = "syntenic hit gene rank order (genome1)",
        ylab = "syntenic hit gene rank order (genome2)",
        main = "these genomes have too many chromosomes to plot nicely."))
    }else{
      if(!is.null(p2))
        print(p2)
    }
    de <- dev.off()
  }
}
#' @title simple dotplots from a hits data.table
#' @description
#' \code{gghits} ggplot2 integrated graphics to produce dotplots
#' @rdname plot_hits
#' @import data.table
#' @import ggplot2
#' @export
gghits <- function(hits,
                   colorByBlks = TRUE,
                   alpha = ifelse(colorByBlks, 1, .25),
                   useOrder = TRUE,
                   minScore = 0,
                   minGenes2plot = 0){
  # Quiet R CMD check NOTEs about data.table / ggplot2 NSE column names.
  ofID1 <- ofID2 <- sameOg <- ngene1 <- ngene2 <- ord1 <- ord2 <- blkID <-
    inBuffer <- rnd2 <- rnd1 <- n <- isArrayRep2 <- isArrayRep1 <- chr1 <-
    noAnchor <- bitScore <- quantile <- chr2 <- sameOG <- isAnchor <-
    start1 <- start2 <- x <- y <- NULL

  dat <- data.table(hits)

  # Choose the hits to draw: syntenic anchors with a block ID, or every
  # hit above the bitscore threshold.
  if(colorByBlks){
    keep <- subset(dat, !is.na(blkID) & isAnchor)
  }else{
    keep <- subset(dat, bitScore > minScore)
  }
  ng1 <- as.integer(uniqueN(keep$ofID1))
  ng2 <- as.integer(uniqueN(keep$ofID2))

  # Drop chromosomes without enough array-representative anchor genes.
  keep[, ngene1 := uniqueN(ofID1[!noAnchor & isArrayRep1], na.rm = TRUE), by = "chr1"]
  keep[, ngene2 := uniqueN(ofID2[!noAnchor & isArrayRep2], na.rm = TRUE), by = "chr2"]
  keep <- subset(keep, ngene1 > minGenes2plot & ngene2 > minGenes2plot)

  # Plot coordinates: gene rank order, or physical position in Mb.
  if(useOrder){
    keep[, `:=`(x = ord1, y = ord2)]
    axLabX <- "query gene rank order position"
    axLabY <- "target gene rank order position"
  }else{
    keep[, `:=`(x = start1/1e6, y = start2/1e6)]
    axLabX <- "query physical (Mb) gene position"
    axLabY <- "target physical (Mb) gene position"
  }

  # Theme shared by both plot variants (black panels, faint dashed grid).
  dpTheme <- theme(
    panel.background = element_rect(fill = "black"),
    panel.grid.minor = element_blank(),
    panel.grid.major = element_line(
      color = rgb(1, 1, 1, .2), size = .2, linetype = 2),
    panel.spacing = unit(.1, "mm"),
    axis.ticks = element_blank(),
    strip.background = element_blank(),
    axis.text = element_text(family = "Helvetica", size = 5),
    axis.title = element_text(family = "Helvetica", size = 6),
    plot.title = element_text(family = "Helvetica", size = 7))

  # Base layers differ only in whether points are colored by block ID.
  if(colorByBlks){
    blkCols <- sample(gs_colors(uniqueN(keep$blkID)))
    p <- ggplot(keep, aes(x = x, y = y, col = blkID)) +
      geom_point(pch = ".", alpha = alpha) +
      scale_color_manual(values = blkCols, guide = "none")
  }else{
    p <- ggplot(keep, aes(x = x, y = y)) +
      geom_point(pch = ".", alpha = alpha)
  }
  p <- p +
    scale_x_continuous(expand = c(0,0), breaks = pretty(keep$x, n = 10)) +
    scale_y_continuous(expand = c(0,0), breaks = pretty(keep$y, n = 10)) +
    facet_grid(genome2 + chr2 ~ genome1 + chr1, scales = "free",
               space = "free", as.table = FALSE) +
    labs(x = axLabX, y = axLabY) +
    dpTheme
  print(p)
}
|
# Tests:
# Regression test for rule_text_color(): a conditional text-color rule
# must survive rendering to both HTML and LaTeX output.
context("rule_text_color")
test_that("rule_text_color works", {
  data(iris)
  # small mixed-species subset keeps the rendered tables tiny and fast
  x <- condformat(iris[c(1:10, 51:60, 101:110),])
  y <- x %>% rule_text_color(Species,
                           expression = ifelse(Species == "setosa", "blue", ""))
  # HTML output: setosa cells carry an inline blue color style
  out <- condformat2html(y)
  expect_match(out, "color: blue.*setosa")
  # LaTeX output: only setosa (not versicolor) is wrapped in \textcolor
  out <- condformat2latex(y)
  expect_true(grepl(pattern = "\\textcolor[RGB]{0,0,255}{setosa}", out, fixed = TRUE))
  expect_false(grepl(pattern = "\\textcolor[RGB]{0,0,255}{versicolor}", out, fixed = TRUE))
})
| /tests/testthat/test_rule_text_color.R | no_license | leeleavitt/condformat | R | false | false | 540 | r | # Tests:
# Regression test for rule_text_color(): a conditional text-color rule
# must survive rendering to both HTML and LaTeX output.
context("rule_text_color")
test_that("rule_text_color works", {
  data(iris)
  # small mixed-species subset keeps the rendered tables tiny and fast
  x <- condformat(iris[c(1:10, 51:60, 101:110),])
  y <- x %>% rule_text_color(Species,
                           expression = ifelse(Species == "setosa", "blue", ""))
  # HTML output: setosa cells carry an inline blue color style
  out <- condformat2html(y)
  expect_match(out, "color: blue.*setosa")
  # LaTeX output: only setosa (not versicolor) is wrapped in \textcolor
  out <- condformat2latex(y)
  expect_true(grepl(pattern = "\\textcolor[RGB]{0,0,255}{setosa}", out, fixed = TRUE))
  expect_false(grepl(pattern = "\\textcolor[RGB]{0,0,255}{versicolor}", out, fixed = TRUE))
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{timesThree}
\alias{timesThree}
\title{Multiply a number by three}
\usage{
timesThree(x)
}
\arguments{
\item{x}{A single integer.}
}
\description{
Multiply a number by three
}
| /q2e/man/timesThree.Rd | no_license | bioarch-sjh/q2e | R | false | true | 273 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{timesThree}
\alias{timesThree}
\title{Multiply a number by three}
\usage{
timesThree(x)
}
\arguments{
\item{x}{A single integer.}
}
\description{
Multiply a number by three
}
|
library(tidyverse)
library(ggmap)
library(ggplot2)
library(dplyr)
library(sf)
library(leaflet)
# Build the mat_locations package dataset: read the raw CSV, assemble a
# single geocodable address string, geocode via the Google Maps API, and
# save the result with usethis::use_data().
mat_locations <- readr::read_csv("MAT_locations.csv")
mat_locations <- mat_locations %>% mutate(
  search_address = paste(address, city, state, zip, sep = ", ")
)
# SECURITY NOTE(review): a Google API key is hard-coded and committed to
# source control; it should be revoked and read from an environment
# variable (e.g. Sys.getenv("GOOGLE_API_KEY")) instead.
register_google(key = "AIzaSyArse9KN5w6I6XekxNOkFWgt_brvM_m-CY", write = TRUE)
mat_locations <- mat_locations %>%
  filter(!is.na(search_address)) %>%
  mutate_geocode(search_address) #One NA value due to Address
# don't include the variable for the ID
#mat_locations <- mat_locations %>%
 #select(-X1)
# Rename ggmap's lon/lat output columns to the names the package expects.
names(mat_locations)[names(mat_locations) == "lon"] <- "longitude"
names(mat_locations)[names(mat_locations) == "lat"] <- "latitude"
usethis::use_data(mat_locations, overwrite = TRUE)
| /stuff/mat_locations.R | permissive | DSPG-ISU/DSPG | R | false | false | 757 | r | library(tidyverse)
library(ggmap)
library(ggplot2)
library(dplyr)
library(sf)
library(leaflet)
# Build the mat_locations package dataset: read the raw CSV, assemble a
# single geocodable address string, geocode via the Google Maps API, and
# save the result with usethis::use_data().
mat_locations <- readr::read_csv("MAT_locations.csv")
mat_locations <- mat_locations %>% mutate(
  search_address = paste(address, city, state, zip, sep = ", ")
)
# SECURITY NOTE(review): a Google API key is hard-coded and committed to
# source control; it should be revoked and read from an environment
# variable (e.g. Sys.getenv("GOOGLE_API_KEY")) instead.
register_google(key = "AIzaSyArse9KN5w6I6XekxNOkFWgt_brvM_m-CY", write = TRUE)
mat_locations <- mat_locations %>%
  filter(!is.na(search_address)) %>%
  mutate_geocode(search_address) #One NA value due to Address
# don't include the variable for the ID
#mat_locations <- mat_locations %>%
 #select(-X1)
# Rename ggmap's lon/lat output columns to the names the package expects.
names(mat_locations)[names(mat_locations) == "lon"] <- "longitude"
names(mat_locations)[names(mat_locations) == "lat"] <- "latitude"
usethis::use_data(mat_locations, overwrite = TRUE)
|
# Geodistances ordered according to travelling salesman algo ####
# TRAVELLING SALESMAN
# Purpose is to estimate the right number of waypoints - to reduce api runs
# Leave-one-out tour-length scoring for candidate waypoints.
#
# For each interior row of `xy` (rows 2..n-1), drop the point, solve a
# Euclidean TSP over the remaining points (TSP::ETSP / solve_TSP), rotate
# the tour so it begins at `startposition`, accumulate point-to-point
# geodesic distances (geosphere::distGeo via distm), and store the
# resulting round-trip length in the dropped point's `sumdist` cell of
# the result. Points whose removal shortens the tour most are the best
# candidates to drop when reducing the number of waypoints.
#
# Args:
#   xy            data.frame of waypoints with `lat`/`lng` columns
#   outpois       unused here; kept for call-site compatibility
#   startposition one-row data.frame (`lat`,`lng`) where the tour starts
#   endposition   one-row data.frame (`lat`,`lng`) where the tour ends
# Returns: `xy` with a `sumdist` column filled for interior points.
tscalc <- function(xy,outpois, startposition, endposition) {
  xy0 <- xy
  for (j in 2:(nrow(xy0)-1)) {
    xy <- xy0[-j,]
    nu_kor <- xy0[j,c("lat","lng")]
    xytsp <- ETSP(data.frame(xy[,1:2]))
    colnames(xytsp)[1:2] <- c("x", "y")
    xytour <- solve_TSP(xytsp)
    plot(xytsp, xytour, pch=20, tour_col="red", tour_lty="solid")
    re_ordered_xy <- xy[as.numeric(xytour), ]
    # Making sure that route starts with start and ends with end..
    for (i in 1:nrow(re_ordered_xy)) if (equals(re_ordered_xy[i,c("lat","lng")],startposition)) pos <- i
    re_ordered_xy2 <- rbind(re_ordered_xy[pos:nrow(re_ordered_xy),],re_ordered_xy[1:(pos-1),])
    for (i in 2:nrow(re_ordered_xy2)) {
      re_ordered_xy2[i,"distance_to_last_point"] <- distm(x = rev(re_ordered_xy2[i-1,c("lat","lng")]), y = rev(re_ordered_xy2[i,c("lat","lng")]),fun = distGeo)[,1]
      re_ordered_xy2[i,"distance_sum"] <- sum(na.omit(re_ordered_xy2[2:i,"distance_to_last_point"]))
      re_ordered_xy2[i,"distance_roundtrip"] <- re_ordered_xy2[i,"distance_sum"] + distm(x = rev(re_ordered_xy2[i,c("lat","lng")]), y = rev(endposition),fun = distGeo)[,1]
    }
    sumdist <- max(na.omit(re_ordered_xy2[,"distance_roundtrip"]))
    xy0[xy0$lat==nu_kor$lat & xy0$lng==nu_kor$lng,"sumdist"] <- sumdist
  }
  # BUG FIX: `return(xy_df,)` was a runtime error (multi-argument return
  # with an empty trailing argument) and `xy_df` was never defined; the
  # accumulated results live in `xy0`.
  return(xy0)
}
# Shrink the waypoint count until the round trip fits `geodist_start_end`.
# NOTE(review): this block reads like an unfinished draft -- `n <- n - 1`
# sits AFTER the while loop and `xy` is never rebuilt inside it, so loop
# state never changes between iterations and the loop may not terminate;
# confirm intent before relying on it.
n <- nrow(outpois) # if A to B. Changed below if not..
if (equals(startposition,endposition)) { # only if round trip
  sumdist <- geodist_start_end + 1
  n <- nrow(outpois)
  xy <- rbind(startposition,outpois[1:n,c("lat","lng")],endposition)
  while (sumdist > geodist_start_end) {
    xytsp <- ETSP(data.frame(xy))
    colnames(xytsp) <- c("x", "y")
    xytour <- solve_TSP(xytsp)
    plot(xytsp, xytour, pch=20, tour_col="red", tour_lty="solid")
    re_ordered_xy <- xy[as.numeric(xytour), ]
    # Making sure that route starts with start and ends with end..
    for (i in 1:nrow(re_ordered_xy)) if (equals(re_ordered_xy[i,c("lat","lng")],startposition)) pos <- i
    re_ordered_xy2 <- rbind(re_ordered_xy[pos:nrow(re_ordered_xy),],re_ordered_xy[1:(pos-1),])
    for (i in 2:nrow(re_ordered_xy2)) {
      re_ordered_xy2[i,"distance_to_last_point"] <- distm(x = rev(re_ordered_xy2[i-1,c("lat","lng")]), y = rev(re_ordered_xy2[i,c("lat","lng")]),fun = distGeo)[,1]
      re_ordered_xy2[i,"distance_sum"] <- sum(na.omit(re_ordered_xy2[2:i,"distance_to_last_point"]))
      re_ordered_xy2[i,"distance_roundtrip"] <- re_ordered_xy2[i,"distance_sum"] + distm(x = rev(re_ordered_xy2[i,c("lat","lng")]), y = rev(endposition),fun = distGeo)[,1]
    }
    sumdist <- max(na.omit(re_ordered_xy2[,"distance_roundtrip"]))
    re_ordered_xy3 <- left_join(re_ordered_xy2, outpois, by = c("lat" = "lat", "lng" = "lng")) %>% # Automatically delete ID
      select(lat, lng, distance_to_last_point, distance_sum, distance_roundtrip,user_ratings_sum)
  }
  n <- n - 1
}
| /iterator.R | no_license | pjerrot/r_stuff | R | false | false | 3,066 | r |
# Geodistances ordered according to travelling salesman algo ####
# TRAVELLING SALESMAN
# Purpose is to estimate the right number of waypoints - to reduce api runs
tscalc <- function(xy,outpois, startposition, endposition) {
xy0 <- xy
for (j in 2:(nrow(xy0)-1)) {
xy <- xy0[-j,]
nu_kor <- xy0[j,c("lat","lng")]
xytsp <- ETSP(data.frame(xy[,1:2]))
colnames(xytsp)[1:2] <- c("x", "y")
xytour <- solve_TSP(xytsp)
plot(xytsp, xytour, pch=20, tour_col="red", tour_lty="solid")
re_ordered_xy <- xy[as.numeric(xytour), ]
# Making sure that route starts with start and ends with end..
for (i in 1:nrow(re_ordered_xy)) if (equals(re_ordered_xy[i,c("lat","lng")],startposition)) pos <- i
re_ordered_xy2 <- rbind(re_ordered_xy[pos:nrow(re_ordered_xy),],re_ordered_xy[1:(pos-1),])
for (i in 2:nrow(re_ordered_xy2)) {
re_ordered_xy2[i,"distance_to_last_point"] <- distm(x = rev(re_ordered_xy2[i-1,c("lat","lng")]), y = rev(re_ordered_xy2[i,c("lat","lng")]),fun = distGeo)[,1]
re_ordered_xy2[i,"distance_sum"] <- sum(na.omit(re_ordered_xy2[2:i,"distance_to_last_point"]))
re_ordered_xy2[i,"distance_roundtrip"] <- re_ordered_xy2[i,"distance_sum"] + distm(x = rev(re_ordered_xy2[i,c("lat","lng")]), y = rev(endposition),fun = distGeo)[,1]
}
sumdist <- max(na.omit(re_ordered_xy2[,"distance_roundtrip"]))
xy0[xy0$lat==nu_kor$lat & xy0$lng==nu_kor$lng,"sumdist"] <- sumdist
}
return(xy_df,)
}
n <- nrow(outpois) # if A to B. Changed below if not..
if (equals(startposition,endposition)) { # kun hvis rundtur
sumdist <- geodist_start_end + 1
n <- nrow(outpois)
xy <- rbind(startposition,outpois[1:n,c("lat","lng")],endposition)
while (sumdist > geodist_start_end) {
xytsp <- ETSP(data.frame(xy))
colnames(xytsp) <- c("x", "y")
xytour <- solve_TSP(xytsp)
plot(xytsp, xytour, pch=20, tour_col="red", tour_lty="solid")
re_ordered_xy <- xy[as.numeric(xytour), ]
# Making sure that route starts with start and ends with end..
for (i in 1:nrow(re_ordered_xy)) if (equals(re_ordered_xy[i,c("lat","lng")],startposition)) pos <- i
re_ordered_xy2 <- rbind(re_ordered_xy[pos:nrow(re_ordered_xy),],re_ordered_xy[1:(pos-1),])
for (i in 2:nrow(re_ordered_xy2)) {
re_ordered_xy2[i,"distance_to_last_point"] <- distm(x = rev(re_ordered_xy2[i-1,c("lat","lng")]), y = rev(re_ordered_xy2[i,c("lat","lng")]),fun = distGeo)[,1]
re_ordered_xy2[i,"distance_sum"] <- sum(na.omit(re_ordered_xy2[2:i,"distance_to_last_point"]))
re_ordered_xy2[i,"distance_roundtrip"] <- re_ordered_xy2[i,"distance_sum"] + distm(x = rev(re_ordered_xy2[i,c("lat","lng")]), y = rev(endposition),fun = distGeo)[,1]
}
sumdist <- max(na.omit(re_ordered_xy2[,"distance_roundtrip"]))
re_ordered_xy3 <- left_join(re_ordered_xy2, outpois, by = c("lat" = "lat", "lng" = "lng")) %>% # Automatically delete ID
select(lat, lng, distance_to_last_point, distance_sum, distance_roundtrip,user_ratings_sum)
}
n <- n - 1
}
|
library(BayesianTools)
library(phydynR)
#get_BT_sample <- function(rds_file, end_sample){
#  bt_posterior <- readRDS(rds_file)
#  bt_sample <- getSample(bt_posterior, start = 1, end = end_sample, coda = TRUE)
#}
#pol gene ----
# Load all pol-gene MCMC chain files, pool them into one sampler list,
# extract a coda sample, and set the run parameters used downstream.
# NOTE(review): end_sample and burninpc are hard-coded; presumably chosen
# from inspecting these particular chains -- verify if chains change.
# list
rds_dirs <- list.files("Analyses/MCMC_Results/pol_all/results_zmatrix/stage13", full.names = TRUE)
#get rds files (containing results of MCMC runs)
rds_files <- list()
for(x in 1:length(rds_dirs)){
  rds_files[x] <- list.files(rds_dirs[x], pattern = "out_pol.RDS", full.names = TRUE)
  #run <- readRDS(rds_files[[x]])
  #summary(run)
}
infns <- unlist(rds_files)
chs <- createMcmcSamplerList( lapply( infns, readRDS ) )
end_sample = 23580
chs_sample <- BayesianTools::getSample(chs, start = 1, end = end_sample, coda = TRUE)
pol_sample <- BayesianTools::getSample(chs, start = 5000, end = 23580, coda = TRUE)
# effective sample size per parameter (printed because of the outer parens)
(pol.ESS <- as.numeric(format(round(coda::effectiveSize(pol_sample), 0))))
#quartz()
#plot(pol_sample, ask = TRUE)
art_start <- 2005
burninpc <- 0.2120
numSamples <- 100
ncpu <- 1
outdir = "pol_all_data"
#down-sampled pol gene ----
# Same workflow for the down-sampled pol alignment; chain 8 is excluded
# from infns below (presumably a non-converged run -- verify).
# list
rds_dirs <- list.files("Analyses/MCMC_Results/down_sampled_pol/results_zmatrix/stage6", full.names = TRUE)
#get rds files (containing results of MCMC runs)
rds_files <- list()
for(x in 1:length(rds_dirs)){
  rds_files[x] <- list.files(rds_dirs[x], pattern = "out.RDS", full.names = TRUE)
  run <- readRDS(rds_files[[x]])
  summary(run)
}
infns <- unlist(rds_files)
infns <- infns[c(1:7,9:10)]
chs <- createMcmcSamplerList( lapply( infns, readRDS ) )
end_sample = 24120
chs_sample <- BayesianTools::getSample(chs, start = 1, end = end_sample, coda = TRUE)
pol_sample <- BayesianTools::getSample(chs, start = 5000, end = end_sample, coda = TRUE)
gelmanDiagnostics(pol_sample)
(pol.ESS <- as.numeric(format(round(coda::effectiveSize(pol_sample), 0))))
quartz()
plot(pol_sample, ask = TRUE)
art_start <- 2005
burninpc <- 0.2173
numSamples <- 100
ncpu <- 1
outdir = "pol_downsampled_all_data"
#gag gene ----
# Same workflow for the gag gene; chain 1 is excluded via infns[2:8].
# NOTE(review): "gog.ESS" below looks like a typo for "gag.ESS".
# list
rds_dirs <- list.files("Analyses/MCMC_Results/gag_all/results_zmatrix/stage8", full.names = TRUE)
#get rds files (containing results of MCMC runs)
rds_files <- list()
for(x in 1:length(rds_dirs)){
  rds_files[x] <- list.files(rds_dirs[x], pattern = "out_gag.RDS", full.names = TRUE)
  #run <- readRDS(rds_files[[x]])
  #summary(run)
}
infns <- unlist(rds_files)
infns <- infns[2:8]
chs <- createMcmcSamplerList( lapply( infns, readRDS ) )
end_sample = 24180
chs_sample <- BayesianTools::getSample(chs, start = 1, end = end_sample, coda = TRUE)
gag_sample <- BayesianTools::getSample(chs, start = 5000, end = end_sample, coda = TRUE)
gelmanDiagnostics(gag_sample)
(gog.ESS <- as.numeric(format(round(coda::effectiveSize(gag_sample), 0))))
quartz()
plot(gag_sample, ask = TRUE)
art_start <- 2005
burninpc <- 0.2068
numSamples <- 100
ncpu <- 1
outdir = "gag_all_data"
#env gene ----
# Same workflow for the env gene; all chains are kept.
# list
rds_dirs <- list.files("Analyses/MCMC_Results/env_all/results_zmatrix/stage7", full.names = TRUE)
#get rds files (containing results of MCMC runs)
rds_files <- list()
for(x in 1:length(rds_dirs)){
  rds_files[x] <- list.files(rds_dirs[x], pattern = "out_env.RDS", full.names = TRUE)
  run <- readRDS(rds_files[[x]])
  summary(run)
}
infns <- unlist(rds_files)
chs <- createMcmcSamplerList( lapply( infns, readRDS ) )
end_sample = 24120
chs_sample <- BayesianTools::getSample(chs, start = 1, end = end_sample, coda = TRUE)
env_sample <- BayesianTools::getSample(chs, start = 5000, end = end_sample, coda = TRUE)
gelmanDiagnostics(env_sample)
(env.ESS <- as.numeric(format(round(coda::effectiveSize(env_sample), 0))))
quartz()
plot(env_sample, ask = TRUE)
art_start <- 2005
burninpc <- 0.2073
numSamples <- 100
ncpu <- 1
outdir = "env_all_data"
# Draw numSamples post-burn-in posterior rows for trajectory simulation,
# create the output directory, and write the MCMC summary to a file.
O = getSample( chs, start = 1, end = end_sample )
start <- floor( burninpc * nrow(O) )
o <- O[ (start:nrow(O)), ]
#~ browser()
o <- o[ sample(1:nrow(o), size = numSamples, replace=FALSE), ]
mod0 <- generate_model0(art_start=art_start)
if (is.null( outdir )){
  outdir <- outdir_name
}
suppressWarnings( dir.create ( outdir ) )
outdir <- paste0( outdir, '/' )
#chs <- createMcmcSamplerList( lapply( infns, readRDS ) )
sink( file = paste0( outdir, 'mcmcSummary.txt' ))
print( summary( chs_sample ))
sink()
compute.traj0 <- function(theta){
  # Simulate one model0 epidemic trajectory for a sampled parameter
  # vector `theta`, which overrides the corresponding model defaults.
  print(theta)
  pars <- mod0$parms
  pars[names(theta)] <- theta
  # initial state in 1980; treated class z starts at zero
  state0 <- c(src = pars$src1980, i = pars$i1980, z = 0)
  mod0$dm(pars, state0, t0 = 1980, t1 = 2015, res = 100)
}
# Per-tree i1980 parameter names in the posterior; one trajectory is
# simulated per tree and the trajectories are then summed.
i1980names <- colnames(O)[ grepl( 'i1980', colnames(O)) ]
ntres <- length( i1980names )
# Element-wise sum of a list of trajectories.
#
# Each trajectory `ss[[k]]` is a list whose components [[2]]..[[4]] are
# parallel lists of matrices (added pairwise here) and whose [[5]] is a
# matrix with time in column 1 and state sizes in the remaining columns
# (only the non-time columns are summed). A single-element list is
# returned unchanged.
#
# BUG FIX: the original unconditionally added ss[[1]] to itself, doubling
# every component whenever only one tree was present; this now matches
# the guarded version of the same helper inside trajectories_model0().
# It also iterates s[[4]] over its own length rather than length(s[[3]]).
add.trajs <- function(ss){
  s <- ss[[1]]
  if (length(ss) > 1){
    for (k in 2:length(ss)){
      .s <- ss[[k]]
      s[[2]] <- lapply( 1:length( s[[2]]) , function(j) s[[2]][[j]] + .s[[2]][[j]] )
      s[[3]] <- lapply( 1:length( s[[3]]) , function(j) s[[3]][[j]] + .s[[3]][[j]] )
      s[[4]] <- lapply( 1:length( s[[4]]) , function(j) s[[4]][[j]] + .s[[4]][[j]] )
      s[[5]][, -1] <- s[[5]][, -1] + .s[[5]][, -1]
    }
  }
  s
}
compute.traj <- function( theta0){
  # Simulate one trajectory per tree (each tree carries its own i1980
  # initial-size parameter) and return their element-wise sum.
  shared <- theta0[setdiff(names(theta0), i1980names)]
  trajs <- lapply(seq_len(ntres), function(k){
    compute.traj0(c(shared, i1980 = unname(theta0[i1980names[k]])))
  })
  add.trajs(trajs)
}
#tfgys <- parallel::mclapply( 1:nrow(o), function(i){
#  compute.traj( o[i, ] )
#}, mc.cores = ncpu)
# Simulate a summed trajectory for each retained posterior draw.
tfgys <- apply(o, MARGIN = 1, FUN = compute.traj)
# Posterior quantile bands (median, 2.5%, 97.5%) for effective
# infections (i), treated (z), and proportion treated, over time.
imat <- do.call( cbind, lapply( tfgys, function(s) s[[5]][, 'i' ] ) )
zmat <- do.call( cbind, lapply( tfgys, function(s) s[[5]][, 'z' ] ) )
qi <- t( sapply( 1:nrow(imat) , function(i) quantile( imat[i, ], prob = c( .5, .025, .975 )) ) )
qz <- t( sapply( 1:nrow(imat) , function(i) quantile( zmat[i, ], prob = c( .5, .025, .975 )) ) )
pzmat <- zmat / ( imat + zmat )
qpz <- t( sapply( 1:nrow(pzmat) , function(i) quantile( pzmat[i, ], prob = c( .5, .025, .975 )) ) )
time <- seq( 1980, 2015, length.out = 100 )
#~ browser()
# Write each figure as both PNG and SVG into outdir.
png( paste0( outdir, 'effinf.png'), width = 4, height = 3 , units = 'in', res = 300)
par(mai = c( .5, .85, .3, .25) )
matplot( time, qi, type = 'l', lty = c(1, 3, 3), col = 'black'
  , xlab = '', ylab = 'Effective infections')
dev.off()
svg( paste0( outdir, 'effinf.svg'), width = 4, height = 3 )
par(mai = c( .5, .85, .3, .25) )
matplot( time, qi, type = 'l', lty = c(1, 3, 3), col = 'black'
  , xlab = '', ylab = 'Effective infections')
dev.off()
png( paste0( outdir, 'ntreat.png'), width = 4, height = 3 , units = 'in', res = 300)
par(mai = c( .5, .85, .3, .25) )
matplot( time, qz, type = 'l', lty = c(1, 3, 3), col = 'black'
  , xlab = '', ylab = 'On ART')
dev.off()
svg( paste0( outdir, 'ntreat.svg'), width = 4, height = 3 )
par(mai = c( .5, .85, .3, .25) )
matplot( time, qz, type = 'l', lty = c(1, 3, 3), col = 'black'
  , xlab = '', ylab = 'On ART')
dev.off()
png( paste0( outdir, 'proptreat.png'), width = 4, height = 3 , units = 'in', res = 300)
par(mai = c( .5, .85, .3, .25) )
matplot( time, qpz, type = 'l', lty = c(1, 3, 3), col = 'black'
  , xlab = '', ylab = 'Proportion on ART')
dev.off()
svg( paste0( outdir, 'proptreat.svg'), width = 4, height = 3 )
par(mai = c( .5, .85, .3, .25) )
matplot( time, qpz, type = 'l', lty = c(1, 3, 3), col = 'black'
  , xlab = '', ylab = 'Proportion on ART')
dev.off()
# New infections per year: sum of the birth-matrix column per time step;
# rev() because trajectory times are stored in reverse order.
newinf <- do.call( cbind, lapply( tfgys, function(s) {
  newinf <- sapply( s[[2]], function(FF) sum(FF[,2]) )
  rev( newinf )
}))
qnewinf <- t( sapply( 1:nrow( newinf), function(i) quantile( newinf[i, ] , prob =c( .5, .025, .975 ))))
png( paste0( outdir, 'ninf.png'), width = 4, height = 3 , units = 'in', res = 300)
par(mai = c( .5, .85, .3, .25) )
matplot( time, qnewinf, type = 'l', lty = c(1, 3, 3), col = 'black'
  , xlab = '', ylab = 'New infections / year')
dev.off()
svg( paste0( outdir, 'ninf.svg'), width = 4, height = 3 )
par(mai = c( .5, .85, .3, .25) )
matplot( time, qnewinf, type = 'l', lty = c(1, 3, 3), col = 'black'
  , xlab = '', ylab = 'New infections / year')
dev.off()
# Proportion of new infections attributable to imports/cross transmission.
propImport <- do.call( cbind, lapply( tfgys, function(s) {
  imp <- sapply( s[[3]], function(G) G[1,2] )
  crosstrans <- sapply( s[[2]], function(FF) FF[1,2] )
  newinf <- sapply( s[[2]], function(FF) FF[2,2] )
  rev( (crosstrans + imp) / (crosstrans + imp + newinf ) )
}))
#~better as a box/whisker plot
qpi <- t( sapply( 1:nrow( propImport), function(i) quantile( propImport[i, ] , prob =c( .5, .025, .975 ))))
# Append the final (most recent) import proportion to the summary file.
sink( file = paste0( outdir, 'mcmcSummary.txt' ), append = TRUE )
print('')
print ('Proportion imports' )
print( tail( qpi, 1) )
sink()
#X11(); matplot( time, qpi, type = 'l' , lty = c( 1, 3, 3), col = 'black' )
# Posterior boxplots of the time-varying transmission rate parameters.
bo <- lapply( mod0$betaNames, function(bn) o[, bn] )
png(paste0( outdir, 'beta.png') , width = 4, height = 3 , units = 'in', res = 300 )
par(mai = c( .5, .85, .3, .25) )
boxplot( bo, names = mod0$betaTimes, ylab = 'Transmission rate' )
dev.off()
svg(paste0( outdir, 'beta.svg') , width = 4, height = 3 )
par(mai = c( .5, .85, .3, .25) )
boxplot( bo, names = mod0$betaTimes, ylab = 'Transmission rate' )
dev.off()
invisible( o )
#' Make tfgys for multiple sets of input files
#'
#' @export
trajectories_model0 <- function( infns , art_start, mod0=NULL, burninpc = .5, ncpu = 1, numSamples = 100 , onlyBestChain = FALSE ){
  # Simulate posterior epidemic trajectories for model0.
  #
  # infns:         paths to saved BayesianTools sampler RDS files
  # art_start:     year ART scale-up begins (passed to generate_model0)
  # mod0:          optional pre-built model; default model0 if NULL
  # burninpc:      fraction of each chain discarded as burn-in
  # ncpu:          cores for parallel trajectory simulation
  # numSamples:    number of posterior draws to simulate
  # onlyBestChain: if TRUE, keep only the chain with the highest MAP
  # Returns: list(tfgys = trajectories, o = posterior draws without the
  #          per-tree i1980 columns).
  stopifnot( require(phydynR) )
  library( BayesianTools )
  chss <- lapply( infns, readRDS )
  # NOTE(review): scalar condition uses elementwise `&`; `&&` would be
  # the conventional choice here (same result for scalar logicals).
  if (onlyBestChain & length(infns) > 1){
    maps <- sapply( chss, function(ch) MAP( ch )$valuesMAP[1] )
    chss <- list( chss[[ which.max( maps ) ]] )
  }
  chs <- createMcmcSamplerList( chss )
#~ 	O = getSample( chs )
#~ 	O1 = getSample( chs, start = 15e3, thin = 10, parametersOnly=FALSE )
#~ 	start <- floor( burninpc * nrow(O) )
#~ 	o <- O[ (start:nrow(O)), ]
#~ 	o <- o[ sample(1:nrow(o), size = numSamples, replace=FALSE), ]
  # over-draw so that the post-burn-in tail still holds numSamples rows
  O <- getSample( chs, numSamples = floor( numSamples / (1-burninpc) ) , parametersOnly = FALSE)
  o <- tail( O , numSamples )
#~ browser()
  if ( is.null(mod0)){
    warning('model unspecified; using default model0')
    mod0 <- generate_model0(art_start=art_start)
  }
  # Simulate one trajectory for a single sampled parameter vector.
  compute.traj0 <- function(theta )
  {
    print(theta)
    p <- mod0$parms
    p[ names(theta)] <- theta
    x0 <- c( src = p$src1980, i = p$i1980 , z =0) # NOTE this is set in compute.traj
    s <- mod0$dm(p, x0 , t0 = 1980, t1 =2015, res = 100 )
    s
  }
  # one i1980 parameter (initial infected size) per tree
  i1980names <- colnames(O)[ grepl( 'i1980', colnames(O)) ]
  ntres <- length( i1980names )
  # Element-wise sum of a list of trajectories (no-op for length 1).
  add.trajs <- function(ss){
    s <- ss[[1]]
    if ( length( ss) > 1 ){
      for (k in 2:length( ss )){
        .s <- ss[[k]]
        s[[2]] <- lapply( 1:length( s[[2]]) , function(j) s[[2]][[j]] + .s[[2]][[j]] )
        s[[3]] <- lapply( 1:length( s[[3]]) , function(j) s[[3]][[j]] + .s[[3]][[j]] )
        # NOTE(review): iterates over length(s[[3]]) rather than
        # length(s[[4]]); harmless only if both lists have equal length.
        s[[4]] <- lapply( 1:length( s[[3]]) , function(j) s[[4]][[j]] + .s[[4]][[j]] )
        s[[5]][, -1] <- s[[5]][, -1] + .s[[5]][, -1]
      }
    }
    #browser()
    s
  }
  # Simulate one trajectory per tree for a posterior row, then sum them.
  compute.traj <- function( theta0){
    traj <- list()
    for ( k in 1:ntres){
      theta <- theta0[setdiff( names(theta0), i1980names ) ]
      theta <- c( theta , i1980 = unname( theta0[ i1980names[k] ] ) )
      traj[[k]] <- compute.traj0( theta )
    }
    add.trajs( traj )
  }
  tfgys <- parallel::mclapply( 1:nrow(o), function(i){
    compute.traj( o[i, ] )
  }, mc.cores = ncpu)
  list( tfgys = tfgys, o = o[, !grepl(pattern='i1980', colnames(o)) ])
}
#' Run \code{trajectories_model0} for several genes and pool the results
#'
#' @param infns_list list of character vectors of RDS file names, one per gene.
#' @param ... further arguments passed to \code{trajectories_model0}.
#' @return list with elements \code{tfgys} (pooled trajectories) and \code{o}
#'   (row-bound posterior draws), matching the structure returned by
#'   \code{trajectories_model0}.
#' @export
trajectories_model0_multigene <- function( infns_list, ... )
{
	tfgyos <- lapply( infns_list, function(infn )
		trajectories_model0( infn , ... )
	)
	tfgys <- do.call( c, lapply( tfgyos, '[[', 1 ) )
	o <- do.call( rbind, lapply( tfgyos, '[[', 2 ) )
	# named for consistency with trajectories_model0; positional access still works
	list( tfgys = tfgys, o = o )
}
#' Summarise fitted model trajectories; write figures and a text summary
#'
#' Writes quantile plots (PNG and SVG) of R(t), prevalence, numbers treated,
#' new infections and transmission rates to \code{outdir}, together with a
#' text summary of the posterior sample.
#'
#' @param tfgys list of trajectory objects (see \code{trajectories_model0}).
#' @param o matrix of posterior parameter draws, one row per trajectory.
#' @param art_start numeric year that ART is introduced in the model.
#' @param outdir output directory, created if needed; defaults to 'summary0_tfgys'.
#' @param axislog passed as \code{log} to the prevalence/incidence plots ('' or 'y').
#' @param lquant,uquant lower and upper quantiles for the plotted envelopes.
#' @return Invisibly returns \code{o}.
#' @export
summary_model0_tfgyos <- function(tfgys, o , art_start, outdir = NULL, axislog='', lquant = .025, uquant = .975 )
{
	library( coda )
	if (is.null( outdir )){
		outdir <- 'summary0_tfgys'
	}
	suppressWarnings( dir.create ( outdir ) )
	outdir <- paste0( outdir, '/' )
	# Row-wise median and (lquant, uquant) envelope of a trajectories matrix
	row_quantiles <- function(m)
		t( sapply( seq_len( nrow(m) ) , function(i) quantile( m[i, ], prob = c( .5, lquant, uquant )) ) )
	# Render the same figure to both <outdir><basename>.png and .svg
	save_plot_pair <- function(basename, draw){
		png( paste0( outdir, basename, '.png'), width = 4, height = 3 , units = 'in', res = 300)
		par(mai = c( .5, .85, .3, .25) )
		draw()
		dev.off()
		svg( paste0( outdir, basename, '.svg'), width = 4, height = 3 )
		par(mai = c( .5, .85, .3, .25) )
		draw()
		dev.off()
	}
	# Text summary of the posterior sample
	sink( file = paste0( outdir, 'mcmcSummary.txt' ))
	print( summary( coda::as.mcmc( o )) )
	sink()
	# State trajectories: one column per posterior draw
	imat <- do.call( cbind, lapply( tfgys, function(s) s[[5]][, 'i' ] ) )  # untreated infections
	zmat <- do.call( cbind, lapply( tfgys, function(s) s[[5]][, 'z' ] ) )  # on treatment
	qi <- row_quantiles( imat )
	qtotaliz <- row_quantiles( imat + zmat )
	qz <- row_quantiles( zmat )
	qpz <- row_quantiles( zmat / ( imat + zmat ) )  # proportion on treatment
	# trajectory times are stored in reverse chronological order
	time <- rev( tfgys[[1]][[1]] )
	mod0 <- generate_model0(art_start = art_start )
	# Reproduction number through time, one column per trajectory
	Rt <- sapply( tfgys, m0R.t )
	qRt <- row_quantiles( Rt )
	save_plot_pair( 'R', function(){
		matplot( time, qRt, type = 'l', lty = c(1, 3, 3), col = 'black'
		 , xlab = '', ylab = 'R(t)')
		abline( h = 1,col = 'red')  # epidemic threshold
	})
	save_plot_pair( 'i', function()
		matplot( time, qi, type = 'l', lty = c(1, 3, 3), col = 'black'
		 , xlab = '', ylab = 'I(t)', log = axislog)
	)
	save_plot_pair( 'iztotal', function()
		matplot( time, qtotaliz, type = 'l', lty = c(1, 3, 3), col = 'black'
		 , xlab = '', ylab = 'I(t) + Z(t)', log = axislog)
	)
	save_plot_pair( 'ntreat', function()
		matplot( time, qz, type = 'l', lty = c(1, 3, 3), col = 'black'
		 , xlab = '', ylab = 'On ART')
	)
	save_plot_pair( 'proptreat', function()
		matplot( time, qpz, type = 'l', lty = c(1, 3, 3), col = 'black'
		 , xlab = '', ylab = 'Proportion on ART')
	)
	# New infections per year: column sums of the transmission (birth) matrices,
	# reversed into chronological order
	newinf <- do.call( cbind, lapply( tfgys, function(s) {
		rev( sapply( s[[2]], function(FF) sum( FF[,2]) ) )
	}))
	qnewinf <- row_quantiles( newinf )
	save_plot_pair( 'ninf', function()
		matplot( time, qnewinf, type = 'l', lty = c(1, 3, 3), col = 'black'
		 , xlab = '', ylab = 'New infections / year', log = axislog)
	)
	# Proportion of new cases attributable to importation or cross-deme transmission
	propImport <- do.call( cbind, lapply( tfgys, function(s) {
		imp <- sapply( s[[3]], function(G) G[1,2] )
		crosstrans <- sapply( s[[2]], function(FF) FF[1,2] )
		newinf <- sapply( s[[2]], function(FF) FF[2,2] )
		rev( (crosstrans + imp) / (crosstrans + imp + newinf ) )
	}))
	qpi <- row_quantiles( propImport )
	# Append the final proportion of imports to the text summary
	sink( file = paste0( outdir, 'mcmcSummary.txt' ), append = TRUE )
	print('')
	print ('Proportion imports' )
	print( tail( qpi, 1) )
	sink()
	# Posterior transmission rate in each estimation window, as box plots
	bo <- lapply( mod0$betaNames, function(bn) o[, bn] )
	save_plot_pair( 'beta', function()
		boxplot( bo, names = mod0$betaTimes, ylab = 'Transmission rate' )
	)
	invisible( o )
}
#' Compute R(t) by integrating over future hazard of removal and transmissions
#'
#' @param s a trajectory object with named elements \code{times}, \code{sizes},
#'   \code{deaths} and \code{births}; deme 2 is the local infected population.
#' @return numeric vector of effective reproduction numbers, one per time
#'   point, in chronological order.
#' @export
m0R.t <- function ( s) {
	i <- 2  # index of the local infected deme
	# reverse so all series run forward in time
	y <- rev( sapply( s$sizes, '[', i ) )              # infected population size
	d <- rev( sapply( s$deaths, '[', i ))              # removals per time point
	b <- rev( sapply( s$births, function(x) x[i,i] ))  # within-deme transmissions
	x <- rev( s$times )
	dx <- diff(x)[1]  # assumes a regular time grid
	r <- dx * b / y   # per-capita transmissions per step
	# extend 100 steps past the end of the series, holding the last value constant,
	# so the future integral is approximated beyond the observed window
	r <- c( r, rep( tail(r, 1), 100 ))
	loghaz <- log (1- d*dx / y )  # log per-step survival probability
	lasthazard <- log (1- tail(d,1)*dx / tail(y,1) )
	loghaz <- c( loghaz, rep(lasthazard, 100 ) )
	logsurv <- cumsum( loghaz)
	# R at time k: expected future transmissions weighted by survival since k
	R <- sapply( seq_along( x ), function(k){
		j <- k:length(r)
		sum( exp( logsurv[j] - logsurv[k] ) * r[j] )
	})
	R
}
| /Analyses/Scripts/R-scripts/South_Africa/Summary.R | no_license | thednainus/pangeaZA | R | false | false | 18,607 | r | library(BayesianTools)
library(phydynR)
#get_BT_sample <- function(rds_file, end_sample){
#  bt_posterior <- readRDS(rds_file)
#  bt_sample <- getSample(bt_posterior, start = 1, end = end_sample, coda = TRUE)
#}
#pol gene ----
# NOTE(review): each gene section below overwrites chs, chs_sample, burninpc
# and outdir; only the section executed last feeds the common code further
# down — presumably the sections are run one at a time interactively.
# list
rds_dirs <- list.files("Analyses/MCMC_Results/pol_all/results_zmatrix/stage13", full.names = TRUE)
#get rds files (containing results of MCMC runs)
rds_files <- list()
for(x in 1:length(rds_dirs)){
rds_files[x] <- list.files(rds_dirs[x], pattern = "out_pol.RDS", full.names = TRUE)
#run <- readRDS(rds_files[[x]])
#summary(run)
}
infns <- unlist(rds_files)
# Combine the independent chains into one sampler list
chs <- createMcmcSamplerList( lapply( infns, readRDS ) )
end_sample = 23580
chs_sample <- BayesianTools::getSample(chs, start = 1, end = end_sample, coda = TRUE)
# NOTE(review): the hard-coded 23580 duplicates end_sample — keep in sync
pol_sample <- BayesianTools::getSample(chs, start = 5000, end = 23580, coda = TRUE)
# Effective sample size after discarding the first 5000 iterations
(pol.ESS <- as.numeric(format(round(coda::effectiveSize(pol_sample), 0))))
#quartz()
#plot(pol_sample, ask = TRUE)
# Settings consumed by the trajectory/summary code below
art_start <- 2005
burninpc <- 0.2120
numSamples <- 100
ncpu <- 1
outdir = "pol_all_data"
#down-sampled pol gene ----
# list
rds_dirs <- list.files("Analyses/MCMC_Results/down_sampled_pol/results_zmatrix/stage6", full.names = TRUE)
#get rds files (containing results of MCMC runs)
rds_files <- list()
for(x in 1:length(rds_dirs)){
rds_files[x] <- list.files(rds_dirs[x], pattern = "out.RDS", full.names = TRUE)
run <- readRDS(rds_files[[x]])
summary(run)
}
infns <- unlist(rds_files)
# Chain 8 excluded — reason not recorded here; presumably dropped after inspection
infns <- infns[c(1:7,9:10)]
chs <- createMcmcSamplerList( lapply( infns, readRDS ) )
end_sample = 24120
chs_sample <- BayesianTools::getSample(chs, start = 1, end = end_sample, coda = TRUE)
pol_sample <- BayesianTools::getSample(chs, start = 5000, end = end_sample, coda = TRUE)
# Convergence diagnostics and effective sample size
gelmanDiagnostics(pol_sample)
(pol.ESS <- as.numeric(format(round(coda::effectiveSize(pol_sample), 0))))
# quartz() opens a macOS-only graphics device
quartz()
plot(pol_sample, ask = TRUE)
# Settings consumed by the trajectory/summary code below
art_start <- 2005
burninpc <- 0.2173
numSamples <- 100
ncpu <- 1
outdir = "pol_downsampled_all_data"
#gag gene ----
# list
rds_dirs <- list.files("Analyses/MCMC_Results/gag_all/results_zmatrix/stage8", full.names = TRUE)
#get rds files (containing results of MCMC runs)
rds_files <- list()
for(x in 1:length(rds_dirs)){
rds_files[x] <- list.files(rds_dirs[x], pattern = "out_gag.RDS", full.names = TRUE)
#run <- readRDS(rds_files[[x]])
#summary(run)
}
infns <- unlist(rds_files)
# Chain 1 excluded — reason not recorded here; presumably dropped after inspection
infns <- infns[2:8]
chs <- createMcmcSamplerList( lapply( infns, readRDS ) )
end_sample = 24180
chs_sample <- BayesianTools::getSample(chs, start = 1, end = end_sample, coda = TRUE)
gag_sample <- BayesianTools::getSample(chs, start = 5000, end = end_sample, coda = TRUE)
# Convergence diagnostics and effective sample size
gelmanDiagnostics(gag_sample)
# (variable was previously misspelled 'gog.ESS'; renamed for consistency with
# pol.ESS / env.ESS — the value was never referenced under the old name)
(gag.ESS <- as.numeric(format(round(coda::effectiveSize(gag_sample), 0))))
# quartz() opens a macOS-only graphics device
quartz()
plot(gag_sample, ask = TRUE)
# Settings consumed by the trajectory/summary code below
art_start <- 2005
burninpc <- 0.2068
numSamples <- 100
ncpu <- 1
outdir = "gag_all_data"
#env gene ----
# list
rds_dirs <- list.files("Analyses/MCMC_Results/env_all/results_zmatrix/stage7", full.names = TRUE)
#get rds files (containing results of MCMC runs)
rds_files <- list()
for(x in 1:length(rds_dirs)){
rds_files[x] <- list.files(rds_dirs[x], pattern = "out_env.RDS", full.names = TRUE)
run <- readRDS(rds_files[[x]])
summary(run)
}
infns <- unlist(rds_files)
chs <- createMcmcSamplerList( lapply( infns, readRDS ) )
end_sample = 24120
chs_sample <- BayesianTools::getSample(chs, start = 1, end = end_sample, coda = TRUE)
env_sample <- BayesianTools::getSample(chs, start = 5000, end = end_sample, coda = TRUE)
# Convergence diagnostics and effective sample size
gelmanDiagnostics(env_sample)
(env.ESS <- as.numeric(format(round(coda::effectiveSize(env_sample), 0))))
# quartz() opens a macOS-only graphics device
quartz()
plot(env_sample, ask = TRUE)
# Settings consumed by the trajectory/summary code below
art_start <- 2005
burninpc <- 0.2073
numSamples <- 100
ncpu <- 1
outdir = "env_all_data"
# Posterior sample used for trajectory simulation: drop the burn-in fraction,
# then draw numSamples rows at random (without replacement)
O = getSample( chs, start = 1, end = end_sample )
start <- floor( burninpc * nrow(O) )
o <- O[ (start:nrow(O)), ]
#~ browser()
o <- o[ sample(1:nrow(o), size = numSamples, replace=FALSE), ]
mod0 <- generate_model0(art_start=art_start)
# Output directory for figures and the text summary
# NOTE(review): outdir_name is not defined in this script — this branch is
# only reached if outdir is NULL, but would then fail; confirm intent.
if (is.null( outdir )){
outdir <- outdir_name
}
suppressWarnings( dir.create ( outdir ) )
outdir <- paste0( outdir, '/' )
#chs <- createMcmcSamplerList( lapply( infns, readRDS ) )
# Write the MCMC summary to a text file
sink( file = paste0( outdir, 'mcmcSummary.txt' ))
print( summary( chs_sample ))
sink()
# Simulate a single model trajectory for one posterior parameter draw.
# `theta` is a named numeric vector whose entries override mod0$parms;
# the model is integrated from 1980 to 2015 on a grid of 100 points.
compute.traj0 <- function(theta) {
  print(theta)  # progress output
  parms <- mod0$parms
  for (nm in names(theta)) {
    parms[[nm]] <- theta[[nm]]
  }
  # initial state at 1980; the i1980 entry is substituted per tree in compute.traj
  state0 <- c(src = parms$src1980, i = parms$i1980, z = 0)
  mod0$dm(parms, state0, t0 = 1980, t1 = 2015, res = 100)
}
# one i1980.<k> column per tree in the posterior sample
i1980names <- colnames(O)[ grepl( 'i1980', colnames(O)) ]
ntres <- length( i1980names )
# Element-wise sum of a list of trajectory objects.
# Components 2-4 (births, migrations, population sizes) are lists summed index
# by index; component 5 is the state matrix, whose non-time columns are summed.
# A single trajectory is returned unchanged. (The previous version added
# ss[[1]] to itself, doubling every value whenever one trajectory was passed;
# this now matches the guarded add.trajs used inside trajectories_model0.)
add.trajs <- function(ss){
  s <- ss[[1]]
  if ( length(ss) > 1 ){
    for (k in 2:length(ss)){
      .s <- ss[[k]]
      s[[2]] <- lapply( seq_along( s[[2]] ), function(j) s[[2]][[j]] + .s[[2]][[j]] )
      s[[3]] <- lapply( seq_along( s[[3]] ), function(j) s[[3]][[j]] + .s[[3]][[j]] )
      s[[4]] <- lapply( seq_along( s[[4]] ), function(j) s[[4]][[j]] + .s[[4]][[j]] )
      s[[5]][, -1] <- s[[5]][, -1] + .s[[5]][, -1]
    }
  }
  s
}
# Simulate one trajectory per tree — each tree contributes its own i1980.<k>
# starting value — and combine them into a single summed trajectory.
compute.traj <- function(theta0) {
  shared <- theta0[setdiff(names(theta0), i1980names)]
  trajs <- lapply(seq_len(ntres), function(k) {
    compute.traj0(c(shared, i1980 = unname(theta0[i1980names[k]])))
  })
  add.trajs(trajs)
}
# Simulate one combined trajectory per retained posterior draw
# (serial version; the parallel mclapply variant is kept for reference)
#tfgys <- parallel::mclapply( 1:nrow(o), function(i){
# compute.traj( o[i, ] )
#}, mc.cores = ncpu)
tfgys <- apply(o, MARGIN = 1, FUN = compute.traj)
# State trajectories as matrices: one column per posterior draw
imat <- do.call( cbind, lapply( tfgys, function(s) s[[5]][, 'i' ] ) )
zmat <- do.call( cbind, lapply( tfgys, function(s) s[[5]][, 'z' ] ) )
# Row-wise median and 95% envelope (2.5% / 97.5% quantiles)
qi <- t( sapply( 1:nrow(imat) , function(i) quantile( imat[i, ], prob = c( .5, .025, .975 )) ) )
qz <- t( sapply( 1:nrow(imat) , function(i) quantile( zmat[i, ], prob = c( .5, .025, .975 )) ) )
pzmat <- zmat / ( imat + zmat )
qpz <- t( sapply( 1:nrow(pzmat) , function(i) quantile( pzmat[i, ], prob = c( .5, .025, .975 )) ) )
time <- seq( 1980, 2015, length.out = 100 )
#~ browser()
# Effective number of infections (PNG and SVG)
png( paste0( outdir, 'effinf.png'), width = 4, height = 3 , units = 'in', res = 300)
par(mai = c( .5, .85, .3, .25) )
matplot( time, qi, type = 'l', lty = c(1, 3, 3), col = 'black'
, xlab = '', ylab = 'Effective infections')
dev.off()
svg( paste0( outdir, 'effinf.svg'), width = 4, height = 3 )
par(mai = c( .5, .85, .3, .25) )
matplot( time, qi, type = 'l', lty = c(1, 3, 3), col = 'black'
, xlab = '', ylab = 'Effective infections')
dev.off()
# Number on treatment
png( paste0( outdir, 'ntreat.png'), width = 4, height = 3 , units = 'in', res = 300)
par(mai = c( .5, .85, .3, .25) )
matplot( time, qz, type = 'l', lty = c(1, 3, 3), col = 'black'
, xlab = '', ylab = 'On ART')
dev.off()
svg( paste0( outdir, 'ntreat.svg'), width = 4, height = 3 )
par(mai = c( .5, .85, .3, .25) )
matplot( time, qz, type = 'l', lty = c(1, 3, 3), col = 'black'
, xlab = '', ylab = 'On ART')
dev.off()
# Proportion of infections on treatment
png( paste0( outdir, 'proptreat.png'), width = 4, height = 3 , units = 'in', res = 300)
par(mai = c( .5, .85, .3, .25) )
matplot( time, qpz, type = 'l', lty = c(1, 3, 3), col = 'black'
, xlab = '', ylab = 'Proportion on ART')
dev.off()
svg( paste0( outdir, 'proptreat.svg'), width = 4, height = 3 )
par(mai = c( .5, .85, .3, .25) )
matplot( time, qpz, type = 'l', lty = c(1, 3, 3), col = 'black'
, xlab = '', ylab = 'Proportion on ART')
dev.off()
# New infections per year: column sums of the transmission (birth) matrices,
# reversed into chronological order
newinf <- do.call( cbind, lapply( tfgys, function(s) {
newinf <- sapply( s[[2]], function(FF) sum(FF[,2]) )
rev( newinf )
}))
qnewinf <- t( sapply( 1:nrow( newinf), function(i) quantile( newinf[i, ] , prob =c( .5, .025, .975 ))))
png( paste0( outdir, 'ninf.png'), width = 4, height = 3 , units = 'in', res = 300)
par(mai = c( .5, .85, .3, .25) )
matplot( time, qnewinf, type = 'l', lty = c(1, 3, 3), col = 'black'
, xlab = '', ylab = 'New infections / year')
dev.off()
svg( paste0( outdir, 'ninf.svg'), width = 4, height = 3 )
par(mai = c( .5, .85, .3, .25) )
matplot( time, qnewinf, type = 'l', lty = c(1, 3, 3), col = 'black'
, xlab = '', ylab = 'New infections / year')
dev.off()
# Proportion of new cases attributable to importation or cross-deme transmission
propImport <- do.call( cbind, lapply( tfgys, function(s) {
imp <- sapply( s[[3]], function(G) G[1,2] )
crosstrans <- sapply( s[[2]], function(FF) FF[1,2] )
newinf <- sapply( s[[2]], function(FF) FF[2,2] )
rev( (crosstrans + imp) / (crosstrans + imp + newinf ) )
}))
#~better as a box/whisker plot
qpi <- t( sapply( 1:nrow( propImport), function(i) quantile( propImport[i, ] , prob =c( .5, .025, .975 ))))
# Append the final proportion of imports to the text summary
sink( file = paste0( outdir, 'mcmcSummary.txt' ), append = TRUE )
print('')
print ('Proportion imports' )
print( tail( qpi, 1) )
sink()
#X11(); matplot( time, qpi, type = 'l' , lty = c( 1, 3, 3), col = 'black' )
# Posterior transmission rate in each estimation window, as box plots
bo <- lapply( mod0$betaNames, function(bn) o[, bn] )
png(paste0( outdir, 'beta.png') , width = 4, height = 3 , units = 'in', res = 300 )
par(mai = c( .5, .85, .3, .25) )
boxplot( bo, names = mod0$betaTimes, ylab = 'Transmission rate' )
dev.off()
svg(paste0( outdir, 'beta.svg') , width = 4, height = 3 )
par(mai = c( .5, .85, .3, .25) )
boxplot( bo, names = mod0$betaTimes, ylab = 'Transmission rate' )
dev.off()
# Return the posterior sample invisibly (no visible effect at script level)
invisible( o )
#' Make tfgys for multiple sets of input files
#'
#' Reads MCMC sampler output saved with \code{saveRDS}, draws posterior
#' samples and simulates the corresponding model trajectories.
#'
#' @param infns character vector of RDS file names, each holding a BayesianTools sampler.
#' @param art_start numeric year that ART is introduced in the model.
#' @param mod0 model object; if NULL, \code{generate_model0(art_start)} is used.
#' @param burninpc proportion of the chain to discard as burn-in.
#' @param ncpu number of cores used to simulate trajectories in parallel.
#' @param numSamples number of posterior draws to simulate.
#' @param onlyBestChain if TRUE and several input files are given, keep only the
#'   chain with the highest maximum a posteriori value.
#' @return list with elements \code{tfgys} (simulated trajectories, one per
#'   retained posterior draw) and \code{o} (the posterior draws used, without
#'   the per-tree i1980 columns).
#' @export
trajectories_model0 <- function( infns , art_start, mod0=NULL, burninpc = .5, ncpu = 1, numSamples = 100 , onlyBestChain = FALSE ){
	stopifnot( require(phydynR) )
	library( BayesianTools )
	chss <- lapply( infns, readRDS )
	if (onlyBestChain && length(infns) > 1){
		# keep only the chain whose maximum a posteriori value is largest
		maps <- sapply( chss, function(ch) MAP( ch )$valuesMAP[1] )
		chss <- list( chss[[ which.max( maps ) ]] )
	}
	chs <- createMcmcSamplerList( chss )
	# request extra samples so that keeping the tail after burn-in leaves numSamples draws
	O <- getSample( chs, numSamples = floor( numSamples / (1-burninpc) ) , parametersOnly = FALSE)
	o <- tail( O , numSamples )
	if ( is.null(mod0)){
		warning('model unspecified; using default model0')
		mod0 <- generate_model0(art_start=art_start)
	}
	# Simulate a single trajectory for one parameter draw (single i1980 value)
	compute.traj0 <- function(theta )
	{
		print(theta)  # progress: show the parameter draw being simulated
		p <- mod0$parms
		p[ names(theta)] <- theta
		x0 <- c( src = p$src1980, i = p$i1980 , z =0) # initial state at 1980; i1980 is set per tree in compute.traj
		mod0$dm(p, x0 , t0 = 1980, t1 =2015, res = 100 )
	}
	# one i1980.<k> column per tree in the posterior sample
	i1980names <- colnames(O)[ grepl( 'i1980', colnames(O)) ]
	ntres <- length( i1980names )
	# Element-wise sum of a list of trajectories: components 2-4 (births,
	# migrations, sizes) are summed index by index; component 5 is the state
	# matrix, whose non-time columns are summed.
	add.trajs <- function(ss){
		s <- ss[[1]]
		if ( length( ss) > 1 ){
			for (k in 2:length( ss )){
				.s <- ss[[k]]
				s[[2]] <- lapply( seq_along( s[[2]] ) , function(j) s[[2]][[j]] + .s[[2]][[j]] )
				s[[3]] <- lapply( seq_along( s[[3]] ) , function(j) s[[3]][[j]] + .s[[3]][[j]] )
				s[[4]] <- lapply( seq_along( s[[4]] ) , function(j) s[[4]][[j]] + .s[[4]][[j]] )
				s[[5]][, -1] <- s[[5]][, -1] + .s[[5]][, -1]
			}
		}
		s
	}
	# Simulate one trajectory per tree (one i1980 starting value each) and sum them
	compute.traj <- function( theta0){
		traj <- list()
		for ( k in seq_len(ntres) ){
			theta <- theta0[setdiff( names(theta0), i1980names ) ]
			theta <- c( theta , i1980 = unname( theta0[ i1980names[k] ] ) )
			traj[[k]] <- compute.traj0( theta )
		}
		add.trajs( traj )
	}
	tfgys <- parallel::mclapply( seq_len( nrow(o) ), function(i){
		compute.traj( o[i, ] )
	}, mc.cores = ncpu)
	list( tfgys = tfgys, o = o[, !grepl(pattern='i1980', colnames(o)) ])
}
#' Run \code{trajectories_model0} for several genes and pool the results
#'
#' @param infns_list list of character vectors of RDS file names, one per gene.
#' @param ... further arguments passed to \code{trajectories_model0}.
#' @return list with elements \code{tfgys} (pooled trajectories) and \code{o}
#'   (row-bound posterior draws), matching the structure returned by
#'   \code{trajectories_model0}.
#' @export
trajectories_model0_multigene <- function( infns_list, ... )
{
	tfgyos <- lapply( infns_list, function(infn )
		trajectories_model0( infn , ... )
	)
	tfgys <- do.call( c, lapply( tfgyos, '[[', 1 ) )
	o <- do.call( rbind, lapply( tfgyos, '[[', 2 ) )
	# named for consistency with trajectories_model0; positional access still works
	list( tfgys = tfgys, o = o )
}
#' Summarise fitted model trajectories; write figures and a text summary
#'
#' Writes quantile plots (PNG and SVG) of R(t), prevalence, numbers treated,
#' new infections and transmission rates to \code{outdir}, together with a
#' text summary of the posterior sample.
#'
#' @param tfgys list of trajectory objects (see \code{trajectories_model0}).
#' @param o matrix of posterior parameter draws, one row per trajectory.
#' @param art_start numeric year that ART is introduced in the model.
#' @param outdir output directory, created if needed; defaults to 'summary0_tfgys'.
#' @param axislog passed as \code{log} to the prevalence/incidence plots ('' or 'y').
#' @param lquant,uquant lower and upper quantiles for the plotted envelopes.
#' @return Invisibly returns \code{o}.
#' @export
summary_model0_tfgyos <- function(tfgys, o , art_start, outdir = NULL, axislog='', lquant = .025, uquant = .975 )
{
	library( coda )
	if (is.null( outdir )){
		outdir <- 'summary0_tfgys'
	}
	suppressWarnings( dir.create ( outdir ) )
	outdir <- paste0( outdir, '/' )
	# Row-wise median and (lquant, uquant) envelope of a trajectories matrix
	row_quantiles <- function(m)
		t( sapply( seq_len( nrow(m) ) , function(i) quantile( m[i, ], prob = c( .5, lquant, uquant )) ) )
	# Render the same figure to both <outdir><basename>.png and .svg
	save_plot_pair <- function(basename, draw){
		png( paste0( outdir, basename, '.png'), width = 4, height = 3 , units = 'in', res = 300)
		par(mai = c( .5, .85, .3, .25) )
		draw()
		dev.off()
		svg( paste0( outdir, basename, '.svg'), width = 4, height = 3 )
		par(mai = c( .5, .85, .3, .25) )
		draw()
		dev.off()
	}
	# Text summary of the posterior sample
	sink( file = paste0( outdir, 'mcmcSummary.txt' ))
	print( summary( coda::as.mcmc( o )) )
	sink()
	# State trajectories: one column per posterior draw
	imat <- do.call( cbind, lapply( tfgys, function(s) s[[5]][, 'i' ] ) )  # untreated infections
	zmat <- do.call( cbind, lapply( tfgys, function(s) s[[5]][, 'z' ] ) )  # on treatment
	qi <- row_quantiles( imat )
	qtotaliz <- row_quantiles( imat + zmat )
	qz <- row_quantiles( zmat )
	qpz <- row_quantiles( zmat / ( imat + zmat ) )  # proportion on treatment
	# trajectory times are stored in reverse chronological order
	time <- rev( tfgys[[1]][[1]] )
	mod0 <- generate_model0(art_start = art_start )
	# Reproduction number through time, one column per trajectory
	Rt <- sapply( tfgys, m0R.t )
	qRt <- row_quantiles( Rt )
	save_plot_pair( 'R', function(){
		matplot( time, qRt, type = 'l', lty = c(1, 3, 3), col = 'black'
		 , xlab = '', ylab = 'R(t)')
		abline( h = 1,col = 'red')  # epidemic threshold
	})
	save_plot_pair( 'i', function()
		matplot( time, qi, type = 'l', lty = c(1, 3, 3), col = 'black'
		 , xlab = '', ylab = 'I(t)', log = axislog)
	)
	save_plot_pair( 'iztotal', function()
		matplot( time, qtotaliz, type = 'l', lty = c(1, 3, 3), col = 'black'
		 , xlab = '', ylab = 'I(t) + Z(t)', log = axislog)
	)
	save_plot_pair( 'ntreat', function()
		matplot( time, qz, type = 'l', lty = c(1, 3, 3), col = 'black'
		 , xlab = '', ylab = 'On ART')
	)
	save_plot_pair( 'proptreat', function()
		matplot( time, qpz, type = 'l', lty = c(1, 3, 3), col = 'black'
		 , xlab = '', ylab = 'Proportion on ART')
	)
	# New infections per year: column sums of the transmission (birth) matrices,
	# reversed into chronological order
	newinf <- do.call( cbind, lapply( tfgys, function(s) {
		rev( sapply( s[[2]], function(FF) sum( FF[,2]) ) )
	}))
	qnewinf <- row_quantiles( newinf )
	save_plot_pair( 'ninf', function()
		matplot( time, qnewinf, type = 'l', lty = c(1, 3, 3), col = 'black'
		 , xlab = '', ylab = 'New infections / year', log = axislog)
	)
	# Proportion of new cases attributable to importation or cross-deme transmission
	propImport <- do.call( cbind, lapply( tfgys, function(s) {
		imp <- sapply( s[[3]], function(G) G[1,2] )
		crosstrans <- sapply( s[[2]], function(FF) FF[1,2] )
		newinf <- sapply( s[[2]], function(FF) FF[2,2] )
		rev( (crosstrans + imp) / (crosstrans + imp + newinf ) )
	}))
	qpi <- row_quantiles( propImport )
	# Append the final proportion of imports to the text summary
	sink( file = paste0( outdir, 'mcmcSummary.txt' ), append = TRUE )
	print('')
	print ('Proportion imports' )
	print( tail( qpi, 1) )
	sink()
	# Posterior transmission rate in each estimation window, as box plots
	bo <- lapply( mod0$betaNames, function(bn) o[, bn] )
	save_plot_pair( 'beta', function()
		boxplot( bo, names = mod0$betaTimes, ylab = 'Transmission rate' )
	)
	invisible( o )
}
#' Compute R(t) by integrating over future hazard of removal and transmissions
#'
#' @param s a trajectory object with named elements \code{times}, \code{sizes},
#'   \code{deaths} and \code{births}; deme 2 is the local infected population.
#' @return numeric vector of effective reproduction numbers, one per time
#'   point, in chronological order.
#' @export
m0R.t <- function ( s) {
	i <- 2  # index of the local infected deme
	# reverse so all series run forward in time
	y <- rev( sapply( s$sizes, '[', i ) )              # infected population size
	d <- rev( sapply( s$deaths, '[', i ))              # removals per time point
	b <- rev( sapply( s$births, function(x) x[i,i] ))  # within-deme transmissions
	x <- rev( s$times )
	dx <- diff(x)[1]  # assumes a regular time grid
	r <- dx * b / y   # per-capita transmissions per step
	# extend 100 steps past the end of the series, holding the last value constant,
	# so the future integral is approximated beyond the observed window
	r <- c( r, rep( tail(r, 1), 100 ))
	loghaz <- log (1- d*dx / y )  # log per-step survival probability
	lasthazard <- log (1- tail(d,1)*dx / tail(y,1) )
	loghaz <- c( loghaz, rep(lasthazard, 100 ) )
	logsurv <- cumsum( loghaz)
	# R at time k: expected future transmissions weighted by survival since k
	R <- sapply( seq_along( x ), function(k){
		j <- k:length(r)
		sum( exp( logsurv[j] - logsurv[k] ) * r[j] )
	})
	R
}
|
"benchstats" <-
structure(c(-60.7831272900001, 34.3079583900001, 3.56745360999999,
9.94903490800002, 22.4989411700000, 33.89292992, 50.0425105499999,
53.2141230900001, 59.1443245600001, 58.6841452499999, 13.21342134,
5.15217918889568, 2.89643983703556, 0.95543584548527, 1.68101350129359,
2.0897844158153, 1.75364152690944, 1.65285717332056, 1.10690742807124,
0.737680372356471, 0.427056870264935, 1.99912021164428, 0.0515217918889568,
0.0289643983703556, 0.0095543584548527, 0.0168101350129359, 0.020897844158153,
0.0175364152690944, 0.0165285717332056, 0.0110690742807124, 0.00737680372356471,
0.00427056870264935, 0.0199912021164428, 0.0749820368184437,
0.0424238958019385, 0.0120982114296435, 0.0201087388192881, 0.0239164039838735,
0.0224611804560939, 0.0243408587514327, 0.0170645630747179, 0.0114256340298251,
0.00657670318302355, 0.0301557929820285), .Dim = as.integer(c(11,
4)), .Dimnames = list(c("alpha", "beta", "r.hat[1]", "r.hat[2]",
"r.hat[3]", "r.hat[4]", "r.hat[5]", "r.hat[6]", "r.hat[7]", "r.hat[8]",
"D"), c("Mean", "SD", "Naive SE", "Time-series SE")))
| /packages/nimble/inst/classic-bugs/vol2/beetles/bench-test1.R | permissive | nimble-dev/nimble | R | false | false | 1,088 | r | "benchstats" <-
# Benchmark fixture: reference MCMC summary statistics, an 11 x 4 named matrix
# (rows: model quantities; columns: Mean, SD, Naive SE, Time-series SE).
# NOTE(review): appears to be dput() output — regenerate rather than edit by hand.
structure(c(-60.7831272900001, 34.3079583900001, 3.56745360999999,
9.94903490800002, 22.4989411700000, 33.89292992, 50.0425105499999,
53.2141230900001, 59.1443245600001, 58.6841452499999, 13.21342134,
5.15217918889568, 2.89643983703556, 0.95543584548527, 1.68101350129359,
2.0897844158153, 1.75364152690944, 1.65285717332056, 1.10690742807124,
0.737680372356471, 0.427056870264935, 1.99912021164428, 0.0515217918889568,
0.0289643983703556, 0.0095543584548527, 0.0168101350129359, 0.020897844158153,
0.0175364152690944, 0.0165285717332056, 0.0110690742807124, 0.00737680372356471,
0.00427056870264935, 0.0199912021164428, 0.0749820368184437,
0.0424238958019385, 0.0120982114296435, 0.0201087388192881, 0.0239164039838735,
0.0224611804560939, 0.0243408587514327, 0.0170645630747179, 0.0114256340298251,
0.00657670318302355, 0.0301557929820285), .Dim = as.integer(c(11,
4)), .Dimnames = list(c("alpha", "beta", "r.hat[1]", "r.hat[2]",
"r.hat[3]", "r.hat[4]", "r.hat[5]", "r.hat[6]", "r.hat[7]", "r.hat[8]",
"D"), c("Mean", "SD", "Naive SE", "Time-series SE")))
|
# this is the server file for my project
library(shiny)
# Fail fast with installation instructions if quantmod is missing;
# require() (rather than library()) is used so the error message can be customised.
if (!require(quantmod)) {
stop("This app requires the quantmod package. To install it, run 'install.packages(\"quantmod\")'.\n")
}
# Fetch price data for `symbol`, caching the result in `envir` so that each
# symbol is downloaded at most once per environment.
require_symbol <- function(symbol, envir = parent.frame()) {
  cached <- envir[[symbol]]
  if (is.null(cached)) {
    cached <- getSymbols(symbol, auto.assign = FALSE)
    envir[[symbol]] <- cached
  }
  cached
}
# Server logic: render the selected symbol's chart with the chosen options
shinyServer(function(input, output) {
# cache of downloaded symbol data for this session
symbol_env <- new.env()
# Draw a financial chart for `symbol` using the UI inputs
# (chart type, date range joined into an xts "start::end" range, indicators)
make_chart <- function(symbol) {
symbol_data <- require_symbol(symbol, symbol_env)
chartSeries(symbol_data,
name = symbol,
type = input$chart_type,
subset = paste(input$date_range, collapse = "::"),
theme = "white",
TA=paste(input$indicators,collapse = ";"))
}
output$chart <- renderPlot({ make_chart(input$stock_select) })
}) | /app/app1.R | no_license | gustavovasquezperdomo/ProjectPitch | R | false | false | 1,064 | r | # this is the server file for my project
library(shiny)
# Fail fast with installation instructions if quantmod is missing;
# require() (rather than library()) is used so the error message can be customised.
if (!require(quantmod)) {
stop("This app requires the quantmod package. To install it, run 'install.packages(\"quantmod\")'.\n")
}
# Fetch price data for `symbol`, caching the result in `envir` so that each
# symbol is downloaded at most once per environment.
require_symbol <- function(symbol, envir = parent.frame()) {
  cached <- envir[[symbol]]
  if (is.null(cached)) {
    cached <- getSymbols(symbol, auto.assign = FALSE)
    envir[[symbol]] <- cached
  }
  cached
}
# Server logic: render the selected symbol's chart with the chosen options
shinyServer(function(input, output) {
# cache of downloaded symbol data for this session
symbol_env <- new.env()
# Draw a financial chart for `symbol` using the UI inputs
# (chart type, date range joined into an xts "start::end" range, indicators)
make_chart <- function(symbol) {
symbol_data <- require_symbol(symbol, symbol_env)
chartSeries(symbol_data,
name = symbol,
type = input$chart_type,
subset = paste(input$date_range, collapse = "::"),
theme = "white",
TA=paste(input$indicators,collapse = ";"))
}
output$chart <- renderPlot({ make_chart(input$stock_select) })
}) |
# Load Data From File
# NOTE(review): missing values in this dataset are typically encoded as "?";
# as.numeric() below would coerce those rows to NA with a warning — confirm intended.
data <- read.table(file="household_power_consumption.txt",sep=";",header=TRUE
,stringsAsFactors=FALSE)
#Format and Filter Data
# Keep only the two days of interest: 1-2 February 2007
data$Date <- as.Date(data$Date,format="%d/%m/%Y")
data <- data[data$Date == "2007-02-01" | data$Date == "2007-02-02",]
data$Global_active_power <- as.numeric(data$Global_active_power)
# Combine date and time into a single POSIXct timestamp for the x-axis
data$Day <- as.POSIXct(paste(as.Date(data$Date, format="%d/%m/%Y"), data$Time, sep=" "))
# Construct Plot
# 480x480 PNG line plot of global active power over the two days
png("plot2.png",width = 480, height = 480)
with(data,plot(x=Day, y=Global_active_power ,xlab="" ,ylab="Global Active Power (kilowatts)"
,type="l"))
dev.off() | /plot2.R | no_license | anshulr/ExData_Plotting1 | R | false | false | 633 | r | # Load Data From File
# NOTE(review): missing values in this dataset are typically encoded as "?";
# as.numeric() below would coerce those rows to NA with a warning — confirm intended.
data <- read.table(file="household_power_consumption.txt",sep=";",header=TRUE
,stringsAsFactors=FALSE)
#Format and Filter Data
# Keep only the two days of interest: 1-2 February 2007
data$Date <- as.Date(data$Date,format="%d/%m/%Y")
data <- data[data$Date == "2007-02-01" | data$Date == "2007-02-02",]
data$Global_active_power <- as.numeric(data$Global_active_power)
# Combine date and time into a single POSIXct timestamp for the x-axis
data$Day <- as.POSIXct(paste(as.Date(data$Date, format="%d/%m/%Y"), data$Time, sep=" "))
# Construct Plot
# 480x480 PNG line plot of global active power over the two days
png("plot2.png",width = 480, height = 480)
with(data,plot(x=Day, y=Global_active_power ,xlab="" ,ylab="Global Active Power (kilowatts)"
,type="l"))
dev.off() |
\name{predict.tsglm}
\alias{predict.tsglm}
\title{
Predicts Method for Time Series of Counts Following Generalised Linear Models
}
\description{
Predict future observations based on a fitted GLM-type model for time series of counts.
}
\usage{
\method{predict}{tsglm}(object, n.ahead=1, newobs=NULL, newxreg=NULL, level=0.95,
global=FALSE, type=c("quantiles", "shortest", "onesided"),
method=c("conddistr", "bootstrap"), B=1000,
estim=c("ignore", "bootstrap", "normapprox", "given"), B_estim=B,
coefs_given, ...)
}
\arguments{
\item{object}{
an object of class \code{"tsglm"}. Usually the result of a call to \code{\link{tsglm}}.
}
\item{n.ahead}{
positive integer value giving the number of steps ahead for which predictions should be made.
}
\item{newobs}{
integer vector of known future observations of the time series. This argument is only relevant if more than one observation ahead is to be predicted (\code{n.ahead} greater than 1). The \eqn{h}-step-ahead prediction for \eqn{h>1} is computed as a 1-step-ahead prediction given all previous values, which can be observations of the original time series or new observations provided in this argument. Previous observations which are not available are replaced by their respective 1-step-ahead prediction.
}
\item{newxreg}{
matrix or vector containing new values for the covariates to be used for prediction. If \code{newxreg} is omitted or contains less rows than the value of \code{n.ahead}, the last known values of the covariates are used for prediction. This is usually not reasonable and it is strongly advised to explicitely make assumptions on future covariates and to specify the argument \code{xreg} accordingly.
}
\item{level}{
numeric value determining the desired coverage rate of prediction intervals. If \code{level=0} no prediction intervals are computed.
}
\item{global}{
logical value saying whether the coverage rate for \eqn{Y_{n+1},...,Y_{n+h}}{Y[n+1],...,Y[n+h]} specified by argument \code{level} holds globally (\code{global=TRUE}) or for each of the \code{n.ahead} prediction intervals individually (\code{global=FALSE}, the default). In the former case the individual coverage rate for a single prediction interval is Bonferroni adjusted to a level of \code{1-(1-level)/n.ahead}.
}
\item{type}{
character value saying how the prediction interval shall be constructed. If \code{type="quantiles"} (the default), its limits are chosen to be the \code{a}- and \code{(1-a)}-quantiles of the respective (approximated) distribution, with \code{a=(1-level)/2}. If \code{type="shortest"} it is chosen such that it has minimal length. Note that these two types of construction principles frequently lead to the same result. If \code{type="onesided"} a one-sided prediction interval is constructed where the lower boundary is always zero.
}
\item{method}{
character value saying which method to be used for computing the prediction intervals. If \code{method="conddistr"} the prediction intervals are based on the conditional distribution given by the model with the unknown parameters being replaced by their respective estimations. This is only possible if only 1-step-ahead predictions are to be computed (possibly recursively using the new observations given in argument \code{newobs}). If \code{method="bootstrap"} the predictive distribution is approximated by a parametric bootstrap where \code{B} trajectories of the process are simulated from the fitted model. This is currently only possible if no new observations are given in argument \code{newobs}. By default the method \code{"conddistr"} is prefered whenever it is appliccable.
}
\item{B}{
positive integer value giving the number of samples of a parametric bootstrap to use for numerical determination of prediction intervals (only necessary if argument \code{method="bootstrap"}).
}
\item{estim}{
character value saying how the prediction intervals shall account for the additional uncertainty induced by the parameter estimation. This is particularly important if the model was fitted on a short time series. If \code{estim="ignore"} (the default), this additional uncertainty is ignored. The other two options (\code{estim="bootstrap"} and \code{estim="normapprox"}) are only possible if \code{method="bootstrap"}. If these are selected the bootstrap samples are not generated from a model with the parameters of the original fit. Instead, each of the \code{B} bootstrap samples is generated from a model with parameters which are itself randomly generated. This two-stage approach should take into account the additional estimation uncertainty.
If \code{estim="bootstrap"}, the parameters are obtained from a fit to a parametric bootstrap replication of the original time series.
If \code{estim="normapprox"}, the regression parameters are generated from a multivariate normal distribution which is based on the normal approximation of the original quasi maximum likelihood estimator and reflects the estimation uncertainty. In that case the additional distribution coefficients are not randomly generated such that their estimation uncertainty is ignored.
If \code{estim="given"}, the parameters are resampled from a table of possible parameters which need to be given in argument \code{coefs_given}.
}
\item{B_estim}{
positive integer value giving the number of parameters used for resampling to account for estimation uncertainty. Only necessary for \code{estim="bootstrap"} and \code{estim="normapprox"}. If \code{B_estim} is smaller than \code{B}, the parameters are resampled with replacement.
}
\item{coefs_given}{
table with parameters in the rows. Only necessary for \code{estim="given"}). If \code{nrow(coefs_given)} is smaller than \code{B}, the parameters are resampled with replacement.
}
\item{...}{
further arguments are currently ignored. Only for compatibility with generic function.
}
}
\details{
Returns predictions for the \code{n.ahead} observations following the fitted time series contained in argument \code{object}. The 1-step-ahead prediction is the conditional expectation of the observation to be predicted given the past. The true parameters are replaced by their estimations given in argument \code{object}. For a 2-step-ahead-prediction the true previous observation is used when given in argument \code{newobs}, otherwise it is replaced by the 1-step-ahead prediction computed before. For a 3-step-prediction this holds for the previous two observations, which are replaced by their respective predictions if not available, and so on.
Unless \code{level=0}, the function also returns prediction intervals. Read the description of the arguments \code{type} and\code{method} for further details on the computation. Note that the prediction intervals do not reflect the additional uncertainty induced by the parameter estimation. However, for sufficiently long time series used for model fitting, it is expected that this uncertainty is negligible compared to the uncertainty of the predictive distribution. The argument \code{estim} allows to account fot this additional estimation uncertainty if \code{method="bootstrap"}, see the description of this argument.
If prediction intervals are computed the function additionally returns the median of the predictive distribution. If \code{method="conddistr"} this is the analytical median of the conditional distribution, otherwise the empirical median of the simulated distribution.
}
\value{
A list with at least the following element:
\item{pred}{a numeric vector of the predictions. Has class \code{"ts"} if the response used for fitting has this class.}
If prediction intervals are calculated, the list has the additional element:
\item{interval}{a matrix with the columns \code{"lower"} and \code{"upper"} giving the lower and upper boundaries of prediction intervals for the future time points, each with an intended coverage rate as given in argument \code{level}. Has class \code{"ts"} if the response used for fitting has this class.}
\item{level}{a numeric value determining the desired coverage rate of prediction intervals.}
\item{global}{a logical value saying whether the coverage rate \code{level} holds globally or for each of the prediction intervals individually.}
\item{type}{a character value saying how the prediction intervals were computed. Possible values are \code{"quantiles"} and \code{"shortest"}.}
\item{method}{a character value saying which method were used for computation of prediction intervals. Possible values are \code{"conddistr"} and \code{"bootstrap"}.}
\item{B}{an integer value giving the number of bootstrap samples which were used for computing prediction intervals. Is \code{NULL} if computation was done by \code{method="conddistr"}.}
\item{estim}{a character value saying how the prediction intervals account for estimation uncertainty of the model parameters. Possible values are \code{"ignore"}, \code{"bootstrap"}, \code{"normapprox"} and \code{"given"}.}
\item{B_estim}{an integer value giving the number of parameter values used for resampling to account for estimation uncertainty. This value is zero if the estimation uncertainty is ignored.}
\item{warning_messages}{a character vector containing warning messages. This should be \code{NULL} if no warning messages occured.}
\item{median}{a vector giving the median of the predictive distribution for each of the future time points. Has class \code{"ts"} if the response used for fitting has this class.}
}
\references{
Liboschik, T., Fokianos, K. and Fried, R. (2017) tscount: An R package for analysis of count time series following generalized linear models. \emph{Journal of Statistical Software} \bold{82(5)}, 1--51, \url{http://dx.doi.org/10.18637/jss.v082.i05}.
}
\author{
Tobias Liboschik and Philipp Probst
}
\seealso{
\code{\link{tsglm}} for fitting a GLM for time series of counts.
}
\examples{
###Campylobacter infections in Canada (see help("campy"))
campyfit <- tsglm(ts=campy, model=list(past_obs=1, past_mean=c(7,13)))
predict(campyfit, n.ahead=1) #prediction interval using conditional distribution
predict(campyfit, n.ahead=5, global=TRUE) #prediction intervals using parametric bootstrap
}
\keyword{Prediction}
| /man/predict.tsglm.Rd | no_license | cran/tscount | R | false | false | 10,269 | rd | \name{predict.tsglm}
\alias{predict.tsglm}
\title{
Predicts Method for Time Series of Counts Following Generalised Linear Models
}
\description{
Predict future observations based on a fitted GLM-type model for time series of counts.
}
\usage{
\method{predict}{tsglm}(object, n.ahead=1, newobs=NULL, newxreg=NULL, level=0.95,
global=FALSE, type=c("quantiles", "shortest", "onesided"),
method=c("conddistr", "bootstrap"), B=1000,
estim=c("ignore", "bootstrap", "normapprox", "given"), B_estim=B,
coefs_given, ...)
}
\arguments{
\item{object}{
an object of class \code{"tsglm"}. Usually the result of a call to \code{\link{tsglm}}.
}
\item{n.ahead}{
positive integer value giving the number of steps ahead for which predictions should be made.
}
\item{newobs}{
integer vector of known future observations of the time series. This argument is only relevant if more than one observation ahead is to be predicted (\code{n.ahead} greater than 1). The \eqn{h}-step-ahead prediction for \eqn{h>1} is computed as a 1-step-ahead prediction given all previous values, which can be observations of the original time series or new observations provided in this argument. Previous observations which are not available are replaced by their respective 1-step-ahead prediction.
}
\item{newxreg}{
matrix or vector containing new values for the covariates to be used for prediction. If \code{newxreg} is omitted or contains less rows than the value of \code{n.ahead}, the last known values of the covariates are used for prediction. This is usually not reasonable and it is strongly advised to explicitely make assumptions on future covariates and to specify the argument \code{xreg} accordingly.
}
\item{level}{
numeric value determining the desired coverage rate of prediction intervals. If \code{level=0} no prediction intervals are computed.
}
\item{global}{
logical value saying whether the coverage rate for \eqn{Y_{n+1},...,Y_{n+h}}{Y[n+1],...,Y[n+h]} specified by argument \code{level} holds globally (\code{global=TRUE}) or for each of the \code{n.ahead} prediction intervals individually (\code{global=FALSE}, the default). In the former case the individual coverage rate for a single prediction interval is Bonferroni adjusted to a level of \code{1-(1-level)/n.ahead}.
}
\item{type}{
character value saying how the prediction interval shall be constructed. If \code{type="quantiles"} (the default), its limits are chosen to be the \code{a}- and \code{(1-a)}-quantiles of the respective (approximated) distribution, with \code{a=(1-level)/2}. If \code{type="shortest"} it is chosen such that it has minimal length. Note that these two types of construction principles frequently lead to the same result. If \code{type="onesided"} a one-sided prediction interval is constructed where the lower boundary is always zero.
}
\item{method}{
character value saying which method to be used for computing the prediction intervals. If \code{method="conddistr"} the prediction intervals are based on the conditional distribution given by the model with the unknown parameters being replaced by their respective estimations. This is only possible if only 1-step-ahead predictions are to be computed (possibly recursively using the new observations given in argument \code{newobs}). If \code{method="bootstrap"} the predictive distribution is approximated by a parametric bootstrap where \code{B} trajectories of the process are simulated from the fitted model. This is currently only possible if no new observations are given in argument \code{newobs}. By default the method \code{"conddistr"} is prefered whenever it is appliccable.
}
\item{B}{
positive integer value giving the number of samples of a parametric bootstrap to use for numerical determination of prediction intervals (only necessary if argument \code{method="bootstrap"}).
}
\item{estim}{
character value saying how the prediction intervals shall account for the additional uncertainty induced by the parameter estimation. This is particularly important if the model was fitted on a short time series. If \code{estim="ignore"} (the default), this additional uncertainty is ignored. The other two options (\code{estim="bootstrap"} and \code{estim="normapprox"}) are only possible if \code{method="bootstrap"}. If these are selected the bootstrap samples are not generated from a model with the parameters of the original fit. Instead, each of the \code{B} bootstrap samples is generated from a model with parameters which are itself randomly generated. This two-stage approach should take into account the additional estimation uncertainty.
If \code{estim="bootstrap"}, the parameters are obtained from a fit to a parametric bootstrap replication of the original time series.
If \code{estim="normapprox"}, the regression parameters are generated from a multivariate normal distribution which is based on the normal approximation of the original quasi maximum likelihood estimator and reflects the estimation uncertainty. In that case the additional distribution coefficients are not randomly generated such that their estimation uncertainty is ignored.
If \code{estim="given"}, the parameters are resampled from a table of possible parameters which need to be given in argument \code{coefs_given}.
}
\item{B_estim}{
positive integer value giving the number of parameters used for resampling to account for estimation uncertainty. Only necessary for \code{estim="bootstrap"} and \code{estim="normapprox"}. If \code{B_estim} is smaller than \code{B}, the parameters are resampled with replacement.
}
\item{coefs_given}{
table with parameters in the rows. Only necessary for \code{estim="given"}). If \code{nrow(coefs_given)} is smaller than \code{B}, the parameters are resampled with replacement.
}
\item{...}{
further arguments are currently ignored. Only for compatibility with generic function.
}
}
\details{
Returns predictions for the \code{n.ahead} observations following the fitted time series contained in argument \code{object}. The 1-step-ahead prediction is the conditional expectation of the observation to be predicted given the past. The true parameters are replaced by their estimations given in argument \code{object}. For a 2-step-ahead-prediction the true previous observation is used when given in argument \code{newobs}, otherwise it is replaced by the 1-step-ahead prediction computed before. For a 3-step-prediction this holds for the previous two observations, which are replaced by their respective predictions if not available, and so on.
Unless \code{level=0}, the function also returns prediction intervals. Read the description of the arguments \code{type} and\code{method} for further details on the computation. Note that the prediction intervals do not reflect the additional uncertainty induced by the parameter estimation. However, for sufficiently long time series used for model fitting, it is expected that this uncertainty is negligible compared to the uncertainty of the predictive distribution. The argument \code{estim} allows to account fot this additional estimation uncertainty if \code{method="bootstrap"}, see the description of this argument.
If prediction intervals are computed the function additionally returns the median of the predictive distribution. If \code{method="conddistr"} this is the analytical median of the conditional distribution, otherwise the empirical median of the simulated distribution.
}
\value{
A list with at least the following element:
\item{pred}{a numeric vector of the predictions. Has class \code{"ts"} if the response used for fitting has this class.}
If prediction intervals are calculated, the list has the additional element:
\item{interval}{a matrix with the columns \code{"lower"} and \code{"upper"} giving the lower and upper boundaries of prediction intervals for the future time points, each with an intended coverage rate as given in argument \code{level}. Has class \code{"ts"} if the response used for fitting has this class.}
\item{level}{a numeric value determining the desired coverage rate of prediction intervals.}
\item{global}{a logical value saying whether the coverage rate \code{level} holds globally or for each of the prediction intervals individually.}
\item{type}{a character value saying how the prediction intervals were computed. Possible values are \code{"quantiles"} and \code{"shortest"}.}
\item{method}{a character value saying which method were used for computation of prediction intervals. Possible values are \code{"conddistr"} and \code{"bootstrap"}.}
\item{B}{an integer value giving the number of bootstrap samples which were used for computing prediction intervals. Is \code{NULL} if computation was done by \code{method="conddistr"}.}
\item{estim}{a character value saying how the prediction intervals account for estimation uncertainty of the model parameters. Possible values are \code{"ignore"}, \code{"bootstrap"}, \code{"normapprox"} and \code{"given"}.}
\item{B_estim}{an integer value giving the number of parameter values used for resampling to account for estimation uncertainty. This value is zero if the estimation uncertainty is ignored.}
\item{warning_messages}{a character vector containing warning messages. This should be \code{NULL} if no warning messages occured.}
\item{median}{a vector giving the median of the predictive distribution for each of the future time points. Has class \code{"ts"} if the response used for fitting has this class.}
}
\references{
Liboschik, T., Fokianos, K. and Fried, R. (2017) tscount: An R package for analysis of count time series following generalized linear models. \emph{Journal of Statistical Software} \bold{82(5)}, 1--51, \url{http://dx.doi.org/10.18637/jss.v082.i05}.
}
\author{
Tobias Liboschik and Philipp Probst
}
\seealso{
\code{\link{tsglm}} for fitting a GLM for time series of counts.
}
\examples{
###Campylobacter infections in Canada (see help("campy"))
campyfit <- tsglm(ts=campy, model=list(past_obs=1, past_mean=c(7,13)))
predict(campyfit, n.ahead=1) #prediction interval using conditional distribution
predict(campyfit, n.ahead=5, global=TRUE) #prediction intervals using parametric bootstrap
}
\keyword{Prediction}
|
library(deSolve)
library(plot3D)
outputdir <- "/isi/olga/xin/GTA_project/output/20160701/X_ratio_c_N.txt"
c.array <- seq(0.0001, 1, length.out = 500)
K.array <- seq(1e5, 1e9, length.out = 500)
r.array <- seq(0.0001, 1, length.out = 500)
N.array <- c(1e-11, 1e-10, 5e-10, seq(1e-9, 1e-8, length.out = 495), 1e-7, 1e-6)
M <- mesh(c.array, N.array)
array <- matrix(NA, nrow = length(M$x), ncol = 5)
solveGTAeq <- function(Pars) {
yini <- c(X1 = 1, X2 = 1)
GTAeq <- function(Time, State, Pars) {
with(as.list(c(State, Pars)), {
dX1 <- r * (1-c) * (1-(X1+X2)/K) * X1 - c * X1 + c * N* X1 * X2 # GTA postive
dX2 <- r * (1-(X1+X2)/K) * X2 - c * N * X1 * X2 # GTA negative
return(list(c(dX1, dX2)))
})
}
times <- seq(0, 10000, by = 1)
out <- ode(yini, times, GTAeq, pars)
b = 0
for (a in 1:50){
deltaX <- abs(out[length(times), 2]-out[(length(times)-a), 2])
if (deltaX < 0.0001 && (out[length(times), 2] + out[length(times), 3]) > 1000) {
b = b + 1
}
}
if (b == 50){
output <- out[length(times),]
X1.ratio <- output[2]/(output[2] + output[3])
outputarray <- c(output[2], output[3], X1.ratio)
return(outputarray)
}
else {
times <- seq(0, 100000, by = 1)
out <- ode(yini, times, GTAeq, pars)
b = 0
for (a in 1:50){
deltaX <- abs(out[length(times), 2]-out[(length(times)-a), 2])
if (deltaX < 0.0001 && (out[length(times), 2] + out[length(times), 3]) > 1000) {
b = b + 1
}
}
if (b == 50){
output <- out[length(times),]
X1.ratio <- output[2]/(output[2] + output[3])
outputarray <- c(output[2], output[3], X1.ratio)
return(outputarray)
}
else {
times <- seq(0, 1000000, by = 1)
out <- ode(yini, times, GTAeq, pars)
b = 0
for (a in 1:50){
deltaX <- abs(out[length(times), 2]-out[(length(times)-a), 2])
if (deltaX < 0.0001 && (out[length(times), 2] + out[length(times), 3]) > 1000) {
b = b + 1
}
}
if (b == 50){
output <- out[length(times),]
X1.ratio <- output[2]/(output[2] + output[3])
outputarray <- c(output[2], output[3], X1.ratio)
return(outputarray)
}
else {
times <- seq(0, 30000000, by = 1)
out <- ode(yini, times, GTAeq, pars)
output <- out[length(times),]
X1.ratio <- output[2]/(output[2] + output[3])
outputarray <- c(output[2], output[3], X1.ratio)
return(outputarray)
}
}
}
}
for (j in 1 : length(M$x)){
pars <- c(r = 0.1, # growth rate
K = 1e9, # carry capacity
c = M$x[j], # cost
N = M$y[j]) # N = b*i*ro*mu
solve.output <- solveGTAeq(pars)
array[j,] <- c(solve.output, M$x[j], M$y[j])
print(j)
}
write.table(array, file = outputdir, quote = F, row.names = F, sep = "\t")
| /deSolveGTA_cN.R | no_license | mascoma/GTA_project_script | R | false | false | 2,877 | r | library(deSolve)
library(plot3D)
outputdir <- "/isi/olga/xin/GTA_project/output/20160701/X_ratio_c_N.txt"
c.array <- seq(0.0001, 1, length.out = 500)
K.array <- seq(1e5, 1e9, length.out = 500)
r.array <- seq(0.0001, 1, length.out = 500)
N.array <- c(1e-11, 1e-10, 5e-10, seq(1e-9, 1e-8, length.out = 495), 1e-7, 1e-6)
M <- mesh(c.array, N.array)
array <- matrix(NA, nrow = length(M$x), ncol = 5)
solveGTAeq <- function(Pars) {
yini <- c(X1 = 1, X2 = 1)
GTAeq <- function(Time, State, Pars) {
with(as.list(c(State, Pars)), {
dX1 <- r * (1-c) * (1-(X1+X2)/K) * X1 - c * X1 + c * N* X1 * X2 # GTA postive
dX2 <- r * (1-(X1+X2)/K) * X2 - c * N * X1 * X2 # GTA negative
return(list(c(dX1, dX2)))
})
}
times <- seq(0, 10000, by = 1)
out <- ode(yini, times, GTAeq, pars)
b = 0
for (a in 1:50){
deltaX <- abs(out[length(times), 2]-out[(length(times)-a), 2])
if (deltaX < 0.0001 && (out[length(times), 2] + out[length(times), 3]) > 1000) {
b = b + 1
}
}
if (b == 50){
output <- out[length(times),]
X1.ratio <- output[2]/(output[2] + output[3])
outputarray <- c(output[2], output[3], X1.ratio)
return(outputarray)
}
else {
times <- seq(0, 100000, by = 1)
out <- ode(yini, times, GTAeq, pars)
b = 0
for (a in 1:50){
deltaX <- abs(out[length(times), 2]-out[(length(times)-a), 2])
if (deltaX < 0.0001 && (out[length(times), 2] + out[length(times), 3]) > 1000) {
b = b + 1
}
}
if (b == 50){
output <- out[length(times),]
X1.ratio <- output[2]/(output[2] + output[3])
outputarray <- c(output[2], output[3], X1.ratio)
return(outputarray)
}
else {
times <- seq(0, 1000000, by = 1)
out <- ode(yini, times, GTAeq, pars)
b = 0
for (a in 1:50){
deltaX <- abs(out[length(times), 2]-out[(length(times)-a), 2])
if (deltaX < 0.0001 && (out[length(times), 2] + out[length(times), 3]) > 1000) {
b = b + 1
}
}
if (b == 50){
output <- out[length(times),]
X1.ratio <- output[2]/(output[2] + output[3])
outputarray <- c(output[2], output[3], X1.ratio)
return(outputarray)
}
else {
times <- seq(0, 30000000, by = 1)
out <- ode(yini, times, GTAeq, pars)
output <- out[length(times),]
X1.ratio <- output[2]/(output[2] + output[3])
outputarray <- c(output[2], output[3], X1.ratio)
return(outputarray)
}
}
}
}
for (j in 1 : length(M$x)){
pars <- c(r = 0.1, # growth rate
K = 1e9, # carry capacity
c = M$x[j], # cost
N = M$y[j]) # N = b*i*ro*mu
solve.output <- solveGTAeq(pars)
array[j,] <- c(solve.output, M$x[j], M$y[j])
print(j)
}
write.table(array, file = outputdir, quote = F, row.names = F, sep = "\t")
|
library(lubridateExtras)
context("Instances Functions")
test_that("yesterday", {
expect_equal(yesterday(), Sys.Date() - 1)
})
test_that("tomorrow", {
expect_equal(tomorrow(), Sys.Date() + 1)
})
test_that("days_ago", {
expect_equal(days_ago(0), Sys.Date())
expect_equal(days_ago(1), yesterday())
expect_equal(days_ago(2), Sys.Date() - 2)
expect_equal(days_ago(7), Sys.Date() - 7)
expect_equal(days_ago(-1), tomorrow())
})
test_that("days_hence", {
expect_equal(days_hence(0), Sys.Date())
expect_equal(days_hence(1), tomorrow())
expect_equal(days_hence(2), Sys.Date() + 2)
expect_equal(days_hence(7), Sys.Date() + 7)
expect_equal(days_hence(-1), yesterday())
})
test_that("this_month", {
expect_equal(this_month(), as.Date(strftime(Sys.Date(), format = "%Y-%m-01")))
})
test_that("last_month", {
expect_equal(last_month(), this_month() - months(1))
})
test_that("next_month", {
expect_equal(next_month(), this_month() + months(1))
})
test_that("this_week", {
expect_equal(this_week(), floor_date(today(), unit = "week"))
})
test_that("last_week", {
expect_equal(last_week(), this_week() - weeks(1))
})
test_that("next_week", {
expect_equal(next_week(), this_week() + weeks(1))
})
test_that("is.weekday", {
expect_equal(is.weekday("2017-10-23"), TRUE)
expect_equal(is.weekday("2017-10-22"), FALSE)
})
test_that("is.weekend", {
expect_equal(is.weekend("2017-10-23"), FALSE)
expect_equal(is.weekend("2017-10-22"), TRUE)
})
test_that("hms", {
expect_equal(
hms("2017-10-22 15:01:00"),
hms::hms(seconds = 0, minutes = 1, hours = 15)
)
expect_equal(
hms("2017-10-22 16:06:59"),
hms::hms(seconds = 59, minutes = 6, hours = 16)
)
})
| /tests/testthat/test_instances.R | permissive | j450h1/lubridateExtras | R | false | false | 1,704 | r | library(lubridateExtras)
context("Instances Functions")
test_that("yesterday", {
expect_equal(yesterday(), Sys.Date() - 1)
})
test_that("tomorrow", {
expect_equal(tomorrow(), Sys.Date() + 1)
})
test_that("days_ago", {
expect_equal(days_ago(0), Sys.Date())
expect_equal(days_ago(1), yesterday())
expect_equal(days_ago(2), Sys.Date() - 2)
expect_equal(days_ago(7), Sys.Date() - 7)
expect_equal(days_ago(-1), tomorrow())
})
test_that("days_hence", {
expect_equal(days_hence(0), Sys.Date())
expect_equal(days_hence(1), tomorrow())
expect_equal(days_hence(2), Sys.Date() + 2)
expect_equal(days_hence(7), Sys.Date() + 7)
expect_equal(days_hence(-1), yesterday())
})
test_that("this_month", {
expect_equal(this_month(), as.Date(strftime(Sys.Date(), format = "%Y-%m-01")))
})
test_that("last_month", {
expect_equal(last_month(), this_month() - months(1))
})
test_that("next_month", {
expect_equal(next_month(), this_month() + months(1))
})
test_that("this_week", {
expect_equal(this_week(), floor_date(today(), unit = "week"))
})
test_that("last_week", {
expect_equal(last_week(), this_week() - weeks(1))
})
test_that("next_week", {
expect_equal(next_week(), this_week() + weeks(1))
})
test_that("is.weekday", {
expect_equal(is.weekday("2017-10-23"), TRUE)
expect_equal(is.weekday("2017-10-22"), FALSE)
})
test_that("is.weekend", {
expect_equal(is.weekend("2017-10-23"), FALSE)
expect_equal(is.weekend("2017-10-22"), TRUE)
})
test_that("hms", {
expect_equal(
hms("2017-10-22 15:01:00"),
hms::hms(seconds = 0, minutes = 1, hours = 15)
)
expect_equal(
hms("2017-10-22 16:06:59"),
hms::hms(seconds = 59, minutes = 6, hours = 16)
)
})
|
help(package="rpart")
library(caret): confusionMatrix()
library(Metrics)
#ce( classificatino error),rmse
# auc: use prob, real numerical prediction and actual data has to be 1 and 0
#regression; method is anova
> auc_bag
[1] 0.7809724
auc_bag_caret
[1] 0.7762389
auc_bag
[1] 0.7809724
> auc(actual = ifelse(credit_test$default == "yes", 1, 0),
+ predicted = pred_tree_gini1[,"yes"])
auc_randomForest
0.803
auc_randomForest_update1#(tuning based on oob err, not on test sample)
[1] 0.6214353
auc_GBM
.78
rint(paste0("Test set AUC (OOB): ", auc_GBM_OOB))
[1] "Test set AUC (OOB): 0.78184899485741"
> print(paste0("Test set AUC (CV): ", auc_GBM_CV ))
[1] "Test set AUC (CV): 0.785764375876578"
sprintf("Decision Tree Test AUC: %.3f", dt_auc)
[1] "Decision Tree Test AUC: 0.627"
> sprintf("Bagged Trees Test AUC: %.3f", bag_auc)
[1] "Bagged Trees Test AUC: 0.781"
> sprintf("Random Forest Test AUC: %.3f", rf_auc)
[1] "Random Forest Test AUC: 0.804"
> sprintf("GBM Test AUC: %.3f", gbm_auc)
[1] "GBM Test AUC: 0.786" | /GBM/Intro Notes.R | no_license | li42125msa/R-projects | R | false | false | 1,060 | r | help(package="rpart")
library(caret): confusionMatrix()
library(Metrics)
#ce( classificatino error),rmse
# auc: use prob, real numerical prediction and actual data has to be 1 and 0
#regression; method is anova
> auc_bag
[1] 0.7809724
auc_bag_caret
[1] 0.7762389
auc_bag
[1] 0.7809724
> auc(actual = ifelse(credit_test$default == "yes", 1, 0),
+ predicted = pred_tree_gini1[,"yes"])
auc_randomForest
0.803
auc_randomForest_update1#(tuning based on oob err, not on test sample)
[1] 0.6214353
auc_GBM
.78
rint(paste0("Test set AUC (OOB): ", auc_GBM_OOB))
[1] "Test set AUC (OOB): 0.78184899485741"
> print(paste0("Test set AUC (CV): ", auc_GBM_CV ))
[1] "Test set AUC (CV): 0.785764375876578"
sprintf("Decision Tree Test AUC: %.3f", dt_auc)
[1] "Decision Tree Test AUC: 0.627"
> sprintf("Bagged Trees Test AUC: %.3f", bag_auc)
[1] "Bagged Trees Test AUC: 0.781"
> sprintf("Random Forest Test AUC: %.3f", rf_auc)
[1] "Random Forest Test AUC: 0.804"
> sprintf("GBM Test AUC: %.3f", gbm_auc)
[1] "GBM Test AUC: 0.786" |
testlist <- list(G = numeric(0), Rn = numeric(0), atmp = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), ra = numeric(0), relh = -1.72131968218895e+83, rs = numeric(0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, -3.54547898634432e+215, 2.16562581831091e+161))
result <- do.call(meteor:::ET0_PenmanMonteith,testlist)
str(result) | /meteor/inst/testfiles/ET0_PenmanMonteith/AFL_ET0_PenmanMonteith/ET0_PenmanMonteith_valgrind_files/1615839142-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 829 | r | testlist <- list(G = numeric(0), Rn = numeric(0), atmp = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), ra = numeric(0), relh = -1.72131968218895e+83, rs = numeric(0), temp = c(8.5728629954997e-312, 1.56898424065867e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, -3.54547898634432e+215, 2.16562581831091e+161))
result <- do.call(meteor:::ET0_PenmanMonteith,testlist)
str(result) |
# generate_figure3.R
#
# Author: Bob Verity
# Date: 2019-12-17
#
# Purpose:
# Produce maps of first few principal components.
#
# ------------------------------------------------------------------
# NOTE - uncomment these lines to install packages as needed
#devtools::install_github("mrc-ide/MIPAnalyzer", ref = "version1.0.0")
#devtools::install_github("bobverity/bobfunctions2", ref = "version1.0.0")
# load packages
library(devtools)
library(MIPanalyzer)
library(tidyr)
library(bobfunctions2)
library(rworldmap)
# ------------------------------------------------------------------
# read in raw data
dat <- readRDS("source_data/biallelic_distances.rds")
# PCA on WSAFs
wsaf_impute <- get_wsaf(dat, impute = TRUE, FUN = mean)
pca <- pca_wsaf(wsaf_impute)
# find which points belong in which spatial clusters
dat$samples$lonlat <- paste(dat$samples$long, dat$samples$lat, sep = ":")
cluster_index <- match(dat$samples$lonlat, unique(dat$samples$lonlat))
first_index <- mapply(function(x) which(cluster_index == x)[1], 1:max(cluster_index))
# get mean PC value per cluster
pc_mean <- mapply(function(y) {
mapply(mean, split(y, f = cluster_index))
}, split(pca$x[,1:4], f = col(pca$x[,1:4])))
colnames(pc_mean) <- c("a) PC1", "b) PC2", "c) PC3", "d) PC4")
# make plotting dataframe
plot_df <- subset(dat$samples, select = c("long", "lat"))[first_index,]
plot_df <- cbind(plot_df, pc_mean)
# get into long format for facetted plot
plot_df_long <- tidyr::gather(data = plot_df, key = component, value = value, 3:6, factor_key = TRUE)
# -----------------------------------
# PLOTTING
# define plotting parameters
col_country <- grey(0.95)
col_country_border <- grey(0.5)
size_country_border <- 0.5
col_sea <- grey(1.0)
resolution <- "coarse"
col_limits <- c(-4,4)
point_size <- 2.5
stroke_col <- grey(0.5)
stroke_size <- 0.25
col_vec <- rev(colorRampPalette(bobfunctions2::col_tim())(100))
# load country shapefiles
world_map <- getMap(resolution = resolution)
# basic map plot
plot_base <- ggplot() + theme_bw() + theme(panel.background = element_rect(fill = col_sea),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
strip.background = element_blank(),
strip.text = element_text(angle = 0, hjust = 0, size = 12))
# add country borders
plot_base <- plot_base + geom_polygon(aes(long, lat, group = group),
size = size_country_border, color = col_country_border,
fill = col_country, data = world_map)
# make separate inset plots
inset_list <- list()
for (i in 1:4) {
# get colours for this principal component
plot_df$col <- plot_df[,2+i]
# create inset plot from base
inset_list[[i]] <- plot_base + coord_cartesian(xlim = c(-5, 3), ylim = c(4,11)) +
geom_point(aes(x = long, y = lat, fill = col), color = stroke_col, shape = 21,
stroke = stroke_size, size = point_size, data = plot_df) +
scale_fill_gradientn(colours = col_vec, limits = col_limits, guide = FALSE) +
theme(axis.title.x = element_blank(), axis.title.y = element_blank()) +
theme(plot.margin = grid::unit(c(1,1,0,0), "mm"))
}
# create main facetted plot
plot1 <- plot_base + coord_cartesian(xlim = c(5, 33), ylim = c(-13,8)) +
geom_point(aes(x = long, y = lat, fill = value), color = stroke_col, shape = 21,
stroke = stroke_size, size = point_size, data = plot_df_long) +
scale_fill_gradientn(colours = col_vec, name = "PC value", limits = col_limits) +
xlab("longitude") + ylab("latitude") +
facet_wrap(~component)
# add rect for inset
plot_combined <- plot1 + geom_rect(aes(xmin = x0, xmax = x1, ymin = y0, ymax = y1), color = "black",
fill = NA, data = data.frame(x0 = 4, x1 = 16, y0 = -1, y1 = 8.6))
# add inset plots
for (i in 1:4) {
plot_combined <- plot_combined + gg_inset(ggplotGrob(inset_list[[i]]),
data = data.frame(component = names(plot_df)[2+i]),
xmin = 4, xmax = 16, ymin = -1, ymax = 8.6)
}
# save to file
ggsave("figure3_PCA_maps/figure3_PCA_maps.pdf", plot = plot_combined, device = "pdf", width = 9, height = 7)
ggsave("figure3_PCA_maps/figure3_PCA_maps.png", plot = plot_combined, device = "png", width = 9, height = 7, dpi = 100)
| /figure3_PCA_maps/generate_figure3.R | no_license | OJWatson/antimalarial_resistance_DRC | R | false | false | 4,498 | r |
# generate_figure3.R
#
# Author: Bob Verity
# Date: 2019-12-17
#
# Purpose:
# Produce maps of first few principal components.
#
# ------------------------------------------------------------------
# NOTE - uncomment these lines to install packages as needed
#devtools::install_github("mrc-ide/MIPAnalyzer", ref = "version1.0.0")
#devtools::install_github("bobverity/bobfunctions2", ref = "version1.0.0")
# load packages
library(devtools)
library(MIPanalyzer)
library(tidyr)
library(bobfunctions2)
library(rworldmap)
# ------------------------------------------------------------------
# read in raw data
dat <- readRDS("source_data/biallelic_distances.rds")
# PCA on WSAFs
wsaf_impute <- get_wsaf(dat, impute = TRUE, FUN = mean)
pca <- pca_wsaf(wsaf_impute)
# find which points belong in which spatial clusters
dat$samples$lonlat <- paste(dat$samples$long, dat$samples$lat, sep = ":")
cluster_index <- match(dat$samples$lonlat, unique(dat$samples$lonlat))
first_index <- mapply(function(x) which(cluster_index == x)[1], 1:max(cluster_index))
# get mean PC value per cluster
pc_mean <- mapply(function(y) {
mapply(mean, split(y, f = cluster_index))
}, split(pca$x[,1:4], f = col(pca$x[,1:4])))
colnames(pc_mean) <- c("a) PC1", "b) PC2", "c) PC3", "d) PC4")
# make plotting dataframe
plot_df <- subset(dat$samples, select = c("long", "lat"))[first_index,]
plot_df <- cbind(plot_df, pc_mean)
# get into long format for facetted plot
plot_df_long <- tidyr::gather(data = plot_df, key = component, value = value, 3:6, factor_key = TRUE)
# -----------------------------------
# PLOTTING
# define plotting parameters
col_country <- grey(0.95)
col_country_border <- grey(0.5)
size_country_border <- 0.5
col_sea <- grey(1.0)
resolution <- "coarse"
col_limits <- c(-4,4)
point_size <- 2.5
stroke_col <- grey(0.5)
stroke_size <- 0.25
col_vec <- rev(colorRampPalette(bobfunctions2::col_tim())(100))
# load country shapefiles
world_map <- getMap(resolution = resolution)
# basic map plot
plot_base <- ggplot() + theme_bw() + theme(panel.background = element_rect(fill = col_sea),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
strip.background = element_blank(),
strip.text = element_text(angle = 0, hjust = 0, size = 12))
# add country borders
plot_base <- plot_base + geom_polygon(aes(long, lat, group = group),
size = size_country_border, color = col_country_border,
fill = col_country, data = world_map)
# make separate inset plots
inset_list <- list()
for (i in 1:4) {
# get colours for this principal component
plot_df$col <- plot_df[,2+i]
# create inset plot from base
inset_list[[i]] <- plot_base + coord_cartesian(xlim = c(-5, 3), ylim = c(4,11)) +
geom_point(aes(x = long, y = lat, fill = col), color = stroke_col, shape = 21,
stroke = stroke_size, size = point_size, data = plot_df) +
scale_fill_gradientn(colours = col_vec, limits = col_limits, guide = FALSE) +
theme(axis.title.x = element_blank(), axis.title.y = element_blank()) +
theme(plot.margin = grid::unit(c(1,1,0,0), "mm"))
}
# create main facetted plot
plot1 <- plot_base + coord_cartesian(xlim = c(5, 33), ylim = c(-13,8)) +
geom_point(aes(x = long, y = lat, fill = value), color = stroke_col, shape = 21,
stroke = stroke_size, size = point_size, data = plot_df_long) +
scale_fill_gradientn(colours = col_vec, name = "PC value", limits = col_limits) +
xlab("longitude") + ylab("latitude") +
facet_wrap(~component)
# add rect for inset
plot_combined <- plot1 + geom_rect(aes(xmin = x0, xmax = x1, ymin = y0, ymax = y1), color = "black",
fill = NA, data = data.frame(x0 = 4, x1 = 16, y0 = -1, y1 = 8.6))
# add inset plots
for (i in 1:4) {
plot_combined <- plot_combined + gg_inset(ggplotGrob(inset_list[[i]]),
data = data.frame(component = names(plot_df)[2+i]),
xmin = 4, xmax = 16, ymin = -1, ymax = 8.6)
}
# save to file
ggsave("figure3_PCA_maps/figure3_PCA_maps.pdf", plot = plot_combined, device = "pdf", width = 9, height = 7)
ggsave("figure3_PCA_maps/figure3_PCA_maps.png", plot = plot_combined, device = "png", width = 9, height = 7, dpi = 100)
|
# Load data and caluclate life tables -------------------------------------
# Load and organize data
Nola<-read.csv('data/Cemetery_Partial_NOLA_Data.csv')
NolaF<-subset.data.frame(Nola,Nola$Sex=='Female') #grab all of the females
NolaM<-subset.data.frame(Nola,Nola$Sex=='Male') #and grab all the males
#Paramiterize the life tables
#calculate nx
NolaF$nx<-rep(NA) #create an empty placeholder for the data
NolaF$nx[1]<-sum(NolaF$dx) # the first value is easy
for(i in 2:(nrow(NolaF))){ #loop over each row
NolaF$nx[i]<-NolaF$nx[i-1]-NolaF$dx[i-1] #calculate nx
}
NolaM$nx<-rep(NA) #create an empty placeholder for the data
NolaM$nx[1]<-sum(NolaM$dx) # the first value is easy
for(i in 2:(nrow(NolaM))){ #loop over each row
NolaM$nx[i]<-NolaM$nx[i-1]-NolaM$dx[i-1] #calculate nx
}
#calculate lx and qx
NolaF$lx<-NolaF$nx/sum(NolaF$dx) #insert the formula to calculate lx
NolaM$lx<-NolaM$nx/sum(NolaM$dx)
NolaF$qx<-NolaF$dx/NolaF$nx #insert the formula for qx
NolaM$qx<-NolaM$dx/NolaM$nx
#Check how they turned out
NolaF
NolaM
# Generate curves ---------------------------------------------------------
#Suvivorship curve
plot(lx~AgeClass,data=NolaM, #plot the data
type='o',col='blue', #make it be blue connected points
ylim=c(0.01,1),log='y', #log transform the y axis
ylab='Survivalship (lx)',xlab='Age class') #change the axis labels
points(lx~AgeClass,data=NolaF,type='o',col='red') #plot females
legend('bottomleft',legend=c('Male','Female'),lty=1,pch=1,col=c('blue','red')) #add legend
#Mortality curve
plot(qx~AgeClass,data=NolaM,type='o',col='blue',xlab='Age class',ylab='Mortality (qx)')
points(qx~AgeClass,data=NolaF,type='o',col='red')
legend('topleft',legend=c('Male','Female'),lty=1,pch=1,col=c('blue','red'))
# Statistcal test ---------------------------------------------------------
#First creat a vector with our new age categories
Bins<-c(0,20,40,60,199)
#Next is the function for putting the data into new bins
Rebin<-function(NewBin,OldBin,Freq){
NewFreq<-rep(NA,length(NewBin)) #create a placehold for the new frequencies
for(i in 1:(length(NewBin)-1)){ #loop over each bin of the old data
sub<-subset(Freq,OldBin>=NewBin[i] & OldBin<NewBin[i+1]) #decide wheather it gets added
NewFreq[i]<-sum(sub) #add the ones that met the rule
}
return(NewFreq)
}
#Now, use the function to create a table of observed frequencies
Observed<-data.frame(Female=Rebin(Bins,NolaF$AgeClass,NolaF$dx),
Male=Rebin(Bins,NolaM$AgeClass,NolaM$dx))
rownames(Observed)<-Bins
Observed<-Observed[-5,]
#now run the test
Chisq<-chisq.test(Observed)
| /CemeteryScript_Completed.R | no_license | cmKent/Tulane-General-Ecology-Lab | R | false | false | 2,616 | r |
# Load data and caluclate life tables -------------------------------------
# Load and organize data
Nola<-read.csv('data/Cemetery_Partial_NOLA_Data.csv')
NolaF<-subset.data.frame(Nola,Nola$Sex=='Female') #grab all of the females
NolaM<-subset.data.frame(Nola,Nola$Sex=='Male') #and grab all the males
#Paramiterize the life tables
#calculate nx
NolaF$nx<-rep(NA) #create an empty placeholder for the data
NolaF$nx[1]<-sum(NolaF$dx) # the first value is easy
for(i in 2:(nrow(NolaF))){ #loop over each row
NolaF$nx[i]<-NolaF$nx[i-1]-NolaF$dx[i-1] #calculate nx
}
NolaM$nx<-rep(NA) #create an empty placeholder for the data
NolaM$nx[1]<-sum(NolaM$dx) # the first value is easy
for(i in 2:(nrow(NolaM))){ #loop over each row
NolaM$nx[i]<-NolaM$nx[i-1]-NolaM$dx[i-1] #calculate nx
}
#calculate lx and qx
NolaF$lx<-NolaF$nx/sum(NolaF$dx) #insert the formula to calculate lx
NolaM$lx<-NolaM$nx/sum(NolaM$dx)
NolaF$qx<-NolaF$dx/NolaF$nx #insert the formula for qx
NolaM$qx<-NolaM$dx/NolaM$nx
#Check how they turned out
NolaF
NolaM
# Generate curves ---------------------------------------------------------
#Suvivorship curve
plot(lx~AgeClass,data=NolaM, #plot the data
type='o',col='blue', #make it be blue connected points
ylim=c(0.01,1),log='y', #log transform the y axis
ylab='Survivalship (lx)',xlab='Age class') #change the axis labels
points(lx~AgeClass,data=NolaF,type='o',col='red') #plot females
legend('bottomleft',legend=c('Male','Female'),lty=1,pch=1,col=c('blue','red')) #add legend
#Mortality curve
plot(qx~AgeClass,data=NolaM,type='o',col='blue',xlab='Age class',ylab='Mortality (qx)')
points(qx~AgeClass,data=NolaF,type='o',col='red')
legend('topleft',legend=c('Male','Female'),lty=1,pch=1,col=c('blue','red'))
# Statistcal test ---------------------------------------------------------
#First creat a vector with our new age categories
Bins<-c(0,20,40,60,199)
#Next is the function for putting the data into new bins
Rebin<-function(NewBin,OldBin,Freq){
NewFreq<-rep(NA,length(NewBin)) #create a placehold for the new frequencies
for(i in 1:(length(NewBin)-1)){ #loop over each bin of the old data
sub<-subset(Freq,OldBin>=NewBin[i] & OldBin<NewBin[i+1]) #decide wheather it gets added
NewFreq[i]<-sum(sub) #add the ones that met the rule
}
return(NewFreq)
}
#Now, use the function to create a table of observed frequencies
Observed<-data.frame(Female=Rebin(Bins,NolaF$AgeClass,NolaF$dx),
Male=Rebin(Bins,NolaM$AgeClass,NolaM$dx))
rownames(Observed)<-Bins
Observed<-Observed[-5,]
#now run the test
Chisq<-chisq.test(Observed)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GRanges_helpers.R
\name{groupGRangesBy}
\alias{groupGRangesBy}
\title{make GRangesList from GRanges,}
\usage{
groupGRangesBy(gr, other = NULL)
}
\arguments{
\item{gr}{a GRanges object}
\item{other}{a vector of names to group, no 2 groups can have same name}
}
\value{
a GRangesList named after names(Granges) if other is NULL, else
names are from unique(other)
}
\description{
Grouped by names or another column(other)
ig. if GRanges should be grouped by gene,
give gene column as other
}
\examples{
ORFranges <- GRanges(seqnames = Rle(rep("1", 3)),
ranges = IRanges(start = c(1, 10, 20),
end = c(5, 15, 25)),
strand = "+")
ORFranges2 <- GRanges("1",
ranges = IRanges(start = c(20, 30, 40),
end = c(25, 35, 45)),
strand = "+")
names(ORFranges) = rep("tx1_1",3)
names(ORFranges2) = rep("tx1_2",3)
grl <- GRangesList(tx1_1 = ORFranges, tx1_2 = ORFranges2)
gr <- unlist(grl, use.names = FALSE)
## now recreate the grl
## group by orf
grltest <- groupGRangesBy(gr) # using the names to group
identical(grl, grltest) ## they are identical
## group by transcript
names(gr) <- OrfToTxNames(gr)
grltest <- groupGRangesBy(gr)
identical(grl, grltest) ## they are not identical
}
| /man/groupGRangesBy.Rd | permissive | katchyz/ORFik | R | false | true | 1,366 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GRanges_helpers.R
\name{groupGRangesBy}
\alias{groupGRangesBy}
\title{make GRangesList from GRanges,}
\usage{
groupGRangesBy(gr, other = NULL)
}
\arguments{
\item{gr}{a GRanges object}
\item{other}{a vector of names to group, no 2 groups can have same name}
}
\value{
a GRangesList named after names(Granges) if other is NULL, else
names are from unique(other)
}
\description{
Grouped by names or another column(other)
ig. if GRanges should be grouped by gene,
give gene column as other
}
\examples{
ORFranges <- GRanges(seqnames = Rle(rep("1", 3)),
ranges = IRanges(start = c(1, 10, 20),
end = c(5, 15, 25)),
strand = "+")
ORFranges2 <- GRanges("1",
ranges = IRanges(start = c(20, 30, 40),
end = c(25, 35, 45)),
strand = "+")
names(ORFranges) = rep("tx1_1",3)
names(ORFranges2) = rep("tx1_2",3)
grl <- GRangesList(tx1_1 = ORFranges, tx1_2 = ORFranges2)
gr <- unlist(grl, use.names = FALSE)
## now recreate the grl
## group by orf
grltest <- groupGRangesBy(gr) # using the names to group
identical(grl, grltest) ## they are identical
## group by transcript
names(gr) <- OrfToTxNames(gr)
grltest <- groupGRangesBy(gr)
identical(grl, grltest) ## they are not identical
}
|
\name{tidedata}
\docType{data}
\alias{tidedata}
\title{Tidal constituent information}
\description{The \code{tidedata} dataset contains Tide-constituent information
that is use by \code{\link{tidem}} to fit tidal models.
\code{tidedata} is a list containing
\describe{
\item{const}{a list containing vectors
\describe{
\item{\code{name}}{a string with constituent name}
\item{\code{freq}}{the frequency, in cycles per hour}
\item{\code{kmpr}}{a string naming the comparison constituent, blank if there is none}
\item{\code{ikmpr}}{index of comparison constituent, or \code{0} if there is none}
\item{\code{df}}{frequency difference betwee constituent and its comparison, used in the Rayleigh criterion}
\item{\code{d1}}{first Doodson number}
\item{\code{d2}}{second Doodson number}
\item{\code{d3}}{third Doodson number}
\item{\code{d4}}{fourth Doodson number}
\item{\code{d5}}{fifth Doodson number}
\item{\code{d6}}{sixth Doodson number}
\item{\code{semi}}{(fill in some info later)}
\item{\code{nsat}}{number of satellite constituents}
\item{\code{ishallow}}{(fill in some info later)}
\item{\code{nshallow}}{(fill in some info later)}
\item{\code{doodsonamp}}{(fill in some info later)}
\item{\code{doodsonspecies}}{(fill in some info later)}
}
}
\item{\code{sat}}{a list containing vectors
\describe{
\item{\code{deldood}}{(fill in some info later)}
\item{\code{phcorr}}{(fill in some info later)}
\item{\code{amprat}}{(fill in some info later)}
\item{\code{ilatfac}}{(fill in some info later)}
\item{\code{iconst}}{(fill in some info later)}
}
}
\item{\code{shallow}}{a list containing vectors
\describe{
\item{\code{iconst}}{(fill in some info later)}
\item{\code{coef}}{(fill in some info later)}
\item{\code{iname}}{(fill in some info later)}
}
}
}
Apart from the use of \code{d1} through \code{d6}, the naming and
content follows \code{T_TIDE}. All of this is based on Foreman
(1977), to which the reader is referred for details.
}
\usage{data(tidedata)}
\source{The data come from the \code{tide3.dat} file of the
\code{T_TIDE} package (Pawlowicz et al., 2002), and derive from
Appendices provided by Foreman (1977). The data are scanned using
\file{tests/tide.R} in this package, which also performs some tests using
\code{T_TIDE} values as a reference.
}
\seealso{\code{\link{tidem}} uses this dataset to establish its harmonic
constituents.}
\references{Foreman, M. G. G., 1977. Manual for tidal heights analysis and
prediction. Pacific Marine Science Report 77-10, Institute of Ocean
Sciences, Patricia Bay, Sidney, BC, 58pp.
Pawlowicz, Rich, Bob Beardsley, and Steve Lentz, 2002. Classical tidal
harmonic analysis including error estimates in MATLAB using \code{T_TIDE}.
Computers and Geosciences, 28, 929-937. }
\author{Dan Kelley}
\keyword{datasets}
\concept{tide}
| /man/tidedata.Rd | no_license | marie-geissler/oce | R | false | false | 2,933 | rd | \name{tidedata}
\docType{data}
\alias{tidedata}
\title{Tidal constituent information}
\description{The \code{tidedata} dataset contains Tide-constituent information
that is use by \code{\link{tidem}} to fit tidal models.
\code{tidedata} is a list containing
\describe{
\item{const}{a list containing vectors
\describe{
\item{\code{name}}{a string with constituent name}
\item{\code{freq}}{the frequency, in cycles per hour}
\item{\code{kmpr}}{a string naming the comparison constituent, blank if there is none}
\item{\code{ikmpr}}{index of comparison constituent, or \code{0} if there is none}
\item{\code{df}}{frequency difference betwee constituent and its comparison, used in the Rayleigh criterion}
\item{\code{d1}}{first Doodson number}
\item{\code{d2}}{second Doodson number}
\item{\code{d3}}{third Doodson number}
\item{\code{d4}}{fourth Doodson number}
\item{\code{d5}}{fifth Doodson number}
\item{\code{d6}}{sixth Doodson number}
\item{\code{semi}}{(fill in some info later)}
\item{\code{nsat}}{number of satellite constituents}
\item{\code{ishallow}}{(fill in some info later)}
\item{\code{nshallow}}{(fill in some info later)}
\item{\code{doodsonamp}}{(fill in some info later)}
\item{\code{doodsonspecies}}{(fill in some info later)}
}
}
\item{\code{sat}}{a list containing vectors
\describe{
\item{\code{deldood}}{(fill in some info later)}
\item{\code{phcorr}}{(fill in some info later)}
\item{\code{amprat}}{(fill in some info later)}
\item{\code{ilatfac}}{(fill in some info later)}
\item{\code{iconst}}{(fill in some info later)}
}
}
\item{\code{shallow}}{a list containing vectors
\describe{
\item{\code{iconst}}{(fill in some info later)}
\item{\code{coef}}{(fill in some info later)}
\item{\code{iname}}{(fill in some info later)}
}
}
}
Apart from the use of \code{d1} through \code{d6}, the naming and
content follows \code{T_TIDE}. All of this is based on Foreman
(1977), to which the reader is referred for details.
}
\usage{data(tidedata)}
\source{The data come from the \code{tide3.dat} file of the
\code{T_TIDE} package (Pawlowicz et al., 2002), and derive from
Appendices provided by Foreman (1977). The data are scanned using
\file{tests/tide.R} in this package, which also performs some tests using
\code{T_TIDE} values as a reference.
}
\seealso{\code{\link{tidem}} uses this dataset to establish its harmonic
constituents.}
\references{Foreman, M. G. G., 1977. Manual for tidal heights analysis and
prediction. Pacific Marine Science Report 77-10, Institute of Ocean
Sciences, Patricia Bay, Sidney, BC, 58pp.
Pawlowicz, Rich, Bob Beardsley, and Steve Lentz, 2002. Classical tidal
harmonic analysis including error estimates in MATLAB using \code{T_TIDE}.
Computers and Geosciences, 28, 929-937. }
\author{Dan Kelley}
\keyword{datasets}
\concept{tide}
|
rm(list = ls())
# devtools::install_github("poceviciute/AdvRprogr_lab4")
# devtools::install_github("poceviciute/AdvRprogr_lab4", build_vignettes = TRUE)
library(linreg)
linreg_mod <- linreg$new(Petal.Length~Sepal.Width+Sepal.Length, data=iris)
# devtools::test()
# Print
linreg_mod$print()
# Plot
linreg_mod$plot()
# resid
linreg_mod$resid()
# pred
linreg_mod$pred()
# coef
linreg_mod$coef()
# summary
linreg_mod$summary()
formula <- Petal.Length~Sepal.Width+Sepal.Length
data <- iris
a <- lm(formula, data)
summary(a)
print(a)
| /test_code.R | no_license | poceviciute/AdvRprogr_lab4 | R | false | false | 542 | r | rm(list = ls())
# devtools::install_github("poceviciute/AdvRprogr_lab4")
# devtools::install_github("poceviciute/AdvRprogr_lab4", build_vignettes = TRUE)
library(linreg)
linreg_mod <- linreg$new(Petal.Length~Sepal.Width+Sepal.Length, data=iris)
# devtools::test()
# Print
linreg_mod$print()
# Plot
linreg_mod$plot()
# resid
linreg_mod$resid()
# pred
linreg_mod$pred()
# coef
linreg_mod$coef()
# summary
linreg_mod$summary()
formula <- Petal.Length~Sepal.Width+Sepal.Length
data <- iris
a <- lm(formula, data)
summary(a)
print(a)
|
##cache the inverse of a matrix
##This function creats a special matrix that can cache its inverse
makeCacheMatrix<-function(x=matrix()){
inv<-NULL
#set the value of the matrix
set<-function(y){
x<<-y
inv<<-NULL
}
#get the value of matrix
get<-function() x
#set the value of the inverse matrix
setinverse<-function(solve) inv<<-solve
#get the value of the inverse matrix
getinverse<-function() inv
list(set=set, get=get, setinverse=setinverse, getinverse=getinverse)
}
##This function computes the inverse of the special matrix returned by makeCacheMatrix.
cacheSolve<-function(x, ...){
inv<-x$getinverse()
#If the inverse matrix is already calculated
if(!is.null(inv)){
message("getting cached data")
return(inv)
}
#calculate the inverse matrix and sets the value of the mean in the cache
matrix<-x$get()
inv<-solve(matrix)
x$setinverse(inv)
inv
}
#> m<-makeVector(1:10)
#> m$get()
# [1] 1 2 3 4 5 6 7 8 9 10
#> cachemean(m)
#[1] 5.5
#> cachemean(m)
#getting cached data
#[1] 5.5
#x<-matrix(1:4,nrow=2)
#n<-makeCacheMatrix(x)
#n$get()
#cacheSolve(n)
#cacheSolve(n) | /cachematrix.R | no_license | sky0502/ProgrammingAssignment2 | R | false | false | 1,108 | r | ##cache the inverse of a matrix
##This function creats a special matrix that can cache its inverse
makeCacheMatrix<-function(x=matrix()){
inv<-NULL
#set the value of the matrix
set<-function(y){
x<<-y
inv<<-NULL
}
#get the value of matrix
get<-function() x
#set the value of the inverse matrix
setinverse<-function(solve) inv<<-solve
#get the value of the inverse matrix
getinverse<-function() inv
list(set=set, get=get, setinverse=setinverse, getinverse=getinverse)
}
##This function computes the inverse of the special matrix returned by makeCacheMatrix.
cacheSolve<-function(x, ...){
inv<-x$getinverse()
#If the inverse matrix is already calculated
if(!is.null(inv)){
message("getting cached data")
return(inv)
}
#calculate the inverse matrix and sets the value of the mean in the cache
matrix<-x$get()
inv<-solve(matrix)
x$setinverse(inv)
inv
}
#> m<-makeVector(1:10)
#> m$get()
# [1] 1 2 3 4 5 6 7 8 9 10
#> cachemean(m)
#[1] 5.5
#> cachemean(m)
#getting cached data
#[1] 5.5
#x<-matrix(1:4,nrow=2)
#n<-makeCacheMatrix(x)
#n$get()
#cacheSolve(n)
#cacheSolve(n) |
#' Automatic pre-processing
#'
#' This function performs pre-processing on a time series object (ts) to treat
#' heterocedasticity, trend and seasonality in order to make the serie stationary.
#'
#' Returns an object \code{prep} which stores all data needed to undo the changes later on.
#'
#' This function provides an automatic way of pre-processing based on unit root tests, but
#' this is not the perfect way to do it. You should always check manually if
#' the given time serie is actually stationary, and modify the parameters according
#' to your thoughts.
#'
#' @param tserie A ts object.
#' @param homogenize.method A string. Current methods available are "log" and "boxcox". Method "log" is set as default. If you don't want to perform this transformation, set method as "none".
#' @param detrend.method A string. Current methods available are "differencing" and "sfsm". Method "differencing" is set as default. If you don't want to perform this transformation, set method as "none".
#' @param nd A number. Number of differences you want to apply to the "differencing" detrending method. As default its value is NULL, which means nd will be calculated internally.
#' @param deseason.method A string. Current methods available are "differencing". Method "differencing" is set as default. If you don't want to perform this transformation, set method as "none".
#' @param nsd A number. Number of seasonal differences you want to apply to the "differencing" deseasoning method. As default its value is NULL, which means nsd will be calculated internally.
#' @param detrend.first A boolean. TRUE if detrending method is applied first, then deseasoning. FALSE if deseasoning method is applied first. Default is TRUE.
#' @return A list is returned of class \code{prep} containing:
#' \item{tserie}{Processed ts object.}
#' \item{homogenize.method}{Method used for homogenizing.}
#' \item{detrend.method}{Method used for detrending.}
#' \item{nd}{Number of differences used on detrending through differencing.}
#' \item{firstvalues}{First \code{nd} values of the original series.}
#' \item{deseason.method}{Method used for deseasoning.}
#' \item{nsd}{Number of seasonal differences used on deseasoning through differencing.}
#' \item{firstseasons}{First \code{nsd} seasons of the original series.}
#' \item{detrend.first}{Whether detrending was applied before deseasoning.}
#' \item{means}{Vector of means used in "sfsm" detrending method.}
#' \item{lambda}{Coefficient used in "boxcox" transformation.}
#' \item{start}{Start of the original time serie.}
#' \item{length}{Length of the original time serie.}
#' @author Alberto Vico Moreno
#' @seealso{
#' \code{\link{prep.homogenize.log}},
#' \code{\link{prep.homogenize.boxcox}},
#' \code{\link{prep.detrend.differencing}},
#' \code{\link{prep.detrend.sfsm}},
#' \code{\link{prep.deseason.differencing}},
#' \code{\link{prep.check.acf}},
#' \code{\link{prep.check.adf}}
#' }
#' @export
#' @references \url{https://www.otexts.org/fpp/8/1}
#' @examples
#' prep(AirPassengers)
#' prep(AirPassengers,homogenize.method='boxcox',detrend.method='none')
prep <- function(tserie,homogenize.method='log'
,detrend.method='differencing',nd=NULL
,deseason.method='differencing',nsd=NULL
,detrend.first=TRUE){
if(!stats::is.ts(tserie)) stop('Not a ts object')
# a serie with frequency 1 has no seasonal component, so skip deseasoning
if(stats::frequency(tserie) == 1) deseason.method='none'
newts <- tserie
# placeholders; they stay NULL unless the matching method fills them below
means <- NULL
lambda <- NULL
firstvalues <- NULL
firstseasons <- NULL
# remember the original start/length so the transformations can be undone
start <- start(tserie)
length <- length(tserie)
#homogenizing: variance-stabilising transformation applied before de-trending
if(homogenize.method=='log') newts <- prep.homogenize.log(newts)
else if(homogenize.method=='boxcox'){
# prep.homogenize.boxcox returns list(boxcox, lambda); keep lambda to invert later
bc <- prep.homogenize.boxcox(newts)
newts <- bc[[1]]
lambda <- bc[[2]]
}
else if(homogenize.method!='none') stop('Invalid homogenizing method')
#detrending and deseasoning: the two branches below are mirror images,
#differing only in which operation runs first (controlled by detrend.first)
if(detrend.first==FALSE){
if(deseason.method=='differencing'){
# assumed to return list(tserie, nsd, firstseasons) -- matches index access here
dsts <- prep.deseason.differencing(newts,nsd)
# a count of 0 means no seasonal differencing was needed; record that as 'none'
if(dsts[[2]]>0){
newts <- dsts[[1]]
nsd <- dsts[[2]]
firstseasons <- dsts[[3]]
}else deseason.method='none'
}else if(deseason.method!='none') stop('Invalid deseasoning method')
if(detrend.method=='differencing'){
# assumed to return list(tserie, nd, firstvalues) -- matches index access here
dtts <- prep.detrend.differencing(newts,nd)
if(dtts[[2]]>0){
newts <- dtts[[1]]
nd <- dtts[[2]]
firstvalues <- dtts[[3]]
}else detrend.method='none'
}else if(detrend.method=='sfsm'){
# substract-full-serie-mean variant; keeps the removed means for later undo
dtts <- prep.detrend.sfsm(newts)
newts <- dtts[[1]]
means <- dtts[[2]]
}else if(detrend.method!='none') stop('Invalid detrending method')
}else{
if(detrend.method=='differencing'){
dtts <- prep.detrend.differencing(newts,nd)
if(dtts[[2]]>0){
newts <- dtts[[1]]
nd <- dtts[[2]]
firstvalues <- dtts[[3]]
}else detrend.method='none'
}else if(detrend.method=='sfsm'){
dtts <- prep.detrend.sfsm(newts)
newts <- dtts[[1]]
means <- dtts[[2]]
}else if(detrend.method!='none') stop('Invalid detrending method')
if(deseason.method=='differencing'){
dsts <- prep.deseason.differencing(newts,nsd)
if(dsts[[2]]>0){
newts <- dsts[[1]]
nsd <- dsts[[2]]
firstseasons <- dsts[[3]]
}else deseason.method='none'
}else if(deseason.method!='none') stop('Invalid deseasoning method')
}
#creating the object: stores the processed serie plus every parameter
#needed by the inverse (post-processing) step
obj <- list()
class(obj) <- "prep"
obj$tserie <- newts
obj$homogenize.method <- homogenize.method
obj$detrend.method <- detrend.method
obj$nd <- nd
obj$firstvalues <- firstvalues
obj$deseason.method <- deseason.method
obj$nsd <- nsd
obj$firstseasons <- firstseasons
obj$detrend.first <- detrend.first
obj$means <- means
obj$lambda <- lambda
obj$start <- start
obj$length <- length
return (obj)
}
#' Logarithmic transformation
#'
#' Performs a logarithmic transformation to a time serie.
#'
#' @param tserie a \code{ts} object
#' @return \code{ts} object with transformed time serie
#' @export
#' @examples
#' prep.homogenize.log(AirPassengers)
prep.homogenize.log <- function(tserie){
  # Guard clause: only ts objects are accepted.
  if (!stats::is.ts(tserie)) {
    stop('Not a ts object')
  }
  log(tserie)
}
#' Box-Cox transformation
#'
#' Performs a Box-Cox transformation to a time serie.
#'
#' The lambda coefficient is estimated automatically through
#' \code{forecast::BoxCox.lambda} and returned so the transformation can be
#' inverted later.
#'
#' @param tserie a \code{ts} object
#' @return A list is returned containing:
#' \item{boxcox}{Transformed ts object.}
#' \item{lambda}{Lambda value.}
#' @references Box-Cox transformation: \url{https://en.wikipedia.org/wiki/Power_transform#Box.E2.80.93Cox_transformation}
#' @export
#' @examples
#' prep.homogenize.boxcox(AirPassengers)
prep.homogenize.boxcox <- function(tserie){
  if(!stats::is.ts(tserie)) stop('Not a ts object')
  # Estimate the optimal lambda first, then transform with it.
  lambda <- forecast::BoxCox.lambda(tserie)
  return (list(boxcox=forecast::BoxCox(tserie,lambda),lambda=lambda))
}
#' Detrend with differencing method
#'
#' Performs differencing with lag=1.
#'
#' If no number of differences is specified, the function will make an estimation
#' of the number of differences needed based on unit root test provided by \code{forecast::ndiffs}
#'
#' @param tserie a \code{ts} object
#' @param nd number of differences to apply. As default its value is NULL; in this case, the function will perform an automatic estimation of \code{nd}.
#' @return A list is returned containing:
#' \item{tserie}{Transformed ts object.}
#' \item{nd}{Number of differencies applied.}
#' \item{firstvalues}{Lost values after differencing.}
#' @export
#' @examples
#' prep.detrend.differencing(AirPassengers)
#' prep.detrend.differencing(AirPassengers,nd=2)
prep.detrend.differencing <- function(tserie,nd=NULL){
  if(!stats::is.ts(tserie)) stop('Not a ts object')
  firstvalues <- NULL
  if(is.null(nd)){
    # Estimate the number of differences with a unit root test.
    nd <- forecast::ndiffs(tserie)
  }
  # Note: the original user-supplied branch returned a misspelled list name
  # ("firsvalues") and crashed for nd=0 (diff requires differences >= 1);
  # both paths are unified and fixed here.
  if(nd > 0){
    # Keep the nd values lost by differencing so the step can be undone.
    firstvalues <- tserie[1:nd]
    tserie <- diff(tserie,differences=nd)
  }
  return (list(tserie=tserie,nd=nd,firstvalues=firstvalues))
}
#' Detrend with "substracting full-season means" method
#'
#' Performs "substracting full-season means" method to go for a totally automatic
#' approach.
#'
#' Under this detrending scheme, a series is first split into segments. The length
#' of the segments is equal to the length of seasonality(12 for monthly).
#' The mean of the historical observations within each of these segments is substacted
#' from every historical observation in the segment.
#' To get the detrended serie we do:
#' \code{ds = xi - m}
#' Being \code{xi} the actual values on the time series and \code{m} the mean of the segment of \code{xi}
#'
#' @param tserie a \code{ts} object
#' @return A list is returned containing:
#' \item{tserie}{Transformed ts object.}
#' \item{means}{Vector containing the historical means.}
#' @export
#' @examples
#' prep.detrend.sfsm(AirPassengers)
prep.detrend.sfsm <- function(tserie){
  if(!stats::is.ts(tserie)) stop('Not a ts object')
  first <- start(tserie)[2] #first observation index of the time serie
  index <- 1 #general index
  means <- NULL #means vector
  cont <- TRUE
  # First pass: walk the series segment by segment and record each segment's
  # mean. The test (index+first-2) %% frequency == 0 detects a season
  # boundary even when the series does not start at period 1, so the first
  # and last segments may be partial seasons.
  while(cont){
    aux <- NULL
    while(index <= length(tserie)){ #values of a season
      aux <- c(aux,tserie[index])
      index <- index + 1
      if((index+first-2) %% frequency(tserie) == 0) break
    }
    means <- c(means,mean(aux)) #create means vector
    if(index > length(tserie)) cont <- FALSE
  }
  # Second pass: subtract the matching segment mean from every observation
  # (ds = xi - m), advancing mindex at the same season boundaries as above.
  mindex <- 1
  index <- 1
  while(index <= length(tserie)){
    tserie[index] <- tserie[index]-means[mindex]
    index <- index+1
    if((index+first-2) %% frequency(tserie) == 0) mindex <- mindex+1
  }
  return (list(tserie=tserie,means=means))
}
#' Deseason with differencing method
#'
#' Performs differencing with lag=frequency.
#'
#' If no number of differences is specified, the function will make an estimation
#' of the number of differences needed based on unit root test provided by \code{forecast::nsdiffs}
#'
#' @param tserie a \code{ts} object
#' @param nsd number of seasonal differences to apply. As default its value is NULL; in this case, the function will perform an automatic estimation of \code{nsd}.
#' @return A list is returned containing:
#' \item{tserie}{Transformed ts object.}
#' \item{nsd}{Number of seasonal differencies applied.}
#' \item{firstseasons}{Lost values after differencing.}
#' @export
#' @examples
#' prep.deseason.differencing(AirPassengers)
#' prep.deseason.differencing(AirPassengers,nsd=2)
prep.deseason.differencing <- function(tserie, nsd=NULL){
  if(!stats::is.ts(tserie)) stop('Not a ts object')
  firstseasons <- NULL
  if(is.null(nsd)){
    # Estimate the number of seasonal differences with a unit root test.
    nsd <- forecast::nsdiffs(tserie)
  }
  # Note: the original user-supplied branch crashed for nsd=0 because
  # diff() requires differences >= 1; both paths are unified and fixed here.
  if(nsd > 0){
    # Keep the full seasons lost by differencing so the step can be undone.
    firstseasons <- tserie[1:(frequency(tserie)*nsd)]
    tserie <- diff(tserie,lag=frequency(tserie),differences=nsd)
  }
  return (list(tserie=tserie,nsd=nsd,firstseasons=firstseasons))
}
#' Autocorrelation function
#'
#' Plots the autocorrelation function to check stationarity
#'
#' For a stationary time series, the ACF will drop to zero
#' relatively quickly, while the ACF of non-stationary data decreases slowly.
#' Also, for non-stationary data, the value is often large and positive.
#' @param tserie a \code{ts} or a \code{prep} object
#' @export
#' @examples
#' prep.check.acf(AirPassengers)
#' prep.check.acf(prep(AirPassengers))
prep.check.acf <- function(tserie){
  # inherits() replaces class(x)=='prep': class() can return a vector of
  # length > 1, which breaks the original comparison inside if().
  if(stats::is.ts(tserie)) stats::acf(tserie)
  else if(inherits(tserie, 'prep')) stats::acf(tserie$tserie)
  else stop('Not a ts/prep object')
}
#' Augmented Dickey-Fuller test
#'
#' Performs ADF test just as another tool to check stationarity.
#'
#' Shows the results of an ADF test. A p-value<0.05 suggests the data is stationary.
#'
#' @param tserie a \code{ts} or a \code{prep} object
#' @export
#' @examples
#' prep.check.adf(AirPassengers)
#' prep.check.adf(prep(AirPassengers))
prep.check.adf <- function(tserie){
  # inherits() replaces class(x)=='prep': class() can return a vector of
  # length > 1, which breaks the original comparison inside if().
  if(stats::is.ts(tserie)) return (tseries::adf.test(tserie,alternative="stationary"))
  else if(inherits(tserie, 'prep')) return (tseries::adf.test(tserie$tserie,alternative="stationary"))
  else stop('Not a ts/prep object')
}
#generic functions
#' Generic function
#'
#' Plots object prep
#' @param x \code{prep} object
#' @param ylab ylab
#' @param xlab xlab
#' @param ... ignored
#' @export
#' @examples
#' plot(prep(AirPassengers),ylab="Stationary AisPassengers")
plot.prep <- function(x,ylab="Preprocessed time serie",xlab="",...){
  # Delegate to the base plot method of the stored (preprocessed) ts object.
  preprocessed <- x$tserie
  graphics::plot(preprocessed, ylab = ylab, xlab = xlab)
}
#' Generic function
#'
#' Summary of object prep
#' @param object \code{prep} object
#' @param ... ignored
#' @export
#' @examples
#' summary(prep(AirPassengers))
summary.prep <- function(object,...){
  # Human-readable report of every transformation recorded in the object;
  # optional details (lambda, nd, means...) are printed only for the
  # methods that were actually applied.
  cat("Preprocessed time series object\n")
  # Homogenizing step
  if(object$homogenize.method=='log') cat("~Homogenizing method: logarithmic transformation\n")
  else if(object$homogenize.method=='boxcox') cat ("~Homogenizing method: Box-Cox transformation with lambda=",object$lambda,"\n")
  else cat("~Transformation not applied\n")
  # Detrending step
  if(object$detrend.method=='differencing') {
    cat("~Detrending method: differencing\n Number of differences: ",object$nd,"\n")
    cat("First original values: ",object$firstvalues,"\n")
  }
  else if(object$detrend.method=='sfsm'){
    cat("~Detrending method: substracting full-season means\n Season means: \n")
    utils::str(object$means)
  }
  else cat("~No detrending performed\n")
  # Deseasoning step
  if(object$deseason.method=='differencing') {
    cat("~Deseason method: differencing\n Number of seasonal differences: ",object$nsd,"\n")
    cat("First original seasons: ",object$firstseasons,"\n")
  }
  else cat("~No deseason performed\n")
  # Order of the two steps is only meaningful if both were applied
  if(object$deseason.method!='none' && object$detrend.method!='none'){
    if(object$detrend.first==TRUE) cat("~Detrending applied before deseasoning\n")
    else cat("~Detrending applied after deseasoning\n")
  }
  cat("~Original serie start: ",object$start,"\n")
  cat("~Original serie length: ",object$length,"\n")
  cat("~Preprocessed time serie:\n")
  utils::str(object$tserie)
}
#' Generic function
#'
#' Prints object prep
#' @param x \code{prep} object
#' @param ... ignored
#' @export
#' @examples
#' print(prep(AirPassengers))
print.prep <- function(x,...){
  # Dumps every attribute of the prep object; optional fields (lambda, nd,
  # firstvalues, means, nsd, firstseasons) are shown only when set.
  cat("Preprocessed time series object\n\n")
  cat("Class: prep\n\n")
  cat("Attributes: \n")
  cat("$homogenize.method: ",x$homogenize.method,"\n")
  if(!is.null(x$lambda)) cat("$lambda: ",x$lambda,"\n")
  cat("$detrend.method: ",x$detrend.method,"\n")
  if(!is.null(x$nd)) cat("$nd: ",x$nd,"\n")
  if(!is.null(x$firstvalues)) cat("$firstvalues: ",x$firstvalues,"\n")
  if(!is.null(x$means)) cat("$means: ",x$means,"\n")
  cat("$deseason.method: ",x$deseason.method,"\n")
  if(!is.null(x$nsd)) cat("$nsd: ",x$nsd,"\n")
  if(!is.null(x$firstseasons)) cat("$firstseasons: ",x$firstseasons,"\n")
  cat("$detrend.first: ",x$detrend.first,"\n")
  cat("$start: ",x$start,"\n")
  cat("$length: ",x$length,"\n")
  cat("$tserie: \n")
  print(x$tserie)
}
| /R/prep.R | no_license | cran/predtoolsTS | R | false | false | 15,636 | r | #' Automatic pre-preprocessing
#'
#' This function performs pre-processing on a time series object(ts) to treat
#' heterocedasticity, trend and seasonality in order to make the serie stationary.
#'
#' Returns an object \code{prep} which stores all data needed to undo the changes later on.
#'
#' This function provides an automatic way of pre-processing based on unit root tests, but
#' this is not the perfect way to do it. You should always check manually if
#' the given time serie is actually stationary, and modify the parameters according
#' to your thoughts.
#'
#' @param tserie A ts object.
#' @param homogenize.method A string. Current methods available are "log" and "boxcox". Method "log" is set as default. If you don't want to perform this transformation, set method as "none".
#' @param detrend.method A string. Current methods available are "differencing" and "sfsm". Method "differencing" is set as default. If you don't want to perform this transformation, set method as "none".
#' @param nd A number. Number of differences you want to apply to the "differencing" detrending method. As default its value is NULL, which means nd will be calculated internally.
#' @param deseason.method A string. Current methods available are "differencing". Method "differencing" is set as default. If you don't want to perform this transformation, set method as "none".
#' @param nsd A number. Number of seasonal differences you want to apply to the "differencing" deseasoning method. As default its value is NULL, which means nsd will be calculated internally.
#' @param detrend.first A boolean. TRUE if detrending method is applied first, then deseasoning. FALSE if deseasoning method is applied first. Default is TRUE.
#' @return A list is returned of class \code{prep} containing:
#' \item{tserie}{Processed ts object.}
#' \item{homogenize.method}{Method used for homogenizing.}
#' \item{detrend.method}{Method used for detrending.}
#' \item{nd}{Number of differences used on detrending through differencing.}
#' \item{firstvalues}{First \code{nd} values of the original series.}
#' \item{deseason.method}{Method used for deseasoning.}
#' \item{nsd}{Number of seasonal differences used on deseasoning through differencing.}
#' \item{firstseasons}{First \code{nsd} seasons of the original series.}
#' \item{detrend.first}{Processed ts object}
#' \item{means}{Vector of means used in "sfsm" detrending method.}
#' \item{lambda}{Coefficient used in "boxcox" transformation.}
#' \item{start}{Start of the original time serie.}
#' \item{length}{Length of the original time serie.}
#' @author Alberto Vico Moreno
#' @seealso{
#' \code{\link{prep.homogenize.log}},
#' \code{\link{prep.homogenize.boxcox}},
#' \code{\link{prep.detrend.differencing}},
#' \code{\link{prep.detrend.sfsm}},
#' \code{\link{prep.deseason.differencing}},
#' \code{\link{prep.check.acf}},
#' \code{\link{prep.check.adf}}
#' }
#' @export
#' @references \url{https://www.otexts.org/fpp/8/1}
#' @examples
#' prep(AirPassengers)
#' prep(AirPassengers,homogenize.method='boxcox',detrend.method='none')
prep <- function(tserie,homogenize.method='log'
                 ,detrend.method='differencing',nd=NULL
                 ,deseason.method='differencing',nsd=NULL
                 ,detrend.first=TRUE){
  # Pipeline: (1) homogenize variance, (2) detrend/deseason in the order
  # given by detrend.first. Every parameter needed to invert the
  # transformations (lambda, differences, lost values, means) is kept in
  # the returned "prep" object.
  if(!stats::is.ts(tserie)) stop('Not a ts object')
  # A non-seasonal series (frequency 1) cannot be deseasoned.
  if(stats::frequency(tserie) == 1) deseason.method='none'
  newts <- tserie
  means <- NULL         # season means, only set by 'sfsm' detrending
  lambda <- NULL        # Box-Cox coefficient, only set by 'boxcox'
  firstvalues <- NULL   # values lost to regular differencing
  firstseasons <- NULL  # seasons lost to seasonal differencing
  start <- start(tserie)
  length <- length(tserie)
  #homogenizing
  if(homogenize.method=='log') newts <- prep.homogenize.log(newts)
  else if(homogenize.method=='boxcox'){
    bc <- prep.homogenize.boxcox(newts)
    newts <- bc[[1]]
    lambda <- bc[[2]]
  }
  else if(homogenize.method!='none') stop('Invalid homogenizing method')
  #detrending and deseasoning
  if(detrend.first==FALSE){
    # Deseason first, then detrend.
    if(deseason.method=='differencing'){
      dsts <- prep.deseason.differencing(newts,nsd)
      if(dsts[[2]]>0){
        newts <- dsts[[1]]
        nsd <- dsts[[2]]
        firstseasons <- dsts[[3]]
      }else deseason.method='none'  # estimator decided no seasonal difference is needed
    }else if(deseason.method!='none') stop('Invalid deseasoning method')
    if(detrend.method=='differencing'){
      dtts <- prep.detrend.differencing(newts,nd)
      if(dtts[[2]]>0){
        newts <- dtts[[1]]
        nd <- dtts[[2]]
        firstvalues <- dtts[[3]]
      }else detrend.method='none'  # estimator decided no difference is needed
    }else if(detrend.method=='sfsm'){
      dtts <- prep.detrend.sfsm(newts)
      newts <- dtts[[1]]
      means <- dtts[[2]]
    }else if(detrend.method!='none') stop('Invalid detrending method')
  }else{
    # Default path: detrend first, then deseason.
    if(detrend.method=='differencing'){
      dtts <- prep.detrend.differencing(newts,nd)
      if(dtts[[2]]>0){
        newts <- dtts[[1]]
        nd <- dtts[[2]]
        firstvalues <- dtts[[3]]
      }else detrend.method='none'
    }else if(detrend.method=='sfsm'){
      dtts <- prep.detrend.sfsm(newts)
      newts <- dtts[[1]]
      means <- dtts[[2]]
    }else if(detrend.method!='none') stop('Invalid detrending method')
    if(deseason.method=='differencing'){
      dsts <- prep.deseason.differencing(newts,nsd)
      if(dsts[[2]]>0){
        newts <- dsts[[1]]
        nsd <- dsts[[2]]
        firstseasons <- dsts[[3]]
      }else deseason.method='none'
    }else if(deseason.method!='none') stop('Invalid deseasoning method')
  }
  #creating the object
  obj <- list()
  class(obj) <- "prep"
  obj$tserie <- newts
  obj$homogenize.method <- homogenize.method
  obj$detrend.method <- detrend.method
  obj$nd <- nd
  obj$firstvalues <- firstvalues
  obj$deseason.method <- deseason.method
  obj$nsd <- nsd
  obj$firstseasons <- firstseasons
  obj$detrend.first <- detrend.first
  obj$means <- means
  obj$lambda <- lambda
  obj$start <- start
  obj$length <- length
  return (obj)
}
#' Logarithmic transformation
#'
#' Performs a logarithmic transformation to a time serie.
#'
#' @param tserie a \code{ts} object
#' @return \code{ts} object with transformed time serie
#' @export
#' @examples
#' prep.homogenize.log(AirPassengers)
prep.homogenize.log <- function(tserie){
  # Guard clause: only ts objects are accepted.
  if (!stats::is.ts(tserie)) {
    stop('Not a ts object')
  }
  log(tserie)
}
#' Box-Cox transformation
#'
#' Performs a Box-Cox transformation to a time serie.
#'
#' The lambda coefficient is estimated automatically through
#' \code{forecast::BoxCox.lambda} and returned so the transformation can be
#' inverted later.
#'
#' @param tserie a \code{ts} object
#' @return A list is returned containing:
#' \item{boxcox}{Transformed ts object.}
#' \item{lambda}{Lambda value.}
#' @references Box-Cox transformation: \url{https://en.wikipedia.org/wiki/Power_transform#Box.E2.80.93Cox_transformation}
#' @export
#' @examples
#' prep.homogenize.boxcox(AirPassengers)
prep.homogenize.boxcox <- function(tserie){
  if(!stats::is.ts(tserie)) stop('Not a ts object')
  # Estimate the optimal lambda first, then transform with it.
  lambda <- forecast::BoxCox.lambda(tserie)
  return (list(boxcox=forecast::BoxCox(tserie,lambda),lambda=lambda))
}
#' Detrend with differencing method
#'
#' Performs differencing with lag=1.
#'
#' If no number of differences is specified, the function will make an estimation
#' of the number of differences needed based on unit root test provided by \code{forecast::ndiffs}
#'
#' @param tserie a \code{ts} object
#' @param nd number of differences to apply. As default its value is NULL; in this case, the function will perform an automatic estimation of \code{nd}.
#' @return A list is returned containing:
#' \item{tserie}{Transformed ts object.}
#' \item{nd}{Number of differencies applied.}
#' \item{firstvalues}{Lost values after differencing.}
#' @export
#' @examples
#' prep.detrend.differencing(AirPassengers)
#' prep.detrend.differencing(AirPassengers,nd=2)
prep.detrend.differencing <- function(tserie,nd=NULL){
  if(!stats::is.ts(tserie)) stop('Not a ts object')
  firstvalues <- NULL
  if(is.null(nd)){
    # Estimate the number of differences with a unit root test.
    nd <- forecast::ndiffs(tserie)
  }
  # Note: the original user-supplied branch returned a misspelled list name
  # ("firsvalues") and crashed for nd=0 (diff requires differences >= 1);
  # both paths are unified and fixed here.
  if(nd > 0){
    # Keep the nd values lost by differencing so the step can be undone.
    firstvalues <- tserie[1:nd]
    tserie <- diff(tserie,differences=nd)
  }
  return (list(tserie=tserie,nd=nd,firstvalues=firstvalues))
}
#' Detrend with "substracting full-season means" method
#'
#' Performs "substracting full-season means" method to go for a totally automatic
#' approach.
#'
#' Under this detrending scheme, a series is first split into segments. The length
#' of the segments is equal to the length of seasonality(12 for monthly).
#' The mean of the historical observations within each of these segments is substacted
#' from every historical observation in the segment.
#' To get the detrended serie we do:
#' \code{ds = xi - m}
#' Being \code{xi} the actual values on the time series and \code{m} the mean of the segment of \code{xi}
#'
#' @param tserie a \code{ts} object
#' @return A list is returned containing:
#' \item{tserie}{Transformed ts object.}
#' \item{means}{Vector containing the historical means.}
#' @export
#' @examples
#' prep.detrend.sfsm(AirPassengers)
prep.detrend.sfsm <- function(tserie){
  if(!stats::is.ts(tserie)) stop('Not a ts object')
  first <- start(tserie)[2] #first observation index of the time serie
  index <- 1 #general index
  means <- NULL #means vector
  cont <- TRUE
  # First pass: walk the series segment by segment and record each segment's
  # mean. The test (index+first-2) %% frequency == 0 detects a season
  # boundary even when the series does not start at period 1, so the first
  # and last segments may be partial seasons.
  while(cont){
    aux <- NULL
    while(index <= length(tserie)){ #values of a season
      aux <- c(aux,tserie[index])
      index <- index + 1
      if((index+first-2) %% frequency(tserie) == 0) break
    }
    means <- c(means,mean(aux)) #create means vector
    if(index > length(tserie)) cont <- FALSE
  }
  # Second pass: subtract the matching segment mean from every observation
  # (ds = xi - m), advancing mindex at the same season boundaries as above.
  mindex <- 1
  index <- 1
  while(index <= length(tserie)){
    tserie[index] <- tserie[index]-means[mindex]
    index <- index+1
    if((index+first-2) %% frequency(tserie) == 0) mindex <- mindex+1
  }
  return (list(tserie=tserie,means=means))
}
#' Deseason with differencing method
#'
#' Performs differencing with lag=frequency.
#'
#' If no number of differences is specified, the function will make an estimation
#' of the number of differences needed based on unit root test provided by \code{forecast::nsdiffs}
#'
#' @param tserie a \code{ts} object
#' @param nsd number of seasonal differences to apply. As default its value is NULL; in this case, the function will perform an automatic estimation of \code{nsd}.
#' @return A list is returned containing:
#' \item{tserie}{Transformed ts object.}
#' \item{nsd}{Number of seasonal differencies applied.}
#' \item{firstseasons}{Lost values after differencing.}
#' @export
#' @examples
#' prep.deseason.differencing(AirPassengers)
#' prep.deseason.differencing(AirPassengers,nsd=2)
prep.deseason.differencing <- function(tserie, nsd=NULL){
  if(!stats::is.ts(tserie)) stop('Not a ts object')
  firstseasons <- NULL
  if(is.null(nsd)){
    # Estimate the number of seasonal differences with a unit root test.
    nsd <- forecast::nsdiffs(tserie)
  }
  # Note: the original user-supplied branch crashed for nsd=0 because
  # diff() requires differences >= 1; both paths are unified and fixed here.
  if(nsd > 0){
    # Keep the full seasons lost by differencing so the step can be undone.
    firstseasons <- tserie[1:(frequency(tserie)*nsd)]
    tserie <- diff(tserie,lag=frequency(tserie),differences=nsd)
  }
  return (list(tserie=tserie,nsd=nsd,firstseasons=firstseasons))
}
#' Autocorrelation function
#'
#' Plots the autocorrelation function to check stationarity
#'
#' For a stationary time series, the ACF will drop to zero
#' relatively quickly, while the ACF of non-stationary data decreases slowly.
#' Also, for non-stationary data, the value is often large and positive.
#' @param tserie a \code{ts} or a \code{prep} object
#' @export
#' @examples
#' prep.check.acf(AirPassengers)
#' prep.check.acf(prep(AirPassengers))
prep.check.acf <- function(tserie){
  # inherits() replaces class(x)=='prep': class() can return a vector of
  # length > 1, which breaks the original comparison inside if().
  if(stats::is.ts(tserie)) stats::acf(tserie)
  else if(inherits(tserie, 'prep')) stats::acf(tserie$tserie)
  else stop('Not a ts/prep object')
}
#' Augmented Dickey-Fuller test
#'
#' Performs ADF test just as another tool to check stationarity.
#'
#' Shows the results of an ADF test. A p-value<0.05 suggests the data is stationary.
#'
#' @param tserie a \code{ts} or a \code{prep} object
#' @export
#' @examples
#' prep.check.adf(AirPassengers)
#' prep.check.adf(prep(AirPassengers))
prep.check.adf <- function(tserie){
  # inherits() replaces class(x)=='prep': class() can return a vector of
  # length > 1, which breaks the original comparison inside if().
  if(stats::is.ts(tserie)) return (tseries::adf.test(tserie,alternative="stationary"))
  else if(inherits(tserie, 'prep')) return (tseries::adf.test(tserie$tserie,alternative="stationary"))
  else stop('Not a ts/prep object')
}
#generic functions
#' Generic function
#'
#' Plots object prep
#' @param x \code{prep} object
#' @param ylab ylab
#' @param xlab xlab
#' @param ... ignored
#' @export
#' @examples
#' plot(prep(AirPassengers),ylab="Stationary AisPassengers")
plot.prep <- function(x,ylab="Preprocessed time serie",xlab="",...){
  # Delegate to the base plot method of the stored (preprocessed) ts object.
  preprocessed <- x$tserie
  graphics::plot(preprocessed, ylab = ylab, xlab = xlab)
}
#' Generic function
#'
#' Summary of object prep
#' @param object \code{prep} object
#' @param ... ignored
#' @export
#' @examples
#' summary(prep(AirPassengers))
summary.prep <- function(object,...){
  # Human-readable report of every transformation recorded in the object;
  # optional details (lambda, nd, means...) are printed only for the
  # methods that were actually applied.
  cat("Preprocessed time series object\n")
  # Homogenizing step
  if(object$homogenize.method=='log') cat("~Homogenizing method: logarithmic transformation\n")
  else if(object$homogenize.method=='boxcox') cat ("~Homogenizing method: Box-Cox transformation with lambda=",object$lambda,"\n")
  else cat("~Transformation not applied\n")
  # Detrending step
  if(object$detrend.method=='differencing') {
    cat("~Detrending method: differencing\n Number of differences: ",object$nd,"\n")
    cat("First original values: ",object$firstvalues,"\n")
  }
  else if(object$detrend.method=='sfsm'){
    cat("~Detrending method: substracting full-season means\n Season means: \n")
    utils::str(object$means)
  }
  else cat("~No detrending performed\n")
  # Deseasoning step
  if(object$deseason.method=='differencing') {
    cat("~Deseason method: differencing\n Number of seasonal differences: ",object$nsd,"\n")
    cat("First original seasons: ",object$firstseasons,"\n")
  }
  else cat("~No deseason performed\n")
  # Order of the two steps is only meaningful if both were applied
  if(object$deseason.method!='none' && object$detrend.method!='none'){
    if(object$detrend.first==TRUE) cat("~Detrending applied before deseasoning\n")
    else cat("~Detrending applied after deseasoning\n")
  }
  cat("~Original serie start: ",object$start,"\n")
  cat("~Original serie length: ",object$length,"\n")
  cat("~Preprocessed time serie:\n")
  utils::str(object$tserie)
}
#' Generic function
#'
#' Prints object prep
#' @param x \code{prep} object
#' @param ... ignored
#' @export
#' @examples
#' print(prep(AirPassengers))
print.prep <- function(x,...){
  # Dumps every attribute of the prep object; optional fields (lambda, nd,
  # firstvalues, means, nsd, firstseasons) are shown only when set.
  cat("Preprocessed time series object\n\n")
  cat("Class: prep\n\n")
  cat("Attributes: \n")
  cat("$homogenize.method: ",x$homogenize.method,"\n")
  if(!is.null(x$lambda)) cat("$lambda: ",x$lambda,"\n")
  cat("$detrend.method: ",x$detrend.method,"\n")
  if(!is.null(x$nd)) cat("$nd: ",x$nd,"\n")
  if(!is.null(x$firstvalues)) cat("$firstvalues: ",x$firstvalues,"\n")
  if(!is.null(x$means)) cat("$means: ",x$means,"\n")
  cat("$deseason.method: ",x$deseason.method,"\n")
  if(!is.null(x$nsd)) cat("$nsd: ",x$nsd,"\n")
  if(!is.null(x$firstseasons)) cat("$firstseasons: ",x$firstseasons,"\n")
  cat("$detrend.first: ",x$detrend.first,"\n")
  cat("$start: ",x$start,"\n")
  cat("$length: ",x$length,"\n")
  cat("$tserie: \n")
  print(x$tserie)
}
|
#! /usr/bin/env Rscript
# Regression test: a lexer defined WITHOUT a t_error() rule should make
# rly::lex() emit a "No t_error rule is defined" warning in its output.
library(testthat)
library(rly)
context("Missing t_error() rule")
# Minimal lexer: three token rules, deliberately no t_error method.
# NOTE(review): R6Class is used without library(R6); assumed to be
# re-exported/attached via rly -- confirm, otherwise add library(R6).
Lexer <- R6Class("Lexer",
  public = list(
    tokens = c('NUMBER', 'PLUS','MINUS'),
    t_PLUS = '\\+',
    t_MINUS = '-',
    t_NUMBER = '\\d+'
  )
)
test_that("no t_error()", {
  expect_output(rly::lex(Lexer), "WARN .* No t_error rule is defined")
})
| /tests/testthat/test.lex_error1.R | no_license | caprice-j/rly | R | false | false | 347 | r | #! /usr/bin/env Rscript
# Regression test: a lexer defined WITHOUT a t_error() rule should make
# rly::lex() emit a "No t_error rule is defined" warning in its output.
library(testthat)
library(rly)
context("Missing t_error() rule")
# Minimal lexer: three token rules, deliberately no t_error method.
# NOTE(review): R6Class is used without library(R6); assumed to be
# re-exported/attached via rly -- confirm, otherwise add library(R6).
Lexer <- R6Class("Lexer",
  public = list(
    tokens = c('NUMBER', 'PLUS','MINUS'),
    t_PLUS = '\\+',
    t_MINUS = '-',
    t_NUMBER = '\\d+'
  )
)
test_that("no t_error()", {
  expect_output(rly::lex(Lexer), "WARN .* No t_error rule is defined")
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dodge_v.r
\name{pos_stackv}
\alias{pos_stackv}
\title{Stack overlapping intervals.
Assumes that each set has the same horizontal position}
\usage{
pos_stackv(df, height)
}
\description{
Stack overlapping intervals.
Assumes that each set has the same horizontal position
}
| /man/pos_stackv.Rd | no_license | PDXChris/pmtools | R | false | true | 350 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dodge_v.r
\name{pos_stackv}
\alias{pos_stackv}
\title{Stack overlapping intervals.
Assumes that each set has the same horizontal position}
\usage{
pos_stackv(df, height)
}
\description{
Stack overlapping intervals.
Assumes that each set has the same horizontal position
}
|
# Compares sequencing quality metrics (DP, MQ, VAF) between known and novel
# variants: violin plots per metric plus a VAF density overlay.
# NOTE(review): the hard-coded absolute setwd() makes this script
# machine-specific; consider taking the directory as an argument.
setwd("/mnt/2EA01BDBA01BA7FB/Working issues/WES/AF_Paper/Revision/quality")
library(ggplot2)
library(reshape2)
library(cowplot)
# Read depth (DP) per variant; rows labelled known/novel purely by position.
# NOTE(review): the counts 311101/21709 must equal the row counts of DP.tsv
# -- verify against the input file.
dps = read.table('DP.tsv')
dps$type = c(rep('known', 311101), rep('novel', 21709))
p1 <- ggplot(dps, aes(x=type, y=V1, fill=type)) + geom_violin() +
  scale_y_continuous(limits=c(0, 50000)) + ylab('DP')
aggregate(V1~type, dps, median)
# Mapping quality (MQ), same positional labelling as DP.
mqs = read.table('MQ.tsv')
mqs$type = c(rep('known', 311101), rep('novel', 21709))
p2 <- ggplot(mqs, aes(x=type, y=V1, fill=type)) + geom_violin(scale='width') +
  scale_y_continuous(limits=c(0, 100)) + ylab('MQ')
aggregate(V1~type, mqs, median)
# Variant allele fraction (VAF).
# NOTE(review): here the counts are 300000/23500, unlike the 311101/21709
# used for DP/MQ above -- confirm this matches VAF.tsv and is not a
# copy-paste slip.
vafs = read.table('VAF.tsv')
vafs$type = c(rep('known', 300000), rep('novel', 23500))
p3 <- ggplot(vafs, aes(x=type, y=V1, fill=type)) + geom_violin(scale='width') +
  scale_y_continuous(limits=c(0, 1)) + ylab('VAF')
aggregate(V1~type, vafs, median)
# Combine the three violin panels side by side, then draw VAF densities.
plot_grid(p1, p2, p3, nrow=1)
ggplot(vafs, aes(x=V1, fill=type)) + geom_density(alpha=0.4) +
  scale_x_continuous(limits=c(0, 1)) + xlab('VAF')
| /data_cleanup/params.R | no_license | bioinf/afpaper | R | false | false | 1,017 | r | setwd("/mnt/2EA01BDBA01BA7FB/Working issues/WES/AF_Paper/Revision/quality")
library(ggplot2)
library(reshape2)
library(cowplot)
dps = read.table('DP.tsv')
dps$type = c(rep('known', 311101), rep('novel', 21709))
p1 <- ggplot(dps, aes(x=type, y=V1, fill=type)) + geom_violin() +
scale_y_continuous(limits=c(0, 50000)) + ylab('DP')
aggregate(V1~type, dps, median)
mqs = read.table('MQ.tsv')
mqs$type = c(rep('known', 311101), rep('novel', 21709))
p2 <- ggplot(mqs, aes(x=type, y=V1, fill=type)) + geom_violin(scale='width') +
scale_y_continuous(limits=c(0, 100)) + ylab('MQ')
aggregate(V1~type, mqs, median)
vafs = read.table('VAF.tsv')
vafs$type = c(rep('known', 300000), rep('novel', 23500))
p3 <- ggplot(vafs, aes(x=type, y=V1, fill=type)) + geom_violin(scale='width') +
scale_y_continuous(limits=c(0, 1)) + ylab('VAF')
aggregate(V1~type, vafs, median)
plot_grid(p1, p2, p3, nrow=1)
ggplot(vafs, aes(x=V1, fill=type)) + geom_density(alpha=0.4) +
scale_x_continuous(limits=c(0, 1)) + xlab('VAF')
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#' @title RecordBatchWriter classes
#' @description Apache Arrow defines two formats for [serializing data for interprocess
#' communication (IPC)](https://arrow.apache.org/docs/format/Columnar.html#serialization-and-interprocess-communication-ipc):
#' a "stream" format and a "file" format, known as Feather.
#' `RecordBatchStreamWriter` and `RecordBatchFileWriter` are
#' interfaces for writing record batches to those formats, respectively.
#'
#' For guidance on how to use these classes, see the examples section.
#'
#' @seealso [write_ipc_stream()] and [write_feather()] provide a much simpler
#' interface for writing data to these formats and are sufficient for many use
#' cases. [write_to_raw()] is a version that serializes data to a buffer.
#' @usage NULL
#' @format NULL
#' @docType class
#' @section Factory:
#'
#' The `RecordBatchFileWriter$create()` and `RecordBatchStreamWriter$create()`
#' factory methods instantiate the object and take the following arguments:
#'
#' - `sink` An `OutputStream`
#' - `schema` A [Schema] for the data to be written
#' - `use_legacy_format` logical: write data formatted so that Arrow libraries
#' versions 0.14 and lower can read it. Default is `FALSE`. You can also
#' enable this by setting the environment variable `ARROW_PRE_0_15_IPC_FORMAT=1`.
#' - `metadata_version`: A string like "V5" or the equivalent integer indicating
#' the Arrow IPC MetadataVersion. Default (NULL) will use the latest version,
#' unless the environment variable `ARROW_PRE_1_0_METADATA_VERSION=1`, in
#' which case it will be V4.
#'
#' @section Methods:
#'
#' - `$write(x)`: Write a [RecordBatch], [Table], or `data.frame`, dispatching
#' to the methods below appropriately
#' - `$write_batch(batch)`: Write a `RecordBatch` to stream
#' - `$write_table(table)`: Write a `Table` to stream
#' - `$close()`: close stream. Note that this indicates end-of-file or
#' end-of-stream--it does not close the connection to the `sink`. That needs
#' to be closed separately.
#'
#' @rdname RecordBatchWriter
#' @name RecordBatchWriter
#' @include arrow-package.R
#' @examples
#' \donttest{
#' tf <- tempfile()
#' on.exit(unlink(tf))
#'
#' batch <- record_batch(chickwts)
#'
#' # This opens a connection to the file in Arrow
#' file_obj <- FileOutputStream$create(tf)
#' # Pass that to a RecordBatchWriter to write data conforming to a schema
#' writer <- RecordBatchFileWriter$create(file_obj, batch$schema)
#' writer$write(batch)
#' # You may write additional batches to the stream, provided that they have
#' # the same schema.
#' # Call "close" on the writer to indicate end-of-file/stream
#' writer$close()
#' # Then, close the connection--closing the IPC message does not close the file
#' file_obj$close()
#'
#' # Now, we have a file we can read from. Same pattern: open file connection,
#' # then pass it to a RecordBatchReader
#' read_file_obj <- ReadableFile$create(tf)
#' reader <- RecordBatchFileReader$create(read_file_obj)
#' # RecordBatchFileReader knows how many batches it has (StreamReader does not)
#' reader$num_record_batches
#' # We could consume the Reader by calling $read_next_batch() until all are,
#' # consumed, or we can call $read_table() to pull them all into a Table
#' tab <- reader$read_table()
#' # Call as.data.frame to turn that Table into an R data.frame
#' df <- as.data.frame(tab)
#' # This should be the same data we sent
#' all.equal(df, chickwts, check.attributes = FALSE)
#' # Unlike the Writers, we don't have to close RecordBatchReaders,
#' # but we do still need to close the file connection
#' read_file_obj$close()
#' }
RecordBatchWriter <- R6Class("RecordBatchWriter", inherit = ArrowObject,
  public = list(
    # Write a single RecordBatch to the stream (delegates to the C++ binding)
    write_batch = function(batch) ipc___RecordBatchWriter__WriteRecordBatch(self, batch),
    # Write a whole Table to the stream (delegates to the C++ binding)
    write_table = function(table) ipc___RecordBatchWriter__WriteTable(self, table),
    # Generic entry point: dispatch on the type of x; anything that is not
    # already a RecordBatch or Table is first converted via Table$create()
    write = function(x) {
      if (inherits(x, "RecordBatch")) {
        self$write_batch(x)
      } else if (inherits(x, "Table")) {
        self$write_table(x)
      } else {
        self$write_table(Table$create(x))
      }
    },
    # Signal end-of-file/stream; does NOT close the underlying sink
    close = function() ipc___RecordBatchWriter__Close(self)
  )
)
#' @usage NULL
#' @format NULL
#' @rdname RecordBatchWriter
#' @export
RecordBatchStreamWriter <- R6Class("RecordBatchStreamWriter", inherit = RecordBatchWriter)
# Factory: open an IPC stream-format writer on an OutputStream for the given
# Schema. See get_ipc_use_legacy_format()/get_ipc_metadata_version() for how
# the format options resolve when left NULL.
RecordBatchStreamWriter$create <- function(sink,
                                           schema,
                                           use_legacy_format = NULL,
                                           metadata_version = NULL) {
  # Passing a file path string is a common mistake; the writer needs an open
  # Arrow OutputStream. (The message previously said "InputStream", which
  # contradicted the assert_is() check below.)
  if (is.string(sink)) {
    stop(
      "RecordBatchStreamWriter$create() requires an Arrow OutputStream. ",
      "Try providing FileOutputStream$create(", substitute(sink), ")",
      call. = FALSE
    )
  }
  assert_is(sink, "OutputStream")
  assert_is(schema, "Schema")
  shared_ptr(RecordBatchStreamWriter,
    ipc___RecordBatchStreamWriter__Open(
      sink,
      schema,
      get_ipc_use_legacy_format(use_legacy_format),
      get_ipc_metadata_version(metadata_version)
    )
  )
}
#' @usage NULL
#' @format NULL
#' @rdname RecordBatchWriter
#' @export
RecordBatchFileWriter <- R6Class("RecordBatchFileWriter", inherit = RecordBatchStreamWriter)
# Factory: open an IPC file-format (Feather V2) writer on an OutputStream for
# the given Schema.
RecordBatchFileWriter$create <- function(sink,
                                         schema,
                                         use_legacy_format = NULL,
                                         metadata_version = NULL) {
  # Passing a file path string is a common mistake; the writer needs an open
  # Arrow OutputStream. (The message previously said "InputStream", which
  # contradicted the assert_is() check below.)
  if (is.string(sink)) {
    stop(
      "RecordBatchFileWriter$create() requires an Arrow OutputStream. ",
      "Try providing FileOutputStream$create(", substitute(sink), ")",
      call. = FALSE
    )
  }
  assert_is(sink, "OutputStream")
  assert_is(schema, "Schema")
  shared_ptr(RecordBatchFileWriter,
    ipc___RecordBatchFileWriter__Open(
      sink,
      schema,
      get_ipc_use_legacy_format(use_legacy_format),
      get_ipc_metadata_version(metadata_version)
    )
  )
}
# Resolve the user-supplied metadata_version argument to a MetadataVersion
# enum value. Accepts an integer (n -> "Vn"), a string name, or NULL, in
# which case env vars may force the pre-1.0 "V4" behavior.
get_ipc_metadata_version <- function(x) {
  requested <- x
  if (is_integerish(x)) {
    # An integer n names the enum entry "Vn" (e.g. 4 -> "V4", which is 3L)
    x <- paste0("V", x)
  } else if (is.null(x)) {
    use_v4 <- identical(Sys.getenv("ARROW_PRE_1_0_METADATA_VERSION"), "1") ||
      identical(Sys.getenv("ARROW_PRE_0_15_IPC_FORMAT"), "1")
    # Setting ARROW_PRE_0_15_IPC_FORMAT implies the pre-1.0 metadata version;
    # otherwise default to the latest entry in the enum.
    x <- if (use_v4) "V4" else length(MetadataVersion)
  }
  version <- MetadataVersion[[x]]
  if (is.null(version)) {
    stop(deparse(requested), " is not a valid IPC MetadataVersion", call. = FALSE)
  }
  version
}
# Resolve the use_legacy_format argument: NULL means "unspecified", in which
# case the ARROW_PRE_0_15_IPC_FORMAT environment variable decides; any other
# value is reduced to a strict logical via isTRUE().
get_ipc_use_legacy_format <- function(x) {
  if (is.null(x)) {
    identical(Sys.getenv("ARROW_PRE_0_15_IPC_FORMAT"), "1")
  } else {
    isTRUE(x)
  }
}
| /r/R/record-batch-writer.R | permissive | blmarket/arrow | R | false | false | 7,521 | r | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#' @title RecordBatchWriter classes
#' @description Apache Arrow defines two formats for [serializing data for interprocess
#' communication (IPC)](https://arrow.apache.org/docs/format/Columnar.html#serialization-and-interprocess-communication-ipc):
#' a "stream" format and a "file" format, known as Feather.
#' `RecordBatchStreamWriter` and `RecordBatchFileWriter` are
#' interfaces for writing record batches to those formats, respectively.
#'
#' For guidance on how to use these classes, see the examples section.
#'
#' @seealso [write_ipc_stream()] and [write_feather()] provide a much simpler
#' interface for writing data to these formats and are sufficient for many use
#' cases. [write_to_raw()] is a version that serializes data to a buffer.
#' @usage NULL
#' @format NULL
#' @docType class
#' @section Factory:
#'
#' The `RecordBatchFileWriter$create()` and `RecordBatchStreamWriter$create()`
#' factory methods instantiate the object and take the following arguments:
#'
#' - `sink` An `OutputStream`
#' - `schema` A [Schema] for the data to be written
#' - `use_legacy_format` logical: write data formatted so that Arrow libraries
#' versions 0.14 and lower can read it. Default is `FALSE`. You can also
#' enable this by setting the environment variable `ARROW_PRE_0_15_IPC_FORMAT=1`.
#' - `metadata_version`: A string like "V5" or the equivalent integer indicating
#' the Arrow IPC MetadataVersion. Default (NULL) will use the latest version,
#' unless the environment variable `ARROW_PRE_1_0_METADATA_VERSION=1`, in
#' which case it will be V4.
#'
#' @section Methods:
#'
#' - `$write(x)`: Write a [RecordBatch], [Table], or `data.frame`, dispatching
#' to the methods below appropriately
#' - `$write_batch(batch)`: Write a `RecordBatch` to stream
#' - `$write_table(table)`: Write a `Table` to stream
#' - `$close()`: close stream. Note that this indicates end-of-file or
#' end-of-stream--it does not close the connection to the `sink`. That needs
#' to be closed separately.
#'
#' @rdname RecordBatchWriter
#' @name RecordBatchWriter
#' @include arrow-package.R
#' @examples
#' \donttest{
#' tf <- tempfile()
#' on.exit(unlink(tf))
#'
#' batch <- record_batch(chickwts)
#'
#' # This opens a connection to the file in Arrow
#' file_obj <- FileOutputStream$create(tf)
#' # Pass that to a RecordBatchWriter to write data conforming to a schema
#' writer <- RecordBatchFileWriter$create(file_obj, batch$schema)
#' writer$write(batch)
#' # You may write additional batches to the stream, provided that they have
#' # the same schema.
#' # Call "close" on the writer to indicate end-of-file/stream
#' writer$close()
#' # Then, close the connection--closing the IPC message does not close the file
#' file_obj$close()
#'
#' # Now, we have a file we can read from. Same pattern: open file connection,
#' # then pass it to a RecordBatchReader
#' read_file_obj <- ReadableFile$create(tf)
#' reader <- RecordBatchFileReader$create(read_file_obj)
#' # RecordBatchFileReader knows how many batches it has (StreamReader does not)
#' reader$num_record_batches
#' # We could consume the Reader by calling $read_next_batch() until all are,
#' # consumed, or we can call $read_table() to pull them all into a Table
#' tab <- reader$read_table()
#' # Call as.data.frame to turn that Table into an R data.frame
#' df <- as.data.frame(tab)
#' # This should be the same data we sent
#' all.equal(df, chickwts, check.attributes = FALSE)
#' # Unlike the Writers, we don't have to close RecordBatchReaders,
#' # but we do still need to close the file connection
#' read_file_obj$close()
#' }
RecordBatchWriter <- R6Class("RecordBatchWriter", inherit = ArrowObject,
public = list(
write_batch = function(batch) ipc___RecordBatchWriter__WriteRecordBatch(self, batch),
write_table = function(table) ipc___RecordBatchWriter__WriteTable(self, table),
write = function(x) {
if (inherits(x, "RecordBatch")) {
self$write_batch(x)
} else if (inherits(x, "Table")) {
self$write_table(x)
} else {
self$write_table(Table$create(x))
}
},
close = function() ipc___RecordBatchWriter__Close(self)
)
)
#' @usage NULL
#' @format NULL
#' @rdname RecordBatchWriter
#' @export
RecordBatchStreamWriter <- R6Class("RecordBatchStreamWriter", inherit = RecordBatchWriter)
# Factory: open an IPC stream-format writer on an OutputStream for the given
# Schema.
RecordBatchStreamWriter$create <- function(sink,
                                           schema,
                                           use_legacy_format = NULL,
                                           metadata_version = NULL) {
  # Passing a file path string is a common mistake; the writer needs an open
  # Arrow OutputStream. (The message previously said "InputStream", which
  # contradicted the assert_is() check below.)
  if (is.string(sink)) {
    stop(
      "RecordBatchStreamWriter$create() requires an Arrow OutputStream. ",
      "Try providing FileOutputStream$create(", substitute(sink), ")",
      call. = FALSE
    )
  }
  assert_is(sink, "OutputStream")
  assert_is(schema, "Schema")
  shared_ptr(RecordBatchStreamWriter,
    ipc___RecordBatchStreamWriter__Open(
      sink,
      schema,
      get_ipc_use_legacy_format(use_legacy_format),
      get_ipc_metadata_version(metadata_version)
    )
  )
}
#' @usage NULL
#' @format NULL
#' @rdname RecordBatchWriter
#' @export
RecordBatchFileWriter <- R6Class("RecordBatchFileWriter", inherit = RecordBatchStreamWriter)
# Factory: open an IPC file-format (Feather V2) writer on an OutputStream for
# the given Schema.
RecordBatchFileWriter$create <- function(sink,
                                         schema,
                                         use_legacy_format = NULL,
                                         metadata_version = NULL) {
  # Passing a file path string is a common mistake; the writer needs an open
  # Arrow OutputStream. (The message previously said "InputStream", which
  # contradicted the assert_is() check below.)
  if (is.string(sink)) {
    stop(
      "RecordBatchFileWriter$create() requires an Arrow OutputStream. ",
      "Try providing FileOutputStream$create(", substitute(sink), ")",
      call. = FALSE
    )
  }
  assert_is(sink, "OutputStream")
  assert_is(schema, "Schema")
  shared_ptr(RecordBatchFileWriter,
    ipc___RecordBatchFileWriter__Open(
      sink,
      schema,
      get_ipc_use_legacy_format(use_legacy_format),
      get_ipc_metadata_version(metadata_version)
    )
  )
}
get_ipc_metadata_version <- function(x) {
input <- x
if (is_integerish(x)) {
# 4 means "V4", which actually happens to be 3L
x <- paste0("V", x)
} else if (is.null(x)) {
if (identical(Sys.getenv("ARROW_PRE_1_0_METADATA_VERSION"), "1") ||
identical(Sys.getenv("ARROW_PRE_0_15_IPC_FORMAT"), "1")) {
# PRE_1_0 is specific for this;
# if you already set PRE_0_15, PRE_1_0 should be implied
x <- "V4"
} else {
# Take the latest
x <- length(MetadataVersion)
}
}
out <- MetadataVersion[[x]]
if (is.null(out)) {
stop(deparse(input), " is not a valid IPC MetadataVersion", call. = FALSE)
}
out
}
get_ipc_use_legacy_format <- function(x) {
isTRUE(x %||% identical(Sys.getenv("ARROW_PRE_0_15_IPC_FORMAT"), "1"))
}
|
# a function to convert a mating summary list to a dataframe
matingSummary.df <- function(matSum){
  # Doubly nested input (list of summary lists): recurse into each element.
  if(is.list(matSum[[1]][[1]])){
    df <- lapply(matSum, matingSummary.df)
  }
  # NOTE(review): `matSum[[1]][1]` uses single-bracket indexing, which on a
  # list always yields a list, so this branch also runs -- and overwrites df
  # -- right after the recursive branch above. Possibly a missing `else`;
  # confirm intended behavior before changing.
  if(is.list(matSum[[1]][1])){
    # One row per summary element: flatten each and bind row-wise.
    l <- lapply(matSum, unlist)
    df <- as.data.frame(do.call(rbind, l))
    if('popSt' %in% colnames(df)){
      # unlist() degraded Date columns to numeric day counts; restore the
      # Date class using the standard epoch origin.
      df$popSt <- as.Date(df$popSt, origin = '1970-01-01')
      df$peak <- as.Date(df$peak, origin = '1970-01-01')
      df$meanSD <- as.Date(df$meanSD, origin = '1970-01-01')
      df$meanED <- as.Date(df$meanED, origin = '1970-01-01')
      df$popEnd <- as.Date(df$popEnd, origin = '1970-01-01')
    }
    # Promote the list names (row names after rbind) to an id column, kept
    # as the first column only when they differ from the year values.
    df$id <- rownames(df)
    if (!all(df$id == as.character(df$year))){
      df <- df[,c(ncol(df),1:(ncol(df)-1))]
    } else{
      df <- df[,1:(ncol(df)-1)]
    }
    rownames(df) <- NULL
  } else {
    # Flat summary: a plain list converts directly to a one-row data.frame.
    df <- as.data.frame(matSum)
  }
  df
}
# Collapse a list of per-population mating-potential results into simple
# data.frames. `subject` selects which components to extract; "all" expands
# to every component. "pairwise" is accepted but currently produces nothing
# (the pairwise array extraction is not implemented).
simplify.potential.list <- function(s, subject){
  subject <- match.arg(subject, c("population", "pairwise",
                                  "individual", "all"),
                       several.ok = TRUE)
  if ("all" %in% subject) {
    subject <- c("population", "pairwise", "individual")
  }
  out <- list()
  if ("population" %in% subject) {
    # One row per population: its name plus the population-level synchrony.
    sync_vals <- sapply(s, function(el) el[[which("pop" == names(el))]])
    pop_df <- data.frame(pop = names(s), synchrony = sync_vals)
    rownames(pop_df) <- NULL
    out$pop <- pop_df
  }
  if ("individual" %in% subject) {
    # Stack the per-population individual matrices into one data.frame.
    ind_rows <- lapply(s, function(el) el[[which("ind" == names(el))]])
    ind_df <- as.data.frame(do.call(rbind, ind_rows))
    rownames(ind_df) <- NULL
    out$ind <- ind_df
  }
  return(out)
}
| /R/helpers.R | no_license | cran/mateable | R | false | false | 1,897 | r | # a function to convert a mating summary list to a dataframe
matingSummary.df <- function(matSum){
if(is.list(matSum[[1]][[1]])){
df <- lapply(matSum, matingSummary.df)
}
if(is.list(matSum[[1]][1])){
l <- lapply(matSum, unlist)
df <- as.data.frame(do.call(rbind, l))
if('popSt' %in% colnames(df)){
df$popSt <- as.Date(df$popSt, origin = '1970-01-01')
df$peak <- as.Date(df$peak, origin = '1970-01-01')
df$meanSD <- as.Date(df$meanSD, origin = '1970-01-01')
df$meanED <- as.Date(df$meanED, origin = '1970-01-01')
df$popEnd <- as.Date(df$popEnd, origin = '1970-01-01')
}
df$id <- rownames(df)
if (!all(df$id == as.character(df$year))){
df <- df[,c(ncol(df),1:(ncol(df)-1))]
} else{
df <- df[,1:(ncol(df)-1)]
}
rownames(df) <- NULL
} else {
df <- as.data.frame(matSum)
}
df
}
simplify.potential.list <- function(s, subject){
subject <- match.arg(subject, c("population", "pairwise",
"individual", "all"),
several.ok = TRUE)
if('all' %in% subject){
subject <- c('population','pairwise','individual')
}
potential <- list()
if ('population' %in% subject){
pop <- data.frame(pop = names(s), synchrony = sapply(s,function(l)l[[which('pop' == names(l))]]))
row.names(pop) <- NULL
potential$pop <- pop
}
if ('individual' %in% subject){
ind <- as.data.frame(do.call(rbind,lapply(s,function(l)l[[which('ind' == names(l))]])))
row.names(ind) <- NULL
potential$ind <- ind
}
# if ('pairwise' %in% subject){
# pair <- array(unlist(lapply(s,function(l)l[[which('pair' == names(l))]])), dim = c(dim(s[[1]][[which('pair' == names(s[[1]]))]])[1],dim(s[[1]][[which('pair' == names(s[[1]]))]])[1],3))
# potential$pair <- pair
# }
return(potential)
}
|
# layers ----
# Column names that may identify a region in a layer, in precedence order.
layers_id_fields = c('rgn_id','cntry_key','country_id','saup_id','fao_id','fao_saup_id') # note: cntry_key for www2013, country_id for nature2012
layer_region_labels = 'rgn_labels'
layer_region_areas = 'rgn_area'
# pressures & resilience matrices ----
# components describe the layer and level with which to aggregate resilience and pressures matrices for goals with categories
# Each entry maps a goal code to c(layer = <weighting layer>, level = <aggregation level>).
resilience_components = list('NP' = c('layer'='np_harvest_product_weight' , 'level'='region_id-category'), # old: rnk_np_product_weight
                             'CS' = c('layer'='cs_habitat_extent' , 'level'='region_id'),
                             'CP' = c('layer'='cp_habitat_extent_rank' , 'level'='region_id'), # old: rnk_cp_habitat_extent
                             'HAB' = c('layer'='hab_presence' , 'level'='region_id'))
pressures_components = list('NP' = c('layer'='np_harvest_product_weight' , 'level'='region_id-category'),
                            'CS' = c('layer'='cs_habitat_extent' , 'level'='region_id'),
                            'CP' = c('layer'='cp_habitat_extent_rank' , 'level'='region_id'),
                            'LIV' = c('layer'='le_sector_weight' , 'level'='region_id'),
                            'ECO' = c('layer'='le_sector_weight' , 'level'='region_id'),
                            'HAB' = c('layer'='hab_presence' , 'level'='region_id'))
# Pressure layer prefixes grouped by category; resilience category labels.
pressures_categories = list(environmental=c('po','hd','fp','sp','cc'), social='ss')
resilience_categories = c('environmental', 'regulatory', 'social')
# constants used by the goal score / likely-future model
pressures_gamma = 0.5
goal_discount = 1.0
goal_beta = 0.67
default_trend = 0
# map configuration (initial center and zoom for the GUI map)
map_lat=60.4348; map_lon=17.0975; map_zoom=5
# extra descriptions not covered by goals.description or layers.description, used in ohigui
index_description = 'The overall Index represents the weighted average of all goal scores.'
dimension_descriptions = c('score' = 'This dimension is an average of the current status and likely future.',
                           'status' = 'This dimension represents the current value of a goal or sub-goal relative to its reference point.',
                           'future' = 'For this dimension, the likely future is calculated as the projected status in 5 years, informed by the current status, continued trend, inflected upwards by resilience and downwards by pressures.',
                           'trend' = 'This dimension represents the recent change in the value of the status. Unlike all other dimensions which range in value from 0 to 100, the trend ranges from -1 to 1, representing the steepest declines to increases respectively.',
                           'pressures' = 'This dimension represents the anthropogenic stressors that negatively affect the ability of a goal to be delivered to people. Pressures can affect either ecological or social (i.e. human) systems.',
                           'resilience' = 'This dimension represents the social, institutional, and ecological factors that positively affect the ability of a goal to be delivered to people.')
| /subcountry2014/conf/config.R | no_license | OHI-Science/swe | R | false | false | 3,169 | r | # layers ----
layers_id_fields = c('rgn_id','cntry_key','country_id','saup_id','fao_id','fao_saup_id') # note: cntry_key for www2013, country_id for nature2012
layer_region_labels = 'rgn_labels'
layer_region_areas = 'rgn_area'
# pressures & resilience matrices ----
# components describe the layer and level with which to aggregate resilience and pressures matrices for goals with categories
resilience_components = list('NP' = c('layer'='np_harvest_product_weight' , 'level'='region_id-category'), # old: rnk_np_product_weight
'CS' = c('layer'='cs_habitat_extent' , 'level'='region_id'),
'CP' = c('layer'='cp_habitat_extent_rank' , 'level'='region_id'), # old: rnk_cp_habitat_extent
'HAB' = c('layer'='hab_presence' , 'level'='region_id'))
pressures_components = list('NP' = c('layer'='np_harvest_product_weight' , 'level'='region_id-category'),
'CS' = c('layer'='cs_habitat_extent' , 'level'='region_id'),
'CP' = c('layer'='cp_habitat_extent_rank' , 'level'='region_id'),
'LIV' = c('layer'='le_sector_weight' , 'level'='region_id'),
'ECO' = c('layer'='le_sector_weight' , 'level'='region_id'),
'HAB' = c('layer'='hab_presence' , 'level'='region_id'))
pressures_categories = list(environmental=c('po','hd','fp','sp','cc'), social='ss')
resilience_categories = c('environmental', 'regulatory', 'social')
# constants
pressures_gamma = 0.5
goal_discount = 1.0
goal_beta = 0.67
default_trend = 0
# map configuration
map_lat=60.4348; map_lon=17.0975; map_zoom=5
# extra descriptions not covered by goals.description or layers.description, used in ohigui
index_description = 'The overall Index represents the weighted average of all goal scores.'
dimension_descriptions = c('score' = 'This dimension is an average of the current status and likely future.',
'status' = 'This dimension represents the current value of a goal or sub-goal relative to its reference point.',
'future' = 'For this dimension, the likely future is calculated as the projected status in 5 years, informed by the current status, continued trend, inflected upwards by resilience and downwards by pressures.',
'trend' = 'This dimension represents the recent change in the value of the status. Unlike all other dimensions which range in value from 0 to 100, the trend ranges from -1 to 1, representing the steepest declines to increases respectively.',
'pressures' = 'This dimension represents the anthropogenic stressors that negatively affect the ability of a goal to be delivered to people. Pressures can affect either ecological or social (i.e. human) systems.',
'resilience' = 'This dimension represents the social, institutional, and ecological factors that positively affect the ability of a goal to be delivered to people.')
|
## This now includes 'thresh' variable
## DIDN'T HAVE TO USE THIS: SEE 70.community_iterations.py INSTEAD

# Arguments arrive space-separated in the R_ARGS environment variable:
# <subject> <condition> <correlation threshold>
raw_args <- Sys.getenv("R_ARGS")
arg_vec <- strsplit(raw_args, " ")[[1]]
print(noquote(arg_vec))
print(length(noquote(arg_vec)))
subj <- noquote(arg_vec[1])
cond <- as.numeric(noquote(arg_vec[2]))
thresh <- as.numeric(noquote(arg_vec[3]))

library(bct)

# Proportions of links preserved; 8 is the approximate name for median
# completeness (actually ~8.7%).
threshes <- c(15, 12, 8, 5)

for (t in threshes) {
  # All inputs/outputs for this proportion live under $state/links_files<t>p/
  base_dir <- paste0(Sys.getenv("state"), "/links_files", t, "p/")
  stem <- paste0(subj, ".", cond, ".", t, "p_r", thresh, "_linksthresh_proportion")
  srcdst <- paste0(base_dir, stem, ".out.srcdst")
  tree <- paste0(base_dir, stem, ".out.tree")
  modularity_score <- blondel_community(srcdst, tree)
  print(modularity_score)
  modout <- paste0(base_dir, "mod_score.", stem, ".txt")
  write.table(modularity_score, modout, row.names = FALSE, col.names = FALSE, quote = FALSE)
}
| /modularity_conn/8.blondel_iterations.R | no_license | michaelandric/steadystate | R | false | false | 1,146 | r | ## This now includes 'thresh' variable
## DIDN'T HAVE TO USE THIS: SEE 70.community_iterations.py INSTEAD
Args <- Sys.getenv("R_ARGS")
print(noquote(strsplit(Args," ")[[1]]))
print(length(noquote(strsplit(Args," ")[[1]])))
subj <- noquote(strsplit(Args," ")[[1]][1])
cond <- as.numeric(noquote(strsplit(Args," ")[[1]][2]))
thresh <- as.numeric(noquote(strsplit(Args," ")[[1]][3]))
library(bct)
threshes <- c(15, 12, 8, 5) # Doing at different proprotion of links preserved; 8 approximate name for median completeness, actually ~8.7%
for (t in threshes)
{
srcdst <- paste(Sys.getenv("state"),"/links_files",t,"p/",subj,".",cond,".",t,"p_r",thresh,"_linksthresh_proportion.out.srcdst", sep="")
tree <- paste(Sys.getenv("state"),"/links_files",t,"p/",subj,".",cond,".",t,"p_r",thresh,"_linksthresh_proportion.out.tree", sep="")
modularity_score = blondel_community(srcdst, tree)
print(modularity_score)
modout <- paste(Sys.getenv("state"),"/links_files",t,"p/mod_score.",subj,".",cond,".",t,"p_r",thresh,"_linksthresh_proportion.txt",sep="")
write.table(modularity_score, modout, row.names=F, col.names=F, quote=F)
}
|
library(RandomFields)
### Name: RMbiwm
### Title: Full Bivariate Whittle Matern Model
### Aliases: RMbiwm
### Keywords: spatial models
### ** Examples
## Don't show:
StartExample()
## End(Don't show)
RFoptions(seed=0) ## *ANY* simulation will have the random seed 0; set
## RFoptions(seed=NA) to make them all random again
# Simulation grid and a bivariate Whittle-Matern model; see ?RMbiwm for the
# meaning of nudiag/nured/rhored/cdiag/s.
x <- y <- seq(-10, 10, 0.2)
model <- RMbiwm(nudiag=c(0.3, 2), nured=1, rhored=1, cdiag=c(1, 1.5),
                s=c(1, 1, 2))
# Plot the covariance model, then one simulated bivariate field on the grid.
plot(model)
plot(RFsimulate(model, x, y))
## Don't show:
FinalizeExample()
## End(Don't show)
| /data/genthat_extracted_code/RandomFields/examples/RMbiwm.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 573 | r | library(RandomFields)
### Name: RMbiwm
### Title: Full Bivariate Whittle Matern Model
### Aliases: RMbiwm
### Keywords: spatial models
### ** Examples
## Don't show:
StartExample()
## End(Don't show)
RFoptions(seed=0) ## *ANY* simulation will have the random seed 0; set
## RFoptions(seed=NA) to make them all random again
x <- y <- seq(-10, 10, 0.2)
model <- RMbiwm(nudiag=c(0.3, 2), nured=1, rhored=1, cdiag=c(1, 1.5),
s=c(1, 1, 2))
plot(model)
plot(RFsimulate(model, x, y))
## Don't show:
FinalizeExample()
## End(Don't show)
|
#' @author Bill Peterman
#' @title Generate correlated Gaussian surfaces
#' @description Function to create correlated Gaussian surfaces
#'
#' @param corr Desired correlation level between fbm surfaces
#' @param dim Dimension of simulated raster
#' @param autocorr_range Maximum range (raster units) of spatial autocorrelation. (Default = 5)
#' @param mag_var Magnitude of variation over the entire landscape. (Default = 25)
#' @param nug Magnitude of variation in the scale of autocorr_range, smaller values lead to more homogeneous landscapes.(Default = 5)
#' @param user_seed Seed can be set for reproducibility (Default = NA)
#' @return A RasterStack with layers \code{sim_true} (the reference surface)
#'   and \code{corr_sim} (a surface correlated with it at approximately
#'   \code{corr}, rescaled to [0, 1])
#'
#' @export
gaus_corr <- function(corr,
                      dim,
                      autocorr_range = 5,
                      mag_var = 25,
                      nug = 5,
                      user_seed = NA){

  # Reference surface; this is the only simulation that honors user_seed.
  sim_rast <- NLMR::nlm_gaussianfield(ncol = dim,
                                      nrow = dim,
                                      autocorr_range = autocorr_range,
                                      mag_var = mag_var,
                                      nug = nug,
                                      user_seed = user_seed)

  # Mixing weight for the independent noise surface: cor(sim, sim + w*noise)
  # is approximately `corr` when w = sqrt(1/corr^2 - 1). This is
  # loop-invariant, so it is computed once (previously it was recomputed
  # every iteration, alongside an unused raster::as.matrix() call that has
  # been removed).
  corr_ <- sqrt((1 / (corr^2)) - 1)

  # Rejection-sample fresh noise surfaces until the realized correlation is
  # within +/- 0.015 of the target. `||` is used because both operands are
  # scalars (the old `|` worked but is the vectorized operator).
  # NOTE(review): rast_corr starts at 0, so a target with |corr| <= 0.015
  # skips the loop and leaves corr_rast undefined -- confirm callers always
  # request a larger correlation.
  rast_corr <- 0
  while(rast_corr > corr + 0.015 || rast_corr < corr - 0.015){
    rep_sim <- NLMR::nlm_gaussianfield(ncol = dim,
                                       nrow = dim,
                                       autocorr_range = autocorr_range,
                                       mag_var = mag_var,
                                       nug = nug,
                                       user_seed = NA)
    corr_rast <- sim_rast + rep_sim * corr_
    # Realized Pearson correlation between the two layers
    rast_corr <- raster::layerStats(raster::stack(sim_rast, corr_rast), 'pearson')$`pearson correlation coefficient`[1,2]
    cat(paste0(round(rast_corr, digits = 4), '\n \n'))
  }

  cat(paste0("Surface correlation is ", round(rast_corr, 4), '\n \n'))

  # Rescale the accepted correlated surface to [0, 1]
  corr_rast[] <- (corr_rast[] - min(corr_rast[])) / (max(corr_rast[]) - min(corr_rast[]))

  return(raster::stack(list(sim_true = sim_rast,
                            corr_sim = corr_rast)))
} | /R/correlated_gaus_function.R | no_license | nspope/radishDGS | R | false | false | 2,215 | r | #' @author Bill Peterman
#' @title Generate correlated Gaussian surfaces
#' @description Function to create correlated Gaussian surfaces
#'
#' @param corr Desired correlation level between fbm surfaces
#' @param dim Dimension of simulated raster
#' @param autocorr_range Maximum range (raster units) of spatial autocorrelation. (Default = 5)
#' @param mag_var Magnitude of variation over the entire landscape. (Default = 25)
#' @param nug Magnitude of variation in the scale of autocorr_range, smaller values lead to more homogeneous landscapes.(Default = 5)
#' @param user_seed Seed can be set for reproducibility (Default = NA)
#'
#' @export
gaus_corr <- function(corr,
dim,
autocorr_range = 5,
mag_var = 25,
nug = 5,
user_seed = NA){
sim_rast <- NLMR::nlm_gaussianfield(ncol = dim,
nrow = dim,
autocorr_range = autocorr_range,
mag_var = mag_var,
nug = nug,
user_seed = user_seed)
rast_corr <- 0
while(rast_corr > corr + 0.015 | rast_corr < corr - 0.015){
rep_sim <- NLMR::nlm_gaussianfield(ncol = dim,
nrow = dim,
autocorr_range = autocorr_range,
mag_var = mag_var,
nug = nug,
user_seed = NA)
corr_ <- sqrt((1 / (corr^2)) - 1)
mat <- raster::as.matrix(sim_rast)
rep_sim_ <- rep_sim * corr_
corr_rast <- sim_rast + rep_sim_
rast_corr <- raster::layerStats(stack(sim_rast, corr_rast), 'pearson')$`pearson correlation coefficient`[1,2]
cat(paste0(round(rast_corr, digits = 4), '\n \n'))
}
cat(paste0("Surface correlation is ", round(rast_corr, 4), '\n \n'))
corr_rast[] <- (corr_rast[] - min(corr_rast[])) / (max(corr_rast[]) - min(corr_rast[]))
return(stack(list(sim_true = sim_rast,
corr_sim = corr_rast)))
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tally.R
\name{columns}
\alias{columns}
\alias{rows}
\title{return a vector of row or column indices}
\usage{
columns(x, default = c())
rows(x, default = c())
}
\arguments{
\item{x}{an object that may or may not have any rows or columns}
\item{default}{what to return if there are no rows or columns}
}
\value{
if \code{x} has rows or columns, a vector of indices, else \code{default}
}
\description{
return a vector of row or column indices
}
\examples{
dim(iris)
columns(iris)
rows(iris)
columns(NULL)
columns("this doesn't have columns")
}
| /man/columns.Rd | no_license | ProjectMOSAIC/mosaicCore | R | false | true | 622 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tally.R
\name{columns}
\alias{columns}
\alias{rows}
\title{return a vector of row or column indices}
\usage{
columns(x, default = c())
rows(x, default = c())
}
\arguments{
\item{x}{an object that may or may not have any rows or columns}
\item{default}{what to return if there are no rows or columns}
}
\value{
if \code{x} has rows or columns, a vector of indices, else \code{default}
}
\description{
return a vector of row or column indices
}
\examples{
dim(iris)
columns(iris)
rows(iris)
columns(NULL)
columns("this doesn't have columns")
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/print.crm.r
\name{print.crmlist}
\alias{print.crmlist}
\title{Print model table from model list}
\usage{
\method{print}{crmlist}(x,...)
}
\arguments{
\item{x}{list of model results}
\item{...}{generic arguments not used here}
}
\value{
None
}
\description{
Print model table from model list
}
\seealso{
\code{\link{crm}}
}
\author{
Jeff Laake
}
\keyword{utility}
| /man/print.crmlist.Rd | no_license | cran/marked | R | false | true | 443 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/print.crm.r
\name{print.crmlist}
\alias{print.crmlist}
\title{Print model table from model list}
\usage{
\method{print}{crmlist}(x,...)
}
\arguments{
\item{x}{list of model results}
\item{...}{generic arguments not used here}
}
\value{
None
}
\description{
Print model table from model list
}
\seealso{
\code{\link{crm}}
}
\author{
Jeff Laake
}
\keyword{utility}
|
12b9e05bec45383ec3fc42219dfbd954 query33_falsequ_1344.qdimacs 99 141 | /code/dcnf-ankit-optimized/Results/QBFLIB-2018/A1/Database/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/query33_falsequ_1344/query33_falsequ_1344.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 68 | r | 12b9e05bec45383ec3fc42219dfbd954 query33_falsequ_1344.qdimacs 99 141 |
#Modeled after the R6 testing structure: https://github.com/wch/R6/blob/master/tests/testthat.R
library(testthat)
library(REDCapR)
testthat::test_check("REDCapR")
| /REDCapR/tests/test-all.R | no_license | ingted/R-Examples | R | false | false | 169 | r | #Modeled after the R6 testing structure: https://github.com/wch/R6/blob/master/tests/testthat.R
library(testthat)
library(REDCapR)
testthat::test_check("REDCapR")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_mz_tab.R
\name{extractMetadata}
\alias{extractMetadata}
\title{Extract metadata from data frame.}
\usage{
extractMetadata(MzTabDataFrame)
}
\arguments{
\item{MzTabDataFrame}{the mztab data frame}
}
\description{
Extract the \link{Metadata} from an mztab data frame.
}
| /man/extractMetadata.Rd | permissive | lifs-tools/rmzTab-m | R | false | true | 351 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read_mz_tab.R
\name{extractMetadata}
\alias{extractMetadata}
\title{Extract metadata from data frame.}
\usage{
extractMetadata(MzTabDataFrame)
}
\arguments{
\item{MzTabDataFrame}{the mztab data frame}
}
\description{
Extract the \link{Metadata} from an mztab data frame.
}
|
library(SPEI)
# Example: one- and twelve-month SPEI for the Wichita dataset bundled with SPEI.
# Load data
data(wichita)
# Compute potential evapotranspiration (PET) and climatic water balance (BAL)
# (37.6475 is the site latitude in decimal degrees, required by Thornthwaite)
wichita$PET = thornthwaite(wichita$TMED, 37.6475)
wichita$BAL = wichita$PRCP-wichita$PET
# Convert to a ts (time series) object for convenience
# (drops the first two columns -- presumably year/month identifiers; TODO confirm)
wichita_ts = ts(wichita[,-c(1,2)], end=c(2011,10), frequency=12)
plot(wichita_ts)
# One and twelve-month SPEI (second argument is the aggregation time scale)
spei1 <- spei(wichita[,'BAL'], 1)
spei12 <- spei(wichita[,'BAL'], 12)
class(spei1)
# Extract information from spei object: summary, call function, fitted values, and coefficients
summary(spei1)
# Plot spei object -- stack both scales vertically in one device for comparison
par(mfrow=c(2,1))
plot(spei1, main='Wichita, SPEI-1')
plot(spei12, main='Wichita, SPEI-12')
write.csv(spei1$fitted,file='./example_spei.csv') | /SPEI_satellite/calc_SPEI.R | no_license | behroozeslami/Impact_Based_Forecasting_Droughts | R | false | false | 737 | r | library(SPEI)
# Example
# Load data
data(wichita)
# Compute potential evapotranspiration (PET) and climatic water balance (BAL)
wichita$PET = thornthwaite(wichita$TMED, 37.6475)
wichita$BAL = wichita$PRCP-wichita$PET
# Convert to a ts (time series) object for convenience
wichita_ts = ts(wichita[,-c(1,2)], end=c(2011,10), frequency=12)
plot(wichita_ts)
# One and tvelwe-months SPEI
spei1 <- spei(wichita[,'BAL'], 1)
spei12 <- spei(wichita[,'BAL'], 12)
class(spei1)
# Extract information from spei object: summary, call function, fitted values, and coefficients
summary(spei1)
# Plot spei object
par(mfrow=c(2,1))
plot(spei1, main='Wichita, SPEI-1')
plot(spei12, main='Wichita, SPEI-12')
write.csv(spei1$fitted,file='./example_spei.csv') |
#### DYNASTYPROCESS IMPORT ####
#' Import latest DynastyProcess values
#'
#' Fetches a copy of the latest DynastyProcess dynasty trade values sheets
#'
#' @param file one of `c("values.csv","values-players.csv","values-picks.csv")`
#'
#' @seealso <https://github.com/DynastyProcess/data>
#'
#' @examples
#' \donttest{
#' dp_values()
#' }
#'
#' @return a tibble of trade values from DynastyProcess
#'
#' @export
dp_values <- function(file = c("values.csv", "values-players.csv", "values-picks.csv")) {
  # Resolve the requested file against the allowed choices (first is the default).
  file_name <- match.arg(file)
  url_query <- glue::glue("https://github.com/DynastyProcess/data/raw/master/files/{file_name}")
  # Retry transient network failures before giving up.
  response <- httr::RETRY("GET", url_query, httr::accept("text/csv"))
  if (httr::http_error(response)) {
    stop(glue::glue("GitHub request failed with error: <{httr::status_code(response)}> \n
                    while calling <{url_query}>"), call. = FALSE)
  }
  # Parse the CSV body and coerce every column ending in "id" to character
  # (keeps identifier columns type-stable across the different value files).
  content <- response %>%
    httr::content() %>%
    utils::read.csv(text = ., stringsAsFactors = FALSE) %>%
    dplyr::mutate_at(dplyr::vars(dplyr::ends_with("id")), as.character) %>%
    tibble::tibble()
  # Explicit return for consistency with dp_playerids(); previously the function
  # ended on an assignment, which returns its value invisibly.
  return(content)
}
#' Import latest DynastyProcess player IDs
#'
#' Fetches a copy of the latest DynastyProcess player IDs csv
#'
#' @examples
#' \donttest{
#' dp_playerids()
#' }
#'
#' @seealso <https://github.com/DynastyProcess/data>
#'
#' @return a tibble of player IDs
#'
#' @export
dp_playerids <- function() {
  # Raw CSV of cross-platform player IDs maintained in the DynastyProcess
  # data repository.
  url_query <- "https://github.com/DynastyProcess/data/raw/master/files/db_playerids.csv"
  # Retry transient network failures before giving up.
  response <- httr::RETRY("GET", url_query, httr::accept("text/csv"))
  if (httr::http_error(response)) {
    stop(glue::glue("GitHub request failed with error: <{httr::status_code(response)}> \n
                    while calling <{url_query}>"), call. = FALSE)
  }
  # Parse the CSV body and coerce every column ending in "id" to character,
  # presumably to keep join keys type-stable across platforms -- TODO confirm.
  content <- response %>%
    httr::content() %>%
    utils::read.csv(text = ., stringsAsFactors = FALSE) %>%
    dplyr::mutate_at(dplyr::vars(dplyr::ends_with("id")), as.character) %>%
    tibble::tibble()
  return(content)
}
#' Clean Names
#'
#' Applies some name-cleaning heuristics to facilitate joins. These heuristics may include:
#' - removing periods and apostrophes
#' - removing common suffixes, such as Jr, Sr, II, III, IV
#' - converting to lowercase
#' - using `dp_name_mapping` to do common name substitutions, such as Mitch Trubisky to Mitchell Trubisky
#'
#' @param player_name a character (or character vector)
#' @param lowercase defaults to FALSE - if TRUE, converts to lowercase
#' @param convert_lastfirst converts names from "Last, First" to "First Last" (i.e. MFL style)
#' @param use_name_database uses internal name database to do common substitutions (Mitchell Trubisky to Mitch Trubisky etc)
#'
#' @examples
#' \donttest{
#' dp_cleannames(c("A.J. Green", "Odell Beckham Jr.", "Le'Veon Bell Sr."))
#'
#' dp_cleannames(c("Trubisky, Mitch", "Atwell, Chatarius", "Elliott, Zeke", "Elijah Moore"),
#' convert_lastfirst = TRUE,
#' use_name_database = TRUE)
#' }
#'
#' @seealso `dp_name_mapping`
#'
#' @return a character vector of cleaned names
#'
#' @export
dp_cleannames <- function(player_name, lowercase = FALSE, convert_lastfirst = TRUE, use_name_database = TRUE) {
  # Normalize player names for joining: validate inputs, optionally flip
  # "Last, First" ordering, strip suffixes/punctuation, and optionally map
  # known alternate spellings to their canonical form.
  checkmate::assert_character(player_name)
  checkmate::assert_flag(lowercase)
  checkmate::assert_flag(convert_lastfirst)
  checkmate::assert_flag(use_name_database)
  cleaned <- player_name
  # Flip "Last, First" (MFL style) into "First Last".
  if (convert_lastfirst) {
    cleaned <- stringr::str_replace_all(cleaned, "^(.+), (.+)$", "\\2 \\1")
  }
  # Drop generational suffixes plus apostrophes and periods, then collapse
  # any leftover runs of whitespace.
  cleaned <- stringr::str_remove_all(cleaned, "( Jr\\.$)|( Sr\\.$)|( III$)|( II$)|( IV$)|( V$)|(\\')|(\\.)")
  cleaned <- stringr::str_squish(cleaned)
  # Substitute canonical names from the internal mapping where one exists;
  # names without an entry fall through unchanged via coalesce().
  if (use_name_database) {
    cleaned <- unname(dplyr::coalesce(ffscrapr::dp_name_mapping[cleaned], cleaned))
  }
  if (lowercase) {
    cleaned <- tolower(cleaned)
  }
  return(cleaned)
}
#' Alternate name mappings
#'
#' A named character vector mapping common alternate names
#'
#' @examples
#' \donttest{
#' dp_name_mapping[c("Chatarius Atwell", "Robert Kelley")]
#' }
#'
#' @format A named character vector
#' \describe{
#' \item{name attribute}{The "alternate" name.}
#' \item{value attribute}{The "correct" name.}
#' }
#'
"dp_name_mapping"
| /R/1_import_dp.R | permissive | jpiburn/ffscrapr | R | false | false | 4,091 | r | #### DYNASTYPROCESS IMPORT ####
#' Import latest DynastyProcess values
#'
#' Fetches a copy of the latest DynastyProcess dynasty trade values sheets
#'
#' @param file one of `c("values.csv","values-players.csv","values-picks.csv")`
#'
#' @seealso <https://github.com/DynastyProcess/data>
#'
#' @examples
#' \donttest{
#' dp_values()
#' }
#'
#' @return a tibble of trade values from DynastyProcess
#'
#' @export
dp_values <- function(file = c("values.csv", "values-players.csv", "values-picks.csv")) {
file_name <- match.arg(file)
url_query <- glue::glue("https://github.com/DynastyProcess/data/raw/master/files/{file_name}")
response <- httr::RETRY("GET", url_query, httr::accept("text/csv"))
if (httr::http_error(response)) {
stop(glue::glue("GitHub request failed with error: <{httr::status_code(response)}> \n
while calling <{url_query}>"), call. = FALSE)
}
content <- response %>%
httr::content() %>%
utils::read.csv(text = ., stringsAsFactors = FALSE) %>%
dplyr::mutate_at(dplyr::vars(dplyr::ends_with("id")), as.character) %>%
tibble::tibble()
}
#' Import latest DynastyProcess player IDs
#'
#' Fetches a copy of the latest DynastyProcess player IDs csv
#'
#' @examples
#' \donttest{
#' dp_playerids()
#' }
#'
#' @seealso <https://github.com/DynastyProcess/data>
#'
#' @return a tibble of player IDs
#'
#' @export
dp_playerids <- function() {
url_query <- "https://github.com/DynastyProcess/data/raw/master/files/db_playerids.csv"
response <- httr::RETRY("GET", url_query, httr::accept("text/csv"))
if (httr::http_error(response)) {
stop(glue::glue("GitHub request failed with error: <{httr::status_code(response)}> \n
while calling <{url_query}>"), call. = FALSE)
}
content <- response %>%
httr::content() %>%
utils::read.csv(text = ., stringsAsFactors = FALSE) %>%
dplyr::mutate_at(dplyr::vars(dplyr::ends_with("id")), as.character) %>%
tibble::tibble()
return(content)
}
#' Clean Names
#'
#' Applies some name-cleaning heuristics to facilitate joins. These heuristics may include:
#' - removing periods and apostrophes
#' - removing common suffixes, such as Jr, Sr, II, III, IV
#' - converting to lowercase
#' - using `dp_name_mapping` to do common name substitutions, such as Mitch Trubisky to Mitchell Trubisky
#'
#' @param player_name a character (or character vector)
#' @param lowercase defaults to FALSE - if TRUE, converts to lowercase
#' @param convert_lastfirst converts names from "Last, First" to "First Last" (i.e. MFL style)
#' @param use_name_database uses internal name database to do common substitutions (Mitchell Trubisky to Mitch Trubisky etc)
#'
#' @examples
#' \donttest{
#' dp_cleannames(c("A.J. Green", "Odell Beckham Jr.", "Le'Veon Bell Sr."))
#'
#' dp_cleannames(c("Trubisky, Mitch", "Atwell, Chatarius", "Elliott, Zeke", "Elijah Moore"),
#' convert_lastfirst = TRUE,
#' use_name_database = TRUE)
#' }
#'
#' @seealso `dp_name_mapping`
#'
#' @return a character vector of cleaned names
#'
#' @export
dp_cleannames <- function(player_name, lowercase = FALSE, convert_lastfirst = TRUE, use_name_database = TRUE) {
checkmate::assert_character(player_name)
checkmate::assert_flag(lowercase)
checkmate::assert_flag(convert_lastfirst)
checkmate::assert_flag(use_name_database)
n <- player_name
if(convert_lastfirst) n <- stringr::str_replace_all(n, "^(.+), (.+)$", "\\2 \\1")
n <- stringr::str_remove_all(n, "( Jr\\.$)|( Sr\\.$)|( III$)|( II$)|( IV$)|( V$)|(\\')|(\\.)")
n <- stringr::str_squish(n)
if(use_name_database) n <- unname(dplyr::coalesce(ffscrapr::dp_name_mapping[n],n))
if(lowercase) n <- tolower(n)
return(n)
}
#' Alternate name mappings
#'
#' A named character vector mapping common alternate names
#'
#' @examples
#' \donttest{
#' dp_name_mapping[c("Chatarius Atwell", "Robert Kelley")]
#' }
#'
#' @format A named character vector
#' \describe{
#' \item{name attribute}{The "alternate" name.}
#' \item{value attribute}{The "correct" name.}
#' }
#'
"dp_name_mapping"
|
SHeader <- function(id) {
    # Build the dashboard header for the app. `id` follows the Shiny module
    # convention; NOTE(review): ns is created but never applied below --
    # confirm whether namespacing of header elements is needed.
    ns <- NS(id)
    # Title plus a logo linking to the faculty homepage; the tags$li with
    # class "dropdown" places custom content in the shinydashboard header bar.
    dashboardHeader(title = "Intelligent Data Analysis",
                  tags$li(a(href = 'https://www.fi.upm.es/',
                            img(src = 'Captura.png',
                                title = "a", height = "31px", width = "96px"),
                            style = "padding-top:10px; padding-bottom:10px;"),
                          class = "dropdown"))
} | /01 Hw1.2/00 Shinny app/00 Previous versions/Header.R | no_license | Data-Heretic/Intelligent-Data-Analysis | R | false | false | 420 | r | SHeader <- function(id) {
ns <- NS(id)
dashboardHeader(title = "Intelligent Data Analysis",
tags$li(a(href = 'https://www.fi.upm.es/',
img(src = 'Captura.png',
title = "a", height = "31px", width = "96px"),
style = "padding-top:10px; padding-bottom:10px;"),
class = "dropdown"))
} |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ec2_operations.R
\name{ec2_describe_transit_gateway_connects}
\alias{ec2_describe_transit_gateway_connects}
\title{Describes one or more Connect attachments}
\usage{
ec2_describe_transit_gateway_connects(TransitGatewayAttachmentIds,
Filters, MaxResults, NextToken, DryRun)
}
\arguments{
\item{TransitGatewayAttachmentIds}{The IDs of the attachments.}
\item{Filters}{One or more filters. The possible values are:
\itemize{
\item \code{options.protocol} - The tunnel protocol (\code{gre}).
\item \code{state} - The state of the attachment (\code{initiating} |
\code{initiatingRequest} | \code{pendingAcceptance} | \code{rollingBack} |
\code{pending} | \code{available} | \code{modifying} | \code{deleting} | \code{deleted} |
\code{failed} | \code{rejected} | \code{rejecting} | \code{failing}).
\item \code{transit-gateway-attachment-id} - The ID of the Connect attachment.
\item \code{transit-gateway-id} - The ID of the transit gateway.
\item \code{transport-transit-gateway-attachment-id} - The ID of the transit
gateway attachment from which the Connect attachment was created.
}}
\item{MaxResults}{The maximum number of results to return with a single call. To retrieve
the remaining results, make another call with the returned \code{nextToken}
value.}
\item{NextToken}{The token for the next page of results.}
\item{DryRun}{Checks whether you have the required permissions for the action, without
actually making the request, and provides an error response. If you have
the required permissions, the error response is \code{DryRunOperation}.
Otherwise, it is \code{UnauthorizedOperation}.}
}
\description{
Describes one or more Connect attachments.
}
\section{Request syntax}{
\preformatted{svc$describe_transit_gateway_connects(
TransitGatewayAttachmentIds = list(
"string"
),
Filters = list(
list(
Name = "string",
Values = list(
"string"
)
)
),
MaxResults = 123,
NextToken = "string",
DryRun = TRUE|FALSE
)
}
}
\keyword{internal}
| /cran/paws.compute/man/ec2_describe_transit_gateway_connects.Rd | permissive | sanchezvivi/paws | R | false | true | 2,075 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ec2_operations.R
\name{ec2_describe_transit_gateway_connects}
\alias{ec2_describe_transit_gateway_connects}
\title{Describes one or more Connect attachments}
\usage{
ec2_describe_transit_gateway_connects(TransitGatewayAttachmentIds,
Filters, MaxResults, NextToken, DryRun)
}
\arguments{
\item{TransitGatewayAttachmentIds}{The IDs of the attachments.}
\item{Filters}{One or more filters. The possible values are:
\itemize{
\item \code{options.protocol} - The tunnel protocol (\code{gre}).
\item \code{state} - The state of the attachment (\code{initiating} |
\code{initiatingRequest} | \code{pendingAcceptance} | \code{rollingBack} |
\code{pending} | \code{available} | \code{modifying} | \code{deleting} | \code{deleted} |
\code{failed} | \code{rejected} | \code{rejecting} | \code{failing}).
\item \code{transit-gateway-attachment-id} - The ID of the Connect attachment.
\item \code{transit-gateway-id} - The ID of the transit gateway.
\item \code{transport-transit-gateway-attachment-id} - The ID of the transit
gateway attachment from which the Connect attachment was created.
}}
\item{MaxResults}{The maximum number of results to return with a single call. To retrieve
the remaining results, make another call with the returned \code{nextToken}
value.}
\item{NextToken}{The token for the next page of results.}
\item{DryRun}{Checks whether you have the required permissions for the action, without
actually making the request, and provides an error response. If you have
the required permissions, the error response is \code{DryRunOperation}.
Otherwise, it is \code{UnauthorizedOperation}.}
}
\description{
Describes one or more Connect attachments.
}
\section{Request syntax}{
\preformatted{svc$describe_transit_gateway_connects(
TransitGatewayAttachmentIds = list(
"string"
),
Filters = list(
list(
Name = "string",
Values = list(
"string"
)
)
),
MaxResults = 123,
NextToken = "string",
DryRun = TRUE|FALSE
)
}
}
\keyword{internal}
|
library(networkD3)
library(igraph)
####################################################
### get parameters
### Usage: Rscript <script> <signal_matrix> <tree_csv> <color_range_file> <node_name_list> <start_col>
args = commandArgs(trailingOnly=TRUE)
signal_matrix_file = args[1]
cd_tree = args[2]
### NOTE(review): signal_range_color_file is read from the command line but
### never used in this script -- confirm whether it is still needed.
signal_range_color_file = args[3]
signal_input_list = args[4]
signal_matrix_start_col = as.numeric(args[5])
####################################################
############ read input files
####################################################
### read signal matrix file
### (comment.char='~' so '#' characters inside the data are not treated as comments)
signal_matrix_od = as.matrix(read.table(signal_matrix_file, header=FALSE, comment.char='~'))
### extract signal matrix without info
### (columns before signal_matrix_start_col hold metadata such as the set name)
signal_matrix = signal_matrix_od[ , signal_matrix_start_col:dim(signal_matrix_od)[2] ]
### get index_set name
### (first column; used below as the per-row output-file prefix)
index_set_name = signal_matrix_od[,1]
###### read colnames file
colname_file = read.table(signal_input_list, header=F)
colname = colname_file[,1]
### read cell development tree file
### (two-column CSV of parent,child edges)
tree = read.table(cd_tree, header = F, sep=',')
tree.df = as.data.frame(tree)
colnames(tree.df) = c('Node.1', 'Node.2')
### get color list
### NOTE(review): this aliases the signal matrix directly; the commented-out
### expression suggests a white placeholder matrix was once used instead.
signal_matrix_color = signal_matrix#matrix("#FFFFFF", nrow=dim(signal_matrix)[1], ncol=dim(signal_matrix)[2])
### plot trees -- one PDF per row of the color matrix
for (i in seq(1,dim(signal_matrix_color)[1])){
	### get color vector from color matrix
	value_col = signal_matrix_color[i,]
	### get tree
	### (rebuilt each iteration; the commented-out alternative blanked the labels,
	### so the name reassignment below is currently a no-op)
	tree.igraph = graph.data.frame(tree.df, directed=TRUE)
	tree_names = V(tree.igraph)$name
	V(tree.igraph)$name = tree_names#rep('', length(tree_names))
	### sort colnames by tree nodes id
	### (match() aligns the row's color values with graph vertex order)
	match_id = match(tree_names, colname)
	V(tree.igraph)$color = value_col[match_id]
	V(tree.igraph)$size = 25
	### one output file per index set, laid out as a rooted tree from node 1
	pdf(paste(index_set_name[i], '.', signal_input_list, '.tree.pdf', sep = ''), width = 12, height = 12)
	plot(tree.igraph, layout = layout_as_tree(tree.igraph, root=c(1)))
	dev.off()
}
| /bin/plot_tree_multi_color.1.MetaIS.R | permissive | guanjue/snapshot | R | false | false | 1,780 | r | library(networkD3)
library(igraph)
####################################################
### get parameters
args = commandArgs(trailingOnly=TRUE)
signal_matrix_file = args[1]
cd_tree = args[2]
signal_range_color_file = args[3]
signal_input_list = args[4]
signal_matrix_start_col = as.numeric(args[5])
####################################################
############ read input files
####################################################
### read signal matrix file
signal_matrix_od = as.matrix(read.table(signal_matrix_file, header=FALSE, comment.char='~'))
### extract signal matrix without info
signal_matrix = signal_matrix_od[ , signal_matrix_start_col:dim(signal_matrix_od)[2] ]
### get index_set name
index_set_name = signal_matrix_od[,1]
###### read colnames file
colname_file = read.table(signal_input_list, header=F)
colname = colname_file[,1]
### read cell development tree file
tree = read.table(cd_tree, header = F, sep=',')
tree.df = as.data.frame(tree)
colnames(tree.df) = c('Node.1', 'Node.2')
### get color list
signal_matrix_color = signal_matrix#matrix("#FFFFFF", nrow=dim(signal_matrix)[1], ncol=dim(signal_matrix)[2])
### plot trees
for (i in seq(1,dim(signal_matrix_color)[1])){
### get color vector from color matrix
value_col = signal_matrix_color[i,]
### get tree
tree.igraph = graph.data.frame(tree.df, directed=TRUE)
tree_names = V(tree.igraph)$name
V(tree.igraph)$name = tree_names#rep('', length(tree_names))
### sort colnames by tree nodes id
match_id = match(tree_names, colname)
V(tree.igraph)$color = value_col[match_id]
V(tree.igraph)$size = 25
pdf(paste(index_set_name[i], '.', signal_input_list, '.tree.pdf', sep = ''), width = 12, height = 12)
plot(tree.igraph, layout = layout_as_tree(tree.igraph, root=c(1)))
dev.off()
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wflow_run.R
\name{wflow_run}
\alias{wflow_run}
\title{Run the code}
\usage{
wflow_run(file = NULL, verbose = TRUE, project = ".")
}
\arguments{
\item{file}{character (default: \code{NULL}). The R Markdown file to execute.
Must have file extension Rmd or rmd. If \code{NULL}, the most recently
modified Rmd file will be executed.}
\item{verbose}{logical (default: \code{TRUE}). Should the lines of code (and
their output) be echoed in the R console as they are executed? This
argument is passed directly to the argument \code{echo} of the function
\code{\link{source}}.}
\item{project}{character (default: ".") By default the function assumes the
current working directory is within the project. If this is not true,
you'll need to provide the path to the project directory.}
}
\value{
Invisibly returns the path to the Rmd file that was executed
}
\description{
\code{wflow_run} executes the code chunks of an R Markdown file in the
current R session without affecting any of the website files. This is meant
to be used while interactively developing an analysis. It does \strong{not}
change the working directory, isolate the computation from the current R
session, nor set the seed of the random number generator. This is analogous
to the RStudio option "Run all" to run all the code chunks. Use
\code{\link{wflow_publish}} when you are ready to add the results to the
website.
}
\examples{
\dontrun{
# Run the most recently modified Rmd file
wflow_run()
# Run a specific Rmd file
wflow_run("analysis/file.Rmd")
}
}
\seealso{
\code{\link{wflow_build}} with argument \code{local = TRUE},
\code{\link{source}} with argument \code{echo = TRUE}
}
| /man/wflow_run.Rd | permissive | workflowr/workflowr | R | false | true | 1,729 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wflow_run.R
\name{wflow_run}
\alias{wflow_run}
\title{Run the code}
\usage{
wflow_run(file = NULL, verbose = TRUE, project = ".")
}
\arguments{
\item{file}{character (default: \code{NULL}). The R Markdown file to execute.
Must have file extension Rmd or rmd. If \code{NULL}, the most recently
modified Rmd file will be executed.}
\item{verbose}{logical (default: \code{TRUE}). Should the lines of code (and
their output) be echoed in the R console as they are executed? This
argument is passed directly to the argument \code{echo} of the function
\code{\link{source}}.}
\item{project}{character (default: ".") By default the function assumes the
current working directory is within the project. If this is not true,
you'll need to provide the path to the project directory.}
}
\value{
Invisibly returns the path to the Rmd file that was executed
}
\description{
\code{wflow_run} executes the code chunks of an R Markdown file in the
current R session without affecting any of the website files. This is meant
to be used while interactively developing an analysis. It does \strong{not}
change the working directory, isolate the computation from the current R
session, nor set the seed of the random number generator. This is analogous
to the RStudio option "Run all" to run all the code chunks. Use
\code{\link{wflow_publish}} when you are ready to add the results to the
website.
}
\examples{
\dontrun{
# Run the most recently modified Rmd file
wflow_run()
# Run a specific Rmd file
wflow_run("analysis/file.Rmd")
}
}
\seealso{
\code{\link{wflow_build}} with argument \code{local = TRUE},
\code{\link{source}} with argument \code{echo = TRUE}
}
|
library(ggplot2)
library(dplyr)
library(scales)
library(ggrepel)
library(RColorBrewer)
source('code/byte_formater.R')
wikiSizeVocab <- function() {
  # Plot size comparisons between archived "wiki small" files and their live
  # web counterparts, plus a vocabulary-growth comparison. Reads CSVs from
  # output_files/ and writes PNGs to images/ as a side effect.
  sizes <- read.csv('output_files/wsmall_sizes.csv')
  # Rows with new_size == -1 mark files whose live version could not be fetched.
  plot <- sizes[sizes$new_size != -1,]
  # NOTE(review): this ggplot object is neither printed nor passed to ggsave();
  # ggsave() defaults to last_plot(), which is only updated when a plot is
  # displayed -- confirm these charts are actually being written as intended.
  ggplot(plot) + geom_line(aes(idx,dif)) + annotate("text", x=3000, y=805000, label= paste(length(plot$idx),'files total'))+
    scale_y_continuous(limits = c(min(plot$dif),max(plot$dif)),breaks = pretty_breaks(n=7),labels = Kb) +
    labs(x='Wiki Small File',y='File Size Differnce From Live Web Version In Bytes',title='Wiki Small Files Whoes Live Web Counterpart Could Be Retrieved',color='none')
  ggsave('images/wikismall_dif.png')
  # Files with no retrievable live counterpart: plot their archived sizes.
  plot2 <- sizes[sizes$new_size == -1,]
  ggplot(plot2) + geom_line(aes(idx,old_size)) + annotate("text", x=5000, y=300000, label= paste(length(plot2$idx),'files total'))+
    scale_y_continuous(limits = c(min(plot2$old_size),max(plot2$old_size)),breaks = pretty_breaks(n=8),labels = Kb) +
    labs(x='Wiki Small File',y='File Size In Bytes',title='Wiki Small Files Whoes Live Web Counterpart Could Be Not Retrieved',color='none')
  ggsave('images/wikismall_nolive.png')
  # Vocabulary growth: word count vs. vocab count, live web vs. data set.
  wsv <- read.csv('output_files/wsmall-vocab-combines.csv')
  wsvl <- wsv[wsv$which == 'Live Web',]
  wsvd <- wsv[wsv$which == 'Data Set',]
  ggplot() +
    geom_line(data=wsvl,aes(x=wc,y=vc,color=which)) +
    geom_line(data=wsvd,aes(x=wc,y=vc,color=which)) +
    scale_x_continuous(limits = c(min(wsvl$wc),max(wsvl$wc)),breaks = pretty_breaks(n=7),labels = comma) +
    scale_y_continuous(limits = c(min(wsvl$vc),max(wsvl$vc)),breaks = pretty_breaks(n=6),labels = comma) +
    scale_colour_brewer('Vocab Growth',palette='Dark2') +
    labs(title='Vocabulary Wiki Small Live Web VS Data Set',x = 'Word Count', y = 'Vocab Count')
  ggsave('images/wikismall_vocab_compare.png')
}
| /assignments/a5/code/wiki_plotter.R | no_license | N0taN3rd/cs834-f16 | R | false | false | 1,849 | r | library(ggplot2)
library(dplyr)
library(scales)
library(ggrepel)
library(RColorBrewer)
source('code/byte_formater.R')
wikiSizeVocab <- function() {
sizes <- read.csv('output_files/wsmall_sizes.csv')
plot <- sizes[sizes$new_size != -1,]
ggplot(plot) + geom_line(aes(idx,dif)) + annotate("text", x=3000, y=805000, label= paste(length(plot$idx),'files total'))+
scale_y_continuous(limits = c(min(plot$dif),max(plot$dif)),breaks = pretty_breaks(n=7),labels = Kb) +
labs(x='Wiki Small File',y='File Size Differnce From Live Web Version In Bytes',title='Wiki Small Files Whoes Live Web Counterpart Could Be Retrieved',color='none')
ggsave('images/wikismall_dif.png')
plot2 <- sizes[sizes$new_size == -1,]
ggplot(plot2) + geom_line(aes(idx,old_size)) + annotate("text", x=5000, y=300000, label= paste(length(plot2$idx),'files total'))+
scale_y_continuous(limits = c(min(plot2$old_size),max(plot2$old_size)),breaks = pretty_breaks(n=8),labels = Kb) +
labs(x='Wiki Small File',y='File Size In Bytes',title='Wiki Small Files Whoes Live Web Counterpart Could Be Not Retrieved',color='none')
ggsave('images/wikismall_nolive.png')
wsv <- read.csv('output_files/wsmall-vocab-combines.csv')
wsvl <- wsv[wsv$which == 'Live Web',]
wsvd <- wsv[wsv$which == 'Data Set',]
ggplot() +
geom_line(data=wsvl,aes(x=wc,y=vc,color=which)) +
geom_line(data=wsvd,aes(x=wc,y=vc,color=which)) +
scale_x_continuous(limits = c(min(wsvl$wc),max(wsvl$wc)),breaks = pretty_breaks(n=7),labels = comma) +
scale_y_continuous(limits = c(min(wsvl$vc),max(wsvl$vc)),breaks = pretty_breaks(n=6),labels = comma) +
scale_colour_brewer('Vocab Growth',palette='Dark2') +
labs(title='Vocabulary Wiki Small Live Web VS Data Set',x = 'Word Count', y = 'Vocab Count')
ggsave('images/wikismall_vocab_compare.png')
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/shrinkVARcoef.R
\name{shrinkVARcoef}
\alias{shrinkVARcoef}
\title{Semiparametric Bayesian Shrinkage Estimator for
Multivariate Regression}
\usage{
shrinkVARcoef(Y, X, lambda, dof = Inf, prior_type = "NCJ",
TolDRes = 1e-04, m0 = ncol(Y))
}
\arguments{
\item{Y}{An N x K matrix of dependent variables.}
\item{X}{An N x M matrix of regressors.}
\item{lambda}{A shrinkage intensity parameter value between 0~1.}
\item{dof}{Degree of freedom for multivariate t-distribution.
If NULL or Inf, then use multivariate normal distribution.}
\item{prior_type}{"NCJ" for non-conjugate prior and "CJ" for conjugate
prior for scale matrix Sigma.}
\item{TolDRes}{Tolerance parameter for stopping criterion.}
\item{m0}{A hyperparameter for inverse Wishart distribution for Sigma}
}
\description{
Compute the semiparametric Bayesian shrinkage estimator of Psi and Sigma
for a given shrinkage parameter lambda.
The function is a private function for lm_semi_Bayes_PCV() and
lm_ShVAR_KCV().
}
\references{
N. Lee, H. Choi, and S.-H. Kim (2016). Bayes shrinkage
estimation for high-dimensional VAR models with scale mixture of normal
distributions for noise. Computational Statistics & Data Analysis 101,
250-276. doi: 10.1016/j.csda.2016.03.007
}
| /man/shrinkVARcoef.Rd | no_license | Allisterh/VARshrink | R | false | true | 1,313 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/shrinkVARcoef.R
\name{shrinkVARcoef}
\alias{shrinkVARcoef}
\title{Semiparametric Bayesian Shrinkage Estimator for
Multivariate Regression}
\usage{
shrinkVARcoef(Y, X, lambda, dof = Inf, prior_type = "NCJ",
TolDRes = 1e-04, m0 = ncol(Y))
}
\arguments{
\item{Y}{An N x K matrix of dependent variables.}
\item{X}{An N x M matrix of regressors.}
\item{lambda}{A shrinkage intensity parameter value between 0~1.}
\item{dof}{Degree of freedom for multivariate t-distribution.
If NULL or Inf, then use multivariate normal distribution.}
\item{prior_type}{"NCJ" for non-conjugate prior and "CJ" for conjugate
prior for scale matrix Sigma.}
\item{TolDRes}{Tolerance parameter for stopping criterion.}
\item{m0}{A hyperparameter for inverse Wishart distribution for Sigma}
}
\description{
Compute the semiparametric Bayesian shrinkage estimator of Psi and Sigma
for a given shrinkage parameter lambda.
The function is a private function for lm_semi_Bayes_PCV() and
lm_ShVAR_KCV().
}
\references{
N. Lee, H. Choi, and S.-H. Kim (2016). Bayes shrinkage
estimation for high-dimensional VAR models with scale mixture of normal
distributions for noise. Computational Statistics & Data Analysis 101,
250-276. doi: 10.1016/j.csda.2016.03.007
}
|
#' Digit analysis
#'
#' @param x A vector or matrix of numeric values.
#' @param type Type of digit analysis ('benford' or 'terminal')
#'
#' @return
#' @export
#'
#' @examples
#' digit_analysis(c(1.234, 65.4321, 53.222), type = 'terminal')
#' digit_analysis(c(1.234, 65.4321, 53.222), type = 'benford')
digit_analysis <- function(x, type = 'terminal') {
  # Chi-square goodness-of-fit test of observed digit frequencies against the
  # expected distribution: uniform for terminal digits, Benford's law for
  # leading digits. Returns one upper-tail p-value per set of digits.
  if(!(is.matrix(x) | is.vector(x))) {
    stop('Please only specify a vector or matrix.
  If specifying a matrix, ensure results from one set of digits are in the
  rows!')
  }
  if(!(type == 'terminal' | type == 'benford')) {
    stop("Only 'benford' and 'terminal' allowed as types.")
  }
  # Terminal-digit tests span digits 0-9 (10 cells); Benford spans 1-9 (9 cells).
  df <- if (type == 'terminal') 10 - 1 else 9 - 1
  # Treat a bare vector as a single set of digits, i.e. one row.
  # NOTE: the previous version used apply(t(x), 1, ...), which iterated the
  # COLUMNS of a matrix input, contradicting the documented row orientation.
  if (is.vector(x)) {
    x <- matrix(x, nrow = 1)
  }
  # One chi-square statistic per row.
  chis <- apply(x, 1, function(y) {
    observed <- digit_counter(y, type)
    expected <- expected_digit_counter(y, type)
    sum((observed - expected)^2 / expected)
  })
  # Small p-values indicate digits deviating from the expected distribution.
  pval <- pchisq(q = chis, df = df,
                 lower.tail = FALSE)
  return(pval)
}
digit_counter <- function(x, type) {
  # Tally the digit of interest for each value in `x`, returning a named
  # numeric vector with a count for every possible digit (zeros included).
  if (!is.vector(x)) stop("Currently only works with vectors.")
  # Shift decimal points away so every value is a whole number first.
  x <- decimator(x)
  if (type == 'terminal') {
    # Terminal-digit test: last digit, which may be any of 0-9.
    digit_labels <- as.character(0:9)
    digit_pattern <- "\\d$"
  } else if (type == 'benford') {
    # Benford test: first digit, which is never 0 after decimator().
    digit_labels <- as.character(1:9)
    digit_pattern <- "\\d"
  } else {
    stop("ERROR")
  }
  tallies <- rep(0, length(digit_labels))
  names(tallies) <- digit_labels
  # table() only reports digits that occur; fold those counts into the
  # zero-initialized vector so absent digits keep an explicit count of 0.
  observed <- table(regmatches(x, regexpr(digit_pattern, x)))
  tallies[names(observed)] <- observed
  return(tallies)
}
expected_digit_counter <- function(x, type = 'terminal') {
  # Expected digit counts for a sample of length(x) values under the null
  # distribution: uniform across 0-9 for terminal digits, Benford's law for
  # leading digits.
  # FIX: the default was previously `type = type`, a self-referential promise
  # that errored whenever `type` was omitted; 'terminal' matches the default
  # used by digit_analysis().
  if (!is.vector(x)) stop("Currently only works with vectors.")
  if (!(type == 'terminal' | type == 'benford')) {
    stop("Only benford and terminal allowed as types.")
  }
  if (type == 'terminal') {
    # Uniform expectation: the same count in each of the 10 cells.
    cell <- length(x) / 10
    counts <- rep(cell, 10)
  } else if (type == 'benford') {
    # Benford's law: P(leading digit = d) = log10((d + 1) / d), d in 1..9.
    d <- 1:9
    counts <- length(x) * log(x = ((d + 1) / d), base = 10)
  } else {
    stop("Something went awry.")
  }
  return(counts)
}
decimator <- function(x) {
  # Shift each value's decimal point fully to the right so the terminal digit
  # becomes the last character, e.g. 1.25 -> 125. Relies on as.character()
  # formatting; scientific notation for very large/small magnitudes would
  # break the position arithmetic -- TODO confirm inputs stay in fixed notation.
  decimal <- regexpr('\\.', as.character(x))
  length <- nchar(as.character(x))
  decimated <- 10 ^ (length - decimal) * x
  # Make sure integers remain
  # (regexpr returns -1 when no decimal point is present; restore those values)
  decimated[decimal == -1] <- x[decimal == -1]
  return(decimated)
} | /assets/functions/digit_analysis.R | permissive | chartgerink/dissertation | R | false | false | 2,585 | r | #' Digit analysis
#'
#' @param x A vector or matrix of numeric values.
#' @param type Type of digit analysis ('benford' or 'terminal')
#'
#' @return
#' @export
#'
#' @examples
#' digit_analysis(c(1.234, 65.4321, 53.222), type = 'terminal')
#' digit_analysis(c(1.234, 65.4321, 53.222), type = 'benford')
digit_analysis <- function(x, type = 'terminal') {
  if(!(is.matrix(x) | is.vector(x))) {
    stop('Please only specify a vector or matrix.
  If specifying a matrix, ensure results from one set of digits are in the
  rows!')
  }
  if(!(type == 'terminal' | type == 'benford')) {
    stop("Only 'benford' and 'terminal' allowed as types.")
  }
  # Degrees of freedom: number of digit categories minus one
  # (10 categories for 'terminal', 9 for 'benford').
  df <- if (type == 'terminal') 10 - 1 else 9 - 1
  # The two branches of the original differed only in the type string, so a
  # single code path suffices: for each set of digits, compute Pearson's
  # chi-square statistic sum((O - E)^2 / E) against the null expectation.
  chis <- apply(t(x), 1, function(y) {
    obs <- digit_counter(y, type)
    exp <- expected_digit_counter(y, type)
    sum((obs - exp)^2 / exp)
  })
  # Upper-tail p-value: small values flag digit patterns deviating from the
  # null model.
  pval <- pchisq(q = chis, df = df,
                 lower.tail = FALSE)
  return(pval)
}
# Count digit occurrences in a numeric vector.
#
# x:    numeric vector; rescaled to whole numbers by decimator() so digit
#       positions are well defined.
# type: 'terminal' counts each value's last digit (0-9);
#       'benford' counts each value's first digit (1-9).
#
# Returns a named vector of counts with one entry per possible digit;
# digits that never occur keep a count of zero.
digit_counter <- function(x, type) {
  if(!is.vector(x)) stop("Currently only works with vectors.")
  x <- decimator(x)
  if (type == 'terminal') {
    counts <- rep(0, 10)
    names(counts) <- seq(0, 9)
    # "\\d$" picks out the final digit of each number's character form.
    digits <- table(regmatches(x, regexpr("\\d$", x)))
    counts[names(digits)] <- digits
  } else if (type == 'benford') {
    counts <- rep(0, 9)
    names(counts) <- seq(1, 9)
    # "\\d" picks out the leading digit of each number's character form.
    digits <- table(regmatches(x, regexpr("\\d", x)))
    counts[names(digits)] <- digits
  } else {
    # Fix: replace the original uninformative stop("ERROR") with the same
    # wording used by the sibling functions.
    stop("Only 'benford' and 'terminal' allowed as types.")
  }
  return(counts)
}
# Expected digit counts under the null model of a digit test: uniform
# terminal digits for 'terminal', Benford's law for leading digits for
# 'benford'. Only the length of `x` matters; the result sums to length(x).
expected_digit_counter <- function(x, type = type) {
  if (!is.vector(x)) stop("Currently only works with vectors.")
  if (!(type == 'terminal' | type == 'benford')) {
    stop("Only benford and terminal allowed as types.")
  }
  n <- length(x)
  if (type == 'terminal') {
    # Uniform null: all 10 terminal digits are equally likely.
    expected <- rep(n / 10, 10)
  } else if (type == 'benford') {
    # Benford null: P(leading digit = d) = log10((d + 1) / d).
    leading <- 1:9
    expected <- n * log(x = ((leading + 1) / leading), base = 10)
  } else {
    stop("Something went awry.")
  }
  return(expected)
}
# Shift each value left of the decimal point so it becomes a whole number
# (e.g. 3.25 -> 325); values without a decimal point pass through unchanged.
decimator <- function(x) {
  chars <- as.character(x)
  # Position of the decimal point in each value's character form
  # (-1 when there is none).
  dot_pos <- regexpr('\\.', chars)
  n_chars <- nchar(chars)
  # Digits after the dot = n_chars - dot_pos, so scaling by that power of
  # ten clears the fractional part.
  shifted <- 10 ^ (n_chars - dot_pos) * x
  # Values with no decimal point are already whole; keep them as-is.
  shifted[dot_pos == -1] <- x[dot_pos == -1]
  shifted
}
# Exploratory plot 3: overlay the three energy sub-metering series for
# 1-2 Feb 2007 from the household power consumption data set and save the
# figure as plot3.png.

# Read the full data set; fields are ';'-separated and '?' marks missing
# values. Fix: the original used `na.string`, which only works via R's
# partial argument matching -- spelled out as `na.strings`.
data <- read.table("./household_power_consumption.txt", sep = ";", dec = ".", header = TRUE, 
                   col.names = c("Date","Time", "Global_active_power", "Global_reactive_power", 
                                 "Voltage", "Global_intensity", "Sub_metering_1", "Sub_metering_2", 
                                 "Sub_metering_3"), na.strings = "?")
# Keep only the two target days (dates are in d/m/Y form).
data_subset <- data[(data$Date == "1/2/2007" | data$Date == "2/2/2007"),]
# Combine the Date and Time columns into a single POSIXct timestamp.
data_subset$Date_Time <- as.POSIXct(strptime(paste(data_subset$Date, data_subset$Time, sep = " "), 
                                             "%d/%m/%Y %H:%M:%S"))
# Draw sub-metering 1 as the base series, then overlay 2 (red) and 3 (blue).
plot(data_subset$Date_Time, data_subset$Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering")
lines(data_subset$Date_Time, data_subset$Sub_metering_2, type = "l", col = "red")
lines(data_subset$Date_Time, data_subset$Sub_metering_3, type = "l", col = "blue")
legend("topright", lty = 1, col = c("black", "red", "blue"), cex = 0.8, legend = c("Sub_metering_1", 
                                                                                  "Sub_metering_2", "Sub_metering_3"))
# Copy the on-screen plot to a 480x480 PNG and close the device.
dev.copy(png, file = "plot3.png", width = 480, height = 480)
dev.off()
| /plot3.R | no_license | ywsheng227/exploratory_data_analysis_1 | R | false | false | 968 | r | data <- read.table("./household_power_consumption.txt", sep = ";", dec = ".", header = TRUE,
col.names = c("Date","Time", "Global_active_power", "Global_reactive_power",
"Voltage", "Global_intensity", "Sub_metering_1", "Sub_metering_2",
"Sub_metering_3"), na.string = "?")
data_subset <- data[(data$Date == "1/2/2007" | data$Date == "2/2/2007"),]
data_subset$Date_Time <- as.POSIXct(strptime(paste(data_subset$Date, data_subset$Time, sep = " "),
"%d/%m/%Y %H:%M:%S"))
plot(data_subset$Date_Time, data_subset$Sub_metering_1, type = "l", xlab = "", ylab = "Energy sub metering")
lines(data_subset$Date_Time, data_subset$Sub_metering_2, type = "l", col = "red")
lines(data_subset$Date_Time, data_subset$Sub_metering_3, type = "l", col = "blue")
legend("topright", lty = 1, col = c("black", "red", "blue"), cex = 0.8, legend = c("Sub_metering_1",
"Sub_metering_2", "Sub_metering_3"))
dev.copy(png, file = "plot3.png", width = 480, height = 480)
dev.off()
|
# 5. How have emissions from motor vehicle sources changed from 1999–2008 in
# Baltimore City?
# NOTE(review): readRDS() is part of base R; the RDS package loaded here
# looks unnecessary for this script -- confirm before removing.
library(RDS)
# NEI: per-record PM2.5 emissions; SCC: source classification lookup.
NEI <- readRDS("exdata-data-NEI_data/summarySCC_PM25.rds")
SCC <- readRDS("exdata-data-NEI_data/Source_Classification_Code.rds")
library(ggplot2)
# Identify vehicle-related source codes via the SCC level-two description,
# then keep only NEI records with those codes.
v <- grepl("vehicle", SCC$SCC.Level.Two, ignore.case=TRUE)
vSCC <- SCC[v,]$SCC
vNEI <- NEI[NEI$SCC %in% vSCC,]
# Restrict to Baltimore City (fips 24510).
baltVNEI <- vNEI[vNEI$fips=="24510",]
png("plot5.png",width=480,height=480,units="px")
# Bar chart of total vehicle emissions per survey year.
g <- ggplot(baltVNEI,aes(factor(year),Emissions)) + 
  geom_bar(stat="identity",fill="grey",width=0.75) + 
  theme_bw() + guides(fill=FALSE) + 
  labs(x="year", y=expression("Total PM"[2.5]*" Emission (10^5 Tons)")) + 
  labs(title=expression("PM"[2.5]*" Motor Vehicle Source Emissions in Baltimore from 1999-2008"))
print(g)
dev.off() | /MyNotes/04 - Exploratory Data Analysis/CourseProject2/plot5.R | no_license | vitorefigenio/datasciencecoursera | R | false | false | 791 | r | # 5. How have emissions from motor vehicle sources changed from 1999–2008 in
# Baltimore City?
# NOTE(review): readRDS() is part of base R; the RDS package loaded here
# looks unnecessary for this script -- confirm before removing.
library(RDS)
# NEI: per-record PM2.5 emissions; SCC: source classification lookup.
NEI <- readRDS("exdata-data-NEI_data/summarySCC_PM25.rds")
SCC <- readRDS("exdata-data-NEI_data/Source_Classification_Code.rds")
library(ggplot2)
# Identify vehicle-related source codes via the SCC level-two description,
# then keep only NEI records with those codes.
v <- grepl("vehicle", SCC$SCC.Level.Two, ignore.case=TRUE)
vSCC <- SCC[v,]$SCC
vNEI <- NEI[NEI$SCC %in% vSCC,]
# Restrict to Baltimore City (fips 24510).
baltVNEI <- vNEI[vNEI$fips=="24510",]
png("plot5.png",width=480,height=480,units="px")
# Bar chart of total vehicle emissions per survey year.
g <- ggplot(baltVNEI,aes(factor(year),Emissions)) + 
  geom_bar(stat="identity",fill="grey",width=0.75) + 
  theme_bw() + guides(fill=FALSE) + 
  labs(x="year", y=expression("Total PM"[2.5]*" Emission (10^5 Tons)")) + 
  labs(title=expression("PM"[2.5]*" Motor Vehicle Source Emissions in Baltimore from 1999-2008"))
print(g)
dev.off()
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/exports.R
\name{get_jira_server_info}
\alias{get_jira_server_info}
\title{Get the JIRA server information as a \code{data.frame}}
\usage{
get_jira_server_info(
domain = NULL,
username = NULL,
password = NULL,
verbose = FALSE
)
}
\arguments{
\item{domain}{Custom JIRA domain URL as for
example \href{https://bugreports.qt.io}{https://bugreports.qt.io}.
Can be passed as a parameter or can be previously defined through
the \code{save_jira_credentials()} function.}
\item{username}{Username used to authenticate the access to the JIRA \code{domain}.
If both username and password are not passed no authentication is made and only
public domains can be accessed. Optional parameter.}
\item{password}{Password used to authenticate the access to the JIRA \code{domain}.
If both username and password are not passed no authentication is made and only
public domains can be accessed. Optional parameter.}
\item{verbose}{Explicitly informs the user of the JIRA API request process.}
}
\value{
Returns a \code{data.frame} with all the JIRA server information
}
\description{
Makes a request to JIRA's latest REST API to retrieve all
the necessary information regarding the JIRA server version.
}
\section{Warning}{
The function works with the latest JIRA REST API and to work you need to have
a internet connection. Calling the function too many times might block your
access, you will receive a 403 error code. To unblock your access you will
have to access interactively through your browser, signing out and signing
in again, and might even have to enter a CAPTCHA at https://jira.yourdomain.com/secure/Dashboard.jspa.
This only happens if the API is called upon multiple times in a short period of time.
}
\examples{
\dontrun{
get_jira_server_info("https://bugreports.qt.io")
}
}
\seealso{
For more information about Atlassians JIRA API go
to \href{https://docs.atlassian.com/software/jira/docs/api/REST/8.9.1/}{JIRA API Documentation}
}
| /man/get_jira_server_info.Rd | permissive | amishakov/JirAgileR | R | false | true | 2,029 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/exports.R
\name{get_jira_server_info}
\alias{get_jira_server_info}
\title{Get the JIRA server information as a \code{data.frame}}
\usage{
get_jira_server_info(
domain = NULL,
username = NULL,
password = NULL,
verbose = FALSE
)
}
\arguments{
\item{domain}{Custom JIRA domain URL as for
example \href{https://bugreports.qt.io}{https://bugreports.qt.io}.
Can be passed as a parameter or can be previously defined through
the \code{save_jira_credentials()} function.}
\item{username}{Username used to authenticate the access to the JIRA \code{domain}.
If both username and password are not passed no authentication is made and only
public domains can be accessed. Optional parameter.}
\item{password}{Password used to authenticate the access to the JIRA \code{domain}.
If both username and password are not passed no authentication is made and only
public domains can be accessed. Optional parameter.}
\item{verbose}{Explicitly informs the user of the JIRA API request process.}
}
\value{
Returns a \code{data.frame} with all the JIRA server information
}
\description{
Makes a request to JIRA's latest REST API to retrieve all
the necessary information regarding the JIRA server version.
}
\section{Warning}{
The function works with the latest JIRA REST API and to work you need to have
a internet connection. Calling the function too many times might block your
access, you will receive a 403 error code. To unblock your access you will
have to access interactively through your browser, signing out and signing
in again, and might even have to enter a CAPTCHA at https://jira.yourdomain.com/secure/Dashboard.jspa.
This only happens if the API is called upon multiple times in a short period of time.
}
\examples{
\dontrun{
get_jira_server_info("https://bugreports.qt.io")
}
}
\seealso{
For more information about Atlassians JIRA API go
to \href{https://docs.atlassian.com/software/jira/docs/api/REST/8.9.1/}{JIRA API Documentation}
}
|
# Test runner for the BLRF package: discovers and executes all testthat
# tests for the package.
library(testthat)
library(BLRF)
test_check("BLRF")
| /BLRF/tests/testthat.R | permissive | STA141C/Final_project | R | false | false | 52 | r | library(testthat)
library(BLRF)
test_check("BLRF")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hbgd-package.R
\docType{data}
\name{hbgd_labels_df}
\alias{hbgd_labels_df}
\title{Labels for common variable names in hbgd data}
\usage{
hbgd_labels_df
}
\description{
Labels for common variable names in hbgd data, used in \code{\link{get_data_attributes}} if labels are not explicitly provided.
}
\keyword{data}
| /man/hbgd_labels_df.Rd | permissive | hafen/hbgd | R | false | true | 391 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hbgd-package.R
\docType{data}
\name{hbgd_labels_df}
\alias{hbgd_labels_df}
\title{Labels for common variable names in hbgd data}
\usage{
hbgd_labels_df
}
\description{
Labels for common variable names in hbgd data, used in \code{\link{get_data_attributes}} if labels are not explicitly provided.
}
\keyword{data}
|
testlist <- list(Rs = numeric(0), atmp = numeric(0), relh = c(-5.59219752033303e+72, -1.06823407896587e-87, -4.25255837648531e+71, 1.7951202446173e-155, -8.18790345762274e-12, -4.84876319029531e+202, -3.96895588925774e+304, -1.15261897385914e+41, -4.16286459815484e-108, -2.95899697222989e+94, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), temp = Inf)
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result) | /meteor/inst/testfiles/ET0_Makkink/AFL_ET0_Makkink/ET0_Makkink_valgrind_files/1615860153-test.R | no_license | akhikolla/updatedatatype-list3 | R | false | false | 428 | r | testlist <- list(Rs = numeric(0), atmp = numeric(0), relh = c(-5.59219752033303e+72, -1.06823407896587e-87, -4.25255837648531e+71, 1.7951202446173e-155, -8.18790345762274e-12, -4.84876319029531e+202, -3.96895588925774e+304, -1.15261897385914e+41, -4.16286459815484e-108, -2.95899697222989e+94, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), temp = Inf)
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ec2_operations.R
\name{ec2_import_image}
\alias{ec2_import_image}
\title{Import single or multi-volume disk images or EBS snapshots into an
Amazon Machine Image (AMI)}
\usage{
ec2_import_image(Architecture, ClientData, ClientToken, Description,
DiskContainers, DryRun, Encrypted, Hypervisor, KmsKeyId, LicenseType,
Platform, RoleName, LicenseSpecifications, TagSpecifications)
}
\arguments{
\item{Architecture}{The architecture of the virtual machine.
Valid values: \code{i386} | \code{x86_64} | \code{arm64}}
\item{ClientData}{The client-specific data.}
\item{ClientToken}{The token to enable idempotency for VM import requests.}
\item{Description}{A description string for the import image task.}
\item{DiskContainers}{Information about the disk containers.}
\item{DryRun}{Checks whether you have the required permissions for the action, without
actually making the request, and provides an error response. If you have
the required permissions, the error response is \code{DryRunOperation}.
Otherwise, it is \code{UnauthorizedOperation}.}
\item{Encrypted}{Specifies whether the destination AMI of the imported image should be
encrypted. The default CMK for EBS is used unless you specify a
non-default AWS Key Management Service (AWS KMS) CMK using \code{KmsKeyId}.
For more information, see \href{https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html}{Amazon EBS Encryption}
in the \emph{Amazon Elastic Compute Cloud User Guide}.}
\item{Hypervisor}{The target hypervisor platform.
Valid values: \code{xen}}
\item{KmsKeyId}{An identifier for the symmetric AWS Key Management Service (AWS KMS)
customer master key (CMK) to use when creating the encrypted AMI. This
parameter is only required if you want to use a non-default CMK; if this
parameter is not specified, the default CMK for EBS is used. If a
\code{KmsKeyId} is specified, the \code{Encrypted} flag must also be set.
The CMK identifier may be provided in any of the following formats:
\itemize{
\item Key ID
\item Key alias. The alias ARN contains the \code{arn:aws:kms} namespace,
followed by the Region of the CMK, the AWS account ID of the CMK
owner, the \code{alias} namespace, and then the CMK alias. For example,
arn:aws:kms:\emph{us-east-1}:\emph{012345678910}:alias/\emph{ExampleAlias}.
\item ARN using key ID. The ID ARN contains the \code{arn:aws:kms} namespace,
followed by the Region of the CMK, the AWS account ID of the CMK
owner, the \code{key} namespace, and then the CMK ID. For example,
arn:aws:kms:\emph{us-east-1}:\emph{012345678910}:key/\emph{abcd1234-a123-456a-a12b-a123b4cd56ef}.
\item ARN using key alias. The alias ARN contains the \code{arn:aws:kms}
namespace, followed by the Region of the CMK, the AWS account ID of
the CMK owner, the \code{alias} namespace, and then the CMK alias. For
example,
arn:aws:kms:\emph{us-east-1}:\emph{012345678910}:alias/\emph{ExampleAlias}.
}
AWS parses \code{KmsKeyId} asynchronously, meaning that the action you call
may appear to complete even though you provided an invalid identifier.
This action will eventually report failure.
The specified CMK must exist in the Region that the AMI is being copied
to.
Amazon EBS does not support asymmetric CMKs.}
\item{LicenseType}{The license type to be used for the Amazon Machine Image (AMI) after
importing.
By default, we detect the source-system operating system (OS) and apply
the appropriate license. Specify \code{AWS} to replace the source-system
license with an AWS license, if appropriate. Specify \code{BYOL} to retain
the source-system license, if appropriate.
To use \code{BYOL}, you must have existing licenses with rights to use these
licenses in a third party cloud, such as AWS. For more information, see
\href{https://docs.aws.amazon.com/vm-import/latest/userguide/vmimport-image-import.html#prerequisites-image}{Prerequisites}
in the VM Import/Export User Guide.}
\item{Platform}{The operating system of the virtual machine.
Valid values: \code{Windows} | \code{Linux}}
\item{RoleName}{The name of the role to use when not using the default role, 'vmimport'.}
\item{LicenseSpecifications}{The ARNs of the license configurations.}
\item{TagSpecifications}{The tags to apply to the image being imported.}
}
\description{
Import single or multi-volume disk images or EBS snapshots into an
Amazon Machine Image (AMI). For more information, see \href{https://docs.aws.amazon.com/vm-import/latest/userguide/vmimport-image-import.html}{Importing a VM as an Image Using VM Import/Export}
in the \emph{VM Import/Export User Guide}.
}
\section{Request syntax}{
\preformatted{svc$import_image(
Architecture = "string",
ClientData = list(
Comment = "string",
UploadEnd = as.POSIXct(
"2015-01-01"
),
UploadSize = 123.0,
UploadStart = as.POSIXct(
"2015-01-01"
)
),
ClientToken = "string",
Description = "string",
DiskContainers = list(
list(
Description = "string",
DeviceName = "string",
Format = "string",
SnapshotId = "string",
Url = "string",
UserBucket = list(
S3Bucket = "string",
S3Key = "string"
)
)
),
DryRun = TRUE|FALSE,
Encrypted = TRUE|FALSE,
Hypervisor = "string",
KmsKeyId = "string",
LicenseType = "string",
Platform = "string",
RoleName = "string",
LicenseSpecifications = list(
list(
LicenseConfigurationArn = "string"
)
),
TagSpecifications = list(
list(
ResourceType = "client-vpn-endpoint"|"customer-gateway"|"dedicated-host"|"dhcp-options"|"egress-only-internet-gateway"|"elastic-ip"|"elastic-gpu"|"export-image-task"|"export-instance-task"|"fleet"|"fpga-image"|"host-reservation"|"image"|"import-image-task"|"import-snapshot-task"|"instance"|"internet-gateway"|"key-pair"|"launch-template"|"local-gateway-route-table-vpc-association"|"natgateway"|"network-acl"|"network-interface"|"network-insights-analysis"|"network-insights-path"|"placement-group"|"reserved-instances"|"route-table"|"security-group"|"snapshot"|"spot-fleet-request"|"spot-instances-request"|"subnet"|"traffic-mirror-filter"|"traffic-mirror-session"|"traffic-mirror-target"|"transit-gateway"|"transit-gateway-attachment"|"transit-gateway-connect-peer"|"transit-gateway-multicast-domain"|"transit-gateway-route-table"|"volume"|"vpc"|"vpc-peering-connection"|"vpn-connection"|"vpn-gateway"|"vpc-flow-log",
Tags = list(
list(
Key = "string",
Value = "string"
)
)
)
)
)
}
}
\keyword{internal}
| /paws/man/ec2_import_image.Rd | permissive | sanchezvivi/paws | R | false | true | 6,587 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ec2_operations.R
\name{ec2_import_image}
\alias{ec2_import_image}
\title{Import single or multi-volume disk images or EBS snapshots into an
Amazon Machine Image (AMI)}
\usage{
ec2_import_image(Architecture, ClientData, ClientToken, Description,
DiskContainers, DryRun, Encrypted, Hypervisor, KmsKeyId, LicenseType,
Platform, RoleName, LicenseSpecifications, TagSpecifications)
}
\arguments{
\item{Architecture}{The architecture of the virtual machine.
Valid values: \code{i386} | \code{x86_64} | \code{arm64}}
\item{ClientData}{The client-specific data.}
\item{ClientToken}{The token to enable idempotency for VM import requests.}
\item{Description}{A description string for the import image task.}
\item{DiskContainers}{Information about the disk containers.}
\item{DryRun}{Checks whether you have the required permissions for the action, without
actually making the request, and provides an error response. If you have
the required permissions, the error response is \code{DryRunOperation}.
Otherwise, it is \code{UnauthorizedOperation}.}
\item{Encrypted}{Specifies whether the destination AMI of the imported image should be
encrypted. The default CMK for EBS is used unless you specify a
non-default AWS Key Management Service (AWS KMS) CMK using \code{KmsKeyId}.
For more information, see \href{https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSEncryption.html}{Amazon EBS Encryption}
in the \emph{Amazon Elastic Compute Cloud User Guide}.}
\item{Hypervisor}{The target hypervisor platform.
Valid values: \code{xen}}
\item{KmsKeyId}{An identifier for the symmetric AWS Key Management Service (AWS KMS)
customer master key (CMK) to use when creating the encrypted AMI. This
parameter is only required if you want to use a non-default CMK; if this
parameter is not specified, the default CMK for EBS is used. If a
\code{KmsKeyId} is specified, the \code{Encrypted} flag must also be set.
The CMK identifier may be provided in any of the following formats:
\itemize{
\item Key ID
\item Key alias. The alias ARN contains the \code{arn:aws:kms} namespace,
followed by the Region of the CMK, the AWS account ID of the CMK
owner, the \code{alias} namespace, and then the CMK alias. For example,
arn:aws:kms:\emph{us-east-1}:\emph{012345678910}:alias/\emph{ExampleAlias}.
\item ARN using key ID. The ID ARN contains the \code{arn:aws:kms} namespace,
followed by the Region of the CMK, the AWS account ID of the CMK
owner, the \code{key} namespace, and then the CMK ID. For example,
arn:aws:kms:\emph{us-east-1}:\emph{012345678910}:key/\emph{abcd1234-a123-456a-a12b-a123b4cd56ef}.
\item ARN using key alias. The alias ARN contains the \code{arn:aws:kms}
namespace, followed by the Region of the CMK, the AWS account ID of
the CMK owner, the \code{alias} namespace, and then the CMK alias. For
example,
arn:aws:kms:\emph{us-east-1}:\emph{012345678910}:alias/\emph{ExampleAlias}.
}
AWS parses \code{KmsKeyId} asynchronously, meaning that the action you call
may appear to complete even though you provided an invalid identifier.
This action will eventually report failure.
The specified CMK must exist in the Region that the AMI is being copied
to.
Amazon EBS does not support asymmetric CMKs.}
\item{LicenseType}{The license type to be used for the Amazon Machine Image (AMI) after
importing.
By default, we detect the source-system operating system (OS) and apply
the appropriate license. Specify \code{AWS} to replace the source-system
license with an AWS license, if appropriate. Specify \code{BYOL} to retain
the source-system license, if appropriate.
To use \code{BYOL}, you must have existing licenses with rights to use these
licenses in a third party cloud, such as AWS. For more information, see
\href{https://docs.aws.amazon.com/vm-import/latest/userguide/vmimport-image-import.html#prerequisites-image}{Prerequisites}
in the VM Import/Export User Guide.}
\item{Platform}{The operating system of the virtual machine.
Valid values: \code{Windows} | \code{Linux}}
\item{RoleName}{The name of the role to use when not using the default role, 'vmimport'.}
\item{LicenseSpecifications}{The ARNs of the license configurations.}
\item{TagSpecifications}{The tags to apply to the image being imported.}
}
\description{
Import single or multi-volume disk images or EBS snapshots into an
Amazon Machine Image (AMI). For more information, see \href{https://docs.aws.amazon.com/vm-import/latest/userguide/vmimport-image-import.html}{Importing a VM as an Image Using VM Import/Export}
in the \emph{VM Import/Export User Guide}.
}
\section{Request syntax}{
\preformatted{svc$import_image(
Architecture = "string",
ClientData = list(
Comment = "string",
UploadEnd = as.POSIXct(
"2015-01-01"
),
UploadSize = 123.0,
UploadStart = as.POSIXct(
"2015-01-01"
)
),
ClientToken = "string",
Description = "string",
DiskContainers = list(
list(
Description = "string",
DeviceName = "string",
Format = "string",
SnapshotId = "string",
Url = "string",
UserBucket = list(
S3Bucket = "string",
S3Key = "string"
)
)
),
DryRun = TRUE|FALSE,
Encrypted = TRUE|FALSE,
Hypervisor = "string",
KmsKeyId = "string",
LicenseType = "string",
Platform = "string",
RoleName = "string",
LicenseSpecifications = list(
list(
LicenseConfigurationArn = "string"
)
),
TagSpecifications = list(
list(
ResourceType = "client-vpn-endpoint"|"customer-gateway"|"dedicated-host"|"dhcp-options"|"egress-only-internet-gateway"|"elastic-ip"|"elastic-gpu"|"export-image-task"|"export-instance-task"|"fleet"|"fpga-image"|"host-reservation"|"image"|"import-image-task"|"import-snapshot-task"|"instance"|"internet-gateway"|"key-pair"|"launch-template"|"local-gateway-route-table-vpc-association"|"natgateway"|"network-acl"|"network-interface"|"network-insights-analysis"|"network-insights-path"|"placement-group"|"reserved-instances"|"route-table"|"security-group"|"snapshot"|"spot-fleet-request"|"spot-instances-request"|"subnet"|"traffic-mirror-filter"|"traffic-mirror-session"|"traffic-mirror-target"|"transit-gateway"|"transit-gateway-attachment"|"transit-gateway-connect-peer"|"transit-gateway-multicast-domain"|"transit-gateway-route-table"|"volume"|"vpc"|"vpc-peering-connection"|"vpn-connection"|"vpn-gateway"|"vpc-flow-log",
Tags = list(
list(
Key = "string",
Value = "string"
)
)
)
)
)
}
}
\keyword{internal}
|
\name{PE}
\alias{PE}
\title{Partition entropy}
\description{
Produces the partition entropy index. The optimal number of clusters \emph{k} is such that the index takes the minimum value.
}
\usage{
PE (U, b)
}
\arguments{
\item{U}{Membership degree matrix}
\item{b}{Logarithmic base (default: exp(1))}
}
\value{
\item{pe}{Value of the partition entropy index}
}
\references{
Bezdek J.C., 1981. Pattern Recognition with Fuzzy Objective Function Algorithms. Plenum Press, New York.
}
\author{Paolo Giordani, Maria Brigida Ferraro, Alessio Serafini}
\seealso{\code{\link{PC}}, \code{\link{MPC}}, \code{\link{SIL}}, \code{\link{SIL.F}}, \code{\link{XB}}, \code{\link{Fclust}}, \code{\link{Mc}}}
\examples{
## McDonald's data
data(Mc)
names(Mc)
## data normalization by dividing the nutrition facts by the Serving Size (column 1)
for (j in 2:(ncol(Mc)-1))
Mc[,j]=Mc[,j]/Mc[,1]
## removing the column Serving Size
Mc=Mc[,-1]
## fuzzy k-means
## (excluded the factor column Type (last column))
clust=FKM(Mc[,1:(ncol(Mc)-1)],k=6,m=1.5,stand=1)
## partition entropy index
pe=PE(clust$U)
}
\keyword{multivariate}
| /man/PE.Rd | no_license | cran/fclust | R | false | false | 1,112 | rd | \name{PE}
\alias{PE}
\title{Partition entropy}
\description{
Produces the partition entropy index. The optimal number of clusters \emph{k} is such that the index takes the minimum value.
}
\usage{
PE (U, b)
}
\arguments{
\item{U}{Membership degree matrix}
\item{b}{Logarithmic base (default: exp(1))}
}
\value{
\item{pe}{Value of the partition entropy index}
}
\references{
Bezdek J.C., 1981. Pattern Recognition with Fuzzy Objective Function Algorithms. Plenum Press, New York.
}
\author{Paolo Giordani, Maria Brigida Ferraro, Alessio Serafini}
\seealso{\code{\link{PC}}, \code{\link{MPC}}, \code{\link{SIL}}, \code{\link{SIL.F}}, \code{\link{XB}}, \code{\link{Fclust}}, \code{\link{Mc}}}
\examples{
## McDonald's data
data(Mc)
names(Mc)
## data normalization by dividing the nutrition facts by the Serving Size (column 1)
for (j in 2:(ncol(Mc)-1))
Mc[,j]=Mc[,j]/Mc[,1]
## removing the column Serving Size
Mc=Mc[,-1]
## fuzzy k-means
## (excluded the factor column Type (last column))
clust=FKM(Mc[,1:(ncol(Mc)-1)],k=6,m=1.5,stand=1)
## partition entropy index
pe=PE(clust$U)
}
\keyword{multivariate}
|
# load data
# Fix: the original call `source(data_claning.R')` was missing the opening
# quote, which is a syntax error. NOTE(review): the file name looks like a
# typo for 'data_cleaning.R' -- verify against the repository before renaming.
source('data_claning.R')
# open device
png(filename='plot3.png',width=480,height=480,units='px')
# plot data: three sub-metering series over time on one set of axes
lncol<-c('black','red','blue')
lbls<-c('Sub_metering_1','Sub_metering_2','Sub_metering_3')
plot(power.consumption$DateTime,power.consumption$SubMetering1,type='l',col=lncol[1],xlab='',ylab='Energy sub metering')
lines(power.consumption$DateTime,power.consumption$SubMetering2,col=lncol[2])
lines(power.consumption$DateTime,power.consumption$SubMetering3,col=lncol[3])
# add legend
legend('topright',legend=lbls,col=lncol,lty='solid')
# close device
x<-dev.off() | /plot3.R | no_license | annaszungyi/ExData_Plotting1 | R | false | false | 585 | r | # load data
# Fix: the original call was missing the opening quote (syntax error).
# NOTE(review): the file name looks like a typo for 'data_cleaning.R' --
# verify against the repository before renaming.
source('data_claning.R')
# open device
png(filename='plot3.png',width=480,height=480,units='px')
# plot data: three sub-metering series over time on one set of axes
lncol<-c('black','red','blue')
lbls<-c('Sub_metering_1','Sub_metering_2','Sub_metering_3')
plot(power.consumption$DateTime,power.consumption$SubMetering1,type='l',col=lncol[1],xlab='',ylab='Energy sub metering')
lines(power.consumption$DateTime,power.consumption$SubMetering2,col=lncol[2])
lines(power.consumption$DateTime,power.consumption$SubMetering3,col=lncol[3])
# add legend
legend('topright',legend=lbls,col=lncol,lty='solid')
# close device; assign to x to suppress printing the device number
x<-dev.off()
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/peak_matrix.R
\name{get_peak_matrix}
\alias{get_peak_matrix}
\title{retrieve matrix of precalculated peaks}
\usage{
get_peak_matrix(fid)
}
\arguments{
\item{fid}{h5d file handle}
}
\value{
matrix of all peaks captured in Ionicion software
}
\description{
retrieve matrix of precalculated peaks
}
\examples{
\dontrun{
get_peak_matrix(fid)
}
}
| /man/get_peak_matrix.Rd | permissive | bdcaf/rawTof | R | false | true | 421 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/peak_matrix.R
\name{get_peak_matrix}
\alias{get_peak_matrix}
\title{retrieve matrix of precalculated peaks}
\usage{
get_peak_matrix(fid)
}
\arguments{
\item{fid}{h5d file handle}
}
\value{
matrix of all peaks captured in Ionicion software
}
\description{
retrieve matrix of precalculated peaks
}
\examples{
\dontrun{
get_peak_matrix(fid)
}
}
|
#### Looking at RAD SNPs from Seeb lab for lower 48 chum
# Interactive workspace setup; per-locus statistics are pasted in from the
# clipboard in two batches and concatenated into one flat vector.
setwd("V:/WORK/Chum/AHRP")
## save.image("V:/WORK/Chum/AHRP/RAD_SNPs.RData")
## load("V:/WORK/Chum/AHRP/RAD_SNPs.RData")
## rm(list=setdiff(ls(), c(objects(pattern="RAD"),"fun","hets","ps","phase1","allSNPs","SNPs192")))
# Each readClipboard() call captures one pasted batch of values
# (Windows-only; requires the data to be on the clipboard at call time).
RAD_part1=readClipboard()
RAD_part2=readClipboard()
allRAD=c(RAD_part1,RAD_part2)
# Reshape the flat clipboard vector into a 4-column table: column 1 is the
# locus name, columns 2-4 are the Fst, Ho, and He statistics.
RAD=matrix(allRAD,nrow=length(allRAD)/4,ncol=4,byrow=TRUE)
RAD2=RAD[,2:4]
rownames(RAD2)=RAD[,1]
colnames(RAD2)=c("Fst","Ho","He")
RAD2
# Convert the character matrix to numeric for the arithmetic below.
RAD3=data.matrix(data.frame(RAD2,stringsAsFactors=FALSE))
fun=function(p) {2*p-2*p^2}
# Quick check: expected heterozygosity at MAF = 0.2.
fun(0.2)
# How many RAD loci exceed the He corresponding to MAF > 0.2?
sum(RAD3[,"He"]>fun(0.2))
dim(RAD3)
# Copy the names of those loci back out via the clipboard.
writeClipboard(rownames(RAD3)[which(RAD3[,"He"]>fun(0.2))])
RAD3[which(RAD3[,"He"]>fun(0.2)),]
# Locus counts at a ladder of MAF thresholds.
sum(RAD3[,"He"]>fun(0.2))
sum(RAD3[,"He"]>fun(0.25))
sum(RAD3[,"He"]>fun(0.3))
sum(RAD3[,"He"]>fun(0.35))
sum(RAD3[,"He"]>fun(0.4))
sum(RAD3[,"He"]>fun(0.45))
# Invert He -> MAF numerically: build a lookup grid of MAF values (hets)
# and their corresponding He (ps), then assign each locus the grid MAF
# whose He is closest to the observed He.
hets <- seq(0,0.5,by=0.001)
ps <- fun(hets)
names(ps) <- hets
RAD3 <- cbind(RAD3,"MAF"=sapply(RAD3[,"He"], function(x) {as.numeric(names(ps))[which.min(abs(x-ps))]}))
hist(RAD3[,"MAF"],col=8,xlim=c(0,0.5))
sum(RAD3[,"MAF"]>0.2)
names(which(RAD3[,"MAF"]>0.2))
### Compare 188 w/ these new RAD SNPs
# phase1 holds MAF for the existing WASSIP 188 SNPs (see plot labels below);
# sort and count for comparison. t(t(...)) just prints as a one-column matrix.
t(t(sort(phase1[,"Overall","MAF"])))
sum(phase1[,"Overall","MAF"]>0.2)
## All Chum SNPs, assuming that the MAFs observed down south are similar to in SEAK
# Pool WASSIP MAFs with the RAD-derived MAFs (column 5 of RAD3).
allSNPs <- c(phase1[,"Overall","MAF"],RAD3[,5])
length(allSNPs)
hist(allSNPs)
sum(allSNPs>0.2)
t(t(sort(allSNPs,decreasing=TRUE)))
## Best 192 SNPs ranked by MAF with WASSIP 188 + RAD 72
# Rank all pooled SNPs by MAF and take the top 192 as the candidate panel;
# the nested t(t(...)) calls coerce the sorted vector to a one-column matrix.
SNPs192 <- t(t(t(t(sort(allSNPs,decreasing=TRUE)))[1:192,]))
unlist(dimnames(SNPs192))
# Show MAF of all "best" 192 SNP panel
hist(SNPs192, col=8, xlim=c(0,0.5))
# Blue ones are the "new" RAD SNPs
hist(SNPs192[grep(pattern="RAD",unlist(dimnames(SNPs192))),],col=4,add=TRUE)
legend("topleft",legend="RAD",col=4,pch=15,bty="n",cex=2)
# Presentation-quality layered histograms: all SNPs, RAD subset, best panel.
par(mar=c(5.1,5.1,4.1,2.1))
par(bg=colors()[c(356)])
## All 260
hist(allSNPs, col="gray70", main="Available Chum SNPs", cex.lab=2, cex.axis=2, cex.main=2)
# Which are RAD
hist(allSNPs[grep(pattern="RAD",names(allSNPs))],col="lightblue",add=TRUE)
# Best 192 SNPs
hist(SNPs192, col="gray30", add=TRUE)
# Which are RAD
hist(SNPs192[grep(pattern="RAD",unlist(dimnames(SNPs192))),],col="blue",add=TRUE)
# Vertical line marks the MAF cutoff of the 192-SNP panel.
abline(v=min(SNPs192), lwd=10)
text(x=min(SNPs192), y=32, labels="Use These", cex=3, pos=4)
legend("topright",legend=c("WASSIP 188", "RAD"),col=c("gray70","blue"),pch=15,bty="n",cex=2, pt.cex=4)
abline(v=sort(phase1[,"Overall","MAF"], decreasing=TRUE)[96], lwd=10)
text(x=min(SNPs192), y=32, labels="Use These", cex=3, pos=4)
legend("topright",legend=c("WASSIP 188", "RAD"),col=c("gray70","lightblue"),pch=15,bty="n",cex=2, pt.cex=4)
## All, but this will show RAD
hist(allSNPs, col="lightblue", main="Available Chum SNPs", cex.lab=2, cex.axis=2, cex.main=2, ylim=c(0,45), xlab="Minor Allele Frequency (MAF)")
legend("topright", legend=c("WASSIP 188", "Best 96", "RAD"), col=c("gray70", "gray30", "lightblue"), pch=15, bty="n", cex=2, pt.cex=4)
## WASSIP 188
hist(phase1[,"Overall","MAF"], col="gray70", main="Available Chum SNPs", cex.lab=2, cex.axis=2, cex.main=2, ylim=c(0,45), xlab="Minor Allele Frequency (MAF)", add=TRUE)
#hist(SNPs192[-grep(pattern="RAD",unlist(dimnames(SNPs192))),], col="gray30", add=TRUE)
abline(v=min(SNPs192), lwd=10)
text(x=min(SNPs192), y=30.5, labels="Use These", cex=3, pos=4)
legend("topright", legend=c("WASSIP 188"), col=c("gray70"), pch=15, bty="n", cex=2, pt.cex=4)
# Best 96
hist(sort(phase1[,"Overall","MAF"], decreasing=TRUE)[1:96], col="gray30", main="Available Chum SNPs", cex.lab=2, cex.axis=2, cex.main=2, ylim=c(0,45), xlab="Minor Allele Frequency (MAF)", add=TRUE)
legend("topright", legend=c("WASSIP 188", "Best 96"), col=c("gray70", "gray30"), pch=15, bty="n", cex=2, pt.cex=4)
# How many of these 192 are RAD?
names(SNPs192[grep(pattern="RAD",unlist(dimnames(SNPs192))),])
length(names(SNPs192[grep(pattern="RAD",unlist(dimnames(SNPs192))),])) # 51
| /RAD_SNPs.R | no_license | krshedd/SEAK-Chum-Parentage | R | false | false | 3,987 | r | #### Looking at RAD SNPs from Seeb lab for lower 48 chum
#### RAD_SNPs.R: screen newly discovered RAD SNPs for the SEAK chum panel.
## Workflow: paste SNP summary rows from the clipboard (Windows-only
## readClipboard/writeClipboard), convert expected heterozygosity (He) to an
## approximate minor allele frequency (MAF), then rank against the existing
## WASSIP 188 panel (`phase1`, assumed present in the workspace, e.g. from
## the .RData below).
setwd("V:/WORK/Chum/AHRP")
## save.image("V:/WORK/Chum/AHRP/RAD_SNPs.RData")
## load("V:/WORK/Chum/AHRP/RAD_SNPs.RData")
## rm(list=setdiff(ls(), c(objects(pattern="RAD"),"fun","hets","ps","phase1","allSNPs","SNPs192")))
# Raw SNP table arrives via the clipboard in two pastes
RAD_part1=readClipboard()
RAD_part2=readClipboard()
allRAD=c(RAD_part1,RAD_part2)
# Clipboard stream is 4 fields per SNP: name, Fst, Ho, He
RAD=matrix(allRAD,nrow=length(allRAD)/4,ncol=4,byrow=TRUE)
RAD2=RAD[,2:4]
rownames(RAD2)=RAD[,1]
colnames(RAD2)=c("Fst","Ho","He")
RAD2
RAD3=data.matrix(data.frame(RAD2,stringsAsFactors=FALSE))
# Expected heterozygosity for a biallelic locus: He = 2p(1 - p)
fun=function(p) {2*p-2*p^2}
fun(0.2)
sum(RAD3[,"He"]>fun(0.2))
dim(RAD3)
# Copy names of SNPs passing the MAF > 0.2 (He > 0.32) screen to the clipboard
writeClipboard(rownames(RAD3)[which(RAD3[,"He"]>fun(0.2))])
RAD3[which(RAD3[,"He"]>fun(0.2)),]
# Counts passing successively stricter MAF thresholds
sum(RAD3[,"He"]>fun(0.2))
sum(RAD3[,"He"]>fun(0.25))
sum(RAD3[,"He"]>fun(0.3))
sum(RAD3[,"He"]>fun(0.35))
sum(RAD3[,"He"]>fun(0.4))
sum(RAD3[,"He"]>fun(0.45))
# Invert He -> MAF numerically via a lookup table over p in [0, 0.5]
hets <- seq(0,0.5,by=0.001)
ps <- fun(hets)
names(ps) <- hets
RAD3 <- cbind(RAD3,"MAF"=sapply(RAD3[,"He"], function(x) {as.numeric(names(ps))[which.min(abs(x-ps))]}))
hist(RAD3[,"MAF"],col=8,xlim=c(0,0.5))
sum(RAD3[,"MAF"]>0.2)
names(which(RAD3[,"MAF"]>0.2))
### Compare 188 w/ these new RAD SNPs
# t(t(...)) prints a sorted named vector as a one-column matrix for easier reading
t(t(sort(phase1[,"Overall","MAF"])))
sum(phase1[,"Overall","MAF"]>0.2)
## All Chum SNPs, assuming that the MAFs observed down south are similar to in SEAK
# FIX: RAD3 as built above has 4 columns (Fst, Ho, He, MAF), so positional
# index [,5] was out of bounds; select the MAF column by name instead.
allSNPs <- c(phase1[,"Overall","MAF"],RAD3[,"MAF"])
length(allSNPs)
hist(allSNPs)
sum(allSNPs>0.2)
t(t(sort(allSNPs,decreasing=TRUE)))
## Best 192 SNPs ranked by MAF with WASSIP 188 + RAD 72
# Nested t(t()) keeps the top 192 as a 192 x 1 matrix with SNP names in dimnames
SNPs192 <- t(t(t(t(sort(allSNPs,decreasing=TRUE)))[1:192,]))
unlist(dimnames(SNPs192))
# Show MAF of all "best" 192 SNP panel
hist(SNPs192, col=8, xlim=c(0,0.5))
# Blue ones are the "new" RAD SNPs
hist(SNPs192[grep(pattern="RAD",unlist(dimnames(SNPs192))),],col=4,add=TRUE)
legend("topleft",legend="RAD",col=4,pch=15,bty="n",cex=2)
## ---- Presentation-quality overlaid histograms ----
par(mar=c(5.1,5.1,4.1,2.1))
par(bg=colors()[c(356)])
## All 260
hist(allSNPs, col="gray70", main="Available Chum SNPs", cex.lab=2, cex.axis=2, cex.main=2)
# Which are RAD
hist(allSNPs[grep(pattern="RAD",names(allSNPs))],col="lightblue",add=TRUE)
# Best 192 SNPs
hist(SNPs192, col="gray30", add=TRUE)
# Which are RAD
hist(SNPs192[grep(pattern="RAD",unlist(dimnames(SNPs192))),],col="blue",add=TRUE)
# Vertical cutoff at the lowest MAF that made the 192-SNP panel
abline(v=min(SNPs192), lwd=10)
text(x=min(SNPs192), y=32, labels="Use These", cex=3, pos=4)
legend("topright",legend=c("WASSIP 188", "RAD"),col=c("gray70","blue"),pch=15,bty="n",cex=2, pt.cex=4)
# Alternative cutoff: MAF of the 96th-ranked WASSIP SNP
abline(v=sort(phase1[,"Overall","MAF"], decreasing=TRUE)[96], lwd=10)
text(x=min(SNPs192), y=32, labels="Use These", cex=3, pos=4)
legend("topright",legend=c("WASSIP 188", "RAD"),col=c("gray70","lightblue"),pch=15,bty="n",cex=2, pt.cex=4)
## All, but this will show RAD
hist(allSNPs, col="lightblue", main="Available Chum SNPs", cex.lab=2, cex.axis=2, cex.main=2, ylim=c(0,45), xlab="Minor Allele Frequency (MAF)")
legend("topright", legend=c("WASSIP 188", "Best 96", "RAD"), col=c("gray70", "gray30", "lightblue"), pch=15, bty="n", cex=2, pt.cex=4)
## WASSIP 188
hist(phase1[,"Overall","MAF"], col="gray70", main="Available Chum SNPs", cex.lab=2, cex.axis=2, cex.main=2, ylim=c(0,45), xlab="Minor Allele Frequency (MAF)", add=TRUE)
#hist(SNPs192[-grep(pattern="RAD",unlist(dimnames(SNPs192))),], col="gray30", add=TRUE)
abline(v=min(SNPs192), lwd=10)
text(x=min(SNPs192), y=30.5, labels="Use These", cex=3, pos=4)
legend("topright", legend=c("WASSIP 188"), col=c("gray70"), pch=15, bty="n", cex=2, pt.cex=4)
# Best 96
hist(sort(phase1[,"Overall","MAF"], decreasing=TRUE)[1:96], col="gray30", main="Available Chum SNPs", cex.lab=2, cex.axis=2, cex.main=2, ylim=c(0,45), xlab="Minor Allele Frequency (MAF)", add=TRUE)
legend("topright", legend=c("WASSIP 188", "Best 96"), col=c("gray70", "gray30"), pch=15, bty="n", cex=2, pt.cex=4)
# How many of these 192 are RAD?
names(SNPs192[grep(pattern="RAD",unlist(dimnames(SNPs192))),])
length(names(SNPs192[grep(pattern="RAD",unlist(dimnames(SNPs192))),])) # 51
|
## hpmc.R: split the HPMC dissolution dataset into a maximally dissimilar
## testing set and a training set.
## Predictors are columns 1:18; responses (dissolution %, columns 19:22) are
## rescaled to [0, 1]. `maxDissim()` and `minDiss` are expected to be
## defined by the sourced maxDissim9.R.
library(dplyr)
library(prodlim)
rm(list = ls())
source("~/Desktop/PharmaceuticsData/HPMC/maxDissim9.R")
allX<- read.csv("~/Desktop/PharmaceuticsData/HPMC/remaining_data.csv")
extra <- read.csv("~/Desktop/PharmaceuticsData/HPMC/final_testing_data.csv")
##scale range from 0 to 1
alldata <- data.matrix(allX)
extradata <- data.matrix(extra)
y <- alldata[,19:22]/100
X <- alldata[, 1:18]
extraY <- extradata[,19:22]/100
extraX <- extradata[, 1:18]
# Min/max of the main predictors; the extra set is scaled with the SAME
# centers/ranges so both sets share a common [0, 1] scale
maxs <- apply(X, 2, max)
mins <- apply(X, 2, min)
ranges <- maxs - mins
means <- apply(X, 2, mean)
scaledallx <- scale(X, center = mins, scale = ranges)
scaleddata <- cbind(scaledallx, y)
scaledextrax <- scale(extraX, center = mins, scale = ranges)
scaledextradata <- cbind(scaledextrax, extraY)
## get rid of groups all big than 80
# Rows whose responses are all high or all low are set aside here and later
# forced into the training set.
# NOTE(review): the first clause tests column 21 twice (>= 0.6 and >= 0.8)
# and never tests column 19, unlike the all-low clause covering 19:22 --
# possibly a typo for scaleddata[,19]; confirm the intended thresholds.
index80 <- which((scaleddata[,21] >= 0.6 & scaleddata[,20] >= 0.8 & scaleddata[,21] >= 0.8 & scaleddata[,22] >= 0.8) | (scaleddata[,19] <= 0.2 & scaleddata[,20] <= 0.2 & scaleddata[,21] <= 0.2 & scaleddata[,22] <= 0.2))
big80scaleddata <- scaleddata[index80, ]
scaleddata <- scaleddata[-index80, ]
scaledallx <- scaledallx[-index80, ]
big80alldata <- alldata[index80, ]
alldata <- alldata[-index80, ]
## get rid of groups less 3
# Formulation groups (defined by predictor columns 1:9) with <= 3 rows are
# excluded from test-set selection and kept for training only.
df <- data.frame(scaledallx[, 1:9])
bb <- aggregate(list(numdup=rep(1,nrow(df))), df, length)
dd <- bb[order(bb$numdup, decreasing = FALSE),]
forbidgroup <- filter(dd, numdup <= 3)
conformIndex <- which(is.na(row.match(data.frame(scaleddata[, 1:9]), forbidgroup[, 1:9])))
#conformIndex <- c(conformIndex, index80)
conformdata <- scaleddata[conformIndex, ]
less3scaleddata <- scaleddata[-conformIndex, ]
conformalldata <- alldata[conformIndex, ]
less3conformalldata <- alldata[-conformIndex, ]
## Get best inital dataset
numbers = dim(conformdata)[1];
allIndexes <- NULL
allsumdiss <- NULL
# `times` is computed but not used below
times <- choose(numbers, 5)
## Generate 10000 intial data set and get best one
# Each candidate draw is made reproducible via set.seed(i); which.min()
# below keeps the 5-point seed with the smallest total distance to the
# remaining data.
for (i in 1:10000) {
## A random sample of 5 data points
set.seed(i)
initalIndexes <- sample(numbers, 5)
TrainningSet <- conformdata[-initalIndexes, ]
initalTestSet <- conformdata[initalIndexes, ]
allIndexes <- rbind(allIndexes, initalIndexes)
diss <- proxy::dist(initalTestSet, TrainningSet)
sumdiss <- sum(diss)
allsumdiss <- c(allsumdiss, sumdiss)
#initalIndexes <- c(14, 30, 46, 54, 91)
#initalIndexes <- c(5,50,78,99,117)
#initalIndexes <- c(18,64,65,66,84)
#initalIndexes <- c(18,64,65,66,74,83,84)
}
bestInitalIndex <- allIndexes[which.min(allsumdiss), ]
bestDistance <- min(allsumdiss)
#Begin compute remaining testset
# Grow the test set from the 5 seed points by maximum dissimilarity
# (15 more points -> 20 test rows in total).
RemainingSet <- conformdata[-bestInitalIndex, ]
initalSet <- conformdata[bestInitalIndex, ]
SelectedIndex <- maxDissim(initalSet, RemainingSet, n = 15, obj = minDiss, alpha = 0.5)
SelectedSet <- RemainingSet[SelectedIndex, ]
FinalTestingSet <- rbind(initalSet, SelectedSet)
#FinalTestingSet <- SelectedSet
#FinalTrainingSet <- rbind(RemainingSet[-SelectedIndex, ], less3scaleddata, big80scaleddata, initalSet)
FinalTrainingSet <- rbind(RemainingSet[-SelectedIndex, ], less3scaleddata, big80scaleddata)
# compute un-scaled data
UnScaledRemainingSet <- conformalldata[-bestInitalIndex, ]
UnScaledinitalSet <- conformalldata[bestInitalIndex, ]
UnScaledSelectedSet <- UnScaledRemainingSet[SelectedIndex, ]
UnScaledFinalTestingSet <- rbind(UnScaledinitalSet, UnScaledSelectedSet)
#UnScaledFinalTestingSet <- UnScaledSelectedSet
#UnScaledFinalTrainingSet <- rbind(UnScaledRemainingSet[-SelectedIndex, ], less3conformalldata, big80alldata, UnScaledinitalSet)
UnScaledFinalTrainingSet <- rbind(UnScaledRemainingSet[-SelectedIndex, ], less3conformalldata, big80alldata)
#cat("Selected Indexes are: ", SelectedIndex, "\n", sep=",")
write.csv(FinalTestingSet, "~/Desktop/PharmaceuticsData/HPMC/testingset.csv", row.names = FALSE)
write.csv(FinalTrainingSet, "~/Desktop/PharmaceuticsData/HPMC/trainingset.csv", row.names = FALSE)
write.csv(UnScaledFinalTestingSet, "~/Desktop/PharmaceuticsData/HPMC/testingset(readyforcheck).csv", row.names = FALSE)
write.csv(UnScaledFinalTrainingSet, "~/Desktop/PharmaceuticsData/HPMC/trainingset(readyforcheck).csv", row.names = FALSE)
write.csv(scaledextradata, "~/Desktop/PharmaceuticsData/HPMC/extrascaledtestset.csv", row.names = FALSE)
| /net.mydreamy.mlpharmaceutics.hydrophilicmatrixtablet/src/main/java/net/mydreamy/mlpharmaceutics/hydrophilicmatrixtablet/finalcode/hpmc.R | no_license | yylonly/SolidDispersion | R | false | false | 4,251 | r | library(dplyr)
## hpmc.R (continued): split the HPMC dissolution dataset into a maximally
## dissimilar testing set and a training set.
## Predictors are columns 1:18; responses (dissolution %, columns 19:22) are
## rescaled to [0, 1]. `maxDissim()` and `minDiss` are expected to be
## defined by the sourced maxDissim9.R.
library(prodlim)
rm(list = ls())
source("~/Desktop/PharmaceuticsData/HPMC/maxDissim9.R")
allX<- read.csv("~/Desktop/PharmaceuticsData/HPMC/remaining_data.csv")
extra <- read.csv("~/Desktop/PharmaceuticsData/HPMC/final_testing_data.csv")
##scale range from 0 to 1
alldata <- data.matrix(allX)
extradata <- data.matrix(extra)
y <- alldata[,19:22]/100
X <- alldata[, 1:18]
extraY <- extradata[,19:22]/100
extraX <- extradata[, 1:18]
# Min/max of the main predictors; the extra set is scaled with the SAME
# centers/ranges so both sets share a common [0, 1] scale
maxs <- apply(X, 2, max)
mins <- apply(X, 2, min)
ranges <- maxs - mins
means <- apply(X, 2, mean)
scaledallx <- scale(X, center = mins, scale = ranges)
scaleddata <- cbind(scaledallx, y)
scaledextrax <- scale(extraX, center = mins, scale = ranges)
scaledextradata <- cbind(scaledextrax, extraY)
## get rid of groups all big than 80
# Rows whose responses are all high or all low are set aside here and later
# forced into the training set.
# NOTE(review): the first clause tests column 21 twice (>= 0.6 and >= 0.8)
# and never tests column 19, unlike the all-low clause covering 19:22 --
# possibly a typo for scaleddata[,19]; confirm the intended thresholds.
index80 <- which((scaleddata[,21] >= 0.6 & scaleddata[,20] >= 0.8 & scaleddata[,21] >= 0.8 & scaleddata[,22] >= 0.8) | (scaleddata[,19] <= 0.2 & scaleddata[,20] <= 0.2 & scaleddata[,21] <= 0.2 & scaleddata[,22] <= 0.2))
big80scaleddata <- scaleddata[index80, ]
scaleddata <- scaleddata[-index80, ]
scaledallx <- scaledallx[-index80, ]
big80alldata <- alldata[index80, ]
alldata <- alldata[-index80, ]
## get rid of groups less 3
# Formulation groups (defined by predictor columns 1:9) with <= 3 rows are
# excluded from test-set selection and kept for training only.
df <- data.frame(scaledallx[, 1:9])
bb <- aggregate(list(numdup=rep(1,nrow(df))), df, length)
dd <- bb[order(bb$numdup, decreasing = FALSE),]
forbidgroup <- filter(dd, numdup <= 3)
conformIndex <- which(is.na(row.match(data.frame(scaleddata[, 1:9]), forbidgroup[, 1:9])))
#conformIndex <- c(conformIndex, index80)
conformdata <- scaleddata[conformIndex, ]
less3scaleddata <- scaleddata[-conformIndex, ]
conformalldata <- alldata[conformIndex, ]
less3conformalldata <- alldata[-conformIndex, ]
## Get best inital dataset
numbers = dim(conformdata)[1];
allIndexes <- NULL
allsumdiss <- NULL
# `times` is computed but not used below
times <- choose(numbers, 5)
## Generate 10000 intial data set and get best one
# Each candidate draw is made reproducible via set.seed(i); which.min()
# below keeps the 5-point seed with the smallest total distance to the
# remaining data.
for (i in 1:10000) {
## A random sample of 5 data points
set.seed(i)
initalIndexes <- sample(numbers, 5)
TrainningSet <- conformdata[-initalIndexes, ]
initalTestSet <- conformdata[initalIndexes, ]
allIndexes <- rbind(allIndexes, initalIndexes)
diss <- proxy::dist(initalTestSet, TrainningSet)
sumdiss <- sum(diss)
allsumdiss <- c(allsumdiss, sumdiss)
#initalIndexes <- c(14, 30, 46, 54, 91)
#initalIndexes <- c(5,50,78,99,117)
#initalIndexes <- c(18,64,65,66,84)
#initalIndexes <- c(18,64,65,66,74,83,84)
}
bestInitalIndex <- allIndexes[which.min(allsumdiss), ]
bestDistance <- min(allsumdiss)
#Begin compute remaining testset
# Grow the test set from the 5 seed points by maximum dissimilarity
# (15 more points -> 20 test rows in total).
RemainingSet <- conformdata[-bestInitalIndex, ]
initalSet <- conformdata[bestInitalIndex, ]
SelectedIndex <- maxDissim(initalSet, RemainingSet, n = 15, obj = minDiss, alpha = 0.5)
SelectedSet <- RemainingSet[SelectedIndex, ]
FinalTestingSet <- rbind(initalSet, SelectedSet)
#FinalTestingSet <- SelectedSet
#FinalTrainingSet <- rbind(RemainingSet[-SelectedIndex, ], less3scaleddata, big80scaleddata, initalSet)
FinalTrainingSet <- rbind(RemainingSet[-SelectedIndex, ], less3scaleddata, big80scaleddata)
# compute un-scaled data
UnScaledRemainingSet <- conformalldata[-bestInitalIndex, ]
UnScaledinitalSet <- conformalldata[bestInitalIndex, ]
UnScaledSelectedSet <- UnScaledRemainingSet[SelectedIndex, ]
UnScaledFinalTestingSet <- rbind(UnScaledinitalSet, UnScaledSelectedSet)
#UnScaledFinalTestingSet <- UnScaledSelectedSet
#UnScaledFinalTrainingSet <- rbind(UnScaledRemainingSet[-SelectedIndex, ], less3conformalldata, big80alldata, UnScaledinitalSet)
UnScaledFinalTrainingSet <- rbind(UnScaledRemainingSet[-SelectedIndex, ], less3conformalldata, big80alldata)
#cat("Selected Indexes are: ", SelectedIndex, "\n", sep=",")
write.csv(FinalTestingSet, "~/Desktop/PharmaceuticsData/HPMC/testingset.csv", row.names = FALSE)
write.csv(FinalTrainingSet, "~/Desktop/PharmaceuticsData/HPMC/trainingset.csv", row.names = FALSE)
write.csv(UnScaledFinalTestingSet, "~/Desktop/PharmaceuticsData/HPMC/testingset(readyforcheck).csv", row.names = FALSE)
write.csv(UnScaledFinalTrainingSet, "~/Desktop/PharmaceuticsData/HPMC/trainingset(readyforcheck).csv", row.names = FALSE)
write.csv(scaledextradata, "~/Desktop/PharmaceuticsData/HPMC/extrascaledtestset.csv", row.names = FALSE)
|
# Convert a markdown string to an HTML object.
#
# Renders `text` as an HTML fragment via `markdown::markdownToHTML()` and
# wraps the result with `htmltools::HTML()` so it is treated as raw HTML
# (not escaped) when embedded in a document.
md2html <- function(text){
  htmltools::HTML(
    markdown::markdownToHTML(text = text, fragment.only = TRUE)
  )
}
| /inst/rmd_template/wellbeing/auxiliary.R | permissive | standardgalactic/wpa | R | false | false | 179 | r | md2html <- function(text){
html_chunk <- markdown::markdownToHTML(text = text,
fragment.only = TRUE)
htmltools::HTML(html_chunk)
}
|
#Dafermos and Nikolaidi model for students (September 2018)
#Basic version
#Reproduced by Marco Veronese Passarella, April 4th, 2020
## Stock-flow consistent macro model: simulates T = 51 annual periods
## (1960-2010). Each period resolves the simultaneous equation system by
## brute-force repetition (10 passes of the inner `iterations` loop).
#Clear all
rm(list=ls(all=TRUE))
# NOTE(review): T is reassigned here, masking the TRUE shorthand `T`
# (the script only uses the `F` shorthand later, so this is safe as written).
T<-51
#Upload data from Dropbox
Data <- read.csv( "https://www.dropbox.com/s/kusoabl4s8clhvj/data_dafnik.csv?dl=1" )
#Endogenous variables
# vector(length=T) initialises each series to logical FALSE (numeric 0)
W<- vector(length=T)
Yc<- vector(length=T)
CO<- vector(length=T)
M<- vector(length=T)
Y<- vector(length=T)
TP<- vector(length=T)
RP<- vector(length=T)
DP<- vector(length=T)
I<- vector(length=T)
K<- vector(length=T)
L<- vector(length=T)
BP<- vector(length=T)
M_red<- vector(length=T)
Y_star<- vector(length=T) #auxiliary variable
u<- vector(length=T) #auxiliary variable
gy<- vector(length=T) #auxiliary variable
lev<- vector(length=T) #auxiliary variable
#Parameters
# Period 1 calibrates parameters and initial values; later periods iterate
# the model equations forward.
for (i in 1:T) {
if (i == 1)
{ for (iterations in 1:10){
sw<-mean(Data[,c("sw")]) #Sets the wage share equal to its mean value in the US during the period 1960-2010 [Category B(i)]
rm<- mean(Data[,c("rm")]) #Sets the deposit interest rate equal to its mean value in the US during the period 1960-2010 [Category B(i)]
rl<- mean(Data[,c("rl")]) #Sets the loan interest rate equal to its value in the US during the period 1960-2010 [Category B(i)]
c1<-0.9 #Selected from a reasonable range of values [Category B(iii)]
c2<-0.75 #Selected from a reasonable range of values [Category B(iii)]
u[i]<-Data[1,c("u")] #US capacity utilisation in 1960
# NOTE(review): on the first of the 10 passes Y[i] and K[i] are still 0
# (they are only assigned further below), so v/c3/sf start as NaN and are
# refined on subsequent passes.
v<-Y[i]/(K[i]*u[i]) #Calibrated such that capacity utilisation in the model matches the capacity utilisation in the US in 1960 [Category C(i)]; we use equations (14) and (15)
gk<- mean(Data[,c("gy")]) #Calibrated such that the model generates the baseline scenario [Category C(ii)]
c3<-(K[i]/L[i])*((Y[i]/K[i])*(1+gk)-gk-(c1*W[i]/K[i]+c2*Yc[i]/K[i])) #Calibrated such that the model generates the baseline scenario; ensures that Y/K will remain contant during the simulation period [Category C(ii)]; see Appendix B
sf<-(gk-gk*(L[i]/K[i]))/(TP[i]/(K[i]/(1+gk))) #Calibrated such that the model generates the baseline scenario; ensures that L/K will remain contant during the simulation period [Category C(ii)]; see Appendix B
#Initial values
Y[i]<-Data[1,c("Y")] #US GDP in 1960 (in trillion 2009 US$)
K[i]<-Data[1,c("K")] #US capital stock in 1960 (in trillion 2009 US$)
L[i]<-Data[1,c("L")] #Loans of US non-financial corporations in 1960 (in trillion 2009 US$)
W[i]<-sw*Y[i] #Derived from equation (1)
Yc[i]<-DP[i]+BP[i]+rm*(M[i]/(1+gk)) #Derived from equation (2)
CO[i]<-Y[i]-I[i] #Derived from equation (5)
TP[i]<-Y[i]-W[i]-rl*(L[i]/(1+gk)) #Derived from equation (6)
RP[i]<-sf*TP[i] #Derived from equation (7)
DP[i]<-TP[i]-RP[i] #Derived from equation (8)
I[i]<-(gk/(1+gk))*K[i] #Derived from equation (9)
BP[i]<-rl*(L[i]/(1+gk))-rm*(M[i]/(1+gk)) #Derived from equation (12)
M[i]<-L[i] #Derived from equation (13)
Y_star[i]<-v*K[i] #Derived from equation (14)
lev[i]<-L[i]/K[i] #Derived from equation (17)
gy[i]<-gk #Based on the baseline scenario
}
}
#Equations
else {
for (iterations in 1:10){
#Households
W[i]<-sw*Y[i]
Yc[i]<-DP[i]+BP[i]+rm*M[i-1]
CO[i]<-c1*W[i-1]+c2*Yc[i-1]+c3*M[i-1]
M[i]<-M[i-1]+W[i]+Yc[i]-CO[i]
#Firms
Y[i]<-CO[i]+I[i]
TP[i]<-Y[i]-W[i]-rl*L[i-1]
RP[i]<-sf*TP[i]
DP[i]<-TP[i]-RP[i]
I[i]<-gk*K[i-1]
K[i]<-K[i-1]+I[i]
L[i]<-L[i-1]+I[i]-RP[i]
#Banks
BP[i]<-rl*L[i-1]-rm*M[i-1]
M_red[i]<-L[i]
#Auxiliary equations
Y_star[i]<-v*K[i]
u[i]<-Y[i]/Y_star[i]
gy[i]<-(Y[i]-Y[i-1])/Y[i-1]
lev[i]<-L[i]/K[i]
}
}
}
#Table
matrixname<-paste("Table")
assign (matrixname, (round(cbind(M_red, M, u, gy, lev, Y), digits=4)))
#Graphs
# Actual (solid) vs simulated (dotted) series, 1960-2010
plot(Data[1:T,c("lev")], type="l", xlab= "Year", ylab= "Leverage ratio", xaxt="n")
lines(Table[1:T,c("lev")], type="l", lty=3)
axis(side=1, at=c(1,11,21,31,41, 51), labels=c("1960","1970","1980", "1990","2000","2010"))
legend("bottomright", legend=c("Actual", "Simulated"), lty=c(1,3), bty="n")
plot(Data[1:T,c("u")], type="l", xlab= "Year", ylab= "Capacity utilisation", xaxt="n")
lines(Table[1:T,c("u")], type="l", lty=3)
axis(side=1, at=c(1,11,21,31,41, 51), labels=c("1960","1970","1980", "1990","2000","2010"))
legend("bottomright", legend=c("Actual", "Simulated"), lty=c(1,3), bty="n")
plot(Data[1:T,c("gy")], type="l", lty=1, xlab= "Year", ylab= "Growth rate of output", xaxt="n")
lines(Table[1:T,c("gy")], type="l", lty=3)
axis(side=1, at=c(1,11,21,31,41, 51), labels=c("1960","1970","1980", "1990","2000","2010"))
legend("bottomright", legend=c("Actual", "Simulated"), lty=c(1,3), bty="n")
plot(Data[1:T,c("Y")], type="l", lty=1, xlab= "Year", ylab= "Output", xaxt="n")
lines(Table[1:T,c("Y")], type="l", lty=3 )
axis(side=1, at=c(1,11,21,31,41, 51), labels=c("1960","1970","1980", "1990","2000","2010"))
legend("bottomright", legend=c("Actual", "Simulated"), lty=c(1,3), bty="n")
###########################################
#Model validation
# Compare autocorrelation of HP-filtered cyclical components of actual and
# simulated output.
#Install.packages("mFilter") #This command is necessary if mFilter has not been installed in your computer
library(mFilter)
Y_log<-log((Table[,c("Y")]))
Yactual_log<-log((Data[,c("Y")]))
Y.hp <- hpfilter((Y_log), freq=100, drift=TRUE)
actualY.hp <- hpfilter((Yactual_log), freq=6.25, drift=TRUE)
acfYactual=acf(actualY.hp$cycle, lag.max=20, plot=F)
acfY=acf(Y.hp$cycle,lag.max=20, plot=F)
plot(acfYactual$acf, ylab=" ", xlab="Lag", type="l", lty=1, ylim=c(-0.5,1))
lines(acfY$acf, type="l", lty=3, ylim=c(-0.5,1))
legend("topright", legend=c("Actual", "Simulated"), lty=c(1,3), bty="n") | /dafnik.R | no_license | marcoverpas/SFC-models-R | R | false | false | 5,860 | r | #Dafermos and Nikolaidi model for students (September 2018)
## Dafermos and Nikolaidi stock-flow consistent model (student version).
#Basic version
#Reproduced by Marco Veronese Passarella, April 4th, 2020
## Simulates T = 51 annual periods (1960-2010); each period resolves the
## simultaneous equation system by brute-force repetition (10 passes of the
## inner `iterations` loop).
#Clear all
rm(list=ls(all=TRUE))
# NOTE(review): T is reassigned here, masking the TRUE shorthand `T`
# (the script only uses the `F` shorthand later, so this is safe as written).
T<-51
#Upload data from Dropbox
Data <- read.csv( "https://www.dropbox.com/s/kusoabl4s8clhvj/data_dafnik.csv?dl=1" )
#Endogenous variables
# vector(length=T) initialises each series to logical FALSE (numeric 0)
W<- vector(length=T)
Yc<- vector(length=T)
CO<- vector(length=T)
M<- vector(length=T)
Y<- vector(length=T)
TP<- vector(length=T)
RP<- vector(length=T)
DP<- vector(length=T)
I<- vector(length=T)
K<- vector(length=T)
L<- vector(length=T)
BP<- vector(length=T)
M_red<- vector(length=T)
Y_star<- vector(length=T) #auxiliary variable
u<- vector(length=T) #auxiliary variable
gy<- vector(length=T) #auxiliary variable
lev<- vector(length=T) #auxiliary variable
#Parameters
# Period 1 calibrates parameters and initial values; later periods iterate
# the model equations forward.
for (i in 1:T) {
if (i == 1)
{ for (iterations in 1:10){
sw<-mean(Data[,c("sw")]) #Sets the wage share equal to its mean value in the US during the period 1960-2010 [Category B(i)]
rm<- mean(Data[,c("rm")]) #Sets the deposit interest rate equal to its mean value in the US during the period 1960-2010 [Category B(i)]
rl<- mean(Data[,c("rl")]) #Sets the loan interest rate equal to its value in the US during the period 1960-2010 [Category B(i)]
c1<-0.9 #Selected from a reasonable range of values [Category B(iii)]
c2<-0.75 #Selected from a reasonable range of values [Category B(iii)]
u[i]<-Data[1,c("u")] #US capacity utilisation in 1960
# NOTE(review): on the first of the 10 passes Y[i] and K[i] are still 0
# (they are only assigned further below), so v/c3/sf start as NaN and are
# refined on subsequent passes.
v<-Y[i]/(K[i]*u[i]) #Calibrated such that capacity utilisation in the model matches the capacity utilisation in the US in 1960 [Category C(i)]; we use equations (14) and (15)
gk<- mean(Data[,c("gy")]) #Calibrated such that the model generates the baseline scenario [Category C(ii)]
c3<-(K[i]/L[i])*((Y[i]/K[i])*(1+gk)-gk-(c1*W[i]/K[i]+c2*Yc[i]/K[i])) #Calibrated such that the model generates the baseline scenario; ensures that Y/K will remain contant during the simulation period [Category C(ii)]; see Appendix B
sf<-(gk-gk*(L[i]/K[i]))/(TP[i]/(K[i]/(1+gk))) #Calibrated such that the model generates the baseline scenario; ensures that L/K will remain contant during the simulation period [Category C(ii)]; see Appendix B
#Initial values
Y[i]<-Data[1,c("Y")] #US GDP in 1960 (in trillion 2009 US$)
K[i]<-Data[1,c("K")] #US capital stock in 1960 (in trillion 2009 US$)
L[i]<-Data[1,c("L")] #Loans of US non-financial corporations in 1960 (in trillion 2009 US$)
W[i]<-sw*Y[i] #Derived from equation (1)
Yc[i]<-DP[i]+BP[i]+rm*(M[i]/(1+gk)) #Derived from equation (2)
CO[i]<-Y[i]-I[i] #Derived from equation (5)
TP[i]<-Y[i]-W[i]-rl*(L[i]/(1+gk)) #Derived from equation (6)
RP[i]<-sf*TP[i] #Derived from equation (7)
DP[i]<-TP[i]-RP[i] #Derived from equation (8)
I[i]<-(gk/(1+gk))*K[i] #Derived from equation (9)
BP[i]<-rl*(L[i]/(1+gk))-rm*(M[i]/(1+gk)) #Derived from equation (12)
M[i]<-L[i] #Derived from equation (13)
Y_star[i]<-v*K[i] #Derived from equation (14)
lev[i]<-L[i]/K[i] #Derived from equation (17)
gy[i]<-gk #Based on the baseline scenario
}
}
#Equations
else {
for (iterations in 1:10){
#Households
W[i]<-sw*Y[i]
Yc[i]<-DP[i]+BP[i]+rm*M[i-1]
CO[i]<-c1*W[i-1]+c2*Yc[i-1]+c3*M[i-1]
M[i]<-M[i-1]+W[i]+Yc[i]-CO[i]
#Firms
Y[i]<-CO[i]+I[i]
TP[i]<-Y[i]-W[i]-rl*L[i-1]
RP[i]<-sf*TP[i]
DP[i]<-TP[i]-RP[i]
I[i]<-gk*K[i-1]
K[i]<-K[i-1]+I[i]
L[i]<-L[i-1]+I[i]-RP[i]
#Banks
BP[i]<-rl*L[i-1]-rm*M[i-1]
M_red[i]<-L[i]
#Auxiliary equations
Y_star[i]<-v*K[i]
u[i]<-Y[i]/Y_star[i]
gy[i]<-(Y[i]-Y[i-1])/Y[i-1]
lev[i]<-L[i]/K[i]
}
}
}
#Table
matrixname<-paste("Table")
assign (matrixname, (round(cbind(M_red, M, u, gy, lev, Y), digits=4)))
#Graphs
# Actual (solid) vs simulated (dotted) series, 1960-2010
plot(Data[1:T,c("lev")], type="l", xlab= "Year", ylab= "Leverage ratio", xaxt="n")
lines(Table[1:T,c("lev")], type="l", lty=3)
axis(side=1, at=c(1,11,21,31,41, 51), labels=c("1960","1970","1980", "1990","2000","2010"))
legend("bottomright", legend=c("Actual", "Simulated"), lty=c(1,3), bty="n")
plot(Data[1:T,c("u")], type="l", xlab= "Year", ylab= "Capacity utilisation", xaxt="n")
lines(Table[1:T,c("u")], type="l", lty=3)
axis(side=1, at=c(1,11,21,31,41, 51), labels=c("1960","1970","1980", "1990","2000","2010"))
legend("bottomright", legend=c("Actual", "Simulated"), lty=c(1,3), bty="n")
plot(Data[1:T,c("gy")], type="l", lty=1, xlab= "Year", ylab= "Growth rate of output", xaxt="n")
lines(Table[1:T,c("gy")], type="l", lty=3)
axis(side=1, at=c(1,11,21,31,41, 51), labels=c("1960","1970","1980", "1990","2000","2010"))
legend("bottomright", legend=c("Actual", "Simulated"), lty=c(1,3), bty="n")
plot(Data[1:T,c("Y")], type="l", lty=1, xlab= "Year", ylab= "Output", xaxt="n")
lines(Table[1:T,c("Y")], type="l", lty=3 )
axis(side=1, at=c(1,11,21,31,41, 51), labels=c("1960","1970","1980", "1990","2000","2010"))
legend("bottomright", legend=c("Actual", "Simulated"), lty=c(1,3), bty="n")
###########################################
#Model validation
# Compare autocorrelation of HP-filtered cyclical components of actual and
# simulated output.
#Install.packages("mFilter") #This command is necessary if mFilter has not been installed in your computer
library(mFilter)
Y_log<-log((Table[,c("Y")]))
Yactual_log<-log((Data[,c("Y")]))
Y.hp <- hpfilter((Y_log), freq=100, drift=TRUE)
actualY.hp <- hpfilter((Yactual_log), freq=6.25, drift=TRUE)
acfYactual=acf(actualY.hp$cycle, lag.max=20, plot=F)
acfY=acf(Y.hp$cycle,lag.max=20, plot=F)
plot(acfYactual$acf, ylab=" ", xlab="Lag", type="l", lty=1, ylim=c(-0.5,1))
lines(acfY$acf, type="l", lty=3, ylim=c(-0.5,1))
legend("topright", legend=c("Actual", "Simulated"), lty=c(1,3), bty="n") |
#----------------------------------------------------------------
# Environment Set-up
#----------------------------------------------------------------
## CV_10_58.R: 5-fold cross-validation of an xgboost model on the
## Springleaf dataset; per-tree validation AUC for each fold is parsed from
## the xgb.train console output and written to Outputs_CV_<subversion>.csv.
rm(list=ls(all=TRUE))
gc()
options(scipen=999)
library(xgboost)
setwd("/home/rstudio/Dropbox/Public/Springleaf")
subversion <- "10_58"
startTime <- Sys.time()
#----------------------------------------------------------------
# Data
#----------------------------------------------------------------
load("Kaggle_RawData.RData")
y <- train$target
train <- train[, -c(1, 1934)]
test <- test[, -1]
# Separate date and character variables
train_char <- train[, sapply(train, is.character)]
# NOTE(review): grep() coerces the data frame column-wise to strings;
# columns containing JAN1*/FEB1*/MAR1* stamps are treated as dates --
# verify it selects the intended columns.
train_date <- train_char[, grep("JAN1|FEB1|MAR1", train_char), ]
train_char <- train_char[, !colnames(train_char) %in% colnames(train_date)]
train_char[train_char=="[]"] <- NA
train[, names(train_char)] <- train_char
test_char <- test[, sapply(test, is.character)]
test_date <- test_char[, grep("JAN1|FEB1|MAR1", test_char), ]
test_char <- test_char[, !colnames(test_char) %in% colnames(test_date)]
test_char[test_char=="[]"] <- NA
test[, names(test_char)] <- test_char
# Converting date variables to year
train_date <- sapply(train_date, function(x) strptime(x, "%d%B%y:%H:%M:%S"))
train_date <- do.call(cbind.data.frame, train_date)
train_date <- sapply(train_date, function(x) as.numeric(format(x, "%Y")))
train_date <- data.frame(train_date)
train[, names(train_date)] <- train_date
test_date <- sapply(test_date, function(x) strptime(x, "%d%B%y:%H:%M:%S"))
test_date <- do.call(cbind.data.frame, test_date)
test_date <- sapply(test_date, function(x) as.numeric(format(x, "%Y")))
test_date <- data.frame(test_date)
test[, names(test_date)] <- test_date
# Convert character variables to numeric
# Factor levels are built over train AND test jointly so codes agree
for(i in 1:ncol(train)) {
if(class(train[, i]) == "character") {
tmp <- as.numeric(as.factor(c(train[, i], test[, i])))
train[, i] <- head(tmp, nrow(train))
test[, i] <- tail(tmp, nrow(test))
}
}
# Replace -99999 by NA
train[train==-99999] <- NA
test[test==-99999] <- NA
#----------------------------------------------------------------
# Other data processing
#----------------------------------------------------------------
varTrans <- read.csv("Variable_Transformation_3.csv", stringsAsFactors=FALSE)
# Replace 98 by NA
# Only for variables flagged REP_98 in the transformation table
tmpVars <- varTrans$Variable[which(varTrans$BEST=="REP_98")]
for(i in tmpVars) {
train[which(train[, i] == 98), i] <- NA
test[which(test[, i] == 98), i] <- NA
}
#----------------------------------------------------------------
# Model Parameters
#----------------------------------------------------------------
# Create index for cross validation
set.seed(1948)
# NOTE(review): nrow(train)-1 indices are sampled, so one random row never
# appears in any fold.
index <- sample(nrow(train), nrow(train)-1)
# Chunks of 14523 indices each; only the first 5 chunks are used below
kFolds <- split(index, ceiling(seq_along(index)/14523))
# Define model parameters
param0 <- list(
"objective" = "binary:logistic"
, "eval_metric" = "auc"
, "eta" = 0.01
, "subsample" = 0.7
, "colsample_bytree" = 0.5
, "min_child_weight" = 6
, "max_depth" = 9
, "alpha" = 4
)
#----------------------------------------------------------------
# Develop Model
#----------------------------------------------------------------
nTree <- 5000
Outputs <- data.frame("Tree"=0:(nTree-1))
fold <- 1
for(fold in 1:5) {
cat("Processing Fold: ", fold, "\n")
hold <- kFolds[[fold]]
xgtrain <- xgb.DMatrix(as.matrix(train[-hold, ]), label = y[-hold], missing = NA)
xgval <- xgb.DMatrix(as.matrix(train[hold, ]), label = y[hold], missing = NA)
watchlist <- list('val' = xgval)
# xgb.train prints per-round validation AUC; capture the console output and
# parse the numeric values back out of each "[round] val-auc:..." line
aucVal <- capture.output(model <- xgb.train(
nrounds = nTree
, params = param0
, data = xgtrain
, watchlist = watchlist
))
AUC <- sapply(aucVal, function(x) as.numeric(unlist(strsplit(x, split=":"))[2]))
names(AUC) <- sapply(names(AUC), function(x) unlist(strsplit(x, split=":"))[1])
names(AUC) <- gsub("\\t", "", names(AUC))
names(AUC) <- gsub("val-auc", "", names(AUC))
names(AUC) <- gsub(" ", "", names(AUC))
names(AUC) <- gsub("]", "", names(AUC))
names(AUC) <- gsub("\\[", "", names(AUC))
Outputs <- cbind(Outputs, AUC)
names(Outputs)[fold + 1] <- paste0("Fold_", fold)
# Checkpoint results after every fold
write.csv(Outputs, paste0("Outputs_CV_", subversion, ".csv"), row.names=FALSE)
tmpAvg <- rowMeans(as.data.frame(Outputs[, -1]))
cat("Highest accuracy:", round(max(tmpAvg), 6), " Tree: ", which.max(tmpAvg), "\n")
}
# Per-tree AUC averaged across the 5 folds, plus its standard deviation
Mean <- rowMeans(Outputs[, -1])
SD <- apply(Outputs[, -1], 1, sd)
Outputs$Mean <- Mean
Outputs$SD <- SD
write.csv(Outputs, paste0("Outputs_CV_", subversion, ".csv"), row.names=FALSE)
head(Outputs[order(Outputs$Mean, decreasing=TRUE), ])
endTime <- Sys.time()
difftime(endTime, startTime)
| /Benchmark Scripts/CV/Codes/CV_10/CV_10_58.R | no_license | vikasnitk85/SpringleafMarketingesponse | R | false | false | 4,617 | r | #----------------------------------------------------------------
# Environment Set-up
#----------------------------------------------------------------
rm(list=ls(all=TRUE))
gc()
options(scipen=999)
library(xgboost)
setwd("/home/rstudio/Dropbox/Public/Springleaf")
subversion <- "10_58"
startTime <- Sys.time()
#----------------------------------------------------------------
# Data
#----------------------------------------------------------------
load("Kaggle_RawData.RData")
y <- train$target
train <- train[, -c(1, 1934)]
test <- test[, -1]
# Separate date and character variables
train_char <- train[, sapply(train, is.character)]
train_date <- train_char[, grep("JAN1|FEB1|MAR1", train_char), ]
train_char <- train_char[, !colnames(train_char) %in% colnames(train_date)]
train_char[train_char=="[]"] <- NA
train[, names(train_char)] <- train_char
test_char <- test[, sapply(test, is.character)]
test_date <- test_char[, grep("JAN1|FEB1|MAR1", test_char), ]
test_char <- test_char[, !colnames(test_char) %in% colnames(test_date)]
test_char[test_char=="[]"] <- NA
test[, names(test_char)] <- test_char
# Converting date variables to year
train_date <- sapply(train_date, function(x) strptime(x, "%d%B%y:%H:%M:%S"))
train_date <- do.call(cbind.data.frame, train_date)
train_date <- sapply(train_date, function(x) as.numeric(format(x, "%Y")))
train_date <- data.frame(train_date)
train[, names(train_date)] <- train_date
test_date <- sapply(test_date, function(x) strptime(x, "%d%B%y:%H:%M:%S"))
test_date <- do.call(cbind.data.frame, test_date)
test_date <- sapply(test_date, function(x) as.numeric(format(x, "%Y")))
test_date <- data.frame(test_date)
test[, names(test_date)] <- test_date
# Convert character variables to numeric
for(i in 1:ncol(train)) {
if(class(train[, i]) == "character") {
tmp <- as.numeric(as.factor(c(train[, i], test[, i])))
train[, i] <- head(tmp, nrow(train))
test[, i] <- tail(tmp, nrow(test))
}
}
# Replace -99999 by NA
train[train==-99999] <- NA
test[test==-99999] <- NA
#----------------------------------------------------------------
# Other data processing
#----------------------------------------------------------------
# Per-variable transformations chosen offline; column BEST names the
# chosen treatment for each variable.
varTrans <- read.csv("Variable_Transformation_3.csv", stringsAsFactors=FALSE)
# Replace 98 by NA
# For variables flagged "REP_98", the value 98 is a missing-data code.
tmpVars <- varTrans$Variable[which(varTrans$BEST=="REP_98")]
for(i in tmpVars) {
train[which(train[, i] == 98), i] <- NA
test[which(test[, i] == 98), i] <- NA
}
#----------------------------------------------------------------
# Model Parameters
#----------------------------------------------------------------
# Create index for cross validation
# Fixed seed makes the fold assignment reproducible; the shuffled row
# indices are cut into consecutive chunks of 14523 rows each.
# NOTE(review): sample(nrow(train), nrow(train)-1) leaves exactly one
# row out of every fold -- confirm this is intentional.
set.seed(1948)
index <- sample(nrow(train), nrow(train)-1)
kFolds <- split(index, ceiling(seq_along(index)/14523))
# Define model parameters
# xgboost settings: logistic objective scored by AUC, slow learning rate
# (eta) with row/column subsampling and L1 regularization (alpha).
param0 <- list(
"objective" = "binary:logistic"
, "eval_metric" = "auc"
, "eta" = 0.01
, "subsample" = 0.7
, "colsample_bytree" = 0.5
, "min_child_weight" = 6
, "max_depth" = 9
, "alpha" = 4
)
#----------------------------------------------------------------
# Develop Model
#----------------------------------------------------------------
# 5-fold CV: train on four folds, watch validation AUC on the held-out
# fold at every boosting round, and accumulate each fold's per-round AUC
# as a column of `Outputs` (one row per tree).
nTree <- 5000
Outputs <- data.frame("Tree"=0:(nTree-1))
fold <- 1
for(fold in 1:5) {
cat("Processing Fold: ", fold, "\n")
hold <- kFolds[[fold]]
xgtrain <- xgb.DMatrix(as.matrix(train[-hold, ]), label = y[-hold], missing = NA)
xgval <- xgb.DMatrix(as.matrix(train[hold, ]), label = y[hold], missing = NA)
watchlist <- list('val' = xgval)
# xgb.train() prints one "val-auc:<value>" line per round; capture the
# console output so the AUC trace can be parsed below.
aucVal <- capture.output(model <- xgb.train(
nrounds = nTree
, params = param0
, data = xgtrain
, watchlist = watchlist
))
# Parse the numeric AUC out of each printed line, then strip the
# round-number decoration ("[k]", tabs, spaces, "val-auc") from names.
AUC <- sapply(aucVal, function(x) as.numeric(unlist(strsplit(x, split=":"))[2]))
names(AUC) <- sapply(names(AUC), function(x) unlist(strsplit(x, split=":"))[1])
names(AUC) <- gsub("\\t", "", names(AUC))
names(AUC) <- gsub("val-auc", "", names(AUC))
names(AUC) <- gsub(" ", "", names(AUC))
names(AUC) <- gsub("]", "", names(AUC))
names(AUC) <- gsub("\\[", "", names(AUC))
Outputs <- cbind(Outputs, AUC)
names(Outputs)[fold + 1] <- paste0("Fold_", fold)
# Checkpoint the CV trace to disk after every fold.
write.csv(Outputs, paste0("Outputs_CV_", subversion, ".csv"), row.names=FALSE)
# Running per-tree average over the folds processed so far.
tmpAvg <- rowMeans(as.data.frame(Outputs[, -1]))
cat("Highest accuracy:", round(max(tmpAvg), 6), " Tree: ", which.max(tmpAvg), "\n")
}
# Per-tree mean and spread of the validation AUC across the 5 folds.
Mean <- rowMeans(Outputs[, -1])
SD <- apply(Outputs[, -1], 1, sd)
Outputs$Mean <- Mean
Outputs$SD <- SD
write.csv(Outputs, paste0("Outputs_CV_", subversion, ".csv"), row.names=FALSE)
head(Outputs[order(Outputs$Mean, decreasing=TRUE), ])
# Total wall-clock run time (startTime is set earlier in the script).
endTime <- Sys.time()
difftime(endTime, startTime)
|
\name{PamChipAllPepResults-class}
\docType{class}
\alias{PamChipAllPepResults-class}
\alias{PamChipAllPepResults}
\alias{show,PamChipAllPepResults-method}
\title{"PamChipAllPepResults"}
\description{Object returned by \code{\link{AutoPamGeneMix}}.}
\section{Slots}{
\describe{
\item{\code{AllPepRes}:}{A list of data frames as many as peptides analyzed.}
\item{\code{RunTime}:}{Total run time (in seconds) to analyze all of the peptides.}
\item{\code{NoOfPeptides}:}{Number of peptides analyzed.}
\item{\code{TestAt}:}{Additional time point at which group-specific velocities are compared.}
\item{\code{n.groups}:}{Number of groups in the data.}
\item{\code{path}:}{Path where the Cleaned Peptide specific RData have been saved.}
}
}
\section{Methods}{
\describe{
\item{show}{Use \code{show(PamChipAllPepResults-object)} for brief information.}
}
}
\author{Pushpike Thilakarathne, Ziv Shkedy and Dan Lin}
\seealso{\code{\link{PamGeneMix}}, \code{\link{AutoPamGeneMix}}}
\keyword{Mixed Model, Smoothing, Thin Plate regression}
| /man/PamChipAllPepResults-class.Rd | no_license | cran/PamGeneMixed | R | false | false | 1,098 | rd | \name{PamChipAllPepResults-class}
\docType{class}
\alias{PamChipAllPepResults-class}
\alias{PamChipAllPepResults}
\alias{show,PamChipAllPepResults-method}
\title{"PamChipAllPepResults"}
\description{Object returned by \code{\link{AutoPamGeneMix}}.}
\section{Slots}{
\describe{
\item{\code{AllPepRes}:}{A list of data frames as many as peptides analyzed.}
\item{\code{RunTime}:}{Total run time (in seconds) to analyze the all peptides.}
\item{\code{NoOfPeptides}:}{Number of peptides analyzed.}
\item{\code{TestAt}:}{Additional time point at which group-specific velocities are compared.}
\item{\code{n.groups}:}{Number of groups in the data.}
\item{\code{path}:}{Path where the Cleaned Peptide specific RData have been saved.}
}
}
\section{Methods}{
\describe{
\item{show}{Use \code{show(PamChipAllPepResults-object)} for brief information.}
}
}
\author{Pushpike Thilakarathne, Ziv Shkedy and Dan Lin}
\seealso{\code{\link{PamGeneMix}}, \code{\link{AutoPamGeneMix}}}
\keyword{Mixed Model, Smoothing, Thin Plate regression}
|
# ======================================================================
# These are the R commands used to analyze the RNAseq data from HTseq
# output (not included in the current shell script - but provided in
# class in case we can't get stringtie / prepDE files).
# ======================================================================
# Installation of the Bioconductor packages needed for this part (only
# done once, if they're absent):
source("http://www.bioconductor.org/biocLite.R")
biocLite("edgeR")
biocLite("goseq")
# ----------------------------------------------------------------------
# To start with a clean working environment, you can execute the line
# below. Some people get really angry seeing this line in a script.
# NOTE(review): "rm(list = ls()) > ls()" compares rm()'s NULL return
# value against ls() -- the "> ls()" tail looks like a console-prompt
# paste artifact; plain rm(list = ls()) was probably intended. Confirm.
rm(list = ls()) > ls()
# ======================================================================
# Differential gene expression analysis using EdgeR, starting with the
# counts tables from HTseq.
# ======================================================================
# Installation (duplicated from above; harmless if already installed):
source("http://www.bioconductor.org/biocLite.R")
biocLite("edgeR")
biocLite("goseq")
# Load the edgeR library.
library(edgeR)
# Directory holding the per-sample HTseq count files:
directory="~/5430_RNAseq/HTseq_counts/"
setwd(directory)
# Input count files and their experimental conditions: two untreated
# replicates and two E2 (estradiol) treated replicates.
# NOTE(review): sampleConditions is defined but never used below;
# `groups` (further down) plays that role instead.
sampleFiles <- c("untr_rep1.counts", "untr_rep2.counts", "E2_rep1.counts", "E2_rep2.counts")
sampleConditions <- c("untreated", "untreated", "treated", "treated")
# Build the counts table: one column per sample, one row per gene.
# NOTE(review): nrow=20366 hard-codes the gene count of these HTseq
# files; it must match every file read in the loop below.
counts=data.frame(matrix(ncol=4, nrow=20366)) #creates empty df to be filled with the information from all 4 samples I have
# Fill one column per sample from the second column (counts) of each file.
for (ii in sampleFiles)
{
print(ii)
temp<- read.table(paste(directory, ii, sep=""), header=F, stringsAsFactors = F)
counts[,which(sampleFiles==ii)] <- temp[,2]
}
# Label rows with gene IDs (taken from the last file read) and columns
# with the sample file names.
rownames(counts) <- temp[,1]
colnames(counts) <- sampleFiles
head(counts)
# Group labels (untreated vs treated) passed to DGEList().
groups=c(rep("untreated",2), rep('treated',2))
# DGEList() creates the edgeR container object.
cds <- DGEList(counts, group=groups)
names(cds)
head(cds$counts)
# Filter poorly expressed genes: keep genes with CPM > 1 in at least
# three samples.
cds <- cds[rowSums(1e+06 * cds$counts/expandAsMatrix(cds$samples$lib.size, dim(cds)) > 1) >=3, ]
dim(cds)
# Library-size normalization factors.
cds <- calcNormFactors(cds)
cds$samples
# Effective library sizes after normalization.
cds$samples$lib.size * cds$samples$norm.factors
# Dispersion estimates: common first, then moderated tagwise
# (prior.df = 10).
cds <- estimateCommonDisp(cds)
names(cds)
cds$common.dispersion
cds<-estimateTagwiseDisp(cds, prior.df=10)
# MDS plot of the samples (BCV distances), colored by group.
plotMDS(cds, method="bcv", col=as.numeric(cds$samples$group))
legend("bottomright", as.character(unique(cds$samples$group)), col=1:3, pch=20, inset=0)
# Exact test with tagwise dispersions to call significantly changed
# genes between untreated and treated.
de.tgw <- exactTest(cds, dispersion = cds$tagwise.dispersion, pair = c("untreated", "treated"))
names(de.tgw)
de.tgw$comparison
head(de.tgw$table)
# All genes ranked by evidence, with FDR-adjusted p-values.
resultsTbl.tgw <- topTags(de.tgw, n=nrow(de.tgw$table))$table
# Keep only genes significant at FDR <= 0.05.
de.genes.tbl.tgw <- resultsTbl.tgw[ resultsTbl.tgw$FDR <= 0.05, ]
dim(de.genes.tbl.tgw)
# MA plot: all genes in grey, significant genes overplotted in red.
plot(resultsTbl.tgw$logCPM, resultsTbl.tgw$logFC, pch=20, col='grey60', cex=0.5, main = "Tagwise dispersion", xlab="log(total CPM)", ylab="log(Fold Change)")
points(de.genes.tbl.tgw$logCPM, de.genes.tbl.tgw$logFC, pch=20, col='red', cex=0.75)
| /EdgeR_HTseq.R | no_license | biancamocanu/5430_RNAseq | R | false | false | 4,052 | r | #=======================================================================================================================================
# These are the R commands used to analyze the RNAseq data from HTseq output (not included in the current shell script - but provided
# in class in case we can't get stringtie / prepDE files)
#=======================================================================================================================================
#=======================================================================================================================================
#Installation of the bioconductor packages needed for this part (only done once, if they're absent):
source("http://www.bioconductor.org/biocLite.R")
biocLite("edgeR")
biocLite("goseq")
#=======================================================================================================================================
# To start with a clean working environment, you can execute the line below. Some people get really angry seeing this line in a script
# for whatever reason.
rm(list = ls()) > ls()
#=======================================================================================================================================
#======================================================================================================================================
# Differential gene expression analysis using EdgeR, starting with the counts tables from HTseq.
#=======================================================================================================================================
#Installation:
source("http://www.bioconductor.org/biocLite.R")
biocLite("edgeR")
biocLite("goseq")
#Loading EdgeR library
library(edgeR)
#Starting HTseq counts files are in this directory:
directory="~/5430_RNAseq/HTseq_counts/"
setwd(directory)
#Creating the files and conditions for the table:
sampleFiles <- c("untr_rep1.counts", "untr_rep2.counts", "E2_rep1.counts", "E2_rep2.counts")
sampleConditions <- c("untreated", "untreated", "treated", "treated")
#Building the experiment table
counts=data.frame(matrix(ncol=4, nrow=20366)) #creates empty df to be filled with the information from all 4 samples I have
#filling in the table:
for (ii in sampleFiles)
{
print(ii)
temp<- read.table(paste(directory, ii, sep=""), header=F, stringsAsFactors = F)
counts[,which(sampleFiles==ii)] <- temp[,2]
}
#adding labels to the table
rownames(counts) <- temp[,1]
colnames(counts) <- sampleFiles
head(counts)
# Creating groups (all samples that are untreated, all samples that are treated) to be used for DGEList()
groups=c(rep("untreated",2), rep('treated',2))
# DGEList function creates the object for the edgeR
cds <- DGEList(counts, group=groups)
names(cds)
head(cds$counts)
#Filtering poorly expressed genes
cds <- cds[rowSums(1e+06 * cds$counts/expandAsMatrix(cds$samples$lib.size, dim(cds)) > 1) >=3, ]
dim(cds)
cds <- calcNormFactors(cds)
cds$samples
#Effective library sizes after normalization
cds$samples$lib.size * cds$samples$norm.factors
cds <- estimateCommonDisp(cds)
names(cds)
cds$common.dispersion
cds<-estimateTagwiseDisp(cds, prior.df=10)
plotMDS(cds, method="bcv", col=as.numeric(cds$samples$group))
legend("bottomright", as.character(unique(cds$samples$group)), col=1:3, pch=20, inset=0)
# Using tagwise dispersion model to call significantly changed genes
de.tgw <- exactTest(cds, dispersion = cds$tagwise.dispersion, pair = c("untreated", "treated"))
names(de.tgw)
de.tgw$comparison
head(de.tgw$table)
resultsTbl.tgw <- topTags(de.tgw, n=nrow(de.tgw$table))$table
#Creating new table with significantly changed genes:
de.genes.tbl.tgw <- resultsTbl.tgw[ resultsTbl.tgw$FDR <= 0.05, ]
dim(de.genes.tbl.tgw)
#MA plot
plot(resultsTbl.tgw$logCPM, resultsTbl.tgw$logFC, pch=20, col='grey60', cex=0.5, main = "Tagwise dispersion", xlab="log(total CPM)", ylab="log(Fold Change)")
points(de.genes.tbl.tgw$logCPM, de.genes.tbl.tgw$logFC, pch=20, col='red', cex=0.75)
|
# DeepState fuzz-testing driver for the LOPART package: sets up the
# RcppDeepState CI scaffolding, generates and compiles the test
# harness, runs it, and inspects the resulting analyzer logs.
library(testthat)
library(RcppDeepState)
library(nc)
context("rcppdeepstate")
# Locate the installed package, then strip the R CMD check staging
# directory so the path points at the source checkout.
insts_path <- system.file(package="LOPART")
print(insts_path)
insts_path <- gsub("/LOPART.Rcheck/LOPART","",insts_path)
print(insts_path)
# Generate the Makefile and the Travis CI configuration files.
RcppDeepState::make_run()
RcppDeepState::deepstate_ci_setup(insts_path)
travis_path<-file.path(insts_path,".travis.yml")
testfile<-"test-rcppdeepstates.R"
testdir_path<-file.path(insts_path,"tests/testthat/",testfile)
test_that("deepstate ci setup test", {
expect_true(file.exists(travis_path))
expect_true(file.exists(testdir_path))
})
# Create the DeepState test harness for the package's Rcpp functions.
result <- RcppDeepState::deepstate_pkg_create(insts_path)
test_that("deepstate create TestHarness",{
expect_equal(result,"Testharness created!!")
})
# Compile and execute the harness, then inspect the analyzer log.
RcppDeepState::deep_harness_compile_run(insts_path)
log_path <- system.file("testfiles/LOPART_interface_log",package="LOPART")
print(log_path)
# NOTE(review): the hard-coded Travis build path below only resolves on
# the CI machine; log_path above looks like the portable alternative.
error.list <- RcppDeepState::user_error_display("/home/travis/build/akhikolla/LOPART/inst/testfiles/LOPART_interface_log")
# Expect no offending source lines in the error report.
test_that("log files check",{
expect_equal(paste0(error.list$src.file.lines,collapse=""),"")
}) | /tests/testthat/test-rcppdeepstate.R | no_license | akhikolla/LOPART | R | false | false | 1,073 | r | library(testthat)
library(RcppDeepState)
library(nc)
context("rcppdeepstate")
insts_path <- system.file(package="LOPART")
print(insts_path)
insts_path <- gsub("/LOPART.Rcheck/LOPART","",insts_path)
print(insts_path)
RcppDeepState::make_run()
RcppDeepState::deepstate_ci_setup(insts_path)
travis_path<-file.path(insts_path,".travis.yml")
testfile<-"test-rcppdeepstates.R"
testdir_path<-file.path(insts_path,"tests/testthat/",testfile)
test_that("deepstate ci setup test", {
expect_true(file.exists(travis_path))
expect_true(file.exists(testdir_path))
})
result <- RcppDeepState::deepstate_pkg_create(insts_path)
test_that("deepstate create TestHarness",{
expect_equal(result,"Testharness created!!")
})
RcppDeepState::deep_harness_compile_run(insts_path)
log_path <- system.file("testfiles/LOPART_interface_log",package="LOPART")
print(log_path)
error.list <- RcppDeepState::user_error_display("/home/travis/build/akhikolla/LOPART/inst/testfiles/LOPART_interface_log")
test_that("log files check",{
expect_equal(paste0(error.list$src.file.lines,collapse=""),"")
}) |
# Multiple-comparison grouping test based on a midrange statistic
# (midrangeMCP style). The treatment means are sorted; for the full
# range and then for every shrinking sub-range, the statistic
# q = (lowest + highest)/2 - mean(larger sub-block) is compared against
# the critical value in `dms`, and each accepted (homogeneous) window
# is recorded as a 0/1 indicator column in `groups`.
#
# Arguments:
#   y        response values
#   trt      treatment labels, same length as y
#   n        number of treatments (length of the means vector)
#   dferror, mserror, alpha
#            not referenced in this body -- presumably already folded
#            into the critical values `dms` upstream; TODO confirm
#   dms      length-2 critical values: dms[1] for the full range,
#            dms[2] for every sub-range
#
# Returns: group.test(ProcTest(groups)); ProcTest() and group.test()
# are package helpers defined elsewhere in the package.
TMtest <- function(y, trt, n, dferror, mserror, alpha, dms) {
# Sorted treatment means.
Ybar <- tapply(y, trt, mean)
Ybar <- sort(Ybar)
# Split the sorted means at the largest gap between neighbors; Ymean is
# the mean of the larger of the two resulting blocks.
posmax <- as.integer(which.max(Ybar[2:n]-Ybar[1:(n - 1)]))
if (posmax >= (n - posmax)) {
Ymean <- mean(Ybar[1:posmax])
} else
{
Ymean <- mean(Ybar[(posmax + 1):n])
}
# Test the full range first (window containing all n means).
range <- n
pos <- 1
col <- 0
qobs <- (Ybar[pos] + Ybar[n]) / 2 - Ymean
aux <- rep(0, times = n)
groups <- Ybar
# The window is accepted when qobs lies inside [-dms[1], dms[1]].
if ((qobs >= - dms[1]) & (qobs <= dms[1])) aux[1:n] <- 1
if (!(any(aux == 0))) {
groups <- cbind(Ybar, aux)
}
# Continue with sub-ranges only if the full range was not homogeneous.
if (any(aux == 0)) {
continua1 = TRUE
} else {
continua1 = FALSE
}
if (continua1 == TRUE) {
# Slide a window of decreasing width `range` over the sorted means;
# ncomp windows exist at each width, counted by ct.
range <- range - 1
pos <- 0
ncomp <- n - range + 1
ct <- 1
if (range < 2) {
continua2 <- FALSE
} else {
continua2 <- TRUE
}
if (continua2 == TRUE) {
repeat {
pos <- pos + 1
# Same split-at-largest-gap rule, restricted to the current window.
posmax <- as.integer(which.max(Ybar[(pos + 1):(pos + range - 1)]
- Ybar[pos:(pos + range - 2)]))
if (posmax >= (pos + range - 1 - posmax)) {
Ymean <- mean(Ybar[pos:(pos+posmax-1)])
} else {
Ymean <- mean(Ybar[(pos + posmax):(pos + range - 1)])
}
qobs <- (Ybar[pos] + Ybar[pos+range-1]) / 2 - Ymean
aux[1:n] <- 0
if (((qobs >= - dms[2]) & (qobs <= dms[2]))) {
aux[pos:(pos + range - 1)] <- 1
}
# `dentro` flags whether an accepted window is already contained in a
# previously recorded group column; only new groups are appended.
dentro <- FALSE
if ((col > 0) & any(aux == 1)) {
for (i in 1:col)
{
if (any(groups[aux == 1, i + 1] == 0)) {
dentro <- FALSE
} else {
dentro <- TRUE
}
if (dentro == TRUE) break
}
}
if ((dentro == FALSE) & any(aux == 1)) {
groups <- cbind(groups, aux)
col <- col + 1
}
# Advance to the next window; after the last position at this width,
# shrink the window and restart from the left.
ct <- ct + 1
if (ct > ncomp)
{
range <- range - 1
pos <- 0
ncomp <- n - range + 1
ct <- 1
}
if (range < 2) {
continua2 <- FALSE
} else {
continua2 <- TRUE
}
if (continua2 == FALSE) break
}
}
}
# Post-process the indicator matrix into the reported grouping.
result <- ProcTest(groups)
return(group.test(result))
}
| /R/TMtest.R | no_license | bendeivide/midrangeMCP | R | false | false | 2,265 | r | TMtest <- function(y, trt, n, dferror, mserror, alpha, dms) {
Ybar <- tapply(y, trt, mean)
Ybar <- sort(Ybar)
posmax <- as.integer(which.max(Ybar[2:n]-Ybar[1:(n - 1)]))
if (posmax >= (n - posmax)) {
Ymean <- mean(Ybar[1:posmax])
} else
{
Ymean <- mean(Ybar[(posmax + 1):n])
}
range <- n
pos <- 1
col <- 0
qobs <- (Ybar[pos] + Ybar[n]) / 2 - Ymean
aux <- rep(0, times = n)
groups <- Ybar
if ((qobs >= - dms[1]) & (qobs <= dms[1])) aux[1:n] <- 1
if (!(any(aux == 0))) {
groups <- cbind(Ybar, aux)
}
if (any(aux == 0)) {
continua1 = TRUE
} else {
continua1 = FALSE
}
if (continua1 == TRUE) {
range <- range - 1
pos <- 0
ncomp <- n - range + 1
ct <- 1
if (range < 2) {
continua2 <- FALSE
} else {
continua2 <- TRUE
}
if (continua2 == TRUE) {
repeat {
pos <- pos + 1
posmax <- as.integer(which.max(Ybar[(pos + 1):(pos + range - 1)]
- Ybar[pos:(pos + range - 2)]))
if (posmax >= (pos + range - 1 - posmax)) {
Ymean <- mean(Ybar[pos:(pos+posmax-1)])
} else {
Ymean <- mean(Ybar[(pos + posmax):(pos + range - 1)])
}
qobs <- (Ybar[pos] + Ybar[pos+range-1]) / 2 - Ymean
aux[1:n] <- 0
if (((qobs >= - dms[2]) & (qobs <= dms[2]))) {
aux[pos:(pos + range - 1)] <- 1
}
dentro <- FALSE
if ((col > 0) & any(aux == 1)) {
for (i in 1:col)
{
if (any(groups[aux == 1, i + 1] == 0)) {
dentro <- FALSE
} else {
dentro <- TRUE
}
if (dentro == TRUE) break
}
}
if ((dentro == FALSE) & any(aux == 1)) {
groups <- cbind(groups, aux)
col <- col + 1
}
ct <- ct + 1
if (ct > ncomp)
{
range <- range - 1
pos <- 0
ncomp <- n - range + 1
ct <- 1
}
if (range < 2) {
continua2 <- FALSE
} else {
continua2 <- TRUE
}
if (continua2 == FALSE) break
}
}
}
result <- ProcTest(groups)
return(group.test(result))
}
|
## ----setup, echo = FALSE-------------------------------------------------
knitr::opts_chunk$set(
collapse=TRUE,
comment = NA,
prompt = TRUE
)
set.seed(42)
## ----installation, eval=FALSE--------------------------------------------
# library(devtools)
# install_github(repo = 'lageIBUSP/Rsampling')
## ----load library--------------------------------------------------------
library(Rsampling)
## ----inspecionando objeto embauba----------------------------------------
head(embauba)
summary(embauba)
## ----proporcao de infestacao por morfo de embauba------------------------
tapply(embauba$with.vines, embauba$morphotype, mean)
## ----estatistica de interesse embaubas-----------------------------------
# Statistic of interest: difference between the mean vine-infestation
# proportions of the first two morphotype groups (in tapply's group
# order, i.e. sorted unique morphotype labels).
emb.ei <- function(dataframe){
  infested <- dataframe$with.vines
  group_means <- tapply(infested, dataframe$morphotype, mean)
  group_means[[1L]] - group_means[[2L]]
}
## Verificando
emb.ei(embauba)
## ----embaubas resampling, results="hide"---------------------------------
emb.r <- Rsampling(type = "normal", dataframe = embauba,
statistics = emb.ei, cols = 2, ntrials = 1000)
## ----embaubas distribuicao nula, fig.cap="Distribuição das diferenças nas proporções de embaúbas brancas e vermelhas com lianas em 1000 simulações da hipótese nula de ausência de diferença nas populações amostradas. A linha vermelha indica a diferença observada. A região de aceitação da hipótese nula para 5% de significância está delimitada em cinza."----
dplot(emb.r, svalue = emb.ei(embauba), pside="Greater",
main = "Distribuição da estatística de interesse sob H0",
xlab = "Estatística de interesse")
## ----embaubas teste------------------------------------------------------
sum(emb.r >= emb.ei(embauba))/1000 < 0.05
## ----inspecionando objeto azteca-----------------------------------------
head(azteca)
summary(azteca)
## ----pairplot azteca, fig.cap = "Número de formigas recrutadas por extratos de folhas novas e velhas de embaúbas. Os extratos foram aplicados em pares de folhas próximas em embaúbas que tinham colônias de formigas. As linhas ligam folhas do mesmo par experimental."----
splot(azteca$extract.new, azteca$extract.old,
groups.names=c("Folha nova","Folha velha"),
ylab="N de formigas recrutadas",
xlab="Tipo de extrato aplicado")
## ----estatistica de interesse azteca-------------------------------------
# Statistic of interest: mean within-pair difference between the number
# of ants recruited by the new-leaf and old-leaf extracts.
azt.ei <- function(dataframe){
  pair_diffs <- dataframe$extract.new - dataframe$extract.old
  mean(pair_diffs)
}
## Valor observado
azt.ei(azteca)
## ----azteca resampling, results="hide"-----------------------------------
azt.r <- Rsampling(type = "within_rows", dataframe = azteca,
statistics = azt.ei, cols = 2:3, ntrials = 1000)
## ----azteca distribuicao nula, fig.cap="Distribuição das diferenças do número de formigas recrutadas por extratos de folhas novas e velhas de embaúba em pares experimentais, em 1000 simulações da hipótese nula de ausência de diferença. A linha vermelha indica a diferença observada. A região de aceitação da hipótese nula para 5% de significância está delimitada em cinza."----
dplot(azt.r, svalue = azt.ei(azteca), pside="Greater",
main = "Distribuição da estatística de interesse sob H0",
xlab = "Estatística de interesse")
## ----azteca teste--------------------------------------------------------
sum(azt.r >= azt.ei(azteca))/1000 < 0.05
## ----azteca distribuicao nula bicaudal, fig.cap="Distribuição das diferenças do número de formigas recrutadas por extratos de folhas novas e velhas de embaúba em pares experimentais, em 1000 simulações da hipótese nula de ausência de diferença. A região de aceitação da hipótese nula para 5% de significância para teste bicaudal está delimitada em cinza."----
dplot(azt.r, svalue = azt.ei(azteca), pside="Two sided",
main = "Teste bicaudal",
xlab = "Estatística de interesse")
## ----inspecionando objeto peucetia---------------------------------------
head(peucetia)
## ----barplot peucetia, fig.cap = "Número de inspeções em que as 27 aranhas foram registradas em folhas com tricomas, em um experimento de preferência por substratos."----
## Número de inspeções em que estava em folha com tricomas
n.insp <- apply(peucetia, 1, sum)
barplot(table(factor(n.insp, levels=0:6)),
xlab="N de inspeções em que estava na folha com tricoma",
ylab="N de aranhas")
## ----estatistica de interesse peucetia-----------------------------------
# Statistic of interest: mean number of inspections per spider in which
# it was recorded on a trichome leaf (mean of the row totals).
peu.ei <- function(dataframe){
  row_totals <- apply(dataframe, 1, sum)
  mean(row_totals)
}
## Valor observado
peu.ei(peucetia)
## ----peucetia H0---------------------------------------------------------
peu.H0 <- matrix( rep(c(TRUE,FALSE), each = 3),
nrow = nrow(peucetia), ncol = ncol(peucetia), byrow=TRUE)
## Converte em data.frame
peu.H0 <- data.frame(peu.H0)
## verificando
head(peu.H0)
## ----peucetia resampling, results="hide"---------------------------------
peu.r <- Rsampling(type = "within_rows", dataframe = peu.H0,
statistics = peu.ei, ntrials = 1000, replace=TRUE)
## ----peucetia distribuicao nula, fig.cap="Distribuição do número médio de inspeções em que as aranhas estavam em folhas com tricomas, em 1000 simulações da hipótese nula de ausência de preferência por substrato. A linha vermelha indica a média observada. A região de aceitação da hipótese nula para 5% de significância está delimitada em cinza."----
dplot(peu.r, svalue = peu.ei(peucetia), pside="Greater",
main = "Distribuição da estatística de interesse sob H0",
xlab = "Estatística de interesse")
## ----peucetia teste------------------------------------------------------
sum(peu.r >= peu.ei(peucetia))/1000 < 0.05
## ----peucetia n de inspeções---------------------------------------------
## N de inspeções em folha com tricoma
tric <- apply(peucetia, 1, sum)
## N de inspeções em folha lisa
lisa <- apply(peucetia, 1, function(x) sum(x==0))
## Monta o data frame
peu.H0b <- data.frame(tric=tric, lisa = lisa)
## Primeiras linhas
head(peu.H0b)
## ----peucetia statistics 2-----------------------------------------------
# Statistic of interest: mean count of inspections on trichome leaves.
peu.ei2 <- function(dataframe) {
  mean(dataframe$tric)
}
## Verificando
peu.ei2(peu.H0b)
## ----peucetia resampling 2, results="hide"-------------------------------
peu.r2 <- Rsampling(type = "within_rows", dataframe = peu.H0b,
statistics = peu.ei2, ntrials = 1000)
## ----peucetia distribuicao nula 2, fig.cap="Distribuição do número médio de inspeções em que as aranhas estavam em folhas com tricomas, em 1000 simulações da hipótese nula de ausência de preferência por substrato, considerando tendência das aranhas permanecerem onde estão. A linha vermelha indica a média observada."----
dplot(peu.r2, svalue = peu.ei2(peu.H0b), pside="Greater",
main = "Distribuição da estatística de interesse sob H0",
xlab = "Estatística de interesse")
## ----peucetia teste 2----------------------------------------------------
sum(peu.r2 >= peu.ei(peucetia))/1000 < 0.05
| /inst/doc/RRR.R | no_license | ayanamartins/Rsampling | R | false | false | 7,111 | r | ## ----setup, echo = FALSE-------------------------------------------------
knitr::opts_chunk$set(
collapse=TRUE,
comment = NA,
prompt = TRUE
)
set.seed(42)
## ----installation, eval=FALSE--------------------------------------------
# library(devtools)
# install_github(repo = 'lageIBUSP/Rsampling')
## ----load library--------------------------------------------------------
library(Rsampling)
## ----inspecionando objeto embauba----------------------------------------
head(embauba)
summary(embauba)
## ----proporcao de infestacao por morfo de embauba------------------------
tapply(embauba$with.vines, embauba$morphotype, mean)
## ----estatistica de interesse embaubas-----------------------------------
emb.ei <- function(dataframe){
props <- tapply(dataframe$with.vines, dataframe$morphotype, mean)
props[[1]] - props[[2]]
}
## Verificando
emb.ei(embauba)
## ----embaubas resampling, results="hide"---------------------------------
emb.r <- Rsampling(type = "normal", dataframe = embauba,
statistics = emb.ei, cols = 2, ntrials = 1000)
## ----embaubas distribuicao nula, fig.cap="Distribuição das diferenças nas proporções de embaúbas brancas e vermelhas com lianas em 1000 simulações da hipótese nula de ausência de diferença nas populações amostradas. A linha vermelha indica a diferença observada. A região de aceitação da hipótese nula para 5% de significância está delimitada em cinza."----
dplot(emb.r, svalue = emb.ei(embauba), pside="Greater",
main = "Distribuição da estatística de interesse sob H0",
xlab = "Estatística de interesse")
## ----embaubas teste------------------------------------------------------
sum(emb.r >= emb.ei(embauba))/1000 < 0.05
## ----inspecionando objeto azteca-----------------------------------------
head(azteca)
summary(azteca)
## ----pairplot azteca, fig.cap = "Número de formigas recrutadas por extratos de folhas novas e velhas de embaúbas. Os extratos foram aplicados em pares de folhas próximas em embaúbas que tinham colônias de formigas. As linhas ligam folhas do mesmo par experimental."----
splot(azteca$extract.new, azteca$extract.old,
groups.names=c("Folha nova","Folha velha"),
ylab="N de formigas recrutadas",
xlab="Tipo de extrato aplicado")
## ----estatistica de interesse azteca-------------------------------------
azt.ei <- function(dataframe){
diferencas <- with(dataframe, extract.new - extract.old)
mean(diferencas)
}
## Valor observado
azt.ei(azteca)
## ----azteca resampling, results="hide"-----------------------------------
azt.r <- Rsampling(type = "within_rows", dataframe = azteca,
statistics = azt.ei, cols = 2:3, ntrials = 1000)
## ----azteca distribuicao nula, fig.cap="Distribuição das diferenças do número de formigas recrutadas por extratos de folhas novas e velhas de embaúba em pares experimentais, em 1000 simulações da hipótese nula de ausência de diferença. A linha vermelha indica a diferença observada. A região de aceitação da hipótese nula para 5% de significância está delimitada em cinza."----
dplot(azt.r, svalue = azt.ei(azteca), pside="Greater",
main = "Distribuição da estatística de interesse sob H0",
xlab = "Estatística de interesse")
## ----azteca teste--------------------------------------------------------
sum(azt.r >= azt.ei(azteca))/1000 < 0.05
## ----azteca distribuicao nula bicaudal, fig.cap="Distribuição das diferenças do número de formigas recrutadas por extratos de folhas novas e velhas de embaúba em pares experimentais, em 1000 simulações da hipótese nula de ausência de diferença. A região de aceitação da hipótese nula para 5% de significância para teste bicaudal está delimitada em cinza."----
dplot(azt.r, svalue = azt.ei(azteca), pside="Two sided",
main = "Teste bicaudal",
xlab = "Estatística de interesse")
## ----inspecionando objeto peucetia---------------------------------------
head(peucetia)
## ----barplot peucetia, fig.cap = "Número de inspeções em que as 27 aranhas foram registradas em folhas com tricomas, em um experimento de preferência por substratos."----
## Número de inspeções em que estava em folha com tricomas
n.insp <- apply(peucetia, 1, sum)
barplot(table(factor(n.insp, levels=0:6)),
xlab="N de inspeções em que estava na folha com tricoma",
ylab="N de aranhas")
## ----estatistica de interesse peucetia-----------------------------------
peu.ei <- function(dataframe){
mean(apply(dataframe, 1, sum))
}
## Valor observado
peu.ei(peucetia)
## ----peucetia H0---------------------------------------------------------
peu.H0 <- matrix( rep(c(TRUE,FALSE), each = 3),
nrow = nrow(peucetia), ncol = ncol(peucetia), byrow=TRUE)
## Converte em data.frame
peu.H0 <- data.frame(peu.H0)
## verificando
head(peu.H0)
## ----peucetia resampling, results="hide"---------------------------------
peu.r <- Rsampling(type = "within_rows", dataframe = peu.H0,
statistics = peu.ei, ntrials = 1000, replace=TRUE)
## ----peucetia distribuicao nula, fig.cap="Distribuição do número médio de inspeções em que as aranhas estavam em folhas com tricomas, em 1000 simulações da hipótese nula de ausência de preferência por substrato. A linha vermelha indica a média observada. A região de aceitação da hipótese nula para 5% de significância está delimitada em cinza."----
dplot(peu.r, svalue = peu.ei(peucetia), pside="Greater",
main = "Distribuição da estatística de interesse sob H0",
xlab = "Estatística de interesse")
## ----peucetia teste------------------------------------------------------
sum(peu.r >= peu.ei(peucetia))/1000 < 0.05
## ----peucetia n de inspeções---------------------------------------------
## N de inspeções em folha com tricoma
tric <- apply(peucetia, 1, sum)
## N de inspeções em folha lisa
lisa <- apply(peucetia, 1, function(x) sum(x==0))
## Monta o data frame
peu.H0b <- data.frame(tric=tric, lisa = lisa)
## Primeiras linhas
head(peu.H0b)
## ----peucetia statistics 2-----------------------------------------------
peu.ei2 <- function(dataframe) mean(dataframe$tric)
## Verificando
peu.ei2(peu.H0b)
## ----peucetia resampling 2, results="hide"-------------------------------
peu.r2 <- Rsampling(type = "within_rows", dataframe = peu.H0b,
statistics = peu.ei2, ntrials = 1000)
## ----peucetia distribuicao nula 2, fig.cap="Distribuição do número médio de inspeções em que as aranhas estavam em folhas com tricomas, em 1000 simulações da hipótese nula de ausência de preferência por substrato, considerando tendência das aranhas permanecerem onde estão. A linha vermelha indica a média observada."----
dplot(peu.r2, svalue = peu.ei2(peu.H0b), pside="Greater",
main = "Distribuição da estatística de interesse sob H0",
xlab = "Estatística de interesse")
## ----peucetia teste 2----------------------------------------------------
sum(peu.r2 >= peu.ei(peucetia))/1000 < 0.05
|
# file MASS/R/kde2d.R
# copyright (C) 1994-2009 W. N. Venables and B. D. Ripley
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 or 3 of the License
# (at your option).
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
#
# Two-dimensional kernel density estimate with an axis-aligned bivariate
# normal kernel, evaluated on an n[1] x n[2] regular grid over `lims`.
# `h` gives the kernel bandwidths (recycled to length 2); when missing,
# normal-reference bandwidths (bandwidth.nrd) are used. Bandwidths are
# divided by 4 to match S's bandwidth convention.
# Returns a list with grid coordinates `x`, `y` and the density matrix `z`.
kde2d <- function(x, y, h, n = 25, lims = c(range(x), range(y)) )
{
    n_obs <- length(x)
    if (length(y) != n_obs)
        stop("data vectors must be the same length")
    if (any(!is.finite(x)) || any(!is.finite(y)))
        stop("missing or infinite values in the data are not allowed")
    if (any(!is.finite(lims)))
        stop("only finite values are allowed in 'lims'")
    grid_sizes <- rep(n, length.out = 2L)
    grid_x <- seq.int(lims[1L], lims[2L], length.out = grid_sizes[1L])
    grid_y <- seq.int(lims[3L], lims[4L], length.out = grid_sizes[2L])
    bw <- if (missing(h)) c(bandwidth.nrd(x), bandwidth.nrd(y)) else rep(h, length.out = 2L)
    bw <- bw / 4                      # rescale to S's bandwidth convention
    # Kernel weight of every data point at every grid coordinate, one
    # axis at a time; the outer product sums the separable contributions.
    kern_x <- matrix(dnorm(outer(grid_x, x, "-") / bw[1L]), , n_obs)
    kern_y <- matrix(dnorm(outer(grid_y, y, "-") / bw[2L]), , n_obs)
    dens <- tcrossprod(kern_x, kern_y) / (n_obs * bw[1L] * bw[2L])
    list(x = grid_x, y = grid_y, z = dens)
}
| /MASS/R/kde2d.R | no_license | radfordneal/R-package-mods | R | false | false | 1,532 | r | # file MASS/R/kde2d.R
# copyright (C) 1994-2009 W. N. Venables and B. D. Ripley
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 or 3 of the License
# (at your option).
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# A copy of the GNU General Public License is available at
# http://www.r-project.org/Licenses/
#
kde2d <- function(x, y, h, n = 25, lims = c(range(x), range(y)) )
{
nx <- length(x)
if(length(y) != nx)
stop("data vectors must be the same length")
if(any(!is.finite(x)) || any(!is.finite(y)))
stop("missing or infinite values in the data are not allowed")
if(any(!is.finite(lims)))
stop("only finite values are allowed in 'lims'")
n <- rep(n, length.out = 2L)
gx <- seq.int(lims[1L], lims[2L], length.out = n[1L])
gy <- seq.int(lims[3L], lims[4L], length.out = n[2L])
h <- if (missing(h)) c(bandwidth.nrd(x), bandwidth.nrd(y))
else rep(h, length.out = 2L)
h <- h/4 # for S's bandwidth scale
ax <- outer(gx, x, "-" )/h[1L]
ay <- outer(gy, y, "-" )/h[2L]
z <- tcrossprod(matrix(dnorm(ax), , nx), matrix(dnorm(ay), , nx))/ (nx * h[1L] * h[2L])
list(x = gx, y = gy, z = z)
}
|
# Element 8: Tidyverse -- dplyr ----
# Load packages ----
# This should already be loaded if you executed the commands in the previous file.
library(tidyverse)
# The PlayData_t dataset comes from the previous file.
# Scenario 1: Aggregation across height & width
# Scenario 2: Aggregation across time 1 & time 2
# Scenario 3: Aggregation across type A & type B
| /10 - Element 7- dplyr Recap and Conclusion.R | no_license | Scavetta/DAwR | R | false | false | 376 | r | # Element 8: Tidyverse -- dplyr ----
# Load packages ----
# This should already be loaded if you executed the commands in the previous file.
library(tidyverse)
# The PlayData_t dataset comes from the previous file.
# Scenario 1: Aggregation across height & width
# Scenario 2: Aggregation across time 1 & time 2
# Scenario 3: Aggregation across type A & type B
|
# ---- Run configuration -------------------------------------------------
# Pick the price denomination and the outcome to model; the two choices
# together select the input CSV and the output LaTeX table below.
currency <- "USD"
dependent_var <- "log_severity_to_average_after_max_volume_weighted"
#dependent_var = "log_magnitude"
currency_tag <- tolower(currency)
# Input data joined from price, network and trivialness sources.
# Only BTC and USD denominations exist; other values leave the
# filenames undefined, exactly as the original branching did.
if (currency %in% c("BTC", "USD")) {
  input_filename <- paste0("./data/joined_price_network_trivialness_",
                           currency_tag, ".csv")
}
# Human-readable label plus output table path for the chosen outcome.
if (dependent_var == "log_severity_to_average_after_max_volume_weighted") {
  dependent_var_label <- paste0("Severity (", currency, ")")
  if (currency %in% c("BTC", "USD")) {
    output_filename <- paste0("./tables/log_severity_with_trivialness_",
                              currency_tag, ".tex")
  }
} else if (dependent_var == "log_magnitude") {
  dependent_var_label <- paste0("Magnitude (", currency, ")")
  if (currency %in% c("BTC", "USD")) {
    output_filename <- paste0("./tables/log_magnitude_with_trivialness_",
                              currency_tag, ".tex")
  }
}
# When TRUE, the coin-vintage control is regressed out of the dependent
# variable up front (rlm residuals); when FALSE, it enters every model
# below as an ordinary regressor.
control_date <- FALSE
# ---- Dependencies and data loading ------------------------------------
library(glmnet)     # elastic-net / lasso feature selection
library(MASS)       # rlm(): Huber M-estimated robust regression
library(sandwich)   # heteroskedasticity-consistent covariance (vcovHC)
library(lmtest)     # coeftest() with robust covariance matrices
library(stargazer)  # LaTeX regression tables
# NOTE(review): setwd() in a script is fragile -- the path breaks for
# any checkout that lives elsewhere; consider relative paths instead.
setwd(dir = "~/research/nikete/Comunity-Structure-Extremely-Speculative-Asset-Dynamics/")
source("analysis/elastic_net.R")  # cross_validate_alphas(), extract_nonzero_coefs()
source("analysis/utils.R")        # read_data()
# Magnitude outcomes are undefined for zero-volume coins, so drop those
# rows only when modelling magnitude.
drop_zero_volume <- dependent_var %in% c("magnitude", "log_magnitude")
data <- read_data(input_filename, drop_zero_volume,
                  normalize_closeness = TRUE, interaction_terms = TRUE)
# Reproducible 60/40 train/test split.
n_rows <- nrow(data)
n_train <- floor(0.60 * n_rows)
set.seed(19870209)
train_idx <- sample(seq(n_rows), size = n_train)
train_data <- data[train_idx, ]
test_data <- data[-train_idx, ]
# ---- Exploratory correlation check ------------------------------------
# Every candidate regressor: forum activity, network centrality, and
# satoshi-relative measures, plus the nontrivialness flag.
all_independent_vars <- c("user1_num_posts",
                          "user1_num_subjects",
                          "user1_days_since_first_post",
                          "user1_degree_incoming",
                          "user1_degree_outgoing",
                          "user1_clustering_coefficient",
                          "user1_closeness_centrality_unweighted",
                          "user1_closeness_centrality_weighted",
                          "user1_closeness_centrality_incoming_unweighted",
                          "user1_closeness_centrality_outgoing_unweighted",
                          "user1_closeness_centrality_incoming_weighted",
                          "user1_closeness_centrality_outgoing_weighted",
                          "user1_betweenness_centrality_weighted",
                          "user1_satoshi_distance",
                          "user1_satoshi_pagerank_unweighted",
                          "user1_satoshi_pagerank_weighted",
                          "user1_pagerank_unweighted",
                          "user1_pagerank_weighted",
                          "nontrivial")
# Pairwise correlations, then a logical mask flagging near-collinear
# pairs (correlation above 0.9).
cor(train_data[, all_independent_vars])
cor(train_data[, all_independent_vars]) > 0.9
# Optionally partial the coin-vintage control out of the dependent
# variable with a Huber robust regression, keeping only the residual
# variation for all downstream models.
if (control_date) {
  vintage_fit <- rlm(as.formula(paste(dependent_var, "~ date_control")),
                     train_data, psi = "psi.huber", method = "M", model = TRUE)
  vintage_sum <- summary(vintage_fit)
  vintage_tab <- data.frame(vintage_sum$coefficients)
  # rlm() reports t-values only; derive two-sided p-values from them.
  vintage_tab$p.value <- 2 * pt(abs(vintage_tab$t.value), vintage_sum$df[2],
                                lower.tail = FALSE)
  vintage_tab
  train_data[, dependent_var] <- vintage_fit$residuals
}
#####################################
# Model 0: initial exploration over every regressor (including the
# nontrivial interaction terms); no specific hypothesis.  Lasso picks
# the nonzero coefficients, which are then re-fit by OLS and robust
# regression.
good_independent_vars <- c("user1_num_posts",
                           "user1_num_subjects",
                           "user1_days_since_first_post",
                           "user1_degree_incoming",
                           "user1_degree_incoming_nontrivial",
                           "user1_degree_outgoing",
                           "user1_degree_outgoing_nontrivial",
                           "user1_clustering_coefficient",
                           "user1_clustering_coefficient_nontrivial",
                           "user1_closeness_centrality_weighted",
                           "user1_closeness_centrality_weighted_nontrivial",
                           "user1_betweenness_centrality_weighted",
                           "user1_betweenness_centrality_weighted_nontrivial",
                           "user1_satoshi_pagerank_weighted",
                           "user1_satoshi_pagerank_weighted_nontrivial",
                           "user1_satoshi_distance",
                           "user1_satoshi_distance_inf",
                           "user1_pagerank_weighted",
                           "user1_pagerank_weighted_nontrivial",
                           "nontrivial")
if (!control_date) {
  good_independent_vars <- c(good_independent_vars, "date_control")
}
cor(train_data[, good_independent_vars]) > 0.9
# BUG FIX: the original script unconditionally reassigned dependent_var
# to "log_severity_to_average_after_max_volume_weighted" at this point,
# silently overriding a "log_magnitude" choice made at the top of the
# file (while output_filename still pointed at the magnitude table).
# The stray reassignment has been removed.
x <- data.matrix(train_data[, good_independent_vars])
y <- train_data[, dependent_var]
# Find the best elastic-net configuration and keep the nonzero
# coefficients at the best alpha.
# NOTE(review): this alpha grid is just {1}, i.e. pure lasso.
alphas <- seq(1, 1, by = 0.05)
best_model <- cross_validate_alphas(x, y, alphas)
# NOTE(review): presumably element 2 holds the winning alpha -- confirm
# against cross_validate_alphas() in analysis/elastic_net.R.
best_alpha <- best_model[2]
nonzero_coefs <- extract_nonzero_coefs(best_model$coefs)
# Plot the cross-validated lambda path at the chosen alpha.
cvfit <- cv.glmnet(x, y, nfolds = 5, type.measure = "mse",
                   standardize = TRUE, alpha = best_alpha)
plot(cvfit)
cvfit$lambda.min
coef(cvfit, s = "lambda.min")
# Re-fit the selected variables with plain OLS.
lm_formula <- paste(dependent_var, "~", paste(nonzero_coefs, collapse = " + "))
lmfit <- lm(as.formula(lm_formula), train_data)
summary(lmfit)
# OLS diagnostics: residual plots plus high-influence coins
# (Cook's distance above 30/n).
oldpar <- par(mfrow = c(2, 2))
plot(lmfit, las = 1)
par(oldpar)
train_data[which(cooks.distance(lmfit) > 30 / nrow(train_data)),
           c("coin_name", "symbol", dependent_var, nonzero_coefs)]
# Heteroskedasticity-robust standard errors (several HC flavours).
robust_se <- diag(vcovHC(lmfit, type = "HC"))^0.5
coeftest(lmfit, vcov = vcovHC(lmfit, "HC0"))
coeftest(lmfit, vcov = vcovHC(lmfit, "HC2"))
coeftest(lmfit, vcov = vcovHC(lmfit, "HC3"))
# Robust regression via iterated re-weighted least squares (Huber psi).
# (Renamed from `summary` to avoid shadowing base::summary's name.)
rlmfit <- rlm(as.formula(lm_formula), train_data,
              psi = "psi.huber", method = "M", model = TRUE)
rlm_sum0 <- summary(rlmfit)
dd <- data.frame(rlm_sum0$coefficients)
# rlm() reports only t-values; derive two-sided p-values.
dd$p.value <- 2 * pt(abs(dd$t.value), rlm_sum0$df[2], lower.tail = FALSE)
dd
# Plain least squares re-using the robust weights; the psi.huber and
# MSE-weighted results are similar.
wlmfit <- lm(as.formula(lm_formula), train_data, weights = rlmfit$w)
summary(wlmfit)
# Inspect the robust weight assigned to each coin (lowest weight = most
# heavily downweighted outliers).
weights <- data.frame(coin = train_data[, "coin_name"],
                      log_severity = train_data[, dependent_var],
                      residual = rlmfit$residuals,
                      weight = rlmfit$w)
weights_ordered <- weights[order(rlmfit$w), ]
weights_ordered[1:15, ]
#####################################
# Model 1: simple forum-activity statistics only (posts, subjects,
# account age, and raw in/out degree).
good_independent_vars <- c("user1_num_posts",
                           "user1_num_subjects",
                           "user1_days_since_first_post",
                           "user1_degree_incoming",
                           "user1_degree_outgoing")
if (!control_date) {
  good_independent_vars <- c(good_independent_vars, "date_control")
}
x <- data.matrix(train_data[, good_independent_vars])
y <- train_data[, dependent_var]
# Lasso feature selection (the alpha grid is just {1}); keep the
# variables with nonzero coefficients at the best fit.
alphas <- seq(1, 1, by = 0.05)
best_model <- cross_validate_alphas(x, y, alphas)
best_alpha <- best_model[2]
nonzero_coefs <- extract_nonzero_coefs(best_model$coefs)
# OLS on the selected variables.
lm_formula <- paste(dependent_var, "~", paste(nonzero_coefs, collapse = " + "))
model1_lmfit <- lm(as.formula(lm_formula), train_data)
model1_lm_sum <- summary(model1_lmfit)
model1_lm_sum
# Residual diagnostics and influential coins (Cook's distance > 30/n).
oldpar <- par(mfrow = c(2, 2))
plot(model1_lmfit, las = 1)
par(oldpar)
train_data[which(cooks.distance(model1_lmfit) > 30 / nrow(train_data)),
           c("coin_name", dependent_var, nonzero_coefs)]
# Same OLS summary, but with White (HC0) robust standard errors.
model1_rlm_sum <- model1_lm_sum
model1_rlm_sum$coefficients <- unclass(
  coeftest(model1_lmfit, vcov = vcovHC(model1_lmfit, "HC0")))
model1_rlm_sum
# Huber robust regression; two-sided p-values derived from t-values.
model1_rlmfit <- rlm(as.formula(lm_formula), train_data,
                     psi = "psi.huber", method = "M", model = TRUE)
model1_rlm_tab <- data.frame(summary(model1_rlmfit)$coefficients)
model1_rlm_tab$p.value <- 2 * pt(abs(model1_rlm_tab$t.value),
                                 summary(model1_rlmfit)$df[2],
                                 lower.tail = FALSE)
model1_rlm_tab
# Weighted least squares with the robust weights (similar results to
# the psi.huber fit above).
model1_wlmfit <- lm(as.formula(lm_formula), train_data,
                    weights = model1_rlmfit$w)
summary(model1_wlmfit)
#####################################
# Model 2: the nontrivialness dummy alone.  No feature selection --
# every listed variable enters the regression directly.
good_independent_vars <- c("nontrivial")
if (!control_date) {
  good_independent_vars <- c(good_independent_vars, "date_control")
}
x <- data.matrix(train_data[, good_independent_vars])
y <- train_data[, dependent_var]
nonzero_coefs <- good_independent_vars
# OLS fit.
lm_formula <- paste(dependent_var, "~", paste(nonzero_coefs, collapse = " + "))
model2_lmfit <- lm(as.formula(lm_formula), train_data)
model2_lm_sum <- summary(model2_lmfit)
model2_lm_sum
# Residual diagnostics and influential coins (Cook's distance > 30/n).
oldpar <- par(mfrow = c(2, 2))
plot(model2_lmfit, las = 1)
par(oldpar)
train_data[which(cooks.distance(model2_lmfit) > 30 / nrow(train_data)),
           c("coin_name", dependent_var, nonzero_coefs)]
# Same OLS summary, but with White (HC0) robust standard errors.
model2_rlm_sum <- model2_lm_sum
model2_rlm_sum$coefficients <- unclass(
  coeftest(model2_lmfit, vcov = vcovHC(model2_lmfit, "HC0")))
model2_rlm_sum
# Huber robust regression; two-sided p-values derived from t-values.
model2_rlmfit <- rlm(as.formula(lm_formula), train_data,
                     psi = "psi.huber", method = "M", model = TRUE)
model2_rlm_tab <- data.frame(summary(model2_rlmfit)$coefficients)
model2_rlm_tab$p.value <- 2 * pt(abs(model2_rlm_tab$t.value),
                                 summary(model2_rlmfit)$df[2],
                                 lower.tail = FALSE)
model2_rlm_tab
# Weighted least squares with the robust weights.
model2_wlmfit <- lm(as.formula(lm_formula), train_data,
                    weights = model2_rlmfit$w)
summary(model2_wlmfit)
#####################################
# Models 3-8 previously repeated one fitting template six times; a
# single helper replaces the copy-pasted blocks.  For each model it:
#   1. appends the coin-vintage control (unless already residualised),
#   2. optionally prints the near-collinearity mask (|cor| > 0.9),
#   3. optionally lasso-selects variables (alpha fixed at 1, matching
#      the original seq(1, 1, by = 0.05) grid, which is just {1}),
#   4. fits OLS, prints its summary, diagnostic plots, and the coins
#      with Cook's distance above 30/n,
#   5. prints the OLS summary with White (HC0) robust standard errors,
#   6. fits a Huber M-estimated robust regression and prints two-sided
#      p-values derived from its t-values,
#   7. refits weighted least squares using the robust weights.
# Results are assigned into the calling environment under the original
# names (<prefix>_lmfit, <prefix>_lm_sum, <prefix>_rlm_sum,
# <prefix>_rlmfit, <prefix>_wlmfit) so downstream code (the stargazer
# table) is unchanged.  Unlike the original, no scratch globals
# (x, y, dd, summary, ...) are left behind; in particular the base
# function `summary` is no longer shadowed by a data frame.
run_model <- function(vars, prefix, select_features = TRUE,
                      show_cor = TRUE, env = parent.frame()) {
  if (!control_date) {
    vars <- c(vars, "date_control")
  }
  if (show_cor) {
    print(cor(train_data[, vars]) > 0.9)
  }
  x <- data.matrix(train_data[, vars])
  y <- train_data[, dependent_var]
  if (select_features) {
    # Lasso selection; keep variables with nonzero coefficients.  The
    # original also extracted an (unused) best alpha, dropped here.
    best_model <- cross_validate_alphas(x, y, seq(1, 1, by = 0.05))
    nonzero_coefs <- extract_nonzero_coefs(best_model$coefs)
  } else {
    nonzero_coefs <- vars
  }
  lm_formula <- paste(dependent_var, "~",
                      paste(nonzero_coefs, collapse = " + "))
  lmfit <- lm(as.formula(lm_formula), train_data)
  lm_sum <- summary(lmfit)
  print(lm_sum)
  # OLS diagnostic plots and influential observations.
  oldpar <- par(mfrow = c(2, 2))
  plot(lmfit, las = 1)
  par(oldpar)
  print(train_data[which(cooks.distance(lmfit) > 30 / nrow(train_data)),
                   c("coin_name", dependent_var, nonzero_coefs)])
  # OLS summary with White (HC0) robust standard errors.
  rlm_sum <- lm_sum
  rlm_sum$coefficients <- unclass(coeftest(lmfit,
                                           vcov = vcovHC(lmfit, "HC0")))
  print(rlm_sum)
  # Huber robust regression (iterated re-weighted least squares).
  rlmfit <- rlm(as.formula(lm_formula), train_data,
                psi = "psi.huber", method = "M", model = TRUE)
  rsum <- summary(rlmfit)
  rtab <- data.frame(rsum$coefficients)
  rtab$p.value <- 2 * pt(abs(rtab$t.value), rsum$df[2], lower.tail = FALSE)
  print(rtab)
  # WLS re-fit with the robust weights; results are very close to the
  # psi.huber M-estimate.
  wlmfit <- lm(as.formula(lm_formula), train_data, weights = rlmfit$w)
  print(summary(wlmfit))
  assign(paste0(prefix, "_lmfit"), lmfit, envir = env)
  assign(paste0(prefix, "_lm_sum"), lm_sum, envir = env)
  assign(paste0(prefix, "_rlm_sum"), rlm_sum, envir = env)
  assign(paste0(prefix, "_rlmfit"), rlmfit, envir = env)
  assign(paste0(prefix, "_wlmfit"), wlmfit, envir = env)
  invisible(NULL)
}
#####################################
# Model 3: variables measured relative to satoshi only.  (The original
# did not print the correlation mask for this model; preserved via
# show_cor = FALSE.)
run_model(c("user1_satoshi_pagerank_weighted",
            "user1_satoshi_distance",
            "user1_satoshi_distance_inf"),
          prefix = "model3", show_cor = FALSE)
#####################################
# Model 4: weighted network centrality measures.
run_model(c("user1_clustering_coefficient",
            "user1_closeness_centrality_weighted",
            "user1_betweenness_centrality_weighted",
            "user1_pagerank_weighted"),
          prefix = "model4")
#####################################
# Model 5: network and satoshi measures interacted with nontrivialness.
run_model(c("user1_clustering_coefficient_nontrivial",
            "user1_closeness_centrality_weighted_nontrivial",
            "user1_betweenness_centrality_weighted_nontrivial",
            "user1_satoshi_pagerank_weighted_nontrivial",
            "user1_pagerank_weighted_nontrivial",
            "user1_degree_incoming_nontrivial",
            "user1_degree_outgoing_nontrivial"),
          prefix = "model5")
#####################################
# Model 6: network measures, satoshi measures and simple user stats,
# without any nontrivial interaction terms.
run_model(c("user1_num_posts",
            "user1_num_subjects",
            "user1_days_since_first_post",
            "user1_degree_incoming",
            "user1_degree_outgoing",
            "user1_clustering_coefficient",
            "user1_closeness_centrality_weighted",
            "user1_betweenness_centrality_weighted",
            "user1_satoshi_pagerank_weighted",
            "user1_satoshi_distance",
            "user1_satoshi_distance_inf",
            "user1_pagerank_weighted"),
          prefix = "model6")
#####################################
# Models 7 and 8 share the full metric set; model 7 is lasso-selected
# while model 8 enters every variable directly.
all_metrics <- c("user1_num_posts",
                 "user1_num_subjects",
                 "user1_days_since_first_post",
                 "user1_degree_incoming",
                 "user1_degree_incoming_nontrivial",
                 "user1_degree_outgoing",
                 "user1_degree_outgoing_nontrivial",
                 "user1_clustering_coefficient",
                 "user1_clustering_coefficient_nontrivial",
                 "user1_closeness_centrality_weighted",
                 "user1_closeness_centrality_weighted_nontrivial",
                 "user1_betweenness_centrality_weighted",
                 "user1_betweenness_centrality_weighted_nontrivial",
                 "user1_satoshi_pagerank_weighted",
                 "user1_satoshi_pagerank_weighted_nontrivial",
                 "user1_satoshi_distance",
                 "user1_satoshi_distance_inf",
                 "user1_pagerank_weighted",
                 "user1_pagerank_weighted_nontrivial",
                 "nontrivial")
run_model(all_metrics, prefix = "model7")
run_model(all_metrics, prefix = "model8", select_features = FALSE)
#####################################
# Covariate ordering and display labels for the stargazer table.  The
# names of `label_map` are the model covariates (in display order); the
# values are the human-readable row labels.
# NOTE(review): `order` shadows base::order() as a variable name; calls
# like order(...) still resolve to the function, but consider renaming.
label_map <- c(
  user1_num_posts = "num posts",
  user1_num_subjects = "num subjects",
  user1_days_since_first_post = "days since first post",
  user1_degree_incoming = "incoming degree",
  user1_degree_incoming_nontrivial = "incoming * nontrivial",
  user1_degree_outgoing = "outgoing degree",
  user1_degree_outgoing_nontrivial = "outgoing * nontrivial",
  user1_clustering_coefficient = "clustering coefficient",
  user1_clustering_coefficient_nontrivial = "clustering coefficient * nontrivial",
  user1_closeness_centrality_weighted = "closeness centrality",
  user1_closeness_centrality_weighted_nontrivial = "closeness * nontrivial",
  user1_betweenness_centrality_weighted = "betweenness centrality",
  user1_betweenness_centrality_weighted_nontrivial = "betweenness * nontrivial",
  user1_satoshi_pagerank_weighted = "satoshi pagerank",
  user1_satoshi_pagerank_weighted_nontrivial = "satoshi * nontrivial",
  user1_satoshi_distance = "satoshi distance",
  user1_satoshi_distance_inf = "Infinite satoshi distance",
  user1_pagerank_weighted = "pagerank",
  user1_pagerank_weighted_nontrivial = "pagerank * nontrivial",
  nontrivial = "nontrivial"
)
# The vintage control only appears as a table row when it was included
# as a regressor (i.e. not residualised out beforehand).
if (!control_date) {
  label_map <- c(label_map, date_control = "Coin Vintage Control")
}
order <- names(label_map)
cov.labels <- unname(label_map)
depvar.label <- c(dependent_var_label)
stargazer(model2_wlmfit,
model3_wlmfit,
model4_wlmfit,
model5_wlmfit,
model6_wlmfit,
model7_wlmfit,
model8_wlmfit,
dep.var.labels= "",
column.labels=c("Model2", "Model3", "Model4", "Model5", "Model6", "Model7", "Model8"),
column.sep.width = "0pt",
omit.table.layout = "#",
df = FALSE,
title="", align=TRUE,
no.space=TRUE,
dep.var.caption = depvar.label,
order = order,
covariate.labels = cov.labels,
float.env = "table*",
digits = 3,
out=output_filename) | /analysis/models_with_trivialness.R | no_license | nikete/Comunity-Structure-Extremely-Speculative-Asset-Dynamics | R | false | false | 26,662 | r | currency = "USD"
dependent_var = "log_severity_to_average_after_max_volume_weighted"
#dependent_var = "log_magnitude"
if (currency == "BTC") {
input_filename = "./data/joined_price_network_trivialness_btc.csv"
} else if (currency == "USD") {
input_filename = "./data/joined_price_network_trivialness_usd.csv"
}
if (dependent_var == "log_severity_to_average_after_max_volume_weighted") {
dependent_var_label = paste0("Severity (", currency, ")")
if (currency == "BTC") {
output_filename = "./tables/log_severity_with_trivialness_btc.tex"
} else if (currency == "USD") {
output_filename = "./tables/log_severity_with_trivialness_usd.tex"
}
} else if (dependent_var == "log_magnitude") {
dependent_var_label = paste0("Magnitude (", currency, ")")
if (currency == "BTC") {
output_filename = "./tables/log_magnitude_with_trivialness_btc.tex"
} else if (currency == "USD") {
output_filename = "./tables/log_magnitude_with_trivialness_usd.tex"
}
}
# whether to control for the coin vintage by converting the dependent var to its residual
# after rlm regressing on the control variable or just include it in the regression. False
# includes it in the regression
control_date = FALSE
library(glmnet)
library(MASS)
library(sandwich)
library(lmtest)
library(stargazer)
setwd(dir = "~/research/nikete/Comunity-Structure-Extremely-Speculative-Asset-Dynamics/")
source("analysis/elastic_net.R")
source("analysis/utils.R")
remove_zero_volume = FALSE
if (dependent_var == "magnitude" | dependent_var == "log_magnitude") {
remove_zero_volume = TRUE
}
data = read_data(input_filename, remove_zero_volume, normalize_closeness = TRUE, interaction_terms = TRUE)
data_size = nrow(data)
# 60% train
train_size = floor(0.60 * data_size)
# reporoducible partition?
set.seed(19870209)
train_indices = sample(seq(data_size), size = train_size)
train_data = data[train_indices, ]
test_data = data[-train_indices, ]
all_independent_vars = c("user1_num_posts",
"user1_num_subjects",
"user1_days_since_first_post",
"user1_degree_incoming",
"user1_degree_outgoing",
"user1_clustering_coefficient",
"user1_closeness_centrality_unweighted",
"user1_closeness_centrality_weighted",
"user1_closeness_centrality_incoming_unweighted",
"user1_closeness_centrality_outgoing_unweighted",
"user1_closeness_centrality_incoming_weighted",
"user1_closeness_centrality_outgoing_weighted",
"user1_betweenness_centrality_weighted",
"user1_satoshi_distance",
"user1_satoshi_pagerank_unweighted",
"user1_satoshi_pagerank_weighted",
"user1_pagerank_unweighted",
"user1_pagerank_weighted",
"nontrivial")
cor(train_data[,all_independent_vars])
cor(train_data[,all_independent_vars])>0.9
if (control_date) {
control_formula = paste(dependent_var, "~ date_control")
rlmfit = rlm(as.formula(control_formula), train_data, psi="psi.huber", method="M", model=T)
summary = summary(rlmfit)
dd = data.frame(summary$coefficients)
dd$p.value = 2*pt(abs(dd$t.value), summary$df[2], lower.tail=FALSE)
dd
train_data[,dependent_var] = rlmfit$residuals
}
#####################################
# Model 0: Initial exploration, using all vars. No specific model
# find the best elastic net model config and get nonzero coefficients on best alpha
good_independent_vars = c("user1_num_posts",
"user1_num_subjects",
"user1_days_since_first_post",
"user1_degree_incoming",
"user1_degree_incoming_nontrivial",
"user1_degree_outgoing",
"user1_degree_outgoing_nontrivial",
"user1_clustering_coefficient",
"user1_clustering_coefficient_nontrivial",
"user1_closeness_centrality_weighted",
"user1_closeness_centrality_weighted_nontrivial",
"user1_betweenness_centrality_weighted",
"user1_betweenness_centrality_weighted_nontrivial",
"user1_satoshi_pagerank_weighted",
"user1_satoshi_pagerank_weighted_nontrivial",
"user1_satoshi_distance",
"user1_satoshi_distance_inf",
"user1_pagerank_weighted",
"user1_pagerank_weighted_nontrivial",
"nontrivial")
if (!control_date) {
good_independent_vars = c(good_independent_vars, "date_control")
}
cor(train_data[,good_independent_vars])>0.9
dependent_var = "log_severity_to_average_after_max_volume_weighted"
x = data.matrix(train_data[,good_independent_vars])
y = train_data[,dependent_var]
# Initial exploration, using all vars. No specific model
# find the best elastic net model config and get nonzero coefficients on best alpha
alphas=seq(1,1,by=0.05)
best_model = cross_validate_alphas(x, y, alphas)
best_alpha = best_model[2]
nonzero_coefs = extract_nonzero_coefs(best_model$coefs)
# plot the cross validated alpha results
cvfit = cv.glmnet(x, y, nfolds=5, type.measure="mse", standardize=T, alpha=best_alpha)
plot(cvfit)
cvfit$lambda.min
coef(cvfit, s="lambda.min")
# Run simple ols
lm_formula = paste(dependent_var, "~", paste(nonzero_coefs, collapse=" + "))
lmfit = lm(as.formula(lm_formula), train_data)
summary(lmfit)
# investigate the assumption of ols
oldpar = par(mfrow = c(2,2))
plot(lmfit, las=1)
par(oldpar)
train_data[which(cooks.distance(lmfit) > 30/nrow(train_data)), c("coin_name", "symbol", dependent_var, nonzero_coefs)]
# get robust standard errors
robust_se = diag(vcovHC(lmfit, type="HC"))^0.5
coeftest(lmfit, vcov=vcovHC(lmfit, "HC0"))
coeftest(lmfit, vcov=vcovHC(lmfit, "HC2"))
coeftest(lmfit, vcov=vcovHC(lmfit, "HC3"))
# run robust regression using iterated re-weighted least square
rlmfit = rlm(as.formula(lm_formula), train_data, psi="psi.huber", method="M", model=T)
summary = summary(rlmfit)
dd = data.frame(summary$coefficients)
dd$p.value = 2*pt(abs(dd$t.value), summary$df[2], lower.tail=FALSE)
dd
# above uses the psi.huber penalty function. but below we simply use mse.
# the results are similar nevertheless.
wlmfit = lm(as.formula(lm_formula), train_data, weights=rlmfit$w)
summary(wlmfit)
# check the weights assigned to each coin
weights = data.frame(coin=train_data[,"coin_name"], log_severity=train_data[,dependent_var], residual=rlmfit$residuals, weight = rlmfit$w)
weights_ordered = weights[order(rlmfit$w),]
weights_ordered[1:15,]
#####################################
# Model 1: Use only simple user stats
good_independent_vars = c("user1_num_posts",
"user1_num_subjects",
"user1_days_since_first_post",
"user1_degree_incoming",
"user1_degree_outgoing")
if (!control_date) {
good_independent_vars = c(good_independent_vars, "date_control")
}
x = data.matrix(train_data[,good_independent_vars])
y = train_data[,dependent_var]
# find the best elastic net model config and get nonzero coefficients on best alpha
alphas=seq(1,1,by=0.05)
best_model = cross_validate_alphas(x, y, alphas)
best_alpha = best_model[2]
nonzero_coefs = extract_nonzero_coefs(best_model$coefs)
# Run simple ols
lm_formula = paste(dependent_var, "~", paste(nonzero_coefs, collapse=" + "))
model1_lmfit = lm(as.formula(lm_formula), train_data)
model1_lm_sum = summary(model1_lmfit)
model1_lm_sum
# investigate the assumption of ols
oldpar = par(mfrow = c(2,2))
plot(model1_lmfit, las=1)
par(oldpar)
train_data[which(cooks.distance(model1_lmfit) > 30/nrow(train_data)), c("coin_name", dependent_var, nonzero_coefs)]
# Get summary with robust standard errors
model1_rlm_sum = model1_lm_sum
model1_rlm_sum$coefficients = unclass(coeftest(model1_lmfit, vcov=vcovHC(model1_lmfit, "HC0")))
model1_rlm_sum
# run robust regression using iterated re-weighted least square
model1_rlmfit = rlm(as.formula(lm_formula), train_data, psi="psi.huber", method="M", model=T)
summary = summary(model1_rlmfit)
dd = data.frame(summary$coefficients)
dd$p.value = 2*pt(abs(dd$t.value), summary$df[2], lower.tail=FALSE)
dd
# above uses the psi.huber penalty function. but below we simply use mse.
# the results are similar nevertheless.
model1_wlmfit = lm(as.formula(lm_formula), train_data, weights=model1_rlmfit$w)
summary(model1_wlmfit)
#####################################
# Model 2: Use only nontrivialness
good_independent_vars <- c("nontrivial")
if (!control_date) {
  good_independent_vars <- c(good_independent_vars, "date_control")
}
x <- data.matrix(train_data[, good_independent_vars])
y <- train_data[, dependent_var]
# No feature selection for this model: keep every predictor.
nonzero_coefs <- good_independent_vars
# Plain OLS on the selected predictors.
lm_formula <- paste(dependent_var, "~", paste(nonzero_coefs, collapse = " + "))
model2_lmfit <- lm(as.formula(lm_formula), train_data)
model2_lm_sum <- summary(model2_lmfit)
model2_lm_sum
# Diagnostic plots to inspect the OLS assumptions.
oldpar <- par(mfrow = c(2, 2))
plot(model2_lmfit, las = 1)
par(oldpar)
# List influential observations (Cook's distance above the 30/n rule of thumb).
train_data[which(cooks.distance(model2_lmfit) > 30 / nrow(train_data)),
           c("coin_name", dependent_var, nonzero_coefs)]
# Same OLS point estimates with heteroskedasticity-robust (HC0) standard errors.
model2_rlm_sum <- model2_lm_sum
model2_rlm_sum$coefficients <- unclass(coeftest(model2_lmfit, vcov = vcovHC(model2_lmfit, "HC0")))
model2_rlm_sum
# Robust regression via iterated re-weighted least squares (Huber psi).
# model = TRUE (was T, which is reassignable) keeps the model frame.
model2_rlmfit <- rlm(as.formula(lm_formula), train_data, psi = "psi.huber", method = "M", model = TRUE)
model2_irls_sum <- summary(model2_rlmfit)  # renamed from `summary` to avoid shadowing base::summary
dd <- data.frame(model2_irls_sum$coefficients)
# rlm reports no p-values; derive two-sided p-values from the t statistics.
dd$p.value <- 2 * pt(abs(dd$t.value), model2_irls_sum$df[2], lower.tail = FALSE)
dd
# Above uses the Huber psi loss; below re-fits by plain weighted least squares
# using the final IRLS weights. The results are similar either way.
model2_wlmfit <- lm(as.formula(lm_formula), train_data, weights = model2_rlmfit$w)
summary(model2_wlmfit)
#####################################
# Model 3: Use only vars relative to satoshi
good_independent_vars <- c("user1_satoshi_pagerank_weighted",
                           "user1_satoshi_distance",
                           "user1_satoshi_distance_inf")
if (!control_date) {
  good_independent_vars <- c(good_independent_vars, "date_control")
}
x <- data.matrix(train_data[, good_independent_vars])
y <- train_data[, dependent_var]
# Find the best elastic-net config and keep predictors with nonzero
# coefficients at the best alpha.
# NOTE(review): seq(1, 1, by = 0.05) evaluates to just c(1) (pure lasso);
# presumably seq(0, 1, by = 0.05) was intended -- confirm. Kept as-is to
# preserve the reported results.
alphas <- seq(1, 1, by = 0.05)
best_model <- cross_validate_alphas(x, y, alphas)
best_alpha <- best_model[2]  # NOTE(review): never used below
nonzero_coefs <- extract_nonzero_coefs(best_model$coefs)
# Plain OLS on the selected predictors.
lm_formula <- paste(dependent_var, "~", paste(nonzero_coefs, collapse = " + "))
model3_lmfit <- lm(as.formula(lm_formula), train_data)
model3_lm_sum <- summary(model3_lmfit)
model3_lm_sum
# Diagnostic plots to inspect the OLS assumptions.
oldpar <- par(mfrow = c(2, 2))
plot(model3_lmfit, las = 1)
par(oldpar)
# List influential observations (Cook's distance above the 30/n rule of thumb).
train_data[which(cooks.distance(model3_lmfit) > 30 / nrow(train_data)),
           c("coin_name", dependent_var, nonzero_coefs)]
# Same OLS point estimates with heteroskedasticity-robust (HC0) standard errors.
model3_rlm_sum <- model3_lm_sum
model3_rlm_sum$coefficients <- unclass(coeftest(model3_lmfit, vcov = vcovHC(model3_lmfit, "HC0")))
model3_rlm_sum
# Robust regression via iterated re-weighted least squares (Huber psi).
# model = TRUE (was T, which is reassignable) keeps the model frame.
model3_rlmfit <- rlm(as.formula(lm_formula), train_data, psi = "psi.huber", method = "M", model = TRUE)
model3_irls_sum <- summary(model3_rlmfit)  # renamed from `summary` to avoid shadowing base::summary
dd <- data.frame(model3_irls_sum$coefficients)
# rlm reports no p-values; derive two-sided p-values from the t statistics.
dd$p.value <- 2 * pt(abs(dd$t.value), model3_irls_sum$df[2], lower.tail = FALSE)
dd
# Above uses the Huber psi loss; below re-fits by plain weighted least squares
# using the final IRLS weights. The results are similar either way.
model3_wlmfit <- lm(as.formula(lm_formula), train_data, weights = model3_rlmfit$w)
summary(model3_wlmfit)
#####################################
# Model 4: Use network measures
good_independent_vars <- c("user1_clustering_coefficient",
                           "user1_closeness_centrality_weighted",
                           "user1_betweenness_centrality_weighted",
                           "user1_pagerank_weighted")
if (!control_date) {
  good_independent_vars <- c(good_independent_vars, "date_control")
}
# Flag highly collinear predictor pairs (|r| > 0.9) before model selection.
cor(train_data[, good_independent_vars]) > 0.9
x <- data.matrix(train_data[, good_independent_vars])
y <- train_data[, dependent_var]
# Find the best elastic-net config and keep predictors with nonzero
# coefficients at the best alpha.
# NOTE(review): seq(1, 1, by = 0.05) evaluates to just c(1) (pure lasso);
# presumably seq(0, 1, by = 0.05) was intended -- confirm. Kept as-is to
# preserve the reported results.
alphas <- seq(1, 1, by = 0.05)
best_model <- cross_validate_alphas(x, y, alphas)
best_alpha <- best_model[2]  # NOTE(review): never used below
nonzero_coefs <- extract_nonzero_coefs(best_model$coefs)
# Plain OLS on the selected predictors.
lm_formula <- paste(dependent_var, "~", paste(nonzero_coefs, collapse = " + "))
model4_lmfit <- lm(as.formula(lm_formula), train_data)
model4_lm_sum <- summary(model4_lmfit)
model4_lm_sum
# Diagnostic plots to inspect the OLS assumptions.
oldpar <- par(mfrow = c(2, 2))
plot(model4_lmfit, las = 1)
par(oldpar)
# List influential observations (Cook's distance above the 30/n rule of thumb).
train_data[which(cooks.distance(model4_lmfit) > 30 / nrow(train_data)),
           c("coin_name", dependent_var, nonzero_coefs)]
# Same OLS point estimates with heteroskedasticity-robust (HC0) standard errors.
model4_rlm_sum <- model4_lm_sum
model4_rlm_sum$coefficients <- unclass(coeftest(model4_lmfit, vcov = vcovHC(model4_lmfit, "HC0")))
model4_rlm_sum
# Robust regression via iterated re-weighted least squares (Huber psi).
# model = TRUE (was T, which is reassignable) keeps the model frame.
model4_rlmfit <- rlm(as.formula(lm_formula), train_data, psi = "psi.huber", method = "M", model = TRUE)
model4_irls_sum <- summary(model4_rlmfit)  # renamed from `summary` to avoid shadowing base::summary
dd <- data.frame(model4_irls_sum$coefficients)
# rlm reports no p-values; derive two-sided p-values from the t statistics.
dd$p.value <- 2 * pt(abs(dd$t.value), model4_irls_sum$df[2], lower.tail = FALSE)
dd
# Above uses the Huber psi loss; below re-fits by plain weighted least squares
# using the final IRLS weights. The results are similar either way.
model4_wlmfit <- lm(as.formula(lm_formula), train_data, weights = model4_rlmfit$w)
summary(model4_wlmfit)
#####################################
# Model 5: Use network measures, satoshi measures with nontrivial interaction term
good_independent_vars <- c("user1_clustering_coefficient_nontrivial",
                           "user1_closeness_centrality_weighted_nontrivial",
                           "user1_betweenness_centrality_weighted_nontrivial",
                           "user1_satoshi_pagerank_weighted_nontrivial",
                           "user1_pagerank_weighted_nontrivial",
                           "user1_degree_incoming_nontrivial",
                           "user1_degree_outgoing_nontrivial")
if (!control_date) {
  good_independent_vars <- c(good_independent_vars, "date_control")
}
# Flag highly collinear predictor pairs (|r| > 0.9) before model selection.
cor(train_data[, good_independent_vars]) > 0.9
x <- data.matrix(train_data[, good_independent_vars])
y <- train_data[, dependent_var]
# Find the best elastic-net config and keep predictors with nonzero
# coefficients at the best alpha.
# NOTE(review): seq(1, 1, by = 0.05) evaluates to just c(1) (pure lasso);
# presumably seq(0, 1, by = 0.05) was intended -- confirm. Kept as-is to
# preserve the reported results.
alphas <- seq(1, 1, by = 0.05)
best_model <- cross_validate_alphas(x, y, alphas)
best_alpha <- best_model[2]  # NOTE(review): never used below
nonzero_coefs <- extract_nonzero_coefs(best_model$coefs)
# Plain OLS on the selected predictors.
lm_formula <- paste(dependent_var, "~", paste(nonzero_coefs, collapse = " + "))
model5_lmfit <- lm(as.formula(lm_formula), train_data)
model5_lm_sum <- summary(model5_lmfit)
model5_lm_sum
# Diagnostic plots to inspect the OLS assumptions.
oldpar <- par(mfrow = c(2, 2))
plot(model5_lmfit, las = 1)
par(oldpar)
# List influential observations (Cook's distance above the 30/n rule of thumb).
train_data[which(cooks.distance(model5_lmfit) > 30 / nrow(train_data)),
           c("coin_name", dependent_var, nonzero_coefs)]
# Same OLS point estimates with heteroskedasticity-robust (HC0) standard errors.
model5_rlm_sum <- model5_lm_sum
model5_rlm_sum$coefficients <- unclass(coeftest(model5_lmfit, vcov = vcovHC(model5_lmfit, "HC0")))
model5_rlm_sum
# Robust regression via iterated re-weighted least squares (Huber psi).
# model = TRUE (was T, which is reassignable) keeps the model frame.
model5_rlmfit <- rlm(as.formula(lm_formula), train_data, psi = "psi.huber", method = "M", model = TRUE)
model5_irls_sum <- summary(model5_rlmfit)  # renamed from `summary` to avoid shadowing base::summary
dd <- data.frame(model5_irls_sum$coefficients)
# rlm reports no p-values; derive two-sided p-values from the t statistics.
dd$p.value <- 2 * pt(abs(dd$t.value), model5_irls_sum$df[2], lower.tail = FALSE)
dd
# Above uses the Huber psi loss; below re-fits by plain weighted least squares
# using the final IRLS weights. The results are similar either way.
model5_wlmfit <- lm(as.formula(lm_formula), train_data, weights = model5_rlmfit$w)
summary(model5_wlmfit)
#####################################
# Model 6: Use network measures, satoshi measures and user simple stats without any nontrivial measure
good_independent_vars <- c("user1_num_posts",
                           "user1_num_subjects",
                           "user1_days_since_first_post",
                           "user1_degree_incoming",
                           "user1_degree_outgoing",
                           "user1_clustering_coefficient",
                           "user1_closeness_centrality_weighted",
                           "user1_betweenness_centrality_weighted",
                           "user1_satoshi_pagerank_weighted",
                           "user1_satoshi_distance",
                           "user1_satoshi_distance_inf",
                           "user1_pagerank_weighted")
if (!control_date) {
  good_independent_vars <- c(good_independent_vars, "date_control")
}
# Flag highly collinear predictor pairs (|r| > 0.9) before model selection.
cor(train_data[, good_independent_vars]) > 0.9
x <- data.matrix(train_data[, good_independent_vars])
y <- train_data[, dependent_var]
# Find the best elastic-net config and keep predictors with nonzero
# coefficients at the best alpha.
# NOTE(review): seq(1, 1, by = 0.05) evaluates to just c(1) (pure lasso);
# presumably seq(0, 1, by = 0.05) was intended -- confirm. Kept as-is to
# preserve the reported results.
alphas <- seq(1, 1, by = 0.05)
best_model <- cross_validate_alphas(x, y, alphas)
best_alpha <- best_model[2]  # NOTE(review): never used below
nonzero_coefs <- extract_nonzero_coefs(best_model$coefs)
# Plain OLS on the selected predictors.
lm_formula <- paste(dependent_var, "~", paste(nonzero_coefs, collapse = " + "))
model6_lmfit <- lm(as.formula(lm_formula), train_data)
model6_lm_sum <- summary(model6_lmfit)
model6_lm_sum
# Diagnostic plots to inspect the OLS assumptions.
oldpar <- par(mfrow = c(2, 2))
plot(model6_lmfit, las = 1)
par(oldpar)
# List influential observations (Cook's distance above the 30/n rule of thumb).
train_data[which(cooks.distance(model6_lmfit) > 30 / nrow(train_data)),
           c("coin_name", dependent_var, nonzero_coefs)]
# Same OLS point estimates with heteroskedasticity-robust (HC0) standard errors.
model6_rlm_sum <- model6_lm_sum
model6_rlm_sum$coefficients <- unclass(coeftest(model6_lmfit, vcov = vcovHC(model6_lmfit, "HC0")))
model6_rlm_sum
# Robust regression via iterated re-weighted least squares (Huber psi).
# model = TRUE (was T, which is reassignable) keeps the model frame.
model6_rlmfit <- rlm(as.formula(lm_formula), train_data, psi = "psi.huber", method = "M", model = TRUE)
model6_irls_sum <- summary(model6_rlmfit)  # renamed from `summary` to avoid shadowing base::summary
dd <- data.frame(model6_irls_sum$coefficients)
# rlm reports no p-values; derive two-sided p-values from the t statistics.
dd$p.value <- 2 * pt(abs(dd$t.value), model6_irls_sum$df[2], lower.tail = FALSE)
dd
# Above uses the Huber psi loss; below re-fits by plain weighted least squares
# using the final IRLS weights. The results are similar either way.
model6_wlmfit <- lm(as.formula(lm_formula), train_data, weights = model6_rlmfit$w)
summary(model6_wlmfit)
#####################################
# Model 7: Use all metrics
good_independent_vars <- c("user1_num_posts",
                           "user1_num_subjects",
                           "user1_days_since_first_post",
                           "user1_degree_incoming",
                           "user1_degree_incoming_nontrivial",
                           "user1_degree_outgoing",
                           "user1_degree_outgoing_nontrivial",
                           "user1_clustering_coefficient",
                           "user1_clustering_coefficient_nontrivial",
                           "user1_closeness_centrality_weighted",
                           "user1_closeness_centrality_weighted_nontrivial",
                           "user1_betweenness_centrality_weighted",
                           "user1_betweenness_centrality_weighted_nontrivial",
                           "user1_satoshi_pagerank_weighted",
                           "user1_satoshi_pagerank_weighted_nontrivial",
                           "user1_satoshi_distance",
                           "user1_satoshi_distance_inf",
                           "user1_pagerank_weighted",
                           "user1_pagerank_weighted_nontrivial",
                           "nontrivial")
if (!control_date) {
  good_independent_vars <- c(good_independent_vars, "date_control")
}
# Flag highly collinear predictor pairs (|r| > 0.9) before model selection.
cor(train_data[, good_independent_vars]) > 0.9
x <- data.matrix(train_data[, good_independent_vars])
y <- train_data[, dependent_var]
# Find the best elastic-net config and keep predictors with nonzero
# coefficients at the best alpha.
# NOTE(review): seq(1, 1, by = 0.05) evaluates to just c(1) (pure lasso);
# presumably seq(0, 1, by = 0.05) was intended -- confirm. Kept as-is to
# preserve the reported results.
alphas <- seq(1, 1, by = 0.05)
best_model <- cross_validate_alphas(x, y, alphas)
best_alpha <- best_model[2]  # NOTE(review): never used below
nonzero_coefs <- extract_nonzero_coefs(best_model$coefs)
# Plain OLS on the selected predictors.
lm_formula <- paste(dependent_var, "~", paste(nonzero_coefs, collapse = " + "))
model7_lmfit <- lm(as.formula(lm_formula), train_data)
model7_lm_sum <- summary(model7_lmfit)
model7_lm_sum
# Diagnostic plots to inspect the OLS assumptions.
oldpar <- par(mfrow = c(2, 2))
plot(model7_lmfit, las = 1)
par(oldpar)
# List influential observations (Cook's distance above the 30/n rule of thumb).
train_data[which(cooks.distance(model7_lmfit) > 30 / nrow(train_data)),
           c("coin_name", dependent_var, nonzero_coefs)]
# Same OLS point estimates with heteroskedasticity-robust (HC0) standard errors.
model7_rlm_sum <- model7_lm_sum
model7_rlm_sum$coefficients <- unclass(coeftest(model7_lmfit, vcov = vcovHC(model7_lmfit, "HC0")))
model7_rlm_sum
# Robust regression via iterated re-weighted least squares (Huber psi).
# model = TRUE (was T, which is reassignable) keeps the model frame.
model7_rlmfit <- rlm(as.formula(lm_formula), train_data, psi = "psi.huber", method = "M", model = TRUE)
model7_irls_sum <- summary(model7_rlmfit)  # renamed from `summary` to avoid shadowing base::summary
dd <- data.frame(model7_irls_sum$coefficients)
# rlm reports no p-values; derive two-sided p-values from the t statistics.
dd$p.value <- 2 * pt(abs(dd$t.value), model7_irls_sum$df[2], lower.tail = FALSE)
dd
# Above uses the Huber psi loss; below re-fits by plain weighted least squares
# using the final IRLS weights. The results are similar either way.
model7_wlmfit <- lm(as.formula(lm_formula), train_data, weights = model7_rlmfit$w)
summary(model7_wlmfit)
#####################################
# Model 8: Use all vars without feature selection
good_independent_vars <- c("user1_num_posts",
                           "user1_num_subjects",
                           "user1_days_since_first_post",
                           "user1_degree_incoming",
                           "user1_degree_incoming_nontrivial",
                           "user1_degree_outgoing",
                           "user1_degree_outgoing_nontrivial",
                           "user1_clustering_coefficient",
                           "user1_clustering_coefficient_nontrivial",
                           "user1_closeness_centrality_weighted",
                           "user1_closeness_centrality_weighted_nontrivial",
                           "user1_betweenness_centrality_weighted",
                           "user1_betweenness_centrality_weighted_nontrivial",
                           "user1_satoshi_pagerank_weighted",
                           "user1_satoshi_pagerank_weighted_nontrivial",
                           "user1_satoshi_distance",
                           "user1_satoshi_distance_inf",
                           "user1_pagerank_weighted",
                           "user1_pagerank_weighted_nontrivial",
                           "nontrivial")
if (!control_date) {
  good_independent_vars <- c(good_independent_vars, "date_control")
}
# Flag highly collinear predictor pairs (|r| > 0.9).
cor(train_data[, good_independent_vars]) > 0.9
x <- data.matrix(train_data[, good_independent_vars])
y <- train_data[, dependent_var]
# No elastic-net selection here: keep every predictor.
nonzero_coefs <- good_independent_vars
# Plain OLS on all predictors.
lm_formula <- paste(dependent_var, "~", paste(nonzero_coefs, collapse = " + "))
model8_lmfit <- lm(as.formula(lm_formula), train_data)
model8_lm_sum <- summary(model8_lmfit)
model8_lm_sum
# Diagnostic plots to inspect the OLS assumptions.
oldpar <- par(mfrow = c(2, 2))
plot(model8_lmfit, las = 1)
par(oldpar)
# List influential observations (Cook's distance above the 30/n rule of thumb).
train_data[which(cooks.distance(model8_lmfit) > 30 / nrow(train_data)),
           c("coin_name", dependent_var, nonzero_coefs)]
# Same OLS point estimates with heteroskedasticity-robust (HC0) standard errors.
model8_rlm_sum <- model8_lm_sum
model8_rlm_sum$coefficients <- unclass(coeftest(model8_lmfit, vcov = vcovHC(model8_lmfit, "HC0")))
model8_rlm_sum
# Robust regression via iterated re-weighted least squares (Huber psi).
# model = TRUE (was T, which is reassignable) keeps the model frame.
model8_rlmfit <- rlm(as.formula(lm_formula), train_data, psi = "psi.huber", method = "M", model = TRUE)
model8_irls_sum <- summary(model8_rlmfit)  # renamed from `summary` to avoid shadowing base::summary
dd <- data.frame(model8_irls_sum$coefficients)
# rlm reports no p-values; derive two-sided p-values from the t statistics.
dd$p.value <- 2 * pt(abs(dd$t.value), model8_irls_sum$df[2], lower.tail = FALSE)
dd
# Above uses the Huber psi loss; below re-fits by plain weighted least squares
# using the final IRLS weights. The results are similar either way.
model8_wlmfit <- lm(as.formula(lm_formula), train_data, weights = model8_rlmfit$w)
summary(model8_wlmfit)
#####################################
# Print table: export the weighted-LS fits of models 2-8 side by side as a
# LaTeX table via stargazer, with covariates ordered and relabeled.
# Renamed `order` -> `cov_order` to avoid shadowing base::order.
cov_order <- c("user1_num_posts",
               "user1_num_subjects",
               "user1_days_since_first_post",
               "user1_degree_incoming",
               "user1_degree_incoming_nontrivial",
               "user1_degree_outgoing",
               "user1_degree_outgoing_nontrivial",
               "user1_clustering_coefficient",
               "user1_clustering_coefficient_nontrivial",
               "user1_closeness_centrality_weighted",
               "user1_closeness_centrality_weighted_nontrivial",
               "user1_betweenness_centrality_weighted",
               "user1_betweenness_centrality_weighted_nontrivial",
               "user1_satoshi_pagerank_weighted",
               "user1_satoshi_pagerank_weighted_nontrivial",
               "user1_satoshi_distance",
               "user1_satoshi_distance_inf",
               "user1_pagerank_weighted",
               "user1_pagerank_weighted_nontrivial",
               "nontrivial")
# Human-readable labels, parallel to cov_order.
cov.labels <- c("num posts",
                "num subjects",
                "days since first post",
                "incoming degree",
                "incoming * nontrivial",
                "outgoing degree",
                "outgoing * nontrivial",
                "clustering coefficient",
                "clustering coefficient * nontrivial",
                "closeness centrality",
                "closeness * nontrivial",
                "betweenness centrality",
                "betweenness * nontrivial",
                "satoshi pagerank",
                "satoshi * nontrivial",
                "satoshi distance",
                "Infinite satoshi distance",
                "pagerank",
                "pagerank * nontrivial",
                "nontrivial")
if (!control_date) {
  cov_order <- c(cov_order, "date_control")
  cov.labels <- c(cov.labels, "Coin Vintage Control")
}
depvar.label <- c(dependent_var_label)
stargazer(model2_wlmfit,
          model3_wlmfit,
          model4_wlmfit,
          model5_wlmfit,
          model6_wlmfit,
          model7_wlmfit,
          model8_wlmfit,
          dep.var.labels = "",
          column.labels = c("Model2", "Model3", "Model4", "Model5", "Model6", "Model7", "Model8"),
          column.sep.width = "0pt",
          omit.table.layout = "#",
          df = FALSE,
          title = "", align = TRUE,
          no.space = TRUE,
          dep.var.caption = depvar.label,
          order = cov_order,
          covariate.labels = cov.labels,
          float.env = "table*",
          digits = 3,
          out = output_filename)
# NOTE(review): removed a stray trailing "|" after the stargazer call and the
# extraction residue lines that followed ("Subsets and Splits", ...); they were
# not valid R and would have broken parsing of the script.