content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getBSData.R
\name{addHIVPrevBS}
\alias{addHIVPrevBS}
\title{addHIVPrevBS}
\usage{
addHIVPrevBS(inFile, dat, Args, Type = "All")
}
\arguments{
\item{inFile}{The filepath to the dataset with HIV prevalence.}
\item{dat}{A dataset to add HIV prevalence variables to.}
\item{Args}{requires Args, see \code{\link{setArgs}}.}
\item{Type}{Males, Females or All for ART coverage.}
}
\value{
data.frame
}
\description{
Calculates the HIV prevalence of area surrounding BS
}
|
/man/addHIVPrevBS.Rd
|
no_license
|
hkim207/ahri-1
|
R
| false
| true
| 545
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getBSData.R
\name{addHIVPrevBS}
\alias{addHIVPrevBS}
\title{addHIVPrevBS}
\usage{
addHIVPrevBS(inFile, dat, Args, Type = "All")
}
\arguments{
\item{inFile}{The filepath to the dataset with HIV prevalence.}
\item{dat}{A dataset to add HIV prevalence variables to.}
\item{Args}{requires Args, see \code{\link{setArgs}}.}
\item{Type}{Males, Females or All for ART coverage.}
}
\value{
data.frame
}
\description{
Calculates the HIV prevalence of area surrounding BS
}
|
##--------------------------------------------------------------------
##----- General
##--------------------------------------------------------------------
# Clear the workspace (this script is meant to run in a fresh session)
rm(list = ls()) # Clear environment
gc() # Clear unused memory
cat("\f") # Clear the console
# Prepare needed libraries: install any that are missing, then attach
library.list <- c("stringr")
for (i in seq_along(library.list)) {
  if (!library.list[i] %in% rownames(installed.packages())) {
    install.packages(library.list[i]
                     , repos = "http://cran.rstudio.com/"
                     , dependencies = TRUE
    )
  }
  library(library.list[i], character.only = TRUE)
}
rm(library.list)
# Set working directory and path to data, if need be
# setwd("")
# Load data (interactive file picker; expects the liquor sales csv)
sales <- read.csv(file.choose()
                  , check.names = FALSE
                  , stringsAsFactors = FALSE
                  , na.strings = ""
)
##--------------------------------------------------------------------
##----- Q1 - rename variables
##--------------------------------------------------------------------
# Normalize column names: lower case, spaces -> dots, parentheses removed
old.names <- colnames(sales)
new.names <- tolower(old.names)
new.names <- gsub(" ", ".", new.names)
new.names <- gsub("[()]", "", new.names) # Or "\\(|\\)"
colnames(sales) <- new.names
rm(old.names)
rm(new.names)
##--------------------------------------------------------------------
##----- Q2 - store registry
##--------------------------------------------------------------------
# Q2.1 - Filter out unique store records and drop store info from main data
stores <- unique(sales[, c("store.number"
                           , "store.name"
                           , "address"
                           , "store.location"
                           , "city"
                           , "zip.code"
                           , "county"
)
]
)
# Drop store info from main data:
drop.vars <- c("store.name"
               , "address"
               , "county"
               , "city"
               , "zip.code"
               , "store.location"
)
sales[, drop.vars] <- NULL # alternatively sales <- sales[, !names(sales) %in% drop.vars]
gc()
rm(drop.vars)
# Q2.2 - Filter out store GPS coordinates from location variable
# store.location ends in "(lat, lon)"; split on parentheses, then on ", "
coordinates <- strsplit(stores$store.location, "[()]")
coordinates <- sapply(coordinates, function(x) strsplit(x[2], ", "))
stores$store.latitude <- as.numeric(sapply(coordinates, function(x) x[1]))
stores$store.longitude <- as.numeric(sapply(coordinates, function(x) x[2]))
rm(coordinates)
# Q2.3 - Drop location variable
stores$store.location <- NULL
# Q2.4 - Average GPS coordinates for stores
coordinates <- aggregate(cbind(store.latitude, store.longitude) ~ store.number
                         , stores
                         , mean
)
stores <- merge(stores[, !names(stores) %in% c("store.latitude", "store.longitude")]
                , coordinates
                , by = c("store.number")
                , all.x = TRUE
)
rm(coordinates)
# Q2.5 - Removing duplicates
stores <- unique(stores)
# Q2.6 - Fix address, city and county names
stores$address <- str_to_title(stores$address)
stores$address <- gsub("[.,]", "", stores$address)
stores$city <- str_to_title(stores$city)
stores$county <- str_to_title(stores$county)
# Q2.7 - Remove duplicates
stores <- unique(stores)
# Q2.8 - Fill in missing county names
# We do so by (1) extracting unique combinations of (store.number, county)
# (2) removing county variable from stores
# and (3) adding it back from previously selected unique combinations
stores <- merge(stores[, !names(stores) %in% c("county")]
                , unique(stores[!is.na(stores$county), c("store.number", "county")])
                , by = c("store.number")
                , all.x = TRUE
)
# Q2.9 - Remove duplicates
stores <- unique(stores)
# Q2.10 - Flag store numbers that still appear more than once
stores$dup <- as.integer(duplicated(stores$store.number)
                         | duplicated(stores$store.number, fromLast = TRUE)
)
# Q2.11 - Check for proper zip/city/county combinations
# Import data with correct geo info (interactive file picker)
geo <- read.csv(file.choose()
                , check.names = FALSE
                , stringsAsFactors = FALSE
                , na.strings = ""
)
# Create match variable inside imported data
geo$match <- 1L
# Merge stores with geo so that match variable is added to stores
# but only when stores has correct zipcode/city/county combination
stores <- merge(stores, geo
                , by.x = c("zip.code", "city", "county")
                , by.y = c("zipcode", "city", "county")
                , all.x = TRUE, all.y = FALSE
)
# NAs in stores$match correspond to records with wrong geo info
# Make those records have match = 0 (0L keeps the column integer like 1L above)
stores$match[is.na(stores$match)] <- 0L
##--------------------------------------------------------------------
##----- Q3 - cleaning sales data
##--------------------------------------------------------------------
# Q3.1 - Convert sales and prices to proper format (strip "$", cast to numeric)
sales$state.bottle.retail <- as.numeric(gsub("\\$", "", sales$state.bottle.retail))
sales$sale.dollars <- as.numeric(gsub("\\$", "", sales$sale.dollars))
# Q3.2 - Create subcategory variable
sales$subcategory <- sales$category.name
sales$category.name <- NULL
# Q3.3 - Create new category variable from keywords in the subcategory
sales$category <- NA_character_
sales$category[grepl(' tequila|^tequila', sales$subcategory, ignore.case = TRUE)] <- "Tequila"
sales$category[grepl(' gin|^gin', sales$subcategory, ignore.case = TRUE)] <- "Gin"
sales$category[grepl(' brand|^brand', sales$subcategory, ignore.case = TRUE)] <- "Brandy"
##--------------------------------------------------------------------
##----- Q4 - export cleaned data
##--------------------------------------------------------------------
write.csv(sales, "sales.csv")
write.csv(stores, "stores.csv")
|
/R/r.hw1.solution.R
|
permissive
|
sherrytp/bc_f19_econ
|
R
| false
| false
| 6,425
|
r
|
##--------------------------------------------------------------------
##----- General
##--------------------------------------------------------------------
# Clear the workspace (this script is meant to run in a fresh session)
rm(list = ls()) # Clear environment
gc() # Clear unused memory
cat("\f") # Clear the console
# Prepare needed libraries: install any that are missing, then attach
library.list <- c("stringr")
for (i in seq_along(library.list)) {
  if (!library.list[i] %in% rownames(installed.packages())) {
    install.packages(library.list[i]
                     , repos = "http://cran.rstudio.com/"
                     , dependencies = TRUE
    )
  }
  library(library.list[i], character.only = TRUE)
}
rm(library.list)
# Set working directory and path to data, if need be
# setwd("")
# Load data (interactive file picker; expects the liquor sales csv)
sales <- read.csv(file.choose()
                  , check.names = FALSE
                  , stringsAsFactors = FALSE
                  , na.strings = ""
)
##--------------------------------------------------------------------
##----- Q1 - rename variables
##--------------------------------------------------------------------
# Normalize column names: lower case, spaces -> dots, parentheses removed
old.names <- colnames(sales)
new.names <- tolower(old.names)
new.names <- gsub(" ", ".", new.names)
new.names <- gsub("[()]", "", new.names) # Or "\\(|\\)"
colnames(sales) <- new.names
rm(old.names)
rm(new.names)
##--------------------------------------------------------------------
##----- Q2 - store registry
##--------------------------------------------------------------------
# Q2.1 - Filter out unique store records and drop store info from main data
stores <- unique(sales[, c("store.number"
                           , "store.name"
                           , "address"
                           , "store.location"
                           , "city"
                           , "zip.code"
                           , "county"
)
]
)
# Drop store info from main data:
drop.vars <- c("store.name"
               , "address"
               , "county"
               , "city"
               , "zip.code"
               , "store.location"
)
sales[, drop.vars] <- NULL # alternatively sales <- sales[, !names(sales) %in% drop.vars]
gc()
rm(drop.vars)
# Q2.2 - Filter out store GPS coordinates from location variable
# store.location ends in "(lat, lon)"; split on parentheses, then on ", "
coordinates <- strsplit(stores$store.location, "[()]")
coordinates <- sapply(coordinates, function(x) strsplit(x[2], ", "))
stores$store.latitude <- as.numeric(sapply(coordinates, function(x) x[1]))
stores$store.longitude <- as.numeric(sapply(coordinates, function(x) x[2]))
rm(coordinates)
# Q2.3 - Drop location variable
stores$store.location <- NULL
# Q2.4 - Average GPS coordinates for stores
coordinates <- aggregate(cbind(store.latitude, store.longitude) ~ store.number
                         , stores
                         , mean
)
stores <- merge(stores[, !names(stores) %in% c("store.latitude", "store.longitude")]
                , coordinates
                , by = c("store.number")
                , all.x = TRUE
)
rm(coordinates)
# Q2.5 - Removing duplicates
stores <- unique(stores)
# Q2.6 - Fix address, city and county names
stores$address <- str_to_title(stores$address)
stores$address <- gsub("[.,]", "", stores$address)
stores$city <- str_to_title(stores$city)
stores$county <- str_to_title(stores$county)
# Q2.7 - Remove duplicates
stores <- unique(stores)
# Q2.8 - Fill in missing county names
# We do so by (1) extracting unique combinations of (store.number, county)
# (2) removing county variable from stores
# and (3) adding it back from previously selected unique combinations
stores <- merge(stores[, !names(stores) %in% c("county")]
                , unique(stores[!is.na(stores$county), c("store.number", "county")])
                , by = c("store.number")
                , all.x = TRUE
)
# Q2.9 - Remove duplicates
stores <- unique(stores)
# Q2.10 - Flag store numbers that still appear more than once
stores$dup <- as.integer(duplicated(stores$store.number)
                         | duplicated(stores$store.number, fromLast = TRUE)
)
# Q2.11 - Check for proper zip/city/county combinations
# Import data with correct geo info (interactive file picker)
geo <- read.csv(file.choose()
                , check.names = FALSE
                , stringsAsFactors = FALSE
                , na.strings = ""
)
# Create match variable inside imported data
geo$match <- 1L
# Merge stores with geo so that match variable is added to stores
# but only when stores has correct zipcode/city/county combination
stores <- merge(stores, geo
                , by.x = c("zip.code", "city", "county")
                , by.y = c("zipcode", "city", "county")
                , all.x = TRUE, all.y = FALSE
)
# NAs in stores$match correspond to records with wrong geo info
# Make those records have match = 0 (0L keeps the column integer like 1L above)
stores$match[is.na(stores$match)] <- 0L
##--------------------------------------------------------------------
##----- Q3 - cleaning sales data
##--------------------------------------------------------------------
# Q3.1 - Convert sales and prices to proper format (strip "$", cast to numeric)
sales$state.bottle.retail <- as.numeric(gsub("\\$", "", sales$state.bottle.retail))
sales$sale.dollars <- as.numeric(gsub("\\$", "", sales$sale.dollars))
# Q3.2 - Create subcategory variable
sales$subcategory <- sales$category.name
sales$category.name <- NULL
# Q3.3 - Create new category variable from keywords in the subcategory
sales$category <- NA_character_
sales$category[grepl(' tequila|^tequila', sales$subcategory, ignore.case = TRUE)] <- "Tequila"
sales$category[grepl(' gin|^gin', sales$subcategory, ignore.case = TRUE)] <- "Gin"
sales$category[grepl(' brand|^brand', sales$subcategory, ignore.case = TRUE)] <- "Brandy"
##--------------------------------------------------------------------
##----- Q4 - export cleaned data
##--------------------------------------------------------------------
write.csv(sales, "sales.csv")
write.csv(stores, "stores.csv")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/prior.R
\docType{methods}
\name{Transform,prior.ode-method}
\alias{Transform,prior.ode-method}
\title{Transform the prior model}
\usage{
\S4method{Transform}{prior.ode}(
object,
transforms = NULL,
name,
observation = "X",
par,
keep_grad = TRUE
)
}
\arguments{
\item{object}{object}
\item{transforms}{list of formulas specifying transformations}
\item{name}{name of the log-likelihood model}
\item{observation}{observation variable name}
\item{par}{additional parameter names}
\item{keep_grad}{maintain the gradient as part of the model}
}
\value{
An object of class ``prior.ode'' as described in \code{\link{prior.ode-class}}.
}
\description{
Transform the prior model
}
\keyword{internal}
|
/man/Transform-prior.ode-method.Rd
|
no_license
|
parksw3/fitode
|
R
| false
| true
| 785
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/prior.R
\docType{methods}
\name{Transform,prior.ode-method}
\alias{Transform,prior.ode-method}
\title{Transform the prior model}
\usage{
\S4method{Transform}{prior.ode}(
object,
transforms = NULL,
name,
observation = "X",
par,
keep_grad = TRUE
)
}
\arguments{
\item{object}{object}
\item{transforms}{list of formulas specifying transformations}
\item{name}{name of the log-likelihood model}
\item{observation}{observation variable name}
\item{par}{additional parameter names}
\item{keep_grad}{maintain the gradient as part of the model}
}
\value{
An object of class ``prior.ode'' as described in \code{\link{prior.ode-class}}.
}
\description{
Transform the prior model
}
\keyword{internal}
|
#Complete function for running code.
#Driver for the full pipeline: PubMed search -> PDF download -> Excel log.
#query: PubMed search string; firefoxprofile: path to a Firefox profile;
#username/password: site login (username NULL -> anonymous, 10 pdf/day limit);
#savelocation: browser download directory; filename: xlsx log name;
#newsavelocation: directory where renamed pdfs and the log end up.
#Results are passed between the steps through <<- globals, not return values.
PubMedScraping <- function(query, firefoxprofile, username, password, savelocation, filename, newsavelocation) {
  # Record the inputs so WriteToExcel() can log them alongside the results
  inputs <<- c(query, firefoxprofile, savelocation, filename, newsavelocation)
  # These three steps are identical for both branches, so hoist them
  # (the original duplicated the calls in each arm of the if/else)
  LoadPackages()
  PubMedSearch(query)
  if (is.null(username)) {
    # No account: capped at 10 downloads per day
    DownloadPDF_NoAccount(firefoxprofile, savelocation, filename, newsavelocation)
  } else {
    DownloadPDF_Account(firefoxprofile, username, password, savelocation, filename, newsavelocation)
  }
  WriteToExcel(newsavelocation, filename)
}
#######Load Packages for All Functions#######
#Attaches every package the scraping pipeline depends on, in the same order
#as before. Each entry's purpose is noted inline.
LoadPackages <- function(){
  pkgs <- c(
    "easyPubMed", # scraping pubmed
    "xml2",       # parsing xml files
    "openxlsx",   # convert to excel file
    "qdapRegex",  # parse out ID's from xml code
    "RSelenium",  # run automated websearch
    "plyr",       # rename dataframe variables
    "R.utils",    # used to rename pdf files
    "ttutils"     # used to check whether count is an integer
  )
  for (pkg in pkgs) {
    library(pkg, character.only = TRUE)
  }
}
########Run PubMed Search########
#Runs the PubMed query and builds the global data frame final_df_noauthor
#(one row per article, authors not expanded). All state is shared with the
#download/export functions via <<- globals rather than return values.
PubMedSearch <- function(query){
# Query pubmed and fetch results for search string
my_query <<- query
#NOTE(review): hard-coded NCBI api key committed to source - should live in
#an environment variable. It raises the allowed request rate; the exact
#per-query maximum is not documented here.
my_query <<- get_pubmed_ids(my_query, api_key = "0e0464ff93883fbef4c648d491ced27ed909")
#obtain number of items that need to be iterated through in request
number <<- 1:(as.numeric(my_query$Count))
#generate empty dataframe to hold data; only columns 1-11 are kept at the end,
#the trailing ones exist so rbind with article_to_df output lines up
final_df_noauthor <<- data.frame(query=character(),
pmid=character(),
doi=character(),
title=character(),
abstract=character(),
year=character(),
month=character(),
day=character(),
jabbrv=character(),
journal=character(),
PDFStatus=character(),
Search=character(),
firstname=character(),
address=character(),
email=character(),
search=character())
#for loop to pull all results, one record per request (retmax = 1).
#May be more/less efficient with larger/smaller retmax (determines how many
#search results to pull at once). If issues with pulling too many results
#at once, use Sys.sleep(seconds) to include a pause in each loop.
for (i in number) {
newdata <<- fetch_pubmed_data(my_query, retstart = i-1, retmax = 1)
newlist <<- articles_to_list(newdata)
new_df_noauthor <<- do.call(rbind, lapply(newlist, article_to_df,
max_chars = -1, getAuthors = FALSE))
final_df_noauthor <<- rbind(final_df_noauthor, new_df_noauthor)
}
#clean the data: rename two columns to PDFStatus and Query, put the query
#string in the first cell of the Query column, then eliminate columns that
#weren't used at all.
final_df_noauthor <<- rename(final_df_noauthor, c("keywords"="PDFStatus", "lastname"="Query"))
final_df_noauthor$Query[1] <<- query
final_df_noauthor <<- final_df_noauthor[, c(1:11)]
}
########Pull PDF's Using Results From Search and Write to Excel File########
#This function loads the web browser and pulls pdfs using the results from the
#pubmed search (account variant: logs in first, so there is no daily cap).
#Bug fixes versus the original:
# - PDFStatus assignments in the "resume" branch used `<-`, which only wrote
#   to a local copy of the global final_df_noauthor (updates silently lost);
#   now `<<-` like everywhere else
# - list.files(pattern = .pdf) referenced an undefined object .pdf; the
#   pattern argument must be the string ".pdf"
# - the final close() check compared i to length(final_df_noauthor), which is
#   the number of COLUMNS (11), not the number of result rows
DownloadPDF_Account <- function(firefoxprofile, username, password, savelocation, filename, newsavelocation) {
  #I've had to run this to get the port to work correctly, not sure if
  #genuinely necessary
  remDr1 <- rsDriver()
  #used to get the firefox profile you currently have -- any settings you want
  #to apply for this will have to be saved beforehand
  fprof <<- getFirefoxProfile(firefoxprofile, useBase = TRUE)
  #Initialize the remote driver that will open Firefox. In theory,
  #extraCapabilities = fprof should load the specifications made
  #for Firefox to download files
  remDr <<- remoteDriver(
    remoteServerAddr = "localhost",
    port = 4567L,
    extraCapabilities = fprof,
    browserName = "firefox"
  )
  #Browser sometimes quits due to error when running immediately after
  #remDr, so need sleep time between them.
  Sys.sleep(15)
  #Open Firefox
  remDr$open(silent = TRUE)
  Sys.sleep(15)
  #This section enters your username/password for sci hub if you have them
  remDr$navigate("https://singlelogin.org/?from=booksc.xyz")
  Sys.sleep(5)
  Login <- remDr$findElement(using = "css selector", "#username")
  Login$sendKeysToElement(list(username))
  Password <- remDr$findElement(using = "css selector", "#password")
  Password$sendKeysToElement(list(password, "\uE006"))
  Sys.sleep(5)
  #Below code will navigate to website of choice, enter the titles from
  #final_df_noauthor in search bar, then navigate each window
  #to download pdf if available.
  #When missing, it will enter "missing" in the PDFStatus variable in
  #final_df_noauthor, when downloaded successfully it will enter
  #as "downloaded"
  if (file.exists(file.path(newsavelocation, filename)) == TRUE) {
    #Resume mode: skip titles whose status was already recorded in the log
    oldfile <<- read.xlsx(file.path(newsavelocation, filename))
    for (i in 1:length(final_df_noauthor$title)) {
      if (is.na(oldfile$PDFStatus[i]) == FALSE) {
        next
      }
      #If the browser session died, restart the driver and log back in
      browsercheck <- try(remDr$navigate("https://book4you.org/?signAll=1"))
      if ((class(browsercheck) != "NULL")) {
        try(remDr1 <- rsDriver())
        Sys.sleep(5)
        remDr$open(silent = TRUE)
        Sys.sleep(5)
        remDr$navigate("https://singlelogin.org/?from=booksc.xyz")
        Sys.sleep(5)
        Login <- remDr$findElement(using = "css selector", "#username")
        Login$sendKeysToElement(list(username))
        Password <- remDr$findElement(using = "css selector", "#password")
        Password$sendKeysToElement(list(password, "\uE006"))
        Sys.sleep(2)
      }
      remDr$navigate("https://book4you.org/?signAll=1")
      Sys.sleep(2)
      #Enable exact-match / full-text search toggles before entering the title
      ExactMatch <- remDr$findElement(using = "css selector", "#advSearch-control")
      ExactMatch$clickElement()
      ExactMatch2 <- remDr$findElement(using = "css selector", "#ftcb")
      ExactMatch2$clickElement()
      Sys.sleep(2)
      SearchEntry <- remDr$findElement(using = "css selector", "#searchFieldx")
      SearchEntry$sendKeysToElement(list(final_df_noauthor$title[i], "\uE006"))
      Sys.sleep(2)
      Articles <- remDr$findElement(using = "css selector", ".searchServiceStats")
      Articles$clickElement()
      Sys.sleep(2)
      check <- try(remDr$findElement(using = "css", "#searchResultBox a"))
      if (class(check) == "try-error") {
        final_df_noauthor$PDFStatus[i] <<- "missing"
        next
      } else {
        Sys.sleep(5)
        webElem <- remDr$findElement(using = "css", "#searchResultBox a")
        webElem$clickElement()
      }
      Sys.sleep(5)
      check2 <- try(remDr$findElement(using = "css", ".addDownloadedBook"))
      if (class(check2) == "try-error") {
        final_df_noauthor$PDFStatus[i] <<- "missing"
        next
      } else {
        Sys.sleep(5)
        webElem2 <- remDr$findElement(using = "css", ".addDownloadedBook")
        webElem2$clickElement()
        Sys.sleep(2)
        webElem2$sendKeysToElement(list("\uE006"))
        final_df_noauthor$PDFStatus[i] <<- "downloaded"
        #Rename the freshly downloaded pdf after the article title and move it
        pdfs <- list.files(savelocation, pattern = ".pdf")
        pdfs <- data.frame(lapply(pdfs, as.character), stringsAsFactors=FALSE)
        if (nchar(pdfs[1, 1]) >= 5) {
          pdf <- paste(final_df_noauthor$title[i], '.pdf', sep="")
          newname <- toString(pdf)
          renameFile(file.path(savelocation, pdfs[1,1]), file.path(newsavelocation, newname))
        }
      }
      if (i == length(final_df_noauthor$title)) {
        remDr$close()
      }
    }
  } else {
    #Fresh run: process every title
    for (i in 1:length(final_df_noauthor$title)) {
      browsercheck <- try(remDr$navigate("https://book4you.org/?signAll=1"))
      if ((class(browsercheck) != "NULL")) {
        try(remDr1 <- rsDriver())
        Sys.sleep(5)
        remDr$open(silent = TRUE)
        Sys.sleep(5)
        remDr$navigate("https://singlelogin.org/?from=booksc.xyz")
        Sys.sleep(5)
        Login <- remDr$findElement(using = "css selector", "#username")
        Login$sendKeysToElement(list(username))
        Password <- remDr$findElement(using = "css selector", "#password")
        Password$sendKeysToElement(list(password, "\uE006"))
        Sys.sleep(5)
      }
      remDr$navigate("https://book4you.org/?signAll=1")
      Sys.sleep(2)
      ExactMatch <- remDr$findElement(using = "css selector", "#advSearch-control")
      ExactMatch$clickElement()
      ExactMatch2 <- remDr$findElement(using = "css selector", "#ftcb")
      ExactMatch2$clickElement()
      Sys.sleep(2)
      SearchEntry <- remDr$findElement(using = "css selector", "#searchFieldx")
      SearchEntry$sendKeysToElement(list(final_df_noauthor$title[i], "\uE006"))
      Sys.sleep(2)
      Articles <- remDr$findElement(using = "css selector", ".searchServiceStats")
      Articles$clickElement()
      Sys.sleep(2)
      check <- try(remDr$findElement(using = "css", "#searchResultBox a"))
      if (class(check) == "try-error") {
        print("no result")
        final_df_noauthor$PDFStatus[i] <<- "missing"
        next
      } else {
        Sys.sleep(5)
        webElem <- remDr$findElement(using = "css", "#searchResultBox a")
        webElem$clickElement()
      }
      Sys.sleep(5)
      check2 <- try(remDr$findElement(using = "css", ".addDownloadedBook"))
      if (class(check2) == "try-error") {
        print("no result 2")
        final_df_noauthor$PDFStatus[i] <<- "missing"
        next
      } else {
        Sys.sleep(5)
        webElem2 <- remDr$findElement(using = "css", ".addDownloadedBook")
        webElem2$clickElement()
        Sys.sleep(2)
        webElem2$sendKeysToElement(list("\uE006"))
        final_df_noauthor$PDFStatus[i] <<- "downloaded"
        pdfs <- list.files(savelocation, pattern = ".pdf")
        pdfs <- data.frame(lapply(pdfs, as.character), stringsAsFactors=FALSE)
        if (nchar(pdfs[1, 1]) >= 5) {
          pdf <- paste(final_df_noauthor$title[i], '.pdf', sep="")
          newname <- toString(pdf)
          renameFile(file.path(savelocation, pdfs[1,1]), file.path(newsavelocation, newname))
        }
      }
      if (i == length(final_df_noauthor$title)) {
        remDr$close()
      }
    }
  }
}
#This function will be used when you don't have a scihub username/password - principle
#difference is that the maximum/day without an account is 10 pdfs, so will stop itself
#after successfully downloading 10.
#Bug fixes versus the original:
# - remDr$close was referenced without parentheses at the 10-download limit,
#   so the browser was never actually closed; now remDr$close()
# - list.files(pattern = .pdf) referenced an undefined object .pdf; the
#   pattern argument must be the string ".pdf"
# - the final close() check in the resume branch compared i to
#   length(final_df_noauthor) (number of columns, 11), not the number of rows
DownloadPDF_NoAccount <- function(firefoxprofile, savelocation, filename, newsavelocation) {
  #I've had to run this to get the port to work correctly, not sure if
  #genuinely necessary
  remDr1 <- rsDriver()
  #This is supposed to make Firefox download files without asking you
  #for permission, currently not working for me
  fprof <<- getFirefoxProfile(firefoxprofile, useBase = TRUE)
  #Initialize the remote driver that will open Firefox. In theory,
  #extraCapabilities = fprof should load the specifications made
  #for Firefox to download files
  remDr <<- remoteDriver(
    remoteServerAddr = "localhost",
    port = 4567L,
    extraCapabilities = fprof,
    browserName = "firefox"
  )
  #Browser sometimes quits due to error when running immediately after
  #remDr, so need sleep time between them.
  Sys.sleep(15)
  #Open Firefox
  remDr$open(silent = TRUE)
  Sys.sleep(15)
  #count tracks successful downloads so we can stop at the daily limit of 10
  count <- 0
  if (file.exists(file.path(newsavelocation, filename)) == TRUE) {
    #Resume mode: skip titles whose status was already recorded in the log
    oldfile <<- read.xlsx(file.path(newsavelocation, filename))
    for (i in 1:length(final_df_noauthor$title)) {
      if (is.na(oldfile$PDFStatus[i]) == FALSE) {
        next
      }
      #Stop once 10 pdfs have been downloaded (limit without an account)
      if ((isInteger(count/10) & (count >= 10)) == TRUE) {
        remDr$close()
        break
      }
      #If the browser session died, restart the driver
      browsercheck <- try(remDr$navigate("https://libgen.bban.top/"))
      if ((class(browsercheck) != "NULL")) {
        try(remDr1 <- rsDriver())
        Sys.sleep(5)
        remDr$open(silent = TRUE)
        Sys.sleep(5)
      }
      remDr$navigate("https://libgen.bban.top/")
      #Enable exact-match / full-text search toggles before entering the title
      ExactMatch <- remDr$findElement(using = "css selector", "#advSearch-control")
      ExactMatch$clickElement()
      ExactMatch2 <- remDr$findElement(using = "css selector", "#ftcb")
      ExactMatch2$clickElement()
      Sys.sleep(5)
      SearchEntry <- remDr$findElement(using = "css selector", "#searchFieldx")
      SearchEntry$sendKeysToElement(list(final_df_noauthor$title[i], "\uE006"))
      Sys.sleep(5)
      Articles <- remDr$findElement(using = "css selector", ".searchServiceStats")
      Articles$clickElement()
      Sys.sleep(5)
      check <- try(remDr$findElement(using = "css", "#searchResultBox a"))
      if (class(check) == "try-error") {
        final_df_noauthor$PDFStatus[i] <<- "missing"
        next
      } else {
        Sys.sleep(5)
        webElem <- remDr$findElement(using = "css", "#searchResultBox a")
        webElem$clickElement()
      }
      Sys.sleep(5)
      check2 <- try(remDr$findElement(using = "css", ".addDownloadedBook"))
      if (class(check2) == "try-error") {
        final_df_noauthor$PDFStatus[i] <<- "missing"
        next
      } else {
        Sys.sleep(5)
        webElem2 <- remDr$findElement(using = "css", ".addDownloadedBook")
        webElem2$clickElement()
        Sys.sleep(2)
        webElem2$sendKeysToElement(list("\uE006"))
        final_df_noauthor$PDFStatus[i] <<- "downloaded"
        count <- count + 1
        #Rename the freshly downloaded pdf after the article title and move it
        pdfs <- list.files(savelocation, pattern = ".pdf")
        pdfs <- data.frame(lapply(pdfs, as.character), stringsAsFactors=FALSE)
        if (nchar(pdfs[1, 1]) >= 5) {
          pdf <- paste(final_df_noauthor$title[i], '.pdf', sep="")
          newname <- toString(pdf)
          renameFile(file.path(savelocation, pdfs[1,1]), file.path(newsavelocation, newname))
        }
      }
      if (i == length(final_df_noauthor$title)) {
        remDr$close()
      }
    }
  } else {
    #Fresh run: process every title
    for (i in 1:length(final_df_noauthor$title)) {
      browsercheck <- try(remDr$navigate("https://libgen.bban.top/"))
      if ((class(browsercheck) != "NULL")) {
        try(remDr1 <- rsDriver())
        Sys.sleep(5)
        remDr$open(silent = TRUE)
        Sys.sleep(5)
      }
      if ((isInteger(count/10) & (count >= 10)) == TRUE) {
        remDr$close()
        break
      }
      remDr$navigate("https://libgen.bban.top/")
      ExactMatch <- remDr$findElement(using = "css selector", "#advSearch-control")
      ExactMatch$clickElement()
      ExactMatch2 <- remDr$findElement(using = "css selector", "#ftcb")
      ExactMatch2$clickElement()
      Sys.sleep(5)
      SearchEntry <- remDr$findElement(using = "css selector", "#searchFieldx")
      SearchEntry$sendKeysToElement(list(final_df_noauthor$title[i], "\uE006"))
      Sys.sleep(5)
      Articles <- remDr$findElement(using = "css selector", ".searchServiceStats")
      Articles$clickElement()
      Sys.sleep(5)
      check <- try(remDr$findElement(using = "css", "#searchResultBox a"))
      if (class(check) == "try-error") {
        final_df_noauthor$PDFStatus[i] <<- "missing"
        next
      } else {
        Sys.sleep(5)
        webElem <- remDr$findElement(using = "css", "#searchResultBox a")
        webElem$clickElement()
      }
      Sys.sleep(5)
      check2 <- try(remDr$findElement(using = "css", ".addDownloadedBook"))
      if (class(check2) == "try-error") {
        final_df_noauthor$PDFStatus[i] <<- "missing"
        next
      } else {
        Sys.sleep(5)
        webElem2 <- remDr$findElement(using = "css", ".addDownloadedBook")
        webElem2$clickElement()
        Sys.sleep(2)
        webElem2$sendKeysToElement(list("\uE006"))
        final_df_noauthor$PDFStatus[i] <<- "downloaded"
        count <- count + 1
        pdfs <- list.files(savelocation, pattern = ".pdf")
        pdfs <- data.frame(lapply(pdfs, as.character), stringsAsFactors=FALSE)
        if (nchar(pdfs[1, 1]) >= 5) {
          pdf <- paste(final_df_noauthor$title[i], '.pdf', sep="")
          newname <- toString(pdf)
          renameFile(file.path(savelocation, pdfs[1,1]), file.path(newsavelocation, newname))
        }
      }
      if (i == length(final_df_noauthor$title)) {
        remDr$close()
      }
    }
  }
}
#This will write out the results of your search and whether pdfs were downloaded.
#When running a second time, it appends new results to the workbook you already
#have rather than rewriting it.
#Bug fixes versus the original:
# - the "existing file" branch re-read a hard-coded absolute path on the
#   author's machine instead of file.path(newsavelocation, filename)
# - oldfile.append() is not an R function; rows are appended with rbind()
# - the loop iterated 1:length(final_df_noauthor) (the number of COLUMNS),
#   not the number of result rows, and rewrote the xlsx on every iteration;
#   the file is now written once after all new rows are appended
WriteToExcel <- function(newsavelocation, filename) {
  setwd(newsavelocation)
  if (file.exists(file.path(newsavelocation, filename)) == FALSE) {
    #First run: create the workbook with results and the inputs used
    Output <- createWorkbook()
    addWorksheet(Output, "Search Results")
    addWorksheet(Output, "Search Inputs")
    writeData(Output, sheet = "Search Results", x = final_df_noauthor)
    writeData(Output, sheet = "Search Inputs", x = inputs)
    saveWorkbook(Output, filename)
  } else {
    #Subsequent runs: append only records not already present (keyed on pmid)
    oldfile <<- read.xlsx(file.path(newsavelocation, filename))
    for (i in seq_len(nrow(final_df_noauthor))) {
      if (final_df_noauthor$pmid[i] %in% oldfile$pmid) {
        next
      }
      oldfile <<- rbind(oldfile, final_df_noauthor[i, 1:11])
    }
    #Write once, after all new rows have been appended
    writein <- list("Search Results" = oldfile, "Search Inputs" = inputs)
    write.xlsx(writein, file = filename)
  }
}
########UNUSED CODE MAY BE USEFUL######
# Give the input file name to the function.
#result <- xmlParse(file = "input.xml")
# Print the result.
#print(result)
## Can use to this add text to ends of strings - useful for changing titles easily in
## consistent way
#for (i in length(data)) {
# f <- "[Title]"
# d <- unlist(strsplit(data[i], " "))
# d <- paste(d, f, sep = "")
# data[i] <- d
#}
|
/PubMedScraping_GitHub.R
|
no_license
|
gabitante/Web-Scraping-Project-
|
R
| false
| false
| 18,459
|
r
|
#Complete function for running code.
#Driver for the full pipeline: PubMed search -> PDF download -> Excel log.
#query: PubMed search string; firefoxprofile: path to a Firefox profile;
#username/password: site login (username NULL -> anonymous, 10 pdf/day limit);
#savelocation: browser download directory; filename: xlsx log name;
#newsavelocation: directory where renamed pdfs and the log end up.
#Results are passed between the steps through <<- globals, not return values.
PubMedScraping <- function(query, firefoxprofile, username, password, savelocation, filename, newsavelocation) {
  # Record the inputs so WriteToExcel() can log them alongside the results
  inputs <<- c(query, firefoxprofile, savelocation, filename, newsavelocation)
  # These three steps are identical for both branches, so hoist them
  # (the original duplicated the calls in each arm of the if/else)
  LoadPackages()
  PubMedSearch(query)
  if (is.null(username)) {
    # No account: capped at 10 downloads per day
    DownloadPDF_NoAccount(firefoxprofile, savelocation, filename, newsavelocation)
  } else {
    DownloadPDF_Account(firefoxprofile, username, password, savelocation, filename, newsavelocation)
  }
  WriteToExcel(newsavelocation, filename)
}
#######Load Packages for All Functions#######
#Attaches every package the scraping pipeline depends on, in the same order
#as before. Each entry's purpose is noted inline.
LoadPackages <- function(){
  pkgs <- c(
    "easyPubMed", # scraping pubmed
    "xml2",       # parsing xml files
    "openxlsx",   # convert to excel file
    "qdapRegex",  # parse out ID's from xml code
    "RSelenium",  # run automated websearch
    "plyr",       # rename dataframe variables
    "R.utils",    # used to rename pdf files
    "ttutils"     # used to check whether count is an integer
  )
  for (pkg in pkgs) {
    library(pkg, character.only = TRUE)
  }
}
########Run PubMed Search########
#Runs the PubMed query and builds the global data frame final_df_noauthor
#(one row per article, authors not expanded). All state is shared with the
#download/export functions via <<- globals rather than return values.
PubMedSearch <- function(query){
# Query pubmed and fetch results for search string
my_query <<- query
#NOTE(review): hard-coded NCBI api key committed to source - should live in
#an environment variable. It raises the allowed request rate; the exact
#per-query maximum is not documented here.
my_query <<- get_pubmed_ids(my_query, api_key = "0e0464ff93883fbef4c648d491ced27ed909")
#obtain number of items that need to be iterated through in request
number <<- 1:(as.numeric(my_query$Count))
#generate empty dataframe to hold data; only columns 1-11 are kept at the end,
#the trailing ones exist so rbind with article_to_df output lines up
final_df_noauthor <<- data.frame(query=character(),
pmid=character(),
doi=character(),
title=character(),
abstract=character(),
year=character(),
month=character(),
day=character(),
jabbrv=character(),
journal=character(),
PDFStatus=character(),
Search=character(),
firstname=character(),
address=character(),
email=character(),
search=character())
#for loop to pull all results, one record per request (retmax = 1).
#May be more/less efficient with larger/smaller retmax (determines how many
#search results to pull at once). If issues with pulling too many results
#at once, use Sys.sleep(seconds) to include a pause in each loop.
for (i in number) {
newdata <<- fetch_pubmed_data(my_query, retstart = i-1, retmax = 1)
newlist <<- articles_to_list(newdata)
new_df_noauthor <<- do.call(rbind, lapply(newlist, article_to_df,
max_chars = -1, getAuthors = FALSE))
final_df_noauthor <<- rbind(final_df_noauthor, new_df_noauthor)
}
#clean the data: rename two columns to PDFStatus and Query, put the query
#string in the first cell of the Query column, then eliminate columns that
#weren't used at all.
final_df_noauthor <<- rename(final_df_noauthor, c("keywords"="PDFStatus", "lastname"="Query"))
final_df_noauthor$Query[1] <<- query
final_df_noauthor <<- final_df_noauthor[, c(1:11)]
}
########Pull PDF's Using Results From Search and Write to Excel File########
# DownloadPDF_Account: drive Firefox (via RSelenium) through the book4you
# mirror, searching for every title in the global `final_df_noauthor` and
# downloading the PDF when one is found.  Requires a singlelogin.org account.
#
# Args:
#   firefoxprofile   path to a saved Firefox profile (any download settings
#                    you want must be saved in the profile beforehand)
#   username         account user name for singlelogin.org
#   password         account password
#   savelocation     directory Firefox downloads PDFs into
#   filename         Excel results file; when it already exists in
#                    newsavelocation, rows with a recorded PDFStatus are
#                    skipped so a previous run can be resumed
#   newsavelocation  directory the renamed PDFs (and the Excel file) live in
#
# Side effects: sets final_df_noauthor$PDFStatus[i] to "downloaded"/"missing"
# globally (<<-), and renames each downloaded file after its article title.
DownloadPDF_Account <- function(firefoxprofile, username, password, savelocation, filename, newsavelocation) {
  # Starting a selenium server first has been needed to get the port to work
  # correctly; not sure if genuinely necessary.
  remDr1 <- rsDriver()
  # Load the Firefox profile you currently have.
  fprof <<- getFirefoxProfile(firefoxprofile, useBase = TRUE)
  # Initialize the remote driver that will open Firefox.  In theory
  # extraCapabilities = fprof loads the profile's download settings.
  remDr <<- remoteDriver(
    remoteServerAddr = "localhost",
    port = 4567L,
    extraCapabilities = fprof,
    browserName = "firefox"
  )
  # Browser sometimes quits due to error when running immediately after
  # remDr, so sleep between them.
  Sys.sleep(15)
  # Open Firefox.
  remDr$open(silent = TRUE)
  Sys.sleep(15)
  # Enter the username/password on the login page.
  remDr$navigate("https://singlelogin.org/?from=booksc.xyz")
  Sys.sleep(5)
  Login <- remDr$findElement(using = "css selector", "#username")
  Login$sendKeysToElement(list(username))
  Password <- remDr$findElement(using = "css selector", "#password")
  Password$sendKeysToElement(list(password, "\uE006"))
  Sys.sleep(5)
  # For each title: search the site, open the first hit, click the download
  # control.  PDFStatus becomes "missing" when no hit/download link is found
  # and "downloaded" on success.
  if (file.exists(file.path(newsavelocation, filename)) == TRUE) {
    # Resuming a previous run: skip rows whose status is already recorded.
    oldfile <<- read.xlsx(file.path(newsavelocation, filename))
    for (i in 1:length(final_df_noauthor$title)) {
      if (is.na(oldfile$PDFStatus[i]) == FALSE) {
        next
      }
      # If the session died, restart the server/browser and log in again.
      browsercheck <- try(remDr$navigate("https://book4you.org/?signAll=1"))
      if (inherits(browsercheck, "try-error")) {
        try(remDr1 <- rsDriver())
        Sys.sleep(5)
        remDr$open(silent = TRUE)
        Sys.sleep(5)
        remDr$navigate("https://singlelogin.org/?from=booksc.xyz")
        Sys.sleep(5)
        Login <- remDr$findElement(using = "css selector", "#username")
        Login$sendKeysToElement(list(username))
        Password <- remDr$findElement(using = "css selector", "#password")
        Password$sendKeysToElement(list(password, "\uE006"))
        Sys.sleep(2)
      }
      remDr$navigate("https://book4you.org/?signAll=1")
      Sys.sleep(2)
      # Turn on the exact-match / full-text search options before searching.
      ExactMatch <- remDr$findElement(using = "css selector", "#advSearch-control")
      ExactMatch$clickElement()
      ExactMatch2 <- remDr$findElement(using = "css selector", "#ftcb")
      ExactMatch2$clickElement()
      Sys.sleep(2)
      SearchEntry <- remDr$findElement(using = "css selector", "#searchFieldx")
      SearchEntry$sendKeysToElement(list(final_df_noauthor$title[i], "\uE006"))
      Sys.sleep(2)
      Articles <- remDr$findElement(using = "css selector", ".searchServiceStats")
      Articles$clickElement()
      Sys.sleep(2)
      check <- try(remDr$findElement(using = "css", "#searchResultBox a"))
      if (class(check) == "try-error") {
        # <<- (not <-) so the status survives outside this function, matching
        # the no-resume branch below; with <- the updates were silently lost.
        final_df_noauthor$PDFStatus[i] <<- "missing"
        next
      }
      else {
        Sys.sleep(5)
        webElem <- remDr$findElement(using = "css", "#searchResultBox a")
        webElem$clickElement()
      }
      Sys.sleep(5)
      check2 <- try(remDr$findElement(using = "css", ".addDownloadedBook"))
      if (class(check2) == "try-error") {
        final_df_noauthor$PDFStatus[i] <<- "missing"
        next
      }
      else {
        Sys.sleep(5)
        webElem2 <- remDr$findElement(using = "css", ".addDownloadedBook")
        webElem2$clickElement()
        Sys.sleep(2)
        webElem2$sendKeysToElement(list("\uE006"))
        final_df_noauthor$PDFStatus[i] <<- "downloaded"
        # pattern must be the quoted string ".pdf"; the bare .pdf referenced
        # an undefined object and errored at run time.
        pdfs <- list.files(savelocation, pattern = ".pdf")
        pdfs <- data.frame(lapply(pdfs, as.character), stringsAsFactors = FALSE)
        if (nchar(pdfs[1, 1]) >= 5) {
          pdf <- paste(final_df_noauthor$title[i], '.pdf', sep = "")
          newname <- toString(pdf)
          renameFile(file.path(savelocation, pdfs[1, 1]), file.path(newsavelocation, newname))
        }
      }
      # Close the browser after the LAST row.  length() of a data frame is
      # its column count, so compare against the number of titles instead.
      if (i == length(final_df_noauthor$title)) {
        remDr$close()
      }
    }
  }
  else {
    # Fresh run: no existing Excel file, process every row.
    for (i in 1:length(final_df_noauthor$title)) {
      browsercheck <- try(remDr$navigate("https://book4you.org/?signAll=1"))
      if (inherits(browsercheck, "try-error")) {
        try(remDr1 <- rsDriver())
        Sys.sleep(5)
        remDr$open(silent = TRUE)
        Sys.sleep(5)
        remDr$navigate("https://singlelogin.org/?from=booksc.xyz")
        Sys.sleep(5)
        Login <- remDr$findElement(using = "css selector", "#username")
        Login$sendKeysToElement(list(username))
        Password <- remDr$findElement(using = "css selector", "#password")
        Password$sendKeysToElement(list(password, "\uE006"))
        Sys.sleep(5)
      }
      remDr$navigate("https://book4you.org/?signAll=1")
      Sys.sleep(2)
      ExactMatch <- remDr$findElement(using = "css selector", "#advSearch-control")
      ExactMatch$clickElement()
      ExactMatch2 <- remDr$findElement(using = "css selector", "#ftcb")
      ExactMatch2$clickElement()
      Sys.sleep(2)
      SearchEntry <- remDr$findElement(using = "css selector", "#searchFieldx")
      SearchEntry$sendKeysToElement(list(final_df_noauthor$title[i], "\uE006"))
      Sys.sleep(2)
      Articles <- remDr$findElement(using = "css selector", ".searchServiceStats")
      Articles$clickElement()
      Sys.sleep(2)
      check <- try(remDr$findElement(using = "css", "#searchResultBox a"))
      if (class(check) == "try-error") {
        print("no result")
        final_df_noauthor$PDFStatus[i] <<- "missing"
        next
      }
      else {
        Sys.sleep(5)
        webElem <- remDr$findElement(using = "css", "#searchResultBox a")
        webElem$clickElement()
      }
      Sys.sleep(5)
      check2 <- try(remDr$findElement(using = "css", ".addDownloadedBook"))
      if (class(check2) == "try-error") {
        print("no result 2")
        final_df_noauthor$PDFStatus[i] <<- "missing"
        next
      }
      else {
        Sys.sleep(5)
        webElem2 <- remDr$findElement(using = "css", ".addDownloadedBook")
        webElem2$clickElement()
        Sys.sleep(2)
        webElem2$sendKeysToElement(list("\uE006"))
        final_df_noauthor$PDFStatus[i] <<- "downloaded"
        pdfs <- list.files(savelocation, pattern = ".pdf")
        pdfs <- data.frame(lapply(pdfs, as.character), stringsAsFactors = FALSE)
        if (nchar(pdfs[1, 1]) >= 5) {
          pdf <- paste(final_df_noauthor$title[i], '.pdf', sep = "")
          newname <- toString(pdf)
          renameFile(file.path(savelocation, pdfs[1, 1]), file.path(newsavelocation, newname))
        }
      }
      if (i == length(final_df_noauthor$title)) {
        remDr$close()
      }
    }
  }
}
# DownloadPDF_NoAccount: same flow as DownloadPDF_Account but without a
# sci-hub username/password.  The principal difference is that the maximum
# per day without an account is 10 pdfs, so the function stops itself after
# 10 successful downloads.
#
# Args:
#   firefoxprofile   path to a saved Firefox profile
#   savelocation     directory Firefox downloads PDFs into
#   filename         Excel results file; when it already exists in
#                    newsavelocation, rows with a recorded PDFStatus are skipped
#   newsavelocation  directory the renamed PDFs (and the Excel file) live in
#
# Side effects: sets final_df_noauthor$PDFStatus[i] globally (<<-) and renames
# each downloaded file after its article title.
DownloadPDF_NoAccount <- function(firefoxprofile, savelocation, filename, newsavelocation) {
  # Starting a selenium server first has been needed to get the port to work
  # correctly; not sure if genuinely necessary.
  remDr1 <- rsDriver()
  # Load the Firefox profile (supposed to make Firefox download files without
  # asking permission).
  fprof <<- getFirefoxProfile(firefoxprofile, useBase = TRUE)
  # Initialize the remote driver that will open Firefox.
  remDr <<- remoteDriver(
    remoteServerAddr = "localhost",
    port = 4567L,
    extraCapabilities = fprof,
    browserName = "firefox"
  )
  # Browser sometimes quits due to error when running immediately after
  # remDr, so sleep between them.
  Sys.sleep(15)
  # Open Firefox.
  remDr$open(silent = TRUE)
  Sys.sleep(15)
  # Successful-download counter; the daily anonymous limit is 10.
  count <- 0
  if (file.exists(file.path(newsavelocation, filename)) == TRUE) {
    # Resuming a previous run: skip rows whose status is already recorded.
    oldfile <<- read.xlsx(file.path(newsavelocation, filename))
    for (i in 1:length(final_df_noauthor$title)) {
      if (is.na(oldfile$PDFStatus[i]) == FALSE) {
        next
      }
      # Stop at the daily limit.  count increments by exactly 1 per success,
      # so this is equivalent to the original isInteger(count/10) & count>=10.
      if (count >= 10) {
        # close() must be CALLED -- the bare `remDr$close` was a no-op and
        # left the browser open.
        remDr$close()
        break
      }
      # If the session died, restart the server/browser.
      browsercheck <- try(remDr$navigate("https://libgen.bban.top/"))
      if (inherits(browsercheck, "try-error")) {
        try(remDr1 <- rsDriver())
        Sys.sleep(5)
        remDr$open(silent = TRUE)
        Sys.sleep(5)
      }
      remDr$navigate("https://libgen.bban.top/")
      # Turn on the exact-match / full-text search options before searching.
      ExactMatch <- remDr$findElement(using = "css selector", "#advSearch-control")
      ExactMatch$clickElement()
      ExactMatch2 <- remDr$findElement(using = "css selector", "#ftcb")
      ExactMatch2$clickElement()
      Sys.sleep(5)
      SearchEntry <- remDr$findElement(using = "css selector", "#searchFieldx")
      SearchEntry$sendKeysToElement(list(final_df_noauthor$title[i], "\uE006"))
      Sys.sleep(5)
      Articles <- remDr$findElement(using = "css selector", ".searchServiceStats")
      Articles$clickElement()
      Sys.sleep(5)
      check <- try(remDr$findElement(using = "css", "#searchResultBox a"))
      if (class(check) == "try-error") {
        final_df_noauthor$PDFStatus[i] <<- "missing"
        next
      }
      else {
        Sys.sleep(5)
        webElem <- remDr$findElement(using = "css", "#searchResultBox a")
        webElem$clickElement()
      }
      Sys.sleep(5)
      check2 <- try(remDr$findElement(using = "css", ".addDownloadedBook"))
      if (class(check2) == "try-error") {
        final_df_noauthor$PDFStatus[i] <<- "missing"
        next
      }
      else {
        Sys.sleep(5)
        webElem2 <- remDr$findElement(using = "css", ".addDownloadedBook")
        webElem2$clickElement()
        Sys.sleep(2)
        webElem2$sendKeysToElement(list("\uE006"))
        final_df_noauthor$PDFStatus[i] <<- "downloaded"
        count <- count + 1
        # pattern must be the quoted string ".pdf"; the bare .pdf referenced
        # an undefined object and errored at run time.
        pdfs <- list.files(savelocation, pattern = ".pdf")
        pdfs <- data.frame(lapply(pdfs, as.character), stringsAsFactors = FALSE)
        if (nchar(pdfs[1, 1]) >= 5) {
          pdf <- paste(final_df_noauthor$title[i], '.pdf', sep = "")
          newname <- toString(pdf)
          renameFile(file.path(savelocation, pdfs[1, 1]), file.path(newsavelocation, newname))
        }
      }
      # Close the browser after the LAST row.  length() of a data frame is
      # its column count, so compare against the number of titles instead.
      if (i == length(final_df_noauthor$title)) {
        remDr$close()
      }
    }
  }
  else {
    # Fresh run: no existing Excel file, process every row.
    for (i in 1:length(final_df_noauthor$title)) {
      browsercheck <- try(remDr$navigate("https://libgen.bban.top/"))
      if (inherits(browsercheck, "try-error")) {
        try(remDr1 <- rsDriver())
        Sys.sleep(5)
        remDr$open(silent = TRUE)
        Sys.sleep(5)
      }
      # Stop at the daily limit (see note in the resume branch).
      if (count >= 10) {
        remDr$close()
        break
      }
      remDr$navigate("https://libgen.bban.top/")
      ExactMatch <- remDr$findElement(using = "css selector", "#advSearch-control")
      ExactMatch$clickElement()
      ExactMatch2 <- remDr$findElement(using = "css selector", "#ftcb")
      ExactMatch2$clickElement()
      Sys.sleep(5)
      SearchEntry <- remDr$findElement(using = "css selector", "#searchFieldx")
      SearchEntry$sendKeysToElement(list(final_df_noauthor$title[i], "\uE006"))
      Sys.sleep(5)
      Articles <- remDr$findElement(using = "css selector", ".searchServiceStats")
      Articles$clickElement()
      Sys.sleep(5)
      check <- try(remDr$findElement(using = "css", "#searchResultBox a"))
      if (class(check) == "try-error") {
        final_df_noauthor$PDFStatus[i] <<- "missing"
        next
      }
      else {
        Sys.sleep(5)
        webElem <- remDr$findElement(using = "css", "#searchResultBox a")
        webElem$clickElement()
      }
      Sys.sleep(5)
      check2 <- try(remDr$findElement(using = "css", ".addDownloadedBook"))
      if (class(check2) == "try-error") {
        final_df_noauthor$PDFStatus[i] <<- "missing"
        next
      }
      else {
        Sys.sleep(5)
        webElem2 <- remDr$findElement(using = "css", ".addDownloadedBook")
        webElem2$clickElement()
        Sys.sleep(2)
        webElem2$sendKeysToElement(list("\uE006"))
        final_df_noauthor$PDFStatus[i] <<- "downloaded"
        count <- count + 1
        pdfs <- list.files(savelocation, pattern = ".pdf")
        pdfs <- data.frame(lapply(pdfs, as.character), stringsAsFactors = FALSE)
        if (nchar(pdfs[1, 1]) >= 5) {
          pdf <- paste(final_df_noauthor$title[i], '.pdf', sep = "")
          newname <- toString(pdf)
          renameFile(file.path(savelocation, pdfs[1, 1]), file.path(newsavelocation, newname))
        }
      }
      if (i == length(final_df_noauthor$title)) {
        remDr$close()
      }
    }
  }
}
# WriteToExcel: persist the global `final_df_noauthor` (and the global
# `inputs` table) to an Excel workbook so a search can be resumed later.
# On the first run it creates the workbook with "Search Results" and
# "Search Inputs" sheets; on later runs it re-reads the existing sheet and
# appends only rows whose pmid is not already present.
#
# Args:
#   newsavelocation  directory the workbook lives in (also becomes the
#                    working directory, as in the original)
#   filename         workbook file name, e.g. "results.xlsx"
WriteToExcel <- function(newsavelocation, filename) {
  setwd(newsavelocation)
  if (file.exists(file.path(newsavelocation, filename)) == FALSE) {
    Output <- createWorkbook()
    addWorksheet(Output, "Search Results")
    addWorksheet(Output, "Search Inputs")
    writeData(Output, sheet = "Search Results", x = final_df_noauthor)
    writeData(Output, sheet = "Search Inputs", x = inputs)
    saveWorkbook(Output, filename)
  }
  else {
    # Use the caller-supplied location; the original hard-coded one user's
    # desktop path and file name here, breaking every other search.
    oldfile <<- read.xlsx(file.path(newsavelocation, filename))
    # nrow(), not length(): length() of a data frame is its column count.
    for (i in seq_len(nrow(final_df_noauthor))) {
      if (final_df_noauthor$pmid[i] %in% oldfile$pmid) {
        next
      }
      # R has no `oldfile.append()`; grow the table with rbind() instead.
      oldfile <<- rbind(oldfile, final_df_noauthor[i, 1:11])
    }
    # Write once, after the loop (the original rewrote the file on every
    # iteration).  openxlsx names the sheets after the list elements.
    writein <- list("Search Results" = oldfile, "Search Inputs" = inputs)
    write.xlsx(writein, file = filename)
  }
}
########UNUSED CODE MAY BE USEFUL######
# Give the input file name to the function.
#result <- xmlParse(file = "input.xml")
# Print the result.
#print(result)
## Can use to this add text to ends of strings - useful for changing titles easily in
## consistent way
#for (i in length(data)) {
# f <- "[Title]"
# d <- unlist(strsplit(data[i], " "))
# d <- paste(d, f, sep = "")
# data[i] <- d
#}
|
# --- Bar chart demos --------------------------------------------------------
# Revenue figures for five months.
monthly_revenue <- c(99, 12, 44, 88, 66)

# 1) Plain bar chart, written to disk.
png(file = "barchart.png")
barplot(monthly_revenue)
# Close the device so the file is saved.
dev.off()

# 2) Same data with month labels, axis titles and colours.
month_labels <- c("Mar", "Apr", "May", "Jun", "Jul")
png(file = "barchart_months_revenue.png")
barplot(monthly_revenue, names.arg = month_labels, xlab = "Month",
        ylab = "Revenue", col = "blue", main = "Revenue chart",
        border = "red")
dev.off()

# 3) Stacked bar chart: one stack per month, one segment per region.
segment_colors <- c("green", "orange", "brown")
chart_months  <- c("Mar", "Apr", "May", "Jun", "Jul")
chart_regions <- c("East", "West", "North")
# Rows = regions, columns = months.
revenue_by_region <- matrix(c(2, 9, 3, 11, 9, 4, 8, 7, 3, 12, 5, 2, 8, 10, 11),
                            nrow = 3, ncol = 5, byrow = TRUE)
png(file = "barchart_stacked.png")
barplot(revenue_by_region, main = "total revenue", names.arg = chart_months,
        xlab = "month", ylab = "revenue", col = segment_colors)
# Identify the region each colour belongs to.
legend("topleft", chart_regions, cex = 1.3, fill = segment_colors)
dev.off()
|
/WorkSpace/R Programming/R Charts & Graphs/R - Bar Charts.R
|
no_license
|
chinna510/Projects
|
R
| false
| false
| 878
|
r
|
H <- c(99,12,44,88,66)
png(file = "barchart.png")
barplot(H)
#To Save The File
dev.off()
M <- c("Mar","Apr","May","Jun","Jul")
png(file = "barchart_months_revenue.png")
# Plot the bar chart.
barplot(H,names.arg = M,xlab = "Month",ylab = "Revenue",col = "blue", main = "Revenue chart",border = "red")
dev.off()
colors <- c("green","orange","brown")
months <- c("Mar","Apr","May","Jun","Jul")
regions <- c("East","West","North")
# Create the matrix of the values.
Values <- matrix(c(2,9,3,11,9,4,8,7,3,12,5,2,8,10,11),nrow = 3,ncol = 5,byrow = TRUE)
# Give the chart file a name.
png(file = "barchart_stacked.png")
# Create the bar chart.
barplot(Values,main = "total revenue",names.arg = months,xlab = "month",ylab = "revenue",
col = colors)
# Add the legend to the chart.
legend("topleft", regions, cex = 1.3, fill = colors)
# Save the file.
dev.off()
|
##########################################
#### GAM MODELS FOR T1 BIFACTOR STUDY ####
##########################################
#Load data
data.JLF <- readRDS("/data/jux/BBL/projects/pncT1AcrossDisorder/subjectData/n1394_T1_subjData.rds")
#Load libraries
library(mgcv)
library(dplyr)
#Get NMF variable names (JLF volume components)
jlfAllComponents <- data.JLF[c(grep("mprage_jlf_vol",names(data.JLF)))]
##129 variables
jlfComponents_short <- jlfAllComponents[,-grep("Vent|Brain_Stem|Cerebell|Cerebral_White_Matter|CSF|Lobe_WM",names(jlfAllComponents))]
##112 variables
jlfComponents <- names(jlfComponents_short)
#Run one gam model per JLF component: volume ~ s(age) + covariates + the five
#bifactor scores.
JlfModels <- lapply(jlfComponents, function(x) {
  gam(substitute(i ~ s(age) + sex + averageManualRating + mood_4factorv2 + psychosis_4factorv2 + externalizing_4factorv2 + phobias_4factorv2 + overall_psychopathology_4factorv2, list(i = as.name(x))), method="REML", data = data.JLF)
})
#Model summaries (reused below when pulling coefficients)
models <- lapply(JlfModels, summary)
######################
#### MOOD RESULTS ####
######################
#Pull p-values (row 4 of the parametric table = mood term)
p_mood <- sapply(JlfModels, function(v) summary(v)$p.table[4,4])
#Convert to data frame
p_mood <- as.data.frame(p_mood)
#Print original p-values to three decimal places
p_mood_round <- round(p_mood,3)
#Add row names
rownames(p_mood) <- jlfComponents
#FDR correct p-values
p_mood_fdr <- p.adjust(p_mood[,1],method="fdr")
#Convert to data frame
p_mood_fdr <- as.data.frame(p_mood_fdr)
#To print fdr-corrected p-values to three decimal places
p_mood_fdr_round <- round(p_mood_fdr,3)
#Keep only the p-values that survive FDR correction
p_mood_fdr_round_signif <- p_mood_fdr_round[p_mood_fdr<0.05]
#Convert to data frame
p_mood_sig <- as.data.frame(p_mood_fdr_round_signif)
#Add row names
rownames(p_mood_fdr) <- jlfComponents
#List the JLF components that survive FDR correction.  NOTE: because the row
#names were set to the component NAMES above, Jlf_mood_fdr holds character
#names, not indices; the original jlfComponents[as.numeric(Jlf_mood_fdr)]
#coerced those names to NA.  Use integer positions from which() instead.
Jlf_mood_fdr <- row.names(p_mood_fdr)[p_mood_fdr<0.05]
#Convert to data frame
ROI_mood <- as.data.frame(Jlf_mood_fdr)
#Integer positions of the surviving components
mood_idx <- which(p_mood_fdr[,1] < 0.05)
#Names of the JLF components that survive FDR correction
Jlf_mood_fdr_names <- jlfComponents[mood_idx]
#To check direction of coefficient estimates
mood_coeff <- models[mood_idx]
###########################
#### PSYCHOSIS RESULTS ####
###########################
#Pull p-values (row 5 = psychosis term).  Row names are never reassigned in
#this section, so they stay at the default "1".."112" and as.numeric() below
#recovers valid integer indices.
p_psy <- sapply(JlfModels, function(v) summary(v)$p.table[5,4])
#Convert to data frame
p_psy <- as.data.frame(p_psy)
#Print original p-values to three decimal places
p_psy_round <- round(p_psy,3)
#FDR correct p-values
p_psy_fdr <- p.adjust(p_psy[,1],method="fdr")
#Convert to data frame
p_psy_fdr <- as.data.frame(p_psy_fdr)
#To print fdr-corrected p-values to three decimal places
p_psy_fdr_round <- round(p_psy_fdr,3)
#List the JLF components (default numeric row names) that survive FDR correction
Jlf_psy_fdr <- row.names(p_psy_fdr)[p_psy_fdr<0.05]
#Name of the JLF components that survive FDR correction
Jlf_psy_fdr_names <- jlfComponents[as.numeric(Jlf_psy_fdr)]
#To check direction of coefficient estimates
psy_coeff <- models[as.numeric(Jlf_psy_fdr)]
########################################
#### EXTERNALIZING BEHAVIOR RESULTS ####
########################################
#Pull p-values (row 6 = externalizing term; default numeric row names as above)
p_ext <- sapply(JlfModels, function(v) summary(v)$p.table[6,4])
#Convert to data frame
p_ext <- as.data.frame(p_ext)
#Print original p-values to three decimal places
p_ext_round <- round(p_ext,3)
#FDR correct p-values
p_ext_fdr <- p.adjust(p_ext[,1],method="fdr")
#Convert to data frame
p_ext_fdr <- as.data.frame(p_ext_fdr)
#To print fdr-corrected p-values to three decimal places
p_ext_fdr_round <- round(p_ext_fdr,3)
#List the JLF components that survive FDR correction
Jlf_ext_fdr <- row.names(p_ext_fdr)[p_ext_fdr<0.05]
#Name of the JLF components that survive FDR correction
Jlf_ext_fdr_names <- jlfComponents[as.numeric(Jlf_ext_fdr)]
#To check direction of coefficient estimates
ext_coeff <- models[as.numeric(Jlf_ext_fdr)]
##############################
#### PHOBIA(FEAR) RESULTS ####
##############################
#Pull p-values (row 7 = phobias term; default numeric row names as above)
p_fear <- sapply(JlfModels, function(v) summary(v)$p.table[7,4])
#Convert to data frame
p_fear <- as.data.frame(p_fear)
#Print original p-values to three decimal places
p_fear_round <- round(p_fear,3)
#FDR correct p-values
p_fear_fdr <- p.adjust(p_fear[,1],method="fdr")
#Convert to data frame
p_fear_fdr <- as.data.frame(p_fear_fdr)
#To print fdr-corrected p-values to three decimal places
p_fear_fdr_round <- round(p_fear_fdr,3)
#List the JLF components that survive FDR correction
Jlf_fear_fdr <- row.names(p_fear_fdr)[p_fear_fdr<0.05]
#Name of the JLF components that survive FDR correction
Jlf_fear_fdr_names <- jlfComponents[as.numeric(Jlf_fear_fdr)]
#To check direction of coefficient estimates
fear_coeff <- models[as.numeric(Jlf_fear_fdr)]
#########################################
#### OVERALL PSYCHOPATHOLOGY RESULTS ####
#########################################
#Pull p-values (row 8 = overall psychopathology term)
p_overall <- sapply(JlfModels, function(v) summary(v)$p.table[8,4])
#Convert to data frame
p_overall <- as.data.frame(p_overall)
#Print original p-values to three decimal places
p_overall_round <- round(p_overall,3)
#Add row names
rownames(p_overall) <- jlfComponents
#FDR correct p-values
p_overall_fdr <- p.adjust(p_overall[,1],method="fdr")
#Convert to data frame
p_overall_fdr <- as.data.frame(p_overall_fdr)
#To print fdr-corrected p-values to three decimal places
p_overall_fdr_round <- round(p_overall_fdr,3)
#Keep only the p-values that survive FDR correction
p_overall_fdr_round_signif <- p_overall_fdr_round[p_overall_fdr<0.05]
#Convert to data frame
p_overall_sig <- as.data.frame(p_overall_fdr_round_signif)
#Add row names
rownames(p_overall_fdr) <- jlfComponents
#List the JLF components that survive FDR correction (names, as in the mood
#section -- so index with which(), not as.numeric(names))
Jlf_overall_fdr <- row.names(p_overall_fdr)[p_overall_fdr<0.05]
#Convert to data frame
ROI_overall <- as.data.frame(Jlf_overall_fdr)
#Integer positions of the surviving components
overall_idx <- which(p_overall_fdr[,1] < 0.05)
#Names of the JLF components that survive FDR correction
Jlf_overall_fdr_names <- jlfComponents[overall_idx]
#To check direction of coefficient estimates
overall_coeff <- models[overall_idx]
#######################
#### PULL T VALUES ####
#######################
##Mood
#Pull t-values for mood (row 4, column 3 of the parametric table)
tJLF_mood <- sapply(JlfModels, function(x) summary(x)$p.table[4,3])
#Print to two decimal places (only significant components)
tJLF_mood_round <- round(tJLF_mood,2)[p_mood_fdr<0.05]
#Convert to data frame
t_mood <- as.data.frame(tJLF_mood_round)
##Overall
#Pull t-values for overall
tJLF_overall <- sapply(JlfModels, function(x) summary(x)$p.table[8,3])
#Print to two decimal places (only significant components)
tJLF_overall_round <- round(tJLF_overall,2)[p_overall_fdr<0.05]
#Convert to data frame
t_overall <- as.data.frame(tJLF_overall_round)
#######################
#### ROI, P, AND T ####
#######################
##Mood
#Combine ROI names, p values, and t values into one dataframe
combined_mood <- cbind(ROI_mood,p_mood_sig)
combined_mood2 <- cbind(combined_mood,t_mood)
#Rename variables (dplyr::rename, new = old)
data.mood <- rename(combined_mood2, p = p_mood_fdr_round_signif, t = tJLF_mood_round)
#Save as a .csv
write.csv(data.mood, file="/data/jux/BBL/projects/pncT1AcrossDisorder/subjectData/n1394_JLFvol_mood.csv", row.names=F, quote=F)
##Overall
#Combine ROI names, p values, and t values into one dataframe
combined_overall <- cbind(ROI_overall,p_overall_sig)
combined_overall2 <- cbind(combined_overall,t_overall)
#Rename variables
data.overall <- rename(combined_overall2, p = p_overall_fdr_round_signif, t = tJLF_overall_round)
#Save as a .csv
write.csv(data.overall, file="/data/jux/BBL/projects/pncT1AcrossDisorder/subjectData/n1394_JLFvol_overall.csv", row.names=F, quote=F)
|
/JLFvol/GamAnalyses_T1Bifactors_JLFvol.R
|
no_license
|
PennBBL/pncT1Bifactors
|
R
| false
| false
| 7,678
|
r
|
##########################################
#### GAM MODELS FOR T1 BIFACTOR STUDY ####
##########################################
#Load data
data.JLF <- readRDS("/data/jux/BBL/projects/pncT1AcrossDisorder/subjectData/n1394_T1_subjData.rds")
#Load library
library(mgcv)
library(dplyr)
#Get NMF variable names
jlfAllComponents <- data.JLF[c(grep("mprage_jlf_vol",names(data.JLF)))]
##129 variables
jlfComponents_short <- jlfAllComponents[,-grep("Vent|Brain_Stem|Cerebell|Cerebral_White_Matter|CSF|Lobe_WM",names(jlfAllComponents))]
##112 variables
jlfComponents <- names(jlfComponents_short)
#Run gam models
JlfModels <- lapply(jlfComponents, function(x) {
gam(substitute(i ~ s(age) + sex + averageManualRating + mood_4factorv2 + psychosis_4factorv2 + externalizing_4factorv2 + phobias_4factorv2 + overall_psychopathology_4factorv2, list(i = as.name(x))), method="REML", data = data.JLF)
})
#Look at model summaries
models <- lapply(JlfModels, summary)
######################
#### MOOD RESULTS ####
######################
#Pull p-values
p_mood <- sapply(JlfModels, function(v) summary(v)$p.table[4,4])
#Convert to data frame
p_mood <- as.data.frame(p_mood)
#Print original p-values to three decimal places
p_mood_round <- round(p_mood,3)
#Add row names
rownames(p_mood) <- jlfComponents
#FDR correct p-values
p_mood_fdr <- p.adjust(p_mood[,1],method="fdr")
#Convert to data frame
p_mood_fdr <- as.data.frame(p_mood_fdr)
#To print fdr-corrected p-values to three decimal places
p_mood_fdr_round <- round(p_mood_fdr,3)
#Keep only the p-values that survive FDR correction
p_mood_fdr_round_signif <- p_mood_fdr_round[p_mood_fdr<0.05]
#Convert to data frame
p_mood_sig <- as.data.frame(p_mood_fdr_round_signif)
#Add row names
rownames(p_mood_fdr) <- jlfComponents
#List the JLF components that survive FDR correction
Jlf_mood_fdr <- row.names(p_mood_fdr)[p_mood_fdr<0.05]
#Convert to data frame
ROI_mood <- as.data.frame(Jlf_mood_fdr)
#Name of the JLF components that survive FDR correction
Jlf_mood_fdr_names <- jlfComponents[as.numeric(Jlf_mood_fdr)]
#To check direction of coefficient estimates
mood_coeff <- models[as.numeric(Jlf_mood_fdr)]
###########################
#### PSYCHOSIS RESULTS ####
###########################
#Pull p-values
p_psy <- sapply(JlfModels, function(v) summary(v)$p.table[5,4])
#Convert to data frame
p_psy <- as.data.frame(p_psy)
#Print original p-values to three decimal places
p_psy_round <- round(p_psy,3)
#FDR correct p-values
p_psy_fdr <- p.adjust(p_psy[,1],method="fdr")
#Convert to data frame
p_psy_fdr <- as.data.frame(p_psy_fdr)
#To print fdr-corrected p-values to three decimal places
p_psy_fdr_round <- round(p_psy_fdr,3)
#List the JLF components that survive FDR correction
Jlf_psy_fdr <- row.names(p_psy_fdr)[p_psy_fdr<0.05]
#Name of the JLF components that survive FDR correction
Jlf_psy_fdr_names <- jlfComponents[as.numeric(Jlf_psy_fdr)]
#To check direction of coefficient estimates
psy_coeff <- models[as.numeric(Jlf_psy_fdr)]
########################################
#### EXTERNALIZING BEHAVIOR RESULTS ####
########################################
#Pull p-values
p_ext <- sapply(JlfModels, function(v) summary(v)$p.table[6,4])
#Convert to data frame
p_ext <- as.data.frame(p_ext)
#Print original p-values to three decimal places
p_ext_round <- round(p_ext,3)
#FDR correct p-values
p_ext_fdr <- p.adjust(p_ext[,1],method="fdr")
#Convert to data frame
p_ext_fdr <- as.data.frame(p_ext_fdr)
#To print fdr-corrected p-values to three decimal places
p_ext_fdr_round <- round(p_ext_fdr,3)
#List the JLF components that survive FDR correction
Jlf_ext_fdr <- row.names(p_ext_fdr)[p_ext_fdr<0.05]
#Name of the JLF components that survive FDR correction
Jlf_ext_fdr_names <- jlfComponents[as.numeric(Jlf_ext_fdr)]
#To check direction of coefficient estimates
ext_coeff <- models[as.numeric(Jlf_ext_fdr)]
##############################
#### PHOBIA(FEAR) RESULTS ####
##############################
#Pull p-values
p_fear <- sapply(JlfModels, function(v) summary(v)$p.table[7,4])
#Convert to data frame
p_fear <- as.data.frame(p_fear)
#Print original p-values to three decimal places
p_fear_round <- round(p_fear,3)
#FDR correct p-values
p_fear_fdr <- p.adjust(p_fear[,1],method="fdr")
#Convert to data frame
p_fear_fdr <- as.data.frame(p_fear_fdr)
#To print fdr-corrected p-values to three decimal places
p_fear_fdr_round <- round(p_fear_fdr,3)
#List the JLF components that survive FDR correction
Jlf_fear_fdr <- row.names(p_fear_fdr)[p_fear_fdr<0.05]
#Name of the JLF components that survive FDR correction
Jlf_fear_fdr_names <- jlfComponents[as.numeric(Jlf_fear_fdr)]
#To check direction of coefficient estimates
fear_coeff <- models[as.numeric(Jlf_fear_fdr)]
#########################################
#### OVERALL PSYCHOPATHOLOGY RESULTS ####
#########################################
#Pull p-values
p_overall <- sapply(JlfModels, function(v) summary(v)$p.table[8,4])
#Convert to data frame
p_overall <- as.data.frame(p_overall)
#Print original p-values to three decimal places
p_overall_round <- round(p_overall,3)
#Add row names
rownames(p_overall) <- jlfComponents
#FDR correct p-values
p_overall_fdr <- p.adjust(p_overall[,1],method="fdr")
#Convert to data frame
p_overall_fdr <- as.data.frame(p_overall_fdr)
#To print fdr-corrected p-values to three decimal places
p_overall_fdr_round <- round(p_overall_fdr,3)
#Keep only the p-values that survive FDR correction
# --- Overall psychopathology: regions surviving FDR correction ---
# NOTE: this fragment depends on objects built earlier in the script
# (p_overall_fdr, p_overall_fdr_round, p_mood_fdr, jlfComponents,
# JlfModels, models, ROI_mood, p_mood_sig) and is not self-contained.
# Keep only the rounded p-values that survive FDR correction (q < .05)
p_overall_fdr_round_signif <- p_overall_fdr_round[p_overall_fdr<0.05]
# Convert to data frame for the later cbind with ROI names and t values
p_overall_sig <- as.data.frame(p_overall_fdr_round_signif)
# Label the p-value table with the JLF component names
rownames(p_overall_fdr) <- jlfComponents
# Row names of the JLF components that survive FDR correction
Jlf_overall_fdr <- row.names(p_overall_fdr)[p_overall_fdr<0.05]
# Convert to data frame
ROI_overall <- as.data.frame(Jlf_overall_fdr)
# Names of the JLF components that survive FDR correction
# NOTE(review): as.numeric() on these row names only works if they are
# numeric strings; after the rownames() assignment above they are the
# component names themselves, so this likely produces NAs -- verify.
Jlf_overall_fdr_names <- jlfComponents[as.numeric(Jlf_overall_fdr)]
# Fitted models for the surviving components (to check coefficient direction)
overall_coeff <- models[as.numeric(Jlf_overall_fdr)]
#######################
#### PULL T VALUES ####
#######################
## Mood
# t-value of the mood term: row 4, column 3 of the parametric coefficient
# table (p.table suggests mgcv::gam summaries -- confirm the term order)
tJLF_mood <- sapply(JlfModels, function(x) summary(x)$p.table[4,3])
# Round to two decimals, keeping only FDR-significant components
tJLF_mood_round <- round(tJLF_mood,2)[p_mood_fdr<0.05]
# Convert to data frame
t_mood <- as.data.frame(tJLF_mood_round)
## Overall
# t-value of the overall-psychopathology term (row 8 of the parametric table)
tJLF_overall <- sapply(JlfModels, function(x) summary(x)$p.table[8,3])
# Round to two decimals, keeping only FDR-significant components
tJLF_overall_round <- round(tJLF_overall,2)[p_overall_fdr<0.05]
# Convert to data frame
t_overall <- as.data.frame(tJLF_overall_round)
#######################
#### ROI, P, AND T ####
#######################
## Mood
# Combine ROI names, p values, and t values into one data frame
combined_mood <- cbind(ROI_mood,p_mood_sig)
combined_mood2 <- cbind(combined_mood,t_mood)
# Rename columns to short labels (rename() presumably from dplyr/plyr,
# loaded earlier in the script -- verify)
data.mood <- rename(combined_mood2, p = p_mood_fdr_round_signif, t = tJLF_mood_round)
# Save as a .csv
write.csv(data.mood, file="/data/jux/BBL/projects/pncT1AcrossDisorder/subjectData/n1394_JLFvol_mood.csv", row.names=F, quote=F)
## Overall
# Combine ROI names, p values, and t values into one data frame
combined_overall <- cbind(ROI_overall,p_overall_sig)
combined_overall2 <- cbind(combined_overall,t_overall)
# Rename columns to short labels
data.overall <- rename(combined_overall2, p = p_overall_fdr_round_signif, t = tJLF_overall_round)
# Save as a .csv
write.csv(data.overall, file="/data/jux/BBL/projects/pncT1AcrossDisorder/subjectData/n1394_JLFvol_overall.csv", row.names=F, quote=F)
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{arguments.merge}
\alias{arguments.merge}
\title{Merge two lists of function arguments}
\usage{
arguments.merge(x, y, FUN = NULL, append = ifelse(is.null(FUN), TRUE, "..."
\%in\% names(formals(FUN))))
}
\arguments{
\item{x}{base list}
\item{y}{list to be merged with \code{x}}
\item{FUN}{function that should take the arguments. If the \code{\link{formals}} of \code{FUN} does not contain \code{...} the default of append is changed.}
\item{append}{should elements of \code{y} not contained in \code{x} be appended?}
}
\description{
Merge two lists of function arguments
}
|
/man/arguments.merge.Rd
|
no_license
|
SwedishPensionsAgency/format.tables
|
R
| false
| false
| 637
|
rd
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{arguments.merge}
\alias{arguments.merge}
\title{Merge two lists of function arguments}
\usage{
arguments.merge(x, y, FUN = NULL, append = ifelse(is.null(FUN), TRUE, "..."
\%in\% names(formals(FUN))))
}
\arguments{
\item{x}{base list}
\item{y}{list to be merged with \code{x}}
\item{FUN}{function that should take the arguments. If the \code{\link{formals}} of \code{FUN} does not contain \code{...} the default of append is changed.}
\item{append}{should elements of \code{y} not contained in \code{x} be appended?}
}
\description{
Merge two lists of function arguments
}
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 11557
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 11152
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 11152
c
c Input Parameter (command line, file):
c input filename QBFLIB/Seidl/ASP_Program_Inclusion/T-adeu-10.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 4790
c no.of clauses 11557
c no.of taut cls 181
c
c Output Parameters:
c remaining no.of clauses 11152
c
c QBFLIB/Seidl/ASP_Program_Inclusion/T-adeu-10.qdimacs 4790 11557 E1 [81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 121 122 123 124 125 126 127 128 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 297 298 299 300 301 302 303 304 448 449 466 467 484 485 502 503 520 521 538 539 556 557 574 575 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 1238 1239 1240 1241 1242 1243 1244 1245 1329 1330 1350 1373 1379 1389 1396 1404 1407 1413 1419 1420 1423 1424 1438 1458 1464 1466 1477 1493 1497 1498 1499 1506 1514 1516 1517 1535 1572 1574 1591 1656 1657 1682 1683 1708 1709 1734 1735 1923 1924 1941 1942 1959 1960 1977 1978 1995 1996 2013 2014 2031 2032 2049 2050 2092 2093 2094 2095 2096 2097 2098 2099 2100 2101 2102 2103 2104 2105 2106 2107 2402 2403 2423 2446 2452 2462 2469 2477 2480 2486 2492 2493 2496 2497 2511 2531 2537 2539 2550 2566 2570 2571 2572 2579 2587 2589 2590 2608 2645 2647 2664 2679 2680 2705 2706 2731 2732 2757 2758 2783 2784 2809 2810 2835 2836 2861 2862 2887 2888 2913 2914 2939 2940 2965 2966 2987 2988 2989 2990 2991 2992 2993 2994 3333 3334 3335 3336 3337 3338 3339 3340 3341 3342 3343 3344 3345 3346 3347 3348 3561 3562 3582 3605 3611 3621 3628 3636 3639 3645 3651 3652 3655 3656 3670 3690 3696 3698 3709 3725 3729 3730 3731 3738 3746 3748 3749 3767 3804 3806 3823 3838 3839 3864 3865 3890 3891 3916 3917 3942 3943 3968 3969 3994 3995 4020 4021 4046 4047 4072 4073 4098 4099 4124 4125 4171 4172 4173 4174 4175 4176 4177 4178] 181 160 4305 11152 RED
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Seidl/ASP_Program_Inclusion/T-adeu-10/T-adeu-10.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false
| false
| 2,237
|
r
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 11557
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 11152
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 11152
c
c Input Parameter (command line, file):
c input filename QBFLIB/Seidl/ASP_Program_Inclusion/T-adeu-10.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 4790
c no.of clauses 11557
c no.of taut cls 181
c
c Output Parameters:
c remaining no.of clauses 11152
c
c QBFLIB/Seidl/ASP_Program_Inclusion/T-adeu-10.qdimacs 4790 11557 E1 [81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 121 122 123 124 125 126 127 128 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 297 298 299 300 301 302 303 304 448 449 466 467 484 485 502 503 520 521 538 539 556 557 574 575 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 1238 1239 1240 1241 1242 1243 1244 1245 1329 1330 1350 1373 1379 1389 1396 1404 1407 1413 1419 1420 1423 1424 1438 1458 1464 1466 1477 1493 1497 1498 1499 1506 1514 1516 1517 1535 1572 1574 1591 1656 1657 1682 1683 1708 1709 1734 1735 1923 1924 1941 1942 1959 1960 1977 1978 1995 1996 2013 2014 2031 2032 2049 2050 2092 2093 2094 2095 2096 2097 2098 2099 2100 2101 2102 2103 2104 2105 2106 2107 2402 2403 2423 2446 2452 2462 2469 2477 2480 2486 2492 2493 2496 2497 2511 2531 2537 2539 2550 2566 2570 2571 2572 2579 2587 2589 2590 2608 2645 2647 2664 2679 2680 2705 2706 2731 2732 2757 2758 2783 2784 2809 2810 2835 2836 2861 2862 2887 2888 2913 2914 2939 2940 2965 2966 2987 2988 2989 2990 2991 2992 2993 2994 3333 3334 3335 3336 3337 3338 3339 3340 3341 3342 3343 3344 3345 3346 3347 3348 3561 3562 3582 3605 3611 3621 3628 3636 3639 3645 3651 3652 3655 3656 3670 3690 3696 3698 3709 3725 3729 3730 3731 3738 3746 3748 3749 3767 3804 3806 3823 3838 3839 3864 3865 3890 3891 3916 3917 3942 3943 3968 3969 3994 3995 4020 4021 4046 4047 4072 4073 4098 4099 4124 4125 4171 4172 4173 4174 4175 4176 4177 4178] 181 160 4305 11152 RED
|
# SGD types
# "ARS" "ARS consensus sequence"
# "CDEI" "CDEII"
# "CDEIII" "CDS"
# "ORF" "W_region"
# "X_element_combinatorial_repeats" "X_element_core_sequence"
# "X_region" "Y'_element"
# "Y_region" "Z1_region"
# "Z2_region" "binding_site"
# "centromere" "external_transcribed_spacer_region"
# "five_prime_UTR_intron" "gene_cassette"
# "insertion" "internal_transcribed_spacer_region"
# "intron" "long_terminal_repeat"
# "mating_locus" "multigene locus"
# "ncRNA" "non_transcribed_region"
# "noncoding_exon" "not in systematic sequence of S288C"
# "not physically mapped" "plus_1_translational_frameshift"
# "pseudogene" "rRNA"
# "repeat_region" "retrotransposon"
# "snRNA" "snoRNA"
# "tRNA" "telomere"
# "telomeric_repeat" "transposable_element_gene"
library(plyr)
# Fixed flanking-region sizes (base pairs) used to derive promoter and
# terminator intervals from each ORF's start/stop coordinates.
PROMOTER_LENGTH <- 750
TERMINATOR_LENGTH <- 250
# Replace each ORF row's coordinates with its upstream promoter window.
# The window spans PROMOTER_LENGTH bp upstream of the ORF start, in the
# strand-appropriate direction; Watson-strand windows are clamped at
# coordinate 1. All rows are assumed to share one strand value
# (the caller splits by strand), since only strand[1] is inspected.
convert_to_promoter <- function(input_df) {
  orf_start <- input_df$start_position
  strand_tag <- input_df$strand[1]
  if (strand_tag == 'W') {
    # Watson strand: upstream means lower coordinates; clamp at chromosome start.
    input_df$start_position <- pmax(1, orf_start - PROMOTER_LENGTH)
    input_df$stop_position <- pmax(1, orf_start - 1)
  } else if (strand_tag == 'C') {
    # Crick strand: upstream means higher coordinates.
    input_df$start_position <- orf_start + PROMOTER_LENGTH
    input_df$stop_position <- orf_start + 1
  }
  input_df$type <- "promoter"
  # Distinguish derived intervals from the parent feature by an ID suffix.
  input_df$SGDID_pt <- paste0(input_df$SGDID, "_p")
  input_df
}
# Replace each ORF row's coordinates with its downstream terminator window.
# The window covers TERMINATOR_LENGTH bp immediately past the ORF stop,
# in the strand-appropriate direction. All rows are assumed to share one
# strand value (the caller splits by strand), since only strand[1] is used.
convert_to_terminator <- function(input_df) {
  orf_stop <- input_df$stop_position
  strand_tag <- input_df$strand[1]
  if (strand_tag == 'W') {
    # Watson strand: downstream means higher coordinates.
    input_df$start_position <- orf_stop + 1
    input_df$stop_position <- orf_stop + TERMINATOR_LENGTH
  } else if (strand_tag == 'C') {
    # Crick strand: downstream means lower coordinates.
    input_df$start_position <- orf_stop - 1
    input_df$stop_position <- orf_stop - TERMINATOR_LENGTH
  }
  input_df$type <- "terminator"
  # Distinguish derived intervals from the parent feature by an ID suffix.
  input_df$SGDID_pt <- paste0(input_df$SGDID, "_t")
  input_df
}
# Read the SGD feature table (tab-separated, headerless; column names
# follow the SGD_features.README layout).
sgd_df <- read.delim('/Users/johnkoschwanez/sequencing/S288C_reference_genome_r64/SGD_features.tab',
    col.names=c('SGDID', 'type', 'qualifier', 'feature_name',
    'gene_name', 'alias', 'parent_feature_name',
    'secondary_SGDID', 'chromosome', 'start_position',
    'stop_position', 'strand', 'genetic_position',
    'coordinate_version', 'sequence_version',
    'description'))
# SGDID_pt: ID with promoter/terminator suffix; plain features keep the raw ID
sgd_df$SGDID_pt <- sgd_df$SGDID
# Derive promoter and terminator intervals from the ORF rows only,
# processing each strand group separately (the converters only look at
# strand[1], so they require a single-strand input).
orf_df <- subset(sgd_df, (type == "ORF"))
promoter_df <- ddply(orf_df,
    .(strand),
    convert_to_promoter)
terminator_df <- ddply(orf_df,
    .(strand),
    convert_to_terminator)
sgd_df <- rbind(sgd_df, promoter_df, terminator_df)
# Strand-independent interval bounds: start/stop order differs by strand,
# so keep explicit min/max coordinates as well.
sgd_df$min_position <- pmin(sgd_df$start_position, sgd_df$stop_position)
sgd_df$max_position <- pmax(sgd_df$start_position, sgd_df$stop_position)
sgd_df$chromosome <- factor(sgd_df$chromosome)
# Keep only the feature types of interest for downstream analysis.
# NOTE(review): "binding site" below has a space, but the SGD type listed
# in the file header comment is "binding_site" -- verify which spelling
# the data actually uses, otherwise this clause matches nothing.
min_df <- subset(sgd_df, (type == "ORF" |
    type == "promoter" |
    type == "terminator" |
    type == "ARS" |
    type == "centromere" |
    type == "mating_locus" |
    type == "ncRNA" |
    type == "snRNA" |
    type == "tRNA" |
    type == "binding site" |
    type == "rRNA" |
    type == "snoRNA" |
    type == "CDEI" |
    type == "CDEII" |
    type == "CDEIII" |
    type == "intron"
    ),
    select=c('chromosome', 'min_position', 'max_position',
    'type', 'gene_name', 'feature_name',
    'strand', 'SGDID', 'SGDID_pt', 'parent_feature_name'))
# Full table with quoting/row names for R consumption ...
write.table(min_df, 'SGD_features_min.txt', sep='\t')
# ... and a bare, unquoted, headerless version for easy parsing from Python
write.table(min_df, 'SGD_features_python.txt', quote=F, col.names=F,
    row.names=F, sep='\t')
|
/ref_seq/SGD_features_parse.r
|
permissive
|
koschwanez/mutantanalysis
|
R
| false
| false
| 4,201
|
r
|
# SGD types
# "ARS" "ARS consensus sequence"
# "CDEI" "CDEII"
# "CDEIII" "CDS"
# "ORF" "W_region"
# "X_element_combinatorial_repeats" "X_element_core_sequence"
# "X_region" "Y'_element"
# "Y_region" "Z1_region"
# "Z2_region" "binding_site"
# "centromere" "external_transcribed_spacer_region"
# "five_prime_UTR_intron" "gene_cassette"
# "insertion" "internal_transcribed_spacer_region"
# "intron" "long_terminal_repeat"
# "mating_locus" "multigene locus"
# "ncRNA" "non_transcribed_region"
# "noncoding_exon" "not in systematic sequence of S288C"
# "not physically mapped" "plus_1_translational_frameshift"
# "pseudogene" "rRNA"
# "repeat_region" "retrotransposon"
# "snRNA" "snoRNA"
# "tRNA" "telomere"
# "telomeric_repeat" "transposable_element_gene"
library(plyr)
# Sizes (bp) of the promoter/terminator flanks derived from each ORF
PROMOTER_LENGTH <- 750
TERMINATOR_LENGTH <- 250
# Rewrite each ORF row's coordinates as its upstream promoter window:
# PROMOTER_LENGTH bp before the ORF start, direction chosen by strand,
# clamped at coordinate 1 on the Watson strand. Only strand[1] is read,
# so the input must contain a single strand (caller splits by strand).
convert_to_promoter <- function(input_df) {
  # Remember the original ORF starts before overwriting the columns.
  start <- input_df$start_position
  if (input_df$strand[1] == 'W') {
    # Watson strand: upstream = lower coordinates, clamped at 1.
    input_df$start_position <- pmax(1, start - PROMOTER_LENGTH)
    input_df$stop_position <- pmax(1, start - 1)
  }
  if (input_df$strand[1] == 'C') {
    # Crick strand: upstream = higher coordinates.
    input_df$start_position <- start + PROMOTER_LENGTH
    input_df$stop_position <- start + 1
  }
  input_df$type <- "promoter"
  # Suffix the ID so derived intervals are distinguishable from parents.
  input_df$SGDID_pt <- paste(input_df$SGDID, 'p', sep='_')
  return(input_df)
}
# Rewrite each ORF row's coordinates as its downstream terminator window:
# TERMINATOR_LENGTH bp past the ORF stop, direction chosen by strand.
# Only strand[1] is read, so the input must contain a single strand.
convert_to_terminator <- function(input_df) {
  # Remember the original ORF stops before overwriting the columns.
  # (Note: this local shadows base::stop inside the function body.)
  stop <- input_df$stop_position
  if (input_df$strand[1] == 'W') {
    # Watson strand: downstream = higher coordinates.
    input_df$start_position <- stop + 1
    input_df$stop_position <- stop + TERMINATOR_LENGTH
  }
  if (input_df$strand[1] == 'C') {
    # Crick strand: downstream = lower coordinates.
    input_df$start_position <- stop - 1
    input_df$stop_position <- stop - TERMINATOR_LENGTH
  }
  input_df$type <- "terminator"
  # Suffix the ID so derived intervals are distinguishable from parents.
  input_df$SGDID_pt <- paste(input_df$SGDID, 't', sep='_')
  return(input_df)
}
# Load the headerless SGD feature table with explicit column names.
sgd_df <- read.delim('/Users/johnkoschwanez/sequencing/S288C_reference_genome_r64/SGD_features.tab',
    col.names=c('SGDID', 'type', 'qualifier', 'feature_name',
    'gene_name', 'alias', 'parent_feature_name',
    'secondary_SGDID', 'chromosome', 'start_position',
    'stop_position', 'strand', 'genetic_position',
    'coordinate_version', 'sequence_version',
    'description'))
# Plain features keep the raw ID; derived intervals get a suffixed one.
sgd_df$SGDID_pt <- sgd_df$SGDID
# Build promoter/terminator rows from the ORFs, one strand at a time.
orf_df <- subset(sgd_df, (type == "ORF"))
promoter_df <- ddply(orf_df,
    .(strand),
    convert_to_promoter)
terminator_df <- ddply(orf_df,
    .(strand),
    convert_to_terminator)
sgd_df <- rbind(sgd_df, promoter_df, terminator_df)
# Keep strand-independent min/max bounds alongside start/stop.
sgd_df$min_position <- pmin(sgd_df$start_position, sgd_df$stop_position)
sgd_df$max_position <- pmax(sgd_df$start_position, sgd_df$stop_position)
sgd_df$chromosome <- factor(sgd_df$chromosome)
# Restrict to the feature types used downstream.
# NOTE(review): "binding site" (with space) may not match the SGD type
# "binding_site" -- verify against the data.
min_df <- subset(sgd_df, (type == "ORF" |
    type == "promoter" |
    type == "terminator" |
    type == "ARS" |
    type == "centromere" |
    type == "mating_locus" |
    type == "ncRNA" |
    type == "snRNA" |
    type == "tRNA" |
    type == "binding site" |
    type == "rRNA" |
    type == "snoRNA" |
    type == "CDEI" |
    type == "CDEII" |
    type == "CDEIII" |
    type == "intron"
    ),
    select=c('chromosome', 'min_position', 'max_position',
    'type', 'gene_name', 'feature_name',
    'strand', 'SGDID', 'SGDID_pt', 'parent_feature_name'))
# R-friendly and Python-friendly (unquoted, headerless) exports.
write.table(min_df, 'SGD_features_min.txt', sep='\t')
write.table(min_df, 'SGD_features_python.txt', quote=F, col.names=F,
    row.names=F, sep='\t')
|
## Exploratory plot 1: histogram of Global Active Power.
## Reads the UCI household power consumption data for 2007-02-01 and
## 2007-02-02 only, draws a red histogram of Global_active_power, and
## saves it as a 480 x 480 px PNG.

## Delete all objects; close any open graphics device.
## dev.off() errors when no device is open, so guard with dev.cur().
rm(list = ls())
if (dev.cur() > 1) dev.off()
## Read only the header row so the filtered read below can reuse the
## original column names (col.names expects a character vector, hence
## the unlist()).
header <- read.table("data/household_power_consumption.txt",
                     sep=";", nrows=1, stringsAsFactors=FALSE)
## Pull just the two target days via grep in a pipe, so the full ~2M-row
## file is never loaded into R; "?" marks missing values.
PowerConsum <- read.table(pipe('grep \'^[12]/2/2007\' data/*'),
                          sep=";", na.strings="?",
                          col.names=unlist(header))
## Histogram of "Global Active Power"
par(cex = 0.9)
hist(PowerConsum$Global_active_power, col = "red",
     main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)")
## Copy the screen device to a 480 x 480 pixel PNG file.
dev.copy(png, filename="plot1.png", width = 480, height = 480, units = "px")
dev.off()
|
/plot1.R
|
no_license
|
pputz/ExData_Plotting1
|
R
| false
| false
| 726
|
r
|
## delete all objects; clear plots
## NOTE(review): dev.off() errors when no graphics device is open;
## consider guarding with dev.cur() > 1.
rm(list=ls()); dev.off()
## Read data for 1/2/2007 and 2/2/2007 only via grep in a pipe, so the
## full file is never loaded; "?" encodes missing values. The header row
## is read separately to recover the original column names.
header <- read.table("data/household_power_consumption.txt",
                     sep=";", nrows=1, stringsAsFactors=F)
## NOTE(review): col.names is given a 1-row data.frame here; the
## documented type is a character vector (e.g. unlist(header)) -- verify.
PowerConsum <- read.table(pipe('grep \'^[12]/2/2007\' data/*'),
                          sep=";", na.strings="?", col.names=header)
## Histogram of "Global Active Power" in red
par(cex = 0.9)
hist(PowerConsum$Global_active_power, col = "red",
     main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)")
## Copy the screen device to a 480 x 480 px PNG file
dev.copy(png, filename="plot1.png", width = 480, height = 480, units = "px")
dev.off()
|
# Fit a 10-fold cross-validated elastic net (alpha = 0.7, i.e. mostly
# lasso) on the breast data set, minimizing mean absolute error, and
# append the fitted coefficient-path summary to a log file.
library(glmnet)
# Column 1 is the response; predictors start at column 4
# (columns 2-3 are presumably identifiers -- verify against the CSV).
mydata = read.table("./TrainingSet/LassoBIC/breast.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
# Fix the RNG so the CV fold assignment is reproducible
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.7,family="gaussian",standardize=FALSE)
# Append the glmnet fit summary (df, %dev, lambda path) to the log file
sink('./Model/EN/Lasso/breast/breast_076.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
/Model/EN/Lasso/breast/breast_076.R
|
no_license
|
leon1003/QSMART
|
R
| false
| false
| 351
|
r
|
# Elastic-net (alpha = 0.7) fit with 10-fold CV (MAE criterion) on the
# breast data; the coefficient-path summary is appended to a text log.
library(glmnet)
# Response in column 1, predictors from column 4 onward (columns 2-3
# presumably metadata -- verify).
mydata = read.table("./TrainingSet/LassoBIC/breast.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
# Seed fixed for reproducible CV folds
set.seed(123)
glm = cv.glmnet(x,y,nfolds=10,type.measure="mae",alpha=0.7,family="gaussian",standardize=FALSE)
# Divert the printed fit summary into the log file (append mode)
sink('./Model/EN/Lasso/breast/breast_076.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
# V-plot of MNase fragment midpoints vs fragment length around YBX1 motif
# matches in K562, plus fragment-edge density and coverage profiles.
# Relies on the in-house fjComm package (clear_, gg_theme_Publication,
# gg_save_pdf) and assumes tidyverse/magrittr are attached.
fjComm::clear_()
# pallete=RColorBrewer::brewer.pal(n = 11, name = "Spectral") %>% rev()
# Input columns are unnamed; from usage, X11 = distance of fragment
# midpoint from motif center and X4 = fragment length (bp) -- verify.
df=read_tsv("K562_rep1_20.6U_79.2U_304U_MNase.fragment_length.closest_dist_to_center_within_500.YBX1_Control_TTGCTC40NGAA_KR_YGTWCCAYC_m1_c4_short_overlap_K562_ChIP.no_blacklist.txt",col_names = F)
# Fragment edges = midpoint +/- half length; keep fragments fully inside
# +/-1 kb of the motif and longer than 140 bp (nucleosome-sized and up)
df %<>% mutate(left_edge=X11-X4/2, right_edge=X11+X4/2) %>% dplyr::filter(left_edge >-1000 & right_edge<1000) %>% dplyr::filter(X4>140)
# 2-bp binned V-plot; counts squished into [0, 200] for the color scale
p=ggplot(df)+stat_bin_2d(aes(X11,X4),binwidth = 2)+scale_fill_gradientn(colours =c("white","#ead100","red"),limits=c(0,200),oob=scales::squish, name="Count",breaks=c(0,25))+scale_y_continuous(limits = c(0,200),expand = c(0,0),breaks = c(0,100,200))+scale_x_continuous(limits = c(-600,600),expand = c(0,0),breaks = c(-200,0,200))+
  xlab("Distance from motif (bp)")+ ylab("Fragment length (bp)")+gg_theme_Publication()+theme(legend.direction = "horizontal",legend.position = c(0.8,0.2))
print(p)
gg_save_pdf(p,6,5.5,filename = "YBX1_Vplot")
# Density of 5' vs 3' fragment edges around the motif
data=rbind( tibble(side="5'",edge=df$left_edge),tibble(side="3'",edge=df$right_edge) )
p=ggplot(data)+geom_density(aes(edge,color=side),bw=5)+geom_vline(xintercept = 0)+scale_x_continuous(limits = c(-350,350))
gg_save_pdf(p,12,5.5,filename = "YBX1_Vplot_cutpos")
# Per-base fragment coverage; shift by 1 kb so negative coordinates fit
# into IRanges' positive coordinate space
rngs=IRanges::IRanges(start = df$left_edge,end = df$right_edge)
cvgs=IRanges::coverage(rngs,shift = 1000); cvgLen=cvgs %>% length()
# Note: 1:cvgLen-1000 parses as (1:cvgLen) - 1000, i.e. it undoes the
# shift and restores motif-centered coordinates
cvgdf=tibble(pos=1:cvgLen-1000,cvg= cvgs %>% as.integer())
p=ggplot(cvgdf)+geom_line(aes(pos,cvg))+scale_x_continuous(limits = c(-350,350))+scale_y_continuous(limits = range(cvgdf %>% dplyr::filter(pos>-300&pos<300) %>% .$cvg ))+geom_vline(xintercept = 0)
gg_save_pdf(p,12,5.5,filename = "YBX1_Vplot_cvg")
# Minimal (axis-stripped, smaller) version of the coverage plot.
# NOTE(review): saved under the same filename as the previous plot, so it
# overwrites it -- confirm that is intended.
p=ggplot(cvgdf)+geom_line(aes(pos,cvg))+scale_x_continuous(limits = c(-350,350))+scale_y_continuous(limits = range(cvgdf %>% dplyr::filter(pos>-300&pos<300) %>% .$cvg ))+xlab("(bp)")+theme(axis.line.y = element_blank(),axis.text.y = element_blank(),axis.title.y = element_blank(),axis.ticks.y = element_blank())
gg_save_pdf(p,3,2,filename = "YBX1_Vplot_cvg")
|
/!ADD_Nat_revise1/6_ENCODE_corr/MNase_cvg/YBX1.R
|
no_license
|
aquaflakes/individual_analyses
|
R
| false
| false
| 2,029
|
r
|
# MNase V-plot analysis around YBX1 motifs (K562): 2D fragment histogram,
# edge densities, and coverage. Depends on the in-house fjComm helpers.
fjComm::clear_()
# pallete=RColorBrewer::brewer.pal(n = 11, name = "Spectral") %>% rev()
# Unnamed TSV columns; X11/X4 appear to be midpoint distance and fragment
# length respectively (inferred from usage -- verify).
df=read_tsv("K562_rep1_20.6U_79.2U_304U_MNase.fragment_length.closest_dist_to_center_within_500.YBX1_Control_TTGCTC40NGAA_KR_YGTWCCAYC_m1_c4_short_overlap_K562_ChIP.no_blacklist.txt",col_names = F)
# Derive fragment edges, keep fully-contained fragments > 140 bp
df %<>% mutate(left_edge=X11-X4/2, right_edge=X11+X4/2) %>% dplyr::filter(left_edge >-1000 & right_edge<1000) %>% dplyr::filter(X4>140)
# V-plot: 2-bp bins, color scale clipped at 200 counts
p=ggplot(df)+stat_bin_2d(aes(X11,X4),binwidth = 2)+scale_fill_gradientn(colours =c("white","#ead100","red"),limits=c(0,200),oob=scales::squish, name="Count",breaks=c(0,25))+scale_y_continuous(limits = c(0,200),expand = c(0,0),breaks = c(0,100,200))+scale_x_continuous(limits = c(-600,600),expand = c(0,0),breaks = c(-200,0,200))+
  xlab("Distance from motif (bp)")+ ylab("Fragment length (bp)")+gg_theme_Publication()+theme(legend.direction = "horizontal",legend.position = c(0.8,0.2))
print(p)
gg_save_pdf(p,6,5.5,filename = "YBX1_Vplot")
# 5'/3' fragment-edge density around the motif
data=rbind( tibble(side="5'",edge=df$left_edge),tibble(side="3'",edge=df$right_edge) )
p=ggplot(data)+geom_density(aes(edge,color=side),bw=5)+geom_vline(xintercept = 0)+scale_x_continuous(limits = c(-350,350))
gg_save_pdf(p,12,5.5,filename = "YBX1_Vplot_cutpos")
# Fragment coverage, shifted +1 kb to stay in positive coordinates
rngs=IRanges::IRanges(start = df$left_edge,end = df$right_edge)
cvgs=IRanges::coverage(rngs,shift = 1000); cvgLen=cvgs %>% length()
# (1:cvgLen)-1000 restores motif-centered positions
cvgdf=tibble(pos=1:cvgLen-1000,cvg= cvgs %>% as.integer())
p=ggplot(cvgdf)+geom_line(aes(pos,cvg))+scale_x_continuous(limits = c(-350,350))+scale_y_continuous(limits = range(cvgdf %>% dplyr::filter(pos>-300&pos<300) %>% .$cvg ))+geom_vline(xintercept = 0)
gg_save_pdf(p,12,5.5,filename = "YBX1_Vplot_cvg")
# Axis-stripped small version -- note it reuses the previous filename
p=ggplot(cvgdf)+geom_line(aes(pos,cvg))+scale_x_continuous(limits = c(-350,350))+scale_y_continuous(limits = range(cvgdf %>% dplyr::filter(pos>-300&pos<300) %>% .$cvg ))+xlab("(bp)")+theme(axis.line.y = element_blank(),axis.text.y = element_blank(),axis.title.y = element_blank(),axis.ticks.y = element_blank())
gg_save_pdf(p,3,2,filename = "YBX1_Vplot_cvg")
|
# Logistic GAM on the South African heart disease data (chd ~ smooth
# terms of the continuous predictors + famhist), followed by tuning of
# the smoothing df via cross-validation and a pseudo-AIC.
library(foreach)
library(splines)
library(gam)
require(ggplot2)
SA<-read.table('SA.txt', sep=",", header=T, row.names=1)
#1
# Baseline GAM: 4-df smooth for each continuous predictor, famhist linear
SAGam <- gam(chd ~ s(sbp,4) + s(tobacco,4) + s(ldl,4) + s(adiposity, 4) + s(typea, 4) +
               s(obesity, 4) + s(alcohol, 4) + s(age, 4) + famhist,data=SA,family=binomial)
par(mfrow=c(3,3))
plot(SAGam)
###2
###Q6.2
###For fast programming we rely on the cv.glm function in the
###boot library.
library(boot)
# Candidate smoothing degrees of freedom
# NOTE(review): the comment below mentions df in the range 3-5, but this
# grid only covers 1-3 -- verify the intended search range.
df <- seq(1,3,by=0.1)
#Using cross-validation to select the df-parameter. The conclusion is
#dependent upon the run (and random divisions of the dataset), but in a couple of runs
#it seems to be df in the range 3-5
SAGamCv <- numeric(length(df))
for(i in seq(along=df))
{
  # Build the formula programmatically: famhist stays linear, every other
  # of the first nine predictors (column 5 is famhist, hence [-5]) gets
  # a smooth term with the candidate df
  formGam <- as.formula(paste("chd~famhist+", paste("s(",names(SA[1,1:9])[-5], ",df=", df[i], ")",sep="",collapse="+")))
  SAGam <- gam(formGam,family=binomial,data=SA)
  # 7-fold CV with a 0/1 loss.
  # NOTE(review): zeroOneCost is not defined in this file -- it must be
  # provided elsewhere. Also, set.seed(tmp$seed) passes cv.glm's saved
  # .Random.seed vector to set.seed(), which expects a single integer --
  # verify this does what was intended.
  tmp <- cv.glm(SA,SAGam, zeroOneCost,7)
  set.seed(tmp$seed)
  SAGamCv[i] <- tmp$delta[1]
}
qplot(df,SAGamCv,geom="line")
###For a comparison, we can also use a (pseudo) AIC criterion. It is pseudo because
###there is a smoother involved and we use the effective degrees of freedom
SAGamAic <- numeric(length(df))
for(i in seq(along=df)){
  formGam <- as.formula(paste("chd~famhist+",paste("s(",names(SA[1,1:9])[-5], ",df=", df[i], ")",sep="",collapse="+")))
  SAGam <- gam(formGam,family=binomial,data=SA)
  SAGamAic[i] <- SAGam$aic
}
qplot(df,SAGamAic,geom="line")
###Q6.3
# Same CV loop but with a likelihood-based cost function.
# NOTE(review): likelihoodCost is also not defined in this file.
for(i in seq(along=df)){
  formGam <- as.formula(paste("chd~famhist+",paste("s(",names(SA[1,1:9])[-5], ",df=", df[i], ")",sep="",collapse="+")))
  SAGam <- gam(formGam,family=binomial,data=SA)
  tmp <- cv.glm(SA,SAGam,likelihoodCost,7)
  set.seed(tmp$seed)
  SAGamCv[i] <- tmp$delta[1]
}
qplot(df,SAGamCv,geom="line")
|
/Statistical modelling/Generalized additive model.R
|
no_license
|
while777/while777
|
R
| false
| false
| 1,872
|
r
|
# South African heart disease GAM exercise: fit, then select smoothing df
# by 7-fold CV (0/1 and likelihood costs) and by a pseudo-AIC.
library(foreach)
library(splines)
library(gam)
require(ggplot2)
SA<-read.table('SA.txt', sep=",", header=T, row.names=1)
#1
# 4-df smooths for all continuous predictors; famhist enters linearly
SAGam <- gam(chd ~ s(sbp,4) + s(tobacco,4) + s(ldl,4) + s(adiposity, 4) + s(typea, 4) +
               s(obesity, 4) + s(alcohol, 4) + s(age, 4) + famhist,data=SA,family=binomial)
par(mfrow=c(3,3))
plot(SAGam)
###2
###Q6.2
###For fast programming we rely on the cv.glm function in the
###boot library.
library(boot)
# Grid of candidate smoothing df (note: only 1-3, despite the 3-5 range
# mentioned in the comment below -- verify)
df <- seq(1,3,by=0.1)
#Using cross-validation to select the df-parameter. The conclusion is
#dependent upon the run (and random divisions of the dataset), but in a couple of runs
#it seems to be df in the range 3-5
SAGamCv <- numeric(length(df))
for(i in seq(along=df))
{
  # Programmatic formula: smooth every predictor except famhist (col 5)
  formGam <- as.formula(paste("chd~famhist+", paste("s(",names(SA[1,1:9])[-5], ",df=", df[i], ")",sep="",collapse="+")))
  SAGam <- gam(formGam,family=binomial,data=SA)
  # NOTE(review): zeroOneCost is defined elsewhere; set.seed(tmp$seed)
  # feeds a .Random.seed vector to set.seed(), which takes one integer.
  tmp <- cv.glm(SA,SAGam, zeroOneCost,7)
  set.seed(tmp$seed)
  SAGamCv[i] <- tmp$delta[1]
}
qplot(df,SAGamCv,geom="line")
###For a comparison, we can also use a (pseudo) AIC criterion. It is pseudo because
###there is a smoother involved and we use the effective degrees of freedom
SAGamAic <- numeric(length(df))
for(i in seq(along=df)){
  formGam <- as.formula(paste("chd~famhist+",paste("s(",names(SA[1,1:9])[-5], ",df=", df[i], ")",sep="",collapse="+")))
  SAGam <- gam(formGam,family=binomial,data=SA)
  SAGamAic[i] <- SAGam$aic
}
qplot(df,SAGamAic,geom="line")
###Q6.3
# CV loop repeated with a likelihood cost (likelihoodCost defined elsewhere)
for(i in seq(along=df)){
  formGam <- as.formula(paste("chd~famhist+",paste("s(",names(SA[1,1:9])[-5], ",df=", df[i], ")",sep="",collapse="+")))
  SAGam <- gam(formGam,family=binomial,data=SA)
  tmp <- cv.glm(SA,SAGam,likelihoodCost,7)
  set.seed(tmp$seed)
  SAGamCv[i] <- tmp$delta[1]
}
qplot(df,SAGamCv,geom="line")
|
#' Mean of one pollutant pooled across a set of monitor CSV files.
#'
#' @param directory Character scalar: folder containing the monitor files,
#'   named with zero-padded IDs ("001.csv", "002.csv", ...).
#' @param pollutant Character scalar: column to average, e.g. "sulfate"
#'   or "nitrate".
#' @param id Integer vector of monitor ID numbers to include.
#' @return Mean of all non-NA readings pooled across the selected
#'   monitors; NaN when no non-NA readings exist (same as the 0/0 of the
#'   running-total formulation).
pollutantmean <- function(directory, pollutant, id = 1:332) {
  # Gather the non-NA readings from every requested monitor file.
  readings <- unlist(lapply(id, function(monitor_id) {
    # Monitor files are named by zero-padding the ID to three digits.
    path <- file.path(directory, sprintf("%03d.csv", monitor_id))
    values <- read.csv(path)[[pollutant]]
    values[!is.na(values)]
  }))
  # unlist() of an empty list is NULL; normalize so mean() yields NaN
  # (matching the original 0/0 behavior) instead of NA with a warning.
  if (is.null(readings)) {
    readings <- numeric(0)
  }
  mean(readings)
}
|
/pollutantmean.R
|
no_license
|
Simran-B/ProgrammingAssignment1
|
R
| false
| false
| 872
|
r
|
## Mean of one pollutant pooled across the given monitors' CSV files.
pollutantmean <- function(directory, pollutant, id = 1:332) {
        ## 'directory' is a character vector of length 1 indicating
        ## the location of the CSV files
        ## 'pollutant' is a character vector of length 1 indicating
        ## the name of the pollutant for which we will calculate the
        ## mean; either "sulfate" or "nitrate".
        ## 'id' is an integer vector indicating the monitor ID numbers
        ## to be used
        ## Return the mean of the pollutant across all monitors list
        ## in the 'id' vector (ignoring NA values)
        ## Yields NaN (0/0) when no non-NA readings exist.
        total <- 0
        count <- 0
        for (i in id) {
                ## Monitor files are zero-padded to three digits, e.g. "007.csv"
                monitor = sprintf("%03i", i)
                path <- paste(directory, "/", monitor, ".csv", sep="")
                data <- read.csv(path)
                data_p <- data[[pollutant]]
                ## complete.cases() on a vector is equivalent to !is.na()
                data_p <- data_p[complete.cases(data_p)]
                total <- total + sum(data_p)
                count <- count + length(data_p)
        }
        return (total / count)
}
|
# Estimate per-species geographic range polygons (convex hulls) from
# occurrence records.
#
# x: data.frame with columns species/decimallongitude/decimallatitude
#    (case-insensitive), or a spgeoOUT object (first three columns of
#    $samples are used). Coordinates are assumed lat/long WGS84.
# method: "euclidean" or "pseudospherical" hull construction
#    (delegated to the package-internal .ConvHull helper).
# terrestrial: if TRUE, clip the resulting hulls to the landmass layer.
# rare: treatment of species with < 3 usable records -- "buffer" builds
#    a point buffer of buffer.width metres around them, "drop" omits them.
# Returns a SpatialPolygonsDataFrame in WGS84 (one polygon per species),
# or the string "empty" when nothing could be computed.
CalcRange <- function(x, method = "pseudospherical", terrestrial = F,
                      rare = "buffer", buffer.width = 10000) {
  # x = object of class data.frame, spgeOUT, SpatialPOints, method = c('euclidean', 'pseudospherical'), terrestrial = logical,
  # NOTE(review): the results of match.arg() are discarded here, so these
  # calls only validate the supplied strings; the unmatched originals are
  # what the code compares against below.
  base::match.arg(arg = method,
                  choices = c("euclidean", "pseudospherical"))
  base::match.arg(arg = rare,
                  choices = c("buffer", "drop"))
  # projection: all coordinates are treated as lat/long WGS84
  wgs84 <- sp::CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")
  warning("Assuming lat/long wgs84 coordinates")
  # check for geosphere package (needed by the hull helper)
  if (!requireNamespace("geosphere", quietly = TRUE)) {
    stop("Package 'geosphere' not found. Please install the package.", call. = FALSE)
  }
  # fix different input data types
  ## data.frame: normalize names, keep the three required columns
  if (is.data.frame(x)) {
    names(x) <- tolower(names(x))
    dat <- x[, c("species", "decimallongitude", "decimallatitude")]
  }
  ## spgeoOUt
  if (is.spgeoOUT(x)) {
    dat <- x$samples[, 1:3]
  }
  # check for species with less than 3 records (a hull needs >= 3 points)
  filt <- table(dat$species)
  sortout <- names(filt[filt <= 2])
  filt <- filt[filt > 2]
  dat.filt <- droplevels(subset(dat, dat$species %in% as.character(names(filt))))
  # check for species where all lat or long are identical, or almost
  # identical, to prevent degenerate (line) polygons
  ## longitude
  test <- split(dat.filt, f = dat.filt$species)
  test2 <- sapply(test, function(k){
    length(unique(k$decimallongitude))
  })
  sortout2 <- names(test2[test2 == 1])
  sortout <- c(sortout, sortout2)
  dat.filt <- droplevels(subset(dat.filt, !dat.filt$species %in% sortout))
  # latitude
  # NOTE(review): 'test' is NOT re-split after the longitude filter above,
  # so this check and the correlation check below run on the pre-filter
  # grouping. Mostly harmless (already-dropped species just re-enter
  # 'sortout'), but verify this is intended.
  test2 <- sapply(test, function(k){
    length(unique(k$decimallatitude))
  })
  sortout2 <- names(test2[test2 == 1])
  sortout <- c(sortout, sortout2)
  dat.filt <- droplevels(subset(dat.filt, !dat.filt$species %in% sortout))
  # test for an (almost) perfectly collinear long/lat relationship
  test2 <- sapply(test, function(k){
    round(abs(cor(k[, "decimallongitude"], k[, "decimallatitude"])), 6)})
  sortout2 <- names(test2[test2 == 1])
  sortout <- c(sortout, sortout2)
  dat.filt <- droplevels(subset(dat.filt, !dat.filt$species %in% sortout))
  sortout <- sortout[!is.na(sortout)]
  # 'sortout' now also contains degenerate-coordinate species, not only
  # those with < 3 records, despite the warning text
  if (length(sortout) > 0) {
    warning("found species with < 3 occurrences:", paste("\n", sortout))
  }
  if (nrow(dat.filt) > 0) {
    # calculate convex hulls per species
    inp <- split(dat.filt, f = dat.filt$species)
    # test for occurrences spanning > 180 degrees (hulls would be invalid)
    test <- lapply(inp, function(k){SpatialPoints(k[,2:3])})
    test <- lapply(test, "extent")
    test <- lapply(test, function(k){(k@xmax + 180) - (k@xmin +180)})
    test <- unlist(lapply(test, function(k){k >= 180}))
    if(any(test)){
      stop("data includes species spanning >180 degrees.")
    }
    # calculate ranges based on method euclidean
    if (method == "euclidean") {
      out <- lapply(inp, function(k) .ConvHull(k, type = "euclidean"))
      nam <- names(out)
      if(length(out) == 1){
        names(out) <- NULL
        out <- SpatialPolygonsDataFrame(out[[1]],
                                        data = data.frame(species = nam,
                                                          row.names = paste(nam, "_convhull", sep = "")))
        suppressWarnings(proj4string(out) <- wgs84)
      }
      # NOTE(review): 'length(out > 1)' is almost certainly a bug -- it
      # should be 'length(out) > 1' (compare the if/else structure of the
      # pseudospherical branch below). As written, the condition is the
      # length of a comparison result, and when the single-element branch
      # above has run, 'out' has already been converted.
      if(length(out > 1)){
        names(out) <- NULL
        out <- do.call(bind, out)
        out <- SpatialPolygonsDataFrame(out, data = data.frame(species = nam))
        suppressWarnings(proj4string(out) <- wgs84)
      }
    }
    # pseudospherical
    if (method == "pseudospherical") {
      out <- lapply(inp, function(k) .ConvHull(k, type = "pseudospherical"))
      nam <- names(out)
      if(length(out) == 1){
        names(out) <- NULL
        out <- SpatialPolygonsDataFrame(out[[1]],
                                        data = data.frame(species = nam,
                                                          row.names = paste(nam, "_convhull", sep = "")))
        suppressWarnings(proj4string(out) <- wgs84)
      }else{
        names(out) <- NULL
        out <- do.call(bind, out)
        out <- SpatialPolygonsDataFrame(out, data = data.frame(species = nam))
        suppressWarnings(proj4string(out) <- wgs84)
      }
    }
  }else{
    warning("no species with more than 2 occurrences found")
    out <- "empty"
  }
  # calculate buffer if rare == buffer
  if(rare == "buffer" & length(sortout) > 0){
    # equal-area projection so buffer.width is interpreted in metres
    cea <- sp::CRS("+proj=cea +lon_0=0 +lat_ts=30 +x_0=0 +y_0=0 +a=6371228 +b=6371228 +units=m +no_defs")
    # buffer each rare species' records with the given buffer width
    rar <- sapply(sortout, function(k){
      sub <- droplevels(subset(dat, dat$species == k))
      rar <- sp::SpatialPointsDataFrame(sub[, c("decimallongitude", "decimallatitude")],
                                        data = sub[,"species", drop = FALSE], proj4string = wgs84)
      rar.cea <- sp::spTransform(rar, cea)
      rar.cea <- rgeos::gBuffer(rar.cea, width = buffer.width, byid=TRUE)
      rar.cea <- rgeos::gUnaryUnion(rar.cea)
      rar <- sp::spTransform(rar.cea, wgs84)
    })
    # combine the per-species buffers into one SpatialPolygonsDataFrame
    rar.out <- Reduce(bind, rar)
    rar.out <- SpatialPolygonsDataFrame(rar.out, data = data.frame(species = sortout))
    # append to the hull ranges, or use alone when no hulls were built
    if(!is.character(out)){
      out <- rbind(out, rar.out)
    }else{
      out <- rar.out}
    warning(sprintf("using buffer based range for species with <3 records, bufferradius = %s",
                    buffer.width))
  }
  if(rare == "drop" & length(sortout) > 0){
    warning("species with < 3 records dropped from output")
  }
  # cut to landmass
  if (terrestrial) {
    if (!requireNamespace("rgeos", quietly = TRUE)) {
      stop("Package 'rgeos' not found. Please install.", call. = FALSE)
    }
    # create landmass mask from a 1-degree-expanded bounding box of the data
    cropper <- raster::extent(sp::SpatialPoints(dat[, 2:3]))
    cropper <- cropper + 1
    cropper <- raster::crop(speciesgeocodeR::landmass, cropper)
    out2 <- rgeos::gIntersection(out, cropper, byid = T)
    dat.add <- out@data
    # NOTE(review): getSpPPolygonsIDSlots() is deprecated in sp;
    # sapply(slot(out2, "polygons"), slot, "ID") is the replacement.
    rownames(dat.add) <- getSpPPolygonsIDSlots(out2)
    out <- SpatialPolygonsDataFrame(out2, data = dat.add)
  }
  return(out)
}
|
/speciesgeocodeR.Rcheck/00_pkg_src/speciesgeocodeR/R/CalcRange.R
|
no_license
|
wennading/speciesgeocodeR
|
R
| false
| false
| 6,086
|
r
|
CalcRange <- function(x, method = "pseudospherical", terrestrial = F,
rare = "buffer", buffer.width = 10000) {
# x = object of class data.frame, spgeOUT, SpatialPOints, method = c('euclidean', 'pseudospherical'), terrestrial = logical,
base::match.arg(arg = method,
choices = c("euclidean", "pseudospherical"))
base::match.arg(arg = rare,
choices = c("buffer", "drop"))
#projection
wgs84 <- sp::CRS("+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs")
warning("Assuming lat/long wgs84 coordinates")
# check for geosphere package
if (!requireNamespace("geosphere", quietly = TRUE)) {
stop("Package 'geosphere' not found. Please install the package.", call. = FALSE)
}
# fix different input data types
## data.frame
if (is.data.frame(x)) {
names(x) <- tolower(names(x))
dat <- x[, c("species", "decimallongitude", "decimallatitude")]
}
## spgeoOUt
if (is.spgeoOUT(x)) {
dat <- x$samples[, 1:3]
}
# check for species with less than 3 records
filt <- table(dat$species)
sortout <- names(filt[filt <= 2])
filt <- filt[filt > 2]
dat.filt <- droplevels(subset(dat, dat$species %in% as.character(names(filt))))
#check for species where all lat or long ar identical, or almost identical, to prevent line polygons
##longitude
test <- split(dat.filt, f = dat.filt$species)
test2 <- sapply(test, function(k){
length(unique(k$decimallongitude))
})
sortout2 <- names(test2[test2 == 1])
sortout <- c(sortout, sortout2)
dat.filt <- droplevels(subset(dat.filt, !dat.filt$species %in% sortout))
#latitude
test2 <- sapply(test, function(k){
length(unique(k$decimallatitude))
})
sortout2 <- names(test2[test2 == 1])
sortout <- c(sortout, sortout2)
dat.filt <- droplevels(subset(dat.filt, !dat.filt$species %in% sortout))
#test for almost perfect fit
test2 <- sapply(test, function(k){
round(abs(cor(k[, "decimallongitude"], k[, "decimallatitude"])), 6)})
sortout2 <- names(test2[test2 == 1])
sortout <- c(sortout, sortout2)
dat.filt <- droplevels(subset(dat.filt, !dat.filt$species %in% sortout))
sortout <- sortout[!is.na(sortout)]
if (length(sortout) > 0) {
warning("found species with < 3 occurrences:", paste("\n", sortout))
}
if (nrow(dat.filt) > 0) {
#calculate convex hulls
inp <- split(dat.filt, f = dat.filt$species)
#test for occurrences spanning > 180 degrees
test <- lapply(inp, function(k){SpatialPoints(k[,2:3])})
test <- lapply(test, "extent")
test <- lapply(test, function(k){(k@xmax + 180) - (k@xmin +180)})
test <- unlist(lapply(test, function(k){k >= 180}))
if(any(test)){
stop("data includes species spanning >180 degrees.")
}
# calculate ranges based on method euclidean
if (method == "euclidean") {
out <- lapply(inp, function(k) .ConvHull(k, type = "euclidean"))
nam <- names(out)
if(length(out) == 1){
names(out) <- NULL
out <- SpatialPolygonsDataFrame(out[[1]],
data = data.frame(species = nam,
row.names = paste(nam, "_convhull", sep = "")))
suppressWarnings(proj4string(out) <- wgs84)
}
if(length(out > 1)){
names(out) <- NULL
out <- do.call(bind, out)
out <- SpatialPolygonsDataFrame(out, data = data.frame(species = nam))
suppressWarnings(proj4string(out) <- wgs84)
}
}
# pseudospherical
if (method == "pseudospherical") {
out <- lapply(inp, function(k) .ConvHull(k, type = "pseudospherical"))
nam <- names(out)
if(length(out) == 1){
names(out) <- NULL
out <- SpatialPolygonsDataFrame(out[[1]],
data = data.frame(species = nam,
row.names = paste(nam, "_convhull", sep = "")))
suppressWarnings(proj4string(out) <- wgs84)
}else{
names(out) <- NULL
out <- do.call(bind, out)
out <- SpatialPolygonsDataFrame(out, data = data.frame(species = nam))
suppressWarnings(proj4string(out) <- wgs84)
}
}
}else{
warning("no species with more than 2 occurrences found")
out <- "empty"
}
#calculate buffer if rare == buffer
if(rare == "buffer" & length(sortout) > 0){
cea <- sp::CRS("+proj=cea +lon_0=0 +lat_ts=30 +x_0=0 +y_0=0 +a=6371228 +b=6371228 +units=m +no_defs")
#buffer each species' records with buffer width
rar <- sapply(sortout, function(k){
sub <- droplevels(subset(dat, dat$species == k))
rar <- sp::SpatialPointsDataFrame(sub[, c("decimallongitude", "decimallatitude")],
data = sub[,"species", drop = FALSE], proj4string = wgs84)
rar.cea <- sp::spTransform(rar, cea)
rar.cea <- rgeos::gBuffer(rar.cea, width = buffer.width, byid=TRUE)
rar.cea <- rgeos::gUnaryUnion(rar.cea)
rar <- sp::spTransform(rar.cea, wgs84)
})
#combine into one data.frame
rar.out <- Reduce(bind, rar)
rar.out <- SpatialPolygonsDataFrame(rar.out, data = data.frame(species = sortout))
if(!is.character(out)){
out <- rbind(out, rar.out)
}else{
out <- rar.out}
warning(sprintf("using buffer based range for species with <3 records, bufferradius = %s",
buffer.width))
}
if(rare == "drop" & length(sortout) > 0){
warning("species with < 3 records dropped from output")
}
# cut to landmass
if (terrestrial) {
if (!requireNamespace("rgeos", quietly = TRUE)) {
stop("Package 'rgeos' not found. Please install.", call. = FALSE)
}
# create landmass mask
cropper <- raster::extent(sp::SpatialPoints(dat[, 2:3]))
cropper <- cropper + 1
cropper <- raster::crop(speciesgeocodeR::landmass, cropper)
out2 <- rgeos::gIntersection(out, cropper, byid = T)
dat.add <- out@data
rownames(dat.add) <- getSpPPolygonsIDSlots(out2)
out <- SpatialPolygonsDataFrame(out2, data = dat.add)
}
return(out)
}
|
# The Impact of Settlements Network Structure on Population Dynamics
# Part 4. Network topology analysis
# Author: Alexander Sheludkov
# Date: 19 October 2018
library(sp)
library(sf)
library(raster)
library(dplyr)
library(ggplot2)
library(gridExtra)
library(igraph)
library(RColorBrewer)
library(tidyr)
library(viridis)
# load the data
load("data/Part2_output.RData")
# ================
# 1. Preprocessing
# ================
# 1.1. Copy the attribute table into a new variable and drop unneeded columns.
# NOTE(review): settlements_2002 is loaded from Part2_output.RData; the @data
# slot and coordinates() calls below imply it is an sp Spatial*DataFrame of
# point locations -- confirm against Part 2 of the pipeline.
df <- settlements_2002@data
# Drop the 1981/1990/2002 cohort, trend and relative-change columns that
# are not used in the network-topology analysis.
df %>%
  dplyr::select(-Rosstat1981, -cohort1981, -cohort1990, -cohort2002,
                -trend_1981to1990, -trend_1990to2002, -trend_2002to2010,
                -rel1981to1990, -rel1990to2002, -rel2002to2010) -> df
# Add coordinates of the settlements as new columns
df %>%
  mutate(lon = coordinates(settlements_2002)[,1],
         lat = coordinates(settlements_2002)[,2]) %>%
  dplyr::select(id, lon, lat, ShortName, MunicipalDistrict,
                Rosstat1990, Census2002, Census2010, clust_3, clust_6, clust_18) -> df
# 1.2. Функция для выборки subgraphs из графа, созданного методом shp2graph
# У нас нестандартная структура графа и некоторые функции igraph с ним не работают
# В частности из-за несовпадения числа вершин и числа реальных н.п. induced_subgraph()
# возвращает набор несвязанных точек: все перекрестки, дорожные развязки "опускаются", и
# граф теряет связность.
# Создадим собственную функцию, которая будет принимать на вход граф (@graph),
# вектор индексов вершин-н.п. (@nodes) и возвращать subgraph без потерь
# Extract a connected subgraph for the given settlement vertices.
#
# The road graph built by shp2graph contains auxiliary vertices (junctions,
# interchanges) in addition to settlement vertices, so a plain
# induced_subgraph() on settlement ids alone drops the junctions and
# disconnects the result. This helper therefore collects every vertex lying
# on a shortest path between the requested settlements and induces the
# subgraph on all of them, preserving connectivity.
#
# Args:
#   graph: an igraph object (the road network).
#   nodes: vertex ids of the settlements to keep.
# Returns:
#   A simplified igraph subgraph (loops and multiple edges removed).
my_subgraph_function <- function(graph, nodes) {
  # shortest_paths() returns a named list whose $vpath element holds the
  # vertex ids of every path, including intermediate junction vertices.
  paths <- shortest_paths(graph = graph,
                          from = nodes,
                          to = nodes)
  path_vertices <- unlist(paths$vpath)
  # Induce the subgraph on the collected vertices, then drop loops and
  # duplicated edges.
  sub_graph <- simplify(induced_subgraph(graph = graph, vids = path_vertices))
  return(sub_graph)
}
# 1.3. Functions for creating matrix by repeating rows and columns
# (we will need them for calculating weighted centrality measures)
# Build an n-row matrix in which every row is a copy of the vector x
# (used below to weight distance matrices by destination-node population).
rep.row <- function(x, n) {
  # Repeat the whole vector n times and fill the matrix row-wise, so each
  # of the n rows reproduces x exactly.
  matrix(rep(x, times = n), nrow = n, byrow = TRUE)
}
# Build an n-column matrix in which every column is a copy of the vector x.
#
# Bug fix: the original used rep(x, each = n), which interleaves the
# elements (x1 x1 ... x2 x2 ...), so the column-wise fill produced columns
# that were NOT copies of x. Repeating the whole vector n times
# (rep(x, times = n)) gives the intended column replication, mirroring
# rep.row() above. (rep.col is not referenced in this script, so the fix
# cannot change any downstream result here.)
rep.col <- function(x, n) {
  matrix(rep(x, times = n), ncol = n)
}
# 1.4. Define function for normalizing data
# Min-max normalization: linearly rescale a numeric vector to [0, 1].
# NOTE(review): returns NaN when all values of x are equal, and propagates
# NA if x contains NA -- callers are assumed to pass clean census vectors.
normalize <- function(x) {
  rng <- range(x)
  (x - rng[1]) / (rng[2] - rng[1])
}
# =================================
# 2. Рассчет метрик для 6 кластеров
# =================================
# ======================================
# 2.1. Descriptive metrics on population
df %>%
group_by(clust_6) %>%
mutate(CL6_n = n(), # Number of settlements
CL6_pop2002 = sum(Census2002), # 2002 population
CL6_pop2010 = sum(Census2010), # 2010 population общая численность населения
CL6_pop2010to2002_rel = CL6_pop2010/CL6_pop2002*100, # percentage of 2010-population to 2002-population
CL6_max_pop2002 = max(Census2002), # the largest settlement's size
CL6_mean_pop2002 = mean(Census2002), # mean settlement's size
CL6_median_pop2002 = median(Census2002)) %>% # median settlement's size
# select the columns we need
dplyr::select(clust_6, CL6_n, CL6_pop2002, CL6_pop2010, CL6_pop2010to2002_rel,
CL6_max_pop2002, CL6_mean_pop2002, CL6_median_pop2002) %>%
unique() -> clusters_6_metrics # Save the results into new data.frame
# ==============================================================
# 2.2. Variance in population distribution among the settlements
# Create new columns
clusters_6_metrics$CL6_variance_2002 <- NA_real_
clusters_6_metrics$CL6_variance_2010 <- NA_real_
for (i in 1:nrow(clusters_6_metrics)) {
# Define logical vector to subset observations by the cluster
select_condition <- clust_6_2002 == i
settlements_temp <- df[select_condition,]
# Calculate variance (standard deviation(x)/mean(x))
# 2002
clusters_6_metrics[clusters_6_metrics$clust_6 == i,]$CL6_variance_2002 <-
sd(settlements_temp$Census2002, na.rm = T)/mean(settlements_temp$Census2002, na.rm = T)
# 2010
clusters_6_metrics[clusters_6_metrics$clust_6 == i,]$CL6_variance_2010 <-
sd(settlements_temp$Census2010, na.rm = T)/mean(settlements_temp$Census2010, na.rm = T)
}
# Calculate the difference in variance between 2002 and 2010 (темпы сжатия расселения)
clusters_6_metrics %>%
mutate(CL6_variance_dif = CL6_variance_2010/CL6_variance_2002*100) ->
clusters_6_metrics
# 3.2.1. Quick explorative analysis
# Темпы сжатия расселения vs общая динамика населения
clusters_6_metrics %>%
ggplot(aes(y=CL6_variance_dif, x=CL6_pop2010to2002_rel))+
geom_point(aes(size = CL6_mean_pop2002))+
geom_smooth(method = "glm")+
scale_size_continuous(name = "Ср. размер\nн.п. (чел.)", breaks = c(0, 500, 2000))+
scale_y_continuous(name = "Динамика территориальной\nдифференциации расселения") +
scale_x_continuous(name = "Население в 2010 году к населению в 2002, %")
# =================================
# 2.3. Централизация/связность сети
# Для оценки связности сети в сетевой анализе используется понятие "connectivity". Оно показывает,
# сколько вершин или ребер нужно удалить, чтобы разбить граф на части. Однако при условии, что
# наши кластеры слабо связаны, эта метрика имеет мало смысла - мы получим везде 1.
# "It is also possible to examine the extent to which a whole graph has a centralized structure.
# The concepts of density and centralization refer to differing aspects of the overall 'compactness' of a graph.
# Density describes the general level of cohesion in a graph; centralization describes the extent
# to which this cohesion is organized around particular focal points. Centralization and density,
# therefore, are important complementary measures".
# "The general procedure involved in any measure of graph centralization is
# to look at the differences between the centrality scores of the most
# central point and those of all other points. Centralization, then,
# is the ratio of the actual sum of differences to the maximum possible sum of differences".
# Source: http://www.analytictech.com/mb119/chapter5.htm
# 2.3.1. Density
# The density of a graph is the ratio of the number of edges and the number of possible edges.
# Создадим переменную
clusters_6_metrics$CL6_density <- NA_real_
# Calculate
for (i in 1:nrow(clusters_6_metrics)) {
# Define logical vector to subset settlements by the cluster
select_condition <- clust_6_2002 == i
# Create subgraph
temp_graph <- my_subgraph_function(res_graph_2002, settl_index_2002[select_condition])
# Calculate edge_density
clusters_6_metrics[clusters_6_metrics$clust_6 == i,]$CL6_density <- edge_density(temp_graph)
}
# Quick explorative analysis:
# PopDynamics vs Density
clusters_6_metrics %>%
ggplot(aes(x = CL6_density, y = CL6_pop2010to2002_rel))+
geom_point(aes(size = CL6_mean_pop2002))+
# geom_smooth(col = "grey")+
geom_text(aes(x = CL6_density, y = CL6_pop2010to2002_rel - 1, label = clust_6))
# Var_dif vs Density
clusters_6_metrics %>%
ggplot(aes(x = CL6_density, y = CL6_variance_dif))+
# geom_smooth()+
geom_point(aes(size = CL6_mean_pop2002))+
geom_text(aes(x = CL6_density, y = CL6_variance_dif + 0.5, label = clust_6))
# 2.3.2. Centralization
# Betweenness Centralisation (централизация по посредничеству)
# Create column
clusters_6_metrics$CL6_centr_betw <- NA_real_
# Calculate
for (i in 1:nrow(clusters_6_metrics)) {
# Define logical vector to subset settlements by the cluster
select_condition <- clust_6_2002 == i
# Create subgraph
temp_graph <- my_subgraph_function(res_graph_2002, settl_index_2002[select_condition])
# Calculate edge_density
clusters_6_metrics[clusters_6_metrics$clust_6 == i,]$CL6_centr_betw <- centr_betw(temp_graph, normalized = T)$centralization
}
# Closeness Centralisation (централизация по близости)
# Create column
clusters_6_metrics$CL6_centr_clo<- NA_real_
# Calculate
for (i in 1:nrow(clusters_6_metrics)) {
# Define logical vector to subset settlements by the cluster
select_condition <- clust_6_2002 == i
# Create subgraph
temp_graph <- my_subgraph_function(res_graph_2002, settl_index_2002[select_condition])
# Calculate edge_density
clusters_6_metrics[clusters_6_metrics$clust_6 == i,]$CL6_centr_clo <- centr_clo(temp_graph, normalized = T)$centralization
}
# Quick explorative analysis:
# Var_dif vs centr_betw
clusters_6_metrics %>%
ggplot(aes(x = CL6_centr_betw, y = CL6_variance_dif))+
# geom_smooth()+
geom_point(aes(size = CL6_mean_pop2002))+
geom_text(aes(x = CL6_centr_betw+0.001, y = CL6_variance_dif + 0.5, label = clust_6))
# PopDyn vs centr_betw
clusters_6_metrics %>%
ggplot(aes(x = CL6_centr_betw, y = CL6_pop2010to2002_rel))+
# geom_smooth()+
geom_point(aes(size = CL6_mean_pop2002))+
geom_text(aes(x = CL6_centr_betw+0.01, y = CL6_pop2010to2002_rel - 0.5, label = clust_6))
# Var_dif vs centr_betw
clusters_6_metrics %>%
ggplot(aes(x = CL6_centr_clo, y = CL6_variance_dif))+
# geom_smooth()+
geom_point(aes(size = CL6_mean_pop2002))+
geom_text(aes(x = CL6_centr_clo+0.001, y = CL6_variance_dif + 0.5, label = clust_6))
# ========================================
# 2.4. Distance to the regional capital (population-weighted, per cluster)
# NOTE(review): clust_6_2002 (cluster labels) and dist_matrix_2002 (pairwise
# settlement distances) come from Part2_output.RData -- confirm row order of
# dist_matrix_2002 matches df.
clusters_6_metrics$CL6_dist2Tyumen <- NA_real_
for (i in 1:nrow(clusters_6_metrics)) {
  # Define logical vector to subset observations by the cluster
  select_condition <- clust_6_2002 == i
  settlements_temp <- df[select_condition,]
  # Subset distance matrix: by row - cluster members, by column - Tyumen
  distances_to_Tyumen <- dist_matrix_2002[select_condition, df$ShortName == "г. Тюмень"]
  # Weight each member's distance by its share of the cluster's 2002 population
  res <- sum(distances_to_Tyumen * settlements_temp$Census2002/sum(settlements_temp$Census2002))
  # Store the weighted mean distance in the cluster's row
  clusters_6_metrics[clusters_6_metrics$clust_6 == i,]$CL6_dist2Tyumen <- res
}
# ==================================
# 3. Рассчет метрик для 18 кластеров
# ==================================
# ======================================
# 3.1. Descriptive metrics on population
df %>%
group_by(clust_18) %>%
mutate(CL18_n = n(), # Number of settlements
CL18_pop2002 = sum(Census2002), # 2002 population
CL18_pop2010 = sum(Census2010), # 2010 population общая численность населения
CL18_pop2010to2002_rel = CL18_pop2010/CL18_pop2002*100, # percentage of 2010-population to 2002-population
CL18_max_pop2002 = max(Census2002), # the largest settlement's size
CL18_mean_pop2002 = mean(Census2002), # mean settlement's size
CL18_median_pop2002 = median(Census2002)) %>% # median settlement's size
# select the columns we need
dplyr::select(clust_6, clust_18, CL18_pop2002, CL18_pop2010, CL18_pop2010to2002_rel,
CL18_max_pop2002, CL18_mean_pop2002, CL18_median_pop2002) %>%
unique() -> clusters_18_metrics # Save the results into new data.frame
# ==============================================================
# 3.2. Variance in population distribution among the settlements
# Create new columns
clusters_18_metrics$CL18_variance_2002 <- NA_real_
clusters_18_metrics$CL18_variance_2010 <- NA_real_
for (i in 1:nrow(clusters_18_metrics)) {
# Define logical vector to subset observations by the cluster
select_condition <- clust_18_2002 == i
settlements_temp <- df[select_condition,]
  # Calculate variation (standard deviation(x)/mean(x))
# 2002
clusters_18_metrics[clusters_18_metrics$clust_18 == i,]$CL18_variance_2002 <-
sd(settlements_temp$Census2002, na.rm = T)/mean(settlements_temp$Census2002, na.rm = T)
# 2010
clusters_18_metrics[clusters_18_metrics$clust_18 == i,]$CL18_variance_2010 <-
sd(settlements_temp$Census2010, na.rm = T)/mean(settlements_temp$Census2010, na.rm = T)
}
# Calculate the difference in variance between 2002 and 2010 (темпы сжатия расселения)
clusters_18_metrics %>%
mutate(CL18_variance_dif = CL18_variance_2010/CL18_variance_2002*100) ->
clusters_18_metrics
# 3.2.1. Quick explorative analysis
# Темпы сжатия расселения vs общая динамика населения
clusters_18_metrics %>%
ggplot(aes(y=CL18_variance_dif, x=CL18_pop2010to2002_rel))+
geom_point(aes(size = CL18_mean_pop2002))+
# geom_smooth(method = "glm")+
scale_size_continuous(name = "Ср. размер\nн.п. (чел.)",
breaks = c(0, 300, 500, 1000, 2000), trans = "sqrt",
labels = c("<300", "300-499", "500-999", "1000-2000", ">8000"))+
scale_y_continuous(name = "Динамика территориальной\nдифференциации расселения", breaks = seq(100, 115, 5),
limits = c(100,115))+
scale_x_continuous(name = "Динамика населения (%)")
# Темпы сжатия расселения vs средний размер населенных пунктов
clusters_18_metrics %>%
ggplot(aes(x=CL18_mean_pop2002, y=CL18_variance_dif))+
geom_point()+
# geom_smooth(method = "glm")+
scale_x_continuous(trans = "log")+
scale_y_continuous(name = "Изменение вариации")
# Сжатие расселения наблюдается везде, но его траектория разная: есть две группы районов:
# 1 группа: низкие темпы сжатия на фоне роста или незначительного сокращения населения
# 2 группа: высокие темпы сжатия на фоне общего значительного сокращения населения
# =================================
# 3.3. Централизация/связность сети
# 3.3.1. Density
# The density of a graph is the ratio of the number of edges
# and the number of possible edges
# Create column
clusters_18_metrics$CL18_density <- NA_real_
# Calculate
for (i in 1:nrow(clusters_18_metrics)) {
# Define logical vector to subset settlements by the cluster
select_condition <- clust_18_2002 == i
# Create subgraph
temp_graph <- my_subgraph_function(res_graph_2002, settl_index_2002[select_condition])
# Calculate edge_density
clusters_18_metrics[clusters_18_metrics$clust_18 == i,]$CL18_density <- edge_density(temp_graph)
}
# Quick explorative analysis:
# PopDynamics vs Density
clusters_18_metrics %>%
ggplot(aes(x = CL18_density, y = CL18_pop2010to2002_rel))+
geom_point(aes(size = CL18_mean_pop2002))+
# geom_smooth(col = "grey")+
geom_text(aes(x = CL18_density+0.001, y = CL18_pop2010to2002_rel - 0.5, label = clust_18))
# Var_dif vs Density
clusters_18_metrics %>%
ggplot(aes(x = CL18_density, y = CL18_variance_dif))+
# geom_smooth()+
geom_point(aes(size = CL18_mean_pop2002))+
geom_text(aes(x = CL18_density, y = CL18_variance_dif + 0.5, label = clust_6))
# 3.3.2. Centralization
# Betweenness Centralisation (централизация по посредничеству)
# Create column
clusters_18_metrics$CL18_centr_betw <- NA_real_
# Calculate
for (i in 1:nrow(clusters_18_metrics)) {
# Define logical vector to subset settlements by the cluster
select_condition <- clust_18_2002 == i
# Create subgraph
temp_graph <- my_subgraph_function(res_graph_2002, settl_index_2002[select_condition])
# Calculate edge_density
clusters_18_metrics[clusters_18_metrics$clust_18 == i,]$CL18_centr_betw <- centr_betw(temp_graph, normalized = T)$centralization
}
# Closeness Centralisation (централизация по близости)
# Create column
clusters_18_metrics$CL18_centr_clo <- NA_real_
# Calculate
for (i in 1:nrow(clusters_18_metrics)) {
# Define logical vector to subset settlements by the cluster
select_condition <- clust_18_2002 == i
# Create subgraph
temp_graph <- my_subgraph_function(res_graph_2002, settl_index_2002[select_condition])
# Calculate edge_density
clusters_18_metrics[clusters_18_metrics$clust_18 == i,]$CL18_centr_clo <- centr_clo(temp_graph, normalized = T)$centralization
}
# Quick explorative analysis:
# Var_dif vs centr_betw
clusters_18_metrics %>%
ggplot(aes(x = CL18_centr_betw, y = CL18_variance_dif))+
# geom_smooth()+
geom_point(aes(size = CL18_mean_pop2002))+
geom_text(aes(x = CL18_centr_betw+0.001, y = CL18_variance_dif + 0.5, label = clust_6))
# Var_dif vs centr_clo
clusters_18_metrics %>%
ggplot(aes(x = CL18_centr_clo, y = CL18_variance_dif))+
# geom_smooth()+
geom_point(aes(size = CL18_mean_pop2002))+
geom_text(aes(x = CL18_centr_clo+0.001, y = CL18_variance_dif + 0.5, label = clust_6))
# # 3.3.3. Check the calculations
#
# # Расчеты igraph включают в себя все узлы, поэтому могут быть смещения
# # Для проверки рассчитаем централизацию по близости вручную по матрице расстояний
# # и сравним с результатами igraph
#
# # Создаем пустую переменную
# clusters_18_metrics$CL18_centr_clo_m <- NA_real_
# for (i in 1:18) {
# # Define logical vector to subset observations by the cluster
# select_condition <- clust_18_2002 == i
# # Subset distance matrix
# temp_dist <- dist_matrix_2002[select_condition, select_condition]
# # Calculate vector of closeness centrality ids
# res <- apply(temp_dist, MARGIN = 1, FUN = function(x) return(1/sum(x, na.rm = T)))
# # Calculate centralisation index
# clusters_18_metrics[clusters_18_metrics$clust_18 == i,]$CL18_centr_clo_m <- sum(max(res)-res)/centr_clo_tmax(nodes = length(res))
# }
#
# # Compare with igraph results
# clusters_18_metrics %>%
# ggplot(aes(x = CL18_centr_clo_m, y = CL18_centr_clo, col = CL18_density))+
# geom_point()
# # Higher density, higher bias
# # However, the values quite highly correlate
# cor(clusters_18_metrics$CL18_centr_clo_m, clusters_18_metrics$CL18_centr_clo) # 0.83
# ========================================
# 3.4. Удаленность от регионального центра
clusters_18_metrics$CL18_dist2Tyumen <- NA_real_
for (i in 1:nrow(clusters_18_metrics)) {
# Define logical vector to subset observations by the cluster
select_condition <- clust_18_2002 == i
settlements_temp <- df[select_condition,]
# Subset distance matrix: by row - cluster members, by column - Tyumen
distances_to_Tyumen <- dist_matrix_2002[select_condition, df$ShortName == "г. Тюмень"]
# Weight by population proportion
res <- sum(distances_to_Tyumen * settlements_temp$Census2002/sum(settlements_temp$Census2002))
# Save to res cell
clusters_18_metrics[clusters_18_metrics$clust_18 == i,]$CL18_dist2Tyumen <- res
}
# ====================================
# 4. Settlement-level metrics
# ====================================
# =====================================
# 4.1. Distance to the regional capital
# Network distance from every settlement to Tyumen, taken from the
# precomputed pairwise distance matrix.
df$dist2Tyumen <- dist_matrix_2002[,which(settlements_2002$ShortName == "г. Тюмень")]
# ==========================
# 4.1.1 Population dynamics
# 2010 population as a percentage of 2002 population, per settlement.
df %>%
  mutate(pop2010to2002_rel = Census2010/Census2002*100) -> df
# ==========================================================
# 4.2. Closeness Centrality (in a scope of the whole region)
# Closeness centrality describes how easily an actor can reach all other
# actors -- how "close" it is to everyone. In its simplest form it is
# computed as 1 divided by the sum of shortest-path distances to all
# other nodes.
# 4.2.1. Closeness centrality
df$clo <- 1/(dist_matrix_2002 %>% apply(1, sum))
# 4.2.2. Weighted closeness centrality
# However, for a settlement the position relative to ALL other vertices may
# not be as important as the position relative to the main (largest) ones.
# To take settlement size into account, we multiply the distance matrix by
# the normalized population of the destination nodes.
# Create matrix of population sizes in 2002 (one row per origin settlement)
pop_2002_matrix <- rep.row(normalize(settlements_2002$Census2002),
                           nrow(dist_matrix_2002))
# Calculate distance matrices, weighted by population (_w)
dist_matrix_2002_w <- dist_matrix_2002 * pop_2002_matrix
# Calculate closeness centrality, weighted by population
df$clo_w <- 1/(dist_matrix_2002_w %>% apply(1, sum))
# How does 'pure' closeness centrality relate to the weighted one?
df %>%
  ggplot(aes(x = clo, y = clo_w, col= as.factor(clust_6)))+
  geom_point()
# ====================================================
# 4.3. Closeness Centrality (in a scope of 6 clusters)
# 4.3.1. Closeness centrality
df$clo_CL6 <- NA_real_
# Calculate
for (i in 1:nrow(clusters_6_metrics)) {
# Define logical vector to subset settlements by the cluster
select_condition <- clust_6_2002 == i
# Subset distance matrix
temp_matrix <- dist_matrix_2002[select_condition, select_condition]
# Calculate edge_density
df[df$clust_6 == i,]$clo_CL6 <- 1/(temp_matrix %>% apply(1, sum))
}
# 4.3.2. Weighted closeness centrality
df$clo_CL6_w <- NA_real_
# Calculate
for (i in 1:nrow(clusters_6_metrics)) {
# Define logical vector to subset settlements by the cluster
select_condition <- clust_6_2002 == i
# Subset distance matrix
temp_matrix <- dist_matrix_2002[select_condition, select_condition]
# Create matrix of population sizes in 2002
temp_pop_matrix <- rep.row(normalize(settlements_2002[select_condition,]$Census2002),
nrow(temp_matrix))
# Calculate distance matrices, weighted by population (_w)
temp_matrix_w <- temp_matrix * temp_pop_matrix
# Calculate edge_density
df[df$clust_6 == i,]$clo_CL6_w <- 1/(temp_matrix_w %>% apply(1, sum, na.rm = T))
}
# ====================================================
# 4.4. Closeness Centrality (in a scope of 18 clusters)
# 4.4.1. Closeness centrality
df$clo_CL18 <- NA_real_
# Calculate
for (i in 1:nrow(clusters_18_metrics)) {
# Define logical vector to subset settlements by the cluster
select_condition <- clust_18_2002 == i
# Subset distance matrix
temp_matrix <- dist_matrix_2002[select_condition, select_condition]
# Calculate edge_density
df[df$clust_18 == i,]$clo_CL18 <- 1/(temp_matrix %>% apply(1, sum))
}
# 4.4.2. Weighted closeness centrality
df$clo_CL18_w <- NA_real_
# Calculate
for (i in 1:nrow(clusters_18_metrics)) {
# Define logical vector to subset settlements by the cluster
select_condition <- clust_18_2002 == i
# Subset distance matrix
temp_matrix <- dist_matrix_2002[select_condition, select_condition]
# Create matrix of population sizes in 2002
temp_pop_matrix <- rep.row(normalize(settlements_2002[select_condition,]$Census2002),
nrow(temp_matrix))
# Calculate distance matrices, weighted by population (_w)
temp_matrix_w <- temp_matrix * temp_pop_matrix
# Calculate edge_density
df[df$clust_18 == i,]$clo_CL18_w <- 1/(temp_matrix_w %>% apply(1, sum, na.rm = T))
}
# ===========================
# 4.5. Betweenness Centrality
# Чтобы выделить населенные пункты, связывающие кластеры между собой,
# мы рассчитали центральность по посредничеству
# с ограничением максимальной длины пути, учитываемой в вычислениях.
# Первоначально идея была ограничить путь средним диаметром кластеров. Диаметр в
# сетевом анализе - это расстояние между двумя самыми удаленными точками графа.
# Однако оказалось, что это слишком большие величины. Средний диаметр 6 кластеров -
# 385522.3. Вetweenness Centrality на его основе на 0.97 коррелирует
# с обычной центральностью по всему графу. Средний диаметр по 18 кластерам - 194501.3 -
# тоже достаточно большой. В итоге, в качестве ограничения мы взяли медианный путь внутри
# кластеров (52021.79 м)
# 4.5.1. Calculate median path
# 6 clusters
clusters_6_metrics$CL6_median_path <- NA_real_
# Calculate
for (i in 1:nrow(clusters_6_metrics)) {
# Define logical vector to subset settlements by the cluster
select_condition <- clust_6_2002 == i
# Subset distance matrix
temp_matrix <- dist_matrix_2002[select_condition, select_condition]
# Calculate edge_density
clusters_6_metrics[clusters_6_metrics$clust_6 == i,]$CL6_median_path <- median(temp_matrix)
}
# 18 clusters
clusters_18_metrics$CL18_median_path <- NA_real_
# Calculate
for (i in 1:nrow(clusters_18_metrics)) {
# Define logical vector to subset settlements by the cluster
select_condition <- clust_18_2002 == i
# Subset distance matrix
temp_matrix <- dist_matrix_2002[select_condition, select_condition]
# Calculate edge_density
clusters_18_metrics[clusters_18_metrics$clust_18 == i,]$CL18_median_path <- median(temp_matrix)
}
# 4.5.2. Betweenness centrality (limited by clusters median path)
df$betw_CL6 <- estimate_betweenness(graph = res_graph_2002,
vids = settl_index_2002,
cutoff = median(clusters_6_metrics$CL6_median_path))
df$betw_CL18 <- estimate_betweenness(graph = res_graph_2002,
vids = settl_index_2002,
cutoff = median(clusters_18_metrics$CL18_median_path))
# 4.5.3. Explore betweenness centrality
# Distribution of values
df %>%
ggplot(aes(x = betw_CL18))+
geom_density()
df %>%
ggplot(aes(x = betw_CL6))+
geom_density()
# Distributions are similar. Let's compare the values.
df %>%
ggplot(aes(x = betw_CL18, y = betw_CL6))+
geom_point()
cor(df$betw_CL18, df$betw_CL6) # 0.86 - the values are highly correlated.
# Conclusion: it may make sense to use just one of these variables in the model
# Betweenness Centrality vs Population Dynamics
df %>%
filter(pop2010to2002_rel < 200) %>%
ggplot(aes(x = betw_CL6, y = pop2010to2002_rel))+
geom_point(aes(col = betw_CL6), alpha = 0.4)+
geom_smooth(method = "glm")+
scale_colour_gradientn(colours = viridis(7), trans = "sqrt")
# ==================================
# 5. Compiling the resulting dataset
# ==================================
# Combine all the metrics into a single dataset
df %>%
left_join(clusters_6_metrics, by = "clust_6") %>%
left_join(clusters_18_metrics %>% dplyr::select(-clust_6), by = "clust_18") -> df
# Save datasets into Rdatafile
save(df, clusters_6_metrics, clusters_18_metrics, file = "data/Part3_res_dataset.Rdata")
# P.S.: in discussion part of the paper Q was raised: what is mean path from the settlements
# with highest closeness centrality to all the other settlements in 6 and 18 clusters. Let's answer it.
load("data/Part2_output.RData")
load("data/Part3_res_dataset.Rdata")
# We call the metric "half_radius"
half_radius_6 <- NA_real_
for (i in 1:nrow(clusters_6_metrics)) {
# Define logical vector to subset settlements by the cluster
select_cluster_condition <- clust_6_2002 == i
# Find index of the settlement with highest closeness centrality
temp_max = df %>% filter(clust_6 == i) %>% pull(clo_CL6) %>% max()
select_settl_condition <- which(df$clo_CL6 == temp_max)
# Calculate median value
half_radius_6[i] <- dist_matrix_2002[select_settl_condition, select_cluster_condition] %>% mean()
}
half_radius_18 <- NA_real_
for (i in 1:nrow(clusters_18_metrics)) {
# Define logical vector to subset settlements by the cluster
select_cluster_condition <- clust_18_2002 == i
# Find index of the settlement with highest closeness centrality
temp_max = df %>% filter(clust_18 == i) %>% pull(clo_CL18) %>% max()
select_settl_condition <- which(df$clo_CL18 == temp_max)
# Calculate median value
half_radius_18[i] <- dist_matrix_2002[select_settl_condition, select_cluster_condition] %>% mean()
}
mean(half_radius_6) # 70313.81
mean(half_radius_18) # 35732.43
|
/4_Network_topology_analysis.R
|
no_license
|
alschel/Settlements-Network-Population-Dynamics
|
R
| false
| false
| 30,351
|
r
|
# The Impact of Settlements Network Structure on Population Dynamics
# Part 4. Network topology analysis
# Author: Alexander Sheludkov
# Date: 19 October 2018
library(sp)
library(sf)
library(raster)
library(dplyr)
library(ggplot2)
library(gridExtra)
library(igraph)
library(RColorBrewer)
library(tidyr)
library(viridis)
# load the data
load("data/Part2_output.RData")
# ================
# 1. Preprocessing
# ================
# 1.1. Сохраним данные в новую переменную и очистим от лишних столбцов
df <- settlements_2002@data
df %>%
dplyr::select(-Rosstat1981, -cohort1981, -cohort1990, -cohort2002,
-trend_1981to1990, -trend_1990to2002, -trend_2002to2010,
-rel1981to1990, -rel1990to2002, -rel2002to2010) -> df
# Add coordinates of the settlements as new columns
df %>%
mutate(lon = coordinates(settlements_2002)[,1],
lat = coordinates(settlements_2002)[,2]) %>%
dplyr::select(id, lon, lat, ShortName, MunicipalDistrict,
Rosstat1990, Census2002, Census2010, clust_3, clust_6, clust_18) -> df
# 1.2. Функция для выборки subgraphs из графа, созданного методом shp2graph
# У нас нестандартная структура графа и некоторые функции igraph с ним не работают
# В частности из-за несовпадения числа вершин и числа реальных н.п. induced_subgraph()
# возвращает набор несвязанных точек: все перекрестки, дорожные развязки "опускаются", и
# граф теряет связность.
# Создадим собственную функцию, которая будет принимать на вход граф (@graph),
# вектор индексов вершин-н.п. (@nodes) и возвращать subgraph без потерь
# Extract a connected subgraph for the given settlement vertices (@nodes)
# from the shp2graph road network (@graph). Junction vertices lying on the
# shortest paths between settlements are kept so the subgraph stays
# connected.
my_subgraph_function <- function(graph, nodes) {
  # 1) collect the ids of every vertex lying between the settlements.
  # shortest_paths() returns a named list of length @to that holds the
  # vertex and edge indices of each path.
  all_the_verticies <-
    shortest_paths(graph = graph, # igraph object
                   from = nodes, # vertex ids from
                   to = nodes) %>% # vertex ids to
    .$vpath %>% # extract list of returned vertex ids
    unlist() # unlist
  # 2) induce the subgraph on the collected vertices
  induced_subgraph(graph = graph, # igraph object
                   vids = all_the_verticies) %>% # vertex ids
    simplify() -> # remove loop and multiple edges
    sub_graph
  return(sub_graph)
}
# 1.3. Functions for creating matrix by repeating rows and columns
# (we will need them for calculating weighted cetnrality measures)
rep.row <- function(x, n) {
  # Build an n-row matrix whose every row equals the vector `x`
  # (used to weight distance matrices by destination population).
  elems <- rep(x, each = n)
  matrix(elems, nrow = n)
}
rep.col <- function(x, n) {
  # Build an n-column matrix whose every column equals the vector `x`.
  # Bug fix: the original used rep(x, each = n), which interleaves the
  # elements and (for length(x) > 1, n > 1) does NOT replicate `x` as
  # columns — e.g. rep.col(c(1,2,3), 2) produced columns (1,1,2) and
  # (2,3,3). Filling column-wise requires rep(x, times = n).
  matrix(rep(x, times = n), ncol = n)
}
# 1.4. Define function for min-max normalizing data to [0, 1]
#
# @param x     A numeric vector.
# @param na.rm Remove NA values when computing the range? Defaults to
#              FALSE, which preserves the original behavior (any NA in
#              `x` makes the whole result NA).
# @return A numeric vector the same length as `x`, rescaled so that
#         min(x) -> 0 and max(x) -> 1. A constant vector yields NaN
#         (0/0), as in the original implementation.
normalize <- function(x, na.rm = FALSE) {
  rng <- range(x, na.rm = na.rm)
  (x - rng[1]) / (rng[2] - rng[1])
}
# =================================
# 2. Computing metrics for the 6 clusters
# =================================
# ======================================
# 2.1. Descriptive metrics on population
# One row per cluster: settlement count, total population in 2002/2010,
# relative population change, and size statistics of member settlements.
df %>%
group_by(clust_6) %>%
mutate(CL6_n = n(), # Number of settlements
CL6_pop2002 = sum(Census2002), # 2002 total population
CL6_pop2010 = sum(Census2010), # 2010 total population
CL6_pop2010to2002_rel = CL6_pop2010/CL6_pop2002*100, # percentage of 2010-population to 2002-population
CL6_max_pop2002 = max(Census2002), # the largest settlement's size
CL6_mean_pop2002 = mean(Census2002), # mean settlement's size
CL6_median_pop2002 = median(Census2002)) %>% # median settlement's size
# select the columns we need; the mutate() above made them constant
# within each cluster, so unique() collapses to one row per cluster
dplyr::select(clust_6, CL6_n, CL6_pop2002, CL6_pop2010, CL6_pop2010to2002_rel,
CL6_max_pop2002, CL6_mean_pop2002, CL6_median_pop2002) %>%
unique() -> clusters_6_metrics # Save the results into new data.frame
# ==============================================================
# 2.2. Variance in population distribution among the settlements
# Create new columns
clusters_6_metrics$CL6_variance_2002 <- NA_real_
clusters_6_metrics$CL6_variance_2010 <- NA_real_
for (i in 1:nrow(clusters_6_metrics)) {
# Define logical vector to subset observations by the cluster
select_condition <- clust_6_2002 == i
settlements_temp <- df[select_condition,]
# Calculate variance (standard deviation(x)/mean(x))
# 2002
clusters_6_metrics[clusters_6_metrics$clust_6 == i,]$CL6_variance_2002 <-
sd(settlements_temp$Census2002, na.rm = T)/mean(settlements_temp$Census2002, na.rm = T)
# 2010
clusters_6_metrics[clusters_6_metrics$clust_6 == i,]$CL6_variance_2010 <-
sd(settlements_temp$Census2010, na.rm = T)/mean(settlements_temp$Census2010, na.rm = T)
}
# Calculate the difference in variance between 2002 and 2010 (темпы сжатия расселения)
clusters_6_metrics %>%
mutate(CL6_variance_dif = CL6_variance_2010/CL6_variance_2002*100) ->
clusters_6_metrics
# 3.2.1. Quick explorative analysis
# Темпы сжатия расселения vs общая динамика населения
clusters_6_metrics %>%
ggplot(aes(y=CL6_variance_dif, x=CL6_pop2010to2002_rel))+
geom_point(aes(size = CL6_mean_pop2002))+
geom_smooth(method = "glm")+
scale_size_continuous(name = "Ср. размер\nн.п. (чел.)", breaks = c(0, 500, 2000))+
scale_y_continuous(name = "Динамика территориальной\nдифференциации расселения") +
scale_x_continuous(name = "Население в 2010 году к населению в 2002, %")
# =================================
# 2.3. Централизация/связность сети
# Для оценки связности сети в сетевой анализе используется понятие "connectivity". Оно показывает,
# сколько вершин или ребер нужно удалить, чтобы разбить граф на части. Однако при условии, что
# наши кластеры слабо связаны, эта метрика имеет мало смысла - мы получим везде 1.
# "It is also possible to examine the extent to which a whole graph has a centralized structure.
# The concepts of density and centralization refer to differing aspects of the overall 'compactness' of a graph.
# Density describes the general level of cohesion in a graph; centralization describes the extent
# to which this cohesion is organized around particular focal points. Centralization and density,
# therefore, are important complementary measures".
# "The general procedure involved in any measure of graph centralization is
# to look at the differences between the centrality scores of the most
# central point and those of all other points. Centralization, then,
# is the ratio of the actual sum of differences to the maximum possible sum of differences".
# Source: http://www.analytictech.com/mb119/chapter5.htm
# 2.3.1. Density
# The density of a graph is the ratio of the number of edges and the number of possible edges.
# Создадим переменную
clusters_6_metrics$CL6_density <- NA_real_
# Calculate
for (i in 1:nrow(clusters_6_metrics)) {
# Define logical vector to subset settlements by the cluster
select_condition <- clust_6_2002 == i
# Create subgraph
temp_graph <- my_subgraph_function(res_graph_2002, settl_index_2002[select_condition])
# Calculate edge_density
clusters_6_metrics[clusters_6_metrics$clust_6 == i,]$CL6_density <- edge_density(temp_graph)
}
# Quick explorative analysis:
# PopDynamics vs Density
clusters_6_metrics %>%
ggplot(aes(x = CL6_density, y = CL6_pop2010to2002_rel))+
geom_point(aes(size = CL6_mean_pop2002))+
# geom_smooth(col = "grey")+
geom_text(aes(x = CL6_density, y = CL6_pop2010to2002_rel - 1, label = clust_6))
# Var_dif vs Density
clusters_6_metrics %>%
ggplot(aes(x = CL6_density, y = CL6_variance_dif))+
# geom_smooth()+
geom_point(aes(size = CL6_mean_pop2002))+
geom_text(aes(x = CL6_density, y = CL6_variance_dif + 0.5, label = clust_6))
# 2.3.2. Centralization
# Betweenness Centralisation (централизация по посредничеству)
# Create column
clusters_6_metrics$CL6_centr_betw <- NA_real_
# Calculate
for (i in 1:nrow(clusters_6_metrics)) {
# Define logical vector to subset settlements by the cluster
select_condition <- clust_6_2002 == i
# Create subgraph
temp_graph <- my_subgraph_function(res_graph_2002, settl_index_2002[select_condition])
# Calculate edge_density
clusters_6_metrics[clusters_6_metrics$clust_6 == i,]$CL6_centr_betw <- centr_betw(temp_graph, normalized = T)$centralization
}
# Closeness Centralisation (централизация по близости)
# Create column
clusters_6_metrics$CL6_centr_clo<- NA_real_
# Calculate
for (i in 1:nrow(clusters_6_metrics)) {
# Define logical vector to subset settlements by the cluster
select_condition <- clust_6_2002 == i
# Create subgraph
temp_graph <- my_subgraph_function(res_graph_2002, settl_index_2002[select_condition])
# Calculate edge_density
clusters_6_metrics[clusters_6_metrics$clust_6 == i,]$CL6_centr_clo <- centr_clo(temp_graph, normalized = T)$centralization
}
# Quick explorative analysis:
# Var_dif vs centr_betw
clusters_6_metrics %>%
ggplot(aes(x = CL6_centr_betw, y = CL6_variance_dif))+
# geom_smooth()+
geom_point(aes(size = CL6_mean_pop2002))+
geom_text(aes(x = CL6_centr_betw+0.001, y = CL6_variance_dif + 0.5, label = clust_6))
# PopDyn vs centr_betw
clusters_6_metrics %>%
ggplot(aes(x = CL6_centr_betw, y = CL6_pop2010to2002_rel))+
# geom_smooth()+
geom_point(aes(size = CL6_mean_pop2002))+
geom_text(aes(x = CL6_centr_betw+0.01, y = CL6_pop2010to2002_rel - 0.5, label = clust_6))
# Var_dif vs centr_betw
clusters_6_metrics %>%
ggplot(aes(x = CL6_centr_clo, y = CL6_variance_dif))+
# geom_smooth()+
geom_point(aes(size = CL6_mean_pop2002))+
geom_text(aes(x = CL6_centr_clo+0.001, y = CL6_variance_dif + 0.5, label = clust_6))
# ========================================
# 2.4. Удаленность от регионального центра
clusters_6_metrics$CL6_dist2Tyumen <- NA_real_
for (i in 1:nrow(clusters_6_metrics)) {
# Define logical vector to subset observations by the cluster
select_condition <- clust_6_2002 == i
settlements_temp <- df[select_condition,]
# Subset distance matrix: by row - cluster members, by column - Tyumen
distances_to_Tyumen <- dist_matrix_2002[select_condition, df$ShortName == "г. Тюмень"]
# Weight by population proportion
res <- sum(distances_to_Tyumen * settlements_temp$Census2002/sum(settlements_temp$Census2002))
# Save to res cell
clusters_6_metrics[clusters_6_metrics$clust_6 == i,]$CL6_dist2Tyumen <- res
}
# ==================================
# 3. Рассчет метрик для 18 кластеров
# ==================================
# ======================================
# 3.1. Descriptive metrics on population
# One row per 18-cluster: settlement count, total population in
# 2002/2010, relative population change, and size statistics.
# Fix: keep CL18_n in the select() — it is computed in mutate() but was
# dropped by the original select, unlike the parallel clusters_6 block
# which keeps CL6_n. CL18_n is constant within a cluster, so unique()
# still collapses to one row per cluster.
df %>%
group_by(clust_18) %>%
mutate(CL18_n = n(), # Number of settlements
CL18_pop2002 = sum(Census2002), # 2002 total population
CL18_pop2010 = sum(Census2010), # 2010 total population
CL18_pop2010to2002_rel = CL18_pop2010/CL18_pop2002*100, # percentage of 2010-population to 2002-population
CL18_max_pop2002 = max(Census2002), # the largest settlement's size
CL18_mean_pop2002 = mean(Census2002), # mean settlement's size
CL18_median_pop2002 = median(Census2002)) %>% # median settlement's size
# select the columns we need (clust_6 kept to know the parent cluster)
dplyr::select(clust_6, clust_18, CL18_n, CL18_pop2002, CL18_pop2010, CL18_pop2010to2002_rel,
CL18_max_pop2002, CL18_mean_pop2002, CL18_median_pop2002) %>%
unique() -> clusters_18_metrics # Save the results into new data.frame
# ==============================================================
# 3.2. Variance in population distribution among the settlements
# Create new columns
clusters_18_metrics$CL18_variance_2002 <- NA_real_
clusters_18_metrics$CL18_variance_2010 <- NA_real_
for (i in 1:nrow(clusters_18_metrics)) {
# Define logical vector to subset observations by the cluster
select_condition <- clust_18_2002 == i
settlements_temp <- df[select_condition,]
# Calculate variance (standard deviation(x)/mean(x))
# 2002
clusters_18_metrics[clusters_18_metrics$clust_18 == i,]$CL18_variance_2002 <-
sd(settlements_temp$Census2002, na.rm = T)/mean(settlements_temp$Census2002, na.rm = T)
# 2010
clusters_18_metrics[clusters_18_metrics$clust_18 == i,]$CL18_variance_2010 <-
sd(settlements_temp$Census2010, na.rm = T)/mean(settlements_temp$Census2010, na.rm = T)
}
# Calculate the difference in variance between 2002 and 2010 (темпы сжатия расселения)
clusters_18_metrics %>%
mutate(CL18_variance_dif = CL18_variance_2010/CL18_variance_2002*100) ->
clusters_18_metrics
# 3.2.1. Quick explorative analysis
# Темпы сжатия расселения vs общая динамика населения
clusters_18_metrics %>%
ggplot(aes(y=CL18_variance_dif, x=CL18_pop2010to2002_rel))+
geom_point(aes(size = CL18_mean_pop2002))+
# geom_smooth(method = "glm")+
scale_size_continuous(name = "Ср. размер\nн.п. (чел.)",
breaks = c(0, 300, 500, 1000, 2000), trans = "sqrt",
labels = c("<300", "300-499", "500-999", "1000-2000", ">8000"))+
scale_y_continuous(name = "Динамика территориальной\nдифференциации расселения", breaks = seq(100, 115, 5),
limits = c(100,115))+
scale_x_continuous(name = "Динамика населения (%)")
# Темпы сжатия расселения vs средний размер населенных пунктов
clusters_18_metrics %>%
ggplot(aes(x=CL18_mean_pop2002, y=CL18_variance_dif))+
geom_point()+
# geom_smooth(method = "glm")+
scale_x_continuous(trans = "log")+
scale_y_continuous(name = "Изменение вариации")
# Сжатие расселения наблюдается везде, но его траектория разная: есть две группы районов:
# 1 группа: низкие темпы сжатия на фоне роста или незначительного сокращения населения
# 2 группа: высокие темпы сжатия на фоне общего значительного сокращения населения
# =================================
# 3.3. Централизация/связность сети
# 3.3.1. Density
# The density of a graph is the ratio of the number of edges
# and the number of possible edges
# Create column
clusters_18_metrics$CL18_density <- NA_real_
# Calculate
for (i in 1:nrow(clusters_18_metrics)) {
# Define logical vector to subset settlements by the cluster
select_condition <- clust_18_2002 == i
# Create subgraph
temp_graph <- my_subgraph_function(res_graph_2002, settl_index_2002[select_condition])
# Calculate edge_density
clusters_18_metrics[clusters_18_metrics$clust_18 == i,]$CL18_density <- edge_density(temp_graph)
}
# Quick explorative analysis:
# PopDynamics vs Density
clusters_18_metrics %>%
ggplot(aes(x = CL18_density, y = CL18_pop2010to2002_rel))+
geom_point(aes(size = CL18_mean_pop2002))+
# geom_smooth(col = "grey")+
geom_text(aes(x = CL18_density+0.001, y = CL18_pop2010to2002_rel - 0.5, label = clust_18))
# Var_dif vs Density
clusters_18_metrics %>%
ggplot(aes(x = CL18_density, y = CL18_variance_dif))+
# geom_smooth()+
geom_point(aes(size = CL18_mean_pop2002))+
geom_text(aes(x = CL18_density, y = CL18_variance_dif + 0.5, label = clust_6))
# 3.3.2. Centralization
# Betweenness Centralisation (централизация по посредничеству)
# Create column
clusters_18_metrics$CL18_centr_betw <- NA_real_
# Calculate
for (i in 1:nrow(clusters_18_metrics)) {
# Define logical vector to subset settlements by the cluster
select_condition <- clust_18_2002 == i
# Create subgraph
temp_graph <- my_subgraph_function(res_graph_2002, settl_index_2002[select_condition])
# Calculate edge_density
clusters_18_metrics[clusters_18_metrics$clust_18 == i,]$CL18_centr_betw <- centr_betw(temp_graph, normalized = T)$centralization
}
# Closeness Centralisation (централизация по близости)
# Create column
clusters_18_metrics$CL18_centr_clo <- NA_real_
# Calculate
for (i in 1:nrow(clusters_18_metrics)) {
# Define logical vector to subset settlements by the cluster
select_condition <- clust_18_2002 == i
# Create subgraph
temp_graph <- my_subgraph_function(res_graph_2002, settl_index_2002[select_condition])
# Calculate edge_density
clusters_18_metrics[clusters_18_metrics$clust_18 == i,]$CL18_centr_clo <- centr_clo(temp_graph, normalized = T)$centralization
}
# Quick explorative analysis:
# Var_dif vs centr_betw
clusters_18_metrics %>%
ggplot(aes(x = CL18_centr_betw, y = CL18_variance_dif))+
# geom_smooth()+
geom_point(aes(size = CL18_mean_pop2002))+
geom_text(aes(x = CL18_centr_betw+0.001, y = CL18_variance_dif + 0.5, label = clust_6))
# Var_dif vs centr_clo
clusters_18_metrics %>%
ggplot(aes(x = CL18_centr_clo, y = CL18_variance_dif))+
# geom_smooth()+
geom_point(aes(size = CL18_mean_pop2002))+
geom_text(aes(x = CL18_centr_clo+0.001, y = CL18_variance_dif + 0.5, label = clust_6))
# # 3.3.3. Check the calculations
#
# # Расчеты igraph включают в себя все узлы, поэтому могут быть смещения
# # Для проверки рассчитаем централизацию по близости вручную по матрице расстояний
# # и сравним с результатами igraph
#
# # Создаем пустую переменную
# clusters_18_metrics$CL18_centr_clo_m <- NA_real_
# for (i in 1:18) {
# # Define logical vector to subset observations by the cluster
# select_condition <- clust_18_2002 == i
# # Subset distance matrix
# temp_dist <- dist_matrix_2002[select_condition, select_condition]
# # Calculate vector of closeness centrality ids
# res <- apply(temp_dist, MARGIN = 1, FUN = function(x) return(1/sum(x, na.rm = T)))
# # Calculate centralisation index
# clusters_18_metrics[clusters_18_metrics$clust_18 == i,]$CL18_centr_clo_m <- sum(max(res)-res)/centr_clo_tmax(nodes = length(res))
# }
#
# # Compare with igraph results
# clusters_18_metrics %>%
# ggplot(aes(x = CL18_centr_clo_m, y = CL18_centr_clo, col = CL18_density))+
# geom_point()
# # Higher density, higher bias
# # However, the values quite highly correlate
# cor(clusters_18_metrics$CL18_centr_clo_m, clusters_18_metrics$CL18_centr_clo) # 0.83
# ========================================
# 3.4. Удаленность от регионального центра
clusters_18_metrics$CL18_dist2Tyumen <- NA_real_
for (i in 1:nrow(clusters_18_metrics)) {
# Define logical vector to subset observations by the cluster
select_condition <- clust_18_2002 == i
settlements_temp <- df[select_condition,]
# Subset distance matrix: by row - cluster members, by column - Tyumen
distances_to_Tyumen <- dist_matrix_2002[select_condition, df$ShortName == "г. Тюмень"]
# Weight by population proportion
res <- sum(distances_to_Tyumen * settlements_temp$Census2002/sum(settlements_temp$Census2002))
# Save to res cell
clusters_18_metrics[clusters_18_metrics$clust_18 == i,]$CL18_dist2Tyumen <- res
}
# ==========================
# 4. Рассчет метрик для н.п.
# ==========================
# =====================================
# 4.1. Distance to the regional capital
df$dist2Tyumen <- dist_matrix_2002[,which(settlements_2002$ShortName == "г. Тюмень")]
# ==========================
# 4.1.1 Population dynamics
df %>%
mutate(pop2010to2002_rel = Census2010/Census2002*100) -> df
# ==========================================================
# 4.2. Closeness Centrality (in a scope of the whole region)
# Closeness centrality описывает способность актора достичь максимальное количество других
# акторов, затратив наименьшее число сил - насколько "близок" ко всем.
# В самом простом варианте считается как 1, деленная на сумму
# всех кратчайших расстояний до всех других узлов
# 4.2.1. Closeness centrality
df$clo <- 1/(dist_matrix_2002 %>% apply(1, sum))
# 4.2.2. Weighted closeness centrality
# However, for a settlement the position in relation to all the other vertices may not so
# important, as the position to the main (largest) ones. Let's calculate centrality measures
# in accordance to the size of settlements. In order to take the size into account, we
# multiply distance matrix to normalized population by the destination nodes
# Create matrix of population sizes in 2002
pop_2002_matrix <- rep.row(normalize(settlements_2002$Census2002),
nrow(dist_matrix_2002))
# Calculate distance matrices, weighted by population (_w)
dist_matrix_2002_w <- dist_matrix_2002 * pop_2002_matrix
# Calculate centrality closeness, weightened by population
df$clo_w <- 1/(dist_matrix_2002_w %>% apply(1, sum))
# How relate 'pure' closeness centrality to the weighted one?
df %>%
ggplot(aes(x = clo, y = clo_w, col= as.factor(clust_6)))+
geom_point()
# ====================================================
# 4.3. Closeness Centrality (in a scope of 6 clusters)
# 4.3.1. Closeness centrality
df$clo_CL6 <- NA_real_
# Calculate
for (i in 1:nrow(clusters_6_metrics)) {
# Define logical vector to subset settlements by the cluster
select_condition <- clust_6_2002 == i
# Subset distance matrix
temp_matrix <- dist_matrix_2002[select_condition, select_condition]
# Calculate edge_density
df[df$clust_6 == i,]$clo_CL6 <- 1/(temp_matrix %>% apply(1, sum))
}
# 4.3.2. Weighted closeness centrality
df$clo_CL6_w <- NA_real_
# Calculate
for (i in 1:nrow(clusters_6_metrics)) {
# Define logical vector to subset settlements by the cluster
select_condition <- clust_6_2002 == i
# Subset distance matrix
temp_matrix <- dist_matrix_2002[select_condition, select_condition]
# Create matrix of population sizes in 2002
temp_pop_matrix <- rep.row(normalize(settlements_2002[select_condition,]$Census2002),
nrow(temp_matrix))
# Calculate distance matrices, weighted by population (_w)
temp_matrix_w <- temp_matrix * temp_pop_matrix
# Calculate edge_density
df[df$clust_6 == i,]$clo_CL6_w <- 1/(temp_matrix_w %>% apply(1, sum, na.rm = T))
}
# ====================================================
# 4.4. Closeness Centrality (in a scope of 18 clusters)
# 4.4.1. Closeness centrality
df$clo_CL18 <- NA_real_
# Calculate
for (i in 1:nrow(clusters_18_metrics)) {
# Define logical vector to subset settlements by the cluster
select_condition <- clust_18_2002 == i
# Subset distance matrix
temp_matrix <- dist_matrix_2002[select_condition, select_condition]
# Calculate edge_density
df[df$clust_18 == i,]$clo_CL18 <- 1/(temp_matrix %>% apply(1, sum))
}
# 4.4.2. Weighted closeness centrality
df$clo_CL18_w <- NA_real_
# Calculate
for (i in 1:nrow(clusters_18_metrics)) {
# Define logical vector to subset settlements by the cluster
select_condition <- clust_18_2002 == i
# Subset distance matrix
temp_matrix <- dist_matrix_2002[select_condition, select_condition]
# Create matrix of population sizes in 2002
temp_pop_matrix <- rep.row(normalize(settlements_2002[select_condition,]$Census2002),
nrow(temp_matrix))
# Calculate distance matrices, weighted by population (_w)
temp_matrix_w <- temp_matrix * temp_pop_matrix
# Calculate edge_density
df[df$clust_18 == i,]$clo_CL18_w <- 1/(temp_matrix_w %>% apply(1, sum, na.rm = T))
}
# ===========================
# 4.5. Betweenness Centrality
# Чтобы выделить населенные пункты, связывающие кластеры между собой,
# мы рассчитали центральность по посредничеству
# с ограничением максимальной длины пути, учитываемой в вычислениях.
# Первоначально идея была ограничить путь средним диаметром кластеров. Диаметр в
# сетевом анализе - это расстояние между двумя самыми удаленными точками графа.
# Однако оказалось, что это слишком большие величины. Средний диаметр 6 кластеров -
# 385522.3. Вetweenness Centrality на его основе на 0.97 коррелирует
# с обычной центральностью по всему графу. Средний диаметр по 18 кластерам - 194501.3 -
# тоже достаточно большой. В итоге, в качестве ограничения мы взяли медианный путь внутри
# кластеров (52021.79 м)
# 4.5.1. Calculate median path
# 6 clusters
clusters_6_metrics$CL6_median_path <- NA_real_
# Calculate
for (i in 1:nrow(clusters_6_metrics)) {
# Define logical vector to subset settlements by the cluster
select_condition <- clust_6_2002 == i
# Subset distance matrix
temp_matrix <- dist_matrix_2002[select_condition, select_condition]
# Calculate edge_density
clusters_6_metrics[clusters_6_metrics$clust_6 == i,]$CL6_median_path <- median(temp_matrix)
}
# 18 clusters
clusters_18_metrics$CL18_median_path <- NA_real_
# Calculate
for (i in 1:nrow(clusters_18_metrics)) {
# Define logical vector to subset settlements by the cluster
select_condition <- clust_18_2002 == i
# Subset distance matrix
temp_matrix <- dist_matrix_2002[select_condition, select_condition]
# Calculate edge_density
clusters_18_metrics[clusters_18_metrics$clust_18 == i,]$CL18_median_path <- median(temp_matrix)
}
# 4.5.2. Betweenness centrality (limited by clusters median path)
df$betw_CL6 <- estimate_betweenness(graph = res_graph_2002,
vids = settl_index_2002,
cutoff = median(clusters_6_metrics$CL6_median_path))
df$betw_CL18 <- estimate_betweenness(graph = res_graph_2002,
vids = settl_index_2002,
cutoff = median(clusters_18_metrics$CL18_median_path))
# 4.5.3. Explore betweenness centrality
# Distribution of values
df %>%
ggplot(aes(x = betw_CL18))+
geom_density()
df %>%
ggplot(aes(x = betw_CL6))+
geom_density()
# Distributions are similar. Let's compare the values.
df %>%
ggplot(aes(x = betw_CL18, y = betw_CL6))+
geom_point()
cor(df$betw_CL18, df$betw_CL6) # 0.86 - the values are highly correlated.
# Conclusion: maybe it makes sense to use just one of these variables in the model
# Betweenness Centrality vs Population Dynamics
df %>%
filter(pop2010to2002_rel < 200) %>%
ggplot(aes(x = betw_CL6, y = pop2010to2002_rel))+
geom_point(aes(col = betw_CL6), alpha = 0.4)+
geom_smooth(method = "glm")+
scale_colour_gradientn(colours = viridis(7), trans = "sqrt")
# ==================================
# 5. Compiling the resulting dataset
# ==================================
# Combine all the metrics into a single dataset
df %>%
left_join(clusters_6_metrics, by = "clust_6") %>%
left_join(clusters_18_metrics %>% dplyr::select(-clust_6), by = "clust_18") -> df
# Save datasets into Rdatafile
save(df, clusters_6_metrics, clusters_18_metrics, file = "data/Part3_res_dataset.Rdata")
# P.S.: in discussion part of the paper Q was raised: what is mean path from the settlements
# with highest closeness centrality to all the other settlements in 6 and 18 clusters. Let's answer it.
load("data/Part2_output.RData")
load("data/Part3_res_dataset.Rdata")
# We call the metric "half_radius"
half_radius_6 <- NA_real_
for (i in 1:nrow(clusters_6_metrics)) {
# Define logical vector to subset settlements by the cluster
select_cluster_condition <- clust_6_2002 == i
# Find index of the settlement with highest closeness centrality
temp_max = df %>% filter(clust_6 == i) %>% pull(clo_CL6) %>% max()
select_settl_condition <- which(df$clo_CL6 == temp_max)
# Calculate median value
half_radius_6[i] <- dist_matrix_2002[select_settl_condition, select_cluster_condition] %>% mean()
}
half_radius_18 <- NA_real_
for (i in 1:nrow(clusters_18_metrics)) {
# Define logical vector to subset settlements by the cluster
select_cluster_condition <- clust_18_2002 == i
# Find index of the settlement with highest closeness centrality
temp_max = df %>% filter(clust_18 == i) %>% pull(clo_CL18) %>% max()
select_settl_condition <- which(df$clo_CL18 == temp_max)
# Calculate median value
half_radius_18[i] <- dist_matrix_2002[select_settl_condition, select_cluster_condition] %>% mean()
}
mean(half_radius_6) # 70313.81
mean(half_radius_18) # 35732.43
|
RegSvm_1<-fread(file="~/Desktop/intro to data/project/dataset/out-201501.csv",select=c(168,194,196,139,141,142,232))
RegSvm_2<-fread(file="~/Desktop/intro to data/project/dataset/out-201402.csv",select=c(168,194,196,139,141,142,232))
RegSvm_3<-fread(file="~/Desktop/intro to data/project/dataset/out-201403.csv",select=c(168,194,196,139,141,142,232))
RegSvm_4<-fread(file="~/Desktop/intro to data/project/dataset/out-201404.csv",select=c(168,194,196,139,141,142,232))
RegSvm_5<-fread(file="~/Desktop/intro to data/project/dataset/out-201405.csv",select=c(168,194,196,139,141,142,232))
RegSvm_6<-fread(file="~/Desktop/intro to data/project/dataset/out-201406.csv",select=c(168,194,196,139,141,142,232))
RegSvm_7<-fread(file="~/Desktop/intro to data/project/dataset/out-201407.csv",select=c(168,194,196,139,141,142,232))
RegSvm_8<-fread(file="~/Desktop/intro to data/project/dataset/out-201408.csv",select=c(168,194,196,139,141,142,232))
RegSvm_9<-fread(file="~/Desktop/intro to data/project/dataset/out-201409.csv",select=c(168,194,196,139,141,142,232))
RegSvm_10<-fread(file="~/Desktop/intro to data/project/dataset/out-201410.csv",select=c(168,194,196,139,141,142,232))
RegSvm_11<-fread(file="~/Desktop/intro to data/project/dataset/out-201411.csv",select=c(168,194,196,139,141,142,232))
RegSvm_12<-fread(file="~/Desktop/intro to data/project/dataset/out-201412.csv",select=c(168,194,196,139,141,142,232))
RegSvmData<-RegressionData[,c(2,4,9:12)]
summary(RegressionData)
RegSvmData[RegSvmData==""]<-NA
RegSvmData1<-na.omit(RegSvmData)
View(RegSvmData1)
RegSvmRow<-which((RegSvmData1$State_PL=="California") & (RegSvmData1$Type_PL=="Business") & (RegSvmData1$Location_PL=="Urban"))
RegSvmData2<-RegSvmData1[c(RegSvmRow),]
View(RegSvmData2)
summary(RegSvmData2)
RegSvmData2[,3:6]<-lapply(RegSvmData2[,3:6],factor)
randIndex1<-sample(1:dim(RegSvmData2)[1])
summary(randIndex1)
head(randIndex1)
CutPoint2_3<-floor(2*dim(RegSvmData2)[1]/3)
CutPoint2_3
RegTrainData<-RegSvmData2[randIndex1[1:CutPoint2_3],]
RegTrainData<-RegTrainData[,-3:-5]
RegTestData<-RegSvmData2[randIndex1[(CutPoint2_3+1):dim(RegSvmData2)[1]],]
RegTestData<-RegTestData[,-3:-5]
install.packages("kernlab")
library(kernlab)
RegKsvmOutput<-ksvm(NPS_Type~., data=RegTrainData, kernel="rbfdot", kpar="automatic",C=100,cross=10,prob.model=TRUE)
RegKsvmPred<-predict(RegKsvmOutput, RegTestData, type="votes")
RegCompTable<-data.frame(RegTestData[,3],RegKsvmPred[1,])
table(RegCompTable)
# 75.0%
install.packages("e1071")
library(e1071)
install.packages("klaR")
library(klaR)
RegNbModel<-naiveBayes(NPS_Type~.,data=RegTrainData)
RegNbPred<-predict(RegNbModel,RegTestData)
RegTestData$NB_Prediction<-RegNbPred
RegTestData$nb_YesorNot<-ifelse(RegTestData$NPS_Type==RegTestData$NB_Prediction,"Correct","Wrong")
View(RegTestData)
length(which(RegTestData$nb_YesorNot=="Correct"))
Regaccuaracy<-length(which(RegTestData$nb_YesorNot=="Correct"))/length(RegTestData$nb_YesorNot)
Regaccuaracy # 78.1%
RegTestData$Guest_Room_H<-as.numeric(RegTestData$Guest_Room_H)
RegTestData$Condition_Hotel_H<-as.numeric(RegTestData$Condition_Hotel_H)
RegTestData$nb_YesorNot<-as.factor(RegTestData$nb_YesorNot)
RegPlot1<-ggplot(data=RegTestData)+geom_point(aes(x=RegTestData$Guest_Room_H,y=RegTestData$Condition_Hotel_H,color=RegTestData$NPS_Type))
RegPlot2<-ggplot(data=RegTestData)+geom_point(aes(x=RegTestData$Guest_Room_H,y=RegTestData$Condition_Hotel_H,color=RegTestData$NB_Prediction))
RegPlot3<-ggplot(data=RegTestData)+geom_point(aes(x=RegTestData$Guest_Room_H,y=RegTestData$Condition_Hotel_H,color=RegTestData$nb_YesorNot))
length(RegTestData$NPS_Type)
AruleData<-RegressionData[,c(2,4,5,9:12)]
summary(AruleData)
AruleData[AruleData==""]<-NA
AruleData1<-na.omit(AruleData)
View(AruleData1)
AruleSvmRow<-which((AruleData1$State_PL=="California") & (AruleData1$Type_PL=="Business") & (AruleData1$Location_PL=="Urban"))
AruleData2<-AruleData1[c(AruleSvmRow),]
View(AruleData2)
summary(AruleData2)
# Discretize 1-10 satisfaction scores into "low" (1-6), "med" (7-8),
# "high" (9-10). Values outside 1-10 are left as-is (coerced to
# character once any bucket assignment happens).
# Fix: compute all three masks on the ORIGINAL numeric values before
# any assignment. The original version's first assignment coerced the
# vector to character, so the later >=/<= tests became locale-dependent
# string comparisons (they happened to work for 1-10 only by accident).
RuleFunction<-function(c)
{
  low  <- c >= 1 & c <= 6
  med  <- c >= 7 & c <= 8
  high <- c == 9 | c == 10
  c[low]  <- "low"
  c[med]  <- "med"
  c[high] <- "high"
  return(c)
}
AruleData3<-data.frame(RuleFunction(AruleData2$Guest_Room_H),RuleFunction(AruleData2$Condition_Hotel_H),RuleFunction(AruleData2$Customer_SVC_H),AruleData2$NPS_Type)
View(AruleData3)
colnames(AruleData3)<-c("Guest Room Satisfaction","Hotel Condition","Customer Service","NPS_Type")
str(AruleData3)
randIndex2<-sample(1:dim(AruleData2)[1])
summary(randIndex2)
head(randIndex2)
CutPoint22_3<-floor(2*dim(AruleData2)[1]/3)
CutPoint22_3
AruleTrainData<-AruleData3[randIndex2[1:CutPoint22_3],]
AruleTestData<-AruleData3[randIndex2[(CutPoint22_3+1):dim(AruleData2)[1]],]
AruleKsvmOutput<-ksvm(NPS_Type~., data=AruleTrainData, kernel="rbfdot", kpar="automatic",C=5,cross=5,prob.model=TRUE)
AruleKsvmPred<-predict(AruleKsvmOutput, AruleTestData, type="votes")
AruleCompTable<-data.frame(AruleTestData[,4],AruleKsvmPred[1,])
table(AruleCompTable) # 79.4%
AruleNbModel<-naiveBayes(NPS_Type~.,data=AruleTrainData)
AruleNbPred<-predict(AruleNbModel,AruleTestData)
AruleTestData$NB_Prediction<-AruleNbPred
AruleTestData$nb_YesorNot<-ifelse(AruleTestData$NPS_Type==AruleTestData$NB_Prediction,"Correct","Wrong")
View(AruleTestData)
Aruleaccuaracy<-length(which(AruleTestData$nb_YesorNot=="Correct"))/length(AruleTestData$nb_YesorNot)
Aruleaccuaracy # 80.0%
NBPro<-length(which(AruleTestData$NB_Prediction=="Promoter"))
NBDet<-length(which(AruleTestData$NB_Prediction=="Detractor"))
NBNPSPre<-(NBPro-NBDet)/length(AruleTestData$NB_Prediction)
|
/IST687/IST687_SVM&NB.R
|
no_license
|
MathieuWmy/Portfolio
|
R
| false
| false
| 5,502
|
r
|
RegSvm_1<-fread(file="~/Desktop/intro to data/project/dataset/out-201501.csv",select=c(168,194,196,139,141,142,232))
RegSvm_2<-fread(file="~/Desktop/intro to data/project/dataset/out-201402.csv",select=c(168,194,196,139,141,142,232))
RegSvm_3<-fread(file="~/Desktop/intro to data/project/dataset/out-201403.csv",select=c(168,194,196,139,141,142,232))
RegSvm_4<-fread(file="~/Desktop/intro to data/project/dataset/out-201404.csv",select=c(168,194,196,139,141,142,232))
RegSvm_5<-fread(file="~/Desktop/intro to data/project/dataset/out-201405.csv",select=c(168,194,196,139,141,142,232))
RegSvm_6<-fread(file="~/Desktop/intro to data/project/dataset/out-201406.csv",select=c(168,194,196,139,141,142,232))
RegSvm_7<-fread(file="~/Desktop/intro to data/project/dataset/out-201407.csv",select=c(168,194,196,139,141,142,232))
RegSvm_8<-fread(file="~/Desktop/intro to data/project/dataset/out-201408.csv",select=c(168,194,196,139,141,142,232))
RegSvm_9<-fread(file="~/Desktop/intro to data/project/dataset/out-201409.csv",select=c(168,194,196,139,141,142,232))
RegSvm_10<-fread(file="~/Desktop/intro to data/project/dataset/out-201410.csv",select=c(168,194,196,139,141,142,232))
RegSvm_11<-fread(file="~/Desktop/intro to data/project/dataset/out-201411.csv",select=c(168,194,196,139,141,142,232))
RegSvm_12<-fread(file="~/Desktop/intro to data/project/dataset/out-201412.csv",select=c(168,194,196,139,141,142,232))
RegSvmData<-RegressionData[,c(2,4,9:12)]
summary(RegressionData)
RegSvmData[RegSvmData==""]<-NA
RegSvmData1<-na.omit(RegSvmData)
View(RegSvmData1)
RegSvmRow<-which((RegSvmData1$State_PL=="California") & (RegSvmData1$Type_PL=="Business") & (RegSvmData1$Location_PL=="Urban"))
RegSvmData2<-RegSvmData1[c(RegSvmRow),]
View(RegSvmData2)
summary(RegSvmData2)
RegSvmData2[,3:6]<-lapply(RegSvmData2[,3:6],factor)
randIndex1<-sample(1:dim(RegSvmData2)[1])
summary(randIndex1)
head(randIndex1)
CutPoint2_3<-floor(2*dim(RegSvmData2)[1]/3)
CutPoint2_3
RegTrainData<-RegSvmData2[randIndex1[1:CutPoint2_3],]
RegTrainData<-RegTrainData[,-3:-5]
RegTestData<-RegSvmData2[randIndex1[(CutPoint2_3+1):dim(RegSvmData2)[1]],]
RegTestData<-RegTestData[,-3:-5]
# Fit an RBF-kernel SVM classifier for NPS_Type on the training data.
# NOTE(review): install.packages() inside an analysis script re-installs on
# every run; usually this line is run once interactively instead.
install.packages("kernlab")
library(kernlab)
# C = 100 with 10-fold cross-validation; prob.model = TRUE enables class
# probability estimates.
RegKsvmOutput<-ksvm(NPS_Type~., data=RegTrainData, kernel="rbfdot", kpar="automatic",C=100,cross=10,prob.model=TRUE)
# type = "votes" returns a vote matrix; row 1 is used as the predicted class
# indicator below -- confirm the intended class/row mapping.
RegKsvmPred<-predict(RegKsvmOutput, RegTestData, type="votes")
# Cross-tabulate actual (column 3 of the test data) vs. predicted.
RegCompTable<-data.frame(RegTestData[,3],RegKsvmPred[1,])
table(RegCompTable)
# 75.0%
# Naive Bayes classifier for the same task, for comparison with the SVM.
# NOTE(review): repeated install.packages() calls -- see note above.
install.packages("e1071")
library(e1071)
install.packages("klaR")
library(klaR)
RegNbModel<-naiveBayes(NPS_Type~.,data=RegTrainData)
RegNbPred<-predict(RegNbModel,RegTestData)
# Attach predictions and a correct/wrong flag to the test set.
RegTestData$NB_Prediction<-RegNbPred
RegTestData$nb_YesorNot<-ifelse(RegTestData$NPS_Type==RegTestData$NB_Prediction,"Correct","Wrong")
View(RegTestData)
length(which(RegTestData$nb_YesorNot=="Correct"))
# Overall accuracy = correct predictions / test-set size.
Regaccuaracy<-length(which(RegTestData$nb_YesorNot=="Correct"))/length(RegTestData$nb_YesorNot)
Regaccuaracy # 78.1%
# Convert ratings back to numeric for plotting on continuous axes.
RegTestData$Guest_Room_H<-as.numeric(RegTestData$Guest_Room_H)
RegTestData$Condition_Hotel_H<-as.numeric(RegTestData$Condition_Hotel_H)
RegTestData$nb_YesorNot<-as.factor(RegTestData$nb_YesorNot)
# Scatter plots of guest-room vs. hotel-condition scores, coloured by actual
# class, predicted class, and correctness respectively.
# NOTE(review): ggplot2 must be attached earlier in the script; also note the
# plots are assigned but never printed in this chunk.
RegPlot1<-ggplot(data=RegTestData)+geom_point(aes(x=RegTestData$Guest_Room_H,y=RegTestData$Condition_Hotel_H,color=RegTestData$NPS_Type))
RegPlot2<-ggplot(data=RegTestData)+geom_point(aes(x=RegTestData$Guest_Room_H,y=RegTestData$Condition_Hotel_H,color=RegTestData$NB_Prediction))
RegPlot3<-ggplot(data=RegTestData)+geom_point(aes(x=RegTestData$Guest_Room_H,y=RegTestData$Condition_Hotel_H,color=RegTestData$nb_YesorNot))
length(RegTestData$NPS_Type)
# Second data set (one extra column, position 5) for the discretised /
# association-rule-style analysis; same cleaning steps as above.
AruleData<-RegressionData[,c(2,4,5,9:12)]
summary(AruleData)
# Treat empty strings as missing, then drop incomplete rows.
AruleData[AruleData==""]<-NA
AruleData1<-na.omit(AruleData)
View(AruleData1)
# Same population filter: urban business hotels in California.
AruleSvmRow<-which((AruleData1$State_PL=="California") & (AruleData1$Type_PL=="Business") & (AruleData1$Location_PL=="Urban"))
AruleData2<-AruleData1[c(AruleSvmRow),]
View(AruleData2)
summary(AruleData2)
# Discretise a 1-10 satisfaction score into three bands:
#   "low"  for scores 1-6
#   "med"  for scores 7-8
#   "high" for scores 9 or 10
#
# Values outside 1-10 (and non-integer scores) are passed through unchanged,
# as their character representation. Always returns a character vector of the
# same length as the input.
#
# Fix: the original mutated `c` in place, which coerced the whole vector to
# character after the first assignment; the later `>=`/`<=` tests were then
# lexicographic string comparisons that only classified 1-10 correctly by
# accident (e.g. a stray 70 would have matched "med"). All three conditions
# are now evaluated on the untouched numeric input before any assignment.
RuleFunction<-function(c)
{
  out <- as.character(c)
  out[c >= 1 & c <= 6] <- "low"
  out[c >= 7 & c <= 8] <- "med"
  out[c == 9 | c == 10] <- "high"
  return(out)
}
# Re-code the three rating columns into low/med/high bands and keep NPS_Type.
AruleData3<-data.frame(RuleFunction(AruleData2$Guest_Room_H),RuleFunction(AruleData2$Condition_Hotel_H),RuleFunction(AruleData2$Customer_SVC_H),AruleData2$NPS_Type)
View(AruleData3)
colnames(AruleData3)<-c("Guest Room Satisfaction","Hotel Condition","Customer Service","NPS_Type")
str(AruleData3)
# Random 2/3 train / 1/3 test split on the discretised data.
# NOTE(review): again no set.seed(), so the split is not reproducible.
randIndex2<-sample(1:dim(AruleData2)[1])
summary(randIndex2)
head(randIndex2)
CutPoint22_3<-floor(2*dim(AruleData2)[1]/3)
CutPoint22_3
AruleTrainData<-AruleData3[randIndex2[1:CutPoint22_3],]
AruleTestData<-AruleData3[randIndex2[(CutPoint22_3+1):dim(AruleData2)[1]],]
# RBF-kernel SVM on the banded features (C = 5, 5-fold CV).
AruleKsvmOutput<-ksvm(NPS_Type~., data=AruleTrainData, kernel="rbfdot", kpar="automatic",C=5,cross=5,prob.model=TRUE)
AruleKsvmPred<-predict(AruleKsvmOutput, AruleTestData, type="votes")
# Cross-tabulate actual (column 4) vs. predicted.
AruleCompTable<-data.frame(AruleTestData[,4],AruleKsvmPred[1,])
table(AruleCompTable) # 79.4%
# Naive Bayes on the same banded features.
AruleNbModel<-naiveBayes(NPS_Type~.,data=AruleTrainData)
AruleNbPred<-predict(AruleNbModel,AruleTestData)
AruleTestData$NB_Prediction<-AruleNbPred
AruleTestData$nb_YesorNot<-ifelse(AruleTestData$NPS_Type==AruleTestData$NB_Prediction,"Correct","Wrong")
View(AruleTestData)
Aruleaccuaracy<-length(which(AruleTestData$nb_YesorNot=="Correct"))/length(AruleTestData$nb_YesorNot)
Aruleaccuaracy # 80.0%
# Predicted NPS score = (promoters - detractors) / total predictions.
NBPro<-length(which(AruleTestData$NB_Prediction=="Promoter"))
NBDet<-length(which(AruleTestData$NB_Prediction=="Detractor"))
NBNPSPre<-(NBPro-NBDet)/length(AruleTestData$NB_Prediction)
|
# Input-validation tests for translink_samplesize().
# sensitivity = 0 should be rejected (presumably the valid range excludes 0
# -- confirm against the function's documentation).
test_that("translink_samplesize fails when sensitivity 0", {
  expect_error(translink_samplesize(sensitivity = 0, specificity = 0.995, N = 100,
                                    R = 1, tdr = 0.75))
})
# tdr values outside the unit interval (above 1 or negative) must error.
test_that("translink_samplesize fails when parameters invalid", {
  expect_error(translink_samplesize(sensitivity = 0.99, specificity = 0.995, N = 100,
                                    R = 1, tdr = 2))
  expect_error(translink_samplesize(sensitivity = 0.99, specificity = 0.995, N = 100,
                                    R = 1, tdr = -2))
})
|
/tests/testthat/test-translink_samplesize.R
|
no_license
|
HopkinsIDD/phylosamp
|
R
| false
| false
| 480
|
r
|
# Input-validation tests for translink_samplesize().
# sensitivity = 0 should be rejected (presumably the valid range excludes 0
# -- confirm against the function's documentation).
test_that("translink_samplesize fails when sensitivity 0", {
  expect_error(translink_samplesize(sensitivity = 0, specificity = 0.995, N = 100,
                                    R = 1, tdr = 0.75))
})
# tdr values outside the unit interval (above 1 or negative) must error.
test_that("translink_samplesize fails when parameters invalid", {
  expect_error(translink_samplesize(sensitivity = 0.99, specificity = 0.995, N = 100,
                                    R = 1, tdr = 2))
  expect_error(translink_samplesize(sensitivity = 0.99, specificity = 0.995, N = 100,
                                    R = 1, tdr = -2))
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/payoff.R
\name{decision}
\alias{decision}
\title{Calculate Optimal Decision}
\usage{
decision(x)
}
\arguments{
\item{x}{}
}
\description{
Depends on how we calculate the payoff function.
}
|
/man/decision.Rd
|
no_license
|
Tetraktys10/treeSimR
|
R
| false
| true
| 268
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/payoff.R
\name{decision}
\alias{decision}
\title{Calculate Optimal Decision}
\usage{
decision(x)
}
\arguments{
\item{x}{}
}
\description{
Depends on how we calculate the payoff function.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stateface.R
\name{geom_stateface}
\alias{geom_stateface}
\title{Use StateFace font for labeling of States in charts}
\source{
\url{https://propublica.github.io/stateface/}
}
\usage{
geom_stateface(
mapping = NULL,
data = NULL,
stat = "identity",
position = "identity",
...,
parse = FALSE,
nudge_x = 0,
nudge_y = 0,
check_overlap = FALSE,
na.rm = FALSE,
show.legend = NA,
inherit.aes = TRUE
)
}
\arguments{
\item{mapping}{Set of aesthetic mappings created by \code{\link[ggplot2:aes]{aes()}} or
\code{\link[ggplot2:aes_]{aes_()}}. If specified and \code{inherit.aes = TRUE} (the
default), it is combined with the default mapping at the top level of the
plot. You must supply \code{mapping} if there is no plot mapping.}
\item{data}{The data to be displayed in this layer. There are three
options:
If \code{NULL}, the default, the data is inherited from the plot
data as specified in the call to \code{\link[ggplot2:ggplot]{ggplot()}}.
A \code{data.frame}, or other object, will override the plot
data. All objects will be fortified to produce a data frame. See
\code{\link[ggplot2:fortify]{fortify()}} for which variables will be created.
A \code{function} will be called with a single argument,
the plot data. The return value must be a \code{data.frame}, and
will be used as the layer data. A \code{function} can be created
from a \code{formula} (e.g. \code{~ head(.x, 10)}).}
\item{stat}{The statistical transformation to use on the data for this
layer, as a string.}
\item{position}{Position adjustment, either as a string, or the result of
a call to a position adjustment function. Cannot be jointly specified with
\code{nudge_x} or \code{nudge_y}.}
\item{...}{Other arguments passed on to \code{\link[ggplot2:layer]{layer()}}. These are
often aesthetics, used to set an aesthetic to a fixed value, like
\code{colour = "red"} or \code{size = 3}. They may also be parameters
to the paired geom/stat.}
\item{parse}{If \code{TRUE}, the labels will be parsed into expressions and
displayed as described in \code{?plotmath}.}
\item{nudge_x}{Horizontal and vertical adjustment to nudge labels by.
Useful for offsetting text from points, particularly on discrete scales.
Cannot be jointly specified with \code{position}.}
\item{nudge_y}{Horizontal and vertical adjustment to nudge labels by.
Useful for offsetting text from points, particularly on discrete scales.
Cannot be jointly specified with \code{position}.}
\item{check_overlap}{If \code{TRUE}, text that overlaps previous text in the
same layer will not be plotted. \code{check_overlap} happens at draw time and in
the order of the data. Therefore data should be arranged by the label
column before calling \code{geom_label()} or \code{geom_text()}.}
\item{na.rm}{If \code{FALSE}, the default, missing values are removed with
a warning. If \code{TRUE}, missing values are silently removed.}
\item{show.legend}{logical. Should this layer be included in the legends?
\code{NA}, the default, includes if any aesthetics are mapped.
\code{FALSE} never includes, and \code{TRUE} always includes.
It can also be a named logical vector to finely select the aesthetics to
display.}
\item{inherit.aes}{If \code{FALSE}, overrides the default aesthetics,
rather than combining with them. This is most useful for helper functions
that define both data and aesthetics and shouldn't inherit behaviour from
the default plot specification, e.g. \code{\link[ggplot2:borders]{borders()}}.}
}
\description{
Allows you to use ProPublica's \href{https://propublica.github.io/stateface/}{StateFace font}
in charts.
}
\examples{
\dontrun{
library(ggplot2)
ggplot(usa_arrests, aes(murder, assault, label = state)) +
geom_stateface()
ggplot(usa_arrests, aes(murder, assault, label = state, color = urban_pop)) +
geom_stateface() +
scale_color_viridis_c()
}
}
|
/man/geom_stateface.Rd
|
permissive
|
EmilHvitfeldt/fontscales
|
R
| false
| true
| 3,912
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stateface.R
\name{geom_stateface}
\alias{geom_stateface}
\title{Use StateFace font for labeling of States in charts}
\source{
\url{https://propublica.github.io/stateface/}
}
\usage{
geom_stateface(
mapping = NULL,
data = NULL,
stat = "identity",
position = "identity",
...,
parse = FALSE,
nudge_x = 0,
nudge_y = 0,
check_overlap = FALSE,
na.rm = FALSE,
show.legend = NA,
inherit.aes = TRUE
)
}
\arguments{
\item{mapping}{Set of aesthetic mappings created by \code{\link[ggplot2:aes]{aes()}} or
\code{\link[ggplot2:aes_]{aes_()}}. If specified and \code{inherit.aes = TRUE} (the
default), it is combined with the default mapping at the top level of the
plot. You must supply \code{mapping} if there is no plot mapping.}
\item{data}{The data to be displayed in this layer. There are three
options:
If \code{NULL}, the default, the data is inherited from the plot
data as specified in the call to \code{\link[ggplot2:ggplot]{ggplot()}}.
A \code{data.frame}, or other object, will override the plot
data. All objects will be fortified to produce a data frame. See
\code{\link[ggplot2:fortify]{fortify()}} for which variables will be created.
A \code{function} will be called with a single argument,
the plot data. The return value must be a \code{data.frame}, and
will be used as the layer data. A \code{function} can be created
from a \code{formula} (e.g. \code{~ head(.x, 10)}).}
\item{stat}{The statistical transformation to use on the data for this
layer, as a string.}
\item{position}{Position adjustment, either as a string, or the result of
a call to a position adjustment function. Cannot be jointy specified with
\code{nudge_x} or \code{nudge_y}.}
\item{...}{Other arguments passed on to \code{\link[ggplot2:layer]{layer()}}. These are
often aesthetics, used to set an aesthetic to a fixed value, like
\code{colour = "red"} or \code{size = 3}. They may also be parameters
to the paired geom/stat.}
\item{parse}{If \code{TRUE}, the labels will be parsed into expressions and
displayed as described in \code{?plotmath}.}
\item{nudge_x}{Horizontal and vertical adjustment to nudge labels by.
Useful for offsetting text from points, particularly on discrete scales.
Cannot be jointly specified with \code{position}.}
\item{nudge_y}{Horizontal and vertical adjustment to nudge labels by.
Useful for offsetting text from points, particularly on discrete scales.
Cannot be jointly specified with \code{position}.}
\item{check_overlap}{If \code{TRUE}, text that overlaps previous text in the
same layer will not be plotted. \code{check_overlap} happens at draw time and in
the order of the data. Therefore data should be arranged by the label
column before calling \code{geom_label()} or \code{geom_text()}.}
\item{na.rm}{If \code{FALSE}, the default, missing values are removed with
a warning. If \code{TRUE}, missing values are silently removed.}
\item{show.legend}{logical. Should this layer be included in the legends?
\code{NA}, the default, includes if any aesthetics are mapped.
\code{FALSE} never includes, and \code{TRUE} always includes.
It can also be a named logical vector to finely select the aesthetics to
display.}
\item{inherit.aes}{If \code{FALSE}, overrides the default aesthetics,
rather than combining with them. This is most useful for helper functions
that define both data and aesthetics and shouldn't inherit behaviour from
the default plot specification, e.g. \code{\link[ggplot2:borders]{borders()}}.}
}
\description{
Allows you to use ProPublica's \href{https://propublica.github.io/stateface/}{StateFace font}
in charts.
}
\examples{
\dontrun{
library(ggplot2)
ggplot(usa_arrests, aes(murder, assault, label = state)) +
geom_stateface()
ggplot(usa_arrests, aes(murder, assault, label = state, color = urban_pop)) +
geom_stateface() +
scale_color_viridis_c()
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllClass.R
\name{handleFlags}
\alias{handleFlags}
\title{Handle flags in oce objects}
\usage{
handleFlags(object, flags, actions, debug)
}
\arguments{
\item{object}{An object of \code{\link{oce}}.}
\item{flags}{An optional \code{\link{list}} containing (a)
items with names of entries in the \code{data} slot of \code{object},
or (b) a single unnamed item. In the first case, the attention is
focussed on the named items, while in the second case the
all the data in the \code{object}'s \code{data} slot are examined.
Each element in the list must be set to an integer or vector of integers,
specifying conditions to be met before actions are to be taken.
See \dQuote{Details} for the default that is used if \code{flags} is not supplied.}
\item{actions}{An optional \code{\link{list}} that contains items with
names that match those in the \code{flags} argument. If \code{actions}
is not supplied, the default will be to set all values identified by
\code{flags} to \code{NA}; this can also be specified by
specifying \code{actions=list("NA")}. It is also possible to specify
functions that calculate replacement values. These are provided
with \code{object} as the single argument, and must return a
replacement for the data item in question.
See \dQuote{Details} for the default that is used if \code{actions} is not supplied.}
\item{debug}{An optional integer specifying the degree of debugging, with
value 0 meaning to skip debugging and 1 or higher meaning to print some
information about the arguments and the data. It is usually a good idea to set
this to 1 for initial work with a dataset, to see which flags are being
handled for each data item. If not supplied, this defaults to the value of
\code{\link{getOption}("oceDebug")}.}
}
\description{
Data-quality flags are stored in the \code{metadata}
slot of \code{\link{oce-class}} objects in a
\code{\link{list}} named \code{flags}.
The present function (a generic that has specialized versions
for various data classes) provides a way to
manipulate the core data based on
the data-quality flags. For example, a common operation is to replace suspicious
or erroneous data with \code{NA}.
If \code{metadata$flags} in the object supplied as the first argument
is empty, then that object is returned, unaltered.
Otherwise, \code{handleFlags} analyses the data-quality flags within
the object, in relation to the \code{flags} argument, and interprets
the \code{action} argument to select an action to be applied to matched
data.
Reasonable defaults are used if \code{flags} and \code{actions}
are not supplied (see \sQuote{Details}),
but different schemes are used in different
data archives, so it is risky to rely on these defaults.
It is usually necessary to tailor \code{flags} and \code{actions}
to the data and the analysis goals.
}
\details{
Each specialized variant of this function has its own defaults
for \code{flags} and \code{actions}.
}
\section{Implementation status}{
\code{handleFlags} is a new function as of March 2016,
and it will probably continue to evolve through the rest of 2016.
Users are asked to be patient, and to provide help by
looking at the documentation and telling the developers
whether the planned functioning seems reasonable.
}
\seealso{
Other functions that handle data-quality flags: \code{\link{handleFlags,argo-method}},
\code{\link{handleFlags,ctd-method}},
\code{\link{handleFlags,section-method}}
}
|
/pkgs/oce/man/handleFlags.Rd
|
no_license
|
vaguiar/EDAV_Project_2017
|
R
| false
| true
| 3,495
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllClass.R
\name{handleFlags}
\alias{handleFlags}
\title{Handle flags in oce objects}
\usage{
handleFlags(object, flags, actions, debug)
}
\arguments{
\item{object}{An object of \code{\link{oce}}.}
\item{flags}{An optional \code{\link{list}} containing (a)
items with names of entries in the \code{data} slot of \code{object},
or (b) a single unnamed item. In the first case, the attention is
focussed on the named items, while in the second case the
all the data in the \code{object}'s \code{data} slot are examined.
Each element in the list must be set to an integer or vector of integers,
specifying conditions to be met before actions are to be taken.
See \dQuote{Details} for the default that is used if \code{flags} is not supplied.}
\item{actions}{An optional \code{\link{list}} that contains items with
names that match those in the \code{flags} argument. If \code{actions}
is not supplied, the default will be to set all values identified by
\code{flags} to \code{NA}; this can also be specified by
specifying \code{actions=list("NA")}. It is also possible to specify
functions that calculate replacement values. These are provided
with \code{object} as the single argument, and must return a
replacement for the data item in question.
See \dQuote{Details} for the default that is used if \code{actions} is not supplied.}
\item{debug}{An optional integer specifying the degree of debugging, with
value 0 meaning to skip debugging and 1 or higher meaning to print some
information about the arguments and the data. It is usually a good idea to set
this to 1 for initial work with a dataset, to see which flags are being
handled for each data item. If not supplied, this defaults to the value of
\code{\link{getOption}("oceDebug")}.}
}
\description{
Data-quality flags are stored in the \code{metadata}
slot of \code{\link{oce-class}} objects in a
\code{\link{list}} named \code{flags}.
The present function (a generic that has specialized versions
for various data classes) provides a way to
manipulate the core data based on
the data-quality flags. For example, a common operation is to replace suspicious
or erroneous data with \code{NA}.
If \code{metadata$flags} in the object supplied as the first argument
is empty, then that object is returned, unaltered.
Otherwise, \code{handleFlags} analyses the data-quality flags within
the object, in relation to the \code{flags} argument, and interprets
the \code{action} argument to select an action to be applied to matched
data.
Reasonable defaults are used if \code{flags} and \code{actions}
are not supplied (see \sQuote{Details}),
but different schemes are used in different
data archives, so it is risky to rely on these defaults.
It is usually necessary to tailor \code{flags} and \code{actions}
to the data and the analysis goals.
}
\details{
Each specialized variant of this function has its own defaults
for \code{flags} and \code{actions}.
}
\section{Implementation status}{
\code{handleFlags} is a new function as of March 2016,
and it will probably continue to evolve through the rest of 2016.
Users are asked to be patient, and to provide help by
looking at the documentation and telling the developers
whether the planned functioning seems reasonable.
}
\seealso{
Other functions that handle data-quality flags: \code{\link{handleFlags,argo-method}},
\code{\link{handleFlags,ctd-method}},
\code{\link{handleFlags,section-method}}
}
|
\name{RidgeFused}
\alias{RidgeFused}
\title{Ridged Fused Inverse Covariance Matrix Estimation}
\description{Calculates the ridge fusion precision estimator for multiple classes}
\usage{
RidgeFused(S,lambda1,lambda2,nc,tol=10^(-7), maxiter=1e3,warm.start=NULL,scale=FALSE)
}
\arguments{
The arguments are:
\item{S}{A list of length J that contains the sample covariance estimators of each class }
\item{lambda1}{ Ridge tuning parameter, must be greater than or equal to 0}
\item{lambda2}{Ridge Fusion tuning parameter, must be greater than or equal to 0}
\item{nc}{ A vector of length J that contains the sample size of each class }
\item{tol}{Convergence tolerance for blockwise coordinate descent algorithm}
\item{maxiter}{The number of iterations the algorithm will run if convergence tolerance is not met}
\item{warm.start}{If \code{NULL} no warm start is used. If initialized with a list of positive definite inverse covariance matrix estimates of length J, will use them as initialization for the algorithm.}
\item{scale}{If \code{FALSE} scale invariant method is used}
}
\value{
An object of class \code{RidgeFusion}, basically a list including elements
\item{Omega}{ a list where each element is the inverse covariance matrix estimate for the corresponding element of S }
\item{Ridge}{ lambda1 }
\item{FusedRidge}{lambda2 }
\item{iter}{Number of iterations until convergence}
}
\author{Brad Price}
\examples{
## Creating a toy example with 5 variables
library(mvtnorm)
set.seed(526)
p=5
Sig1=matrix(0,p,p)
for(j in 1:p){
for(i in j:p){
Sig1[j,i]=.7^abs(i-j)
Sig1[i,j]=Sig1[j,i]
}
}
Sig2=diag(c(rep(2,p-5),rep(1,5)),p,p)%*%Sig1%*%diag(c(rep(2,p-5),rep(1,5)),p,p)
X1=rmvnorm(100,rep(2*log(p)/p,p),Sig1)
Y=rmvnorm(100,,Sig2)
## Creating a list to use as S
S=list(0,0)
S[[1]]=(99/100)*cov(X1)
S[[2]]=(99/100)*cov(Y)
## Creating the vector of sample sizes
nc2=c(100,100)
## Running RidgeFused scale invariant method for tuning parameters lambda1=1 ,lambda2=2
A=RidgeFused(S,1,2,nc2,scale=TRUE)
A
names(A)
}
\keyword{Inverse covariance matrix estimation}
|
/man/RidgeFused.Rd
|
no_license
|
cran/RidgeFusion
|
R
| false
| false
| 2,175
|
rd
|
\name{RidgeFused}
\alias{RidgeFused}
\title{Ridged Fused Inverse Covariance Matrix Estimation}
\description{Calculates the ridge fusion precision estimator for multiple classes}
\usage{
RidgeFused(S,lambda1,lambda2,nc,tol=10^(-7), maxiter=1e3,warm.start=NULL,scale=FALSE)
}
\arguments{
The arguments are:
\item{S}{A list of length J that contains the sample covariance estimators of each class }
\item{lambda1}{ Ridge tuning parameter, must be greater than or equal to 0}
\item{lambda2}{Ridge Fusion tuning parameter, must be greater than or equal to 0}
\item{nc}{ A vector of length J that contains the sample size of each class }
\item{tol}{Convergence tolerance for blockwise coordinate descent algorithm}
\item{maxiter}{The number of iterations the algorithm will run if convergence tolerance is not met}
\item{warm.start}{If \code{NULL} no warm start is used. If initialized with a list of positive definite inverse covariance matrix estimates of length J, will use them as initialization for the algorithm.}
\item{scale}{If \code{FALSE} scale invariant method is used}
}
\value{
An object of class \code{RidgeFusion}, basically a list including elements
\item{Omega}{ a list where each element is the inverse covariance matrix estimate for the corresponding element of S }
\item{Ridge}{ lambda1 }
\item{FusedRidge}{lambda2 }
\item{iter}{Number of iterations until convergence}
}
\author{Brad Price}
\examples{
## Creating a toy example with 5 variables
library(mvtnorm)
set.seed(526)
p=5
Sig1=matrix(0,p,p)
for(j in 1:p){
for(i in j:p){
Sig1[j,i]=.7^abs(i-j)
Sig1[i,j]=Sig1[j,i]
}
}
Sig2=diag(c(rep(2,p-5),rep(1,5)),p,p)%*%Sig1%*%diag(c(rep(2,p-5),rep(1,5)),p,p)
X1=rmvnorm(100,rep(2*log(p)/p,p),Sig1)
Y=rmvnorm(100,,Sig2)
## Creating a list to use as S
S=list(0,0)
S[[1]]=(99/100)*cov(X1)
S[[2]]=(99/100)*cov(Y)
## Creating the vector of sample sizes
nc2=c(100,100)
## Running RidgeFused scale invariant method for tuning parameters lambda1=1 ,lambda2=2
A=RidgeFused(S,1,2,nc2,scale=TRUE)
A
names(A)
}
\keyword{Inverse covariance matrix estimation}
|
## TODO:
## 1. get a list of stations
## 2. get a list of reports and matching headers / units
## 3. better documentation / testing
## 4. work with Deb / programmers to get compressed output
##
## see: http://www.wcc.nrcs.usda.gov/web_service/awdb_webservice_announcements.htm
## http://www.wcc.nrcs.usda.gov/web_service/AWDB_Web_Service_Reference.htm
## http://www.wcc.nrcs.usda.gov/report_generator/WebReportScripting.htm
##
## 5. we will need to address the potential for multiple sensor ID per type/depth
## examples in:
## https://github.com/ncss-tech/soilDB/issues/14
##
## 6. use API vs. scraping report output
## https://github.com/bluegreen-labs/snotelr/blob/master/R/snotel_download.r#L65
## --> this would require enumeration of sensors, etc.
### sensor codes: http://wcc.sc.egov.usda.gov/nwcc/sensors
##
## ideas:
## https://github.com/gunnarleffler/getSnotel
##
## site images:
## https://www.wcc.nrcs.usda.gov/siteimages/462.jpg
##
## site notes:
## https://wcc.sc.egov.usda.gov/nwcc/sitenotes?sitenum=462
##
#' @title Get Daily Climate Data from USDA-NRCS SCAN (Soil Climate Analysis Network) Stations
#'
#' @description Query soil/climate data from USDA-NRCS SCAN Stations.
#'
#' @details Possible above and below ground sensor types include: 'SMS' (soil moisture), 'STO' (soil temperature), 'SAL' (salinity), 'TAVG' (daily average air temperature), 'TMIN' (daily minimum air temperature), 'TMAX' (daily maximum air temperature), 'PRCP' (daily precipitation), 'PREC' (daily precipitation), 'SNWD' (snow depth), 'WTEQ' (snow water equivalent),'WDIRV' (wind direction), 'WSPDV' (wind speed), 'LRADT' (solar radiation/langley total).
#'
#' This function converts below-ground sensor depth from inches to cm. All temperature values are reported as degrees C. Precipitation, snow depth, and snow water content are reported as *inches*.
#'
#' ## SCAN Sensors
#'
#' All Soil Climate Analysis Network (SCAN) sensor measurements are reported hourly.
#'
#' |Element Measured |Sensor Type |Precision |
#' |:------------------------|:----------------------------------------------------------------------------------------------------------|:---------------------------|
#' |Air Temperature |Shielded thermistor |0.1 degrees C |
#' |Barometric Pressure |Silicon capacitive pressure sensor |1% |
#' |Precipitation            |Storage-type gage or tipping bucket                                                                          |Storage: 0.1 inches; Tipping bucket: 0.01 inches|
#' |Relative Humidity |Thin film capacitance-type sensor |1% |
#' |Snow Depth |Sonic sensor (not on all stations) |0.5 inches |
#' |Snow Water Content |Snow pillow device and a pressure transducer (not on all stations) |0.1 inches |
#' |Soil Moisture |Dielectric constant measuring device. Typical measurements are at 2", 4", 8", 20", and 40" where possible. |0.50% |
#' |Soil Temperature |Encapsulated thermistor. Typical measurements are at 2", 4", 8", 20", and 40" where possible. |0.1 degrees C |
#' |Solar Radiation |Pyranometer |0.01 watts per meter |
#' |Wind Speed and Direction |Propeller-type anemometer                                                                                    |Speed: 0.1 miles per hour; Direction: 1 degree|
#'
#' ## SNOTEL Sensors
#'
#' All Snow Telemetry (SNOTEL) sensor measurements are reported daily.
#'
#' |Element Measured |Sensor Type |Precision |
#' |:------------------------|:----------------------------------------------------------------------------------------------------------|:---------------------------|
#' |Air Temperature |Shielded thermistor |0.1 degrees C |
#' |Barometric Pressure |Silicon capacitive pressure sensor |1% |
#' |Precipitation |Storage-type gage or tipping bucket |Storage: 0.1 inches; Tipping bucket: 0.01 inches|
#' |Relative Humidity |Thin film capacitance-type sensor |1% |
#' |Snow Depth |Sonic sensor |0.5 inches |
#' |Snow Water Content |Snow pillow device and a pressure transducer |0.1 inches |
#' |Soil Moisture |Dielectric constant measuring device. Typical measurements are at 2", 4", 8", 20", and 40" where possible. |0.50% |
#' |Soil Temperature |Encapsulated thermistor. Typical measurements are at 2", 4", 8", 20", and 40" where possible. |0.1 degrees C |
#' |Solar Radiation |Pyranometer |0.01 watts per meter |
#' |Wind Speed and Direction |Propeller-type anemometer                                                                                    |Speed: 0.1 miles per hour; Direction: 1 degree|
#'
#' See the [fetchSCAN tutorial](http://ncss-tech.github.io/AQP/soilDB/fetchSCAN-demo.html) for additional usage and visualization examples.
#'
#' @references See the [Soil Climate Analysis Network](https://www.nrcs.usda.gov/resources/data-and-reports/soil-climate-analysis-network) home page for more information on the SCAN program, and links to other associated programs such as SNOTEL, at the National Weather and Climate Center. You can get information on available web services, as well as interactive maps of snow water equivalent, precipitation and streamflow.
#'
#' @param site.code a vector of site codes. If `NULL` `SCAN_site_metadata()` returns metadata for all SCAN sites.
#' @param year a vector of years
#' @param report report name, single value only; default `'SCAN'`, other example options include individual sensor codes, e.g. `'SMS'` for Soil Moisture Storage, `'TEMP'` for temperature
#' @param timeseries either `'Daily'` or `'Hourly'`
#' @param ... additional arguments. May include `intervalType`, `format`, `sitenum`, `interval`, `year`, `month`. Presence of additional arguments bypasses default batching functionality provided in the function and submits a 'raw' request to the API form.
#' @return a `list` of `data.frame` objects, where each element name is a sensor type, plus a `metadata` table; different `report` types change the types of sensor data returned. `SCAN_sensor_metadata()` and `SCAN_site_metadata()` return a `data.frame`. `NULL` on bad request.
#' @author D.E. Beaudette, A.G. Brown
#' @keywords manip
#' @examples
#' \dontrun{
#' # get data
#' x <- try(fetchSCAN(site.code=c(356, 2072), year=c(2015, 2016)))
#' str(x)
#'
#' # get sensor metadata
#' m <- SCAN_sensor_metadata(site.code=c(356, 2072))
#'
#' # get site metadata
#' m <- SCAN_site_metadata(site.code=c(356, 2072))
#'
#' # get hourly data (396315 records)
#' # x <- try(fetchSCAN(site.code=c(356, 2072), year=c(2015, 2016), timeseries = "Hourly"))
#' }
#' @rdname fetchSCAN
#' @export
fetchSCAN <- function(site.code = NULL, year = NULL, report = 'SCAN', timeseries = c('Daily', 'Hourly'), ...) {
  # check for required packages
  if (!requireNamespace('httr', quietly = TRUE))
    stop('please install the `httr` package', call. = FALSE)
  # sanity check on granularity
  # required to flatten possible arguments to single value
  timeseries <- match.arg(timeseries)
  ## allow for arbitrary queries using `req` argument or additional arguments via ...
  l.extra <- list(...)
  # TODO do this after expansion to iterate over site.code*year + ???
  l <- c(sitenum = site.code, year = year, report = report, timeseries = timeseries, l.extra)
  # "raw request" path: any extra arguments bypass the batching logic below
  # and are submitted to the API form as a single request
  if (length(l.extra) > 0) {
    if ("req" %in% names(l)) {
      .Deprecated("`req` argument is deprecated; custom form inputs can be specified as named arguments via `...`")
      l <- l[["req"]]
    } else {
      l <- unlist(l)
    }
    return(.get_SCAN_data(req = l))
  }
  # init list to store results
  res <- list()
  # add metadata from cached table in soilDB
  # NOTE(review): site.code is re-derived from the metadata table, so any
  # requested codes absent from the cached metadata are silently dropped
  m <- SCAN_site_metadata(site.code)
  site.code <- m$Site
  # all possible combinations of site codes and year | single report and timeseries type
  g <- expand.grid(s = site.code, y = year, r = report, dt = timeseries)
  # get a list of request lists
  req.list <- mapply(.make_SCAN_req, s = g$s, y = g$y, r = g$r, dt = g$dt, SIMPLIFY = FALSE)
  # format raw data into a list of lists:
  # sensor suite -> site number -> year
  d.list <- list()
  # save: sensor suite -> site number -> year
  sensors <- c('SMS', 'STO', 'SAL', 'TAVG', 'TMIN',
               'TMAX', 'PRCP', 'PREC', 'SNWD', 'WTEQ',
               'WDIRV', 'WSPDV', 'LRADT')
  ## TODO: consider submitting queries in parallel, possible at the inner for-loop, over sensors
  for (i in req.list) {
    # when there are no data, result is an empty data.frame
    d <- try(.get_SCAN_data(i), silent = TRUE)
    # errors occur in exceptional situations
    # so we terminate the request loop
    # (rather than possibly incomplete results)
    if (inherits(d, 'try-error')) {
      message(d)
      return(NULL)
    }
    for (sensor.i in sensors) {
      site.i <- as.character(i$sitenum)
      year.i <- as.character(i$year)
      # NULL download result: store a zero-row placeholder so the
      # rbindlist() flattening below sees a consistent column structure
      if (is.null(d)) {
        res <- data.frame(Site = integer(0),
                          Date = as.Date(numeric(0),
                                         origin = "1970-01-01"),
                          Time = character(0),
                          water_year = numeric(0),
                          water_day = integer(0),
                          value = numeric(0),
                          depth = numeric(0),
                          sensor.id = integer(0),
                          row.names = NULL,
                          stringsAsFactors = FALSE)
      } else {
        res <- .formatSCAN_soil_sensor_suites(d, code = sensor.i)
      }
      d.list[[sensor.i]][[site.i]][[year.i]] <- res
    }
  }
  # iterate over sensors
  for (sensor.i in sensors) {
    # flatten individual sensors over years, by site number
    r.i <- data.table::rbindlist(lapply(d.list[[sensor.i]], data.table::rbindlist, fill = TRUE), fill = TRUE)
    rownames(r.i) <- NULL
    # res should be a list
    # (it may still hold the placeholder data.frame from the loop above)
    if (inherits(res, 'data.frame')) {
      res <- list()
    }
    res[[sensor.i]] <- as.data.frame(r.i)
  }
  # report object size
  if (length(res) > 0) {
    res.size <- round(object.size(res) / 1024 / 1024, 2)
    res.rows <- sum(sapply(res, nrow), na.rm = TRUE)
    message(paste(res.rows, ' records (', res.size, ' Mb transferred)', sep = ''))
  } else message('query returned no data')
  res[['metadata']] <- m
  return(res)
}
# combine soil sensor suites into stackable format
#
# d: data.frame of raw SCAN records, one column per sensor (e.g. 'SMS.I1_2')
# code: single sensor prefix to extract (e.g. 'SMS', 'TAVG')
#
# returns a long-format data.frame with columns:
#   Site, Date, Time, water_year, water_day, value, depth, sensor.id
# or NULL when `d` contains no columns matching `code`
.formatSCAN_soil_sensor_suites <- function(d, code) {
  # hack to quiet R CMD check (data.table non-standard evaluation)
  value <- NULL
  stopifnot(length(code) == 1)
  # locate named columns
  d.cols <- grep(code, names(d))
  # return NULL if no data
  if (length(d.cols) == 0) {
    return(NULL)
  }
  ## https://github.com/ncss-tech/soilDB/issues/14
  ## there may be multiple above-ground sensors (takes the first)
  if (length(d.cols) > 1 && code %in% c('TAVG', 'TMIN', 'TMAX', 'PRCP', 'PREC',
                                        'SNWD', 'WTEQ', 'WDIRV', 'WSPDV', 'LRADT')) {
    message(paste0('multiple sensors per site [site ', d$Site[1], '] ',
                   paste0(names(d)[d.cols], collapse = ',')))
    # use only the first sensor
    d.cols <- d.cols[1]
  }
  # coerce all values to double (avoids data.table warnings)
  mvars <- unique(names(d)[d.cols])
  d[mvars] <- lapply(d[mvars], as.double)
  # convert to long format
  d.long <- data.table::melt(
    data.table::as.data.table(d),
    id.vars = c('Site', 'Date', 'Time'),
    measure.vars = mvars
  )
  # extract depths encoded (inches) in column name suffix (e.g. 'SMS.I1_2')
  d.depths <- strsplit(as.character(d.long$variable), '_', fixed = TRUE)
  d.long$depth <- sapply(d.depths, function(i) as.numeric(i[2]))
  # convert depths (in to cm)
  d.long$depth <- round(d.long$depth * 2.54)
  # change 'variable' to 'sensor.id'
  names(d.long)[which(names(d.long) == 'variable')] <- 'sensor.id'
  ## there can be multiple sensors at the same below-ground depth
  # hacks to quiet R CMD check (data.table NSE)
  .SD <- NULL
  no.na <- NULL
  # number of complete records per sensor / depth combination
  sensors.per.depth <- d.long[, list(no.na = sum(complete.cases(.SD))),
                              by = c('sensor.id', 'depth'),
                              .SDcols = c('sensor.id', 'depth', 'value')]
  # flag depths associated with more than one sensor
  tab <- table(sensors.per.depth$depth) > 1
  if (any(tab)) {
    # BUG FIX: restrict to depths that actually have > 1 sensor via
    # names(tab)[tab]; previously names(tab) listed ALL depths, so every
    # sensor id was reported instead of just the duplicated ones
    multiple.sensor.ids <- as.character(sensors.per.depth$sensor.id[which(sensors.per.depth$depth %in% names(tab)[tab])])
    message(paste0('multiple sensors per depth [site ', d$Site[1], '] ',
                   paste(multiple.sensor.ids, collapse = ', ')))
  }
  # multiple rows / day, remove NA in sensor values
  idx <- which(!is.na(d.long$value))
  d.long <- d.long[idx, ]
  # water year/day: October 1st -- September 30th
  w <- waterDayYear(d.long$Date)
  # row-order is preserved
  d.long$water_year <- w$wy
  d.long$water_day <- w$wd
  # format and return
  res <- as.data.frame(d.long[, c('Site', 'Date', 'Time', 'water_year', 'water_day',
                                  'value', 'depth', 'sensor.id')])
  # Time ranges from "00:00" to "23:00" [24 hourly readings]
  # set Time to 12:00 (middle of day) for daily data
  if (is.null(res$Time) || all(is.na(res$Time) | res$Time == "")) {
    # only when there are data
    if (nrow(res) > 0) {
      res$Time <- "12:00"
    }
  }
  # TODO: what is the correct timezone for each site's data? Is it local? Or corrected to some default?
  # res$datetime <- as.POSIXct(strptime(paste(res$Date, res$Time), "%Y-%m-%d %H:%M"), tz = "GMT")
  res
}
# build the form-field list for a single SCAN data request
# s: single site code
# y: single year
# r: single report type
# dt: either 'Daily' or 'Hourly'
.make_SCAN_req <- function(s, y, r, dt = c('Daily', 'Hourly')) {
  # guard: granularity must be daily or hourly (case-insensitive)
  stopifnot(tolower(dt) %in% c('daily', 'hourly'))
  # fixed form fields expected by the nwcc/view endpoint, interleaved
  # with the user-supplied values; element order mirrors the web form
  list(
    intervalType = ' View Historic ',
    report       = r,
    timeseries   = dt,
    format       = 'copy',
    sitenum      = s,
    interval     = 'YEAR',
    year         = y,
    month        = 'CY'
  )
}
# req is a named vector or list of form fields, see .make_SCAN_req()
#
# submits a single request to the SCAN/SNOTEL report generator and parses
# the plain-text response into a data.frame
#
# returns: data.frame of raw records (0 rows / 12 unnamed columns on a
# parse failure), or NULL when the HTTP request or content extraction fails
.get_SCAN_data <- function(req) {
  # convert to list as needed
  if (!inherits(req, 'list')) {
    req <- as.list(req)
  }
  # base URL to service
  uri <- 'https://wcc.sc.egov.usda.gov/nwcc/view'
  # note: the SCAN form processor checks the referring page and user-agent
  new.headers <- c("Referer" = "https://wcc.sc.egov.usda.gov/nwcc/")
  # enable follow-location
  # http://stackoverflow.com/questions/25538957/suppressing-302-error-returned-by-httr-post
  # cf <- httr::config(followlocation = 1L, verbose=1L) # debugging
  cf <- httr::config(followlocation = 1L)
  # submit request
  r <- try(httr::POST(
    uri,
    body = req,
    encode = 'form',
    config = cf,
    httr::add_headers(new.headers),
    httr::timeout(getOption("soilDB.timeout", default = 300))
  ))
  if (inherits(r, 'try-error'))
    return(NULL)
  # HTTP error status (e.g. 404 / 500) -> NULL rather than an error
  res <- try(httr::stop_for_status(r), silent = TRUE)
  if (inherits(res, 'try-error')) {
    return(NULL)
  }
  # extract content as text, cannot be directly read-in
  r.content <- try(httr::content(r, as = 'text'), silent = TRUE)
  if (inherits(r.content, 'try-error')) {
    return(NULL)
  }
  # connect to the text as a standard file
  tc <- textConnection(r.content)
  # close the connection on every exit path, including errors raised below
  # (previously a failure while reading the header leaked the connection)
  on.exit(close(tc), add = TRUE)
  # attempt to read column headers, after skipping the first two lines of data
  # note: this moves the text connection cursor forward 3 lines
  # 2018-03-06 DEB: results have an extra line up top, now need to skip 3 lines
  h <- unlist(read.table(
    tc,
    nrows = 1,
    skip = 3,
    header = FALSE,
    stringsAsFactors = FALSE,
    sep = ',',
    quote = '',
    strip.white = TRUE,
    na.strings = '-99.9',
    comment.char = ''
  ))
  # the last header is junk (NA)
  h <- as.vector(na.omit(h))
  # split column names on white space and keep the first element
  h <- sapply(strsplit(h, split = ' '), function(i) i[[1]])
  # clean some more junk
  h <- gsub('-1', '', fixed = TRUE, h)
  h <- gsub(':-', '_', h)
  # NOTE: we have already read-in the first 3 lines above, therefore we don't need to skip lines here
  # read as CSV, skipping junk + headers, accommodating white-space and NA values encoded as -99.9
  x <- try(read.table(
    tc,
    header = FALSE,
    stringsAsFactors = FALSE,
    sep = ',',
    quote = '',
    strip.white = TRUE,
    na.strings = '-99.9',
    comment.char = ''
  ), silent = TRUE)
  # catch errors: report and return an empty 12-column placeholder
  if (inherits(x, 'try-error')) {
    # use the full element name here: `req$y` previously relied on `$`
    # partial matching against `year`, a fragile idiom
    .msg <- sprintf("* site %s [%s]: %s", req$sitenum, req$year, attr(x, 'condition')[["message"]])
    message(.msg)
    x <- as.data.frame(matrix(ncol = 12, nrow = 0))
    return(x)
  }
  # the last column is always junk
  x[[names(x)[length(x)]]] <- NULL
  # apply truncated column names:
  names(x) <- h
  # convert date to Date class
  x$Date <- as.Date(x$Date)
  # done
  return(x)
}
## helper function for getting a single table of SCAN metadata
# site.code: a single SCAN site code
# returns: a data.frame of daily sensor descriptions scraped from the last
# HTML table on the nwcc/sensors page, with a `site.code` column appended
.get_single_SCAN_metadata <- function(site.code) {
  # base URL to service
  uri <- 'https://wcc.sc.egov.usda.gov/nwcc/sensors'
  # note: the SCAN form processor checks the referring page and user-agent
  new.headers <- c("Referer" = "https://wcc.sc.egov.usda.gov/nwcc/sensors")
  # enable follow-location
  # http://stackoverflow.com/questions/25538957/suppressing-302-error-returned-by-httr-post
  # cf <- httr::config(followlocation = 1L, verbose=1L) # debugging
  cf <- httr::config(followlocation = 1L)
  # form fields requesting daily sensor descriptions for this site
  req <- list(
    sitenum = site.code,
    report = 'ALL',
    interval = 'DAY',
    timeseries = " View Daily Sensor Descriptions "
  )
  # submit request
  r <- httr::POST(
    uri,
    body = req,
    encode = 'form',
    config = cf,
    httr::add_headers(new.headers)
  )
  # stop() on HTTP error status
  httr::stop_for_status(r)
  # parsed XML
  r.content <- httr::content(r, as = 'parsed')
  # get tables
  n.tables <- rvest::html_nodes(r.content, "table")
  # the metadata table we want is the last one
  m <- rvest::html_table(n.tables[[length(n.tables)]], header = FALSE)
  # clean-up table
  # 1st row is header
  h <- make.names(m[1, ])
  # second row is junk
  m <- m[-c(1:2), ]
  names(m) <- h
  # tag all rows with the requested site code
  m$site.code <- site.code
  return(m)
}
# iterate over a vector of SCAN site codes, returning basic metadata
# site.code: vector of SCAN site codes
#' @rdname fetchSCAN
#' @export
SCAN_sensor_metadata <- function(site.code) {
  # check for required packages
  # use scalar short-circuit `||` (not vectorized `|`) in this
  # single-condition if()
  if (!requireNamespace('httr', quietly = TRUE) || !requireNamespace('rvest', quietly = TRUE))
    stop('please install the `httr` and `rvest` packages', call. = FALSE)
  # iterate over site codes, row-bind per-site tables, tag with site.code
  res <- do.call('rbind', lapply(site.code, .get_single_SCAN_metadata))
  return(as.data.frame(res))
}
## https://github.com/ncss-tech/soilDB/issues/61
# site.code: vector of SCAN site codes
#' @rdname fetchSCAN
#' @export
SCAN_site_metadata <- function(site.code = NULL) {
  # hack to please R CMD check: this object is created by load() below
  SCAN_SNOTEL_metadata <- NULL
  # cached copy of the station metadata table shipped with soilDB
  load(system.file("data/SCAN_SNOTEL_metadata.rda", package = "soilDB")[1])
  # no site codes supplied -> metadata for every station
  if (is.null(site.code)) {
    return(SCAN_SNOTEL_metadata)
  }
  # otherwise keep only rows matching the requested site codes
  SCAN_SNOTEL_metadata[SCAN_SNOTEL_metadata$Site %in% site.code, ]
}
|
/R/fetchSCAN.R
|
no_license
|
ncss-tech/soilDB
|
R
| false
| false
| 20,834
|
r
|
## TODO:
## 1. get a list of stations
## 2. get a list of reports and matching headers / units
## 3. better documentation / testing
## 4. work with Deb / programmers to get compressed output
##
## see: http://www.wcc.nrcs.usda.gov/web_service/awdb_webservice_announcements.htm
## http://www.wcc.nrcs.usda.gov/web_service/AWDB_Web_Service_Reference.htm
## http://www.wcc.nrcs.usda.gov/report_generator/WebReportScripting.htm
##
## 5. we will need to address the potential for multiple sensor ID per type/depth
## examples in:
## https://github.com/ncss-tech/soilDB/issues/14
##
## 6. use API vs. scraping report output
## https://github.com/bluegreen-labs/snotelr/blob/master/R/snotel_download.r#L65
## --> this would require enumeration of sensors, etc.
### sensor codes: http://wcc.sc.egov.usda.gov/nwcc/sensors
##
## ideas:
## https://github.com/gunnarleffler/getSnotel
##
## site images:
## https://www.wcc.nrcs.usda.gov/siteimages/462.jpg
##
## site notes:
## https://wcc.sc.egov.usda.gov/nwcc/sitenotes?sitenum=462
##
#' @title Get Daily Climate Data from USDA-NRCS SCAN (Soil Climate Analysis Network) Stations
#'
#' @description Query soil/climate data from USDA-NRCS SCAN Stations.
#'
#' @details Possible above and below ground sensor types include: 'SMS' (soil moisture), 'STO' (soil temperature), 'SAL' (salinity), 'TAVG' (daily average air temperature), 'TMIN' (daily minimum air temperature), 'TMAX' (daily maximum air temperature), 'PRCP' (daily precipitation), 'PREC' (daily precipitation), 'SNWD' (snow depth), 'WTEQ' (snow water equivalent),'WDIRV' (wind direction), 'WSPDV' (wind speed), 'LRADT' (solar radiation/langley total).
#'
#' This function converts below-ground sensor depth from inches to cm. All temperature values are reported as degrees C. Precipitation, snow depth, and snow water content are reported as *inches*.
#'
#' ## SCAN Sensors
#'
#' All Soil Climate Analysis Network (SCAN) sensor measurements are reported hourly.
#'
#' |Element Measured |Sensor Type |Precision |
#' |:------------------------|:----------------------------------------------------------------------------------------------------------|:---------------------------|
#' |Air Temperature |Shielded thermistor |0.1 degrees C |
#' |Barometric Pressure |Silicon capacitive pressure sensor |1% |
#' |Precipitation             |Storage-type gage or tipping bucket                                                                          |Storage: 0.1 inches; Tipping bucket: 0.01 inches|
#' |Relative Humidity |Thin film capacitance-type sensor |1% |
#' |Snow Depth |Sonic sensor (not on all stations) |0.5 inches |
#' |Snow Water Content |Snow pillow device and a pressure transducer (not on all stations) |0.1 inches |
#' |Soil Moisture |Dielectric constant measuring device. Typical measurements are at 2", 4", 8", 20", and 40" where possible. |0.50% |
#' |Soil Temperature |Encapsulated thermistor. Typical measurements are at 2", 4", 8", 20", and 40" where possible. |0.1 degrees C |
#' |Solar Radiation |Pyranometer |0.01 watts per meter |
#' |Wind Speed and Direction |Propellor-type anemometer |Speed: 0.1 miles per hour; Direction: 1 degree|
#'
#' ## SNOTEL Sensors
#'
#' All Snow Telemetry (SNOTEL) sensor measurements are reported daily.
#'
#' |Element Measured |Sensor Type |Precision |
#' |:------------------------|:----------------------------------------------------------------------------------------------------------|:---------------------------|
#' |Air Temperature |Shielded thermistor |0.1 degrees C |
#' |Barometric Pressure |Silicon capacitive pressure sensor |1% |
#' |Precipitation |Storage-type gage or tipping bucket |Storage: 0.1 inches; Tipping bucket: 0.01 inches|
#' |Relative Humidity |Thin film capacitance-type sensor |1% |
#' |Snow Depth |Sonic sensor |0.5 inches |
#' |Snow Water Content |Snow pillow device and a pressure transducer |0.1 inches |
#' |Soil Moisture |Dielectric constant measuring device. Typical measurements are at 2", 4", 8", 20", and 40" where possible. |0.50% |
#' |Soil Temperature |Encapsulated thermistor. Typical measurements are at 2", 4", 8", 20", and 40" where possible. |0.1 degrees C |
#' |Solar Radiation |Pyranometer |0.01 watts per meter |
#' |Wind Speed and Direction |Propellor-type anemometer |Speed: 0.1 miles per hour; Direction: 1 degree|
#'
#' See the [fetchSCAN tutorial](http://ncss-tech.github.io/AQP/soilDB/fetchSCAN-demo.html) for additional usage and visualization examples.
#'
#' @references See the [Soil Climate Analysis Network](https://www.nrcs.usda.gov/resources/data-and-reports/soil-climate-analysis-network) home page for more information on the SCAN program, and links to other associated programs such as SNOTEL, at the National Weather and Climate Center. You can get information on available web services, as well as interactive maps of snow water equivalent, precipitation and streamflow.
#'
#' @param site.code a vector of site codes. If `NULL` `SCAN_site_metadata()` returns metadata for all SCAN sites.
#' @param year a vector of years
#' @param report report name, single value only; default `'SCAN'`, other example options include individual sensor codes, e.g. `'SMS'` for Soil Moisture Storage, `'TEMP'` for temperature
#' @param timeseries either `'Daily'` or `'Hourly'`
#' @param ... additional arguments. May include `intervalType`, `format`, `sitenum`, `interval`, `year`, `month`. Presence of additional arguments bypasses default batching functionality provided in the function and submits a 'raw' request to the API form.
#' @return a `list` of `data.frame` objects, where each element name is a sensor type, plus a `metadata` table; different `report` types change the types of sensor data returned. `SCAN_sensor_metadata()` and `SCAN_site_metadata()` return a `data.frame`. `NULL` on bad request.
#' @author D.E. Beaudette, A.G. Brown
#' @keywords manip
#' @examples
#' \dontrun{
#' # get data
#' x <- try(fetchSCAN(site.code=c(356, 2072), year=c(2015, 2016)))
#' str(x)
#'
#' # get sensor metadata
#' m <- SCAN_sensor_metadata(site.code=c(356, 2072))
#'
#' # get site metadata
#' m <- SCAN_site_metadata(site.code=c(356, 2072))
#'
#' # get hourly data (396315 records)
#' # x <- try(fetchSCAN(site.code=c(356, 2072), year=c(2015, 2016), timeseries = "Hourly"))
#' }
#' @rdname fetchSCAN
#' @export
fetchSCAN <- function(site.code = NULL, year = NULL, report = 'SCAN', timeseries = c('Daily', 'Hourly'), ...) {
  # `httr` performs all web requests; fail early with a clear message
  # check for required packages
  if (!requireNamespace('httr', quietly = TRUE))
    stop('please install the `httr` package', call. = FALSE)
  # sanity check on granularity
  # required to flatten possible arguments to single value
  timeseries <- match.arg(timeseries)
  ## allow for arbitrary queries using `req` argument or additional arguments via ...
  l.extra <- list(...)
  # TODO do this after expansion to iterate over site.code*year + ???
  l <- c(sitenum = site.code, year = year, report = report, timeseries = timeseries, l.extra)
  # any extra arguments bypass the batching logic below: a single 'raw'
  # form request is submitted and its parsed result returned as-is
  # (no per-sensor formatting, no metadata element)
  if (length(l.extra) > 0) {
    if ("req" %in% names(l)) {
      .Deprecated("`req` argument is deprecated; custom form inputs can be specified as named arguments via `...`")
      l <- l[["req"]]
    } else {
      l <- unlist(l)
    }
    return(.get_SCAN_data(req = l))
  }
  # init list to store results
  res <- list()
  # add metadata from cached table in soilDB
  m <- SCAN_site_metadata(site.code)
  # keep only site codes present in the cached metadata table
  site.code <- m$Site
  # all possible combinations of site codes and year | single report and timeseries type
  g <- expand.grid(s = site.code, y = year, r = report, dt = timeseries)
  # get a list of request lists
  req.list <- mapply(.make_SCAN_req, s = g$s, y = g$y, r = g$r, dt = g$dt, SIMPLIFY = FALSE)
  # format raw data into a list of lists:
  # sensor suite -> site number -> year
  d.list <- list()
  # save: sensor suite -> site number -> year
  sensors <- c('SMS', 'STO', 'SAL', 'TAVG', 'TMIN',
               'TMAX', 'PRCP', 'PREC', 'SNWD', 'WTEQ',
               'WDIRV', 'WSPDV', 'LRADT')
  ## TODO: consider submitting queries in parallel, possible at the inner for-loop, over sensors
  for (i in req.list) {
    # when there are no data, result is an empty data.frame
    d <- try(.get_SCAN_data(i), silent = TRUE)
    # errors occur in exceptional situations
    # so we terminate the request loop
    # (rather than possibly incomplete results)
    if (inherits(d, 'try-error')) {
      message(d)
      return(NULL)
    }
    for (sensor.i in sensors) {
      # nested-list keys must be character
      site.i <- as.character(i$sitenum)
      year.i <- as.character(i$year)
      if (is.null(d)) {
        # failed request (NULL): store a 0-row placeholder with the expected columns
        # NOTE: `res` is re-used here as a per-sensor scratch variable; it is
        # reset to a list after the request loop (see inherits() check below)
        res <- data.frame(Site = integer(0),
                          Date = as.Date(numeric(0),
                                         origin = "1970-01-01"),
                          Time = character(0),
                          water_year = numeric(0),
                          water_day = integer(0),
                          value = numeric(0),
                          depth = numeric(0),
                          sensor.id = integer(0),
                          row.names = NULL,
                          stringsAsFactors = FALSE)
      } else {
        # long-format records for this sensor suite (NULL when sensor absent)
        res <- .formatSCAN_soil_sensor_suites(d, code = sensor.i)
      }
      d.list[[sensor.i]][[site.i]][[year.i]] <- res
    }
  }
  # iterate over sensors
  for (sensor.i in sensors) {
    # flatten individual sensors over years, by site number
    r.i <- data.table::rbindlist(lapply(d.list[[sensor.i]], data.table::rbindlist, fill = TRUE), fill = TRUE)
    rownames(r.i) <- NULL
    # res should be a list
    # (it may still hold the last per-sensor data.frame from the loop above)
    if (inherits(res, 'data.frame')) {
      res <- list()
    }
    res[[sensor.i]] <- as.data.frame(r.i)
  }
  # report object size
  if (length(res) > 0) {
    res.size <- round(object.size(res) / 1024 / 1024, 2)
    res.rows <- sum(sapply(res, nrow), na.rm = TRUE)
    message(paste(res.rows, ' records (', res.size, ' Mb transferred)', sep = ''))
  } else message('query returned no data')
  # attach the site metadata as the final list element
  res[['metadata']] <- m
  return(res)
}
# combine soil sensor suites into stackable format
#
# d: data.frame of raw SCAN records, one column per sensor (e.g. 'SMS.I1_2')
# code: single sensor prefix to extract (e.g. 'SMS', 'TAVG')
#
# returns a long-format data.frame with columns:
#   Site, Date, Time, water_year, water_day, value, depth, sensor.id
# or NULL when `d` contains no columns matching `code`
.formatSCAN_soil_sensor_suites <- function(d, code) {
  # hack to quiet R CMD check (data.table non-standard evaluation)
  value <- NULL
  stopifnot(length(code) == 1)
  # locate named columns
  d.cols <- grep(code, names(d))
  # return NULL if no data
  if (length(d.cols) == 0) {
    return(NULL)
  }
  ## https://github.com/ncss-tech/soilDB/issues/14
  ## there may be multiple above-ground sensors (takes the first)
  if (length(d.cols) > 1 && code %in% c('TAVG', 'TMIN', 'TMAX', 'PRCP', 'PREC',
                                        'SNWD', 'WTEQ', 'WDIRV', 'WSPDV', 'LRADT')) {
    message(paste0('multiple sensors per site [site ', d$Site[1], '] ',
                   paste0(names(d)[d.cols], collapse = ',')))
    # use only the first sensor
    d.cols <- d.cols[1]
  }
  # coerce all values to double (avoids data.table warnings)
  mvars <- unique(names(d)[d.cols])
  d[mvars] <- lapply(d[mvars], as.double)
  # convert to long format
  d.long <- data.table::melt(
    data.table::as.data.table(d),
    id.vars = c('Site', 'Date', 'Time'),
    measure.vars = mvars
  )
  # extract depths encoded (inches) in column name suffix (e.g. 'SMS.I1_2')
  d.depths <- strsplit(as.character(d.long$variable), '_', fixed = TRUE)
  d.long$depth <- sapply(d.depths, function(i) as.numeric(i[2]))
  # convert depths (in to cm)
  d.long$depth <- round(d.long$depth * 2.54)
  # change 'variable' to 'sensor.id'
  names(d.long)[which(names(d.long) == 'variable')] <- 'sensor.id'
  ## there can be multiple sensors at the same below-ground depth
  # hacks to quiet R CMD check (data.table NSE)
  .SD <- NULL
  no.na <- NULL
  # number of complete records per sensor / depth combination
  sensors.per.depth <- d.long[, list(no.na = sum(complete.cases(.SD))),
                              by = c('sensor.id', 'depth'),
                              .SDcols = c('sensor.id', 'depth', 'value')]
  # flag depths associated with more than one sensor
  tab <- table(sensors.per.depth$depth) > 1
  if (any(tab)) {
    # BUG FIX: restrict to depths that actually have > 1 sensor via
    # names(tab)[tab]; previously names(tab) listed ALL depths, so every
    # sensor id was reported instead of just the duplicated ones
    multiple.sensor.ids <- as.character(sensors.per.depth$sensor.id[which(sensors.per.depth$depth %in% names(tab)[tab])])
    message(paste0('multiple sensors per depth [site ', d$Site[1], '] ',
                   paste(multiple.sensor.ids, collapse = ', ')))
  }
  # multiple rows / day, remove NA in sensor values
  idx <- which(!is.na(d.long$value))
  d.long <- d.long[idx, ]
  # water year/day: October 1st -- September 30th
  w <- waterDayYear(d.long$Date)
  # row-order is preserved
  d.long$water_year <- w$wy
  d.long$water_day <- w$wd
  # format and return
  res <- as.data.frame(d.long[, c('Site', 'Date', 'Time', 'water_year', 'water_day',
                                  'value', 'depth', 'sensor.id')])
  # Time ranges from "00:00" to "23:00" [24 hourly readings]
  # set Time to 12:00 (middle of day) for daily data
  if (is.null(res$Time) || all(is.na(res$Time) | res$Time == "")) {
    # only when there are data
    if (nrow(res) > 0) {
      res$Time <- "12:00"
    }
  }
  # TODO: what is the correct timezone for each site's data? Is it local? Or corrected to some default?
  # res$datetime <- as.POSIXct(strptime(paste(res$Date, res$Time), "%Y-%m-%d %H:%M"), tz = "GMT")
  res
}
# build the form-field list for a single SCAN data request
# s: single site code
# y: single year
# r: single report type
# dt: either 'Daily' or 'Hourly'
.make_SCAN_req <- function(s, y, r, dt = c('Daily', 'Hourly')) {
  # guard: granularity must be daily or hourly (case-insensitive)
  stopifnot(tolower(dt) %in% c('daily', 'hourly'))
  # fixed form fields expected by the nwcc/view endpoint, interleaved
  # with the user-supplied values; element order mirrors the web form
  list(
    intervalType = ' View Historic ',
    report       = r,
    timeseries   = dt,
    format       = 'copy',
    sitenum      = s,
    interval     = 'YEAR',
    year         = y,
    month        = 'CY'
  )
}
# req is a named vector or list of form fields, see .make_SCAN_req()
#
# submits a single request to the SCAN/SNOTEL report generator and parses
# the plain-text response into a data.frame
#
# returns: data.frame of raw records (0 rows / 12 unnamed columns on a
# parse failure), or NULL when the HTTP request or content extraction fails
.get_SCAN_data <- function(req) {
  # convert to list as needed
  if (!inherits(req, 'list')) {
    req <- as.list(req)
  }
  # base URL to service
  uri <- 'https://wcc.sc.egov.usda.gov/nwcc/view'
  # note: the SCAN form processor checks the referring page and user-agent
  new.headers <- c("Referer" = "https://wcc.sc.egov.usda.gov/nwcc/")
  # enable follow-location
  # http://stackoverflow.com/questions/25538957/suppressing-302-error-returned-by-httr-post
  # cf <- httr::config(followlocation = 1L, verbose=1L) # debugging
  cf <- httr::config(followlocation = 1L)
  # submit request
  r <- try(httr::POST(
    uri,
    body = req,
    encode = 'form',
    config = cf,
    httr::add_headers(new.headers),
    httr::timeout(getOption("soilDB.timeout", default = 300))
  ))
  if (inherits(r, 'try-error'))
    return(NULL)
  # HTTP error status (e.g. 404 / 500) -> NULL rather than an error
  res <- try(httr::stop_for_status(r), silent = TRUE)
  if (inherits(res, 'try-error')) {
    return(NULL)
  }
  # extract content as text, cannot be directly read-in
  r.content <- try(httr::content(r, as = 'text'), silent = TRUE)
  if (inherits(r.content, 'try-error')) {
    return(NULL)
  }
  # connect to the text as a standard file
  tc <- textConnection(r.content)
  # close the connection on every exit path, including errors raised below
  # (previously a failure while reading the header leaked the connection)
  on.exit(close(tc), add = TRUE)
  # attempt to read column headers, after skipping the first two lines of data
  # note: this moves the text connection cursor forward 3 lines
  # 2018-03-06 DEB: results have an extra line up top, now need to skip 3 lines
  h <- unlist(read.table(
    tc,
    nrows = 1,
    skip = 3,
    header = FALSE,
    stringsAsFactors = FALSE,
    sep = ',',
    quote = '',
    strip.white = TRUE,
    na.strings = '-99.9',
    comment.char = ''
  ))
  # the last header is junk (NA)
  h <- as.vector(na.omit(h))
  # split column names on white space and keep the first element
  h <- sapply(strsplit(h, split = ' '), function(i) i[[1]])
  # clean some more junk
  h <- gsub('-1', '', fixed = TRUE, h)
  h <- gsub(':-', '_', h)
  # NOTE: we have already read-in the first 3 lines above, therefore we don't need to skip lines here
  # read as CSV, skipping junk + headers, accommodating white-space and NA values encoded as -99.9
  x <- try(read.table(
    tc,
    header = FALSE,
    stringsAsFactors = FALSE,
    sep = ',',
    quote = '',
    strip.white = TRUE,
    na.strings = '-99.9',
    comment.char = ''
  ), silent = TRUE)
  # catch errors: report and return an empty 12-column placeholder
  if (inherits(x, 'try-error')) {
    # use the full element name here: `req$y` previously relied on `$`
    # partial matching against `year`, a fragile idiom
    .msg <- sprintf("* site %s [%s]: %s", req$sitenum, req$year, attr(x, 'condition')[["message"]])
    message(.msg)
    x <- as.data.frame(matrix(ncol = 12, nrow = 0))
    return(x)
  }
  # the last column is always junk
  x[[names(x)[length(x)]]] <- NULL
  # apply truncated column names:
  names(x) <- h
  # convert date to Date class
  x$Date <- as.Date(x$Date)
  # done
  return(x)
}
## helper function for getting a single table of SCAN metadata
# site.code: a single SCAN site code
# returns: a data.frame of daily sensor descriptions scraped from the last
# HTML table on the nwcc/sensors page, with a `site.code` column appended
.get_single_SCAN_metadata <- function(site.code) {
  # base URL to service
  uri <- 'https://wcc.sc.egov.usda.gov/nwcc/sensors'
  # note: the SCAN form processor checks the referring page and user-agent
  new.headers <- c("Referer" = "https://wcc.sc.egov.usda.gov/nwcc/sensors")
  # enable follow-location
  # http://stackoverflow.com/questions/25538957/suppressing-302-error-returned-by-httr-post
  # cf <- httr::config(followlocation = 1L, verbose=1L) # debugging
  cf <- httr::config(followlocation = 1L)
  # form fields requesting daily sensor descriptions for this site
  req <- list(
    sitenum = site.code,
    report = 'ALL',
    interval = 'DAY',
    timeseries = " View Daily Sensor Descriptions "
  )
  # submit request
  r <- httr::POST(
    uri,
    body = req,
    encode = 'form',
    config = cf,
    httr::add_headers(new.headers)
  )
  # stop() on HTTP error status
  httr::stop_for_status(r)
  # parsed XML
  r.content <- httr::content(r, as = 'parsed')
  # get tables
  n.tables <- rvest::html_nodes(r.content, "table")
  # the metadata table we want is the last one
  m <- rvest::html_table(n.tables[[length(n.tables)]], header = FALSE)
  # clean-up table
  # 1st row is header
  h <- make.names(m[1, ])
  # second row is junk
  m <- m[-c(1:2), ]
  names(m) <- h
  # tag all rows with the requested site code
  m$site.code <- site.code
  return(m)
}
# iterate over a vector of SCAN site codes, returning basic metadata
# site.code: vector of SCAN site codes
#' @rdname fetchSCAN
#' @export
SCAN_sensor_metadata <- function(site.code) {
  # check for required packages
  # use scalar short-circuit `||` (not vectorized `|`) in this
  # single-condition if()
  if (!requireNamespace('httr', quietly = TRUE) || !requireNamespace('rvest', quietly = TRUE))
    stop('please install the `httr` and `rvest` packages', call. = FALSE)
  # iterate over site codes, row-bind per-site tables, tag with site.code
  res <- do.call('rbind', lapply(site.code, .get_single_SCAN_metadata))
  return(as.data.frame(res))
}
## https://github.com/ncss-tech/soilDB/issues/61
# site.code: vector of SCAN site codes
#' @rdname fetchSCAN
#' @export
SCAN_site_metadata <- function(site.code = NULL) {
  # hack to please R CMD check: this object is created by load() below
  SCAN_SNOTEL_metadata <- NULL
  # cached copy of the station metadata table shipped with soilDB
  load(system.file("data/SCAN_SNOTEL_metadata.rda", package = "soilDB")[1])
  # no site codes supplied -> metadata for every station
  if (is.null(site.code)) {
    return(SCAN_SNOTEL_metadata)
  }
  # otherwise keep only rows matching the requested site codes
  SCAN_SNOTEL_metadata[SCAN_SNOTEL_metadata$Site %in% site.code, ]
}
|
# R-Programme Assignment 2: Caching the Inverse of a Matrix
# ("cacher fr.", meaning 'to hide') special functions.
# LESSON
# Study the given examples on the treatment of a vector
# Note the introduction of <<- operator
# VALUE OF LESSON
# Matrix inversion, along with some others,
# is said to be a costly computation
# (frequency of reference, user system time, waiting time, etc.)
# A well designed and effective function can be stored,
# recalled and used timely and efficiently
# Example 1 Caching the Mean of a Vector
# creates a special vector of a list containing a function to
## a) set the value of the vector, b) get the value of the vector
## c) set the value of the mean and d) get the value of the mean
# SPECIAL FUNCTION
# Now the for the special "matrix" object
# that can cache its inverse
makeCacheMatrix <- function(x = matrix()) {
  ## Matrix wrapper that can cache its inverse.
  ## Returns a list of four closures sharing state through <<-:
  ##   set(y)              replace the matrix (and invalidate the cache)
  ##   get()               return the current matrix
  ##   setinverse(inverse) store a computed inverse
  ##   getinverse()        return the cached inverse (NULL if not yet set)
  ## Per the assignment instructions, x is assumed to be invertible.
  inv <- NULL
  set <- function(y) {
    # swap in the new matrix and drop the now-stale cached inverse
    x <<- y
    inv <<- NULL
  }
  get <- function() x
  setinverse <- function(inverse) inv <<- inverse
  getinverse <- function() inv
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
# Develop a function to calculates the inverse of the special "matrix"
# First check to see if the inverse has already been computed.
# If not already done, compute the inverse and store it.
# The function
cacheSolve <- function(x, ...) {
  ## Return the inverse of the special "matrix" `x` (see makeCacheMatrix),
  ## re-using the cached value when one is available.
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("inverse exists, please hold on")
    return(cached)
  }
  ## cache miss: invert via solve() and store the result for next time;
  ## extra arguments are forwarded to solve()
  inverse <- solve(x$get(), ...)
  x$setinverse(inverse)
  inverse
}
# Test drive
# > x <- rbind(c(4,3), c(3, 2))
# > m = makeCacheMatrix(x)
# > m$get()
# [,1] [,2]
# [1,] 4 3
# [2,] 3 2
# > cacheSolve(m)
# [,1] [,2]
# [1,] -2 3
# [2,] 3 -4
# > cacheSolve(m)
# inverse exists, please hold on
# [,1] [,2]
# [1,] -2 3
# [2,] 3 -4
# >
|
/cachematrix.R
|
no_license
|
AnthonyAbolarin/ProgrammingAssignment2
|
R
| false
| false
| 2,437
|
r
|
# R-Programme Assignment 2: Caching the Inverse of a Matrix
# ("cacher fr.", meaning 'to hide') special functions.
# LESSON
# Study the given examples on the treatment of a vector
# Note the introduction of <<- operator
# VALUE OF LESSON
# Matrix inversion, along with some others,
# is said to be a costly computation
# (frequency of reference, user system time, waiting time, etc.)
# A well designed and effective function can be stored,
# recalled and used timely and efficiently
# Example 1 Caching the Mean of a Vector
# creates a special vector of a list containing a function to
## a) set the value of the vector, b) get the value of the vector
## c) set the value of the mean and d) get the value of the mean
# SPECIAL FUNCTION
# Now the for the special "matrix" object
# that can cache its inverse
makeCacheMatrix <- function(x = matrix()) {
  ## Matrix wrapper that can cache its inverse.
  ## Returns a list of four closures sharing state through <<-:
  ##   set(y)              replace the matrix (and invalidate the cache)
  ##   get()               return the current matrix
  ##   setinverse(inverse) store a computed inverse
  ##   getinverse()        return the cached inverse (NULL if not yet set)
  ## Per the assignment instructions, x is assumed to be invertible.
  inv <- NULL
  set <- function(y) {
    # swap in the new matrix and drop the now-stale cached inverse
    x <<- y
    inv <<- NULL
  }
  get <- function() x
  setinverse <- function(inverse) inv <<- inverse
  getinverse <- function() inv
  list(
    set = set,
    get = get,
    setinverse = setinverse,
    getinverse = getinverse
  )
}
# Develop a function to calculates the inverse of the special "matrix"
# First check to see if the inverse has already been computed.
# If not already done, compute the inverse and store it.
# The function
cacheSolve <- function(x, ...) {
  ## Return the inverse of the special "matrix" `x` (see makeCacheMatrix),
  ## re-using the cached value when one is available.
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("inverse exists, please hold on")
    return(cached)
  }
  ## cache miss: invert via solve() and store the result for next time;
  ## extra arguments are forwarded to solve()
  inverse <- solve(x$get(), ...)
  x$setinverse(inverse)
  inverse
}
# Test drive
# > x <- rbind(c(4,3), c(3, 2))
# > m = makeCacheMatrix(x)
# > m$get()
# [,1] [,2]
# [1,] 4 3
# [2,] 3 2
# > cacheSolve(m)
# [,1] [,2]
# [1,] -2 3
# [2,] 3 -4
# > cacheSolve(m)
# inverse exists, please hold on
# [,1] [,2]
# [1,] -2 3
# [2,] 3 -4
# >
|
#' @author Ivan Jacob Agaloos Pesigan
#'
#' @title Monte Carlo Method Assuming Multivariate Normal Distribution for Indirect Effect in a Simple Mediation Model
#' for Data Generated Using the Vale and Maurelli (1983) Approach (Skewness = 2, Kurtosis = 7)
#'
#' @family monte carlo method functions
#' @keywords mc
#' @inheritParams mc.mvn
#' @inheritParams useparamsmvn
#' @examples
#' taskid <- 1
#' data <- vm_mod_dat(taskid = taskid)
#' fit.sem.mlr(data = data, minimal = TRUE)
#'
#' fit <- vm_mod_fit.sem.mlr(data = data, taskid = taskid)
#' thetahatstar <- vm_mod_sem_mc.mvn(
#' taskid = taskid, R = 20000L,
#' alphahat = fit["alphahat"], sehatalphahat = fit["sehatalphahat"],
#' betahat = fit["betahat"], sehatbetahat = fit["sehatbetahat"]
#' )
#' hist(thetahatstar)
#' @export
vm_mod_sem_mc.mvn <- function(taskid,
                              R = 20000L,
                              alphahat,
                              sehatalphahat,
                              betahat,
                              sehatbetahat) {
  # look up the generating parameters for this simulation task
  params <- useparamsmvn(taskid = taskid)
  # Monte Carlo draws of the indirect effect, assuming multivariate
  # normality of (alphahat, betahat)
  thetahatstar <- mc.mvn(
    R = R,
    alphahat = alphahat,
    sehatalphahat = sehatalphahat,
    betahat = betahat,
    sehatbetahat = sehatbetahat
  )
  # tag the result with the task id, the population indirect effect,
  # and the sample point estimate
  attr(thetahatstar, "taskid") <- params$taskid
  attr(thetahatstar, "theta") <- params$alphabeta
  attr(thetahatstar, "thetahat") <- alphahat * betahat
  thetahatstar
}
#' @author Ivan Jacob Agaloos Pesigan
#'
#' @title Monte Carlo Method Assuming Multivariate Normal Distribution for Indirect Effect in a Simple Mediation Model
#' for Data Generated Using the Vale and Maurelli (1983) Approach (Skewness = 2, Kurtosis = 7)
#' (Single Task)
#'
#' @family monte carlo method functions
#' @keywords mc
#' @inheritParams vm_mod_dat_task
#' @export
vm_mod_sem_mc.mvn_task <- function(taskid,
                                   dir = getwd(),
                                   overwrite = FALSE) {
  # for socks to load package in the namespace
  requireNamespace(
    "jeksterslabRmedsimple",
    quietly = TRUE
  )
  # work inside `dir`; restore the original working directory on ANY exit,
  # including the stop() below (previously it was only restored on success)
  wd <- getwd()
  setwd(dir)
  on.exit(setwd(wd), add = TRUE)
  # input: fitted model estimates for this task
  fnest <- paste0(
    "medsimple_vm_mod_fit.sem.mlr_",
    sprintf(
      "%05.0f",
      taskid
    ),
    ".Rds"
  )
  # output: Monte Carlo results for this task
  fn <- paste0(
    "medsimple_vm_mod_sem_mc.mvn_",
    sprintf(
      "%05.0f",
      taskid
    ),
    ".Rds"
  )
  # Check if data exists --------------------------------------------------------
  if (file.exists(fnest)) {
    X <- readRDS(fnest)
  } else {
    stop(
      paste(
        fnest,
        "does not exist in",
        dir
      )
    )
  }
  # Resolve overwrite -----------------------------------------------------------
  if (overwrite) {
    run <- TRUE
  } else {
    # Check if result exists ----------------------------------------------------
    # re-run when the output file is absent OR unreadable (corrupt).
    # BUG FIX: the old error handler did `run <- TRUE` in its OWN
    # environment, so a corrupt results file was never regenerated.
    if (file.exists(fn)) {
      run <- tryCatch(
        {
          readRDS(fn)
          FALSE
        },
        error = function(e) TRUE
      )
    } else {
      run <- TRUE
    }
  }
  if (run) {
    # one Monte Carlo run per row of the estimates table
    out <- invisible(
      mapply(
        FUN = vm_mod_sem_mc.mvn,
        taskid = X[, "taskid"],
        alphahat = X[, "alphahat"],
        sehatalphahat = X[, "sehatalphahat"],
        betahat = X[, "betahat"],
        sehatbetahat = X[, "sehatbetahat"],
        SIMPLIFY = FALSE
      )
    )
    saveRDS(
      object = out,
      file = fn
    )
  }
  # working directory is restored by on.exit(); called for side effects only
  invisible(NULL)
}
#' @author Ivan Jacob Agaloos Pesigan
#'
#' @title Monte Carlo Method Assuming Multivariate Normal Distribution for Indirect Effect in a Simple Mediation Model
#' for Data Generated Using the Vale and Maurelli (1983) Approach (Skewness = 2, Kurtosis = 7)
#' (Simulation)
#'
#' @family monte carlo method functions
#' @keywords mc
#' @importFrom jeksterslabRpar par_lapply
#' @inheritParams vm_mod_sem_mc.mvn_task
#' @inheritParams jeksterslabRpar::par_lapply
#' @inheritParams vm_mod_dat_simulation
#' @export
vm_mod_sem_mc.mvn_simulation <- function(dir = getwd(),
                                         all = TRUE,
                                         taskid = NULL,
                                         overwrite = FALSE,
                                         par = TRUE,
                                         ncores = NULL,
                                         blas_threads = TRUE,
                                         mc = TRUE,
                                         lb = FALSE,
                                         cl_eval = FALSE,
                                         cl_export = FALSE,
                                         cl_expr,
                                         cl_vars) {
  if (all) {
    # Run every task defined in the parameters table.
    # seq_len() is safe if the table is ever empty (1:0 would yield c(1, 0)).
    ncase <- nrow(jeksterslabRmedsimple::paramsmvn)
    taskid <- seq_len(ncase)
  } else {
    if (is.null(taskid)) {
      stop(
        "If \`all = FALSE\` \`taskid\` should be provided."
      )
    }
  }
  # Dispatch the per-task workers, possibly in parallel.
  out <- invisible(
    par_lapply(
      X = taskid,
      FUN = vm_mod_sem_mc.mvn_task,
      dir = dir,
      overwrite = overwrite,
      par = par,
      ncores = ncores,
      blas_threads = blas_threads,
      mc = mc,
      lb = lb,
      cl_eval = cl_eval,
      cl_export = cl_export, # BUG FIX: was `cl_eval`, silently ignoring the `cl_export` argument
      cl_expr = cl_expr,
      cl_vars = cl_vars,
      rbind = NULL
    )
  )
}
#' @author Ivan Jacob Agaloos Pesigan
#'
#' @title Monte Carlo Method Confidence Intervals Assuming Multivariate Normal Distribution for Indirect Effect in a Simple Mediation Model
#' for Data Generated Using the Vale and Maurelli (1983) Approach (Skewness = 2, Kurtosis = 7)
#' (Single Task)
#'
#' @family monte carlo method functions
#' @keywords mc
#' @inheritParams vm_mod_dat_task
#' @export
vm_mod_sem_mc.mvn_pcci_task <- function(taskid,
                                        dir = getwd()) {
  # for socks to load package in the namespace
  requireNamespace(
    "jeksterslabRmedsimple",
    quietly = TRUE
  )
  # Percentile confidence intervals (99.9%, 99%, 95%) for one
  # Monte Carlo sampling distribution.
  foo <- function(thetahatstar) {
    pcci(
      thetahatstar = thetahatstar,
      thetahat = attributes(thetahatstar)$thetahat,
      theta = attributes(thetahatstar)$theta,
      alpha = c(0.001, 0.01, 0.05)
    )
  }
  wd <- getwd()
  setwd(dir)
  # ROBUSTNESS FIX: restore the caller's working directory even when an
  # error is raised below (previously it was only restored on success).
  # Restoring twice (here and explicitly before process()) is harmless.
  on.exit(setwd(wd), add = TRUE)
  fndata <- paste0(
    "medsimple_vm_mod_sem_mc.mvn_",
    sprintf(
      "%05.0f",
      taskid
    ),
    ".Rds"
  )
  if (file.exists(fndata)) {
    X <- readRDS(fndata)
  } else {
    stop(
      paste(
        fndata,
        "does not exist in",
        dir
      )
    )
  }
  out <- invisible(
    par_lapply(
      X = X,
      FUN = foo,
      par = FALSE, # should always be FALSE since this is wrapped around a parallel par_lapply
      blas_threads = FALSE, # should always be FALSE since this is wrapped around a parallel par_lapply
      rbind = TRUE
    )
  )
  # Restore the working directory before post-processing, as before.
  setwd(wd)
  process(
    taskid = taskid,
    out = out
  )
}
#' @author Ivan Jacob Agaloos Pesigan
#'
#' @title Monte Carlo Method Confidence Intervals Assuming Multivariate Normal Distribution for Indirect Effect in a Simple Mediation Model
#' for Data Generated Using the Vale and Maurelli (1983) Approach (Skewness = 2, Kurtosis = 7)
#' (Simulation)
#'
#' @family monte carlo method functions
#' @keywords mc
#' @importFrom jeksterslabRpar par_lapply
#' @inheritParams vm_mod_sem_mc.mvn_task
#' @inheritParams jeksterslabRpar::par_lapply
#' @inheritParams vm_mod_dat_simulation
#' @export
vm_mod_sem_mc.mvn_pcci_simulation <- function(dir = getwd(),
                                              all = TRUE,
                                              taskid = NULL,
                                              par = TRUE,
                                              ncores = NULL,
                                              blas_threads = TRUE,
                                              mc = TRUE,
                                              lb = FALSE,
                                              cl_eval = FALSE,
                                              cl_export = FALSE,
                                              cl_expr,
                                              cl_vars) {
  if (all) {
    # Run every task defined in the parameters table.
    # seq_len() is safe if the table is ever empty (1:0 would yield c(1, 0)).
    ncase <- nrow(jeksterslabRmedsimple::paramsmvn)
    taskid <- seq_len(ncase)
  } else {
    if (is.null(taskid)) {
      stop(
        "If \`all = FALSE\` \`taskid\` should be provided."
      )
    }
  }
  # Compute per-task confidence intervals, row-bound into one table.
  out <- invisible(
    par_lapply(
      X = taskid,
      FUN = vm_mod_sem_mc.mvn_pcci_task,
      dir = dir,
      par = par,
      ncores = ncores,
      blas_threads = blas_threads,
      mc = mc,
      lb = lb,
      cl_eval = cl_eval,
      cl_export = cl_export, # BUG FIX: was `cl_eval`, silently ignoring the `cl_export` argument
      cl_expr = cl_expr,
      cl_vars = cl_vars,
      rbind = TRUE
    )
  )
  # Label the combined results and persist the summary to disk.
  out <- label(
    out = out,
    method = "MC",
    model = "Simple mediation model",
    std = FALSE
  )
  fn <- "summary_medsimple_vm_mod_sem_mc.mvn_pcci.Rds"
  saveRDS(
    object = out,
    file = fn
  )
}
|
/R/vm_mod_complete_unstd_sem_mc.mvn.R
|
permissive
|
jeksterslabds/jeksterslabRmedsimple
|
R
| false
| false
| 8,765
|
r
|
#' @author Ivan Jacob Agaloos Pesigan
#'
#' @title Monte Carlo Method Assuming Multivariate Normal Distribution for Indirect Effect in a Simple Mediation Model
#' for Data Generated Using the Vale and Maurelli (1983) Approach (Skewness = 2, Kurtosis = 7)
#'
#' @family monte carlo method functions
#' @keywords mc
#' @inheritParams mc.mvn
#' @inheritParams useparamsmvn
#' @examples
#' taskid <- 1
#' data <- vm_mod_dat(taskid = taskid)
#' fit.sem.mlr(data = data, minimal = TRUE)
#'
#' fit <- vm_mod_fit.sem.mlr(data = data, taskid = taskid)
#' thetahatstar <- vm_mod_sem_mc.mvn(
#'   taskid = taskid, R = 20000L,
#'   alphahat = fit["alphahat"], sehatalphahat = fit["sehatalphahat"],
#'   betahat = fit["betahat"], sehatbetahat = fit["sehatbetahat"]
#' )
#' hist(thetahatstar)
#' @export
vm_mod_sem_mc.mvn <- function(taskid,
                              R = 20000L,
                              alphahat,
                              sehatalphahat,
                              betahat,
                              sehatbetahat) {
  # Look up the simulation parameters associated with this task id.
  params <- useparamsmvn(taskid = taskid)
  # Generate R Monte Carlo replications of the indirect effect estimate.
  thetahatstar <- mc.mvn(
    R = R,
    alphahat = alphahat,
    sehatalphahat = sehatalphahat,
    betahat = betahat,
    sehatbetahat = sehatbetahat
  )
  # Tag the draws with the task id, the population indirect effect,
  # and the sample estimate of the indirect effect (alphahat * betahat).
  attr(thetahatstar, "taskid") <- params$taskid
  attr(thetahatstar, "theta") <- params$alphabeta
  attr(thetahatstar, "thetahat") <- alphahat * betahat
  thetahatstar
}
#' @author Ivan Jacob Agaloos Pesigan
#'
#' @title Monte Carlo Method Assuming Multivariate Normal Distribution for Indirect Effect in a Simple Mediation Model
#' for Data Generated Using the Vale and Maurelli (1983) Approach (Skewness = 2, Kurtosis = 7)
#' (Single Task)
#'
#' @family monte carlo method functions
#' @keywords mc
#' @inheritParams vm_mod_dat_task
#' @return Invisibly returns \code{NULL}. Called for its side effect of
#'   writing the Monte Carlo sampling distributions for the task to disk.
#' @export
vm_mod_sem_mc.mvn_task <- function(taskid,
                                   dir = getwd(),
                                   overwrite = FALSE) {
  # for socks to load package in the namespace
  requireNamespace(
    "jeksterslabRmedsimple",
    quietly = TRUE
  )
  wd <- getwd()
  setwd(dir)
  # ROBUSTNESS FIX: restore the caller's working directory even when an
  # error is raised below (previously it was only restored on success).
  on.exit(setwd(wd), add = TRUE)
  # Input file: fitted model estimates for this task.
  fnest <- paste0(
    "medsimple_vm_mod_fit.sem.mlr_",
    sprintf(
      "%05.0f",
      taskid
    ),
    ".Rds"
  )
  # Output file: Monte Carlo sampling distributions for this task.
  fn <- paste0(
    "medsimple_vm_mod_sem_mc.mvn_",
    sprintf(
      "%05.0f",
      taskid
    ),
    ".Rds"
  )
  # Check if data exists --------------------------------------------------------
  if (file.exists(fnest)) {
    X <- readRDS(fnest)
  } else {
    stop(
      paste(
        fnest,
        "does not exist in",
        dir
      )
    )
  }
  # Resolve overwrite -----------------------------------------------------------
  if (overwrite) {
    run <- TRUE
  } else if (file.exists(fn)) {
    # Rerun only when the existing output cannot be read (e.g., corrupt file).
    # BUG FIX: the previous code did `run <- TRUE` inside the error handler,
    # which only created a handler-local variable and left the outer `run`
    # as FALSE; use the value returned by tryCatch() instead.
    run <- tryCatch(
      {
        readRDS(fn)
        FALSE
      },
      error = function(e) TRUE
    )
  } else {
    run <- TRUE
  }
  if (run) {
    # One Monte Carlo run per replication (row) of the estimates matrix.
    out <- invisible(
      mapply(
        FUN = vm_mod_sem_mc.mvn,
        taskid = X[, "taskid"],
        alphahat = X[, "alphahat"],
        sehatalphahat = X[, "sehatalphahat"],
        betahat = X[, "betahat"],
        sehatbetahat = X[, "sehatbetahat"],
        SIMPLIFY = FALSE
      )
    )
    saveRDS(
      object = out,
      file = fn
    )
  }
  invisible(NULL)
}
#' @author Ivan Jacob Agaloos Pesigan
#'
#' @title Monte Carlo Method Assuming Multivariate Normal Distribution for Indirect Effect in a Simple Mediation Model
#' for Data Generated Using the Vale and Maurelli (1983) Approach (Skewness = 2, Kurtosis = 7)
#' (Simulation)
#'
#' @family monte carlo method functions
#' @keywords mc
#' @importFrom jeksterslabRpar par_lapply
#' @inheritParams vm_mod_sem_mc.mvn_task
#' @inheritParams jeksterslabRpar::par_lapply
#' @inheritParams vm_mod_dat_simulation
#' @export
vm_mod_sem_mc.mvn_simulation <- function(dir = getwd(),
                                         all = TRUE,
                                         taskid = NULL,
                                         overwrite = FALSE,
                                         par = TRUE,
                                         ncores = NULL,
                                         blas_threads = TRUE,
                                         mc = TRUE,
                                         lb = FALSE,
                                         cl_eval = FALSE,
                                         cl_export = FALSE,
                                         cl_expr,
                                         cl_vars) {
  if (all) {
    # Run every task defined in the parameters table.
    # seq_len() is safe if the table is ever empty (1:0 would yield c(1, 0)).
    ncase <- nrow(jeksterslabRmedsimple::paramsmvn)
    taskid <- seq_len(ncase)
  } else {
    if (is.null(taskid)) {
      stop(
        "If \`all = FALSE\` \`taskid\` should be provided."
      )
    }
  }
  # Dispatch the per-task workers, possibly in parallel.
  out <- invisible(
    par_lapply(
      X = taskid,
      FUN = vm_mod_sem_mc.mvn_task,
      dir = dir,
      overwrite = overwrite,
      par = par,
      ncores = ncores,
      blas_threads = blas_threads,
      mc = mc,
      lb = lb,
      cl_eval = cl_eval,
      cl_export = cl_export, # BUG FIX: was `cl_eval`, silently ignoring the `cl_export` argument
      cl_expr = cl_expr,
      cl_vars = cl_vars,
      rbind = NULL
    )
  )
}
#' @author Ivan Jacob Agaloos Pesigan
#'
#' @title Monte Carlo Method Confidence Intervals Assuming Multivariate Normal Distribution for Indirect Effect in a Simple Mediation Model
#' for Data Generated Using the Vale and Maurelli (1983) Approach (Skewness = 2, Kurtosis = 7)
#' (Single Task)
#'
#' @family monte carlo method functions
#' @keywords mc
#' @inheritParams vm_mod_dat_task
#' @export
vm_mod_sem_mc.mvn_pcci_task <- function(taskid,
                                        dir = getwd()) {
  # for socks to load package in the namespace
  requireNamespace(
    "jeksterslabRmedsimple",
    quietly = TRUE
  )
  # Percentile confidence intervals (99.9%, 99%, 95%) for one
  # Monte Carlo sampling distribution.
  foo <- function(thetahatstar) {
    pcci(
      thetahatstar = thetahatstar,
      thetahat = attributes(thetahatstar)$thetahat,
      theta = attributes(thetahatstar)$theta,
      alpha = c(0.001, 0.01, 0.05)
    )
  }
  wd <- getwd()
  setwd(dir)
  # ROBUSTNESS FIX: restore the caller's working directory even when an
  # error is raised below (previously it was only restored on success).
  # Restoring twice (here and explicitly before process()) is harmless.
  on.exit(setwd(wd), add = TRUE)
  fndata <- paste0(
    "medsimple_vm_mod_sem_mc.mvn_",
    sprintf(
      "%05.0f",
      taskid
    ),
    ".Rds"
  )
  if (file.exists(fndata)) {
    X <- readRDS(fndata)
  } else {
    stop(
      paste(
        fndata,
        "does not exist in",
        dir
      )
    )
  }
  out <- invisible(
    par_lapply(
      X = X,
      FUN = foo,
      par = FALSE, # should always be FALSE since this is wrapped around a parallel par_lapply
      blas_threads = FALSE, # should always be FALSE since this is wrapped around a parallel par_lapply
      rbind = TRUE
    )
  )
  # Restore the working directory before post-processing, as before.
  setwd(wd)
  process(
    taskid = taskid,
    out = out
  )
}
#' @author Ivan Jacob Agaloos Pesigan
#'
#' @title Monte Carlo Method Confidence Intervals Assuming Multivariate Normal Distribution for Indirect Effect in a Simple Mediation Model
#' for Data Generated Using the Vale and Maurelli (1983) Approach (Skewness = 2, Kurtosis = 7)
#' (Simulation)
#'
#' @family monte carlo method functions
#' @keywords mc
#' @importFrom jeksterslabRpar par_lapply
#' @inheritParams vm_mod_sem_mc.mvn_task
#' @inheritParams jeksterslabRpar::par_lapply
#' @inheritParams vm_mod_dat_simulation
#' @export
vm_mod_sem_mc.mvn_pcci_simulation <- function(dir = getwd(),
                                              all = TRUE,
                                              taskid = NULL,
                                              par = TRUE,
                                              ncores = NULL,
                                              blas_threads = TRUE,
                                              mc = TRUE,
                                              lb = FALSE,
                                              cl_eval = FALSE,
                                              cl_export = FALSE,
                                              cl_expr,
                                              cl_vars) {
  if (all) {
    # Run every task defined in the parameters table.
    # seq_len() is safe if the table is ever empty (1:0 would yield c(1, 0)).
    ncase <- nrow(jeksterslabRmedsimple::paramsmvn)
    taskid <- seq_len(ncase)
  } else {
    if (is.null(taskid)) {
      stop(
        "If \`all = FALSE\` \`taskid\` should be provided."
      )
    }
  }
  # Compute per-task confidence intervals, row-bound into one table.
  out <- invisible(
    par_lapply(
      X = taskid,
      FUN = vm_mod_sem_mc.mvn_pcci_task,
      dir = dir,
      par = par,
      ncores = ncores,
      blas_threads = blas_threads,
      mc = mc,
      lb = lb,
      cl_eval = cl_eval,
      cl_export = cl_export, # BUG FIX: was `cl_eval`, silently ignoring the `cl_export` argument
      cl_expr = cl_expr,
      cl_vars = cl_vars,
      rbind = TRUE
    )
  )
  # Label the combined results and persist the summary to disk.
  out <- label(
    out = out,
    method = "MC",
    model = "Simple mediation model",
    std = FALSE
  )
  fn <- "summary_medsimple_vm_mod_sem_mc.mvn_pcci.Rds"
  saveRDS(
    object = out,
    file = fn
  )
}
|
# Clear workspace
rm(list = ls())
# Setup
################################################################################
# Packages
library(plyr)
library(dplyr)
library(RColorBrewer)
# Directories
datadir <- "/Users/cfree/Dropbox/Chris/Rutgers/projects/productivity/models/spmodel_tb1/output/fixed_effects"
plotdir <- "/Users/cfree/Dropbox/Chris/Rutgers/projects/productivity/models/spmodel_tb1/figures/fixed_effects"
# Read fixed effects results; keep only the wide results table.
load(paste(datadir, "ramldb_v3.8_pella_0.20_cobe_fixed.Rdata", sep="/"))
data_f <- results.wide
rm(hess, data, input.data, model, nstocks, stocks, output, params, problem.stocks, sd, results.df, results.wide)
# Read random effects results; keep only the wide results table.
load(paste(datadir, "ramldb_v3.8_pella_0.20_cobe_random_normal.Rdata", sep="/"))
data_r <- results.wide
rm(hess, data, input.data, model, nstocks, stocks, output, params, problem.stocks, sd, results.df, results.wide)
# Build data
################################################################################
# Merge fixed (_f) and random (_r) effect estimates by stock id.
data <- data_f %>%
  select(stockid, betaT, betaT_lo, betaT_hi, betaT_inf) %>%
  rename(betaT_f=betaT, betaT_lo_f=betaT_lo, betaT_hi_f=betaT_hi, betaT_inf_f=betaT_inf) %>%
  left_join(select(data_r, stockid, betaT, betaT_lo, betaT_hi, betaT_inf), by="stockid") %>%
  rename(betaT_r=betaT, betaT_lo_r=betaT_lo, betaT_hi_r=betaT_hi, betaT_inf_r=betaT_inf)
# Plot data
################################################################################
# Setup figure: one large scatterplot (panel 1) and two stacked histograms.
figname <- "Fig2_thetas_fixed_vs_random_effects.png"
png(paste(plotdir, figname, sep="/"), width=6.5, height=4, units="in", res=600)
# Use TRUE, not T: T is a reassignable variable and unsafe in scripts.
layout(matrix(c(1,2,
                1,3), ncol=2, byrow=TRUE), widths=c(0.6, 0.4))
par(mar=c(3.5,3.5,1,0.5), mgp=c(2.2,0.8,0))
# A. Scatterplot
#####################################
# Setup empty plot
plot(betaT_f ~ betaT_r, data, type="n", bty="n",
     xlim=c(-1.5, 1.5), ylim=c(-8,14), pch=16,
     xlab=expression("θ"["random"]), ylab=expression("θ"["fixed"]), yaxt="n")
axis(2, at=seq(-8,14,2), las=2)
lines(x=c(-1.5,1.5), y=c(0,0), lty=2)
lines(x=c(0,0), y=c(-8,10), lty=2)
# Add error bars (horizontal = random-effect CI, vertical = fixed-effect CI).
# seq_len() replaces 1:nrow(data), which would misbehave on an empty table.
for(i in seq_len(nrow(data))){lines(x=c(data$betaT_lo_r[i], data$betaT_hi_r[i]),
                                    y=c(data$betaT_f[i], data$betaT_f[i]), col="grey60", lwd=0.6)}
for(i in seq_len(nrow(data))){lines(x=c(data$betaT_r[i], data$betaT_r[i]),
                                    y=c(data$betaT_lo_f[i], data$betaT_hi_f[i]), col="grey60", lwd=0.6)}
# Add points
points(data$betaT_r, data$betaT_f, pch=16)
# B. Random effects histogram
#####################################
hist(data$betaT_r, breaks=seq(-4,8,0.5), col="grey60", border=FALSE, las=1,
     xlim=c(-4,8), xaxt="n", xlab=expression("θ"["random"]), main="")
axis(1, at=seq(-4,8,2))
# C. Fixed effects histogram
#####################################
hist(data$betaT_f, breaks=seq(-4,8,0.5), col="grey60", border=FALSE, las=1,
     xlim=c(-4,8), xaxt="n", xlab=expression("θ"["fixed"]), main="")
axis(1, at=seq(-4,8,2))
# Off
dev.off()
graphics.off()
|
/code/figures/fixed_effects/Fig2_thetas_fixed_vs_random_effects.R
|
no_license
|
cfree14/sst_productivity
|
R
| false
| false
| 3,027
|
r
|
# Clear workspace
rm(list = ls())
# Setup
################################################################################
# Packages
library(plyr)
library(dplyr)
library(RColorBrewer)
# Directories
datadir <- "/Users/cfree/Dropbox/Chris/Rutgers/projects/productivity/models/spmodel_tb1/output/fixed_effects"
plotdir <- "/Users/cfree/Dropbox/Chris/Rutgers/projects/productivity/models/spmodel_tb1/figures/fixed_effects"
# Read fixed effects results; keep only the wide results table.
load(paste(datadir, "ramldb_v3.8_pella_0.20_cobe_fixed.Rdata", sep="/"))
data_f <- results.wide
rm(hess, data, input.data, model, nstocks, stocks, output, params, problem.stocks, sd, results.df, results.wide)
# Read random effects results; keep only the wide results table.
load(paste(datadir, "ramldb_v3.8_pella_0.20_cobe_random_normal.Rdata", sep="/"))
data_r <- results.wide
rm(hess, data, input.data, model, nstocks, stocks, output, params, problem.stocks, sd, results.df, results.wide)
# Build data
################################################################################
# Merge fixed (_f) and random (_r) effect estimates by stock id.
data <- data_f %>%
  select(stockid, betaT, betaT_lo, betaT_hi, betaT_inf) %>%
  rename(betaT_f=betaT, betaT_lo_f=betaT_lo, betaT_hi_f=betaT_hi, betaT_inf_f=betaT_inf) %>%
  left_join(select(data_r, stockid, betaT, betaT_lo, betaT_hi, betaT_inf), by="stockid") %>%
  rename(betaT_r=betaT, betaT_lo_r=betaT_lo, betaT_hi_r=betaT_hi, betaT_inf_r=betaT_inf)
# Plot data
################################################################################
# Setup figure: one large scatterplot (panel 1) and two stacked histograms.
figname <- "Fig2_thetas_fixed_vs_random_effects.png"
png(paste(plotdir, figname, sep="/"), width=6.5, height=4, units="in", res=600)
# Use TRUE, not T: T is a reassignable variable and unsafe in scripts.
layout(matrix(c(1,2,
                1,3), ncol=2, byrow=TRUE), widths=c(0.6, 0.4))
par(mar=c(3.5,3.5,1,0.5), mgp=c(2.2,0.8,0))
# A. Scatterplot
#####################################
# Setup empty plot
plot(betaT_f ~ betaT_r, data, type="n", bty="n",
     xlim=c(-1.5, 1.5), ylim=c(-8,14), pch=16,
     xlab=expression("θ"["random"]), ylab=expression("θ"["fixed"]), yaxt="n")
axis(2, at=seq(-8,14,2), las=2)
lines(x=c(-1.5,1.5), y=c(0,0), lty=2)
lines(x=c(0,0), y=c(-8,10), lty=2)
# Add error bars (horizontal = random-effect CI, vertical = fixed-effect CI).
# seq_len() replaces 1:nrow(data), which would misbehave on an empty table.
for(i in seq_len(nrow(data))){lines(x=c(data$betaT_lo_r[i], data$betaT_hi_r[i]),
                                    y=c(data$betaT_f[i], data$betaT_f[i]), col="grey60", lwd=0.6)}
for(i in seq_len(nrow(data))){lines(x=c(data$betaT_r[i], data$betaT_r[i]),
                                    y=c(data$betaT_lo_f[i], data$betaT_hi_f[i]), col="grey60", lwd=0.6)}
# Add points
points(data$betaT_r, data$betaT_f, pch=16)
# B. Random effects histogram
#####################################
hist(data$betaT_r, breaks=seq(-4,8,0.5), col="grey60", border=FALSE, las=1,
     xlim=c(-4,8), xaxt="n", xlab=expression("θ"["random"]), main="")
axis(1, at=seq(-4,8,2))
# C. Fixed effects histogram
#####################################
hist(data$betaT_f, breaks=seq(-4,8,0.5), col="grey60", border=FALSE, las=1,
     xlim=c(-4,8), xaxt="n", xlab=expression("θ"["fixed"]), main="")
axis(1, at=seq(-4,8,2))
# Off
dev.off()
graphics.off()
|
\name{ft.connect}
\alias{ft.connect}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
A function to generate OAuth2 token for Google Fusion Tables access
}
\description{
Creates OAuth2 Token with app credentials and API scopes
}
\usage{
ft.connect(client_id, client_secret, api_scopes)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{client_id}{
Provide a valid Google AppEngine client id.
}
\item{client_secret}{
Provide a valid Google AppEngine client secret corresponding the the provided id.
}
\item{api_scopes}{
The API scopes for Fusion Tables V2, defaulting to the urls at time of package creation.
}
}
\details{
Users can register a free AppEngine app and create a client id/client secret for use with this package.
}
\value{
An authentication token that can be used to access the Fusion Tables V2 API. See the httr package for more information.
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
Thomas Johnson <thomascjohnson@gmail.com>
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
## The function itself:
> ft.connect
function(client_id,
client_secret,
api_scopes = c("https://www.googleapis.com/auth/fusiontables",
"https://www.googleapis.com/auth/fusiontables.readonly")) {
require(httr)
require(rjson)
app <- oauth_app("google", client_id, client_secret)
auth_key <- oauth2.0_token(oauth_endpoints("google"), app, api_scopes)
return(auth_key)
}
}
|
/rfusiontables/man/ft.connect.Rd
|
permissive
|
thomascjohnson/rfusiontables
|
R
| false
| false
| 1,783
|
rd
|
\name{ft.connect}
\alias{ft.connect}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
A function to generate OAuth2 token for Google Fusion Tables access
}
\description{
Creates OAuth2 Token with app credentials and API scopes
}
\usage{
ft.connect(client_id, client_secret, api_scopes)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{client_id}{
Provide a valid Google AppEngine client id.
}
\item{client_secret}{
Provide a valid Google AppEngine client secret corresponding the the provided id.
}
\item{api_scopes}{
The API scopes for Fusion Tables V2, defaulting to the urls at time of package creation.
}
}
\details{
Users can register a free AppEngine app and create a client id/client secret for use with this package.
}
\value{
An authentication token that can be used to access the Fusion Tables V2 API. See the httr package for more information.
%% ~Describe the value returned
%% If it is a LIST, use
%% \item{comp1 }{Description of 'comp1'}
%% \item{comp2 }{Description of 'comp2'}
%% ...
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
Thomas Johnson <thomascjohnson@gmail.com>
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
\examples{
## The function itself:
> ft.connect
function(client_id,
client_secret,
api_scopes = c("https://www.googleapis.com/auth/fusiontables",
"https://www.googleapis.com/auth/fusiontables.readonly")) {
require(httr)
require(rjson)
app <- oauth_app("google", client_id, client_secret)
auth_key <- oauth2.0_token(oauth_endpoints("google"), app, api_scopes)
return(auth_key)
}
}
|
# Fuzzer-generated regression input for diffrprojects:::dist_mat_absolute
# (libFuzzer/valgrind reproduction case). `x` is a scalar integer and `y`
# is an integer vector containing extreme and negative values chosen to
# probe for crashes or memory errors in the compiled routine.
testlist <- list(x = 65024L, y = c(1987475062L, 1987475062L, 1987475062L, 1987475062L, 1987475062L, 1987475062L, 1987475062L, 1987475062L, 1987475062L, 1987475062L, 1987475062L, 1987475062L, 1987475158L, -436216714L, 1987475062L, 1987475062L, 1987475062L, 1987475062L, 1996480511L, -702926875L, -539616721L, -11788545L, -42L))
# Invoke the internal (unexported) function with the captured arguments.
result <- do.call(diffrprojects:::dist_mat_absolute,testlist)
# Print the structure of the result for inspection in the fuzzing log.
str(result)
|
/diffrprojects/inst/testfiles/dist_mat_absolute/libFuzzer_dist_mat_absolute/dist_mat_absolute_valgrind_files/1609960565-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false
| false
| 404
|
r
|
# Fuzzer-generated regression input for diffrprojects:::dist_mat_absolute
# (libFuzzer/valgrind reproduction case). `x` is a scalar integer and `y`
# is an integer vector containing extreme and negative values chosen to
# probe for crashes or memory errors in the compiled routine.
testlist <- list(x = 65024L, y = c(1987475062L, 1987475062L, 1987475062L, 1987475062L, 1987475062L, 1987475062L, 1987475062L, 1987475062L, 1987475062L, 1987475062L, 1987475062L, 1987475062L, 1987475158L, -436216714L, 1987475062L, 1987475062L, 1987475062L, 1987475062L, 1996480511L, -702926875L, -539616721L, -11788545L, -42L))
# Invoke the internal (unexported) function with the captured arguments.
result <- do.call(diffrprojects:::dist_mat_absolute,testlist)
# Print the structure of the result for inspection in the fuzzing log.
str(result)
|
library(rvest)
library(dplyr)
library(stringr)
library(readr)
library(xml2)
library(purrr)
library(DescTools)
# Get list of state names and corresponding cities
# Get list of all states and cities with all zipcodes each in different columns
uscities <- read.csv("./data/working/uscities.csv")
# BUG FIX: this column list was a broken two-line comment whose second line
# was left as a bare string literal in the script; it is now fully commented.
# myuscities <- c("city","city_ascii","stateid","statename","county_fips","county",
#                 "lat","long","density","zipcode")
# Subsetting just 1 zip code per city
myuscities <- uscities[c(1,3:10)]
head(myuscities)
state_names <- myuscities$statename
city_names <- myuscities$city
# Fix and remove blank space to - for all states and cities
state_names <- str_replace_all(state_names, "\\[upper-alpha 3\\]", "")
state_names <- str_replace_all(state_names, "\\[upper-alpha 4\\]", "")
state_names <- str_replace_all(state_names, " ", "-")
city_names <- str_replace_all(city_names, "\\[upper-alpha 3\\]", "")
city_names <- str_replace_all(city_names, "\\[upper-alpha 4\\]", "")
city_names <- str_replace_all(city_names, " ", "-")
# Make state URLs
url_list <- str_c("https://broadbandnow.com/", state_names, "/", city_names)
#
#------------------------------------ Get broadband information at city level by state
#
# For each city page: read the URL, then
# 1) select the first table (city table), add state variable and remove empty column
# 2) (disabled) pull sparkline data, drop the class column, reverse column order
# 3) (disabled) cbind and save to a per-state frame_* data frame
# seq_along() replaces 1:length(url_list), which misbehaves on an empty vector.
for (val_i in seq_along(url_list)) {
  # Renamed from `summary` to avoid masking base::summary().
  page <- read_html(url_list[val_i])
  scrape1 <- page %>%
    html_table(fill = TRUE) %>%
    .[[1]] %>%
    mutate(state = str_to_lower(state_names[val_i])) %>%
    select(-`Coverage`)
  # scrape2 <- page %>%
  #   html_nodes(xpath = '//*[contains(concat( " ", @class, " " ), concat( " ", "speed-inlinesparkline", " " ))]') %>%
  #   map(xml_attrs) %>%
  #   map_df(~as.list(.)) %>%
  #   select(-class) %>%
  #   Rev(margin = 2)
  #
  # df <- cbind(scrape1, scrape2)
  # assign(paste("frame", str_to_lower(state_names[val_i]), sep = "_"), df)
}
# NOTE(review): the `assign()` that creates the `frame_*` objects is commented
# out above, so `dflist` below will be empty and `us_cities` will be NULL —
# confirm whether the assignment should be re-enabled.
# inherits() replaces class(x) == "data.frame", which breaks on multi-class
# objects such as tibbles.
dflist <- lapply(ls(pattern = "frame_"), function(x) if (inherits(get(x), "data.frame")) get(x))
us_cities <- do.call("rbind", dflist)
# Rename columns
datavars <- paste("data-m", 12:1, sep = "")
speedvars <- paste("speed", 12:1, sep = "")
oldnames <- c("City", "Broadband Coverage", "# of Providers", datavars)
newnames <- c("city", "coverage", "providernum", speedvars)
us_cities <- us_cities %>% rename_at(vars(oldnames), ~ newnames)
# Clean values (lowercase, unnecessary characters, fill spaces)
us_cities$coverage <- str_replace_all(us_cities$coverage, "%", "")
us_cities$providernum <- str_replace_all(us_cities$providernum, " providers", "")
us_cities$city <- str_replace_all(us_cities$city, " ", "-")
us_cities$city <- str_to_lower(us_cities$city)
# Convert from character to factor/numeric
tonumeric <- c("coverage", "providernum", speedvars)
us_cities[, tonumeric] <- sapply(us_cities[, tonumeric], as.numeric)
us_cities$state <- as.factor(us_cities$state)
#
#------------------------------------ Write out
#
write_csv(us_cities, path = "./data/working/bbnow_cities.csv", append = FALSE, col_names = TRUE)
#
#------------------------------------ Clean up workspace
#
remove(list = ls())
|
/src/02_bbnow_scrape_code.R
|
no_license
|
uva-bi-sdad/dspg19broadband
|
R
| false
| false
| 3,394
|
r
|
library(rvest)
library(dplyr)
library(stringr)
library(readr)
library(xml2)
library(purrr)
library(DescTools)
# Get list of state names and corresponding cities
# (one row per city; all zipcodes each in different columns)
uscities <- read.csv("./data/working/uscities.csv")
# BUG FIX: the second line of this commented-out vector was left uncommented in
# the original, which is a syntax error; both lines are now comments.
#myuscities <- c("city","city_ascii","stateid","statename","county_fips","county",
#                "lat","long","density","zipcode")
# Subsetting just 1 zip code per city
myuscities <- uscities[c(1, 3:10)]
head(myuscities)
state_names <- myuscities$statename
city_names <- myuscities$city
# Drop wiki-style footnote markers and replace blanks with "-" so the names
# can be embedded in broadbandnow.com URLs
state_names <- str_replace_all(state_names, "\\[upper-alpha 3\\]", "")
state_names <- str_replace_all(state_names, "\\[upper-alpha 4\\]", "")
state_names <- str_replace_all(state_names, " ", "-")
city_names <- str_replace_all(city_names, "\\[upper-alpha 3\\]", "")
city_names <- str_replace_all(city_names, "\\[upper-alpha 4\\]", "")
city_names <- str_replace_all(city_names, " ", "-")
# Build one URL per city: https://broadbandnow.com/<state>/<city>
url_list <- str_c("https://broadbandnow.com/", state_names, "/", city_names)
#
#------------------------------------ Get broadband information at city level by state
#
# For each city URL: read the page, then
# 1) scrape1: take the FIRST html table (city summary), add a lowercase state
#    column and drop the empty `Coverage` column
# 2) scrape2 (currently disabled): pull the sparkline attributes, drop the
#    class column, and reverse the sparkline columns to match the online order
# 3) cbind both scrapes and save as a per-state frame_* object
# NOTE(review): steps 2-3 are commented out, so as written the loop only builds
# `scrape1` and overwrites it each iteration without assigning any frame_*
# object -- confirm whether the commented block should be re-enabled.
for (val_i in seq_along(url_list)) {  # seq_along() is safe if url_list is empty
  summary <- read_html(url_list[val_i])
  scrape1 <- summary %>%
    html_table(fill = TRUE) %>%
    .[[1]] %>%
    mutate(state = str_to_lower(state_names[val_i])) %>%
    select(-`Coverage`)
  # scrape2 <- summary %>%
  # html_nodes(xpath = '//*[contains(concat( " ", @class, " " ), concat( " ", "speed-inlinesparkline", " " ))]') %>%
  # map(xml_attrs) %>%
  # map_df(~as.list(.)) %>%
  # select(-class) %>%
  # Rev(margin = 2)
  #
  # df <- cbind(scrape1, scrape2)
  # assign(paste("frame", str_to_lower(state_names[val_i]), sep = "_"), df)
}
# Collect every per-state dataframe produced by the scrape loop and stack them.
# inherits() is used instead of `class(x) == "data.frame"` because class() can
# return a vector of length > 1 (e.g. for tibbles), which breaks the `==` test.
dflist <- lapply(ls(pattern = "frame_"),
                 function(x) if (inherits(get(x), "data.frame")) get(x))
us_cities <- do.call("rbind", dflist)
# Rename columns: the sparkline data/speed columns are scraped newest-first,
# so the generated names count down from 12 to 1.
datavars <- paste("data-m", 12:1, sep = "")
speedvars <- paste("speed", 12:1, sep = "")
oldnames <- c("City", "Broadband Coverage", "# of Providers", datavars)
newnames <- c("city", "coverage", "providernum", speedvars)
us_cities <- us_cities %>% rename_at(vars(oldnames), ~ newnames)
# Clean values (strip unit suffixes, dash-fill spaces, lowercase city names)
us_cities$coverage <- str_replace_all(us_cities$coverage, "%", "")
us_cities$providernum <- str_replace_all(us_cities$providernum, " providers", "")
us_cities$city <- str_replace_all(us_cities$city, " ", "-")
us_cities$city <- str_to_lower(us_cities$city)
# Convert from character to factor/numeric
tonumeric <- c("coverage", "providernum", speedvars)
us_cities[, tonumeric] <- sapply(us_cities[, tonumeric], as.numeric)
us_cities$state <- as.factor(us_cities$state)
#
#------------------------------------ Write out
#
# NOTE(review): readr deprecated `path =` in favour of `file =`; kept for
# compatibility with the readr version this project pins.
write_csv(us_cities, path = "./data/working/bbnow_cities.csv", append = FALSE, col_names = TRUE)
#
#------------------------------------ Clean up workspace
#
remove(list = ls())
|
\name{data.dcm}
\alias{data.dcm}
\docType{data}
\title{
Dataset from Book 'Diagnostic Measurement' of Rupp, Templin and
Henson (2010)
}
\description{
Dataset from Chapter 9 of the book 'Diagnostic Measurement'
(Rupp, Templin & Henson, 2010).
}
\usage{
data(data.dcm)
}
\format{
The format of the data is a list containing the dichotomous item
response data \code{data} (10000 persons at 7 items)
and the Q-matrix \code{q.matrix} (7 items and 3 skills):
\code{List of 2} \cr
\code{ $ data :'data.frame':} \cr
\code{ ..$ id: int [1:10000] 1 2 3 4 5 6 7 8 9 10 ...} \cr
\code{ ..$ D1: num [1:10000] 0 0 0 0 1 0 1 0 0 1 ...} \cr
\code{ ..$ D2: num [1:10000] 0 0 0 0 0 1 1 1 0 1 ...} \cr
\code{ ..$ D3: num [1:10000] 1 0 1 0 1 1 0 0 0 1 ...} \cr
\code{ ..$ D4: num [1:10000] 0 0 1 0 0 1 1 1 0 0 ...} \cr
\code{ ..$ D5: num [1:10000] 1 0 0 0 1 1 1 0 1 0 ...} \cr
\code{ ..$ D6: num [1:10000] 0 0 0 0 1 1 1 0 0 1 ...} \cr
\code{ ..$ D7: num [1:10000] 0 0 0 0 0 1 1 0 1 1 ...} \cr
\code{ $ q.matrix: num [1:7, 1:3] 1 0 0 1 1 0 1 0 1 0 ...} \cr
\code{ ..- attr(*, "dimnames")=List of 2} \cr
\code{ .. ..$ : chr [1:7] "D1" "D2" "D3" "D4" ...} \cr
\code{ .. ..$ : chr [1:3] "skill1" "skill2" "skill3"} \cr
}
%\details{
%% ~~ If necessary, more details than the __description__ above ~~
%}
\source{
For supplementary material of the Rupp, Templin and Henson book (2010)
see \url{http://dcm.coe.uga.edu/}.
The dataset was downloaded from
\url{http://dcm.coe.uga.edu/supplemental/chapter9.html}.
}
\references{
Rupp, A. A., Templin, J., & Henson, R. A. (2010). \emph{Diagnostic
Measurement: Theory, Methods, and Applications}. New York: The Guilford
Press.
}
\examples{
\dontrun{
data(data.dcm)
#*****************************************************
# Model 1: DINA model
#*****************************************************
mod1 <- CDM::din( data.dcm$data[,-1] , q.matrix=data.dcm$q.matrix )
summary(mod1)
#--------
# Model 1m: estimate model in mirt package
library(mirt)
library(sirt)
dat <- data.dcm$data[,-1]
Q <- data.dcm$q.matrix
#** define theta grid of skills
# use the function skillspace.hierarchy just for convenience
hier <- "skill1 > skill2"
skillspace <- CDM::skillspace.hierarchy( hier , skill.names= colnames(Q) )
Theta <- as.matrix(skillspace$skillspace.complete)
#** create mirt model
mirtmodel <- mirt::mirt.model("
skill1 = 1
skill2 = 2
skill3 = 3
(skill1*skill2) = 4
(skill1*skill3) = 5
(skill2*skill3) = 6
(skill1*skill2*skill3) = 7
" )
#** mirt parameter table
mod.pars <- mirt::mirt( dat , mirtmodel , pars="values")
# use starting values of .20 for guessing parameter
ind <- which( mod.pars$name == "d" )
mod.pars[ind,"value"] <- qlogis(.20) # guessing parameter on the logit metric
# use starting values of .80 for anti-slipping parameter
ind <- which( ( mod.pars$name \%in\% paste0("a",1:20 ) ) & (mod.pars$est) )
mod.pars[ind,"value"] <- qlogis(.80) - qlogis(.20)
mod.pars
#** prior for the skill space distribution
I <- ncol(dat)
lca_prior <- function(Theta,Etable){
TP <- nrow(Theta)
if ( is.null(Etable) ){ prior <- rep( 1/TP , TP ) }
if ( ! is.null(Etable) ){
prior <- ( rowSums(Etable[ , seq(1,2*I,2)]) + rowSums(Etable[,seq(2,2*I,2)]) )/I
}
prior <- prior / sum(prior)
return(prior)
}
#** estimate model in mirt
mod1m <- mirt::mirt(dat, mirtmodel , pars = mod.pars , verbose=TRUE ,
technical = list( customTheta=Theta , customPriorFun = lca_prior) )
# The number of estimated parameters is incorrect because mirt does not correctly count
# estimated parameters from the user customized prior distribution.
mod1m@nest <- as.integer(sum(mod.pars$est) + nrow(Theta) - 1)
# extract log-likelihood
mod1m@logLik
# compute AIC and BIC
( AIC <- -2*mod1m@logLik+2*mod1m@nest )
( BIC <- -2*mod1m@logLik+log(mod1m@Data$N)*mod1m@nest )
#** extract item parameters
cmod1m <- sirt::mirt.wrapper.coef(mod1m)$coef
# compare estimated guessing and slipping parameters
dfr <- data.frame( "din.guess"=mod1$guess$est ,
"mirt.guess"=plogis(cmod1m$d), "din.slip"=mod1$slip$est ,
"mirt.slip"= 1-plogis( rowSums( cmod1m[ , c("d", paste0("a",1:7) ) ] ) )
)
round(t(dfr),3)
## [,1] [,2] [,3] [,4] [,5] [,6] [,7]
## din.guess 0.217 0.193 0.189 0.135 0.143 0.135 0.162
## mirt.guess 0.226 0.189 0.184 0.132 0.142 0.132 0.158
## din.slip 0.338 0.331 0.334 0.220 0.222 0.211 0.042
## mirt.slip 0.339 0.333 0.336 0.223 0.225 0.214 0.044
# compare estimated skill class distribution
dfr <- data.frame("din"= mod1$attribute.patt$class.prob ,
"mirt"= mod1m@Prior[[1]] )
round(t(dfr),3)
## [,1] [,2] [,3] [,4] [,5] [,6] [,7] [,8]
## din 0.113 0.083 0.094 0.092 0.064 0.059 0.065 0.429
## mirt 0.116 0.074 0.095 0.064 0.095 0.058 0.066 0.433
#** extract estimated classifications
fsc1m <- sirt::mirt.wrapper.fscores( mod1m )
#- estimated reliabilities
fsc1m$EAP.rel
## skill1 skill2 skill3
## 0.5479942 0.5362595 0.5357961
#- estimated classifications: EAPs, MLEs and MAPs
head( round(fsc1m$person,3) )
## case M EAP.skill1 SE.EAP.skill1 EAP.skill2 SE.EAP.skill2 EAP.skill3 SE.EAP.skill3
## 1 1 0.286 0.508 0.500 0.067 0.251 0.820 0.384
## 2 2 0.000 0.162 0.369 0.191 0.393 0.190 0.392
## 3 3 0.286 0.200 0.400 0.211 0.408 0.607 0.489
## 4 4 0.000 0.162 0.369 0.191 0.393 0.190 0.392
## 5 5 0.571 0.802 0.398 0.267 0.443 0.928 0.258
## 6 6 0.857 0.998 0.045 1.000 0.019 1.000 0.020
## MLE.skill1 MLE.skill2 MLE.skill3 MAP.skill1 MAP.skill2 MAP.skill3
## 1 1 0 1 1 0 1
## 2 0 0 0 0 0 0
## 3 0 0 1 0 0 1
## 4 0 0 0 0 0 0
## 5 1 0 1 1 0 1
## 6 1 1 1 1 1 1
#** estimate model fit in mirt
( fit1m <- mirt::M2( mod1m ) )
#*****************************************************
# Model 2: DINO model
#*****************************************************
mod2 <- CDM::din( data.dcm$data[,-1] , q.matrix=data.dcm$q.matrix , rule="DINO")
summary(mod2)
#*****************************************************
# Model 3: log-linear model (LCDM): this model is the GDINA model with the
# logit link function
#*****************************************************
mod3 <- CDM::gdina( data.dcm$data[,-1] , q.matrix=data.dcm$q.matrix , link="logit")
summary(mod3)
#*****************************************************
# Model 4: GDINA model with identity link function
#*****************************************************
mod4 <- CDM::gdina( data.dcm$data[,-1] , q.matrix=data.dcm$q.matrix )
summary(mod4)
#*****************************************************
# Model 5: GDINA additive model identity link function
#*****************************************************
mod5 <- CDM::gdina( data.dcm$data[,-1], q.matrix=data.dcm$q.matrix , rule="ACDM")
summary(mod5)
#*****************************************************
# Model 6: GDINA additive model logit link function
#*****************************************************
mod6 <- CDM::gdina( data.dcm$data[,-1], q.matrix=data.dcm$q.matrix, link="logit", rule="ACDM")
summary(mod6)
#--------
# Model 6m: GDINA additive model in mirt package
# use data specifications from Model 1m)
#** create mirt model
mirtmodel <- mirt::mirt.model("
skill1 = 1,4,5,7
skill2 = 2,4,6,7
skill3 = 3,5,6,7
" )
#** mirt parameter table
mod.pars <- mirt::mirt( dat , mirtmodel , pars="values")
#** estimate model in mirt
# Theta and lca_prior as defined as in Model 1m
mod6m <- mirt::mirt(dat, mirtmodel , pars = mod.pars , verbose=TRUE ,
technical = list( customTheta=Theta , customPriorFun = lca_prior) )
mod6m@nest <- as.integer(sum(mod.pars$est) + nrow(Theta) - 1)
# extract log-likelihood
mod6m@logLik
# compute AIC and BIC
( AIC <- -2*mod6m@logLik+2*mod6m@nest )
( BIC <- -2*mod6m@logLik+log(mod6m@Data$N)*mod6m@nest )
#** skill distribution
mod6m@Prior[[1]]
#** extract item parameters
cmod6m <- mirt.wrapper.coef(mod6m)$coef
print(cmod6m,digits=4)
## item a1 a2 a3 d g u
## 1 D1 1.882 0.000 0.000 -0.9330 0 1
## 2 D2 0.000 2.049 0.000 -1.0430 0 1
## 3 D3 0.000 0.000 2.028 -0.9915 0 1
## 4 D4 2.697 2.525 0.000 -2.9925 0 1
## 5 D5 2.524 0.000 2.478 -2.7863 0 1
## 6 D6 0.000 2.818 2.791 -3.1324 0 1
## 7 D7 3.113 2.918 2.785 -4.2794 0 1
#*****************************************************
# Model 7: Reduced RUM model
#*****************************************************
mod7 <- CDM::gdina( data.dcm$data[,-1] , q.matrix=data.dcm$q.matrix , rule="RRUM")
summary(mod7)
#*****************************************************
# Model 8: latent class model with 3 classes and 4 sets of starting values
#*****************************************************
#-- Model 8a: randomLCA package
library(randomLCA)
mod8a <- randomLCA::randomLCA( data.dcm$data[,-1], nclass= 3 , verbose=TRUE , notrials=4)
#-- Model8b: rasch.mirtlc function in sirt package
library(sirt)
mod8b <- sirt::rasch.mirtlc( data.dcm$data[,-1] , Nclasses=3 , nstarts=4 )
summary(mod8a)
summary(mod8b)
}
}
\keyword{datasets}
|
/man/data.dcm.Rd
|
no_license
|
parksejin/CDM
|
R
| false
| false
| 10,087
|
rd
|
\name{data.dcm}
\alias{data.dcm}
\docType{data}
\title{
Dataset from Book 'Diagnostic Measurement' of Rupp, Templin and
Henson (2010)
}
\description{
Dataset from Chapter 9 of the book 'Diagnostic Measurement'
(Rupp, Templin & Henson, 2010).
}
\usage{
data(data.dcm)
}
\format{
The format of the data is a list containing the dichotomous item
response data \code{data} (10000 persons at 7 items)
and the Q-matrix \code{q.matrix} (7 items and 3 skills):
\code{List of 2} \cr
\code{ $ data :'data.frame':} \cr
\code{ ..$ id: int [1:10000] 1 2 3 4 5 6 7 8 9 10 ...} \cr
\code{ ..$ D1: num [1:10000] 0 0 0 0 1 0 1 0 0 1 ...} \cr
\code{ ..$ D2: num [1:10000] 0 0 0 0 0 1 1 1 0 1 ...} \cr
\code{ ..$ D3: num [1:10000] 1 0 1 0 1 1 0 0 0 1 ...} \cr
\code{ ..$ D4: num [1:10000] 0 0 1 0 0 1 1 1 0 0 ...} \cr
\code{ ..$ D5: num [1:10000] 1 0 0 0 1 1 1 0 1 0 ...} \cr
\code{ ..$ D6: num [1:10000] 0 0 0 0 1 1 1 0 0 1 ...} \cr
\code{ ..$ D7: num [1:10000] 0 0 0 0 0 1 1 0 1 1 ...} \cr
\code{ $ q.matrix: num [1:7, 1:3] 1 0 0 1 1 0 1 0 1 0 ...} \cr
\code{ ..- attr(*, "dimnames")=List of 2} \cr
\code{ .. ..$ : chr [1:7] "D1" "D2" "D3" "D4" ...} \cr
\code{ .. ..$ : chr [1:3] "skill1" "skill2" "skill3"} \cr
}
%\details{
%% ~~ If necessary, more details than the __description__ above ~~
%}
\source{
For supplementary material of the Rupp, Templin and Henson book (2010)
see \url{http://dcm.coe.uga.edu/}.
The dataset was downloaded from
\url{http://dcm.coe.uga.edu/supplemental/chapter9.html}.
}
\references{
Rupp, A. A., Templin, J., & Henson, R. A. (2010). \emph{Diagnostic
Measurement: Theory, Methods, and Applications}. New York: The Guilford
Press.
}
\examples{
\dontrun{
data(data.dcm)
#*****************************************************
# Model 1: DINA model
#*****************************************************
mod1 <- CDM::din( data.dcm$data[,-1] , q.matrix=data.dcm$q.matrix )
summary(mod1)
#--------
# Model 1m: estimate model in mirt package
library(mirt)
library(sirt)
dat <- data.dcm$data[,-1]
Q <- data.dcm$q.matrix
#** define theta grid of skills
# use the function skillspace.hierarchy just for convenience
hier <- "skill1 > skill2"
skillspace <- CDM::skillspace.hierarchy( hier , skill.names= colnames(Q) )
Theta <- as.matrix(skillspace$skillspace.complete)
#** create mirt model
mirtmodel <- mirt::mirt.model("
skill1 = 1
skill2 = 2
skill3 = 3
(skill1*skill2) = 4
(skill1*skill3) = 5
(skill2*skill3) = 6
(skill1*skill2*skill3) = 7
" )
#** mirt parameter table
mod.pars <- mirt::mirt( dat , mirtmodel , pars="values")
# use starting values of .20 for guessing parameter
ind <- which( mod.pars$name == "d" )
mod.pars[ind,"value"] <- qlogis(.20) # guessing parameter on the logit metric
# use starting values of .80 for anti-slipping parameter
ind <- which( ( mod.pars$name \%in\% paste0("a",1:20 ) ) & (mod.pars$est) )
mod.pars[ind,"value"] <- qlogis(.80) - qlogis(.20)
mod.pars
#** prior for the skill space distribution
I <- ncol(dat)
lca_prior <- function(Theta,Etable){
TP <- nrow(Theta)
if ( is.null(Etable) ){ prior <- rep( 1/TP , TP ) }
if ( ! is.null(Etable) ){
prior <- ( rowSums(Etable[ , seq(1,2*I,2)]) + rowSums(Etable[,seq(2,2*I,2)]) )/I
}
prior <- prior / sum(prior)
return(prior)
}
#** estimate model in mirt
mod1m <- mirt::mirt(dat, mirtmodel , pars = mod.pars , verbose=TRUE ,
technical = list( customTheta=Theta , customPriorFun = lca_prior) )
# The number of estimated parameters is incorrect because mirt does not correctly count
# estimated parameters from the user customized prior distribution.
mod1m@nest <- as.integer(sum(mod.pars$est) + nrow(Theta) - 1)
# extract log-likelihood
mod1m@logLik
# compute AIC and BIC
( AIC <- -2*mod1m@logLik+2*mod1m@nest )
( BIC <- -2*mod1m@logLik+log(mod1m@Data$N)*mod1m@nest )
#** extract item parameters
cmod1m <- sirt::mirt.wrapper.coef(mod1m)$coef
# compare estimated guessing and slipping parameters
dfr <- data.frame( "din.guess"=mod1$guess$est ,
"mirt.guess"=plogis(cmod1m$d), "din.slip"=mod1$slip$est ,
"mirt.slip"= 1-plogis( rowSums( cmod1m[ , c("d", paste0("a",1:7) ) ] ) )
)
round(t(dfr),3)
## [,1] [,2] [,3] [,4] [,5] [,6] [,7]
## din.guess 0.217 0.193 0.189 0.135 0.143 0.135 0.162
## mirt.guess 0.226 0.189 0.184 0.132 0.142 0.132 0.158
## din.slip 0.338 0.331 0.334 0.220 0.222 0.211 0.042
## mirt.slip 0.339 0.333 0.336 0.223 0.225 0.214 0.044
# compare estimated skill class distribution
dfr <- data.frame("din"= mod1$attribute.patt$class.prob ,
"mirt"= mod1m@Prior[[1]] )
round(t(dfr),3)
## [,1] [,2] [,3] [,4] [,5] [,6] [,7] [,8]
## din 0.113 0.083 0.094 0.092 0.064 0.059 0.065 0.429
## mirt 0.116 0.074 0.095 0.064 0.095 0.058 0.066 0.433
#** extract estimated classifications
fsc1m <- sirt::mirt.wrapper.fscores( mod1m )
#- estimated reliabilities
fsc1m$EAP.rel
## skill1 skill2 skill3
## 0.5479942 0.5362595 0.5357961
#- estimated classifications: EAPs, MLEs and MAPs
head( round(fsc1m$person,3) )
## case M EAP.skill1 SE.EAP.skill1 EAP.skill2 SE.EAP.skill2 EAP.skill3 SE.EAP.skill3
## 1 1 0.286 0.508 0.500 0.067 0.251 0.820 0.384
## 2 2 0.000 0.162 0.369 0.191 0.393 0.190 0.392
## 3 3 0.286 0.200 0.400 0.211 0.408 0.607 0.489
## 4 4 0.000 0.162 0.369 0.191 0.393 0.190 0.392
## 5 5 0.571 0.802 0.398 0.267 0.443 0.928 0.258
## 6 6 0.857 0.998 0.045 1.000 0.019 1.000 0.020
## MLE.skill1 MLE.skill2 MLE.skill3 MAP.skill1 MAP.skill2 MAP.skill3
## 1 1 0 1 1 0 1
## 2 0 0 0 0 0 0
## 3 0 0 1 0 0 1
## 4 0 0 0 0 0 0
## 5 1 0 1 1 0 1
## 6 1 1 1 1 1 1
#** estimate model fit in mirt
( fit1m <- mirt::M2( mod1m ) )
#*****************************************************
# Model 2: DINO model
#*****************************************************
mod2 <- CDM::din( data.dcm$data[,-1] , q.matrix=data.dcm$q.matrix , rule="DINO")
summary(mod2)
#*****************************************************
# Model 3: log-linear model (LCDM): this model is the GDINA model with the
# logit link function
#*****************************************************
mod3 <- CDM::gdina( data.dcm$data[,-1] , q.matrix=data.dcm$q.matrix , link="logit")
summary(mod3)
#*****************************************************
# Model 4: GDINA model with identity link function
#*****************************************************
mod4 <- CDM::gdina( data.dcm$data[,-1] , q.matrix=data.dcm$q.matrix )
summary(mod4)
#*****************************************************
# Model 5: GDINA additive model identity link function
#*****************************************************
mod5 <- CDM::gdina( data.dcm$data[,-1], q.matrix=data.dcm$q.matrix , rule="ACDM")
summary(mod5)
#*****************************************************
# Model 6: GDINA additive model logit link function
#*****************************************************
mod6 <- CDM::gdina( data.dcm$data[,-1], q.matrix=data.dcm$q.matrix, link="logit", rule="ACDM")
summary(mod6)
#--------
# Model 6m: GDINA additive model in mirt package
# use data specifications from Model 1m)
#** create mirt model
mirtmodel <- mirt::mirt.model("
skill1 = 1,4,5,7
skill2 = 2,4,6,7
skill3 = 3,5,6,7
" )
#** mirt parameter table
mod.pars <- mirt::mirt( dat , mirtmodel , pars="values")
#** estimate model in mirt
# Theta and lca_prior as defined as in Model 1m
mod6m <- mirt::mirt(dat, mirtmodel , pars = mod.pars , verbose=TRUE ,
technical = list( customTheta=Theta , customPriorFun = lca_prior) )
mod6m@nest <- as.integer(sum(mod.pars$est) + nrow(Theta) - 1)
# extract log-likelihood
mod6m@logLik
# compute AIC and BIC
( AIC <- -2*mod6m@logLik+2*mod6m@nest )
( BIC <- -2*mod6m@logLik+log(mod6m@Data$N)*mod6m@nest )
#** skill distribution
mod6m@Prior[[1]]
#** extract item parameters
cmod6m <- mirt.wrapper.coef(mod6m)$coef
print(cmod6m,digits=4)
## item a1 a2 a3 d g u
## 1 D1 1.882 0.000 0.000 -0.9330 0 1
## 2 D2 0.000 2.049 0.000 -1.0430 0 1
## 3 D3 0.000 0.000 2.028 -0.9915 0 1
## 4 D4 2.697 2.525 0.000 -2.9925 0 1
## 5 D5 2.524 0.000 2.478 -2.7863 0 1
## 6 D6 0.000 2.818 2.791 -3.1324 0 1
## 7 D7 3.113 2.918 2.785 -4.2794 0 1
#*****************************************************
# Model 7: Reduced RUM model
#*****************************************************
mod7 <- CDM::gdina( data.dcm$data[,-1] , q.matrix=data.dcm$q.matrix , rule="RRUM")
summary(mod7)
#*****************************************************
# Model 8: latent class model with 3 classes and 4 sets of starting values
#*****************************************************
#-- Model 8a: randomLCA package
library(randomLCA)
mod8a <- randomLCA::randomLCA( data.dcm$data[,-1], nclass= 3 , verbose=TRUE , notrials=4)
#-- Model8b: rasch.mirtlc function in sirt package
library(sirt)
mod8b <- sirt::rasch.mirtlc( data.dcm$data[,-1] , Nclasses=3 , nstarts=4 )
summary(mod8a)
summary(mod8b)
}
}
\keyword{datasets}
|
#' Permute explanatory variables to produce multiple output tables for common
#' regression models
#'
#' @param .data Data frame or tibble.
#' @param dependent Character vector of length 1: quoted name of dependent
#' variable. Can be continuous, a binary factor, or a survival object of form
#' \code{Surv(time, status)}.
#' @param explanatory_base Character vector of any length: quoted name(s) of
#' base model explanatory variables.
#' @param explanatory_permute Character vector of any length: quoted name(s) of
#' explanatory variables to permute through models.
#' @param multiple_tables Logical. Multiple model tables as a list, or a single
#' table including multiple models.
#' @param include_base_model Logical. Include model using \code{explanatory_base}
#' variables only.
#' @param include_full_model Logical. Include model using all \code{explanatory_base}
#' and \code{explanatory_permute} variables.
#' @param base_on_top Logical. Base variables at top of table, or bottom of
#' table.
#' @param ... Other arguments to \code{\link{finalfit}}
#'
#' @return If \code{multiple_tables = TRUE}, a list of model tables (one data
#' frame per model); otherwise a single data frame combining all models.
#' @export
#'
#' @examples
#' explanatory_base = c("age.factor", "sex.factor")
#' explanatory_permute = c("obstruct.factor", "perfor.factor", "node4.factor")
#'
#' # Linear regression
#' colon_s %>%
#'   finalfit_permute("nodes", explanatory_base, explanatory_permute)
#'
#' # Cox proportional hazards regression
#' colon_s %>%
#'   finalfit_permute("Surv(time, status)", explanatory_base, explanatory_permute)
#'
#' # Logistic regression
#' colon_s %>%
#'   finalfit_permute("mort_5yr", explanatory_base, explanatory_permute)
#'
#' # Logistic regression with random effect (glmer)
#' # colon_s %>%
#' #   finalfit_permute("mort_5yr", explanatory_base, explanatory_permute,
#' #     random_effect = "hospital")
ff_permute <- function(.data, dependent = NULL,
explanatory_base = NULL, explanatory_permute = NULL,
multiple_tables = FALSE,
include_base_model = TRUE,
include_full_model = TRUE,
base_on_top = TRUE, ...){
# Capture extra finalfit() arguments so they can be forwarded to each model fit
args = list(...)
# Build one explanatory set per permuted variable, with the base variables
# placed first (base_on_top) or last.
if(base_on_top){
explanatory = explanatory_permute %>%
purrr::map(~ c(explanatory_base, .x))
} else {
explanatory = explanatory_permute %>%
purrr::map(c, explanatory_base)
}
if(include_base_model){
explanatory = c(list(explanatory_base), explanatory)
}
# Fit one finalfit model per explanatory set; keep_fit_id links rows for
# the table joins below.
fits = explanatory %>%
purrr::map(~ do.call(finalfit, c(list(.data, dependent, explanatory = .x, keep_fit_id = TRUE),
args)))
# Flat variable list used for the full model and the final row order
if(base_on_top){
explanatory = c(explanatory_base, explanatory_permute)
} else {
explanatory = c(explanatory_permute, explanatory_base)
}
if(include_full_model){
fits = c(fits,
list(
finalfit(.data, dependent, explanatory, keep_fit_id = TRUE, ...)
)
)
}
# Multiple tables ----
# Return a list of tables, one per model, with the internal fit_id removed
if(multiple_tables){
out = fits %>%
purrr::map(dplyr::select, -fit_id)
return(out)
}
# Single table ----
# Model over all variables supplies the variable/level rows; its final
# (multivariable) column is dropped and the per-model columns are joined on.
uni = finalfit(.data, dependent, explanatory, keep_fit_id = TRUE,
add_dependent_label = FALSE, ...) %>%
dplyr::select(-length(.)) # remove last column
## multivariable only
fits = fits %>%
purrr::map(dplyr::select, c(1, length(.[[1]]))) # first and last columns
## number of models
n_fits = 1:length(fits)
## paste incremental integer to model name (so column names stay unique)
fits = fits %>%
purrr::map(~ names(.x)[2]) %>%
purrr::map2(n_fits, ~ paste(.x, .y)) %>%
purrr::map2(fits, ~ dplyr::rename(.y, !!.x := 2))
## create final table: join models on fit_id, "-" for variables absent
## from a given model, then drop the id and add the dependent label
out = fits %>%
purrr::reduce(dplyr::full_join, by = "fit_id") %>%
dplyr::left_join(uni, ., by = "fit_id") %>%
dplyr::mutate_all(~ ifelse(is.na(.), "-", .)) %>%
dplyr::select(-fit_id) %>%
dependent_label(.data = .data, dependent = dependent)
return(out)
}
#' @rdname ff_permute
#' @export
# finalfit_permute is an alias of ff_permute
finalfit_permute = ff_permute
|
/R/ff_permute.R
|
no_license
|
muathalmoslem/finalfit
|
R
| false
| false
| 3,892
|
r
|
#' Permute explanatory variables to produce multiple output tables for common
#' regression models
#'
#' @param .data Data frame or tibble.
#' @param dependent Character vector of length 1: quoted name of dependent
#' variable. Can be continuous, a binary factor, or a survival object of form
#' \code{Surv(time, status)}.
#' @param explanatory_base Character vector of any length: quoted name(s) of
#' base model explanatory variables.
#' @param explanatory_permute Character vector of any length: quoted name(s) of
#' explanatory variables to permute through models.
#' @param multiple_tables Logical. Multiple model tables as a list, or a single
#' table including multiple models.
#' @param include_base_model Logical. Include model using \code{explanatory_base}
#' variables only.
#' @param include_full_model Logical. Include model using all \code{explanatory_base}
#' and \code{explanatory_permute} variables.
#' @param base_on_top Logical. Base variables at top of table, or bottom of
#' table.
#' @param ... Other arguments to \code{\link{finalfit}}
#'
#' @return If \code{multiple_tables = TRUE}, a list of model tables (one data
#' frame per model); otherwise a single data frame combining all models.
#' @export
#'
#' @examples
#' explanatory_base = c("age.factor", "sex.factor")
#' explanatory_permute = c("obstruct.factor", "perfor.factor", "node4.factor")
#'
#' # Linear regression
#' colon_s %>%
#'   finalfit_permute("nodes", explanatory_base, explanatory_permute)
#'
#' # Cox proportional hazards regression
#' colon_s %>%
#'   finalfit_permute("Surv(time, status)", explanatory_base, explanatory_permute)
#'
#' # Logistic regression
#' colon_s %>%
#'   finalfit_permute("mort_5yr", explanatory_base, explanatory_permute)
#'
#' # Logistic regression with random effect (glmer)
#' # colon_s %>%
#' #   finalfit_permute("mort_5yr", explanatory_base, explanatory_permute,
#' #     random_effect = "hospital")
ff_permute <- function(.data, dependent = NULL,
explanatory_base = NULL, explanatory_permute = NULL,
multiple_tables = FALSE,
include_base_model = TRUE,
include_full_model = TRUE,
base_on_top = TRUE, ...){
# Capture extra finalfit() arguments so they can be forwarded to each model fit
args = list(...)
# Build one explanatory set per permuted variable, with the base variables
# placed first (base_on_top) or last.
if(base_on_top){
explanatory = explanatory_permute %>%
purrr::map(~ c(explanatory_base, .x))
} else {
explanatory = explanatory_permute %>%
purrr::map(c, explanatory_base)
}
if(include_base_model){
explanatory = c(list(explanatory_base), explanatory)
}
# Fit one finalfit model per explanatory set; keep_fit_id links rows for
# the table joins below.
fits = explanatory %>%
purrr::map(~ do.call(finalfit, c(list(.data, dependent, explanatory = .x, keep_fit_id = TRUE),
args)))
# Flat variable list used for the full model and the final row order
if(base_on_top){
explanatory = c(explanatory_base, explanatory_permute)
} else {
explanatory = c(explanatory_permute, explanatory_base)
}
if(include_full_model){
fits = c(fits,
list(
finalfit(.data, dependent, explanatory, keep_fit_id = TRUE, ...)
)
)
}
# Multiple tables ----
# Return a list of tables, one per model, with the internal fit_id removed
if(multiple_tables){
out = fits %>%
purrr::map(dplyr::select, -fit_id)
return(out)
}
# Single table ----
# Model over all variables supplies the variable/level rows; its final
# (multivariable) column is dropped and the per-model columns are joined on.
uni = finalfit(.data, dependent, explanatory, keep_fit_id = TRUE,
add_dependent_label = FALSE, ...) %>%
dplyr::select(-length(.)) # remove last column
## multivariable only
fits = fits %>%
purrr::map(dplyr::select, c(1, length(.[[1]]))) # first and last columns
## number of models
n_fits = 1:length(fits)
## paste incremental integer to model name (so column names stay unique)
fits = fits %>%
purrr::map(~ names(.x)[2]) %>%
purrr::map2(n_fits, ~ paste(.x, .y)) %>%
purrr::map2(fits, ~ dplyr::rename(.y, !!.x := 2))
## create final table: join models on fit_id, "-" for variables absent
## from a given model, then drop the id and add the dependent label
out = fits %>%
purrr::reduce(dplyr::full_join, by = "fit_id") %>%
dplyr::left_join(uni, ., by = "fit_id") %>%
dplyr::mutate_all(~ ifelse(is.na(.), "-", .)) %>%
dplyr::select(-fit_id) %>%
dependent_label(.data = .data, dependent = dependent)
return(out)
}
#' @rdname ff_permute
#' @export
# finalfit_permute is an alias of ff_permute
finalfit_permute = ff_permute
|
## Created 12 / January / 2016
## Updates 06 / February / 2017
## Marina Costa Rillo
##
## Code that uses the raw CSV file of the Buckley Collection downloaded from the NHM Data Portal
##
## Reads "BuckleyCollection_DataPortal.csv"
## Creates "Coordinates_Buckley.csv" (with sample_type column)
##
rm(list = ls())  # NOTE(review): clearing the workspace in a script is fragile; kept for parity with the original workflow
setwd("/Users/marinacostarillo/Google Drive/DOUTORADO/R_data")
# Raw Buckley Collection export downloaded from the NHM Data Portal
buckley_table <- read.csv("Buckley_Collection/BuckleyCollection_DataPortal.csv", header = TRUE, stringsAsFactors = FALSE)
# buckley_table = read.csv("Buckley_total_JMicrop.csv", header = TRUE, stringsAsFactors=FALSE)
# buckley_table = buckley_table[,1:which(colnames(buckley_table)=="Ocean.Sea")] # in case csv file includes empty columns
# Shorter working names for the columns used below
names(buckley_table)[names(buckley_table) == "Long.decimal"] <- "long"
names(buckley_table)[names(buckley_table) == "Lat.decimal"] <- "lat"
names(buckley_table)[names(buckley_table) == "No_of_individuals"] <- "no_ind"
names(buckley_table)[names(buckley_table) == "ZF_PF_no."] <- "Foram_no"
### Taxonomic update: map synonyms / outdated names onto current species names.
### which() is kept deliberately: it drops NA matches, whereas a plain logical
### index would select NA rows.
buckley_table[which(buckley_table[, "Species"] == "eggeri"), "Species"] <- "dutertrei"
buckley_table[which(buckley_table[, "Species"] == "aequilateralis/siphonifera"), "Species"] <- "siphonifera"
buckley_table[which(buckley_table[, "Species"] == "aequilateralis"), "Species"] <- "siphonifera"
buckley_table[which(buckley_table[, "Species"] == "aequilateralis "), "Species"] <- "siphonifera"  # trailing-space variant present in raw data
buckley_table[which(buckley_table[, "Species"] == "incompta"), "Species"] <- "pachyderma"
# sort(unique(buckley_table[,"Species"]))
names(buckley_table)
# One row per unique sample (coordinates + depth information)
coord_table <- unique(buckley_table[, c("lat", "long", "Sample_depth_min_cm", "Sample_depth_max_cm",
                                        "Sea_Depth_meters_NOAA", "Sea_Depth_meters")])
### Removing samples that do not have coordinates info
coord_table <- coord_table[order(coord_table[, "lat"]), ]
nrow(coord_table)
nas <- which(is.na(coord_table[, "lat"]))
length(nas)
# BUG FIX: `coord_table[-nas, ]` with an empty `nas` drops EVERY row
# (negative indexing with integer(0) selects zero rows), so only subset
# when there is actually something to remove.
if (length(nas) > 0) {
  coord_table <- coord_table[-nas, ]
}
nrow(coord_table)
# Adding "sample_type" column: tow, top_core, deep_core, land, no_info
# Sea-floor samples whose maximum depth is at most `deep` cm count as "top_core".
deep = 15
coord_table[,"sample_type"] <- NA
names(coord_table)
# NOTE: the order of the next five assignments matters: depth-based labels are
# set first, remaining NAs then become "no_info", and finally the textual codes
# "tow"/"land" in the min-depth column override whatever was set before.
coord_table[which(as.numeric(coord_table[,"Sample_depth_max_cm"])<=deep),"sample_type"]="top_core" # NA introduced because some sample depth are empty
coord_table[which(as.numeric(coord_table[,"Sample_depth_max_cm"])>deep),"sample_type"]="deep_core" # NA introduced because some sample depth are empty
coord_table[which(is.na(coord_table[,"sample_type"])),"sample_type"]="no_info"
coord_table[which(coord_table[,"Sample_depth_min_cm"]==c("tow")),c("sample_type")] = "tow"
coord_table[which(coord_table[,"Sample_depth_min_cm"]==c("land")), c("sample_type")] = "land"
# Number of unique coordinates (with and without the min-depth column); the
# equality below sanity-checks that duplicated lat/long pairs are fully
# explained by differing depth / sample_type / NOAA-depth combinations.
length(unique(coord_table[,c("lat","long", "Sample_depth_min_cm")])[,1])
length(unique(coord_table[,c("lat","long")])[,1])
sum(duplicated(coord_table[,c("lat","long")])) == length(unique(coord_table[,c("lat","long","Sample_depth_min_cm","Sample_depth_max_cm","sample_type","Sea_Depth_meters_NOAA")])[,1]) - length(unique(coord_table[,c("lat","long")])[,1])
# check <- data.frame(duplicated(coord_table[,c("lat","long")]), coord_table[,c("lat", "long","Sample_depth_min_cm","Sample_depth_max_cm", "sample_type")])
# names(check) <- c("check","lat","long","Sample_depth_min_cm","Sample_depth_max_cm","sample_type")
# write.csv(check, file = "Buckley_Collection/check_coord.csv",row.names=FALSE)
### Fixing coordinates that have more than one top_core (e.g. 0-3.5cm, 4-5.5cm, 12-13cm)
# Three manually-identified latitudes get all of their "top_core" rows demoted
# to "deep_core"; then EVERY row whose minimum depth is exactly 0 (not just the
# fixed coordinates) is set back to "top_core", leaving one top core per site.
lat_fix <- c(-19.475, 7.193, 36.543) # doing it by hand, based on "check" right above
rows_fix <- which(round(coord_table[,"lat"],3) %in% lat_fix & coord_table[,"sample_type"] == c("top_core"))
coord_table[rows_fix,"sample_type"] = rep("deep_core", length(rows_fix))
coord_table[which(coord_table[,"Sample_depth_min_cm"]==0),"sample_type"] = rep("top_core", length(which(coord_table[,"Sample_depth_min_cm"]==0)))
coord_table[rows_fix,]
### Difference between Buckley-recorded sea depth and NOAA sea depth
coord_table[, "Diff_Buckley_NOAA"] <- NA
# Only sea-floor samples have a meaningful sea depth ("tow"/"land" do not)
sea_floor_sample <- c("top_core", "no_info", "deep_core")
diff_rows <- which(coord_table[, "sample_type"] %in% sea_floor_sample)
# NOTE(review): the two depth columns are ADDED here, presumably because the
# NOAA depths are stored as negative (below sea level) while Buckley's are
# positive -- confirm against the raw data.
coord_table[diff_rows, "Diff_Buckley_NOAA"] <- as.numeric(coord_table[diff_rows, "Sea_Depth_meters"]) +
  coord_table[diff_rows, "Sea_Depth_meters_NOAA"]
### Adding Ocean and OBD IRN information, looked up from the raw table by exact
### lat/long match; a coordinate can match several raw rows, keep the first.
coord_table[, "Ocean"] <- 0
coord_table[, "OBD_IRN"] <- 0
for (i in seq_len(nrow(coord_table))) {  # seq_len() is safe for an empty table
  same_latlong <- which(coord_table[i, "lat"] == buckley_table[, "lat"] &
                          coord_table[i, "long"] == buckley_table[, "long"])
  coord_table[i, "Ocean"] <- unique(buckley_table[same_latlong, "Ocean_Sea"])[1]
  coord_table[i, "OBD_IRN"] <- unique(buckley_table[same_latlong, "IRN_Residue_OBD"])[1]
}
### Adding EXTRA OBD IRN information from the separate OBD export, matched by IRN
OBD_table <- read.csv("Buckley_Collection/OBD.csv", header = TRUE, stringsAsFactors = FALSE)
head(OBD_table)
OBD_table[, "info"] <- paste(OBD_table[, 3], OBD_table[, 4], sep = "|")
coord_table[, "OBD_IRN_extra_info"] <- 0
for (i in seq_len(nrow(coord_table))) {
  same_IRN <- which(coord_table[i, "OBD_IRN"] == OBD_table[, "IRN"])
  coord_table[i, "OBD_IRN_extra_info"] <- unique(OBD_table[same_IRN, "info"])[1]
}
### Ordering and saving coord_table
coord_table <- coord_table[order(coord_table[, "sample_type"], coord_table[, "lat"]), ]
write.csv(coord_table, file = "Buckley_Collection/Coordinates_Buckley.csv", row.names = FALSE)
|
/analysis/R/create_coord_table.R
|
no_license
|
mcrillo/buckley-collection
|
R
| false
| false
| 5,451
|
r
|
## Created 12 / January / 2016
## Updates 06 / February / 2017
## Marina Costa Rillo
##
## Code that uses the raw CSV file of the Buckley Collection downloaded from the NHM Data Portal
##
## Reads "BuckleyCollection_DataPortal.csv"
## Creates "Coordinates_Buckley.csv" (with sample_type column)
##
rm(list=ls())
setwd("/Users/marinacostarillo/Google Drive/DOUTORADO/R_data")
buckley_table <- read.csv("Buckley_Collection/BuckleyCollection_DataPortal.csv", header = TRUE, stringsAsFactors=FALSE)
# buckley_table = read.csv("Buckley_total_JMicrop.csv", header = TRUE, stringsAsFactors=FALSE)
# buckley_table = buckley_table[,1:which(colnames(buckley_table)=="Ocean.Sea")] # in case csv file includes empty columns
names(buckley_table)[names(buckley_table) == "Long.decimal"] = "long"
names(buckley_table)[names(buckley_table) == "Lat.decimal"] = "lat"
names(buckley_table)[names(buckley_table) == "No_of_individuals"] = "no_ind"
names(buckley_table)[names(buckley_table) == "ZF_PF_no."] = "Foram_no"
### Taxonomic update
buckley_table[which(buckley_table[,"Species"]=="eggeri"),"Species"] = "dutertrei"
buckley_table[which(buckley_table[,"Species"]=="aequilateralis/siphonifera"),"Species"] = "siphonifera"
buckley_table[which(buckley_table[,"Species"]=="aequilateralis"),"Species"] = "siphonifera"
buckley_table[which(buckley_table[,"Species"]=="aequilateralis "),"Species"] = "siphonifera"
buckley_table[which(buckley_table[,"Species"]=="incompta"),"Species"] = "pachyderma"
# sort(unique(buckley_table[,"Species"]))
names(buckley_table)
coord_table = unique(buckley_table[,c("lat","long","Sample_depth_min_cm", "Sample_depth_max_cm",
"Sea_Depth_meters_NOAA", "Sea_Depth_meters")])
### Removing samples that do not have coordinates info
coord_table <- coord_table[order(coord_table[,"lat"]),]
length(coord_table[,1])
nas <- which(is.na(coord_table[,"lat"]))
length(nas)
coord_table <- coord_table[-nas,]
length(coord_table[,1])
# Adding "sample_type" column: tow, top_core, deep_core, land, no_info
deep = 15
coord_table[,"sample_type"] <- NA
names(coord_table)
coord_table[which(as.numeric(coord_table[,"Sample_depth_max_cm"])<=deep),"sample_type"]="top_core" # NA introduced because some sample depth are empty
coord_table[which(as.numeric(coord_table[,"Sample_depth_max_cm"])>deep),"sample_type"]="deep_core" # NA introduced because some sample depth are empty
coord_table[which(is.na(coord_table[,"sample_type"])),"sample_type"]="no_info"
coord_table[which(coord_table[,"Sample_depth_min_cm"]==c("tow")),c("sample_type")] = "tow"
coord_table[which(coord_table[,"Sample_depth_min_cm"]==c("land")), c("sample_type")] = "land"
# Number of unique coordinates
length(unique(coord_table[,c("lat","long", "Sample_depth_min_cm")])[,1])
length(unique(coord_table[,c("lat","long")])[,1])
sum(duplicated(coord_table[,c("lat","long")])) == length(unique(coord_table[,c("lat","long","Sample_depth_min_cm","Sample_depth_max_cm","sample_type","Sea_Depth_meters_NOAA")])[,1]) - length(unique(coord_table[,c("lat","long")])[,1])
# check <- data.frame(duplicated(coord_table[,c("lat","long")]), coord_table[,c("lat", "long","Sample_depth_min_cm","Sample_depth_max_cm", "sample_type")])
# names(check) <- c("check","lat","long","Sample_depth_min_cm","Sample_depth_max_cm","sample_type")
# write.csv(check, file = "Buckley_Collection/check_coord.csv",row.names=FALSE)
### Fixing coordinates that have more than one top_core (e.g. 0-3.5cm, 4-5.5cm, 12-13cm)
lat_fix <- c(-19.475, 7.193, 36.543) # doing it by hand, based on "check" right above
rows_fix <- which(round(coord_table[,"lat"],3) %in% lat_fix & coord_table[,"sample_type"] == c("top_core"))
coord_table[rows_fix,"sample_type"] = rep("deep_core", length(rows_fix))
coord_table[which(coord_table[,"Sample_depth_min_cm"]==0),"sample_type"] = rep("top_core", length(which(coord_table[,"Sample_depth_min_cm"]==0)))
coord_table[rows_fix,]
### Adding difference between BUCKLEY SEA DEPTH and NOAA SEA DEPTH
coord_table[,"Diff_Buckley_NOAA"] = NA
sea_floor_sample = c("top_core","no_info","deep_core")
diff_rows = which(coord_table[,"sample_type"] %in% sea_floor_sample)
coord_table[diff_rows,"Diff_Buckley_NOAA"] = as.numeric(coord_table[diff_rows,"Sea_Depth_meters"]) + coord_table[diff_rows ,"Sea_Depth_meters_NOAA"]
### Adding Ocean and OBD IRN information
coord_table[,"Ocean"] = 0
coord_table[,"OBD_IRN"] = 0
for (i in 1 : length(coord_table[,1])){
same_latlong = c()
same_latlong = c(same_latlong, which(coord_table[i,"lat"] == buckley_table[,"lat"] &
coord_table[i,"long"] == buckley_table[,"long"]) )
coord_table[i, "Ocean"] = unique(buckley_table[same_latlong,"Ocean_Sea"])[1]
coord_table[i, "OBD_IRN"] = unique(buckley_table[same_latlong,"IRN_Residue_OBD"])[1]
}
### Adding EXTRA OBD IRN information
OBD_table = read.csv("Buckley_Collection/OBD.csv", header = TRUE, stringsAsFactors=FALSE)
head(OBD_table)
OBD_table[,"info"] <- paste(OBD_table[,3],OBD_table[,4], sep = "|")
coord_table[,"OBD_IRN_extra_info"] = 0
for (i in 1 : length(coord_table[,1])){
same_IRN = c()
same_IRN = c(same_IRN, which(coord_table[i,"OBD_IRN"] == OBD_table[,"IRN"]))
coord_table[i,"OBD_IRN_extra_info"] = unique(OBD_table[same_IRN,"info"])[1]
}
### Ordering and saving coord_table
coord_table = coord_table[order(coord_table[,"sample_type"],coord_table[,"lat"]),]
write.csv(coord_table, file = "Buckley_Collection/Coordinates_Buckley.csv",row.names=FALSE)
|
library(nloptr)
# Muestra de tamaño n = 8
set.seed(12092020)
sample(27, 8)
# c(7L, 5L, 19L, 4L, 13L, 25L, 24L, 23L)
y <- c(1,1, 1, 0, 0, 0, 1, 1 )
suma_y <- sum(y)
n <- length(y)
# Log-likelihood of a Bernoulli sample as a function of p.
#
# Generalized: the sufficient statistics can now be passed explicitly.
# The defaults read the globals `suma_y` (number of successes) and `n`
# (sample size), preserving the original call `logLP(p)` exactly.
#
# p         success probability (scalar or vector in (0, 1))
# successes number of observed successes
# size      total number of trials
logLP <- function(p, successes = suma_y, size = n) {
  # log L(p) = y * log(p) + (n - y) * log(1 - p)
  log(p) * successes + log(1 - p) * (size - successes)
}
# Likelihood of a Bernoulli sample as a function of p.
#
# Generalized like `logLP`: sufficient statistics default to the globals
# `suma_y` and `n`, so the original call `LP(p)` is unchanged.
#
# p         success probability (scalar or vector in (0, 1))
# successes number of observed successes
# size      total number of trials
LP <- function(p, successes = suma_y, size = n) {
  # Exponentiate the log-likelihood: p^y * (1 - p)^(n - y).
  exp(log(p) * successes + log(1 - p) * (size - successes))
}
plot(logLP, xlab = "p", ylab = "log L(p)")
abline(v = 5/8, col = "red")
exp(logLP(0.1))
exp(logLP(0.5))
exp(logLP(5/8))
plot(LP, xlab = "p", ylab = "L(p)")
abline(v = 5/8, col = "red")
LP(0.625)
LP(0.5)
# Metodos numéricos (minimizar el - logL(p))
MenoslogLP <- function(p) {
-(log(p) * suma_y + log(1-p) * (n-suma_y))
}
opts = list("algorithm" = "NLOPT_LN_BOBYQA", "xtol_rel" = 1.0e-16,
"maxeval" = 10000)
solucion <- nloptr(x0 = 0.9, eval_f = MenoslogLP, lb = 1.0e-16, ub = 1 - 1.0e-16,
eval_grad_f = NULL, opts = opts)
solucion
MenosLP <- function(p) {
-(exp((log(p) * suma_y + log(1-p) * (n-suma_y))))
}
opts = list("algorithm" = "NLOPT_LN_BOBYQA", "xtol_rel" = 1.0e-16,
"maxeval" = 10000)
solucion <- nloptr(x0 = 0.9, eval_f = MenosLP, lb = 1.0e-16, ub = 1 - 1.0e-16,
eval_grad_f = NULL, opts = opts)
solucion
############################## regresión logística ####################
#c(7L, 5L, 19L, 4L, 13L, 25L, 24L, 23L)
y_i <- c(1,1, 1, 0, 0, 0, 1, 1 ) # tomo el ultimo año
x_i <- c(39, 30, 31, 26, 32, 31, 29, 50) # edad
x_i <- (x_i - mean(x_i)) / sd(x_i)
# invlogit <- function(x){
# exp(x) / (1+exp(x))
# }
# Inverse logit (logistic) transform: maps the real line onto (0, 1).
invlogit <- function(x) {
  neg_exp <- exp(-x)
  1 / (1 + neg_exp)
}
invlogit(0.8)
plot(invlogit, xlim = c(-10, 10))
# Negative log-likelihood of a simple logistic regression.
# betas: c(intercept, slope). Reads the globals `x_i` (predictor vector),
# `y_i` (0/1 response vector) and the `invlogit` function — assumed to be
# defined earlier in the script (TODO confirm before reuse elsewhere).
MenoslogL_p <- function(betas){
  # Fitted success probabilities under the logistic model.
  p_i <- invlogit(betas[1] + betas[2]*x_i)
  # Bernoulli log-likelihood: sum y*log(p) + (1-y)*log(1-p).
  res <- sum(y_i * log(p_i)) + sum((1 - y_i) * log(1 - p_i))
  -res # The optimizer minimizes, so return -log L(p)
}
MenoslogL_p(c(0,1))
opts = list("algorithm" = "NLOPT_LN_BOBYQA", "xtol_rel" = 1.0e-16,
"maxeval" = 10000)
# xo_ el valor inicial para beta_0 y beta_1
sol <- nloptr(x0 = c(0, 1), eval_f = MenoslogL_p,
eval_grad_f = NULL, opts = opts)
sol
modelo <- glm(factor(y_i) ~ x_i, family = "binomial")
summary(modelo)
c(0.9872009, 1.9385) # Es el valor que maximiza la función de máxima verosimilitud
MenoslogL_p(c(0.9872009, 1.9385))
MenoslogL_p(c(1.6, 2.9385))
#Si es multivariado x_0: x_0 <- c(varlInic_bo, valinicial_b1, ...vali_inicial_bp)
# Ejercicio con iris
data(iris)
boxplot(Sepal.Length ~ Species,data = iris)
iris$yi <- ifelse(iris$Species == "setosa", 1, 0)
iris$xi <- iris$Sepal.Length
iris$EspecieRecod <- ifelse(iris$yi == 1, "Setosa", "Otras")
tapply(iris$Sepal.Length, iris$EspecieRecod, FUN = mean)
modelo <- glm(factor(yi) ~ xi, data = iris, family = "binomial")
summary(modelo)
# Calcular para los 150 espacies, ¿Cuás es la probabilidad de ser de la especie setosa?
iris$prob <- invlogit(27.8285 -5.1757 * iris$xi)
#iris$prob <- exp(27.8285 -5.1757 * iris$xi) / (1 + exp(27.8285 -5.1757 * iris$xi))
iris$prob2 <- modelo$fitted.values
iris$prob3 <- predict(modelo, iris, type = "response")
plot(iris$Sepal.Length,iris$prob2)
# Si una flor tiene una longitud del sepalo de 4.5
invlogit(27.8285 -5.1757 * 4.5)
# Si una flor tiene una longitud del sepalo de 6
invlogit(27.8285 -5.1757 * 6)
exp( -5.1757 * 0.1)
# Van a seleccionar el training y test
set.seed(12092020)
indica_mue <- sample(150, round(0.7 * 150))
data(iris)
iris$yi <- ifelse(iris$Species == "setosa", 1, 0)
training <- iris[indica_mue,]
test <- iris[-indica_mue,]
mod_logSepal <- glm(yi~Sepal.Length, data = training, family = "binomial")
summary(mod_logSepal)
# Valido en la muestra test
invlogit <- function(x){
1 / (1 + exp(-x))
}
test$probs <- invlogit(mod_logSepal$coefficients[1] + mod_logSepal$coefficients[2] * test$Sepal.Length)
# Punto corte SI las probs >0.5 voy a clasificar a la flor en que es de setosa
test$yhat <- ifelse(test$probs >= 0.5, 1, 0)
table(test$yi, test$yhat)
accuracy_test <- (30 + 12) / (30 + 3 + 0 + 12)
mc_test <- table(test$yi, test$yhat)
accuracy_test <- sum(diag(mc_test)) / sum(mc_test)
accuracy_test
# Ejercicio hacer cuatro modelos logisticos, en donde calculen el acrruacy en la muestra test para cad
# regresión logistica simple (solo metar una única variable continua en cuada uno de lso 4 modelos)
# Calcular la métrica accuracy para lso cuatro modelos y escoger el mejor
library(ggplot2)
load("C:/Users/Home/Downloads/logistica (1) (1).RData")
# Recodificar la variable de interés
# 1 si el cliente se va, 0 si el cliente permanece
# Churn analysis
# EDA
ggplot(data = insumo, aes(x = calidad_produc, y = factor(target))) +
geom_boxplot()
ggplot(data = insumo, aes(x = calif_voz, y = factor(target))) +
geom_boxplot()
ggplot(data = insumo, aes(x = senal_voz, y = factor(target))) +
geom_boxplot()
# Esta no parecería que sirve
ggplot(data = insumo, aes(x = recharges_month_a, y = factor(target))) +
geom_boxplot()
ggplot(data = insumo, aes(x = nr_recharges_month_a, y = factor(target))) +
geom_boxplot()
set.seed(17092020)
indica_mue <- sample(nrow(insumo), round(0.7 * nrow(insumo)))
training <- insumo[indica_mue,]
test <- insumo[-indica_mue,]
modelo <- glm(target ~ 1 + calidad_produc + nr_recharges_month_a,
data = training, family = "binomial")
summary(modelo)
# hay que evaluar en la muestra TEST
# Primero calculemos las probabilidades
test$probs <- predict(modelo,
test[c("calidad_produc", "nr_recharges_month_a")],
type = "response")
test$yhat <- as.numeric(test$probs >= 0.5)
matrix_conf <- table(test$target, test$yhat)
100 * sum(diag(matrix_conf)) / sum(matrix_conf)
# Sensibilidad
matrix_conf
matrix_conf[2,2] / sum(matrix_conf[2,])
# 67 % de sensibilidad
# Especificicdad
matrix_conf[1,1] / sum(matrix_conf[1,])
# 92.68 de especificidad
# Curva ROCO y el ROC
|
/clase5/clase_17Sept_20202_mlJueves.r
|
no_license
|
josezea/ml_2020_2
|
R
| false
| false
| 6,148
|
r
|
library(nloptr)
# Muestra de tamaño n = 8
set.seed(12092020)
sample(27, 8)
# c(7L, 5L, 19L, 4L, 13L, 25L, 24L, 23L)
y <- c(1,1, 1, 0, 0, 0, 1, 1 )
suma_y <- sum(y)
n <- length(y)
logLP <- function(p) {
log(p) * suma_y + log(1-p) * (n-suma_y)
}
LP <- function(p) {
exp(log(p) * suma_y + log(1-p) * (n-suma_y))
}
plot(logLP, xlab = "p", ylab = "log L(p)")
abline(v = 5/8, col = "red")
exp(logLP(0.1))
exp(logLP(0.5))
exp(logLP(5/8))
plot(LP, xlab = "p", ylab = "L(p)")
abline(v = 5/8, col = "red")
LP(0.625)
LP(0.5)
# Metodos numéricos (minimizar el - logL(p))
MenoslogLP <- function(p) {
-(log(p) * suma_y + log(1-p) * (n-suma_y))
}
opts = list("algorithm" = "NLOPT_LN_BOBYQA", "xtol_rel" = 1.0e-16,
"maxeval" = 10000)
solucion <- nloptr(x0 = 0.9, eval_f = MenoslogLP, lb = 1.0e-16, ub = 1 - 1.0e-16,
eval_grad_f = NULL, opts = opts)
solucion
MenosLP <- function(p) {
-(exp((log(p) * suma_y + log(1-p) * (n-suma_y))))
}
opts = list("algorithm" = "NLOPT_LN_BOBYQA", "xtol_rel" = 1.0e-16,
"maxeval" = 10000)
solucion <- nloptr(x0 = 0.9, eval_f = MenosLP, lb = 1.0e-16, ub = 1 - 1.0e-16,
eval_grad_f = NULL, opts = opts)
solucion
############################## regresión logística ####################
#c(7L, 5L, 19L, 4L, 13L, 25L, 24L, 23L)
y_i <- c(1,1, 1, 0, 0, 0, 1, 1 ) # tomo el ultimo año
x_i <- c(39, 30, 31, 26, 32, 31, 29, 50) # edad
x_i <- (x_i - mean(x_i)) / sd(x_i)
# invlogit <- function(x){
# exp(x) / (1+exp(x))
# }
invlogit <- function(x){
1 / (1 + exp(-x))
}
invlogit(0.8)
plot(invlogit, xlim = c(-10, 10))
MenoslogL_p <- function(betas){
p_i <- invlogit(betas[1] + betas[2]*x_i)
res <- sum(y_i * log(p_i)) + sum((1 - y_i) * log(1 - p_i))
-res # El algoritmo minimiza, entonces que minimice el -log L(p)
}
MenoslogL_p(c(0,1))
opts = list("algorithm" = "NLOPT_LN_BOBYQA", "xtol_rel" = 1.0e-16,
"maxeval" = 10000)
# xo_ el valor inicial para beta_0 y beta_1
sol <- nloptr(x0 = c(0, 1), eval_f = MenoslogL_p,
eval_grad_f = NULL, opts = opts)
sol
modelo <- glm(factor(y_i) ~ x_i, family = "binomial")
summary(modelo)
c(0.9872009, 1.9385) # Es el valor que maximiza la función de máxima verosimilitud
MenoslogL_p(c(0.9872009, 1.9385))
MenoslogL_p(c(1.6, 2.9385))
#Si es multivariado x_0: x_0 <- c(varlInic_bo, valinicial_b1, ...vali_inicial_bp)
# Ejercicio con iris
data(iris)
boxplot(Sepal.Length ~ Species,data = iris)
iris$yi <- ifelse(iris$Species == "setosa", 1, 0)
iris$xi <- iris$Sepal.Length
iris$EspecieRecod <- ifelse(iris$yi == 1, "Setosa", "Otras")
tapply(iris$Sepal.Length, iris$EspecieRecod, FUN = mean)
modelo <- glm(factor(yi) ~ xi, data = iris, family = "binomial")
summary(modelo)
# Calcular para los 150 espacies, ¿Cuás es la probabilidad de ser de la especie setosa?
iris$prob <- invlogit(27.8285 -5.1757 * iris$xi)
#iris$prob <- exp(27.8285 -5.1757 * iris$xi) / (1 + exp(27.8285 -5.1757 * iris$xi))
iris$prob2 <- modelo$fitted.values
iris$prob3 <- predict(modelo, iris, type = "response")
plot(iris$Sepal.Length,iris$prob2)
# Si una flor tiene una longitud del sepalo de 4.5
invlogit(27.8285 -5.1757 * 4.5)
# Si una flor tiene una longitud del sepalo de 6
invlogit(27.8285 -5.1757 * 6)
exp( -5.1757 * 0.1)
# Van a seleccionar el training y test
set.seed(12092020)
indica_mue <- sample(150, round(0.7 * 150))
data(iris)
iris$yi <- ifelse(iris$Species == "setosa", 1, 0)
training <- iris[indica_mue,]
test <- iris[-indica_mue,]
mod_logSepal <- glm(yi~Sepal.Length, data = training, family = "binomial")
summary(mod_logSepal)
# Valido en la muestra test
invlogit <- function(x){
1 / (1 + exp(-x))
}
test$probs <- invlogit(mod_logSepal$coefficients[1] + mod_logSepal$coefficients[2] * test$Sepal.Length)
# Punto corte SI las probs >0.5 voy a clasificar a la flor en que es de setosa
test$yhat <- ifelse(test$probs >= 0.5, 1, 0)
table(test$yi, test$yhat)
accuracy_test <- (30 + 12) / (30 + 3 + 0 + 12)
mc_test <- table(test$yi, test$yhat)
accuracy_test <- sum(diag(mc_test)) / sum(mc_test)
accuracy_test
# Ejercicio hacer cuatro modelos logisticos, en donde calculen el acrruacy en la muestra test para cad
# regresión logistica simple (solo metar una única variable continua en cuada uno de lso 4 modelos)
# Calcular la métrica accuracy para lso cuatro modelos y escoger el mejor
library(ggplot2)
load("C:/Users/Home/Downloads/logistica (1) (1).RData")
# Recodificar la variable de interés
# 1 si el cliente se va, 0 si el cliente permanece
# Churn analysis
# EDA
ggplot(data = insumo, aes(x = calidad_produc, y = factor(target))) +
geom_boxplot()
ggplot(data = insumo, aes(x = calif_voz, y = factor(target))) +
geom_boxplot()
ggplot(data = insumo, aes(x = senal_voz, y = factor(target))) +
geom_boxplot()
# Esta no parecería que sirve
ggplot(data = insumo, aes(x = recharges_month_a, y = factor(target))) +
geom_boxplot()
ggplot(data = insumo, aes(x = nr_recharges_month_a, y = factor(target))) +
geom_boxplot()
set.seed(17092020)
indica_mue <- sample(nrow(insumo), round(0.7 * nrow(insumo)))
training <- insumo[indica_mue,]
test <- insumo[-indica_mue,]
modelo <- glm(target ~ 1 + calidad_produc + nr_recharges_month_a,
data = training, family = "binomial")
summary(modelo)
# hay que evaluar en la muestra TEST
# Primero calculemos las probabilidades
test$probs <- predict(modelo,
test[c("calidad_produc", "nr_recharges_month_a")],
type = "response")
test$yhat <- as.numeric(test$probs >= 0.5)
matrix_conf <- table(test$target, test$yhat)
100 * sum(diag(matrix_conf)) / sum(matrix_conf)
# Sensibilidad
matrix_conf
matrix_conf[2,2] / sum(matrix_conf[2,])
# 67 % de sensibilidad
# Especificicdad
matrix_conf[1,1] / sum(matrix_conf[1,])
# 92.68 de especificidad
# Curva ROCO y el ROC
|
# Predicting with Machine Learning
# Load the data
data(iris)
# Set a seed to make randomness reproducible
set.seed(42)
# Randomly sample 100 of 150 row indexes
indexes <- sample(
x = 1:150,
size = 100)
# Inspect the random indexes
print(indexes)
# Create a training set from indexes
train <- iris[indexes, ]
# Create a test set from remaining indexes
test <- iris[-indexes, ]
# Load the decision tree package
library(tree)
# Train a decision tree model
model <- tree(
formula = Species ~ .,
data = train)
# Inspect the model
summary(model)
# Visualize the decision tree model
plot(model)
text(model)
# Load color brewer library
library(RColorBrewer)
# Create a color palette
palette <- brewer.pal(3, "Set2")
# Create a scatterplot colored by species
plot(
x = iris$Petal.Length,
y = iris$Petal.Width,
pch = 19,
col = palette[as.numeric(iris$Species)],
main = "Iris Petal Length vs. Width",
xlab = "Petal Length (cm)",
ylab = "Petal Width (cm)")
# Plot the decision boundaries
partition.tree(
tree = model,
label = "Species",
add = TRUE)
# Predict with the model
predictions <- predict(
object = model,
newdata = test,
type = "class")
# Create a confusion matrix
table(
x = predictions,
y = test$Species)
# Load the caret package
install.packages("caret")
library(caret)
install.packages('e1071', dependencies=TRUE)
# Evaluate the prediction results
confusionMatrix(
data = predictions,
reference = test$Species)
# Save the tree model
save(model, file = "Tree.RData")
# Save the training data
save(train, file = "Train.RData")
|
/TestCars6.R
|
no_license
|
TonyHomsi/Rtudio_TONY
|
R
| false
| false
| 1,598
|
r
|
# Predicting with Machine Learning
# Load the data
data(iris)
# Set a seed to make randomness reproducible
set.seed(42)
# Randomly sample 100 of 150 row indexes
indexes <- sample(
x = 1:150,
size = 100)
# Inspect the random indexes
print(indexes)
# Create a training set from indexes
train <- iris[indexes, ]
# Create a test set from remaining indexes
test <- iris[-indexes, ]
# Load the decision tree package
library(tree)
# Train a decision tree model
model <- tree(
formula = Species ~ .,
data = train)
# Inspect the model
summary(model)
# Visualize the decision tree model
plot(model)
text(model)
# Load color brewer library
library(RColorBrewer)
# Create a color palette
palette <- brewer.pal(3, "Set2")
# Create a scatterplot colored by species
plot(
x = iris$Petal.Length,
y = iris$Petal.Width,
pch = 19,
col = palette[as.numeric(iris$Species)],
main = "Iris Petal Length vs. Width",
xlab = "Petal Length (cm)",
ylab = "Petal Width (cm)")
# Plot the decision boundaries
partition.tree(
tree = model,
label = "Species",
add = TRUE)
# Predict with the model
predictions <- predict(
object = model,
newdata = test,
type = "class")
# Create a confusion matrix
table(
x = predictions,
y = test$Species)
# Load the caret package
install.packages("caret")
library(caret)
install.packages('e1071', dependencies=TRUE)
# Evaluate the prediction results
confusionMatrix(
data = predictions,
reference = test$Species)
# Save the tree model
save(model, file = "Tree.RData")
# Save the training data
save(train, file = "Train.RData")
|
library('MiSPU')
# load data
data(throat.tree)
data(dd)
# do clustering
ncluster = 20
clustering = pam(as.dist(cophenetic(throat.tree)), ncluster, diss = TRUE)$clustering
p_est = dd$pi # mle of prob
p_est = p_est[names(clustering)]
p_clus = sort(tapply(p_est, clustering, sum), decreasing = T) # abundance level of the 20 clusters
# some parameters for distribution of OTU counts
theta = dd$theta
gplus = (1-theta)/theta
g_est = p_est*gplus
# a function to generate OTU table
# Simulate an OTU count table under a Dirichlet-multinomial model.
# nSam: number of samples (rows); mu/size: negative-binomial parameters
# for the per-sample sequencing depth. Reads the global `g_est`
# (per-OTU Dirichlet parameters) — assumed to be defined above.
# Returns an nSam x length(g_est) integer count matrix.
sim_data = function(nSam = 100, mu = 1000, size = 25)
{
  comm = matrix(0, nSam, length(g_est))   # counts: samples x OTUs
  comm.p = comm                           # per-sample OTU proportions
  rownames(comm) = 1:nrow(comm)
  colnames(comm) = names(g_est)
  nSeq = rnbinom(nSam, mu = mu, size = size) ## negative-binomial library sizes (note: rnbinom, not rbinom)
  for (i in 1:nSam) {
    comm.p[i, ] = MiSPU::rdirichlet(1, g_est)[1, ]             # draw OTU proportions
    comm[i, ] = rmultinom(1, nSeq[i], prob = comm.p[i, ])[,1]  # draw counts given depth
  }
  return(comm)
}
n = 600 # sample size: pnly take 50% as cases and divide nto 3 clusters
c = 3
OTUtab = sim_data(nSam = n) # generate OTU table
## Creat different cases based on different signals set
info_OTUs = names(which(clustering == 14))
scaled_OTUtab = OTUtab/rowSums(OTUtab)
### the coefficient of info_OTUs
set.seed(1)
signal_cut = floor(rep(1/c,c)*length(info_OTUs))
if((length(info_OTUs)%% c)){
for(i in 1:(length(info_OTUs)%% c)){
signal_cut[i] = signal_cut[i]+1
}
}
assign = rep(0,ncol(scaled_OTUtab))
assign[sample(which(clustering == 14))] = rep(1:c, signal_cut)
beta_list = list()
pool = cbind(matrix(0,n,c),scaled_OTUtab)
colnames(pool) = c(paste("y",1:c, sep = ""),colnames(scaled_OTUtab))
for(i in 1:c){
beta = rep(0, ncol(scaled_OTUtab))
beta[assign ==i] = 5
eta = scale(scaled_OTUtab %*% beta)
prob = 1/(1 + exp(-eta))
pool[seq((i-1)*n/c+1, (i*n/c), by = 1),i] = rbinom(n/c,1,prob)
}
as_tibble(pool) %>%
pivot_longer(
y1:y3,
names_to = "class",
values_to = "status"
) %>%
group_by(class) %>%
summarise(case = sum(status))
############
## case extraction with the scaled OTUtable
# generate some binray outcomes
info_OTUs = names(which(clustering == 14)) # consider the OTUs in the largest cluster as informative OTUs
beta = rep(1, length(info_OTUs)) # effect size
scaled_OTUtab = OTUtab/rowSums(OTUtab)
eta = scale(scaled_OTUtab[, info_OTUs] %*% beta)
prob = 1/(1 + exp(-eta))
Y = rbinom(n, 1, prob) # final binary outcome
table(Y)
# some distance based on OTUs and tree
dist_ls = GUniFrac(OTUtab, throat.tree)
|
/code/microb_apply/sample_code.R
|
no_license
|
yuqimiao/multiomics-SIMLR
|
R
| false
| false
| 2,452
|
r
|
library('MiSPU')
# load data
data(throat.tree)
data(dd)
# do clustering
ncluster = 20
clustering = pam(as.dist(cophenetic(throat.tree)), ncluster, diss = TRUE)$clustering
p_est = dd$pi # mle of prob
p_est = p_est[names(clustering)]
p_clus = sort(tapply(p_est, clustering, sum), decreasing = T) # abundance level of the 20 clusters
# some parameters for distribution of OTU counts
theta = dd$theta
gplus = (1-theta)/theta
g_est = p_est*gplus
# a function to generate OTU table
sim_data = function(nSam = 100, mu = 1000, size = 25)
{
comm = matrix(0, nSam, length(g_est))
comm.p = comm
rownames(comm) = 1:nrow(comm)
colnames(comm) = names(g_est)
nSeq = rnbinom(nSam, mu = mu, size = size) ## using rbinom to obtain a 25
for (i in 1:nSam) {
comm.p[i, ] = MiSPU::rdirichlet(1, g_est)[1, ]
comm[i, ] = rmultinom(1, nSeq[i], prob = comm.p[i, ])[,1]
}
return(comm)
}
n = 600 # sample size: pnly take 50% as cases and divide nto 3 clusters
c = 3
OTUtab = sim_data(nSam = n) # generate OTU table
## Creat different cases based on different signals set
info_OTUs = names(which(clustering == 14))
scaled_OTUtab = OTUtab/rowSums(OTUtab)
### the coefficient of info_OTUs
set.seed(1)
signal_cut = floor(rep(1/c,c)*length(info_OTUs))
if((length(info_OTUs)%% c)){
for(i in 1:(length(info_OTUs)%% c)){
signal_cut[i] = signal_cut[i]+1
}
}
assign = rep(0,ncol(scaled_OTUtab))
assign[sample(which(clustering == 14))] = rep(1:c, signal_cut)
beta_list = list()
pool = cbind(matrix(0,n,c),scaled_OTUtab)
colnames(pool) = c(paste("y",1:c, sep = ""),colnames(scaled_OTUtab))
for(i in 1:c){
beta = rep(0, ncol(scaled_OTUtab))
beta[assign ==i] = 5
eta = scale(scaled_OTUtab %*% beta)
prob = 1/(1 + exp(-eta))
pool[seq((i-1)*n/c+1, (i*n/c), by = 1),i] = rbinom(n/c,1,prob)
}
as_tibble(pool) %>%
pivot_longer(
y1:y3,
names_to = "class",
values_to = "status"
) %>%
group_by(class) %>%
summarise(case = sum(status))
############
## case extraction with the scaled OTUtable
# generate some binray outcomes
info_OTUs = names(which(clustering == 14)) # consider the OTUs in the largest cluster as informative OTUs
beta = rep(1, length(info_OTUs)) # effect size
scaled_OTUtab = OTUtab/rowSums(OTUtab)
eta = scale(scaled_OTUtab[, info_OTUs] %*% beta)
prob = 1/(1 + exp(-eta))
Y = rbinom(n, 1, prob) # final binary outcome
table(Y)
# some distance based on OTUs and tree
dist_ls = GUniFrac(OTUtab, throat.tree)
|
# Exponential-smoothing forecasts for the bucketed metric series.
library(fpp)
library(xts)
library(forecast)

setwd('/home/pavol/projects/hawkular/hawkular-datamining/R')
source('getBuckets')

df <- getBuckets()
ts <- ts(as.numeric(df$avg))
horizon <- 4

# Line colours for the three forecasts (was referenced but never defined).
col <- c('red', 'blue', 'green')

# single exponential smoothing plus two Holt variants
ex <- ses(ts, alpha = 0.2, initial = 'optimal', h = horizon)
exHolt <- holt(ts, h = horizon, damped = FALSE, exponential = FALSE)   # fix: was undefined `horizont`
exHoltExp <- holt(ts, h = horizon, damped = FALSE, exponential = TRUE) # was plotted below but never fitted

plot(ex, plot.conf = FALSE, main = "Exponential smoothing", col = 'black', fcol = col[1], flwd = 2)
lines(exHolt$mean, col = col[2], lwd = 2)
lines(exHoltExp$mean, col = col[3], lwd = 2)
legend('topleft', lty = 1, col = col, legend = c('Exponential smoothing', 'Holt', 'Holt exp'))

accuracy(ex)
accuracy(exHolt)
accuracy(exHoltExp)
|
/R/ex_smoothing.R
|
permissive
|
pavolloffay/hawkular-datamining
|
R
| false
| false
| 625
|
r
|
# Exponential-smoothing forecasts for the bucketed metric series.
library(fpp)
library(xts)
library(forecast)

setwd('/home/pavol/projects/hawkular/hawkular-datamining/R')
source('getBuckets')

df <- getBuckets()
ts <- ts(as.numeric(df$avg))
horizon <- 4

# Line colours for the three forecasts (was referenced but never defined).
col <- c('red', 'blue', 'green')

# single exponential smoothing plus two Holt variants
ex <- ses(ts, alpha = 0.2, initial = 'optimal', h = horizon)
exHolt <- holt(ts, h = horizon, damped = FALSE, exponential = FALSE)   # fix: was undefined `horizont`
exHoltExp <- holt(ts, h = horizon, damped = FALSE, exponential = TRUE) # was plotted below but never fitted

plot(ex, plot.conf = FALSE, main = "Exponential smoothing", col = 'black', fcol = col[1], flwd = 2)
lines(exHolt$mean, col = col[2], lwd = 2)
lines(exHoltExp$mean, col = col[3], lwd = 2)
legend('topleft', lty = 1, col = col, legend = c('Exponential smoothing', 'Holt', 'Holt exp'))

accuracy(ex)
accuracy(exHolt)
accuracy(exHoltExp)
|
#### Fleaflicker ff_starters ####
#' Get starters and bench
#'
#' @param conn the list object created by \code{ff_connect()}
#' @param week a numeric or numeric vector
#' @param ... other arguments (currently unused)
#'
#' @describeIn ff_starters Fleaflicker: returns who was started as well as what they scored.
#'
#' @examples
#' \donttest{
#' conn <- fleaflicker_connect(season = 2020, league_id = 206154)
#' ff_starters(conn)
#' }
#'
#' @export
ff_starters.flea_conn <- function(conn, week = 1:17, ...) {
  # For every completed game in the requested weeks, fetch the boxscore
  # lineups (via .flea_starters) and stack them into one data frame
  # ordered by week and franchise.
  #
  # Fix: the original assigned the pipeline to an unused local
  # (`starters <-`) and returned it invisibly; the result is now
  # returned visibly, with no other behavior change.
  ff_schedule(conn, week) %>%
    dplyr::filter(!is.na(.data$result)) %>%                 # keep games that have been played
    dplyr::distinct(.data$week, .data$game_id) %>%          # one boxscore fetch per game
    dplyr::mutate(starters = purrr::map2(.data$week, .data$game_id, .flea_starters, conn)) %>%
    tidyr::unnest("starters") %>%
    dplyr::arrange(.data$week, .data$franchise_id)
}
# Fetch and tidy one game's boxscore lineups from the Fleaflicker API.
# Returns one row per rostered player on either side of `game_id` in
# `week`: franchise id/name, lineup slot label, player identity and score.
.flea_starters <- function(week, game_id, conn) {
  x <- fleaflicker_getendpoint("FetchLeagueBoxscore",
    sport = "NFL",
    scoring_period = week,
    fantasy_game_id = game_id,
    league_id = conn$league_id
  ) %>%
    purrr::pluck("content", "lineups") %>%
    # Wrap the nested JSON in a one-column tibble so it can be
    # rectangled stepwise with unnest_longer/unnest_wider.
    list() %>%
    tibble::tibble() %>%
    tidyr::unnest_longer(1) %>%
    tidyr::unnest_wider(1) %>%
    tidyr::unnest_longer("slots") %>%
    tidyr::unnest_wider("slots") %>%
    dplyr::mutate(
      # Keep only the human-readable slot label; drop the color metadata.
      position = purrr::map_chr(.data$position, purrr::pluck, "label"),
      positionColor = NULL
    ) %>%
    # home/away columns become rows with a `franchise` indicator.
    tidyr::pivot_longer(c("home", "away"), names_to = "franchise", values_to = "player") %>%
    tidyr::hoist("player", "proPlayer", "owner", "points" = "viewingActualPoints") %>%
    tidyr::hoist("proPlayer",
      "player_id" = "id",
      "player_name" = "nameFull",
      "pos" = "position",
      "team" = "proTeamAbbreviation"
    ) %>%
    # Empty lineup slots have no player attached; drop them.
    dplyr::filter(!is.na(.data$player_id)) %>%
    tidyr::hoist("owner", "franchise_id" = "id", "franchise_name" = "name") %>%
    tidyr::hoist("points", "player_score" = "value") %>%
    # any_of(): tolerate columns missing from some API responses.
    dplyr::select(dplyr::any_of(c(
      "franchise_id",
      "franchise_name",
      "starter_status" = "position",
      "player_id",
      "player_name",
      "pos",
      "team",
      "player_score"
    )))
  return(x)
}
|
/R/flea_starters.R
|
permissive
|
tonyelhabr/ffscrapr
|
R
| false
| false
| 2,151
|
r
|
#### Fleaflicker ff_starters ####
#' Get starters and bench
#'
#' @param conn the list object created by \code{ff_connect()}
#' @param week a numeric or numeric vector
#' @param ... other arguments (currently unused)
#'
#' @describeIn ff_starters Fleaflicker: returns who was started as well as what they scored.
#'
#' @examples
#' \donttest{
#' conn <- fleaflicker_connect(season = 2020, league_id = 206154)
#' ff_starters(conn)
#' }
#'
#' @export
ff_starters.flea_conn <- function(conn, week = 1:17, ...) {
starters <- ff_schedule(conn, week) %>%
dplyr::filter(!is.na(.data$result)) %>%
dplyr::distinct(.data$week, .data$game_id) %>%
dplyr::mutate(starters = purrr::map2(.data$week, .data$game_id, .flea_starters, conn)) %>%
tidyr::unnest("starters") %>%
dplyr::arrange(.data$week, .data$franchise_id)
}
.flea_starters <- function(week, game_id, conn) {
x <- fleaflicker_getendpoint("FetchLeagueBoxscore",
sport = "NFL",
scoring_period = week,
fantasy_game_id = game_id,
league_id = conn$league_id
) %>%
purrr::pluck("content", "lineups") %>%
list() %>%
tibble::tibble() %>%
tidyr::unnest_longer(1) %>%
tidyr::unnest_wider(1) %>%
tidyr::unnest_longer("slots") %>%
tidyr::unnest_wider("slots") %>%
dplyr::mutate(
position = purrr::map_chr(.data$position, purrr::pluck, "label"),
positionColor = NULL
) %>%
tidyr::pivot_longer(c("home", "away"), names_to = "franchise", values_to = "player") %>%
tidyr::hoist("player", "proPlayer", "owner", "points" = "viewingActualPoints") %>%
tidyr::hoist("proPlayer",
"player_id" = "id",
"player_name" = "nameFull",
"pos" = "position",
"team" = "proTeamAbbreviation"
) %>%
dplyr::filter(!is.na(.data$player_id)) %>%
tidyr::hoist("owner", "franchise_id" = "id", "franchise_name" = "name") %>%
tidyr::hoist("points", "player_score" = "value") %>%
dplyr::select(dplyr::any_of(c(
"franchise_id",
"franchise_name",
"starter_status" = "position",
"player_id",
"player_name",
"pos",
"team",
"player_score"
)))
return(x)
}
|
testlist <- list(A = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 2.73584013950303e-312, 4.12396251261199e-221, 0), .Dim = c(5L, 1L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result)
|
/multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613116801-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 251
|
r
|
testlist <- list(A = structure(c(2.31584307392677e+77, 9.53818252170339e+295, 2.73584013950303e-312, 4.12396251261199e-221, 0), .Dim = c(5L, 1L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result)
|
#' Private function for capturing the source code of model
#'
#' Deparse the requested global functions (plus, optionally, the promote
#' model.require function) into a single source-code string.
#'
#' @param funcs functions to capture, defaults to required promote model functions
#' @param capture.model.require flag to capture the model.require function
#' @importFrom utils capture.output
capture.src <- function(funcs, capture.model.require=TRUE){
  promote$model.require()
  if (missing(funcs)) {
    funcs <- c("model.predict")
  }
  env.names <- ls(.GlobalEnv)
  # Optionally seed the output with the deparsed model.require function.
  src <- if (capture.model.require == TRUE) {
    paste(capture.output(promote$model.require), collapse = "\n")
  } else {
    ""
  }
  # Append `name <- <deparsed body>` for every requested function that
  # actually exists in the global environment; unknown names are skipped.
  for (fn in funcs) {
    if (fn %in% env.names) {
      fn.src <- paste(capture.output(.GlobalEnv[[fn]]), collapse = "\n")
      src <- paste(src, paste(fn, "<-", fn.src), sep = "\n\n")
    }
  }
  src
}
#' Private function for recursively looking for variables
#'
#' Walks a parsed R expression tree and collects every symbol that is
#' used without having been defined within the expression — i.e.
#' candidate free variables / global dependencies. Note that call
#' operators (e.g. `+`, `{`) are collected too; callers filter the
#' result against the global environment afterwards.
#'
#' @param block code block to spider
#' @param defined.vars variables which have already been defined within the
#' scope of the block. e.g. function argument
promote.spider.block <- function(block,defined.vars=c()){
  # if block is a symbol, just return that symbol
  if(typeof(block) == "symbol") {
    return(c(block))
  }
  symbols <- c()
  n <- length(block)
  if(n == 0) {
    return(symbols)
  }
  # n >= 1 here (n == 0 returned above), so 1:n is safe.
  for(i in 1:n){
    node <- block[[i]]
    # Really weird bug that comes from assigning the "empty" symbol to a
    # variable. No obvious way to test for this case other than a try/catch
    is.valid.symbol <- tryCatch({
      node
      TRUE
    }, error = function(e) {
      FALSE
    })
    if(!is.valid.symbol){ next }
    node.type <- typeof(node)
    # if node type is "symbol" then it might be a variable
    if(node.type == "symbol"){
      # if symbol not already defined then it might be a dependency
      if(!any(node == defined.vars)){
        symbols <- c(symbols,node)
      }
      # if node type is "language" then it is another block we'll want to spider
    } else if (node.type == "language"){
      # is the block an assignment statement? if so we'll want to add the
      # assignment result to the list of defined variables
      if ((node[[1]] == as.symbol("<-")) || (node[[1]] == as.symbol("="))){
        # Code will look like this:
        # `assign.to` <- `assign.from`
        # Spider the RHS first so a self-referencing assignment
        # (`x <- x + 1`) still flags `x` as a dependency.
        assign.from <- node[[3]]
        assign.from.type <- typeof(assign.from)
        if (assign.from.type == "symbol"){
          # if symbol not already defined then it might be a dependency
          if (!any(assign.from == defined.vars)){
            symbols <- c(symbols, assign.from)
          }
        } else if (assign.from.type == "language") {
          # RHS is itself an expression: recurse into it.
          symbols <- c(symbols, promote.spider.block(assign.from, defined.vars))
        }
        assign.to <- node[[2]]
        assign.to.type <- typeof(assign.to)
        if (assign.to.type == "symbol"){
          # yay! the user has defined a variable
          defined.vars <- c(assign.to,defined.vars)
        } else if (assign.to.type == "language"){
          # Wait, what?!?! are you assigning to a block of code?
          # (e.g. `x[i] <- v` — the LHS is a call; spider it too.)
          symbols <- c(symbols,promote.spider.block(assign.to, defined.vars))
        }
      } else {
        # if the block isn't an assignment, recursively crawl
        symbols <- c(symbols,promote.spider.block(node,defined.vars))
      }
    }
  }
  # return a list of symbols which are candidates for global dependency
  symbols
}
#' Private function for spidering function source code
#'
#' Given a function's name, finds symbols used in its body that are not
#' among its formal arguments and that exist in the global environment —
#' i.e. the function's global-variable dependencies.
#'
#' @param func.name name of function you want to spider
#' @importFrom utils getAnywhere
promote.spider.func <- function(func.name){
  # parse function to pull out main block and argument names
  # (indexing assumes getAnywhere's printed output parses with the
  # definition at [[2]] and the function call at [[2]][[2]] —
  # TODO confirm this holds across R versions)
  func <- parse(text=getAnywhere(func.name))[[2]][[2]]
  # we will be comparing symbols not strings
  args <- lapply(names(func[[2]]),as.symbol)
  block <- func[[3]]
  # get all symbols used during function which are dependencies
  func.vars <- unique(promote.spider.block(block,defined.vars=args))
  # return dependency candidates which are defined in the global scope
  # (these are all variables we'll want to capture)
  intersect(func.vars,names(as.list(.GlobalEnv)))
}
#' Private function for determining model dependencies
#'
#' List all object names which are dependencies of `model.predict`.
#' Breadth-first crawl: starting from model.predict, every global variable a
#' spidered function uses is recorded as a dependency, and any global
#' closure found among those variables is queued to be spidered in turn.
promote.ls <- function(){
  funcs <- c("model.predict") # function queue to spider
  global.vars <- ls(.GlobalEnv,all.names=T)
  if (!("model.predict" %in% global.vars)){
    err.msg <- "ERROR: You must define \"model.predict\" before deploying a model"
    stop(err.msg)
  }
  # model.predict itself is always part of the captured dependencies
  dependencies <- funcs
  while(length(funcs) > 0){
    # pop first function from queue
    func.name <- funcs[[1]]
    n.funcs <- length(funcs)
    if(n.funcs > 1){
      funcs <- funcs[2:length(funcs)]
    } else {
      funcs <- c()
    }
    # spider a function and get all variable dependencies
    func.vars <- promote.spider.func(func.name)
    n.vars <- length(func.vars)
    if(n.vars > 0){
      for(i in 1:n.vars){
        var <- func.vars[[i]]
        # is variable already a dependency?
        if(!(var %in% dependencies)){
          dependencies <- c(var,dependencies)
          # if this variable is a function we're going to
          # want to spider it as well
          if(typeof(.GlobalEnv[[var]]) == "closure"){
            # add function to function queue
            funcs <- c(var,funcs)
          }
        }
      }
    }
  }
  # model.require was the pre-0.13.9 way to declare dependencies; its
  # presence is treated as a hard error to force migration
  if("model.require" %in% global.vars){
    stop("Warning: model.require is deprecated as of promoter 0.13.9 - please use promote.library to specify model dependencies")
  }
  dependencies
}
|
/R/env-capture.R
|
no_license
|
cran/promote
|
R
| false
| false
| 6,202
|
r
|
#' Private function for capturing the source code of a model
#'
#' Deparses the requested global functions (plus, optionally, the
#' promote$model.require function) into a single source string.
#'
#' @param funcs names of global functions to capture; defaults to the
#'   required promote model functions ("model.predict")
#' @param capture.model.require flag to capture the model.require function
#' @return a single string containing the deparsed source of the requested
#'   functions, separated by blank lines
#' @importFrom utils capture.output
capture.src <- function(funcs = c("model.predict"), capture.model.require=TRUE){
  # side effect: load the model's declared requirements before deparsing
  promote$model.require()
  global.vars <- ls(.GlobalEnv)
  src <- ""
  # compare with isTRUE() rather than `== TRUE` so NA/non-logical values
  # are treated as "do not capture" instead of erroring
  if (isTRUE(capture.model.require)) {
    src <- paste(capture.output(promote$model.require),collapse="\n")
  }
  for(func in funcs){
    # silently skip names that are not defined in the global environment
    if(func %in% global.vars){
      func.src <- paste(capture.output(.GlobalEnv[[func]]), collapse="\n")
      # re-attach the name so the deparsed body is a runnable assignment
      func.src <- paste(func,"<-", func.src)
      src <- paste(src, func.src,sep="\n\n")
    }
  }
  src
}
#' Private function for recursively looking for variables
#'
#' Walks an unevaluated R expression tree and collects every symbol that is
#' used without being locally defined -- these are candidates for global
#' dependencies. Assignments encountered along the way extend the set of
#' locally defined variables for the remainder of the walk.
#'
#' @param block code block (language object or symbol) to spider
#' @param defined.vars variables which have already been defined within the
#' scope of the block. e.g. function argument
#' @return a list of symbols which are candidates for global dependency
promote.spider.block <- function(block,defined.vars=c()){
  # if block is a symbol, just return that symbol
  if(typeof(block) == "symbol") {
    return(c(block))
  }
  symbols <- c()
  n <- length(block)
  # guard before the 1:n loop below -- without it, 1:0 would iterate
  if(n == 0) {
    return(symbols)
  }
  for(i in 1:n){
    node <- block[[i]]
    # Really weird bug that comes from assigning the "empty" symbol to a
    # variable. No obvious way to test for this case other than a try/catch
    is.valid.symbol <- tryCatch({
      node
      TRUE
    }, error = function(e) {
      FALSE
    })
    if(!is.valid.symbol){ next }
    node.type <- typeof(node)
    # if node type is "symbol" then it might be a variable
    if(node.type == "symbol"){
      # if symbol not already defined then it might be a dependency
      # (symbol == list-of-symbols compares by name here)
      if(!any(node == defined.vars)){
        symbols <- c(symbols,node)
      }
    # if node type is "language" then it is another block we'll want to spider
    } else if (node.type == "language"){
      # is the block an assignment statement? if so we'll want to add the
      # assignment result to the list of defined variables
      if ((node[[1]] == as.symbol("<-")) || (node[[1]] == as.symbol("="))){
        # Code will look like this:
        # `assign.to` <- `assign.from`
        assign.from <- node[[3]]
        assign.from.type <- typeof(assign.from)
        if (assign.from.type == "symbol"){
          # if symbol not already defined then it might be a dependency
          if (!any(assign.from == defined.vars)){
            symbols <- c(symbols, assign.from)
          }
        } else if (assign.from.type == "language") {
          # right-hand side is itself an expression -- recurse into it
          symbols <- c(symbols, promote.spider.block(assign.from, defined.vars))
        }
        assign.to <- node[[2]]
        assign.to.type <- typeof(assign.to)
        if (assign.to.type == "symbol"){
          # yay! the user has defined a variable
          defined.vars <- c(assign.to,defined.vars)
        } else if (assign.to.type == "language"){
          # Wait, what?!?! are you assigning to a block of code?
          # (e.g. x[i] <- ... ; spider it for dependencies too)
          symbols <- c(symbols,promote.spider.block(assign.to, defined.vars))
        }
      } else {
        # if the block isn't an assignment, recursively crawl
        symbols <- c(symbols,promote.spider.block(node,defined.vars))
      }
    }
  }
  # return a list of symbols which are candidates for global dependency
  symbols
}
#' Private function for spidering function source code
#'
#' Parses the printed definition of a global function and returns the names
#' of global objects it appears to depend on.
#'
#' @param func.name name of function you want to spider
#' @return character vector of global object names used by the function
#' @importFrom utils getAnywhere
promote.spider.func <- function(func.name){
  # parse function to pull out main block and argument names
  # NOTE(review): this indexes into the printed output of getAnywhere();
  # the [[2]][[2]] layout is assumed stable across R versions -- confirm.
  func <- parse(text=getAnywhere(func.name))[[2]][[2]]
  # we will be comparing symbols not strings
  args <- lapply(names(func[[2]]),as.symbol)
  block <- func[[3]]
  # get all symbols used during function which are dependencies
  func.vars <- unique(promote.spider.block(block,defined.vars=args))
  # return dependency candidates which are defined in the global scope
  # (these are all variables we'll want to capture)
  intersect(func.vars,names(as.list(.GlobalEnv)))
}
#' Private function for determining model dependencies
#'
#' List all object names which are dependencies of `model.predict`.
#' Performs a breadth-first crawl: model.predict is spidered first, every
#' global variable it uses is recorded, and any global closure among those
#' variables is queued to be spidered in turn.
promote.ls <- function(){
  # queue of function names still to be spidered
  funcs <- c("model.predict")
  global.vars <- ls(.GlobalEnv, all.names = TRUE)
  if (!("model.predict" %in% global.vars)) {
    stop("ERROR: You must define \"model.predict\" before deploying a model")
  }
  # model.predict itself is always part of the captured dependencies
  dependencies <- funcs
  while (length(funcs) > 0) {
    # dequeue the next function name
    func.name <- funcs[[1]]
    funcs <- funcs[-1]
    # record every previously-unseen global variable this function uses
    for (dep.name in promote.spider.func(func.name)) {
      if (dep.name %in% dependencies) {
        next
      }
      dependencies <- c(dep.name, dependencies)
      # global closures must themselves be spidered for dependencies
      if (typeof(.GlobalEnv[[dep.name]]) == "closure") {
        funcs <- c(dep.name, funcs)
      }
    }
  }
  # model.require was the pre-0.13.9 way to declare dependencies; its
  # presence is a hard error to force migration to promote.library
  if ("model.require" %in% global.vars) {
    stop("Warning: model.require is deprecated as of promoter 0.13.9 - please use promote.library to specify model dependencies")
  }
  dependencies
}
|
# Course: Coursera Data Scientist - Capstone Project
# Author: Larry Riggen
# Creation Date: 2018-01-23
# Purpose: Provide the shiny server functions for the next word prediction app
#
# Pre-computed n-gram frequency tables are loaded once at startup; the
# server cleans the user's text reactively and predicts the next word.
suppressPackageStartupMessages(c(
  library(shinythemes),
  library(shiny),
  library(tm),
  library(stringr),
  library(markdown),
  library(stylo)))
# cleanInput() -- and presumably nextpredictedWord() -- are defined in
# inputCleaner.R; TODO confirm where nextpredictedWord comes from
source("./inputCleaner.R")
# n-gram lookup tables built offline
finalbigram <- readRDS(file="./data/finalbigram.RData")
finaltrigram <- readRDS(file="./data/finaltrigram.RData")
finalquadgram <- readRDS(file="./data/finalquadgram.RData")
shinyServer(function(input, output) {
  # reactive: re-run the prediction whenever the input text changes
  predictedWord <- reactive({
    text <- input$text
    textInput <- cleanInput(text)
    wordCount <- length(textInput)
    predictedWord <- nextpredictedWord(wordCount,textInput)})
  output$predictedWord <- renderPrint(predictedWord())
  # echo the raw input back to the UI
  output$inputText <- renderText({ input$text }, quoted = FALSE)
})
|
/Shiny/server.R
|
no_license
|
ldriggen/Data-Scientist-Capstone-Project-Coursera
|
R
| false
| false
| 1,003
|
r
|
# Course: Coursera Data Scientist - Capstone Project
# Author: Larry Riggen
# Creation Date: 2018-01-23
# Purpose: Provide the shiny server functions for the next word prediction app
#
# Pre-computed n-gram frequency tables are loaded once at startup; the
# server cleans the user's text reactively and predicts the next word.
suppressPackageStartupMessages(c(
  library(shinythemes),
  library(shiny),
  library(tm),
  library(stringr),
  library(markdown),
  library(stylo)))
# cleanInput() -- and presumably nextpredictedWord() -- are defined in
# inputCleaner.R; TODO confirm where nextpredictedWord comes from
source("./inputCleaner.R")
# n-gram lookup tables built offline
finalbigram <- readRDS(file="./data/finalbigram.RData")
finaltrigram <- readRDS(file="./data/finaltrigram.RData")
finalquadgram <- readRDS(file="./data/finalquadgram.RData")
shinyServer(function(input, output) {
  # reactive: re-run the prediction whenever the input text changes
  predictedWord <- reactive({
    text <- input$text
    textInput <- cleanInput(text)
    wordCount <- length(textInput)
    predictedWord <- nextpredictedWord(wordCount,textInput)})
  output$predictedWord <- renderPrint(predictedWord())
  # echo the raw input back to the UI
  output$inputText <- renderText({ input$text }, quoted = FALSE)
})
|
# 2016-08-05
# Jake Yeung
# compare_hogenesch_tissue_wide_in_livkid_wtko.R
#
# Compares model assignments of Hogenesch "tissue-wide" rhythmic genes
# (rhythmic in >= 8 tissues) within the liver/kidney WT-KO fits, and plots
# how many of those genes fall into each model.
# NOTE(review): rm(list=ls()) and setwd() in scripts are generally
# discouraged; retained because all paths below are relative to this dir.
rm(list=ls())
setwd("/home/yeung/projects/tissue-specificity")
# project helper functions (dplyr/ggplot2 are presumably attached by one of
# these -- TODO confirm)
source("scripts/functions/PlotFunctions.R")
source("scripts/functions/PlotGeneAcrossTissues.R")
source("scripts/functions/NcondsFunctions.R")
source("scripts/functions/SvdFunctions.R")
source("scripts/functions/LoadActivitiesLong.R")
source("scripts/functions/LiverKidneyFunctions.R")
source("scripts/functions/PlotActivitiesFunctions.R")
source("scripts/functions/FourierFunctions.R")
source("scripts/functions/GetTFs.R")
source("scripts/functions/LdaFunctions.R")
source("scripts/functions/HandleMotifNames.R")
source("scripts/functions/RemoveP2Name.R")
source("scripts/functions/GetTopMotifs.R")
source("scripts/functions/NcondsAnalysisFunctions.R")
source("scripts/functions/ModelStrToModel.R")
source("scripts/functions/ProteomicsFunctions.R")
# Load hogenesch data -----------------------------------------------------
# these .Robj files provide fits.long.filt and fits.long -- TODO confirm
# the object names against the file contents
load("Robjs/liver_kidney_atger_nestle/fits.long.multimethod.filtbest.staggeredtimepts.bugfixed.annotated.Robj", v=T)
load("Robjs/nconds_g1000_11_tissues/fits_long.11_tiss_3_max.g1000.bestmodel.filteramp.0.15.Robj", v=T)
fits.long.filt <- subset(fits.long.filt, method == "g=1001")
# number of model parameters = number of ';'-separated model components
fits.long.filt$n.params <- sapply(fits.long.filt$model, function(m) return(length(strsplit(as.character(m), ";")[[1]])))
fits.long.filt$n.rhyth <- sapply(fits.long.filt$model, GetNrhythFromModel)
# Get tissue wide genes ---------------------------------------------------
# "tissue-wide" = rhythmic in at least 8 tissues in the Hogenesch fits
genes.tw <- as.character(subset(fits.long, n.rhyth >= 8)$gene)
fits.hog <- subset(fits.long.filt, gene %in% genes.tw)
# Count models ------------------------------------------------------------
fits.sum <- fits.hog %>%
  group_by(model) %>%
  summarise(count = length(gene)) %>%
  arrange(desc(count))
# OrderDecreasing() comes from the sourced helper files
fits.sum <- OrderDecreasing(fits.sum, jfactor = "model", jval = "count")
ggplot(fits.sum, aes(x = model, y = count)) + geom_bar(stat = "identity")
|
/scripts/liver_kidney_WTKO/compare_hogenesch_tissue_wide_in_liverkid_wtko.R
|
no_license
|
jakeyeung/Yeung_et_al_2018_TissueSpecificity
|
R
| false
| false
| 1,978
|
r
|
# 2016-08-05
# Jake Yeung
# compare_hogenesch_tissue_wide_in_livkid_wtko.R
#
# Compares model assignments of Hogenesch "tissue-wide" rhythmic genes
# (rhythmic in >= 8 tissues) within the liver/kidney WT-KO fits, and plots
# how many of those genes fall into each model.
# NOTE(review): rm(list=ls()) and setwd() in scripts are generally
# discouraged; retained because all paths below are relative to this dir.
rm(list=ls())
setwd("/home/yeung/projects/tissue-specificity")
# project helper functions (dplyr/ggplot2 are presumably attached by one of
# these -- TODO confirm)
source("scripts/functions/PlotFunctions.R")
source("scripts/functions/PlotGeneAcrossTissues.R")
source("scripts/functions/NcondsFunctions.R")
source("scripts/functions/SvdFunctions.R")
source("scripts/functions/LoadActivitiesLong.R")
source("scripts/functions/LiverKidneyFunctions.R")
source("scripts/functions/PlotActivitiesFunctions.R")
source("scripts/functions/FourierFunctions.R")
source("scripts/functions/GetTFs.R")
source("scripts/functions/LdaFunctions.R")
source("scripts/functions/HandleMotifNames.R")
source("scripts/functions/RemoveP2Name.R")
source("scripts/functions/GetTopMotifs.R")
source("scripts/functions/NcondsAnalysisFunctions.R")
source("scripts/functions/ModelStrToModel.R")
source("scripts/functions/ProteomicsFunctions.R")
# Load hogenesch data -----------------------------------------------------
# these .Robj files provide fits.long.filt and fits.long -- TODO confirm
# the object names against the file contents
load("Robjs/liver_kidney_atger_nestle/fits.long.multimethod.filtbest.staggeredtimepts.bugfixed.annotated.Robj", v=T)
load("Robjs/nconds_g1000_11_tissues/fits_long.11_tiss_3_max.g1000.bestmodel.filteramp.0.15.Robj", v=T)
fits.long.filt <- subset(fits.long.filt, method == "g=1001")
# number of model parameters = number of ';'-separated model components
fits.long.filt$n.params <- sapply(fits.long.filt$model, function(m) return(length(strsplit(as.character(m), ";")[[1]])))
fits.long.filt$n.rhyth <- sapply(fits.long.filt$model, GetNrhythFromModel)
# Get tissue wide genes ---------------------------------------------------
# "tissue-wide" = rhythmic in at least 8 tissues in the Hogenesch fits
genes.tw <- as.character(subset(fits.long, n.rhyth >= 8)$gene)
fits.hog <- subset(fits.long.filt, gene %in% genes.tw)
# Count models ------------------------------------------------------------
fits.sum <- fits.hog %>%
  group_by(model) %>%
  summarise(count = length(gene)) %>%
  arrange(desc(count))
# OrderDecreasing() comes from the sourced helper files
fits.sum <- OrderDecreasing(fits.sum, jfactor = "model", jval = "count")
ggplot(fits.sum, aes(x = model, y = count)) + geom_bar(stat = "identity")
|
#' Plot x3p object as an image
#'
#' Renders the surface matrix of an x3p scan in an rgl window, optionally
#' highlighting a horizontal crosscut region, and optionally saves a
#' snapshot to file.
#'
#' @param x3p x3p object
#' @param file file name for saving, if file is NULL the opengl device stays open.
#' The file extension determines the type of output. Possible extensions are png, stl (suitable for 3d printing), or svg.
#' @param col color specification
#' @param crosscut crosscut index (deprecated; use x3p_add_hline instead)
#' @param ccParam list with named components, consisting of parameters for showing crosscuts: color and radius for crosscut region
#' @param size vector of width and height
#' @param zoom numeric value indicating the amount of zoom
#' @param multiply exaggerate the relief by factor multiply
#' @param ... not used
#' @export
#' @import rgl
#' @importFrom rgl snapshot3d r3dDefaults
#' @examples
#' \dontrun{
#' logo <- read_x3p(system.file("csafe-logo.x3p", package="x3ptools"))
#' image_x3p(logo, file = "logo.png", crosscut = 50*.645e-6)
#' # alternative to crosscut
#' logoplus <- x3p_add_hline(logo, yintercept = 50*.645e-6, color = "#e6bf98", size = 5)
#' image_x3p(logoplus, size = c(741, 419), zoom=0.5)
#' }
image_x3p <- function(x3p, file = NULL, col = "#cd7f32",
                      crosscut = NA,
                      ccParam = list(color = "#e6bf98",
                                     radius = 5),
                      size = c(750, 250), zoom = 0.35, multiply = 5, ...) {
  stopifnot("x3p" %in% class(x3p))
  surface <- x3p$surface.matrix
  z <- multiply * surface # Exaggerate the relief
  yidx <- ncol(z):1
  # physical coordinates derived from the scan increments
  y <- x3p$header.info$incrementY * yidx #
  x <- x3p$header.info$incrementX * (1:nrow(z)) #
  params <- rgl::r3dDefaults
  # params$viewport <- c(0,0, 750, 250)
  #
  params$windowRect <- c(40, 125, 40 + size[1], 125 + size[2])
  params$userMatrix <- diag(c(1, 1, 1, 1))
  params$zoom <- zoom
  open3d(params = params)
  # replace the default lighting with two dimmer lights
  rgl.pop("lights")
  # xyz <- matrix(c(-2000, mean(y), max(z, na.rm=TRUE)), ncol = 3)
  xyz <- matrix(c(
    min(y) - diff(range(y)),
    mean(y), max(z, na.rm = TRUE)
  ), ncol = 3)
  light3d(
    x = xyz, diffuse = "gray40",
    specular = "gray40", ambient = "grey10", viewpoint.rel = TRUE
  )
  light3d(diffuse = "gray20", specular = "gray20")
  if (!is.na(crosscut)) {
    .Deprecated("x3p_add_hline", msg = "Use of crosscut is deprecated. Use x3p_add_hline instead.")
    crosscutidx <- which.min(abs(crosscut - y))
    colmat <- matrix(rep(col, length(z)), nrow = nrow(z), ncol = ncol(z))
    if (exists("mask", x3p)) colmat <- as.vector(x3p$mask)
    # NOTE(review): which.min() always returns a length-1 index, so the
    # else branch below appears unreachable; the out-of-range case is
    # caught by the explicit crosscut > max(y) check that follows.
    if (length(crosscutidx) > 0) {
      coloridx <- pmax(crosscutidx - ccParam$radius, 0):pmin(crosscutidx + ccParam$radius, ncol(z))
      colmat[, coloridx] <- ccParam$color
    } else {
      warning(sprintf("Crosscut does not map to x3p file correctly. Crosscut is at %f, scan has height of %f", crosscut, max(y)))
    }
    if (crosscut > max(y))
      warning(sprintf("Crosscut does not map to x3p file correctly. Crosscut is at %f, scan has height of %f", crosscut, max(y)))
    surface3d(x, y, z, color = colmat, back = "fill")
  } else {
    # no crosscut: color by mask when present, otherwise flat color
    if (exists("mask", x3p)) col <- as.vector(x3p$mask)
    surface3d(x, y, z, color = col, back = "fill")
  }
  if (!is.null(file)) {
    x3p_snapshot(file)
    rgl.close()
  }
}
#' Take a snapshot of the current rgl file
#'
#' Make a snapshot of the current rgl device and save it to file. Options for
#' file formats are png, svg, and stl (for 3d printing).
#' @param file file name for saving.
#' The file extension determines the type of output. Possible extensions are
#' png, stl (suitable for 3d printing), or svg; matching is case-insensitive.
#' @export
x3p_snapshot <- function(file) {
  if (!is.null(file)) {
    splits <- strsplit(file, split = "\\.")
    # use the text after the last dot as the extension; lower-case it so
    # "PNG"/"Svg"/"STL" are accepted as well
    extension <- tolower(splits[[1]][length(splits[[1]])])
    if (extension == "png") {
      rgl.snapshot(filename = file, top = TRUE)
    }
    if (extension == "svg") {
      rgl.postscript(filename = file, fmt = "svg")
    }
    if (extension == "stl") {
      writeSTL(con = file)
    }
    # unrecognized extensions are silently ignored (matches prior behavior)
  }
}
|
/R/image_x3p.R
|
no_license
|
sctyner/x3ptools
|
R
| false
| false
| 3,939
|
r
|
#' Plot x3p object as an image
#'
#' Renders the surface matrix of an x3p scan in an rgl window, optionally
#' highlighting a horizontal crosscut region, and optionally saves a
#' snapshot to file.
#'
#' @param x3p x3p object
#' @param file file name for saving, if file is NULL the opengl device stays open.
#' The file extension determines the type of output. Possible extensions are png, stl (suitable for 3d printing), or svg.
#' @param col color specification
#' @param crosscut crosscut index (deprecated; use x3p_add_hline instead)
#' @param ccParam list with named components, consisting of parameters for showing crosscuts: color and radius for crosscut region
#' @param size vector of width and height
#' @param zoom numeric value indicating the amount of zoom
#' @param multiply exaggerate the relief by factor multiply
#' @param ... not used
#' @export
#' @import rgl
#' @importFrom rgl snapshot3d r3dDefaults
#' @examples
#' \dontrun{
#' logo <- read_x3p(system.file("csafe-logo.x3p", package="x3ptools"))
#' image_x3p(logo, file = "logo.png", crosscut = 50*.645e-6)
#' # alternative to crosscut
#' logoplus <- x3p_add_hline(logo, yintercept = 50*.645e-6, color = "#e6bf98", size = 5)
#' image_x3p(logoplus, size = c(741, 419), zoom=0.5)
#' }
image_x3p <- function(x3p, file = NULL, col = "#cd7f32",
                      crosscut = NA,
                      ccParam = list(color = "#e6bf98",
                                     radius = 5),
                      size = c(750, 250), zoom = 0.35, multiply = 5, ...) {
  stopifnot("x3p" %in% class(x3p))
  surface <- x3p$surface.matrix
  z <- multiply * surface # Exaggerate the relief
  yidx <- ncol(z):1
  # physical coordinates derived from the scan increments
  y <- x3p$header.info$incrementY * yidx #
  x <- x3p$header.info$incrementX * (1:nrow(z)) #
  params <- rgl::r3dDefaults
  # params$viewport <- c(0,0, 750, 250)
  #
  params$windowRect <- c(40, 125, 40 + size[1], 125 + size[2])
  params$userMatrix <- diag(c(1, 1, 1, 1))
  params$zoom <- zoom
  open3d(params = params)
  # replace the default lighting with two dimmer lights
  rgl.pop("lights")
  # xyz <- matrix(c(-2000, mean(y), max(z, na.rm=TRUE)), ncol = 3)
  xyz <- matrix(c(
    min(y) - diff(range(y)),
    mean(y), max(z, na.rm = TRUE)
  ), ncol = 3)
  light3d(
    x = xyz, diffuse = "gray40",
    specular = "gray40", ambient = "grey10", viewpoint.rel = TRUE
  )
  light3d(diffuse = "gray20", specular = "gray20")
  if (!is.na(crosscut)) {
    .Deprecated("x3p_add_hline", msg = "Use of crosscut is deprecated. Use x3p_add_hline instead.")
    crosscutidx <- which.min(abs(crosscut - y))
    colmat <- matrix(rep(col, length(z)), nrow = nrow(z), ncol = ncol(z))
    if (exists("mask", x3p)) colmat <- as.vector(x3p$mask)
    # NOTE(review): which.min() always returns a length-1 index, so the
    # else branch below appears unreachable; the out-of-range case is
    # caught by the explicit crosscut > max(y) check that follows.
    if (length(crosscutidx) > 0) {
      coloridx <- pmax(crosscutidx - ccParam$radius, 0):pmin(crosscutidx + ccParam$radius, ncol(z))
      colmat[, coloridx] <- ccParam$color
    } else {
      warning(sprintf("Crosscut does not map to x3p file correctly. Crosscut is at %f, scan has height of %f", crosscut, max(y)))
    }
    if (crosscut > max(y))
      warning(sprintf("Crosscut does not map to x3p file correctly. Crosscut is at %f, scan has height of %f", crosscut, max(y)))
    surface3d(x, y, z, color = colmat, back = "fill")
  } else {
    # no crosscut: color by mask when present, otherwise flat color
    if (exists("mask", x3p)) col <- as.vector(x3p$mask)
    surface3d(x, y, z, color = col, back = "fill")
  }
  if (!is.null(file)) {
    x3p_snapshot(file)
    rgl.close()
  }
}
#' Take a snapshot of the current rgl file
#'
#' Make a snapshot of the current rgl device and save it to file. Options for
#' file formats are png, svg, and stl (for 3d printing).
#' @param file file name for saving.
#' The file extension determines the type of output. Possible extensions are
#' png, stl (suitable for 3d printing), or svg; matching is case-insensitive.
#' @export
x3p_snapshot <- function(file) {
  if (!is.null(file)) {
    splits <- strsplit(file, split = "\\.")
    # use the text after the last dot as the extension; lower-case it so
    # "PNG"/"Svg"/"STL" are accepted as well
    extension <- tolower(splits[[1]][length(splits[[1]])])
    if (extension == "png") {
      rgl.snapshot(filename = file, top = TRUE)
    }
    if (extension == "svg") {
      rgl.postscript(filename = file, fmt = "svg")
    }
    if (extension == "stl") {
      writeSTL(con = file)
    }
    # unrecognized extensions are silently ignored (matches prior behavior)
  }
}
|
# Prepare demographic data: restrict demogr.csv to respondents present in
# rawData.csv, then recode sex and derive age.
library(OpenMx)
source("modelUtil.R")
rcd <- read.csv(paste0('.',"/rawData.csv"), stringsAsFactors=FALSE)
# getWhiteList() comes from modelUtil.R -- keep only whitelisted participants
whitelist <- getWhiteList(rcd)
rcd <- rcd[rcd$pa1 %in% whitelist & rcd$pa2 %in% whitelist,]
demogr <- read.csv(paste0('./', 'demogr.csv'), stringsAsFactors = FALSE,na.strings='')
# keep only demographic rows whose source:recno key appears in the raw data
mask <- with(demogr, paste(source, recno, sep=':')) %in% rcd$recno
demogr <- demogr[mask,]
SexItem <- c("Female", "Male")
demogr[['sex']] <- mxFactor(demogr[['sex']], levels=SexItem, labels=tolower(SexItem))
# approximate age in years at time of recording
demogr[['age']] <- demogr[['recDate']] - demogr[['birthyear']]
# Capitalize the first letter of every space-separated word in a character
# vector. With strict = TRUE the remainder of each word is lower-cased.
# Names are preserved on the result only when the input is named.
capwords <- function(s, strict = FALSE) {
  capitalize_words <- function(words) {
    heads <- toupper(substring(words, 1, 1))
    tails <- substring(words, 2)
    if (strict) {
      tails <- tolower(tails)
    }
    paste(heads, tails, sep = "", collapse = " ")
  }
  sapply(strsplit(s, split = " "), capitalize_words,
         USE.NAMES = !is.null(names(s)))
}
# lump countries with <= 3 respondents into "other", then rebuild the
# country factor with capitalized labels
ctbl <- table(demogr[['country']])
demogr[demogr[['country']] %in% names(ctbl)[ctbl<=3], 'country'] <- 'other'
ctbl <- table(demogr[['country']])
cname <- capwords(names(ctbl))
cname[cname=='Usa'] <- 'USA'
# drop 'other' from the named levels; it is re-appended explicitly below
ctbl <- ctbl[-which(names(ctbl)=='other')]
cname <- cname[-which(cname=='Other')]
demogr[['country']] <- mxFactor(demogr[['country']], levels=c(names(ctbl),'other'), labels=c(cname, 'other'))
EduItem = c('Less than high school degree',
            'High school degree or equivalent (e.g., GED)',
            'Some college but no degree',
            'Associate degree',
            'Bachelor degree',
            'Graduate degree')
# ordered education factor; blank entries become NA via exclude = ''
demogr[['education']] <- mxFactor(demogr[['edu']], levels = EduItem,
                                  labels=tolower(EduItem), exclude = '')
demogr[['edu']] <- NULL
demogr[['source']] <- mxFactor(demogr[['source']], labels=c('public','MTurk'),
                               levels=c('public','mturk'),
                               exclude='')
demogr$channel <- demogr$source # alternate name
save(demogr, file="demogr.rda")
|
/rcpa/prepDemogr.R
|
permissive
|
jpritikin/ties
|
R
| false
| false
| 1,874
|
r
|
# Prepare demographic data: restrict demogr.csv to respondents present in
# rawData.csv, then recode sex and derive age.
library(OpenMx)
source("modelUtil.R")
rcd <- read.csv(paste0('.',"/rawData.csv"), stringsAsFactors=FALSE)
# getWhiteList() comes from modelUtil.R -- keep only whitelisted participants
whitelist <- getWhiteList(rcd)
rcd <- rcd[rcd$pa1 %in% whitelist & rcd$pa2 %in% whitelist,]
demogr <- read.csv(paste0('./', 'demogr.csv'), stringsAsFactors = FALSE,na.strings='')
# keep only demographic rows whose source:recno key appears in the raw data
mask <- with(demogr, paste(source, recno, sep=':')) %in% rcd$recno
demogr <- demogr[mask,]
SexItem <- c("Female", "Male")
demogr[['sex']] <- mxFactor(demogr[['sex']], levels=SexItem, labels=tolower(SexItem))
# approximate age in years at time of recording
demogr[['age']] <- demogr[['recDate']] - demogr[['birthyear']]
# Capitalize the first letter of every space-separated word in a character
# vector. With strict = TRUE the remainder of each word is lower-cased.
# Names are preserved on the result only when the input is named.
capwords <- function(s, strict = FALSE) {
  capitalize_words <- function(words) {
    heads <- toupper(substring(words, 1, 1))
    tails <- substring(words, 2)
    if (strict) {
      tails <- tolower(tails)
    }
    paste(heads, tails, sep = "", collapse = " ")
  }
  sapply(strsplit(s, split = " "), capitalize_words,
         USE.NAMES = !is.null(names(s)))
}
# lump countries with <= 3 respondents into "other", then rebuild the
# country factor with capitalized labels
ctbl <- table(demogr[['country']])
demogr[demogr[['country']] %in% names(ctbl)[ctbl<=3], 'country'] <- 'other'
ctbl <- table(demogr[['country']])
cname <- capwords(names(ctbl))
cname[cname=='Usa'] <- 'USA'
# drop 'other' from the named levels; it is re-appended explicitly below
ctbl <- ctbl[-which(names(ctbl)=='other')]
cname <- cname[-which(cname=='Other')]
demogr[['country']] <- mxFactor(demogr[['country']], levels=c(names(ctbl),'other'), labels=c(cname, 'other'))
EduItem = c('Less than high school degree',
            'High school degree or equivalent (e.g., GED)',
            'Some college but no degree',
            'Associate degree',
            'Bachelor degree',
            'Graduate degree')
# ordered education factor; blank entries become NA via exclude = ''
demogr[['education']] <- mxFactor(demogr[['edu']], levels = EduItem,
                                  labels=tolower(EduItem), exclude = '')
demogr[['edu']] <- NULL
demogr[['source']] <- mxFactor(demogr[['source']], labels=c('public','MTurk'),
                               levels=c('public','mturk'),
                               exclude='')
demogr$channel <- demogr$source # alternate name
save(demogr, file="demogr.rda")
|
# Plot 2: line chart of Global_active_power over 2007-02-01/02, saved to
# plot2.png.
# Important note!!!
# The data file contains lines with "?" signs instead of values, which would
# force R to read all columns as factors; na.strings = "?" turns them into
# NA instead. While this solution is neither pretty nor universal, it works
# here.
data = read.csv('household_power_consumption.txt', sep = ";", na.strings = "?")
data = subset (data, Date == "1/2/2007" | Date == "2/2/2007")
png(file = "plot2.png")
# suppress the default x axis; a custom weekday axis is drawn below
with (data, plot(Global_active_power, type = "l", ylab = "Global active power (kilowatts)", xlab = NA, xaxt = "n"))
axis (2, lwd = 2)
# label the start, middle, and end of the two-day series with weekdays
axis(1, c(1,length(data$Global_active_power)/2,length(data$Global_active_power)), c("Thu","Fri", "Sat"))
dev.off()
|
/plot2.R
|
no_license
|
grigorovich-sergey/ExData_Plotting1
|
R
| false
| false
| 654
|
r
|
# Plot 2: line chart of Global_active_power over 2007-02-01/02, saved to
# plot2.png.
# Important note!!!
# The data file contains lines with "?" signs instead of values, which would
# force R to read all columns as factors; na.strings = "?" turns them into
# NA instead. While this solution is neither pretty nor universal, it works
# here.
data = read.csv('household_power_consumption.txt', sep = ";", na.strings = "?")
data = subset (data, Date == "1/2/2007" | Date == "2/2/2007")
png(file = "plot2.png")
# suppress the default x axis; a custom weekday axis is drawn below
with (data, plot(Global_active_power, type = "l", ylab = "Global active power (kilowatts)", xlab = NA, xaxt = "n"))
axis (2, lwd = 2)
# label the start, middle, and end of the two-day series with weekdays
axis(1, c(1,length(data$Global_active_power)/2,length(data$Global_active_power)), c("Thu","Fri", "Sat"))
dev.off()
|
# Site-frequency-spectrum (SFS) plot: bins derived-allele frequencies of one
# chromosome's SNPs in 5% intervals and saves a bar chart.
# NOTE(review): each pre/opt pair below overwrites the previous one -- only
# the last pair (pre = 0, opt = 'acc84') takes effect; the earlier pairs
# look like manual toggles left in place.
pre = 3
opt = 'acc26'
pre = 2
opt = 'acc56'
pre = 0
opt = 'acc84'
# get_mt_ids() and DIR_Repo are assumed to come from the surrounding
# project environment -- TODO confirm
accs = get_mt_ids(opt)
dirI = sprintf("%s/mt_35/31_phylogeny/%02d/08_stat", DIR_Repo, pre)
# drop sites missing (N) in more than 30% of accessions
cutoff_missing = length(accs) * 0.3
intervals = seq(0,0.5,0.05)
chr = 5
fi = file.path(dirI, paste("chr", chr, ".tbl", sep=''))
s01 = read.table(fi, header=T, sep="\t", as.is=T)
# derived-allele frequency per site
s02 = cbind(s01, freq_der = s01$n_der / (s01$n_anc+s01$n_der))
# keep biallelic sites that pass the missing-data cutoff
s03 = s02[s02$n_states == 2 & s02$n_N < cutoff_missing,]
t01 = table(cut(s03$freq_der, breaks=intervals))
df = data.frame(bin=names(t01), count=as.numeric(t01))
# NOTE(review): opts()/theme_text()/formatter= belong to a legacy ggplot2
# API (< 0.9.2); this script requires an old ggplot2 version to run.
# "Alelle" in the axis label below is a typo in the displayed text.
p = ggplot(df) +
  geom_bar(aes(x=bin, y=count, fill='all', width=0.6), stat='identity', position='dodge') +
  scale_fill_brewer(palette='Set3') +
  scale_x_discrete(name="Minor Alelle Frequency") +
  scale_y_continuous(name="Number SNPs", formatter="comma") +
  opts(title=paste("MAF distribution of chr", chr, " SNPs", sep=""), axis.text.x = theme_text(angle=45, size=8))
fo = file.path(dirI, paste("chr", chr, "_sfs.png", sep=""))
ggsave(fo, p, width=5, height=4)
|
/r/sfs.R
|
no_license
|
rakeshponnala/luffy
|
R
| false
| false
| 1,037
|
r
|
# Site-frequency-spectrum (SFS) plot: bins derived-allele frequencies of one
# chromosome's SNPs in 5% intervals and saves a bar chart.
# NOTE(review): each pre/opt pair below overwrites the previous one -- only
# the last pair (pre = 0, opt = 'acc84') takes effect; the earlier pairs
# look like manual toggles left in place.
pre = 3
opt = 'acc26'
pre = 2
opt = 'acc56'
pre = 0
opt = 'acc84'
# get_mt_ids() and DIR_Repo are assumed to come from the surrounding
# project environment -- TODO confirm
accs = get_mt_ids(opt)
dirI = sprintf("%s/mt_35/31_phylogeny/%02d/08_stat", DIR_Repo, pre)
# drop sites missing (N) in more than 30% of accessions
cutoff_missing = length(accs) * 0.3
intervals = seq(0,0.5,0.05)
chr = 5
fi = file.path(dirI, paste("chr", chr, ".tbl", sep=''))
s01 = read.table(fi, header=T, sep="\t", as.is=T)
# derived-allele frequency per site
s02 = cbind(s01, freq_der = s01$n_der / (s01$n_anc+s01$n_der))
# keep biallelic sites that pass the missing-data cutoff
s03 = s02[s02$n_states == 2 & s02$n_N < cutoff_missing,]
t01 = table(cut(s03$freq_der, breaks=intervals))
df = data.frame(bin=names(t01), count=as.numeric(t01))
# NOTE(review): opts()/theme_text()/formatter= belong to a legacy ggplot2
# API (< 0.9.2); this script requires an old ggplot2 version to run.
# "Alelle" in the axis label below is a typo in the displayed text.
p = ggplot(df) +
  geom_bar(aes(x=bin, y=count, fill='all', width=0.6), stat='identity', position='dodge') +
  scale_fill_brewer(palette='Set3') +
  scale_x_discrete(name="Minor Alelle Frequency") +
  scale_y_continuous(name="Number SNPs", formatter="comma") +
  opts(title=paste("MAF distribution of chr", chr, " SNPs", sep=""), axis.text.x = theme_text(angle=45, size=8))
fo = file.path(dirI, paste("chr", chr, "_sfs.png", sep=""))
ggsave(fo, p, width=5, height=4)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SSURGO_FUNCTIONS.R
\name{get_ssurgo_inventory}
\alias{get_ssurgo_inventory}
\title{Download and crop a shapefile of the SSURGO study areas.}
\usage{
get_ssurgo_inventory(template = NULL, raw.dir)
}
\arguments{
\item{template}{A Raster* or Spatial* object to serve
as a template for cropping.}
\item{raw.dir}{A character string indicating where raw downloaded files should be put.
The directory will be created if missing.}
}
\value{
A \code{SpatialPolygonsDataFrame} of the SSURGO study areas within
the specified \code{template}.
}
\description{
\code{get_ssurgo_inventory} returns a \code{SpatialPolygonsDataFrame} of the SSURGO study areas within
the specified \code{template}. If template is not provided, returns the entire SSURGO inventory of study areas.
}
\keyword{internal}
|
/man/get_ssurgo_inventory.Rd
|
permissive
|
Ashkenazic/FedData
|
R
| false
| true
| 863
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SSURGO_FUNCTIONS.R
\name{get_ssurgo_inventory}
\alias{get_ssurgo_inventory}
\title{Download and crop a shapefile of the SSURGO study areas.}
\usage{
get_ssurgo_inventory(template = NULL, raw.dir)
}
\arguments{
\item{template}{A Raster* or Spatial* object to serve
as a template for cropping.}
\item{raw.dir}{A character string indicating where raw downloaded files should be put.
The directory will be created if missing.}
}
\value{
A \code{SpatialPolygonsDataFrame} of the SSURGO study areas within
the specified \code{template}.
}
\description{
\code{get_ssurgo_inventory} returns a \code{SpatialPolygonsDataFrame} of the SSURGO study areas within
the specified \code{template}. If template is not provided, returns the entire SSURGO inventory of study areas.
}
\keyword{internal}
|
# server.R
# Restaurant-map Shiny app: loads a pre-joined restaurant/county dataset at
# startup and derives the lookup tables used by the server callbacks below.
library(dplyr)
library(ggplot2) # for getting midwest dataset
library(httr)
library(jsonlite)
library(leaflet)
library(RColorBrewer)
library(reshape2)
library(DT)
library(lubridate)
##########################Code for generating the joint data ##########################
joined_data <- read.csv("data/joined_data.csv", stringsAsFactors = F)
# add a display address and a per-chain location count, sorted most-common
# chain first
joined_data <- joined_data %>%
  mutate(full_address = paste(address, city, County, State)) %>%
  group_by(name) %>%
  mutate(count = n()) %>%
  arrange(-count) %>%
  ungroup()
# one row per chain with its location count, most common first
restaurant_chain_list <- joined_data %>%
  group_by(name) %>%
  summarize(count=n()) %>%
  arrange(-count, name)
# dropdown choices: "All" plus every chain name
restaurant_choices <- c("All", restaurant_chain_list %>%
  select(name) %>%
  .[[1]])
restaurant_location_data <- joined_data %>% select(longitude, latitude, postalCode, name, address, city, County, State) %>%
  mutate(full_address = paste(address, city, County, State, postalCode, sep=", "))
# total locations belonging to the five most common chains
# (restaurant_choices[2:6]; index 1 is "All")
top_five_count <- 0
for (i in 2:6) {
  top_five_count <- top_five_count + joined_data %>% filter(name == restaurant_choices[i]) %>% nrow()
}
# NOTE(review): restaurantSelections appears unused in the visible code
restaurantSelections <- joined_data %>%
  select(name) %>%
  unique()
stateSelection <- joined_data %>%
  select(State, name) %>%
  select(State) %>%
  unique()
# NOTE(review): the trailing bare `filter` pipes the data through filter()
# with no conditions -- a no-op; this looks like an unfinished expression
# and restaurantByState appears unused.
restaurantByState <- joined_data %>%
  select(State, name) %>%
  filter
# Start shinyServer
shinyServer(function(input, output, session) {
observe({
selected_chain <- input$restaurant_chain
if (selected_chain == "") {
selected_chain = "All"
}
selected_data <- joined_data
if (selected_chain != "All") {
selected_data <- selected_data %>% filter(name == selected_chain)
}
colorData <- selected_data$name
pal <- colorFactor(if_else(selected_chain != "All", "#33007b", "viridis"), colorData, ordered = F, na.color = 'Purple')
leafletProxy("map", data = selected_data) %>%
clearShapes() %>%
addCircles(~longitude, ~latitude, radius=30000, layerId=~postalCode,
stroke=FALSE, fillOpacity=0.4, fillColor=pal(colorData), label=~paste(name, full_address, sep=", ")) %>%
addLegend("bottomleft", pal=pal, values=head(colorData, top_five_count), title="Restaurant Chain (top 5 unordered)",
layerId="colorLegend")
})
observe({
updateSelectInput(
session,
"restaurant_chain",
choices = restaurant_choices
)
})
output$map <- renderLeaflet({
colorData <- joined_data$name
pal <- colorFactor("viridis", colorData, ordered = F)
leaflet(data = joined_data) %>%
addTiles(
urlTemplate = "//{s}.tiles.mapbox.com/v3/jcheng.map-5ebohr46/{z}/{x}/{y}.png",
attribution = 'Maps by <a href="http://www.mapbox.com/">Mapbox</a>'
) %>%
setView(lng = -93.85, lat = 37.45, zoom = 4) %>%
addCircles(~longitude, ~latitude, radius=20000, layerId=~postalCode,
stroke=FALSE, fillOpacity=0.4, fillColor=pal(colorData), label=~paste(name, full_address, sep=", ")) %>%
addLegend("bottomleft", pal=pal, values=head(colorData, top_five_count), title="Restaurant Chain (top 5 unordered)",
layerId="colorLegend")
})
output$top_restaurant_list <- renderTable({
chain_name <- input$restaurant_chain
result <- restaurant_chain_list
if (chain_name != "All") {
result <- result %>% filter(name == chain_name)
}
result %>% head(5)
}, rownames = T, striped = T, hover = T, width = "100%", align = "l")
output$homepage <- renderUI({
HTML(markdown::markdownToHTML(file = "README.md"))
})
output$addresses <- renderTable({
filtered_data <- restaurant_location_data
if (input$address_filter != "") {
filtered_data <- filtered_data %>% filter(
grepl(input$address_filter, full_address, ignore.case = T) | grepl(input$address_filter, name, ignore.case = T))
}
if (input$restaurant_chain != "All") {
filtered_data <- filtered_data %>% filter(name == input$restaurant_chain)
}
filtered_data %>% select(name, full_address) %>% head(100)
}, striped = T, width = "100%", align = "l")
observe({
filtered_data <- restaurant_location_data
if (input$address_filter != "") {
filtered_data <- filtered_data %>% filter(
grepl(input$address_filter, full_address, ignore.case = T) | grepl(input$address_filter, name, ignore.case = T))
}
if (input$restaurant_chain != "All") {
filtered_data <- filtered_data %>% filter(name == input$restaurant_chain)
}
if (nrow(filtered_data) <= 10) {
leafletProxy("map", data = filtered_data) %>%
clearMarkers() %>%
addMarkers(~longitude, ~latitude, layerId=~postalCode, popup=~paste(paste0("<b>",name,"</b>"), full_address, sep="<br/>"))
} else {
leafletProxy("map") %>% clearMarkers()
}
})
observe({
updateSelectInput(session,
"stateChoice",
choices = stateSelection)
})
observe({
stateChoose <- input$stateChoice
filtered_state <- joined_data %>%
filter(State == stateChoose) %>%
select(County)
updateSelectInput(session,
"countyChoice",
choices = filtered_state)
})
  # Pie chart of fast-food chain counts within the selected state; chains
  # with 2 or fewer outlets are dropped to keep the chart readable.
  # NOTE(review): `ttl = sum(n)` is computed but never used afterwards.
  # NOTE(review): `colors` below is not defined in this file; unless it is
  # supplied by global.R it resolves to the base grDevices::colors function
  # -- confirm a palette vector was intended.
  output$distribPie <- renderPlotly({
    distribData <- joined_data %>%
      select(State, County, name) %>%
      filter(State == input$stateChoice) %>%
      count(name) %>%
      mutate(ttl = sum(n)) %>%
      filter(n > 2)
    plot <- plot_ly(distribData,
                    labels = ~name,
                    values = ~n,
                    width = 570,
                    height = 550,
                    type = "pie",
                    textposition = 'inside',
                    textinfo = 'label+percent',
                    insidetextfont = list(color = '#FFFFFF'),
                    marker = list(colors = colors,
                                  line = list(color = '#FFFFFF', width = 1)),
                    showlegend = F) %>%
      layout(title = 'Fast Food distribution in USA by State')
    plot
  })
output$feedback <- renderText({
paste("You have selected <b>", input$countyChoice,
"</b> county located in the state of <b>",
input$stateChoice, "</b>.")
})
  # Summary sentence of county-level statistics for the current selection.
  # NOTE(review): pct_obese_14 and pct_diabetes_14 are selected but never
  # referenced in the output text -- confirm whether they were meant to be
  # reported here.
  output$data <- renderText({
    used <- joined_data %>%
      filter(State == input$stateChoice, County == input$countyChoice) %>%
      select(pct_obese_14, pct_diabetes_14, poverty_rate, count_change_pct, count_per_10k_pop_14) %>%
      unique()
    paste("Change in fast food chain count in 5 years: <b>", used$count_change_pct,
          "%</b><br> restaurants per 10k people in 2014: <b>", used$count_per_10k_pop_14,
          "</b><br> Poverty rate: <b>", used$poverty_rate, "</b>.")
  })
  # Pie chart of the racial composition of the selected county, built by
  # melting the pct_white..pct_other columns into long form.
  # NOTE(review): `colors` is not defined in this file (see distribPie note).
  output$racePie <- renderPlotly({
    race <- joined_data %>%
      filter(State == input$stateChoice, County == input$countyChoice) %>%
      select(County, pct_white:pct_other) %>%
      unique()
    colnames(race) <- c("County", "Caucasian", "African American", "Hispanic", "Asian", "Other")
    df <- melt(race, "County")
    plot <- plot_ly(df, labels = ~variable,
                    values = ~value,
                    width = 570,
                    height = 550,
                    type = "pie",
                    textposition = 'inside',
                    textinfo = 'label+percent',
                    insidetextfont = list(color = '#FFFFFF'),
                    marker = list(colors = colors,
                                  line = list(color = '#FFFFFF', width = 1)),
                    showlegend = F) %>%
      layout(title = 'Race Distribution by County')
    plot
  })
  # Bar chart comparing obesity and diabetes percentages in 2009 vs 2014
  # for the selected county.
  output$chngPlot <- renderPlot({
    filtersd <- joined_data %>%
      filter(State == input$stateChoice, County == input$countyChoice) %>%
      select(County, pct_obese_09, pct_obese_14, pct_diabetes_09, pct_diabetes_14) %>%
      unique()
    # Human-readable labels become the bar categories after melt().
    colnames(filtersd) <- c("County", "% Obese in 2009", "% Obese in 2014", "% Diabetic in 2009", "% Diabetic in 2014")
    df <- melt(filtersd, "County")
    p <- ggplot(data = df,
                mapping = aes(x = variable, y = value, fill = variable)) +
      geom_bar(stat = "identity") +
      labs(title = "Changes in Obesity and Diabetic Within a 5 Year Time Frame by County",
           x = "", y = "percentage") +
      theme(legend.position = "none", plot.title = element_text(hjust = 0.5, size = 20, face = "bold"), axis.text = element_text(size = 15, face = "bold"))
    p
  })
  # Interactive table of the 100 counties with the highest 2014 obesity
  # rates, keeping one max-obesity row per county.
  # NOTE(review): the data stays grouped by County after group_by() -- an
  # ungroup() before head(100) would make the head() semantics clearer.
  output$CountyInfo <- renderDataTable({
    county_impacted <- joined_data %>%
      group_by(County) %>%
      filter(pct_obese_14 == max(pct_obese_14)) %>%
      arrange(- pct_obese_14) %>%
      head(100) %>%
      select(State, County, poverty_rate, count_change_pct, pct_obese_09, pct_obese_14) %>%
      unique()
    colnames(county_impacted) <- c("State", "County", "Poverty Rates (%)", "Count Change in 5 yrs (%)", "obese in 2009 (%)", "Obese in 2014 (%)")
    datatable(county_impacted, options = list(pageLength = 10, scrollX = TRUE, scrollY = '450px')) %>% formatStyle(names(county_impacted))
  })
output$Health_plot <- renderPlot({
p <- ggplot(data = joined_data, mapping = aes_string(x = input$countyChoice, y = input$pct_obese_14)) +
geom_point()
})
  # Static informational pages rendered from bundled markdown files.
  output$QA <- renderUI({
    HTML(markdown::markdownToHTML(file = "README1.md"))
  })
  output$ContactInformation <- renderUI({
    HTML(markdown::markdownToHTML(file = "contactinfo.md"))
  })
})
|
/server.R
|
permissive
|
annajun1224/info201b-final-project
|
R
| false
| false
| 9,565
|
r
|
# server.R
# FIX: shiny and plotly are used below (shinyServer, renderPlotly/plot_ly)
# but were never loaded here; attaching them explicitly makes server.R
# self-contained instead of relying on ui.R/global.R to load them first.
library(shiny)
library(plotly)
library(dplyr)
library(ggplot2) # for getting midwest dataset
library(httr)
library(jsonlite)
library(leaflet)
library(RColorBrewer)
library(reshape2)
library(DT)
library(lubridate)
##########################Code for generating the joint data ##########################
joined_data <- read.csv("data/joined_data.csv", stringsAsFactors = F)
joined_data <- joined_data %>%
mutate(full_address = paste(address, city, County, State)) %>%
group_by(name) %>%
mutate(count = n()) %>%
arrange(-count) %>%
ungroup()
restaurant_chain_list <- joined_data %>%
group_by(name) %>%
summarize(count=n()) %>%
arrange(-count, name)
restaurant_choices <- c("All", restaurant_chain_list %>%
select(name) %>%
.[[1]])
restaurant_location_data <- joined_data %>% select(longitude, latitude, postalCode, name, address, city, County, State) %>%
mutate(full_address = paste(address, city, County, State, postalCode, sep=", "))
top_five_count <- 0
for (i in 2:6) {
top_five_count <- top_five_count + joined_data %>% filter(name == restaurant_choices[i]) %>% nrow()
}
restaurantSelections <- joined_data %>%
select(name) %>%
unique()
stateSelection <- joined_data %>%
select(State, name) %>%
select(State) %>%
unique()
restaurantByState <- joined_data %>%
select(State, name) %>%
filter
# Start shinyServer
shinyServer(function(input, output, session) {
  # Redraw the map circles whenever the chain dropdown changes.
  observe({
    selected_chain <- input$restaurant_chain
    # Before the dropdown is initialised its value is ""; treat as "All".
    if (selected_chain == "") {
      selected_chain = "All"
    }
    selected_data <- joined_data
    if (selected_chain != "All") {
      selected_data <- selected_data %>% filter(name == selected_chain)
    }
    colorData <- selected_data$name
    # Single chain -> one fixed purple; otherwise a viridis factor palette.
    pal <- colorFactor(if_else(selected_chain != "All", "#33007b", "viridis"), colorData, ordered = F, na.color = 'Purple')
    leafletProxy("map", data = selected_data) %>%
      clearShapes() %>%
      addCircles(~longitude, ~latitude, radius=30000, layerId=~postalCode,
                 stroke=FALSE, fillOpacity=0.4, fillColor=pal(colorData), label=~paste(name, full_address, sep=", ")) %>%
      addLegend("bottomleft", pal=pal, values=head(colorData, top_five_count), title="Restaurant Chain (top 5 unordered)",
                layerId="colorLegend")
  })
observe({
updateSelectInput(
session,
"restaurant_chain",
choices = restaurant_choices
)
})
  # Initial map: Mapbox tiles centred on the continental US, one circle per
  # restaurant location coloured by chain, with a legend limited to the
  # first top_five_count entries.
  output$map <- renderLeaflet({
    colorData <- joined_data$name
    pal <- colorFactor("viridis", colorData, ordered = F)
    leaflet(data = joined_data) %>%
      addTiles(
        urlTemplate = "//{s}.tiles.mapbox.com/v3/jcheng.map-5ebohr46/{z}/{x}/{y}.png",
        attribution = 'Maps by <a href="http://www.mapbox.com/">Mapbox</a>'
      ) %>%
      setView(lng = -93.85, lat = 37.45, zoom = 4) %>%
      addCircles(~longitude, ~latitude, radius=20000, layerId=~postalCode,
                 stroke=FALSE, fillOpacity=0.4, fillColor=pal(colorData), label=~paste(name, full_address, sep=", ")) %>%
      addLegend("bottomleft", pal=pal, values=head(colorData, top_five_count), title="Restaurant Chain (top 5 unordered)",
                layerId="colorLegend")
  })
output$top_restaurant_list <- renderTable({
chain_name <- input$restaurant_chain
result <- restaurant_chain_list
if (chain_name != "All") {
result <- result %>% filter(name == chain_name)
}
result %>% head(5)
}, rownames = T, striped = T, hover = T, width = "100%", align = "l")
output$homepage <- renderUI({
HTML(markdown::markdownToHTML(file = "README.md"))
})
output$addresses <- renderTable({
filtered_data <- restaurant_location_data
if (input$address_filter != "") {
filtered_data <- filtered_data %>% filter(
grepl(input$address_filter, full_address, ignore.case = T) | grepl(input$address_filter, name, ignore.case = T))
}
if (input$restaurant_chain != "All") {
filtered_data <- filtered_data %>% filter(name == input$restaurant_chain)
}
filtered_data %>% select(name, full_address) %>% head(100)
}, striped = T, width = "100%", align = "l")
observe({
filtered_data <- restaurant_location_data
if (input$address_filter != "") {
filtered_data <- filtered_data %>% filter(
grepl(input$address_filter, full_address, ignore.case = T) | grepl(input$address_filter, name, ignore.case = T))
}
if (input$restaurant_chain != "All") {
filtered_data <- filtered_data %>% filter(name == input$restaurant_chain)
}
if (nrow(filtered_data) <= 10) {
leafletProxy("map", data = filtered_data) %>%
clearMarkers() %>%
addMarkers(~longitude, ~latitude, layerId=~postalCode, popup=~paste(paste0("<b>",name,"</b>"), full_address, sep="<br/>"))
} else {
leafletProxy("map") %>% clearMarkers()
}
})
observe({
updateSelectInput(session,
"stateChoice",
choices = stateSelection)
})
observe({
stateChoose <- input$stateChoice
filtered_state <- joined_data %>%
filter(State == stateChoose) %>%
select(County)
updateSelectInput(session,
"countyChoice",
choices = filtered_state)
})
output$distribPie <- renderPlotly({
distribData <- joined_data %>%
select(State, County, name) %>%
filter(State == input$stateChoice) %>%
count(name) %>%
mutate(ttl = sum(n)) %>%
filter(n > 2)
plot <- plot_ly(distribData,
labels = ~name,
values = ~n,
width = 570,
height = 550,
type = "pie",
textposition = 'inside',
textinfo = 'label+percent',
insidetextfont = list(color = '#FFFFFF'),
marker = list(colors = colors,
line = list(color = '#FFFFFF', width = 1)),
showlegend = F) %>%
layout(title = 'Fast Food distribution in USA by State')
plot
})
output$feedback <- renderText({
paste("You have selected <b>", input$countyChoice,
"</b> county located in the state of <b>",
input$stateChoice, "</b>.")
})
output$data <- renderText({
used <- joined_data %>%
filter(State == input$stateChoice, County == input$countyChoice) %>%
select(pct_obese_14, pct_diabetes_14, poverty_rate, count_change_pct, count_per_10k_pop_14) %>%
unique()
paste("Change in fast food chain count in 5 years: <b>", used$count_change_pct,
"%</b><br> restaurants per 10k people in 2014: <b>", used$count_per_10k_pop_14,
"</b><br> Poverty rate: <b>", used$poverty_rate, "</b>.")
})
output$racePie <- renderPlotly({
race <- joined_data %>%
filter(State == input$stateChoice, County == input$countyChoice) %>%
select(County, pct_white:pct_other) %>%
unique()
colnames(race) <- c("County", "Caucasian", "African American", "Hispanic", "Asian", "Other")
df <- melt(race, "County")
plot <- plot_ly(df, labels = ~variable,
values = ~value,
width = 570,
height = 550,
type = "pie",
textposition = 'inside',
textinfo = 'label+percent',
insidetextfont = list(color = '#FFFFFF'),
marker = list(colors = colors,
line = list(color = '#FFFFFF', width = 1)),
showlegend = F) %>%
layout(title = 'Race Distribution by County')
plot
})
output$chngPlot <- renderPlot({
filtersd <- joined_data %>%
filter(State == input$stateChoice, County == input$countyChoice) %>%
select(County, pct_obese_09, pct_obese_14, pct_diabetes_09, pct_diabetes_14) %>%
unique()
colnames(filtersd) <- c("County", "% Obese in 2009", "% Obese in 2014", "% Diabetic in 2009", "% Diabetic in 2014")
df <- melt(filtersd, "County")
p <- ggplot(data = df,
mapping = aes(x = variable, y = value, fill = variable)) +
geom_bar(stat = "identity") +
labs(title = "Changes in Obesity and Diabetic Within a 5 Year Time Frame by County",
x = "", y = "percentage") +
theme(legend.position = "none", plot.title = element_text(hjust = 0.5, size = 20, face = "bold"), axis.text = element_text(size = 15, face = "bold"))
p
})
output$CountyInfo <- renderDataTable({
county_impacted <- joined_data %>%
group_by(County) %>%
filter(pct_obese_14 == max(pct_obese_14)) %>%
arrange(- pct_obese_14) %>%
head(100) %>%
select(State, County, poverty_rate, count_change_pct, pct_obese_09, pct_obese_14) %>%
unique()
colnames(county_impacted) <- c("State", "County", "Poverty Rates (%)", "Count Change in 5 yrs (%)", "obese in 2009 (%)", "Obese in 2014 (%)")
datatable(county_impacted, options = list(pageLength = 10, scrollX = TRUE, scrollY = '450px')) %>% formatStyle(names(county_impacted))
})
output$Health_plot <- renderPlot({
p <- ggplot(data = joined_data, mapping = aes_string(x = input$countyChoice, y = input$pct_obese_14)) +
geom_point()
})
output$QA <- renderUI({
HTML(markdown::markdownToHTML(file = "README1.md"))
})
output$ContactInformation <- renderUI({
HTML(markdown::markdownToHTML(file = "contactinfo.md"))
})
})
|
# Emit one exam question's answer choices as LaTeX \item lines, tagging the
# correct choice with \Correct and permuting the responses.
items <-
function(..., Correct=1, KeepLast=0, report=FALSE) {
  # Report generation is handled by QReport(), not here.
  if (report)
    stop("items() cannot produce a report; you need to call QReport()")
  QuestionCounter(QuestionCounter() + 1)  # advance the global question counter
  x <- unlist(list(...))                  # flatten all answers into one vector
  if (CheckDups() && any(duplicated(format(x))))
    stop("Duplicated answers in Q", QuestionCounter(), ": ",
         paste(format(x), collapse=" "))
  # Presumably records a permutation retrievable via getPerm(); KeepLast
  # appears to pin trailing answers in place -- confirm against package docs.
  PermuteResponses(length(x), Correct, KeepLast)
  if (!is.na(Correct))
    x[Correct] <- paste("\\Correct", x[Correct])
  x <- x[getPerm()]                       # apply the stored permutation
  y <- paste("\\item",x,"\n", sep=" ")
  cat(y)
}
|
/Sweavetest/R/items.R
|
no_license
|
dmurdoch/Sweavetest
|
R
| false
| false
| 616
|
r
|
# Emit one exam question's answer choices as LaTeX \item lines: the correct
# answer is tagged with \Correct and the choices are permuted through the
# package's PermuteResponses()/getPerm() machinery.
items <-
function(..., Correct=1, KeepLast=0, report=FALSE) {
  if (report)
    stop("items() cannot produce a report; you need to call QReport()")
  QuestionCounter(QuestionCounter() + 1)
  answers <- unlist(list(...))
  if (CheckDups() && any(duplicated(format(answers))))
    stop("Duplicated answers in Q", QuestionCounter(), ": ",
         paste(format(answers), collapse=" "))
  PermuteResponses(length(answers), Correct, KeepLast)
  if (!is.na(Correct))
    answers[Correct] <- paste("\\Correct", answers[Correct])
  answers <- answers[getPerm()]
  cat(paste("\\item", answers, "\n", sep=" "))
}
|
\name{bnsl_p}
\alias{bnsl_p}
\title{Bayesian Network Structure Learning}
\usage{
bnsl_p(df, psl, tw = 0, proc = 1, s=0, n=0, ss=1)
}
\arguments{
\item{df}{a dataframe.}
\item{psl}{the list of parent sets.}
\item{tw}{the upper limit of the parent set.}
\item{proc}{the criterion based on which the BNSL solution is sought.
proc=1, 2, and 3 indicate that the structure learning is based on Jeffreys [1], MDL [2,3], and BDeu [4], respectively.}
\item{s}{The value computed when obtaining the bound.}
\item{n}{The number of samples.}
\item{ss}{The BDeu parameter.}
}
\description{The function outputs the Bayesian network structure given a dataset based on
an assumed criterion.
}
\value{
The Bayesian network structure in the bn class of bnlearn.
}
\author{
Joe Suzuki and Jun Kawahara
}
\references{
[1] Suzuki, J. ``An Efficient Bayesian Network Structure Learning Strategy", New Generation Computing, December 2016.
[2] Suzuki, J. ``A construction of Bayesian networks from databases based on an MDL principle",
Uncertainty in Artificial Intelligence, pages 266-273, Washington D.C. July, 1993.
[3] Suzuki, J. ``Learning Bayesian Belief Networks Based on the Minimum Description Length Principle: An Efficient Algorithm Using the B & B Technique",
International Conference on Machine Learning, Bari, Italy, July 1996.
[4] Suzuki, J. ``A Theoretical Analysis of the BDeu Scores in Bayesian Network Structure Learning", Behaviormetrika 1(1):1-20,
January 2017.
}
\seealso{parent}
\examples{
library(bnlearn)
p0 <- parent.set(lizards, 0)
p1 <- parent.set(lizards, 1)
p2 <- parent.set(lizards, 2)
bnsl_p(lizards, list(p0, p1, p2))
}
|
/issuestests/BNSL/man/bnsl_p.Rd
|
no_license
|
akhikolla/RcppDeepStateTest
|
R
| false
| false
| 1,619
|
rd
|
\name{bnsl_p}
\alias{bnsl_p}
\title{Bayesian Network Structure Learning}
\usage{
bnsl_p(df, psl, tw = 0, proc = 1, s=0, n=0, ss=1)
}
\arguments{
\item{df}{a dataframe.}
\item{psl}{the list of parent sets.}
\item{tw}{the upper limit of the parent set.}
\item{proc}{the criterion based on which the BNSL solution is sought.
proc=1, 2, and 3 indicate that the structure learning is based on Jeffreys [1], MDL [2,3], and BDeu [4], respectively.}
\item{s}{The value computed when obtaining the bound.}
\item{n}{The number of samples.}
\item{ss}{The BDeu parameter.}
}
\description{The function outputs the Bayesian network structure given a dataset based on
an assumed criterion.
}
\value{
The Bayesian network structure in the bn class of bnlearn.
}
\author{
Joe Suzuki and Jun Kawahara
}
\references{
[1] Suzuki, J. ``An Efficient Bayesian Network Structure Learning Strategy", New Generation Computing, December 2016.
[2] Suzuki, J. ``A construction of Bayesian networks from databases based on an MDL principle",
Uncertainty in Artificial Intelligence, pages 266-273, Washington D.C. July, 1993.
[3] Suzuki, J. ``Learning Bayesian Belief Networks Based on the Minimum Description Length Principle: An Efficient Algorithm Using the B & B Technique",
International Conference on Machine Learning, Bari, Italy, July 1996.
[4] Suzuki, J. ``A Theoretical Analysis of the BDeu Scores in Bayesian Network Structure Learning", Behaviormetrika 1(1):1-20,
January 2017.
}
\seealso{parent}
\examples{
library(bnlearn)
p0 <- parent.set(lizards, 0)
p1 <- parent.set(lizards, 1)
p2 <- parent.set(lizards, 2)
bnsl_p(lizards, list(p0, p1, p2))
}
|
library(pi0) #estimate pi0(lambda)
library(ggplot2)
library(VGAM) # estimate fai
library(fitdistrplus) # fit empirical distribution
library(doParallel) # foreach
library(knitr)
library(plyr) #map values of pilotN based on key
library(dplyr)
library(DropletUtils)
# Simulation setup: working directory, helper functions, cached mouse data.
# NOTE(review): setwd() with an absolute path makes the script machine-
# specific; consider here::here() or relative paths.
setwd("D:/research/MethySeq/")
source("code/functions.R")
load(file="data/Mouse/meth.regional.count.R")
load(file="data/Mouse/empirical.fit.rdata")
# Simulation grid: pilot sample sizes, target sample sizes, replications.
pilot.N.all<-c(2,4,6,8,9,10)
N<-c(2,6,10,15,25,50)
# NOTE(review): `rep` shadows base::rep here; calls such as rep(0.14, n.DE)
# still dispatch to the function, but the shadowing is fragile -- consider
# renaming to n.rep.
rep<-5
G<-10000     # number of CpG regions per simulated dataset
n.DE<-1000   # number of differentially methylated regions
#delta<-runif(n.DE,0.1,0.2)
delta<-rep(0.14, n.DE)   # fixed effect size for every DMR
prop.all<-c(0.05,0.1,0.2,0.4,0.6,0.8,1.0)  # sequencing-depth proportions
set.seed(123)
dmr<-sample(1:G,n.DE)    # indices of the true DMRs
#Assign 8 cores
cl<-makeCluster(8)
registerDoParallel(cl)
# One result slot per (proportion, pilot sample size) combination.
result<-vector("list",length(pilot.N.all)*length(prop.all))
for(i in 1:length(prop.all)){
prop<-prop.all[i]
#True EDR
result.true<-foreach(j = 1:length(N), .packages = "DropletUtils") %dopar% {
n<-N[j]
edr.Zw<-rep(0,rep)
fdr.Zw<-rep(0,rep)
dmr.N<-rep(0,rep)
# simulate dataset with D samples and G CpG regions
for(times in 1:rep) {
#simulate data
simulated.result<-simulate.methyl.data(n=n, G=G, delta=delta, dmr=dmr, prop=prop)
#DE analysis
a=proc.time()
dmr.result<-DMR.analysis(N0=n, cov.matrix=simulated.result$coverage, methyl.matrix=simulated.result$methyl.count, R=1/4, pilot.depth=250/8)
# Calculate dmr while controling fdr at 0.05
dmr.Zw<-get.dm.regions(p=dmr.result$p.values, level=0.05, dmr=dmr)
# Calculate the observed discovery rate
if(is.na(dmr.Zw[1])){
edr.Zw[times]<-0
} else {
edr.Zw[times]<-length(intersect(dmr.Zw,dmr))/length(dmr)
fdr.Zw[times]<-1-length(intersect(dmr.Zw,dmr))/length(dmr.Zw)
dmr.N[times]<-length(dmr.Zw)
result.Zw<-round(rbind(edr.Zw,fdr.Zw,dmr.N),digits=3)
rownames(result.Zw)<-c("edr","fdr","dmr N")
}
}
return(result.Zw)
}
edr.Zw.mean<-unlist(lapply(result.true,function(x) mean(x[1,])))
edr.Zw.sd<-unlist(lapply(result.true,function(x) sd(x[1,])))
fdr.Zw.mean<-unlist(lapply(result.true,function(x) mean(x[2,])))
#Estimate EDR
for(k in 1:length(pilot.N.all)){
pilot.N<-pilot.N.all[k]
# power prediction
result.pre<-foreach (times = 1:rep,.packages = c("pi0", "DropletUtils")) %dopar% {
#simulate data
simulated.result<-simulate.methyl.data(n=pilot.N, G=G, delta=delta, dmr=dmr, prop=1)
#DE analysis
a=proc.time()
dmr.result<-DMR.analysis(N0=pilot.N,cov.matrix=simulated.result$coverage, methyl.matrix=simulated.result$methyl.count, R=(1/4)*prop, pilot.depth=250/8)
#estimate EDR
power.result<-Estimate.EDR.from.pilot(res=dmr.result, thresh.p=0.005,
N0=pilot.N, target.N=N, FDR=0.05, M=10)
b=proc.time()-a
y<-rbind(EDR=power.result$EDR, FDR=power.result$FDR, time=b[3])
return(y)
}
#Summarize estimation result
# edr
result.pre<-result.pre[!unlist(lapply(result.pre,is.null))]
result.mean<-Reduce("+",result.pre)/length(result.pre)
result.pre.edr<-t(sapply(result.pre, function(x) x[1,]))
edr.pre.mean<-result.mean[1,]
edr.pre.sd<-apply(result.pre.edr,2,sd)
# fdr
fdr.pre.mean<-result.mean[2,]
#run time
time.onerun<-mean(result.mean[3,])
# combine to a dataset
se1<-edr.Zw.sd/sqrt(rep)
se2<-edr.pre.sd/sqrt(rep)
edr<-as.factor(c(rep("true",length(N)),rep("predicted",length(N))))
targetN<-c(N,N)
mean<-c(edr.Zw.mean,edr.pre.mean)
fdrcontrol<-c(fdr.Zw.mean,fdr.pre.mean)
se<-c(se1,se2)
sum.data<-data.frame(prop=prop,
pilot.N=pilot.N, edr=edr,
targetN=targetN, mean=mean,
se=se, fdrcontrol=fdrcontrol,
time=time.onerun)
result[[(i-1)*length(pilot.N.all)+k]]<-sum.data
}
}
new<-c("2 vs 2","4 vs 4","6 vs 6","8 vs 8", "9 vs 9", "10 vs 10")
result.data<-Reduce("rbind",result)
key.values<-data.frame(old=pilot.N.all,new=new)
# change new from factor to character
key.values$new<-as.character(key.values$new)
result.data[, 2] <- mapvalues(result.data[, 2], from = key.values$old, to = key.values$new)
result.data$pilot.N<-factor(result.data$pilot.N,levels=new)
#RMSE
result.true<-result.data %>%
filter(edr=="true")
result.predicted<-result.data %>%
filter(edr=="predicted")
result.predicted[,"true"]<-result.true[,"mean"]
result.predicted<-result.predicted %>%
mutate(squareerror=(mean-true)^2)
result.predicted<-result.predicted %>%
group_by(prop,pilot.N)%>%
mutate(rmse=sqrt(sum(squareerror)/length(squareerror)))
result.predicted<-result.predicted %>%
dplyr::select(prop,pilot.N,rmse) # select function is covered by MASS
zw<-as.data.frame(result.predicted)
zw<-unique(zw)
save(result, result.data, zw,file="data/simulation/powerplotrawdataCBUM+CDD0.005_20iter_cleaned_delta0.14_infateNR.Rdata")
#plot
# Reproduce ggplot2's default discrete colour palette: n equally spaced
# hues around the HCL colour wheel at fixed luminance (65) and chroma (100).
gg_color_hue <- function(n) {
  angles <- seq(15, 375, length.out = n + 1)
  head(hcl(h = angles, l = 65, c = 100), n)
}
cols<-gg_color_hue(2)  # NOTE(review): palette computed but never used below -- confirm
# Faceted summary figure: true vs. predicted EDR (with 95% CIs) across
# target sample sizes, one panel per (prop, pilot.N) combination.
pdf("results/CBUM+CDD0.005_20iter_delta0.14_inflateNR.pdf", width = 9, height = 9)
# FIX: wrap in print() so the plot is rendered even when this file is
# source()d (top-level ggplot objects are not auto-printed under source()).
print(
  ggplot(result.data, aes(x=targetN, y=mean, colour=edr,shape=edr,linetype=edr)) +
    geom_errorbar(aes(ymin=mean-1.96*se, ymax=mean+1.96*se), width=1) +
    geom_line(lwd=1) +
    geom_point()+
    ylim(0,1)+xlim(0,50)+
    labs(x = "Target N", y = "EDR") +
    scale_linetype_manual(values=c("dotted","solid"))+
    theme_bw()+theme(legend.position = "bottom")+
    facet_grid(prop~pilot.N,labeller=label_both)
)
dev.off()
# FIX: shut down the 8 workers started with makeCluster() at the top of the
# script; previously the cluster was never stopped (resource leak).
stopCluster(cl)
|
/code/Figure2_delta0.14_inflateNR.R
|
no_license
|
liupeng2117/MethylSeqDesign_data_code
|
R
| false
| false
| 5,734
|
r
|
library(pi0) #estimate pi0(lambda)
library(ggplot2)
library(VGAM) # estimate fai
library(fitdistrplus) # fit empirical distribution
library(doParallel) # foreach
library(knitr)
library(plyr) #map values of pilotN based on key
library(dplyr)
library(DropletUtils)
setwd("D:/research/MethySeq/")
source("code/functions.R")
load(file="data/Mouse/meth.regional.count.R")
load(file="data/Mouse/empirical.fit.rdata")
pilot.N.all<-c(2,4,6,8,9,10)
N<-c(2,6,10,15,25,50)
rep<-5
G<-10000
n.DE<-1000
#delta<-runif(n.DE,0.1,0.2)
delta<-rep(0.14, n.DE)
prop.all<-c(0.05,0.1,0.2,0.4,0.6,0.8,1.0)
set.seed(123)
dmr<-sample(1:G,n.DE)
#Assign 8 cores
cl<-makeCluster(8)
registerDoParallel(cl)
result<-vector("list",length(pilot.N.all)*length(prop.all))
for(i in 1:length(prop.all)){
prop<-prop.all[i]
#True EDR
result.true<-foreach(j = 1:length(N), .packages = "DropletUtils") %dopar% {
n<-N[j]
edr.Zw<-rep(0,rep)
fdr.Zw<-rep(0,rep)
dmr.N<-rep(0,rep)
# simulate dataset with D samples and G CpG regions
for(times in 1:rep) {
#simulate data
simulated.result<-simulate.methyl.data(n=n, G=G, delta=delta, dmr=dmr, prop=prop)
#DE analysis
a=proc.time()
dmr.result<-DMR.analysis(N0=n, cov.matrix=simulated.result$coverage, methyl.matrix=simulated.result$methyl.count, R=1/4, pilot.depth=250/8)
# Calculate dmr while controling fdr at 0.05
dmr.Zw<-get.dm.regions(p=dmr.result$p.values, level=0.05, dmr=dmr)
# Calculate the observed discovery rate
if(is.na(dmr.Zw[1])){
edr.Zw[times]<-0
} else {
edr.Zw[times]<-length(intersect(dmr.Zw,dmr))/length(dmr)
fdr.Zw[times]<-1-length(intersect(dmr.Zw,dmr))/length(dmr.Zw)
dmr.N[times]<-length(dmr.Zw)
result.Zw<-round(rbind(edr.Zw,fdr.Zw,dmr.N),digits=3)
rownames(result.Zw)<-c("edr","fdr","dmr N")
}
}
return(result.Zw)
}
edr.Zw.mean<-unlist(lapply(result.true,function(x) mean(x[1,])))
edr.Zw.sd<-unlist(lapply(result.true,function(x) sd(x[1,])))
fdr.Zw.mean<-unlist(lapply(result.true,function(x) mean(x[2,])))
#Estimate EDR
for(k in 1:length(pilot.N.all)){
pilot.N<-pilot.N.all[k]
# power prediction
result.pre<-foreach (times = 1:rep,.packages = c("pi0", "DropletUtils")) %dopar% {
#simulate data
simulated.result<-simulate.methyl.data(n=pilot.N, G=G, delta=delta, dmr=dmr, prop=1)
#DE analysis
a=proc.time()
dmr.result<-DMR.analysis(N0=pilot.N,cov.matrix=simulated.result$coverage, methyl.matrix=simulated.result$methyl.count, R=(1/4)*prop, pilot.depth=250/8)
#estimate EDR
power.result<-Estimate.EDR.from.pilot(res=dmr.result, thresh.p=0.005,
N0=pilot.N, target.N=N, FDR=0.05, M=10)
b=proc.time()-a
y<-rbind(EDR=power.result$EDR, FDR=power.result$FDR, time=b[3])
return(y)
}
#Summarize estimation result
# edr
result.pre<-result.pre[!unlist(lapply(result.pre,is.null))]
result.mean<-Reduce("+",result.pre)/length(result.pre)
result.pre.edr<-t(sapply(result.pre, function(x) x[1,]))
edr.pre.mean<-result.mean[1,]
edr.pre.sd<-apply(result.pre.edr,2,sd)
# fdr
fdr.pre.mean<-result.mean[2,]
#run time
time.onerun<-mean(result.mean[3,])
# combine to a dataset
se1<-edr.Zw.sd/sqrt(rep)
se2<-edr.pre.sd/sqrt(rep)
edr<-as.factor(c(rep("true",length(N)),rep("predicted",length(N))))
targetN<-c(N,N)
mean<-c(edr.Zw.mean,edr.pre.mean)
fdrcontrol<-c(fdr.Zw.mean,fdr.pre.mean)
se<-c(se1,se2)
sum.data<-data.frame(prop=prop,
pilot.N=pilot.N, edr=edr,
targetN=targetN, mean=mean,
se=se, fdrcontrol=fdrcontrol,
time=time.onerun)
result[[(i-1)*length(pilot.N.all)+k]]<-sum.data
}
}
new<-c("2 vs 2","4 vs 4","6 vs 6","8 vs 8", "9 vs 9", "10 vs 10")
result.data<-Reduce("rbind",result)
key.values<-data.frame(old=pilot.N.all,new=new)
# change new from factor to character
key.values$new<-as.character(key.values$new)
result.data[, 2] <- mapvalues(result.data[, 2], from = key.values$old, to = key.values$new)
result.data$pilot.N<-factor(result.data$pilot.N,levels=new)
#RMSE
result.true<-result.data %>%
filter(edr=="true")
result.predicted<-result.data %>%
filter(edr=="predicted")
result.predicted[,"true"]<-result.true[,"mean"]
result.predicted<-result.predicted %>%
mutate(squareerror=(mean-true)^2)
result.predicted<-result.predicted %>%
group_by(prop,pilot.N)%>%
mutate(rmse=sqrt(sum(squareerror)/length(squareerror)))
result.predicted<-result.predicted %>%
dplyr::select(prop,pilot.N,rmse) # select function is covered by MASS
zw<-as.data.frame(result.predicted)
zw<-unique(zw)
save(result, result.data, zw,file="data/simulation/powerplotrawdataCBUM+CDD0.005_20iter_cleaned_delta0.14_infateNR.Rdata")
#plot
# Emulate ggplot2's default discrete hue palette for n levels.
gg_color_hue <- function(n) {
  hues = seq(15, 375, length = n + 1)
  hcl(h = hues, l = 65, c = 100)[1:n]
}
cols<-gg_color_hue(2)  # NOTE(review): palette computed but never used below -- confirm
# Faceted summary figure: true vs. predicted EDR (with 95% CIs) across
# target sample sizes, one panel per (prop, pilot.N) combination.
pdf("results/CBUM+CDD0.005_20iter_delta0.14_inflateNR.pdf", width = 9, height = 9)
# FIX: wrap in print() so the plot is rendered even when this file is
# source()d (top-level ggplot objects are not auto-printed under source()).
print(
  ggplot(result.data, aes(x=targetN, y=mean, colour=edr,shape=edr,linetype=edr)) +
    geom_errorbar(aes(ymin=mean-1.96*se, ymax=mean+1.96*se), width=1) +
    geom_line(lwd=1) +
    geom_point()+
    ylim(0,1)+xlim(0,50)+
    labs(x = "Target N", y = "EDR") +
    scale_linetype_manual(values=c("dotted","solid"))+
    theme_bw()+theme(legend.position = "bottom")+
    facet_grid(prop~pilot.N,labeller=label_both)
)
dev.off()
# FIX: release the 8 parallel workers created with makeCluster() earlier in
# the script; previously the cluster was left running (resource leak).
stopCluster(cl)
|
## Place: ECN395 Research - timesSeries project
## Purpose: 1) Match quotes and trades in the aggregate ES_qtsagg and ES_tdsagg objects
## 2) Get the orderflows using the highfrequency package. Experiment with different correction
## 3) Regress the
## Produce: xts objects to be ready to work with highfrequency package. .R data files of ES trades & quotes
## Learned: cat("\014") to clear the console; head (vector, -1) to get rid of the last column
cat("\014")
# These two lines are to get the data from my MAC and sould be ignore
# save(list=c("ESmatch", "ESmatch.minute", "ESmatch.second", "ESqts", "EStds", "orderflow"), file="ESeverything.R")
# load("/Users/cuongnguyen/Dropbox/class/01.s14/ECN395/ESeverything.R")
# install.packages('highfrequency')
# install.packages('biglm')
# install.packages('dynlm')
# install.packages('RcppArmadillo')
# install.packages('lubridate')
# install.packages('rugarch')
# install.packages('forecast')
# install.packages('TTR')
# FIX: removed leftover Git merge-conflict markers (<<<<<<< / ======= /
# >>>>>>>) that made this file unparseable; the HEAD side (loading the
# cached workspace) is kept. The stray trailing comma in the original
# load() call is dropped as well.
load("~/Desktop/ecn395data/ESeverything.R")
load("~/Desktop/ecn395data/ES_qts_aggregate.R")
load("~/Desktop/ecn395data/ES_qts_xts_full.R")
#load("~/Desktop/ecn395data/ES_Quotes_raw.R")
load("~/Desktop/ecn395data/ES_tds_aggregate.R")
load("~/Desktop/ecn395data/ES_tds_xts_full.R")
#load("~/Desktop/ecn395data/ES_Trades_raw.R")
# FIX: use library() instead of require() for mandatory dependencies --
# require() merely returns FALSE on a missing package and lets the script
# continue and fail later with confusing errors; library() fails loudly.
library(fasttime)
library(zoo)
library(xts)
library(highfrequency)
library(quantmod)
library(dynlm)
library(RcppArmadillo)
library(lubridate)
library(rugarch)
library(forecast)
library(TTR)
rm(list=ls(pattern=("model.")))
# Set the Sys.time variable to have 6 digits after seconds
options("digits.secs"=6)
Sys.time()
# Rename the object, we make use of lazy evaluation here
# ESqts <- qtsagg # This is the aggregated ES qts xts
# ESqts.full <- qts # This is full ES qts xts
# EStds <- tdsagg
# EStds.full <- tds
# Matching the quotes and trades at miliseconds level ---------------------
# Now match the ES trades and the quotes data, no adjustment
ESmatch <- matchTradesQuotes (EStds,ESqts, adjustment=0.000)
ESmatch <- merge (ESmatch, EStds$tds_volume)
# Get the trade directions of the data using Lee and Ready algo
# 1 is buy and -1 is sell
direction <- getTradeDirection (ESmatch)
orderflow <- direction * ESmatch$tds_volume
ESmatch <- merge (ESmatch, direction, orderflow)
# Trick to eliminate a column in an object very quickly
# ESmatch$orderflow = NULL
# Matching quotes and trades at the 0.5 and 1 SECONDS level -------------------------
# data to store in ESmatch.second
# Create a vector of average price per second. So price at 17:00:00 means
# the average price from 17:00:00.000 to 17:00:00.999
timestamp_halfsecond <- align.time(index(ESmatch), n= 0.5)
timestamp_quartersecond <- align.time(index(ESmatch), n= 0.25)
ESmatch.quartersecond <- aggregate (x= ESmatch$PRICE,
by= timestamp_quartersecond,
FUN= mean)
ESmatch.halfsecond <- aggregate (x= ESmatch$PRICE,
by= timestamp_halfsecond,
FUN= mean)
ESmatch.second <- aggregate (x= ESmatch$PRICE,
by= fastPOSIXct(trunc(index(ESmatch), units= "secs"),tz= "Chicago"),
FUN= mean)
# Change the vector to xts object and name the first column
ESmatch.quartersecond <- as.xts(x= ESmatch.quartersecond)
dimnames(ESmatch.quartersecond) <- list(list(), c("PRICE"))
ESmatch.halfsecond <- as.xts(x= ESmatch.halfsecond)
dimnames(ESmatch.halfsecond) <- list(list(), c("PRICE"))
ESmatch.second <- as.xts(x= ESmatch.second)
dimnames(ESmatch.second) <- list(list(), c("PRICE"))
getwd()
#Now create several vectors of BID ASk etc
# Median of quote columns 2:5 (BID/OFR/BIDSIZ/OFRSIZ assumed from the
# ESmatch layout above -- confirm column order) per sub-second bucket.
tempquart <- as.xts (aggregate (x= ESmatch[,2:5],
                                by= timestamp_quartersecond,
                                FUN= median))
temphalf <- as.xts (aggregate (x= ESmatch[,2:5],
                               by= timestamp_halfsecond,
                               FUN= median))
temp <- as.xts (aggregate (x= ESmatch[,2:5],
                           by= fastPOSIXct(trunc(index(ESmatch), units= "secs"),tz= "Chicago"),
                           FUN= median))
# merge. After this ESmatch.second has 5 columns
ESmatch.quartersecond <- merge(ESmatch.quartersecond, tempquart)
ESmatch.quartersecond$direction <- getTradeDirection (ESmatch.quartersecond)
ESmatch.halfsecond <- merge(ESmatch.halfsecond, temphalf)
ESmatch.halfsecond$direction <- getTradeDirection (ESmatch.halfsecond)
ESmatch.second <- merge(ESmatch.second, temp)
ESmatch.second$direction <- getTradeDirection (ESmatch.second)
# get the order flow by summing up directions from higher frequency:
ESmatch.quartersecond$orderflow <- as.xts (
  aggregate (x= ESmatch$direction,
             by= timestamp_quartersecond,
             FUN= sum))
ESmatch.halfsecond$orderflow <- as.xts (
  aggregate (x= ESmatch$direction,
             by= timestamp_halfsecond,
             FUN= sum))
ESmatch.second$orderflow <- as.xts (
  aggregate (x= ESmatch$direction,
             by= fastPOSIXct (trunc (index (ESmatch), units= "secs"), tz= "Chicago"),
             FUN= sum))
# get weighted_orderflow by multiplying direction with tds_volume before summing up:
ESmatch.quartersecond$weighted_orderflow <- as.xts (
  aggregate (x= (ESmatch$direction * ESmatch$tds_volume),
             by= timestamp_quartersecond,
             FUN= sum))
ESmatch.halfsecond$weighted_orderflow <- as.xts (
  aggregate (x= (ESmatch$direction * ESmatch$tds_volume),
             by= timestamp_halfsecond,
             FUN= sum))
ESmatch.second$weighted_orderflow <- as.xts (
  aggregate (x= (ESmatch$direction * ESmatch$tds_volume),
             by= fastPOSIXct (trunc (index (ESmatch), units= "secs"), tz= "Chicago"),
             FUN= sum))
# get trade volume by summing up tds_volume
ESmatch.quartersecond$volume <- as.xts (
  aggregate (x= ESmatch$tds_volume,
             by= timestamp_quartersecond,
             FUN= sum))
ESmatch.halfsecond$volume <- as.xts (
  aggregate (x= ESmatch$tds_volume,
             by= timestamp_halfsecond,
             FUN= sum))
ESmatch.second$volume <- as.xts (
  aggregate (x= ESmatch$tds_volume,
             by= fastPOSIXct (trunc (index (ESmatch), units= "secs"), tz= "Chicago"),
             FUN= sum))
# get the returns and standardize it
# (log returns of bucket-mean prices; scale() centers and divides by the
# full-sample SD -- this uses the whole sample, so it is in-sample normalization.)
ESmatch.quartersecond$returns <- log (ESmatch.quartersecond$PRICE / lag(ESmatch.quartersecond$PRICE))
ESmatch.quartersecond$normalizedreturns <- scale (x= ESmatch.quartersecond$returns)
ESmatch.second$returns <- log (ESmatch.second$PRICE / lag(ESmatch.second$PRICE))
ESmatch.second$normalizedreturns <- scale (x= ESmatch.second$returns)
ESmatch.halfsecond$returns <- log (ESmatch.halfsecond$PRICE / lag(ESmatch.halfsecond$PRICE))
ESmatch.halfsecond$normalizedreturns <- scale (x= ESmatch.halfsecond$returns)
# get the dummy variables for lunchtime and trading hours
# Lunchtime is between 12:00 and 12:59:59.999 each day
# Trading hour is between 9:00 and 16:00 each day
# Non-lunch and non-trading hour are low-liquidity
ESmatch.quartersecond$lunchtime <- as.numeric((hour (index (ESmatch.quartersecond)) == 12))
ESmatch.quartersecond$tradinghour <- as.numeric(((hour (index (ESmatch.quartersecond)) >= 9)
                                                 & (hour (index (ESmatch.quartersecond)) < 16)
                                                 & !(ESmatch.quartersecond$lunchtime)))
ESmatch.quartersecond$hour <- as.factor(hour (index (ESmatch.quartersecond)))
ESmatch.quartersecond$wday <- as.factor(wday (index (ESmatch.quartersecond)))
ESmatch.halfsecond$lunchtime <- as.numeric((hour (index (ESmatch.halfsecond)) == 12))
ESmatch.halfsecond$tradinghour <- as.numeric(((hour (index (ESmatch.halfsecond)) >= 9)
                                              & (hour (index (ESmatch.halfsecond)) < 16)
                                              & !(ESmatch.halfsecond$lunchtime)))
ESmatch.halfsecond$hour <- as.factor(hour (index (ESmatch.halfsecond)))
ESmatch.halfsecond$wday <- as.factor(wday (index (ESmatch.halfsecond)))
ESmatch.second$lunchtime <- as.numeric((hour (index (ESmatch.second)) == 12))
ESmatch.second$tradinghour <- as.numeric(((hour (index (ESmatch.second)) >= 9)
                                          & (hour (index (ESmatch.second)) < 16)
                                          & !(ESmatch.second$lunchtime)))
ESmatch.second$hour <- as.factor(hour (index (ESmatch.second)))
ESmatch.second$wday <- as.factor(wday (index (ESmatch.second)))
# Quick sanity ratio of quarter-second data points to one-second rows
# (printed only; not stored).
length(ESmatch.quartersecond) / length(ESmatch.second$PRICE)
# estimate moving average of price and orderflow
# (WMA from TTR with linearly increasing weights 1..n -- most recent
# observation gets the largest weight.)
ESmatch.quartersecond$movingaverage <- WMA (x= ESmatch.quartersecond$PRICE, n= 20, wts=1:20)
ESmatch.quartersecond$deltamovingaverage <- diff (ESmatch.quartersecond$movingaverage)
ESmatch.halfsecond$movingaverage <- WMA (x= ESmatch.halfsecond$PRICE, n= 20, wts=1:20)
ESmatch.halfsecond$deltamovingaverage <- diff (ESmatch.halfsecond$movingaverage)
ESmatch.second$movingaverage <- WMA (x= ESmatch.second$PRICE, n= 20, wts=1:20)
ESmatch.second$deltamovingaverage <- diff (ESmatch.second$movingaverage)
ESmatch.second$averageorderflow <- WMA (x= ESmatch.second$orderflow, n= 10, wts=1:10)
# estimate volatility
# (TTR::volatility with calc="close" = close-to-close estimator over n buckets.)
ESmatch.quartersecond$volatility <- volatility(OHLC=ESmatch.quartersecond$PRICE, n=20, calc="close")
ESmatch.halfsecond$volatility <- volatility(OHLC=ESmatch.halfsecond$PRICE, n=20, calc="close")
ESmatch.second$volatility <- volatility(OHLC=ESmatch.second$PRICE, n=20, calc="close")
# Matching quotes and trades at the MINUTES level -------------------------
# data to store in ESmatch.minute
# Create a vector of average price per minute. So price at 17:00:00 means
# the average price from 17:00:00.000 to 17:00:59.999 - 1 min gap
ESmatch.minute <- aggregate (x= ESmatch.second$PRICE,
                             by= fastPOSIXct(trunc(index(ESmatch.second), units= "mins"),tz= "Chicago"),
                             FUN= mean)
# Change the vector to xts object and name the first column
ESmatch.minute <- as.xts(x= ESmatch.minute)
dimnames(ESmatch.minute) <- list(list(), c("PRICE"))
# Create another vector of median price named medPRICE, data used from the original timseries
ESmatch.minute$PRICEmed <- as.xts (
  aggregate (x= ESmatch$PRICE,
             by= fastPOSIXct (trunc (index (ESmatch), units= "mins"), tz= "Chicago"),
             FUN= median))
# Now create several vectors of BID ASk etc
temp <- as.xts (aggregate (x= ESmatch.second[,2:5],
                           by= fastPOSIXct(trunc(index(ESmatch.second), units= "mins"),tz= "Chicago"),
                           FUN= median))
# merge. After this ESmatch.second has 5 columns
ESmatch.minute <- merge(ESmatch.minute, temp)
# get orderlow
ESmatch.minute$orderflow <- as.xts (
  aggregate (x= ESmatch.second$orderflow,
             by= fastPOSIXct (trunc (index (ESmatch.second), units= "mins"), tz= "Chicago"),
             FUN= sum))
# get weighted_orderflow by multiplying direction with tds_volume before summing up:
ESmatch.minute$weighted_orderflow <- as.xts (
  aggregate (x= ESmatch.second$weighted_orderflow,
             by= fastPOSIXct (trunc (index (ESmatch.second), units= "mins"), tz= "Chicago"),
             FUN= sum))
# get trade volume
ESmatch.minute$volume <- as.xts (
  aggregate (x= ESmatch.second$volume,
             by= fastPOSIXct (trunc (index (ESmatch.second), units= "mins"), tz= "Chicago"),
             FUN= sum))
# get the returns - both mean and median - and standardized it
ESmatch.minute$returns <- log (ESmatch.minute$PRICE / lag(ESmatch.minute$PRICE))
ESmatch.minute$normalizedreturns <- scale (ESmatch.minute$returns)
ESmatch.minute$returnsmed <- log (ESmatch.minute$PRICEmed / lag(ESmatch.minute$PRICEmed))
ESmatch.minute$normalizedreturnsmed <- scale (ESmatch.minute$returnsmed)
# get the dummy variables for lunchtime and trading hours
# Lunchtime is between 12:00 and 12:59:59.999 each day
# Trading hour is between 9:00 and 16:00 each day
# Non-lunch and non-trading hour are low-liquidity
# NOTE(review): unlike the sub-second levels, these minute-level dummies are
# left as logical (no as.numeric wrapper) -- inconsistent across frequencies.
ESmatch.minute$lunchtime <- (hour (index (ESmatch.minute)) == 12)
ESmatch.minute$tradinghour <- ((hour (index (ESmatch.minute)) >= 9)
                               & (hour (index (ESmatch.minute)) < 16)
                               & !(ESmatch.minute$lunchtime))
ESmatch.minute$hour <- as.factor(hour (index (ESmatch.minute)))
ESmatch.minute$wday <- as.factor(wday (index (ESmatch.minute)))
# estimate moving average
ESmatch.minute$movingaverage <- WMA (x= ESmatch.minute$PRICE, n= 20, wts=1:20)
ESmatch.minute$deltamovingaverage <- diff (ESmatch.minute$movingaverage)
# estimate volatility
ESmatch.minute$volatility <- volatility(OHLC=ESmatch.minute$PRICE, n=20, calc="close")
# Matching quotes and trades at the 2, 5, 10, 20 and 30-seconds level ---------------------
# Create a timestamp index that has 5-second intervals, using align.time in the xts package:
timestamp_2second <- align.time (index (ESmatch.second), n= 2)
timestamp_5second <- align.time (index (ESmatch.second), n= 5)
timestamp_10second <- align.time (index (ESmatch.second), n= 10)
timestamp_20second <- align.time (index (ESmatch.second), n= 20)
timestamp_30second <- align.time (index (ESmatch.second), n= 30)
# Bucket means of the first five one-second columns (PRICE + quote fields)
# at each coarser interval.
ESmatch.2second <- as.xts (aggregate (x= ESmatch.second[, 1:5],
                                      by= timestamp_2second,
                                      FUN= mean))
ESmatch.5second <- as.xts (aggregate (x= ESmatch.second[, 1:5],
                                      by= timestamp_5second,
                                      FUN= mean))
ESmatch.10second <- as.xts (aggregate (x= ESmatch.second[, 1:5],
                                       by= timestamp_10second,
                                       FUN= mean))
ESmatch.20second <- as.xts (aggregate (x= ESmatch.second[, 1:5],
                                       by= timestamp_20second,
                                       FUN= mean))
ESmatch.30second <- as.xts (aggregate (x= ESmatch.second[, 1:5],
                                       by= timestamp_30second,
                                       FUN= mean))
# Create another vector of median price named medPRICE, data used from the original timseries
ESmatch.2second$PRICEmed <- as.xts (
  aggregate (x= ESmatch$PRICE,
             by= align.time (index (ESmatch), n= 2),
             FUN= median))
ESmatch.5second$PRICEmed <- as.xts (
  aggregate (x= ESmatch$PRICE,
             by= align.time (index (ESmatch), n= 5),
             FUN= median))
ESmatch.10second$PRICEmed <- as.xts (
  aggregate (x= ESmatch$PRICE,
             by= align.time (index (ESmatch), n= 10),
             FUN= median))
ESmatch.20second$PRICEmed <- as.xts (
  aggregate (x= ESmatch$PRICE,
             by= align.time (index (ESmatch), n= 20),
             FUN= median))
ESmatch.30second$PRICEmed <- as.xts (
  aggregate (x= ESmatch$PRICE,
             by= align.time (index (ESmatch), n= 30),
             FUN= median))
# get the orderflows
ESmatch.2second$orderflow <- as.xts (aggregate (x= ESmatch.second$orderflow,
                                                by= timestamp_2second,
                                                FUN= sum))
ESmatch.5second$orderflow <- as.xts (aggregate (x= ESmatch.second$orderflow,
                                                by= timestamp_5second,
                                                FUN= sum))
ESmatch.10second$orderflow <- as.xts (aggregate (x= ESmatch.second$orderflow,
                                                 by= timestamp_10second,
                                                 FUN= sum))
ESmatch.20second$orderflow <- as.xts (aggregate (x= ESmatch.second$orderflow,
                                                 by= timestamp_20second,
                                                 FUN= sum))
ESmatch.30second$orderflow <- as.xts (aggregate (x= ESmatch.second$orderflow,
                                                 by= timestamp_30second,
                                                 FUN= sum))
# get the weighted orderflow
ESmatch.2second$weighted_orderflow <- as.xts (aggregate (x= ESmatch.second$weighted_orderflow,
                                                         by= timestamp_2second,
                                                         FUN= sum))
ESmatch.5second$weighted_orderflow <- as.xts (aggregate (x= ESmatch.second$weighted_orderflow,
                                                         by= timestamp_5second,
                                                         FUN= sum))
ESmatch.10second$weighted_orderflow <- as.xts (aggregate (x= ESmatch.second$weighted_orderflow,
                                                          by= timestamp_10second,
                                                          FUN= sum))
ESmatch.20second$weighted_orderflow <- as.xts (aggregate (x= ESmatch.second$weighted_orderflow,
                                                          by= timestamp_20second,
                                                          FUN= sum))
ESmatch.30second$weighted_orderflow <- as.xts (aggregate (x= ESmatch.second$weighted_orderflow,
                                                          by= timestamp_30second,
                                                          FUN= sum))
# get the volume
ESmatch.2second$volume <- as.xts (aggregate (x= ESmatch.second$volume,
                                             by= timestamp_2second,
                                             FUN= sum))
ESmatch.5second$volume <- as.xts (aggregate (x= ESmatch.second$volume,
                                             by= timestamp_5second,
                                             FUN= sum))
ESmatch.10second$volume <- as.xts (aggregate (x= ESmatch.second$volume,
                                              by= timestamp_10second,
                                              FUN= sum))
ESmatch.20second$volume <- as.xts (aggregate (x= ESmatch.second$volume,
                                              by= timestamp_20second,
                                              FUN= sum))
ESmatch.30second$volume <- as.xts(aggregate (x= ESmatch.second$volume,
                                             by= timestamp_30second,
                                             FUN= sum))
# get the returns
# (log returns of both the mean price and the median price per bucket,
# each standardized in-sample with scale().)
ESmatch.2second$returns <- log(ESmatch.2second$PRICE / lag(ESmatch.2second$PRICE))
ESmatch.2second$normalizedreturns <- scale (ESmatch.2second$returns)
ESmatch.2second$returnsmed <- log (ESmatch.2second$PRICEmed / lag(ESmatch.2second$PRICEmed))
ESmatch.2second$normalizedreturnsmed <- scale (ESmatch.2second$returnsmed)
ESmatch.5second$returns <- log(ESmatch.5second$PRICE / lag(ESmatch.5second$PRICE))
ESmatch.5second$normalizedreturns <- scale (ESmatch.5second$returns)
ESmatch.5second$returnsmed <- log (ESmatch.5second$PRICEmed / lag(ESmatch.5second$PRICEmed))
ESmatch.5second$normalizedreturnsmed <- scale (ESmatch.5second$returnsmed)
ESmatch.10second$returns <- log(ESmatch.10second$PRICE / lag(ESmatch.10second$PRICE))
ESmatch.10second$normalizedreturns <- scale (ESmatch.10second$returns)
ESmatch.10second$returnsmed <- log (ESmatch.10second$PRICEmed / lag(ESmatch.10second$PRICEmed))
ESmatch.10second$normalizedreturnsmed <- scale (ESmatch.10second$returnsmed)
ESmatch.20second$returns <- log(ESmatch.20second$PRICE / lag(ESmatch.20second$PRICE))
ESmatch.20second$normalizedreturns <- scale (ESmatch.20second$returns)
ESmatch.20second$returnsmed <- log (ESmatch.20second$PRICEmed / lag(ESmatch.20second$PRICEmed))
ESmatch.20second$normalizedreturnsmed <- scale (ESmatch.20second$returnsmed)
ESmatch.30second$returns <- log(ESmatch.30second$PRICE / lag(ESmatch.30second$PRICE))
ESmatch.30second$normalizedreturns <- scale (ESmatch.30second$returns)
ESmatch.30second$returnsmed <- log (ESmatch.30second$PRICEmed / lag(ESmatch.30second$PRICEmed))
ESmatch.30second$normalizedreturnsmed <- scale (ESmatch.30second$returnsmed)
# get the dummy variables for lunchtime and trading hours
# Lunchtime is between 12:00 and 12:59:59.999 each day
# Trading hour is between 9:00 and 16:00 each day
# Non-lunch and non-trading hour are low-liquidity
ESmatch.2second$lunchtime <- as.numeric((hour (index (ESmatch.2second)) == 12))
ESmatch.2second$tradinghour <- as.numeric((hour (index (ESmatch.2second)) >= 9)
                                          & (hour (index (ESmatch.2second)) < 16)
                                          & !(ESmatch.2second$lunchtime))
ESmatch.2second$hour <- as.factor(hour (index (ESmatch.2second)))
ESmatch.2second$wday <- as.factor(wday (index (ESmatch.2second)))
ESmatch.5second$lunchtime <- as.numeric((hour (index (ESmatch.5second)) == 12))
ESmatch.5second$tradinghour <- as.numeric((hour (index (ESmatch.5second)) >= 9)
                                          & (hour (index (ESmatch.5second)) < 16)
                                          & !(ESmatch.5second$lunchtime))
ESmatch.5second$hour <- as.factor(hour (index (ESmatch.5second)))
ESmatch.5second$wday <- as.factor(wday (index (ESmatch.5second)))
ESmatch.10second$lunchtime <- as.numeric((hour (index (ESmatch.10second)) == 12))
ESmatch.10second$tradinghour <- as.numeric((hour (index (ESmatch.10second)) >= 9)
                                           & (hour (index (ESmatch.10second)) < 16)
                                           & !(ESmatch.10second$lunchtime))
ESmatch.10second$hour <- as.factor(hour (index (ESmatch.10second)))
ESmatch.10second$wday <- as.factor(wday (index (ESmatch.10second)))
ESmatch.20second$lunchtime <- as.numeric((hour (index (ESmatch.20second)) == 12))
ESmatch.20second$tradinghour <- as.numeric((hour (index (ESmatch.20second)) >= 9)
                                           & (hour (index (ESmatch.20second)) < 16)
                                           & !(ESmatch.20second$lunchtime))
ESmatch.20second$hour <- as.factor(hour (index (ESmatch.20second)))
ESmatch.20second$wday <- as.factor(wday (index (ESmatch.20second)))
ESmatch.30second$lunchtime <- as.numeric(hour (index (ESmatch.30second)) == 12)
ESmatch.30second$tradinghour <- as.numeric((hour (index (ESmatch.30second)) >= 9)
                                           & (hour (index (ESmatch.30second)) < 16)
                                           & !(ESmatch.30second$lunchtime))
ESmatch.30second$hour <- as.factor(hour (index (ESmatch.30second)))
ESmatch.30second$wday <- as.factor(wday (index (ESmatch.30second)))
# estimate moving average
ESmatch.2second$movingaverage <- WMA (x= ESmatch.2second$PRICE, n= 20, wts=1:20)
ESmatch.2second$deltamovingaverage <- diff (ESmatch.2second$movingaverage)
ESmatch.2second$averageorderflow <- WMA (x= ESmatch.2second$orderflow, n= 10, wts=1:10)
ESmatch.5second$movingaverage <- WMA (x= ESmatch.5second$PRICE, n= 20, wts=1:20)
ESmatch.5second$deltamovingaverage <- diff (ESmatch.5second$movingaverage)
ESmatch.5second$averageorderflow <- WMA (x= ESmatch.5second$orderflow, n= 5, wts=1:5)
ESmatch.10second$movingaverage <- WMA (x= ESmatch.10second$PRICE, n= 20, wts=1:20)
ESmatch.10second$deltamovingaverage <- diff (ESmatch.10second$movingaverage)
ESmatch.10second$averageorderflow <- WMA (x= ESmatch.10second$orderflow, n= 5, wts=1:5)
ESmatch.20second$movingaverage <- WMA (x= ESmatch.20second$PRICE, n= 20, wts=1:20)
ESmatch.20second$deltamovingaverage <- diff (ESmatch.20second$movingaverage)
ESmatch.20second$averageorderflow <- WMA (x= ESmatch.20second$orderflow, n= 5, wts=1:5)
# NOTE(review): no averageorderflow is computed at the 30-second level,
# although it exists for 2/5/10/20 seconds -- confirm this is intentional.
ESmatch.30second$movingaverage <- WMA (x= ESmatch.30second$PRICE, n= 20, wts=1:20)
ESmatch.30second$deltamovingaverage <- diff (ESmatch.30second$movingaverage)
# estimate volatility
ESmatch.2second$volatility <- volatility(OHLC=ESmatch.2second$PRICE, n=20, calc="close")
ESmatch.5second$volatility <- volatility(OHLC=ESmatch.5second$PRICE, n=20, calc="close")
ESmatch.10second$volatility <- volatility(OHLC=ESmatch.10second$PRICE, n=20, calc="close")
ESmatch.20second$volatility <- volatility(OHLC=ESmatch.20second$PRICE, n=20, calc="close")
ESmatch.30second$volatility <- volatility(OHLC=ESmatch.30second$PRICE, n=20, calc="close")
# Matching quotes and trades at the 5-MINUTES, 15-MINUTES and 30-MINUTES level --------
# Create a timestamp index that has 5-minute and 15-minute intervals, using align.time in the xts package:
timestamp_5minute <- align.time (index (ESmatch.second), n= 5*60)
timestamp_15minute <- align.time (index (ESmatch.second), n= 15*60)
timestamp_30minute <- align.time (index (ESmatch.second), n= 30*60)
ESmatch.5minute <- as.xts (aggregate (x= ESmatch.second[, 1:5],
                                      by= timestamp_5minute,
                                      FUN= mean))
ESmatch.15minute <- as.xts (aggregate (x= ESmatch.second[, 1:5],
                                       by= timestamp_15minute,
                                       FUN= mean))
ESmatch.30minute <- as.xts (aggregate (x= ESmatch.second[, 1:5],
                                       by= timestamp_30minute,
                                       FUN= mean))
# Create another vector of median price named medPRICE, data used from the original timseries
ESmatch.5minute$PRICEmed <- as.xts (
  aggregate (x= ESmatch$PRICE,
             by= align.time (index (ESmatch), n= 5*60),
             FUN= median))
ESmatch.15minute$PRICEmed <- as.xts (
  aggregate (x= ESmatch$PRICE,
             by= align.time (index (ESmatch), n= 15*60),
             FUN= median))
ESmatch.30minute$PRICEmed <- as.xts (
  aggregate (x= ESmatch$PRICE,
             by= align.time (index (ESmatch), n= 30*60),
             FUN= median))
# get the orderflows
ESmatch.5minute$orderflow <- as.xts (aggregate (x= ESmatch.second$orderflow,
                                                by= timestamp_5minute,
                                                FUN= sum))
ESmatch.15minute$orderflow <- as.xts (aggregate (x= ESmatch.second$orderflow,
                                                 by= timestamp_15minute,
                                                 FUN= sum))
ESmatch.30minute$orderflow <- as.xts (aggregate (x= ESmatch.second$orderflow,
                                                 by= timestamp_30minute,
                                                 FUN= sum))
# get the weighted orderflow
ESmatch.5minute$weighted_orderflow <- as.xts (aggregate (x= ESmatch.second$weighted_orderflow,
                                                         by= timestamp_5minute,
                                                         FUN= sum))
ESmatch.15minute$weighted_orderflow <- as.xts (aggregate (x= ESmatch.second$weighted_orderflow,
                                                          by= timestamp_15minute,
                                                          FUN= sum))
ESmatch.30minute$weighted_orderflow <- as.xts (aggregate (x= ESmatch.second$weighted_orderflow,
                                                          by= timestamp_30minute,
                                                          FUN= sum))
# get the volume
ESmatch.5minute$volume <- as.xts (aggregate (x= ESmatch.second$volume,
                                             by= timestamp_5minute,
                                             FUN= sum))
ESmatch.15minute$volume <- as.xts(aggregate (x= ESmatch.second$volume,
                                             by= timestamp_15minute,
                                             FUN= sum))
ESmatch.30minute$volume <- as.xts(aggregate (x= ESmatch.second$volume,
                                             by= timestamp_30minute,
                                             FUN= sum))
# get the returns
ESmatch.5minute$returns <- log(ESmatch.5minute$PRICE / lag(ESmatch.5minute$PRICE))
ESmatch.5minute$normalizedreturns <- scale (ESmatch.5minute$returns)
ESmatch.5minute$returnsmed <- log (ESmatch.5minute$PRICEmed / lag(ESmatch.5minute$PRICEmed))
ESmatch.5minute$normalizedreturnsmed <- scale (ESmatch.5minute$returnsmed)
ESmatch.15minute$returns <- log(ESmatch.15minute$PRICE / lag(ESmatch.15minute$PRICE))
ESmatch.15minute$normalizedreturns <- scale (ESmatch.15minute$returns)
ESmatch.15minute$returnsmed <- log (ESmatch.15minute$PRICEmed / lag(ESmatch.15minute$PRICEmed))
ESmatch.15minute$normalizedreturnsmed <- scale (ESmatch.15minute$returnsmed)
ESmatch.30minute$returns <- log(ESmatch.30minute$PRICE / lag(ESmatch.30minute$PRICE))
ESmatch.30minute$normalizedreturns <- scale (ESmatch.30minute$returns)
ESmatch.30minute$returnsmed <- log (ESmatch.30minute$PRICEmed / lag(ESmatch.30minute$PRICEmed))
ESmatch.30minute$normalizedreturnsmed <- scale (ESmatch.30minute$returnsmed)
# get the dummy variables for lunchtime and trading hours
# Lunchtime is between 12:00 and 12:59:59.999 each day
# Trading hour is between 9:00 and 16:00 each day
# Non-lunch and non-trading hour are low-liquidity
ESmatch.5minute$lunchtime <- as.numeric (hour (index (ESmatch.5minute)) == 12)
ESmatch.5minute$tradinghour <- as.numeric ((hour (index (ESmatch.5minute)) >= 9)
                                           & (hour (index (ESmatch.5minute)) < 16)
                                           & !(ESmatch.5minute$lunchtime))
ESmatch.5minute$hour <- as.factor(hour (index (ESmatch.5minute)))
ESmatch.5minute$wday <- as.factor(wday (index (ESmatch.5minute)))
ESmatch.15minute$lunchtime <- as.numeric (hour (index (ESmatch.15minute)) == 12)
ESmatch.15minute$tradinghour <- as.numeric ((hour (index (ESmatch.15minute)) >= 9)
                                            & (hour (index (ESmatch.15minute)) < 16)
                                            & !(ESmatch.15minute$lunchtime))
ESmatch.15minute$hour <- as.factor(hour (index (ESmatch.15minute)))
ESmatch.15minute$wday <- as.factor(wday (index (ESmatch.15minute)))
ESmatch.30minute$lunchtime <- as.numeric (hour (index (ESmatch.30minute)) == 12)
ESmatch.30minute$tradinghour <- as.numeric ((hour (index (ESmatch.30minute)) >= 9)
                                            & (hour (index (ESmatch.30minute)) < 16)
                                            & !(ESmatch.30minute$lunchtime))
ESmatch.30minute$hour <- as.factor(hour (index (ESmatch.30minute)))
ESmatch.30minute$wday <- as.factor(wday (index (ESmatch.30minute)))
# estimate moving average
ESmatch.5minute$movingaverage <- WMA (x= ESmatch.5minute$PRICE, n= 6, wts=1:6)
ESmatch.5minute$deltamovingaverage <- diff (ESmatch.5minute$movingaverage)
ESmatch.15minute$movingaverage <- WMA (x= ESmatch.15minute$PRICE, n= 4, wts=1:4)
ESmatch.15minute$deltamovingaverage <- diff (ESmatch.15minute$movingaverage)
ESmatch.30minute$movingaverage <- WMA (x= ESmatch.30minute$PRICE, n= 2, wts=1:2)
ESmatch.30minute$deltamovingaverage <- diff (ESmatch.30minute$movingaverage)
# estimate volatility
# NOTE(review): no volatility column is computed for ESmatch.30minute --
# this is why model.30minute below omits the L(volatility, -1) regressor.
ESmatch.5minute$volatility <- volatility(OHLC=ESmatch.5minute$PRICE, n=6, calc="close")
ESmatch.15minute$volatility <- volatility(OHLC=ESmatch.15minute$PRICE, n=4, calc="close")
# Fitting linear models! --------------------------------------------------
# NOTE(review): in dynlm, L(x, k) lags x by k periods, so L(x, -1) is a
# one-period LEAD (future value). Most formulas below use negative orders;
# if these regressions are meant to be predictive, leads introduce
# look-ahead -- confirm the sign convention is intentional.
# NOTE(review): several formulas include the same term twice (e.g.
# L(weighted_orderflow, -1) appears both on the first line and again in the
# weighted_orderflow lag run). lm/dynlm will drop the aliased duplicate
# (NA coefficient), so results are unaffected, but the formulas are noisy.
dim (ESmatch.5second)
dim (ESmatch.quartersecond)
# NOTE(review): this model uses L(normalizedreturns, 1) -- a positive order
# (a true lag), inconsistent with the negative orders used elsewhere.
model.quartersecond.full <- dynlm (returns ~ orderflow
                                   + L (normalizedreturns, 1)
                                   + volume + factor(hour) + factor(wday)
                                   + volatility
                                   + L (deltamovingaverage, 1)
                                   , data= ESmatch.quartersecond)
model.halfsecond.full <- dynlm (normalizedreturns ~ orderflow
                                + volume + factor(hour) + factor(wday)
                                + volatility
                                + L (deltamovingaverage, 1)
                                , data= ESmatch.halfsecond)
model.second <- dynlm (normalizedreturns ~ L(orderflow, -1) + L(weighted_orderflow, -1)
                       + L(BID, -1) + L(OFR, -1) + L (BIDSIZ, -1) + L (OFRSIZ, -1)
                       + L(BID, -2) + L(OFR, -2) + L (BIDSIZ, -2) + L (OFRSIZ, -2)
                       + L(BID, -3) + L(OFR, -3) + L (BIDSIZ, -3) + L (OFRSIZ, -3)
                       + L (volume, -1) + L (volume, -2)
                       + tradinghour + lunchtime
                       + L (weighted_orderflow, -1) + L (weighted_orderflow, -2)
                       + L (weighted_orderflow, -3) + L (weighted_orderflow, -4)
                       + L (normalizedreturns, -1)
                       + L (normalizedreturns, -2) + L (normalizedreturns, -3)
                       + L (normalizedreturns, -4) + L (normalizedreturns, -5)
                       + L (normalizedreturns, -6) + L (normalizedreturns, -7)
                       + L (volatility, -1) + L (deltamovingaverage, -1)
                       , data= ESmatch.second)
model.2second <- dynlm (normalizedreturns ~ L(orderflow, -1) + L(weighted_orderflow, -1)
                        + L(BID, -1) + L(OFR, -1) + L (BIDSIZ, -1) + L (OFRSIZ, -1)
                        + L(BID, -2) + L(OFR, -2) + L (BIDSIZ, -2) + L (OFRSIZ, -2)
                        + L(BID, -3) + L(OFR, -3) + L (BIDSIZ, -3) + L (OFRSIZ, -3)
                        + L (volume, -1) + L (volume, -2)
                        + tradinghour + lunchtime
                        + L (weighted_orderflow, -1) + L (weighted_orderflow, -2)
                        + L (weighted_orderflow, -3) + L (weighted_orderflow, -4)
                        + L (normalizedreturns, -1)
                        + L (normalizedreturns, -2) + L (normalizedreturns, -3)
                        + L (normalizedreturns, -4) + L (normalizedreturns, -5)
                        + L (normalizedreturns, -6) + L (normalizedreturns, -7)
                        + L (volatility, -1) + L (deltamovingaverage, -1)
                        , data= ESmatch.2second)
model.5second <- dynlm (normalizedreturns ~ L(orderflow, -1) + L(weighted_orderflow, -1)
                        + L(BID, -1) + L(OFR, -1) + L (BIDSIZ, -1) + L (OFRSIZ, -1)
                        + L(BID, -2) + L(OFR, -2) + L (BIDSIZ, -2) + L (OFRSIZ, -2)
                        + L(BID, -3) + L(OFR, -3) + L (BIDSIZ, -3) + L (OFRSIZ, -3)
                        + L (volume, -1) + L (volume, -2)
                        + tradinghour + lunchtime
                        + L (weighted_orderflow, -1) + L (weighted_orderflow, -2)
                        + L (weighted_orderflow, -3) + L (weighted_orderflow, -4)
                        + L (normalizedreturns, -1)
                        + L (normalizedreturns, -2) + L (normalizedreturns, -3)
                        + L (normalizedreturns, -4) + L (normalizedreturns, -5)
                        + L (normalizedreturns, -6) + L (normalizedreturns, -7)
                        + L (volatility, -1) + L (deltamovingaverage, -1)
                        , data= ESmatch.5second)
model.10second <- dynlm (normalizedreturns ~ L(orderflow, -1) + L(weighted_orderflow, -1)
                         + L(BID, -1) + L(OFR, -1) + L (BIDSIZ, -1) + L (OFRSIZ, -1)
                         + L(BID, -2) + L(OFR, -2) + L (BIDSIZ, -2) + L (OFRSIZ, -2)
                         + L(BID, -3) + L(OFR, -3) + L (BIDSIZ, -3) + L (OFRSIZ, -3)
                         + L (volume, -1) + L (volume, -2)
                         + tradinghour + lunchtime
                         + L (weighted_orderflow, -1) + L (weighted_orderflow, -2)
                         + L (weighted_orderflow, -3) + L (weighted_orderflow, -4)
                         + L (normalizedreturns, -1)
                         + L (normalizedreturns, -2) + L (normalizedreturns, -3)
                         + L (normalizedreturns, -4) + L (normalizedreturns, -5)
                         + L (normalizedreturns, -6) + L (normalizedreturns, -7)
                         + L (volatility, -1) + L (deltamovingaverage, -1)
                         , data= ESmatch.10second)
model.20second <- dynlm (normalizedreturns ~ L(orderflow, -1) + L(weighted_orderflow, -1)
                         + L(BID, -1) + L(OFR, -1) + L (BIDSIZ, -1) + L (OFRSIZ, -1)
                         + L(BID, -2) + L(OFR, -2) + L (BIDSIZ, -2) + L (OFRSIZ, -2)
                         + L(BID, -3) + L(OFR, -3) + L (BIDSIZ, -3) + L (OFRSIZ, -3)
                         + L (volume, -1) + L (volume, -2)
                         + tradinghour + lunchtime
                         + L (weighted_orderflow, -1) + L (weighted_orderflow, -2)
                         + L (weighted_orderflow, -3) + L (weighted_orderflow, -4)
                         + L (normalizedreturns, -1)
                         + L (normalizedreturns, -2) + L (normalizedreturns, -3)
                         + L (normalizedreturns, -4) + L (normalizedreturns, -5)
                         + L (normalizedreturns, -6) + L (normalizedreturns, -7)
                         + L (volatility, -1) + L (deltamovingaverage, -1)
                         , data= ESmatch.20second)
model.30second <- dynlm (normalizedreturns ~ L(orderflow, -1) + L(weighted_orderflow, -1)
                         + L(BID, -1) + L(OFR, -1) + L (BIDSIZ, -1) + L (OFRSIZ, -1)
                         + L(BID, -2) + L(OFR, -2) + L (BIDSIZ, -2) + L (OFRSIZ, -2)
                         + L(BID, -3) + L(OFR, -3) + L (BIDSIZ, -3) + L (OFRSIZ, -3)
                         + L (volume, -1) + L (volume, -2)
                         + tradinghour + lunchtime
                         + L (weighted_orderflow, -1) + L (weighted_orderflow, -2)
                         + L (weighted_orderflow, -3) + L (weighted_orderflow, -4)
                         + L (weighted_orderflow, -5) + L (weighted_orderflow, -6)
                         + L (normalizedreturns, -1)
                         + L (normalizedreturns, -2) + L (normalizedreturns, -3)
                         + L (normalizedreturns, -4) + L (normalizedreturns, -5)
                         + L (normalizedreturns, -6)
                         + L (volatility, -1) + L (deltamovingaverage, -1)
                         , data= ESmatch.30second)
# Minute-and-above models regress the median-price returns (normalizedreturnsmed).
model.minute <- dynlm (normalizedreturnsmed ~ L(orderflow, -1) + L(weighted_orderflow, -1)
                       + L(BID, -1) + L(OFR, -1) + L (BIDSIZ, -1) + L (OFRSIZ, -1)
                       + L(BID, -2) + L(OFR, -2) + L (BIDSIZ, -2) + L (OFRSIZ, -2)
                       + L(BID, -3) + L(OFR, -3) + L (BIDSIZ, -3) + L (OFRSIZ, -3)
                       + L (volume, -1) + L (volume, -2)
                       + tradinghour + lunchtime
                       + L (weighted_orderflow, -1) + L (weighted_orderflow, -2)
                       + L (weighted_orderflow, -3) + L (weighted_orderflow, -4)
                       + L (normalizedreturnsmed, -1)
                       + L (normalizedreturnsmed, -2) + L (normalizedreturnsmed, -3)
                       + L (normalizedreturnsmed, -4) + L (normalizedreturnsmed, -5)
                       + L (volatility, -1) + L (deltamovingaverage, -1)
                       , data= ESmatch.minute)
model.5minute <- dynlm (normalizedreturnsmed ~ L(orderflow, -1) + L(weighted_orderflow, -1)
                        + L(BID, -1) + L(OFR, -1) + L (BIDSIZ, -1) + L (OFRSIZ, -1)
                        + L(BID, -2) + L(OFR, -2) + L (BIDSIZ, -2) + L (OFRSIZ, -2)
                        + L(BID, -3) + L(OFR, -3) + L (BIDSIZ, -3) + L (OFRSIZ, -3)
                        + L (volume, -1) + L (volume, -2)
                        + tradinghour + lunchtime
                        + L (weighted_orderflow, -1) + L (weighted_orderflow, -2)
                        + L (weighted_orderflow, -3) + L (weighted_orderflow, -4)
                        + L (normalizedreturnsmed, -1)
                        + L (normalizedreturnsmed, -2) + L (normalizedreturnsmed, -3)
                        + L (normalizedreturnsmed, -4) + L (normalizedreturnsmed, -5)
                        + L (volatility, -1) + L (deltamovingaverage, -1)
                        , data= ESmatch.5minute)
model.15minute <- dynlm (normalizedreturnsmed ~ L(orderflow, -1) + L(weighted_orderflow, -1)
                         + L(BID, -1) + L(OFR, -1) + L (BIDSIZ, -1) + L (OFRSIZ, -1)
                         + L(BID, -2) + L(OFR, -2) + L (BIDSIZ, -2) + L (OFRSIZ, -2)
                         + L(BID, -3) + L(OFR, -3) + L (BIDSIZ, -3) + L (OFRSIZ, -3)
                         + L (volume, -1) + L (volume, -2)
                         + tradinghour + lunchtime
                         + L (weighted_orderflow, -1) + L (weighted_orderflow, -2)
                         + L (weighted_orderflow, -3)
                         + L (normalizedreturnsmed, -1)
                         + L (normalizedreturnsmed, -2) + L (normalizedreturnsmed, -3)
                         + L (volatility, -1) + L (deltamovingaverage, -1)
                         , data= ESmatch.15minute)
# model.30minute omits L(volatility, -1): ESmatch.30minute has no volatility column.
model.30minute <- dynlm (normalizedreturnsmed ~ L(orderflow, -1) + L(weighted_orderflow, -1)
                         + L(BID, -1) + L(OFR, -1) + L (BIDSIZ, -1) + L (OFRSIZ, -1)
                         + L(BID, -2) + L(OFR, -2) + L (BIDSIZ, -2) + L (OFRSIZ, -2)
                         + L(BID, -3) + L(OFR, -3) + L (BIDSIZ, -3) + L (OFRSIZ, -3)
                         + L (volume, -1) + L (volume, -2)
                         + tradinghour + lunchtime
                         + L (weighted_orderflow, -1) + L (weighted_orderflow, -2)
                         + L (weighted_orderflow, -3)
                         + L (normalizedreturnsmed, -1)
                         + L (normalizedreturnsmed, -2) + L (normalizedreturnsmed, -3)
                         + L (deltamovingaverage, -1)
                         , data= ESmatch.30minute)
# Inspect the fitted regressions.
# Fixed: the original called summary() on model.second.full through
# model.30second.full, but no such objects exist -- the models fitted above
# are named model.second, model.2second, ..., model.30second. Only the
# quarter- and half-second models carry the ".full" suffix.
summary(model.quartersecond.full)
summary(model.halfsecond.full)
summary(model.second)
summary(model.2second)
summary(model.5second)
summary(model.10second)
summary(model.20second)
summary(model.30second)
summary(model.minute)
summary(model.5minute)
summary(model.15minute)
summary(model.30minute)
# Simple, contemporary models, no lags
# Baseline regressions of normalized returns on contemporaneous order flow.
model.quartersecond.nolag <- lm(normalizedreturns ~ orderflow, data= ESmatch.quartersecond)
model.halfsecond.nolag <- lm(normalizedreturns ~ orderflow, data= ESmatch.halfsecond)
model.second.nolag <- lm(normalizedreturns ~ orderflow, data= ESmatch.second)
model.2second.nolag <- lm(normalizedreturns ~ orderflow, data= ESmatch.2second)
model.5second.nolag <- lm(normalizedreturns ~ orderflow, data= ESmatch.5second)
model.10second.nolag <- lm(normalizedreturns ~ orderflow, data= ESmatch.10second)
model.minute.nolag <- lm(normalizedreturns ~ orderflow, data= ESmatch.minute)
# Single-regressor dynamic models.
# NOTE(review): in dynlm, L(x, -1) is a LEAD, not a lag -- confirm intent.
model.halfsecond.lag <- dynlm(normalizedreturns ~ L(orderflow, -1), data= ESmatch.halfsecond)
model.2second.lag <- dynlm(normalizedreturns ~ L(orderflow, -1), data= ESmatch.2second)
model.second.lag <- dynlm(normalizedreturns ~ L(orderflow, -1), data= ESmatch.second)
model.5second.lag <- dynlm(normalizedreturns ~ L(orderflow, -1), data= ESmatch.5second)
model.10second.lag <- dynlm(normalizedreturns ~ L(orderflow, -1), data= ESmatch.10second)
model.20second.lag <- dynlm(normalizedreturns ~ L(orderflow, -1), data= ESmatch.20second)
# ".lag1" variants use smoothed order flow (averageorderflow); the
# half-second series has no averageorderflow column, so it uses
# weighted_orderflow instead.
model.halfsecond.lag1 <- dynlm(normalizedreturns ~ L(weighted_orderflow, -1), data= ESmatch.halfsecond)
model.second.lag1 <- dynlm(normalizedreturns ~ L(averageorderflow, -1), data= ESmatch.second)
model.2second.lag1 <- dynlm(normalizedreturns ~ L(averageorderflow, -1), data= ESmatch.2second)
model.5second.lag1 <- dynlm(normalizedreturns ~ L(averageorderflow, -1), data= ESmatch.5second)
model.10second.lag1 <- dynlm(normalizedreturns ~ L(averageorderflow, -1), data= ESmatch.10second)
model.20second.lag1 <- dynlm(normalizedreturns ~ L(averageorderflow, -1), data= ESmatch.20second)
# NOTE(review): ESmatch.5minute is not given an averageorderflow column
# above -- this fit will fail unless one is added; verify.
model.5minute.lag1 <- dynlm(normalizedreturns ~ L(averageorderflow, -1), data= ESmatch.5minute)
# Inspect the simple models.
# Fixed: the original also called summary(model.5minute.lag), but no such
# object is ever created above (only model.5minute.lag1 exists, summarized
# at the end of this run); that line has been removed.
summary(model.quartersecond.nolag)
summary(model.halfsecond.nolag)
summary(model.second.nolag)
summary(model.2second.nolag)
summary(model.5second.nolag)
summary(model.10second.nolag)
summary(model.minute.nolag)
summary(model.halfsecond.lag)
summary(model.second.lag)
summary(model.2second.lag)
summary(model.5second.lag)
summary(model.10second.lag)
summary(model.20second.lag)
summary(model.halfsecond.lag1)
summary(model.second.lag1)
summary(model.2second.lag1)
summary(model.5second.lag1)
summary(model.10second.lag1)
summary(model.20second.lag1)
summary(model.5minute.lag1)
# can we predict orderflow with lag returns and lags flows, then use it to predict price?
# NOTE(review): in dynlm, L(x, k) lags by k periods, so L(x, -1) is a
# one-period LEAD (future value) -- confirm this is intended.
model.futureflow.2second <- dynlm(orderflow ~ L(averageorderflow, -1) + L(movingaverage, -1)
                                  + L (normalizedreturns, -1) + L (normalizedreturns, -2)
                                  + L (orderflow, -1) + L (orderflow, -2)
                                  + L (volatility, -1) + L (deltamovingaverage, -1)
                                  + L(weighted_orderflow, -1)
                                  + L(BID, -1) + L(OFR, -1) + L (BIDSIZ, -1) + L (OFRSIZ, -1)
                                  ,data= ESmatch.2second )
# Test liquidity during different time of the day:
# Fixed: the original called rm(list=ls()) here, wiping the entire workspace
# (including ESmatch.second) immediately BEFORE the aggregation below tried
# to use it. That call has been removed; clearing the whole environment
# mid-script is an anti-pattern in any case.
ESliquidity <- aggregate (x= ESmatch.second$volume,
                          by= fastPOSIXct(trunc(index(ESmatch.second), units= "hours"),tz= "Chicago"),
                          FUN= sum)
# Fixed: the object created above is ESliquidity, not liquidity.
plot(ESliquidity)
rm(direction)
|
/archive/R_learning/timeSeries/regression_ES_tds_qts.R
|
no_license
|
nguyentu1602/R
|
R
| false
| false
| 45,718
|
r
|
## Place: ECN395 Research - timesSeries project
## Purpose: 1) Match quotes and trades in the aggregate ES_qtsagg and ES_tdsagg objects
## 2) Get the orderflows using the highfrequency package. Experiment with different correction
## 3) Regress the returns on (lagged) order-flow measures at multiple sampling frequencies
## Produce: xts objects to be ready to work with highfrequency package. .R data files of ES trades & quotes
## Learned: cat("\014") to clear the console; head (vector, -1) to get rid of the last column
# Clear the console (RStudio interprets the form-feed character as "clear").
cat("\014")
# These two lines are to get the data from my MAC and should be ignored
# save(list=c("ESmatch", "ESmatch.minute", "ESmatch.second", "ESqts", "EStds", "orderflow"), file="ESeverything.R")
# load("/Users/cuongnguyen/Dropbox/class/01.s14/ECN395/ESeverything.R")
# One-time package installation (kept commented out deliberately):
# install.packages('highfrequency')
# install.packages('biglm')
# install.packages('dynlm')
# install.packages('RcppArmadillo')
# install.packages('lubridate')
# install.packages('rugarch')
# install.packages('forecast')
# install.packages('TTR')
# Fixed: removed unresolved git merge-conflict markers (<<<<<<< HEAD /
# ======= / >>>>>>> 1fcf8d3...) that were committed into the file; the HEAD
# side's load of the combined data file is kept. Also removed the trailing
# comma in load(...) -- an empty trailing argument is a syntax error in R.
load("~/Desktop/ecn395data/ESeverything.R")
load("~/Desktop/ecn395data/ES_qts_aggregate.R")
load("~/Desktop/ecn395data/ES_qts_xts_full.R")
#load("~/Desktop/ecn395data/ES_Quotes_raw.R")
load("~/Desktop/ecn395data/ES_tds_aggregate.R")
load("~/Desktop/ecn395data/ES_tds_xts_full.R")
#load("~/Desktop/ecn395data/ES_Trades_raw.R")
require(fasttime)
require(zoo); require(xts); require(highfrequency)
require(quantmod)
require(dynlm)
require(RcppArmadillo)
require (lubridate)
require (rugarch)
require (forecast)
require (TTR)
rm(list=ls(pattern=("model.")))
# Set the Sys.time variable to have 6 digits after seconds
options("digits.secs"=6)
Sys.time()
# Rename the object, we make use of lazy evaluation here
# ESqts <- qtsagg # This is the aggregated ES qts xts
# ESqts.full <- qts # This is full ES qts xts
# EStds <- tdsagg
# EStds.full <- tds
# Matching the quotes and trades at milliseconds level ---------------------
# Now match the ES trades and the quotes data, no adjustment
# (adjustment = 0 means no artificial delay is added to the quote timestamps
# before pairing each trade with its prevailing quote).
ESmatch <- matchTradesQuotes (EStds,ESqts, adjustment=0.000)
ESmatch <- merge (ESmatch, EStds$tds_volume)
# Get the trade directions of the data using Lee and Ready algo
# 1 is buy and -1 is sell
# NOTE: 'direction' and 'orderflow' are deliberately kept as globals;
# 'direction' is referenced again at the very end of the script.
direction <- getTradeDirection (ESmatch)
orderflow <- direction * ESmatch$tds_volume
ESmatch <- merge (ESmatch, direction, orderflow)
# Trick to eliminate a column in an object very quickly
# ESmatch$orderflow = NULL
# Matching quotes and trades at the 0.5 and 1 SECONDS level -------------------------
# data to store in ESmatch.second
# Create a vector of average price per second. So price at 17:00:00 means
# the average price from 17:00:00.000 to 17:00:00.999
# Sub-second grids are built with align.time(); the 1-second grid is built
# below by truncating the raw timestamps to the second.
timestamp_halfsecond <- align.time(index(ESmatch), n= 0.5)
timestamp_quartersecond <- align.time(index(ESmatch), n= 0.25)
ESmatch.quartersecond <- aggregate (x= ESmatch$PRICE,
                                    by= timestamp_quartersecond,
                                    FUN= mean)
ESmatch.halfsecond <- aggregate (x= ESmatch$PRICE,
                                 by= timestamp_halfsecond,
                                 FUN= mean)
ESmatch.second <- aggregate (x= ESmatch$PRICE,
                             by= fastPOSIXct(trunc(index(ESmatch), units= "secs"),tz= "Chicago"),
                             FUN= mean)
# Change the vector to xts object and name the first column
ESmatch.quartersecond <- as.xts(x= ESmatch.quartersecond)
dimnames(ESmatch.quartersecond) <- list(list(), c("PRICE"))
ESmatch.halfsecond <- as.xts(x= ESmatch.halfsecond)
dimnames(ESmatch.halfsecond) <- list(list(), c("PRICE"))
ESmatch.second <- as.xts(x= ESmatch.second)
dimnames(ESmatch.second) <- list(list(), c("PRICE"))
# NOTE(review): stray diagnostic call -- only prints the working directory.
getwd()
#Now create several vectors of BID ASk etc
# The quarter-, half- and one-second bars previously repeated the same ~30-line
# feature block three times; it is factored into one helper here.  'bars'
# already holds the per-bar mean PRICE column; 'ts' maps every raw
# (millisecond) observation of ESmatch onto the bar grid.
ts_second <- fastPOSIXct (trunc (index (ESmatch), units= "secs"), tz= "Chicago")
add_subsecond_features <- function(bars, ts) {
  # median quotes/sizes per bar, then per-bar trade direction (Lee-Ready)
  bars <- merge (bars, as.xts (aggregate (x= ESmatch[, 2:5], by= ts, FUN= median)))
  bars$direction <- getTradeDirection (bars)
  # net orderflow = sum of tick directions; weighted version scales each tick
  # by its traded volume before summing
  bars$orderflow <- as.xts (aggregate (x= ESmatch$direction, by= ts, FUN= sum))
  bars$weighted_orderflow <- as.xts (aggregate (x= (ESmatch$direction * ESmatch$tds_volume),
                                                by= ts, FUN= sum))
  bars$volume <- as.xts (aggregate (x= ESmatch$tds_volume, by= ts, FUN= sum))
  # log returns and their standardised (z-scored) version
  bars$returns <- log (bars$PRICE / lag (bars$PRICE))
  bars$normalizedreturns <- scale (x= bars$returns)
  # liquidity regime dummies: lunch = 12:00-12:59, trading = 09:00-15:59 ex lunch
  bars$lunchtime <- as.numeric (hour (index (bars)) == 12)
  bars$tradinghour <- as.numeric ((hour (index (bars)) >= 9)
                                  & (hour (index (bars)) < 16)
                                  & !(bars$lunchtime))
  bars$hour <- as.factor (hour (index (bars)))
  bars$wday <- as.factor (wday (index (bars)))
  # 20-bar weighted moving average of price and its first difference
  bars$movingaverage <- WMA (x= bars$PRICE, n= 20, wts= 1:20)
  bars$deltamovingaverage <- diff (bars$movingaverage)
  # close-to-close volatility over a 20-bar window
  bars$volatility <- volatility (OHLC= bars$PRICE, n= 20, calc= "close")
  bars
}
ESmatch.quartersecond <- add_subsecond_features (ESmatch.quartersecond, timestamp_quartersecond)
ESmatch.halfsecond    <- add_subsecond_features (ESmatch.halfsecond, timestamp_halfsecond)
ESmatch.second        <- add_subsecond_features (ESmatch.second, ts_second)
# Only the 1-second bars carry a WMA of orderflow (used by the *.lag1 models).
ESmatch.second$averageorderflow <- WMA (x= ESmatch.second$orderflow, n= 10, wts= 1:10)
# Diagnostic only: rough size ratio of the quarter-second object to the
# one-second price series (counts matrix elements, not rows).
length(ESmatch.quartersecond) / length(ESmatch.second$PRICE)
# Matching quotes and trades at the MINUTES level -------------------------
# data to store in ESmatch.minute
# Create a vector of average price per minute. So price at 17:00:00 means
# the average price from 17:00:00.000 to 17:00:59.999 - 1 min gap
# (minute bars are built from the 1-second bars, not the raw matched series,
# except for PRICEmed below which goes back to the raw series)
ESmatch.minute <- aggregate (x= ESmatch.second$PRICE,
                             by= fastPOSIXct(trunc(index(ESmatch.second), units= "mins"),tz= "Chicago"),
                             FUN= mean)
# Change the vector to xts object and name the first column
ESmatch.minute <- as.xts(x= ESmatch.minute)
dimnames(ESmatch.minute) <- list(list(), c("PRICE"))
# Create another vector of median price named PRICEmed, data used from the original timeseries
ESmatch.minute$PRICEmed <- as.xts (
  aggregate (x= ESmatch$PRICE,
             by= fastPOSIXct (trunc (index (ESmatch), units= "mins"), tz= "Chicago"),
             FUN= median))
# Now create several vectors of BID ASk etc (per-minute medians of quotes/sizes)
temp <- as.xts (aggregate (x= ESmatch.second[,2:5],
                           by= fastPOSIXct(trunc(index(ESmatch.second), units= "mins"),tz= "Chicago"),
                           FUN= median))
# merge. After this ESmatch.minute holds PRICE, PRICEmed, BID, OFR, BIDSIZ, OFRSIZ
ESmatch.minute <- merge(ESmatch.minute, temp)
# get orderflow (sum of the per-second net orderflow within each minute)
ESmatch.minute$orderflow <- as.xts (
  aggregate (x= ESmatch.second$orderflow,
             by= fastPOSIXct (trunc (index (ESmatch.second), units= "mins"), tz= "Chicago"),
             FUN= sum))
# get weighted_orderflow by multiplying direction with tds_volume before summing up:
ESmatch.minute$weighted_orderflow <- as.xts (
  aggregate (x= ESmatch.second$weighted_orderflow,
             by= fastPOSIXct (trunc (index (ESmatch.second), units= "mins"), tz= "Chicago"),
             FUN= sum))
# get trade volume
ESmatch.minute$volume <- as.xts (
  aggregate (x= ESmatch.second$volume,
             by= fastPOSIXct (trunc (index (ESmatch.second), units= "mins"), tz= "Chicago"),
             FUN= sum))
# get the returns - both mean and median - and standardized it
ESmatch.minute$returns <- log (ESmatch.minute$PRICE / lag(ESmatch.minute$PRICE))
ESmatch.minute$normalizedreturns <- scale (ESmatch.minute$returns)
ESmatch.minute$returnsmed <- log (ESmatch.minute$PRICEmed / lag(ESmatch.minute$PRICEmed))
ESmatch.minute$normalizedreturnsmed <- scale (ESmatch.minute$returnsmed)
# get the dummy variables for lunchtime and trading hours
# Lunchtime is between 12:00 and 12:59:59.999 each day
# Trading hour is between 9:00 and 16:00 each day
# Non-lunch and non-trading hour are low-liquidity
# (logical values are coerced to 0/1 when assigned into the numeric xts matrix)
ESmatch.minute$lunchtime <- (hour (index (ESmatch.minute)) == 12)
ESmatch.minute$tradinghour <- ((hour (index (ESmatch.minute)) >= 9)
                               & (hour (index (ESmatch.minute)) < 16)
                               & !(ESmatch.minute$lunchtime))
ESmatch.minute$hour <- as.factor(hour (index (ESmatch.minute)))
ESmatch.minute$wday <- as.factor(wday (index (ESmatch.minute)))
# estimate moving average (20-bar weighted moving average and its change)
ESmatch.minute$movingaverage <- WMA (x= ESmatch.minute$PRICE, n= 20, wts=1:20)
ESmatch.minute$deltamovingaverage <- diff (ESmatch.minute$movingaverage)
# estimate volatility (close-to-close, 20-bar window)
ESmatch.minute$volatility <- volatility(OHLC=ESmatch.minute$PRICE, n=20, calc="close")
# Matching quotes and trades at the 2, 5, 10, 20 and 30-seconds level ---------------------
# Bar-aligned timestamp grids (kept as globals for compatibility with the
# original script, although the builder below recomputes them internally).
timestamp_2second <- align.time (index (ESmatch.second), n= 2)
timestamp_5second <- align.time (index (ESmatch.second), n= 5)
timestamp_10second <- align.time (index (ESmatch.second), n= 10)
timestamp_20second <- align.time (index (ESmatch.second), n= 20)
timestamp_30second <- align.time (index (ESmatch.second), n= 30)
# One parameterised builder replaces five near-identical copy-pasted blocks.
# For an n-second bar it aggregates the 1-second series (mean quotes/price,
# summed flows/volume), attaches the median trade price from the raw matched
# series, and derives returns, time-of-day dummies, moving averages and
# volatility -- the same features the original per-frequency code produced.
# ao_n: window of the orderflow WMA; NULL skips it (original 30s bars had none).
make_second_bars <- function(n_sec, ao_n = NULL) {
  ts_bar <- align.time (index (ESmatch.second), n= n_sec)
  bars <- as.xts (aggregate (x= ESmatch.second[, 1:5], by= ts_bar, FUN= mean))
  # median trade price computed from the raw (millisecond) matched series
  bars$PRICEmed <- as.xts (aggregate (x= ESmatch$PRICE,
                                      by= align.time (index (ESmatch), n= n_sec),
                                      FUN= median))
  # signed flows and volume accumulate across the bar
  bars$orderflow <- as.xts (aggregate (x= ESmatch.second$orderflow, by= ts_bar, FUN= sum))
  bars$weighted_orderflow <- as.xts (aggregate (x= ESmatch.second$weighted_orderflow,
                                                by= ts_bar, FUN= sum))
  bars$volume <- as.xts (aggregate (x= ESmatch.second$volume, by= ts_bar, FUN= sum))
  # log returns on the mean and the median price, plus standardised versions
  bars$returns <- log (bars$PRICE / lag (bars$PRICE))
  bars$normalizedreturns <- scale (bars$returns)
  bars$returnsmed <- log (bars$PRICEmed / lag (bars$PRICEmed))
  bars$normalizedreturnsmed <- scale (bars$returnsmed)
  # liquidity regime dummies: lunch = 12:00-12:59, trading = 09:00-15:59 ex lunch
  bars$lunchtime <- as.numeric (hour (index (bars)) == 12)
  bars$tradinghour <- as.numeric ((hour (index (bars)) >= 9)
                                  & (hour (index (bars)) < 16)
                                  & !(bars$lunchtime))
  bars$hour <- as.factor (hour (index (bars)))
  bars$wday <- as.factor (wday (index (bars)))
  # 20-bar weighted moving average of price, its first difference, and an
  # optional WMA of orderflow
  bars$movingaverage <- WMA (x= bars$PRICE, n= 20, wts= 1:20)
  bars$deltamovingaverage <- diff (bars$movingaverage)
  if (!is.null (ao_n)) {
    bars$averageorderflow <- WMA (x= bars$orderflow, n= ao_n, wts= 1:ao_n)
  }
  # close-to-close volatility over a 20-bar window
  bars$volatility <- volatility (OHLC= bars$PRICE, n= 20, calc= "close")
  bars
}
ESmatch.2second  <- make_second_bars (2,  ao_n= 10)
ESmatch.5second  <- make_second_bars (5,  ao_n= 5)
ESmatch.10second <- make_second_bars (10, ao_n= 5)
ESmatch.20second <- make_second_bars (20, ao_n= 5)
ESmatch.30second <- make_second_bars (30)   # original 30s bars had no averageorderflow
# Matching quotes and trades at the 5-MINUTES, 15-MINUTES and 30-MINUTES level --------
# Bar-aligned timestamp grids (kept as globals for compatibility with the
# original script, although the builder below recomputes them internally).
timestamp_5minute <- align.time (index (ESmatch.second), n= 5*60)
timestamp_15minute <- align.time (index (ESmatch.second), n= 15*60)
timestamp_30minute <- align.time (index (ESmatch.second), n= 30*60)
# Parameterised builder replacing three copy-pasted per-frequency blocks.
# ma_n sets the price-WMA window; vol_n sets the volatility window and is
# NULL for the 30-minute bars, for which the original never computed
# volatility.
make_minute_bars <- function(n_min, ma_n, vol_n = NULL) {
  ts_bar <- align.time (index (ESmatch.second), n= n_min * 60)
  bars <- as.xts (aggregate (x= ESmatch.second[, 1:5], by= ts_bar, FUN= mean))
  # median trade price computed from the raw (millisecond) matched series
  bars$PRICEmed <- as.xts (aggregate (x= ESmatch$PRICE,
                                      by= align.time (index (ESmatch), n= n_min * 60),
                                      FUN= median))
  # signed flows and volume accumulate across the bar
  bars$orderflow <- as.xts (aggregate (x= ESmatch.second$orderflow, by= ts_bar, FUN= sum))
  bars$weighted_orderflow <- as.xts (aggregate (x= ESmatch.second$weighted_orderflow,
                                                by= ts_bar, FUN= sum))
  bars$volume <- as.xts (aggregate (x= ESmatch.second$volume, by= ts_bar, FUN= sum))
  # log returns on the mean and the median price, plus standardised versions
  bars$returns <- log (bars$PRICE / lag (bars$PRICE))
  bars$normalizedreturns <- scale (bars$returns)
  bars$returnsmed <- log (bars$PRICEmed / lag (bars$PRICEmed))
  bars$normalizedreturnsmed <- scale (bars$returnsmed)
  # liquidity regime dummies: lunch = 12:00-12:59, trading = 09:00-15:59 ex lunch
  bars$lunchtime <- as.numeric (hour (index (bars)) == 12)
  bars$tradinghour <- as.numeric ((hour (index (bars)) >= 9)
                                  & (hour (index (bars)) < 16)
                                  & !(bars$lunchtime))
  bars$hour <- as.factor (hour (index (bars)))
  bars$wday <- as.factor (wday (index (bars)))
  # weighted moving average of price (window shrinks with the bar size)
  bars$movingaverage <- WMA (x= bars$PRICE, n= ma_n, wts= 1:ma_n)
  bars$deltamovingaverage <- diff (bars$movingaverage)
  if (!is.null (vol_n)) {
    bars$volatility <- volatility (OHLC= bars$PRICE, n= vol_n, calc= "close")
  }
  bars
}
ESmatch.5minute  <- make_minute_bars (5,  ma_n= 6, vol_n= 6)
ESmatch.15minute <- make_minute_bars (15, ma_n= 4, vol_n= 4)
ESmatch.30minute <- make_minute_bars (30, ma_n= 2)  # no volatility at 30 minutes
# Fitting linear models! --------------------------------------------------
dim (ESmatch.5second)
dim (ESmatch.quartersecond)
# NOTE(review): in dynlm, L(x, k) is the lag of order k, so L(x, -1) is a
# one-period LEAD (the value at t+1), not a lag.  All models below keep the
# original negative-k specification; confirm that regressing returns on t+1
# regressors is intentional before reading these as predictive fits.
model.quartersecond.full <- dynlm (returns ~ orderflow
                                   + L (normalizedreturns, 1)
                                   + volume + factor(hour) + factor(wday)
                                   + volatility
                                   + L (deltamovingaverage, 1)
                                   , data= ESmatch.quartersecond)
model.halfsecond.full <- dynlm (normalizedreturns ~ orderflow
                                + volume + factor(hour) + factor(wday)
                                + volatility
                                + L (deltamovingaverage, 1)
                                , data= ESmatch.halfsecond)
# The 1- through 20-second models share one specification; define the formula
# once instead of repeating the same 30 lines five times.  (The original also
# listed L(weighted_orderflow, -1) twice; terms() de-duplicates repeated
# regressors, so listing it once yields an identical fit.)
form.returns <- normalizedreturns ~ L(orderflow, -1) + L(weighted_orderflow, -1) +
  L(BID, -1) + L(OFR, -1) + L(BIDSIZ, -1) + L(OFRSIZ, -1) +
  L(BID, -2) + L(OFR, -2) + L(BIDSIZ, -2) + L(OFRSIZ, -2) +
  L(BID, -3) + L(OFR, -3) + L(BIDSIZ, -3) + L(OFRSIZ, -3) +
  L(volume, -1) + L(volume, -2) +
  tradinghour + lunchtime +
  L(weighted_orderflow, -2) + L(weighted_orderflow, -3) + L(weighted_orderflow, -4) +
  L(normalizedreturns, -1) + L(normalizedreturns, -2) + L(normalizedreturns, -3) +
  L(normalizedreturns, -4) + L(normalizedreturns, -5) +
  L(normalizedreturns, -6) + L(normalizedreturns, -7) +
  L(volatility, -1) + L(deltamovingaverage, -1)
model.second   <- dynlm (form.returns, data= ESmatch.second)
model.2second  <- dynlm (form.returns, data= ESmatch.2second)
model.5second  <- dynlm (form.returns, data= ESmatch.5second)
model.10second <- dynlm (form.returns, data= ESmatch.10second)
model.20second <- dynlm (form.returns, data= ESmatch.20second)
# 30-second bars: orderflow terms out to 6 periods, returns only to 6.
form.returns.30s <- normalizedreturns ~ L(orderflow, -1) + L(weighted_orderflow, -1) +
  L(BID, -1) + L(OFR, -1) + L(BIDSIZ, -1) + L(OFRSIZ, -1) +
  L(BID, -2) + L(OFR, -2) + L(BIDSIZ, -2) + L(OFRSIZ, -2) +
  L(BID, -3) + L(OFR, -3) + L(BIDSIZ, -3) + L(OFRSIZ, -3) +
  L(volume, -1) + L(volume, -2) +
  tradinghour + lunchtime +
  L(weighted_orderflow, -2) + L(weighted_orderflow, -3) + L(weighted_orderflow, -4) +
  L(weighted_orderflow, -5) + L(weighted_orderflow, -6) +
  L(normalizedreturns, -1) + L(normalizedreturns, -2) + L(normalizedreturns, -3) +
  L(normalizedreturns, -4) + L(normalizedreturns, -5) + L(normalizedreturns, -6) +
  L(volatility, -1) + L(deltamovingaverage, -1)
model.30second <- dynlm (form.returns.30s, data= ESmatch.30second)
# Minute-and-coarser bars model the median-price returns instead.
form.returnsmed <- normalizedreturnsmed ~ L(orderflow, -1) + L(weighted_orderflow, -1) +
  L(BID, -1) + L(OFR, -1) + L(BIDSIZ, -1) + L(OFRSIZ, -1) +
  L(BID, -2) + L(OFR, -2) + L(BIDSIZ, -2) + L(OFRSIZ, -2) +
  L(BID, -3) + L(OFR, -3) + L(BIDSIZ, -3) + L(OFRSIZ, -3) +
  L(volume, -1) + L(volume, -2) +
  tradinghour + lunchtime +
  L(weighted_orderflow, -2) + L(weighted_orderflow, -3) + L(weighted_orderflow, -4) +
  L(normalizedreturnsmed, -1) + L(normalizedreturnsmed, -2) + L(normalizedreturnsmed, -3) +
  L(normalizedreturnsmed, -4) + L(normalizedreturnsmed, -5) +
  L(volatility, -1) + L(deltamovingaverage, -1)
model.minute  <- dynlm (form.returnsmed, data= ESmatch.minute)
model.5minute <- dynlm (form.returnsmed, data= ESmatch.5minute)
# 15-minute bars use a shorter term structure; 30-minute additionally drops
# volatility (the 30-minute bars never had a volatility column).
form.returnsmed.15m <- normalizedreturnsmed ~ L(orderflow, -1) + L(weighted_orderflow, -1) +
  L(BID, -1) + L(OFR, -1) + L(BIDSIZ, -1) + L(OFRSIZ, -1) +
  L(BID, -2) + L(OFR, -2) + L(BIDSIZ, -2) + L(OFRSIZ, -2) +
  L(BID, -3) + L(OFR, -3) + L(BIDSIZ, -3) + L(OFRSIZ, -3) +
  L(volume, -1) + L(volume, -2) +
  tradinghour + lunchtime +
  L(weighted_orderflow, -2) + L(weighted_orderflow, -3) +
  L(normalizedreturnsmed, -1) + L(normalizedreturnsmed, -2) + L(normalizedreturnsmed, -3) +
  L(volatility, -1) + L(deltamovingaverage, -1)
model.15minute <- dynlm (form.returnsmed.15m, data= ESmatch.15minute)
form.returnsmed.30m <- normalizedreturnsmed ~ L(orderflow, -1) + L(weighted_orderflow, -1) +
  L(BID, -1) + L(OFR, -1) + L(BIDSIZ, -1) + L(OFRSIZ, -1) +
  L(BID, -2) + L(OFR, -2) + L(BIDSIZ, -2) + L(OFRSIZ, -2) +
  L(BID, -3) + L(OFR, -3) + L(BIDSIZ, -3) + L(OFRSIZ, -3) +
  L(volume, -1) + L(volume, -2) +
  tradinghour + lunchtime +
  L(weighted_orderflow, -2) + L(weighted_orderflow, -3) +
  L(normalizedreturnsmed, -1) + L(normalizedreturnsmed, -2) + L(normalizedreturnsmed, -3) +
  L(deltamovingaverage, -1)
model.30minute <- dynlm (form.returnsmed.30m, data= ESmatch.30minute)
# BUG FIX: the second- through 30-second models were fitted WITHOUT a ".full"
# suffix; the original summary() calls referenced non-existent
# "model.*second.full" objects and errored.
summary(model.quartersecond.full)
summary(model.halfsecond.full)
summary(model.second)
summary(model.2second)
summary(model.5second)
summary(model.10second)
summary(model.20second)
summary(model.30second)
summary(model.minute)
summary(model.5minute)
summary(model.15minute)
summary(model.30minute)
# Simple, contemporary models, no lags
model.quartersecond.nolag <- lm(normalizedreturns ~ orderflow, data= ESmatch.quartersecond)
model.halfsecond.nolag <- lm(normalizedreturns ~ orderflow, data= ESmatch.halfsecond)
model.second.nolag <- lm(normalizedreturns ~ orderflow, data= ESmatch.second)
model.2second.nolag <- lm(normalizedreturns ~ orderflow, data= ESmatch.2second)
model.5second.nolag <- lm(normalizedreturns ~ orderflow, data= ESmatch.5second)
model.10second.nolag <- lm(normalizedreturns ~ orderflow, data= ESmatch.10second)
model.minute.nolag <- lm(normalizedreturns ~ orderflow, data= ESmatch.minute)
# Single-regressor dynamic models.  NOTE(review): in dynlm L(x, -1) is a
# one-period LEAD (t+1); confirm the sign is intended.
model.halfsecond.lag <- dynlm(normalizedreturns ~ L(orderflow, -1), data= ESmatch.halfsecond)
model.2second.lag <- dynlm(normalizedreturns ~ L(orderflow, -1), data= ESmatch.2second)
model.second.lag <- dynlm(normalizedreturns ~ L(orderflow, -1), data= ESmatch.second)
model.5second.lag <- dynlm(normalizedreturns ~ L(orderflow, -1), data= ESmatch.5second)
model.10second.lag <- dynlm(normalizedreturns ~ L(orderflow, -1), data= ESmatch.10second)
model.20second.lag <- dynlm(normalizedreturns ~ L(orderflow, -1), data= ESmatch.20second)
# BUG FIX: summary(model.5minute.lag) below referenced a model that was never
# fitted; fit it here, consistent with the other frequencies.
model.5minute.lag <- dynlm(normalizedreturns ~ L(orderflow, -1), data= ESmatch.5minute)
model.halfsecond.lag1 <- dynlm(normalizedreturns ~ L(weighted_orderflow, -1), data= ESmatch.halfsecond)
model.second.lag1 <- dynlm(normalizedreturns ~ L(averageorderflow, -1), data= ESmatch.second)
model.2second.lag1 <- dynlm(normalizedreturns ~ L(averageorderflow, -1), data= ESmatch.2second)
model.5second.lag1 <- dynlm(normalizedreturns ~ L(averageorderflow, -1), data= ESmatch.5second)
model.10second.lag1 <- dynlm(normalizedreturns ~ L(averageorderflow, -1), data= ESmatch.10second)
model.20second.lag1 <- dynlm(normalizedreturns ~ L(averageorderflow, -1), data= ESmatch.20second)
# BUG FIX: ESmatch.5minute never had an 'averageorderflow' column, so the
# model.5minute.lag1 fit used to fail with "object not found"; build the
# column first (5-bar WMA of orderflow, matching the second-frequency bars).
ESmatch.5minute$averageorderflow <- WMA (x= ESmatch.5minute$orderflow, n= 5, wts=1:5)
model.5minute.lag1 <- dynlm(normalizedreturns ~ L(averageorderflow, -1), data= ESmatch.5minute)
summary(model.quartersecond.nolag)
summary(model.halfsecond.nolag)
summary(model.second.nolag)
summary(model.2second.nolag)
summary(model.5second.nolag)
summary(model.10second.nolag)
summary(model.minute.nolag)
summary(model.halfsecond.lag)
summary(model.second.lag)
summary(model.2second.lag)
summary(model.5second.lag)
summary(model.10second.lag)
summary(model.20second.lag)
summary(model.5minute.lag)
summary(model.halfsecond.lag1)
summary(model.second.lag1)
summary(model.2second.lag1)
summary(model.5second.lag1)
summary(model.10second.lag1)
summary(model.20second.lag1)
summary(model.5minute.lag1)
# can we predict orderflow with lag returns and lags flows, then use it to predict price?
# NOTE(review): in dynlm, L(x, k) is the lag of order k, so L(x, -1) is a
# one-period LEAD (the value at t+1) -- confirm the negative indices are intended.
model.futureflow.2second <- dynlm(orderflow ~ L(averageorderflow, -1) + L(movingaverage, -1)
                                  + L (normalizedreturns, -1) + L (normalizedreturns, -2)
                                  + L (orderflow, -1) + L (orderflow, -2)
                                  + L (volatility, -1) + L (deltamovingaverage, -1)
                                  + L(weighted_orderflow, -1)
                                  + L(BID, -1) + L(OFR, -1) + L (BIDSIZ, -1) + L (OFRSIZ, -1)
                                  ,data= ESmatch.2second )
# Test liquidity during different time of the day:
# BUG FIX: the original called rm(list = ls()) here, wiping ESmatch.second
# BEFORE the hourly liquidity aggregation that reads it, and then plotted a
# non-existent object 'liquidity'.  The blanket rm() is removed and the plot
# now references ESliquidity.
ESliquidity <- aggregate (x= ESmatch.second$volume,
                          by= fastPOSIXct(trunc(index(ESmatch.second), units= "hours"),tz= "Chicago"),
                          FUN= sum)
plot(ESliquidity)
rm(direction)
|
##checking input
test_that("Input check", {
  test1 = matrix(rnorm(100*4), nrow=100, ncol=4) #test input is in matrix form
  #check input has to be GRangesList
  # BUG FIX: checkException() is an RUnit function and is not available inside
  # a testthat run unless RUnit happens to be attached; use testthat's own
  # expect_error() instead.
  expect_error(run.cin.chr(grl.seg = test1, thr.gain=2.25, thr.loss=1.75, V.def=3, V.mode="sum"))
})
|
/inst/unitTests/test-run.cin.chr.R
|
no_license
|
ICBI/CINdex
|
R
| false
| false
| 286
|
r
|
## Input validation: run.cin.chr() accepts only a GRangesList, so passing a
## plain numeric matrix must raise an error.
test_that("Input check", {
  bad_input <- matrix(rnorm(100 * 4), nrow = 100, ncol = 4)  # not a GRangesList
  # checkException() belongs to RUnit; inside a testthat test use expect_error().
  expect_error(
    run.cin.chr(grl.seg = bad_input, thr.gain = 2.25, thr.loss = 1.75,
                V.def = 3, V.mode = "sum")
  )
})
|
library(testthat)
library(photosynthesis)

context("Fitting light response curves")

# Minimal light-response curve: net assimilation (A_net) measured across a
# range of photosynthetic photon flux densities (PPFD).
df <- data.frame(
  A_net = c(10, 9.5, 8, 3.5, 2.5, 2.0, 1, 0.2),
  PPFD = c(1500, 750, 375, 125, 100, 75, 50, 25)
)

model <- fit_aq_response(df)

test_that("Outputs", {
  # expect_is() is deprecated in testthat 3e; use expect_type() for base
  # types and expect_s3_class() for S3 classes instead.
  expect_type(model[1], "list")              # single bracket keeps the list wrapper
  expect_s3_class(model[[2]], "data.frame")  # double bracket extracts the data frame
  expect_type(model[3], "list")
  expect_length(model, 3)
})
|
/tests/testthat/test-fit_aq_response.R
|
permissive
|
wangdata/photosynthesis
|
R
| false
| false
| 442
|
r
|
library(testthat)
library(photosynthesis)

context("Fitting light response curves")

# Minimal light-response curve: net assimilation (A_net) measured across a
# range of photosynthetic photon flux densities (PPFD).
df <- data.frame(
  A_net = c(10, 9.5, 8, 3.5, 2.5, 2.0, 1, 0.2),
  PPFD = c(1500, 750, 375, 125, 100, 75, 50, 25)
)

model <- fit_aq_response(df)

test_that("Outputs", {
  # expect_is() is deprecated in testthat 3e; use expect_type() for base
  # types and expect_s3_class() for S3 classes instead.
  expect_type(model[1], "list")              # single bracket keeps the list wrapper
  expect_s3_class(model[[2]], "data.frame")  # double bracket extracts the data frame
  expect_type(model[3], "list")
  expect_length(model, 3)
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/postProcess.R
\name{postProcess}
\alias{postProcess}
\alias{postProcess.list}
\alias{postProcess.default}
\title{Generic function to post process objects}
\usage{
postProcess(x, ...)
\method{postProcess}{list}(x, ...)
\method{postProcess}{default}(x, ...)
}
\arguments{
\item{x}{A GIS object to be post-processed,
e.g., a Spat* or sf* object. This can be provided as a
\code{rlang::quosure} or a normal R object.}
\item{...}{Additional arguments passed to methods. For \code{spatialClasses},
these are: \code{\link[=cropTo]{cropTo()}}, \code{\link[=fixErrorsIn]{fixErrorsIn()}},
\code{\link[=projectTo]{projectTo()}}, \code{\link[=maskTo]{maskTo()}},
\code{\link[=determineFilename]{determineFilename()}}, and \code{\link[=writeTo]{writeTo()}}.
Each of these may also pass \code{...} into other functions, like
\code{\link[=writeTo]{writeTo()}}.
This might include potentially important arguments like \code{datatype},
\code{format}. Also passed to \code{terra::project},
with likely important arguments such as \code{method = "bilinear"}.
See details.}
}
\value{
A GIS file (e.g., \code{RasterLayer}, \code{SpatRaster} etc.) that has been
appropriately cropped, reprojected, masked, depending on the inputs.
}
\description{
\if{html}{\figure{lifecycle-maturing.svg}{options: alt="maturing"}}
The method for GIS objects (terra \verb{Spat*} & sf classes) will
crop, reproject, and mask, in that order.
This is a wrapper for \code{\link[=cropTo]{cropTo()}}, \code{\link[=fixErrorsIn]{fixErrorsIn()}},
\code{\link[=projectTo]{projectTo()}}, \code{\link[=maskTo]{maskTo()}} and \code{\link[=writeTo]{writeTo()}},
with a required amount of data manipulation between these calls so that the crs match.
}
\section{Post processing sequence}{
If the \code{rasterToMatch} or \code{studyArea} are passed, then
the following sequence will occur:
\enumerate{
\item Fix errors \code{\link[=fixErrorsIn]{fixErrorsIn()}}. Currently only errors fixed are for
\code{SpatialPolygons} using \code{buffer(..., width = 0)}.
\item Crop using \code{\link[=cropTo]{cropTo()}}
\item Project using \code{\link[=projectTo]{projectTo()}}
\item Mask using \code{\link[=maskTo]{maskTo()}}
\item Determine file name \code{\link[=determineFilename]{determineFilename()}}
\item Write that file name to disk, optionally \code{\link[=writeTo]{writeTo()}}
}
NOTE: checksumming does not occur during the post-processing stage, as
there are no file downloads. To achieve fast results, wrap
\code{prepInputs} with \code{Cache}
}
\section{Backwards compatibility with \code{rasterToMatch} and/or \code{studyArea} arguments}{
For backwards compatibility, \code{postProcess} will continue to allow passing
\code{rasterToMatch} and/or \code{studyArea} arguments. Depending on which of these
are passed, different things will happen to the \code{targetFile} located at \code{filename1}.
See \emph{Use cases} section in \code{\link[=postProcessTo]{postProcessTo()}} for post processing behaviour with
the new \code{from} and \code{to} arguments.
\subsection{If \code{targetFile} is a raster (\verb{Raster*}, or \code{SpatRaster}) object:}{
\tabular{lccc}{
\tab \code{rasterToMatch} \tab \code{studyArea} \tab Both \cr
\code{extent} \tab Yes \tab Yes \tab \code{rasterToMatch} \cr
\code{resolution} \tab Yes \tab No \tab \code{rasterToMatch} \cr
\code{projection} \tab Yes \tab No* \tab \code{rasterToMatch}* \cr
\code{alignment} \tab Yes \tab No \tab \code{rasterToMatch} \cr
\code{mask} \tab No** \tab Yes \tab \code{studyArea}** \cr
}
*Can be overridden with \code{useSAcrs}.
**Will mask with \code{NA}s from \code{rasterToMatch} if \code{maskWithRTM}.
}
\subsection{If \code{targetFile} is a vector (\verb{Spatial*}, \code{sf} or \code{SpatVector}) object:}{
\tabular{lccc}{
\tab \code{rasterToMatch} \tab \code{studyArea} \tab Both \cr
\code{extent} \tab Yes \tab Yes \tab \code{rasterToMatch} \cr
\code{resolution} \tab NA \tab NA \tab NA \cr
\code{projection} \tab Yes \tab No* \tab \code{rasterToMatch}* \cr
\code{alignment} \tab NA \tab NA \tab NA \cr
\code{mask} \tab No \tab Yes \tab \code{studyArea} \cr
}
*Can be overridden with \code{useSAcrs}
}
}
\examples{
if (requireNamespace("terra", quietly = TRUE) && requireNamespace("sf", quietly = TRUE)) {
library(reproducible)
od <- setwd(tempdir2())
# download a (spatial) file from remote url (which often is an archive) load into R
# need 3 files for this example; 1 from remote, 2 local
dPath <- file.path(tempdir2())
remoteTifUrl <- "https://github.com/rspatial/terra/raw/master/inst/ex/elev.tif"
localFileLuxSm <- system.file("ex/luxSmall.shp", package = "reproducible")
localFileLux <- system.file("ex/lux.shp", package = "terra")
# 1 step for each layer
# 1st step -- get study area
studyArea <- prepInputs(localFileLuxSm, fun = "terra::vect") # default is sf::st_read
# 2nd step: make the input data layer like the studyArea map
# Test only relevant if connected to internet -- so using try just in case
elevForStudy <- try(prepInputs(url = remoteTifUrl, to = studyArea, res = 250,
destinationPath = dPath))
# Alternate way, one step at a time. Must know each of these steps, and perform for each layer
\donttest{
dir.create(dPath, recursive = TRUE, showWarnings = FALSE)
file.copy(localFileLuxSm, file.path(dPath, basename(localFileLuxSm)))
studyArea2 <- terra::vect(localFileLuxSm)
if (!all(terra::is.valid(studyArea2))) studyArea2 <- terra::makeValid(studyArea2)
tf <- tempfile(fileext = ".tif")
download.file(url = remoteTifUrl, destfile = tf, mode = "wb")
Checksums(dPath, write = TRUE, files = tf)
elevOrig <- terra::rast(tf)
elevForStudy2 <- terra::project(elevOrig, terra::crs(studyArea2), res = 250) |>
terra::crop(studyArea2) |>
terra::mask(studyArea2)
isTRUE(all.equal(studyArea, studyArea2)) # Yes!
}
# sf class
studyAreaSmall <- prepInputs(localFileLuxSm)
studyAreas <- list()
studyAreas[["orig"]] <- prepInputs(localFileLux)
studyAreas[["reprojected"]] <- projectTo(studyAreas[["orig"]], studyAreaSmall)
studyAreas[["cropped"]] <- suppressWarnings(cropTo(studyAreas[["orig"]], studyAreaSmall))
studyAreas[["masked"]] <- suppressWarnings(maskTo(studyAreas[["orig"]], studyAreaSmall))
# SpatVector-- note: doesn't matter what class the "to" object is, only the "from"
studyAreas <- list()
studyAreas[["orig"]] <- prepInputs(localFileLux, fun = "terra::vect")
studyAreas[["reprojected"]] <- projectTo(studyAreas[["orig"]], studyAreaSmall)
studyAreas[["cropped"]] <- suppressWarnings(cropTo(studyAreas[["orig"]], studyAreaSmall))
studyAreas[["masked"]] <- suppressWarnings(maskTo(studyAreas[["orig"]], studyAreaSmall))
if (interactive()) {
par(mfrow = c(2,2));
out <- lapply(studyAreas, function(x) terra::plot(x))
}
setwd(od)
}
}
\seealso{
\code{prepInputs}
}
|
/man/postProcess.Rd
|
no_license
|
PredictiveEcology/reproducible
|
R
| false
| true
| 7,331
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/postProcess.R
\name{postProcess}
\alias{postProcess}
\alias{postProcess.list}
\alias{postProcess.default}
\title{Generic function to post process objects}
\usage{
postProcess(x, ...)
\method{postProcess}{list}(x, ...)
\method{postProcess}{default}(x, ...)
}
\arguments{
\item{x}{A GIS object to be post-processed,
e.g., a Spat* or sf* object. This can be provided as a
\code{rlang::quosure} or a normal R object.}
\item{...}{Additional arguments passed to methods. For \code{spatialClasses},
these are: \code{\link[=cropTo]{cropTo()}}, \code{\link[=fixErrorsIn]{fixErrorsIn()}},
\code{\link[=projectTo]{projectTo()}}, \code{\link[=maskTo]{maskTo()}},
\code{\link[=determineFilename]{determineFilename()}}, and \code{\link[=writeTo]{writeTo()}}.
Each of these may also pass \code{...} into other functions, like
\code{\link[=writeTo]{writeTo()}}.
This might include potentially important arguments like \code{datatype},
\code{format}. Also passed to \code{terra::project},
with likely important arguments such as \code{method = "bilinear"}.
See details.}
}
\value{
A GIS file (e.g., \code{RasterLayer}, \code{SpatRaster} etc.) that has been
appropriately cropped, reprojected, masked, depending on the inputs.
}
\description{
\if{html}{\figure{lifecycle-maturing.svg}{options: alt="maturing"}}
The method for GIS objects (terra \verb{Spat*} & sf classes) will
crop, reproject, and mask, in that order.
This is a wrapper for \code{\link[=cropTo]{cropTo()}}, \code{\link[=fixErrorsIn]{fixErrorsIn()}},
\code{\link[=projectTo]{projectTo()}}, \code{\link[=maskTo]{maskTo()}} and \code{\link[=writeTo]{writeTo()}},
with a required amount of data manipulation between these calls so that the crs match.
}
\section{Post processing sequence}{
If the \code{rasterToMatch} or \code{studyArea} are passed, then
the following sequence will occur:
\enumerate{
\item Fix errors \code{\link[=fixErrorsIn]{fixErrorsIn()}}. Currently only errors fixed are for
\code{SpatialPolygons} using \code{buffer(..., width = 0)}.
\item Crop using \code{\link[=cropTo]{cropTo()}}
\item Project using \code{\link[=projectTo]{projectTo()}}
\item Mask using \code{\link[=maskTo]{maskTo()}}
\item Determine file name \code{\link[=determineFilename]{determineFilename()}}
\item Write that file name to disk, optionally \code{\link[=writeTo]{writeTo()}}
}
NOTE: checksumming does not occur during the post-processing stage, as
there are no file downloads. To achieve fast results, wrap
\code{prepInputs} with \code{Cache}
}
\section{Backwards compatibility with \code{rasterToMatch} and/or \code{studyArea} arguments}{
For backwards compatibility, \code{postProcess} will continue to allow passing
\code{rasterToMatch} and/or \code{studyArea} arguments. Depending on which of these
are passed, different things will happen to the \code{targetFile} located at \code{filename1}.
See \emph{Use cases} section in \code{\link[=postProcessTo]{postProcessTo()}} for post processing behaviour with
the new \code{from} and \code{to} arguments.
\subsection{If \code{targetFile} is a raster (\verb{Raster*}, or \code{SpatRaster}) object:}{
\tabular{lccc}{
\tab \code{rasterToMatch} \tab \code{studyArea} \tab Both \cr
\code{extent} \tab Yes \tab Yes \tab \code{rasterToMatch} \cr
\code{resolution} \tab Yes \tab No \tab \code{rasterToMatch} \cr
\code{projection} \tab Yes \tab No* \tab \code{rasterToMatch}* \cr
\code{alignment} \tab Yes \tab No \tab \code{rasterToMatch} \cr
\code{mask} \tab No** \tab Yes \tab \code{studyArea}** \cr
}
*Can be overridden with \code{useSAcrs}.
**Will mask with \code{NA}s from \code{rasterToMatch} if \code{maskWithRTM}.
}
\subsection{If \code{targetFile} is a vector (\verb{Spatial*}, \code{sf} or \code{SpatVector}) object:}{
\tabular{lccc}{
\tab \code{rasterToMatch} \tab \code{studyArea} \tab Both \cr
\code{extent} \tab Yes \tab Yes \tab \code{rasterToMatch} \cr
\code{resolution} \tab NA \tab NA \tab NA \cr
\code{projection} \tab Yes \tab No* \tab \code{rasterToMatch}* \cr
\code{alignment} \tab NA \tab NA \tab NA \cr
\code{mask} \tab No \tab Yes \tab \code{studyArea} \cr
}
*Can be overridden with \code{useSAcrs}
}
}
\examples{
if (requireNamespace("terra", quietly = TRUE) && requireNamespace("sf", quietly = TRUE)) {
library(reproducible)
od <- setwd(tempdir2())
# download a (spatial) file from remote url (which often is an archive) load into R
# need 3 files for this example; 1 from remote, 2 local
dPath <- file.path(tempdir2())
remoteTifUrl <- "https://github.com/rspatial/terra/raw/master/inst/ex/elev.tif"
localFileLuxSm <- system.file("ex/luxSmall.shp", package = "reproducible")
localFileLux <- system.file("ex/lux.shp", package = "terra")
# 1 step for each layer
# 1st step -- get study area
studyArea <- prepInputs(localFileLuxSm, fun = "terra::vect") # default is sf::st_read
# 2nd step: make the input data layer like the studyArea map
# Test only relevant if connected to internet -- so using try just in case
elevForStudy <- try(prepInputs(url = remoteTifUrl, to = studyArea, res = 250,
destinationPath = dPath))
# Alternate way, one step at a time. Must know each of these steps, and perform for each layer
\donttest{
dir.create(dPath, recursive = TRUE, showWarnings = FALSE)
file.copy(localFileLuxSm, file.path(dPath, basename(localFileLuxSm)))
studyArea2 <- terra::vect(localFileLuxSm)
if (!all(terra::is.valid(studyArea2))) studyArea2 <- terra::makeValid(studyArea2)
tf <- tempfile(fileext = ".tif")
download.file(url = remoteTifUrl, destfile = tf, mode = "wb")
Checksums(dPath, write = TRUE, files = tf)
elevOrig <- terra::rast(tf)
elevForStudy2 <- terra::project(elevOrig, terra::crs(studyArea2), res = 250) |>
terra::crop(studyArea2) |>
terra::mask(studyArea2)
isTRUE(all.equal(studyArea, studyArea2)) # Yes!
}
# sf class
studyAreaSmall <- prepInputs(localFileLuxSm)
studyAreas <- list()
studyAreas[["orig"]] <- prepInputs(localFileLux)
studyAreas[["reprojected"]] <- projectTo(studyAreas[["orig"]], studyAreaSmall)
studyAreas[["cropped"]] <- suppressWarnings(cropTo(studyAreas[["orig"]], studyAreaSmall))
studyAreas[["masked"]] <- suppressWarnings(maskTo(studyAreas[["orig"]], studyAreaSmall))
# SpatVector-- note: doesn't matter what class the "to" object is, only the "from"
studyAreas <- list()
studyAreas[["orig"]] <- prepInputs(localFileLux, fun = "terra::vect")
studyAreas[["reprojected"]] <- projectTo(studyAreas[["orig"]], studyAreaSmall)
studyAreas[["cropped"]] <- suppressWarnings(cropTo(studyAreas[["orig"]], studyAreaSmall))
studyAreas[["masked"]] <- suppressWarnings(maskTo(studyAreas[["orig"]], studyAreaSmall))
if (interactive()) {
par(mfrow = c(2,2));
out <- lapply(studyAreas, function(x) terra::plot(x))
}
setwd(od)
}
}
\seealso{
\code{prepInputs}
}
|
# Auto-generated fuzz / valgrind regression test: feed degenerate and extreme
# numeric inputs (empty Rs, huge, tiny and denormal temperatures) into the
# internal meteor:::ET0_Makkink routine and print the structure of the result.
# Intentionally not asserting values -- the point is "does not crash / leak".
testlist <- list(Rs = numeric(0), atmp = 0, relh = -1.72131968218895e+83, temp = c(8.5728629954997e-312, 1.52156593271487e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161 ))
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result)
|
/meteor/inst/testfiles/ET0_Makkink/AFL_ET0_Makkink/ET0_Makkink_valgrind_files/1615853880-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 659
|
r
|
# Auto-generated fuzz / valgrind regression test: feed degenerate and extreme
# numeric inputs (empty Rs, huge, tiny and denormal temperatures) into the
# internal meteor:::ET0_Makkink routine and print the structure of the result.
# Intentionally not asserting values -- the point is "does not crash / leak".
testlist <- list(Rs = numeric(0), atmp = 0, relh = -1.72131968218895e+83, temp = c(8.5728629954997e-312, 1.52156593271487e+82, 8.96970809549085e-158, -1.3258495253834e-113, 2.79620616433656e-119, -6.80033518839696e+41, 2.68298522855314e-211, 1444042902784.06, 6.68889884134308e+51, -4.05003163986346e-308, -3.52601820453991e+43, -1.49815227045093e+197, -2.61605817623304e+76, -1.18078903777423e-90, 1.86807199752012e+112, -5.58551357556946e+160, 2.00994342527714e-162, 1.81541609400943e-79, 7.89363005545926e+139, 2.3317908961407e-93, 2.16562581831091e+161 ))
result <- do.call(meteor:::ET0_Makkink,testlist)
str(result)
|
#' merge images into a multiChannel antsImage
#'
#' merge images into a multiChannel antsImage
#'
#' @param imageList a list of antsImage objects to merge
#' @return A multiChannel antsImage object
#' @author Duda, JT
#' @examples
#' dims = c(30, 30)
#' n = prod(dims)
#' r <- floor( seq(n) / (n) * 255 )
#' dim(r) <- dims
#' arr = r
#' r <- as.antsImage(r)
#' g <- r*0
#' b <- r*0
#' rgbImage = mergeChannels( list(r,g,b) )
#' testthat::expect_error(mergeChannels(list(arr, arr)))
#'
#' @export mergeChannels
mergeChannels <- function(imageList) {
  # Merge a list of single-channel antsImage objects into one multi-channel
  # antsImage: validate every element and populate a missing @components
  # slot before delegating to the native routine.
  #
  # imageList: list of antsImage objects.
  # returns:   a multi-channel antsImage.
  nImages <- length(imageList)
  for (i in seq_len(nImages)) {  # seq_len() is safe when the list is empty
    if (!is.antsImage(imageList[[i]])) {
      stop("list may only contain 'antsImage' objects")
    }
    if (length(imageList[[i]]@components) == 0) {
      imageList[[i]]@components <- as.integer(1)
    }
  }
  # BUG FIX: .Call()'s argument is PACKAGE (upper case). The original passed
  # 'package = "ANTsRCore"', which .Call forwards to the native routine as an
  # extra argument instead of restricting the symbol lookup to ANTsRCore.
  .Call("mergeChannels", imageList, PACKAGE = "ANTsRCore")
}
|
/R/mergeChannels.R
|
permissive
|
ANTsX/ANTsRCore
|
R
| false
| false
| 930
|
r
|
#' merge images into a multiChannel antsImage
#'
#' merge images into a multiChannel antsImage
#'
#' @param imageList a list of antsImage objects to merge
#' @return A multiChannel antsImage object
#' @author Duda, JT
#' @examples
#' dims = c(30, 30)
#' n = prod(dims)
#' r <- floor( seq(n) / (n) * 255 )
#' dim(r) <- dims
#' arr = r
#' r <- as.antsImage(r)
#' g <- r*0
#' b <- r*0
#' rgbImage = mergeChannels( list(r,g,b) )
#' testthat::expect_error(mergeChannels(list(arr, arr)))
#'
#' @export mergeChannels
mergeChannels <- function(imageList) {
  # Merge a list of single-channel antsImage objects into one multi-channel
  # antsImage: validate every element and populate a missing @components
  # slot before delegating to the native routine.
  #
  # imageList: list of antsImage objects.
  # returns:   a multi-channel antsImage.
  nImages <- length(imageList)
  for (i in seq_len(nImages)) {  # seq_len() is safe when the list is empty
    if (!is.antsImage(imageList[[i]])) {
      stop("list may only contain 'antsImage' objects")
    }
    if (length(imageList[[i]]@components) == 0) {
      imageList[[i]]@components <- as.integer(1)
    }
  }
  # BUG FIX: .Call()'s argument is PACKAGE (upper case). The original passed
  # 'package = "ANTsRCore"', which .Call forwards to the native routine as an
  # extra argument instead of restricting the symbol lookup to ANTsRCore.
  .Call("mergeChannels", imageList, PACKAGE = "ANTsRCore")
}
|
library(leaflet)
library(shiny)
library(shinyjs)
library(sp)
library(raster)
library(rgdal)
library(rgeos)
library(Cairo)
library(RColorBrewer)

### LOAD ADDING FUNCTIONS ------------------------------------------------------
fls <- list.files(
  path = "R",
  pattern = "\\.R$",
  full.names = TRUE
)
# Source every helper file into the global environment.
# invisible(lapply()) replaces the original 'tmp <- sapply(...); rm(tmp)'
# idiom: sapply() is discouraged in non-interactive code and the captured
# result was discarded anyway.
invisible(lapply(fls, source, .GlobalEnv))
rm(fls)

### LOAD DATASETS --------------------------------------------------------------
data_species <- readRDS("data/infos/species_list.rds")
data_climate <- readRDS("data/infos/variables_list.rds")
data_ecosystem <- readRDS("data/infos/ecosystem_list.rds")
data_network <- readRDS("data/infos/network_list.rds")
# "Net01" is the vulnerability index; split it off from the other network
# variables so each tab gets only its own entries.
data_vulnerability <- data_network[which(data_network[, "code"] == "Net01"), ]
data_network <- data_network[which(data_network[, "code"] != "Net01"), ]
grd <- readRDS("data/background/grid.rds")
grd_tundra <- readRDS("data/background/grid_tundra.rds")

### AVAILABLE RAMP COLORS ------------------------------------------------------
# RColorBrewer palettes together with their maximum number of colours.
rampcolors <- data.frame(
  palette = rownames(brewer.pal.info),
  maxcolors = brewer.pal.info[, "maxcolors"],
  stringsAsFactors = FALSE
)
ui <- navbarPage(
title = "Tundra Nunavik",
id = "nav",
collapsible = TRUE,
### HOME PANEL ---------------------------------------------------------------
tabPanel(
title = icon("home"),
value = "tab_home",
includeHTML("includes/home-page.html")
),
### CLIMATE CHANGE PANEL -----------------------------------------------------
tabPanel(
title = "Climate change",
value = "tab_climate",
div(class = "outer",
useShinyjs(),
tags$head(
includeCSS("css/style.css"),
includeCSS("css/color-gradients.css"),
tags$link(
rel = "stylesheet",
type = "text/css",
href = "https://use.fontawesome.com/releases/v5.0.6/css/all.css"
),
includeCSS("css/font-awesome-animation.min.css"),
includeScript("js/appscript.js")
),
leafletOutput(
outputId = "map_climate",
width = "100%",
height = "100%"
),
absolutePanel(
id = "panel_climate",
class = "panel panel-default",
fixed = TRUE,
draggable = TRUE,
top = 60,
left = "auto",
right = 5,
bottom = "auto",
width = 340,
height = "auto",
HTML('<h4>Climate change interface</h4><hr />'),
radioButtons(
inputId = "language_climate",
label = "Select the language:",
choices = list(
"English" = "english",
"French" = "french"
),
selected = "english",
inline = FALSE,
width = 300
),
selectInput(
inputId = "select_climate",
label = "Select the variable:",
choices = c("Select a variable" = "", sort(unique(as.character(data_climate[, "english"]))))
),
radioButtons(
inputId = "horizon_climate",
label = "Select the horizon:",
choices = c("1981-2010", "2011-2040", "2041-2070", "2071-2100"),
selected = "1981-2010",
inline = FALSE,
width = 300
),
radioButtons(
inputId = "scenario_climate",
label = "Select the RCP:",
choices = list(
"RCP4.5" = "rcp45_climate",
"RCP8.5" = "rcp85_climate"
),
selected = character(0),
inline = FALSE,
width = 300
),
radioButtons(
inputId = "infos_climate",
label = "Information to be displayed:",
choices = list(
"Climate normals" = "normals_climate",
"Uncertainties" = "uncertainties_climate",
"Anomalies" = "anomalies_climate"
),
selected = "normals_climate",
inline = FALSE,
width = 300
),
HTML(
paste0(
"<h5>Select a color palette:</h5>",
"<div class=\"color-picker\">",
" <div id=\"grad_climate\">",
" <div id=\"color-sel_climate\" class=\"YlGnBu\"></div>",
" <div id=\"color-arrow_climate\">",
" <i class=\"fa fa-caret-down\"></i>",
" </div>",
" </div>",
" <div id=\"menu_climate\">"
)
),
includeHTML("includes/color-picker.html"),
HTML(
paste0(
" </div>",
"</div>"
)
),
textInput(
inputId = "color_climate",
label = "",
value = ""
),
HTML(
paste0(
'<hr />',
'<div class="buttons">',
'<div id="btn-climate" class="btn-png btn-left">',
'<i class="fa fa-download"></i>',
'Download Map',
'</div>',
'<div id="help-climate" class="btn-png btn-right">',
'<i class="fa fa-info-circle"></i>',
'Informations',
'</div>',
'</div>'
)
)
)
)
),
### SPECIES DISTRIBUTION PANEL -----------------------------------------------
tabPanel(
title = "Species distribution",
value = "tab_species",
div(class = "outer",
leafletOutput(
outputId = "map_species",
width = "100%",
height = "100%"
),
absolutePanel(
id = "panel_species",
class = "panel panel-default",
fixed = TRUE,
draggable = TRUE,
top = 60,
left = "auto",
right = 5,
bottom = "auto",
width = 340,
height = "auto",
HTML('<h4>Species distribution interface</h4><hr />'),
radioButtons(
inputId = "language_species",
label = "Search species by:",
choices = list(
"English name" = "common_en",
"French name" = "common_fr",
"Scientific name" = "latin",
"Inuktitut name" = "inuktitut"
),
selected = "latin",
inline = FALSE,
width = 300
),
radioButtons(
inputId = "class_species",
label = "Select the species:",
choices = list(
"Birds" = "Aves",
"Mammals" = "Mammalia"
),
selected = "Aves",
inline = FALSE,
width = 300
),
selectInput(
inputId = "select_species",
label = NULL,
choices = c("Select a species" = "", sort(unique(as.character(data_species[, "latin"]))))
),
radioButtons(
inputId = "horizon_species",
label = "Select the horizon:",
choices = c("1981-2010", "2011-2040", "2041-2070", "2071-2100"),
selected = "1981-2010",
inline = FALSE,
width = 300
),
radioButtons(
inputId = "scenario_species",
label = "Select the RCP:",
choices = list(
"RCP4.5" = "rcp45_species",
"RCP8.5" = "rcp85_species"
),
selected = character(0),
inline = FALSE,
width = 300
),
radioButtons(
inputId = "infos_species",
label = "Information to be displayed:",
choices = list(
"Observations" = "observations_species",
"Binaries" = "binaries_species",
"Probabilities" = "probabilities_species",
"Uncertainties" = "uncertainties_species"
),
selected = "observations_species",
inline = FALSE,
width = 300
),
HTML(
paste0(
"<h5>Select a color palette:</h5>",
"<div class=\"color-picker\">",
" <div id=\"grad_species\">",
" <div id=\"color-sel_species\" class=\"YlGnBu\"></div>",
" <div id=\"color-arrow_species\">",
" <i class=\"fa fa-caret-down\"></i>",
" </div>",
" </div>",
" <div id=\"menu_species\">"
)
),
includeHTML("includes/color-picker.html"),
HTML(
paste0(
" </div>",
"</div>"
)
),
textInput(
inputId = "color_species",
label = "",
value = ""
),
HTML(
paste0(
'<hr />',
'<div class="buttons">',
'<div id="btn-species" class="btn-png btn-left">',
'<i class="fa fa-download"></i>',
'Download Map',
'</div>',
'<div id="help-species" class="btn-png btn-right">',
'<i class="fa fa-info-circle"></i>',
'Informations',
'</div>',
'</div>'
)
)
)
)
),
### BIODIVERSITY CHANGES PANEL -----------------------------------------------
tabPanel(
title = "Biodiversity distribution",
value = "tab_ecosystem",
div(class = "outer",
leafletOutput(
outputId = "map_ecosystem",
width = "100%",
height = "100%"
),
absolutePanel(
id = "panel_ecosystem",
class = "panel panel-default",
fixed = TRUE,
draggable = TRUE,
top = 60,
left = "auto",
right = 5,
bottom = "auto",
width = 340,
height = "auto",
HTML('<h4>Biodiversity interface</h4><hr />'),
radioButtons(
inputId = "class_ecosystem",
label = "Select the species group:",
choices = list(
"All species" = "total",
"Birds" = "birds",
"Mammals" = "mammals"
),
selected = "total",
inline = FALSE,
width = 300
),
radioButtons(
inputId = "horizon_ecosystem",
label = "Select the horizon:",
choices = c("1981-2010", "2011-2040", "2041-2070", "2071-2100"),
selected = "1981-2010",
inline = FALSE,
width = 300
),
radioButtons(
inputId = "scenario_ecosystem",
label = "Select the RCP:",
choices = list(
"RCP4.5" = "rcp45_ecosystem",
"RCP8.5" = "rcp85_ecosystem"
),
selected = character(0),
inline = FALSE,
width = 300
),
radioButtons(
inputId = "infos_ecosystem",
label = "Information to be displayed:",
choices = c(
"Species richness" = "richness",
"Species gains" = "gains",
"Species losses" = "losses",
"Turnover" = "turnover"
),
selected = "richness",
inline = FALSE,
width = 300
),
HTML(
paste0(
"<h5>Select a color palette:</h5>",
"<div class=\"color-picker\">",
" <div id=\"grad_ecosystem\">",
" <div id=\"color-sel_ecosystem\" class=\"YlGnBu\"></div>",
" <div id=\"color-arrow_ecosystem\">",
" <i class=\"fa fa-caret-down\"></i>",
" </div>",
" </div>",
" <div id=\"menu_ecosystem\">"
)
),
includeHTML("includes/color-picker.html"),
HTML(
paste0(
" </div>",
"</div>"
)
),
textInput(
inputId = "color_ecosystem",
label = "",
value = ""
),
HTML(
paste0(
'<hr />',
'<div class="buttons">',
'<div id="btn-ecosystem" class="btn-png btn-left">',
'<i class="fa fa-download"></i>',
'Download Map',
'</div>',
'<div id="help-ecosystem" class="btn-png btn-right">',
'<i class="fa fa-info-circle"></i>',
'Informations',
'</div>',
'</div>'
)
)
)
)
),
### NETWORK CHANGES PANEL ----------------------------------------------------
tabPanel(
title = "Trophic network",
value = "tab_network",
div(class = "outer",
leafletOutput(
outputId = "map_network",
width = "100%",
height = "100%"
),
absolutePanel(
id = "panel_network",
class = "panel panel-default",
fixed = TRUE,
draggable = TRUE,
top = 60,
left = "auto",
right = 5,
bottom = "auto",
width = 340,
height = "auto",
HTML('<h4>Trophic network interface</h4><hr />'),
radioButtons(
inputId = "language_network",
label = "Select the language:",
choices = list(
"English" = "english",
"French" = "french"
),
selected = "english",
inline = FALSE,
width = 300
),
selectInput(
inputId = "select_network",
label = "Select the variable:",
choices = c("Select a variable" = "", sort(unique(as.character(data_network[, "english"]))))
),
radioButtons(
inputId = "horizon_network",
label = "Select the horizon:",
choices = c("1981-2010", "2011-2040", "2041-2070", "2071-2100"),
selected = "1981-2010",
inline = FALSE,
width = 300
),
radioButtons(
inputId = "scenario_network",
label = "Select the RCP:",
choices = list(
"RCP4.5" = "rcp45_network",
"RCP8.5" = "rcp85_network"
),
selected = character(0),
inline = FALSE,
width = 300
),
radioButtons(
inputId = "infos_network",
label = "Information to be displayed:",
choices = list(
"Values" = "values_network",
"Anomalies" = "anomalies_network"
),
selected = "values_network",
inline = FALSE,
width = 300
),
HTML(
paste0(
"<h5>Select a color palette:</h5>",
"<div class=\"color-picker\">",
" <div id=\"grad_network\">",
" <div id=\"color-sel_network\" class=\"YlGnBu\"></div>",
" <div id=\"color-arrow_network\">",
" <i class=\"fa fa-caret-down\"></i>",
" </div>",
" </div>",
" <div id=\"menu_network\">"
)
),
includeHTML("includes/color-picker.html"),
HTML(
paste0(
" </div>",
"</div>"
)
),
textInput(
inputId = "color_network",
label = "",
value = ""
),
HTML(
paste0(
'<hr />',
'<div class="buttons">',
'<div id="btn-network" class="btn-png btn-left">',
'<i class="fa fa-download"></i>',
'Download Map',
'</div>',
'<div id="help-network" class="btn-png btn-right">',
'<i class="fa fa-info-circle"></i>',
'Informations',
'</div>',
'</div>'
)
)
)
)
),
### NETWORK CHANGES PANEL ----------------------------------------------------
tabPanel(
title = "Vulnerability index",
value = "tab_vulnerability",
div(class = "outer",
leafletOutput(
outputId = "map_vulnerability",
width = "100%",
height = "100%"
),
absolutePanel(
id = "panel_vulnerability",
class = "panel panel-default",
fixed = TRUE,
draggable = TRUE,
top = 60,
left = "auto",
right = 5,
bottom = "auto",
width = 340,
height = "auto",
HTML('<h4>Vulnerability index</h4><hr />'),
radioButtons(
inputId = "horizon_vulnerability",
label = "Select the horizon:",
choices = c("2011-2040", "2041-2070", "2071-2100"),
selected = "2011-2040",
inline = FALSE,
width = 300
),
radioButtons(
inputId = "scenario_vulnerability",
label = "Select the RCP:",
choices = list(
"RCP4.5" = "rcp45_vulnerability",
"RCP8.5" = "rcp85_vulnerability"
),
selected = character(0),
inline = FALSE,
width = 300
),
HTML(
paste0(
"<h5>Select a color palette:</h5>",
"<div class=\"color-picker\">",
" <div id=\"grad_vulnerability\">",
" <div id=\"color-sel_vulnerability\" class=\"YlGnBu\"></div>",
" <div id=\"color-arrow_vulnerability\">",
" <i class=\"fa fa-caret-down\"></i>",
" </div>",
" </div>",
" <div id=\"menu_vulnerability\">"
)
),
includeHTML("includes/color-picker.html"),
HTML(
paste0(
" </div>",
"</div>"
)
),
textInput(
inputId = "color_vulnerability",
label = "",
value = ""
),
HTML(
paste0(
'<hr />',
'<div class="buttons">',
'<div id="btn-vulnerability" class="btn-png btn-left">',
'<i class="fa fa-download"></i>',
'Download Map',
'</div>',
'<div id="help-vulnerability" class="btn-png btn-right">',
'<i class="fa fa-info-circle"></i>',
'Informations',
'</div>',
'</div>'
)
)
)
)
)
### GET CODE PANEL -----------------------------------------------------------
# Added with jQuery
)
|
/ui.R
|
no_license
|
ahasverus/bioclimaticatlas
|
R
| false
| false
| 18,532
|
r
|
library(leaflet)
library(shiny)
library(shinyjs)
library(sp)
library(raster)
library(rgdal)
library(rgeos)
library(Cairo)
library(RColorBrewer)
### LOAD ADDING FUNCTIONS ------------------------------------------------------
fls <- list.files(
path = "R",
pattern = "\\.R$",
full.names = TRUE
)
tmp <- sapply(fls, source, .GlobalEnv)
rm(list = c("fls", "tmp"))
### LOAD DATASETS --------------------------------------------------------------
data_species <- readRDS("data/infos/species_list.rds")
data_climate <- readRDS("data/infos/variables_list.rds")
data_ecosystem <- readRDS("data/infos/ecosystem_list.rds")
data_network <- readRDS("data/infos/network_list.rds")
data_vulnerability <- data_network[which(data_network[ , "code"] == "Net01"), ]
data_network <- data_network[which(data_network[ , "code"] != "Net01"), ]
grd <- readRDS("data/background/grid.rds")
grd_tundra <- readRDS("data/background/grid_tundra.rds")
### AVAILABLE RAMP COLORS ------------------------------------------------------
rampcolors <- data.frame(
palette = rownames(brewer.pal.info),
maxcolors = brewer.pal.info[ , "maxcolors"],
stringsAsFactors = FALSE
)
ui <- navbarPage(
title = "Tundra Nunavik",
id = "nav",
collapsible = TRUE,
### HOME PANEL ---------------------------------------------------------------
tabPanel(
title = icon("home"),
value = "tab_home",
includeHTML("includes/home-page.html")
),
### CLIMATE CHANGE PANEL -----------------------------------------------------
tabPanel(
title = "Climate change",
value = "tab_climate",
div(class = "outer",
useShinyjs(),
tags$head(
includeCSS("css/style.css"),
includeCSS("css/color-gradients.css"),
tags$link(
rel = "stylesheet",
type = "text/css",
href = "https://use.fontawesome.com/releases/v5.0.6/css/all.css"
),
includeCSS("css/font-awesome-animation.min.css"),
includeScript("js/appscript.js")
),
leafletOutput(
outputId = "map_climate",
width = "100%",
height = "100%"
),
absolutePanel(
id = "panel_climate",
class = "panel panel-default",
fixed = TRUE,
draggable = TRUE,
top = 60,
left = "auto",
right = 5,
bottom = "auto",
width = 340,
height = "auto",
HTML('<h4>Climate change interface</h4><hr />'),
radioButtons(
inputId = "language_climate",
label = "Select the language:",
choices = list(
"English" = "english",
"French" = "french"
),
selected = "english",
inline = FALSE,
width = 300
),
selectInput(
inputId = "select_climate",
label = "Select the variable:",
choices = c("Select a variable" = "", sort(unique(as.character(data_climate[, "english"]))))
),
radioButtons(
inputId = "horizon_climate",
label = "Select the horizon:",
choices = c("1981-2010", "2011-2040", "2041-2070", "2071-2100"),
selected = "1981-2010",
inline = FALSE,
width = 300
),
radioButtons(
inputId = "scenario_climate",
label = "Select the RCP:",
choices = list(
"RCP4.5" = "rcp45_climate",
"RCP8.5" = "rcp85_climate"
),
selected = character(0),
inline = FALSE,
width = 300
),
radioButtons(
inputId = "infos_climate",
label = "Information to be displayed:",
choices = list(
"Climate normals" = "normals_climate",
"Uncertainties" = "uncertainties_climate",
"Anomalies" = "anomalies_climate"
),
selected = "normals_climate",
inline = FALSE,
width = 300
),
HTML(
paste0(
"<h5>Select a color palette:</h5>",
"<div class=\"color-picker\">",
" <div id=\"grad_climate\">",
" <div id=\"color-sel_climate\" class=\"YlGnBu\"></div>",
" <div id=\"color-arrow_climate\">",
" <i class=\"fa fa-caret-down\"></i>",
" </div>",
" </div>",
" <div id=\"menu_climate\">"
)
),
includeHTML("includes/color-picker.html"),
HTML(
paste0(
" </div>",
"</div>"
)
),
textInput(
inputId = "color_climate",
label = "",
value = ""
),
HTML(
paste0(
'<hr />',
'<div class="buttons">',
'<div id="btn-climate" class="btn-png btn-left">',
'<i class="fa fa-download"></i>',
'Download Map',
'</div>',
'<div id="help-climate" class="btn-png btn-right">',
'<i class="fa fa-info-circle"></i>',
'Informations',
'</div>',
'</div>'
)
)
)
)
),
### SPECIES DISTRIBUTION PANEL -----------------------------------------------
tabPanel(
title = "Species distribution",
value = "tab_species",
div(class = "outer",
leafletOutput(
outputId = "map_species",
width = "100%",
height = "100%"
),
absolutePanel(
id = "panel_species",
class = "panel panel-default",
fixed = TRUE,
draggable = TRUE,
top = 60,
left = "auto",
right = 5,
bottom = "auto",
width = 340,
height = "auto",
HTML('<h4>Species distribution interface</h4><hr />'),
radioButtons(
inputId = "language_species",
label = "Search species by:",
choices = list(
"English name" = "common_en",
"French name" = "common_fr",
"Scientific name" = "latin",
"Inuktitut name" = "inuktitut"
),
selected = "latin",
inline = FALSE,
width = 300
),
radioButtons(
inputId = "class_species",
label = "Select the species:",
choices = list(
"Birds" = "Aves",
"Mammals" = "Mammalia"
),
selected = "Aves",
inline = FALSE,
width = 300
),
selectInput(
inputId = "select_species",
label = NULL,
choices = c("Select a species" = "", sort(unique(as.character(data_species[, "latin"]))))
),
radioButtons(
inputId = "horizon_species",
label = "Select the horizon:",
choices = c("1981-2010", "2011-2040", "2041-2070", "2071-2100"),
selected = "1981-2010",
inline = FALSE,
width = 300
),
radioButtons(
inputId = "scenario_species",
label = "Select the RCP:",
choices = list(
"RCP4.5" = "rcp45_species",
"RCP8.5" = "rcp85_species"
),
selected = character(0),
inline = FALSE,
width = 300
),
radioButtons(
inputId = "infos_species",
label = "Information to be displayed:",
choices = list(
"Observations" = "observations_species",
"Binaries" = "binaries_species",
"Probabilities" = "probabilities_species",
"Uncertainties" = "uncertainties_species"
),
selected = "observations_species",
inline = FALSE,
width = 300
),
HTML(
paste0(
"<h5>Select a color palette:</h5>",
"<div class=\"color-picker\">",
" <div id=\"grad_species\">",
" <div id=\"color-sel_species\" class=\"YlGnBu\"></div>",
" <div id=\"color-arrow_species\">",
" <i class=\"fa fa-caret-down\"></i>",
" </div>",
" </div>",
" <div id=\"menu_species\">"
)
),
includeHTML("includes/color-picker.html"),
HTML(
paste0(
" </div>",
"</div>"
)
),
textInput(
inputId = "color_species",
label = "",
value = ""
),
HTML(
paste0(
'<hr />',
'<div class="buttons">',
'<div id="btn-species" class="btn-png btn-left">',
'<i class="fa fa-download"></i>',
'Download Map',
'</div>',
'<div id="help-species" class="btn-png btn-right">',
'<i class="fa fa-info-circle"></i>',
'Informations',
'</div>',
'</div>'
)
)
)
)
),
### BIODIVERSITY CHANGES PANEL -----------------------------------------------
tabPanel(
title = "Biodiversity distribution",
value = "tab_ecosystem",
div(class = "outer",
leafletOutput(
outputId = "map_ecosystem",
width = "100%",
height = "100%"
),
absolutePanel(
id = "panel_ecosystem",
class = "panel panel-default",
fixed = TRUE,
draggable = TRUE,
top = 60,
left = "auto",
right = 5,
bottom = "auto",
width = 340,
height = "auto",
HTML('<h4>Biodiversity interface</h4><hr />'),
radioButtons(
inputId = "class_ecosystem",
label = "Select the species group:",
choices = list(
"All species" = "total",
"Birds" = "birds",
"Mammals" = "mammals"
),
selected = "total",
inline = FALSE,
width = 300
),
radioButtons(
inputId = "horizon_ecosystem",
label = "Select the horizon:",
choices = c("1981-2010", "2011-2040", "2041-2070", "2071-2100"),
selected = "1981-2010",
inline = FALSE,
width = 300
),
radioButtons(
inputId = "scenario_ecosystem",
label = "Select the RCP:",
choices = list(
"RCP4.5" = "rcp45_ecosystem",
"RCP8.5" = "rcp85_ecosystem"
),
selected = character(0),
inline = FALSE,
width = 300
),
radioButtons(
inputId = "infos_ecosystem",
label = "Information to be displayed:",
choices = c(
"Species richness" = "richness",
"Species gains" = "gains",
"Species losses" = "losses",
"Turnover" = "turnover"
),
selected = "richness",
inline = FALSE,
width = 300
),
HTML(
paste0(
"<h5>Select a color palette:</h5>",
"<div class=\"color-picker\">",
" <div id=\"grad_ecosystem\">",
" <div id=\"color-sel_ecosystem\" class=\"YlGnBu\"></div>",
" <div id=\"color-arrow_ecosystem\">",
" <i class=\"fa fa-caret-down\"></i>",
" </div>",
" </div>",
" <div id=\"menu_ecosystem\">"
)
),
includeHTML("includes/color-picker.html"),
HTML(
paste0(
" </div>",
"</div>"
)
),
textInput(
inputId = "color_ecosystem",
label = "",
value = ""
),
HTML(
paste0(
'<hr />',
'<div class="buttons">',
'<div id="btn-ecosystem" class="btn-png btn-left">',
'<i class="fa fa-download"></i>',
'Download Map',
'</div>',
'<div id="help-ecosystem" class="btn-png btn-right">',
'<i class="fa fa-info-circle"></i>',
'Informations',
'</div>',
'</div>'
)
)
)
)
),
### NETWORK CHANGES PANEL ----------------------------------------------------
tabPanel(
title = "Trophic network",
value = "tab_network",
div(class = "outer",
leafletOutput(
outputId = "map_network",
width = "100%",
height = "100%"
),
absolutePanel(
id = "panel_network",
class = "panel panel-default",
fixed = TRUE,
draggable = TRUE,
top = 60,
left = "auto",
right = 5,
bottom = "auto",
width = 340,
height = "auto",
HTML('<h4>Trophic network interface</h4><hr />'),
radioButtons(
inputId = "language_network",
label = "Select the language:",
choices = list(
"English" = "english",
"French" = "french"
),
selected = "english",
inline = FALSE,
width = 300
),
selectInput(
inputId = "select_network",
label = "Select the variable:",
choices = c("Select a variable" = "", sort(unique(as.character(data_network[, "english"]))))
),
radioButtons(
inputId = "horizon_network",
label = "Select the horizon:",
choices = c("1981-2010", "2011-2040", "2041-2070", "2071-2100"),
selected = "1981-2010",
inline = FALSE,
width = 300
),
radioButtons(
inputId = "scenario_network",
label = "Select the RCP:",
choices = list(
"RCP4.5" = "rcp45_network",
"RCP8.5" = "rcp85_network"
),
selected = character(0),
inline = FALSE,
width = 300
),
radioButtons(
inputId = "infos_network",
label = "Information to be displayed:",
choices = list(
"Values" = "values_network",
"Anomalies" = "anomalies_network"
),
selected = "values_network",
inline = FALSE,
width = 300
),
HTML(
paste0(
"<h5>Select a color palette:</h5>",
"<div class=\"color-picker\">",
" <div id=\"grad_network\">",
" <div id=\"color-sel_network\" class=\"YlGnBu\"></div>",
" <div id=\"color-arrow_network\">",
" <i class=\"fa fa-caret-down\"></i>",
" </div>",
" </div>",
" <div id=\"menu_network\">"
)
),
includeHTML("includes/color-picker.html"),
HTML(
paste0(
" </div>",
"</div>"
)
),
textInput(
inputId = "color_network",
label = "",
value = ""
),
HTML(
paste0(
'<hr />',
'<div class="buttons">',
'<div id="btn-network" class="btn-png btn-left">',
'<i class="fa fa-download"></i>',
'Download Map',
'</div>',
'<div id="help-network" class="btn-png btn-right">',
'<i class="fa fa-info-circle"></i>',
'Informations',
'</div>',
'</div>'
)
)
)
)
),
### VULNERABILITY INDEX PANEL --------------------------------------------------
tabPanel(
title = "Vulnerability index",
value = "tab_vulnerability",
div(class = "outer",
leafletOutput(
outputId = "map_vulnerability",
width = "100%",
height = "100%"
),
absolutePanel(
id = "panel_vulnerability",
class = "panel panel-default",
fixed = TRUE,
draggable = TRUE,
top = 60,
left = "auto",
right = 5,
bottom = "auto",
width = 340,
height = "auto",
HTML('<h4>Vulnerability index</h4><hr />'),
radioButtons(
inputId = "horizon_vulnerability",
label = "Select the horizon:",
choices = c("2011-2040", "2041-2070", "2071-2100"),
selected = "2011-2040",
inline = FALSE,
width = 300
),
radioButtons(
inputId = "scenario_vulnerability",
label = "Select the RCP:",
choices = list(
"RCP4.5" = "rcp45_vulnerability",
"RCP8.5" = "rcp85_vulnerability"
),
selected = character(0),
inline = FALSE,
width = 300
),
HTML(
paste0(
"<h5>Select a color palette:</h5>",
"<div class=\"color-picker\">",
" <div id=\"grad_vulnerability\">",
" <div id=\"color-sel_vulnerability\" class=\"YlGnBu\"></div>",
" <div id=\"color-arrow_vulnerability\">",
" <i class=\"fa fa-caret-down\"></i>",
" </div>",
" </div>",
" <div id=\"menu_vulnerability\">"
)
),
includeHTML("includes/color-picker.html"),
HTML(
paste0(
" </div>",
"</div>"
)
),
textInput(
inputId = "color_vulnerability",
label = "",
value = ""
),
HTML(
paste0(
'<hr />',
'<div class="buttons">',
'<div id="btn-vulnerability" class="btn-png btn-left">',
'<i class="fa fa-download"></i>',
'Download Map',
'</div>',
'<div id="help-vulnerability" class="btn-png btn-right">',
'<i class="fa fa-info-circle"></i>',
'Informations',
'</div>',
'</div>'
)
)
)
)
)
### GET CODE PANEL -----------------------------------------------------------
# Added with jQuery
)
|
# AFL-generated regression input for the package-internal C++ entry point
# IntervalSurgeon:::rcpp_pile (exercised here via valgrind test files).
# The argument values are fuzzer output, not meaningful intervals; note the NA
# in `ends` — presumably this input once triggered a crash/UB that the test
# pins down. Do not "clean up" the numbers: they ARE the test case.
testlist <- list(ends = c(-1125300777L, 765849512L, -1760774663L, 791623263L, 1837701012L, -128659642L, -14914341L, 1092032927L, NA, 1632068659L ), pts = c(1758370433L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L ), starts = c(16777216L, 0L, 738263040L, 682962941L, 1612840977L, 150997320L, 747898999L, -1195392662L, 2024571419L, 808515032L, 1373469055L, -282236997L, -207881465L, -237801926L, -168118689L, -1090227888L, 235129118L, 949454105L, 1651285440L, -1119277667L, -1328604284L), members = NULL, total_members = 0L)
# Call the internal routine with the fuzzed arguments and print the structure
# of whatever comes back (success means "no crash", not a particular value).
result <- do.call(IntervalSurgeon:::rcpp_pile,testlist)
str(result)
|
/IntervalSurgeon/inst/testfiles/rcpp_pile/AFL_rcpp_pile/rcpp_pile_valgrind_files/1609873890-test.R
|
no_license
|
akhikolla/updated-only-Issues
|
R
| false
| false
| 720
|
r
|
testlist <- list(ends = c(-1125300777L, 765849512L, -1760774663L, 791623263L, 1837701012L, -128659642L, -14914341L, 1092032927L, NA, 1632068659L ), pts = c(1758370433L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L ), starts = c(16777216L, 0L, 738263040L, 682962941L, 1612840977L, 150997320L, 747898999L, -1195392662L, 2024571419L, 808515032L, 1373469055L, -282236997L, -207881465L, -237801926L, -168118689L, -1090227888L, 235129118L, 949454105L, 1651285440L, -1119277667L, -1328604284L), members = NULL, total_members = 0L)
result <- do.call(IntervalSurgeon:::rcpp_pile,testlist)
str(result)
|
library(ggmap)
library(ggplot2)
library(dplyr)
# Path to the project directory.
# NOTE(review): setwd() in a script is fragile — it assumes a particular
# working directory at launch and changes global state; prefer building file
# paths relative to the project root instead.
path <- "./crime_classification/"
setwd(path)
# Load the training data (San Francisco crime incidents).
train <- read.csv("train.csv")
# Fetch a black-and-white base map of San Francisco (requires network access
# and, with current ggmap versions, a registered map-service API key).
map <- get_map("San Francisco", zoom = 12, color = "bw")
# Keep only the observations belonging to the requested crime categories and
# overlay them as semi-transparent colored points on the San Francisco base map.
map_crime <- function(crime_df, crime) {
  selected <- filter(crime_df, Category %in% crime)
  ggmap(map, extent = "device") +
    geom_point(
      data = selected,
      aes(x = X, y = Y, color = Category),
      alpha = 0.6
    )
}
map_crime(train, c("SUICIDE", "ARSON"))
|
/visualisation_map.r
|
no_license
|
cyuss/crime_classification
|
R
| false
| false
| 651
|
r
|
library(ggmap)
library(ggplot2)
library(dplyr)
# chemin vers le projet
path <- "./crime_classification/"
setwd(path)
# charger les données
train <- read.csv("train.csv")
# charger la carte géographique de San Francisco
map <- get_map("San Francisco", zoom = 12, color = "bw")
# fonction pour filtrer les données selon la catégorie et les projeter sur la map
map_crime <- function(crime_df, crime) {
filtered <- filter(crime_df, Category %in% crime)
plot <- ggmap(map, extent = 'device') +
geom_point(data = filtered, aes(x = X, y = Y, color = Category), alpha = 0.6)
return(plot)
}
map_crime(train, c('SUICIDE', 'ARSON'))
|
\name{nporg}
\docType{data}
\alias{nporg}
\encoding{latin1}
\title{Nelson and Plosser original data set}
\description{
This data set contains the fourteen U.S. economic time series used by
Nelson and Plosser in their seminal paper.
}
\usage{data(nporg)}
\format{
A data frame containing fourteen series.
\tabular{rl}{
\code{year} \tab Time index from 1860 until 1970. \cr
\code{gnp.r} \tab Real GNP, \cr
\tab [Billions of 1958 Dollars], [1909 -- 1970] \cr
\code{gnp.n} \tab Nominal GNP, \cr
\tab [Millions of Current Dollars], [1909 -- 1970] \cr
\code{gnp.pc} \tab Real Per Capita GNP, \cr
\tab [1958 Dollars], [1909 -- 1970] \cr
\code{ip} \tab Industrial Production Index, \cr
\tab [1967 = 100], [1860 -- 1970] \cr
\code{emp} \tab Total Employment, \cr
\tab [Thousands], [1890 -- 1970] \cr
\code{ur} \tab Total Unemployment Rate, \cr
\tab [Percent], [1890 -- 1970] \cr
\code{gnp.p} \tab GNP Deflator, \cr
\tab [1958 = 100], [1889 -- 1970] \cr
\code{cpi} \tab Consumer Price Index, \cr
\tab [1967 = 100], [1860 -- 1970] \cr
\code{wg.n} \tab Nominal Wages \cr
\tab (Average annual earnings per full-time employee in manufacturing), \cr
\tab [current Dollars], [1900 -- 1970] \cr
\code{wg.r} \tab Real Wages, \cr
\tab [Nominal wages/CPI], [1900 -- 1970] \cr
\code{M} \tab Money Stock (M2), \cr
\tab [Billions of Dollars, annual averages], [1889 -- 1970] \cr
\code{vel} \tab Velocity of Money, \cr
\tab [1869 -- 1970] \cr
\code{bnd} \tab Bond Yield (Basic Yields of 30-year
corporate bonds), \cr
\tab [Percent per annum], [1900 -- 1970] \cr
\code{sp} \tab Stock Prices, \cr
\tab [Index; 1941 -- 43 = 100], [1871 -- 1970] \cr
}
}
\source{
Nelson, C.R. and Plosser, C.I. (1982), Trends and Random Walks in
Macroeconomic Time Series, \emph{Journal of Monetary Economics},
\bold{10}, 139--162.
}
\references{
\url{http://korora.econ.yale.edu/phillips/index.htm}
}
\author{Bernhard Pfaff}
\keyword{datasets}
\concept{data set Nelson Plosser macroeconomic variables}
|
/man/NPORG.Rd
|
no_license
|
cran/urca
|
R
| false
| false
| 2,166
|
rd
|
\name{nporg}
\docType{data}
\alias{nporg}
\encoding{latin1}
\title{Nelson and Plosser original data set}
\description{
This data set contains the fourteen U.S. economic time series used by
Nelson and Plosser in their seminal paper.
}
\usage{data(nporg)}
\format{
A data frame containing fourteen series.
\tabular{rl}{
\code{year} \tab Time index from 1860 until 1970. \cr
\code{gnp.r} \tab Real GNP, \cr
\tab [Billions of 1958 Dollars], [1909 -- 1970] \cr
\code{gnp.n} \tab Nominal GNP, \cr
\tab [Millions of Current Dollars], [1909 -- 1970] \cr
\code{gnp.pc} \tab Real Per Capita GNP, \cr
\tab [1958 Dollars], [1909 -- 1970] \cr
\code{ip} \tab Industrial Production Index, \cr
\tab [1967 = 100], [1860 -- 1970] \cr
\code{emp} \tab Total Employment, \cr
\tab [Thousands], [1890 -- 1970] \cr
\code{ur} \tab Total Unemployment Rate, \cr
\tab [Percent], [1890 -- 1970] \cr
\code{gnp.p} \tab GNP Deflator, \cr
\tab [1958 = 100], [1889 -- 1970] \cr
\code{cpi} \tab Consumer Price Index, \cr
\tab [1967 = 100], [1860 -- 1970] \cr
\code{wg.n} \tab Nominal Wages \cr
\tab (Average annual earnings per full-time employee in manufacturing), \cr
\tab [current Dollars], [1900 -- 1970] \cr
\code{wg.r} \tab Real Wages, \cr
\tab [Nominal wages/CPI], [1900 -- 1970] \cr
\code{M} \tab Money Stock (M2), \cr
\tab [Billions of Dollars, annual averages], [1889 -- 1970] \cr
\code{vel} \tab Velocity of Money, \cr
\tab [1869 -- 1970] \cr
\code{bnd} \tab Bond Yield (Basic Yields of 30-year
corporate bonds), \cr
\tab [Percent per annum], [1900 -- 1970] \cr
\code{sp} \tab Stock Prices, \cr
\tab [Index; 1941 -- 43 = 100], [1871 -- 1970] \cr
}
}
\source{
Nelson, C.R. and Plosser, C.I. (1982), Trends and Random Walks in
Macroeconomic Time Series, \emph{Journal of Monetary Economics},
\bold{10}, 139--162.
}
\references{
\url{http://korora.econ.yale.edu/phillips/index.htm}
}
\author{Bernhard Pfaff}
\keyword{datasets}
\concept{data set Nelson Plosser macroeconomic variables}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getComputedDataSWS.R
\name{getComputedDataSWS}
\alias{getComputedDataSWS}
\title{Get computed dataset on SWS.}
\usage{
getComputedDataSWS(reporter = NA, omit = FALSE)
}
\arguments{
\item{reporter}{Reporter.}
\item{omit}{Logical indicating whether to omit or not NAs.}
}
\value{
A data.table with results.
}
\description{
Get computed dataset on SWS.
}
|
/man/getComputedDataSWS.Rd
|
no_license
|
SWS-Methodology/faoswsTrade
|
R
| false
| true
| 431
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getComputedDataSWS.R
\name{getComputedDataSWS}
\alias{getComputedDataSWS}
\title{Get computed dataset on SWS.}
\usage{
getComputedDataSWS(reporter = NA, omit = FALSE)
}
\arguments{
\item{reporter}{Reporter.}
\item{omit}{Logical indicating whether to omit or not NAs.}
}
\value{
A data.table with results.
}
\description{
Get computed dataset on SWS.
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lookoutmetrics_operations.R
\name{lookoutmetrics_list_metric_sets}
\alias{lookoutmetrics_list_metric_sets}
\title{Lists the datasets in the current AWS Region}
\usage{
lookoutmetrics_list_metric_sets(
AnomalyDetectorArn = NULL,
MaxResults = NULL,
NextToken = NULL
)
}
\arguments{
\item{AnomalyDetectorArn}{The ARN of the anomaly detector containing the metrics sets to list.}
\item{MaxResults}{The maximum number of results to return.}
\item{NextToken}{If the result of the previous request was truncated, the response
includes a \code{NextToken}. To retrieve the next set of results, use the
token in the next request. Tokens expire after 24 hours.}
}
\description{
Lists the datasets in the current AWS Region.
See \url{https://www.paws-r-sdk.com/docs/lookoutmetrics_list_metric_sets/} for full documentation.
}
\keyword{internal}
|
/cran/paws.machine.learning/man/lookoutmetrics_list_metric_sets.Rd
|
permissive
|
paws-r/paws
|
R
| false
| true
| 920
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lookoutmetrics_operations.R
\name{lookoutmetrics_list_metric_sets}
\alias{lookoutmetrics_list_metric_sets}
\title{Lists the datasets in the current AWS Region}
\usage{
lookoutmetrics_list_metric_sets(
AnomalyDetectorArn = NULL,
MaxResults = NULL,
NextToken = NULL
)
}
\arguments{
\item{AnomalyDetectorArn}{The ARN of the anomaly detector containing the metrics sets to list.}
\item{MaxResults}{The maximum number of results to return.}
\item{NextToken}{If the result of the previous request was truncated, the response
includes a \code{NextToken}. To retrieve the next set of results, use the
token in the next request. Tokens expire after 24 hours.}
}
\description{
Lists the datasets in the current AWS Region.
See \url{https://www.paws-r-sdk.com/docs/lookoutmetrics_list_metric_sets/} for full documentation.
}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/daybreak-wrappers.R
\name{astronomical_twilight}
\alias{astronomical_twilight}
\title{Astronomical twilight}
\usage{
astronomical_twilight(date, lon, lat)
}
\arguments{
\item{date}{The date to compute the length for. An R \link{DateTimeClasses} object
or something that can be coerced into one by \code{\link[=as.POSIXlt]{as.POSIXlt()}}.}
\item{lon, lat}{longitude & latitude}
}
\value{
(dbl) astronomical twilight
}
\description{
Astronomical twilight
}
\examples{
astronomical_twilight("2019-12-31", -70.8636, 43.2683)
}
|
/man/astronomical_twilight.Rd
|
permissive
|
hrbrmstr/daybreak
|
R
| false
| true
| 602
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/daybreak-wrappers.R
\name{astronomical_twilight}
\alias{astronomical_twilight}
\title{Astronomical twilight}
\usage{
astronomical_twilight(date, lon, lat)
}
\arguments{
\item{date}{The date to compute the length for. An R \link{DateTimeClasses} object
or something that can be coerced into one by \code{\link[=as.POSIXlt]{as.POSIXlt()}}.}
\item{lon, lat}{longitude & latitude}
}
\value{
(dbl) astronomical twilight
}
\description{
Astronomical twilight
}
\examples{
astronomical_twilight("2019-12-31", -70.8636, 43.2683)
}
|
# Setup ------------------------------------------------------------------------
# Load Friedman 1 benchmark data
friedman1 <- readRDS("friedman.rds")$friedman1
# Convert x.4 to categorical (two bins labelled "a"/"b"), giving the data
# frame one factor column
friedman1$x.4 <- cut(friedman1$x.4, breaks = 2, labels = letters[1L:2L])
# Create a copy of friedman1 and coerce x.4 to character
friedman2 <- friedman1
friedman2$x.4 <- as.character(friedman2$x.4)
# Create a copy of friedman1 and coerce it to a matrix; data.matrix() encodes
# the factor as 1/2, shifted below to 0/1
friedman3 <- data.matrix(friedman1)
friedman3[, "x.4"] <- friedman3[, "x.4"] - 1
# Feature matrix and response vector for the matrix-interface fits
X <- friedman3[, paste0("x.", 1L:10L)]
y <- friedman3[, "y"]
# Test that character columns are properly handled for data frames -------------
# A factor column (friedman1) and the equivalent character column (friedman2)
# should yield identical trees and identical partial-dependence results.
if (require(rpart, quietly = TRUE)) {
# Fit decision trees (fully grown: cp = 0) on the factor and character copies
tree1 <- rpart::rpart(y ~ ., data = friedman1, control = list(cp = 0))
tree2 <- rpart::rpart(y ~ ., data = friedman2, control = list(cp = 0))
expect_identical(tree1$variable.importance, tree2$variable.importance)
# Compute partial dependence of the prediction on x.4 for both fits
pd_tree1 <- partial(tree1, pred.var = "x.4")
pd_tree2 <- partial(tree2, pred.var = "x.4")
# Expectations: the column types really differ, yet the PD values agree
expect_true(inherits(friedman1$x.4, what = "factor"))
expect_true(inherits(friedman2$x.4, what = "character"))
expect_identical(pd_tree1$yhat, pd_tree2$yhat)
}
# Test that cats argument works properly for matrices --------------------------
# FIXME: When is the cats argument actually necessary?
if (require(randomForest, quietly = TRUE)) {
  # Fit default random forests: one on the data frame (x.4 is a factor) and
  # one on the numeric matrix where x.4 is encoded as 0/1.
  set.seed(0825)
  rfo1 <- randomForest::randomForest(y ~ ., data = friedman1)
  rfo2 <- randomForest::randomForest(x = X, y = y)
  # Compute partial dependence on x.4 via both interfaces; `cats` tells
  # partial() to treat the numeric matrix column as categorical.
  pd_rfo1 <- partial(rfo1, pred.var = "x.4")
  pd_rfo2 <- partial(rfo2, pred.var = "x.4", train = X, cats = "x.4")
  # Expectations. BUG FIX: the original re-asserted
  # `expect_identical(pd_tree1$yhat, pd_tree2$yhat)` — a copy-paste of the
  # rpart block above that tests nothing here and errors when rpart is not
  # installed. The two forests are distinct random fits, so their yhat values
  # need not match exactly; what `cats` must guarantee is that the matrix
  # interface produces the same categorical (two-level) grid for x.4 as the
  # factor-based fit, rather than a continuous grid of many points.
  expect_identical(nrow(pd_rfo1), nrow(pd_rfo2))
}
|
/inst/tinytest/test_cats_argument.R
|
no_license
|
bgreenwell/pdp
|
R
| false
| false
| 1,866
|
r
|
# Setup ------------------------------------------------------------------------
# Load Friedman 1 benchmark data
friedman1 <- readRDS("friedman.rds")$friedman1
# Convert x.4 to categorical
friedman1$x.4 <- cut(friedman1$x.4, breaks = 2, labels = letters[1L:2L])
# Create a copy of friedman1 and coerce x.4 to character
friedman2 <- friedman1
friedman2$x.4 <- as.character(friedman2$x.4)
# Create a copy of friedman1 and coerce it to a matrix
friedman3 <- data.matrix(friedman1)
friedman3[, "x.4"] <- friedman3[, "x.4"] - 1
# Feature matrix and response vector
X <- friedman3[, paste0("x.", 1L:10L)]
y <- friedman3[, "y"]
# Test that character columns are properly handled for data frames -------------
if (require(rpart, quietly = TRUE)) {
# Fit decision trees
tree1 <- rpart::rpart(y ~ ., data = friedman1, control = list(cp = 0))
tree2 <- rpart::rpart(y ~ ., data = friedman2, control = list(cp = 0))
expect_identical(tree1$variable.importance, tree2$variable.importance)
# Compute partial dependence
pd_tree1 <- partial(tree1, pred.var = "x.4")
pd_tree2 <- partial(tree2, pred.var = "x.4")
# Expectations
expect_true(inherits(friedman1$x.4, what = "factor"))
expect_true(inherits(friedman2$x.4, what = "character"))
expect_identical(pd_tree1$yhat, pd_tree2$yhat)
}
# Test that cats argument works properly for matrices --------------------------
# FIXME: When is the cats argument actually necessary?
if (require(randomForest, quietly = TRUE)) {
# Fit default random forests
set.seed(0825)
rfo1 <- randomForest::randomForest(y ~ ., data = friedman1)
rfo2 <- randomForest::randomForest(x = X, y = y)
# Compute partial dependence
pd_rfo1 <- partial(rfo1, pred.var = "x.4")
pd_rfo2 <- partial(rfo2, pred.var = "x.4", train = X, cats = "x.4")
# Expectations
expect_identical(pd_tree1$yhat, pd_tree2$yhat)
}
|
draw.plot3 <- function() {
  # Draw plot 3 of the "Exploratory Data Analysis" assignment: the three
  # energy sub-metering series over 1-2 Feb 2007, written to "plot3.png"
  # (480 x 480 px, white background). Returns nothing; side effect is the
  # PNG file in the current working directory.
  #
  # BUG FIXES vs. the original:
  # * `restoreConsole = TRUE` and `type = c("windows", ...)` are Windows-only
  #   arguments; dropping them (along with the redundant `res = NA`,
  #   `family = ""` defaults) makes the function portable.
  # * dev.off() is now registered with on.exit() so the device is closed even
  #   if reading or plotting fails part-way through (no leaked device).
  png(filename = "plot3.png", width = 480, height = 480, units = "px",
      pointsize = 12, bg = "white")
  on.exit(dev.off(), add = TRUE)
  par(mfrow = c(1, 1))

  # Read the raw data ("?" marks missing values) and keep only the two days
  # of interest.
  data <- read.csv("household_power_consumption.txt", na.strings = "?",
                   colClasses = c("character", "character", rep("numeric", 7)),
                   sep = ";")
  data <- data[data$Date %in% c("1/2/2007", "2/2/2007"), ]
  # POSIXct (not POSIXlt) is the appropriate date-time class for a data-frame
  # column; it plots identically on the time axis.
  data$DateTime <- as.POSIXct(paste(data$Date, data$Time),
                              format = "%d/%m/%Y %H:%M:%S")

  # Empty canvas first (type = "n"), then legend and the three series.
  plot(data$DateTime, data$Sub_metering_1, type = "n",
       xlab = "", ylab = "Energy sub metering", main = "")
  legend("topright",
         legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
         col = c("black", "red", "blue"), lty = 1)
  lines(data$DateTime, data$Sub_metering_1, col = "black")
  lines(data$DateTime, data$Sub_metering_2, col = "red")
  lines(data$DateTime, data$Sub_metering_3, col = "blue")
}
|
/plot3.R
|
no_license
|
DADGAD/ExData_Plotting1
|
R
| false
| false
| 1,124
|
r
|
draw.plot3 <- function() {
# Set the graphics parameters
png(filename = "plot3.png",
width = 480, height = 480, units = "px", pointsize = 12,
bg = "white", res = NA, family = "", restoreConsole = TRUE,
type = c("windows", "cairo", "cairo-png"))
par(mfrow = c(1,1))
# Read data and filter it.
data <- read.csv('household_power_consumption.txt', na.strings = c("?"),
colClasses = c("character", "character", rep("numeric", 7)), sep = ";")
data <- data[data$Date == '1/2/2007' | data$Date == '2/2/2007', ]
data$DateTime <- as.POSIXlt(paste(data$Date, data$Time), format = "%d/%m/%Y %H:%M:%S")
# Build the plot
plot(data$DateTime, data$Sub_metering_1, type="n",
xlab="", ylab="Energy sub metering", main="")
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
col = c("black", "red", "blue"), lty = 1)
lines(data$DateTime, data$Sub_metering_1, col="black")
lines(data$DateTime, data$Sub_metering_2, col="red")
lines(data$DateTime, data$Sub_metering_3, col="blue")
# Finish up.
dev.off()
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/correltable.R
\name{correltable}
\alias{correltable}
\title{Create correlation table (with stars for significance)
for scientific publication}
\usage{
correltable(
data,
vars = NULL,
var_names = vars,
vars2 = NULL,
var_names2 = vars2,
method = c("pearson", "spearman"),
use = c("pairwise", "complete"),
round_n = 2,
tri = c("upper", "lower", "all"),
cutempty = c(FALSE, TRUE),
colnum = c(FALSE, TRUE),
html = c(FALSE, TRUE),
strata = NULL
)
}
\arguments{
\item{data}{The input dataset.}
\item{vars}{A list of the names of variables to correlate,
e.g. c("Age","height","WASI"),
if NULL, all variables in \code{data} will be used.}
\item{var_names}{An optional list to rename the \code{vars} colnames
in the output table, e.g. c("Age (years)","Height (inches)","IQ").
Must match \code{vars} in length. If not supplied, \code{vars} will be printed as is.}
\item{vars2}{If cross-correlation between two sets of variables
is desired, add a second list of variables to correlate with
\code{vars}; Overrides \code{tri}, \code{cutempty}, and \code{colnum}.}
\item{var_names2}{An optional list to rename the \code{vars2} colnames
in the output table If not supplied, \code{vars2} will be printed as is.}
\item{method}{Type of correlation to calculate c("pearson", "spearman"),
based on \code{stats::cor}, default = "pearson".}
\item{use}{Use pairwise.complete.obs or restrict to complete cases
c("pairwise", "complete"), based on \code{stats::cor}, default = "pairwise".}
\item{round_n}{The number of decimal places to
round all output to (default=2).}
\item{tri}{Select output formatting c("upper", "lower","all");
KEEP the upper triangle, lower triangle, or all values, default = "upper".}
\item{cutempty}{If keeping only upper/lower triangle with \code{tri},
cut empty row/column, default=FALSE.}
\item{colnum}{For more concise column names, number row names and
just use corresponding numbers as column names,
default=FALSE, if TRUE overrides cutempty.}
\item{html}{Format as html in viewer or not (default = FALSE, print in console),
needs library(htmlTable) installed.}
\item{strata}{Split table by a 2-level factor variable
with level1 in the upper and level2 in the lower triangle
must have 2+ cases per level, cannot be combined with vars2}
}
\value{
Output Table 1
}
\description{
The \code{correltable} function can be used to create correlation
table (with stars for significance) for scientific publication
This is intended to summarize correlations between (\code{vars})
from an input dataset (\code{data}).
Correlations are based on \code{stats::cor}, \code{use} and \code{method}
follow from that function.
Stars indicate significance: \verb{*p<.05, **p<.01, ***p<.001}
For formatting, variables can be renamed, numbers can be rounded,
upper or lower triangle only can be selected (or whole matrix),
and empty columns/rows can be dropped if using triangles.
For more compact columns, variable names can be numbered in the
rows and column names will be corresponding numbers.
If only cross-correlation between two sets of variables is desired
(no correlations within a set of variables),
\code{vars2} and \code{var_names} can be used.
This function will drop any non-numeric variables by default.
Requires \code{tidyverse} and \code{stats} libraries.
}
\examples{
correltable(data = psydat)
correltable(
data = psydat, vars = c("Age", "Height", "iq"),
tri = "lower", html = TRUE
)
correltable(
data = psydat, vars = c("Age", "Height", "iq"),
tri = "lower", html = TRUE, strata = "Sex"
)
correltable(
data = psydat, vars = c("Age", "Height", "iq"),
var_names = c("Age (months)", "Height (inches)", "IQ"),
tri = "upper", colnum = TRUE, html = TRUE
)
correltable(
data = psydat, vars = c("Age", "Height", "iq"),
var_names = c("Age (months)", "Height (inches)", "IQ"),
vars2 = c("depressT", "anxT"),
var_names2 = c("Depression T", "Anxiety T"), html = TRUE
)
}
|
/man/correltable.Rd
|
no_license
|
cran/scipub
|
R
| false
| true
| 3,994
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/correltable.R
\name{correltable}
\alias{correltable}
\title{Create correlation table (with stars for significance)
for scientific publication}
\usage{
correltable(
data,
vars = NULL,
var_names = vars,
vars2 = NULL,
var_names2 = vars2,
method = c("pearson", "spearman"),
use = c("pairwise", "complete"),
round_n = 2,
tri = c("upper", "lower", "all"),
cutempty = c(FALSE, TRUE),
colnum = c(FALSE, TRUE),
html = c(FALSE, TRUE),
strata = NULL
)
}
\arguments{
\item{data}{The input dataset.}
\item{vars}{A list of the names of variables to correlate,
e.g. c("Age","height","WASI"),
if NULL, all variables in \code{data} will be used.}
\item{var_names}{An optional list to rename the \code{vars} colnames
in the output table, e.g. c("Age (years)","Height (inches)","IQ").
Must match \code{vars} in length. If not supplied, \code{vars} will be printed as is.}
\item{vars2}{If cross-correlation between two sets of variables
is desired, add a second list of variables to correlate with
\code{vars}; Overrides \code{tri}, \code{cutempty}, and \code{colnum}.}
\item{var_names2}{An optional list to rename the \code{vars2} colnames
in the output table If not supplied, \code{vars2} will be printed as is.}
\item{method}{Type of correlation to calculate c("pearson", "spearman"),
based on \code{stats::cor}, default = "pearson".}
\item{use}{Use pairwise.complete.obs or restrict to complete cases
c("pairwise", "complete"), based on \code{stats::cor}, default = "pairwise".}
\item{round_n}{The number of decimal places to
round all output to (default=2).}
\item{tri}{Select output formatting c("upper", "lower","all");
KEEP the upper triangle, lower triangle, or all values, default = "upper".}
\item{cutempty}{If keeping only upper/lower triangle with \code{tri},
cut empty row/column, default=FALSE.}
\item{colnum}{For more concise column names, number row names and
just use corresponding numbers as column names,
default=FALSE, if TRUE overrides cutempty.}
\item{html}{Format as html in viewer or not (default=F, print in console),
needs library(htmlTable) installed.}
\item{strata}{Split table by a 2-level factor variable
with level1 in the upper and level2 in the lower triangle
must have 2+ cases per level, cannot be combined with vars2}
}
\value{
Output Table 1
}
\description{
The \code{correltable} function can be used to create correlation
table (with stars for significance) for scientific publication
This is intended to summarize correlations between (\code{vars})
from an input dataset (\code{data}).
Correlations are based on \code{stats::cor}, \code{use} and \code{method}
follow from that function.
Stars indicate significance: \verb{*p<.05, **p<.01, ***p<.001}
For formatting, variables can be renamed, numbers can be rounded,
upper or lower triangle only can be selected (or whole matrix),
and empty columns/rows can be dropped if using triangles.
For more compact columns, variable names can be numbered in the
rows and column names will be corresponding numbers.
If only cross-correlation between two sets of variables is desired
(no correlations within a set of variables),
\code{vars2} and \code{var_names} can be used.
This function will drop any non-numeric variables by default.
Requires \code{tidyverse} and \code{stats} libraries.
}
\examples{
correltable(data = psydat)
correltable(
data = psydat, vars = c("Age", "Height", "iq"),
tri = "lower", html = TRUE
)
correltable(
data = psydat, vars = c("Age", "Height", "iq"),
tri = "lower", html = TRUE, strata = "Sex"
)
correltable(
data = psydat, vars = c("Age", "Height", "iq"),
var_names = c("Age (months)", "Height (inches)", "IQ"),
tri = "upper", colnum = TRUE, html = TRUE
)
correltable(
data = psydat, vars = c("Age", "Height", "iq"),
var_names = c("Age (months)", "Height (inches)", "IQ"),
vars2 = c("depressT", "anxT"),
var_names2 = c("Depression T", "Anxiety T"), html = TRUE
)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Job-class.R
\name{is.Job}
\alias{is.Job}
\title{Check if given job is a job}
\usage{
is.Job(obj)
}
\arguments{
\item{obj}{Job to be checked}
}
\value{
Is obj a job or not
}
\description{
Check if given job is a job
}
|
/man/is.Job.Rd
|
no_license
|
appelmar/openEo.gdalcubes
|
R
| false
| true
| 295
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Job-class.R
\name{is.Job}
\alias{is.Job}
\title{Check if given job is a job}
\usage{
is.Job(obj)
}
\arguments{
\item{obj}{Job to be checked}
}
\value{
Is obj a job or not
}
\description{
Check if given job is a job
}
|
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(caret)
library(randomForest)
# Titanic passenger data pulled from the Rdatasets mirror at app start-up.
df <- read.csv("https://vincentarelbundock.github.io/Rdatasets/csv/datasets/Titanic.csv", na.strings = "NA")
# Impute missing values with 30.39 -- presumably the mean passenger age
# (TODO confirm). NOTE(review): this blanket assignment fills every NA cell
# in the frame, not just the Age column.
df[is.na(df)] <- 30.39
# Drop the row-index, name, and redundant sex-code columns before modeling.
unn_cols <- c("X", "Name", "SexCode")
df <- df[, !(names(df)) %in% unn_cols]
# Replace the literal "*" marker (crew entries in PClass) with "crew".
# NOTE(review): sapply() coerces every column to character here, so the
# numeric conversions below depend on how as.data.frame() re-types them.
df <-as.data.frame(sapply(df, sub, pattern='\\*', replacement="crew"))
df$Age <- as.double(as.character(df$Age))
# NOTE(review): under R >= 4.0 these columns stay character after the sapply
# above, so as.numeric() yields NA; under older R (stringsAsFactors = TRUE)
# they become factor level codes (e.g. female = 1, male = 2) -- verify which
# behavior this app was built against.
df$Sex <- as.numeric(df$Sex)
df$PClass <- as.numeric(df$PClass)
# Random forest classifier predicting Survived from the remaining columns.
modelRF <- randomForest(Survived~., data=df)
shinyServer(function(input, output) {
output$textOutput <- renderText({
# NOTE(review): 2017-17-03 is arithmetic (2017 - 17 - 3 = 1997), not a date
# literal -- presumably a date-like seed was intended; confirm.
set.seed(2017-17-03)
# Map the class dropdown to the numeric coding used during training;
# any unrecognized value falls through to 4.
if(input$InputClass=="1st"){
PClass = 1
}else if(input$InputClass=="2nd"){
PClass = 2
} else if (input$InputClass=="3rd"){
PClass =3
} else PClass = 4
Age <- input$InputAge
# NOTE(review): female/male is encoded 1/0 here, but the training column
# above may carry factor codes 1/2 -- confirm the encodings agree, or
# predictions for Sex will be systematically wrong.
Sex <- ifelse(input$InputSex=="female", 1, 0)
testdata <- data.frame(PClass, Age, Sex)
# Predict the survival class and render a human-readable verdict.
if (as.character(predict(modelRF, testdata))== "1"){
"Survived!!! :)"
}else{
"Died!!! :("
}
})
})
|
/server.R
|
no_license
|
athni/Titanic-app
|
R
| false
| false
| 1,570
|
r
|
#
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(caret)
library(randomForest)
df <- read.csv("https://vincentarelbundock.github.io/Rdatasets/csv/datasets/Titanic.csv", na.strings = "NA")
df[is.na(df)] <- 30.39
unn_cols <- c("X", "Name", "SexCode")
df <- df[, !(names(df)) %in% unn_cols]
df <-as.data.frame(sapply(df, sub, pattern='\\*', replacement="crew"))
df$Age <- as.double(as.character(df$Age))
df$Sex <- as.numeric(df$Sex)
df$PClass <- as.numeric(df$PClass)
modelRF <- randomForest(Survived~., data=df)
shinyServer(function(input, output) {
output$textOutput <- renderText({
set.seed(2017-17-03)
if(input$InputClass=="1st"){
PClass = 1
}else if(input$InputClass=="2nd"){
PClass = 2
} else if (input$InputClass=="3rd"){
PClass =3
} else PClass = 4
Age <- input$InputAge
Sex <- ifelse(input$InputSex=="female", 1, 0)
testdata <- data.frame(PClass, Age, Sex)
if (as.character(predict(modelRF, testdata))== "1"){
"Survived!!! :)"
}else{
"Died!!! :("
}
})
})
|
library(galgo)
### Name: reInit.Gene
### Title: Erases all internal values in order to re-use the object
### Aliases: reInit.Gene Gene.reInit reInit.Gene reInit,Gene-method
### Keywords: methods internal methods
### ** Examples
# Build a Gene object with the given shape parameters, print it, then
# reset its internal state with reInit() so the object can be reused.
ge <- Gene(shape1=1, shape2=100)
ge
reInit(ge)
|
/data/genthat_extracted_code/galgo/examples/reInit.Gene.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 288
|
r
|
library(galgo)
### Name: reInit.Gene
### Title: Erases all internal values in order to re-use the object
### Aliases: reInit.Gene Gene.reInit reInit.Gene reInit,Gene-method
### Keywords: methods internal methods
### ** Examples
ge <- Gene(shape1=1, shape2=100)
ge
reInit(ge)
|
### You will need to adjust this function based on the structure of your states actuals
# Build an xts time series for one county: column 1 supplies the time
# index, columns 2-4 become the series values.
to_xts_COVID <- function(df, c) {
  county_rows <- which(df$County.Name == c)
  county <- df[county_rows, c(1:4)]
  # j$date <- as.Date(as.POSIXct(j$date), tz = "")  # kept from original for reference
  xts(county[, -1], county[[1]])
}
|
/R/to_xts_COVID.R
|
permissive
|
wwheeler6/CalCAT-1
|
R
| false
| false
| 290
|
r
|
###You will need to adjust this function based on the structure of your states actuals
to_xts_COVID <- function(df,c) {
d <- df
j <- d[which(d$County.Name==c),c(1:4)]
#j$date <- as.Date(as.POSIXct(j$date), tz = "")
j.ts <- xts(j[,-1],
j[[1]])
return(j.ts)
}
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{getPictures}
\alias{getPictures}
\title{getPictures from the fishbase database}
\usage{
getPictures(scientific_name, type = c("adult", "juvenile", "larvae",
"stamps"), what = c("actual", "thumbnail", "author"), download = FALSE,
...)
}
\arguments{
\item{scientific_name}{the space-separated genus and species names}
\item{type}{the kind of photo requested: adult, juvenile, larvae, or stamps.}
\item{what}{character specifying what to return: actual image, thumbnail, or author name?}
\item{download}{logical, download to working directory?}
\item{...}{additional options to download.file}
}
\value{
list of image urls. If download=TRUE, will also download images to working directory.
}
\description{
get urls of fishbase images given a genus and species
}
|
/man/getPictures.Rd
|
permissive
|
CottonRockwood/rfishbase
|
R
| false
| false
| 826
|
rd
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{getPictures}
\alias{getPictures}
\title{getPictures from the fishbase database}
\usage{
getPictures(scientific_name, type = c("adult", "juvenile", "larvae",
"stamps"), what = c("actual", "thumbnail", "author"), download = FALSE,
...)
}
\arguments{
\item{scientific_name}{the space-separated genus and species names}
\item{type}{the kind of photo requested: adult, juvenile, larvae, or stamps.}
\item{what}{character specifying what to return: actual image, thumbnail, or author name?}
\item{download}{logical, download to working directory?}
\item{...}{additional options to download.file}
}
\value{
list of image urls. If download=TRUE, will also download images to working directory.
}
\description{
get urls of fishbase images given a genus and species
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pfa.glmnet.R
\name{pfa.glmnet.extractParams}
\alias{pfa.glmnet.extractParams}
\title{pfa.glmnet.extractParams}
\usage{
pfa.glmnet.extractParams(cvfit, lambdaval = "lambda.1se")
}
\arguments{
\item{cvfit}{an object of class "cv.glmnet"}
\item{lambdaval}{FIXME}
}
\value{
PFA as a list-of-lists that can be inserted into a cell or pool
}
\description{
Extract generalized linear model net parameters from the glm library
}
\examples{
FIXME
}
|
/aurelius/man/pfa.glmnet.extractParams.Rd
|
permissive
|
maximk/hadrian
|
R
| false
| true
| 520
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pfa.glmnet.R
\name{pfa.glmnet.extractParams}
\alias{pfa.glmnet.extractParams}
\title{pfa.glmnet.extractParams}
\usage{
pfa.glmnet.extractParams(cvfit, lambdaval = "lambda.1se")
}
\arguments{
\item{cvfit}{an object of class "cv.glmnet"}
\item{lambdaval}{FIXME}
}
\value{
PFA as a list-of-lists that can be inserted into a cell or pool
}
\description{
Extract generalized linear model net parameters from the glm library
}
\examples{
FIXME
}
|
#' Write raw data in tab-separated data format
#'
#' Writes the expression matrix of a Seurat object (raw or normalized data,
#' restricted to the retained cells) to a TSV file with a `gene` header
#' column, optionally gzipping the result.
#'
#' @param obj Seurat object to print
#' @param outfile Character. Name of a tsv file that should be written to
#' @param raw A logical scalar. Should raw data be written?
#' @param gzip A logical scalar. Should data be gzipped after writing?
#' @return Invisibly returns `NULL`; called for its side effect.
#' @export
#' @importFrom assertthat assert_that
#' @importFrom data.table fwrite
#' @examples
#' \dontrun{
#' WriteTsv(obj, "expression.tsv", raw = FALSE)
#' }
WriteTsv <- function(obj, outfile, raw = TRUE, gzip = TRUE) {
  # inherits() is the robust class test (class(obj) == "seurat" breaks if
  # the object ever carries more than one class attribute).
  assert_that(inherits(obj, "seurat"))
  # Restrict to the cells kept in the object; choose raw counts or
  # normalized data.
  if (raw) {
    mat <- obj@raw.data[, obj@cell.names] %>%
      as.matrix()
  } else {
    mat <- obj@data[, obj@cell.names] %>%
      as.matrix()
  }
  # BUG FIX: cat() separates its arguments with a space by default, which
  # injected stray spaces into the header line; sep = "" keeps the header
  # aligned with the tab-separated data rows appended below.
  cat("gene\t", paste(colnames(mat), collapse = "\t"), "\n",
      file = outfile, sep = "")
  fwrite(as.data.frame(mat), file = outfile, append = TRUE, quote = FALSE, sep = "\t",
         row.names = TRUE, col.names = FALSE)
  if (gzip) {
    # NOTE(review): gzip() presumably comes from R.utils, which is not
    # declared via @importFrom -- confirm the dependency is attached.
    gzip(outfile)
  }
  invisible(NULL)
}
|
/R/WriteTsv.R
|
no_license
|
daskelly/earlycross
|
R
| false
| false
| 970
|
r
|
#' Write raw data in tab-separated data format
#'
#' @param obj Seurat object to print
#' @param outfile Character. Name of a tsv file that should be written to
#' @param raw A logical scalar. Should raw data be written?
#' @param gzip A logical scalar. Should data be gzipped after writing?
#' @export
#' @importFrom assertthat assert_that
#' @importFrom data.table fwrite
#' @examples
#' WriteTsv(obj, filename, raw=FALSE)
WriteTsv <- function(obj, outfile, raw = TRUE, gzip = TRUE) {
assert_that(class(obj) == "seurat")
if (raw) {
mat <- obj@raw.data[, obj@cell.names] %>%
as.matrix()
} else {
mat <- obj@data[, obj@cell.names] %>%
as.matrix()
}
cat("gene\t", paste(colnames(mat), collapse = "\t"), "\n", file = outfile)
fwrite(as.data.frame(mat), file = outfile, append = TRUE, quote = FALSE, sep = "\t",
row.names = TRUE, col.names = FALSE)
if (gzip)
gzip(outfile)
return()
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Connect.R
\name{createDbiConnectionDetails}
\alias{createDbiConnectionDetails}
\title{Create DBI connection details}
\usage{
createDbiConnectionDetails(dbms, drv, ...)
}
\arguments{
\item{dbms}{The type of DBMS running on the server. Valid values are
\itemize{
\item "oracle" for Oracle
\item "postgresql" for PostgreSQL
\item "redshift" for Amazon Redshift
\item "sql server" for Microsoft SQL Server
\item "pdw" for Microsoft Parallel Data Warehouse (PDW)
\item "netezza" for IBM Netezza
\item "bigquery" for Google BigQuery
\item "sqlite" for SQLite
\item "sqlite extended" for SQLite with extended types (DATE and DATETIME)
\item "spark" for Spark
\item "snowflake" for Snowflake
}}
\item{drv}{An object that inherits from DBIDriver, or an existing DBIConnection object
(in order to clone an existing connection).}
\item{...}{authentication arguments needed by the DBMS instance; these typically
include user, password, host, port, dbname, etc. For details see the appropriate DBIDriver}
}
\value{
A list with all the details needed to connect to a database.
}
\description{
For advanced users only. This function will allow \code{DatabaseConnector} to wrap any DBI driver. Using a driver that
\code{DatabaseConnector} hasn't been tested with may give unpredictable performance. Use at your own risk. No
support will be provided.
}
|
/man/createDbiConnectionDetails.Rd
|
permissive
|
alondhe/DatabaseConnector
|
R
| false
| true
| 1,416
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Connect.R
\name{createDbiConnectionDetails}
\alias{createDbiConnectionDetails}
\title{Create DBI connection details}
\usage{
createDbiConnectionDetails(dbms, drv, ...)
}
\arguments{
\item{dbms}{The type of DBMS running on the server. Valid values are
\itemize{
\item "oracle" for Oracle
\item "postgresql" for PostgreSQL
\item "redshift" for Amazon Redshift
\item "sql server" for Microsoft SQL Server
\item "pdw" for Microsoft Parallel Data Warehouse (PDW)
\item "netezza" for IBM Netezza
\item "bigquery" for Google BigQuery
\item "sqlite" for SQLite
\item "sqlite extended" for SQLite with extended types (DATE and DATETIME)
\item "spark" for Spark
\item "snowflake" for Snowflake
}}
\item{drv}{An object that inherits from DBIDriver, or an existing DBIConnection object
(in order to clone an existing connection).}
\item{...}{authentication arguments needed by the DBMS instance; these typically
include user, password, host, port, dbname, etc. For details see the appropriate DBIDriver}
}
\value{
A list with all the details needed to connect to a database.
}
\description{
For advanced users only. This function will allow \code{DatabaseConnector} to wrap any DBI driver. Using a driver that
\code{DatabaseConnector} hasn't been tested with may give unpredictable performance. Use at your own risk. No
support will be provided.
}
|
library(dplyr)
library(readr)
library(tidyr)
library(caret)
library(caTools)
library(caretEnsemble)
data_train <- read.csv('./data/processed/processed_train.csv')
data_test <- read.csv('./data/processed/processed_test.csv')
# Create custom indices
my_folds <- createMultiFolds(y = data_train$brand, k = 6, times = 2)
# Preprocessing steps
pre_proc <- c('center', 'scale')
# Create reusable trainControl object: myControl
fitControl <- trainControl(
summaryFunction = twoClassSummary,
classProbs = TRUE,
verboseIter = TRUE,
savePredictions = 'final',
index = my_folds
)
target <- 'brand'
predictors <- names(data_train)[!names(data_train) %in% target]
# C5.0 (method = 'C5.0')
# For classification using packages C50 and plyr with tuning parameters:
# Number of Boosting Iterations (trials, numeric)
# Model Type (model, character)
# Winnow (winnow, logical)
fit_c5 <-
train(
data_train[, predictors],
data_train[, target],
trControl = fitControl,
preProc = pre_proc,
method = 'C5.0',
metric = 'ROC'
)
summary(fit_c5)
ggplot(fit_c5)
# eXtreme Gradient Boosting (method = 'xgbTree')
# For classification and regression using packages xgboost and plyr with tuning parameters:
# Number of Boosting Iterations (nrounds, numeric)
# Max Tree Depth (max_depth, numeric)
# Shrinkage (eta, numeric)
# Minimum Loss Reduction (gamma, numeric)
# Subsample Ratio of Columns (colsample_bytree, numeric)
# Minimum Sum of Instance Weight (min_child_weight, numeric)
# Subsample Percentage (subsample, numeric)
fit_egb <-
train(
data_train[, predictors],
data_train[, target],
trControl = fitControl,
preProc = pre_proc,
method = 'xgbTree',
metric = 'ROC'
)
fit_egb
ggplot(fit_egb)
# glmnet (method = 'glmnet')
# For classification and regression using packages glmnet and Matrix with tuning parameters:
# Mixing Percentage (alpha, numeric)
# Regularization Parameter (lambda, numeric)
fit_glmnet <-
train(
data_train[, predictors],
data_train[, target],
trControl = fitControl,
preProc = pre_proc,
method = 'glmnet',
metric = 'ROC'
)
fit_glmnet
ggplot(fit_glmnet)
# Random Forest (method = 'rf')
# For classification and regression using package randomForest with tuning parameters:
# Number of Randomly Selected Predictors (mtry, numeric)
fit_rf <-
train(
data_train[, predictors],
data_train[, target],
trControl = fitControl,
preProc = pre_proc,
method = 'ranger',
metric = 'ROC'
)
fit_rf
ggplot(fit_rf)
# Trying the same models but this time without centering and scaling
fit_c5_nopre <-
train(
data_train[, predictors],
data_train[, target],
trControl = fitControl,
method = 'C5.0',
metric = 'ROC'
)
summary(fit_c5_nopre)
ggplot(fit_c5_nopre)
fit_egb_nopre <-
train(
data_train[, predictors],
data_train[, target],
trControl = fitControl,
method = 'xgbTree',
metric = 'ROC'
)
fit_egb_nopre
ggplot(fit_egb_nopre)
fit_glmnet_nopre <-
train(
data_train[, predictors],
data_train[, target],
trControl = fitControl,
method = 'glmnet',
metric = 'ROC'
)
fit_glmnet_nopre
ggplot(fit_glmnet_nopre)
fit_rf_nopre <-
train(
data_train[, predictors],
data_train[, target],
trControl = fitControl,
method = 'ranger',
metric = 'ROC'
)
fit_rf_nopre
ggplot(fit_rf_nopre)
# Compare models graphically
cv_results <-
resamples(list(
C5 = fit_c5,
EGB = fit_egb,
GLMNET = fit_glmnet,
RF = fit_rf,
C5_nopre = fit_c5_nopre,
EGB_nopre = fit_egb_nopre,
GLMNET_nopre = fit_glmnet_nopre,
RF_nopre = fit_rf_nopre
))
# Random Forest and Extreme Gradient boosting are clearly the best models
summary(cv_results)
ggplot(cv_results)
dotplot(cv_results)
predictions_c5 <-
predict(fit_c5, newdata = data_test, type = 'prob')
predictions_egb <-
predict(fit_egb, newdata = data_test, type = 'prob')
predictions_glmnet <-
predict(fit_glmnet, newdata = data_test, type = 'prob')
predictions_rf <-
predict(fit_rf, newdata = data_test, type = 'prob')
colAUC(
data.frame(
'RF' = predictions_rf$Acer,
'C5.0' = predictions_c5$Acer,
'GLMNET' = predictions_glmnet$Acer,
'EGB' = predictions_egb$Acer
),
data_test$brand,
plotROC = TRUE
)
# NOTE(review): `postResample()` was previously called here with no
# arguments, which always errors -- it requires `pred` and `obs`, e.g.
# postResample(predict(fit_rf, data_test), data_test$brand). The broken
# call has been removed.
# Interestingly the model correlation between RF and EGB is very low - Maybe the models could be ensembled?
# also C5 and EGB could be good candidate for ensembling
modelCor(cv_results)
splom(cv_results)
# Ensemble candidates: RF + xgbTree, trained on the shared resampling folds.
fit_rf_egb <- caretList(
  data_train[, predictors],
  data_train[, target],
  trControl = fitControl,
  preProc = pre_proc,
  methodList = c('xgbTree', 'ranger'),
  metric = 'ROC'
)
# BUG FIX: commenting out `metric = 'ROC'` left a trailing comma after
# `methodList`, so this call failed with a missing-argument error. The
# metric is restored to match the RF/EGB list above.
fit_c5_egb <- caretList(
  data_train[, predictors],
  data_train[, target],
  trControl = fitControl,
  preProc = pre_proc,
  methodList = c('C5.0', 'ranger'),
  metric = 'ROC'
)
plot(caretEnsemble(fit_rf_egb))
# BUG FIX: `fit_glmnet_egb` was never defined anywhere in this script;
# the C5.0/ranger list built directly above is the intended ensemble.
plot(caretEnsemble(fit_c5_egb))
stack_rf_egb <-
caretStack(fit_rf_egb,
method = 'glm',
metric = 'ROC',
trControl = fitControl)
stack_c5_egb <-
caretStack(fit_c5_egb,
method = 'glm',
metric = 'ROC',
trControl = fitControl)
plot(caretEnsemble(fit_c5_egb))
# TODO check how the stacks perform in CV
stack_cv_results <-
resamples(list(
RF_EGB = stack_rf_egb,
C5.0_EGB = stack_c5_egb
)
)
"plot"(stack_rf_egb)
predictions_stack_rf_egb <-
predict(stack_rf_egb, newdata = data_test, type = 'prob')
predictions_stack_c5_egb <-
predict(stack_c5_egb, newdata = data_test, type = 'prob')
colAUC(
data.frame(
'RF' = predictions_rf$Acer,
'C5.0' = predictions_c5$Acer,
'GLMNET' = predictions_glmnet$Acer,
'EGB' = predictions_egb$Acer,
'RF_EGB' = predictions_stack_rf_egb,
'C5.0_EGB' = predictions_stack_c5_egb
),
data_test$brand,
plotROC = FALSE
)
models <- caretList(iris[1:50,1:2], iris[1:50,3], methodList=c("glm", "rpart"))
ens <- caretEnsemble(models)
plot(ens)
|
/notebooks/model_exploration.R
|
permissive
|
TuomoKareoja/brand-preferance-prediction
|
R
| false
| false
| 6,421
|
r
|
library(dplyr)
library(readr)
library(tidyr)
library(caret)
library(caTools)
library(caretEnsemble)
data_train <- read.csv('./data/processed/processed_train.csv')
data_test <- read.csv('./data/processed/processed_test.csv')
# Create custom indices
my_folds <- createMultiFolds(y = data_train$brand, k = 6, times = 2)
# Preprocessing steps
pre_proc <- c('center', 'scale')
# Create reusable trainControl object: myControl
fitControl <- trainControl(
summaryFunction = twoClassSummary,
classProbs = TRUE,
verboseIter = TRUE,
savePredictions = 'final',
index = my_folds
)
target <- 'brand'
predictors <- names(data_train)[!names(data_train) %in% target]
# C5.0 (method = 'C5.0')
# For classification using packages C50 and plyr with tuning parameters:
# Number of Boosting Iterations (trials, numeric)
# Model Type (model, character)
# Winnow (winnow, logical)
fit_c5 <-
train(
data_train[, predictors],
data_train[, target],
trControl = fitControl,
preProc = pre_proc,
method = 'C5.0',
metric = 'ROC'
)
summary(fit_c5)
ggplot(fit_c5)
# eXtreme Gradient Boosting (method = 'xgbTree')
# For classification and regression using packages xgboost and plyr with tuning parameters:
# Number of Boosting Iterations (nrounds, numeric)
# Max Tree Depth (max_depth, numeric)
# Shrinkage (eta, numeric)
# Minimum Loss Reduction (gamma, numeric)
# Subsample Ratio of Columns (colsample_bytree, numeric)
# Minimum Sum of Instance Weight (min_child_weight, numeric)
# Subsample Percentage (subsample, numeric)
fit_egb <-
train(
data_train[, predictors],
data_train[, target],
trControl = fitControl,
preProc = pre_proc,
method = 'xgbTree',
metric = 'ROC'
)
fit_egb
ggplot(fit_egb)
# glmnet (method = 'glmnet')
# For classification and regression using packages glmnet and Matrix with tuning parameters:
# Mixing Percentage (alpha, numeric)
# Regularization Parameter (lambda, numeric)
fit_glmnet <-
train(
data_train[, predictors],
data_train[, target],
trControl = fitControl,
preProc = pre_proc,
method = 'glmnet',
metric = 'ROC'
)
fit_glmnet
ggplot(fit_glmnet)
# Random Forest (method = 'rf')
# For classification and regression using package randomForest with tuning parameters:
# Number of Randomly Selected Predictors (mtry, numeric)
fit_rf <-
train(
data_train[, predictors],
data_train[, target],
trControl = fitControl,
preProc = pre_proc,
method = 'ranger',
metric = 'ROC'
)
fit_rf
ggplot(fit_rf)
# Trying the same models but this time without centering and scaling
fit_c5_nopre <-
train(
data_train[, predictors],
data_train[, target],
trControl = fitControl,
method = 'C5.0',
metric = 'ROC'
)
summary(fit_c5_nopre)
ggplot(fit_c5_nopre)
fit_egb_nopre <-
train(
data_train[, predictors],
data_train[, target],
trControl = fitControl,
method = 'xgbTree',
metric = 'ROC'
)
fit_egb_nopre
ggplot(fit_egb_nopre)
fit_glmnet_nopre <-
train(
data_train[, predictors],
data_train[, target],
trControl = fitControl,
method = 'glmnet',
metric = 'ROC'
)
fit_glmnet_nopre
ggplot(fit_glmnet_nopre)
fit_rf_nopre <-
train(
data_train[, predictors],
data_train[, target],
trControl = fitControl,
method = 'ranger',
metric = 'ROC'
)
fit_rf_nopre
ggplot(fit_rf_nopre)
# Compare models graphically
cv_results <-
resamples(list(
C5 = fit_c5,
EGB = fit_egb,
GLMNET = fit_glmnet,
RF = fit_rf,
C5_nopre = fit_c5_nopre,
EGB_nopre = fit_egb_nopre,
GLMNET_nopre = fit_glmnet_nopre,
RF_nopre = fit_rf_nopre
))
# Random Forest and Extreme Gradient boosting are clearly the best models
summary(cv_results)
ggplot(cv_results)
dotplot(cv_results)
predictions_c5 <-
predict(fit_c5, newdata = data_test, type = 'prob')
predictions_egb <-
predict(fit_egb, newdata = data_test, type = 'prob')
predictions_glmnet <-
predict(fit_glmnet, newdata = data_test, type = 'prob')
predictions_rf <-
predict(fit_rf, newdata = data_test, type = 'prob')
colAUC(
data.frame(
'RF' = predictions_rf$Acer,
'C5.0' = predictions_c5$Acer,
'GLMNET' = predictions_glmnet$Acer,
'EGB' = predictions_egb$Acer
),
data_test$brand,
plotROC = TRUE
)
postResample()
# Interestingly the model correlation between RF and EGB is very low - Maybe the models could be ensembled?
# also C5 and EGB could be good candite for ensembling
modelCor(cv_results)
splom(cv_results)
fit_rf_egb <- caretList(
data_train[, predictors],
data_train[, target],
trControl = fitControl,
preProc = pre_proc,
methodList = c('xgbTree', 'ranger'),
metric = 'ROC'
)
fit_c5_egb <- caretList(
data_train[, predictors],
data_train[, target],
trControl = fitControl,
preProc = pre_proc,
methodList = c('C5.0', 'ranger'),
# metric = 'ROC'
)
plot(caretEnsemble(fit_rf_egb))
plot(caretEnsemble(fit_glmnet_egb))
stack_rf_egb <-
caretStack(fit_rf_egb,
method = 'glm',
metric = 'ROC',
trControl = fitControl)
stack_c5_egb <-
caretStack(fit_c5_egb,
method = 'glm',
metric = 'ROC',
trControl = fitControl)
plot(caretEnsemble(fit_c5_egb))
# TODO check how the stacks perform in CV
stack_cv_results <-
resamples(list(
RF_EGB = stack_rf_egb,
C5.0_EGB = stack_c5_egb
)
)
"plot"(stack_rf_egb)
predictions_stack_rf_egb <-
predict(stack_rf_egb, newdata = data_test, type = 'prob')
predictions_stack_c5_egb <-
predict(stack_c5_egb, newdata = data_test, type = 'prob')
colAUC(
data.frame(
'RF' = predictions_rf$Acer,
'C5.0' = predictions_c5$Acer,
'GLMNET' = predictions_glmnet$Acer,
'EGB' = predictions_egb$Acer,
'RF_EGB' = predictions_stack_rf_egb,
'C5.0_EGB' = predictions_stack_c5_egb
),
data_test$brand,
plotROC = FALSE
)
models <- caretList(iris[1:50,1:2], iris[1:50,3], methodList=c("glm", "rpart"))
ens <- caretEnsemble(models)
plot(ens)
|
#' Transform normal HMM parameters from natural to working
#'
#' The function transforms the natural normal HMM parameters that have
#' additional constraints into working parameters that incorporate the
#' constraints (log-transform for standard deviations, multinomial-logit
#' for initial state distributions).
#'
#' @param num_states The number of states in the desired HMM.
#' @param num_variables The number of variables in the data.
#' @param num_subjects The number of subjects/trials that generated the data.
#' @param mu A list of matrices containing the means of the state dependent
#'   normal distribution. Each matrix corresponds to a different variable,
#'   each row corresponds to a different subject and each column corresponds
#'   to a different state.
#' @param sigma A list of matrices containing the standard deviations of the
#'   state dependent normal distribution. Each matrix corresponds to a
#'   different variable, each row corresponds to a different subject and each
#'   column corresponds to a different state.
#' @param beta A matrix of regression coefficients for the effect of the
#'   covariates on the transition probability matrix `gamma`.
#' @param delta A list of the initial state distributions for each subject.
#'
#' @return A single vector containing working parameters.
#' @export
#' @examples
#' # define values of parameters
#' num_states <- 2
#' num_variables <- 2
#' num_subjects <- 2
#' mu <- list(matrix(c(1, 5, 2, 4), 2, 2, byrow = TRUE),
#'            matrix(c(1, 5, 2, 4), 2, 2, byrow = TRUE))
#' sigma <- list(matrix(c(1, 2, 1, 1.5), 2, 2, byrow = TRUE),
#'               matrix(c(1, 2, 1, 1.5), 2, 2, byrow = TRUE))
#' beta <- matrix(c(-2, 0, 0), nrow = 1, ncol = 3)
#' delta <- list(c(1/2, 1/2), c(1/2, 1/2))
#'
#' # transform to working parameters
#' norm_working_params(num_states, num_variables, num_subjects,
#'                     mu, sigma, beta, delta)
norm_working_params <- function(num_states, num_variables, num_subjects,
                                mu, sigma, beta, delta) {
  tmu <- numeric()
  tsigma <- numeric()
  # Flatten means row-by-row (subject-major); log-transform the standard
  # deviations so they are unconstrained on the working scale.
  for (j in seq_len(num_variables)) {
    tmu <- c(tmu, as.vector(t(mu[[j]])))
    tsigma <- c(tsigma, log(as.vector(t(sigma[[j]]))))
  }
  if (num_states == 1) {
    # Single-state models have no transition or initial-state parameters.
    # BUG FIX: the original `return(tmu, tsigma)` is a multi-argument
    # return(), which is a runtime error in R; concatenate instead.
    return(c(tmu, tsigma))
  }
  tbeta <- as.vector(beta)
  tdelta <- numeric()
  # Multinomial-logit transform of each subject's initial distribution,
  # using state 1 as the reference category.
  for (i in seq_len(num_subjects)) {
    tdelta <- c(tdelta, log(delta[[i]][-1] / delta[[i]][1]))
  }
  c(tmu, tsigma, tbeta, tdelta)
}
|
/R/norm_working_params.R
|
permissive
|
simonecollier/lizardHMM
|
R
| false
| false
| 2,421
|
r
|
#' Transform normal HMM parameters from natural to working
#'
#' The function transforms the natural normal HMM parameters that have
#' additional constraints into working parameters that incorporate the
#' constraints.
#'
#' @param num_states The number of states in the desired HMM.
#' @param num_variables The number of variables in the data.
#' @param num_subjects The number of subjects/trials that generated the data.
#' @param mu A list of matrices containing the means of the state dependent
#' normal distribution. Each matrix corresponds to a different variable,
#' each row corresponds to a different subject and each column corresponds
#' to a different state.
#' @param sigma A list of matrices containing the standard deviations of the
#' state dependent normal distribution. Each matrix corresponds to a
#' different variable, each row corresponds to a different subject and each
#' column corresponds to a different state.
#' @param beta A matrix of regression coefficients for the effect of the
#' covariates on the transition probability matrix `gamma`.
#' @param delta A list of the initial state distributions for each subject.
#'
#' @return A single vector containing working parameters.
#' @export
#' @examples
#' # define values of parameters
#' num_states <- 2
#' num_variables <- 2
#' num_subjects <- 2
#' mu <- list(matrix(c(1, 5, 2, 4), 2, 2, byrow = TRUE),
#' matrix(c(1, 5, 2, 4), 2, 2, byrow = TRUE))
#' sigma <- list(matrix(c(1, 2, 1, 1.5), 2, 2, byrow = TRUE),
#' matrix(c(1, 2, 1, 1.5), 2, 2, byrow = TRUE))
#' beta <- matrix(c(-2, 0, 0), nrow = 1, ncol = 3)
#' delta <- list(c(1/2, 1/2), c(1/2, 1/2))
#'
#' #transform to working parametes
#' norm_working_params(num_states, num_variables, num_subjects,
#' mu, sigma, beta, delta)
norm_working_params <- function(num_states, num_variables, num_subjects,
mu, sigma, beta, delta) {
tmu <- numeric()
tsigma <- numeric()
for (j in 1:num_variables) {
tmu <- c(tmu, as.vector(t(mu[[j]])))
tsigma <- c(tsigma, log(as.vector(t(sigma[[j]]))))
}
if (num_states == 1) {
return(tmu, tsigma)
}
tbeta <- as.vector(beta)
tdelta <- numeric()
for (i in 1:num_subjects) {
tdelta <- c(tdelta, log(delta[[i]][-1]/delta[[i]][1]))
}
c(tmu, tsigma, tbeta, tdelta)
}
|
# Auto-generated fuzzing harness for an internal CNull sampler: builds a
# degenerate argument list (m = NULL, repetitions = 0L, and an 8x3 matrix
# containing extreme doubles) -- presumably probing for crashes under
# valgrind rather than producing meaningful output.
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307394783e+77, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(8L, 3L)))
# Invoke the internal (triple-colon) function with the fuzzed arguments and
# inspect whatever structure comes back.
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result)
|
/CNull/inst/testfiles/communities_individual_based_sampling_alpha/AFL_communities_individual_based_sampling_alpha/communities_individual_based_sampling_alpha_valgrind_files/1615781749-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false
| false
| 329
|
r
|
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307394783e+77, 9.53818252170339e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(8L, 3L)))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result)
|
### Get all the networks together
##############################################################################################
# Object Details
G_Mu #The graph of relation between microbes and genes (both conditions)
NEL_BF #The gene coexpression network from data (Breast Fed conditions)
NEL_FF #The gene coexpression network from data (Formula Fed conditions)
brayGraph_FF #Species network from abundance data condition Formula Fed-BRAY CURTIS
brayGraph_BF #Species network from abundance data condition Breast Fed-BRAY CURTIS
nel.cornet_FF #Species network from abundance data condition Formula Fed-CORRELATION NETWORK
nel.cornet_BF #Species network from abundance data condition Breast Fed-CORRELATION NETWORK
##############################################################################################
# combine into one object
# FIX: the line above was missing its leading `#`, making the whole script
# a syntax error ("combine into one object" is not valid R).
MyallGraphs=list(Species_Gene=G_Mu, Gene_GeneBF=NEL_BF, Gene_GeneFF=NEL_FF, Species_BrayFF=brayGraph_FF, Species_BrayBF=brayGraph_BF, Species_CorFF=nel.cornet_FF, Species_CorBF=nel.cornet_BF)
# > MyallGraphs
# $Species_Gene
# A graphNEL graph with directed edges
# Number of Nodes = 843
# Number of Edges = 418
#
# $Gene_GeneBF
# A graphNEL graph with undirected edges
# Number of Nodes = 2172
# Number of Edges = 23326
#
# $Gene_GeneFF
# A graphNEL graph with undirected edges
# Number of Nodes = 2172
# Number of Edges = 16553
#
# $Species_BrayFF
# A graphNEL graph with undirected edges
# Number of Nodes = 35
# Number of Edges = 98
#
# $Species_BrayBF
# A graphNEL graph with undirected edges
# Number of Nodes = 35
# Number of Edges = 36
#
# $Species_CorFF
# A graphNEL graph with directed edges
# Number of Nodes = 35
# Number of Edges = 61
#
# $Species_CorBF
# A graphNEL graph with directed edges
# Number of Nodes = 35
# Number of Edges = 20
MyallGraphs=list(Species_Gene=G_Mu, Gene_GeneBF=NEL_BF, Gene_GeneFF=NEL_FF, Species_BrayFF=brayGraph_FF, Species_BrayBF=brayGraph_BF, Species_CorFF=nel.cornet_FF, Species_CorBF=nel.cornet_BF)
save(MyallGraphs, file="allGraphs.rda")
save(MyallGraphs, file="allGraphs.rda")
brayBasedjoinBF=join(MyallGraphs$Species_Gene, MyallGraphs$Gene_GeneBF)
brayBasedjoinBF=join(brayBasedjoinBF, MyallGraphs$Species_BrayBF)
brayBasedjoinFF=join(MyallGraphs$Species_Gene, MyallGraphs$Gene_GeneFF)
brayBasedjoinFF=join(brayBasedjoinFF, MyallGraphs$Species_BrayFF)
#######################################################################
#######################################################################
graphNEL2SIF=function(G){
  # Convert an adjacency representation (graphNEL or plain matrix) into a
  # SIF-style edge table with one row per upper-triangle edge; any positive
  # weight counts as an edge.
  #
  # FIX: the original grew the data.frame with rbind() inside a double loop
  # (quadratic copying). A single vectorised which(..., arr.ind = TRUE)
  # lookup replaces it; the explicit re-sort preserves the original
  # row-major (by row, then column) output order.
  myG <- as(G, "matrix")
  myG[myG > 0] <- 1
  hits <- which(upper.tri(myG) & myG == 1, arr.ind = TRUE)
  if (nrow(hits) == 0) {
    # original behaviour: an empty data.frame with no columns
    return(data.frame())
  }
  # which() reports indices in column-major order; restore loop order
  hits <- hits[order(hits[, "row"], hits[, "col"]), , drop = FALSE]
  data.frame(Node1 = rownames(myG)[hits[, "row"]],
             Node2 = colnames(myG)[hits[, "col"]])
}
brayBasedjoinBF.SIF=graphNEL2SIF(brayBasedjoinBF)
brayBasedjoinFF.SIF=graphNEL2SIF(brayBasedjoinFF)
for(i in 1:length(MyallGraphs)){
file=paste(names(MyallGraphs)[i], ".csv", sep="")
write.csv(graphNEL2SIF(MyallGraphs[[i]]), file=file)
}
write.csv(brayBasedjoinBF.SIF, file="brayBasedjoinBF_SIF.csv")
write.csv(brayBasedjoinFF.SIF, file="brayBasedjoinFF_SIF.csv")
### Compute edge densities
## For every graph in MyallGraphs: density = 2E / (N * (N - 1)), the share
## of realised edges among all possible node pairs. The adjacency matrix is
## binarised first; summing it counts each undirected edge twice, hence the
## division by two before the 2E/(N(N-1)) formula.
D=c()
for(i in 1:length(MyallGraphs)){
nd=length(nodes(MyallGraphs[[i]])) # number of nodes
m=as(MyallGraphs[[i]], "matrix") # adjacency matrix
m[m>0]<-1 # binarise weights
ed=sum(m)
ed=ed/2 # each undirected edge was counted twice
d1=2*ed
d2=nd*(nd-1)
D=c(D,(d1/d2)) # density for graph i
rm(nd,ed, d1, d2)
}
names(D)=names(MyallGraphs)
###########################################################################
# CorBasedjoinBF=join(MyallGraphs$Species_Gene, MyallGraphs$Gene_GeneBF)
# CorBasedjoinBF=join(CorBasedjoinBF, MyallGraphs$Species_CorBF)
#
# CorBasedjoinFF=join(MyallGraphs$Species_Gene, MyallGraphs$Gene_GeneFF)
# CorBasedjoinFF=join(CorBasedjoinFF, MyallGraphs$Species_CorFF)
############################################################################
# install devtools
install.packages("devtools")
# load devtools
library(devtools)
# install arcdiagram
install_github('arcdiagram', username='gastonstat')
# load arcdiagram
library(arcdiagram)
# location of 'gml' file
mis_file = "/Users/gaston/lesmiserables.txt"
# read 'gml' file
mis_graph = read.graph(mis_file, format="gml")
# get edgelist
# FIX: the comment above was missing its leading `#`, which made the script
# stop with a syntax error at this point.
edgelist = get.edgelist(mis_graph)
# get vertex labels
vlabels = get.vertex.attribute(mis_graph, "label")
# get vertex groups
vgroups = get.vertex.attribute(mis_graph, "group")
# get vertex fill color
vfill = get.vertex.attribute(mis_graph, "fill")
# get vertex border color
vborders = get.vertex.attribute(mis_graph, "border")
# get vertex degree
degrees = degree(mis_graph)
# get edges value
values = get.edge.attribute(mis_graph, "value")
# load reshape
library(reshape)
# data frame with vgroups, degree, vlabels and ind
x = data.frame(vgroups, degrees, vlabels, ind=1:vcount(mis_graph))
# arranging by vgroups and degrees
y = arrange(x, desc(vgroups), desc(degrees))
# get ordering 'ind'
new_ord = y$ind
arcplot(edgelist, ordering=new_ord, labels=vlabels, cex.labels=0.8,
show.nodes=TRUE, col.nodes=vborders, bg.nodes=vfill,
cex.nodes = log(degrees)+0.5, pch.nodes=21,
lwd.nodes = 2, line=-0.5,
col.arcs = hsv(0, 0, 0.2, 0.25), lwd.arcs = 1.5 * values)
ggplot(melt(as(MyallGraphs$Gene_GeneFF, "matrix")), aes(X1, X2, fill = value)) + geom_tile() + scale_fill_gradient(low = "blue", high = "yellow")
####################################################
library(network)
library(ggplot2)
library(sna)
library(ergm)
plotg <- function(net, value=NULL) {
  # Plot a `network` object with ggplot2: nodes are positioned by a
  # force-directed layout, edges drawn as grey segments, and nodes coloured
  # by the "elements" vertex attribute. `value` is accepted for interface
  # compatibility but unused (as in the original).
  m <- as.matrix.network.adjacency(net) # get sociomatrix
  # coordinates from Fruchterman and Reingold's force-directed placement
  plotcord <- data.frame(gplot.layout.fruchtermanreingold(m, NULL))
  # or get them from Kamada-Kawai's algorithm:
  # plotcord <- data.frame(gplot.layout.kamadakawai(m, NULL))
  colnames(plotcord) <- c("X1", "X2")
  edglist <- as.matrix.network.edgelist(net)
  edges <- data.frame(plotcord[edglist[, 1], ], plotcord[edglist[, 2], ])
  plotcord$elements <- as.factor(get.vertex.attribute(net, "elements"))
  colnames(edges) <- c("X1", "Y1", "X2", "Y2")
  edges$midX <- (edges$X1 + edges$X2) / 2
  edges$midY <- (edges$Y1 + edges$Y2) / 2
  pnet <- ggplot() +
    geom_segment(aes(x = X1, y = Y1, xend = X2, yend = Y2),
                 data = edges, size = 0.5, colour = "grey") +
    geom_point(aes(X1, X2, colour = elements), data = plotcord) +
    scale_colour_brewer(palette = "Set1") +
    # FIX: opts(), theme_blank(), theme_rect() and `breaks = NA` were
    # removed from ggplot2 long ago, so the original errored on any modern
    # version. theme_void() (no axes, titles, or grid) plus a suppressed
    # legend reproduces the same blank canvas.
    theme_void() +
    theme(legend.position = "none")
  return(print(pnet))
}
g <- network(150, directed=FALSE, density=0.03)
classes <- rbinom(150,1,0.5) + rbinom(150,1,0.5) + rbinom(150,1,0.5)
set.vertex.attribute(g, "elements", classes)
plotg(g)
|
/netJoinVis.R
|
no_license
|
paurushp/metagenomics
|
R
| false
| false
| 7,236
|
r
|
### Get all the networks together
##############################################################################################
# Object Details
G_Mu #The graph of relation between microbes and genes (both conditions)
NEL_BF #The gene coexpression network from data (Breast Fed conditions)
NEL_FF #The gene coexpression network from data (Formula Fed conditions)
brayGraph_FF #Species network from abundance data condition Formula Fed-BRAY CURTIS
brayGraph_BF #Species network from abundance data condition Breast Fed-BRAY CURTIS
nel.cornet_FF #Species network from abundance data condition Formula Fed-CORRELATION NETWORK
nel.cornet_BF #Species network from abundance data condition Breast Fed-CORRELATION NETWORK
##############################################################################################
# combine into one object
# FIX: the line above was missing its leading `#`, making the whole script
# a syntax error ("combine into one object" is not valid R).
MyallGraphs=list(Species_Gene=G_Mu, Gene_GeneBF=NEL_BF, Gene_GeneFF=NEL_FF, Species_BrayFF=brayGraph_FF, Species_BrayBF=brayGraph_BF, Species_CorFF=nel.cornet_FF, Species_CorBF=nel.cornet_BF)
# > MyallGraphs
# $Species_Gene
# A graphNEL graph with directed edges
# Number of Nodes = 843
# Number of Edges = 418
#
# $Gene_GeneBF
# A graphNEL graph with undirected edges
# Number of Nodes = 2172
# Number of Edges = 23326
#
# $Gene_GeneFF
# A graphNEL graph with undirected edges
# Number of Nodes = 2172
# Number of Edges = 16553
#
# $Species_BrayFF
# A graphNEL graph with undirected edges
# Number of Nodes = 35
# Number of Edges = 98
#
# $Species_BrayBF
# A graphNEL graph with undirected edges
# Number of Nodes = 35
# Number of Edges = 36
#
# $Species_CorFF
# A graphNEL graph with directed edges
# Number of Nodes = 35
# Number of Edges = 61
#
# $Species_CorBF
# A graphNEL graph with directed edges
# Number of Nodes = 35
# Number of Edges = 20
MyallGraphs=list(Species_Gene=G_Mu, Gene_GeneBF=NEL_BF, Gene_GeneFF=NEL_FF, Species_BrayFF=brayGraph_FF, Species_BrayBF=brayGraph_BF, Species_CorFF=nel.cornet_FF, Species_CorBF=nel.cornet_BF)
save(MyallGraphs, file="allGraphs.rda")
save(MyallGraphs, file="allGraphs.rda")
brayBasedjoinBF=join(MyallGraphs$Species_Gene, MyallGraphs$Gene_GeneBF)
brayBasedjoinBF=join(brayBasedjoinBF, MyallGraphs$Species_BrayBF)
brayBasedjoinFF=join(MyallGraphs$Species_Gene, MyallGraphs$Gene_GeneFF)
brayBasedjoinFF=join(brayBasedjoinFF, MyallGraphs$Species_BrayFF)
#######################################################################
#######################################################################
graphNEL2SIF=function(G){
  # Convert an adjacency representation (graphNEL or plain matrix) into a
  # SIF-style edge table with one row per upper-triangle edge; any positive
  # weight counts as an edge.
  #
  # FIX: the original grew the data.frame with rbind() inside a double loop
  # (quadratic copying). A single vectorised which(..., arr.ind = TRUE)
  # lookup replaces it; the explicit re-sort preserves the original
  # row-major (by row, then column) output order.
  myG <- as(G, "matrix")
  myG[myG > 0] <- 1
  hits <- which(upper.tri(myG) & myG == 1, arr.ind = TRUE)
  if (nrow(hits) == 0) {
    # original behaviour: an empty data.frame with no columns
    return(data.frame())
  }
  # which() reports indices in column-major order; restore loop order
  hits <- hits[order(hits[, "row"], hits[, "col"]), , drop = FALSE]
  data.frame(Node1 = rownames(myG)[hits[, "row"]],
             Node2 = colnames(myG)[hits[, "col"]])
}
brayBasedjoinBF.SIF=graphNEL2SIF(brayBasedjoinBF)
brayBasedjoinFF.SIF=graphNEL2SIF(brayBasedjoinFF)
for(i in 1:length(MyallGraphs)){
file=paste(names(MyallGraphs)[i], ".csv", sep="")
write.csv(graphNEL2SIF(MyallGraphs[[i]]), file=file)
}
write.csv(brayBasedjoinBF.SIF, file="brayBasedjoinBF_SIF.csv")
write.csv(brayBasedjoinFF.SIF, file="brayBasedjoinFF_SIF.csv")
### Compute edge densities
## For every graph in MyallGraphs: density = 2E / (N * (N - 1)), the share
## of realised edges among all possible node pairs. The adjacency matrix is
## binarised first; summing it counts each undirected edge twice, hence the
## division by two before the 2E/(N(N-1)) formula.
D=c()
for(i in 1:length(MyallGraphs)){
nd=length(nodes(MyallGraphs[[i]])) # number of nodes
m=as(MyallGraphs[[i]], "matrix") # adjacency matrix
m[m>0]<-1 # binarise weights
ed=sum(m)
ed=ed/2 # each undirected edge was counted twice
d1=2*ed
d2=nd*(nd-1)
D=c(D,(d1/d2)) # density for graph i
rm(nd,ed, d1, d2)
}
names(D)=names(MyallGraphs)
###########################################################################
# CorBasedjoinBF=join(MyallGraphs$Species_Gene, MyallGraphs$Gene_GeneBF)
# CorBasedjoinBF=join(CorBasedjoinBF, MyallGraphs$Species_CorBF)
#
# CorBasedjoinFF=join(MyallGraphs$Species_Gene, MyallGraphs$Gene_GeneFF)
# CorBasedjoinFF=join(CorBasedjoinFF, MyallGraphs$Species_CorFF)
############################################################################
# install devtools
install.packages("devtools")
# load devtools
library(devtools)
# install arcdiagram
install_github('arcdiagram', username='gastonstat')
# load arcdiagram
library(arcdiagram)
# location of 'gml' file
mis_file = "/Users/gaston/lesmiserables.txt"
# read 'gml' file
mis_graph = read.graph(mis_file, format="gml")
# get edgelist
# FIX: the comment above was missing its leading `#`, which made the script
# stop with a syntax error at this point.
edgelist = get.edgelist(mis_graph)
# get vertex labels
vlabels = get.vertex.attribute(mis_graph, "label")
# get vertex groups
vgroups = get.vertex.attribute(mis_graph, "group")
# get vertex fill color
vfill = get.vertex.attribute(mis_graph, "fill")
# get vertex border color
vborders = get.vertex.attribute(mis_graph, "border")
# get vertex degree
degrees = degree(mis_graph)
# get edges value
values = get.edge.attribute(mis_graph, "value")
# load reshape
library(reshape)
# data frame with vgroups, degree, vlabels and ind
x = data.frame(vgroups, degrees, vlabels, ind=1:vcount(mis_graph))
# arranging by vgroups and degrees
y = arrange(x, desc(vgroups), desc(degrees))
# get ordering 'ind'
new_ord = y$ind
arcplot(edgelist, ordering=new_ord, labels=vlabels, cex.labels=0.8,
show.nodes=TRUE, col.nodes=vborders, bg.nodes=vfill,
cex.nodes = log(degrees)+0.5, pch.nodes=21,
lwd.nodes = 2, line=-0.5,
col.arcs = hsv(0, 0, 0.2, 0.25), lwd.arcs = 1.5 * values)
ggplot(melt(as(MyallGraphs$Gene_GeneFF, "matrix")), aes(X1, X2, fill = value)) + geom_tile() + scale_fill_gradient(low = "blue", high = "yellow")
####################################################
library(network)
library(ggplot2)
library(sna)
library(ergm)
plotg <- function(net, value=NULL) {
  # Plot a `network` object with ggplot2: nodes are positioned by a
  # force-directed layout, edges drawn as grey segments, and nodes coloured
  # by the "elements" vertex attribute. `value` is accepted for interface
  # compatibility but unused (as in the original).
  m <- as.matrix.network.adjacency(net) # get sociomatrix
  # coordinates from Fruchterman and Reingold's force-directed placement
  plotcord <- data.frame(gplot.layout.fruchtermanreingold(m, NULL))
  # or get them from Kamada-Kawai's algorithm:
  # plotcord <- data.frame(gplot.layout.kamadakawai(m, NULL))
  colnames(plotcord) <- c("X1", "X2")
  edglist <- as.matrix.network.edgelist(net)
  edges <- data.frame(plotcord[edglist[, 1], ], plotcord[edglist[, 2], ])
  plotcord$elements <- as.factor(get.vertex.attribute(net, "elements"))
  colnames(edges) <- c("X1", "Y1", "X2", "Y2")
  edges$midX <- (edges$X1 + edges$X2) / 2
  edges$midY <- (edges$Y1 + edges$Y2) / 2
  pnet <- ggplot() +
    geom_segment(aes(x = X1, y = Y1, xend = X2, yend = Y2),
                 data = edges, size = 0.5, colour = "grey") +
    geom_point(aes(X1, X2, colour = elements), data = plotcord) +
    scale_colour_brewer(palette = "Set1") +
    # FIX: opts(), theme_blank(), theme_rect() and `breaks = NA` were
    # removed from ggplot2 long ago, so the original errored on any modern
    # version. theme_void() (no axes, titles, or grid) plus a suppressed
    # legend reproduces the same blank canvas.
    theme_void() +
    theme(legend.position = "none")
  return(print(pnet))
}
g <- network(150, directed=FALSE, density=0.03)
classes <- rbinom(150,1,0.5) + rbinom(150,1,0.5) + rbinom(150,1,0.5)
set.vertex.attribute(g, "elements", classes)
plotg(g)
|
# Course-project plot 2: Global Active Power over 1-2 Feb 2007.
# Read the full dataset; skip = 1 drops the header row, so column names
# are supplied manually on the next line.
power <- read.table("household_power_consumption.txt",skip=1,sep=";")
names(power) <- c("Date","Time","Global_active_power","Global_reactive_power","Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3")
# keep only the two days of interest (dates are day/month/year strings here)
subpower <- subset(power,power$Date=="1/2/2007" | power$Date =="2/2/2007")
subpower$Date <- as.Date(subpower$Date, format="%d/%m/%Y")
subpower$Time <- strptime(subpower$Time, format="%H:%M:%S")
# strptime with only a time format stamps every value with the current date;
# rewrite the date part so the first 1440 minutes map to Feb 1 and the next
# 1440 to Feb 2 (the subset is assumed to be exactly two full days).
subpower[1:1440,"Time"] <- format(subpower[1:1440,"Time"],"2007-02-01 %H:%M:%S")
subpower[1441:2880,"Time"] <- format(subpower[1441:2880,"Time"],"2007-02-02 %H:%M:%S")
# line plot of active power vs time; values were read as text, hence the
# as.numeric(as.character(...)) round-trip
plot(subpower$Time,as.numeric(as.character(subpower$Global_active_power)),type="l",xlab="",ylab="Global Active Power (kilowatts)")
title(main="Global Active Power Vs Time")
dev.cur()
# copy the on-screen device to a PNG file and close the PNG device
dev.copy(png, filename = "Plot2.png")
dev.off()
|
/Plot2.R
|
no_license
|
erginozcan1993/ExData_Plotting1
|
R
| false
| false
| 823
|
r
|
power <- read.table("household_power_consumption.txt",skip=1,sep=";")
names(power) <- c("Date","Time","Global_active_power","Global_reactive_power","Voltage","Global_intensity","Sub_metering_1","Sub_metering_2","Sub_metering_3")
subpower <- subset(power,power$Date=="1/2/2007" | power$Date =="2/2/2007")
subpower$Date <- as.Date(subpower$Date, format="%d/%m/%Y")
subpower$Time <- strptime(subpower$Time, format="%H:%M:%S")
subpower[1:1440,"Time"] <- format(subpower[1:1440,"Time"],"2007-02-01 %H:%M:%S")
subpower[1441:2880,"Time"] <- format(subpower[1441:2880,"Time"],"2007-02-02 %H:%M:%S")
plot(subpower$Time,as.numeric(as.character(subpower$Global_active_power)),type="l",xlab="",ylab="Global Active Power (kilowatts)")
title(main="Global Active Power Vs Time")
dev.cur()
dev.copy(png, filename = "Plot2.png")
dev.off()
|
#' Makes a data.frame that contains the best error rates from a
#' grid search
#'
#' get_best_grid creates a data.frame that has the datasets in
#' the first column and the best error rate obtained in the grid
#' search in the second column.
#' @param grid_data data.frame obtained from get_grid_data or with
#' several datasets from get_grid_data combined with rbind.
#' @return Returns a data.frame with the names of the datasets
#' in the first column and the best loss value in the second
#' column. The first column is named "Data" and the second column
#' is named "Best"
#'
#' @seealso \code{\link{get_grid_data}}, \code{\link{eztune_table}},
#' \code{\link{grid_search}},
#'
#' @export
#'
get_best_grid <- function(grid_data) {
  # For each dataset (column `Data`), find the smallest loss observed in
  # the grid search and return a plain data.frame with columns Data / Best.
  #
  # FIX: the original piped with a bare `%>%`, which errors unless
  # magrittr/dplyr is *attached*; every other call here is already
  # dplyr::-qualified, so plain sequential calls keep the function usable
  # when the package is merely loaded.
  grouped <- dplyr::group_by(grid_data, Data)
  best <- dplyr::summarize(grouped, Best = min(Loss, na.rm = TRUE))
  as.data.frame(best)
}
|
/R/get_best_grid.R
|
no_license
|
jillbo1000/EZtuneTest
|
R
| false
| false
| 864
|
r
|
#' Makes a data.frame that contains the best error rates from a
#' grid search
#'
#' get_best_grid creates a data.frame that has the datasets in
#' the first column and the best error rate obtained in the grid
#' search in the second column.
#' @param grid_data data.frame obtained from get_grid_data or with
#' several datasets from get_grid_data combined with rbind.
#' @return Returns a data.frame with the names of the datasets
#' in the first column and the best loss value in the second
#' column. The first column is named "Data" and the second column
#' is named "Best"
#'
#' @seealso \code{\link{get_grid_data}}, \code{\link{eztune_table}},
#' \code{\link{grid_search}},
#'
#' @export
#'
get_best_grid <- function(grid_data) {
  # For each dataset (column `Data`), find the smallest loss observed in
  # the grid search and return a plain data.frame with columns Data / Best.
  #
  # FIX: the original piped with a bare `%>%`, which errors unless
  # magrittr/dplyr is *attached*; every other call here is already
  # dplyr::-qualified, so plain sequential calls keep the function usable
  # when the package is merely loaded.
  grouped <- dplyr::group_by(grid_data, Data)
  best <- dplyr::summarize(grouped, Best = min(Loss, na.rm = TRUE))
  as.data.frame(best)
}
|
#Load Libraries
library(shiny)
library(ggplot2)
library(dplyr)
library(leaflet)
# Define UI for miles per gallon application
# UI definition for the climate-change explorer: a title plus a three-tab
# navbar (interactive plot, raw data table, leaflet world map).
shinyUI(fluidPage(
  # Application title
  titlePanel("Climate Change in Major Country"),
  navbarPage("", id = "nav",
    # Tab 1: city/month selectors driving a regression or point plot
    tabPanel("Interactive map",
      sidebarLayout(
        sidebarPanel(
          helpText("Select one or more cities:"),
          uiOutput("CitySelector"),
          helpText("Select one or more months:"),
          uiOutput("MonthSelector"),
          helpText("Select type of plot or histogram:"),
          checkboxGroupInput("checkPlot",
                             label = ("Plots"),
                             choices = c("GAM Plot", "Point Plot"),
                             selected = "GAM Plot"),
          helpText("Dataset is available below:"),
          tags$a(href = "https://www.kaggle.com/berkeleyearth/climate-change-earth-surface-temperature-data", "Source")
        ),
        # main panel: overview text above the rendered plot(s)
        mainPanel(
          textOutput("overview"),
          plotOutput("RegPlot")
        )
      )
    ),
    # Tab 2: searchable table of the underlying data
    tabPanel("Data explorer",
      basicPage(
        DT::dataTableOutput("climatetable")
      )
    ),
    # Tab 3: leaflet world map
    tabPanel("World Map",
      basicPage(
        leafletOutput("pal", height = 400)
      )
    )
  )
))
|
/shiny/ui.R
|
no_license
|
coperli/Climate-Change
|
R
| false
| false
| 2,214
|
r
|
#Load Libraries
library(shiny)
library(ggplot2)
library(dplyr)
library(leaflet)
# Define UI for miles per gallon application
shinyUI(fluidPage(
# Application title
titlePanel("Climate Change in Major Country"),
navbarPage("", id="nav",
tabPanel("Interactive map",
# Sidebar with controls to select city, month and type of plot
sidebarLayout(
sidebarPanel(
helpText("Select one or more cities:"),
uiOutput("CitySelector"),
helpText("Select one or more months:"),
uiOutput("MonthSelector"),
helpText("Select type of plot or histogram:"),
checkboxGroupInput("checkPlot",
label = ("Plots"),
choices=c("GAM Plot","Point Plot"),
selected = "GAM Plot"
),
helpText("Dataset is available below:"),
tags$a(href = "https://www.kaggle.com/berkeleyearth/climate-change-earth-surface-temperature-data", "Source")
),
#Main Panel contains the plot/s
mainPanel(
textOutput("overview"),
plotOutput("RegPlot")
)
)
),
tabPanel("Data explorer",
basicPage(
DT::dataTableOutput("climatetable")
)),
tabPanel("World Map",
basicPage(
leafletOutput("pal", height=400)
))
)
))
|
# Course-project plot 3: the three sub-metering series for 1-2 Feb 2007,
# overlaid as coloured lines with a legend, written straight to a PNG.
# read in all data, set stringsAsFactors=FALSE to deal with date and numerical conversions
alldf<-read.csv("household_power_consumption.txt",sep=";",stringsAsFactors=FALSE)
#subset only the dates we're interested in
#NOTE THE DATE CONVENTION: it's day/month/year
df<-alldf[alldf$Date=="1/2/2007"|alldf$Date=="2/2/2007",]
#Convert to POSIX date
df$Date<-strptime(df$Date,format="%d/%m/%Y")
#Convert strings to numeric
df$Global_active_power<-as.numeric(df$Global_active_power)
#Create new column, that has the full date and time
df$datetime<-paste(df$Date,df$Time)
#Convert datetime to POSIX
df$datetime<-strptime(df$datetime,format="%Y-%m-%d %H:%M:%S")
#Convert strings to numeric. Could also do it with a range, but this is explicit
df$Sub_metering_1<-as.numeric(df$Sub_metering_1)
df$Sub_metering_2<-as.numeric(df$Sub_metering_2)
df$Sub_metering_3<-as.numeric(df$Sub_metering_3)
#open the png device to export file
#This method seems to work better than the dev.copy() one, it doesn't chop off bits
png("plot3.png")
#Plots with labels, legends, etc
#the call to plot() has type "n" to avoid printing anything
#plot() is called with df$datetime and df$Sub_metering_1 as a way to set the scales right
plot(df$datetime,df$Sub_metering_1,type="n",ylab="Energy sub metering",xlab="")
lines(df$datetime,df$Sub_metering_1,col="black",type="l")
lines(df$datetime,df$Sub_metering_2,col="red",type="l")
lines(df$datetime,df$Sub_metering_3,col="blue",type="l")
legend("topright",c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),lty=c(1,1,1),col=c("black","red","blue"))
#close the png device
dev.off()
|
/plot3.R
|
no_license
|
papadopc/ExData_Plotting1
|
R
| false
| false
| 1,598
|
r
|
# read in all data, set stringsAsFactors=FALSE to deal with date and numerical conversions
alldf<-read.csv("household_power_consumption.txt",sep=";",stringsAsFactors=FALSE)
#subset only the dates we're intersted in
#NOTE THE DATE CONVENTION, it day/month/year
df<-alldf[alldf$Date=="1/2/2007"|alldf$Date=="2/2/2007",]
#Convert to POSIX date
df$Date<-strptime(df$Date,format="%d/%m/%Y")
#Convert strings to numeric
df$Global_active_power<-as.numeric(df$Global_active_power)
#Create new column, that has the full date and time
df$datetime<-paste(df$Date,df$Time)
#Convert datetime to POSIX
df$datetime<-strptime(df$datetime,format="%Y-%m-%d %H:%M:%S")
#Convert strings to numeric. Could also do it with a range, but this is explicit
df$Sub_metering_1<-as.numeric(df$Sub_metering_1)
df$Sub_metering_2<-as.numeric(df$Sub_metering_2)
df$Sub_metering_3<-as.numeric(df$Sub_metering_3)
#open the png device to export file
#This method seems to work better than the dev.copy() one, it doesn't chopp off bits
png("plot3.png")
#Plots with labels, legends, etc
#the call to plot() has type "n" to avoid printing anything
#plot() is called with df$datetime and df$Sub_metering_1 as a way to set the scales right
plot(df$datetime,df$Sub_metering_1,type="n",ylab="Energy sub metering",xlab="")
lines(df$datetime,df$Sub_metering_1,col="black",type="l")
lines(df$datetime,df$Sub_metering_2,col="red",type="l")
lines(df$datetime,df$Sub_metering_3,col="blue",type="l")
legend("topright",c("Sub_metering_1","Sub_metering_2","Sub_metering_3"),lty=c(1,1,1),col=c("black","red","blue"))
#close the png device
dev.off()
|
context("Stacking Helper Functions")
# load the libraries
library(ModelComparison)
# Verifies that GetPredictionsForStacking appends one prediction column per
# fitted model to the original feature matrix, and that the new columns are
# named after the models.
test_that("Add predictions to df for training with stacking", {
  # helper function for a 2 response iris dataset
  iris <- PrepareIris()
  x.values = iris[, 1:4]
  dim.x <- dim(x.values)
  # create the models
  comp <- GetModelComparisons(x.values, iris[,5])
  # expected shape: original columns plus one column per model
  dim.exp.x <- dim.x
  dim.exp.x[[2]] = dim.exp.x[[2]] + length(comp$model.list)
  # expected names: feature names followed by model names
  expected.names <- c(as.character(colnames(x.values)), as.character(names(comp$model.list)))
  # get the new dataframe
  df.for.stacking <- ModelComparison::GetPredictionsForStacking(comp$model.list, x.values)
  expect_equal(dim(df.for.stacking), dim.exp.x)
  expect_equal(names(df.for.stacking), expected.names)
})
|
/tests/testthat/test_stacking_help.R
|
no_license
|
orionw/ModelComparison
|
R
| false
| false
| 818
|
r
|
context("Stacking Helper Functions")
# load the libraries
library(ModelComparison)
test_that("Add predictions to df for training with stacking", {
# helper function for a 2 response iris dataset
iris <- PrepareIris()
x.values = iris[, 1:4]
dim.x <- dim(x.values)
# create the models
comp <- GetModelComparisons(x.values, iris[,5])
# get expected values
dim.exp.x <- dim.x
dim.exp.x[[2]] = dim.exp.x[[2]] + length(comp$model.list)
# get expected names
expected.names <- c(as.character(colnames(x.values)), as.character(names(comp$model.list)))
# get the new dataframe
df.for.stacking <- ModelComparison::GetPredictionsForStacking(comp$model.list, x.values)
expect_equal(dim(df.for.stacking), dim.exp.x)
expect_equal(names(df.for.stacking), expected.names)
})
|
###############################################################################
###############################################################################
###############################################################################
## loaduju balíčky ------------------------------------------------------------
library(openxlsx)
## ----------------------------------------------------------------------------
###############################################################################
## nastavuji pracovní složku --------------------------------------------------
setwd(choose.dir())
mother_working_directory <- getwd()
## ----------------------------------------------------------------------------
###############################################################################
## vytvářím složku pro výsledné diagramy --------------------------------------
if(!file.exists("diagramy_nad_vysledky_uchazecu")){
dir.create(file.path(
mother_working_directory, "diagramy_nad_vysledky_uchazecu"
))
}
## ----------------------------------------------------------------------------
## ----------------------------------------------------------------------------
###############################################################################
###############################################################################
###############################################################################
## helper funkce --------------------------------------------------------------
## funkce pro získání bodových zisků ----------------------------------------
getMyScores <- function(data){
    ## Total score per applicant: the number of item columns (columns whose
    ## name contains a digit) in which the recorded answer contains an "X".
    ##
    ## FIX: vectorised replacement for the original row-by-row loop that
    ## grew `my_scores` with c(); also robust to a zero-row input, where
    ## the original `1:dim(data)[1]` loop ran for i in c(1, 0).
    item_cols <- which(grepl(pattern = "[0-9]+", x = colnames(data)))
    answers <- as.matrix(data[, item_cols, drop = FALSE])
    # grepl flattens the matrix column-major; reshape back to rows
    hits <- matrix(grepl("X", answers), nrow = nrow(data))
    return(as.integer(rowSums(hits)))
}
## ----------------------------------------------------------------------------
## funkce pro vytvoření nula-jedničkovou transformaci -------------------------
getMyTrueFalseTable <- function(data){
    ## Recode the answer sheet as TRUE/FALSE: an "id" column followed by one
    ## logical column per item (columns whose name contains a digit), TRUE
    ## when the recorded answer contains an "X".
    ##
    ## FIX: replaces the original cbind-in-a-loop data.frame growth with a
    ## single lapply over the item columns; output columns and values are
    ## unchanged.
    item_cols <- which(grepl(pattern = "[0-9]+", x = colnames(data)))
    id_cols <- which(grepl("id", colnames(data)))
    if(length(id_cols) > 0){
        # first column whose name contains "id" supplies the identifiers
        id_col <- data[, id_cols[1]]
    }else{
        # no id column present: placeholder column of NA factors, mirroring
        # the original behaviour
        id_col <- as.factor(rep(NA, dim(data)[1]))
    }
    flags <- lapply(data[, item_cols, drop = FALSE],
                    function(col) grepl("X", col))
    temp_data <- data.frame(id = id_col, flags, check.names = FALSE)
    colnames(temp_data) <- c("id", colnames(data)[item_cols])
    return(temp_data)
}
## ----------------------------------------------------------------------------
## funkce pro vykreslení histogramu nad bodovými zisky v rámci předmětu -------
getMyHistogram <- function(my_data, number_of_breaks = 5){
    ## Draw a histogram of applicants' total scores. The x-axis runs from 0
    ## to the number of item columns, i.e. the maximum achievable score.
    ## Relies on the sibling helper getMyScores() defined above. Plot labels
    ## are in Czech by design (the report language).
    ###########################################################################
    my_scores <- getMyScores(my_data)
    ###########################################################################
    ## draw the histogram -----------------------------------------------------
    hist(
        my_scores,
        breaks = number_of_breaks,
        xlim = c(0, length(
            which(
                grepl(pattern = "[0-9]+", x = colnames(my_data))
            )
        )),
        main = "Histogram bodových zisků uchazečů \nvšeobecné lékařství",
        xlab = "hodnota bodového zisku",
        ylab = "absolutní počet studentů",
        col = "lightgrey"
    )
}
## ----------------------------------------------------------------------------
## funkce pro vykreslení obtížnost-diskriminace diagramu ----------------------
getMyDifficultyDiscriminationPlot <- function(my_data){
    ## Draw a difficulty-vs-discrimination barplot for all test items: one
    ## triple of bars per question (difficulty, 5th-vs-1st-quintile
    ## discrimination, 5th-vs-4th-quintile discrimination), items ordered by
    ## difficulty. Relies on the sibling helpers getMyScores() and
    ## getMyTrueFalseTable() defined above; plot labels are in Czech.
    ###########################################################################
    ## compute the total score for every applicant
    my_scores <- getMyScores(my_data)
    ###########################################################################
    ## build the TRUE/FALSE transformation of answer correctness;
    ## the applicants' raw answers are no longer needed from here on
    my_data <- getMyTrueFalseTable(my_data)
    ###########################################################################
    ## compute quintile boundaries of the score distribution
    my_quintiles <- NULL
    for(i in 0:5){
        my_quintiles <- c(my_quintiles,
                          quantile(my_scores, probs = 0.2*i, names = FALSE)
        )
    }
    # force the outer boundaries to 0 and (max score + 1) so that every
    # applicant falls strictly inside some quintile band
    my_quintiles[c(1, 6)] <- c(0, (dim(my_data)[2] - 1) + 1)
    my_quintiles <- ceiling(my_quintiles)
    ###########################################################################
    ## build the bars
    ## item difficulty is taken as 1 - (share of applicants answering the
    ## item correctly among all applicants answering it)
    obtiznosti <- NULL
    for(i in 2:dim(my_data)[2]){
        obtiznosti <- c(obtiznosti,
                        1 - length(which(my_data[, i]))/length(my_data[, i])
        )
    }
    ###########################################################################
    ## item discrimination is taken as the share of correct answers in one
    ## score quantile minus the share of correct answers in another, lower
    ## quantile; computed twice:
    ## as 5th quintile - 1st quintile
    ## and as 5th quintile - 4th quintile
    ###########################################################################
    diskriminace_nizsi <- NULL
    for(i in 2:dim(my_data)[2]){
        diskriminace_nizsi <- c(
            diskriminace_nizsi,
            ((sum(
                my_data[which(my_scores >= my_quintiles[5]), i]
            ) / length(
                my_data[which(my_scores >= my_quintiles[5]), i]
            )) - (sum(
                my_data[which(my_scores < my_quintiles[2]), i]
            ) / length(
                my_data[which(my_scores < my_quintiles[2]), i]
            )))
        )
    }
    ###########################################################################
    diskriminace_vyssi <- NULL
    for(i in 2:dim(my_data)[2]){
        diskriminace_vyssi <- c(
            diskriminace_vyssi,
            ((sum(
                my_data[which(my_scores >= my_quintiles[5]), i]
            ) / length(
                my_data[which(my_scores >= my_quintiles[5]), i]
            )) - (sum(
                my_data[which(my_scores < my_quintiles[5] &
                              my_scores >= my_quintiles[4]), i]
            ) / length(
                my_data[which(my_scores < my_quintiles[5] &
                              my_scores >= my_quintiles[4]), i]
            )))
        )
    }
    ###########################################################################
    ## clip strongly negative discrimination values; purely cosmetic, so
    ## that negative bars do not overlap the item labels in the plot
    diskriminace_nizsi[which(diskriminace_nizsi < 0)] <- -0.01
    diskriminace_vyssi[which(diskriminace_vyssi < 0)] <- -0.01
    ###########################################################################
    ## assemble the matrix to plot (items ordered by difficulty)
    my_sample <- rbind("obtiznost" = obtiznosti[order(obtiznosti)],
                       "5p_1p" = diskriminace_nizsi[order(obtiznosti)],
                       "5p_4p" = diskriminace_vyssi[order(obtiznosti)]
    )
    my_colours <- c("red", "darkgrey", "blue")
    ###########################################################################
    ## draw the final barplot
    barplot(
        my_sample,
        beside = TRUE,
        col = my_colours,
        xlab = "",
        ylim = c(0.0, 1.0),
        main = "Diagram obtížnosti vs. diskriminace \nvšeobecné lékařství",
        horiz = FALSE
    )
    title(xlab = "číslo otázky (řazeno dle obtížnosti)",
          line = 4)
    # dashed reference lines at conventional discrimination thresholds
    abline(h = c(0.2, 0.4), lty = 2)
    # item labels under each triple of bars, ordered by difficulty
    axis(side = 1,
         at = seq(2.5, 2.5 + 4 * (dim(my_data)[2] - 2), by = 4),
         labels = colnames(my_data)[
             2:dim(my_data)[2]
         ][order(obtiznosti)],
         las = 2
    )
    legend(x = "topleft",
           inset = c(0.04, 0.0),
           legend = c(
               "obtížnost",
               "diskriminace (rozdíl podílu úspěšných 5. a 1. pětiny)",
               "diskriminace (rozdíl podílu úspěšných 5. a 4. pětiny)"
           ),
           pch = 19,
           col = my_colours,
           bg = "white",
           title = expression(bold("legenda")))
}
## ----------------------------------------------------------------------------
## printable verze 'obtížnost-diskriminace' diagramu --------------------------
getMyFirstHalfDifficultyDiscriminationPlot <- function(my_data){
    ###########################################################################
    ## Printable "difficulty vs. discrimination" barplot restricted to the
    ## HARDER half of the items (items are ordered by increasing
    ## difficulty and the upper half is displayed).
    ## 'my_data' is the raw answer sheet: item columns carry digits in
    ## their names and correct answers are marked with an "X".
    ###########################################################################
    ## mine the total scores of all applicants
    my_scores <- getMyScores(my_data)
    ###########################################################################
    ## 0/1 transformation according to answer correctness; the raw
    ## applicant answers are not needed from here on
    my_data <- getMyTrueFalseTable(my_data)
    ###########################################################################
    ## score quintiles; the outer bounds are forced to the theoretical
    ## score range so every applicant falls into one of five bands
    my_quintiles <- NULL
    for(i in 0:5){
        my_quintiles <- c(my_quintiles,
                          quantile(my_scores, probs = 0.2*i, names = FALSE)
                          )
    }
    my_quintiles[c(1, 6)] <- c(0, (dim(my_data)[2] - 1) + 1)
    my_quintiles <- ceiling(my_quintiles)
    ###########################################################################
    ## item difficulty = 1 - share of correctly answering students among
    ## all students answering the item
    obtiznosti <- NULL
    for(i in 2:dim(my_data)[2]){
        obtiznosti <- c(obtiznosti,
                        1 - length(which(my_data[, i]))/length(my_data[, i])
                        )
    }
    ###########################################################################
    ## item discrimination = success rate of the top (5th) quintile minus
    ## the success rate of another quintile; computed twice:
    ## as 5th - 1st quintile, and as 5th - 4th quintile
    ###########################################################################
    diskriminace_nizsi <- NULL
    for(i in 2:dim(my_data)[2]){
        diskriminace_nizsi <- c(
            diskriminace_nizsi,
            ((sum(
                my_data[which(my_scores >= my_quintiles[5]), i]
            ) / length(
                my_data[which(my_scores >= my_quintiles[5]), i]
            )) - (sum(
                my_data[which(my_scores < my_quintiles[2]), i]
            ) / length(
                my_data[which(my_scores < my_quintiles[2]), i]
            )))
        )
    }
    ###########################################################################
    diskriminace_vyssi <- NULL
    for(i in 2:dim(my_data)[2]){
        diskriminace_vyssi <- c(
            diskriminace_vyssi,
            ((sum(
                my_data[which(my_scores >= my_quintiles[5]), i]
            ) / length(
                my_data[which(my_scores >= my_quintiles[5]), i]
            )) - (sum(
                my_data[which(my_scores < my_quintiles[5] &
                              my_scores >= my_quintiles[4]), i]
            ) / length(
                my_data[which(my_scores < my_quintiles[5] &
                              my_scores >= my_quintiles[4]), i]
            )))
        )
    }
    ###########################################################################
    ## clip strongly negative discrimination values so that negative bars
    ## do not reach into the item labels below the axis
    diskriminace_nizsi[which(diskriminace_nizsi < 0)] <- -0.01
    diskriminace_vyssi[which(diskriminace_vyssi < 0)] <- -0.01
    ###########################################################################
    ## matrix to be plotted, columns ordered by increasing difficulty
    my_sample <- rbind("obtiznost" = obtiznosti[order(obtiznosti)],
                       "5p_1p" = diskriminace_nizsi[order(obtiznosti)],
                       "5p_4p" = diskriminace_vyssi[order(obtiznosti)]
                       )
    my_colours <- c("red", "darkgrey", "blue")
    ###########################################################################
    ## number of items in the harder (upper) half; for an odd item count
    ## this equals ceiling(n / 2), i.e. one MORE than floor(n / 2)
    n_harder <- length(obtiznosti) - floor(length(obtiznosti) / 2)
    ###########################################################################
    ## final barplot; bottom margin widened so that the rotated item
    ## labels and the x-axis title both fit
    par(mar = c(7, 5, 3, 3))
    barplot(
        my_sample[, (floor(length(obtiznosti) / 2) + 1):length(obtiznosti)],
        beside = TRUE,
        col = my_colours,
        xlab = "",
        ylim = c(0.0, 1.0),
        main = paste(
            "Diagram obtížnosti vs. diskriminace (těžší polovina otázek)",
            "\nvšeobecné lékařství",
            sep = ""),
        horiz = FALSE
    )
    ## BUGFIX: the item numbers sit on the x-axis (axis(side = 1) below),
    ## so the axis title belongs to the x-axis; the original set 'ylab'
    title(xlab = "číslo otázky (řazeno dle obtížnosti)",
          line = 5)
    abline(h = c(0.2, 0.4), lty = 2)
    ## BUGFIX: 'at' must provide exactly as many tick positions as there
    ## are plotted items (n_harder); the original sliced floor(n / 2)
    ## positions, which makes axis() fail for an odd item count.
    ## Each beside = TRUE group of 3 bars plus default spacing is 4 units
    ## wide, hence centres at 2.5, 6.5, ...
    axis(side = 1,
         at = seq(2.5, 2.5 + 4 * (dim(my_data)[2] - 2), by = 4)[
             1:n_harder
         ],
         labels = colnames(my_data)[
             2:dim(my_data)[2]
         ][order(obtiznosti)][
             (floor(length(obtiznosti) / 2) + 1):length(obtiznosti)
         ],
         las = 2
    )
    legend(x = "topleft",
           inset = c(0.0, 0.0),
           legend = c(
               "obtížnost",
               "diskriminace (rozdíl podílu úspěšných 5. a 1. pětiny)",
               "diskriminace (rozdíl podílu úspěšných 5. a 4. pětiny)"
           ),
           pch = 19,
           col = my_colours,
           bg = "white",
           title = expression(bold("legenda")),
           cex = 0.7)
}
## ----------------------------------------------------------------------------
getMySecondHalfDifficultyDiscriminationPlot <- function(my_data){
    ###########################################################################
    ## Printable "difficulty vs. discrimination" barplot restricted to the
    ## EASIER half of the items (items are ordered by increasing
    ## difficulty and the lower half is displayed).
    ## 'my_data' is the raw answer sheet: item columns carry digits in
    ## their names and correct answers are marked with an "X".
    ###########################################################################
    ## mine the total scores of all applicants
    my_scores <- getMyScores(my_data)
    ###########################################################################
    ## 0/1 transformation according to answer correctness; the raw
    ## applicant answers are not needed from here on
    my_data <- getMyTrueFalseTable(my_data)
    ###########################################################################
    ## score quintiles; the outer bounds are forced to the theoretical
    ## score range so every applicant falls into one of five bands
    my_quintiles <- NULL
    for(i in 0:5){
        my_quintiles <- c(my_quintiles,
                          quantile(my_scores, probs = 0.2*i, names = FALSE)
                          )
    }
    my_quintiles[c(1, 6)] <- c(0, (dim(my_data)[2] - 1) + 1)
    my_quintiles <- ceiling(my_quintiles)
    ###########################################################################
    ## item difficulty = 1 - share of correctly answering students among
    ## all students answering the item
    obtiznosti <- NULL
    for(i in 2:dim(my_data)[2]){
        obtiznosti <- c(obtiznosti,
                        1 - length(which(my_data[, i]))/length(my_data[, i])
                        )
    }
    ###########################################################################
    ## item discrimination = success rate of the top (5th) quintile minus
    ## the success rate of another quintile; computed twice:
    ## as 5th - 1st quintile, and as 5th - 4th quintile
    ###########################################################################
    diskriminace_nizsi <- NULL
    for(i in 2:dim(my_data)[2]){
        diskriminace_nizsi <- c(
            diskriminace_nizsi,
            ((sum(
                my_data[which(my_scores >= my_quintiles[5]), i]
            ) / length(
                my_data[which(my_scores >= my_quintiles[5]), i]
            )) - (sum(
                my_data[which(my_scores < my_quintiles[2]), i]
            ) / length(
                my_data[which(my_scores < my_quintiles[2]), i]
            )))
        )
    }
    ###########################################################################
    diskriminace_vyssi <- NULL
    for(i in 2:dim(my_data)[2]){
        diskriminace_vyssi <- c(
            diskriminace_vyssi,
            ((sum(
                my_data[which(my_scores >= my_quintiles[5]), i]
            ) / length(
                my_data[which(my_scores >= my_quintiles[5]), i]
            )) - (sum(
                my_data[which(my_scores < my_quintiles[5] &
                              my_scores >= my_quintiles[4]), i]
            ) / length(
                my_data[which(my_scores < my_quintiles[5] &
                              my_scores >= my_quintiles[4]), i]
            )))
        )
    }
    ###########################################################################
    ## clip strongly negative discrimination values so that negative bars
    ## do not reach into the item labels below the axis
    diskriminace_nizsi[which(diskriminace_nizsi < 0)] <- -0.01
    diskriminace_vyssi[which(diskriminace_vyssi < 0)] <- -0.01
    ###########################################################################
    ## matrix to be plotted, columns ordered by increasing difficulty
    my_sample <- rbind("obtiznost" = obtiznosti[order(obtiznosti)],
                       "5p_1p" = diskriminace_nizsi[order(obtiznosti)],
                       "5p_4p" = diskriminace_vyssi[order(obtiznosti)]
                       )
    my_colours <- c("red", "darkgrey", "blue")
    ###########################################################################
    ## final barplot -- the easier (lower) half of the difficulty-ordered
    ## items; bottom margin widened so that the rotated item labels and
    ## the x-axis title both fit
    par(mar = c(7, 5, 3, 3))
    barplot(
        my_sample[, 1:floor(length(obtiznosti) / 2)],
        beside = TRUE,
        col = my_colours,
        xlab = "",
        ylim = c(0.0, 1.0),
        main = paste("Diagram obtížnosti vs. diskriminace ",
                     "(lehčí polovina otázek)",
                     "\nvšeobecné lékařství",
                     sep = ""),
        horiz = FALSE
    )
    ## BUGFIX: the item numbers sit on the x-axis (axis(side = 1) below),
    ## so the axis title belongs to the x-axis; the original set 'ylab'
    title(xlab = "číslo otázky (řazeno dle obtížnosti)",
          line = 5)
    abline(h = c(0.2, 0.4), lty = 2)
    ## one tick per plotted item; each beside = TRUE group of 3 bars plus
    ## default spacing is 4 units wide, hence centres at 2.5, 6.5, ...
    axis(side = 1,
         at = seq(2.5, 2.5 + 4 * (dim(my_data)[2] - 2), by = 4)[
             1:floor(length(obtiznosti) / 2)
         ],
         labels = colnames(my_data)[
             2:dim(my_data)[2]
         ][order(obtiznosti)][
             1:floor(length(obtiznosti) / 2)
         ],
         las = 2
    )
    legend(x = "topleft",
           inset = c(0.0, 0.0),
           legend = c(
               "obtížnost",
               "diskriminace (rozdíl podílu úspěšných 5. a 1. pětiny)",
               "diskriminace (rozdíl podílu úspěšných 5. a 4. pětiny)"
           ),
           pch = 19,
           col = my_colours,
           bg = "white",
           title = expression(bold("legenda")),
           cex = 0.7)
}
## ----------------------------------------------------------------------------
## function drawing a separate diagram with the overall success rate of
## the five applicant score quintiles on one selected item --------------------
getMyOverallSuccessRatePlot <- function(my_data, my_item){
    ###########################################################################
    ## mine the total scores of all applicants
    my_scores <- getMyScores(my_data)
    ###########################################################################
    ## 0/1 transformation according to correctness of the answers
    my_data <- getMyTrueFalseTable(my_data)
    ###########################################################################
    ## score quintiles; the outer bounds are then forced to the
    ## theoretical score range so that every score falls into a band
    my_quintiles <- NULL
    for(i in 0:5){
        my_quintiles <- c(my_quintiles,
                          quantile(my_scores, probs = 0.2*i, names = FALSE)
                          )
    }
    my_quintiles[c(1, 6)] <- c(0, (dim(my_data)[2] - 1) + 1)
    my_quintiles <- ceiling(my_quintiles)
    ###########################################################################
    ## bar heights: success rate on the selected item within each of the
    ## five score bands (half-open intervals [q_i, q_{i+1}))
    my_bars <- NULL
    for(i in 1:5){
        my_bars <- c(
            my_bars,
            sum(
                my_data[which(
                    my_quintiles[i] <= my_scores &
                    my_scores < my_quintiles[i + 1]
                ),
                as.character(my_item)
                ]) / length(
                my_data[which(
                    my_quintiles[i] <= my_scores &
                    my_scores < my_quintiles[i + 1]
                ),
                as.character(my_item)
                ])
        )
    }
    ###########################################################################
    ## x-axis labels: the score range covered by each quintile band
    my_labels <- NULL
    for(i in 1:5){
        my_labels <- c(
            my_labels,
            paste(my_quintiles[i], my_quintiles[i + 1] - 1,
                  sep = " - ")
        )
    }
    ###########################################################################
    ## plot the per-quintile success rate of the given item;
    ## xpd = TRUE so that the legend may be drawn in the right margin
    par(mar = c(8, 5, 6, 8), xpd = TRUE)
    plot(
        c(0.5:4.5),
        my_bars,
        ylim = c(0.0, 1.0),
        col = "green",
        pch = 19,
        type = "b",
        xlab = "celkový počet bodů",
        xaxt = "n",
        ylab = "relativní četnost odpovědi",
        main = paste(
            "Úspěšnost jednotlivých pětin dle celkového počtu bodů, položka ",
            my_item, "\nvšeobecné lékařství",
            sep = "")
    )
    ## complementary series: share of wrong answers (1 - success rate)
    points(
        c(0.5:4.5),
        1 - my_bars,
        type = "b",
        col = "red",
        pch = 19
    )
    axis(side = 1,
         at = c(0.5:4.5),
         labels = my_labels
    )
    legend(x = "topright",
           inset = c(-0.15, 0),
           legend = c("správná", "špatná"),
           pch = 19,
           col = c("green", "red"),
           title = "odpověď"
    )
    ###########################################################################
}
## ----------------------------------------------------------------------------
## function drawing a separate diagram with the detailed, per-option
## success rate of the five applicant score quintiles on one item -------------
getMyDetailedSuccessRatePlot <- function(
    my_data,
    my_item
){
    ###########################################################################
    ## build a data.frame with a TRUE/FALSE flag for each option A, B,
    ## C, D for every applicant; rows are four stacked applicant blocks,
    ## one block per option letter (column 'moznost' says which)
    odpovedi <- c("A", "B", "C", "D")
    if(sum(grepl("id", colnames(my_data))) > 0){
        temp_data <- as.data.frame(
            my_data[, which(grepl("id", colnames(my_data)))[1]]
        )
    }else{
        temp_data <- data.frame(
            "id" = rep(as.factor(rep(NA, dim(my_data)[1])), 4)
        )
    }
    for(i in which(grepl(pattern = "[0-9]+", x = colnames(my_data)))){
        new_column <- NULL
        for(letter in odpovedi){
            new_column <- c(
                new_column,
                grepl(pattern = letter, x = my_data[,i])
            )
        }
        temp_data <- as.data.frame(cbind(temp_data, new_column))
    }
    dummy_odpovedi <- NULL
    for(symbol in odpovedi){
        dummy_odpovedi <- c(dummy_odpovedi, rep(symbol, dim(my_data)[1]))
    }
    temp_data <- as.data.frame(cbind(temp_data, dummy_odpovedi))
    colnames(temp_data)<-c(
        "id",
        colnames(my_data)[which(grepl(pattern = "[0-9]+", x = colnames(my_data)))],
        "moznost"
    )
    ## NOTE: assign() inside a function creates a LOCAL variable here,
    ## i.e. this is equivalent to 'odpovedni_arch <- temp_data'
    assign("odpovedni_arch", value = temp_data)
    ###########################################################################
    ## build the answer keys: per item a quadruple of TRUE/FALSE values
    ## where TRUE for a given option means the option is part of the
    ## correct answer combination (correct answers carry an "X" mark)
    temp_data <- as.data.frame(odpovedi)
    for(i in which(grepl(pattern = "[0-9]+", x = colnames(my_data)))){
        if("X" %in% my_data[, i]){
            klic <- rep(TRUE, 4)
        }else{
            ## find the first applicant whose answer carries the "X"
            ## mark; '&' (not '&&') evaluates both sides, and indexing
            ## one row past the end yields NA which grepl() treats as no
            ## match, so the loop terminates safely at j = nrow + 1
            j <- 1
            while(!grepl(pattern = "X",x = my_data[j, i]) & j <= dim(my_data)[1]){
                j <- j + 1
            }
            klic <- NULL
            if(j <= dim(my_data)[1]){
                for(k in 1:length(odpovedi)){
                    klic <- c(klic,
                              grepl(pattern = odpovedi[k], x = my_data[j, i]))
                }
            }
            ## fallback key when no applicant answered the item correctly
            if(j == (dim(my_data)[1] + 1)){
                klic <- rep(FALSE, 4)
            }
        }
        temp_data <- as.data.frame(cbind(temp_data, klic))
    }
    colnames(temp_data) <- c(
        "moznost",
        colnames(my_data)[which(
            grepl(pattern = "[0-9]+", x = colnames(my_data))
        )]
    )
    ## local variable again, equivalent to 'klicovy_arch <- temp_data'
    assign("klicovy_arch", value = temp_data)
    ###########################################################################
    ## mine the total scores of all applicants
    my_scores <- getMyScores(my_data)
    ###########################################################################
    ## 0/1 transformation according to correctness of the answers
    my_data <- getMyTrueFalseTable(my_data)
    ###########################################################################
    ## score quintiles; the outer bounds are then forced to the
    ## theoretical score range so that every score falls into a band
    my_quintiles <- NULL
    for(i in 0:5){
        my_quintiles <- c(my_quintiles,
                          quantile(my_scores, probs = 0.2*i, names = FALSE)
                          )
    }
    my_quintiles[c(1, 6)] <- c(0, (dim(my_data)[2] - 1) + 1)
    my_quintiles <- ceiling(my_quintiles)
    ###########################################################################
    ## bar heights: success rate on the selected item within each of the
    ## five score bands (half-open intervals [q_i, q_{i+1}))
    my_bars <- NULL
    for(i in 1:5){
        my_bars <- c(
            my_bars,
            sum(
                my_data[which(
                    my_quintiles[i] <= my_scores &
                    my_scores < my_quintiles[i + 1]
                ),
                as.character(my_item)
                ]) / length(
                my_data[which(
                    my_quintiles[i] <= my_scores &
                    my_scores < my_quintiles[i + 1]
                ),
                as.character(my_item)
                ])
        )
    }
    ###########################################################################
    ## x-axis labels: the score range covered by each quintile band
    my_labels <- NULL
    for(i in 1:5){
        my_labels <- c(
            my_labels,
            paste(my_quintiles[i], my_quintiles[i + 1] - 1,
                  sep = " - ")
        )
    }
    ###########################################################################
    ## relative frequency of each option letter within each score band;
    ## the rows of the per-letter slice of 'odpovedni_arch' correspond
    ## one-to-one to applicants, so 'my_scores' indexes them directly;
    ## results are stored as A_cetnost, B_cetnost, C_cetnost, D_cetnost
    for(letter in odpovedi){
        temp_data <- NULL
        for(i in 1:5){
            temp_data <- c(
                temp_data,
                sum(
                    subset(
                        odpovedni_arch, moznost == letter)[
                            which(
                                my_quintiles[i] <= my_scores &
                                my_scores < my_quintiles[i + 1]
                            ),
                            as.character(my_item)]
                ) / length(
                    subset(odpovedni_arch, moznost == letter)[
                        which(
                            my_quintiles[i] <= my_scores &
                            my_scores < my_quintiles[i + 1]),
                        as.character(my_item)]
                )
            )
        }
        assign(paste(letter, "cetnost", sep = "_"), temp_data)
    }
    ###########################################################################
    ## answer key of the user-selected item (logical quadruple A..D)
    my_key <- klicovy_arch[, as.character(my_item)]
    ###########################################################################
    ## line types for the legend: solid (1) for options that are part of
    ## the correct answer, dashed (2) otherwise
    my_lty <- NULL
    for(letter in odpovedi){
        my_lty <- c(my_lty,
                    if(which(letter == odpovedi) %in% which(my_key)){1}else{2})
    }
    ###########################################################################
    ## barplot with the item characteristics: grey bars = per-quintile
    ## success rate, overlaid lines = per-option answer frequencies;
    ## xpd = TRUE so that the legend may be drawn in the right margin
    par(mar = c(8, 5, 6, 8), xpd = TRUE)
    barplot(
        my_bars,
        space = rep(0, 5),
        ylim = c(0, 1),
        col = "lightgrey",
        xlab = "celkový počet bodů",
        names = my_labels,
        ylab = "relativní četnost odpovědi"
    )
    title(
        main = paste("Psychometrické charakteristiky, položka ",
                     my_item,
                     "\nvšeobecné lékařství",
                     sep = ""),
        line = 3
    )
    ## one line per option; colour/symbol index equals the option's
    ## position in 'odpovedi', solid line marks a correct option
    for(letter in odpovedi){
        points(
            c(0.5:4.5),
            get(paste(letter, "cetnost", sep = "_")),
            type = "b",
            col = which(letter == odpovedi),
            pch = which(letter == odpovedi),
            lty = if(my_key[which(letter == odpovedi)]){1}else{2}
        )
    }
    ## correct options are typeset in bold in the legend
    legend(x = "topright",
           inset = c(-0.15, 0),
           legend = c(
               if(1 %in% which(my_key)){expression(bold("A"))}else{"A"},
               if(2 %in% which(my_key)){expression(bold("B"))}else{"B"},
               if(3 %in% which(my_key)){expression(bold("C"))}else{"C"},
               if(4 %in% which(my_key)){expression(bold("D"))}else{"D"}
           ),
           pch = c(1:4),
           col = c(1:4),
           lty = my_lty,
           title = "odpověď")
    ###########################################################################
}
## ----------------------------------------------------------------------------
## function drawing a separate diagram with the relative frequency of
## each answer combination (answer schema) for one selected item --------------
getMyAnswerSchemaPlot <- function(
    my_data,
    my_item
){
    ###########################################################################
    ## the full spectrum of possible answer combinations ("" = no answer)
    vsechny_moznosti <- c(
        "",
        "A", "B", "C", "D",
        "AB", "AC", "AD", "BC", "BD", "CD",
        "ABC", "ABD", "ACD", "BCD",
        "ABCD"
    )
    ###########################################################################
    ## build a 1 x 16 table with the frequencies of the individual answer
    ## combinations for the selected item
    my_table <- matrix(rep(0, 16), nrow = 1)
    my_table <- as.data.frame(my_table)
    colnames(my_table) <- vsechny_moznosti
    ## raw frequency table of the recorded answers for the item
    item_table <- table(my_data[, as.character(my_item)])
    ## strip the "X" correctness mark so that e.g. "ABX" matches "AB"
    my_names <- names(item_table)
    for(name in names(item_table)){
        if(grepl("X", name)){
            my_names[which(name == names(item_table))] <- gsub("X", "", name)
        }
    }
    for(i in 1:length(item_table)){
        for(j in 2:length(vsechny_moznosti)){
            if(my_names[i] == vsechny_moznosti[j]){
                my_table[1, vsechny_moznosti[j]] <- item_table[i]
            }
        }
    }
    ## "" (blank) and bare "X" both count as "no answer" (first column)
    for(i in 1:length(names(item_table))){
        if(names(item_table)[i] == "" | names(item_table)[i] == "X"){
            my_table[1, 1] <- item_table[i]
        }
    }
    colnames(my_table)[1] <- "bez odpovědi"
    ###########################################################################
    ## custom x-axis labels: the correct combination is blanked out here
    ## and re-drawn in bold below; 'my_index' marks its position
    ## NOTE(review): when a bare "X" occurs (apparently: the correct
    ## answer is to mark nothing), ALL labels are re-drawn in bold by the
    ## second text() branch -- confirm that this is the intended styling
    if(any(names(item_table) == "X")){
        my_index <- c(1:16)
        my_long_labels <- rep("", 16)
    }else{
        my_index <- 1
        if(sum(grepl("X", names(item_table)))){
            for(i in 1:length(vsechny_moznosti)){
                if(vsechny_moznosti[i] == gsub(
                    "X",
                    "",
                    names(item_table)[which(grepl("X", names(item_table)))])){
                    my_index <- i
                }
            }
        }
        my_long_labels <- c(
            expression(symbol("\306")),
            colnames(my_table)[2:16]
        )
        my_long_labels[my_index] <- ""
    }
    ###########################################################################
    ## build a data.frame with a TRUE/FALSE flag for each option A, B,
    ## C, D for every applicant
    ## NOTE(review): 'odpovedni_arch' built here is never used later in
    ## this function -- candidate for removal
    odpovedi <- c("A", "B", "C", "D")
    if(sum(grepl("id", colnames(my_data))) > 0){
        temp_data <- as.data.frame(
            my_data[, which(grepl("id", colnames(my_data)))[1]]
        )
    }else{
        temp_data <- data.frame(
            "id" = rep(as.factor(rep(NA, dim(my_data)[1])), 4)
        )
    }
    for(i in which(grepl(pattern = "[0-9]+", x = colnames(my_data)))){
        new_column <- NULL
        for(letter in odpovedi){
            new_column <- c(
                new_column,
                grepl(pattern = letter, x = my_data[,i])
            )
        }
        temp_data <- as.data.frame(cbind(temp_data, new_column))
    }
    dummy_odpovedi <- NULL
    for(symbol in odpovedi){
        dummy_odpovedi <- c(dummy_odpovedi, rep(symbol, dim(my_data)[1]))
    }
    temp_data <- as.data.frame(cbind(temp_data, dummy_odpovedi))
    colnames(temp_data)<-c(
        "id",
        colnames(my_data)[which(grepl(pattern = "[0-9]+", x = colnames(my_data)))],
        "moznost"
    )
    ## assign() inside a function creates a LOCAL variable here
    assign("odpovedni_arch", value = temp_data)
    ###########################################################################
    ## build the answer keys: per item a quadruple of TRUE/FALSE values
    ## where TRUE for a given option means the option is part of the
    ## correct answer combination
    temp_data <- as.data.frame(odpovedi)
    for(i in which(grepl(pattern = "[0-9]+", x = colnames(my_data)))){
        if("X" %in% my_data[, i]){
            klic <- rep(TRUE, 4)
        }else{
            ## find the first applicant whose answer carries the "X"
            ## mark; indexing one row past the end yields NA which
            ## grepl() treats as no match, so the loop ends at nrow + 1
            j <- 1
            while(!grepl(pattern = "X",x = my_data[j, i]) & j <= dim(my_data)[1]){
                j <- j + 1
            }
            klic <- NULL
            if(j <= dim(my_data)[1]){
                for(k in 1:length(odpovedi)){
                    klic <- c(klic,
                              grepl(pattern = odpovedi[k], x = my_data[j, i]))
                }
            }
            ## fallback key when no applicant answered the item correctly
            if(j == (dim(my_data)[1] + 1)){
                klic <- rep(FALSE, 4)
            }
        }
        temp_data <- as.data.frame(cbind(temp_data, klic))
    }
    colnames(temp_data) <- c(
        "moznost",
        colnames(my_data)[which(
            grepl(pattern = "[0-9]+", x = colnames(my_data))
        )]
    )
    assign("klicovy_arch", value = temp_data)
    ###########################################################################
    ## mine the total scores of all applicants
    ## NOTE(review): 'my_scores' is never used later in this function
    my_scores <- getMyScores(my_data)
    ###########################################################################
    ## 0/1 transformation according to correctness of the answers
    my_data <- getMyTrueFalseTable(my_data)
    ###########################################################################
    ## answer key of the user-selected item
    ## NOTE(review): 'my_key' is never used later in this function either
    my_key <- klicovy_arch[, as.character(my_item)]
    ###########################################################################
    ## diagram with the relative frequencies of the individual answer
    ## combinations; xpd = TRUE so labels may be drawn in the margin
    par(mar = c(8, 5, 6, 4), xpd = TRUE)
    barplot(
        t(my_table)[, 1] / sum(my_table[1, ]),
        ylim = c(0.0, 1.0),
        xlab = "vzorec odpovědi",
        ylab = "relativní četnost odpovědi",
        xaxt = "n",
        main = paste(
            "Relativní četnost jednotlivých kombinací odpovědí, položka ",
            my_item,
            "\nvšeobecné lékařství",
            sep = "")
    )
    ## rotated labels drawn manually below the axis (bar pitch 1.205)
    text(seq(0.65, 0.65 + 15 * 1.205, 1.205),
         y = -0.06,
         labels = my_long_labels,
         srt = 45)
    ## the correct combination is re-drawn in bold at its position
    if(length(my_index) == 1){
        text(x = 0.65 + 1.205 * (my_index - 1),
             y = -0.06,
             labels = if(my_index == 1){
                 expression(symbol("\306"))
             }else{
                 gsub("X",
                      "",
                      names(item_table)[which(grepl("X", names(item_table)))])},
             font = 2,
             srt = 45)
    }
    ## bare-"X" case: every label is drawn in bold
    if(length(my_index) > 1){
        text(x = 0.65,
             y = -0.06,
             labels = expression(symbol("\306")),
             font = 2,
             srt = 45)
        text(x = seq(0.65 + 1.205, 0.65 + 1.205 * (length(my_index) - 1), 1.205),
             y = -0.06,
             labels = names(my_table)[2:16],
             font = 2,
             srt = 45)
    }
    ###########################################################################
}
## ----------------------------------------------------------------------------
###############################################################################
###############################################################################
###############################################################################
## load the data ---------------------------------------------------------------
## return to the top-level project folder first
setwd(mother_working_directory)
## raw answer sheet downloaded straight from the public GitHub repository;
## the first line is skipped (extra header) and column names are kept
## verbatim (item columns are named by their numbers)
my_data <- read.csv(
    paste(
        "https://raw.githubusercontent.com/LStepanek",
        "Nekolik-postrehu-k-teorii-odpovedi-na-polozku/master/my_data.csv",
        sep = "/"
    ),
    sep = ";",
    skip = 1,
    check.names = FALSE,
    encoding = "UTF-8"
)
## ----------------------------------------------------------------------------
###############################################################################
## preprocessing ---------------------------------------------------------------
## keep an untouched copy of the raw data
data <- my_data
if(!is.null(my_data)){
    ## look for a column holding the applicant's birth number; if
    ## present, rename it to 'id' and coerce it to a factor
    for(i in 1:length(colnames(my_data))){
        if(
            grepl("rodné.číslo", tolower(colnames(my_data)[i])) |
            grepl("rodcislo", tolower(colnames(my_data)[i]))
        ){
            colnames(my_data)[i] <- "id"
            my_data[,i] <- as.factor(as.character(my_data[, i]))
        }
    }
    ## if the dataset contains a field-of-study column ('obor'/'kobor'),
    ## keep only applicants to General Medicine (code "51418" or "LEK")
    ## NOTE(review): levels() returns NULL for character columns (the
    ## factor coercion only happens further below), in which case the
    ## "51418" branch can never fire -- confirm the intended column type
    for(i in 1:length(colnames(my_data))){
        if(
            grepl("obor", tolower(colnames(my_data)[i]))
        ){
            if("51418" %in% levels(my_data[, i])){
                my_data <- subset(my_data, my_data[, i] == "51418")
            }else{
                my_data <- subset(my_data, my_data[, i] == "LEK")
            }
        }
    }
    ## if the dataset contains a 'kolo' (round) column, keep only the
    ## first round of the admission exams
    for(i in 1:length(colnames(my_data))){
        if(
            grepl("kolo", x = tolower(colnames(my_data)[i]))
        ){
            my_data <- subset(my_data, my_data[, i] == "1")
        }
    }
    ## finally re-type the categorical variables so that factors carry
    ## the correct (post-subsetting) set of levels ...
    for(i in 1:dim(my_data)[2]){
        my_data[,i] <- as.character(my_data[, i])
    }
    for(i in 1:dim(my_data)[2]){
        my_data[,i] <- as.factor(my_data[, i])
    }
    ## ... while item columns (names containing digits) stay character
    for(i in which(grepl("[0-9]+", colnames(my_data)))){
        my_data[, i] <- as.character(my_data[, i])
    }
}
## ----------------------------------------------------------------------------
###############################################################################
## generate, for every item of the given module and year, all the
## diagram types ---------------------------------------------------------------
year <- "2020"
predmet <- "biologie"
## all PNG files are written into the dedicated output folder
setwd(
    paste(
        mother_working_directory,
        "diagramy_nad_vysledky_uchazecu",
        sep = "/"
    )
)
## ----------------------------------------------------------------------------
## NOTE(review): paste(predmet, year, sep = "_") is never NULL, so this
## guard is always TRUE -- presumably a leftover from a loop over modules
if(!is.null(paste(predmet,year,sep="_"))){
    data <- my_data
## ----------------------------------------------------------------------------
    ## score histogram with 20 bins
    png(
        filename = paste("histogram_20_",predmet,"_",year,".png",sep=""),
        width=8,
        height=5,
        units="in",
        res=600
    )
    getMyHistogram(data,20)
    dev.off()
## ----------------------------------------------------------------------------
    ## score histogram with 100 bins
    png(
        filename = paste("histogram_100_",predmet,"_",year,".png",sep=""),
        width=8,
        height=5,
        units="in",
        res=600
    )
    getMyHistogram(data,100)
    dev.off()
## ----------------------------------------------------------------------------
    ## difficulty vs. discrimination diagram over ALL items
    png(
        filename = paste("holy_trinity_",predmet,"_",year,".png",sep=""),
        width=24,
        height=8,
        units="in",
        res=600
    )
    getMyDifficultyDiscriminationPlot(data)
    dev.off()
## ----------------------------------------------------------------------------
    ## difficulty vs. discrimination diagram -- harder half of the items
    png(
        filename = paste("holy_trinity_harder_",predmet,"_",year,".png",sep=""),
        width=12,
        height=8,
        units="in",
        res=600
    )
    getMyFirstHalfDifficultyDiscriminationPlot(data)
    dev.off()
## ----------------------------------------------------------------------------
    ## difficulty vs. discrimination diagram -- easier half of the items
    png(
        filename = paste("holy_trinity_easier_",predmet,"_",year,".png",sep=""),
        width=12,
        height=8,
        units="in",
        res=600
    )
    getMySecondHalfDifficultyDiscriminationPlot(data)
    dev.off()
## ----------------------------------------------------------------------------
    ## three per-item diagrams for every item column (names with digits)
    for(my_item in colnames(data)[which(grepl("[0-9]+",colnames(data)))]){
        ## progress report printed to the console
        flush.console()
        print(
            paste(
                "ročník ",year,", předmět ",predmet,", ",
                format(
                    which(
                        colnames(data)[grepl("[0-9]+",colnames(data))]==my_item
                    )/length(
                        which(grepl("[0-9]+",colnames(data)))
                    )*100,nsmall=2
                ),
                " %",
                sep=""
            )
        )
        png(
            filename = paste(
                "overall_success_rate_item_",my_item,"_",predmet,"_",year,".png",sep=""
            ),
            width=10,
            height=6.5,
            units="in",
            res=600
        )
        getMyOverallSuccessRatePlot(data,my_item)
        dev.off()
        png(
            filename = paste(
                "detailed_success_rate_item_",my_item,"_",predmet,"_",year,".png",sep=""
            ),
            width=8,
            height=6.5,
            units="in",
            res=600
        )
        getMyDetailedSuccessRatePlot(data,my_item)
        dev.off()
        png(
            filename = paste(
                "answer_schema_plot_item_",my_item,"_",predmet,"_",year,".png",sep=""
            ),
            width=8,
            height=6.5,
            units="in",
            res=600
        )
        getMyAnswerSchemaPlot(data,my_item)
        dev.off()
    }
## ----------------------------------------------------------------------------
}
## ----------------------------------------------------------------------------
###############################################################################
## ----------------------------------------------------------------------------
###############################################################################
###############################################################################
###############################################################################
|
/script.R
|
no_license
|
LStepanek/Nekolik-postrehu-k-teorii-odpovedi-na-polozku
|
R
| false
| false
| 45,320
|
r
|
###############################################################################
###############################################################################
###############################################################################
## loaduju balíčky ------------------------------------------------------------
library(openxlsx)
## ----------------------------------------------------------------------------
###############################################################################
## set the working directory (interactive folder picker; choose.dir() is
## Windows-only) ---------------------------------------------------------------
setwd(choose.dir())
## remembered so later sections can return here after temporary setwd()s
mother_working_directory <- getwd()
## ----------------------------------------------------------------------------
###############################################################################
## create the output folder for the resulting diagrams (if missing) -----------
if(!file.exists("diagramy_nad_vysledky_uchazecu")){
    dir.create(file.path(
        mother_working_directory, "diagramy_nad_vysledky_uchazecu"
    ))
}
## ----------------------------------------------------------------------------
## ----------------------------------------------------------------------------
###############################################################################
###############################################################################
###############################################################################
## helper funkce --------------------------------------------------------------
## funkce pro získání bodových zisků ----------------------------------------
## function mining the total score of every applicant -------------------------
getMyScores <- function(data){
    ## Total score of each applicant (row of 'data'): the number of item
    ## columns (names containing digits) whose recorded answer carries
    ## the "X" correctness mark.
    ##
    ## Vectorised replacement of the original row-by-row loop, which
    ## grew the result with c() (quadratic) and failed on zero rows
    ## (1:dim(data)[1] iterates over c(1, 0)).
    item_cols <- which(grepl(pattern = "[0-9]+", x = colnames(data)))
    ## one logical vector per item: TRUE where the answer contains "X"
    hits <- lapply(
        data[, item_cols, drop = FALSE],
        function(item) grepl(pattern = "X", x = item)
    )
    ## per-applicant score = number of correctly answered items;
    ## the integer(nrow) seed keeps the result an unnamed integer vector
    ## and makes zero items / zero rows return all-zero / empty scores
    Reduce(`+`, hits, integer(dim(data)[1]))
}
## ----------------------------------------------------------------------------
## funkce pro vytvoření nula-jedničkovou transformaci -------------------------
## function building the 0/1 (logical) transformation of the answers ----------
getMyTrueFalseTable <- function(data){
    ## Returns a data.frame with an 'id' column followed by one logical
    ## column per item (item columns = names containing digits), TRUE
    ## where the recorded answer carries the "X" correctness mark.
    ##
    ## The original grew the result with as.data.frame(cbind(...)) inside
    ## the loop (quadratic copying); direct column assignment produces
    ## the identical data.frame without the repeated copies.
    ##
    ## Applicant identifier: the first column whose name contains "id";
    ## otherwise a placeholder factor column full of NA.
    id_candidates <- which(grepl("id", colnames(data)))
    if(length(id_candidates) > 0){
        result <- data.frame("id" = data[, id_candidates[1]])
    }else{
        result <- data.frame("id" = as.factor(rep(NA, dim(data)[1])))
    }
    ## one logical column per item, keeping the original item names
    for(i in which(grepl(pattern = "[0-9]+", x = colnames(data)))){
        result[[colnames(data)[i]]] <- grepl(pattern = "X", x = data[, i])
    }
    result
}
## ----------------------------------------------------------------------------
## funkce pro vykreslení histogramu nad bodovými zisky v rámci předmětu -------
## function drawing a histogram of the total scores within the module ---------
getMyHistogram <- function(my_data, number_of_breaks = 5){
    ###########################################################################
    ## Histogram of the applicants' total scores; the x-axis always spans
    ## the full theoretical range 0 .. (number of items).
    scores <- getMyScores(my_data)
    ###########################################################################
    ## number of item columns (their names contain digits)
    item_count <- length(
        which(grepl(pattern = "[0-9]+", x = colnames(my_data)))
    )
    ###########################################################################
    ## draw the histogram
    hist(
        scores,
        breaks = number_of_breaks,
        xlim = c(0, item_count),
        main = "Histogram bodových zisků uchazečů \nvšeobecné lékařství",
        xlab = "hodnota bodového zisku",
        ylab = "absolutní počet studentů",
        col = "lightgrey"
    )
}
## ----------------------------------------------------------------------------
## funkce pro vykreslení obtížnost-diskriminace diagramu ----------------------
getMyDifficultyDiscriminationPlot <- function(my_data){
    ## Draws a grouped barplot of item difficulty vs. two discrimination
    ## indices for every test item (item columns are those whose names
    ## match "[0-9]+").
    ##
    ## Depends on helpers defined elsewhere in this file:
    ##   getMyScores()         -- total point score per applicant
    ##   getMyTrueFalseTable() -- TRUE/FALSE answer-correctness table
    ## Side effect only: plots on the active graphics device; no return value.
    ###########################################################################
    ## mine the total point score of every applicant
    my_scores <- getMyScores(my_data)
    ###########################################################################
    ## zero-one transformation according to answer correctness; the raw
    ## applicant answers are no longer needed after this point
    my_data <- getMyTrueFalseTable(my_data)
    ###########################################################################
    ## build the score quintile boundaries
    my_quintiles <- NULL
    for(i in 0:5){
        my_quintiles <- c(my_quintiles,
                          quantile(my_scores, probs = 0.2*i, names = FALSE)
                          )
    }
    ## force the outer boundaries to 0 and (number of items + 1) so that
    ## every achievable score falls inside some quintile interval
    my_quintiles[c(1, 6)] <- c(0, (dim(my_data)[2] - 1) + 1)
    my_quintiles <- ceiling(my_quintiles)
    ###########################################################################
    ## item difficulty = 1 - share of applicants answering the item
    ## correctly out of all applicants answering the item
    obtiznosti <- NULL
    for(i in 2:dim(my_data)[2]){
        obtiznosti <- c(obtiznosti,
                        1 - length(which(my_data[, i]))/length(my_data[, i])
                        )
    }
    ###########################################################################
    ## item discrimination = success rate of the top score quintile minus
    ## the success rate of some lower quintile; computed in two flavours:
    ##   "diskriminace_nizsi" = 5th quintile - 1st quintile
    ##   "diskriminace_vyssi" = 5th quintile - 4th quintile
    ###########################################################################
    diskriminace_nizsi <- NULL
    for(i in 2:dim(my_data)[2]){
        diskriminace_nizsi <- c(
            diskriminace_nizsi,
            ((sum(
                my_data[which(my_scores >= my_quintiles[5]), i]
            ) / length(
                my_data[which(my_scores >= my_quintiles[5]), i]
            )) - (sum(
                my_data[which(my_scores < my_quintiles[2]), i]
            ) / length(
                my_data[which(my_scores < my_quintiles[2]), i]
            )))
        )
    }
    ###########################################################################
    diskriminace_vyssi <- NULL
    for(i in 2:dim(my_data)[2]){
        diskriminace_vyssi <- c(
            diskriminace_vyssi,
            ((sum(
                my_data[which(my_scores >= my_quintiles[5]), i]
            ) / length(
                my_data[which(my_scores >= my_quintiles[5]), i]
            )) - (sum(
                my_data[which(my_scores < my_quintiles[5] &
                              my_scores >= my_quintiles[4]), i]
            ) / length(
                my_data[which(my_scores < my_quintiles[5] &
                              my_scores >= my_quintiles[4]), i]
            )))
        )
    }
    ###########################################################################
    ## clip strongly negative discrimination values to -0.01 purely for
    ## plotting comfort, so that negative bars do not overlap the item
    ## labels underneath the axis
    diskriminace_nizsi[which(diskriminace_nizsi < 0)] <- -0.01
    diskriminace_vyssi[which(diskriminace_vyssi < 0)] <- -0.01
    ###########################################################################
    ## assemble the plotting matrix, items ordered by increasing difficulty
    my_sample <- rbind("obtiznost" = obtiznosti[order(obtiznosti)],
                       "5p_1p" = diskriminace_nizsi[order(obtiznosti)],
                       "5p_4p" = diskriminace_vyssi[order(obtiznosti)]
                       )
    my_colours <- c("red", "darkgrey", "blue")
    ###########################################################################
    ## draw the final barplot
    barplot(
        my_sample,
        beside = TRUE,
        col = my_colours,
        xlab = "",
        ylim = c(0.0, 1.0),
        main = "Diagram obtížnosti vs. diskriminace \nvšeobecné lékařství",
        horiz = FALSE
    )
    title(xlab = "číslo otázky (řazeno dle obtížnosti)",
          line = 4)
    ## dashed guides at the conventional discrimination thresholds
    abline(h = c(0.2, 0.4), lty = 2)
    axis(side = 1,
         at = seq(2.5, 2.5 + 4 * (dim(my_data)[2] - 2), by = 4),
         labels = colnames(my_data)[
             2:dim(my_data)[2]
         ][order(obtiznosti)],
         las = 2
    )
    legend(x = "topleft",
           inset = c(0.04, 0.0),
           legend = c(
               "obtížnost",
               "diskriminace (rozdíl podílu úspěšných 5. a 1. pětiny)",
               "diskriminace (rozdíl podílu úspěšných 5. a 4. pětiny)"
           ),
           pch = 19,
           col = my_colours,
           bg = "white",
           title = expression(bold("legenda")))
}
## ----------------------------------------------------------------------------
## printable verze 'obtížnost-diskriminace' diagramu --------------------------
getMyFirstHalfDifficultyDiscriminationPlot <- function(my_data){
    ## Printable variant of the difficulty-vs-discrimination barplot that
    ## shows only the HARDER half of the items (the upper half of the
    ## difficulty ordering) -- note the function name says "FirstHalf" but
    ## the plotted columns are the second half of the sorted matrix, which
    ## matches the title "(těžší polovina otázek)".
    ##
    ## Fixes vs. the previous version:
    ##   * the item-number label was attached to the y-axis via
    ##     title(ylab = ...) although it describes the x-axis (the y-axis
    ##     shows relative values in [0, 1]); now title(xlab = ...) is used,
    ##     consistently with getMyDifficultyDiscriminationPlot();
    ##   * axis() received floor(n/2) tick positions but ceiling(n/2)
    ##     labels when the item count n was odd, which makes axis() fail;
    ##     positions are now generated with length.out of the plotted half.
    ##
    ## Depends on getMyScores() and getMyTrueFalseTable() defined elsewhere
    ## in this file. Side effect only: plots on the active device.
    ###########################################################################
    ## mine the total point score of every applicant
    my_scores <- getMyScores(my_data)
    ###########################################################################
    ## zero-one transformation according to answer correctness
    my_data <- getMyTrueFalseTable(my_data)
    ###########################################################################
    ## build the score quintile boundaries; outer boundaries forced to 0
    ## and (number of items + 1) so every score falls into some interval
    my_quintiles <- NULL
    for(i in 0:5){
        my_quintiles <- c(my_quintiles,
                          quantile(my_scores, probs = 0.2*i, names = FALSE)
                          )
    }
    my_quintiles[c(1, 6)] <- c(0, (dim(my_data)[2] - 1) + 1)
    my_quintiles <- ceiling(my_quintiles)
    ###########################################################################
    ## item difficulty = 1 - share of correctly answering applicants
    obtiznosti <- NULL
    for(i in 2:dim(my_data)[2]){
        obtiznosti <- c(obtiznosti,
                        1 - length(which(my_data[, i]))/length(my_data[, i])
                        )
    }
    ###########################################################################
    ## discrimination = success rate of the 5th quintile minus success
    ## rate of the 1st quintile ("nizsi") resp. 4th quintile ("vyssi")
    diskriminace_nizsi <- NULL
    for(i in 2:dim(my_data)[2]){
        diskriminace_nizsi <- c(
            diskriminace_nizsi,
            ((sum(
                my_data[which(my_scores >= my_quintiles[5]), i]
            ) / length(
                my_data[which(my_scores >= my_quintiles[5]), i]
            )) - (sum(
                my_data[which(my_scores < my_quintiles[2]), i]
            ) / length(
                my_data[which(my_scores < my_quintiles[2]), i]
            )))
        )
    }
    ###########################################################################
    diskriminace_vyssi <- NULL
    for(i in 2:dim(my_data)[2]){
        diskriminace_vyssi <- c(
            diskriminace_vyssi,
            ((sum(
                my_data[which(my_scores >= my_quintiles[5]), i]
            ) / length(
                my_data[which(my_scores >= my_quintiles[5]), i]
            )) - (sum(
                my_data[which(my_scores < my_quintiles[5] &
                              my_scores >= my_quintiles[4]), i]
            ) / length(
                my_data[which(my_scores < my_quintiles[5] &
                              my_scores >= my_quintiles[4]), i]
            )))
        )
    }
    ###########################################################################
    ## clip strongly negative discrimination values for plotting comfort
    diskriminace_nizsi[which(diskriminace_nizsi < 0)] <- -0.01
    diskriminace_vyssi[which(diskriminace_vyssi < 0)] <- -0.01
    ###########################################################################
    ## plotting matrix, items ordered by increasing difficulty
    my_sample <- rbind("obtiznost" = obtiznosti[order(obtiznosti)],
                       "5p_1p" = diskriminace_nizsi[order(obtiznosti)],
                       "5p_4p" = diskriminace_vyssi[order(obtiznosti)]
                       )
    my_colours <- c("red", "darkgrey", "blue")
    ###########################################################################
    ## indices of the harder (second) half of the sorted items
    harder_half <- (floor(length(obtiznosti) / 2) + 1):length(obtiznosti)
    ###########################################################################
    ## draw the final barplot; bottom margin enlarged so that both the
    ## rotated item labels and the x-axis label fit
    par(mar = c(6, 6, 3, 3))
    barplot(
        my_sample[, harder_half],
        beside = TRUE,
        col = my_colours,
        xlab = "",
        ylim = c(0.0, 1.0),
        main = paste(
            "Diagram obtížnosti vs. diskriminace (těžší polovina otázek)",
            "\nvšeobecné lékařství",
            sep = ""),
        horiz = FALSE
    )
    title(xlab = "číslo otázky (řazeno dle obtížnosti)",
          line = 4)
    ## dashed guides at the conventional discrimination thresholds
    abline(h = c(0.2, 0.4), lty = 2)
    ## one tick per plotted group: groups of 3 bars + 1 gap are 4 units
    ## wide, the first group is centred at 2.5
    axis(side = 1,
         at = seq(2.5, by = 4, length.out = length(harder_half)),
         labels = colnames(my_data)[
             2:dim(my_data)[2]
         ][order(obtiznosti)][harder_half],
         las = 2
    )
    legend(x = "topleft",
           inset = c(0.0, 0.0),
           legend = c(
               "obtížnost",
               "diskriminace (rozdíl podílu úspěšných 5. a 1. pětiny)",
               "diskriminace (rozdíl podílu úspěšných 5. a 4. pětiny)"
           ),
           pch = 19,
           col = my_colours,
           bg = "white",
           title = expression(bold("legenda")),
           cex = 0.7)
}
## ----------------------------------------------------------------------------
getMySecondHalfDifficultyDiscriminationPlot <- function(my_data){
    ## Printable variant of the difficulty-vs-discrimination barplot that
    ## shows only the EASIER half of the items (the lower half of the
    ## difficulty ordering) -- note the function name says "SecondHalf"
    ## but the plotted columns are the first half of the sorted matrix,
    ## which matches the title "(lehčí polovina otázek)".
    ##
    ## Fix vs. the previous version: the item-number label was attached to
    ## the y-axis via title(ylab = ...) although it describes the x-axis
    ## (the y-axis shows relative values in [0, 1]); now title(xlab = ...)
    ## is used, consistently with getMyDifficultyDiscriminationPlot().
    ##
    ## Depends on getMyScores() and getMyTrueFalseTable() defined elsewhere
    ## in this file. Side effect only: plots on the active device.
    ###########################################################################
    ## mine the total point score of every applicant
    my_scores <- getMyScores(my_data)
    ###########################################################################
    ## zero-one transformation according to answer correctness
    my_data <- getMyTrueFalseTable(my_data)
    ###########################################################################
    ## build the score quintile boundaries; outer boundaries forced to 0
    ## and (number of items + 1) so every score falls into some interval
    my_quintiles <- NULL
    for(i in 0:5){
        my_quintiles <- c(my_quintiles,
                          quantile(my_scores, probs = 0.2*i, names = FALSE)
                          )
    }
    my_quintiles[c(1, 6)] <- c(0, (dim(my_data)[2] - 1) + 1)
    my_quintiles <- ceiling(my_quintiles)
    ###########################################################################
    ## item difficulty = 1 - share of correctly answering applicants
    obtiznosti <- NULL
    for(i in 2:dim(my_data)[2]){
        obtiznosti <- c(obtiznosti,
                        1 - length(which(my_data[, i]))/length(my_data[, i])
                        )
    }
    ###########################################################################
    ## discrimination = success rate of the 5th quintile minus success
    ## rate of the 1st quintile ("nizsi") resp. 4th quintile ("vyssi")
    diskriminace_nizsi <- NULL
    for(i in 2:dim(my_data)[2]){
        diskriminace_nizsi <- c(
            diskriminace_nizsi,
            ((sum(
                my_data[which(my_scores >= my_quintiles[5]), i]
            ) / length(
                my_data[which(my_scores >= my_quintiles[5]), i]
            )) - (sum(
                my_data[which(my_scores < my_quintiles[2]), i]
            ) / length(
                my_data[which(my_scores < my_quintiles[2]), i]
            )))
        )
    }
    ###########################################################################
    diskriminace_vyssi <- NULL
    for(i in 2:dim(my_data)[2]){
        diskriminace_vyssi <- c(
            diskriminace_vyssi,
            ((sum(
                my_data[which(my_scores >= my_quintiles[5]), i]
            ) / length(
                my_data[which(my_scores >= my_quintiles[5]), i]
            )) - (sum(
                my_data[which(my_scores < my_quintiles[5] &
                              my_scores >= my_quintiles[4]), i]
            ) / length(
                my_data[which(my_scores < my_quintiles[5] &
                              my_scores >= my_quintiles[4]), i]
            )))
        )
    }
    ###########################################################################
    ## clip strongly negative discrimination values for plotting comfort
    diskriminace_nizsi[which(diskriminace_nizsi < 0)] <- -0.01
    diskriminace_vyssi[which(diskriminace_vyssi < 0)] <- -0.01
    ###########################################################################
    ## plotting matrix, items ordered by increasing difficulty
    my_sample <- rbind("obtiznost" = obtiznosti[order(obtiznosti)],
                       "5p_1p" = diskriminace_nizsi[order(obtiznosti)],
                       "5p_4p" = diskriminace_vyssi[order(obtiznosti)]
                       )
    my_colours <- c("red", "darkgrey", "blue")
    ###########################################################################
    ## indices of the easier (first) half of the sorted items
    easier_half <- 1:floor(length(obtiznosti) / 2)
    ###########################################################################
    ## draw the final barplot; bottom margin enlarged so that both the
    ## rotated item labels and the x-axis label fit
    par(mar = c(6, 6, 3, 3))
    barplot(
        my_sample[, easier_half],
        beside = TRUE,
        col = my_colours,
        xlab = "",
        ylim = c(0.0, 1.0),
        main = paste("Diagram obtížnosti vs. diskriminace ",
                     "(lehčí polovina otázek)",
                     "\nvšeobecné lékařství",
                     sep = ""),
        horiz = FALSE
    )
    title(xlab = "číslo otázky (řazeno dle obtížnosti)",
          line = 4)
    ## dashed guides at the conventional discrimination thresholds
    abline(h = c(0.2, 0.4), lty = 2)
    ## one tick per plotted group: groups of 3 bars + 1 gap are 4 units
    ## wide, the first group is centred at 2.5
    axis(side = 1,
         at = seq(2.5, by = 4, length.out = length(easier_half)),
         labels = colnames(my_data)[
             2:dim(my_data)[2]
         ][order(obtiznosti)][easier_half],
         las = 2
    )
    legend(x = "topleft",
           inset = c(0.0, 0.0),
           legend = c(
               "obtížnost",
               "diskriminace (rozdíl podílu úspěšných 5. a 1. pětiny)",
               "diskriminace (rozdíl podílu úspěšných 5. a 4. pětiny)"
           ),
           pch = 19,
           col = my_colours,
           bg = "white",
           title = expression(bold("legenda")),
           cex = 0.7)
}
## ----------------------------------------------------------------------------
## funkce pro vykreslení separátního diagramu na celkovou úspěšnost
## jednotlivých pětin uchazečů ----------------------------------------------
getMyOverallSuccessRatePlot <- function(my_data, my_item){
    ## Draws, for one item 'my_item' (a column name of 'my_data'), the
    ## success rate of each of the five applicant score quintiles as a
    ## green line (correct answers) and its complement as a red line.
    ## Depends on getMyScores() and getMyTrueFalseTable() defined elsewhere
    ## in this file. Side effect only: plots on the active device.
    ###########################################################################
    ## mine the total point score of every applicant
    my_scores <- getMyScores(my_data)
    ###########################################################################
    ## zero-one transformation according to answer correctness
    my_data <- getMyTrueFalseTable(my_data)
    ###########################################################################
    ## build the score quintile boundaries; outer boundaries forced to 0
    ## and (number of items + 1) so every score falls into some interval
    my_quintiles <- NULL
    for(i in 0:5){
        my_quintiles <- c(my_quintiles,
                          quantile(my_scores, probs = 0.2*i, names = FALSE)
                          )
    }
    my_quintiles[c(1, 6)] <- c(0, (dim(my_data)[2] - 1) + 1)
    my_quintiles <- ceiling(my_quintiles)
    ###########################################################################
    ## per-quintile success rate on the chosen item: correct answers
    ## within the quintile divided by the quintile's size
    my_bars <- NULL
    for(i in 1:5){
        my_bars <- c(
            my_bars,
            sum(
                my_data[which(
                    my_quintiles[i] <= my_scores &
                    my_scores < my_quintiles[i + 1]
                ),
                as.character(my_item)
                ]) / length(
                my_data[which(
                    my_quintiles[i] <= my_scores &
                    my_scores < my_quintiles[i + 1]
                ),
                as.character(my_item)
                ])
        )
    }
    ###########################################################################
    ## x-axis labels: score range "lower - upper" of each quintile
    my_labels <- NULL
    for(i in 1:5){
        my_labels <- c(
            my_labels,
            paste(my_quintiles[i], my_quintiles[i + 1] - 1,
                  sep = " - ")
        )
    }
    ###########################################################################
    ## plot success rates of the five quintiles for the given item;
    ## right margin widened (and clipping disabled) for the outside legend
    par(mar = c(8, 5, 6, 8), xpd = TRUE)
    plot(
        c(0.5:4.5),
        my_bars,
        ylim = c(0.0, 1.0),
        col = "green",
        pch = 19,
        type = "b",
        xlab = "celkový počet bodů",
        xaxt = "n",
        ylab = "relativní četnost odpovědi",
        main = paste(
            "Úspěšnost jednotlivých pětin dle celkového počtu bodů, položka ",
            my_item, "\nvšeobecné lékařství",
            sep = "")
    )
    ## complementary (wrong-answer) rate in red
    points(
        c(0.5:4.5),
        1 - my_bars,
        type = "b",
        col = "red",
        pch = 19
    )
    axis(side = 1,
         at = c(0.5:4.5),
         labels = my_labels
    )
    legend(x = "topright",
           inset = c(-0.15, 0),
           legend = c("správná", "špatná"),
           pch = 19,
           col = c("green", "red"),
           title = "odpověď"
    )
    ###########################################################################
}
## ----------------------------------------------------------------------------
## funkce pro vykreslení separátního diagramu na celkovou detailní úspěšnost
## jednotlivých pětin uchazečů ------------------------------------------------
getMyDetailedSuccessRatePlot <- function(
    my_data,
    my_item
){
    ## Draws the classic item-characteristic plot for one item: grey bars
    ## with the per-quintile overall success rate, overlaid with one line
    ## per answer option A-D giving the relative frequency with which each
    ## quintile ticked that option (solid line = option belongs to the
    ## correct answer, dashed = it does not).
    ## Depends on getMyScores() and getMyTrueFalseTable() defined elsewhere
    ## in this file. Side effect only: plots on the active device.
    ###########################################################################
    ## build a data.frame with one TRUE/FALSE row block per answer option
    ## A, B, C, D for every applicant ("answer sheet")
    odpovedi <- c("A", "B", "C", "D")
    if(sum(grepl("id", colnames(my_data))) > 0){
        temp_data <- as.data.frame(
            my_data[, which(grepl("id", colnames(my_data)))[1]]
        )
    }else{
        temp_data <- data.frame(
            "id" = rep(as.factor(rep(NA, dim(my_data)[1])), 4)
        )
    }
    for(i in which(grepl(pattern = "[0-9]+", x = colnames(my_data)))){
        new_column <- NULL
        for(letter in odpovedi){
            new_column <- c(
                new_column,
                grepl(pattern = letter, x = my_data[,i])
            )
        }
        temp_data <- as.data.frame(cbind(temp_data, new_column))
    }
    dummy_odpovedi <- NULL
    for(symbol in odpovedi){
        dummy_odpovedi <- c(dummy_odpovedi, rep(symbol, dim(my_data)[1]))
    }
    temp_data <- as.data.frame(cbind(temp_data, dummy_odpovedi))
    colnames(temp_data)<-c(
        "id",
        colnames(my_data)[which(grepl(pattern = "[0-9]+", x = colnames(my_data)))],
        "moznost"
    )
    ## NOTE(review): assign() into the current (local) frame -- equivalent
    ## to a plain `odpovedni_arch <- temp_data`
    assign("odpovedni_arch", value = temp_data)
    ###########################################################################
    ## build the answer keys: per item a quadruple of TRUE/FALSE values
    ## where TRUE marks an option that belongs to the correct combination
    ## (derived from the first applicant whose answer is flagged with "X")
    temp_data <- as.data.frame(odpovedi)
    for(i in which(grepl(pattern = "[0-9]+", x = colnames(my_data)))){
        if("X" %in% my_data[, i]){
            klic <- rep(TRUE, 4)
        }else{
            j <- 1
            while(!grepl(pattern = "X",x = my_data[j, i]) & j <= dim(my_data)[1]){
                j <- j + 1
            }
            klic <- NULL
            if(j <= dim(my_data)[1]){
                for(k in 1:length(odpovedi)){
                    klic <- c(klic,
                              grepl(pattern = odpovedi[k], x = my_data[j, i]))
                }
            }
            ## fallback key when no applicant answered the item correctly
            if(j == (dim(my_data)[1] + 1)){
                klic <- rep(FALSE, 4)
            }
        }
        temp_data <- as.data.frame(cbind(temp_data, klic))
    }
    colnames(temp_data) <- c(
        "moznost",
        colnames(my_data)[which(
            grepl(pattern = "[0-9]+", x = colnames(my_data))
        )]
    )
    ## local alias for the key sheet (same note as above)
    assign("klicovy_arch", value = temp_data)
    ###########################################################################
    ## mine the total point score of every applicant
    my_scores <- getMyScores(my_data)
    ###########################################################################
    ## zero-one transformation according to answer correctness
    my_data <- getMyTrueFalseTable(my_data)
    ###########################################################################
    ## build the score quintile boundaries; outer boundaries forced to 0
    ## and (number of items + 1) so every score falls into some interval
    my_quintiles <- NULL
    for(i in 0:5){
        my_quintiles <- c(my_quintiles,
                          quantile(my_scores, probs = 0.2*i, names = FALSE)
                          )
    }
    my_quintiles[c(1, 6)] <- c(0, (dim(my_data)[2] - 1) + 1)
    my_quintiles <- ceiling(my_quintiles)
    ###########################################################################
    ## per-quintile success rate on the chosen item (grey bars)
    my_bars <- NULL
    for(i in 1:5){
        my_bars <- c(
            my_bars,
            sum(
                my_data[which(
                    my_quintiles[i] <= my_scores &
                    my_scores < my_quintiles[i + 1]
                ),
                as.character(my_item)
                ]) / length(
                my_data[which(
                    my_quintiles[i] <= my_scores &
                    my_scores < my_quintiles[i + 1]
                ),
                as.character(my_item)
                ])
        )
    }
    ###########################################################################
    ## x-axis labels: score range "lower - upper" of each quintile
    my_labels <- NULL
    for(i in 1:5){
        my_labels <- c(
            my_labels,
            paste(my_quintiles[i], my_quintiles[i + 1] - 1,
                  sep = " - ")
        )
    }
    ###########################################################################
    ## per-quintile relative frequency of ticking each option A-D; the
    ## rows of the per-letter subset keep the original applicant order,
    ## so indexing by which(...) over my_scores stays aligned
    for(letter in odpovedi){
        temp_data <- NULL
        for(i in 1:5){
            temp_data <- c(
                temp_data,
                sum(
                    subset(
                        odpovedni_arch, moznost == letter)[
                        which(
                            my_quintiles[i] <= my_scores &
                            my_scores < my_quintiles[i + 1]
                        ),
                    as.character(my_item)]
                ) / length(
                    subset(odpovedni_arch, moznost == letter)[
                        which(
                            my_quintiles[i] <= my_scores &
                            my_scores < my_quintiles[i + 1]),
                    as.character(my_item)]
                )
            )
        }
        ## stored as A_cetnost, B_cetnost, ... and read back via get()
        assign(paste(letter, "cetnost", sep = "_"), temp_data)
    }
    ###########################################################################
    ## answer key for the user-selected item
    my_key <- klicovy_arch[, as.character(my_item)]
    ###########################################################################
    ## line types for the legend: solid (1) for correct options, dashed (2)
    my_lty <- NULL
    for(letter in odpovedi){
        my_lty <- c(my_lty,
                    if(which(letter == odpovedi) %in% which(my_key)){1}else{2})
    }
    ###########################################################################
    ## draw the barplot with the item characteristics; right margin
    ## widened (and clipping disabled) for the outside legend
    par(mar = c(8, 5, 6, 8), xpd = TRUE)
    barplot(
        my_bars,
        space = rep(0, 5),
        ylim = c(0, 1),
        col = "lightgrey",
        xlab = "celkový počet bodů",
        names = my_labels,
        ylab = "relativní četnost odpovědi"
    )
    title(
        main = paste("Psychometrické charakteristiky, položka ",
                     my_item,
                     "\nvšeobecné lékařství",
                     sep = ""),
        line = 3
    )
    ## one line per option, centred on the bars (positions 0.5 .. 4.5)
    for(letter in odpovedi){
        points(
            c(0.5:4.5),
            get(paste(letter, "cetnost", sep = "_")),
            type = "b",
            col = which(letter == odpovedi),
            pch = which(letter == odpovedi),
            lty = if(my_key[which(letter == odpovedi)]){1}else{2}
        )
    }
    ## correct options rendered in bold in the legend
    legend(x = "topright",
           inset = c(-0.15, 0),
           legend = c(
               if(1 %in% which(my_key)){expression(bold("A"))}else{"A"},
               if(2 %in% which(my_key)){expression(bold("B"))}else{"B"},
               if(3 %in% which(my_key)){expression(bold("C"))}else{"C"},
               if(4 %in% which(my_key)){expression(bold("D"))}else{"D"}
           ),
           pch = c(1:4),
           col = c(1:4),
           lty = my_lty,
           title = "odpověď")
    ###########################################################################
}
## ----------------------------------------------------------------------------
## funkce pro vykreslení separátního diagramu na celkovou úspěšnost
## jednotlivých pětin uchazečů ------------------------------------------------
getMyAnswerSchemaPlot <- function(
    my_data,
    my_item
){
    ## Draws a barplot with the relative frequencies of all 16 possible
    ## answer combinations ("", A, B, ..., ABCD) for one item 'my_item'.
    ## The correct combination (flagged with "X" in the raw data) is
    ## printed in bold on the x-axis; the empty answer is shown as the
    ## empty-set symbol.
    ## Depends on getMyScores() and getMyTrueFalseTable() defined elsewhere
    ## in this file. Side effect only: plots on the active device.
    ###########################################################################
    ## the full spectrum of possible answer combinations
    vsechny_moznosti <- c(
        "",
        "A", "B", "C", "D",
        "AB", "AC", "AD", "BC", "BD", "CD",
        "ABC", "ABD", "ACD", "BCD",
        "ABCD"
    )
    ###########################################################################
    ## build a 1 x 16 table of combination frequencies, initialised to 0
    my_table <- matrix(rep(0, 16), nrow = 1)
    my_table <- as.data.frame(my_table)
    colnames(my_table) <- vsechny_moznosti
    ## observed frequencies of the answer combinations for this item
    item_table <- table(my_data[, as.character(my_item)])
    ## strip the correct-answer flag "X" so names match vsechny_moznosti
    my_names <- names(item_table)
    for(name in names(item_table)){
        if(grepl("X", name)){
            my_names[which(name == names(item_table))] <- gsub("X", "", name)
        }
    }
    for(i in 1:length(item_table)){
        for(j in 2:length(vsechny_moznosti)){
            if(my_names[i] == vsechny_moznosti[j]){
                my_table[1, vsechny_moznosti[j]] <- item_table[i]
            }
        }
    }
    ## "no answer" counts: empty string, or bare "X" (correct answer is
    ## "nothing ticked")
    for(i in 1:length(names(item_table))){
        if(names(item_table)[i] == "" | names(item_table)[i] == "X"){
            my_table[1, 1] <- item_table[i]
        }
    }
    colnames(my_table)[1] <- "bez odpovědi"
    ###########################################################################
    ## build the custom x-axis labels; my_index marks the position(s) of
    ## the correct combination, which is blanked out here and re-drawn in
    ## bold later
    if(any(names(item_table) == "X")){
        ## correct answer is the empty combination -> all labels bold later
        my_index <- c(1:16)
        my_long_labels <- rep("", 16)
    }else{
        my_index <- 1
        if(sum(grepl("X", names(item_table)))){
            for(i in 1:length(vsechny_moznosti)){
                if(vsechny_moznosti[i] == gsub(
                    "X",
                    "",
                    names(item_table)[which(grepl("X", names(item_table)))])){
                    my_index <- i
                }
            }
        }
        my_long_labels <- c(
            expression(symbol("\306")),
            colnames(my_table)[2:16]
        )
        my_long_labels[my_index] <- ""
    }
    ###########################################################################
    ## build a data.frame with one TRUE/FALSE row block per answer option
    ## A, B, C, D for every applicant ("answer sheet")
    odpovedi <- c("A", "B", "C", "D")
    if(sum(grepl("id", colnames(my_data))) > 0){
        temp_data <- as.data.frame(
            my_data[, which(grepl("id", colnames(my_data)))[1]]
        )
    }else{
        temp_data <- data.frame(
            "id" = rep(as.factor(rep(NA, dim(my_data)[1])), 4)
        )
    }
    for(i in which(grepl(pattern = "[0-9]+", x = colnames(my_data)))){
        new_column <- NULL
        for(letter in odpovedi){
            new_column <- c(
                new_column,
                grepl(pattern = letter, x = my_data[,i])
            )
        }
        temp_data <- as.data.frame(cbind(temp_data, new_column))
    }
    dummy_odpovedi <- NULL
    for(symbol in odpovedi){
        dummy_odpovedi <- c(dummy_odpovedi, rep(symbol, dim(my_data)[1]))
    }
    temp_data <- as.data.frame(cbind(temp_data, dummy_odpovedi))
    colnames(temp_data)<-c(
        "id",
        colnames(my_data)[which(grepl(pattern = "[0-9]+", x = colnames(my_data)))],
        "moznost"
    )
    ## NOTE(review): assign() into the current (local) frame -- equivalent
    ## to a plain `odpovedni_arch <- temp_data`
    assign("odpovedni_arch", value = temp_data)
    ###########################################################################
    ## build the answer keys: per item a quadruple of TRUE/FALSE values
    ## where TRUE marks an option that belongs to the correct combination
    ## (derived from the first applicant whose answer is flagged with "X")
    temp_data <- as.data.frame(odpovedi)
    for(i in which(grepl(pattern = "[0-9]+", x = colnames(my_data)))){
        if("X" %in% my_data[, i]){
            klic <- rep(TRUE, 4)
        }else{
            j <- 1
            while(!grepl(pattern = "X",x = my_data[j, i]) & j <= dim(my_data)[1]){
                j <- j + 1
            }
            klic <- NULL
            if(j <= dim(my_data)[1]){
                for(k in 1:length(odpovedi)){
                    klic <- c(klic,
                              grepl(pattern = odpovedi[k], x = my_data[j, i]))
                }
            }
            ## fallback key when no applicant answered the item correctly
            if(j == (dim(my_data)[1] + 1)){
                klic <- rep(FALSE, 4)
            }
        }
        temp_data <- as.data.frame(cbind(temp_data, klic))
    }
    colnames(temp_data) <- c(
        "moznost",
        colnames(my_data)[which(
            grepl(pattern = "[0-9]+", x = colnames(my_data))
        )]
    )
    ## local alias for the key sheet (same note as above)
    assign("klicovy_arch", value = temp_data)
    ###########################################################################
    ## mine the total point score of every applicant
    my_scores <- getMyScores(my_data)
    ###########################################################################
    ## zero-one transformation according to answer correctness
    my_data <- getMyTrueFalseTable(my_data)
    ###########################################################################
    ## answer key for the user-selected item
    my_key <- klicovy_arch[, as.character(my_item)]
    ###########################################################################
    ## draw the barplot with the relative frequencies of the individual
    ## answer combinations; xpd = TRUE so labels below the axis are visible
    par(mar = c(8, 5, 6, 4), xpd = TRUE)
    barplot(
        t(my_table)[, 1] / sum(my_table[1, ]),
        ylim = c(0.0, 1.0),
        xlab = "vzorec odpovědi",
        ylab = "relativní četnost odpovědi",
        xaxt = "n",
        main = paste(
            "Relativní četnost jednotlivých kombinací odpovědí, položka ",
            my_item,
            "\nvšeobecné lékařství",
            sep = "")
    )
    ## default bar geometry: width 1, gap 0.205 -> centres 1.205 apart,
    ## first centre at 0.65; labels drawn manually, rotated by 45 degrees
    text(seq(0.65, 0.65 + 15 * 1.205, 1.205),
         y = -0.06,
         labels = my_long_labels,
         srt = 45)
    ## re-draw the correct combination's label in bold (font = 2)
    if(length(my_index) == 1){
        text(x = 0.65 + 1.205 * (my_index - 1),
             y = -0.06,
             labels = if(my_index == 1){
                 expression(symbol("\306"))
             }else{
                 gsub("X",
                      "",
                      names(item_table)[which(grepl("X", names(item_table)))])},
             font = 2,
             srt = 45)
    }
    ## correct answer is the empty combination -> all 16 labels in bold
    if(length(my_index) > 1){
        text(x = 0.65,
             y = -0.06,
             labels = expression(symbol("\306")),
             font = 2,
             srt = 45)
        text(x = seq(0.65 + 1.205, 0.65 + 1.205 * (length(my_index) - 1), 1.205),
             y = -0.06,
             labels = names(my_table)[2:16],
             font = 2,
             srt = 45)
    }
    ###########################################################################
}
## ----------------------------------------------------------------------------
###############################################################################
###############################################################################
###############################################################################
## load the data ---------------------------------------------------------------
## NOTE(review): relies on the global 'mother_working_directory' being set
## earlier in the file -- setwd() in a script is fragile; verify it is set.
setwd(mother_working_directory)
my_data <- read.csv(
    paste(
        "https://raw.githubusercontent.com/LStepanek",
        "Nekolik-postrehu-k-teorii-odpovedi-na-polozku/master/my_data.csv",
        sep = "/"
    ),
    sep = ";",
    skip = 1,
    check.names = FALSE,
    encoding = "UTF-8"
)
## ----------------------------------------------------------------------------
###############################################################################
## preprocessing --------------------------------------------------------------
## NOTE(review): this copy is taken BEFORE the preprocessing below mutates
## my_data, so 'data' is stale here; it is re-assigned later (before the
## plot-generation loop), which makes this assignment effectively dead.
data <- my_data
if(!is.null(my_data)){
    ## look for a column holding the personal identification number
    ## ("rodné číslo"/"rodcislo"); if present, rename it to 'id' and
    ## store it as a factor rather than a number
    for(i in 1:length(colnames(my_data))){
        if(
            grepl("rodné.číslo", tolower(colnames(my_data)[i])) |
            grepl("rodcislo", tolower(colnames(my_data)[i]))
        ){
            colnames(my_data)[i] <- "id"
            my_data[,i] <- as.factor(as.character(my_data[, i]))
        }
    }
    ## if the dataset contains an 'obor'/'kobor' (study field) column,
    ## keep only applicants to the General Medicine programme
    ## (code "51418" or label "LEK")
    for(i in 1:length(colnames(my_data))){
        if(
            grepl("obor", tolower(colnames(my_data)[i]))
        ){
            if("51418" %in% levels(my_data[, i])){
                my_data <- subset(my_data, my_data[, i] == "51418")
            }else{
                my_data <- subset(my_data, my_data[, i] == "LEK")
            }
        }
    }
    ## if the dataset contains a 'kolo' (round) column, keep only the
    ## first round of the entrance examination
    for(i in 1:length(colnames(my_data))){
        if(
            grepl("kolo", x = tolower(colnames(my_data)[i]))
        ){
            my_data <- subset(my_data, my_data[, i] == "1")
        }
    }
    ## finally re-type the categorical variables so that the factors have
    ## the correct number of levels after the subsetting above
    for(i in 1:dim(my_data)[2]){
        my_data[,i] <- as.character(my_data[, i])
    }
    for(i in 1:dim(my_data)[2]){
        my_data[,i] <- as.factor(my_data[, i])
    }
    ## item columns (names matching "[0-9]+") are kept as character
    for(i in which(grepl("[0-9]+", colnames(my_data)))){
        my_data[, i] <- as.character(my_data[, i])
    }
}
## ----------------------------------------------------------------------------
###############################################################################
## vytvářím pro každou položku každého modulu a ročníku všechny typy
## diagramů -------------------------------------------------------------------
## generate, for each item of the given subject and year, every diagram
## type defined above, and save them as PNG files into
## <mother_working_directory>/diagramy_nad_vysledky_uchazecu
year <- "2020"
predmet <- "biologie"
setwd(
    paste(
        mother_working_directory,
        "diagramy_nad_vysledky_uchazecu",
        sep = "/"
    )
)
## ----------------------------------------------------------------------------
## NOTE(review): paste(predmet, year, sep = "_") can never be NULL, so this
## guard is always TRUE -- presumably a leftover from a loop over datasets
## named "<predmet>_<year>"; confirm the intended condition.
if(!is.null(paste(predmet,year,sep="_"))){
data <- my_data
## ----------------------------------------------------------------------------
## score histogram with 20 bins
png(
    filename = paste("histogram_20_",predmet,"_",year,".png",sep=""),
    width=8,
    height=5,
    units="in",
    res=600
)
getMyHistogram(data,20)
dev.off()
## ----------------------------------------------------------------------------
## score histogram with 100 bins
png(
    filename = paste("histogram_100_",predmet,"_",year,".png",sep=""),
    width=8,
    height=5,
    units="in",
    res=600
)
getMyHistogram(data,100)
dev.off()
## ----------------------------------------------------------------------------
## difficulty vs. discrimination diagram over all items
png(
    filename = paste("holy_trinity_",predmet,"_",year,".png",sep=""),
    width=24,
    height=8,
    units="in",
    res=600
)
getMyDifficultyDiscriminationPlot(data)
dev.off()
## ----------------------------------------------------------------------------
## printable version -- harder half of the items
png(
    filename = paste("holy_trinity_harder_",predmet,"_",year,".png",sep=""),
    width=12,
    height=8,
    units="in",
    res=600
)
getMyFirstHalfDifficultyDiscriminationPlot(data)
dev.off()
## ----------------------------------------------------------------------------
## printable version -- easier half of the items
png(
    filename = paste("holy_trinity_easier_",predmet,"_",year,".png",sep=""),
    width=12,
    height=8,
    units="in",
    res=600
)
getMySecondHalfDifficultyDiscriminationPlot(data)
dev.off()
## ----------------------------------------------------------------------------
## per-item diagrams; progress (in %) is printed to the console
for(my_item in colnames(data)[which(grepl("[0-9]+",colnames(data)))]){
    flush.console()
    print(
        paste(
            "ročník ",year,", předmět ",predmet,", ",
            format(
                which(
                    colnames(data)[grepl("[0-9]+",colnames(data))]==my_item
                )/length(
                    which(grepl("[0-9]+",colnames(data)))
                )*100,nsmall=2
            ),
            " %",
            sep=""
        )
    )
    png(
        filename = paste(
            "overall_success_rate_item_",my_item,"_",predmet,"_",year,".png",sep=""
        ),
        width=10,
        height=6.5,
        units="in",
        res=600
    )
    getMyOverallSuccessRatePlot(data,my_item)
    dev.off()
    png(
        filename = paste(
            "detailed_success_rate_item_",my_item,"_",predmet,"_",year,".png",sep=""
        ),
        width=8,
        height=6.5,
        units="in",
        res=600
    )
    getMyDetailedSuccessRatePlot(data,my_item)
    dev.off()
    png(
        filename = paste(
            "answer_schema_plot_item_",my_item,"_",predmet,"_",year,".png",sep=""
        ),
        width=8,
        height=6.5,
        units="in",
        res=600
    )
    getMyAnswerSchemaPlot(data,my_item)
    dev.off()
}
## ----------------------------------------------------------------------------
}
## ----------------------------------------------------------------------------
###############################################################################
## ----------------------------------------------------------------------------
###############################################################################
###############################################################################
###############################################################################
|
# Functions
##-------------------------------------------------------------------------------------------------------------------##
### KML: This is just a function for me to use for testing.
#toyData <- function(n) {
# sigma <- matrix(0.3, 2, 2)
# diag(sigma) <- 1.0
#
# dat1 <- rmvnorm(n, c(0, 0), sigma)
# colnames(dat1) <- paste0("X", 1 : 2)
#
# r <- as.logical(rbinom(n, 1, 0.3))
# dat1[r, "X1"] <- NA
# as.data.frame(dat1)
#}
##-------------------------------------------------------------------------------------------------------------------##
simData <- function (parm, N)
{
    ## Simulate N rows from a multivariate normal distribution with
    ## parm$pred predictors, unit variances and constant pairwise
    ## covariance parm$cov.
    ## Returns a data.frame with columns X1 ... X<parm$pred>.
    ## NOTE: uses rmvnorm(), i.e. the mvtnorm package must be attached
    ## somewhere earlier in the session.
    covMat <- matrix(parm$cov, nrow = parm$pred, ncol = parm$pred)
    diag(covMat) <- 1.0
    ## draw the sample and wrap it as a data.frame (unnamed matrix
    ## columns are auto-named X1, X2, ...)
    draws <- rmvnorm(n = N, mean = rep(0, parm$pred), sigma = covMat)
    data.frame(draws)
}
##-------------------------------------------------------------------------------------------------------------------##
##-------------------------------------------------------------------------------------------------------------------##
makeMissing <- function(data,
                        mechanism="MCAR",
                        pm,
                        preds,
                        snr=NULL)
{
    ## Poke holes (NA) into the FIRST column of 'data'.
    ##
    ## Arguments:
    ##   data      -- a data.frame to make incomplete
    ##   mechanism -- "MCAR" (default) or "MAR"
    ##   pm        -- proportion of missingness (0..1)
    ##   preds     -- predictor columns driving MAR missingness
    ##   snr       -- signal-to-noise ratio for the MAR mechanism
    ## Returns the data.frame with NAs inserted into column 1.
    ##
    ## Fix vs. the previous version: the MAR branch passed
    ## `snr = parm$snr`, silently reading the global 'parm' and ignoring
    ## the function's own 'snr' argument; it now uses the argument.
    #MAR missing data mechanism
    if(mechanism == "MAR")
    {
        ## simLinearMissingness() (defined elsewhere in the project)
        ## decides which rows become missing
        out <- simLinearMissingness(pm = pm,
                                    data = data,
                                    snr = snr,
                                    preds = preds,
                                    type = "high",
                                    optimize = FALSE)
        #Poke holes
        missingdf <- data
        missingdf[out$r, 1] <- NA
        missingdf
    }
    #MCAR missing data mechanism
    else if(mechanism == "MCAR")
    {
        ## draw floor(pm * N) distinct row indices completely at random
        ## (floor() makes the previous implicit size truncation explicit)
        nMiss <- floor(nrow(data) * pm)
        r <- sample(seq_len(nrow(data)), nMiss)
        ## logical missingness indicator over all rows
        tmp <- rep(FALSE, nrow(data))
        tmp[r] <- TRUE
        #Poke holes
        missingdf <- data
        missingdf[tmp, 1] <- NA
        missingdf
    }
    else
    {
        stop("Undefined or unsupported missing data mechanism.")
    }
}
##-------------------------------------------------------------------------------------------------------------------##
#New doRep function saving the impList
#New doRep function saving the impList
doIter <- function(conds, parm, counter)
{
    ## Run one replication over every row of the condition grid 'conds'
    ## (columns "m", "mec", "pm"), compute the fraction of missing
    ## information (FMI) per condition and write the list of results to
    ## "results/doRep2_<counter>.rds".
    ##
    ## Fixes vs. the previous version:
    ##   * 'store' was never initialised inside the function, so
    ##     `store[[i]] <- ...` only worked if a global happened to exist;
    ##     it is now pre-allocated locally;
    ##   * `check == "TRUE"` compared a logical against a string (worked
    ##     only via implicit coercion); replaced by `if (check)`;
    ##   * the local result of fmi() no longer shadows the fmi() function
    ##     name (renamed to 'fmiRes').
    ##
    ## Depends on simData(), makeMissing(), adjustImpList() (this file)
    ## and mice()/complete()/fmi() from attached packages.
    data_main <- try(simData(parm = parm, N = parm$n))
    c <- counter
    ## pre-allocate the per-condition result list
    store <- vector("list", nrow(conds))
    for (i in 1 : nrow(conds))
    {
        #Create a seperate data matrix to avoid possible problems
        data <- data_main
        ## remember the previous pm and mec to decide whether fresh
        ## imputations are needed for this condition
        pm <- parm$pm
        mec <- parm$mec
        ## overwrite parm with the current condition's levels
        parm$m <- conds[i, "m"]
        parm$mec <- conds[i, "mec"]
        parm$pm <- conds[i, "pm"]
        ## TRUE on the very first condition (pm/mec still NULL) or
        ## whenever pm or mec changed -> generate a new imputation list;
        ## FALSE -> reuse the existing list, adjusted to the new m
        check <- (is.null(pm) | is.null(mec)) || (pm != parm$pm | mec != parm$mec)
        if(check)
        {
            MissingData <- try(makeMissing(data = data,
                                           mechanism = parm$mec,
                                           pm = parm$pm,
                                           preds = parm$Vecpred,
                                           snr = NULL
                                           ))
            #Impute missing values
            impData <- try(mice(data = MissingData,
                                m = parm$m,
                                method = "norm",
                                print = FALSE
                                ))
            #Save a list of imputed data sets
            impListdf <- try(complete(data = impData,
                                      action = "all"
                                      ))
            impList <- impListdf
        }
        else
        {
            ## shrink/extend the cached imputation list to the current m
            impList <- adjustImpList(impListdf = impListdf,
                                     parm = parm)
        }
        ## fraction of missing information for this condition
        fmiRes <- try(fmi(data = impList,
                          method = "sat",
                          fewImps = TRUE
                          ))
        store[[i]] <- fmiRes
    }
    #Write list to disc
    saveRDS(store,
            file = paste0("results/doRep2_", c, ".rds")) #c is the current iteration
}
##-------------------------------------------------------------------------------------------------------------------##
#' Compute reference ("true") FMI values on one very large data set.
#'
#' For every condition, pokes missingness into a single large simulated data
#' set (N = parm$Nfmi) and estimates the fraction of missing information
#' directly on the incomplete data, without multiple imputation.
#'
#' @param conds Data frame of conditions with columns "m", "mec", "pm".
#' @param parm List of fixed parameters; parm$Nfmi is the large sample size.
#' @return Side effect only: writes results/data_trueFMI<nrow(conds)>.rds.
getTrueFMI <- function(conds, parm)
{
  # One large data set, reused across all conditions
  data_main <- simData(parm = parm, N = parm$Nfmi)
  # BUG FIX: `storage` was never initialised before `storage[[i]] <- fmi`
  storage <- vector("list", nrow(conds))
  for (i in seq_len(nrow(conds)))
  {
    # Load the current condition's values
    parm$m <- conds[i, "m"]
    parm$mec <- conds[i, "mec"]
    parm$pm <- conds[i, "pm"]
    # Keep reusing the same previously created data set
    data <- data_main
    # Poke holes into the data set
    MissingData <- try(makeMissing(data = data,
                                   mechanism = parm$mec,
                                   pm = parm$pm,
                                   preds = parm$Vecpred,
                                   snr = NULL
                                   ))
    # FMI via the saturated model on the incomplete data.
    # BUG FIX: the call previously ended in a trailing comma, which is a
    # missing-argument error in R. (X2 could potentially be excluded since
    # it has no missing values -- left as-is, TODO confirm.)
    fmi <- try(fmi(data = MissingData,
                   method = "sat"))
    # Save FMIs to list
    storage[[i]] <- fmi
  }
  # i holds nrow(conds) after the loop, so the file name encodes it
  saveRDS(storage,
          file = paste0("results/data_trueFMI", i, ".rds"))
}
##-------------------------------------------------------------------------------------------------------------------##
#Reusing the big impSet to save computational cost
#Adjusts the number of m to the newly specified m while maintaining the big impList of m = 500
#' Trim a large cached imputation list down to parm$m data sets.
#'
#' Randomly removes length(impListdf) - parm$m elements so the remaining
#' list holds exactly parm$m imputed data sets; this avoids re-running mice
#' when only m changed between conditions.
#'
#' @param impListdf List of completed (imputed) data sets.
#' @param parm List with element `m`, the number of imputations to keep.
#' @return A list of parm$m elements sampled from `impListdf`.
adjustImpList <- function(impListdf, parm)
{
  out <- impListdf
  # Sample the indices of the elements to DROP.
  # (The original also computed an unused parm$mcomp ratio; removed.)
  r <- sample(seq_along(out), length(out) - parm$m)
  # Convert the drop indices into a logical mask
  tmp <- rep(FALSE, length(out))
  tmp[r] <- TRUE
  # Remove the flagged elements; the survivors are the kept imputations
  impList2 <- out
  impList2[tmp] <- NULL
  impList2
}
##-------------------------------------------------------------------------------------------------------------------##
|
/simFunctions.R
|
no_license
|
kylelang/BachelorThesisCode
|
R
| false
| false
| 7,587
|
r
|
# Functions
##-------------------------------------------------------------------------------------------------------------------##
### KML: This is just a function for me to use for testing.
#toyData <- function(n) {
# sigma <- matrix(0.3, 2, 2)
# diag(sigma) <- 1.0
#
# dat1 <- rmvnorm(n, c(0, 0), sigma)
# colnames(dat1) <- paste0("X", 1 : 2)
#
# r <- as.logical(rbinom(n, 1, 0.3))
# dat1[r, "X1"] <- NA
# as.data.frame(dat1)
#}
##-------------------------------------------------------------------------------------------------------------------##
#' Simulate a multivariate-normal data set.
#'
#' Builds a parm$pred x parm$pred covariance matrix with common off-diagonal
#' entry parm$cov and unit variances, then draws N observations from a
#' zero-mean multivariate normal (via rmvnorm, assumed attached).
#'
#' @param parm List with elements `cov` (common covariance) and `pred`
#'   (number of predictor columns).
#' @param N Number of rows to simulate.
#' @return A data.frame with N rows and parm$pred numeric columns (X1, X2, ...).
simData <- function(parm, N) {
  p <- parm$pred
  covmat <- matrix(parm$cov, nrow = p, ncol = p)
  diag(covmat) <- 1.0
  # Draw N rows from MVN(0, covmat); data.frame() names the columns X1..Xp
  draws <- rmvnorm(n = N, mean = rep(0, p), sigma = covmat)
  data.frame(draws)
}
##-------------------------------------------------------------------------------------------------------------------##
##-------------------------------------------------------------------------------------------------------------------##
#' Poke MAR or MCAR missingness into the first column of `data`.
#'
#' @param data A data.frame; missing values are induced in column 1 only.
#' @param mechanism "MAR" or "MCAR"; anything else is an error.
#' @param pm Proportion of rows set to missing (pm * nrow(data) assumed integral).
#' @param preds Predictor columns driving the MAR mechanism
#'   (passed through to simLinearMissingness).
#' @param snr Signal-to-noise ratio for the MAR mechanism; unused under MCAR.
#' @return `data` with NA values in column 1 for the selected rows.
makeMissing <- function(data,
                        mechanism = "MCAR",
                        pm,
                        preds,
                        snr = NULL)
{
  if (mechanism == "MAR") {
    # Let simLinearMissingness decide which rows become missing.
    # BUG FIX: use the `snr` argument instead of the global `parm$snr`,
    # which silently ignored whatever the caller passed in.
    out <- simLinearMissingness(pm = pm,
                                data = data,
                                snr = snr,
                                preds = preds,
                                type = "high",
                                optimize = FALSE)
    # Poke holes in the first column only
    missingdf <- data
    missingdf[out$r, 1] <- NA
    missingdf
  } else if (mechanism == "MCAR") {
    # Uniformly sample pm * N row indices
    r <- sample(1:nrow(data), nrow(data) * pm)
    # Turn the index vector into a logical row mask
    tmp <- rep(FALSE, nrow(data))
    tmp[r] <- TRUE
    r <- tmp
    out <- list(r = r)
    # Poke holes in the first column only
    missingdf <- data
    missingdf[out$r, 1] <- NA
    missingdf
  } else {
    stop("Undefined or unsupported missing data mechanism.")
  }
}
##-------------------------------------------------------------------------------------------------------------------##
#New doRep function saving the impList
#' Run one simulation repetition across all design conditions.
#'
#' For each row of `conds`, simulates missingness, imputes with mice, and
#' records the fraction of missing information (FMI). Imputations are only
#' regenerated when pm or mec changed relative to the previous condition;
#' otherwise the cached imputation list is merely trimmed to the new m.
#'
#' @param conds Data frame of conditions with columns "m", "mec", "pm".
#' @param parm List of fixed simulation parameters (n, Vecpred, ...).
#' @param counter Iteration index used to name the results file.
#' @return Side effect only: writes results/doRep2_<counter>.rds.
doIter <- function(conds, parm, counter)
{
  data_main <- try(simData(parm = parm, N = parm$n))
  c <- counter
  # BUG FIX: `store` was never initialised, so `store[[i]] <- fmi` failed
  # with "object 'store' not found" on the first condition.
  store <- vector("list", nrow(conds))
  for (i in seq_len(nrow(conds)))
  {
    # Work on a fresh copy of the simulated data
    data <- data_main
    # Remember the previous condition's pm/mec to decide whether to re-impute
    pm <- parm$pm
    mec <- parm$mec
    m <- parm$m
    # Load the current condition's values
    parm$m <- conds[i, "m"]
    parm$mec <- conds[i, "mec"]
    parm$pm <- conds[i, "pm"]
    # TRUE on the first condition (pm/mec still NULL) or when pm/mec changed;
    # then a fresh imputation list is needed. Otherwise reuse the cached one.
    check <- (is.null(pm) | is.null(mec)) || (pm != parm$pm | mec != parm$mec)
    if (isTRUE(check))
    {
      MissingData <- try(makeMissing(data = data,
                                     mechanism = parm$mec,
                                     pm = parm$pm,
                                     preds = parm$Vecpred,
                                     snr = NULL
                                     ))
      # Multiple imputation with Bayesian linear regression ("norm")
      impData <- try(mice(data = MissingData,
                          m = parm$m,
                          method = "norm",
                          print = FALSE
                          ))
      # Keep the full list of completed data sets so it can be reused
      impListdf <- try(complete(data = impData,
                                action = "all"
                                ))
      impList <- impListdf
    }
    else
    {
      # Same pm/mec as before: subsample the cached imputation list to m sets
      impList <- adjustImpList(impListdf = impListdf,
                               parm = parm)
    }
    # Fraction of missing information from the imputed data sets
    fmi <- try(fmi(data = impList,
                   method = "sat",
                   fewImps = TRUE
                   ))
    # Save FMIs to list
    store[[i]] <- fmi
  }
  # Persist all FMIs for this repetition; c is the current iteration
  saveRDS(store,
          file = paste0("results/doRep2_", c, ".rds"))
}
##-------------------------------------------------------------------------------------------------------------------##
#' Compute reference ("true") FMI values on one very large data set.
#'
#' For every condition, pokes missingness into a single large simulated data
#' set (N = parm$Nfmi) and estimates the fraction of missing information
#' directly on the incomplete data, without multiple imputation.
#'
#' @param conds Data frame of conditions with columns "m", "mec", "pm".
#' @param parm List of fixed parameters; parm$Nfmi is the large sample size.
#' @return Side effect only: writes results/data_trueFMI<nrow(conds)>.rds.
getTrueFMI <- function(conds, parm)
{
  # One large data set, reused across all conditions
  data_main <- simData(parm = parm, N = parm$Nfmi)
  # BUG FIX: `storage` was never initialised before `storage[[i]] <- fmi`
  storage <- vector("list", nrow(conds))
  for (i in seq_len(nrow(conds)))
  {
    # Load the current condition's values
    parm$m <- conds[i, "m"]
    parm$mec <- conds[i, "mec"]
    parm$pm <- conds[i, "pm"]
    # Keep reusing the same previously created data set
    data <- data_main
    # Poke holes into the data set
    MissingData <- try(makeMissing(data = data,
                                   mechanism = parm$mec,
                                   pm = parm$pm,
                                   preds = parm$Vecpred,
                                   snr = NULL
                                   ))
    # FMI via the saturated model on the incomplete data.
    # BUG FIX: the call previously ended in a trailing comma, which is a
    # missing-argument error in R. (X2 could potentially be excluded since
    # it has no missing values -- left as-is, TODO confirm.)
    fmi <- try(fmi(data = MissingData,
                   method = "sat"))
    # Save FMIs to list
    storage[[i]] <- fmi
  }
  # i holds nrow(conds) after the loop, so the file name encodes it
  saveRDS(storage,
          file = paste0("results/data_trueFMI", i, ".rds"))
}
##-------------------------------------------------------------------------------------------------------------------##
#Reusing the big impSet to save computational cost
#Adjusts the number of m to the newly specified m while maintaining the big impList of m = 500
#' Trim a large cached imputation list down to parm$m data sets.
#'
#' Randomly removes length(impListdf) - parm$m elements so the remaining
#' list holds exactly parm$m imputed data sets; this avoids re-running mice
#' when only m changed between conditions.
#'
#' @param impListdf List of completed (imputed) data sets.
#' @param parm List with element `m`, the number of imputations to keep.
#' @return A list of parm$m elements sampled from `impListdf`.
adjustImpList <- function(impListdf, parm)
{
  out <- impListdf
  # Sample the indices of the elements to DROP.
  # (The original also computed an unused parm$mcomp ratio; removed.)
  r <- sample(seq_along(out), length(out) - parm$m)
  # Convert the drop indices into a logical mask
  tmp <- rep(FALSE, length(out))
  tmp[r] <- TRUE
  # Remove the flagged elements; the survivors are the kept imputations
  impList2 <- out
  impList2[tmp] <- NULL
  impList2
}
##-------------------------------------------------------------------------------------------------------------------##
|
#' decipher: A package to process natural language.
#'
#' Currently the package allows training and using name finder models.
#'
#' @section functions:
#' \itemize{
#' \item{\code{\link{tnf}} to use a model and extract names from character vector.}
#' \item{\code{\link{tnf_}} to use a model and extract names from file.}
#' \item{\code{\link{tnf_train}} train a name finder model from character vector.}
#' \item{\code{\link{tnf_train_}} train a name finder model from file.}
#' \item{\code{\link{get_names}} extract identified names from character vector.}
#' \item{\code{\link{get_names_}} extract identified names from file.}
#' \item{\code{\link{dc}} classify documents from character vector.}
#'  \item{\code{\link{dc_}} classify documents from file.}
#'  \item{\code{\link{dc_train}} train document classifier from file.}
#' }
#'
#' @examples
#' \dontrun{
#' # get working directory
#' # need to pass full path
#' wd <- getwd()
#'
#' # Name extraction
#' # Training to find "WEF"
#' data <- paste("This organisation is called the <START:wef> World Economic Forum <END>",
#' "It is often referred to as <START:wef> Davos <END> or the <START:wef> WEF <END>.")
#'
#' # Save the above as file
#' write(data, file = "input.txt")
#'
#' # Trains the model and returns the full path to the model
#' model <- tnf_train_(model = paste0(wd, "/wef.bin"), lang = "en",
#' data = paste0(wd, "/input.txt"), type = "wef")
#'
#' # Create sentences to test our model
#' sentences <- paste("This sentence mentions the World Economic Forum the annual meeting",
#' "of which takes place in Davos. Note that the forum is often called the WEF.")
#'
#' # Save sentences
#' write(data, file = "sentences.txt")
#'
#' # Extract names
#' # Without specifying an output file the extracted names appear in the console
#' tnf(model = model, sentences = paste0(wd, "/sentences.txt"))
#'
#' # returns path to output file
#' output <- tnf_(model = model, sentences = paste0(wd, "/sentences.txt"),
#' output = paste0(wd, "/output.txt"))
#'
#' # extract names
#' (names <- get_names(output))
#'
#' # Classification
#' # create dummy data
#' data <- data.frame(class = c("Sport", "Business", "Sport", "Sport"),
#' doc = c("Football, tennis, golf and, bowling and, score",
#' "Marketing, Finance, Legal and, Administration",
#' "Tennis, Ski, Golf and, gym and, match",
#' "football, climbing and gym"))
#'
#' # repeat data 50 times to have enough data
#' # Obviously do not do that in the real world
#' data <- do.call("rbind", replicate(50, data, simplify = FALSE))
#'
#' # train model
#' model <- dc_train(model = paste0(wd, "/model.bin"), data = data, lang = "en")
#'
#' # create documents to classify
#' documents <- data.frame(
#' docs = c("This discusses golf which is a sport.",
#' "This documents is about business administration.",
#' "This is about people who do sport, go to the gym and play tennis.",
#' "Some play tennis and work in Finance")
#' )
#'
#' # classify documents
#' classified <- dc(model, documents)
#' }
#'
#' @importFrom utils write.table
#'
#' @docType package
#' @name decipher
NULL
|
/R/decipher-package.R
|
no_license
|
news-r/decipher
|
R
| false
| false
| 3,168
|
r
|
#' decipher: A package to process natural language.
#'
#' Currently the package allows training and using name finder models.
#'
#' @section functions:
#' \itemize{
#' \item{\code{\link{tnf}} to use a model and extract names from character vector.}
#' \item{\code{\link{tnf_}} to use a model and extract names from file.}
#' \item{\code{\link{tnf_train}} train a name finder model from character vector.}
#' \item{\code{\link{tnf_train_}} train a name finder model from file.}
#' \item{\code{\link{get_names}} extract identified names from character vector.}
#' \item{\code{\link{get_names_}} extract identified names from file.}
#' \item{\code{\link{dc}} classify documents from character vector.}
#'  \item{\code{\link{dc_}} classify documents from file.}
#'  \item{\code{\link{dc_train}} train document classifier from file.}
#' }
#'
#' @examples
#' \dontrun{
#' # get working directory
#' # need to pass full path
#' wd <- getwd()
#'
#' # Name extraction
#' # Training to find "WEF"
#' data <- paste("This organisation is called the <START:wef> World Economic Forum <END>",
#' "It is often referred to as <START:wef> Davos <END> or the <START:wef> WEF <END>.")
#'
#' # Save the above as file
#' write(data, file = "input.txt")
#'
#' # Trains the model and returns the full path to the model
#' model <- tnf_train_(model = paste0(wd, "/wef.bin"), lang = "en",
#' data = paste0(wd, "/input.txt"), type = "wef")
#'
#' # Create sentences to test our model
#' sentences <- paste("This sentence mentions the World Economic Forum the annual meeting",
#' "of which takes place in Davos. Note that the forum is often called the WEF.")
#'
#' # Save sentences
#' write(data, file = "sentences.txt")
#'
#' # Extract names
#' # Without specifying an output file the extracted names appear in the console
#' tnf(model = model, sentences = paste0(wd, "/sentences.txt"))
#'
#' # returns path to output file
#' output <- tnf_(model = model, sentences = paste0(wd, "/sentences.txt"),
#' output = paste0(wd, "/output.txt"))
#'
#' # extract names
#' (names <- get_names(output))
#'
#' # Classification
#' # create dummy data
#' data <- data.frame(class = c("Sport", "Business", "Sport", "Sport"),
#' doc = c("Football, tennis, golf and, bowling and, score",
#' "Marketing, Finance, Legal and, Administration",
#' "Tennis, Ski, Golf and, gym and, match",
#' "football, climbing and gym"))
#'
#' # repeat data 50 times to have enough data
#' # Obviously do not do that in the real world
#' data <- do.call("rbind", replicate(50, data, simplify = FALSE))
#'
#' # train model
#' model <- dc_train(model = paste0(wd, "/model.bin"), data = data, lang = "en")
#'
#' # create documents to classify
#' documents <- data.frame(
#' docs = c("This discusses golf which is a sport.",
#' "This documents is about business administration.",
#' "This is about people who do sport, go to the gym and play tennis.",
#' "Some play tennis and work in Finance")
#' )
#'
#' # classify documents
#' classified <- dc(model, documents)
#' }
#'
#' @importFrom utils write.table
#'
#' @docType package
#' @name decipher
NULL
|
# Decision-tree comparison (CART, C5.0, conditional inference) on an HR
# attrition data set assembled from three interactively chosen CSV files.
employee_ori = read.csv (file.choose())
general_ori = read.csv(file.choose())
manager_ori = read.csv(file.choose())
# Join the three sources on the shared EmployeeID key
Merger1 = merge(employee_ori, general_ori, by = c("EmployeeID"))
Merger2 = merge(Merger1, manager_ori, by = c("EmployeeID"))
dataset_fin = Merger2
View(dataset_fin)
summary(dataset_fin)
# Listwise deletion of rows with any missing value
dataset_fin1 = na.omit(dataset_fin)
#
str(dataset_fin1)
# Drop identifier/constant columns that carry no predictive information
outvars= names(dataset_fin1)%in%c('EmployeeID','EmployeeCount','Over18','StandardHours')
dataset_fin2 = dataset_fin1[!outvars]
typeof(dataset_fin2)
# NOTE(review): hist() on a whole data frame is expected to error -- confirm
hist(dataset_fin2)
# Coerce the first 25 columns to numeric and plot each histogram.
# NOTE(review): the loop variable shadows base::colnames, and
# c(colnames:colnames) is just the single index -- works but confusing.
for ( colnames in 1:25)
{dataset_fin2[,c(colnames:colnames)]= as.numeric(dataset_fin2[,c(colnames:colnames)])
hist(dataset_fin2[,c(colnames:colnames)])}
#CART method
library("rpart.plot")
library("rpart")
# Build the formula Attrition ~ <all other columns>
n = colnames(dataset_fin2)
fullmodel = as.formula(paste("Attrition ~", paste(n[!n %in% "Attrition"], collapse = " + ")))
carfit = rpart(fullmodel, data = dataset_fin2, method = "class" )
print(carfit)
rpart.plot(carfit, main ="Attrition - Classification Tree",
           box.palette="Blues")
rpart.rules(carfit)
# Complexity-parameter diagnostics for pruning
plotcp(carfit)
printcp(carfit)
#C50
library(C50)
# Columns 1, 12, 19, 21 are dropped -- presumably the id/constant columns;
# verify the indices match the intended columns
ruleModel <- C5.0(fullmodel, data = dataset_fin1[,c(-1,-12,-19,-21)])
ruleModel
summary(ruleModel)
plot(ruleModel)
#conditional inference
library(party)
citree = ctree(fullmodel, data = dataset_fin1[,c(-1,-12,-19,-21)])
plot(citree)
#Accuracy testing
dataset_fin3 = dataset_fin1[,c(-1,-12,-19,-21)]
library(Hmisc) # Needed for %nin%
# 80/20 train/test split.
# NOTE(review): runif can draw duplicate row numbers, so the train set may
# contain repeats and cover fewer than 80% of unique rows -- confirm intent.
totalrows = nrow(dataset_fin3)
pickrows = round(runif(totalrows*.80, 1, totalrows),0)
traindataset = dataset_fin3[pickrows, ]
testdataset = dataset_fin3[-pickrows, ]
library(MLmetrics)
#CART acc test
carfit2 = rpart(fullmodel, data = traindataset, method = "class")
testdataset$predict = predict(carfit2, newdata=testdataset, type='class')
Accuracytable = table(testdataset$predict,testdataset$Attrition)
Accuracy(testdataset$predict, testdataset$Attrition)
#C50 acc test
# NOTE(review): this C5.0 model is trained on the TEST set and then
# evaluated on the same data, so accuracy is optimistic; traindataset was
# probably intended -- confirm.
rulemodel2 = C5.0(fullmodel, data =testdataset)
testdataset$predict2 = predict( rulemodel2, newdata = testdataset)
Accuracy(testdataset$predict2, testdataset$Attrition)
#conditional Inference acc test
citree1 = ctree(fullmodel, data=traindataset)
testdataset$predict3 = predict(citree1, newdata = testdataset)
Accuracy(testdataset$predict3, testdataset$Attrition)
|
/Sample5 Decision Tree.R
|
no_license
|
KKKChi/Data_analytics
|
R
| false
| false
| 2,276
|
r
|
# Decision-tree comparison (CART, C5.0, conditional inference) on an HR
# attrition data set assembled from three interactively chosen CSV files.
employee_ori = read.csv (file.choose())
general_ori = read.csv(file.choose())
manager_ori = read.csv(file.choose())
# Join the three sources on the shared EmployeeID key
Merger1 = merge(employee_ori, general_ori, by = c("EmployeeID"))
Merger2 = merge(Merger1, manager_ori, by = c("EmployeeID"))
dataset_fin = Merger2
View(dataset_fin)
summary(dataset_fin)
# Listwise deletion of rows with any missing value
dataset_fin1 = na.omit(dataset_fin)
#
str(dataset_fin1)
# Drop identifier/constant columns that carry no predictive information
outvars= names(dataset_fin1)%in%c('EmployeeID','EmployeeCount','Over18','StandardHours')
dataset_fin2 = dataset_fin1[!outvars]
typeof(dataset_fin2)
# NOTE(review): hist() on a whole data frame is expected to error -- confirm
hist(dataset_fin2)
# Coerce the first 25 columns to numeric and plot each histogram.
# NOTE(review): the loop variable shadows base::colnames, and
# c(colnames:colnames) is just the single index -- works but confusing.
for ( colnames in 1:25)
{dataset_fin2[,c(colnames:colnames)]= as.numeric(dataset_fin2[,c(colnames:colnames)])
hist(dataset_fin2[,c(colnames:colnames)])}
#CART method
library("rpart.plot")
library("rpart")
# Build the formula Attrition ~ <all other columns>
n = colnames(dataset_fin2)
fullmodel = as.formula(paste("Attrition ~", paste(n[!n %in% "Attrition"], collapse = " + ")))
carfit = rpart(fullmodel, data = dataset_fin2, method = "class" )
print(carfit)
rpart.plot(carfit, main ="Attrition - Classification Tree",
           box.palette="Blues")
rpart.rules(carfit)
# Complexity-parameter diagnostics for pruning
plotcp(carfit)
printcp(carfit)
#C50
library(C50)
# Columns 1, 12, 19, 21 are dropped -- presumably the id/constant columns;
# verify the indices match the intended columns
ruleModel <- C5.0(fullmodel, data = dataset_fin1[,c(-1,-12,-19,-21)])
ruleModel
summary(ruleModel)
plot(ruleModel)
#conditional inference
library(party)
citree = ctree(fullmodel, data = dataset_fin1[,c(-1,-12,-19,-21)])
plot(citree)
#Accuracy testing
dataset_fin3 = dataset_fin1[,c(-1,-12,-19,-21)]
library(Hmisc) # Needed for %nin%
# 80/20 train/test split.
# NOTE(review): runif can draw duplicate row numbers, so the train set may
# contain repeats and cover fewer than 80% of unique rows -- confirm intent.
totalrows = nrow(dataset_fin3)
pickrows = round(runif(totalrows*.80, 1, totalrows),0)
traindataset = dataset_fin3[pickrows, ]
testdataset = dataset_fin3[-pickrows, ]
library(MLmetrics)
#CART acc test
carfit2 = rpart(fullmodel, data = traindataset, method = "class")
testdataset$predict = predict(carfit2, newdata=testdataset, type='class')
Accuracytable = table(testdataset$predict,testdataset$Attrition)
Accuracy(testdataset$predict, testdataset$Attrition)
#C50 acc test
# NOTE(review): this C5.0 model is trained on the TEST set and then
# evaluated on the same data, so accuracy is optimistic; traindataset was
# probably intended -- confirm.
rulemodel2 = C5.0(fullmodel, data =testdataset)
testdataset$predict2 = predict( rulemodel2, newdata = testdataset)
Accuracy(testdataset$predict2, testdataset$Attrition)
#conditional Inference acc test
citree1 = ctree(fullmodel, data=traindataset)
testdataset$predict3 = predict(citree1, newdata = testdataset)
Accuracy(testdataset$predict3, testdataset$Attrition)
|
# ==========================================================================
# Package: Cognitivemodels
# File: utils-checks.R
# Author: Jana B. Jarecki
# ==========================================================================
# ==========================================================================
# Utility functions for checking sanity of model inputs
# ==========================================================================
#' Checks the choicerule
#'
#' @importFrom utils menu
#' @importFrom utils install.packages
#'
#' @param x the name of the choicerule
#' @export
#' @noRd
.check_and_match_choicerule <- function(x = NULL) {
  # Missing/NULL input is a hard error: the caller must be explicit,
  # using "none" to deliberately skip the choicerule.
  if (length(x) == 0L) {
    stop("Must supply a 'choicerule'.\n * Set choicerule to 'none' to not apply a choicerule.\n * Allowed values are 'none', softmax', 'luce', 'epsilon'", call. = FALSE)
  }
  # Resolve (possibly abbreviated) names against the allowed set
  match.arg(x, c("none", "softmax", "argmax", "luce", "epsilon"))
}
#' Checks the parameter values
#'
#' @param x A vector or list with parameters to fix
#' @param pass Logical, whether to pass this check
#' @export
#' @noRd
.check_par <- function(x = NULL, parspace, pass = FALSE) {
  # Formal checks
  # Nothing to validate when checking is disabled or no parameters were fixed
  if (pass == TRUE | length(x) == 0L) { return() }
  # The sentinel "start" means "use starting values" and needs no checking
  if (is.character(x)) { if(x[1] == "start") { return() }}
  # Normalise a plain numeric vector of fixed values into a list
  if (length(x) & all(is.numeric(x))) { x <- as.list(x) }
  # Multiple fixed parameters must arrive as a list, one entry each
  if (length(x) > 1L & !is.list(x)) {
    stop("Parameters to fix must be a list, not a ", typeof(x), ".\n * Did you forget to supply a list to 'fix'? fix = list( ... )?", call.=FALSE)
  }
  # Each list entry must hold exactly one value
  if (length(x) != sum(sapply(x, length))) {
    stop("Parameters to fix must be a list with 1 parameter per list entry, but the ", which(lapply(x, length) > 1L), ". entry of 'fix' has multiple parameters.\n * Do you need to change the format of 'fix'?", call. = FALSE)
  }
  # Each parameter may be fixed at most once
  if (any(duplicated(names(x)))) {
    stop("Names of fixed parameters must be unique, but 'fix' contains ", .dotify(sQuote(names(x)[duplicated(names(x))])), " ", sum(duplicated(names(x))) + 1, " times.", call. = FALSE)
  }
  # apply the check par function iteratively if par length > 1
  # (recurse entry-by-entry so the single-parameter checks below apply)
  if (length(x) > 1L) {
    Map(function(x, i) .check_par(x = setNames(x, i), parspace), x, names(x) )
    return()
  }
  # Single parameter: check its name, its value range, and fix-specific rules
  .check_parnames(x = names(x), y = rownames(parspace), pass = pass)
  .check_parvalues(x = x, y = parspace, pass = pass)
  .check_fixvalues(x = x, y = parspace, pass = pass)
}
# Verify that a fixed parameter's name is one of the model's parameters.
# `x` is the candidate name, `y` the vector of legal names; errors with a
# suggestion when the name is unknown, returns silently otherwise.
.check_parnames <- function(x, y, pass = FALSE) {
  if (pass == TRUE) { return() }
  nm <- unlist(x)
  if (nm %in% y) { return() }
  stop("Parameter names must be ", .brackify(dQuote(y)), ", not ", dQuote(nm), ".\n * ", .didyoumean(nm, y), call. = FALSE)
}
# Verify that a fixed parameter's value lies inside its allowed range.
#
# `x` is a single (named) parameter value, `y` the parspace table with
# per-parameter "lb"/"ub" columns, `n` the parameter name. NA values and
# character values (equality constraints) are not range-checked.
.check_parvalues <- function(x, y, n = names(x), pass = FALSE) {
  x <- unlist(x)
  # Short-circuiting || for this scalar guard (was elementwise |): with ||,
  # is.character() is never reached when x is NA, and the scalar intent is
  # explicit.
  if (pass == TRUE || is.na(x) || is.character(x)) { return() }
  y <- y[n, ]
  # Numeric tolerance so boundary values are not rejected by rounding error
  tolerance <- sqrt(.Machine$double.eps)
  if (x < (y["lb"] - tolerance) || (x > y["ub"] + tolerance)) {
    stop("Parameter ", sQuote(n), " must be between ", y["lb"], " and ", y["ub"], ".\n * Did you accidentally fix '", n, " = ", x, "'?\n * Would you like to change the parameter range? options = list(", ifelse(x > y["ub"] + tolerance, "ub", "lb"), " = c(", n, " = ", x, ")", call.=FALSE)
  }
}
#' Checks the fixed parameter
#'
#' @param x the fixed parameter
#' @param y the parameter space object
#' @export
#' @noRd
# Validates a single fixed parameter: equality constraints (character values)
# must name a *different*, existing parameter, and NA is only allowed for
# parameters the model can ignore (parspace column "na").
# (Idiom fix: `<-` for the top-level assignment instead of `=`.)
.check_fixvalues <- function(x, y, pass = FALSE) {
  x <- unlist(x)
  # Short-circuiting || for this scalar guard (was elementwise |)
  if (pass == TRUE || length(x) == 0L) { return() }
  if (is.character(x)) {
    if (names(x) == x) {
      stop("Fixed parameter (equality-constrained) must be equal to another parameter, not itself. \n * Did you accidentally fix ", names(x), " = ", dQuote(x), "?", call. = FALSE)
    }
    if (!x %in% rownames(y)) {
      stop("Fixed parameter (equality-constrained) must be equal to one of ", .dotify(dQuote(rownames(y))), ".\n * Did you accidentally fix ", names(x), " = ", dQuote(x), "? ", .didyoumean(x, setdiff(rownames(y), names(x))), call. = FALSE)
    }
  }
  # && short-circuits (was &): y[names(x), "na"] is only looked up when x
  # actually is NA, so a parspace without an "na" column cannot error here
  if (is.na(x) && is.na(y[names(x), "na"])) {
    stop("Fixed parameter ", sQuote(names(x)), " can't be NA and thereby ignored, because the model needs the parameter ", sQuote(names(x)), ".\n * Do you want to fix ", sQuote(names(x)), " to be between ", paste(y[names(x), c("lb","ub")], collapse = " and "), "?", call. = FALSE)
  }
}
#' Prints the possible optimization solvers
#'
#' @export
#' @noRd
# Return the names of all optimization solvers the package can use: the
# built-in "grid"/"solnp"/"auto" plus every ROI plugin that is either
# available for installation or already registered.
solvers <- function() {
  plugin_names <- gsub("ROI.plugin.", "", ROI::ROI_available_solvers()$Package)
  registered <- names(ROI::ROI_registered_solvers())
  c("grid", "solnp", "auto", unique(c(plugin_names, registered)))
}
#' Checks and optionally installs missing solvers
#'
#' @param solver_name the name of the solver
#' @export
#' @noRd
.check_and_match_solver <- function(solver) {
  # All legal solver names: built-ins plus ROI plugins
  allowed <- cognitivemodels:::solvers()
  # Validate each entry individually so the error names the bad one
  for (s in solver) {
    if (inherits(try(match.arg(s, allowed), silent = TRUE), "try-error")) {
      stop("'solver' must be a valid name, not ", dQuote(setdiff(s, allowed)), ".\n * ", .didyoumean(s, allowed), "\n * Would you like to see all valid names? cognitivemodels:::solvers()", call. = FALSE)
    }
  }
  # Normalise abbreviations and drop duplicates
  solver <- unique(match.arg(solver, allowed, several.ok = TRUE))
  if (length(solver) > 2L) {
    stop("'solver' must have 2 entries, not ", length(solver), ".")
  }
  # Two solvers only make sense as "grid" followed by a refining solver:
  # reorder so grid runs first, or drop the second solver otherwise
  if (length(solver) == 2L) {
    if (!any(grepl("grid", solver))) {
      warning("Dropped the second solver '", solver[2], "', using only '", solver[1], "'.", call. = FALSE)
    } else if (solver[2] == "grid") {
      solver <- solver[2:1]
      warning("Using solver 'grid' first, then '", solver[2], "'.", call. = FALSE)
    }
  }
  # A solver that is neither built in nor registered must come from an ROI
  # plugin package; offer to install it interactively (utils::menu)
  missing <- is.na(match(solver, c("grid", "solnp", "auto", names(ROI::ROI_registered_solvers()))))
  if (any(missing)) {
    install <- utils::menu(c("Yes", "No, stop the model."), title = paste0("The solver '", solver[missing], "' is not (yet) installed. Want to install it?"))
    if (install == 1) {
      install.packages(paste0("ROI.plugin.", solver[missing]))
      library(paste0("ROI.plugin.", solver[missing]), character.only=TRUE)
      return(solver)
    } else {
      stop("Model stopped, because the ROI solver plugin was not (yet) installed. \n * Would you like to see the solvers that are installed, ROI::ROI_registered_solvers()?\n * Would you like to change the solver?", call. = FALSE)
    }
  } else {
    return(solver)
  }
}
# if (length(fix) < nrow(parspace) & is.null(self$res) & self$options$fit == TRUE ) {
# stop("'formula' must have a left side to estimate parameter ", .brackify(setdiff(rownames(parspace), names(fix))), ".\n
# * Did you forget to add a left-hand to the formula?\n
# * Did you forget to fix the parameter ", .dotify(setdiff(rownames(parspace), names(fix))), "?", call. = FALSE)
# }
|
/R/utils-checks.R
|
permissive
|
oliviaguest/cognitivemodels
|
R
| false
| false
| 6,838
|
r
|
# ==========================================================================
# Package: Cognitivemodels
# File: utils-checks.R
# Author: Jana B. Jarecki
# ==========================================================================
# ==========================================================================
# Utility functions for checking sanity of model inputs
# ==========================================================================
#' Checks the choicerule
#'
#' @importFrom utils menu
#' @importFrom utils install.packages
#'
#' @param x the name of the choicerule
#' @export
#' @noRd
.check_and_match_choicerule <- function(x = NULL) {
  # Missing/NULL input is a hard error: the caller must be explicit,
  # using "none" to deliberately skip the choicerule.
  if (length(x) == 0L) {
    stop("Must supply a 'choicerule'.\n * Set choicerule to 'none' to not apply a choicerule.\n * Allowed values are 'none', softmax', 'luce', 'epsilon'", call. = FALSE)
  }
  # Resolve (possibly abbreviated) names against the allowed set
  match.arg(x, c("none", "softmax", "argmax", "luce", "epsilon"))
}
#' Checks the parameter values
#'
#' @param x A vector or list with parameters to fix
#' @param pass Logical, whether to pass this check
#' @export
#' @noRd
.check_par <- function(x = NULL, parspace, pass = FALSE) {
  # Formal checks
  # Nothing to validate when checking is disabled or no parameters were fixed
  if (pass == TRUE | length(x) == 0L) { return() }
  # The sentinel "start" means "use starting values" and needs no checking
  if (is.character(x)) { if(x[1] == "start") { return() }}
  # Normalise a plain numeric vector of fixed values into a list
  if (length(x) & all(is.numeric(x))) { x <- as.list(x) }
  # Multiple fixed parameters must arrive as a list, one entry each
  if (length(x) > 1L & !is.list(x)) {
    stop("Parameters to fix must be a list, not a ", typeof(x), ".\n * Did you forget to supply a list to 'fix'? fix = list( ... )?", call.=FALSE)
  }
  # Each list entry must hold exactly one value
  if (length(x) != sum(sapply(x, length))) {
    stop("Parameters to fix must be a list with 1 parameter per list entry, but the ", which(lapply(x, length) > 1L), ". entry of 'fix' has multiple parameters.\n * Do you need to change the format of 'fix'?", call. = FALSE)
  }
  # Each parameter may be fixed at most once
  if (any(duplicated(names(x)))) {
    stop("Names of fixed parameters must be unique, but 'fix' contains ", .dotify(sQuote(names(x)[duplicated(names(x))])), " ", sum(duplicated(names(x))) + 1, " times.", call. = FALSE)
  }
  # apply the check par function iteratively if par length > 1
  # (recurse entry-by-entry so the single-parameter checks below apply)
  if (length(x) > 1L) {
    Map(function(x, i) .check_par(x = setNames(x, i), parspace), x, names(x) )
    return()
  }
  # Single parameter: check its name, its value range, and fix-specific rules
  .check_parnames(x = names(x), y = rownames(parspace), pass = pass)
  .check_parvalues(x = x, y = parspace, pass = pass)
  .check_fixvalues(x = x, y = parspace, pass = pass)
}
# Verify that a fixed parameter's name is one of the model's parameters.
# `x` is the candidate name, `y` the vector of legal names; errors with a
# suggestion when the name is unknown, returns silently otherwise.
.check_parnames <- function(x, y, pass = FALSE) {
  if (pass == TRUE) { return() }
  nm <- unlist(x)
  if (nm %in% y) { return() }
  stop("Parameter names must be ", .brackify(dQuote(y)), ", not ", dQuote(nm), ".\n * ", .didyoumean(nm, y), call. = FALSE)
}
# Verify that a fixed parameter's value lies inside its allowed range.
#
# `x` is a single (named) parameter value, `y` the parspace table with
# per-parameter "lb"/"ub" columns, `n` the parameter name. NA values and
# character values (equality constraints) are not range-checked.
.check_parvalues <- function(x, y, n = names(x), pass = FALSE) {
  x <- unlist(x)
  # Short-circuiting || for this scalar guard (was elementwise |): with ||,
  # is.character() is never reached when x is NA, and the scalar intent is
  # explicit.
  if (pass == TRUE || is.na(x) || is.character(x)) { return() }
  y <- y[n, ]
  # Numeric tolerance so boundary values are not rejected by rounding error
  tolerance <- sqrt(.Machine$double.eps)
  if (x < (y["lb"] - tolerance) || (x > y["ub"] + tolerance)) {
    stop("Parameter ", sQuote(n), " must be between ", y["lb"], " and ", y["ub"], ".\n * Did you accidentally fix '", n, " = ", x, "'?\n * Would you like to change the parameter range? options = list(", ifelse(x > y["ub"] + tolerance, "ub", "lb"), " = c(", n, " = ", x, ")", call.=FALSE)
  }
}
#' Checks the fixed parameter
#'
#' @param x the fixed parameter
#' @param y the parameter space object
#' @export
#' @noRd
# Validates a single fixed parameter: equality constraints (character values)
# must name a *different*, existing parameter, and NA is only allowed for
# parameters the model can ignore (parspace column "na").
# (Idiom fix: `<-` for the top-level assignment instead of `=`.)
.check_fixvalues <- function(x, y, pass = FALSE) {
  x <- unlist(x)
  # Short-circuiting || for this scalar guard (was elementwise |)
  if (pass == TRUE || length(x) == 0L) { return() }
  if (is.character(x)) {
    if (names(x) == x) {
      stop("Fixed parameter (equality-constrained) must be equal to another parameter, not itself. \n * Did you accidentally fix ", names(x), " = ", dQuote(x), "?", call. = FALSE)
    }
    if (!x %in% rownames(y)) {
      stop("Fixed parameter (equality-constrained) must be equal to one of ", .dotify(dQuote(rownames(y))), ".\n * Did you accidentally fix ", names(x), " = ", dQuote(x), "? ", .didyoumean(x, setdiff(rownames(y), names(x))), call. = FALSE)
    }
  }
  # && short-circuits (was &): y[names(x), "na"] is only looked up when x
  # actually is NA, so a parspace without an "na" column cannot error here
  if (is.na(x) && is.na(y[names(x), "na"])) {
    stop("Fixed parameter ", sQuote(names(x)), " can't be NA and thereby ignored, because the model needs the parameter ", sQuote(names(x)), ".\n * Do you want to fix ", sQuote(names(x)), " to be between ", paste(y[names(x), c("lb","ub")], collapse = " and "), "?", call. = FALSE)
  }
}
#' Prints the possible optimization solvers
#'
#' @export
#' @noRd
# Return the names of all optimization solvers the package can use: the
# built-in "grid"/"solnp"/"auto" plus every ROI plugin that is either
# available for installation or already registered.
solvers <- function() {
  plugin_names <- gsub("ROI.plugin.", "", ROI::ROI_available_solvers()$Package)
  registered <- names(ROI::ROI_registered_solvers())
  c("grid", "solnp", "auto", unique(c(plugin_names, registered)))
}
#' Checks and optionally installs missing solvers
#'
#' @param solver_name the name of the solver
#' @export
#' @noRd
.check_and_match_solver <- function(solver) {
  # All legal solver names: built-ins plus ROI plugins
  allowed <- cognitivemodels:::solvers()
  # Validate each entry individually so the error names the bad one
  for (s in solver) {
    if (inherits(try(match.arg(s, allowed), silent = TRUE), "try-error")) {
      stop("'solver' must be a valid name, not ", dQuote(setdiff(s, allowed)), ".\n * ", .didyoumean(s, allowed), "\n * Would you like to see all valid names? cognitivemodels:::solvers()", call. = FALSE)
    }
  }
  # Normalise abbreviations and drop duplicates
  solver <- unique(match.arg(solver, allowed, several.ok = TRUE))
  if (length(solver) > 2L) {
    stop("'solver' must have 2 entries, not ", length(solver), ".")
  }
  # Two solvers only make sense as "grid" followed by a refining solver:
  # reorder so grid runs first, or drop the second solver otherwise
  if (length(solver) == 2L) {
    if (!any(grepl("grid", solver))) {
      warning("Dropped the second solver '", solver[2], "', using only '", solver[1], "'.", call. = FALSE)
    } else if (solver[2] == "grid") {
      solver <- solver[2:1]
      warning("Using solver 'grid' first, then '", solver[2], "'.", call. = FALSE)
    }
  }
  # A solver that is neither built in nor registered must come from an ROI
  # plugin package; offer to install it interactively (utils::menu)
  missing <- is.na(match(solver, c("grid", "solnp", "auto", names(ROI::ROI_registered_solvers()))))
  if (any(missing)) {
    install <- utils::menu(c("Yes", "No, stop the model."), title = paste0("The solver '", solver[missing], "' is not (yet) installed. Want to install it?"))
    if (install == 1) {
      install.packages(paste0("ROI.plugin.", solver[missing]))
      library(paste0("ROI.plugin.", solver[missing]), character.only=TRUE)
      return(solver)
    } else {
      stop("Model stopped, because the ROI solver plugin was not (yet) installed. \n * Would you like to see the solvers that are installed, ROI::ROI_registered_solvers()?\n * Would you like to change the solver?", call. = FALSE)
    }
  } else {
    return(solver)
  }
}
# if (length(fix) < nrow(parspace) & is.null(self$res) & self$options$fit == TRUE ) {
# stop("'formula' must have a left side to estimate parameter ", .brackify(setdiff(rownames(parspace), names(fix))), ".\n
# * Did you forget to add a left-hand to the formula?\n
# * Did you forget to fix the parameter ", .dotify(setdiff(rownames(parspace), names(fix))), "?", call. = FALSE)
# }
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## makeCacheMatrix creates a special matrix, which is the list containing
## a function to set the value of matrix, get the value of matrix,
## set the value of inverse and get the value of inverse
makeCacheMatrix <- function(x = matrix()) {
  # Wraps a matrix together with a cache for its inverse. Returns a list of
  # four closures sharing this environment:
  #   set(y)        -- replace the matrix and invalidate the cache
  #   get()         -- return the stored matrix
  #   setinverse(i) -- store a computed inverse
  #   getinverse()  -- return the cached inverse, or NULL if not yet set
  i <- NULL  # cached inverse; NULL means "not computed yet"
  list(
    set = function(y) {
      x <<- y
      i <<- NULL  # new matrix, so any cached inverse is stale
    },
    get = function() x,
    setinverse = function(solve) i <<- solve,
    getinverse = function() i
  )
}
## Write a short comment describing this function
## Function calculates inverse of the matrix created with above function
## Function first checks if the inverse is already calculated. If so, the
## function skips the computation and gets the inverse from cache.
## Otherwise, it calculates inverse of the matrix and sets the value of
## the inverse in the cache via the setinverse function
cacheSolve <- function(x, ...) {
  # Returns the inverse of the matrix wrapped by `x` (a makeCacheMatrix
  # object), serving the cached value when one exists and computing and
  # caching it otherwise. Extra arguments are forwarded to solve().
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  # Cache miss: invert the stored matrix and memoize the result.
  inverse <- solve(x$get(), ...)
  x$setinverse(inverse)
  inverse
}
|
/cachematrix.R
|
no_license
|
carizma111/ProgrammingAssignment2
|
R
| false
| false
| 1,166
|
r
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
## makeCacheMatrix creates a special matrix, which is the list containing
## a function to set the value of matrix, get the value of matrix,
## set the value of inverse and get the value of inverse
## Duplicate copy of makeCacheMatrix (identical definition appears above).
## Returns a list of four closures that wrap matrix `x` together with a
## cached value of its inverse:
##   set(y)        -- replace the matrix and invalidate the cache
##   get()         -- return the stored matrix
##   setinverse(i) -- store a computed inverse in the cache
##   getinverse()  -- return the cached inverse, or NULL if not yet set
makeCacheMatrix <- function(x = matrix()) {
  i <- NULL  # cached inverse; NULL means "not computed yet"
  set <- function(y) {
    x <<- y
    i <<- NULL  # new matrix, so any previously cached inverse is stale
  }
  get <- function() x
  setinverse <- function(solve) i <<- solve
  getinverse <- function() i
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## Write a short comment describing this function
## Function calculates inverse of the matrix created with above function
## Function first checks if the inverse is already calculated. If so, the
## function skips the computation and gets the inverse from cache.
## Otherwise, it calculates inverse of the matrix and sets the value of
## the inverse in the cache via the setinverse function
## Duplicate copy of cacheSolve (identical definition appears above).
## Returns the inverse of the matrix wrapped by `x` (a makeCacheMatrix
## object): serves the cached inverse when available, otherwise computes it
## with solve(), stores it in the cache, and returns it.
cacheSolve <- function(x, ...) {
  i <- x$getinverse()
  if(!is.null(i)) {
    # Cache hit: skip the computation entirely.
    message("getting cached data")
    return(i)
  }
  # Cache miss: invert and memoize for subsequent calls.
  data <- x$get()
  i <- solve(data, ...)
  x$setinverse(i)
  i
}
|
## This function computes the inverse of the special "matrix"
## returned by makeCacheMatrix above.
## calculates matrix inverse
makeCacheMatrix <- function(x = matrix()) {
  # DEFECT: this was an empty stub, yet the companion cacheSolve() below
  # calls x$getInverse(), x$get() and x$setInverse(). Implement the expected
  # contract: a list of closures wrapping `x` plus a cached inverse.
  inv <- NULL  # cached inverse; NULL until setInverse() is called
  set <- function(y) {
    x <<- y
    inv <<- NULL  # invalidate the cache when the matrix changes
  }
  get <- function() x
  setInverse <- function(inverse) inv <<- inverse
  getInverse <- function() inv
  list(set = set, get = get,
       setInverse = setInverse, getInverse = getInverse)
}
## used to calculate matrix inverse
## used to calculate matrix inverse
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x' (a makeCacheMatrix object).
  # BUG FIX: the function body had been pasted *after* the closing brace,
  # leaving orphan top-level statements and a stray "}" (a syntax error).
  # The statements are moved back inside the function.
  inv <- x$getInverse()
  if (!is.null(inv)) {
    # Cache hit: reuse the previously computed inverse.
    message("getting cached data")
    return(inv)
  }
  data <- x$get()
  inv <- solve(data, ...)  # forward ... to solve(), per the signature
  x$setInverse(inv)
  inv
}
|
/cachematrix.R
|
no_license
|
Abdullah-Abdul-Kareem/ProgrammingAssignment2
|
R
| false
| false
| 460
|
r
|
## This function computes the inverse of the special "matrix"
## returned by makeCacheMatrix above.
## calculates matrix inverse
makeCacheMatrix <- function(x = matrix()) {
  # DEFECT: empty stub (duplicate copy); the companion cacheSolve() below
  # calls x$getInverse(), x$get() and x$setInverse(), so implement that
  # contract: closures wrapping `x` plus a cached inverse.
  inv <- NULL  # cached inverse; NULL until setInverse() is called
  set <- function(y) {
    x <<- y
    inv <<- NULL  # invalidate the cache when the matrix changes
  }
  get <- function() x
  setInverse <- function(inverse) inv <<- inverse
  getInverse <- function() inv
  list(set = set, get = get,
       setInverse = setInverse, getInverse = getInverse)
}
## used to calculate matrix inverse
## used to calculate matrix inverse
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x' (a makeCacheMatrix object).
  # BUG FIX (duplicate copy): the body had been pasted after the closing
  # brace, leaving orphan top-level statements and a stray "}" (a syntax
  # error). The statements are moved back inside the function.
  inv <- x$getInverse()
  if (!is.null(inv)) {
    # Cache hit: reuse the previously computed inverse.
    message("getting cached data")
    return(inv)
  }
  data <- x$get()
  inv <- solve(data, ...)  # forward ... to solve(), per the signature
  x$setInverse(inv)
  inv
}
|
###############################################################################################################################################################
#
# Input parameters (This is how they should look)
#
###############################################################################################################################################################
#
#workspace = "I:/Ethiopia/Change_Detection"
#pointsshp = "I:/Ethiopia/Change_Detection/test_points"
#classfield = "change"
#predicttype = "Thematic"
#imageList = c("I:/Ethiopia/Change_Detection/Guji/l8_20130127_guji_stack_7band.img","I:/Ethiopia/Change_Detection/Guji/guji_l5tm_feb_1987_mosaic_clipped.img","I:/Ethiopia/Change_Detection/Guji/guji_1987_2014_difference.img")
#thematicimagelist = c("I:/Ethiopia/Change_Detection/Guji/l8_20130127_guji_stack_7band.img")
#outfile = "I:/Ethiopia/Change_Detection/change_test.img"
#
###############################################################################################################################################################
#install (only if missing) and get required packages
# IMPROVEMENT: the original called install.packages() unconditionally on
# every run, which needs network access and re-installs each time.
# requireNamespace() makes the install a one-time operation.
for (pkg in c("raster", "rgdal", "tools")) {
  if (!requireNamespace(pkg, quietly = TRUE)) {
    install.packages(pkg, repos = 'http://cran.us.r-project.org')
  }
  library(pkg, character.only = TRUE)
}
#set working directory (workspace / pointsshp / imageList etc. are expected
# to be defined by the caller; see the header comment block above)
setwd(workspace)
#read in points (shapefile of training locations)
points = shapefile(pointsshp)
rm(pointsshp)
gc()
# limit raster processing chunk size to keep memory use bounded
rasterOptions(chunksize = 2e+06)
######################################################################################
#
# create stacklist
#
######################################################################################
stacklist = c()
#read each raster from imageList as stack
print("Stacking Rasters")
# continuous predictor rasters
if (length(imageList) > 0)
{
  for(i in 1:length(imageList))
  {
    rast = stack(imageList[i])
    stacklist = append(stacklist,rast)
    rm(rast)
  }
}
# thematic (categorical) predictor rasters; guard against a single empty entry
if (length(thematicimagelist) > 0 & thematicimagelist[1] != "")
{
  for(i in 1:length(thematicimagelist))
  {
    rast = stack(thematicimagelist[i])
    stacklist = append(stacklist,rast)
    rm(rast)
  }
}
print(stacklist)
#stack all the rasters into one multi-band stack for extraction and prediction
ourStack = stack(stacklist)
rm(stacklist)
gc()
######################################################################################
#
# extract points
#
######################################################################################
print("Extracting Point Values")
# pixel values of every band at every training point
pointvalues = extract(ourStack, points)
pointsDF = as.data.frame(points)
rm(points)
gc()
######################################################################################
#
# get class field
#
######################################################################################
# column index of the response field in the point attribute table
classindex = which(colnames(pointsDF)==classfield)
rm(classfield)
gc()
######################################################################################
#
# Recode the thematic values
#
######################################################################################
legend = ""
emptyvec = c()
if(predicttype=="Thematic")
{
  # map each class label to a numeric code; `legend` keeps the lookup table
  classnames = as.factor(pointsDF[,classindex])
  numvec = seq(1,length(levels(classnames)),1)
  legend = as.data.frame(cbind(numvec,levels(classnames)))
  #loop through and change values in pointsDF
  for(i in 1:dim(pointsDF)[1])
  {
    newvalue = as.numeric(as.character(legend[which(legend[,2]==pointsDF[i,classindex]),][1]))
    emptyvec = append(emptyvec,newvalue)
  }
  colnames(legend) = c("NumValue","TextValue")
  rm(newvalue)
  rm(numvec)
  rm(classnames)
  gc()
}
######################################################################################
#
# create modeldataset
#
######################################################################################
print("Creating Model Dataset")
# response column first, extracted raster values after
if(predicttype=="Thematic"){
  ModelDataset = as.data.frame(cbind(emptyvec, pointvalues))
}else{
  ModelDataset = as.data.frame(cbind(pointsDF[,classindex], pointvalues))
}
#if it should be a thematic output, then force "class" field to a factor
if(predicttype=="Thematic")
{
  ModelDataset[,1] = as.factor(ModelDataset[,1])
}
rm(emptyvec)
rm(classindex)
rm(pointvalues)
rm(pointsDF)
gc()
######################################################################################
#
# set data type for the rasters
#
######################################################################################
#set continuous rasters to numeric
if (length(imageList)>0)
{
  print("Setting Rasters to continuous")
  for( i in 1:length(imageList))
  {
    # match model-dataset columns back to their source image by file name
    ImageName = basename(file_path_sans_ext(imageList[i]))
    for(j in 1:dim(ModelDataset)[2])
    {
      columnname = unlist(strsplit(basename(colnames(ModelDataset)[j]),split = "\\."))[1]
      if(ImageName == columnname)
      {
        ModelDataset[,j] = as.numeric(ModelDataset[,j])
        print("Changed following column to numeric:")
        print(colnames(ModelDataset)[j])
        print(is.numeric(ModelDataset[,j]))
      }
    }
    rm(columnname)
    rm(ImageName)
    gc()
  }
}
#set thematic rasters to factor
if(length(thematicimagelist)>0)
{
  print("Setting Rasters to factor")
  for( i in 1:length(thematicimagelist))
  {
    ImageName = basename(file_path_sans_ext(thematicimagelist[i]))
    for(j in 1:dim(ModelDataset)[2])
    {
      columnname = basename(colnames(ModelDataset)[j])
      if(ImageName == columnname)
      {
        ModelDataset[,j] = as.factor(ModelDataset[,j])
        print("Changed following column to factor:")
        print(colnames(ModelDataset)[j])
        print(is.factor(ModelDataset[,j]))
      }
    }
    rm(columnname)
    rm(ImageName)
    gc()
  }
}
rm(imageList)
rm(thematicimagelist)
gc()
######################################################################################
# name the response column so the model formula `Class ~ .` works below
colnames(ModelDataset)[1] = "Class"
######################################################################################
#
# fit regression model
#
######################################################################################
# NOTE: despite the original "Random Forest" wording, this template fits a
# linear model (lm); the predict step below must therefore use LM_Model.
print("Creating Linear Model")
LM_Model = lm(Class ~ ., data = ModelDataset)
rm(ModelDataset)
gc()
######################################################################################
#
# output to file varimpplot and confusion matrix
#
######################################################################################
# NOTE(review): `OutputModel` is not defined in the header parameter list
# (which mentions `outfile`) -- presumably supplied by the caller; verify.
base = basename(file_path_sans_ext(OutputModel))
######################################################################################
#
# create predict raster
#
######################################################################################
time1 = Sys.time()
print("Predicting Raster")
# BUG FIX: the thematic branch referenced RF_Model, which is never created in
# this script; both branches must predict with LM_Model.
if(predicttype=="Thematic"){
  outputrast = predict(ourStack, LM_Model, filename = OutputModel, type='response',progress = "text", datatype = 'INT1U', inf.rm = TRUE)
}else{
  outputrast = predict(ourStack, LM_Model, filename = OutputModel, type='response',progress = "text", datatype = 'INT2U', inf.rm = TRUE)
}
time2 = Sys.time()
totaltime = time2 - time1
print(totaltime)
rm(outputrast)
rm(LM_Model)  # BUG FIX: was rm(RF_Model), an object that never existed
rm(predicttype)
rm(ourStack)
rm(OutputModel)
rm(time1)
rm(time2)
rm(totaltime)
gc()
|
/regression_template_.R
|
no_license
|
carsonas/RSAC
|
R
| false
| false
| 7,029
|
r
|
###############################################################################################################################################################
#
# Input parameters (This is how they should look)
#
###############################################################################################################################################################
#
#workspace = "I:/Ethiopia/Change_Detection"
#pointsshp = "I:/Ethiopia/Change_Detection/test_points"
#classfield = "change"
#predicttype = "Thematic"
#imageList = c("I:/Ethiopia/Change_Detection/Guji/l8_20130127_guji_stack_7band.img","I:/Ethiopia/Change_Detection/Guji/guji_l5tm_feb_1987_mosaic_clipped.img","I:/Ethiopia/Change_Detection/Guji/guji_1987_2014_difference.img")
#thematicimagelist = c("I:/Ethiopia/Change_Detection/Guji/l8_20130127_guji_stack_7band.img")
#outfile = "I:/Ethiopia/Change_Detection/change_test.img"
#
###############################################################################################################################################################
#install (only if missing) and get required packages
# IMPROVEMENT (duplicate copy): unconditional install.packages() on every run
# needs network access and re-installs each time; requireNamespace() makes
# the install a one-time operation.
for (pkg in c("raster", "rgdal", "tools")) {
  if (!requireNamespace(pkg, quietly = TRUE)) {
    install.packages(pkg, repos = 'http://cran.us.r-project.org')
  }
  library(pkg, character.only = TRUE)
}
#set working directory
setwd(workspace)
#read in points
points = shapefile(pointsshp)
rm(pointsshp)
gc()
rasterOptions(chunksize = 2e+06)
######################################################################################
#
# create stacklist
#
######################################################################################
stacklist = c()
#read each raster from imageList as stack
print("Stacking Rasters")
if (length(imageList) > 0)
{
for(i in 1:length(imageList))
{
rast = stack(imageList[i])
stacklist = append(stacklist,rast)
rm(rast)
}
}
if (length(thematicimagelist) > 0 & thematicimagelist[1] != "")
{
for(i in 1:length(thematicimagelist))
{
rast = stack(thematicimagelist[i])
stacklist = append(stacklist,rast)
rm(rast)
}
}
print(stacklist)
#stack all the rasters
ourStack = stack(stacklist)
rm(stacklist)
gc()
######################################################################################
#
# extract points
#
######################################################################################
print("Extracting Point Values")
pointvalues = extract(ourStack, points)
pointsDF = as.data.frame(points)
rm(points)
gc()
######################################################################################
#
# get class field
#
######################################################################################
classindex = which(colnames(pointsDF)==classfield)
rm(classfield)
gc()
######################################################################################
#
# Recode the thematic values
#
######################################################################################
legend = ""
emptyvec = c()
if(predicttype=="Thematic")
{
classnames = as.factor(pointsDF[,classindex])
numvec = seq(1,length(levels(classnames)),1)
legend = as.data.frame(cbind(numvec,levels(classnames)))
#loop through and change values in pointsDF
for(i in 1:dim(pointsDF)[1])
{
newvalue = as.numeric(as.character(legend[which(legend[,2]==pointsDF[i,classindex]),][1]))
emptyvec = append(emptyvec,newvalue)
}
colnames(legend) = c("NumValue","TextValue")
rm(newvalue)
rm(numvec)
rm(classnames)
gc()
}
######################################################################################
#
# create modeldataset
#
######################################################################################
print("Creating Model Dataset")
if(predicttype=="Thematic"){
ModelDataset = as.data.frame(cbind(emptyvec, pointvalues))
}else{
ModelDataset = as.data.frame(cbind(pointsDF[,classindex], pointvalues))
}
#if it should be a thematic output, then force "class" field to a factor
if(predicttype=="Thematic")
{
ModelDataset[,1] = as.factor(ModelDataset[,1])
}
rm(emptyvec)
rm(classindex)
rm(pointvalues)
rm(pointsDF)
gc()
######################################################################################
#
# set data type for the rasters
#
######################################################################################
#set continuous rasters to numeric
if (length(imageList)>0)
{
print("Setting Rasters to continuous")
for( i in 1:length(imageList))
{
ImageName = basename(file_path_sans_ext(imageList[i]))
for(j in 1:dim(ModelDataset)[2])
{
columnname = unlist(strsplit(basename(colnames(ModelDataset)[j]),split = "\\."))[1]
if(ImageName == columnname)
{
ModelDataset[,j] = as.numeric(ModelDataset[,j])
print("Changed following column to numeric:")
print(colnames(ModelDataset)[j])
print(is.numeric(ModelDataset[,j]))
}
}
rm(columnname)
rm(ImageName)
gc()
}
}
#set thematic rasters to factor
if(length(thematicimagelist)>0)
{
print("Setting Rasters to factor")
for( i in 1:length(thematicimagelist))
{
ImageName = basename(file_path_sans_ext(thematicimagelist[i]))
for(j in 1:dim(ModelDataset)[2])
{
columnname = basename(colnames(ModelDataset)[j])
if(ImageName == columnname)
{
ModelDataset[,j] = as.factor(ModelDataset[,j])
print("Changed following column to factor:")
print(colnames(ModelDataset)[j])
print(is.factor(ModelDataset[,j]))
}
}
rm(columnname)
rm(ImageName)
gc()
}
}
rm(imageList)
rm(thematicimagelist)
gc()
######################################################################################
colnames(ModelDataset)[1] = "Class"
######################################################################################
#
# fit regression model
#
######################################################################################
# NOTE (duplicate copy): despite the original "Random Forest" wording, this
# template fits a linear model (lm); prediction below must use LM_Model.
print("Creating Linear Model")
LM_Model = lm(Class ~ ., data = ModelDataset)
rm(ModelDataset)
gc()
######################################################################################
#
# output to file varimpplot and confusion matrix
#
######################################################################################
# NOTE(review): `OutputModel` is not defined in the header parameter list
# (which mentions `outfile`) -- presumably supplied by the caller; verify.
base = basename(file_path_sans_ext(OutputModel))
######################################################################################
#
# create predict raster
#
######################################################################################
time1 = Sys.time()
print("Predicting Raster")
# BUG FIX: the thematic branch referenced RF_Model, which is never created in
# this script; both branches must predict with LM_Model.
if(predicttype=="Thematic"){
  outputrast = predict(ourStack, LM_Model, filename = OutputModel, type='response',progress = "text", datatype = 'INT1U', inf.rm = TRUE)
}else{
  outputrast = predict(ourStack, LM_Model, filename = OutputModel, type='response',progress = "text", datatype = 'INT2U', inf.rm = TRUE)
}
time2 = Sys.time()
totaltime = time2 - time1
print(totaltime)
rm(outputrast)
rm(LM_Model)  # BUG FIX: was rm(RF_Model), an object that never existed
rm(predicttype)
rm(ourStack)
rm(OutputModel)
rm(time1)
rm(time2)
rm(totaltime)
gc()
|
# 1. Merges the training and the test sets to create one data set.
# 2. Extracts only the measurements on the mean and standard deviation for each measurement.
# 3. Uses descriptive activity names to name the activities in the data set
# 4. Appropriately labels the data set with descriptive variable names.
# 5. From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
# Load Packages and get the Data
# BUG FIX: the first comment line had lost its leading "#", which made the
# whole file fail to parse. Also, sapply(..., require, ...) silently returns
# FALSE for a missing package; fail loudly instead.
packages <- c("data.table", "reshape2")
for (pkg in packages) {
  if (!require(pkg, character.only = TRUE, quietly = TRUE)) {
    stop("Required package is not installed: ", pkg)
  }
}
path <- getwd()
url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(url, file.path(path, "dataFiles.zip"))
unzip(zipfile = "dataFiles.zip")
# Load activity labels + features
activityLabels <- fread(file.path(path, "UCI HAR Dataset/activity_labels.txt")
                        , col.names = c("classLabels", "activityName"))
features <- fread(file.path(path, "UCI HAR Dataset/features.txt")
                  , col.names = c("index", "featureNames"))
# Keep only mean() and std() measurements; strip "()" from the names
featuresWanted <- grep("(mean|std)\\(\\)", features[, featureNames])
measurements <- features[featuresWanted, featureNames]
measurements <- gsub('[()]', '', measurements)
# Load train datasets
train <- fread(file.path(path, "UCI HAR Dataset/train/X_train.txt"))[, featuresWanted, with = FALSE]
data.table::setnames(train, colnames(train), measurements)
trainActivities <- fread(file.path(path, "UCI HAR Dataset/train/Y_train.txt")
                         , col.names = c("Activity"))
trainSubjects <- fread(file.path(path, "UCI HAR Dataset/train/subject_train.txt")
                       , col.names = c("SubjectNum"))
train <- cbind(trainSubjects, trainActivities, train)
# Load test datasets
test <- fread(file.path(path, "UCI HAR Dataset/test/X_test.txt"))[, featuresWanted, with = FALSE]
data.table::setnames(test, colnames(test), measurements)
testActivities <- fread(file.path(path, "UCI HAR Dataset/test/Y_test.txt")
                        , col.names = c("Activity"))
testSubjects <- fread(file.path(path, "UCI HAR Dataset/test/subject_test.txt")
                      , col.names = c("SubjectNum"))
test <- cbind(testSubjects, testActivities, test)
# merge datasets
combined <- rbind(train, test)
# Convert classLabels to activityName basically. More explicit.
combined[["Activity"]] <- factor(combined[, Activity]
                                 , levels = activityLabels[["classLabels"]]
                                 , labels = activityLabels[["activityName"]])
combined[["SubjectNum"]] <- as.factor(combined[, SubjectNum])
# Long format, then average each variable per subject/activity pair
combined <- reshape2::melt(data = combined, id = c("SubjectNum", "Activity"))
combined <- reshape2::dcast(data = combined, SubjectNum + Activity ~ variable, fun.aggregate = mean)
data.table::fwrite(x = combined, file = "tidyData.txt", quote = FALSE)
|
/programme.R
|
no_license
|
Mahima-bit/gettingcleanpeer
|
R
| false
| false
| 2,951
|
r
|
# 1. Merges the training and the test sets to create one data set.
# 2. Extracts only the measurements on the mean and standard deviation for each measurement.
# 3. Uses descriptive activity names to name the activities in the data set
# 4. Appropriately labels the data set with descriptive variable names.
# 5. From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
# Load Packages and get the Data
packages <- c("data.table", "reshape2")
sapply(packages, require, character.only=TRUE, quietly=TRUE)
path <- getwd()
url <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(url, file.path(path, "dataFiles.zip"))
unzip(zipfile = "dataFiles.zip")
# Load activity labels + features
activityLabels <- fread(file.path(path, "UCI HAR Dataset/activity_labels.txt")
, col.names = c("classLabels", "activityName"))
features <- fread(file.path(path, "UCI HAR Dataset/features.txt")
, col.names = c("index", "featureNames"))
featuresWanted <- grep("(mean|std)\\(\\)", features[, featureNames])
measurements <- features[featuresWanted, featureNames]
measurements <- gsub('[()]', '', measurements)
# Load train datasets
train <- fread(file.path(path, "UCI HAR Dataset/train/X_train.txt"))[, featuresWanted, with = FALSE]
data.table::setnames(train, colnames(train), measurements)
trainActivities <- fread(file.path(path, "UCI HAR Dataset/train/Y_train.txt")
, col.names = c("Activity"))
trainSubjects <- fread(file.path(path, "UCI HAR Dataset/train/subject_train.txt")
, col.names = c("SubjectNum"))
train <- cbind(trainSubjects, trainActivities, train)
# Load test datasets
test <- fread(file.path(path, "UCI HAR Dataset/test/X_test.txt"))[, featuresWanted, with = FALSE]
data.table::setnames(test, colnames(test), measurements)
testActivities <- fread(file.path(path, "UCI HAR Dataset/test/Y_test.txt")
, col.names = c("Activity"))
testSubjects <- fread(file.path(path, "UCI HAR Dataset/test/subject_test.txt")
, col.names = c("SubjectNum"))
test <- cbind(testSubjects, testActivities, test)
# merge datasets
combined <- rbind(train, test)
# Convert classLabels to activityName basically. More explicit.
combined[["Activity"]] <- factor(combined[, Activity]
, levels = activityLabels[["classLabels"]]
, labels = activityLabels[["activityName"]])
combined[["SubjectNum"]] <- as.factor(combined[, SubjectNum])
combined <- reshape2::melt(data = combined, id = c("SubjectNum", "Activity"))
combined <- reshape2::dcast(data = combined, SubjectNum + Activity ~ variable, fun.aggregate = mean)
data.table::fwrite(x = combined, file = "tidyData.txt", quote = FALSE)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generics.R, R/methods-vclMatrix.R, R/methods.R
\docType{methods}
\name{block}
\alias{block}
\alias{block,vclMatrix,integer,integer,integer,integer-method}
\alias{block,gpuMatrix,integer,integer,integer,integer-method}
\title{Matrix Blocks}
\usage{
block(object, rowStart, rowEnd, colStart, colEnd)
\S4method{block}{vclMatrix,integer,integer,integer,integer}(object,
rowStart, rowEnd, colStart, colEnd)
\S4method{block}{gpuMatrix,integer,integer,integer,integer}(object,
rowStart, rowEnd, colStart, colEnd)
}
\arguments{
\item{object}{A \code{gpuMatrix} or \code{vclMatrix} object}
\item{rowStart}{An integer indicating the first row of block}
\item{rowEnd}{An integer indicating the last row of block}
\item{colStart}{An integer indicating the first column of block}
\item{colEnd}{An integer indicating the last column of block}
}
\value{
A \code{gpuMatrixBlock} or \code{vclMatrixBlock} object
}
\description{
This doesn't create a copy, it provides a child class that
points to a contiguous submatrix of a \code{\link{gpuMatrix}} or
\code{\link{vclMatrix}}. Non-contiguous blocks are currently not supported.
}
\details{
This function allows a user to create a gpuR matrix object that
references a continuous subset of columns and rows of another gpuR matrix
object without a copy.
NOTE - this means that altering values in a matrix block object will alter
values in the source matrix.
}
\author{
Charles Determan Jr.
}
|
/man/gpuR-block.Rd
|
no_license
|
nnQuynh/gpuR
|
R
| false
| true
| 1,514
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/generics.R, R/methods-vclMatrix.R, R/methods.R
\docType{methods}
\name{block}
\alias{block}
\alias{block,vclMatrix,integer,integer,integer,integer-method}
\alias{block,gpuMatrix,integer,integer,integer,integer-method}
\title{Matrix Blocks}
\usage{
block(object, rowStart, rowEnd, colStart, colEnd)
\S4method{block}{vclMatrix,integer,integer,integer,integer}(object,
rowStart, rowEnd, colStart, colEnd)
\S4method{block}{gpuMatrix,integer,integer,integer,integer}(object,
rowStart, rowEnd, colStart, colEnd)
}
\arguments{
\item{object}{A \code{gpuMatrix} or \code{vclMatrix} object}
\item{rowStart}{An integer indicating the first row of block}
\item{rowEnd}{An integer indicating the last row of block}
\item{colStart}{An integer indicating the first column of block}
\item{colEnd}{An integer indicating the last column of block}
}
\value{
A \code{gpuMatrixBlock} or \code{vclMatrixBlock} object
}
\description{
This doesn't create a copy, it provides a child class that
points to a contiguous submatrix of a \code{\link{gpuMatrix}} or
\code{\link{vclMatrix}}. Non-contiguous blocks are currently not supported.
}
\details{
This function allows a user to create a gpuR matrix object that
references a continuous subset of columns and rows of another gpuR matrix
object without a copy.
NOTE - this means that altering values in a matrix block object will alter
values in the source matrix.
}
\author{
Charles Determan Jr.
}
|
library(solaR)
### Name: C_corrFdKt
### Title: Correlations between the fraction of diffuse irradiation and the
###   clearness index.
### Aliases: corrFdKt FdKtPage FdKtLJ FdKtCPR FdKtEKDd FdKtCLIMEDd FdKtEKDh
###   FdKtCLIMEDh FdKtBRL
### Keywords: utilities
### ** Examples
# Monthly correlations: diffuse-fraction estimates over the full [0, 1]
# range of the clearness index Ktd, compared in one lattice plot.
Ktd=seq(0, 1, .01)
Monthly=data.frame(Ktd=Ktd)
Monthly$Page=FdKtPage(Ktd)
Monthly$LJ=FdKtLJ(Ktd)
xyplot(Page+LJ~Ktd, data=Monthly,
       type=c('l', 'g'), auto.key=list(space='right'))
# Daily correlations evaluated on the same grid of clearness-index values.
Ktd=seq(0, 1, .01)
Daily=data.frame(Ktd=Ktd)
Daily$CPR=FdKtCPR(Ktd)
Daily$CLIMEDd=FdKtCLIMEDd(Ktd)
xyplot(CPR+CLIMEDd~Ktd, data=Daily,
       type=c('l', 'g'), auto.key=list(space='right'))
|
/data/genthat_extracted_code/solaR/examples/corrFdKt.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 664
|
r
|
library(solaR)
### Name: C_corrFdKt
### Title: Correlations between the fraction of diffuse irradiation and the
### clearness index.
### Aliases: corrFdKt FdKtPage FdKtLJ FdKtCPR FdKtEKDd FdKtCLIMEDd FdKtEKDh
### FdKtCLIMEDh FdKtBRL
### Keywords: utilities
### ** Examples
Ktd=seq(0, 1, .01)
Monthly=data.frame(Ktd=Ktd)
Monthly$Page=FdKtPage(Ktd)
Monthly$LJ=FdKtLJ(Ktd)
xyplot(Page+LJ~Ktd, data=Monthly,
type=c('l', 'g'), auto.key=list(space='right'))
Ktd=seq(0, 1, .01)
Daily=data.frame(Ktd=Ktd)
Daily$CPR=FdKtCPR(Ktd)
Daily$CLIMEDd=FdKtCLIMEDd(Ktd)
xyplot(CPR+CLIMEDd~Ktd, data=Daily,
type=c('l', 'g'), auto.key=list(space='right'))
|
################################# Start of Code =====================================
# BUG FIX: rm(ls()) is an error -- ls() returns a character vector, and rm()
# requires names or strings to be passed via the `list` argument.
rm(list = ls())
getwd()
setwd("G:/Georgia Tech/Analytical Models/Assignments")
#install.packages("data.table")
#install.packages("kernlab")
#install.packages("caret")
#install.packages("e1071")
require(data.table)
require(kernlab)
require(caret)
require(e1071)
######################### Get data & manipulate =================================
cred_data = fread("credit_card_data.csv")
#Changing the name of the response variable
cred_names = colnames(cred_data)
cred_names[11] = "Response"
names(cred_data) = cred_names
#Exploring the data
summary(cred_data)
str(cred_data)
unique(cred_data$V5)
#V1, V5, V6, V8 are binary
#Converting V5 to a binary (1, 0) response as there is no loss of info in doing so
# NOTE(review): if V5 is a character column ("t"/"f"), assigning as.integer(1)
# into it stores the *string* "1"/"0"; to_fact() below still treats it as
# binary via coercion in the comparisons -- confirm this is intended.
cred_data$V5[cred_data$V5 == "t"] = as.integer(1)
cred_data$V5[cred_data$V5 == "f"] = as.integer(0)
#The binary variables are all integers. Should convert them to factors for a binary
#response.
to_fact = function(data){
  # Converts every two-valued 0/1 column of `data` to a factor, in place,
  # and returns the modified data. Other columns are left untouched.
  for(i in seq_len(ncol(data))){
    # seq_len() is safe for zero-column input, unlike 1:ncol(data).
    # && short-circuits the scalar condition (the original used vector &).
    if (length(unique(data[[i]])) == 2 && max(data[[i]]) == 1
        && min(data[[i]]) == 0){
      # BUG FIX: paste(..., sep = "") glued the message words together
      # ("Changing class fromintegerto factor."); use the default separator.
      print(paste("Changing class from", class(data[[i]]), "to factor."))
      data[[i]] = as.factor(data[[i]])
    }
  }
  return(data)
}
cred_data = to_fact(cred_data)
for (i in seq_len(ncol(cred_data))) { print(class(cred_data[[i]])) }
################################# KSVM with Cross Vaidation ========================
# Build one C-svc model per candidate cost C with 10-fold cross validation.
# IMPROVEMENT: the original used assign() to scatter modelCV0.1 ... modelCV10000
# through the global environment (an anti-pattern the author's own comment
# acknowledged); a named list keeps the fits together.
C_values = c(0.1, 0.5, 1, 10, 100, 1000, 10000)
modelCV = lapply(C_values, function(C_i) {
  ksvm(Response ~ ., data = cred_data, type = 'C-svc', cross = 10, C = C_i)
})
names(modelCV) = paste0("modelCV", C_values)
# Cross-validation error for every cost value (replaces the seven
# hand-written cross(modelCV*) calls).
for (nm in names(modelCV)) {
  print(nm)
  print(cross(modelCV[[nm]]))
}
############################# Cross Validation with caret ==========================
####################################
#Good source for svm with caret
#https://www.r-bloggers.com/the-5th-tribe-support-vector-machines-and-caret/
####################################
#Define the number of folds
#This function sets the control parameters for the train function. We have different
#resampling methods in this like CV or bootstrapping etc. There are many other
#parameters, some related to the resampling and others not, that we could tune.
numFolds = trainControl(method = "cv" , number = 10)
#Define the various complexity parameters
cGrid = expand.grid(C = c(0.1, 0.5, 1, 10, 100, 1000, 10000))
#Sigma is required for radial svm
rGrid = expand.grid(sigma = c(.01, .015, 0.2), C = c(0.1, 0.5, 1, 10, 100, 1000, 10000))
#Running the k folds cross validation on a linear svm
linear = train(Response ~ ., data = cred_data, method = "svmLinear" , trControl = numFolds, tuneGrid = cGrid)
linear
#Running the k folds cV on a radial svm
rad = train(Response ~ ., data = cred_data, method = "svmRadial" , trControl = numFolds, tuneGrid = rGrid)
rad
#Comparing the 3 models using resampling. Resamples checks if the results match
#after resampling
resamps <- resamples(list(Linear = linear, Radial = rad))
summary(resamps)
#According to the resampling, the radial kernel performs much better
#The range for the radial is more and it does have a lower minima
#compared to the linear but the maximas have a much higher value compared to linear.
#Using radial for our analysis
rad
#This also gives C = 0.5 as the best value
|
/HW3_SVM_Cross-validation.R
|
no_license
|
aten2001/Analytical-Models-Assignments
|
R
| false
| false
| 3,890
|
r
|
################################# Start of Code =====================================
# BUG FIX (duplicate copy): rm(ls()) is an error -- ls() returns a character
# vector, which must be passed via rm()'s `list` argument.
rm(list = ls())
getwd()
setwd("G:/Georgia Tech/Analytical Models/Assignments")
#install.packages("data.table")
#install.packages("kernlab")
#install.packages("caret")
#install.packages("e1071")
require(data.table)
require(kernlab)
require(caret)
require(e1071)
######################### Get data & manipulate =================================
cred_data = fread("credit_card_data.csv")
#Changing the name of the response variable
cred_names = colnames(cred_data)
cred_names[11] = "Response"
names(cred_data) = cred_names
#Exploring the data
summary(cred_data)
str(cred_data)
unique(cred_data$V5)
#V1, V5, V6, V8 are binary
#Converting V5 to a binary (1, 0) response as there is no loss of info in doing so
cred_data$V5[cred_data$V5 == "t"] = as.integer(1)
cred_data$V5[cred_data$V5 == "f"] = as.integer(0)
#The binary variables are all integers. Should convert them to factors for a binary
#response.
# Convert every binary 0/1 column of a data.frame-like object to a factor,
# which is what classification routines expect for a binary response.
#
# Args:
#   data: a data.frame / data.table (columns accessible via data[[i]]).
# Returns:
#   The same object with every column that takes exactly the two values
#   {0, 1} converted to a factor; all other columns are left untouched.
to_fact = function(data){
  for(i in seq_len(ncol(data))){
    # Scalar condition, so the short-circuiting && is appropriate here.
    if (length(unique(data[[i]])) == 2 && max(data[[i]]) == 1
        && min(data[[i]]) == 0){
      # FIX: sep = "" glued the words together ("fromintegerto factor.");
      # explicit spaces restore a readable message.
      print(paste("Changing class from ", class(data[[i]]), " to factor.", sep = ""))
      data[[i]] = as.factor(data[[i]])
    }
  }
  return(data)
}
# Apply the binary-to-factor conversion defined above.
cred_data = to_fact(cred_data)
# Confirm the resulting column classes.
for(i in 1:ncol(cred_data)){print(class(cred_data[[i]]))}
################################# KSVM with Cross Validation ========================
#Building the model
#Using the default kernel
#model_list = list()
#j = 1
# Fit one ksvm per cost value C, each with built-in 10-fold cross validation.
# assign() creates the variables modelCV0.1, modelCV0.5, ..., modelCV10000.
for (i in c(0.1, 0.5, 1, 10, 100, 1000, 10000)){
assign(paste("modelCV", (as.character(i)), sep = ""),
ksvm(Response ~ ., data = cred_data, type='C-svc', cross=10, C=i))
#model_list[j] = paste("modelCV", (as.character(i)), sep = "")
#j = j + 1
#Making a list containing elements as models is a very bad practice.
}
# cross() extracts the 10-fold cross-validation error of each fitted model.
cross(modelCV0.1)
cross(modelCV0.5)
cross(modelCV1)
cross(modelCV10)
cross(modelCV100)
cross(modelCV1000)
cross(modelCV10000)
############################# Cross Validation with caret ==========================
####################################
#Good source for svm with caret
#https://www.r-bloggers.com/the-5th-tribe-support-vector-machines-and-caret/
####################################
#Define the number of folds
#trainControl() sets the control parameters for the train function. We have different
#resampling methods in this like CV or bootstrapping etc. There are many other
#parameters, some related to the resampling and others not, that we could tune.
numFolds = trainControl(method = "cv" , number = 10)
#Define the various complexity (cost) parameters to search over
cGrid = expand.grid(C = c(0.1, 0.5, 1, 10, 100, 1000, 10000))
#Sigma (the kernel width) is additionally required for the radial svm;
#expand.grid crosses every sigma with every C (3 x 7 = 21 combinations).
rGrid = expand.grid(sigma = c(.01, .015, 0.2), C = c(0.1, 0.5, 1, 10, 100, 1000, 10000))
#Running the k folds cross validation on a linear svm
linear = train(Response ~ ., data = cred_data, method = "svmLinear" , trControl = numFolds, tuneGrid = cGrid)
linear
#Running the k folds CV on a radial svm
rad = train(Response ~ ., data = cred_data, method = "svmRadial" , trControl = numFolds, tuneGrid = rGrid)
rad
#Comparing the two models using resampling. resamples() collects the
#per-fold performance of both fits so they can be compared on the same folds.
resamps <- resamples(list(Linear = linear, Radial = rad))
summary(resamps)
#According to the resampling, the radial kernel performs much better
#The range for the radial is more and it does have a lower minima
#compared to the linear but the maximas have a much higher value compared to linear.
#Using radial for our analysis
rad
#This also gives C = 0.5 as the best value
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Test_uSPA.R
\name{Test_uSPA}
\alias{Test_uSPA}
\title{Test uniform Superior Predictive Ability}
\usage{
Test_uSPA(LossDiff, L, B = 999)
}
\arguments{
\item{LossDiff}{the T x H matrix forecast path loss differential}
\item{L}{the parameter for the moving block bootstrap}
\item{B}{integer, the number of bootstrap iterations. Default 999}
}
\value{
A list containing two objects:
\item{"p_value"}{the p-value for uSPA}
\item{"t_uSPA"}{the statistics for uSPA}
}
\description{
Implements the test for uniform Superior Predictive Ability (uSPA) of Quaedvlieg (2021)
}
\examples{
## Test for uSPA
data(LossDiff_uSPA)
Test_uSPA(LossDiff=LossDiff_uSPA, L=3, B=10)
}
\references{
Quaedvlieg, Rogier. "Multi-horizon forecast comparison." Journal of Business & Economic Statistics 39.1 (2021): 40-53.
}
\seealso{
\code{\link{Test_aSPA}}
}
\author{
Luca Barbaglia \url{https://lucabarbaglia.github.io/}
}
|
/man/Test_uSPA.Rd
|
no_license
|
cran/MultiHorizonSPA
|
R
| false
| true
| 976
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Test_uSPA.R
\name{Test_uSPA}
\alias{Test_uSPA}
\title{Test uniform Superior Predictive Ability}
\usage{
Test_uSPA(LossDiff, L, B = 999)
}
\arguments{
\item{LossDiff}{the T x H matrix forecast path loss differential}
\item{L}{the parameter for the moving block bootstrap}
\item{B}{integer, the number of bootstrap iterations. Default 999}
}
\value{
A list containing two objects:
\item{"p_value"}{the p-value for uSPA}
\item{"t_uSPA"}{the statistics for uSPA}
}
\description{
Implements the test for uniform Superior Predictive Ability (uSPA) of Quaedvlieg (2021)
}
\examples{
## Test for uSPA
data(LossDiff_uSPA)
Test_uSPA(LossDiff=LossDiff_uSPA, L=3, B=10)
}
\references{
Quaedvlieg, Rogier. "Multi-horizon forecast comparison." Journal of Business & Economic Statistics 39.1 (2021): 40-53.
}
\seealso{
\code{\link{Test_aSPA}}
}
\author{
Luca Barbaglia \url{https://lucabarbaglia.github.io/}
}
|
library(shiny)
library(shinydashboard)
library(DT)
library(shinyjs)

# shinydashboard skeleton, kept for reference:
# dashboardPage(
#   dashboardHeader(),
#   dashboardSidebar(),
#   dashboardBody()
# )

# UI definition: a dashboard with four demo tabs plus a Covid-19 tab.
# Wrapped in tagList() so useShinyjs() can sit alongside dashboardPage().
tagList(
  useShinyjs(),
  dashboardPage(
    dashboardHeader(title = "Meu Dash"),
    dashboardSidebar(
      sidebarMenu(
        menuItem("Aba 1", tabName = "aba1", icon = icon("star")),
        menuItem("Aba 2", tabName = "aba2", icon = icon("tag")),
        menuItem("Aba 3", tabName = "aba3"),
        menuItem("Aba 4", tabName = "aba4"),
        menuItem("Covid19", tabName = "covid")
      )
    ),
    dashboardBody(
      # includeCSS("www/style.css")   # to apply the custom dashboard colours
      # includeCSS("www/estilo2.css")
      tabItems(
        # Tab 1: two empty boxes.
        tabItem(tabName = "aba1",
                fluidRow(
                  box(title = "Opções"),
                  box(title = "Resultado", status = "primary")
                )
        ),
        # Tab 2: simple text/numeric inputs echoed into "saida" by the server.
        tabItem(tabName = "aba2",
                fluidRow(
                  box(title = "Opções", solidHeader = TRUE,
                      textInput("texto", label = "Texto: "),
                      numericInput("numero", label = "Número: ", value = 0, min = 0)
                  ),
                  box(title = "Resultado", status = "primary",
                      textOutput("saida")
                  )
                )
        ),
        # Tab 3: tabBox examples.
        tabItem(tabName = "aba3",
                fluidRow(
                  tabBox(
                    title = "Primeiro tabBox",
                    # The id lets us use input$tabset1 on the server to find the current tab
                    id = "tabset1", height = "250px",
                    tabPanel("Tab1", "Primeiro conteudo "),
                    tabPanel("Tab2", "Segundo conteudo")
                  ),
                  tabBox(
                    side = "right", height = "250px",
                    selected = "Tab3",
                    tabPanel("Tab1", "Primeiro conteudo- tabbox2"),
                    tabPanel("Tab2", "Segundo conteudo- tabbox2"),
                    tabPanel("Tab3", "Diferença no side=right, a ordem foi invertida")
                  )
                )
        ),
        # Tab 4: box colour/status examples laid out in three columns.
        tabItem(tabName = "aba4",
                fluidRow(
                  column(width = 4,
                         box(
                           title = "Titulo1", width = NULL, solidHeader = TRUE, status = "primary",
                           "Conteudo no box"
                         ),
                         box(
                           width = NULL, background = "black",
                           "A cor, ps: Sem titulo"
                         )
                  ),
                  column(width = 4,
                         box(
                           title = "Titulo3", width = NULL, solidHeader = TRUE, status = "warning",
                           "Conteudo"
                         ),
                         box(
                           title = "Titulo5", width = NULL, background = "light-blue",
                           "A cor..."
                         )
                  ),
                  column(width = 4,
                         box(
                           title = "Titulo2", width = NULL, solidHeader = TRUE,
                           "conteudo"
                         ),
                         box(
                           title = "Titulo6", width = NULL, background = "maroon",
                           "A cor..."
                         )
                  )
                )
        ),
        # Covid-19 tab: summary value boxes, interactive tables and boxes.
        tabItem(tabName = "covid",
                fluidRow(
                  tabsetPanel(
                    type = "tabs",
                    tabPanel("Resumos", value = "tab1",
                             fluidPage(
                               # 'informacoesUltimoDia' is assumed to be defined in
                               # global.R / server.R -- TODO confirm.
                               selectInput("SelectRegião", "Filtre aqui:",
                                           c(
                                             unique(informacoesUltimoDia$regiao)
                                           )),
                               div(id = 'clickfinalizadosdesert',
                                   valueBoxOutput("vbox1", width = 3)),
                               valueBoxOutput("vbox2", width = 3),
                               valueBoxOutput("vbox3", width = 3),
                               valueBoxOutput("vbox4", width = 3),
                               valueBoxOutput("vbox5", width = 2),
                               valueBoxOutput("vbox6", width = 2),
                               valueBoxOutput("vbox7", width = 2),
                               valueBoxOutput("vbox8", width = 2),
                               valueBoxOutput("vbox9", width = 2),
                               valueBoxOutput("vbox10", width = 2),
                               box(
                                 plotlyOutput("graficoCasos")),
                               box(
                                 plotlyOutput("graficoObtos"))
                             )
                    ),
                    tabPanel("Tabelas interativas", value = "tab2",
                             # FIX: "Norte" and "Sudeste" were supplied as two
                             # unnamed elements each ("Norte","Norte") instead of
                             # named pairs, which produced duplicated options in
                             # the dropdown.
                             selectInput("Select1", "NomeSelect1:",
                                         c("Todos" = "todos",
                                           "Result Brasil" = "Brasil",
                                           "Centro-Oeste" = "Centro-Oeste",
                                           "Nordeste" = "Nordeste",
                                           "Norte" = "Norte",
                                           "Sudeste" = "Sudeste",
                                           "Sul" = "Sul")),
                             DT::dataTableOutput("TabelaCovid")
                    ),
                    tabPanel("Caixas interativas", value = "tab2",
                             # NOTE(review): this panel reuses value = "tab2";
                             # panel values should normally be unique -- confirm intent.
                             div(id = 'primeiroClick',
                                 valueBox(width = 4,
                                          "Clique aqui",
                                          "Eu disse pra clicar..."
                                 )
                             ),
                             box(width = 12,
                                 DT::dataTableOutput("TabelaInterativaCovid"),
                                 actionButton("TabelaInterativaCovid_rows_selected", "Show")
                             )
                    )
                  )
                )
        )
      )
    )
  )
)
|
/ui.R
|
no_license
|
Julia-Nascimento/ShinyDashboard
|
R
| false
| false
| 8,059
|
r
|
library(shiny)
library(shinydashboard)
library(DT)
library(shinyjs)

# shinydashboard skeleton, kept for reference:
# dashboardPage(
#   dashboardHeader(),
#   dashboardSidebar(),
#   dashboardBody()
# )

# UI definition: a dashboard with four demo tabs plus a Covid-19 tab.
# Wrapped in tagList() so useShinyjs() can sit alongside dashboardPage().
tagList(
  useShinyjs(),
  dashboardPage(
    dashboardHeader(title = "Meu Dash"),
    dashboardSidebar(
      sidebarMenu(
        menuItem("Aba 1", tabName = "aba1", icon = icon("star")),
        menuItem("Aba 2", tabName = "aba2", icon = icon("tag")),
        menuItem("Aba 3", tabName = "aba3"),
        menuItem("Aba 4", tabName = "aba4"),
        menuItem("Covid19", tabName = "covid")
      )
    ),
    dashboardBody(
      # includeCSS("www/style.css")   # to apply the custom dashboard colours
      # includeCSS("www/estilo2.css")
      tabItems(
        # Tab 1: two empty boxes.
        tabItem(tabName = "aba1",
                fluidRow(
                  box(title = "Opções"),
                  box(title = "Resultado", status = "primary")
                )
        ),
        # Tab 2: simple text/numeric inputs echoed into "saida" by the server.
        tabItem(tabName = "aba2",
                fluidRow(
                  box(title = "Opções", solidHeader = TRUE,
                      textInput("texto", label = "Texto: "),
                      numericInput("numero", label = "Número: ", value = 0, min = 0)
                  ),
                  box(title = "Resultado", status = "primary",
                      textOutput("saida")
                  )
                )
        ),
        # Tab 3: tabBox examples.
        tabItem(tabName = "aba3",
                fluidRow(
                  tabBox(
                    title = "Primeiro tabBox",
                    # The id lets us use input$tabset1 on the server to find the current tab
                    id = "tabset1", height = "250px",
                    tabPanel("Tab1", "Primeiro conteudo "),
                    tabPanel("Tab2", "Segundo conteudo")
                  ),
                  tabBox(
                    side = "right", height = "250px",
                    selected = "Tab3",
                    tabPanel("Tab1", "Primeiro conteudo- tabbox2"),
                    tabPanel("Tab2", "Segundo conteudo- tabbox2"),
                    tabPanel("Tab3", "Diferença no side=right, a ordem foi invertida")
                  )
                )
        ),
        # Tab 4: box colour/status examples laid out in three columns.
        tabItem(tabName = "aba4",
                fluidRow(
                  column(width = 4,
                         box(
                           title = "Titulo1", width = NULL, solidHeader = TRUE, status = "primary",
                           "Conteudo no box"
                         ),
                         box(
                           width = NULL, background = "black",
                           "A cor, ps: Sem titulo"
                         )
                  ),
                  column(width = 4,
                         box(
                           title = "Titulo3", width = NULL, solidHeader = TRUE, status = "warning",
                           "Conteudo"
                         ),
                         box(
                           title = "Titulo5", width = NULL, background = "light-blue",
                           "A cor..."
                         )
                  ),
                  column(width = 4,
                         box(
                           title = "Titulo2", width = NULL, solidHeader = TRUE,
                           "conteudo"
                         ),
                         box(
                           title = "Titulo6", width = NULL, background = "maroon",
                           "A cor..."
                         )
                  )
                )
        ),
        # Covid-19 tab: summary value boxes, interactive tables and boxes.
        tabItem(tabName = "covid",
                fluidRow(
                  tabsetPanel(
                    type = "tabs",
                    tabPanel("Resumos", value = "tab1",
                             fluidPage(
                               # 'informacoesUltimoDia' is assumed to be defined in
                               # global.R / server.R -- TODO confirm.
                               selectInput("SelectRegião", "Filtre aqui:",
                                           c(
                                             unique(informacoesUltimoDia$regiao)
                                           )),
                               div(id = 'clickfinalizadosdesert',
                                   valueBoxOutput("vbox1", width = 3)),
                               valueBoxOutput("vbox2", width = 3),
                               valueBoxOutput("vbox3", width = 3),
                               valueBoxOutput("vbox4", width = 3),
                               valueBoxOutput("vbox5", width = 2),
                               valueBoxOutput("vbox6", width = 2),
                               valueBoxOutput("vbox7", width = 2),
                               valueBoxOutput("vbox8", width = 2),
                               valueBoxOutput("vbox9", width = 2),
                               valueBoxOutput("vbox10", width = 2),
                               box(
                                 plotlyOutput("graficoCasos")),
                               box(
                                 plotlyOutput("graficoObtos"))
                             )
                    ),
                    tabPanel("Tabelas interativas", value = "tab2",
                             # FIX: "Norte" and "Sudeste" were supplied as two
                             # unnamed elements each ("Norte","Norte") instead of
                             # named pairs, which produced duplicated options in
                             # the dropdown.
                             selectInput("Select1", "NomeSelect1:",
                                         c("Todos" = "todos",
                                           "Result Brasil" = "Brasil",
                                           "Centro-Oeste" = "Centro-Oeste",
                                           "Nordeste" = "Nordeste",
                                           "Norte" = "Norte",
                                           "Sudeste" = "Sudeste",
                                           "Sul" = "Sul")),
                             DT::dataTableOutput("TabelaCovid")
                    ),
                    tabPanel("Caixas interativas", value = "tab2",
                             # NOTE(review): this panel reuses value = "tab2";
                             # panel values should normally be unique -- confirm intent.
                             div(id = 'primeiroClick',
                                 valueBox(width = 4,
                                          "Clique aqui",
                                          "Eu disse pra clicar..."
                                 )
                             ),
                             box(width = 12,
                                 DT::dataTableOutput("TabelaInterativaCovid"),
                                 actionButton("TabelaInterativaCovid_rows_selected", "Show")
                             )
                    )
                  )
                )
        )
      )
    )
  )
)
|
## heckitTfit: two-step all-normal treatment-effect estimator ("Heckit").
## Not part of the public API.
##
## Arguments:
##   selection    2-sided formula for the binary selection (treatment) equation
##   outcome      2-sided formula for the outcome equation
##   data         data frame / environment in which the formulas are evaluated
##   ys, yo       logical: return the selection / outcome response vectors?
##   xs, xo       logical: return the selection / outcome model matrices?
##   mfs, mfo     logical: return the selection / outcome model frames?
##   print.level  verbosity; values > 1 print intermediate model summaries
##   maxMethod    maximisation method passed to probit() for the first step
##
## Value: a list of class "selection" holding the probit fit, the outcome OLS
## fit, the estimates of rho and sigma, the stacked coefficient vector and a
## partially filled variance-covariance matrix (only the probit block is
## available from the 2-step procedure).
heckitTfit <- function(selection, outcome,
                       data=sys.frame(sys.parent()),
                       ys=FALSE, yo=FALSE,
                       xs=FALSE, xo=FALSE,
                       mfs=FALSE, mfo=FALSE,
                       print.level=0,
                       maxMethod="Newton-Raphson", ... ) {
   ## Do a few sanity checks...
   ## inherits() is used instead of class(x) == "formula": it is robust to
   ## objects that carry more than one class.
   if( !inherits( selection, "formula" ) ) {
      stop( "argument 'selection' must be a formula" )
   }
   if( length( selection ) != 3 ) {
      stop( "argument 'selection' must be a 2-sided formula" )
   }
   thisCall <- match.call()
   ## extract selection frame
   mf <- match.call(expand.dots = FALSE)
   m <- match(c("selection", "data", "subset"), names(mf), 0)
   mfS <- mf[c(1, m)]
   mfS$drop.unused.levels <- TRUE
   mfS$na.action <- na.pass
   mfS[[1]] <- as.name("model.frame")
   ## model.frame requires the parameter to be called 'formula'
   names(mfS)[2] <- "formula"
   mfS <- eval(mfS, parent.frame())
   mtS <- attr(mfS, "terms")
   XS <- model.matrix(mtS, mfS)
   YS <- model.response( mfS )
   YSLevels <- levels( as.factor( YS ) )
   if( length( YSLevels ) != 2 ) {
      stop( "the dependent variable of 'selection' has to contain",
            " exactly two levels (e.g. FALSE and TRUE)" )
   }
   ysNames <- names( YS )
   ## selection outcome is kept as a 0/1 integer internally
   YS <- as.integer(YS == YSLevels[ 2 ])
   names( YS ) <- ysNames
   ## check for NA-s.  Because we have to find NA-s in several frames, we
   ## cannot use the standard 'na.' functions here.  Find bad rows and remove
   ## them later.
   badRow <- !complete.cases(YS, XS)
   badRow <- badRow | is.infinite(YS)
   badRow <- badRow | apply(XS, 1, function(v) any(is.infinite(v)))
   if( inherits( outcome, "formula" ) ) {
      if( length( outcome ) != 3 ) {
         stop( "argument 'outcome1' must be a 2-sided formula" )
      }
      m <- match(c("outcome", "data", "subset"), names(mf), 0)
      mfO <- mf[c(1, m)]
      mfO$drop.unused.levels <- TRUE
      mfO$na.action <- na.pass
      mfO[[1]] <- as.name("model.frame")
      names(mfO)[2] <- "formula"
      mfO <- eval(mfO, parent.frame())
      mtO <- attr(mfO, "terms")
      XO <- model.matrix(mtO, mfO)
      YO <- model.response(mfO, "numeric")
      badRow <- badRow | !complete.cases(YO, XO)
      badRow <- badRow | is.infinite(YO)
      badRow <- badRow | apply(XO, 1, function(v) any(is.infinite(v)))
   }
   else
      stop("argument 'outcome' must be a formula")
   NXS <- ncol(XS)
   NXO <- ncol(XO)
   ## Remove rows w/NA-s
   XS <- XS[!badRow,,drop=FALSE]
   YS <- YS[!badRow]
   XO <- XO[!badRow,,drop=FALSE]
   YO <- YO[!badRow]
   nObs <- length(YS)
   ## a few pre-calculations: split the sample according to selection
   i0 <- YS == 0
   i1 <- YS == 1
   N0 <- sum(i0)
   N1 <- sum(i1)
   ## and run the model: selection equation via probit
   probitResult <- probit(YS ~ XS - 1, maxMethod = maxMethod )
   if( print.level > 1) {
      cat("The probit part of the model:\n")
      print(summary(probitResult))
   }
   gamma <- coef(probitResult)
   z <- XS %*% gamma
   ## outcome equation: augment the regressors with the inverse Mills ratio
   invMillsRatio0 <- lambda(-z[i0])
   invMillsRatio1 <- lambda(z[i1])
   XO <- cbind(XO, .invMillsRatio=0)
   XO[i0,".invMillsRatio"] <- -invMillsRatio0
   XO[i1,".invMillsRatio"] <- invMillsRatio1
   ## if(checkIMRcollinearity(XO)) {
   ##   warning("Inverse Mills Ratio is virtually multicollinear to the rest of explanatory variables in the outcome equation")
   ## }
   ## XO includes the constant (probably), hence -1 in the formula
   olm <- lm(YO ~ -1 + XO)
   if(print.level > 1) {
      cat("Raw outcome equation\n")
      print(summary(olm))
   }
   ## determine whether the outcome model has an intercept (a constant
   ## positive column in the model matrix); needed later for computing R^2
   intercept <- any(apply(model.matrix(olm), 2,
                          function(v) (v[1] > 0) & (all(v == v[1]))))
   delta0 <- mean( invMillsRatio0^2 - z[i0]*invMillsRatio0)
   delta1 <- mean( invMillsRatio1^2 + z[i1]*invMillsRatio1)
   betaL <- coef(olm)["XO.invMillsRatio"]
   ## residual variance: differs for treated/non-treated
   sigma0.2 <- mean((residuals(olm)[i0])^2)*nObs/(nObs - NXS)
   sigma1.2 <- mean((residuals(olm)[i1])^2)*nObs/(nObs - NXS)
   if(print.level > 2) {
      ## debugging output: compare the empirical standard deviations with the
      ## theoretical ones for hard-coded s2 = 1, rho = 0.8
      s2 <- 1
      rho <- 0.8
      th0.2 <- s2 + rho^2*s2*mean(z[i0]*invMillsRatio0) -
         rho^2*s2*mean(invMillsRatio0^2)
      th1.2 <- s2 - rho^2*s2*mean(z[i1]*invMillsRatio1) -
         rho^2*s2*mean(invMillsRatio1^2)
      a <- rbind(sd=c("non-participants"=sqrt(sigma0.2),
                      "participants"=sqrt(sigma1.2)),
                 th=c(sqrt(th0.2), sqrt(th1.2))
                 )
      cat("variances:\n")
      print(a)
   }
   sigma.02 <- sigma0.2 - betaL^2*mean(z[i0]*invMillsRatio0) +
      betaL^2*mean(invMillsRatio0^2)
   sigma.12 <- sigma1.2 + betaL^2*mean(z[i1]*invMillsRatio1) +
      betaL^2*mean(invMillsRatio1^2)
   ## take a weighted average over participants/non-participants
   sigma.2 <-
      (sum(i0)*sigma.02 + sum(i1)*sigma.12)/length(i1)
   sigma <- sqrt(sigma.2)
   rho <- betaL/sigma
   ## Now pack the results into a parameter vector
   ## indices in the parameter vector
   iBetaS <- seq(length=NXS)
   iBetaO <- seq(tail(iBetaS, 1)+1, length=NXO)
   ## the inverse Mills ratio coefficient is counted as a parameter
   iMills <- tail(iBetaO, 1) + 1
   iSigma <- iMills + 1
   iRho <- tail(iSigma, 1) + 1
   nParam <- iRho
   ## Varcovar matrix.  Fill only a few parts, the rest will remain NA
   coefficients <- numeric(nParam)
   coefficients[iBetaS] <- coef(probitResult)
   names(coefficients)[iBetaS] <- gsub("^XS", "",
                                       names(coef(probitResult)))
   coefficients[iBetaO] <- coef(olm)[names(coef(olm)) != "XO.invMillsRatio"]
   names(coefficients)[iBetaO] <- gsub("^XO", "",
      names(coef(olm))[names(coef(olm)) != "XO.invMillsRatio"])
   coefficients[c(iMills, iSigma, iRho)] <-
      c(coef(olm)["XO.invMillsRatio"], sigma, rho)
   names(coefficients)[c(iMills, iSigma, iRho)] <-
      c("invMillsRatio", "sigma", "rho")
   vc <- matrix(0, nParam, nParam)
   colnames(vc) <- row.names(vc) <- names(coefficients)
   vc[] <- NA
   if(!is.null(vcov(probitResult)))
      vc[iBetaS,iBetaS] <- vcov(probitResult)
   ## the 'param' component holds all kinds of technical info
   param <- list(index=list(betaS=iBetaS,
                            betaO=iBetaO,
                            Mills=iMills, sigma=iSigma, rho=iRho,
                            errTerms = c(iMills, iSigma, iRho),
                            outcome = c(iBetaO, iMills) ),
                 ## index: the location of the results in the coef vector
                 oIntercept1=intercept,
                 nObs=nObs, nParam=nParam, df=nObs-nParam + 2,
                 NXS=NXS, NXO=NXO, N0=N0, N1=N1,
                 levels=YSLevels
                 ## levels[1]: selection 1; levels[2]: selection 2
                 )
   result <- list(probit=probitResult,
                  lm=olm,
                  rho=rho,
                  sigma=sigma,
                  call = thisCall,
                  termsS=mtS,
                  termsO=mtO,
                  ys=switch(as.character(ys), "TRUE"=YS, "FALSE"=NULL),
                  xs=switch(as.character(xs), "TRUE"=XS, "FALSE"=NULL),
                  yo=switch(as.character(yo), "TRUE"=YO, "FALSE"=NULL),
                  xo=switch(as.character(xo), "TRUE"=XO, "FALSE"=NULL),
                  ## NOTE(review): mfs is wrapped in list() while mfo is not --
                  ## confirm whether this asymmetry is intended.
                  mfs=switch(as.character(mfs), "TRUE"=list(mfS), "FALSE"=NULL),
                  ## FIX: the switch used to test 'mfs' instead of 'mfo', so the
                  ## outcome model frame was returned (or dropped) depending on
                  ## the wrong flag.
                  mfo=switch(as.character(mfo), "TRUE"=mfO, "FALSE"=NULL),
                  param=param,
                  coefficients=coefficients,
                  vcov=vc
                  )
   result$tobitType <- "treatment"
   result$method <- "2step"
   class( result ) <- c( "selection", class(result))
   return( result )
}
|
/sampleSelection/R/heckitTfit.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 8,172
|
r
|
## heckitTfit: two-step all-normal treatment-effect estimator ("Heckit").
## Not part of the public API.
##
## Arguments:
##   selection    2-sided formula for the binary selection (treatment) equation
##   outcome      2-sided formula for the outcome equation
##   data         data frame / environment in which the formulas are evaluated
##   ys, yo       logical: return the selection / outcome response vectors?
##   xs, xo       logical: return the selection / outcome model matrices?
##   mfs, mfo     logical: return the selection / outcome model frames?
##   print.level  verbosity; values > 1 print intermediate model summaries
##   maxMethod    maximisation method passed to probit() for the first step
##
## Value: a list of class "selection" holding the probit fit, the outcome OLS
## fit, the estimates of rho and sigma, the stacked coefficient vector and a
## partially filled variance-covariance matrix (only the probit block is
## available from the 2-step procedure).
heckitTfit <- function(selection, outcome,
                       data=sys.frame(sys.parent()),
                       ys=FALSE, yo=FALSE,
                       xs=FALSE, xo=FALSE,
                       mfs=FALSE, mfo=FALSE,
                       print.level=0,
                       maxMethod="Newton-Raphson", ... ) {
   ## Do a few sanity checks...
   ## inherits() is used instead of class(x) == "formula": it is robust to
   ## objects that carry more than one class.
   if( !inherits( selection, "formula" ) ) {
      stop( "argument 'selection' must be a formula" )
   }
   if( length( selection ) != 3 ) {
      stop( "argument 'selection' must be a 2-sided formula" )
   }
   thisCall <- match.call()
   ## extract selection frame
   mf <- match.call(expand.dots = FALSE)
   m <- match(c("selection", "data", "subset"), names(mf), 0)
   mfS <- mf[c(1, m)]
   mfS$drop.unused.levels <- TRUE
   mfS$na.action <- na.pass
   mfS[[1]] <- as.name("model.frame")
   ## model.frame requires the parameter to be called 'formula'
   names(mfS)[2] <- "formula"
   mfS <- eval(mfS, parent.frame())
   mtS <- attr(mfS, "terms")
   XS <- model.matrix(mtS, mfS)
   YS <- model.response( mfS )
   YSLevels <- levels( as.factor( YS ) )
   if( length( YSLevels ) != 2 ) {
      stop( "the dependent variable of 'selection' has to contain",
            " exactly two levels (e.g. FALSE and TRUE)" )
   }
   ysNames <- names( YS )
   ## selection outcome is kept as a 0/1 integer internally
   YS <- as.integer(YS == YSLevels[ 2 ])
   names( YS ) <- ysNames
   ## check for NA-s.  Because we have to find NA-s in several frames, we
   ## cannot use the standard 'na.' functions here.  Find bad rows and remove
   ## them later.
   badRow <- !complete.cases(YS, XS)
   badRow <- badRow | is.infinite(YS)
   badRow <- badRow | apply(XS, 1, function(v) any(is.infinite(v)))
   if( inherits( outcome, "formula" ) ) {
      if( length( outcome ) != 3 ) {
         stop( "argument 'outcome1' must be a 2-sided formula" )
      }
      m <- match(c("outcome", "data", "subset"), names(mf), 0)
      mfO <- mf[c(1, m)]
      mfO$drop.unused.levels <- TRUE
      mfO$na.action <- na.pass
      mfO[[1]] <- as.name("model.frame")
      names(mfO)[2] <- "formula"
      mfO <- eval(mfO, parent.frame())
      mtO <- attr(mfO, "terms")
      XO <- model.matrix(mtO, mfO)
      YO <- model.response(mfO, "numeric")
      badRow <- badRow | !complete.cases(YO, XO)
      badRow <- badRow | is.infinite(YO)
      badRow <- badRow | apply(XO, 1, function(v) any(is.infinite(v)))
   }
   else
      stop("argument 'outcome' must be a formula")
   NXS <- ncol(XS)
   NXO <- ncol(XO)
   ## Remove rows w/NA-s
   XS <- XS[!badRow,,drop=FALSE]
   YS <- YS[!badRow]
   XO <- XO[!badRow,,drop=FALSE]
   YO <- YO[!badRow]
   nObs <- length(YS)
   ## a few pre-calculations: split the sample according to selection
   i0 <- YS == 0
   i1 <- YS == 1
   N0 <- sum(i0)
   N1 <- sum(i1)
   ## and run the model: selection equation via probit
   probitResult <- probit(YS ~ XS - 1, maxMethod = maxMethod )
   if( print.level > 1) {
      cat("The probit part of the model:\n")
      print(summary(probitResult))
   }
   gamma <- coef(probitResult)
   z <- XS %*% gamma
   ## outcome equation: augment the regressors with the inverse Mills ratio
   invMillsRatio0 <- lambda(-z[i0])
   invMillsRatio1 <- lambda(z[i1])
   XO <- cbind(XO, .invMillsRatio=0)
   XO[i0,".invMillsRatio"] <- -invMillsRatio0
   XO[i1,".invMillsRatio"] <- invMillsRatio1
   ## if(checkIMRcollinearity(XO)) {
   ##   warning("Inverse Mills Ratio is virtually multicollinear to the rest of explanatory variables in the outcome equation")
   ## }
   ## XO includes the constant (probably), hence -1 in the formula
   olm <- lm(YO ~ -1 + XO)
   if(print.level > 1) {
      cat("Raw outcome equation\n")
      print(summary(olm))
   }
   ## determine whether the outcome model has an intercept (a constant
   ## positive column in the model matrix); needed later for computing R^2
   intercept <- any(apply(model.matrix(olm), 2,
                          function(v) (v[1] > 0) & (all(v == v[1]))))
   delta0 <- mean( invMillsRatio0^2 - z[i0]*invMillsRatio0)
   delta1 <- mean( invMillsRatio1^2 + z[i1]*invMillsRatio1)
   betaL <- coef(olm)["XO.invMillsRatio"]
   ## residual variance: differs for treated/non-treated
   sigma0.2 <- mean((residuals(olm)[i0])^2)*nObs/(nObs - NXS)
   sigma1.2 <- mean((residuals(olm)[i1])^2)*nObs/(nObs - NXS)
   if(print.level > 2) {
      ## debugging output: compare the empirical standard deviations with the
      ## theoretical ones for hard-coded s2 = 1, rho = 0.8
      s2 <- 1
      rho <- 0.8
      th0.2 <- s2 + rho^2*s2*mean(z[i0]*invMillsRatio0) -
         rho^2*s2*mean(invMillsRatio0^2)
      th1.2 <- s2 - rho^2*s2*mean(z[i1]*invMillsRatio1) -
         rho^2*s2*mean(invMillsRatio1^2)
      a <- rbind(sd=c("non-participants"=sqrt(sigma0.2),
                      "participants"=sqrt(sigma1.2)),
                 th=c(sqrt(th0.2), sqrt(th1.2))
                 )
      cat("variances:\n")
      print(a)
   }
   sigma.02 <- sigma0.2 - betaL^2*mean(z[i0]*invMillsRatio0) +
      betaL^2*mean(invMillsRatio0^2)
   sigma.12 <- sigma1.2 + betaL^2*mean(z[i1]*invMillsRatio1) +
      betaL^2*mean(invMillsRatio1^2)
   ## take a weighted average over participants/non-participants
   sigma.2 <-
      (sum(i0)*sigma.02 + sum(i1)*sigma.12)/length(i1)
   sigma <- sqrt(sigma.2)
   rho <- betaL/sigma
   ## Now pack the results into a parameter vector
   ## indices in the parameter vector
   iBetaS <- seq(length=NXS)
   iBetaO <- seq(tail(iBetaS, 1)+1, length=NXO)
   ## the inverse Mills ratio coefficient is counted as a parameter
   iMills <- tail(iBetaO, 1) + 1
   iSigma <- iMills + 1
   iRho <- tail(iSigma, 1) + 1
   nParam <- iRho
   ## Varcovar matrix.  Fill only a few parts, the rest will remain NA
   coefficients <- numeric(nParam)
   coefficients[iBetaS] <- coef(probitResult)
   names(coefficients)[iBetaS] <- gsub("^XS", "",
                                       names(coef(probitResult)))
   coefficients[iBetaO] <- coef(olm)[names(coef(olm)) != "XO.invMillsRatio"]
   names(coefficients)[iBetaO] <- gsub("^XO", "",
      names(coef(olm))[names(coef(olm)) != "XO.invMillsRatio"])
   coefficients[c(iMills, iSigma, iRho)] <-
      c(coef(olm)["XO.invMillsRatio"], sigma, rho)
   names(coefficients)[c(iMills, iSigma, iRho)] <-
      c("invMillsRatio", "sigma", "rho")
   vc <- matrix(0, nParam, nParam)
   colnames(vc) <- row.names(vc) <- names(coefficients)
   vc[] <- NA
   if(!is.null(vcov(probitResult)))
      vc[iBetaS,iBetaS] <- vcov(probitResult)
   ## the 'param' component holds all kinds of technical info
   param <- list(index=list(betaS=iBetaS,
                            betaO=iBetaO,
                            Mills=iMills, sigma=iSigma, rho=iRho,
                            errTerms = c(iMills, iSigma, iRho),
                            outcome = c(iBetaO, iMills) ),
                 ## index: the location of the results in the coef vector
                 oIntercept1=intercept,
                 nObs=nObs, nParam=nParam, df=nObs-nParam + 2,
                 NXS=NXS, NXO=NXO, N0=N0, N1=N1,
                 levels=YSLevels
                 ## levels[1]: selection 1; levels[2]: selection 2
                 )
   result <- list(probit=probitResult,
                  lm=olm,
                  rho=rho,
                  sigma=sigma,
                  call = thisCall,
                  termsS=mtS,
                  termsO=mtO,
                  ys=switch(as.character(ys), "TRUE"=YS, "FALSE"=NULL),
                  xs=switch(as.character(xs), "TRUE"=XS, "FALSE"=NULL),
                  yo=switch(as.character(yo), "TRUE"=YO, "FALSE"=NULL),
                  xo=switch(as.character(xo), "TRUE"=XO, "FALSE"=NULL),
                  ## NOTE(review): mfs is wrapped in list() while mfo is not --
                  ## confirm whether this asymmetry is intended.
                  mfs=switch(as.character(mfs), "TRUE"=list(mfS), "FALSE"=NULL),
                  ## FIX: the switch used to test 'mfs' instead of 'mfo', so the
                  ## outcome model frame was returned (or dropped) depending on
                  ## the wrong flag.
                  mfo=switch(as.character(mfo), "TRUE"=mfO, "FALSE"=NULL),
                  param=param,
                  coefficients=coefficients,
                  vcov=vc
                  )
   result$tobitType <- "treatment"
   result$method <- "2step"
   class( result ) <- c( "selection", class(result))
   return( result )
}
|
\name{multhist}
\alias{multhist}
\title{Plot a multiple histogram, as a barplot}
\description{
Given a list, plots a side-by-side barplot containing the histograms
of the elements
}
\usage{
multhist(x,beside=TRUE,freq=NULL,probability=!freq,plot.it=TRUE,...)
}
\arguments{
\item{x}{a list of numeric vectors}
\item{beside}{plot histogram bars for groups side-by-side?}
\item{freq}{logical; if 'TRUE', the histogram graphic is a representation
of frequencies, the 'counts' component of the result; if
'FALSE', probability densities, component 'density', are
plotted (so that the histogram has a total area of one).
Defaults to 'TRUE' if 'probability' is not specified
(does not consider equidistant breaks as in \link{hist})}
\item{probability}{an alias for '!freq', for S compatibility}
\item{plot.it}{Whether or not to display the histogram.}
\item{...}{additional arguments to \link{hist} or \link{barplot}}
}
\value{
A list including the return value for the first call to \samp{hist} (itself a list)
and the values for the bar heights.
}
\author{Ben Bolker}
\seealso{\link{hist},\link{barplot}}
\note{
The 'inside' argument to \link{barplot} (which is not currently
implemented in barplot anyway) is deleted from the argument list. The
default value of NULL for \samp{freq} is for consistency with \samp{hist}
but is equivalent to TRUE.
}
\examples{
l <- list(runif(10)*10,1:10,c(1,1,1,1,4,8))
multhist(l)
}
\keyword{hplot}
|
/plotrix/man/multhist.Rd
|
no_license
|
edenduthie/palsR
|
R
| false
| false
| 1,513
|
rd
|
\name{multhist}
\alias{multhist}
\title{Plot a multiple histogram, as a barplot}
\description{
Given a list, plots a side-by-side barplot containing the histograms
of the elements
}
\usage{
multhist(x,beside=TRUE,freq=NULL,probability=!freq,plot.it=TRUE,...)
}
\arguments{
\item{x}{a list of numeric vectors}
\item{beside}{plot histogram bars for groups side-by-side?}
\item{freq}{logical; if 'TRUE', the histogram graphic is a representation
of frequencies, the 'counts' component of the result; if
'FALSE', probability densities, component 'density', are
plotted (so that the histogram has a total area of one).
Defaults to 'TRUE' if 'probability' is not specified
(does not consider equidistant breaks as in \link{hist})}
\item{probability}{an alias for '!freq', for S compatibility}
\item{plot.it}{Whether or not to display the histogram.}
\item{...}{additional arguments to \link{hist} or \link{barplot}}
}
\value{
A list including the return value for the first call to \samp{hist} (itself a list)
and the values for the bar heights.
}
\author{Ben Bolker}
\seealso{\link{hist},\link{barplot}}
\note{
The 'inside' argument to \link{barplot} (which is not currently
implemented in barplot anyway) is deleted from the argument list. The
default value of NULL for \samp{freq} is for consistency with \samp{hist}
but is equivalent to TRUE.
}
\examples{
l <- list(runif(10)*10,1:10,c(1,1,1,1,4,8))
multhist(l)
}
\keyword{hplot}
|
#' (multi-core) Kriging
#'
#' This function statistically downscales input data using covariate data and the kriging methodology. The function can be run in two ways:
#' \enumerate{
#' \item \strong{By Itself}: Use the arguments Data, Covariates_coarse, Covariates_fine when you already have raster files for your data which is to be downscaled as well as covariate raster data.
#' \item \strong{From Scratch}: Use the arguments Variable, Type, DataSet, DateStart, DateStop, TResolution, TStep, Extent, Dir, FileName, API_Key, API_User, and arget_res. By doing so, krigR will call the functions download_ERA() and download_DEM() for one coherent kriging workflow. Note that this process does not work when targetting UERRA data.
#' }
#' Use optional arguments such as Dir, FileName, Keep_Temporary, SingularTry, KrigingEquation and Cores for ease of use, substitution of non-GMTED2010 covariates, and parallel processing.
#'
#' @param Data Raster file which is to be downscaled.
#' @param Covariates_coarse Raster file containing covariates at training resolution.
#' @param Covariates_fine Raster file containing covariates at target resolution.
#' @param KrigingEquation Formula or character string specifying which covariates to use and how. Layer names in Covariates_coarse and Covariates_fine need to match Parameters in this formula. Needs to start with "X ~ ". X can read anything you like.
#' @param Dir Optional. Directory specifying where to place final kriged product. Default is current working directory.
#' @param FileName Optional. A file name for the netcdf produced. Default is a combination parameters in the function call.
#' @param Keep_Temporary Logical, whether to delete individual kriging products of layers in Data after processing. Default is TRUE.
#' @param Cores Numeric. How many cores to use. If you want output to your console during the process, use Cores == 1. Parallel processing is carried out when Cores is bigger than 1. Default is detecting all cores of your machine.
#' @param SingularTry Numeric. How often to try kriging of each layer of the input. This usually gets around issues of singular covariance matrices in the kriging process, but takes some time. Default is 10
#' @param Variable Optional, calls download_ERA(). ERA5(Land)-contained climate variable.
#' @param PrecipFix Optional. Era5(-land) total precipitation is recorded in cumulative steps per hour from the 00:00 time mark per day. Setting PrecipFix to TRUE converts these into records which represent the total precipitation per hour. Monthly records in Era5(-land) express the average daily total precipitation. Setting this argument to TRUE multiplies monthly records by the number of days per the respective month(s) to get to total precipitation records instead of average. Default is FALSE.
#' @param Type Optional. Whether to download reanalysis ('reanalysis') or ensemble ('ensemble_members', 'ensemble_mean', or 'ensemble_spread') data. Passed on to download_ERA.
#' @param DataSet Optional. Which ERA5 data set to download data from. 'era5' or 'era5-land'. Passed on to download_ERA.
#' @param DateStart Optional. Date ('YYYY-MM-DD') at which to start time series of downloaded data. Passed on to download_ERA.
#' @param DateStop Optional. Date ('YYYY-MM-DD') at which to stop time series of downloaded data. Passed on to download_ERA.
#' @param TResolution Optional. Temporal resolution of final product. hour', 'day', 'month'. Passed on to download_ERA.
#' @param TStep Optional. Which time steps (numeric) to consider for temporal resolution. Passed on to download_ERA.
#' @param FUN Optional. A raster calculation argument as passed to `raster::stackApply()`. This controls what kind of data to obtain for temporal aggregates of reanalysis data. Specify 'mean' (default) for mean values, 'min' for minimum values, and 'max' for maximum values, among others.
#' @param Extent Optional, download data according to rectangular bounding box. specify as extent() object or as a raster, a SpatialPolygonsDataFrame object, or a data.frame object. If Extent is a SpatialPolygonsDataFrame, this will be treated as a shapefile and the output will be cropped and masked to this shapefile. If Extent is a data.frame of geo-referenced point records, it needs to contain Lat and Lon columns as well as a non-repeating ID-column. Passed on to download_ERA and download_DEM.
#' @param Buffer Optional. Identifies how big a rectangular buffer to draw around points if Extent is a data frame of points. Buffer is expressed as centessimal degrees. Passed on to download_ERA and download_DEM.
#' @param ID Optional. Identifies which column in Extent to use for creation of individual buffers if Extent is a data.frame. Passed on to download_ERA and download_DEM.
#' @param Target_res Optional. The target resolution for the kriging step (i.e. which resolution to downscale to). An object as specified/produced by raster::res(). Passed on to download_DEM.
#' @param Source Optional, character. Whether to attempt download from the official USGS data viewer (Source = "USGS") or a static copy of the data set on a private drive (Source = "Drive"). Default is "USGS". Use this if the USGS viewer is unavailable. Passed on to download_DEM.
#' @param API_Key Optional. ECMWF cds API key. Passed on to download_ERA.
#' @param API_User Optional. ECMWF cds user number. Passed on to download_ERA.
#' @param nmax Optional. Controls local kriging. Number of nearest observations to be used kriging of each observation. Default is to use all available (Inf). You can specify as a number (numeric).
#' @param TryDown Optional, numeric. How often to attempt the download of each individual file (if querying data download) that the function queries from the server. This is to circumvent having to restart the entire function when encountering connectivity issues.
#' @param verbose Optional, logical. Whether to report progress of data download (if queried) in the console or not.
#' @param TimeOut Numeric. The timeout for each download in seconds. Default 36000 seconds (10 hours).
#' @param SingularDL Logical. Whether to force download of data in one call to CDS or automatically break download requests into individual monthly downloads. Default is FALSE.
#' @return A list object containing the downscaled data as well as the standard error for downscaling as well as the call to the krigR function, and two NETCDF (.nc) file in the specified directory which are the two data contents of the aforementioned list. A temporary directory is populated with individual NETCDF (.nc) files throughout the runtime of krigR which is deleted upon completion if Keep_Temporary = FALSE and all layers in the Data raster object were kriged successfully.
#' @examples
#' \dontrun{
#' ## THREE-STEP PROCESS (By Itself)
#' # Downloading ERA5-Land air temperature reanalysis data in 12-hour intervals for 02/01/1995 - 04/01/1995 (DD/MM/YYYY). API User and Key in this example are non-functional. Substitute with your user number and key to run this example.
#' Extent <- extent(c(11.8,15.1,50.1,51.7)) # roughly the extent of Saxony
#' API_User <- "..."
#' API_Key <- "..."
#' State_Raw <- download_ERA(
#' Variable = "2m_temperature",
#' DataSet = "era5-land",
#' DateStart = "1995-01-02",
#' DateStop = "1995-01-04",
#' TResolution = "hour",
#' TStep = 12,
#' Extent = Extent,
#' API_User = API_User,
#' API_Key = API_Key
#' )
#' State_Raw # a raster brick with 6 layers at resolution of ~0.1°
#' # Downloading GMTED2010-data at resolution and extent obtained by a call to download_ERA and a target resolution of .02.
#' Covs_ls <- download_DEM(
#' Train_ras = State_Raw,
#' Target_res = .02,
#' Keep_Temporary = TRUE
#' )
#' Covs_ls # a list with two elements: (1) GMTED 2010 data at training resolution, and (2) GMTED 2010 data aggregated as close as possible to a resolution of 0.02
#' # Kriging the data sets prepared with the previous functions.
#' State_Krig <- krigR(
#' Data = State_Raw, # data we want to krig as a raster object
#' Covariates_coarse = Covs_ls[[1]], # training covariate as a raster object
#' Covariates_fine = Covs_ls[[2]], # target covariate as a raster object
#' )
#'
#' ## PIPELINE (From Scratch)
#' #' # Downloading ERA5-Land air temperature reanalysis data in 12-hour intervals for 02/01/1995 - 04/01/1995 (DD/MM/YYYY), downloading and preparing GMTED 2010 covariate data, and kriging. API User and Key in this example are non-functional. Substitute with your user number and key to run this example. This example produces the same output as the example above.
#' Extent <- extent(c(11.8,15.1,50.1,51.7)) # roughly the extent of Saxony
#' API_User <- "..."
#' API_Key <- "..."
#' Pipe_Krig <- krigR(
#' Variable = "2m_temperature",
#' Type = "reanalysis",
#' DataSet = "era5-land",
#' DateStart = "1995-01-02",
#' DateStop = "1995-01-04",
#' TResolution = "hour",#
#' TStep = 12,
#' Extent = Extent,
#' API_User = API_User,
#' API_Key = API_Key,
#' Target_res = .02
#' )
#' }
#'
#' @export
krigR <- function(Data = NULL, Covariates_coarse = NULL, Covariates_fine = NULL, KrigingEquation = "ERA ~ DEM", Cores = detectCores(), Dir = getwd(), FileName, Keep_Temporary = TRUE, SingularTry = 10, Variable, PrecipFix = FALSE, Type = "reanalysis", DataSet = "era5-land", DateStart, DateStop, TResolution = "month", TStep = 1, FUN = 'mean', Extent, Buffer = 0.5, ID = "ID", API_Key, API_User, Target_res, Source = "USGS", nmax = Inf, TryDown = 10, verbose = TRUE, TimeOut = 36000, SingularDL = FALSE, ...){
  ## CALL LIST (for storing how the function was called in the output) ----
  if(is.null(Data)){
    Data_Retrieval <- list(Variable = Variable,
                           Type = Type,
                           PrecipFix = PrecipFix,
                           DataSet = DataSet,
                           DateStart = DateStart,
                           DateStop = DateStop,
                           TResolution = TResolution,
                           TStep = TStep,
                           Extent = Extent)
  }else{
    Data_Retrieval <- "None needed. Data was not queried via krigR function, but supplied by user."
  }
  ## CLIMATE DATA (call to download_ERA function if no Data set is specified) ----
  if(is.null(Data)){ # data check: if no data has been specified
    Data <- download_ERA(Variable = Variable, PrecipFix = PrecipFix, Type = Type, DataSet = DataSet, DateStart = DateStart, DateStop = DateStop, TResolution = TResolution, TStep = TStep, FUN = FUN, Extent = Extent, API_User = API_User, API_Key = API_Key, Dir = Dir, TryDown = TryDown, verbose = verbose, ID = ID, Cores = Cores, TimeOut = TimeOut, SingularDL = SingularDL)
  } # end of data check
  ## COVARIATE DATA (call to download_DEM function when no covariates are specified) ----
  if(is.null(Covariates_coarse) & is.null(Covariates_fine)){ # covariate check: if no covariates have been specified
    # inherits() is robust to objects carrying multiple classes (e.g. tibbles), unlike class() == "..."
    if(inherits(Extent, c("SpatialPolygonsDataFrame", "data.frame"))){ # Extent check: if Extent input is a shapefile or point data.frame
      Shape <- Extent # save shapefile for use as Shape in masking covariate data
    }else{ # if Extent is not a shape, then extent specification is already baked into Data
      Shape <- NULL # set Shape to NULL so it is ignored in download_DEM function when masking is applied
    } # end of Extent check
    Covs_ls <- download_DEM(Train_ras = Data, Target_res = Target_res, Shape = Shape, Buffer = Buffer, ID = ID, Keep_Temporary = Keep_Temporary, Dir = Dir)
    Covariates_coarse <- Covs_ls[[1]] # extract coarse covariates from download_DEM output
    Covariates_fine <- Covs_ls[[2]] # extract fine covariates from download_DEM output
  } # end of covariate check
  ## KRIGING FORMULA (assure that KrigingEquation is a formula object) ----
  KrigingEquation <- as.formula(KrigingEquation)
  ## CALL LIST (for storing how the function was called in the output) ----
  Call_ls <- list(Data = SummarizeRaster(Data),
                  Covariates_coarse = SummarizeRaster(Covariates_coarse),
                  Covariates_fine = SummarizeRaster(Covariates_fine),
                  KrigingEquation = KrigingEquation,
                  Cores = Cores,
                  FileName = FileName,
                  Keep_Temporary = Keep_Temporary,
                  nmax = nmax,
                  Data_Retrieval = Data_Retrieval,
                  misc = list(...)) # capture any additional arguments as a list; bare 'misc = ...' errors when no extra arguments are supplied
  ## SANITY CHECKS (step into check_Krig function to catch most common error messages) ----
  Check_Product <- check_Krig(Data = Data, CovariatesCoarse = Covariates_coarse, CovariatesFine = Covariates_fine, KrigingEquation = KrigingEquation)
  KrigingEquation <- Check_Product[[1]] # extract KrigingEquation (this may have changed in check_Krig)
  DataSkips <- Check_Product[[2]] # extract which layers to skip due to missing data (this is unlikely to ever come into action)
  Terms <- unique(unlist(strsplit(labels(terms(KrigingEquation)), split = ":"))) # identify which layers of data are needed
  ## DATA REFORMATTING (Kriging requires spatially referenced data frames, reformatting from rasters happens here) ---
  Origin <- raster::as.data.frame(Covariates_coarse, xy = TRUE) # extract covariate layers
  Origin <- Origin[, c(1:2, which(colnames(Origin) %in% Terms))] # retain only columns containing terms
  Target <- raster::as.data.frame(Covariates_fine, xy = TRUE) # extract covariate layers
  Target <- Target[, c(1:2, which(colnames(Target) %in% Terms))] # retain only columns containing terms
  Target <- na.omit(Target)
  suppressWarnings(gridded(Target) <- ~x+y) # establish a gridded data product ready for use in kriging
  Target@grid@cellsize[1] <- Target@grid@cellsize[2] # ensure that grid cells are square
  ## SET-UP TEMPORARY DIRECTORY (this is where kriged products of each layer will be saved) ----
  Dir.Temp <- file.path(Dir, paste("Kriging", FileName, sep="_"))
  if(!dir.exists(Dir.Temp)){dir.create(Dir.Temp)}
  ## KRIGING SPECIFICATION (this character string is parsed and evaluated per layer in both the parallel and the non-parallel branch below; kept as text so the identical instructions can be shipped to cluster workers) ----
  looptext <- "
  OriginK <- cbind(Origin, raster::extract(x = Data[[Iter_Krige]], y = Origin[,1:2], df=TRUE)[, 2]) # combine data of current data layer with training covariate data
  OriginK <- na.omit(OriginK) # get rid of NA cells
  colnames(OriginK)[length(Terms)+3] <- c(terms(KrigingEquation)[[2]]) # assign column names
  suppressWarnings(gridded(OriginK) <- ~x+y) # generate gridded product
  OriginK@grid@cellsize[1] <- OriginK@grid@cellsize[2] # ensure that grid cells are square
  Iter_Try = 0 # number of tries set to 0
  kriging_result <- NULL
  while(class(kriging_result)[1] != 'autoKrige' & Iter_Try < SingularTry){ # try kriging SingularTry times, this is because of a random process of variogram identification within the automap package that can fail on smaller datasets randomly when it isn't supposed to
    try(invisible(capture.output(kriging_result <- autoKrige(formula = KrigingEquation, input_data = OriginK, new_data = Target, nmax = nmax))), silent = TRUE)
    Iter_Try <- Iter_Try +1
  }
  if(class(kriging_result)[1] != 'autoKrige'){ # give error if kriging fails
    message(paste0('Kriging failed for layer ', Iter_Krige, '. Error message produced by autoKrige function: ', geterrmessage()))
  }
  ## retransform to raster
  try( # try fastest way - this fails with certain edge artefacts in meractor projection and is fixed by using rasterize
    Krig_ras <- raster(x = kriging_result$krige_output, layer = 1), # extract raster from kriging product
    silent = TRUE
  )
  try(
    Var_ras <- raster(x = kriging_result$krige_output, layer = 3), # extract raster from kriging product
    silent = TRUE
  )
  if(!exists('Krig_ras') & !exists('Var_ras')){
    Krig_ras <- rasterize(x = kriging_result$krige_output, y = Covariates_fine[[1]])[[2]] # extract raster from kriging product
    Var_ras <- rasterize(x = kriging_result$krige_output, y = Covariates_fine)[[4]] # extract raster from kriging product
  }
  crs(Krig_ras) <- crs(Data) # setting the crs according to the data
  crs(Var_ras) <- crs(Data) # setting the crs according to the data
  if(Cores == 1){
    Ras_Krig[[Iter_Krige]] <- Krig_ras
    Ras_Var[[Iter_Krige]] <- Var_ras
  } # stack kriged raster into raster list if non-parallel computing
  writeRaster(x = Krig_ras, filename = file.path(Dir.Temp, paste0(str_pad(Iter_Krige,4,'left','0'), '_data.nc')), overwrite = TRUE, format='CDF') # save kriged raster to temporary directory
  writeRaster(x = Var_ras, filename = file.path(Dir.Temp, paste0(str_pad(Iter_Krige,4,'left','0'), '_SE.nc')), overwrite = TRUE, format='CDF') # save kriged raster to temporary directory
  if(Cores == 1){ # core check: if processing non-parallel
    if(Count_Krige == 1){ # count check: if this was the first actual computation
      T_End <- Sys.time() # record time at which kriging was done for current layer
      Duration <- as.numeric(T_End)-as.numeric(T_Begin) # calculate how long it took to krig on layer
      message(paste('Kriging of remaining ', nlayers(Data)-Iter_Krige, ' data layers should finish around: ', as.POSIXlt(T_Begin + Duration*nlayers(Data), tz = Sys.timezone(location=TRUE)), sep='')) # console output with estimate of when the kriging should be done
      ProgBar <- txtProgressBar(min = 0, max = nlayers(Data), style = 3) # create progress bar when non-parallel processing
      Count_Krige <- Count_Krige + 1 # raise count by one so the stimator isn't called again
    } # end of count check
    setTxtProgressBar(ProgBar, Iter_Krige) # update progress bar with number of current layer
  } # end of core check
  "
  ## KRIGING PREPARATION (establishing objects which the kriging refers to) ----
  Ras_Krig <- as.list(rep(NA, nlayers(Data))) # establish an empty list which will be filled with kriged layers
  Ras_Var <- as.list(rep(NA, nlayers(Data))) # establish an empty list which will be filled with kriging standard-error layers
  if(verbose){message("Commencing Kriging")}
  ## DATA SKIPS (if certain layers in the data are empty and need to be skipped, this is handled here) ---
  if(!is.null(DataSkips)){ # Skip check: if layers need to be skipped
    for(Iter_Skip in DataSkips){ # Skip loop: loop over all layers that need to be skipped
      Ras_Krig[[Iter_Skip]] <- Data[[Iter_Skip]] # add raw data (which should be empty) to list
      writeRaster(x = Ras_Krig[[Iter_Skip]], filename = file.path(Dir.Temp, str_pad(Iter_Skip,4,'left','0')), overwrite = TRUE, format = 'CDF') # save raw layer to temporary directory, needed for loading back in when parallel processing
    } # end of Skip loop
    Layers_vec <- 1:nlayers(Data) # identify vector of all layers in data
    Compute_Layers <- Layers_vec[which(!Layers_vec %in% DataSkips)] # identify which layers can actually be computed on
  }else{ # if we don't need to skip any layers
    Compute_Layers <- 1:nlayers(Data) # set computing layers to all layers in data
  } # end of Skip check
  ## ACTUAL KRIGING (carry out kriging according to user specifications either in parallel or on a single core) ----
  if(Cores > 1){ # Cores check: if parallel processing has been specified
    ### PARALLEL KRIGING ---
    ForeachObjects <- c("Dir.Temp", "Cores", "Data", "KrigingEquation", "Origin", "Target", "Covariates_coarse", "Covariates_fine", "Terms", "SingularTry", "nmax") # objects which are needed for each kriging run and are thus handed to each cluster unit
    cl <- makeCluster(Cores) # Assuming Cores node cluster
    registerDoParallel(cl) # registering cores
    foreach(Iter_Krige = Compute_Layers, # kriging loop over all layers in Data, with condition (%:% when(...)) to only run if current layer is not present in Dir.Temp yet
            .packages = c("raster", "stringr", "automap", "ncdf4", "rgdal"), # import packages necessary to each itteration
            .export = ForeachObjects) %:% when(!paste0(str_pad(Iter_Krige,4,"left","0"), '_data.nc') %in% list.files(Dir.Temp)) %dopar% { # parallel kriging loop
              Ras_Krig <- eval(parse(text=looptext)) # evaluate the kriging specification per cluster unit per layer
            } # end of parallel kriging loop
    stopCluster(cl) # close down cluster
    Files_krig <- list.files(Dir.Temp)[grep(pattern = "_data.nc", x = list.files(Dir.Temp))]
    Files_var <- list.files(Dir.Temp)[grep(pattern = "_SE.nc", x = list.files(Dir.Temp))]
    for(Iter_Load in 1:length(Files_krig)){ # load loop: load data from temporary files in Dir.Temp
      Ras_Krig[[Iter_Load]] <- raster(file.path(Dir.Temp, Files_krig[Iter_Load])) # load current temporary file and write contents to list of rasters
      Ras_Var[[Iter_Load]] <- raster(file.path(Dir.Temp, Files_var[Iter_Load])) # load current temporary file and write contents to list of rasters
    } # end of load loop
  }else{ # if non-parallel processing has been specified
    ### NON-PARALLEL KRIGING ---
    Count_Krige <- 1 # Establish count variable which is targeted in kriging specification text for producing an estimator
    for(Iter_Krige in Compute_Layers){ # non-parallel kriging loop over all layers in Data
      if(paste0(str_pad(Iter_Krige,4,'left','0'), '_data.nc') %in% list.files(Dir.Temp)){ # file check: if this file has already been produced
        Ras_Krig[[Iter_Krige]] <- raster(file.path(Dir.Temp, paste0(str_pad(Iter_Krige,4,'left','0'), '_data.nc'))) # load already produced kriged file and save it to list of rasters
        Ras_Var[[Iter_Krige]] <- raster(file.path(Dir.Temp, paste0(str_pad(Iter_Krige,4,'left','0'), '_SE.nc')))
        if(!exists("ProgBar")){ProgBar <- txtProgressBar(min = 0, max = nlayers(Data), style = 3)} # create progress bar when non-parallel processing}
        setTxtProgressBar(ProgBar, Iter_Krige) # update progress bar
        next() # jump to next layer
      } # end of file check
      T_Begin <- Sys.time() # record system time when layer kriging starts
      eval(parse(text=looptext)) # evaluate the kriging specification per layer
    } # end of non-parallel kriging loop
  } # end of Cores check
  ## SAVING FINAL PRODUCT ----
  if(is.null(DataSkips)){ # Skip check: if no layers needed to be skipped
    Ras_Krig <- brick(Ras_Krig) # convert list of kriged layers in actual rasterbrick of kriged layers
    writeRaster(x = Ras_Krig, filename = file.path(Dir, FileName), overwrite = TRUE, format="CDF") # save final product as raster
    Ras_Var <- brick(Ras_Var) # convert list of kriged layers in actual rasterbrick of kriged layers
    writeRaster(x = Ras_Var, filename = file.path(Dir, paste0("SE_",FileName)), overwrite = TRUE, format="CDF") # save final product as raster
  }else{ # if some layers needed to be skipped
    warning(paste0("Some of the layers in your raster could not be kriged. You will find all the individual layers (kriged and not kriged) in ", Dir, "."))
    Keep_Temporary <- TRUE # keep temporary files so kriged products are not deleted
  } # end of Skip check
  ### REMOVE FILES FROM HARD DRIVE ---
  if(Keep_Temporary == FALSE){ # cleanup check
    unlink(Dir.Temp, recursive = TRUE)
  } # end of cleanup check
  Krig_ls <- list(Ras_Krig, Ras_Var, Call_ls)
  names(Krig_ls) <- c("Kriging_Output", "Kriging_SE", "Call")
  return(Krig_ls) # return raster or list of layers
}
|
/R/Kriging.R
|
permissive
|
junjie2008v/KrigR
|
R
| false
| false
| 23,260
|
r
|
#' (multi-core) Kriging
#'
#' This function statistically downscales input data using covariate data and the kriging methodology. The function can be run in two ways:
#' \enumerate{
#' \item \strong{By Itself}: Use the arguments Data, Covariates_coarse, Covariates_fine when you already have raster files for your data which is to be downscaled as well as covariate raster data.
#' \item \strong{From Scratch}: Use the arguments Variable, Type, DataSet, DateStart, DateStop, TResolution, TStep, Extent, Dir, FileName, API_Key, API_User, and Target_res. By doing so, krigR will call the functions download_ERA() and download_DEM() for one coherent kriging workflow. Note that this process does not work when targeting UERRA data.
#' }
#' Use optional arguments such as Dir, FileName, Keep_Temporary, SingularTry, KrigingEquation and Cores for ease of use, substitution of non-GMTED2010 covariates, and parallel processing.
#'
#' @param Data Raster file which is to be downscaled.
#' @param Covariates_coarse Raster file containing covariates at training resolution.
#' @param Covariates_fine Raster file containing covariates at target resolution.
#' @param KrigingEquation Formula or character string specifying which covariates to use and how. Layer names in Covariates_coarse and Covariates_fine need to match Parameters in this formula. Needs to start with "X ~ ". X can read anything you like.
#' @param Dir Optional. Directory specifying where to place final kriged product. Default is current working directory.
#' @param FileName Optional. A file name for the netcdf produced. Default is a combination parameters in the function call.
#' @param Keep_Temporary Logical, whether to delete individual kriging products of layers in Data after processing. Default is TRUE.
#' @param Cores Numeric. How many cores to use. If you want output to your console during the process, use Cores == 1. Parallel processing is carried out when Cores is bigger than 1. Default is detecting all cores of your machine.
#' @param SingularTry Numeric. How often to try kriging of each layer of the input. This usually gets around issues of singular covariance matrices in the kriging process, but takes some time. Default is 10
#' @param Variable Optional, calls download_ERA(). ERA5(Land)-contained climate variable.
#' @param PrecipFix Optional. Era5(-land) total precipitation is recorded in cumulative steps per hour from the 00:00 time mark per day. Setting PrecipFix to TRUE converts these into records which represent the total precipitation per hour. Monthly records in Era5(-land) express the average daily total precipitation. Setting this argument to TRUE multiplies monthly records by the number of days per the respective month(s) to get to total precipitation records instead of average. Default is FALSE.
#' @param Type Optional. Whether to download reanalysis ('reanalysis') or ensemble ('ensemble_members', 'ensemble_mean', or 'ensemble_spread') data. Passed on to download_ERA.
#' @param DataSet Optional. Which ERA5 data set to download data from. 'era5' or 'era5-land'. Passed on to download_ERA.
#' @param DateStart Optional. Date ('YYYY-MM-DD') at which to start time series of downloaded data. Passed on to download_ERA.
#' @param DateStop Optional. Date ('YYYY-MM-DD') at which to stop time series of downloaded data. Passed on to download_ERA.
#' @param TResolution Optional. Temporal resolution of final product. 'hour', 'day', 'month'. Passed on to download_ERA.
#' @param TStep Optional. Which time steps (numeric) to consider for temporal resolution. Passed on to download_ERA.
#' @param FUN Optional. A raster calculation argument as passed to `raster::stackApply()`. This controls what kind of data to obtain for temporal aggregates of reanalysis data. Specify 'mean' (default) for mean values, 'min' for minimum values, and 'max' for maximum values, among others.
#' @param Extent Optional, download data according to rectangular bounding box. specify as extent() object or as a raster, a SpatialPolygonsDataFrame object, or a data.frame object. If Extent is a SpatialPolygonsDataFrame, this will be treated as a shapefile and the output will be cropped and masked to this shapefile. If Extent is a data.frame of geo-referenced point records, it needs to contain Lat and Lon columns as well as a non-repeating ID-column. Passed on to download_ERA and download_DEM.
#' @param Buffer Optional. Identifies how big a rectangular buffer to draw around points if Extent is a data frame of points. Buffer is expressed as centessimal degrees. Passed on to download_ERA and download_DEM.
#' @param ID Optional. Identifies which column in Extent to use for creation of individual buffers if Extent is a data.frame. Passed on to download_ERA and download_DEM.
#' @param Target_res Optional. The target resolution for the kriging step (i.e. which resolution to downscale to). An object as specified/produced by raster::res(). Passed on to download_DEM.
#' @param Source Optional, character. Whether to attempt download from the official USGS data viewer (Source = "USGS") or a static copy of the data set on a private drive (Source = "Drive"). Default is "USGS". Use this if the USGS viewer is unavailable. Passed on to download_DEM.
#' @param API_Key Optional. ECMWF cds API key. Passed on to download_ERA.
#' @param API_User Optional. ECMWF cds user number. Passed on to download_ERA.
#' @param nmax Optional. Controls local kriging. Number of nearest observations to be used kriging of each observation. Default is to use all available (Inf). You can specify as a number (numeric).
#' @param TryDown Optional, numeric. How often to attempt the download of each individual file (if querying data download) that the function queries from the server. This is to circumvent having to restart the entire function when encountering connectivity issues.
#' @param verbose Optional, logical. Whether to report progress of data download (if queried) in the console or not.
#' @param TimeOut Numeric. The timeout for each download in seconds. Default 36000 seconds (10 hours).
#' @param SingularDL Logical. Whether to force download of data in one call to CDS or automatically break download requests into individual monthly downloads. Default is FALSE.
#' @return A list object containing the downscaled data as well as the standard error for downscaling as well as the call to the krigR function, and two NETCDF (.nc) file in the specified directory which are the two data contents of the aforementioned list. A temporary directory is populated with individual NETCDF (.nc) files throughout the runtime of krigR which is deleted upon completion if Keep_Temporary = FALSE and all layers in the Data raster object were kriged successfully.
#' @examples
#' \dontrun{
#' ## THREE-STEP PROCESS (By Itself)
#' # Downloading ERA5-Land air temperature reanalysis data in 12-hour intervals for 02/01/1995 - 04/01/1995 (DD/MM/YYYY). API User and Key in this example are non-functional. Substitute with your user number and key to run this example.
#' Extent <- extent(c(11.8,15.1,50.1,51.7)) # roughly the extent of Saxony
#' API_User <- "..."
#' API_Key <- "..."
#' State_Raw <- download_ERA(
#' Variable = "2m_temperature",
#' DataSet = "era5-land",
#' DateStart = "1995-01-02",
#' DateStop = "1995-01-04",
#' TResolution = "hour",
#' TStep = 12,
#' Extent = Extent,
#' API_User = API_User,
#' API_Key = API_Key
#' )
#' State_Raw # a raster brick with 6 layers at resolution of ~0.1°
#' # Downloading GMTED2010-data at resolution and extent obtained by a call to download_ERA and a target resolution of .02.
#' Covs_ls <- download_DEM(
#' Train_ras = State_Raw,
#' Target_res = .02,
#' Keep_Temporary = TRUE
#' )
#' Covs_ls # a list with two elements: (1) GMTED 2010 data at training resolution, and (2) GMTED 2010 data aggregated as close as possible to a resolution of 0.02
#' # Kriging the data sets prepared with the previous functions.
#' State_Krig <- krigR(
#' Data = State_Raw, # data we want to krig as a raster object
#' Covariates_coarse = Covs_ls[[1]], # training covariate as a raster object
#' Covariates_fine = Covs_ls[[2]], # target covariate as a raster object
#' )
#'
#' ## PIPELINE (From Scratch)
#' #' # Downloading ERA5-Land air temperature reanalysis data in 12-hour intervals for 02/01/1995 - 04/01/1995 (DD/MM/YYYY), downloading and preparing GMTED 2010 covariate data, and kriging. API User and Key in this example are non-functional. Substitute with your user number and key to run this example. This example produces the same output as the example above.
#' Extent <- extent(c(11.8,15.1,50.1,51.7)) # roughly the extent of Saxony
#' API_User <- "..."
#' API_Key <- "..."
#' Pipe_Krig <- krigR(
#' Variable = "2m_temperature",
#' Type = "reanalysis",
#' DataSet = "era5-land",
#' DateStart = "1995-01-02",
#' DateStop = "1995-01-04",
#' TResolution = "hour",#
#' TStep = 12,
#' Extent = Extent,
#' API_User = API_User,
#' API_Key = API_Key,
#' Target_res = .02
#' )
#' }
#'
#' @export
krigR <- function(Data = NULL, Covariates_coarse = NULL, Covariates_fine = NULL, KrigingEquation = "ERA ~ DEM", Cores = detectCores(), Dir = getwd(), FileName, Keep_Temporary = TRUE, SingularTry = 10, Variable, PrecipFix = FALSE, Type = "reanalysis", DataSet = "era5-land", DateStart, DateStop, TResolution = "month", TStep = 1, FUN = 'mean', Extent, Buffer = 0.5, ID = "ID", API_Key, API_User, Target_res, Source = "USGS", nmax = Inf, TryDown = 10, verbose = TRUE, TimeOut = 36000, SingularDL = FALSE, ...){
## CALL LIST (for storing how the function as called in the output) ----
if(is.null(Data)){
Data_Retrieval <- list(Variable = Variable,
Type = Type,
PrecipFix = PrecipFix,
DataSet = DataSet,
DateStart = DateStart,
DateStop = DateStop,
TResolution = TResolution,
TStep = TStep,
Extent = Extent)
}else{
Data_Retrieval <- "None needed. Data was not queried via krigR function, but supplied by user."
}
## CLIMATE DATA (call to download_ERA function if no Data set is specified) ----
if(is.null(Data)){ # data check: if no data has been specified
Data <- download_ERA(Variable = Variable, PrecipFix = PrecipFix, Type = Type, DataSet = DataSet, DateStart = DateStart, DateStop = DateStop, TResolution = TResolution, TStep = TStep, FUN = FUN, Extent = Extent, API_User = API_User, API_Key = API_Key, Dir = Dir, TryDown = TryDown, verbose = verbose, ID = ID, Cores = Cores, TimeOut = TimeOut, SingularDL = SingularDL)
} # end of data check
## COVARIATE DATA (call to download_DEM function when no covariates are specified) ----
if(is.null(Covariates_coarse) & is.null(Covariates_fine)){ # covariate check: if no covariates have been specified
if(class(Extent) == "SpatialPolygonsDataFrame" | class(Extent) == "data.frame"){ # Extent check: if Extent input is a shapefile
Shape <- Extent # save shapefile for use as Shape in masking covariate data
}else{ # if Extent is not a shape, then extent specification is already baked into Data
Shape <- NULL # set Shape to NULL so it is ignored in download_DEM function when masking is applied
} # end of Extent check
Covs_ls <- download_DEM(Train_ras = Data, Target_res = Target_res, Shape = Shape, Buffer = Buffer, ID = ID, Keep_Temporary = Keep_Temporary, Dir = Dir)
Covariates_coarse <- Covs_ls[[1]] # extract coarse covariates from download_DEM output
Covariates_fine <- Covs_ls[[2]] # extract fine covariates from download_DEM output
} # end of covariate check
## KRIGING FORMULA (assure that KrigingEquation is a formula object) ----
KrigingEquation <- as.formula(KrigingEquation)
## CALL LIST (for storing how the function as called in the output) ----
Call_ls <- list(Data = SummarizeRaster(Data),
Covariates_coarse = SummarizeRaster(Covariates_coarse),
Covariates_fine = SummarizeRaster(Covariates_fine),
KrigingEquation = KrigingEquation,
Cores = Cores,
FileName = FileName,
Keep_Temporary = Keep_Temporary,
nmax = nmax,
Data_Retrieval = Data_Retrieval,
misc = ...)
## SANITY CHECKS (step into check_Krig function to catch most common error messages) ----
Check_Product <- check_Krig(Data = Data, CovariatesCoarse = Covariates_coarse, CovariatesFine = Covariates_fine, KrigingEquation = KrigingEquation)
KrigingEquation <- Check_Product[[1]] # extract KrigingEquation (this may have changed in check_Krig)
DataSkips <- Check_Product[[2]] # extract which layers to skip due to missing data (this is unlikely to ever come into action)
Terms <- unique(unlist(strsplit(labels(terms(KrigingEquation)), split = ":"))) # identify which layers of data are needed
## DATA REFORMATTING (Kriging requires spatially referenced data frames, reformatting from rasters happens here) ---
Origin <- raster::as.data.frame(Covariates_coarse, xy = TRUE) # extract covariate layers
Origin <- Origin[, c(1:2, which(colnames(Origin) %in% Terms))] # retain only columns containing terms
Target <- raster::as.data.frame(Covariates_fine, xy = TRUE) # extract covariate layers
Target <- Target[, c(1:2, which(colnames(Target) %in% Terms))] # retain only columns containing terms
Target <- na.omit(Target)
suppressWarnings(gridded(Target) <- ~x+y) # establish a gridded data product ready for use in kriging
Target@grid@cellsize[1] <- Target@grid@cellsize[2] # ensure that grid cells are square
## SET-UP TEMPORARY DIRECTORY (this is where kriged products of each layer will be saved) ----
Dir.Temp <- file.path(Dir, paste("Kriging", FileName, sep="_"))
if(!dir.exists(Dir.Temp)){dir.create(Dir.Temp)}
## KRIGING SPECIFICATION (this will be parsed and evaluated in parallel and non-parallel evaluations further down) ----
# looptext is a character scalar holding the per-layer kriging routine; it is
# run via eval(parse(text = looptext)) both in the sequential loop and inside
# foreach workers, so every name it uses must exist in the evaluating
# environment (Data, Iter_Krige, Origin, Target, Terms, KrigingEquation, nmax,
# SingularTry, Dir.Temp, Covariates_fine, Cores and - sequentially only -
# Count_Krige, T_Begin, ProgBar).
looptext <- "
## BUGFIX: reset the per-layer output rasters up front. The previous code
## tested !exists('Krig_ras') & !exists('Var_ras') before falling back to
## rasterize(); in the sequential loop both objects persist between layers, so
## a failed extraction could silently recycle the previous layer's rasters,
## and the '&' meant the fallback only ran when BOTH extractions had failed.
Krig_ras <- NULL
Var_ras <- NULL
OriginK <- cbind(Origin, raster::extract(x = Data[[Iter_Krige]], y = Origin[,1:2], df=TRUE)[, 2]) # combine data of current data layer with training covariate data
OriginK <- na.omit(OriginK) # get rid of NA cells
colnames(OriginK)[length(Terms)+3] <- c(terms(KrigingEquation)[[2]]) # assign column names
suppressWarnings(gridded(OriginK) <- ~x+y) # generate gridded product
OriginK@grid@cellsize[1] <- OriginK@grid@cellsize[2] # ensure that grid cells are square
Iter_Try = 0 # number of tries set to 0
kriging_result <- NULL
while(class(kriging_result)[1] != 'autoKrige' & Iter_Try < SingularTry){ # try kriging SingularTry times, this is because of a random process of variogram identification within the automap package that can fail on smaller datasets randomly when it isn't supposed to
try(invisible(capture.output(kriging_result <- autoKrige(formula = KrigingEquation, input_data = OriginK, new_data = Target, nmax = nmax))), silent = TRUE)
Iter_Try <- Iter_Try +1
}
if(class(kriging_result)[1] != 'autoKrige'){ # give error if kriging fails
message(paste0('Kriging failed for layer ', Iter_Krige, '. Error message produced by autoKrige function: ', geterrmessage()))
}
## retransform to raster
try( # try fastest way - this fails with certain edge artefacts in mercator projection and is fixed by using rasterize below
Krig_ras <- raster(x = kriging_result$krige_output, layer = 1), # extract prediction raster from kriging product
silent = TRUE
)
try(
Var_ras <- raster(x = kriging_result$krige_output, layer = 3), # extract standard-error raster from kriging product
silent = TRUE
)
if(is.null(Krig_ras) || is.null(Var_ras)){ # fall back to rasterize() if either direct extraction failed
Krig_ras <- rasterize(x = kriging_result$krige_output, y = Covariates_fine[[1]])[[2]] # extract raster from kriging product
Var_ras <- rasterize(x = kriging_result$krige_output, y = Covariates_fine)[[4]] # extract raster from kriging product
}
crs(Krig_ras) <- crs(Data) # setting the crs according to the data
crs(Var_ras) <- crs(Data) # setting the crs according to the data
if(Cores == 1){
Ras_Krig[[Iter_Krige]] <- Krig_ras
Ras_Var[[Iter_Krige]] <- Var_ras
} # stack kriged raster into raster list if non-parallel computing
writeRaster(x = Krig_ras, filename = file.path(Dir.Temp, paste0(str_pad(Iter_Krige,4,'left','0'), '_data.nc')), overwrite = TRUE, format='CDF') # save kriged raster to temporary directory
writeRaster(x = Var_ras, filename = file.path(Dir.Temp, paste0(str_pad(Iter_Krige,4,'left','0'), '_SE.nc')), overwrite = TRUE, format='CDF') # save kriging standard error raster to temporary directory
if(Cores == 1){ # core check: if processing non-parallel
if(Count_Krige == 1){ # count check: if this was the first actual computation
T_End <- Sys.time() # record time at which kriging was done for current layer
Duration <- as.numeric(T_End)-as.numeric(T_Begin) # calculate how long it took to krig on layer
message(paste('Kriging of remaining ', nlayers(Data)-Iter_Krige, ' data layers should finish around: ', as.POSIXlt(T_Begin + Duration*nlayers(Data), tz = Sys.timezone(location=TRUE)), sep='')) # console output with estimate of when the kriging should be done
ProgBar <- txtProgressBar(min = 0, max = nlayers(Data), style = 3) # create progress bar when non-parallel processing
Count_Krige <- Count_Krige + 1 # raise count by one so the estimator isn't called again
} # end of count check
setTxtProgressBar(ProgBar, Iter_Krige) # update progress bar with number of current layer
} # end of core check
"
## KRIGING PREPARATION (establishing objects which the kriging refers to) ----
Ras_Krig <- as.list(rep(NA, nlayers(Data))) # establish an empty list which will be filled with kriged layers
Ras_Var <- as.list(rep(NA, nlayers(Data))) # establish an empty list which will be filled with kriging standard-error layers
if(verbose){message("Commencing Kriging")}
## DATA SKIPS (if certain layers in the data are empty and need to be skipped, this is handled here) ---
if(!is.null(DataSkips)){ # Skip check: if layers need to be skipped
for(Iter_Skip in DataSkips){ # Skip loop: loop over all layers that need to be skipped
Ras_Krig[[Iter_Skip]] <- Data[[Iter_Skip]] # add raw data (which should be empty) to list
# NOTE: skipped-layer files carry no '_data'/'_SE' suffix, so the parallel
# load loop below (which greps for those suffixes) will not pick them up
writeRaster(x = Ras_Krig[[Iter_Skip]], filename = file.path(Dir.Temp, str_pad(Iter_Skip,4,'left','0')), overwrite = TRUE, format = 'CDF') # save raw layer to temporary directory, needed for loading back in when parallel processing
} # end of Skip loop
Layers_vec <- 1:nlayers(Data) # identify vector of all layers in data
Compute_Layers <- Layers_vec[which(!Layers_vec %in% DataSkips)] # identify which layers can actually be computed on
}else{ # if we don't need to skip any layers
Compute_Layers <- 1:nlayers(Data) # set computing layers to all layers in data
} # end of Skip check
## ACTUAL KRIGING (carry out kriging according to user specifications either in parallel or on a single core) ----
# looptext (defined above) is evaluated once per layer; in parallel mode each
# worker writes its results to Dir.Temp and the rasters are read back in below.
if(Cores > 1){ # Cores check: if parallel processing has been specified
### PARALLEL KRIGING ---
ForeachObjects <- c("Dir.Temp", "Cores", "Data", "KrigingEquation", "Origin", "Target", "Covariates_coarse", "Covariates_fine", "Terms", "SingularTry", "nmax") # objects which are needed for each kriging run and are thus handed to each cluster unit
cl <- makeCluster(Cores) # Assuming Cores node cluster
registerDoParallel(cl) # registering cores
foreach(Iter_Krige = Compute_Layers, # kriging loop over all layers in Data, with condition (%:% when(...)) to only run if current layer is not present in Dir.Temp yet
.packages = c("raster", "stringr", "automap", "ncdf4", "rgdal"), # import packages necessary to each iteration
.export = ForeachObjects) %:% when(!paste0(str_pad(Iter_Krige,4,"left","0"), '_data.nc') %in% list.files(Dir.Temp)) %dopar% { # parallel kriging loop
Ras_Krig <- eval(parse(text=looptext)) # evaluate the kriging specification per cluster unit per layer (worker-local; the files written to Dir.Temp are what gets read back in)
} # end of parallel kriging loop
stopCluster(cl) # close down cluster
Files_krig <- list.files(Dir.Temp)[grep(pattern = "_data.nc", x = list.files(Dir.Temp))]
for(Iter_Load in seq_along(Files_krig)){ # load loop: load data from temporary files in Dir.Temp
## BUGFIX: derive the layer index from the file name (files are named
## '<0-padded layer index>_data.nc'). Previously the loop position was used
## as the list index, which mis-assigned layers whenever DataSkips removed
## layers from Compute_Layers (skipped files lack the '_data'/'_SE' suffix
## and are therefore absent from Files_krig). The matching '_SE.nc' file is
## derived from the same name so prediction and error always stay paired.
File_Krige <- Files_krig[Iter_Load]
Index_Load <- as.integer(sub("_data\\.nc$", "", File_Krige))
Ras_Krig[[Index_Load]] <- raster(file.path(Dir.Temp, File_Krige)) # load current temporary file and write contents to list of rasters
Ras_Var[[Index_Load]] <- raster(file.path(Dir.Temp, sub("_data\\.nc$", "_SE.nc", File_Krige))) # load matching standard-error file
} # end of load loop
}else{ # if non-parallel processing has been specified
### NON-PARALLEL KRIGING ---
Count_Krige <- 1 # Establish count variable which is targeted in kriging specification text for producing an estimator
for(Iter_Krige in Compute_Layers){ # non-parallel kriging loop over all layers in Data
if(paste0(str_pad(Iter_Krige,4,'left','0'), '_data.nc') %in% list.files(Dir.Temp)){ # file check: if this file has already been produced
Ras_Krig[[Iter_Krige]] <- raster(file.path(Dir.Temp, paste0(str_pad(Iter_Krige,4,'left','0'), '_data.nc'))) # load already produced kriged file and save it to list of rasters
Ras_Var[[Iter_Krige]] <- raster(file.path(Dir.Temp, paste0(str_pad(Iter_Krige,4,'left','0'), '_SE.nc')))
if(!exists("ProgBar")){ProgBar <- txtProgressBar(min = 0, max = nlayers(Data), style = 3)} # create progress bar when non-parallel processing
setTxtProgressBar(ProgBar, Iter_Krige) # update progress bar
next() # jump to next layer
} # end of file check
T_Begin <- Sys.time() # record system time when layer kriging starts
eval(parse(text=looptext)) # evaluate the kriging specification per layer
} # end of non-parallel kriging loop
} # end of Cores check
## SAVING FINAL PRODUCT ----
if(is.null(DataSkips)){ # Skip check: if no layers needed to be skipped
Ras_Krig <- brick(Ras_Krig) # convert list of kriged layers in actual rasterbrick of kriged layers
writeRaster(x = Ras_Krig, filename = file.path(Dir, FileName), overwrite = TRUE, format="CDF") # save final product as raster
Ras_Var <- brick(Ras_Var) # convert list of standard-error layers into an actual rasterbrick
writeRaster(x = Ras_Var, filename = file.path(Dir, paste0("SE_",FileName)), overwrite = TRUE, format="CDF") # save final product as raster
}else{ # if some layers needed to be skipped
# NOTE: with skipped layers no combined file is written and the outputs below
# remain plain lists of rasters; the per-layer files in Dir.Temp are retained.
warning(paste0("Some of the layers in your raster could not be kriged. You will find all the individual layers (kriged and not kriged) in ", Dir, "."))
Keep_Temporary <- TRUE # keep temporary files so kriged products are not deleted
} # end of Skip check
### REMOVE FILES FROM HARD DRIVE ---
if(Keep_Temporary == FALSE){ # cleanup check
unlink(Dir.Temp, recursive = TRUE) # delete the per-layer temporary directory and all of its contents
} # end of cleanup check
# assemble the return value: kriged predictions, their standard errors, and the
# call record (Call_ls is built earlier in this function - outside this view)
Krig_ls <- list(Ras_Krig, Ras_Var, Call_ls)
names(Krig_ls) <- c("Kriging_Output", "Kriging_SE", "Call")
return(Krig_ls) # return raster or list of layers
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.