content large_stringlengths 0 6.46M | path large_stringlengths 3 331 | license_type large_stringclasses 2 values | repo_name large_stringlengths 5 125 | language large_stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.46M | extension large_stringclasses 75 values | text stringlengths 0 6.46M |
|---|---|---|---|---|---|---|---|---|---|
# Nucleotide composition analysis of two Fragaria species using seqinr.
library("seqinr")
getwd()

# --- Fragaria vesca ---
dnafile <- read.fasta("Fragaria_vesca.fasta")
length(dnafile)                    # number of sequences in the file
n1 <- dnafile[[2]]                 # second sequence (NOTE(review): confirm [[2]] rather than [[1]] is intended)
table1 <- count(n1, 1)             # counts the number of nucleotides
table2 <- count(n1, 2)             # counts the number of dinucleotides
table3 <- count(n1, 3)             # counts the number of trinucleotides
GC(n1)                             # GC content
annotation <- getAnnot(dnafile)    # storing the fasta header
# graphs for the fragaria vesca
# NOTE(review): `blues9` is exported by ggplot2, which is not loaded here -- verify it is available.
barplot(table1, main="Nucleotides Count for the Fragaria Vesca", xlab="Nucleotides",las=1, col=blues9)
barplot(table2, main="Dinucleotides Count for the Fragaria Vesca", xlab="Dinucleotides", las=1, col=blues9)
barplot(table3, main="Trinucleotides Count for the Fragaria Vesca", xlab="Trinucleotides", las=1, col=blues9)
############################################################
# --- Fragaria ananassa ---
dnafile2 <- read.fasta("Fragaria_ananassa.fasta")
length(dnafile2)
n2 <- dnafile2[[2]]                # BUG FIX: was dnafile[[2]], which re-read the vesca sequence
table4 <- count(n2, 1)
table5 <- count(n2, 2)
table6 <- count(n2, 3)             # BUG FIX: was count(n3, 3); `n3` was never defined
GC(n2)                             # BUG FIX: was GC(n1), reporting the vesca GC content twice
annotation2 <- getAnnot(dnafile2)  # BUG FIX: no longer clobbers the vesca annotations
barplot(table4, main="Nucleotides Count for the Fragaria Ananassa", xlab="Nucleotides",las=1)
barplot(table5, main="Dinucleotides Count for the Fragaria Ananassa", xlab="Dinucleotides", las=1)
barplot(table6, main="Trinucleotides Count for the Fragaria Ananassa", xlab="Trinucleotides", las=1)
| /main.R | no_license | bolivarez9193/Gene-Web-App | R | false | false | 1,258 | r |
# Nucleotide composition analysis of two Fragaria species using seqinr.
library("seqinr")
getwd()

# --- Fragaria vesca ---
dnafile <- read.fasta("Fragaria_vesca.fasta")
length(dnafile)                    # number of sequences in the file
n1 <- dnafile[[2]]                 # second sequence (NOTE(review): confirm [[2]] rather than [[1]] is intended)
table1 <- count(n1, 1)             # counts the number of nucleotides
table2 <- count(n1, 2)             # counts the number of dinucleotides
table3 <- count(n1, 3)             # counts the number of trinucleotides
GC(n1)                             # GC content
annotation <- getAnnot(dnafile)    # storing the fasta header
# graphs for the fragaria vesca
# NOTE(review): `blues9` is exported by ggplot2, which is not loaded here -- verify it is available.
barplot(table1, main="Nucleotides Count for the Fragaria Vesca", xlab="Nucleotides",las=1, col=blues9)
barplot(table2, main="Dinucleotides Count for the Fragaria Vesca", xlab="Dinucleotides", las=1, col=blues9)
barplot(table3, main="Trinucleotides Count for the Fragaria Vesca", xlab="Trinucleotides", las=1, col=blues9)
############################################################
# --- Fragaria ananassa ---
dnafile2 <- read.fasta("Fragaria_ananassa.fasta")
length(dnafile2)
n2 <- dnafile2[[2]]                # BUG FIX: was dnafile[[2]], which re-read the vesca sequence
table4 <- count(n2, 1)
table5 <- count(n2, 2)
table6 <- count(n2, 3)             # BUG FIX: was count(n3, 3); `n3` was never defined
GC(n2)                             # BUG FIX: was GC(n1), reporting the vesca GC content twice
annotation2 <- getAnnot(dnafile2)  # BUG FIX: no longer clobbers the vesca annotations
barplot(table4, main="Nucleotides Count for the Fragaria Ananassa", xlab="Nucleotides",las=1)
barplot(table5, main="Dinucleotides Count for the Fragaria Ananassa", xlab="Dinucleotides", las=1)
barplot(table6, main="Trinucleotides Count for the Fragaria Ananassa", xlab="Trinucleotides", las=1)
|
levels(gnat) <- c("GLD", "50", "200")
levels(gnat)
[1] "GLD" "50" "200" | /task_reference/base_functions/levels.r | no_license | githubfun/R | R | false | false | 74 | r | levels(gnat) <- c("GLD", "50", "200")
levels(gnat)
[1] "GLD" "50" "200" |
#' Download and Parse Hero Data
#'
#' This function downloads and parses the heroesjson data into a mangable
#' data.frame.
#'
#' @details
#'
#' The raw data is pulled from \url{http://heroesjson.com/heroes.json}.
#'
#' @export
hero_data <- function() {
  # Download the full hero list from heroesjson.com and flatten it into one
  # data frame: a base record is built from each hero's top-level JSON
  # fields, then stats, abilities and talents are joined on, yielding one
  # row per hero/ability/talent combination.
  # NOTE(review): requires network access; `data_frame()` (used throughout
  # this file) is a deprecated alias of `tibble()` -- behaviour unchanged.
  GET('http://heroesjson.com/heroes.json') %>%
    content %>%
    lapply(
      function(hero) {
        # Base per-hero record from the top-level JSON fields.
        data_frame(
          Id = hero$id,
          # Name = hero$name,
          Title = hero$title,
          Description = hero$description,
          Role = hero$role,
          Type = hero$type,
          Gender = hero$gender,
          Franchise = hero$franchise,
          Difficulty = hero$difficulty,
          DamageRating = hero$ratings$damage,
          UtilityRating = hero$ratings$utility,
          SurvivabilityRating = hero$ratings$survivability,
          ComplexityRating = hero$ratings$complexity,
          ReleaseDate = hero$releaseDate
        ) %>%
          # Name is supplied by parse_stats() via this join on Id (which is
          # why the Name field above is commented out).
          left_join(
            parse_stats(hero$id, hero$stats),
            by = 'Id'
          ) %>%
          # Abilities join on Name so multi-unit heroes (Lost Vikings,
          # Uther's spirit form) fan out correctly.
          left_join(
            parse_abilities(hero$abilities),
            by = 'Name'
          ) %>%
          left_join(
            parse_talents(hero$id, hero$talents),
            by = 'Id'
          )
      }
    ) %>%
    bind_rows %>%
    select(Id, Name, everything())
}
# Flatten a hero's per-form stat list into one data frame row per form.
#
# `stats` is a named list keyed by form name; each entry carries the hp and
# mana figures read below. Uther is special-cased: his single stat block
# applies to both 'Uther' and 'UtherSpirit', so that entry expands into two
# rows (data_frame recycles the scalar columns).
parse_stats <- function(id, stats) {
  per_form <- lapply(
    names(stats),
    function(form) {
      entry <- stats[[form]]
      # One name per row; Uther's entry yields two named rows.
      row_names <- `if`(form == 'Uther', c('Uther', 'UtherSpirit'), form)
      data_frame(
        Id = id,
        Name = row_names,
        Hp = entry$hp,
        HpPerLevel = entry$hpPerLevel,
        HpRegen = entry$hpRegen,
        HpRegenPerLevel = entry$hpRegenPerLevel,
        Mana = entry$mana,
        ManaPerLevel = entry$manaPerLevel,
        ManaRegen = entry$manaRegen,
        ManaRegenPerLevel = entry$manaRegenPerLevel
      )
    }
  )
  bind_rows(per_form)
}
parse_abilities <- function(abilities) {
  # Flatten each hero's ability list into a data frame, separating regular
  # abilities from trait entries:
  # - entries without a $trait flag become Ability* columns;
  # - entries flagged as traits become Trait* columns, joined back on Name
  #   so every ability row carries its hero's trait.
  # The Lost Vikings are special-cased: their shared entries fan out to one
  # row per viking hero unit.
  bind_rows(
    lapply(
      names(abilities),
      function(nm) {
        # Regular (non-trait) abilities. The inner function returns NULL
        # for trait entries, which bind_rows silently drops.
        abilities_df <- bind_rows(
          lapply( # abilities
            abilities[[nm]],
            function(abl) {
              if (is.null(abl$trait)) {
                data_frame(
                  Name = `if`(nm == 'LostVikings', c('HeroBaleog', 'HeroErik', 'HeroOlaf'), nm),
                  AbilityId = abl$id,
                  AbilityName = abl$name,
                  AbilityManaCost = abl$manaCost %||% NA,
                  AbilityHeroic = abl$heroic %||% FALSE,
                  AbilityDescription = abl$description,
                  AbilityCooldown = abl$cooldown %||% NA,
                  # NOTE(review): no %||% fallback here, unlike the other
                  # optional fields -- confirm shortcut is always present.
                  AbilityShortcut = abl$shortcut
                )
              }
            }
          )
        )
        # Trait entries only (flag present and truthy).
        trait_df <- bind_rows(
          lapply( # traits
            abilities[[nm]],
            function(trt) {
              if (!is.null(trt$trait) && trt$trait) {
                data_frame(
                  Name = `if`(nm == 'LostVikings', c('HeroBaleog', 'HeroErik', 'HeroOlaf'), nm),
                  TraitId = trt$id,
                  TraitName = trt$name,
                  TraitDescription = trt$description,
                  TraitCooldown = trt$cooldown %||% NA
                )
              }
            }
          )
        )
        # Heroes without a trait keep plain ability rows; otherwise attach
        # the trait columns to every ability row.
        if (NROW(trait_df) == 0) {
          abilities_df
        } else {
          left_join(abilities_df, trait_df, by = 'Name')
        }
      }
    )
  )
}
# Expand a hero's talent tree into one data frame row per talent.
#
# `talents` is a named list keyed by tier; each tier holds a list of talent
# records. The tier name is carried into the TalentTier column so rows can
# be grouped by tier later.
parse_talents <- function(id, talents) {
  per_tier <- lapply(
    names(talents),
    function(tier) {
      tier_rows <- lapply(
        talents[[tier]],
        function(talent) {
          data_frame(
            Id = id,
            TalentTier = tier,
            TalentId = talent$id,
            TalentName = talent$name,
            TalentDescription = talent$description,
            # Cooldown is optional in the JSON; missing becomes NA.
            TalentCooldown = talent$cooldown %||% NA
          )
        }
      )
      bind_rows(tier_rows)
    }
  )
  bind_rows(per_tier)
}
| /R/heroesjson.R | no_license | nteetor/hotr | R | false | false | 3,974 | r | #' Download and Parse Hero Data
#'
#' This function downloads and parses the heroesjson data into a mangable
#' data.frame.
#'
#' @details
#'
#' The raw data is pulled from \url{http://heroesjson.com/heroes.json}.
#'
#' @export
hero_data <- function() {
  # Download the full hero list from heroesjson.com and flatten it into one
  # data frame: a base record is built from each hero's top-level JSON
  # fields, then stats, abilities and talents are joined on, yielding one
  # row per hero/ability/talent combination.
  # NOTE(review): requires network access; `data_frame()` (used throughout
  # this file) is a deprecated alias of `tibble()` -- behaviour unchanged.
  GET('http://heroesjson.com/heroes.json') %>%
    content %>%
    lapply(
      function(hero) {
        # Base per-hero record from the top-level JSON fields.
        data_frame(
          Id = hero$id,
          # Name = hero$name,
          Title = hero$title,
          Description = hero$description,
          Role = hero$role,
          Type = hero$type,
          Gender = hero$gender,
          Franchise = hero$franchise,
          Difficulty = hero$difficulty,
          DamageRating = hero$ratings$damage,
          UtilityRating = hero$ratings$utility,
          SurvivabilityRating = hero$ratings$survivability,
          ComplexityRating = hero$ratings$complexity,
          ReleaseDate = hero$releaseDate
        ) %>%
          # Name is supplied by parse_stats() via this join on Id (which is
          # why the Name field above is commented out).
          left_join(
            parse_stats(hero$id, hero$stats),
            by = 'Id'
          ) %>%
          # Abilities join on Name so multi-unit heroes (Lost Vikings,
          # Uther's spirit form) fan out correctly.
          left_join(
            parse_abilities(hero$abilities),
            by = 'Name'
          ) %>%
          left_join(
            parse_talents(hero$id, hero$talents),
            by = 'Id'
          )
      }
    ) %>%
    bind_rows %>%
    select(Id, Name, everything())
}
# Flatten a hero's per-form stat list into one data frame row per form.
#
# `stats` is a named list keyed by form name; each entry carries the hp and
# mana figures read below. Uther is special-cased: his single stat block
# applies to both 'Uther' and 'UtherSpirit', so that entry expands into two
# rows (data_frame recycles the scalar columns).
parse_stats <- function(id, stats) {
  per_form <- lapply(
    names(stats),
    function(form) {
      entry <- stats[[form]]
      # One name per row; Uther's entry yields two named rows.
      row_names <- `if`(form == 'Uther', c('Uther', 'UtherSpirit'), form)
      data_frame(
        Id = id,
        Name = row_names,
        Hp = entry$hp,
        HpPerLevel = entry$hpPerLevel,
        HpRegen = entry$hpRegen,
        HpRegenPerLevel = entry$hpRegenPerLevel,
        Mana = entry$mana,
        ManaPerLevel = entry$manaPerLevel,
        ManaRegen = entry$manaRegen,
        ManaRegenPerLevel = entry$manaRegenPerLevel
      )
    }
  )
  bind_rows(per_form)
}
parse_abilities <- function(abilities) {
  # Flatten each hero's ability list into a data frame, separating regular
  # abilities from trait entries:
  # - entries without a $trait flag become Ability* columns;
  # - entries flagged as traits become Trait* columns, joined back on Name
  #   so every ability row carries its hero's trait.
  # The Lost Vikings are special-cased: their shared entries fan out to one
  # row per viking hero unit.
  bind_rows(
    lapply(
      names(abilities),
      function(nm) {
        # Regular (non-trait) abilities. The inner function returns NULL
        # for trait entries, which bind_rows silently drops.
        abilities_df <- bind_rows(
          lapply( # abilities
            abilities[[nm]],
            function(abl) {
              if (is.null(abl$trait)) {
                data_frame(
                  Name = `if`(nm == 'LostVikings', c('HeroBaleog', 'HeroErik', 'HeroOlaf'), nm),
                  AbilityId = abl$id,
                  AbilityName = abl$name,
                  AbilityManaCost = abl$manaCost %||% NA,
                  AbilityHeroic = abl$heroic %||% FALSE,
                  AbilityDescription = abl$description,
                  AbilityCooldown = abl$cooldown %||% NA,
                  # NOTE(review): no %||% fallback here, unlike the other
                  # optional fields -- confirm shortcut is always present.
                  AbilityShortcut = abl$shortcut
                )
              }
            }
          )
        )
        # Trait entries only (flag present and truthy).
        trait_df <- bind_rows(
          lapply( # traits
            abilities[[nm]],
            function(trt) {
              if (!is.null(trt$trait) && trt$trait) {
                data_frame(
                  Name = `if`(nm == 'LostVikings', c('HeroBaleog', 'HeroErik', 'HeroOlaf'), nm),
                  TraitId = trt$id,
                  TraitName = trt$name,
                  TraitDescription = trt$description,
                  TraitCooldown = trt$cooldown %||% NA
                )
              }
            }
          )
        )
        # Heroes without a trait keep plain ability rows; otherwise attach
        # the trait columns to every ability row.
        if (NROW(trait_df) == 0) {
          abilities_df
        } else {
          left_join(abilities_df, trait_df, by = 'Name')
        }
      }
    )
  )
}
# Expand a hero's talent tree into one data frame row per talent.
#
# `talents` is a named list keyed by tier; each tier holds a list of talent
# records. The tier name is carried into the TalentTier column so rows can
# be grouped by tier later.
parse_talents <- function(id, talents) {
  per_tier <- lapply(
    names(talents),
    function(tier) {
      tier_rows <- lapply(
        talents[[tier]],
        function(talent) {
          data_frame(
            Id = id,
            TalentTier = tier,
            TalentId = talent$id,
            TalentName = talent$name,
            TalentDescription = talent$description,
            # Cooldown is optional in the JSON; missing becomes NA.
            TalentCooldown = talent$cooldown %||% NA
          )
        }
      )
      bind_rows(tier_rows)
    }
  )
  bind_rows(per_tier)
}
|
# Test setup: pin the timezone to UTC (restored in the teardown at the
# bottom of this file) and run everything from a temporary directory so the
# fixture CSVs written below never touch the package sources.
skip_on_cran()
oldtz <- Sys.getenv('TZ', unset = NA)
Sys.setenv(TZ = 'UTC')
tests.home <- getwd()
setwd(tempdir())
# Argument validation: loadBio must refuse to run without its two mandatory
# arguments, and must fail cleanly when the input file does not exist.
test_that("loadBio stops if arguments or file are missing", {
  # Missing arguments
  expect_error(loadBio(), "'input' is missing.", fixed = TRUE)
  expect_error(loadBio(input = "test"), "'tz' is missing.", fixed = TRUE)
  # Missing file
  expect_error(loadBio("biometrics.csv", tz = "Europe/Copenhagen"),
    "Could not find a 'biometrics.csv' file in the working directory.", fixed = TRUE)
})
# Column sanity: duplicated column names must abort with a clear message.
test_that("loadBio stops if there are duplicated columns", {
  # Duplicated cols
  bio <- example.biometrics
  colnames(bio)[2:3] <- "Group"
  write.csv(bio, "biometrics.csv", row.names = FALSE)
  expect_error(loadBio("biometrics.csv", tz = "Europe/Copenhagen"),
    "The following columns are duplicated in the biometrics: 'Group'.", fixed = TRUE)
  file.remove("biometrics.csv")
})
# Mandatory vs. optional columns: Release.date and Signal are required
# (error); Release.site and Group are optional (informative message plus a
# filled-in default column).
test_that("loadBio fails if needed columns are missing", {
  # Missing release date
  bio <- example.biometrics
  colnames(bio)[1] <- "test"
  write.csv(bio, "biometrics.csv", row.names = FALSE)
  expect_error(loadBio("biometrics.csv", tz = "Europe/Copenhagen"),
    "The biometrics must contain an 'Release.date' column.", fixed = TRUE)
  # Missing Signal column
  bio <- example.biometrics
  colnames(bio)[4] <- "test"
  write.csv(bio, "biometrics.csv", row.names = FALSE)
  expect_error(loadBio("biometrics.csv", tz = "Europe/Copenhagen"),
    "The biometrics must contain an 'Signal' column.", fixed = TRUE)
  # No release sites
  bio <- example.biometrics
  write.csv(bio[, -2], "biometrics.csv", row.names = FALSE)
  expect_message(loadBio("biometrics.csv", tz = "Europe/Copenhagen"),
    "M: No Release site has been indicated in the biometrics. Creating a 'Release.site' column to avoid function failure. Filling with 'unspecified'.", fixed = TRUE)
  # no group column
  bio <- example.biometrics
  write.csv(bio[, -5], "biometrics.csv", row.names = FALSE)
  expect_message(loadBio("biometrics.csv", tz = "Europe/Copenhagen"),
    "M: No 'Group' column found in the biometrics. Assigning all animals to group 'All'.", fixed = TRUE)
  file.remove("biometrics.csv")
})
# Content validation: malformed dates, non-integer or duplicated signals,
# and blank/overlapping group and release-site values.
test_that("loadBio stops if column content is unexpected", {
  # Badly formated release date
  bio <- example.biometrics
  bio$Release.date <- as.character(bio$Release.date)
  bio$Release.date[1] <- "test"
  # BUG FIX: this fixture was written and read as "biometrics" (no .csv
  # extension), so the file.remove("biometrics.csv") at the end of this
  # test never deleted it and the stray file leaked into tempdir. Use the
  # same name as every other fixture in this file.
  write.csv(bio, "biometrics.csv", row.names = FALSE)
  expect_error(loadBio("biometrics.csv", tz = "Europe/Copenhagen"),
    "Not all values in the 'Release.date' column appear to be in a 'yyyy-mm-dd hh:mm' format (seconds are optional). Please double-check the biometrics.", fixed = TRUE)
  # Badly coded release date
  bio <- example.biometrics
  bio$Release.date <- as.character(bio$Release.date)
  bio$Release.date[1] <- "2999-19-39 29:59"
  write.csv(bio, "biometrics.csv", row.names = FALSE)
  expect_error(loadBio("biometrics.csv", tz = "Europe/Copenhagen"),
    "Could not recognise the data in the 'Release.date' column as POSIX-compatible timestamps. Please double-check the biometrics.", fixed = TRUE)
  # Badly formatted signal
  bio <- example.biometrics
  bio$Signal[1] <- "test"
  write.csv(bio, "biometrics.csv", row.names = FALSE)
  expect_error(loadBio("biometrics.csv", tz = "Europe/Copenhagen"),
    "Could not recognise the data in the 'Signal' column as integers. Please double-check the biometrics.", fixed = TRUE)
  # Missing signal data
  bio <- example.biometrics
  bio$Signal[1] <- NA
  write.csv(bio, "biometrics.csv", row.names = FALSE)
  expect_error(loadBio("biometrics.csv", tz = "Europe/Copenhagen"),
    "Some animals have no 'Signal' information. Please double-check the biometrics.", fixed = TRUE)
  # one duplicated signal
  bio <- example.biometrics
  bio$Signal[1:2] <- 1234
  write.csv(bio, "biometrics.csv", row.names = FALSE)
  expect_error(loadBio("biometrics.csv", tz = "Europe/Copenhagen"),
    "Signal 1234 is duplicated in the biometrics.", fixed = TRUE)
  # multiple duplicated signal
  bio <- example.biometrics
  bio$Signal[1:4] <- c(1234, 1234, 5678, 5678)
  write.csv(bio, "biometrics.csv", row.names = FALSE)
  expect_error(loadBio("biometrics.csv", tz = "Europe/Copenhagen"),
    "Signals 1234, 5678 are duplicated in the biometrics.", fixed = TRUE)
  # some animals missing release site information (NA and empty-string cases)
  bio <- example.biometrics
  bio$Release.site[1] <- NA
  write.csv(bio, "biometrics.csv", row.names = FALSE)
  expect_warning(loadBio("biometrics.csv", tz = "Europe/Copenhagen"),
    "Some animals contain no release site information. You may want to double-check the data.\n   Filling the blanks with 'unspecified'.", fixed = TRUE)
  bio <- example.biometrics
  bio$Release.site <- as.character(bio$Release.site)
  bio$Release.site[1] <- ""
  write.csv(bio, "biometrics.csv", row.names = FALSE)
  expect_warning(loadBio("biometrics.csv", tz = "Europe/Copenhagen"),
    "Some animals contain no release site information. You may want to double-check the data.\n   Filling the blanks with 'unspecified'.", fixed = TRUE)
  # some animals missing group information (NA and empty-string cases)
  bio <- example.biometrics
  bio$Group[1] <- NA
  write.csv(bio, "biometrics.csv", row.names = FALSE)
  expect_warning(loadBio("biometrics.csv", tz = "Europe/Copenhagen"),
    "Some animals contain no group information. You may want to double-check the data.\n   Filling the blanks with 'unspecified'.", fixed = TRUE)
  bio <- example.biometrics
  bio$Group <- as.character(bio$Group)
  bio$Group[1] <- ""
  write.csv(bio, "biometrics.csv", row.names = FALSE)
  expect_warning(loadBio("biometrics.csv", tz = "Europe/Copenhagen"),
    "Some animals contain no group information. You may want to double-check the data.\n   Filling the blanks with 'unspecified'.", fixed = TRUE)
  # Some groups are contained within others
  bio <- example.biometrics
  levels(bio$Group) <- c("A", "AB")
  write.csv(bio, "biometrics.csv", row.names = FALSE)
  expect_warning(output <- loadBio("biometrics.csv", tz = "Europe/Copenhagen"),
    "Group 'A' is contained within other groups. To avoid function failure, a number will be appended to this group.", fixed = TRUE)
  expect_equal(levels(output$Group), c("A_1", "AB"))
  rm(output)
  # Some groups have dots
  bio <- example.biometrics
  levels(bio$Group) <- c("A", "B.C")
  write.csv(bio, "biometrics.csv", row.names = FALSE)
  expect_message(output <- loadBio("biometrics.csv", tz = "Europe/Copenhagen"),
    "M: Some groups contain one or more '.' characters. To avoid function failure, these will be replaced with '_'.", fixed = TRUE)
  expect_equal(levels(output$Group), c("A", "B_C"))
  rm(output)
  file.remove("biometrics.csv")
})
# Round trip: writing the packaged example and loading it back must
# reproduce the original object exactly.
test_that("loadBio output matches example.biometrics", {
  # Output is correct
  write.csv(example.biometrics, "biometrics.csv", row.names = FALSE)
  bio <- loadBio("biometrics.csv", tz = "Europe/Copenhagen")
  expect_equal(bio, example.biometrics)
  file.remove("biometrics.csv")
})
# Multi-sensor tags: signals may be written as "low|high"; validation must
# still run per signal, and a Sensor.unit column is needed for assignment.
test_that("loadBio can handle multi-sensor tags.", {
  xbio <- example.biometrics[-(1:4), ]
  xbio$Signal <- as.character(xbio$Signal)
  xbio$Signal[1] <- "4453|4454"
  write.csv(xbio, "biometrics.csv", row.names = FALSE)
  # Valid multi-signal but no Sensor.unit column -> warning, not error.
  expect_message(
    expect_warning(bio <- loadBio("biometrics.csv", tz = "Europe/Copenhagen"),
      "Tags with multiple sensors are listed in the biometrics, but a 'Sensor.unit' column could not be found. Skipping sensor unit assignment.", fixed = TRUE),
    "M: Multi-sensor tags detected. These tags will be referred to by their lowest signal value.", fixed = TRUE)
  # Non-integer component inside a multi-signal entry -> error.
  xbio$Signal[1] <- "test|4454"
  write.csv(xbio, "biometrics.csv", row.names = FALSE)
  expect_message(
    expect_error(bio <- loadBio("biometrics.csv", tz = "Europe/Copenhagen"),
      "Could not recognise the data in the 'Signal' column as integers. Please double-check the biometrics.", fixed = TRUE),
    "M: Multi-sensor tags detected. These tags will be referred to by their lowest signal value.", fixed = TRUE)
  # A component that collides with another animal's signal -> error.
  xbio$Signal[1] <- "4455|4456"
  write.csv(xbio, "biometrics.csv", row.names = FALSE)
  expect_message(
    expect_error(bio <- loadBio("biometrics.csv", tz = "Europe/Copenhagen"),
      "Signal 4456 is duplicated in the biometrics.", fixed = TRUE),
    "M: Multi-sensor tags detected. These tags will be referred to by their lowest signal value.", fixed = TRUE)
  # With a Sensor.unit column present (even all-NA), only the message fires.
  xbio$Signal[1] <- "4453|4454"
  xbio$Sensor.unit <- NA
  write.csv(xbio, "biometrics.csv", row.names = FALSE)
  expect_message(bio <- loadBio("biometrics.csv", tz = "Europe/Copenhagen"),
    "M: Multi-sensor tags detected. These tags will be referred to by their lowest signal value.", fixed = TRUE)
  file.remove("biometrics.csv")
})
# Data-frame inputs: factor columns (other than Release.site/Group) come
# back as character.
test_that("loadBio converts factors to character", {
  xbio <- example.biometrics
  xbio$temp <- "dummy text"
  xbio$temp <- as.factor(xbio$temp)
  output <- loadBio(xbio, tz = "Europe/Copenhagen")
  expect_equal(typeof(output$temp), "character")
  # note: The Release.site and Group columns are converted into a factor within loadBio
})
# Teardown: restore the working directory and timezone, drop test objects.
setwd(tests.home)
if (is.na(oldtz)) Sys.unsetenv("TZ") else Sys.setenv(TZ = oldtz)
rm(list = ls())
| /tests/testthat/test_loadBio.R | no_license | ec564/actel | R | false | false | 8,947 | r | skip_on_cran()
oldtz <- Sys.getenv('TZ', unset = NA)
Sys.setenv(TZ = 'UTC')
tests.home <- getwd()
setwd(tempdir())
test_that("loadBio stops if arguments or file are missing", {
# Missing arguments
expect_error(loadBio(), "'input' is missing.", fixed = TRUE)
expect_error(loadBio(input = "test"), "'tz' is missing.", fixed = TRUE)
# Missing file
expect_error(loadBio("biometrics.csv", tz = "Europe/Copenhagen"),
"Could not find a 'biometrics.csv' file in the working directory.", fixed = TRUE)
})
test_that("loadBio stops if there are duplicated columns", {
# Duplicated cols
bio <- example.biometrics
colnames(bio)[2:3] <- "Group"
write.csv(bio, "biometrics.csv", row.names = FALSE)
expect_error(loadBio("biometrics.csv", tz = "Europe/Copenhagen"),
"The following columns are duplicated in the biometrics: 'Group'.", fixed = TRUE)
file.remove("biometrics.csv")
})
test_that("loadBio fails if needed columns are missing", {
# Missing release date
bio <- example.biometrics
colnames(bio)[1] <- "test"
write.csv(bio, "biometrics.csv", row.names = FALSE)
expect_error(loadBio("biometrics.csv", tz = "Europe/Copenhagen"),
"The biometrics must contain an 'Release.date' column.", fixed = TRUE)
# Missing Signal column
bio <- example.biometrics
colnames(bio)[4] <- "test"
write.csv(bio, "biometrics.csv", row.names = FALSE)
expect_error(loadBio("biometrics.csv", tz = "Europe/Copenhagen"),
"The biometrics must contain an 'Signal' column.", fixed = TRUE)
# No release sites
bio <- example.biometrics
write.csv(bio[, -2], "biometrics.csv", row.names = FALSE)
expect_message(loadBio("biometrics.csv", tz = "Europe/Copenhagen"),
"M: No Release site has been indicated in the biometrics. Creating a 'Release.site' column to avoid function failure. Filling with 'unspecified'.", fixed = TRUE)
# no group column
bio <- example.biometrics
write.csv(bio[, -5], "biometrics.csv", row.names = FALSE)
expect_message(loadBio("biometrics.csv", tz = "Europe/Copenhagen"),
"M: No 'Group' column found in the biometrics. Assigning all animals to group 'All'.", fixed = TRUE)
file.remove("biometrics.csv")
})
# Content validation: malformed dates, non-integer or duplicated signals,
# and blank/overlapping group and release-site values.
test_that("loadBio stops if column content is unexpected", {
  # Badly formated release date
  bio <- example.biometrics
  bio$Release.date <- as.character(bio$Release.date)
  bio$Release.date[1] <- "test"
  # BUG FIX: this fixture was written and read as "biometrics" (no .csv
  # extension), so the file.remove("biometrics.csv") at the end of this
  # test never deleted it and the stray file leaked into tempdir. Use the
  # same name as every other fixture in this file.
  write.csv(bio, "biometrics.csv", row.names = FALSE)
  expect_error(loadBio("biometrics.csv", tz = "Europe/Copenhagen"),
    "Not all values in the 'Release.date' column appear to be in a 'yyyy-mm-dd hh:mm' format (seconds are optional). Please double-check the biometrics.", fixed = TRUE)
  # Badly coded release date
  bio <- example.biometrics
  bio$Release.date <- as.character(bio$Release.date)
  bio$Release.date[1] <- "2999-19-39 29:59"
  write.csv(bio, "biometrics.csv", row.names = FALSE)
  expect_error(loadBio("biometrics.csv", tz = "Europe/Copenhagen"),
    "Could not recognise the data in the 'Release.date' column as POSIX-compatible timestamps. Please double-check the biometrics.", fixed = TRUE)
  # Badly formatted signal
  bio <- example.biometrics
  bio$Signal[1] <- "test"
  write.csv(bio, "biometrics.csv", row.names = FALSE)
  expect_error(loadBio("biometrics.csv", tz = "Europe/Copenhagen"),
    "Could not recognise the data in the 'Signal' column as integers. Please double-check the biometrics.", fixed = TRUE)
  # Missing signal data
  bio <- example.biometrics
  bio$Signal[1] <- NA
  write.csv(bio, "biometrics.csv", row.names = FALSE)
  expect_error(loadBio("biometrics.csv", tz = "Europe/Copenhagen"),
    "Some animals have no 'Signal' information. Please double-check the biometrics.", fixed = TRUE)
  # one duplicated signal
  bio <- example.biometrics
  bio$Signal[1:2] <- 1234
  write.csv(bio, "biometrics.csv", row.names = FALSE)
  expect_error(loadBio("biometrics.csv", tz = "Europe/Copenhagen"),
    "Signal 1234 is duplicated in the biometrics.", fixed = TRUE)
  # multiple duplicated signal
  bio <- example.biometrics
  bio$Signal[1:4] <- c(1234, 1234, 5678, 5678)
  write.csv(bio, "biometrics.csv", row.names = FALSE)
  expect_error(loadBio("biometrics.csv", tz = "Europe/Copenhagen"),
    "Signals 1234, 5678 are duplicated in the biometrics.", fixed = TRUE)
  # some animals missing release site information (NA and empty-string cases)
  bio <- example.biometrics
  bio$Release.site[1] <- NA
  write.csv(bio, "biometrics.csv", row.names = FALSE)
  expect_warning(loadBio("biometrics.csv", tz = "Europe/Copenhagen"),
    "Some animals contain no release site information. You may want to double-check the data.\n   Filling the blanks with 'unspecified'.", fixed = TRUE)
  bio <- example.biometrics
  bio$Release.site <- as.character(bio$Release.site)
  bio$Release.site[1] <- ""
  write.csv(bio, "biometrics.csv", row.names = FALSE)
  expect_warning(loadBio("biometrics.csv", tz = "Europe/Copenhagen"),
    "Some animals contain no release site information. You may want to double-check the data.\n   Filling the blanks with 'unspecified'.", fixed = TRUE)
  # some animals missing group information (NA and empty-string cases)
  bio <- example.biometrics
  bio$Group[1] <- NA
  write.csv(bio, "biometrics.csv", row.names = FALSE)
  expect_warning(loadBio("biometrics.csv", tz = "Europe/Copenhagen"),
    "Some animals contain no group information. You may want to double-check the data.\n   Filling the blanks with 'unspecified'.", fixed = TRUE)
  bio <- example.biometrics
  bio$Group <- as.character(bio$Group)
  bio$Group[1] <- ""
  write.csv(bio, "biometrics.csv", row.names = FALSE)
  expect_warning(loadBio("biometrics.csv", tz = "Europe/Copenhagen"),
    "Some animals contain no group information. You may want to double-check the data.\n   Filling the blanks with 'unspecified'.", fixed = TRUE)
  # Some groups are contained within others
  bio <- example.biometrics
  levels(bio$Group) <- c("A", "AB")
  write.csv(bio, "biometrics.csv", row.names = FALSE)
  expect_warning(output <- loadBio("biometrics.csv", tz = "Europe/Copenhagen"),
    "Group 'A' is contained within other groups. To avoid function failure, a number will be appended to this group.", fixed = TRUE)
  expect_equal(levels(output$Group), c("A_1", "AB"))
  rm(output)
  # Some groups have dots
  bio <- example.biometrics
  levels(bio$Group) <- c("A", "B.C")
  write.csv(bio, "biometrics.csv", row.names = FALSE)
  expect_message(output <- loadBio("biometrics.csv", tz = "Europe/Copenhagen"),
    "M: Some groups contain one or more '.' characters. To avoid function failure, these will be replaced with '_'.", fixed = TRUE)
  expect_equal(levels(output$Group), c("A", "B_C"))
  rm(output)
  file.remove("biometrics.csv")
})
# Round trip: writing the packaged example and loading it back must
# reproduce the original object exactly.
test_that("loadBio output matches example.biometrics", {
  # Output is correct
  write.csv(example.biometrics, "biometrics.csv", row.names = FALSE)
  bio <- loadBio("biometrics.csv", tz = "Europe/Copenhagen")
  expect_equal(bio, example.biometrics)
  file.remove("biometrics.csv")
})
# Multi-sensor tags: signals may be written as "low|high"; validation must
# still run per signal, and a Sensor.unit column is needed for assignment.
test_that("loadBio can handle multi-sensor tags.", {
  xbio <- example.biometrics[-(1:4), ]
  xbio$Signal <- as.character(xbio$Signal)
  xbio$Signal[1] <- "4453|4454"
  write.csv(xbio, "biometrics.csv", row.names = FALSE)
  # Valid multi-signal but no Sensor.unit column -> warning, not error.
  expect_message(
    expect_warning(bio <- loadBio("biometrics.csv", tz = "Europe/Copenhagen"),
      "Tags with multiple sensors are listed in the biometrics, but a 'Sensor.unit' column could not be found. Skipping sensor unit assignment.", fixed = TRUE),
    "M: Multi-sensor tags detected. These tags will be referred to by their lowest signal value.", fixed = TRUE)
  # Non-integer component inside a multi-signal entry -> error.
  xbio$Signal[1] <- "test|4454"
  write.csv(xbio, "biometrics.csv", row.names = FALSE)
  expect_message(
    expect_error(bio <- loadBio("biometrics.csv", tz = "Europe/Copenhagen"),
      "Could not recognise the data in the 'Signal' column as integers. Please double-check the biometrics.", fixed = TRUE),
    "M: Multi-sensor tags detected. These tags will be referred to by their lowest signal value.", fixed = TRUE)
  # A component that collides with another animal's signal -> error.
  xbio$Signal[1] <- "4455|4456"
  write.csv(xbio, "biometrics.csv", row.names = FALSE)
  expect_message(
    expect_error(bio <- loadBio("biometrics.csv", tz = "Europe/Copenhagen"),
      "Signal 4456 is duplicated in the biometrics.", fixed = TRUE),
    "M: Multi-sensor tags detected. These tags will be referred to by their lowest signal value.", fixed = TRUE)
  # With a Sensor.unit column present (even all-NA), only the message fires.
  xbio$Signal[1] <- "4453|4454"
  xbio$Sensor.unit <- NA
  write.csv(xbio, "biometrics.csv", row.names = FALSE)
  expect_message(bio <- loadBio("biometrics.csv", tz = "Europe/Copenhagen"),
    "M: Multi-sensor tags detected. These tags will be referred to by their lowest signal value.", fixed = TRUE)
  file.remove("biometrics.csv")
})
# Data-frame inputs: factor columns (other than Release.site/Group) come
# back as character.
test_that("loadBio converts factors to character", {
  xbio <- example.biometrics
  xbio$temp <- "dummy text"
  xbio$temp <- as.factor(xbio$temp)
  output <- loadBio(xbio, tz = "Europe/Copenhagen")
  expect_equal(typeof(output$temp), "character")
  # note: The Release.site and Group columns are converted into a factor within loadBio
})
# Teardown: restore the working directory and timezone, drop test objects.
setwd(tests.home)
if (is.na(oldtz)) Sys.unsetenv("TZ") else Sys.setenv(TZ = oldtz)
rm(list = ls())
|
# Plant competition data, Ecology Lab
# @author Benjamin Ahn 03_31_2020
# @version 1.0

# --- import data ---
library(dplyr)
library(ggplot2)

experimental_data <- read.csv(file.choose(new = FALSE))

# --- split into treatment groups ---
intraspecific <- experimental_data[1:12, ]
interspecific <- experimental_data[c(5:13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 33, 35, 37, 39, 41, 43), ]

intraspecific.lm <- lm(intraspecific$BiomassPerPlant ~ intraspecific$Treatment)
interspecific.lm <- lm(interspecific$BiomassPerPlant ~ interspecific$Treatment)

# --- statistical tests: one-way ANOVA followed by Tukey's HSD ---
intraspecific.aov <- aov(intraspecific.lm)
TukeyHSD(intraspecific.aov)
interspecific.aov <- aov(interspecific.lm)
TukeyHSD(interspecific.aov)

# --- boxplots: all treatments ---
treatment_biomass <- select(experimental_data, "Treatment", "BiomassPerPlant")
treatment_biomass$Treatment <- factor(
  treatment_biomass$Treatment,
  levels = c("2W", "4W", "2W2R", "6W", "3W3R", "4W2R", "2W4R")
)
treatment_biomass$BiomassPerPlant <- as.numeric(as.character(treatment_biomass$BiomassPerPlant))
plotmain <- ggplot(treatment_biomass, aes(x = Treatment, y = BiomassPerPlant)) +
  geom_boxplot() +
  labs(x = "Experimental Groups", y = "Biomass Per Plant")
plotmain_points <- plotmain + geom_jitter(shape = 16, position = position_jitter(0.2))

# --- boxplots: intraspecific treatments only ---
treatment_biomass_intra <- select(intraspecific, "Treatment", "BiomassPerPlant")
treatment_biomass_intra$BiomassPerPlant <- as.numeric(as.character(treatment_biomass_intra$BiomassPerPlant))
plot_intra <- ggplot(treatment_biomass_intra, aes(x = Treatment, y = BiomassPerPlant)) +
  geom_boxplot() +
  labs(x = "Experimental Groups", y = "Biomass Per Plant")
plot_intra_points <- plot_intra + geom_jitter(shape = 16, position = position_jitter(0.2))

# --- boxplots: interspecific treatments only ---
treatment_biomass_inter <- select(interspecific, "Treatment", "BiomassPerPlant")
treatment_biomass_inter$Treatment <- factor(
  treatment_biomass_inter$Treatment,
  levels = c("4W", "2W2R", "6W", "3W3R", "4W2R", "2W4R")
)
treatment_biomass_inter$BiomassPerPlant <- as.numeric(as.character(treatment_biomass_inter$BiomassPerPlant))
plot_inter <- ggplot(treatment_biomass_inter, aes(x = Treatment, y = BiomassPerPlant)) +
  geom_boxplot() +
  labs(x = "Experimental Groups", y = "Biomass Per Plant")
plot_inter_points <- plot_inter + geom_jitter(shape = 16, position = position_jitter(0.2))
end | /wheat_radish_dataanalysis.r | no_license | bioben/ecology_plant_competition_analysis | R | false | false | 2,308 | r | #plant competition data, Ecology Lab
# Plant competition data, Ecology Lab
# @author Benjamin Ahn 03_31_2020
# @version 1.0

# --- import data ---
library(dplyr)
library(ggplot2)

experimental_data <- read.csv(file.choose(new = FALSE))

# --- split into treatment groups ---
intraspecific <- experimental_data[1:12, ]
interspecific <- experimental_data[c(5:13, 15, 17, 19, 21, 23, 25, 27, 29, 31, 33, 35, 37, 39, 41, 43), ]

intraspecific.lm <- lm(intraspecific$BiomassPerPlant ~ intraspecific$Treatment)
interspecific.lm <- lm(interspecific$BiomassPerPlant ~ interspecific$Treatment)

# --- statistical tests: one-way ANOVA followed by Tukey's HSD ---
intraspecific.aov <- aov(intraspecific.lm)
TukeyHSD(intraspecific.aov)
interspecific.aov <- aov(interspecific.lm)
TukeyHSD(interspecific.aov)

# --- boxplots: all treatments ---
treatment_biomass <- select(experimental_data, "Treatment", "BiomassPerPlant")
treatment_biomass$Treatment <- factor(
  treatment_biomass$Treatment,
  levels = c("2W", "4W", "2W2R", "6W", "3W3R", "4W2R", "2W4R")
)
treatment_biomass$BiomassPerPlant <- as.numeric(as.character(treatment_biomass$BiomassPerPlant))
plotmain <- ggplot(treatment_biomass, aes(x = Treatment, y = BiomassPerPlant)) +
  geom_boxplot() +
  labs(x = "Experimental Groups", y = "Biomass Per Plant")
plotmain_points <- plotmain + geom_jitter(shape = 16, position = position_jitter(0.2))

# --- boxplots: intraspecific treatments only ---
treatment_biomass_intra <- select(intraspecific, "Treatment", "BiomassPerPlant")
treatment_biomass_intra$BiomassPerPlant <- as.numeric(as.character(treatment_biomass_intra$BiomassPerPlant))
plot_intra <- ggplot(treatment_biomass_intra, aes(x = Treatment, y = BiomassPerPlant)) +
  geom_boxplot() +
  labs(x = "Experimental Groups", y = "Biomass Per Plant")
plot_intra_points <- plot_intra + geom_jitter(shape = 16, position = position_jitter(0.2))

# --- boxplots: interspecific treatments only ---
treatment_biomass_inter <- select(interspecific, "Treatment", "BiomassPerPlant")
treatment_biomass_inter$Treatment <- factor(
  treatment_biomass_inter$Treatment,
  levels = c("4W", "2W2R", "6W", "3W3R", "4W2R", "2W4R")
)
treatment_biomass_inter$BiomassPerPlant <- as.numeric(as.character(treatment_biomass_inter$BiomassPerPlant))
plot_inter <- ggplot(treatment_biomass_inter, aes(x = Treatment, y = BiomassPerPlant)) +
  geom_boxplot() +
  labs(x = "Experimental Groups", y = "Biomass Per Plant")
plot_inter_points <- plot_inter + geom_jitter(shape = 16, position = position_jitter(0.2))
end |
\name{BIC.ssym}
\alias{BIC.ssym}
\title{BIC.ssym}
\description{
\bold{BIC.ssym} calculates the goodness-of-fit statistic BIC from an object of class \code{"ssym"}.}
| /man/BIC.ssym.Rd | no_license | cran/ssym | R | false | false | 165 | rd | \name{BIC.ssym}
\alias{BIC.ssym}
\title{BIC.ssym}
\description{
\bold{BIC.ssym} calculates the goodness-of-fit statistic BIC from an object of class \code{"ssym"}.}
|
# Auto-extracted example for Ecfun::readNIPA (reads NIPA data tables).
library(Ecfun)
### Name: readNIPA
### Title: Read a National Income and Product Accounts data table
### Aliases: readNIPA
### Keywords: IO
### ** Examples
# Find demoFiles/*.csv
# Locate the demo CSV files shipped in the Ecdat package's demoFiles folder.
demoDir <- system.file('demoFiles', package='Ecdat')
# Outer parentheses make the assignment also print the discovered paths.
(demoCsv <- dir(demoDir, pattern='csv$', full.names=TRUE))
# Parse the table(s); str() shows the resulting structure.
nipa6.16 <- readNIPA(demoCsv)
str(nipa6.16)
| /data/genthat_extracted_code/Ecfun/examples/readNIPA.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 343 | r | library(Ecfun)
### Name: readNIPA
### Title: Read a National Income and Product Accounts data table
### Aliases: readNIPA
### Keywords: IO
### ** Examples
# Find demoFiles/*.csv
# Locate the demo CSV files shipped in the Ecdat package's demoFiles folder.
demoDir <- system.file('demoFiles', package='Ecdat')
# Outer parentheses make the assignment also print the discovered paths.
(demoCsv <- dir(demoDir, pattern='csv$', full.names=TRUE))
# Parse the table(s); str() shows the resulting structure.
nipa6.16 <- readNIPA(demoCsv)
str(nipa6.16)
|
n = 50; # sample size (rows of each simulated design matrix X) shared by all four studies below
test <- function(X, y, beta) {
  # Compare five estimators on one simulated dataset and return the squared
  # estimation error ||beta - beta_hat||^2 for each.
  #   X    : n x p numeric design matrix
  #   y    : response vector
  #   beta : true coefficient vector used to generate y
  # Returns a named list: lasso, ridge, forward, MCP, SCAD.

  # lasso -- BUG FIX: cv.ncvreg() defaults to penalty = "MCP", so the
  # original call here silently fitted MCP instead of the lasso.
  fit = cv.ncvreg(X, y, penalty = "lasso");
  beta_lasso = fit$fit$beta[, which.min(fit$cve)]
  error_lasso = sum((beta - beta_lasso[2:length(beta_lasso)])^2); # drop intercept
  # ridge -- GCV-selected ridge regression from the hdrm package.
  library("hdrm");
  fit = ridge(X, y);
  beta_ridge = fit$beta[, which.min(fit$GCV)];
  error_ridge = sum((beta - beta_ridge[2:length(beta_ridge)])^2);
  # stepwise selection with a BIC penalty (k = log(n)).
  # NOTE(review): step() starts from the FULL model with its default
  # direction = "both", so this is stepwise/backward elimination rather than
  # pure forward selection, despite the "forward" label on the result.
  X = as.data.frame(X);
  variables = colnames(X);
  form = as.formula(paste("y ~ 0+", paste(variables, collapse = "+")));
  fit = step(lm(form, data = X), k = log(nrow(X)));
  beta_forward = as.data.frame(matrix(rep(0, length(beta)), nrow = 1));
  beta_forward[names(fit$coefficients)] = fit$coefficients; # unselected terms stay 0
  error_forward = sum((beta - beta_forward)^2);
  # MCP
  fit = cv.ncvreg(X, y, penalty = "MCP");
  beta_MCP = fit$fit$beta[, which.min(fit$cve)];
  error_MCP = sum((beta - beta_MCP[2:length(beta_MCP)])^2);
  # SCAD
  fit = cv.ncvreg(X, y, penalty = "SCAD");
  beta_SCAD = fit$fit$beta[, which.min(fit$cve)];
  error_SCAD = sum((beta - beta_SCAD[2:length(beta_SCAD)])^2);
  return(list(lasso=error_lasso, ridge=error_ridge, forward=error_forward, MCP=error_MCP, SCAD=error_SCAD));
}
test_without_forward <- function(X, y, beta) {
  # Same comparison as test(), but without the (expensive) stepwise
  # selection step -- used when p > n. Returns a named list:
  # lasso, ridge, MCP, SCAD (NO $forward component).

  # lasso -- BUG FIX: cv.ncvreg() defaults to penalty = "MCP", so the
  # original call here silently fitted MCP instead of the lasso.
  fit = cv.ncvreg(X, y, penalty = "lasso");
  beta_lasso = fit$fit$beta[, which.min(fit$cve)]
  error_lasso = sum((beta - beta_lasso[2:length(beta_lasso)])^2); # drop intercept
  # ridge -- GCV-selected ridge regression from the hdrm package.
  library("hdrm");
  fit = ridge(X, y);
  beta_ridge = fit$beta[, which.min(fit$GCV)];
  error_ridge = sum((beta - beta_ridge[2:length(beta_ridge)])^2);
  # MCP
  fit = cv.ncvreg(X, y, penalty = "MCP");
  beta_MCP = fit$fit$beta[, which.min(fit$cve)];
  error_MCP = sum((beta - beta_MCP[2:length(beta_MCP)])^2);
  # SCAD
  fit = cv.ncvreg(X, y, penalty = "SCAD");
  beta_SCAD = fit$fit$beta[, which.min(fit$cve)];
  error_SCAD = sum((beta - beta_SCAD[2:length(beta_SCAD)])^2);
  return(list(lasso=error_lasso, ridge=error_ridge, MCP=error_MCP, SCAD=error_SCAD));
}
# problem 1: p = 25, two strong signals (beta_1 = 1, beta_2 = -1).
# Each study averages the squared estimation error over 100 replications.
forward_error = 0;
lasso_error = 0;
ridge_error = 0;
mcp_error = 0;
scad_error = 0;
for(i in 1:100) {
  p = 25;
  X = matrix(rnorm(n*p), nrow = n);
  beta = rep(0, p);
  beta[1] = 1; beta[2] = -1;
  y = rnorm(n, mean = X%*%beta);
  res = test(X, y, beta);
  forward_error = forward_error + res$forward;
  lasso_error = lasso_error + res$lasso;
  ridge_error = ridge_error + res$ridge;
  mcp_error = mcp_error + res$MCP;
  scad_error = scad_error + res$SCAD;
}
print(c(forward_error/100, lasso_error/100, ridge_error/100, mcp_error/100, scad_error/100))
# problem 2: p = 100 > n, same two strong signals. Stepwise selection is
# skipped here, so forward is reported as NA instead of a misleading 0.
forward_error = NA;
lasso_error = 0;
ridge_error = 0;
mcp_error = 0;
scad_error = 0;
for(i in 1:100) {
  p = 100;
  X = matrix(rnorm(n*p), nrow = n);
  beta = rep(0, p);
  beta[1] = 1; beta[2] = -1;
  y = rnorm(n, mean = X%*%beta);
  res = test_without_forward(X, y, beta);
  lasso_error = lasso_error + res$lasso;
  ridge_error = ridge_error + res$ridge;
  mcp_error = mcp_error + res$MCP;
  scad_error = scad_error + res$SCAD;
}
print(c(forward_error/100, lasso_error/100, ridge_error/100, mcp_error/100, scad_error/100))
# problem 3: p = 25, eight moderate signals (+/- 0.5).
forward_error = 0;
lasso_error = 0;
ridge_error = 0;
mcp_error = 0;
scad_error = 0;
for(i in 1:100) {
  p = 25;
  X = matrix(rnorm(n*p), nrow = n);
  beta = rep(0, p);
  beta[1:4] = 0.5; beta[5:8] = -0.5;
  y = rnorm(n, mean = X%*%beta);
  res = test(X, y, beta);
  forward_error = forward_error + res$forward;
  lasso_error = lasso_error + res$lasso;
  ridge_error = ridge_error + res$ridge;
  mcp_error = mcp_error + res$MCP;
  scad_error = scad_error + res$SCAD;
}
print(c(forward_error/100, lasso_error/100, ridge_error/100, mcp_error/100, scad_error/100))
# problem 4: p = 100, thirty-two weak signals (+/- 0.25); stepwise skipped.
forward_error = NA; # test_without_forward() returns no $forward component
lasso_error = 0;
ridge_error = 0;
mcp_error = 0;
scad_error = 0;
for(i in 1:100) {
  p = 100;
  X = matrix(rnorm(n*p), nrow = n);
  beta = rep(0, p);
  beta[1:16] = 0.25; beta[17:32] = -0.25;
  y = rnorm(n, mean = X%*%beta);
  res = test_without_forward(X, y, beta);
  # BUG FIX: the original accumulated res$forward here, but this result has
  # no $forward element, so forward_error silently became numeric(0) and was
  # dropped from the final print.
  lasso_error = lasso_error + res$lasso;
  ridge_error = ridge_error + res$ridge;
  mcp_error = mcp_error + res$MCP;
  scad_error = scad_error + res$SCAD;
}
print(c(forward_error/100, lasso_error/100, ridge_error/100, mcp_error/100, scad_error/100)) | /High dimensional data analysis/Homework 5/problem3.7.R | no_license | Orcuslc/Courses-2019 | R | false | false | 4,196 | r | n = 50;
test <- function(X, y, beta) {
  # Compare five estimators on one simulated dataset and return the squared
  # estimation error ||beta - beta_hat||^2 for each.
  #   X    : n x p numeric design matrix
  #   y    : response vector
  #   beta : true coefficient vector used to generate y
  # Returns a named list: lasso, ridge, forward, MCP, SCAD.

  # lasso -- BUG FIX: cv.ncvreg() defaults to penalty = "MCP", so the
  # original call here silently fitted MCP instead of the lasso.
  fit = cv.ncvreg(X, y, penalty = "lasso");
  beta_lasso = fit$fit$beta[, which.min(fit$cve)]
  error_lasso = sum((beta - beta_lasso[2:length(beta_lasso)])^2); # drop intercept
  # ridge -- GCV-selected ridge regression from the hdrm package.
  library("hdrm");
  fit = ridge(X, y);
  beta_ridge = fit$beta[, which.min(fit$GCV)];
  error_ridge = sum((beta - beta_ridge[2:length(beta_ridge)])^2);
  # stepwise selection with a BIC penalty (k = log(n)).
  # NOTE(review): step() starts from the FULL model with its default
  # direction = "both", so this is stepwise/backward elimination rather than
  # pure forward selection, despite the "forward" label on the result.
  X = as.data.frame(X);
  variables = colnames(X);
  form = as.formula(paste("y ~ 0+", paste(variables, collapse = "+")));
  fit = step(lm(form, data = X), k = log(nrow(X)));
  beta_forward = as.data.frame(matrix(rep(0, length(beta)), nrow = 1));
  beta_forward[names(fit$coefficients)] = fit$coefficients; # unselected terms stay 0
  error_forward = sum((beta - beta_forward)^2);
  # MCP
  fit = cv.ncvreg(X, y, penalty = "MCP");
  beta_MCP = fit$fit$beta[, which.min(fit$cve)];
  error_MCP = sum((beta - beta_MCP[2:length(beta_MCP)])^2);
  # SCAD
  fit = cv.ncvreg(X, y, penalty = "SCAD");
  beta_SCAD = fit$fit$beta[, which.min(fit$cve)];
  error_SCAD = sum((beta - beta_SCAD[2:length(beta_SCAD)])^2);
  return(list(lasso=error_lasso, ridge=error_ridge, forward=error_forward, MCP=error_MCP, SCAD=error_SCAD));
}
test_without_forward <- function(X, y, beta) {
  # Same comparison as test(), but without the (expensive) stepwise
  # selection step -- used when p > n. Returns a named list:
  # lasso, ridge, MCP, SCAD (NO $forward component).

  # lasso -- BUG FIX: cv.ncvreg() defaults to penalty = "MCP", so the
  # original call here silently fitted MCP instead of the lasso.
  fit = cv.ncvreg(X, y, penalty = "lasso");
  beta_lasso = fit$fit$beta[, which.min(fit$cve)]
  error_lasso = sum((beta - beta_lasso[2:length(beta_lasso)])^2); # drop intercept
  # ridge -- GCV-selected ridge regression from the hdrm package.
  library("hdrm");
  fit = ridge(X, y);
  beta_ridge = fit$beta[, which.min(fit$GCV)];
  error_ridge = sum((beta - beta_ridge[2:length(beta_ridge)])^2);
  # MCP
  fit = cv.ncvreg(X, y, penalty = "MCP");
  beta_MCP = fit$fit$beta[, which.min(fit$cve)];
  error_MCP = sum((beta - beta_MCP[2:length(beta_MCP)])^2);
  # SCAD
  fit = cv.ncvreg(X, y, penalty = "SCAD");
  beta_SCAD = fit$fit$beta[, which.min(fit$cve)];
  error_SCAD = sum((beta - beta_SCAD[2:length(beta_SCAD)])^2);
  return(list(lasso=error_lasso, ridge=error_ridge, MCP=error_MCP, SCAD=error_SCAD));
}
# problem 1: p = 25, two strong signals (beta_1 = 1, beta_2 = -1).
# Each study averages the squared estimation error over 100 replications.
forward_error = 0;
lasso_error = 0;
ridge_error = 0;
mcp_error = 0;
scad_error = 0;
for(i in 1:100) {
  p = 25;
  X = matrix(rnorm(n*p), nrow = n);
  beta = rep(0, p);
  beta[1] = 1; beta[2] = -1;
  y = rnorm(n, mean = X%*%beta);
  res = test(X, y, beta);
  forward_error = forward_error + res$forward;
  lasso_error = lasso_error + res$lasso;
  ridge_error = ridge_error + res$ridge;
  mcp_error = mcp_error + res$MCP;
  scad_error = scad_error + res$SCAD;
}
print(c(forward_error/100, lasso_error/100, ridge_error/100, mcp_error/100, scad_error/100))
# problem 2: p = 100 > n, same two strong signals. Stepwise selection is
# skipped here, so forward is reported as NA instead of a misleading 0.
forward_error = NA;
lasso_error = 0;
ridge_error = 0;
mcp_error = 0;
scad_error = 0;
for(i in 1:100) {
  p = 100;
  X = matrix(rnorm(n*p), nrow = n);
  beta = rep(0, p);
  beta[1] = 1; beta[2] = -1;
  y = rnorm(n, mean = X%*%beta);
  res = test_without_forward(X, y, beta);
  lasso_error = lasso_error + res$lasso;
  ridge_error = ridge_error + res$ridge;
  mcp_error = mcp_error + res$MCP;
  scad_error = scad_error + res$SCAD;
}
print(c(forward_error/100, lasso_error/100, ridge_error/100, mcp_error/100, scad_error/100))
# problem 3: p = 25, eight moderate signals (+/- 0.5).
forward_error = 0;
lasso_error = 0;
ridge_error = 0;
mcp_error = 0;
scad_error = 0;
for(i in 1:100) {
  p = 25;
  X = matrix(rnorm(n*p), nrow = n);
  beta = rep(0, p);
  beta[1:4] = 0.5; beta[5:8] = -0.5;
  y = rnorm(n, mean = X%*%beta);
  res = test(X, y, beta);
  forward_error = forward_error + res$forward;
  lasso_error = lasso_error + res$lasso;
  ridge_error = ridge_error + res$ridge;
  mcp_error = mcp_error + res$MCP;
  scad_error = scad_error + res$SCAD;
}
print(c(forward_error/100, lasso_error/100, ridge_error/100, mcp_error/100, scad_error/100))
# problem 4: p = 100, thirty-two weak signals (+/- 0.25); stepwise skipped.
forward_error = NA; # test_without_forward() returns no $forward component
lasso_error = 0;
ridge_error = 0;
mcp_error = 0;
scad_error = 0;
for(i in 1:100) {
  p = 100;
  X = matrix(rnorm(n*p), nrow = n);
  beta = rep(0, p);
  beta[1:16] = 0.25; beta[17:32] = -0.25;
  y = rnorm(n, mean = X%*%beta);
  res = test_without_forward(X, y, beta);
  # BUG FIX: the original accumulated res$forward here, but this result has
  # no $forward element, so forward_error silently became numeric(0) and was
  # dropped from the final print.
  lasso_error = lasso_error + res$lasso;
  ridge_error = ridge_error + res$ridge;
  mcp_error = mcp_error + res$MCP;
  scad_error = scad_error + res$SCAD;
}
print(c(forward_error/100, lasso_error/100, ridge_error/100, mcp_error/100, scad_error/100)) |
# Logistic Regression : Predict Purchase
# Import the dataset (local CSV read for reference; the Google sheet is used).
df1 = read.csv('./data/logr2.csv')
head(df1)
url="https://docs.google.com/spreadsheets/d/1Md_ro2t3M7nA9JMH1DsE12jfeX7qq-UPw6p8WQd6A2Y/edit#gid=120271978"
library(gsheet)
df2 = as.data.frame(gsheet2tbl(url))
head(df2)
dataset=df2 # use df1 for the local CSV or df2 for the Google-sheets import
head(dataset)
str(dataset)
summary(dataset)
dim(dataset)
View(dataset) # shows dataset in a new tab like excel
dataset$gender = factor(dataset$gender)
# Split the dataset into the Training set and Test set (75/25, fixed seed).
#install.packages('caTools')
library(caTools)
set.seed(2000)
split = sample.split(dataset$purchased, SplitRatio = 0.75)
training_set = subset(dataset, split == TRUE)
test_set = subset(dataset, split == FALSE)
dim(dataset); dim(training_set); dim(test_set)
names(dataset)
# Logistic model on the training set (generalised linear modelling).
logitmodel1 = glm(purchased ~ gender + age + salary, family = binomial, data = training_set)
summary(logitmodel1)
# gender is not significant, so it is dropped in the second model
logitmodel2 = glm(purchased ~ age + salary, family = binomial, data = training_set)
summary(logitmodel2)
#summary(logitmodel2)$coefficient # they are in log-odds terms
# logitmodel1 is kept for prediction, as the AIC did not improve after dropping gender.
#predict on sample data taking logitmodel2
#test_set2 = data.frame(age=c(40,65), salary=c(40000, 50000))
#(prob_pred2 = predict(logitmodel2, type = 'response', newdata = test_set2))
#cbind(test_set2, prob_pred2)
#age=65 person likely to purchase
# Predict on two hand-made profiles using logitmodel1.
test_set2 = data.frame(age=c(40,65), gender=c('Male','Female'), salary=c(40000, 50000))
(prob_pred2 = predict(logitmodel1, type = 'response', newdata = test_set2))
cbind(test_set2, prob_pred2)
# the age=65 person is likely to purchase (prob ~98%)
# Predicting the test-set results.
head(test_set)
prob_pred = predict(logitmodel1, type = 'response', newdata = test_set) # take logitmodel2 if required
summary(prob_pred)
head(cbind(test_set,prob_pred ),10)
# if prob > 0.5 classify as 1, else 0
y_pred = ifelse(prob_pred > 0.5, 1, 0)
head(cbind(test_set$purchased, y_pred),15)
# Making the confusion matrix.
# ROBUSTNESS FIX: refer to the outcome column by name instead of the
# positional index test_set[,5], which breaks silently if the sheet's
# column order ever changes.
cm = table(test_set$purchased, y_pred)
cm
library(caret)
caret::confusionMatrix(cm)
names(dataset)
| /LogR-purchase.R | no_license | arpitaaparajita/analytics | R | false | false | 2,237 | r | # Logistic Regression : Predict Purchase
# Import the dataset (local CSV read for reference; the Google sheet is used).
df1 = read.csv('./data/logr2.csv')
head(df1)
url="https://docs.google.com/spreadsheets/d/1Md_ro2t3M7nA9JMH1DsE12jfeX7qq-UPw6p8WQd6A2Y/edit#gid=120271978"
library(gsheet)
df2 = as.data.frame(gsheet2tbl(url))
head(df2)
dataset=df2 # use df1 for the local CSV or df2 for the Google-sheets import
head(dataset)
str(dataset)
summary(dataset)
dim(dataset)
View(dataset) # shows dataset in a new tab like excel
dataset$gender = factor(dataset$gender)
# Split the dataset into the Training set and Test set (75/25, fixed seed).
#install.packages('caTools')
library(caTools)
set.seed(2000)
split = sample.split(dataset$purchased, SplitRatio = 0.75)
training_set = subset(dataset, split == TRUE)
test_set = subset(dataset, split == FALSE)
dim(dataset); dim(training_set); dim(test_set)
names(dataset)
# Logistic model on the training set (generalised linear modelling).
logitmodel1 = glm(purchased ~ gender + age + salary, family = binomial, data = training_set)
summary(logitmodel1)
# gender is not significant, so it is dropped in the second model
logitmodel2 = glm(purchased ~ age + salary, family = binomial, data = training_set)
summary(logitmodel2)
#summary(logitmodel2)$coefficient # they are in log-odds terms
# logitmodel1 is kept for prediction, as the AIC did not improve after dropping gender.
#predict on sample data taking logitmodel2
#test_set2 = data.frame(age=c(40,65), salary=c(40000, 50000))
#(prob_pred2 = predict(logitmodel2, type = 'response', newdata = test_set2))
#cbind(test_set2, prob_pred2)
#age=65 person likely to purchase
# Predict on two hand-made profiles using logitmodel1.
test_set2 = data.frame(age=c(40,65), gender=c('Male','Female'), salary=c(40000, 50000))
(prob_pred2 = predict(logitmodel1, type = 'response', newdata = test_set2))
cbind(test_set2, prob_pred2)
# the age=65 person is likely to purchase (prob ~98%)
# Predicting the test-set results.
head(test_set)
prob_pred = predict(logitmodel1, type = 'response', newdata = test_set) # take logitmodel2 if required
summary(prob_pred)
head(cbind(test_set,prob_pred ),10)
# if prob > 0.5 classify as 1, else 0
y_pred = ifelse(prob_pred > 0.5, 1, 0)
head(cbind(test_set$purchased, y_pred),15)
# Making the confusion matrix.
# ROBUSTNESS FIX: refer to the outcome column by name instead of the
# positional index test_set[,5], which breaks silently if the sheet's
# column order ever changes.
cm = table(test_set$purchased, y_pred)
cm
library(caret)
caret::confusionMatrix(cm)
names(dataset)
|
library(rpart)
library(rpart.plot)
# Working with the Credits dataset to predict the Creditability variable.
# Load the data (the setwd path is machine-specific; adjust as needed).
getwd()
setwd("E:\\Decision_Trees")
df=read.csv("Credit.csv")
# Understand the dataset.
head(df)
summary(df)
str(df) # data_type mismatch is present in the dataset
# BUG FIX: is.null() on a data frame is always FALSE, so the original
# sum(is.null(df)) reported 0 regardless of missing values; is.na() is the
# correct element-wise missingness check.
sum(is.na(df))
# Convert the categorical columns (including the target) to factors in one
# pass instead of fourteen near-identical assignments.
factor_cols <- c("Creditability", "Account_Balance", "Credit_History",
                 "Purpose", "Value_Savings", "Emp_Len", "Sex", "Guarantors",
                 "asset", "Concurrent_Credits", "Type_of_apartment",
                 "Occupation", "Telephone", "Foreign_Worker")
df[factor_cols] <- lapply(df[factor_cols], as.factor)
str(df) # data types fixed
# Build train and test sets (70/30 split, reproducible seed).
set.seed(12345)
train_idx=sample(nrow(df),nrow(df)*0.7)
train_data=df[train_idx,]
test_data=df[-train_idx,]
dim(train_data)
dim(test_data)
# Our target variable is Creditability; visualize its distribution.
a=table(df$Creditability)
print(a)
# 0 are the defaulters, i.e. people who have not paid back the loan.
barplot(a,main="Creditability")
# Fit a classification tree with the default rpart control settings.
x=rpart.control()
basic_model=rpart(Creditability~.,data = train_data,method = "class",control = x)
summary(basic_model)
rpart.plot(basic_model)
# Evaluate on the held-out test set.
predicted_values=predict(basic_model,type = "class",newdata = test_data)
conf_table=table(predicted_values,test_data$Creditability)
# Install the evaluation packages only when missing (the original called
# install.packages() unconditionally on every run).
if (!requireNamespace("lattice", quietly = TRUE)) install.packages("lattice")
if (!requireNamespace("caret", quietly = TRUE)) install.packages("caret")
library(caret)
conf_mat=confusionMatrix(conf_table)
conf_mat
#tuning our model
| /DecisionTreeCreditsDataset.R | no_license | snakeyes95/Decision_Trees | R | false | false | 1,820 | r | library(rpart)
library(rpart)
library(rpart.plot)
# Working with the Credits dataset to predict the Creditability variable.
# Load the data (the setwd path is machine-specific; adjust as needed).
getwd()
setwd("E:\\Decision_Trees")
df=read.csv("Credit.csv")
# Understand the dataset.
head(df)
summary(df)
str(df) # data_type mismatch is present in the dataset
# BUG FIX: is.null() on a data frame is always FALSE, so the original
# sum(is.null(df)) reported 0 regardless of missing values; is.na() is the
# correct element-wise missingness check.
sum(is.na(df))
# Convert the categorical columns (including the target) to factors in one
# pass instead of fourteen near-identical assignments.
factor_cols <- c("Creditability", "Account_Balance", "Credit_History",
                 "Purpose", "Value_Savings", "Emp_Len", "Sex", "Guarantors",
                 "asset", "Concurrent_Credits", "Type_of_apartment",
                 "Occupation", "Telephone", "Foreign_Worker")
df[factor_cols] <- lapply(df[factor_cols], as.factor)
str(df) # data types fixed
# Build train and test sets (70/30 split, reproducible seed).
set.seed(12345)
train_idx=sample(nrow(df),nrow(df)*0.7)
train_data=df[train_idx,]
test_data=df[-train_idx,]
dim(train_data)
dim(test_data)
# Our target variable is Creditability; visualize its distribution.
a=table(df$Creditability)
print(a)
# 0 are the defaulters, i.e. people who have not paid back the loan.
barplot(a,main="Creditability")
# Fit a classification tree with the default rpart control settings.
x=rpart.control()
basic_model=rpart(Creditability~.,data = train_data,method = "class",control = x)
summary(basic_model)
rpart.plot(basic_model)
# Evaluate on the held-out test set.
predicted_values=predict(basic_model,type = "class",newdata = test_data)
conf_table=table(predicted_values,test_data$Creditability)
# Install the evaluation packages only when missing (the original called
# install.packages() unconditionally on every run).
if (!requireNamespace("lattice", quietly = TRUE)) install.packages("lattice")
if (!requireNamespace("caret", quietly = TRUE)) install.packages("caret")
library(caret)
conf_mat=confusionMatrix(conf_table)
conf_mat
#tuning our model
|
# Auto-extracted example stub for plgp::params.GP (no runnable example body).
library(plgp)
### Name: params.GP
### Title: Extract parameters from GP particles
### Aliases: params.GP params.CGP params.ConstGP
### Keywords: models regression classif methods
### ** Examples
## See the demos via demo(package="plgp") and the examples
## section of ?plgp
| /data/genthat_extracted_code/plgp/examples/params.GP.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 281 | r | library(plgp)
### Name: params.GP
### Title: Extract parameters from GP particles
### Aliases: params.GP params.CGP params.ConstGP
### Keywords: models regression classif methods
### ** Examples
## See the demos via demo(package="plgp") and the examples
## section of ?plgp
|
## SJB
## Reproduce Table 10.1 of Sennott: solve each queue-control scenario with
## the general DP solver.
source("sjb-r-solver-general.R")
library(parallel)  # available for parallel runs; plain lapply is used below

## -------- Scenario parameterization --------
# One rate per scenario.
# NOTE(review): these lambdas are assigned to the service_rate field below;
# confirm the arrival-rate vs service-rate naming against the solver.
lambdas <- c(3, 2, 2, 2, 5, 5, 10, 20)
# Three candidate service rates per scenario (rows correspond to lambdas).
svcrates <- matrix(
    data = c(
        2, 4, 8,
        1, 4, 7,
        1, 4, 7,
        1, 4, 7,
        5, 5.5, 5.8,
        5.1, 5.3, 6.0,
        10.2, 10.6, 12.0,
        24.0, 27.0, 30.0),
    ncol = 3,
    byrow = TRUE)
# Cost of each action; after t(), rows are scenarios and columns actions.
costs <- t(matrix(
    data = c(
        9.0, 1.0, 10, 1.0, 0.0, 0.0, 0.0, 1.0,
        13.0, 50.0, 50.0, 50.0, 10.0, 10.0, 10.0, 1.5,
        21.0, 500.0, 150.0, 100, 100, 25, 25, 5),
    nrow = 3,
    byrow = TRUE))

# Baseline problem definition shared by every scenario.
problem_params_base <- list(
    N_state_size = 48,  # max queue size
    N_action_size = 3,
    service_rate = 2.0,
    holding_cost_rate = 1.0,
    epsilon = 0.0001,
    MAXITER = 20000,
    output_base = "tab_scenario10-1"
)

## -------- Build per-scenario parameter and action-cost lists --------
pp_list <- list()
ac_list <- list()
N_scenario <- length(lambdas)
for (i in seq_len(N_scenario)) {
    pp_tmp <- problem_params_base
    if (i > 1) {
        pp_tmp$N_state_size <- 84  # later scenarios need a larger state space
    }
    pp_tmp$service_rate <- lambdas[i]
    pp_tmp$output_base <- paste0(problem_params_base$output_base, "_n", i, ".txt")
    ac_tmp <- list(act = svcrates[i, ], costact = costs[i, ])
    pp_list[[i]] <- pp_tmp
    ac_list[[i]] <- ac_tmp
}

# Solve every scenario.
solns <- lapply(as.list(1:N_scenario), function(i) {
    solve_dp(pp_list[[i]], ac_map = ac_list[[i]])
})
| /r-solver/sjb-reproduce-table-10.1.R | no_license | stephenjbarr/yp-mm1-mu-dpsolver | R | false | false | 1,589 | r | ## SJB
##
## Reproduce Table 10.1 of Sennott: solve each queue-control scenario with
## the general DP solver.
source("sjb-r-solver-general.R")
library(parallel)  # available for parallel runs; plain lapply is used below

## -------- Scenario parameterization --------
# One rate per scenario.
# NOTE(review): these lambdas are assigned to the service_rate field below;
# confirm the arrival-rate vs service-rate naming against the solver.
lambdas <- c(3, 2, 2, 2, 5, 5, 10, 20)
# Three candidate service rates per scenario (rows correspond to lambdas).
svcrates <- matrix(
    data = c(
        2, 4, 8,
        1, 4, 7,
        1, 4, 7,
        1, 4, 7,
        5, 5.5, 5.8,
        5.1, 5.3, 6.0,
        10.2, 10.6, 12.0,
        24.0, 27.0, 30.0),
    ncol = 3,
    byrow = TRUE)
# Cost of each action; after t(), rows are scenarios and columns actions.
costs <- t(matrix(
    data = c(
        9.0, 1.0, 10, 1.0, 0.0, 0.0, 0.0, 1.0,
        13.0, 50.0, 50.0, 50.0, 10.0, 10.0, 10.0, 1.5,
        21.0, 500.0, 150.0, 100, 100, 25, 25, 5),
    nrow = 3,
    byrow = TRUE))

# Baseline problem definition shared by every scenario.
problem_params_base <- list(
    N_state_size = 48,  # max queue size
    N_action_size = 3,
    service_rate = 2.0,
    holding_cost_rate = 1.0,
    epsilon = 0.0001,
    MAXITER = 20000,
    output_base = "tab_scenario10-1"
)

## -------- Build per-scenario parameter and action-cost lists --------
pp_list <- list()
ac_list <- list()
N_scenario <- length(lambdas)
for (i in seq_len(N_scenario)) {
    pp_tmp <- problem_params_base
    if (i > 1) {
        pp_tmp$N_state_size <- 84  # later scenarios need a larger state space
    }
    pp_tmp$service_rate <- lambdas[i]
    pp_tmp$output_base <- paste0(problem_params_base$output_base, "_n", i, ".txt")
    ac_tmp <- list(act = svcrates[i, ], costact = costs[i, ])
    pp_list[[i]] <- pp_tmp
    ac_list[[i]] <- ac_tmp
}

# Solve every scenario.
solns <- lapply(as.list(1:N_scenario), function(i) {
    solve_dp(pp_list[[i]], ac_map = ac_list[[i]])
})
|
### Graphs for evaluating network morphology ###
TIME_1 <- TIME+1
### Tube volume over time ###
# Vtube = sum over links of sqrt(D_i) * L_i
# Rebuild V_archive from the stored link conductivities D_archive.
for(i in 1:TIME_1){
  V <- 0
  for(j in 1:all_link){
    V <- V + sqrt(D_archive[i,j])*L[j]
  }
  # NOTE(review): writing to index i+1 leaves V_archive[1] untouched and
  # extends the vector to TIME+2 entries -- presumably slot 1 holds the
  # initial volume set by the simulation; confirm the intended offset.
  V_archive[i+1] <- V
}
# Reference line: the initial total volume V0 at every timestep.
data <- matrix(nrow=TIME+1, ncol=2)
for(i in 1:TIME_1){
  data[i,1] <- i
  data[i,2] <- V0
}
# Output file for the figure.
file_name <- sprintf("./Vtube.png")
png(file_name, width=700, height=700)
matplot(V_archive, xlim=c(0,TIME), ylim=c(5,V0*1.2), type="l",lwd=3, xlab="Timestep",ylab="Vtube", col="red", lty=1)
# BUG FIX: the original passed xlim=c(min,max), i.e. the base functions
# min() and max(), which is not a valid axis range; use the same range as
# the curve above (xlim is ignored when add=TRUE anyway).
matplot(data[,1],data[,2], xlim=c(0,TIME), ylim=c(5,V0*1.2), type="l",lwd=3, xlab="Timestep",ylab="Vtube", col="black", lty=2, add=TRUE)
# Close the PNG device.
dev.off()
##################################
### Number of nodes over time ###
node_sum <- numeric(TIME+1)
# Count, for each timestep i, how many nodes are flagged present (==1).
for(i in 1:TIME_1){
for(j in 1:all_node){
if(node_archive[i,j]==1) node_sum[i] <- node_sum[i]+1
}
}
# Output file for the figure.
file_name <- sprintf("./node.png")
png(file_name, width=700, height=700)
matplot(node_sum, xlim=c(0,TIME), type="l",lwd=3, xlab="Timestep",ylab="Node", col=1, lty=1)
# Close the PNG device.
dev.off()
##################################
### Number of links over time ###
link_sum <- numeric(TIME+1)
# A link exists at timestep i when its conductivity D_archive[i,j] is positive.
for(i in 1:TIME_1){
for(j in 1:all_link){
if(D_archive[i,j]>0) link_sum[i] <- link_sum[i]+1
}
}
# Output file for the figure.
file_name <- sprintf("./link.png")
png(file_name, width=700, height=700)
matplot(link_sum, xlim=c(0,TIME), type="l",lwd=3, xlab="Timestep",ylab="Link", col=1, lty=1)
# Close the PNG device.
dev.off()
##################################
###重心座標の移動距離の時間変化をグラフ化###
#全ノードの平均座標を重心座標とする
#Centroid = (1/all_node)*Σ(xi,yi)
Centroid <- matrix(0, nrow=TIME+1, ncol=2) #タイムステップごとの重心座標(x,y)
Distance <- numeric(TIME+1) #タイムステップごとの重心座標の移動距離
for(i in 1:TIME_1){
node_status <- numeric(all_node) #ノードの存在(1),不在(0)を保存
node_count <- 0 #ノード数を足しあげる
#node_status[i,]の作成
for(j in 1:all_link){
if(D_archive[i,j]>0){ #リンクjが存在する場合
node1 <- link_connection[j,1] #両端のノードを検出
node2 <- link_connection[j,2] #両端のノードを検出
node_status[node1] <- 1 #ノードの存在を保存
node_status[node2] <- 1 #ノードの存在を保存
}
}
for(j in 1:all_node){
if(node_status[j]==1){
Centroid[i,1] <- Centroid[i,1] + node_potision[j,1]
Centroid[i,2] <- Centroid[i,2] + node_potision[j,2]
node_count <- node_count+1
}
}
Centroid[i,] <- Centroid[i,]/node_count #全ノード座標を平均し、重心座標を求める
Distance[i] <- sqrt( Centroid[i,1]^2 + Centroid[i,2]^2 ) #d=sqrt(x^2+y^2)
}
#重心座標の移動距離の時間変化を描画
matplot(Distance, xlim=c(0,TIME), type="l",lwd=3, xlab="Timestep",ylab="Distance", col=1, lty=1)
#ネットワーク上に重心座標を描画する
#描画リセット
plot(0, 0, xlim=c(-horizontal , horizontal), ylim=c(-vertical, vertical), type="n", xlab="", ylab="", xaxt="n", yaxt="n")
#リンクの描画
for(i in 1:all_link){
par(new=T)
segments(link_potision[i,1], link_potision[i,2], link_potision[i,3], link_potision[i,4], xlim=c(-horizontal, horizontal), ylim=c(-vertical, vertical), xlab="", ylab="", lwd=1, col="black", lty=2)
}
#中心ノードの描画
par(new=T)
plot(node_potision[1,1], node_potision[1,2], xlim=c(-horizontal, horizontal), ylim=c(-vertical, vertical), xlab="", ylab="", cex=1.5, lwd=2, pch=16, col="black")
#ノードの描画(ソースは赤,シンクは青,それ以外は黒で表示)
for(i in 1:all_node){
par(new=T)
plot(node_potision[i,1], node_potision[i,2], xlim=c(-horizontal, horizontal), ylim=c(-vertical, vertical), xlab="", ylab="", cex=0.7, lwd=2, pch=16, col="black")
}
#重心座標のプロット(黄色線)
for(i in 1:TIME_1){
par(new=T)
plot(Centroid[i,1], Centroid[i,2], xlim=c(-horizontal, horizontal), ylim=c(-vertical, vertical), xlab="", ylab="", cex=0.5, lwd=2, pch=16, col="yellow")
}
##################################
###密度の時間変化をグラフ化###
#Density = (存在するリンクの数)/(接続可能なリンクの数)
link_possible <- matrix(0, nrow=TIME+1, ncol=all_link) #タイムステップごとの接続可能リンクの保存(可能:1, 不可能:0)
link_possible_sum <- numeric(TIME+1) #タイムステップごとの接続可能リンクの総数
link_sum <- numeric(TIME+1) #タイムステップごとの存在しているリンクの総数
for(i in 1:TIME_1){ #タイムステップiにおいて(実際はi-1)
for(j in 1:all_link){
if(D_archive[i,j]>0){ #リンクjが存在する場合
node1 <- link_connection[j,1] #両端のノードを検出
node2 <- link_connection[j,2] #両端のノードを検出
for(k in 1:6){ #両端のノードに隣接するリンクを接続可能リンクとして保存
if(node_connection[node1,k]!=0) link_possible[i,node_connection[node1,k]] <- 1
if(node_connection[node2,k]!=0) link_possible[i,node_connection[node2,k]] <- 1
}
link_sum[i] <- link_sum[i]+1 #リンクjを存在するリンクの数にカウント
}
}
for(j in 1:all_link){ #接続可能リンクの総数を算出する
if(link_possible[i,j]==1) link_possible_sum[i] <- link_possible_sum[i]+1
}
}
Density <- link_sum/link_possible_sum
matplot(Density, xlim=c(0,TIME), type="l",lwd=3, xlab="Timestep",ylab="Density", col=1, lty=1)
##################################
### Meshedness over time ###
# Recount nodes present at each timestep (same logic as the node section).
node_sum <- numeric(TIME+1)
for(i in 1:TIME_1){
for(j in 1:all_node){
if(node_archive[i,j]==1) node_sum[i] <- node_sum[i]+1
}
}
# Recount links present at each timestep (D > 0 means the link exists).
link_sum <- numeric(TIME+1)
for(i in 1:TIME_1){
for(j in 1:all_link){
if(D_archive[i,j]>0) link_sum[i] <- link_sum[i]+1
}
}
# mesh = (L - N + 1) / (2N - 5): ratio of independent cycles to the maximum
# possible -- presumably the planar meshedness coefficient; confirm source.
mesh <- numeric(TIME+1)
for(i in 1:TIME_1){
mesh[i] <- (link_sum[i]-node_sum[i]+1)/(2*node_sum[i]-5)
}
matplot(mesh, xlim=c(0,TIME), ylim=c(0,1), type="l",lwd=3, xlab="Timestep",ylab="Mesh", col=1, lty=1)
###平均次数K###############
K <- numeric(TIME_1)
for(i in 1:TIME_1){
#node_status[i,]の作成
node_status <- numeric(all_node)
for(j in 1:all_link){
if(D_archive[i,j]>0){ #リンクjが存在する場合
node1 <- link_connection[j,1] #両端のノードを検出
node2 <- link_connection[j,2] #両端のノードを検出
node_status[node1] <- 1 #ノードの存在を保存
node_status[node2] <- 1 #ノードの存在を保存
}
}
K_sum <- 0
node_sum <- 0
for(j in 1:all_node){
#次数足し上げ
if(node_status[j]==1){
node_sum <- node_sum+1
for(k in 1:6){
num <- node_connection[j,k]
if(D_archive[i,num]>0 && num!=0) K_sum <- K_sum+1
}
}
}
K[i] <- K_sum/node_sum
}
plot(K)
##########################
### Maximum betweenness centrality over time ###
# For each timestep, take the largest betweenness value over all nodes.
bw_max2 <- numeric(TIME_1)
for(i in 1:TIME_1){
for(j in 1:all_node){
if(bw_max2[i] < bw_archive[i,j])
bw_max2[i] <- bw_archive[i,j]
}
}
plot(bw_max2, ylim=c(0,1), type="l", las=1, xlab="", ylab="")
# Overlay two reference decay curves of the form y = -a*log(x) + 1.
x1 <- c(0:2000)
y1 <- -0.08*log(x1)+1
par(new=TRUE)
plot(x1,y1, ylim=c(0,1), type="l", las=1, xlab="", ylab="", col="red")
x2 <- c(0:2000)
# CONSISTENCY FIX: the original computed y2 from log(x1); harmless only
# because x1 and x2 are identical vectors -- use x2 here.
y2 <- -0.12*log(x2)+1
par(new=TRUE)
plot(x2,y2, ylim=c(0,1), type="l", las=1, xlab="", ylab="", col="blue")
# NOTE(review): the legend says a=0.09 but the red curve uses 0.08 --
# confirm which coefficient is intended.
labels=c("ソース", "a=0.09", "a=0.12")
cols=c("black","red","blue")
legend("topright", legend = labels, col = cols, lty=1) | /ネットワーク評価.R | no_license | MAEDA-KOSUKE/network_growth_model | R | false | false | 8,201 | r | ###形態評価のためのグラフを作成する###
TIME_1 <- TIME+1
###管体積の時間変化をグラフ化###
#Vtube = Σ(sqrt(Di)*Li)
#V_archiveの作成
for(i in 1:TIME_1){
V <- 0
for(j in 1:all_link){
V <- V + sqrt(D_archive[i,j])*L[j]
}
V_archive[i+1] <- V
}
data <- matrix(nrow=TIME+1, ncol=2)
for(i in 1:TIME_1){
data[i,1] <- i
data[i,2] <- V0
}
#保存するフォルダと図の名前を指定
file_name <- sprintf("./Vtube.png")
png(file_name, width=700, height=700)
matplot(V_archive, xlim=c(0,TIME), ylim=c(5,V0*1.2), type="l",lwd=3, xlab="Timestep",ylab="Vtube", col="red", lty=1)
matplot(data[,1],data[,2], xlim=c(min,max), ylim=c(5,V0*1.2), type="l",lwd=3, xlab="Timestep",ylab="Vtube", col="black", lty=2, add=TRUE)
#図の書き込み終了
dev.off()
##################################
###ノード数の時間変化をグラフ化###
node_sum <- numeric(TIME+1)
for(i in 1:TIME_1){
for(j in 1:all_node){
if(node_archive[i,j]==1) node_sum[i] <- node_sum[i]+1
}
}
#保存するフォルダと図の名前を指定
file_name <- sprintf("./node.png")
png(file_name, width=700, height=700)
matplot(node_sum, xlim=c(0,TIME), type="l",lwd=3, xlab="Timestep",ylab="Node", col=1, lty=1)
#図の書き込み終了
dev.off()
##################################
###リンク数の時間変化をグラフ化###
link_sum <- numeric(TIME+1)
for(i in 1:TIME_1){
for(j in 1:all_link){
if(D_archive[i,j]>0) link_sum[i] <- link_sum[i]+1
}
}
#保存するフォルダと図の名前を指定
file_name <- sprintf("./link.png")
png(file_name, width=700, height=700)
matplot(link_sum, xlim=c(0,TIME), type="l",lwd=3, xlab="Timestep",ylab="Link", col=1, lty=1)
#図の書き込み終了
dev.off()
##################################
###Plot the time evolution of the centroid's travel distance###
#The centroid is the average coordinate over all present nodes
#Centroid = (1/all_node)*Σ(xi,yi)
Centroid <- matrix(0, nrow=TIME+1, ncol=2) #centroid coordinates (x,y) per timestep
Distance <- numeric(TIME+1) #distance of the centroid per timestep
for(i in 1:TIME_1){
node_status <- numeric(all_node) #presence (1) / absence (0) of each node
node_count <- 0 #running count of present nodes
#build node_status for timestep i
for(j in 1:all_link){
if(D_archive[i,j]>0){ #link j exists
node1 <- link_connection[j,1] #detect its two endpoint nodes
node2 <- link_connection[j,2] #detect its two endpoint nodes
node_status[node1] <- 1 #record that the node is present
node_status[node2] <- 1 #record that the node is present
}
}
#accumulate the coordinates of all present nodes
for(j in 1:all_node){
if(node_status[j]==1){
Centroid[i,1] <- Centroid[i,1] + node_potision[j,1]
Centroid[i,2] <- Centroid[i,2] + node_potision[j,2]
node_count <- node_count+1
}
}
Centroid[i,] <- Centroid[i,]/node_count #average over the present nodes gives the centroid
Distance[i] <- sqrt( Centroid[i,1]^2 + Centroid[i,2]^2 ) #d=sqrt(x^2+y^2)
#NOTE(review): this is the centroid's distance from the ORIGIN, not the
#displacement between consecutive timesteps - confirm which is intended.
}
#Plot the centroid distance over time
matplot(Distance, xlim=c(0,TIME), type="l",lwd=3, xlab="Timestep",ylab="Distance", col=1, lty=1)
#Draw the centroid trajectory on top of the network
#reset the plotting canvas
plot(0, 0, xlim=c(-horizontal , horizontal), ylim=c(-vertical, vertical), type="n", xlab="", ylab="", xaxt="n", yaxt="n")
#draw the links
for(i in 1:all_link){
par(new=T)
segments(link_potision[i,1], link_potision[i,2], link_potision[i,3], link_potision[i,4], xlim=c(-horizontal, horizontal), ylim=c(-vertical, vertical), xlab="", ylab="", lwd=1, col="black", lty=2)
}
#draw the central node (larger marker)
par(new=T)
plot(node_potision[1,1], node_potision[1,2], xlim=c(-horizontal, horizontal), ylim=c(-vertical, vertical), xlab="", ylab="", cex=1.5, lwd=2, pch=16, col="black")
#draw the nodes (original note: sources red, sinks blue, others black; all black here)
for(i in 1:all_node){
par(new=T)
plot(node_potision[i,1], node_potision[i,2], xlim=c(-horizontal, horizontal), ylim=c(-vertical, vertical), xlab="", ylab="", cex=0.7, lwd=2, pch=16, col="black")
}
#plot the centroid positions over time (yellow markers)
for(i in 1:TIME_1){
par(new=T)
plot(Centroid[i,1], Centroid[i,2], xlim=c(-horizontal, horizontal), ylim=c(-vertical, vertical), xlab="", ylab="", cex=0.5, lwd=2, pch=16, col="yellow")
}
##################################
###Plot the time evolution of the network density###
#Density = (number of existing links)/(number of connectable links)
link_possible <- matrix(0, nrow=TIME+1, ncol=all_link) #per timestep: link connectable (1) or not (0)
link_possible_sum <- numeric(TIME+1) #per timestep: total number of connectable links
link_sum <- numeric(TIME+1) #per timestep: total number of existing links
for(i in 1:TIME_1){ #at timestep i (actually i-1)
for(j in 1:all_link){
if(D_archive[i,j]>0){ #link j exists
node1 <- link_connection[j,1] #detect its two endpoint nodes
node2 <- link_connection[j,2] #detect its two endpoint nodes
for(k in 1:6){ #links adjacent to either endpoint are marked connectable
if(node_connection[node1,k]!=0) link_possible[i,node_connection[node1,k]] <- 1
if(node_connection[node2,k]!=0) link_possible[i,node_connection[node2,k]] <- 1
}
link_sum[i] <- link_sum[i]+1 #count link j as an existing link
}
}
for(j in 1:all_link){ #total up the connectable links
if(link_possible[i,j]==1) link_possible_sum[i] <- link_possible_sum[i]+1
}
}
Density <- link_sum/link_possible_sum
matplot(Density, xlim=c(0,TIME), type="l",lwd=3, xlab="Timestep",ylab="Density", col=1, lty=1)
##################################
###Meshedness calculation###
#node_sum[i]: number of nodes present at timestep i
node_sum <- numeric(TIME+1)
for(i in 1:TIME_1){
for(j in 1:all_node){
if(node_archive[i,j]==1) node_sum[i] <- node_sum[i]+1
}
}
#link_sum[i]: number of links present at timestep i
link_sum <- numeric(TIME+1)
for(i in 1:TIME_1){
for(j in 1:all_link){
if(D_archive[i,j]>0) link_sum[i] <- link_sum[i]+1
}
}
#Meshedness coefficient of a planar graph: (m - n + 1)/(2n - 5),
#with m = number of links and n = number of nodes.
mesh <- numeric(TIME+1)
for(i in 1:TIME_1){
mesh[i] <- (link_sum[i]-node_sum[i]+1)/(2*node_sum[i]-5)
}
matplot(mesh, xlim=c(0,TIME), ylim=c(0,1), type="l",lwd=3, xlab="Timestep",ylab="Mesh", col=1, lty=1)
###Average degree K###############
# Mean number of existing incident links per present node, at each timestep.
K <- numeric(TIME_1)
for(i in 1:TIME_1){
  # Mark which nodes exist at timestep i: a node is present iff at least
  # one of its links exists in D_archive.
  node_status <- numeric(all_node)
  for(j in 1:all_link){
    if(D_archive[i,j]>0){            # link j exists
      node1 <- link_connection[j,1]  # its two endpoint nodes
      node2 <- link_connection[j,2]
      node_status[node1] <- 1
      node_status[node2] <- 1
    }
  }
  K_sum <- 0     # total degree summed over all present nodes
  node_sum <- 0  # number of present nodes
  for(j in 1:all_node){
    if(node_status[j]==1){
      node_sum <- node_sum+1
      for(k in 1:6){  # up to 6 candidate link slots per node (0 = empty slot)
        num <- node_connection[j,k]
        # BUGFIX: test num!=0 BEFORE indexing D_archive with it. The original
        # order evaluated D_archive[i,0] first, which yields a zero-length
        # value and makes the && condition fail (NA/error) whenever a slot
        # is empty. The density section above checks !=0 first as well.
        if(num!=0 && D_archive[i,num]>0) K_sum <- K_sum+1
      }
    }
  }
  K[i] <- K_sum/node_sum  # average degree at timestep i
}
plot(K)
##########################
###Betweenness centrality: maximum value#######
#bw_max2[i]: the largest betweenness value over all nodes at timestep i
bw_max2 <- numeric(TIME_1)
for(i in 1:TIME_1){
for(j in 1:all_node){
if(bw_max2[i] < bw_archive[i,j])
bw_max2[i] <- bw_archive[i,j]
}
}
plot(bw_max2, ylim=c(0,1), type="l", las=1, xlab="", ylab="")
#Overlay reference curves of the form y = -a*log(x) + 1
x1 <- c(0:2000)
y1 <- -0.08*log(x1)+1
#NOTE(review): the legend below labels this curve "a=0.09" although the
#coefficient used here is 0.08 - confirm which value is intended.
par(new=T)
plot(x1,y1, ylim=c(0,1), type="l", las=1, xlab="", ylab="", col="red")
x2 <- c(0:2000)
y2 <- -0.12*log(x1)+1
#NOTE(review): y2 is computed from x1, not x2 (harmless since x1 and x2 are
#identical, but x2 was probably intended).
par(new=T)
plot(x2,y2, ylim=c(0,1), type="l", las=1, xlab="", ylab="", col="blue")
labels=c("ソース", "a=0.09", "a=0.12")
cols=c("black","red","blue")
legend("topright", legend = labels, col = cols, lty=1) |
#' @include MADproject.R
NULL
#' Test (visually) the convergence of a MADproject object.
#'
#' \code{test_convergence} returns a plot to help the user to visualize if
#' there are enough realizations in the project for converged likelihood
#' values
#'
#' @param proj The MADproject object to be tested.
#' @param dsubset The subset of inversion data to use for the likelihood
#' calculations.
#' @param samples A vector of sample IDs for which to calculate
#'                likelihood values (defaults to all sample IDs available
#'                in the project)
#' @param NR The number of different realization totals for which to
#' calculate likelihood values (defaults to 10)
#' @param NS The number of randomly selected samples to test (defaults to 7)
#' @return NULL.
#'
#' @export
# S4 generic: dispatch is on `proj` and (optionally) `dsubset`; the plotting
# options (samples, NR, NS) travel through `...` to the methods below.
setGeneric("test_convergence", function(proj, dsubset, ...) {
standardGeneric("test_convergence")
})
# Method used when an inversion-data subset (`dsubset`) is supplied.
setMethod("test_convergence",
          signature(proj="MADproject", dsubset="numeric"),
          function(proj, dsubset, samples=1:proj@numSamples, NR=10, NS=7) {
            ## Smallest realization count available across the requested
            ## samples (restricted to the data subset); caps the x-axis of
            ## the convergence curves. plyr is called with :: (as for adply
            ## below) so the method works when plyr is imported but not
            ## attached; "sid" is the character form of plyr's .(sid).
            minr <- min(plyr::daply(subset(proj@realizations,sid %in% samples & zid %in% dsubset),
                                    "sid", function(df) max(df$rid)))
            ## At most NS randomly chosen samples are plotted, to keep the
            ## figure readable.
            samps <- sample(samples,min(NS,length(samples)))
            ## NR realization totals spanning 10% of minr up to minr.
            nr <- seq(ceiling(.1*minr),minr,length.out=NR)
            ## Likelihoods recomputed at each realization total.
            likes <- plyr::adply(nr, 1, function(x) calcLikelihood(proj, dsubset=dsubset,
                                                                   num_realz=x,
                                                                   samples=samps)@likelihoods)
            likes$nr <- nr[likes$X1]  # X1 is adply's row index into nr
            likes$sid <- as.factor(likes$sid)
            ## One likelihood-vs-realizations curve per sample; a flat curve
            ## suggests the likelihood has converged.
            ggplot(likes, aes(x=nr, y=like, group=sid, colour=sid)) +
              geom_line() + scale_y_log10() + xlab("Number of Realizations") +
              ylab("Log 10 Likelihood") + scale_colour_discrete(name = "Sample ID")
          }
)
# Method used when no data subset is given: all inversion data in
# proj@realizations are used.
setMethod("test_convergence",
          signature(proj="MADproject"),
          function(proj, samples=1:proj@numSamples, NR=10, NS=7) {
            ## Smallest realization count over the requested samples.
            ## plyr is called with :: (consistent with plyr::adply below) so
            ## this works when plyr is imported but not attached; "sid" is
            ## the character form of plyr's .(sid).
            minr <- min(plyr::daply(subset(proj@realizations,sid %in% samples),
                                    "sid", function(df) max(df$rid)))
            ## At most NS randomly chosen samples are plotted.
            samps <- sample(samples,min(NS,length(samples)))
            ## NR realization totals spanning 10% of minr up to minr.
            nr <- seq(ceiling(.1*minr),minr,length.out=NR)
            likes <- plyr::adply(nr, 1, function(x) calcLikelihood(proj,
                                                                   num_realz=x,
                                                                   samples=samps)@likelihoods)
            likes$nr <- nr[likes$X1]  # X1 is adply's row index into nr
            likes$sid <- as.factor(likes$sid)
            ## One curve per sample; flat curves indicate convergence.
            ggplot(likes, aes(x=nr, y=like, group=sid, colour=sid)) +
              geom_line() + scale_y_log10() + xlab("Number of Realizations") +
              ylab("Log 10 Likelihood") + scale_colour_discrete(name = "Sample ID")
          }
)
| /R/test_convergence.R | no_license | hsavoy/madr | R | false | false | 2,847 | r | #' @include MADproject.R
NULL
#' Test (visually) the convergence of a MADproject object.
#'
#' \code{test_convergence} returns a plot to help the user to visualize if
#' there are enough realizations in the project for converged likelihood
#' values
#'
#' @param proj The MADproject object to be tested.
#' @param dsubset The subset of inversion data to use for the likelihood
#' calculations.
#' @param samples A vector of sample IDs for which to calculate
#' likelihood values (defaults to all available in the
#' @param NR The number of different realization totals for which to
#' calculate likelihood values (defaults to 10)
#' @param NS The number of randomly selected samples to test (defaults to 7)
#' @return NULL.
#'
#' @export
# Duplicate copy of the test_convergence generic and methods (see the
# roxygen header above for the documented interface).
setGeneric("test_convergence", function(proj, dsubset, ...) {
standardGeneric("test_convergence")
})
# Method used when an inversion-data subset (`dsubset`) is supplied.
setMethod("test_convergence",
signature(proj="MADproject", dsubset="numeric"),
function(proj, dsubset, samples=1:proj@numSamples, NR=10, NS=7) {
# smallest realization count available over the requested samples/subset
# NOTE(review): `daply` and `.(sid)` are unqualified, unlike plyr::adply
# below - this requires plyr to be attached, not merely imported.
minr <- min(daply(subset(proj@realizations,sid %in% samples & zid %in% dsubset),
.(sid), function(df) max(df$rid)))
# at most NS randomly chosen samples are plotted
samps <- sample(samples,min(NS,length(samples)))
# NR realization totals between 10% of minr and minr
nr <- seq(ceiling(.1*minr),minr,length.out=NR)
likes <- plyr::adply(nr, 1, function(x) calcLikelihood(proj, dsubset=dsubset,
num_realz=x,
samples=samps)@likelihoods)
likes$nr <- nr[likes$X1] # X1 is adply's row index into nr
likes$sid <- as.factor(likes$sid)
# one likelihood-vs-realizations curve per sample; flat curves = converged
ggplot(likes, aes(x=nr, y=like, group=sid, colour=sid)) +
geom_line() + scale_y_log10() + xlab("Number of Realizations") +
ylab("Log 10 Likelihood") + scale_colour_discrete(name = "Sample ID")
}
)
# Method used when no data subset is given: all inversion data are used.
setMethod("test_convergence",
signature(proj="MADproject"),
function(proj, samples=1:proj@numSamples, NR=10, NS=7) {
minr <- min(daply(subset(proj@realizations,sid %in% samples),
.(sid), function(df) max(df$rid)))
samps <- sample(samples,min(NS,length(samples)))
nr <- seq(ceiling(.1*minr),minr,length.out=NR)
likes <- plyr::adply(nr, 1, function(x) calcLikelihood(proj,
num_realz=x,
samples=samps)@likelihoods)
likes$nr <- nr[likes$X1]
likes$sid <- as.factor(likes$sid)
ggplot(likes, aes(x=nr, y=like, group=sid, colour=sid)) +
geom_line() + scale_y_log10() + xlab("Number of Realizations") +
ylab("Log 10 Likelihood") + scale_colour_discrete(name = "Sample ID")
}
)
|
# Lower-triangle heatmap of the pairwise network-distance matrix Md_plot.
Md_plot[upper.tri(Md_plot)] <- Inf  # mask the upper triangle (rows dropped below)
nams0 <- c("Local_Diagnosis", "Local_Relapse", "Li_Diagnosis", "Li_Relapse")
colnames(Md_plot) <- nams0 #gsub(" ","_",tolower(names(ams)))
rownames(Md_plot) <- nams0 #gsub(" ","_",tolower(names(ams)))
# long format (Var1, Var2, value) for ggplot tiles
melted_cormat <- melt(Md_plot)
head(melted_cormat)
vcn <- as.numeric(Md_plot)
vcn[vcn==0] <- NA
# zero distances (e.g. the diagonal) become NA so they render gray
melted_cormat$value[melted_cormat$value==0] <- NA
melted_cormat2 <- melted_cormat[-which(is.infinite(melted_cormat$value)),]
#melted_cormat2$value <- as.numeric(scale(melted_cormat2$value))
# NOTE(review): the `label = ifelse(Padj < 0.001, ...)` argument below sits
# outside aes() and references an undefined `Padj`; it appears to be dead
# code that is never evaluated - confirm and remove if so.
ggplot(data = melted_cormat2 , aes(x=Var1, y=Var2, fill=value), label = ifelse(Padj < 0.001, "***", "ns")) +
geom_tile(color = "lightgray") +
scale_fill_gradient2(low = "chartreuse3", mid="snow", high = "orchid3", midpoint = (min(melted_cormat2$value, na.rm = T)+max(melted_cormat2$value, na.rm = T))/2, space = "Lab", na.value="lightgray",
name="distance") +
coord_fixed() +
xlab("") +
ylab("") +
theme_minimal() +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank(),
axis.text.x = element_text(angle = 0, vjust = 1, size = 14, hjust = 0.5),
axis.text.y = element_text(angle = 0, vjust = 0.5, size = 14, hjust = 1),
legend.text=element_text(size=12),
legend.title=element_text(size=14))
#### PERMUTATION TEST #####
# Builds a null distribution of the network distances by permuting the
# decision (class) labels 100 times and re-running the whole pipeline.
newModel0 <- newModel
Md_fin1_perm <- list() # degree-centrality distance matrices (one per permutation)
Md_fin2_perm <- list()
Md_fin3_perm <- list() # betweenness-centrality distance matrices
Md_fin4_perm <- list()
Md_fin5_perm <- list()
Md_fin6_perm <- list()
#permutations
for(j in 1:100){
Md_l1_perm <- list()
#Md_l2_perm <- list()
Md_l3_perm <- list()
#Md_l4_perm <- list()
#Md_l5_perm <- list()
#Md_l6_perm <- list()
set.seed(j) # reproducible permutation j
#newModel0[,3:8] <- newModel0[sample(1:length(newModel0$decision)),3:8] # need to try with only decision
newModel0[,3] <- newModel0[sample(1:length(newModel0$decision)),3]
#30 balanced subsamples (rls_min rows per class) per permutation
for(i in 1:30){
pddnos_n <- sample(which(as.character(newModel0$decision)=="PDD-NOS"), rls_min)
asperg_n <- sample(which(as.character(newModel0$decision)=="ASPERGER'S DISORDER"), rls_min)
autism_n <- sample(which(as.character(newModel0$decision)=="AUTISM"), rls_min)
control_n <- sample(which(as.character(newModel0$decision)=="CONTROL"), rls_min)
newModel2 <- newModel0[c(pddnos_n, asperg_n, autism_n, control_n), ]
#one rule network per class
net0 <- network_gen(newModel2, decision="PDD-NOS", ruleSet = 'own', NodeColor = 'DL', Param = 'Min Cov', minValue=0, minAcc=0.25, type = 'RDF', topN = 100)
net1 <- network_gen(newModel2, decision="ASPERGER'S DISORDER", ruleSet = 'own', NodeColor = 'DL', Param = 'Min Cov', minValue=0, minAcc=0.25, type = 'RDF', topN = 100)
net2 <- network_gen(newModel2, decision="AUTISM", ruleSet = 'own', NodeColor = 'DL', Param = 'Min Cov', minValue=0, minAcc=0.25, type = 'RDF', topN = 100)
net3 <- network_gen(newModel2, decision="CONTROL", ruleSet = 'own', NodeColor = 'DL', Param = 'Min Cov', minValue=0, minAcc=0.25, type = 'RDF', topN = 100)
vis_out <- list(net0[[1]], net1[[1]], net2[[1]], net3[[1]])
names(vis_out) <- c("PDD-NOS", "ASPERGER'S DISORDER", "AUTISM", "CONTROL")
#vis_out <- visunet(newModel2, type = "RDF")
#### create adjacency matrices
ams <- createAdjMat(vis_out)
#pairwise distances between the four class networks ($D holds the distances)
Md1_perm <- nd.centrality(ams, mode="Degree", directed = FALSE)
#Md2_perm <- nd.centrality(ams, mode="Close", directed = FALSE)
Md3_perm <- nd.centrality(ams, mode="Between", directed = FALSE)
#Md4_perm <- nd.hamming(ams, out.dist = TRUE)
#Md5_perm <- nd.gdd(ams, out.dist = TRUE)
#Md6_perm <- nd.dsd(ams, out.dist = TRUE, type="Adj")
Md1_perm <- as.matrix(Md1_perm$D)
#Md2_perm <- as.matrix(Md2_perm$D)
Md3_perm <- as.matrix(Md3_perm$D)
#Md4_perm <- as.matrix(Md4_perm$D)
#Md5_perm <- as.matrix(Md5_perm$D)
#Md6_perm <- as.matrix(Md6_perm$D)
Md_l1_perm[[i]] <- Md1_perm
#Md_l2_perm[[i]] <- Md2_perm
Md_l3_perm[[i]] <- Md3_perm
# Md_l4_perm[[i]] <- Md4_perm
#Md_l5_perm[[i]] <- Md5_perm
#Md_l6_perm[[i]] <- Md6_perm
}
#average the 30 subsample distance matrices for permutation j
#Md2_perm[[j]] <- Reduce("+", Md_l2)/length(Md_l2)
Md_fin1_perm[[j]] <- Reduce("+", Md_l1_perm)/length(Md_l1_perm)
#Md_fin2_perm[[j]] <- Reduce("+", Md_l2_perm)/length(Md_l2_perm)
Md_fin3_perm[[j]] <- Reduce("+", Md_l3_perm)/length(Md_l3_perm)
#Md_fin4_perm[[j]] <- Reduce("+", Md_l4_perm)/length(Md_l4_perm)
#Md_fin5_perm[[j]] <- Reduce("+", Md_l5_perm)/length(Md_l5_perm)
#Md_fin6_perm[[j]] <- Reduce("+", Md_l6_perm)/length(Md_l6_perm)
print(j) # progress
}
# Aliases used by plotNetPerm below: observed (Md_fin) and permuted
# (Md_fin_perm) distance matrices, here the degree-centrality versions.
Md_fin_perm <- Md_fin1_perm
Md_fin <- Md_fin1
###########################
# Persist the observed and permuted results.
# NOTE(review): absolute, user-specific paths - consider relative paths.
saveRDS(Md_fin1,"/Users/mateuszgarbulowski/Desktop/ASD_subtypes_permutation_test/ori_cent_degree.rds")
saveRDS(Md_fin3,"/Users/mateuszgarbulowski/Desktop/ASD_subtypes_permutation_test/ori_cent_between.rds")
saveRDS(Md_fin1_perm,"/Users/mateuszgarbulowski/Desktop/ASD_subtypes_permutation_test/perm_cent_degree.rds")
saveRDS(Md_fin3_perm,"/Users/mateuszgarbulowski/Desktop/ASD_subtypes_permutation_test/perm_cent_between.rds")
# Reload the betweenness-based results for plotting.
Md_fin <- readRDS("/Users/mateuszgarbulowski/Desktop/ASD_subtypes_permutation_test/subtype/ori_cent_between.rds")
Md_fin_perm <- readRDS("/Users/mateuszgarbulowski/Desktop/ASD_subtypes_permutation_test/subtype/perm_cent_between.rds")
# Short group labels used in the plot titles.
nams0 <- c("PDD-NOS","AS","autism","control")
# Histogram of the permutation null distribution of the network distance
# between the pair of groups indexed by `crds`, with the observed
# (unpermuted) distance overlaid as a dashed vertical line.
#
# crds: length-2 integer vector indexing nams0 / the distance matrices
#       (matrices are assumed symmetric, so crds and rev(crds) address the
#       same pair).
# Uses globals: nams0 (group labels), Md_fin_perm (list of permuted
# distance matrices), Md_fin (observed distance matrix).
plotNetPerm <- function(crds){
  nms2 <- nams0
  nms3 <- paste0(nms2[crds], collapse = " vs ")
  crds2 <- rev(crds)
  # one permuted distance per permutation replicate
  vals <- data.frame(values = unlist(lapply(Md_fin_perm, function(x) x[crds[1],crds[2]])))
  thrt <- Md_fin[crds2[1],crds2[2]]
  colnames(vals) <- "values"
  # One-sided (lower-tail) permutation p-value: the proportion of permuted
  # distances at least as small as the observed one.
  # We add a 1 to the numerator and denominator to account for misestimation
  # of the p-value (for more details see Phipson and Smyth, Permutation
  # P-values should never be zero).
  # BUGFIX: the original added 1/500 instead of 1, contradicting the
  # correction it cites; the estimator is (b + 1)/(m + 1).
  pval <- round((sum(vals$values <= thrt) + 1)/(nrow(vals) + 1), digits = 3)
  pval <- format.pval(pval, digits = 3, eps = 0.001, nsmall = 2)
  ggplot(vals, aes(x = values)) +
    geom_histogram(aes(y=..density..), bins = 25, col="gray35", fill="gray70")+
    geom_density(alpha=.3, fill="tomato", col="tomato3") +
    geom_vline(aes(xintercept=thrt), color="gray30", linetype="dashed", size=1) +
    ggtitle(nms3)+
    xlab("scaled distance")+
    theme_classic()+
    labs(subtitle = paste0("p-value: ",pval))
}
# All six pairwise group comparisons.
p1 <- plotNetPerm(c(1,2))
p2 <- plotNetPerm(c(1,3))
p3 <- plotNetPerm(c(2,3))
p4 <- plotNetPerm(c(3,4))
p5 <- plotNetPerm(c(1,4))
p6 <- plotNetPerm(c(2,4))
# Combine into one 2x3 annotated figure (ggarrange - presumably from
# ggpubr; confirm the package is loaded).
ggarrange(p1, p2, p3, p4, p5, p6, ncol=2, nrow=3, labels = c("a", "b","c","d","e","f"),
common.legend = TRUE, legend = "bottom")
| /R/networkDistances.R | no_license | mategarb/netvaluatoR | R | false | false | 6,584 | r |
# Duplicate copy of the heatmap / permutation-test / plotting pipeline
# above; see the comments on the first copy for details.
Md_plot[upper.tri(Md_plot)] <- Inf  # mask the upper triangle
nams0 <- c("Local_Diagnosis", "Local_Relapse", "Li_Diagnosis", "Li_Relapse")
colnames(Md_plot) <- nams0 #gsub(" ","_",tolower(names(ams)))
rownames(Md_plot) <- nams0 #gsub(" ","_",tolower(names(ams)))
melted_cormat <- melt(Md_plot)
head(melted_cormat)
vcn <- as.numeric(Md_plot)
vcn[vcn==0] <- NA
melted_cormat$value[melted_cormat$value==0] <- NA
melted_cormat2 <- melted_cormat[-which(is.infinite(melted_cormat$value)),]
#melted_cormat2$value <- as.numeric(scale(melted_cormat2$value))
# NOTE(review): `label = ifelse(Padj < 0.001, ...)` sits outside aes() and
# references an undefined `Padj`; it appears to be dead code.
ggplot(data = melted_cormat2 , aes(x=Var1, y=Var2, fill=value), label = ifelse(Padj < 0.001, "***", "ns")) +
geom_tile(color = "lightgray") +
scale_fill_gradient2(low = "chartreuse3", mid="snow", high = "orchid3", midpoint = (min(melted_cormat2$value, na.rm = T)+max(melted_cormat2$value, na.rm = T))/2, space = "Lab", na.value="lightgray",
name="distance") +
coord_fixed() +
xlab("") +
ylab("") +
theme_minimal() +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.background = element_blank(),
axis.text.x = element_text(angle = 0, vjust = 1, size = 14, hjust = 0.5),
axis.text.y = element_text(angle = 0, vjust = 0.5, size = 14, hjust = 1),
legend.text=element_text(size=12),
legend.title=element_text(size=14))
#### PERMUTATION TEST #####
# Null distribution: permute the decision labels 100 times and rerun the
# whole network-distance pipeline.
newModel0 <- newModel
Md_fin1_perm <- list()
Md_fin2_perm <- list()
Md_fin3_perm <- list()
Md_fin4_perm <- list()
Md_fin5_perm <- list()
Md_fin6_perm <- list()
#permutations
for(j in 1:100){
Md_l1_perm <- list()
#Md_l2_perm <- list()
Md_l3_perm <- list()
#Md_l4_perm <- list()
#Md_l5_perm <- list()
#Md_l6_perm <- list()
set.seed(j)
#newModel0[,3:8] <- newModel0[sample(1:length(newModel0$decision)),3:8] # need to try with only decision
newModel0[,3] <- newModel0[sample(1:length(newModel0$decision)),3]
#30 balanced subsamples (rls_min rows per class) per permutation
for(i in 1:30){
pddnos_n <- sample(which(as.character(newModel0$decision)=="PDD-NOS"), rls_min)
asperg_n <- sample(which(as.character(newModel0$decision)=="ASPERGER'S DISORDER"), rls_min)
autism_n <- sample(which(as.character(newModel0$decision)=="AUTISM"), rls_min)
control_n <- sample(which(as.character(newModel0$decision)=="CONTROL"), rls_min)
newModel2 <- newModel0[c(pddnos_n, asperg_n, autism_n, control_n), ]
net0 <- network_gen(newModel2, decision="PDD-NOS", ruleSet = 'own', NodeColor = 'DL', Param = 'Min Cov', minValue=0, minAcc=0.25, type = 'RDF', topN = 100)
net1 <- network_gen(newModel2, decision="ASPERGER'S DISORDER", ruleSet = 'own', NodeColor = 'DL', Param = 'Min Cov', minValue=0, minAcc=0.25, type = 'RDF', topN = 100)
net2 <- network_gen(newModel2, decision="AUTISM", ruleSet = 'own', NodeColor = 'DL', Param = 'Min Cov', minValue=0, minAcc=0.25, type = 'RDF', topN = 100)
net3 <- network_gen(newModel2, decision="CONTROL", ruleSet = 'own', NodeColor = 'DL', Param = 'Min Cov', minValue=0, minAcc=0.25, type = 'RDF', topN = 100)
vis_out <- list(net0[[1]], net1[[1]], net2[[1]], net3[[1]])
names(vis_out) <- c("PDD-NOS", "ASPERGER'S DISORDER", "AUTISM", "CONTROL")
#vis_out <- visunet(newModel2, type = "RDF")
#### create adjacency matrices
ams <- createAdjMat(vis_out)
Md1_perm <- nd.centrality(ams, mode="Degree", directed = FALSE)
#Md2_perm <- nd.centrality(ams, mode="Close", directed = FALSE)
Md3_perm <- nd.centrality(ams, mode="Between", directed = FALSE)
#Md4_perm <- nd.hamming(ams, out.dist = TRUE)
#Md5_perm <- nd.gdd(ams, out.dist = TRUE)
#Md6_perm <- nd.dsd(ams, out.dist = TRUE, type="Adj")
Md1_perm <- as.matrix(Md1_perm$D)
#Md2_perm <- as.matrix(Md2_perm$D)
Md3_perm <- as.matrix(Md3_perm$D)
#Md4_perm <- as.matrix(Md4_perm$D)
#Md5_perm <- as.matrix(Md5_perm$D)
#Md6_perm <- as.matrix(Md6_perm$D)
Md_l1_perm[[i]] <- Md1_perm
#Md_l2_perm[[i]] <- Md2_perm
Md_l3_perm[[i]] <- Md3_perm
# Md_l4_perm[[i]] <- Md4_perm
#Md_l5_perm[[i]] <- Md5_perm
#Md_l6_perm[[i]] <- Md6_perm
}
#average the 30 subsample distance matrices for permutation j
#Md2_perm[[j]] <- Reduce("+", Md_l2)/length(Md_l2)
Md_fin1_perm[[j]] <- Reduce("+", Md_l1_perm)/length(Md_l1_perm)
#Md_fin2_perm[[j]] <- Reduce("+", Md_l2_perm)/length(Md_l2_perm)
Md_fin3_perm[[j]] <- Reduce("+", Md_l3_perm)/length(Md_l3_perm)
#Md_fin4_perm[[j]] <- Reduce("+", Md_l4_perm)/length(Md_l4_perm)
#Md_fin5_perm[[j]] <- Reduce("+", Md_l5_perm)/length(Md_l5_perm)
#Md_fin6_perm[[j]] <- Reduce("+", Md_l6_perm)/length(Md_l6_perm)
print(j)
}
Md_fin_perm <- Md_fin1_perm
Md_fin <- Md_fin1
###########################
saveRDS(Md_fin1,"/Users/mateuszgarbulowski/Desktop/ASD_subtypes_permutation_test/ori_cent_degree.rds")
saveRDS(Md_fin3,"/Users/mateuszgarbulowski/Desktop/ASD_subtypes_permutation_test/ori_cent_between.rds")
saveRDS(Md_fin1_perm,"/Users/mateuszgarbulowski/Desktop/ASD_subtypes_permutation_test/perm_cent_degree.rds")
saveRDS(Md_fin3_perm,"/Users/mateuszgarbulowski/Desktop/ASD_subtypes_permutation_test/perm_cent_between.rds")
Md_fin <- readRDS("/Users/mateuszgarbulowski/Desktop/ASD_subtypes_permutation_test/subtype/ori_cent_between.rds")
Md_fin_perm <- readRDS("/Users/mateuszgarbulowski/Desktop/ASD_subtypes_permutation_test/subtype/perm_cent_between.rds")
nams0 <- c("PDD-NOS","AS","autism","control")
# Permutation-null histogram with the observed distance overlaid.
plotNetPerm <- function(crds){
nms2 <- nams0
nms3 <- paste0(nms2[crds], collapse = " vs ")
crds2 <- rev(crds)
vals <- data.frame(values = unlist(lapply(Md_fin_perm, function(x) x[crds[1],crds[2]])))
thrt <- Md_fin[crds2[1],crds2[2]]
colnames(vals) <- "values"
# We add a 1 to the numerator and denominator to account for misestimation of the p-value
# (for more details see Phipson and Smyth, Permutation P-values should never be zero).
#the proportion of permutations with larger difference
# two tailed p-value:
# NOTE(review): the comment above says "add a 1" but the code adds 1/500;
# per Phipson & Smyth the correction is (b + 1)/(m + 1). Also, this is a
# one-sided (lower-tail) proportion, not two-tailed - confirm and fix.
pval <- round((sum(vals <= thrt)+1/500)/(dim(vals)[1]+1/500), digits = 3)
pval <- format.pval(pval, digits = 3, eps = 0.001, nsmall = 2)
ggplot(vals, aes(x = values)) +
geom_histogram(aes(y=..density..), bins = 25, col="gray35", fill="gray70")+
geom_density(alpha=.3, fill="tomato", col="tomato3") +
geom_vline(aes(xintercept=thrt), color="gray30", linetype="dashed", size=1) +
ggtitle(nms3)+
xlab("scaled distance")+
theme_classic()+
labs(subtitle = paste0("p-value: ",pval))
}
# All six pairwise comparisons, assembled into one 2x3 figure.
p1 <- plotNetPerm(c(1,2))
p2 <- plotNetPerm(c(1,3))
p3 <- plotNetPerm(c(2,3))
p4 <- plotNetPerm(c(3,4))
p5 <- plotNetPerm(c(1,4))
p6 <- plotNetPerm(c(2,4))
ggarrange(p1, p2, p3, p4, p5, p6, ncol=2, nrow=3, labels = c("a", "b","c","d","e","f"),
common.legend = TRUE, legend = "bottom")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tags.R
\name{singleton_tools}
\alias{singleton_tools}
\alias{surroundSingletons}
\alias{takeSingletons}
\title{Singleton manipulation functions}
\usage{
surroundSingletons(ui)
takeSingletons(ui, singletons = character(0), desingleton = TRUE)
}
\arguments{
\item{ui}{Tag object or lists of tag objects. See \link{builder} topic.}
\item{singletons}{Character vector of singleton signatures that have already
been encountered (i.e. returned from previous calls to
\code{takeSingletons}).}
\item{desingleton}{Logical value indicating whether singletons that are
encountered should have the singleton attribute removed.}
}
\value{
\code{surroundSingletons} preprocesses a tag object by changing any
singleton X into \verb{<!--SHINY.SINGLETON[sig]-->X'<!--/SHINY.SINGLETON[sig]-->}
where sig is the sha1 of X, and X' is X minus the singleton attribute.
\code{takeSingletons} returns a list with the elements \code{ui} (the
processed tag objects with any duplicate singleton objects removed) and
\code{singletons} (the list of known singleton signatures).
}
\description{
Functions for manipulating \code{\link[=singleton]{singleton()}} objects in tag
hierarchies. Intended for framework authors.
}
| /man/singleton_tools.Rd | no_license | rstudio/htmltools | R | false | true | 1,274 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tags.R
\name{singleton_tools}
\alias{singleton_tools}
\alias{surroundSingletons}
\alias{takeSingletons}
\title{Singleton manipulation functions}
\usage{
surroundSingletons(ui)
takeSingletons(ui, singletons = character(0), desingleton = TRUE)
}
\arguments{
\item{ui}{Tag object or lists of tag objects. See \link{builder} topic.}
\item{singletons}{Character vector of singleton signatures that have already
been encountered (i.e. returned from previous calls to
\code{takeSingletons}).}
\item{desingleton}{Logical value indicating whether singletons that are
encountered should have the singleton attribute removed.}
}
\value{
\code{surroundSingletons} preprocesses a tag object by changing any
singleton X into \verb{<!--SHINY.SINGLETON[sig]-->X'<!--/SHINY.SINGLETON[sig]-->}
where sig is the sha1 of X, and X' is X minus the singleton attribute.
\code{takeSingletons} returns a list with the elements \code{ui} (the
processed tag objects with any duplicate singleton objects removed) and
\code{singletons} (the list of known singleton signatures).
}
\description{
Functions for manipulating \code{\link[=singleton]{singleton()}} objects in tag
hierarchies. Intended for framework authors.
}
|
\name{match_atrack}
\alias{match_atrack}
\title{Extending Annotation Vectors}
\usage{
match_atrack(x, data = NULL)
}
\arguments{
\item{x}{annotation vector}
\item{data}{reference data}
}
\value{
a vector of the same type as \code{x}
}
\description{
Extends a vector used as an annotation track to match the
number of rows and the row names of a given data.
}
| /man/match_atrack.Rd | no_license | cran/NMF | R | false | false | 373 | rd | \name{match_atrack}
\alias{match_atrack}
\title{Extending Annotation Vectors}
\usage{
match_atrack(x, data = NULL)
}
\arguments{
\item{x}{annotation vector}
\item{data}{reference data}
}
\value{
a vector of the same type as \code{x}
}
\description{
Extends a vector used as an annotation track to match the
number of rows and the row names of a given data.
}
|
library(depend.truncation)
### Name: Logrank.stat.tie
### Title: The weighted log-rank statistics for testing quasi-independence
### (with ties in data)
### Aliases: Logrank.stat.tie
### Keywords: Copula Quasi-independence test
### ** Examples
# Toy left-truncated data: x.trunc are presumably the truncation times,
# z.trunc the observed times, and d the censoring indicator (1 = event,
# 0 = censored) - see the depend.truncation documentation to confirm.
x.trunc=c(10,5,7,1,3,9)
z.trunc=c(12,11,8,6,4,13)
d=c(1,1,1,1,0,1)
Logrank.stat.tie(x.trunc,z.trunc,d)
Logrank.stat(x.trunc,z.trunc,d) ## since there is no tie, the results are the same.
| /data/genthat_extracted_code/depend.truncation/examples/Logrank.stat.tie.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 440 | r | library(depend.truncation)
# Duplicate copy of the Logrank.stat.tie example above.
### Name: Logrank.stat.tie
### Title: The weighted log-rank statistics for testing quasi-independence
### (with ties in data)
### Aliases: Logrank.stat.tie
### Keywords: Copula Quasi-independence test
### ** Examples
x.trunc=c(10,5,7,1,3,9)
z.trunc=c(12,11,8,6,4,13)
d=c(1,1,1,1,0,1)
Logrank.stat.tie(x.trunc,z.trunc,d)
Logrank.stat(x.trunc,z.trunc,d) ## since there is no tie, the results are the same.
|
\name{dv.tie}
\alias{dv.tie}
\title{ Resolve tie for departing variables }
\description{
This function returns the row index of the departing variable based on which of the corresponding
variables has a higher priority level.
}
\usage{
dv.tie(tab, i, ip)
}
\arguments{
\item{tab}{ An object of class 'llgptab' that is the modified simplex tableau }
\item{i}{ An integer index for a departing variable }
\item{ip}{ An integer index for a departing variable }
}
\value{
An integer index for a departing variable.
}
\references{
Ignizio, J. P. (1976). Goal Programming and Extensions, Lexington Books, D. C. Heath and Company.
}
\author{ Frederick Novomestky \email{fnovomes@poly.edu} }
\seealso{
\code{\link{dv.llgp}},
\code{\link{llgptab}}
}
\keyword{ math }
| /man/dv.tie.Rd | no_license | Bhanditz/goalprog | R | false | false | 797 | rd | \name{dv.tie}
\alias{dv.tie}
\title{ Resolve tie for departing variables }
\description{
This function returns the row index of the departing variable based on which of the corresponding
variables has a higher priority level.
}
\usage{
dv.tie(tab, i, ip)
}
\arguments{
\item{tab}{ An object of class 'llgptab' that is the modified simplex tableau }
\item{i}{ An integer index for a departing variable }
\item{ip}{ An integer index for a departing variable }
}
\value{
An integer index for a departing variable.
}
\references{
Ignizio, J. P. (1976). Goal Programming and Extensions, Lexington Books, D. C. Heath and Company.
}
\author{ Frederick Novomestky \email{fnovomes@poly.edu} }
\seealso{
\code{\link{dv.llgp}},
\code{\link{llgptab}}
}
\keyword{ math }
|
#Attempts to find the unique preimage of a point under a map
#obtained by copmap (in expectation)
#
#Args
#p An increasing vector of points in [-1,1] - this would have been
# the p argument of copmap
#copmapout The output of the call to copmap
#imval The image value to take the preimage of - would be the correlation
# or covariance of the x and y inputs to copmap, depending on the
# value of cflag that was used in the call to copmap
#center Either "mean" or "median" depending on what you want the preimage
# of
#
#Output
#Taking the means of each column of copmapout gives a vector of the same length
#as p. These together determine a function, via linear interpolation between
#values. The code computes the pre-image under this map, if it exists, of imval.
#If there is no point in the pre-image, the function returns -Inf. If more than
#one point, it returns Inf. If exactly one point, it returns that value.
getinv<-function(p,copmapout,imval,center="mean")
{
#x and y of the function to be inverted: x is the grid of points p,
#y is the column-wise mean or median of copmapout
x<-p
if (center=="mean")
{
y<-apply(FUN=mean,X=copmapout,MARGIN=2)
}
if (center=="median")
{
y<-apply(FUN=median,X=copmapout,MARGIN=2)
}
if (!(center %in% c("mean","median")))
{
stop("Error in getinv: bad value for center")
}
#scan each linear segment of the interpolated map for imval
res<-numeric(0)
for (counter in 1:(length(x)-1))
{
#a flat segment equal to imval means infinitely many preimages
if (y[counter]==imval && y[counter+1]==imval)
{
return(Inf)
}
#segment crosses (or touches) imval: invert by linear interpolation
if ((y[counter]<=imval && y[counter+1]>=imval) ||
(y[counter]>=imval && y[counter+1]<=imval))
{
res<-c(res,x[counter]+
(x[counter+1]-x[counter])*(imval-y[counter])/(y[counter+1]-y[counter]))
}
}
#no crossing found: the preimage is empty
if (length(res)==0)
{
return(-Inf)
}
#multiple crossings: the preimage is not unique
if (length(res)>1)
{
return(Inf)
}
return(res)
} | /getinv.R | no_license | sghosh89/Copula_spaceavg | R | false | false | 1,852 | r | #Attempts to find the unique preimage of a point under a map
#obtained by copmap (in expectation)
#
#Args
#p An increasing vector of points in [-1,1] - this would have been
# the p argument of copmap
#copmapout The output of the call to copmap
#imval The image value to take the preimage of - would be the correlation
# or covariance of the x and y inputs to copmap, depending on the
# value of cflag that was used in the call to copmap
#center Either "mean" or "median" depending on what you want the preimage
# of
#
#Output
#Taking the means of each column of copmapout gives a vector of the same length
#as p. These together determine a function, via linear interpolation between
#values. The code computes the pre-image under this map, if it exists, of imval.
#If there is no point in the pre-image, the function returns -Inf. If more than
#one point, it returns Inf. If exactly one point, it returns that value.
getinv<-function(p,copmapout,imval,center="mean")
{
  # Validate `center` first so a bad argument fails before any computation.
  if (!(center %in% c("mean","median")))
  {
    stop("Error in getinv: bad value for center")
  }
  # x and y of the function to be inverted: x is the grid of points p,
  # y is the column-wise mean or median of copmapout.
  x<-p
  if (center=="mean")
  {
    y<-colMeans(copmapout)  # equivalent to apply(FUN=mean,MARGIN=2), but faster
  }
  if (center=="median")
  {
    y<-apply(FUN=median,X=copmapout,MARGIN=2)
  }
  res<-numeric(0)
  # seq_len guards the degenerate case length(x) < 2: the loop body is
  # skipped and the function returns -Inf instead of erroring on 1:0.
  for (counter in seq_len(length(x)-1))
  {
    # A flat segment sitting exactly at imval has infinitely many preimages.
    if (y[counter]==imval && y[counter+1]==imval)
    {
      return(Inf)
    }
    # Segment crosses (or touches) imval: invert by linear interpolation.
    if ((y[counter]<=imval && y[counter+1]>=imval) ||
        (y[counter]>=imval && y[counter+1]<=imval))
    {
      res<-c(res,x[counter]+
               (x[counter+1]-x[counter])*(imval-y[counter])/(y[counter+1]-y[counter]))
    }
  }
  # NOTE(review): a value attained exactly at a shared endpoint of two
  # adjacent segments is recorded twice and thus reported as non-unique.
  if (length(res)==0)
  {
    return(-Inf)  # no segment reaches imval: empty preimage
  }
  if (length(res)>1)
  {
    return(Inf)   # multiple crossings: preimage not unique
  }
  return(res)
}
#assign ncbi code to species, family and genus
#read the count table; Viral_Species holds NCBI taxon IDs
total <- read.csv("DNA_RNA_phage_viral_species_all_v2.csv")
#select the column of NCBI taxon IDs
viral <- total %>%
select(Viral_Species)
#look up the taxonomic summary for each taxon ID (here at family rank)
taxinfo <- taxize::ncbi_get_taxon_summary(viral$Viral_Species, rank="family") #assign number to species; assign to species, family and genus and get same answer
###this will give species and will have to manually look at the unknowns
#NCBI_family.R and NCBI_genus.R were able to assign viral family and genus
#NCBI_species.R assigns species
#write taxinfo to file
# write.csv(taxinfo, "viral_ncbi_code_species.csv")
########################################################################################
#recode
#redone for filtered files; Sept28-16
#load packages
library(plyr)
library(dplyr)
library(ggplot2)
library(tidyr)
#read the count tables for the full cohort and each subgroup
everyone <- read.csv("DNA_RNA_phage_viral_species_all_v2.csv", stringsAsFactors = FALSE)
vogueA <- read.csv("DNA_RNA_phage_viral_species_1A_v2.csv", stringsAsFactors = FALSE)
vogueB <- read.csv("DNA_RNA_phage_viral_species_1B_v2.csv", stringsAsFactors = FALSE)
vogue1b2 <- read.csv("DNA_RNA_phage_viral_species_1B2_v2.csv", stringsAsFactors = FALSE)
#drop the row-name column (X) and the Total_Reads column from each table
everyone$X <- NULL
everyone$Total_Reads <- NULL
vogueA$X <- NULL
vogueA$Total_Reads <- NULL
vogueB$X <- NULL
vogueB$Total_Reads <- NULL
vogue1b2$X <- NULL
vogue1b2$Total_Reads <- NULL
#reference mapping: V1 = NCBI taxon ID (old value), V2 = species name (new value)
vref <- read.csv("ref_ncbi_species_virome.csv", header=FALSE, stringsAsFactors = FALSE)
#omit empty rows and columns
# vref <- vref %>%
# select(V1, V2)
# vref <- vref[c(1:72),]
#recode ncbi code to species
#recode function
recoderFunc <- function(data, oldvalue, newvalue) {
  # Replace every element of `data` that matches an entry of `oldvalue`
  # with the entry of `newvalue` at the same position; elements with no
  # match are passed through unchanged. Factors are coerced to character.
  defactor <- function(v) if (is.factor(v)) as.character(v) else v
  data     <- defactor(data)
  oldvalue <- defactor(oldvalue)
  newvalue <- defactor(newvalue)
  # start from a copy of the input and overwrite the matching positions
  result <- data
  for (code in unique(oldvalue)) {
    result[data == code] <- newvalue[oldvalue == code]
  }
  result
}
#recode dataframes: replace NCBI taxon IDs with species names via vref
#everyone
#NOTE(review): recoderFunc is applied to whole data frames; `data == i`
#then compares every cell, so IDs are replaced wherever they occur.
everyone2 <- recoderFunc(everyone, vref$V1, vref$V2)
vogueA2 <- recoderFunc(vogueA, vref$V1, vref$V2)
vogueB2 <- recoderFunc(vogueB, vref$V1, vref$V2)
vogue1b2a <- recoderFunc(vogue1b2, vref$V1, vref$V2)
#rename column
# everyone2 <- dplyr::rename(everyone2, Virus_Type = Viral_Species)
# vogueA2 <- dplyr::rename(vogueA2, Virus_Type = Viral_Family)
# vogueB2 <- dplyr::rename(vogueB2, Virus_Type = Viral_Family)
# vogue1b2a <- dplyr::rename(vogue1b2a, Virus_Type = Viral_Family)
#make count columns into integers (everything except the first, name column)
everyone2[, -1] <- lapply(everyone2[, -1], as.integer)
vogueA2[, -1] <- lapply(vogueA2[, -1], as.integer)
vogueB2[, -1] <- lapply(vogueB2[, -1], as.integer)
vogue1b2a[, -1] <- lapply(vogue1b2a[, -1], as.integer)
#sum the counts of rows that were recoded to the same species
everyone3 <- ddply(everyone2,c("Viral_Species"),numcolwise(sum)) #includes all columns
vogueA3 <- ddply(vogueA2,c("Viral_Species"),numcolwise(sum)) #includes all columns
vogueB3 <- ddply(vogueB2,c("Viral_Species"),numcolwise(sum)) #includes all columns
vogue1b2b <- ddply(vogue1b2a,c("Viral_Species"),numcolwise(sum)) #includes all columns
#na to zero
everyone3[is.na(everyone3)] <- 0
vogueA3[is.na(vogueA3)] <- 0
vogueB3[is.na(vogueB3)] <- 0
vogue1b2b[is.na(vogue1b2b)] <- 0
#remove '1'
#the "1" taxon is the NCBI root node - drop it as uninformative
everyone4 <- everyone3[!everyone3$Viral_Species == "1",]
vogueA4 <- vogueA3[!vogueA3$Viral_Species == "1",]
vogueB4 <- vogueB3[!vogueB3$Viral_Species == "1",]
vogue1b2c <- vogue1b2b[!vogue1b2b$Viral_Species == "1",]
#write to file
# write.csv(everyone4, "DNA_RNA_phage_viral_species_all_v3.csv")
# write.csv(vogueA4, "DNA_RNA_phage_viral_species_1A_v3.csv")
# write.csv(vogueB4, "DNA_RNA_phage_viral_species_1B_v3.csv")
# write.csv(vogue1b2c, "DNA_RNA_phage_viral_species_1B2_v3.csv")
########################################################
#assign ncbi code to genus
#ref manual
vref <- read.csv("ref_ncbi_genus_virome.csv", header=FALSE, stringsAsFactors = FALSE)
#recode dataframes
#everyone
everyone2 <- recoderFunc(everyone, vref$V1, vref$V2)
vogueA2 <- recoderFunc(vogueA, vref$V1, vref$V2)
vogueB2 <- recoderFunc(vogueB, vref$V1, vref$V2)
vogue1b2a <- recoderFunc(vogue1b2, vref$V1, vref$V2)
#rename column
everyone2 <- dplyr::rename(everyone2, Viral_Genus = Viral_Species)
vogueA2 <- dplyr::rename(vogueA2, Viral_Genus = Viral_Species)
vogueB2 <- dplyr::rename(vogueB2, Viral_Genus = Viral_Species)
vogue1b2a <- dplyr::rename(vogue1b2a, Viral_Genus = Viral_Species)
#make columns into integers
everyone2[, -1] <- lapply(everyone2[, -1], as.integer)
vogueA2[, -1] <- lapply(vogueA2[, -1], as.integer)
vogueB2[, -1] <- lapply(vogueB2[, -1], as.integer)
vogue1b2a[, -1] <- lapply(vogue1b2a[, -1], as.integer)
#gathering like types
everyone3 <- ddply(everyone2,c("Viral_Genus"),numcolwise(sum)) #includes all columns
vogueA3 <- ddply(vogueA2,c("Viral_Genus"),numcolwise(sum)) #includes all columns
vogueB3 <- ddply(vogueB2,c("Viral_Genus"),numcolwise(sum)) #includes all columns
vogue1b2b <- ddply(vogue1b2a,c("Viral_Genus"),numcolwise(sum)) #includes all columns
#na to zero
everyone3[is.na(everyone3)] <- 0
vogueA3[is.na(vogueA3)] <- 0
vogueB3[is.na(vogueB3)] <- 0
vogue1b2b[is.na(vogue1b2b)] <- 0
#remove '1'
everyone4 <- everyone3[!everyone3$Viral_Genus == "1",]
vogueA4 <- vogueA3[!vogueA3$Viral_Genus == "1",]
vogueB4 <- vogueB3[!vogueB3$Viral_Genus == "1",]
vogue1b2c <- vogue1b2b[!vogue1b2b$Viral_Genus == "1",]
#write to file
# write.csv(everyone4, "DNA_RNA_phage_viral_genus_all.csv")
# write.csv(vogueA4, "DNA_RNA_phage_viral_genus_1A.csv")
# write.csv(vogueB4, "DNA_RNA_phage_viral_genus_1B.csv")
# write.csv(vogue1b2c, "DNA_RNA_phage_viral_genus_1B2.csv")
############################
#assign ncbi code to family
#ref manual
vref <- read.csv("ref_ncbi_family_virome.csv", header=FALSE, stringsAsFactors = FALSE)
#recode dataframes
#everyone
everyone2 <- recoderFunc(everyone, vref$V1, vref$V2)
vogueA2 <- recoderFunc(vogueA, vref$V1, vref$V2)
vogueB2 <- recoderFunc(vogueB, vref$V1, vref$V2)
vogue1b2a <- recoderFunc(vogue1b2, vref$V1, vref$V2)
#rename column
everyone2 <- dplyr::rename(everyone2, Viral_Family = Viral_Species)
vogueA2 <- dplyr::rename(vogueA2, Viral_Family = Viral_Species)
vogueB2 <- dplyr::rename(vogueB2, Viral_Family = Viral_Species)
vogue1b2a <- dplyr::rename(vogue1b2a, Viral_Family = Viral_Species)
#make columns into integers
everyone2[, -1] <- lapply(everyone2[, -1], as.integer)
vogueA2[, -1] <- lapply(vogueA2[, -1], as.integer)
vogueB2[, -1] <- lapply(vogueB2[, -1], as.integer)
vogue1b2a[, -1] <- lapply(vogue1b2a[, -1], as.integer)
#gathering like types
everyone3 <- ddply(everyone2,c("Viral_Family"),numcolwise(sum)) #includes all columns
vogueA3 <- ddply(vogueA2,c("Viral_Family"),numcolwise(sum)) #includes all columns
vogueB3 <- ddply(vogueB2,c("Viral_Family"),numcolwise(sum)) #includes all columns
vogue1b2b <- ddply(vogue1b2a,c("Viral_Family"),numcolwise(sum)) #includes all columns
#na to zero
everyone3[is.na(everyone3)] <- 0
vogueA3[is.na(vogueA3)] <- 0
vogueB3[is.na(vogueB3)] <- 0
vogue1b2b[is.na(vogue1b2b)] <- 0
#remove '1'
everyone4 <- everyone3[!everyone3$Viral_Family == "1",]
vogueA4 <- vogueA3[!vogueA3$Viral_Family == "1",]
vogueB4 <- vogueB3[!vogueB3$Viral_Family == "1",]
vogue1b2c <- vogue1b2b[!vogue1b2b$Viral_Family == "1",]
#write to file
# write.csv(everyone4, "DNA_RNA_phage_viral_family_all.csv")
# write.csv(vogueA4, "DNA_RNA_phage_viral_family_1A.csv")
# write.csv(vogueB4, "DNA_RNA_phage_viral_family_1B.csv")
# write.csv(vogue1b2c, "DNA_RNA_phage_viral_family_1B2.csv")
#########################
#viral family to type
#call data and recode for DNa, RNA and phage
everyone <- read.csv("DNA_RNA_phage_viral_family_all.csv", stringsAsFactors = FALSE)
vogueA <- read.csv("DNA_RNA_phage_viral_family_1A.csv", stringsAsFactors = FALSE)
vogueB <- read.csv("DNA_RNA_phage_viral_family_1B.csv", stringsAsFactors = FALSE)
vogue1b2 <- read.csv("DNA_RNA_phage_viral_family_1B2.csv", stringsAsFactors = FALSE)
#omit empty column
everyone$X <- NULL
vogueA$X <- NULL
vogueB$X <- NULL
vogue1b2$X <- NULL
#ref manual
vref <- read.csv("ref_family_type.csv", header=FALSE, stringsAsFactors = FALSE)
#omit empty rows and columns
# vref <- vref %>%
# select(V1, V2)
# vref <- vref[c(1:72),]
#recode ncbi code to species
#recode function
# Recode values in `data`: every occurrence of an entry of `oldvalue` is
# replaced by the corresponding entry of `newvalue`; unmatched values pass
# through unchanged. Works on vectors and (via element-wise ==) data frames.
recoderFunc <- function(data, oldvalue, newvalue) {
  # Coerce any factor inputs to character so matching is done on labels,
  # not on underlying integer codes
  if (is.factor(data)) data <- as.character(data)
  if (is.factor(oldvalue)) oldvalue <- as.character(oldvalue)
  if (is.factor(newvalue)) newvalue <- as.character(newvalue)
  # Start from a copy of the input and overwrite matched positions in place
  recoded <- data
  for (key in unique(oldvalue)) {
    recoded[data == key] <- newvalue[oldvalue == key]
  }
  recoded
}
#recode dataframes
#everyone
everyone2 <- recoderFunc(everyone, vref$V1, vref$V2)
vogueA2 <- recoderFunc(vogueA, vref$V1, vref$V2)
vogueB2 <- recoderFunc(vogueB, vref$V1, vref$V2)
vogue1b2a <- recoderFunc(vogue1b2, vref$V1, vref$V2)
#rename column
everyone2 <- dplyr::rename(everyone2, Viral_Type = Viral_Family)
vogueA2 <- dplyr::rename(vogueA2, Viral_Type = Viral_Family)
vogueB2 <- dplyr::rename(vogueB2, Viral_Type = Viral_Family)
vogue1b2a <- dplyr::rename(vogue1b2a, Viral_Type = Viral_Family)
#gathering like types
everyone3 <- ddply(everyone2,c("Viral_Type"),numcolwise(sum)) #includes all columns
vogueA3 <- ddply(vogueA2,c("Viral_Type"),numcolwise(sum)) #includes all columns
vogueB3 <- ddply(vogueB2,c("Viral_Type"),numcolwise(sum)) #includes all columns
vogue1b2b <- ddply(vogue1b2a,c("Viral_Type"),numcolwise(sum)) #includes all columns
#na to zero
everyone3[is.na(everyone3)] <- 0
vogueA3[is.na(vogueA3)] <- 0
vogueB3[is.na(vogueB3)] <- 0
vogue1b2b[is.na(vogue1b2b)] <- 0
#write to file
# write.csv(everyone3, "viral_type_all.csv")
# write.csv(vogueA3, "viral_type_1A.csv")
# write.csv(vogueB3, "viral_type_1B.csv")
# write.csv(vogue1b2b, "viral_type_1B2.csv")
#######
#wanted to make groupings but decided to condense family, and add types too
#viral family to type
#call data and recode for DNa, RNA and phage
everyone <- read.csv("DNA_RNA_phage_viral_family_all.csv", stringsAsFactors = FALSE)
vogueA <- read.csv("DNA_RNA_phage_viral_family_1A.csv", stringsAsFactors = FALSE)
vogueB <- read.csv("DNA_RNA_phage_viral_family_1B.csv", stringsAsFactors = FALSE)
vogue1b2 <- read.csv("DNA_RNA_phage_viral_family_1B2.csv", stringsAsFactors = FALSE)
#omit empty column
everyone$X <- NULL
vogueA$X <- NULL
vogueB$X <- NULL
vogue1b2$X <- NULL
#ref manual
vref <- read.csv("ref_familyandtype_condensed.csv", header=FALSE, stringsAsFactors = FALSE)
#omit empty rows and columns
# vref <- vref %>%
# select(V1, V2)
# vref <- vref[c(1:72),]
#recode ncbi code to species
#recode function
# Recode values in `data`: every occurrence of an entry of `oldvalue` is
# replaced by the corresponding entry of `newvalue`; unmatched values pass
# through unchanged. Works on vectors and (via element-wise ==) data frames.
recoderFunc <- function(data, oldvalue, newvalue) {
  # Coerce any factor inputs to character so matching is done on labels,
  # not on underlying integer codes
  if (is.factor(data)) data <- as.character(data)
  if (is.factor(oldvalue)) oldvalue <- as.character(oldvalue)
  if (is.factor(newvalue)) newvalue <- as.character(newvalue)
  # Start from a copy of the input and overwrite matched positions in place
  recoded <- data
  for (key in unique(oldvalue)) {
    recoded[data == key] <- newvalue[oldvalue == key]
  }
  recoded
}
#recode dataframes
#everyone
everyone2 <- recoderFunc(everyone, vref$V1, vref$V2)
vogueA2 <- recoderFunc(vogueA, vref$V1, vref$V2)
vogueB2 <- recoderFunc(vogueB, vref$V1, vref$V2)
vogue1b2a <- recoderFunc(vogue1b2, vref$V1, vref$V2)
#rename column
everyone2 <- dplyr::rename(everyone2, Viruses = Viral_Family)
vogueA2 <- dplyr::rename(vogueA2, Viruses = Viral_Family)
vogueB2 <- dplyr::rename(vogueB2, Viruses = Viral_Family)
vogue1b2a <- dplyr::rename(vogue1b2a, Viruses = Viral_Family)
#gathering like types
everyone3 <- ddply(everyone2,c("Viruses"),numcolwise(sum)) #includes all columns
vogueA3 <- ddply(vogueA2,c("Viruses"),numcolwise(sum)) #includes all columns
vogueB3 <- ddply(vogueB2,c("Viruses"),numcolwise(sum)) #includes all columns
vogue1b2b <- ddply(vogue1b2a,c("Viruses"),numcolwise(sum)) #includes all columns
#na to zero
everyone3[is.na(everyone3)] <- 0
vogueA3[is.na(vogueA3)] <- 0
vogueB3[is.na(vogueB3)] <- 0
vogue1b2b[is.na(vogue1b2b)] <- 0
#write to file
# write.csv(everyone3, "viral_groups_all.csv")
# write.csv(vogueA3, "viral_groups_1A.csv")
# write.csv(vogueB3, "viral_groups_1B.csv")
# write.csv(vogue1b2b, "viral_groups_1B2.csv")
#####
#same data set but minus papillomaviridae
everyone4 <- everyone3[!everyone3$Viruses == "Papillomaviridae",]
vogueA4 <- vogueA3[!vogueA3$Viruses == "Papillomaviridae",]
vogueB4 <- vogueB3[!vogueB3$Viruses == "Papillomaviridae",]
vogue1b2c <- vogue1b2b[!vogue1b2b$Viruses == "Papillomaviridae",]
#write to file
# write.csv(everyone4, "viral_groups_minus_papillomaviridae_all.csv")
# write.csv(vogueA4, "viral_groups_minus_papillomaviridae_1A.csv")
# write.csv(vogueB4, "viral_groups_minus_papillomaviridae_1B.csv")
# write.csv(vogue1b2c, "viral_groups_minus_papillomaviridae_1B2.csv")
############################################################################################################################################################
#NTCs and Positive Controls assign ncbi code to species
#call data
total <- read.csv("DNA_RNA_phage_viral_species_NTCs_PosCtrl.csv")
taxinfo <- taxize::ncbi_get_taxon_summary(total$Var1, rank="family") #assign number to species; assign to species, family and genus and get same answer
#write taxinfo to file
# write.csv(taxinfo, "viral_ncbi_classification_NTCs_PosCtrl.csv")
#Now we know what is in NTCs, Adeno and Entero
#merge with "DNA_RNA_phage_species"
total2 <- dplyr::rename(total, uid = Var1)
#omit extra column
total2$X <- NULL
#merge
total3 <- join(total2, taxinfo, type="full")
#collapse like ids
total3 <- ddply(total3,c("name"),numcolwise(sum)) #includes all columns
#write to file
# write.csv(total3, "DNA_RNA_phage_viral_species_NTCs_PosCtrl_v2.csv")
| /assign_ncbi_species_fam_genus.R | no_license | KeshiniD/Vogue | R | false | false | 14,250 | r | #assign ncbi code to species, family and genus
#call data
total <- read.csv("DNA_RNA_phage_viral_species_all_v2.csv")
#select ncbi code
viral <- total %>%
select(Viral_Species)
taxinfo <- taxize::ncbi_get_taxon_summary(viral$Viral_Species, rank="family") #assign number to species; assign to species, family and genus and get same answer
###this will give species and will have to manually look at the unknowns
#NCBI_family.R and NCBI_genus.R were able to assign viral family and genus
#NCBI_species.R assigns species
#write taxinfo to file
# write.csv(taxinfo, "viral_ncbi_code_species.csv")
########################################################################################
#recode
#redone for filtered files; Sept28-16
#load packages
library(plyr)
library(dplyr)
library(ggplot2)
library(tidyr)
#call data and recode for DNa, RNA and phage
everyone <- read.csv("DNA_RNA_phage_viral_species_all_v2.csv", stringsAsFactors = FALSE)
vogueA <- read.csv("DNA_RNA_phage_viral_species_1A_v2.csv", stringsAsFactors = FALSE)
vogueB <- read.csv("DNA_RNA_phage_viral_species_1B_v2.csv", stringsAsFactors = FALSE)
vogue1b2 <- read.csv("DNA_RNA_phage_viral_species_1B2_v2.csv", stringsAsFactors = FALSE)
#omit empty column
everyone$X <- NULL
everyone$Total_Reads <- NULL
vogueA$X <- NULL
vogueA$Total_Reads <- NULL
vogueB$X <- NULL
vogueB$Total_Reads <- NULL
vogue1b2$X <- NULL
vogue1b2$Total_Reads <- NULL
#ref manual
vref <- read.csv("ref_ncbi_species_virome.csv", header=FALSE, stringsAsFactors = FALSE)
#omit empty rows and columns
# vref <- vref %>%
# select(V1, V2)
# vref <- vref[c(1:72),]
#recode ncbi code to species
#recode function
# Recode values in `data`: every occurrence of an entry of `oldvalue` is
# replaced by the corresponding entry of `newvalue`; unmatched values pass
# through unchanged. Works on vectors and (via element-wise ==) data frames.
recoderFunc <- function(data, oldvalue, newvalue) {
  # Coerce any factor inputs to character so matching is done on labels,
  # not on underlying integer codes
  if (is.factor(data)) data <- as.character(data)
  if (is.factor(oldvalue)) oldvalue <- as.character(oldvalue)
  if (is.factor(newvalue)) newvalue <- as.character(newvalue)
  # Start from a copy of the input and overwrite matched positions in place
  recoded <- data
  for (key in unique(oldvalue)) {
    recoded[data == key] <- newvalue[oldvalue == key]
  }
  recoded
}
#recode dataframes
#everyone
everyone2 <- recoderFunc(everyone, vref$V1, vref$V2)
vogueA2 <- recoderFunc(vogueA, vref$V1, vref$V2)
vogueB2 <- recoderFunc(vogueB, vref$V1, vref$V2)
vogue1b2a <- recoderFunc(vogue1b2, vref$V1, vref$V2)
#rename column
# everyone2 <- dplyr::rename(everyone2, Virus_Type = Viral_Species)
# vogueA2 <- dplyr::rename(vogueA2, Virus_Type = Viral_Family)
# vogueB2 <- dplyr::rename(vogueB2, Virus_Type = Viral_Family)
# vogue1b2a <- dplyr::rename(vogue1b2a, Virus_Type = Viral_Family)
#make columns into integers
everyone2[, -1] <- lapply(everyone2[, -1], as.integer)
vogueA2[, -1] <- lapply(vogueA2[, -1], as.integer)
vogueB2[, -1] <- lapply(vogueB2[, -1], as.integer)
vogue1b2a[, -1] <- lapply(vogue1b2a[, -1], as.integer)
#gathering like types
everyone3 <- ddply(everyone2,c("Viral_Species"),numcolwise(sum)) #includes all columns
vogueA3 <- ddply(vogueA2,c("Viral_Species"),numcolwise(sum)) #includes all columns
vogueB3 <- ddply(vogueB2,c("Viral_Species"),numcolwise(sum)) #includes all columns
vogue1b2b <- ddply(vogue1b2a,c("Viral_Species"),numcolwise(sum)) #includes all columns
#na to zero
everyone3[is.na(everyone3)] <- 0
vogueA3[is.na(vogueA3)] <- 0
vogueB3[is.na(vogueB3)] <- 0
vogue1b2b[is.na(vogue1b2b)] <- 0
#remove '1'
#remove #1 (root)
everyone4 <- everyone3[!everyone3$Viral_Species == "1",]
vogueA4 <- vogueA3[!vogueA3$Viral_Species == "1",]
vogueB4 <- vogueB3[!vogueB3$Viral_Species == "1",]
vogue1b2c <- vogue1b2b[!vogue1b2b$Viral_Species == "1",]
#write to file
# write.csv(everyone4, "DNA_RNA_phage_viral_species_all_v3.csv")
# write.csv(vogueA4, "DNA_RNA_phage_viral_species_1A_v3.csv")
# write.csv(vogueB4, "DNA_RNA_phage_viral_species_1B_v3.csv")
# write.csv(vogue1b2c, "DNA_RNA_phage_viral_species_1B2_v3.csv")
########################################################
#assign ncbi code to genus
#ref manual
vref <- read.csv("ref_ncbi_genus_virome.csv", header=FALSE, stringsAsFactors = FALSE)
#recode dataframes
#everyone
everyone2 <- recoderFunc(everyone, vref$V1, vref$V2)
vogueA2 <- recoderFunc(vogueA, vref$V1, vref$V2)
vogueB2 <- recoderFunc(vogueB, vref$V1, vref$V2)
vogue1b2a <- recoderFunc(vogue1b2, vref$V1, vref$V2)
#rename column
everyone2 <- dplyr::rename(everyone2, Viral_Genus = Viral_Species)
vogueA2 <- dplyr::rename(vogueA2, Viral_Genus = Viral_Species)
vogueB2 <- dplyr::rename(vogueB2, Viral_Genus = Viral_Species)
vogue1b2a <- dplyr::rename(vogue1b2a, Viral_Genus = Viral_Species)
#make columns into integers
everyone2[, -1] <- lapply(everyone2[, -1], as.integer)
vogueA2[, -1] <- lapply(vogueA2[, -1], as.integer)
vogueB2[, -1] <- lapply(vogueB2[, -1], as.integer)
vogue1b2a[, -1] <- lapply(vogue1b2a[, -1], as.integer)
#gathering like types
everyone3 <- ddply(everyone2,c("Viral_Genus"),numcolwise(sum)) #includes all columns
vogueA3 <- ddply(vogueA2,c("Viral_Genus"),numcolwise(sum)) #includes all columns
vogueB3 <- ddply(vogueB2,c("Viral_Genus"),numcolwise(sum)) #includes all columns
vogue1b2b <- ddply(vogue1b2a,c("Viral_Genus"),numcolwise(sum)) #includes all columns
#na to zero
everyone3[is.na(everyone3)] <- 0
vogueA3[is.na(vogueA3)] <- 0
vogueB3[is.na(vogueB3)] <- 0
vogue1b2b[is.na(vogue1b2b)] <- 0
#remove '1'
everyone4 <- everyone3[!everyone3$Viral_Genus == "1",]
vogueA4 <- vogueA3[!vogueA3$Viral_Genus == "1",]
vogueB4 <- vogueB3[!vogueB3$Viral_Genus == "1",]
vogue1b2c <- vogue1b2b[!vogue1b2b$Viral_Genus == "1",]
#write to file
# write.csv(everyone4, "DNA_RNA_phage_viral_genus_all.csv")
# write.csv(vogueA4, "DNA_RNA_phage_viral_genus_1A.csv")
# write.csv(vogueB4, "DNA_RNA_phage_viral_genus_1B.csv")
# write.csv(vogue1b2c, "DNA_RNA_phage_viral_genus_1B2.csv")
############################
#assign ncbi code to family
#ref manual
vref <- read.csv("ref_ncbi_family_virome.csv", header=FALSE, stringsAsFactors = FALSE)
#recode dataframes
#everyone
everyone2 <- recoderFunc(everyone, vref$V1, vref$V2)
vogueA2 <- recoderFunc(vogueA, vref$V1, vref$V2)
vogueB2 <- recoderFunc(vogueB, vref$V1, vref$V2)
vogue1b2a <- recoderFunc(vogue1b2, vref$V1, vref$V2)
#rename column
everyone2 <- dplyr::rename(everyone2, Viral_Family = Viral_Species)
vogueA2 <- dplyr::rename(vogueA2, Viral_Family = Viral_Species)
vogueB2 <- dplyr::rename(vogueB2, Viral_Family = Viral_Species)
vogue1b2a <- dplyr::rename(vogue1b2a, Viral_Family = Viral_Species)
#make columns into integers
everyone2[, -1] <- lapply(everyone2[, -1], as.integer)
vogueA2[, -1] <- lapply(vogueA2[, -1], as.integer)
vogueB2[, -1] <- lapply(vogueB2[, -1], as.integer)
vogue1b2a[, -1] <- lapply(vogue1b2a[, -1], as.integer)
#gathering like types
everyone3 <- ddply(everyone2,c("Viral_Family"),numcolwise(sum)) #includes all columns
vogueA3 <- ddply(vogueA2,c("Viral_Family"),numcolwise(sum)) #includes all columns
vogueB3 <- ddply(vogueB2,c("Viral_Family"),numcolwise(sum)) #includes all columns
vogue1b2b <- ddply(vogue1b2a,c("Viral_Family"),numcolwise(sum)) #includes all columns
#na to zero
everyone3[is.na(everyone3)] <- 0
vogueA3[is.na(vogueA3)] <- 0
vogueB3[is.na(vogueB3)] <- 0
vogue1b2b[is.na(vogue1b2b)] <- 0
#remove '1'
everyone4 <- everyone3[!everyone3$Viral_Family == "1",]
vogueA4 <- vogueA3[!vogueA3$Viral_Family == "1",]
vogueB4 <- vogueB3[!vogueB3$Viral_Family == "1",]
vogue1b2c <- vogue1b2b[!vogue1b2b$Viral_Family == "1",]
#write to file
# write.csv(everyone4, "DNA_RNA_phage_viral_family_all.csv")
# write.csv(vogueA4, "DNA_RNA_phage_viral_family_1A.csv")
# write.csv(vogueB4, "DNA_RNA_phage_viral_family_1B.csv")
# write.csv(vogue1b2c, "DNA_RNA_phage_viral_family_1B2.csv")
#########################
#viral family to type
#call data and recode for DNa, RNA and phage
everyone <- read.csv("DNA_RNA_phage_viral_family_all.csv", stringsAsFactors = FALSE)
vogueA <- read.csv("DNA_RNA_phage_viral_family_1A.csv", stringsAsFactors = FALSE)
vogueB <- read.csv("DNA_RNA_phage_viral_family_1B.csv", stringsAsFactors = FALSE)
vogue1b2 <- read.csv("DNA_RNA_phage_viral_family_1B2.csv", stringsAsFactors = FALSE)
#omit empty column
everyone$X <- NULL
vogueA$X <- NULL
vogueB$X <- NULL
vogue1b2$X <- NULL
#ref manual
vref <- read.csv("ref_family_type.csv", header=FALSE, stringsAsFactors = FALSE)
#omit empty rows and columns
# vref <- vref %>%
# select(V1, V2)
# vref <- vref[c(1:72),]
#recode ncbi code to species
#recode function
# Recode values in `data`: every occurrence of an entry of `oldvalue` is
# replaced by the corresponding entry of `newvalue`; unmatched values pass
# through unchanged. Works on vectors and (via element-wise ==) data frames.
recoderFunc <- function(data, oldvalue, newvalue) {
  # Coerce any factor inputs to character so matching is done on labels,
  # not on underlying integer codes
  if (is.factor(data)) data <- as.character(data)
  if (is.factor(oldvalue)) oldvalue <- as.character(oldvalue)
  if (is.factor(newvalue)) newvalue <- as.character(newvalue)
  # Start from a copy of the input and overwrite matched positions in place
  recoded <- data
  for (key in unique(oldvalue)) {
    recoded[data == key] <- newvalue[oldvalue == key]
  }
  recoded
}
#recode dataframes
#everyone
everyone2 <- recoderFunc(everyone, vref$V1, vref$V2)
vogueA2 <- recoderFunc(vogueA, vref$V1, vref$V2)
vogueB2 <- recoderFunc(vogueB, vref$V1, vref$V2)
vogue1b2a <- recoderFunc(vogue1b2, vref$V1, vref$V2)
#rename column
everyone2 <- dplyr::rename(everyone2, Viral_Type = Viral_Family)
vogueA2 <- dplyr::rename(vogueA2, Viral_Type = Viral_Family)
vogueB2 <- dplyr::rename(vogueB2, Viral_Type = Viral_Family)
vogue1b2a <- dplyr::rename(vogue1b2a, Viral_Type = Viral_Family)
#gathering like types
everyone3 <- ddply(everyone2,c("Viral_Type"),numcolwise(sum)) #includes all columns
vogueA3 <- ddply(vogueA2,c("Viral_Type"),numcolwise(sum)) #includes all columns
vogueB3 <- ddply(vogueB2,c("Viral_Type"),numcolwise(sum)) #includes all columns
vogue1b2b <- ddply(vogue1b2a,c("Viral_Type"),numcolwise(sum)) #includes all columns
#na to zero
everyone3[is.na(everyone3)] <- 0
vogueA3[is.na(vogueA3)] <- 0
vogueB3[is.na(vogueB3)] <- 0
vogue1b2b[is.na(vogue1b2b)] <- 0
#write to file
# write.csv(everyone3, "viral_type_all.csv")
# write.csv(vogueA3, "viral_type_1A.csv")
# write.csv(vogueB3, "viral_type_1B.csv")
# write.csv(vogue1b2b, "viral_type_1B2.csv")
#######
#wanted to make groupings but decided to condense family, and add types too
#viral family to type
#call data and recode for DNa, RNA and phage
everyone <- read.csv("DNA_RNA_phage_viral_family_all.csv", stringsAsFactors = FALSE)
vogueA <- read.csv("DNA_RNA_phage_viral_family_1A.csv", stringsAsFactors = FALSE)
vogueB <- read.csv("DNA_RNA_phage_viral_family_1B.csv", stringsAsFactors = FALSE)
vogue1b2 <- read.csv("DNA_RNA_phage_viral_family_1B2.csv", stringsAsFactors = FALSE)
#omit empty column
everyone$X <- NULL
vogueA$X <- NULL
vogueB$X <- NULL
vogue1b2$X <- NULL
#ref manual
vref <- read.csv("ref_familyandtype_condensed.csv", header=FALSE, stringsAsFactors = FALSE)
#omit empty rows and columns
# vref <- vref %>%
# select(V1, V2)
# vref <- vref[c(1:72),]
#recode ncbi code to species
#recode function
# Recode values in `data`: every occurrence of an entry of `oldvalue` is
# replaced by the corresponding entry of `newvalue`; unmatched values pass
# through unchanged. Works on vectors and (via element-wise ==) data frames.
recoderFunc <- function(data, oldvalue, newvalue) {
  # Coerce any factor inputs to character so matching is done on labels,
  # not on underlying integer codes
  if (is.factor(data)) data <- as.character(data)
  if (is.factor(oldvalue)) oldvalue <- as.character(oldvalue)
  if (is.factor(newvalue)) newvalue <- as.character(newvalue)
  # Start from a copy of the input and overwrite matched positions in place
  recoded <- data
  for (key in unique(oldvalue)) {
    recoded[data == key] <- newvalue[oldvalue == key]
  }
  recoded
}
#recode dataframes
#everyone
everyone2 <- recoderFunc(everyone, vref$V1, vref$V2)
vogueA2 <- recoderFunc(vogueA, vref$V1, vref$V2)
vogueB2 <- recoderFunc(vogueB, vref$V1, vref$V2)
vogue1b2a <- recoderFunc(vogue1b2, vref$V1, vref$V2)
#rename column
everyone2 <- dplyr::rename(everyone2, Viruses = Viral_Family)
vogueA2 <- dplyr::rename(vogueA2, Viruses = Viral_Family)
vogueB2 <- dplyr::rename(vogueB2, Viruses = Viral_Family)
vogue1b2a <- dplyr::rename(vogue1b2a, Viruses = Viral_Family)
#gathering like types
everyone3 <- ddply(everyone2,c("Viruses"),numcolwise(sum)) #includes all columns
vogueA3 <- ddply(vogueA2,c("Viruses"),numcolwise(sum)) #includes all columns
vogueB3 <- ddply(vogueB2,c("Viruses"),numcolwise(sum)) #includes all columns
vogue1b2b <- ddply(vogue1b2a,c("Viruses"),numcolwise(sum)) #includes all columns
#na to zero
everyone3[is.na(everyone3)] <- 0
vogueA3[is.na(vogueA3)] <- 0
vogueB3[is.na(vogueB3)] <- 0
vogue1b2b[is.na(vogue1b2b)] <- 0
#write to file
# write.csv(everyone3, "viral_groups_all.csv")
# write.csv(vogueA3, "viral_groups_1A.csv")
# write.csv(vogueB3, "viral_groups_1B.csv")
# write.csv(vogue1b2b, "viral_groups_1B2.csv")
#####
#same data set but minus papillomaviridae
everyone4 <- everyone3[!everyone3$Viruses == "Papillomaviridae",]
vogueA4 <- vogueA3[!vogueA3$Viruses == "Papillomaviridae",]
vogueB4 <- vogueB3[!vogueB3$Viruses == "Papillomaviridae",]
vogue1b2c <- vogue1b2b[!vogue1b2b$Viruses == "Papillomaviridae",]
#write to file
# write.csv(everyone4, "viral_groups_minus_papillomaviridae_all.csv")
# write.csv(vogueA4, "viral_groups_minus_papillomaviridae_1A.csv")
# write.csv(vogueB4, "viral_groups_minus_papillomaviridae_1B.csv")
# write.csv(vogue1b2c, "viral_groups_minus_papillomaviridae_1B2.csv")
############################################################################################################################################################
#NTCs and Positive Controls assign ncbi code to species
#call data
total <- read.csv("DNA_RNA_phage_viral_species_NTCs_PosCtrl.csv")
taxinfo <- taxize::ncbi_get_taxon_summary(total$Var1, rank="family") #assign number to species; assign to species, family and genus and get same answer
#write taxinfo to file
# write.csv(taxinfo, "viral_ncbi_classification_NTCs_PosCtrl.csv")
#Now we know what is in NTCs, Adeno and Entero
#merge with "DNA_RNA_phage_species"
total2 <- dplyr::rename(total, uid = Var1)
#omit extra column
total2$X <- NULL
#merge
total3 <- join(total2, taxinfo, type="full")
#collapse like ids
total3 <- ddply(total3,c("name"),numcolwise(sum)) #includes all columns
#write to file
# write.csv(total3, "DNA_RNA_phage_viral_species_NTCs_PosCtrl_v2.csv")
|
#importing necessary libraries
library(tidyr) #tidyr and stringr used for cleaning data
library(stringr)
library(plyr) #plyr and dplyr used for aggregating data
library(dplyr)
library(ggplot2) #ggplot2 and plotly used for data visualition
library(plotly)
library(ggmap) #ggmap used for geocode function to get latitude and longitude info for cities
#Importing Illegal Immigration data set from Kaggle
setwd('/home/myProjects/illegalImmigration/')
arrests <- read.csv("arrests.csv")
arrests_loc <- read.csv("arrestLocations.csv")
#attempting to clean the untidy dataframe
# Reshape wide year/demographic columns into long form (one row per
# Border/Sector/State x year x demographic), then split the combined
# "X2000.All"-style column name into Year and Demographic.
# NOTE(review): gather()/separate() are superseded by pivot_longer() /
# separate_wider_delim() in current tidyr -- consider migrating.
arrests <- gather(arrests, Description, Number_Arrested, -Border, -Sector, -State.Territory)
arrests <- separate(arrests, Description, c("Year", "Demographic"))
#removing the X's from the Year column
# read.csv prefixed numeric column names with "X" (e.g. "X2000")
arrests$Year <- gsub(pattern = "X", replacement = "", x = arrests$Year)
#the Year column is currently a character vector and we need it to be a numerical vector to be able to create
#meaningful graphs
arrests$Year <- as.integer(arrests$Year)
#changing "All" in the Demographic column to "All Immigrants" to make it more clear
arrests$Demographic <- str_replace(arrests$Demographic, "All", "All Immigrants")
#creating a new dataframe with yearly arrest totals
#it appears the original dataframe already included totals as observations where Border == United States
# NOTE(review): group_by() here adds grouping metadata but no summarise()
# follows, so it has no effect on the result -- confirm it can be dropped
totals <- arrests %>%
group_by(Year, Demographic) %>%
filter(Border == "United States") %>%
arrange(Demographic)
#creating an area plot comparing yearly arrest totals of all immigrants and of only mexican immigrants
# One filled area per demographic; semi-transparent so the overlapping
# "Mexicans" series remains visible underneath "All Immigrants".
# NOTE(review): position = "dodge" is not a meaningful adjustment for
# geom_area (it is a bar-chart adjustment) -- "identity" is presumably the
# intent; confirm the rendered plot is unchanged before altering it.
tot <- ggplot(totals, aes(x = Year, y = Number_Arrested, fill = Demographic)) +
geom_area(alpha = 0.65, position = "dodge") +
scale_fill_manual(values = c("skyblue1", "skyblue4")) +
xlab("Year") +
ylab("Total Arrests") +
ggtitle("Total Illegal Immigration Arrests") +
theme_minimal() +
# ~17 x-axis breaks: one tick per year of data
scale_x_continuous(breaks = scales::pretty_breaks(n = 17)) +
theme(axis.text.x = element_text(angle = 45, hjust = 1))
# render as an interactive plotly widget
ggplotly(tot)
#creating a new dataframe with yearly arrest totals by border
#again the original dataframe already included this totals as observations where Sector == All
# NOTE(review): as with `totals` above, the group_by() has no effect
# because nothing aggregates afterwards -- confirm it can be dropped
by_border <- arrests %>%
group_by(Year, Demographic) %>%
filter(Sector == "All") %>%
arrange(Demographic)
#Since the arrest totals are so much higher for the southwest than for the other two borders it may make more
#sense to create individual graphs for each border instead of facet wrapping.
#To avoid rewriting the code for the graph for each border I chose to write a function that will create a graph
#for a given border.
# Interactive area chart of yearly arrest totals for one border region.
# `x`     -- value of the Border column to plot (e.g. "Coast", "North")
# `title` -- chart title
# Reads the `by_border` data frame from the calling environment and
# returns a plotly widget.
border <- function(x, title) {
  # Restrict the pre-aggregated border totals to the requested region
  plot_data <- filter(by_border, Border == x)
  p <- ggplot(plot_data, aes(x = Year, y = Number_Arrested, fill = Demographic)) +
    geom_area(alpha = 0.65, position = "dodge") +
    scale_fill_manual(values = c("skyblue1", "skyblue4")) +
    xlab("Year") +
    ylab("Total Arrests") +
    ggtitle(title) +
    theme_minimal() +
    # one x-axis tick per year of data
    scale_x_continuous(breaks = scales::pretty_breaks(n = 17)) +
    theme(axis.text.x = element_text(angle = 45, hjust = 1))
  ggplotly(p)
}
border("Coast", "Illegal Immigration Arrests Along the Coast")
border("North", "Illegal Immigration Arrests at Northern Border")
border("Southwest", "Illegal Immigration Arrests at Southwest Border")
#This comparision is nice but it is based on the Sectors with the 8 highest arrest total from 2000. It is
# possible that in later years the Sectors with the highest arrest totals changed. Also we can only see
# the arrest totals from 4 of the 17 years that we have data on. Unfortunately, adding more years to this plot
# would make it too hard to read, and it would be tedious to rewrite the code for a bar plot 17 times to look
# at the information for each year individually. For this reason I chose to write a function that can find the
# Sectors with the 8 highest arrest totals for a given year and create a bar plot comparing the arrest totals
# for all illegal immigrants and mexican immigrants for that year.
# Interactive horizontal bar plot of the 8 sectors with the most arrests in
# a given year, comparing totals for all illegal immigrants against Mexican
# immigrants only. Reads the `arrests` data frame from the calling
# environment; returns a plotly widget.
yearPlot <- function(yr) {
  # Drop aggregate rows (blank Sector / "All") so only individual sectors
  # remain, then keep only the requested year
  temp <- filter(arrests, Sector != "", Sector != "All", Year == yr)
  # Rank sectors by their "All Immigrants" arrest totals for that year
  top8 <- temp %>%
    filter(Demographic == "All Immigrants") %>%
    arrange(desc(Number_Arrested))
  # head() is safe when fewer than 8 sectors exist for the year;
  # the previous top8[1:8, ] would pad the result with NA rows in that case
  top8 <- head(top8, 8)
  # Keep both demographics, but only for the top-ranked sectors
  temp <- filter(temp, Sector %in% top8$Sector)
  # Order factor levels so the smaller "Mexicans" bars are drawn on top of
  # the "All Immigrants" bars instead of being hidden behind them
  temp$Demographic <- ordered(temp$Demographic, levels = c("Mexicans", "All Immigrants"))
  # Horizontal bar plot comparing the two demographics per sector
  plot <- ggplot(temp, aes(x = Sector, y = Number_Arrested, fill = Demographic)) +
    geom_bar(stat = "identity", position = "identity", alpha = .65) +
    coord_flip() +
    scale_fill_manual(values = c("skyblue4", "skyblue1")) +
    ylab("Total Arrests") +
    theme_minimal() +
    theme(axis.text.y = element_text(size = 7, angle = 30),
          axis.title.y = element_blank(),
          axis.text.x = element_blank())
  ggplotly(plot)  # make the plot interactive
}
yearPlot(2000)
yearPlot(2016)
# getting a map of the United States to show the areas with the most arrests
# this was the example US map from the plotly documentation page
g <- list(
scope = "usa",
projection = list(type = "albers usa"),
showland = TRUE,
landcolor = toRGB("gray95"),
subunitcolor = toRGB("gray85"),
countrycolor = toRGB("gray85"),
countrywidth = 0.5,
subunitwidth = 0.5
)
#creating a function to create a map of the areas with the most arrests for a given year
# Interactive US map of the sectors with the most arrests in a given year.
# Marker size scales with arrest count; color distinguishes demographics.
# Reads `arrests_loc` (geocoded arrest totals) and the plotly geo layout
# list `g` from the calling environment. Prints the plotly widget.
mapPlot <- function(yr, title = paste("Sectors with the Most Arrests in", as.character(yr))) {
  # Subset the geocoded arrest data to the requested year
  year_data <- filter(arrests_loc, Year == yr)
  fig <- plot_geo(year_data, lat = ~lat, lon = ~lon,
                  color = ~Demographic,
                  colors = c("skyblue1", "skyblue4"),
                  size = ~Number_Arrested,
                  sizes = c(10, 600),
                  alpha = 0.65,
                  # hover tooltip for each marker
                  text = ~paste('Demographic: ', Demographic,
                                '</br> Sector: ', Sector,
                                '</br> Arrests: ', Number_Arrested)) %>%
    add_markers() %>%
    layout(title = title, geo = g)
  print(fig)
}
mapPlot(2000)
mapPlot(2016)
#creating separate dataframes with just "Mexicans" arrests and just "All Immigrants" arrests to find the percentage
# of arrests accounted for by Mexican immigrants each year
mexican_arrests <- filter(arrests, Border == "United States", Demographic == "Mexicans")
all_arrests <- filter(arrests, Border == "United States", Demographic == "All Immigrants")
#creating a new dataframe with these percentages (rounded to 2 decimal places) as well as the number of Mexican
# immigrants arrested and the total number of arrests for each year
# NOTE(review): the division below assumes mexican_arrests and all_arrests
# are row-aligned by Year (both come from the same filtered source); a join
# on Year would make that explicit -- verify before reusing this pattern
percentages <- data.frame(all_arrests$Year,
                          mexican_arrests$Number_Arrested,
                          all_arrests$Number_Arrested,
                          round(mexican_arrests$Number_Arrested / all_arrests$Number_Arrested * 100, digits = 2))
names(percentages) <- c("Year","Mexicans_Arrested", "Total_Arrests", "Percentage")
# paste0() is the idiomatic equivalent of paste(..., sep = ''); output is identical
percentages$Percentage <- paste0(percentages$Percentage, '%')
| /Illegal_Immigration.R | no_license | brianhumphreys/US_Illegal_Immigration_Arrests | R | false | false | 7,670 | r | #importing necessary libraries
library(tidyr) #tidyr and stringr used for cleaning data
library(stringr)
library(plyr) #plyr and dplyr used for aggregating data
library(dplyr)
library(ggplot2) #ggplot2 and plotly used for data visualition
library(plotly)
library(ggmap) #ggmap used for geocode function to get latitude and longitude info for cities
#Importing Illegal Immigration data set from Kaggle
setwd('/home/myProjects/illegalImmigration/')
arrests <- read.csv("arrests.csv")
arrests_loc <- read.csv("arrestLocations.csv")
#attempting to clean the untidy dataframe
arrests <- gather(arrests, Description, Number_Arrested, -Border, -Sector, -State.Territory)
arrests <- separate(arrests, Description, c("Year", "Demographic"))
#removing the X's from the Year column
arrests$Year <- gsub(pattern = "X", replacement = "", x = arrests$Year)
#the Year column is currently a character vector and we need it to be a numerical vector to be able to create
#meaningful graphs
arrests$Year <- as.integer(arrests$Year)
#changing "All" in the Demographic column to "All Immigrants" to make it more clear
arrests$Demographic <- str_replace(arrests$Demographic, "All", "All Immigrants")
#creating a new dataframe with yearly arrest totals
#it appears the original dataframe already included totals as observations where Border == United States
totals <- arrests %>%
group_by(Year, Demographic) %>%
filter(Border == "United States") %>%
arrange(Demographic)
#creating an area plot comparing yearly arrest totals of all immigrants and of only mexican immigrants
tot <- ggplot(totals, aes(x = Year, y = Number_Arrested, fill = Demographic)) +
geom_area(alpha = 0.65, position = "dodge") +
scale_fill_manual(values = c("skyblue1", "skyblue4")) +
xlab("Year") +
ylab("Total Arrests") +
ggtitle("Total Illegal Immigration Arrests") +
theme_minimal() +
scale_x_continuous(breaks = scales::pretty_breaks(n = 17)) +
theme(axis.text.x = element_text(angle = 45, hjust = 1))
ggplotly(tot)
#creating a new dataframe with yearly arrest totals by border
#again the original dataframe already included this totals as observations where Sector == All
by_border <- arrests %>%
group_by(Year, Demographic) %>%
filter(Sector == "All") %>%
arrange(Demographic)
#Since the arrest totals are so much higher for the southwest than for the other two borders it may make more
#sense to create individual graphs for each border instead of facet wrapping.
#To avoid rewriting the code for the graph for each border I chose to write a function that will create a graph
#for a given border.
# Area chart of yearly arrest totals for a single border ("Coast",
# "North" or "Southwest"), overlaying the "All Immigrants" and
# "Mexicans" demographics, rendered interactively via plotly.
border <- function(x, title) {
  border_data <- filter(by_border, Border == x)
  border_plot <- ggplot(border_data,
                        aes(x = Year, y = Number_Arrested, fill = Demographic)) +
    geom_area(alpha = 0.65, position = "dodge") +
    scale_fill_manual(values = c("skyblue1", "skyblue4")) +
    xlab("Year") +
    ylab("Total Arrests") +
    ggtitle(title) +
    theme_minimal() +
    scale_x_continuous(breaks = scales::pretty_breaks(n = 17)) +
    theme(axis.text.x = element_text(angle = 45, hjust = 1))
  ggplotly(border_plot)
}
border("Coast", "Illegal Immigration Arrests Along the Coast")
border("North", "Illegal Immigration Arrests at Northern Border")
border("Southwest", "Illegal Immigration Arrests at Southwest Border")
#This comparision is nice but it is based on the Sectors with the 8 highest arrest total from 2000. It is
# possible that in later years the Sectors with the highest arrest totals changed. Also we can only see
# the arrest totals from 4 of the 17 years that we have data on. Unfortunately, adding more years to this plot
# would make it too hard to read, and it would be tedious to rewrite the code for a bar plot 17 times to look
# at the information for each year individually. For this reason I chose to write a function that can find the
# Sectors with the 8 highest arrest totals for a given year and create a bar plot comparing the arrest totals
# for all illegal immigrants and mexican immigrants for that year.
# Bar plot of the n sectors with the most arrests in a given year,
# comparing totals for all illegal immigrants vs. Mexican immigrants only.
#
# yr: year to plot
# n:  number of top sectors to show (default 8, matching the original plot)
yearPlot <- function(yr, n = 8) {
  # Drop rows that are aggregates rather than individual sectors,
  # then keep only the requested year.
  temp <- filter(arrests, Sector != "", Sector != "All", Year == yr)
  # Rank sectors by their "All Immigrants" arrest totals for that year.
  topn <- temp %>%
    filter(Demographic == "All Immigrants") %>%
    arrange(desc(Number_Arrested))
  # head() instead of topn[1:n, ]: when fewer than n sectors exist,
  # [1:n, ] would pad the result with all-NA rows.
  topn <- head(topn, n)
  temp <- filter(temp, Sector %in% topn$Sector)
  # Order the factor levels so the smaller "Mexicans" bars are drawn on
  # top of (not hidden behind) the "All Immigrants" bars.
  temp$Demographic <- ordered(temp$Demographic, levels = c("Mexicans", "All Immigrants"))
  # Horizontal bar plot comparing the two demographics per sector.
  plot <- ggplot(temp, aes(x = Sector, y = Number_Arrested, fill = Demographic)) +
    geom_bar(stat = "identity", position = "identity", alpha = .65) +
    coord_flip() +
    scale_fill_manual(values = c("skyblue4", "skyblue1")) +
    ylab("Total Arrests") +
    theme_minimal() +
    theme(axis.text.y = element_text(size = 7, angle = 30),
          axis.title.y = element_blank(),
          axis.text.x = element_blank())
  ggplotly(plot)  # interactive version of the plot
}
yearPlot(2000)
yearPlot(2016)
# getting a map of the United States to show the areas with the most arrests
# this was the example US map from the plotly documentation page
g <- list(
scope = "usa",
projection = list(type = "albers usa"),
showland = TRUE,
landcolor = toRGB("gray95"),
subunitcolor = toRGB("gray85"),
countrycolor = toRGB("gray85"),
countrywidth = 0.5,
subunitwidth = 0.5
)
#creating a function to create a map of the areas with the most arrests for a given year
# Interactive US map of arrest totals for a single year: one bubble per
# sector/demographic, sized by the number of arrests, colored by demographic.
mapPlot <- function(yr, title = paste("Sectors with the Most Arrests in", as.character(yr))) {
  year_data <- filter(arrests_loc, Year == yr)
  geo_plot <- plot_geo(
    year_data,
    lat = ~lat, lon = ~lon,
    color = ~Demographic,
    colors = c("skyblue1", "skyblue4"),
    size = ~Number_Arrested,
    sizes = c(10, 600),
    alpha = 0.65,
    text = ~paste('Demographic: ', Demographic,
                  '</br> Sector: ', Sector,
                  '</br> Arrests: ', Number_Arrested)
  )
  geo_plot <- add_markers(geo_plot)
  geo_plot <- layout(geo_plot, title = title, geo = g)
  print(geo_plot)
}
mapPlot(2000)
mapPlot(2016)
#creating separate dataframes with just "Mexicans" arrests and just "All Immigrants" arrests to find the percentage
# of arrests accounted for by Mexican immigrants each year
mexican_arrests <- filter(arrests, Border == "United States", Demographic == "Mexicans")
all_arrests <- filter(arrests, Border == "United States", Demographic == "All Immigrants")
#creating a new dataframe with these percentages (rounded to 2 decimal places) as well as the number of Mexican
# immigrants arrested and the total number of arrests for each year
percentages <- data.frame(all_arrests$Year,
mexican_arrests$Number_Arrested,
all_arrests$Number_Arrested,
round(mexican_arrests$Number_Arrested / all_arrests$Number_Arrested * 100, digits = 2))
names(percentages) <- c("Year","Mexicans_Arrested", "Total_Arrests", "Percentage")
percentages$Percentage <- paste(percentages$Percentage, '%', sep = '')
|
#!/home/statsadmin/R/bin/Rscript
source('Step_0_init.R')
args <- commandArgs()
idx <- as.numeric(args[length(args)])
.libPaths('ysidi/lib')
dt.full.X <- readRDS(file = sprintf('dtfullwaldxp80_%d.rds',idx))
set.seed(81762+idx+72)
#generate mnar2 for 5-25% by 5% DO
dt.mnar2 <- miss.apply.do(dt.full.X, b.trt=1, b.y=-2, b.X=-2, do=0.10)
dt.mnar2.check <- dt.miss.check(dt.mnar2 ,0.10)
saveRDS(dt.mnar2.check,file = sprintf('waldxp80dochmnar210_%d.rds',idx))
saveRDS(dt.mnar2,file = sprintf('dtwaldxp80mnar210_%d.rds',idx))
| /Step3_1_impose_miss/wald/step_3_1_type_waldxp80_missing_mnar2_percent_10.R | no_license | yuliasidi/Binomial_PE_Progs | R | false | false | 532 | r | #!/home/statsadmin/R/bin/Rscript
source('Step_0_init.R')
args <- commandArgs()
idx <- as.numeric(args[length(args)])
.libPaths('ysidi/lib')
dt.full.X <- readRDS(file = sprintf('dtfullwaldxp80_%d.rds',idx))
set.seed(81762+idx+72)
#generate mnar2 for 5-25% by 5% DO
dt.mnar2 <- miss.apply.do(dt.full.X, b.trt=1, b.y=-2, b.X=-2, do=0.10)
dt.mnar2.check <- dt.miss.check(dt.mnar2 ,0.10)
saveRDS(dt.mnar2.check,file = sprintf('waldxp80dochmnar210_%d.rds',idx))
saveRDS(dt.mnar2,file = sprintf('dtwaldxp80mnar210_%d.rds',idx))
|
#===================
#SimulateRoadMiles.R
#===================
#<doc>
#
## SimulateRoadMiles Module
#### February 11, 2019
#
#This module assigns freeway and arterial lane-miles to metropolitan areas (Marea) and calculates freeway lane-miles per capita.
#
### Model Parameter Estimation
#
#This module has no estimated parameters.
#
### How the Module Works
#
#Users provide inputs on the numbers of freeway lane-miles and arterial lane-miles by Marea and year. In addition to saving these inputs, the module loads the urbanized area population of each Marea and year from the datastore and computes the value of freeway lane-miles per capita. This relative roadway supply measure is used by several other modules.
#
#</doc>
#=============================================
#SECTION 1: ESTIMATE AND SAVE MODEL PARAMETERS
#=============================================
#This module has no parameters. Households are assigned to Bzones based on an
#algorithm implemented in the LocateHouseholds function.
#================================================
#SECTION 2: DEFINE THE MODULE DATA SPECIFICATIONS
#================================================
#Define the data specifications
#------------------------------
SimulateRoadMilesSpecifications <- list(
#Level of geography module is applied at
RunBy = "Region",
#Specify new tables to be created by Inp if any
#Specify new tables to be created by Set if any
#Specify input data
Inp = items(
item(
NAME =
items(
"FwyLaneMi",
"ArtLaneMi"),
FILE = "marea_lane_miles.csv",
TABLE = "Marea",
GROUP = "Year",
TYPE = "distance",
UNITS = "MI",
NAVALUE = -1,
SIZE = 0,
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = "",
UNLIKELY = "",
TOTAL = "",
DESCRIPTION =
items(
"Lane-miles of roadways functionally classified as freeways or expressways in the urbanized portion of the metropolitan area",
"Lane-miles of roadways functionally classified as arterials (but not freeways or expressways) in the urbanized portion of the metropolitan area")
)
),
#Specify data to be loaded from data store
Get = items(
item(
NAME = "Marea",
TABLE = "Marea",
GROUP = "Year",
TYPE = "character",
UNITS = "ID",
PROHIBIT = "",
ISELEMENTOF = ""
),
item(
NAME =
items(
"FwyLaneMi",
"ArtLaneMi"),
TABLE = "Marea",
GROUP = "Year",
TYPE = "distance",
UNITS = "MI",
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = ""
),
item(
NAME = "Marea",
TABLE = "Bzone",
GROUP = "Year",
TYPE = "character",
UNITS = "ID",
PROHIBIT = "",
ISELEMENTOF = ""
),
item(
NAME = "UrbanPop",
TABLE = "Bzone",
GROUP = "Year",
TYPE = "people",
UNITS = "PRSN",
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = ""
)
),
#Specify data to saved in the data store
Set = items(
item(
NAME = "FwyLaneMiPC",
TABLE = "Marea",
GROUP = "Year",
TYPE = "compound",
UNITS = "MI/PRSN",
NAVALUE = -1,
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = "",
SIZE = 0,
DESCRIPTION = "Ratio of urbanized area freeway and expressway lane-miles to urbanized area population"
)
)
)
#Save the data specifications list
#---------------------------------
#' Specifications list for SimulateRoadMiles module
#'
#' A list containing specifications for the SimulateRoadMiles module.
#'
#' @format A list containing 4 components:
#' \describe{
#' \item{RunBy}{the level of geography that the module is run at}
#' \item{Inp}{scenario input data to be loaded into the datastore for this
#' module}
#' \item{Get}{module inputs to be read from the datastore}
#' \item{Set}{module outputs to be written to the datastore}
#' }
#' @source SimulateRoadMiles.R script.
"SimulateRoadMilesSpecifications"
usethis::use_data(SimulateRoadMilesSpecifications, overwrite = TRUE)
#=======================================================
#SECTION 3: DEFINE FUNCTIONS THAT IMPLEMENT THE SUBMODEL
#=======================================================
#This function calculates the freeway lane-miles per capita for the urbanized
#area from the number of freeway lane-miles and the urban area population.
#Main module function that calculates freeway lane-miles per capita
#------------------------------------------------------------------
#' Calculate freeway lane-miles per capita by Marea.
#'
#' \code{SimulateRoadMiles} computes the ratio of urbanized-area freeway
#' lane-miles to urbanized-area population for every Marea.
#'
#' @param L A list containing the components listed in the Get specifications
#' for the module.
#' @return A list containing the components specified in the Set
#' specifications for the module.
#' @name SimulateRoadMiles
#' @import visioneval
#' @export
SimulateRoadMiles <- function(L) {
  # Fix the random seed for reproducibility (kept for parity with other modules)
  set.seed(L$G$Seed)
  # Mareas in datastore order
  Ma <- L$Year$Marea$Marea
  # Freeway lane-miles per Marea, straight from the inputs
  LaneMi_Ma <- L$Year$Marea$FwyLaneMi
  # Urbanized population per Marea: sum Bzone populations, ordered to match Ma
  Pop_Ma <- tapply(L$Year$Bzone$UrbanPop, L$Year$Bzone$Marea, sum)[Ma]
  # Lane-miles per person; define the ratio as 0 where there is no population
  LaneMiPC_Ma <- LaneMi_Ma / Pop_Ma
  LaneMiPC_Ma[Pop_Ma == 0] <- 0
  # Assemble the module's output list
  Out_ls <- initDataList()
  Out_ls$Year$Marea <-
    list(FwyLaneMiPC = LaneMiPC_Ma)
  Out_ls
}
#===============================================================
#SECTION 4: MODULE DOCUMENTATION AND AUXILLIARY DEVELOPMENT CODE
#===============================================================
#Run module automatic documentation
#----------------------------------
documentModule("SimulateRoadMiles")
#Test code to check specifications, loading inputs, and whether datastore
#contains data needed to run module. Return input list (L) to use for developing
#module functions
#-------------------------------------------------------------------------------
# library(filesstrings)
# library(visioneval)
# library(fields)
# source("tests/scripts/test_functions.R")
# #Set up test environment
# TestSetup_ls <- list(
# TestDataRepo = "../Test_Data/VE-State",
# DatastoreName = "Datastore.tar",
# LoadDatastore = TRUE,
# TestDocsDir = "vestate",
# ClearLogs = TRUE,
# # SaveDatastore = TRUE
# SaveDatastore = FALSE
# )
# setUpTests(TestSetup_ls)
# #Run test module
# TestDat_ <- testModule(
# ModuleName = "SimulateoadMiles",
# LoadDatastore = TRUE,
# SaveDatastore = TRUE,
# DoRun = FALSE
# )
# L <- TestDat_$L
# R <- SimulateRoadMiles(L)
| /sources/modules/VESimTransportSupply/R/SimulateRoadMiles.R | permissive | rickdonnelly/VisionEval-Dev | R | false | false | 6,951 | r | #===================
#SimulateRoadMiles.R
#===================
#<doc>
#
## SimulateRoadMiles Module
#### February 11, 2019
#
#This module assigns freeway and arterial lane-miles to metropolitan areas (Marea) and calculates freeway lane-miles per capita.
#
### Model Parameter Estimation
#
#This module has no estimated parameters.
#
### How the Module Works
#
#Users provide inputs on the numbers of freeway lane-miles and arterial lane-miles by Marea and year. In addition to saving these inputs, the module loads the urbanized area population of each Marea and year from the datastore and computes the value of freeway lane-miles per capita. This relative roadway supply measure is used by several other modules.
#
#</doc>
#=============================================
#SECTION 1: ESTIMATE AND SAVE MODEL PARAMETERS
#=============================================
#This module has no parameters. Households are assigned to Bzones based on an
#algorithm implemented in the LocateHouseholds function.
#================================================
#SECTION 2: DEFINE THE MODULE DATA SPECIFICATIONS
#================================================
#Define the data specifications
#------------------------------
SimulateRoadMilesSpecifications <- list(
#Level of geography module is applied at
RunBy = "Region",
#Specify new tables to be created by Inp if any
#Specify new tables to be created by Set if any
#Specify input data
Inp = items(
item(
NAME =
items(
"FwyLaneMi",
"ArtLaneMi"),
FILE = "marea_lane_miles.csv",
TABLE = "Marea",
GROUP = "Year",
TYPE = "distance",
UNITS = "MI",
NAVALUE = -1,
SIZE = 0,
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = "",
UNLIKELY = "",
TOTAL = "",
DESCRIPTION =
items(
"Lane-miles of roadways functionally classified as freeways or expressways in the urbanized portion of the metropolitan area",
"Lane-miles of roadways functionally classified as arterials (but not freeways or expressways) in the urbanized portion of the metropolitan area")
)
),
#Specify data to be loaded from data store
Get = items(
item(
NAME = "Marea",
TABLE = "Marea",
GROUP = "Year",
TYPE = "character",
UNITS = "ID",
PROHIBIT = "",
ISELEMENTOF = ""
),
item(
NAME =
items(
"FwyLaneMi",
"ArtLaneMi"),
TABLE = "Marea",
GROUP = "Year",
TYPE = "distance",
UNITS = "MI",
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = ""
),
item(
NAME = "Marea",
TABLE = "Bzone",
GROUP = "Year",
TYPE = "character",
UNITS = "ID",
PROHIBIT = "",
ISELEMENTOF = ""
),
item(
NAME = "UrbanPop",
TABLE = "Bzone",
GROUP = "Year",
TYPE = "people",
UNITS = "PRSN",
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = ""
)
),
#Specify data to saved in the data store
Set = items(
item(
NAME = "FwyLaneMiPC",
TABLE = "Marea",
GROUP = "Year",
TYPE = "compound",
UNITS = "MI/PRSN",
NAVALUE = -1,
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = "",
SIZE = 0,
DESCRIPTION = "Ratio of urbanized area freeway and expressway lane-miles to urbanized area population"
)
)
)
#Save the data specifications list
#---------------------------------
#' Specifications list for SimulateRoadMiles module
#'
#' A list containing specifications for the SimulateRoadMiles module.
#'
#' @format A list containing 4 components:
#' \describe{
#' \item{RunBy}{the level of geography that the module is run at}
#' \item{Inp}{scenario input data to be loaded into the datastore for this
#' module}
#' \item{Get}{module inputs to be read from the datastore}
#' \item{Set}{module outputs to be written to the datastore}
#' }
#' @source SimulateRoadMiles.R script.
"SimulateRoadMilesSpecifications"
usethis::use_data(SimulateRoadMilesSpecifications, overwrite = TRUE)
#=======================================================
#SECTION 3: DEFINE FUNCTIONS THAT IMPLEMENT THE SUBMODEL
#=======================================================
#This function calculates the freeway lane-miles per capita for the urbanized
#area from the number of freeway lane-miles and the urban area population.
#Main module function that calculates freeway lane-miles per capita
#------------------------------------------------------------------
#' Calculate freeway lane-miles per capita by Marea.
#'
#' \code{SimulateRoadMiles} calculate freeway lane-miles per capita.
#'
#' This function calculates freeway lane-miles per capita for each Marea.
#'
#' @param L A list containing the components listed in the Get specifications
#' for the module.
#' @return A list containing the components specified in the Set
#' specifications for the module.
#' @name SimulateRoadMiles
#' @import visioneval
#' @export
SimulateRoadMiles <- function(L) {
#Set up
#------
#Fix seed as synthesis involves sampling
set.seed(L$G$Seed)
#Define vector of Mareas
Ma <- L$Year$Marea$Marea
#Calculate the freeway lane-miles per capita
#-------------------------------------------
#Calculate freeway lane-miles
FwyLaneMi_Ma <- L$Year$Marea$FwyLaneMi
#Calculate population in the urbanized area
UrbanPop_Ma <-
tapply(L$Year$Bzone$UrbanPop, L$Year$Bzone$Marea, sum)[Ma]
#Calculate freeway lane-miles per capita
FwyLaneMiPC_Ma <- FwyLaneMi_Ma / UrbanPop_Ma
FwyLaneMiPC_Ma[UrbanPop_Ma == 0] <- 0
#Return the results
#------------------
#Initialize output list
Out_ls <- initDataList()
Out_ls$Year$Marea <-
list(FwyLaneMiPC = FwyLaneMiPC_Ma)
#Return the outputs list
Out_ls
}
#===============================================================
#SECTION 4: MODULE DOCUMENTATION AND AUXILLIARY DEVELOPMENT CODE
#===============================================================
#Run module automatic documentation
#----------------------------------
documentModule("SimulateRoadMiles")
#Test code to check specifications, loading inputs, and whether datastore
#contains data needed to run module. Return input list (L) to use for developing
#module functions
#-------------------------------------------------------------------------------
# library(filesstrings)
# library(visioneval)
# library(fields)
# source("tests/scripts/test_functions.R")
# #Set up test environment
# TestSetup_ls <- list(
# TestDataRepo = "../Test_Data/VE-State",
# DatastoreName = "Datastore.tar",
# LoadDatastore = TRUE,
# TestDocsDir = "vestate",
# ClearLogs = TRUE,
# # SaveDatastore = TRUE
# SaveDatastore = FALSE
# )
# setUpTests(TestSetup_ls)
# #Run test module
# TestDat_ <- testModule(
# ModuleName = "SimulateoadMiles",
# LoadDatastore = TRUE,
# SaveDatastore = TRUE,
# DoRun = FALSE
# )
# L <- TestDat_$L
# R <- SimulateRoadMiles(L)
|
#' RImmPort: Enabling ready-for-analysis immunology research data
#'
#' The `RImmPort` package simplifies access to ImmPort data for analysis,
#' as the name implies, in the R statistical environment. It provides a
#' standards-based interface to the ImmPort study data that is in a proprietary format.
#'
#' @docType package
#' @name RImmPort
NULL
# > NULL
| /R/RImmPort.R | no_license | rdshankar/RImmPort | R | false | false | 362 | r | #' RImmPort: Enabling ready-for-analysis immunology research data
#'
#' The `RImmPort` package simplifies access to ImmPort data for analysis,
#' as the name implies, in the R statistical environment. It provides a
#' standards-based interface to the ImmPort study data that is in a proprietary format.
#'
#' @docType package
#' @name RImmPort
NULL
# > NULL
|
#' @title list.dir for git and svn repositories
#' @description list parent directories of svn repositores within a path
#' @param path character, path to seach for svn repositories
#' @param vcs character, vector of what vcs systems to look for, Default: 'git'
#' @return character
#' @examples
#' \dontrun{
#' if(interactive()){
#' find.remote('/data')
#' }
#' }
#' @rdname find.remote
#' @export
find.remote <- function(path, vcs = c('git', 'svn')) {
  # Locate every ".git"/".svn" directory below `path`.
  hits <- dir(path, pattern = sprintf('[.](%s)$', paste0(vcs, collapse = '|')),
              all.files = TRUE, include.dirs = TRUE, recursive = TRUE, full.names = TRUE)
  # Drop repositories nested inside another repository: a hit is pruned when
  # its parent directory lies strictly below the parent directory of an
  # earlier surviving hit. (The original pruning loop compared paths with
  # grepl(), treating them as regular expressions -- paths containing
  # metacharacters such as '.' or '+' could mis-match and sibling dirs with a
  # common prefix were over-pruned -- and it removed indices computed on
  # x[-idx] from the full vector x, an index-mapping bug.)
  parents <- dirname(hits)
  keep <- rep(TRUE, length(hits))
  for (i in seq_along(hits)) {
    if (!keep[i]) next
    nested <- keep & startsWith(parents, paste0(parents[i], '/'))
    keep[nested] <- FALSE
  }
  hits <- hits[keep]
  parents <- parents[keep]
  # One row per surviving repository; `remote` is filled in below.
  ret <- data.frame(vcs = sub('^[.]', '', basename(hits)),
                    dir = parents,
                    remote = NA,
                    stringsAsFactors = FALSE)
  # Resolve the remote URL for each repository (seq_len handles 0 rows).
  for (i in seq_len(nrow(ret))) {
    ret$remote[i] <- query_remote(ret$vcs[i], ret$dir[i])
  }
  ret
}
query_remote <- function(x,y){
if(x=='git'){
this <- sprintf('cat %s/.git/config | grep url',y)
}
if(x=='svn'){
this <- sprintf("svn info %s | grep URL",y)
}
gsub('^(.*?)[=:]\\s*','',system(this,intern = TRUE)[1])
} | /R/find_remote.R | permissive | yonicd/vcs | R | false | false | 1,276 | r | #' @title list.dir for git and svn repositories
#' @description list parent directories of svn repositores within a path
#' @param path character, path to seach for svn repositories
#' @param vcs character, vector of what vcs systems to look for, Default: 'git'
#' @return character
#' @examples
#' \dontrun{
#' if(interactive()){
#' find.remote('/data')
#' }
#' }
#' @rdname find.remote
#' @export
find.remote<-function(path,vcs=c('git','svn')){
x<-dir(path,pattern = sprintf('[.](%s)$',paste0(vcs,collapse='|')),
all.files = TRUE,include.dirs = TRUE,recursive = TRUE,full.names = TRUE)
diff=1000
while(diff>0){
x.now<-x
for(idx in 1:length(x)){
if(idx>length(x)) break
idx.rm<-which(grepl(dirname(x[idx]),x[-idx]))
if(length(idx.rm)>0) x<-x[-idx.rm]
}
diff<-length(x.now)-length(x)
}
ret <- data.frame(vcs=gsub('^[.]{1}','',basename(x)),dir=dirname(x),remote = NA,stringsAsFactors = FALSE)
if(nrow(ret)>0){
for(i in 1:nrow(ret)){
ret$remote[i] <- query_remote(ret$vcs[i],ret$dir[i])
}
}
ret
}
# Look up the remote URL for a single repository.
#
# x: vcs type, 'git' or 'svn'
# y: path to the repository's parent directory
# Returns character(1): the remote URL, or NA when none is found.
query_remote <- function(x, y) {
  if (x == 'git') {
    # Read .git/config directly instead of shelling out to `cat | grep`:
    # portable (works on Windows) and immune to paths containing spaces
    # or shell metacharacters.
    cfg <- file.path(y, '.git', 'config')
    lines <- if (file.exists(cfg)) readLines(cfg, warn = FALSE) else character(0)
    hit <- grep('url', lines, value = TRUE)[1]
  }
  if (x == 'svn') {
    # `svn info` has no single file to read, so a subprocess is still
    # required; shQuote() protects against spaces/metacharacters in the path.
    hit <- system(sprintf('svn info %s | grep URL', shQuote(y)), intern = TRUE)[1]
  }
  # Strip everything up to (and including) the first '=' or ':' separator,
  # e.g. "\turl = https://host/repo.git" -> "https://host/repo.git".
  gsub('^(.*?)[=:]\\s*', '', hit)
}
# Wrap a step in a single-element named list when it carries a name;
# steps without a name are passed through untouched.
name_step <- function(step) {
  step_name <- step$name
  if (is.null(step_name)) {
    return(step)
  }
  stats::setNames(list(step), step_name)
}
# TRUE when `feature` inherits from the fully qualified class name used by
# TensorFlow's dense feature-column hierarchy.
is_dense_column <- function(feature) {
  dense_class <- "tensorflow.python.feature_column.feature_column._DenseColumn"
  inherits(feature, what = dense_class)
}
# Human-readable name of a dtype-like object (e.g. "float32", "string").
# `$name` works for plain R lists and presumably also for Python dtype
# objects via reticulate's `$` method -- kept as-is since switching to
# `[[` would change partial-matching/dispatch semantics.
dtype_chr <- function(x) {
  x$name
}
# Selectors ---------------------------------------------------------------
#' Selectors
#'
#' List of selectors that can be used to specify variables inside
#' steps.
#'
#' @section Selectors:
#'
#' * [has_type()]
#' * [all_numeric()]
#' * [all_nominal()]
#' * [starts_with()]
#' * [ends_with()]
#' * [one_of()]
#' * [matches()]
#' * [contains()]
#' * [everything()]
#'
#' @name selectors
#' @rdname selectors
cur_info_env <- rlang::child_env(rlang::env_parent(rlang::env()))
# Install `x`'s feature names/types as the active selector context and
# return the previous context invisibly (so callers can restore it).
# NOTE(review): `old <- cur_info_env` captures a *reference* to the
# environment, not a snapshot of its contents -- after the assignments
# below, `old` sees the new values too, so "restoring" via
# set_current_info(old) is effectively a no-op. Confirm whether copying
# the two fields was intended.
set_current_info <- function(x) {
  old <- cur_info_env
  cur_info_env$feature_names <- x$feature_names
  cur_info_env$feature_types <- x$feature_types
  invisible(old)
}
# Return the active selector context.
# NOTE(review): `cur_info_env` is an environment created at load time and
# is never NULL, so the %||% error fallback appears unreachable.
current_info <- function() {
  cur_info_env %||% stop("Variable context not set", call. = FALSE)
}
#' Identify the type of the variable.
#'
#' Can only be used inside the [steps] specifications to find
#' variables by type.
#'
#' @param match A list of types to match.
#'
#' @family Selectors
#' @export
has_type <- function(match = "float32") {
  info <- current_info()
  # vapply instead of sapply: guarantees a logical vector even when there
  # are zero feature types (sapply would return an empty list, which
  # which() rejects with an error).
  lgl_matches <- vapply(info$feature_types,
                        function(x) any(x %in% match),
                        logical(1))
  info$feature_names[which(lgl_matches)]
}
# Evaluate tidyselect `terms` (quosures) against `feature_names`, making
# the names/types visible to selector helpers such as has_type() through
# the shared context environment for the duration of the call.
terms_select <- function(feature_names, feature_types, terms) {
  old_info <- set_current_info(
    list(feature_names = feature_names, feature_types = feature_types)
  )
  # NOTE(review): old_info is the same environment that was just mutated,
  # so this on.exit "restore" does not bring back the previous values --
  # confirm whether a snapshot-restore was intended.
  on.exit(set_current_info(old_info), add = TRUE)
  sel <- tidyselect::vars_select(feature_names, !!! terms)
  sel
}
#' Speciy all numeric variables.
#'
#' Find all the variables with the following types:
#' "float16", "float32", "float64", "int16", "int32", "int64",
#' "half", "double".
#'
#' @family Selectors
#' @export
all_numeric <- function() {
has_type(c("float16", "float32", "float64", "int16", "int32", "int64", "half", "double"))
}
#' Find all nominal variables.
#'
#' Currently we only consider "string" type as nominal.
#'
#' @family Selectors
#' @export
all_nominal <- function() {
has_type(c("string"))
}
#' @importFrom tidyselect starts_with
#' @export
tidyselect::starts_with
#' @importFrom tidyselect ends_with
#' @export
tidyselect::ends_with
#' @importFrom tidyselect contains
#' @export
tidyselect::contains
#' @importFrom tidyselect everything
#' @export
tidyselect::everything
#' @importFrom tidyselect matches
#' @export
tidyselect::matches
#' @importFrom tidyselect num_range
#' @export
tidyselect::num_range
#' @importFrom tidyselect one_of
#' @export
tidyselect::one_of
# FeatureSpec ------------------------------------------------------------------
# R6 class backing feature_spec(): holds the recipe of steps, the
# (prepared) dataset they are fitted against and -- after fit() -- the
# resulting TensorFlow feature columns.
FeatureSpec <- R6::R6Class(
  "FeatureSpec",
  public = list(
    steps = list(),            # named list of Step objects, in insertion order
    formula = NULL,
    column_names = NULL,       # column names of the feature dataset
    column_types = NULL,       # corresponding dtypes, ordered like column_names
    dataset = NULL,            # dataset yielding features only (x)
    fitted = FALSE,            # TRUE after a successful fit()
    prepared_dataset = NULL,   # dataset yielding named x/y structures
    x = NULL,                  # quosure selecting predictor columns
    y = NULL,                  # quosure selecting the response
    # dataset: a TF dataset or a data.frame (auto-converted via tensors_dataset)
    # x, y:    tidyselect expressions for predictors / response
    initialize = function(dataset, x, y = NULL) {
      if (inherits(dataset, "data.frame")) {
        dataset <- tensors_dataset(dataset)
      }
      # NOTE(review): there is no local variable `formula`, so this stores
      # the base `formula` *function* in the field; probably meant to store
      # a formula or be removed. Left unchanged because nothing visible
      # reads self$formula.
      self$formula <- formula
      self$x <- rlang::enquo(x)
      self$y <- rlang::enquo(y)
      self$set_dataset(dataset)
      self$column_names <- column_names(self$dataset)
      self$column_types <- output_types(self$dataset)
    },
    # Split the dataset into named x/y and keep an x-only view for fitting.
    set_dataset = function(dataset) {
      self$prepared_dataset <- dataset_prepare(dataset, !!self$x, !!self$y, named_features = TRUE)
      self$dataset <- dataset_map(self$prepared_dataset, function(x) x$x)
      invisible(self)
    },
    # Append a step; name_step() keys it by its name when it has one.
    add_step = function(step) {
      self$steps <- append(self$steps, name_step(step))
    },
    # Stream every batch of the dataset through each step's fit_batch(),
    # then let steps finalize their statistics via fit_resume().
    # Handles both eager execution and graph mode (throwaway v1 Session).
    fit = function() {
      if (self$fitted)
        stop("FeatureSpec is already fitted.")
      if (tf$executing_eagerly()) {
        ds <- reticulate::as_iterator(self$dataset)
        nxt <- reticulate::iter_next(ds)
      } else {
        ds <- make_iterator_one_shot(self$dataset)
        nxt_it <- ds$get_next()
        sess <- tf$compat$v1$Session()
        nxt <- sess$run(nxt_it)
      }
      # Total is unknown up front, so the progress bar only counts batches.
      pb <- progress::progress_bar$new(
        format = ":spin Preparing :tick_rate batches/s [:current batches in :elapsedfull]",
        total = Inf)
      while (!is.null(nxt)) {
        pb$tick(1)
        for (i in seq_along(self$steps)) {
          self$steps[[i]]$fit_batch(nxt)
        }
        if (tf$executing_eagerly()) {
          nxt <- reticulate::iter_next(ds)
        } else {
          # End of dataset surfaces as an OutOfRange error in graph mode.
          nxt <- tryCatch({sess$run(nxt_it)}, error = out_of_range_handler)
        }
      }
      for (i in seq_along(self$steps)) {
        self$steps[[i]]$fit_resume()
      }
      self$fitted <- TRUE
      if (!tf$executing_eagerly())
        sess$close()
    },
    # Materialize the feature columns by replaying the steps in order;
    # RemoveStep entries delete a previously created feature by name.
    features = function() {
      if (!self$fitted)
        stop("Only available after fitting the feature_spec.")
      feats <- NULL
      for (i in seq_along(self$steps)) {
        stp <- self$steps[i]
        if (inherits(stp[[1]], "RemoveStep")) {
          feats <- feats[-which(names(feats) == stp[[1]]$var)]
        } else {
          feature <- lapply(stp, function(x) x$feature(feats)) # keep list names
          feats <- append(feats, feature)
          feats <- unlist(feats)
        }
      }
      feats
    },
    # Subset of features() that are dense columns (usable by dense layers).
    dense_features = function() {
      if (!self$fitted)
        stop("Only available after fitting the feature_spec.")
      Filter(is_dense_column, self$features())
    },
    # All known feature names: step-created first, then raw dataset columns.
    feature_names = function() {
      unique(c(names(self$steps), self$column_names))
    },
    # Character type for every feature name: a step's declared column_type
    # when available, otherwise the dtype of the underlying dataset column.
    feature_types = function() {
      feature_names <- self$feature_names()
      feature_types <- character(length = length(feature_names))
      for (i in seq_along(feature_names)) {
        ft <- feature_names[i]
        if (is.null(self$steps[[ft]])) {
          feature_types[i] <- dtype_chr(self$column_types[[which(self$column_names == ft)]])
        } else if (is.null(self$steps[[ft]]$column_type)) {
          feature_types[i] <- dtype_chr(self$column_types[[which(self$column_names == ft)]])
        } else {
          feature_types[i] <- self$steps[[ft]]$column_type
        }
      }
      feature_types
    },
    print = function() {
      cat(cli::rule(left = "Feature Spec"), "\n")
      cat(cli::style_bold(paste("A feature_spec with", length(self$steps), "steps.\n")))
      cat(cli::style_bold("Fitted:"), self$fitted, "\n")
      cat(cli::rule(left = "Steps"), "\n")
      if (self$fitted)
        # BUGFIX: dense_features is a method -- the original measured
        # length(self$dense_features), i.e. the length of the function
        # object itself, which always printed 1.
        cat("The feature_spec has", length(self$dense_features()), "dense features.\n")
      if (length(self$steps) > 0) {
        # vapply for type stability (sapply could surprise on edge cases).
        step_types <- vapply(self$steps, function(x) class(x)[1], character(1))
        for (step_type in sort(unique(step_types))) {
          cat(
            paste0(cli::style_bold(step_type), ":"),
            paste(
              names(step_types[step_types == step_type]),
              collapse = ", "
            ),
            "\n"
          )
        }
      }
      cat(cli::rule(left = "Dense features"), "\n")
      if (self$fitted) {
        # (intentionally prints nothing when fitted -- kept as-is)
      } else {
        cat("Feature spec must be fitted before we can detect the dense features.\n")
      }
    }
  ),
  private = list(
    # Custom deep-clone: deep-clone nested R6 objects, deep-clone every
    # element of the step lists, shallow-copy everything else.
    deep_clone = function(name, value) {
      if (inherits(value, "R6")) {
        value$clone(deep = TRUE)
      } else if (name == "steps" || name == "base_steps" ||
                 name == "derived_steps") {
        lapply(value, function(x) x$clone(deep = TRUE))
      } else {
        value
      }
    }
  )
)
# Step --------------------------------------------------------------------
# Abstract base class for all feature_spec steps. Concrete steps override
# fit_batch() (accumulate statistics from one dataset batch) and
# fit_resume() (finalize statistics once all batches have been seen).
Step <- R6::R6Class(
  classname = "Step",
  public = list(
    name = NULL,  # feature name this step produces (set by subclasses)
    # Called once per dataset batch during fitting; default is a no-op.
    fit_batch = function (batch) {
    },
    # Called once after the last batch; default is a no-op.
    fit_resume = function () {
    }
  ),
  private = list(
    # Custom deep-clone: Python handles are shared references and must not
    # be cloned; nested R6 objects are deep-cloned; everything else copied.
    deep_clone = function(name, value) {
      if (inherits(value, "python.builtin.object")) {
        value
      } else if (inherits(value, "R6")) {
        value$clone(deep = TRUE)
      } else {
        value
      }
    }
  )
)
CategoricalStep <- R6::R6Class(
classname = "CategoricalStep",
inherit = Step
)
# Step that marks a feature for removal from the specification.
RemoveStep <- R6::R6Class(
  "RemoveStep",
  inherit = Step,
  public = list(
    # Name of the feature this step removes.
    var = NULL,
    initialize = function(var) {
      self$var <- var
      self$name <- var
    }
  )
)
# Marker class: steps built on top of other (base) feature columns,
# e.g. bucketized, crossed and shared-embedding columns.
DerivedStep <- R6::R6Class(
  "DerivedStep",
  inherit = Step
)
# Scalers -----------------------------------------------------------------
# Base class for normalizers learned from the data. Subclasses accumulate
# statistics in fit_batch(), finalize them in fit_resume(), and return a
# tensor-in/tensor-out normalizer closure from fun().
Scaler <- R6::R6Class(
  "Scaler",
  public = list(
    fit_batch = function(batch) {
    },
    fit_resume = function() {
    },
    fun = function() {
    }
  )
)
# http://notmatthancock.github.io/2017/03/23/simple-batch-stat-updates.html
# batch updates for mean and variance.
# Learns mean and standard deviation incrementally over batches using the
# pairwise-update formulas referenced above.
StandardScaler <- R6::R6Class(
  "StandardScaler",
  inherit = Scaler,
  public = list(
    m = 0,     # number of observations seen so far
    sd = 0,    # running (population) standard deviation
    mean = 0,  # running mean
    fit_batch = function (batch) {
      # Merge the running moments with this batch's moments.
      m <- self$m
      mu_m <- self$mean
      sd_m <- self$sd
      n <- length(batch)
      mu_n <- mean(batch)
      # Population sd of the batch (divide by n, not n-1).
      # NOTE(review): var() returns NA for a length-1 batch, which would
      # poison the running sd -- confirm batches always have n > 1.
      sd_n <- sqrt(var(batch)*(n-1)/(n))
      self$mean <- (m*mu_m + n*mu_n)/(n + m)
      self$sd <- sqrt((m*(sd_m^2) + n*(sd_n^2))/(m+n) + m*n/((m+n)^2)*((mu_m - mu_n)^2))
      self$m <- m + n
    },
    fit_resume = function() {
      # Convert the population sd to the sample sd (Bessel's correction).
      self$sd <- sqrt((self$sd^2)*self$m/(self$m -1))
    },
    fun = function() {
      # Capture the learned constants so the returned closure is
      # self-contained and usable as a TF normalizer_fn.
      mean_ <- self$mean
      sd_ <- self$sd
      function(x) {
        if (!x$dtype$is_floating)
          x <- tf$cast(x, tf$float32)
        (x - tf$cast(mean_, x$dtype))/tf$cast(sd_, x$dtype)
      }
    }
  )
)
# Learns the minimum and maximum incrementally over batches and rescales
# inputs to [0, 1].
MinMaxScaler <- R6::R6Class(
  "MinMaxScaler",
  inherit = Scaler,
  public = list(
    min = Inf,   # running minimum (Inf until the first batch is seen)
    max = -Inf,  # running maximum (-Inf until the first batch is seen)
    fit_batch = function (batch) {
      self$min <- min(c(self$min, min(batch)))
      self$max <- max(c(self$max, max(batch)))
    },
    fun = function() {
      # Capture the learned bounds; the closure maps min -> 0 and max -> 1.
      min_ <- self$min
      max_ <- self$max
      function(x) {
        if (!x$dtype$is_floating)
          x <- tf$cast(x, tf$float32)
        (x - tf$cast(min_, x$dtype))/(tf$cast(max_, x$dtype) - tf$cast(min_, x$dtype))
      }
    }
  )
)
#' List of pre-made scalers
#'
#' * [scaler_standard]: mean and standard deviation normalizer.
#' * [scaler_min_max]: min max normalizer
#'
#' @seealso [step_numeric_column]
#' @name scaler
#' @rdname scaler
NULL
#' Creates an instance of a standard scaler
#'
#' This scaler will learn the mean and the standard deviation
#' and use this to create a `normalizer_fn`.
#'
#' @seealso [scaler] to a complete list of normalizers
#' @family scaler
#' @export
scaler_standard <- function() {
  # Returns an unfitted scaler; statistics are learned when the spec is fitted.
  StandardScaler$new()
}
#' Creates an instance of a min max scaler
#'
#' This scaler will learn the min and max of the numeric variable
#' and use this to create a `normalizer_fn`.
#'
#' @seealso [scaler] to a complete list of normalizers
#' @family scaler
#' @export
scaler_min_max <- function() {
  # Returns an unfitted scaler; min/max are learned when the spec is fitted.
  MinMaxScaler$new()
}
# StepNumericColumn -------------------------------------------------------
# Step wrapping tf$feature_column$numeric_column. If normalizer_fn is a
# Scaler, its statistics are learned while fitting the spec.
StepNumericColumn <- R6::R6Class(
  "StepNumericColumn",
  inherit = Step,
  public = list(
    key = NULL,
    shape = NULL,
    default_value = NULL,
    dtype = NULL,
    normalizer_fn = NULL,
    column_type = NULL,
    initialize = function(key, shape, default_value, dtype, normalizer_fn, name) {
      self$key <- key
      self$shape <- shape
      self$default_value <- default_value
      self$dtype <- dtype
      self$normalizer_fn <- normalizer_fn
      self$name <- name
      self$column_type = dtype_chr(dtype)
    },
    fit_batch = function(batch) {
      # Only learned scalers need fitting; a plain function is used as-is.
      if (inherits(self$normalizer_fn, "Scaler")) {
        self$normalizer_fn$fit_batch(as.numeric(batch[[self$key]]))
      }
    },
    fit_resume = function() {
      # Finalize the scaler and replace it by its closure so that
      # tf$feature_column$numeric_column receives a plain function.
      if (inherits(self$normalizer_fn, "Scaler")) {
        self$normalizer_fn$fit_resume()
        self$normalizer_fn <- self$normalizer_fn$fun()
      }
    },
    feature = function (base_features) {
      tf$feature_column$numeric_column(
        key = self$key, shape = self$shape,
        default_value = self$default_value,
        dtype = self$dtype,
        normalizer_fn = self$normalizer_fn
      )
    }
  )
)
# StepCategoricalColumnWithVocabularyList ---------------------------------
# Step wrapping tf$feature_column$categorical_column_with_vocabulary_list.
# When no vocabulary is supplied, it is learned from the data while fitting.
StepCategoricalColumnWithVocabularyList <- R6::R6Class(
  "StepCategoricalColumnWithVocabularyList",
  inherit = CategoricalStep,
  public = list(
    key = NULL,
    vocabulary_list = NULL,
    dtype = NULL,
    default_value = -1L,
    num_oov_buckets = 0L,
    # Accumulator for the vocabulary learned across batches (only used
    # when no explicit vocabulary_list was given).
    vocabulary_list_aux = NULL,
    column_type = NULL,
    initialize = function(key, vocabulary_list = NULL, dtype = NULL, default_value = -1L,
                          num_oov_buckets = 0L, name) {
      self$key <- key
      self$vocabulary_list <- vocabulary_list
      self$dtype = dtype
      self$default_value <- default_value
      self$num_oov_buckets <- num_oov_buckets
      self$name <- name
      # dtype may be NULL; then the column type stays unknown here and the
      # spec falls back to the dataset's own column types.
      if (!is.null(dtype)) {
        self$column_type = dtype_chr(dtype)
      }
    },
    fit_batch = function(batch) {
      # Learn the vocabulary from the data only when not supplied upfront.
      if (is.null(self$vocabulary_list)) {
        values <- batch[[self$key]]
        if (inherits(values, "tensorflow.tensor")) {
          # add shape to tensor with no shape
          if (identical(values$shape$as_list(), list()))
            values <- tf$constant(values, shape = 1L)
          # get unique values before converting to R.
          values <- tensorflow::tf$unique(values)$y
          if (!is.atomic(values))
            values <- values$numpy()
        }
        # converts from bytes to an R string. Need in python >= 3.6
        # special case when values is a single value of type string
        if (inherits(values, "python.builtin.bytes"))
          values <- values$decode()
        if (inherits(values[[1]], "python.builtin.bytes"))
          values <- sapply(values, function(x) x$decode())
        # Merge this batch's unique values into the sorted accumulator.
        unq <- unique(values)
        self$vocabulary_list_aux <- sort(unique(c(self$vocabulary_list_aux, unq)))
      }
    },
    fit_resume = function() {
      # Promote the accumulated vocabulary to the final vocabulary.
      if (is.null(self$vocabulary_list)) {
        self$vocabulary_list <- self$vocabulary_list_aux
      }
    },
    feature = function(base_features) {
      tf$feature_column$categorical_column_with_vocabulary_list(
        key = self$key,
        vocabulary_list = self$vocabulary_list,
        dtype = self$dtype,
        default_value = self$default_value,
        num_oov_buckets = self$num_oov_buckets
      )
    }
  )
)
# StepCategoricalColumnWithHashBucket -------------------------------------
# Step wrapping tf$feature_column$categorical_column_with_hash_bucket.
# No fitting is needed: hashing requires no learned state.
StepCategoricalColumnWithHashBucket <- R6::R6Class(
  "StepCategoricalColumnWithHashBucket",
  inherit = CategoricalStep,
  public = list(
    key = NULL,
    hash_bucket_size = NULL,
    dtype = NULL,
    column_type = NULL,
    initialize = function(key, hash_bucket_size, dtype = tf$string, name) {
      self$key <- key
      self$hash_bucket_size <- hash_bucket_size
      self$dtype <- dtype
      self$name <- name
      if (!is.null(dtype)) {
        self$column_type = dtype_chr(dtype)
      }
    },
    feature = function (base_features) {
      tf$feature_column$categorical_column_with_hash_bucket(
        key = self$key,
        hash_bucket_size = self$hash_bucket_size,
        dtype = self$dtype
      )
    }
  )
)
# StepCategoricalColumnWithIdentity -------------------------------------
# Step wrapping tf$feature_column$categorical_column_with_identity, for
# integer inputs already in [0, num_buckets). Needs no fitting.
StepCategoricalColumnWithIdentity <- R6::R6Class(
  "StepCategoricalColumnWithIdentity",
  inherit = CategoricalStep,
  public = list(
    key = NULL,
    num_buckets = NULL,
    default_value = NULL,
    initialize = function(key, num_buckets, default_value = NULL, name) {
      self$key <- key
      self$num_buckets <- num_buckets
      self$default_value <- default_value
      self$name <- name
    },
    feature = function (base_features) {
      tf$feature_column$categorical_column_with_identity(
        key = self$key,
        num_buckets = self$num_buckets,
        default_value = self$default_value
      )
    }
  )
)
# StepCategoricalColumnWithVocabularyFile -------------------------------------
# Step wrapping tf$feature_column$categorical_column_with_vocabulary_file.
# The vocabulary lives in a file, so no fitting is required.
StepCategoricalColumnWithVocabularyFile <- R6::R6Class(
  "StepCategoricalColumnWithVocabularyFile",
  inherit = CategoricalStep,
  public = list(
    key = NULL,
    vocabulary_file = NULL,
    vocabulary_size = NULL,
    dtype = NULL,
    default_value = NULL,
    num_oov_buckets = NULL,
    column_type = NULL,
    initialize = function(key, vocabulary_file, vocabulary_size = NULL, dtype = tf$string,
                          default_value = NULL, num_oov_buckets = 0L, name) {
      self$key <- key
      # Resolve to an absolute path so the column works regardless of the
      # working directory at graph-construction time.
      self$vocabulary_file <- normalizePath(vocabulary_file)
      self$vocabulary_size <- vocabulary_size
      self$dtype <- dtype
      self$default_value <- default_value
      self$num_oov_buckets <- num_oov_buckets
      self$name <- name
      if (!is.null(dtype)) {
        self$column_type = dtype_chr(dtype)
      }
    },
    feature = function (base_features) {
      tf$feature_column$categorical_column_with_vocabulary_file(
        key = self$key,
        vocabulary_file = self$vocabulary_file,
        vocabulary_size = self$vocabulary_size,
        dtype = self$dtype,
        default_value = self$default_value,
        num_oov_buckets = self$num_oov_buckets
      )
    }
  )
)
# StepIndicatorColumn -----------------------------------------------------
# Step wrapping tf$feature_column$indicator_column: one-hot/multi-hot
# encodes a previously defined categorical column.
StepIndicatorColumn <- R6::R6Class(
  "StepIndicatorColumn",
  inherit = Step,
  public = list(
    # Name of the categorical step whose column gets encoded.
    categorical_column = NULL,
    base_features = NULL,
    # Indicator columns are always dense float32.
    column_type = "float32",
    initialize = function(categorical_column, name) {
      self$categorical_column = categorical_column
      self$name <- name
    },
    feature = function(base_features) {
      tf$feature_column$indicator_column(base_features[[self$categorical_column]])
    }
  )
)
# StepEmbeddingColumn -----------------------------------------------------
# Step wrapping tf$feature_column$embedding_column: embeds a previously
# defined categorical column into a dense float vector.
StepEmbeddingColumn <- R6::R6Class(
  "StepEmbeddingColumn",
  inherit = Step,
  public = list(
    categorical_column = NULL,
    # Either an integer or a function of the vocabulary size.
    dimension = NULL,
    combiner = NULL,
    initializer = NULL,
    ckpt_to_load_from = NULL,
    tensor_name_in_ckpt = NULL,
    max_norm = NULL,
    trainable = NULL,
    column_type = "float32",
    initialize = function(categorical_column, dimension = NULL, combiner = "mean", initializer = NULL,
                          ckpt_to_load_from = NULL, tensor_name_in_ckpt = NULL, max_norm = NULL,
                          trainable = TRUE, name) {
      self$categorical_column <- categorical_column
      self$dimension <- dimension
      self$combiner <- combiner
      self$initializer <- initializer
      self$ckpt_to_load_from <- ckpt_to_load_from
      self$tensor_name_in_ckpt <- tensor_name_in_ckpt
      self$max_norm <- max_norm
      self$trainable <- trainable
      self$name <- name
    },
    feature = function(base_features) {
      categorical_column <- base_features[[self$categorical_column]]
      # A function-valued dimension is resolved against the vocabulary
      # size of the underlying categorical column at build time.
      if (is.function(self$dimension)) {
        dimension <- self$dimension(length(categorical_column$vocabulary_list))
      } else {
        dimension <- self$dimension
      }
      tf$feature_column$embedding_column(
        categorical_column = categorical_column,
        dimension = as.integer(dimension),
        combiner = self$combiner,
        initializer = self$initializer,
        ckpt_to_load_from = self$ckpt_to_load_from,
        tensor_name_in_ckpt = self$tensor_name_in_ckpt,
        max_norm = self$max_norm,
        trainable = self$trainable
      )
    }
  )
)
# StepCrossedColumn -------------------------------------------------------
# Step wrapping tf$feature_column$crossed_column: hashes the cross of
# two or more previously defined columns.
StepCrossedColumn <- R6::R6Class(
  "StepCrossedColumn",
  inherit = DerivedStep,
  public = list(
    keys = NULL,
    hash_bucket_size = NULL,
    hash_key = NULL,
    column_type = "string",
    initialize = function (keys, hash_bucket_size, hash_key = NULL, name = NULL) {
      self$keys <- keys
      self$hash_bucket_size <- hash_bucket_size
      self$hash_key <- hash_key
      self$name <- name
    },
    feature = function(base_features) {
      # Resolve key names to their already-built base feature columns.
      keys <- lapply(self$keys, function(x) base_features[[x]])
      # Drop names so the list is passed positionally to the TF API.
      names(keys) <- NULL
      tf$feature_column$crossed_column(
        keys = keys,
        hash_bucket_size = self$hash_bucket_size,
        hash_key = self$hash_key
      )
    }
  )
)
# StepBucketizedColumn ----------------------------------------------------
# Step wrapping tf$feature_column$bucketized_column: discretizes a
# previously defined numeric column at the given boundaries.
StepBucketizedColumn <- R6::R6Class(
  "StepBucketizedColumn",
  inherit = DerivedStep,
  public = list(
    source_column = NULL,
    boundaries = NULL,
    column_type = "float32",
    initialize = function(source_column, boundaries, name) {
      self$source_column <- source_column
      self$boundaries <- boundaries
      self$name <- name
    },
    feature = function(base_features) {
      tf$feature_column$bucketized_column(
        source_column = base_features[[self$source_column]],
        boundaries = self$boundaries
      )
    }
  )
)
# StepSharedEmbeddings ----------------------------------------------------
# Step wrapping tf$feature_column$shared_embeddings: a single embedding
# table shared by several previously defined categorical columns.
StepSharedEmbeddings <- R6::R6Class(
  "StepSharedEmbeddings",
  inherit = DerivedStep,
  public = list(
    categorical_columns = NULL,
    dimension = NULL,
    combiner = NULL,
    initializer = NULL,
    shared_embedding_collection_name = NULL,
    ckpt_to_load_from = NULL,
    tensor_name_in_ckpt = NULL,
    max_norm = NULL,
    trainable = NULL,
    column_type = "float32",
    initialize = function(categorical_columns, dimension, combiner = "mean",
                          initializer = NULL, shared_embedding_collection_name = NULL,
                          ckpt_to_load_from = NULL, tensor_name_in_ckpt = NULL,
                          max_norm = NULL, trainable = TRUE, name = NULL) {
      self$categorical_columns <- categorical_columns
      self$dimension <- dimension
      self$combiner <- combiner
      self$initializer <- initializer
      self$shared_embedding_collection_name <- shared_embedding_collection_name
      self$ckpt_to_load_from <- ckpt_to_load_from
      self$tensor_name_in_ckpt <- tensor_name_in_ckpt
      self$max_norm <- max_norm
      self$trainable <- trainable
      self$name <- name
    },
    feature = function(base_features) {
      # Resolve column names to their already-built base feature columns.
      categorical_columns <- lapply(self$categorical_columns, function(x) {
        base_features[[x]]
      })
      # Drop names so the list is passed positionally to the TF API.
      names(categorical_columns) <- NULL
      tf$feature_column$shared_embeddings(
        categorical_columns = categorical_columns,
        dimension = self$dimension,
        combiner = self$combiner,
        initializer = self$initializer,
        shared_embedding_collection_name = self$shared_embedding_collection_name,
        ckpt_to_load_from = self$ckpt_to_load_from,
        tensor_name_in_ckpt = self$tensor_name_in_ckpt,
        max_norm = self$max_norm,
        trainable = self$trainable
      )
    }
  )
)
# Wrappers ----------------------------------------------------------------
#' Creates a feature specification.
#'
#' Used to initialize a feature columns specification.
#'
#' @param dataset A TensorFlow dataset.
#' @param x Features to include can use [tidyselect::select_helpers()] or
#' a `formula`.
#' @param y (Optional) The response variable. Can also be specified using
#' a `formula` in the `x` argument.
#'
#' @details
#' After creating the `feature_spec` object you can add steps using the
#' `step` functions.
#'
#' @return a `FeatureSpec` object.
#'
#' @seealso
#' * [fit.FeatureSpec()] to fit the FeatureSpec
#' * [dataset_use_spec()] to create a tensorflow dataset prepared to modeling.
#' * [steps] to a list of all implemented steps.
#'
#' @examples
#' \dontrun{
#' library(tfdatasets)
#' data(hearts)
#' hearts <- tensor_slices_dataset(hearts) %>% dataset_batch(32)
#'
#' # use the formula interface
#' spec <- feature_spec(hearts, target ~ .)
#'
#' # select using `tidyselect` helpers
#' spec <- feature_spec(hearts, x = c(thal, age), y = target)
#' }
#' @family Feature Spec Functions
#' @export
feature_spec <- function(dataset, x, y = NULL) {
  # currently due to a bug in TF we are only using feature columns api with TF
  # >= 2.0. see https://github.com/tensorflow/tensorflow/issues/30307
  if (tensorflow::tf_version() < "2.0")
    stop("Feature spec is only available with TensorFlow >= 2.0", call. = FALSE)
  # Capture the selection expressions lazily and hand them to the spec,
  # which resolves them against the dataset's columns.
  FeatureSpec$new(dataset, x = !!rlang::enquo(x), y = !!rlang::enquo(y))
}
#' Fits a feature specification.
#'
#' This function will `fit` the specification. Depending
#' on the steps added to the specification it will compute
#' for example, the levels of categorical features, normalization
#' constants, etc.
#'
#' @param object A feature specification created with [feature_spec()].
#' @param dataset (Optional) A TensorFlow dataset. If `NULL` it will use
#'   the dataset provided when initializing the `feature_spec`.
#' @param ... (unused)
#'
#' @seealso
#' * [feature_spec()] to initialize the feature specification.
#' * [dataset_use_spec()] to create a tensorflow dataset prepared to modeling.
#' * [steps] to a list of all implemented steps.
#'
#' @return a fitted `FeatureSpec` object.
#'
#' @examples
#' \dontrun{
#' library(tfdatasets)
#' data(hearts)
#' hearts <- tensor_slices_dataset(hearts) %>% dataset_batch(32)
#'
#' # use the formula interface
#' spec <- feature_spec(hearts, target ~ age) %>%
#' step_numeric_column(age)
#'
#' spec_fit <- fit(spec)
#' spec_fit
#' }
#' @family Feature Spec Functions
#' @export
fit.FeatureSpec <- function(object, dataset=NULL, ...) {
  # Fit a deep copy so the caller's unfitted spec is left untouched.
  fitted_spec <- object$clone(deep = TRUE)
  if (!is.null(dataset)) {
    fitted_spec$set_dataset(dataset)
  }
  fitted_spec$fit()
  fitted_spec
}
#' Transform the dataset using the provided spec.
#'
#' Prepares the dataset to be used directly in a model. The transformed
#' dataset is prepared to return tuples (x,y) that can be used directly
#' in Keras.
#'
#' @param dataset A TensorFlow dataset.
#' @param spec A feature specification created with [feature_spec()].
#' @seealso
#' * [feature_spec()] to initialize the feature specification.
#' * [fit.FeatureSpec()] to create a tensorflow dataset prepared to modeling.
#' * [steps] to a list of all implemented steps.
#'
#' @return A TensorFlow dataset.
#'
#' @examples
#' \dontrun{
#' library(tfdatasets)
#' data(hearts)
#' hearts <- tensor_slices_dataset(hearts) %>% dataset_batch(32)
#'
#' # use the formula interface
#' spec <- feature_spec(hearts, target ~ age) %>%
#' step_numeric_column(age)
#'
#' spec_fit <- fit(spec)
#' final_dataset <- hearts %>% dataset_use_spec(spec_fit)
#' }
#' @family Feature Spec Functions
#' @export
dataset_use_spec <- function(dataset, spec) {
  # Validate inputs before doing any work.
  if (!inherits(dataset, "tensorflow.python.data.ops.dataset_ops.DatasetV2"))
    stop("`dataset` must be a TensorFlow dataset.")
  if (!spec$fitted)
    stop("FeatureSpec must be prepared before juicing.")
  # Clone so attaching this dataset does not mutate the caller's spec.
  spec_copy <- spec$clone(deep = TRUE)
  spec_copy$set_dataset(dataset)
  # Emit (x, y) tuples consumable directly by Keras.
  dataset_map(
    spec_copy$prepared_dataset,
    function(x) reticulate::tuple(x$x, x$y)
  )
}
#' Steps for feature columns specification.
#'
#' List of steps that can be used to specify columns in the `feature_spec` interface.
#'
#' @section Steps:
#'
#' * [step_numeric_column()] to define numeric columns.
#' * [step_categorical_column_with_vocabulary_list()] to define categorical columns.
#' * [step_categorical_column_with_hash_bucket()] to define categorical columns
#' where ids are set by hashing.
#' * [step_categorical_column_with_identity()] to define categorical columns
#' represented by integers in the range `[0-num_buckets)`.
#' * [step_categorical_column_with_vocabulary_file()] to define categorical columns
#' when their vocabulary is available in a file.
#' * [step_indicator_column()] to create indicator columns from categorical columns.
#' * [step_embedding_column()] to create embeddings columns from categorical columns.
#' * [step_bucketized_column()] to create bucketized columns from numeric columns.
#' * [step_crossed_column()] to perform crosses of categorical columns.
#' * [step_shared_embeddings_column()] to share embeddings between a list of
#' categorical columns.
#' * [step_remove_column()] to remove columns from the specification.
#'
#' @seealso
#' * [selectors] for a list of selectors that can be used to specify variables.
#'
#' @name steps
#' @rdname steps
#' @family Feature Spec Functions
NULL
#' Creates a numeric column specification
#'
#' `step_numeric_column` creates a numeric column specification. It can also be
#' used to normalize numeric columns.
#'
#' @param spec A feature specification created with [feature_spec()].
#' @param ... Comma separated list of variable names to apply the step. [selectors] can also be used.
#' @param shape An iterable of integers specifies the shape of the Tensor. An integer can be given
#' which means a single dimension Tensor with given width. The Tensor representing the column will
#' have the shape of `batch_size` + `shape`.
#' @param default_value A single value compatible with `dtype` or an iterable of values compatible
#' with `dtype` which the column takes on during `tf.Example` parsing if data is missing. A
#' default value of `NULL` will cause `tf.parse_example` to fail if an example does not contain
#' this column. If a single value is provided, the same value will be applied as
#' the default value for every item. If an iterable of values is provided, the shape
#' of the default_value should be equal to the given shape.
#' @param dtype defines the type of values. Default value is `tf$float32`. Must be a non-quantized,
#' real integer or floating point type.
#' @param normalizer_fn If not `NULL`, a function that can be used to normalize the value
#' of the tensor after default_value is applied for parsing. Normalizer function takes the
#' input Tensor as its argument, and returns the output Tensor. (e.g. `function(x) (x - 3.0) / 4.2)`.
#' Please note that even though the most common use case of this function is normalization, it
#'   can be used for any kind of Tensorflow transformations. You can also use a pre-made [scaler], in
#' this case a function will be created after [fit.FeatureSpec] is called on the feature specification.
#'
#' @return a `FeatureSpec` object.
#'
#' @examples
#' \dontrun{
#' library(tfdatasets)
#' data(hearts)
#' hearts <- tensor_slices_dataset(hearts) %>% dataset_batch(32)
#'
#' # use the formula interface
#' spec <- feature_spec(hearts, target ~ age) %>%
#'   step_numeric_column(age, normalizer_fn = scaler_standard())
#'
#' spec_fit <- fit(spec)
#' final_dataset <- hearts %>% dataset_use_spec(spec_fit)
#' }
#'
#' @seealso [steps] for a complete list of allowed steps.
#'
#' @family Feature Spec Functions
#' @export
step_numeric_column <- function(spec, ..., shape = 1L, default_value = NULL,
                                dtype = tf$float32, normalizer_fn = NULL) {
  # Clone so step functions are pure from the caller's point of view.
  out_spec <- spec$clone(deep = TRUE)
  selected <- terms_select(out_spec$feature_names(), out_spec$feature_types(), quos(...))
  # One numeric-column step per selected feature, named after it.
  for (feature in selected) {
    out_spec$add_step(
      StepNumericColumn$new(feature, shape, default_value, dtype, normalizer_fn,
                            name = feature)
    )
  }
  out_spec
}
#' Creates a step that can remove columns
#'
#' Removes features of the feature specification.
#'
#' @inheritParams step_numeric_column
#'
#' @return a `FeatureSpec` object.
#'
#' @examples
#' \dontrun{
#' library(tfdatasets)
#' data(hearts)
#' hearts <- tensor_slices_dataset(hearts) %>% dataset_batch(32)
#'
#' # use the formula interface
#' spec <- feature_spec(hearts, target ~ age) %>%
#' step_numeric_column(age, normalizer_fn = scaler_standard()) %>%
#' step_bucketized_column(age, boundaries = c(20, 50)) %>%
#' step_remove_column(age)
#'
#' spec_fit <- fit(spec)
#' final_dataset <- hearts %>% dataset_use_spec(spec_fit)
#' }
#'
#' @seealso [steps] for a complete list of allowed steps.
#' @family Feature Spec Functions
#'
#' @export
step_remove_column <- function(spec, ...) {
  # Clone so step functions are pure from the caller's point of view.
  out_spec <- spec$clone(deep = TRUE)
  selected <- terms_select(out_spec$feature_names(), out_spec$feature_types(), quos(...))
  # One removal step per selected feature.
  for (feature in selected) {
    out_spec$add_step(RemoveStep$new(feature))
  }
  out_spec
}
#' Creates a categorical column specification
#'
#' @inheritParams step_numeric_column
#' @param vocabulary_list An ordered iterable defining the vocabulary. Each
#' feature is mapped to the index of its value (if present) in vocabulary_list.
#' Must be castable to `dtype`. If `NULL` the vocabulary will be defined as
#' all unique values in the dataset provided when fitting the specification.
#' @param dtype The type of features. Only string and integer types are supported.
#' If `NULL`, it will be inferred from `vocabulary_list`.
#' @param default_value The integer ID value to return for out-of-vocabulary feature
#' values, defaults to `-1`. This can not be specified with a positive
#' num_oov_buckets.
#' @param num_oov_buckets Non-negative integer, the number of out-of-vocabulary buckets.
#' All out-of-vocabulary inputs will be assigned IDs in the range
#'   `[length(vocabulary_list), length(vocabulary_list)+num_oov_buckets)` based on a hash of
#' the input value. A positive num_oov_buckets can not be specified with
#' default_value.
#'
#' @examples
#' \dontrun{
#' library(tfdatasets)
#' data(hearts)
#' hearts <- tensor_slices_dataset(hearts) %>% dataset_batch(32)
#'
#' # use the formula interface
#' spec <- feature_spec(hearts, target ~ thal) %>%
#' step_categorical_column_with_vocabulary_list(thal)
#'
#' spec_fit <- fit(spec)
#' final_dataset <- hearts %>% dataset_use_spec(spec_fit)
#' }
#'
#' @return a `FeatureSpec` object.
#' @seealso [steps] for a complete list of allowed steps.
#'
#' @family Feature Spec Functions
#' @export
step_categorical_column_with_vocabulary_list <- function(spec, ..., vocabulary_list = NULL,
                                                         dtype = NULL, default_value = -1L,
                                                         num_oov_buckets = 0L) {
  # Clone so step functions are pure from the caller's point of view.
  out_spec <- spec$clone(deep = TRUE)
  selected <- terms_select(out_spec$feature_names(), out_spec$feature_types(), quos(...))
  # One vocabulary-list step per selected feature; with a NULL vocabulary
  # the step learns it from the data when the spec is fitted.
  for (feature in selected) {
    out_spec$add_step(
      StepCategoricalColumnWithVocabularyList$new(
        feature, vocabulary_list, dtype, default_value, num_oov_buckets,
        name = feature
      )
    )
  }
  out_spec
}
#' Creates a categorical column with hash buckets specification
#'
#' Represents sparse feature where ids are set by hashing.
#'
#' @inheritParams step_numeric_column
#' @param hash_bucket_size An int > 1. The number of buckets.
#' @param dtype The type of features. Only string and integer types are supported.
#'
#' @examples
#' \dontrun{
#' library(tfdatasets)
#' data(hearts)
#' hearts <- tensor_slices_dataset(hearts) %>% dataset_batch(32)
#'
#' # use the formula interface
#' spec <- feature_spec(hearts, target ~ thal) %>%
#' step_categorical_column_with_hash_bucket(thal, hash_bucket_size = 3)
#'
#' spec_fit <- fit(spec)
#' final_dataset <- hearts %>% dataset_use_spec(spec_fit)
#' }
#'
#' @return a `FeatureSpec` object.
#' @seealso [steps] for a complete list of allowed steps.
#'
#' @family Feature Spec Functions
#' @export
step_categorical_column_with_hash_bucket <- function(spec, ..., hash_bucket_size,
                                                     dtype = tf$string) {
  # Clone so step functions are pure from the caller's point of view.
  out_spec <- spec$clone(deep = TRUE)
  selected <- terms_select(out_spec$feature_names(), out_spec$feature_types(), quos(...))
  # One hash-bucket step per selected feature, named after it.
  for (feature in selected) {
    out_spec$add_step(
      StepCategoricalColumnWithHashBucket$new(
        feature,
        hash_bucket_size = hash_bucket_size,
        dtype = dtype,
        name = feature
      )
    )
  }
  out_spec
}
#' Create a categorical column with identity
#'
#' Use this when your inputs are integers in the range `[0-num_buckets)`.
#'
#' @inheritParams step_numeric_column
#' @param num_buckets Range of inputs and outputs is `[0, num_buckets)`.
#' @param default_value If `NULL`, this column's graph operations will fail
#' for out-of-range inputs. Otherwise, this value must be in the range
#' `[0, num_buckets)`, and will replace inputs in that range.
#'
#' @examples
#' \dontrun{
#' library(tfdatasets)
#' data(hearts)
#'
#' hearts$thal <- as.integer(as.factor(hearts$thal)) - 1L
#'
#' hearts <- tensor_slices_dataset(hearts) %>% dataset_batch(32)
#'
#' # use the formula interface
#' spec <- feature_spec(hearts, target ~ thal) %>%
#' step_categorical_column_with_identity(thal, num_buckets = 5)
#'
#' spec_fit <- fit(spec)
#' final_dataset <- hearts %>% dataset_use_spec(spec_fit)
#' }
#'
#' @return a `FeatureSpec` object.
#' @seealso [steps] for a complete list of allowed steps.
#'
#' @family Feature Spec Functions
#' @export
step_categorical_column_with_identity <- function(spec, ..., num_buckets,
                                                  default_value = NULL) {
  # Clone so step functions are pure from the caller's point of view.
  out_spec <- spec$clone(deep = TRUE)
  selected <- terms_select(out_spec$feature_names(), out_spec$feature_types(), quos(...))
  # One identity-column step per selected feature, named after it.
  for (feature in selected) {
    out_spec$add_step(
      StepCategoricalColumnWithIdentity$new(
        key = feature,
        num_buckets = num_buckets,
        default_value = default_value,
        name = feature
      )
    )
  }
  out_spec
}
#' Creates a categorical column with vocabulary file
#'
#' Use this function when the vocabulary of a categorical variable
#' is written to a file.
#'
#' @inheritParams step_numeric_column
#' @param vocabulary_file The vocabulary file name.
#' @param vocabulary_size Number of the elements in the vocabulary. This
#' must be no greater than length of `vocabulary_file`, if less than
#'   length, later values are ignored. If `NULL`, it is set to the length of
#' `vocabulary_file`.
#' @param dtype The type of features. Only string and integer types are
#' supported.
#' @param default_value The integer ID value to return for out-of-vocabulary
#' feature values, defaults to `-1`. This can not be specified with a
#' positive `num_oov_buckets`.
#' @param num_oov_buckets Non-negative integer, the number of out-of-vocabulary
#' buckets. All out-of-vocabulary inputs will be assigned IDs in the range
#' `[vocabulary_size, vocabulary_size+num_oov_buckets)` based on a hash of
#' the input value. A positive `num_oov_buckets` can not be specified with
#' default_value.
#'
#' @examples
#' \dontrun{
#' library(tfdatasets)
#' data(hearts)
#' file <- tempfile()
#' writeLines(unique(hearts$thal), file)
#' hearts <- tensor_slices_dataset(hearts) %>% dataset_batch(32)
#'
#' # use the formula interface
#' spec <- feature_spec(hearts, target ~ thal) %>%
#' step_categorical_column_with_vocabulary_file(thal, vocabulary_file = file)
#'
#' spec_fit <- fit(spec)
#' final_dataset <- hearts %>% dataset_use_spec(spec_fit)
#' }
#'
#' @return a `FeatureSpec` object.
#' @seealso [steps] for a complete list of allowed steps.
#'
#' @family Feature Spec Functions
#' @export
step_categorical_column_with_vocabulary_file <- function(spec, ..., vocabulary_file,
                                                         vocabulary_size = NULL,
                                                         dtype = tf$string,
                                                         default_value = NULL,
                                                         num_oov_buckets = 0L) {
  # Clone so step functions are pure from the caller's point of view.
  out_spec <- spec$clone(deep = TRUE)
  selected <- terms_select(out_spec$feature_names(), out_spec$feature_types(), quos(...))
  # One vocabulary-file step per selected feature, named after it.
  for (feature in selected) {
    out_spec$add_step(
      StepCategoricalColumnWithVocabularyFile$new(
        key = feature,
        vocabulary_file = vocabulary_file,
        vocabulary_size = vocabulary_size,
        dtype = dtype,
        default_value = default_value,
        num_oov_buckets = num_oov_buckets,
        name = feature
      )
    )
  }
  out_spec
}
# Derive the name of a step created for `variable`.
#
# If the (single-element) quosure list carries a non-empty user-supplied
# name, that name wins; otherwise the name is "<step>_<variable>".
#
# Note: the length check runs before `is.na()` so that a multi-element
# `nms` short-circuits instead of erroring -- under R >= 4.3, `&&` raises
# an error when given a condition of length greater than one.
make_step_name <- function(quosure, variable, step) {
  nms <- names(quosure)
  if (!is.null(nms) && length(nms) == 1 && !is.na(nms) && nms != "") {
    nms
  } else {
    paste0(step, "_", variable)
  }
}
# Generic driver shared by the derived-column step functions: selects the
# target features, builds one `step` instance per feature (via the `step`
# constructor and step-specific `args`) and registers each on a cloned spec.
step_ <- function(spec, ..., step, args, prefix) {
  spec <- spec$clone(deep = TRUE)
  quosures <- quos(...)
  variables <- terms_select(spec$feature_names(), spec$feature_types(), quosures)
  nms <- names(quosures)
  # A user-supplied name only makes sense for a 1:1 mapping; selectors can
  # expand to many variables, in which case naming is rejected.
  if ( !is.null(nms) && any(nms != "") && length(nms) != length(variables) )
    stop("Can't name feature if using a selector.")
  for (i in seq_along(variables)) {
    # Constructor arguments: target variable, derived step name, then the
    # step-specific options.
    args_ <- append(
      list(
        variables[i],
        name = make_step_name(quosures[i], variables[i], prefix)
      ),
      args
    )
    stp <- do.call(step, args_)
    spec$add_step(stp)
  }
  spec
}
#' Creates Indicator Columns
#'
#' Use this step to create indicator columns from categorical columns.
#'
#' @inheritParams step_numeric_column
#'
#' @examples
#' \dontrun{
#' library(tfdatasets)
#' data(hearts)
#' file <- tempfile()
#' writeLines(unique(hearts$thal), file)
#' hearts <- tensor_slices_dataset(hearts) %>% dataset_batch(32)
#'
#' # use the formula interface
#' spec <- feature_spec(hearts, target ~ thal) %>%
#' step_categorical_column_with_vocabulary_list(thal) %>%
#' step_indicator_column(thal)
#' spec_fit <- fit(spec)
#' final_dataset <- hearts %>% dataset_use_spec(spec_fit)
#' }
#' @return a `FeatureSpec` object.
#' @seealso [steps] for a complete list of allowed steps.
#'
#' @family Feature Spec Functions
#' @export
step_indicator_column <- function(spec, ...) {
  # Delegates to the generic step driver; indicator columns take no options.
  step_(spec, ..., step = StepIndicatorColumn$new, args = list(), prefix = "indicator")
}
#' Creates embeddings columns
#'
#' Use this step to create embedding columns from categorical
#' columns.
#'
#' @inheritParams step_numeric_column
#' @param dimension An integer specifying dimension of the embedding, must be > 0.
#' Can also be a function of the size of the vocabulary.
#' @param combiner A string specifying how to reduce if there are multiple entries in
#' a single row. Currently 'mean', 'sqrtn' and 'sum' are supported, with 'mean' the
#' default. 'sqrtn' often achieves good accuracy, in particular with bag-of-words
#' columns. Each of this can be thought as example level normalizations on
#' the column. For more information, see `tf.embedding_lookup_sparse`.
#' @param initializer A variable initializer function to be used in embedding
#' variable initialization. If not specified, defaults to
#' `tf.truncated_normal_initializer` with mean `0.0` and standard deviation
#' `1/sqrt(dimension)`.
#' @param ckpt_to_load_from String representing checkpoint name/pattern from
#' which to restore column weights. Required if `tensor_name_in_ckpt` is
#' not `NULL`.
#' @param tensor_name_in_ckpt Name of the Tensor in ckpt_to_load_from from which to
#' restore the column weights. Required if `ckpt_to_load_from` is not `NULL`.
#' @param max_norm If not `NULL`, embedding values are l2-normalized to this value.
#' @param trainable Whether or not the embedding is trainable. Default is `TRUE`.
#'
#' @examples
#' \dontrun{
#' library(tfdatasets)
#' data(hearts)
#' file <- tempfile()
#' writeLines(unique(hearts$thal), file)
#' hearts <- tensor_slices_dataset(hearts) %>% dataset_batch(32)
#'
#' # use the formula interface
#' spec <- feature_spec(hearts, target ~ thal) %>%
#' step_categorical_column_with_vocabulary_list(thal) %>%
#' step_embedding_column(thal, dimension = 3)
#' spec_fit <- fit(spec)
#' final_dataset <- hearts %>% dataset_use_spec(spec_fit)
#' }
#' @return a `FeatureSpec` object.
#' @seealso [steps] for a complete list of allowed steps.
#'
#' @family Feature Spec Functions
#' @export
step_embedding_column <- function(spec, ..., dimension = function(x) {as.integer(x^0.25)},
                                  combiner = "mean",
                                  initializer = NULL, ckpt_to_load_from = NULL,
                                  tensor_name_in_ckpt = NULL, max_norm = NULL,
                                  trainable = TRUE) {
  # Delegate to the generic single-column step constructor; every option is
  # forwarded to `StepEmbeddingColumn$new` at step-creation time. `dimension`
  # may be a function of the vocabulary size (resolved when the feature
  # columns are materialized).
  step_(
    spec, ...,
    step = StepEmbeddingColumn$new,
    args = list(
      dimension = dimension,
      combiner = combiner,
      initializer = initializer,
      ckpt_to_load_from = ckpt_to_load_from,
      tensor_name_in_ckpt = tensor_name_in_ckpt,
      max_norm = max_norm,
      trainable = trainable
    ),
    prefix = "embedding"
  )
}
#' Creates bucketized columns
#'
#' Use this step to create bucketized columns from numeric columns.
#'
#' @inheritParams step_numeric_column
#' @param boundaries A sorted list or tuple of floats specifying the boundaries.
#'
#' @examples
#' \dontrun{
#' library(tfdatasets)
#' data(hearts)
#' file <- tempfile()
#' writeLines(unique(hearts$thal), file)
#' hearts <- tensor_slices_dataset(hearts) %>% dataset_batch(32)
#'
#' # use the formula interface
#' spec <- feature_spec(hearts, target ~ age) %>%
#' step_numeric_column(age) %>%
#' step_bucketized_column(age, boundaries = c(10, 20, 30))
#' spec_fit <- fit(spec)
#' final_dataset <- hearts %>% dataset_use_spec(spec_fit)
#' }
#' @return a `FeatureSpec` object.
#' @seealso [steps] for a complete list of allowed steps.
#'
#' @family Feature Spec Functions
#' @export
step_bucketized_column <- function(spec, ..., boundaries) {
  # `boundaries` is forwarded verbatim and handed to
  # `tf$feature_column$bucketized_column()` when features are materialized.
  step_(
    spec, ...,
    step = StepBucketizedColumn$new,
    args = list(boundaries = boundaries),
    prefix = "bucketized"
  )
}
# Build the name of a multi-column step (e.g. a crossed column).
# A user-supplied name on the quosure wins; otherwise the name is derived
# from the step prefix and the selected variable names.
make_multiple_columns_step_name <- function(quosure, variables, step) {
  given_name <- names(quosure)
  if (!is.null(given_name) && given_name != "") {
    given_name
  } else {
    paste0(step, "_", paste(variables, collapse = "_"))
  }
}
# Generic constructor for steps that consume *multiple* columns per step
# (crossed columns, shared embeddings). Each quosure in `...` selects a set
# of variables; one step instance is created per quosure.
#
# spec   : a FeatureSpec (cloned, never mutated in place)
# step   : the step class constructor (e.g. `StepCrossedColumn$new`)
# args   : extra named arguments forwarded to the constructor
# prefix : prefix used to auto-name unnamed steps
step_multiple_ <- function(spec, ..., step, args, prefix) {
  spec <- spec$clone(deep = TRUE)
  quosures <- quos(...)
  for (i in seq_along(quosures)) {
    variables <- terms_select(spec$feature_names(), spec$feature_types(), quosures[i])
    args_ <- append(
      list(
        variables,
        # Fix: the auto-generated name previously hard-coded "crossed",
        # mis-naming shared-embedding steps; use the caller's `prefix`.
        name = make_multiple_columns_step_name(quosures[i], variables, prefix)
      ),
      args
    )
    stp <- do.call(step, args_)
    spec$add_step(stp)
  }
  spec
}
#' Creates crosses of categorical columns
#'
#' Use this step to create crosses between categorical columns.
#'
#' @inheritParams step_numeric_column
#' @param hash_bucket_size An int > 1. The number of buckets.
#' @param hash_key (optional) Specify the hash_key that will be used by the
#' FingerprintCat64 function to combine the crosses fingerprints on
#' SparseCrossOp.
#'
#' @examples
#' \dontrun{
#' library(tfdatasets)
#' data(hearts)
#' file <- tempfile()
#' writeLines(unique(hearts$thal), file)
#' hearts <- tensor_slices_dataset(hearts) %>% dataset_batch(32)
#'
#' # use the formula interface
#' spec <- feature_spec(hearts, target ~ age + thal) %>%
#'   step_numeric_column(age) %>%
#'   step_bucketized_column(age, boundaries = c(10, 20, 30)) %>%
#'   step_categorical_column_with_vocabulary_list(thal) %>%
#'   step_crossed_column(c(thal, bucketized_age), hash_bucket_size = 10)
#' spec_fit <- fit(spec)
#' final_dataset <- hearts %>% dataset_use_spec(spec_fit)
#' }
#' @return a `FeatureSpec` object.
#' @seealso [steps] for a complete list of allowed steps.
#'
#' @family Feature Spec Functions
#' @export
step_crossed_column <- function(spec, ..., hash_bucket_size, hash_key = NULL) {
  # Each selection in `...` becomes one crossed column; the cross is hashed
  # into `hash_bucket_size` buckets by TensorFlow at feature-build time.
  step_multiple_(
    spec, ...,
    step = StepCrossedColumn$new,
    args = list(hash_bucket_size = hash_bucket_size, hash_key = hash_key),
    prefix = "crossed"
  )
}
#' Creates shared embeddings for categorical columns
#'
#' This is similar to [step_embedding_column], except that it produces a list of
#' embedding columns that share the same embedding weights.
#'
#' @inheritParams step_embedding_column
#' @param shared_embedding_collection_name Optional collective name of
#' these columns. If not given, a reasonable name will be chosen based on
#' the names of categorical_columns.
#'
#' @note Does not work in the eager mode.
#'
#' @return a `FeatureSpec` object.
#' @seealso [steps] for a complete list of allowed steps.
#'
#' @family Feature Spec Functions
#' @export
step_shared_embeddings_column <- function(spec, ..., dimension, combiner = "mean",
                                          initializer = NULL, shared_embedding_collection_name = NULL,
                                          ckpt_to_load_from = NULL, tensor_name_in_ckpt = NULL,
                                          max_norm = NULL, trainable = TRUE) {
  # Each selection in `...` becomes one shared-embeddings step: the selected
  # categorical columns share a single embedding weight matrix.
  step_multiple_(
    spec, ...,
    step = StepSharedEmbeddings$new,
    args = list(
      dimension = dimension,
      combiner = combiner,
      initializer = initializer,
      shared_embedding_collection_name = shared_embedding_collection_name,
      ckpt_to_load_from = ckpt_to_load_from,
      tensor_name_in_ckpt = tensor_name_in_ckpt,
      max_norm = max_norm,
      trainable = trainable
    ),
    prefix = "shared_embeddings"
  )
}
# Input from spec ---------------------------------------------------------
#' Creates a list of inputs from a dataset
#'
#' Create a list of Keras input layers that can be used together
#' with [keras::layer_dense_features()].
#'
#' @param dataset a TensorFlow dataset or a data.frame
#' @return a list of Keras input layers
#'
#' @examples
#' \dontrun{
#' library(tfdatasets)
#' data(hearts)
#' hearts <- tensor_slices_dataset(hearts) %>% dataset_batch(32)
#'
#' # use the formula interface
#' spec <- feature_spec(hearts, target ~ age + slope) %>%
#' step_numeric_column(age, slope) %>%
#' step_bucketized_column(age, boundaries = c(10, 20, 30))
#'
#' spec <- fit(spec)
#' dataset <- hearts %>% dataset_use_spec(spec)
#'
#' input <- layer_input_from_dataset(dataset)
#' }
#'
#' @export
layer_input_from_dataset <- function(dataset) {
  # A single batch is enough to infer names, dtypes and shapes.
  if (inherits(dataset, "data.frame") || inherits(dataset, "list"))
    dataset <- tensor_slices_dataset(utils::head(dataset))
  dataset <- dataset_map(dataset, ~.x)
  col_names <- column_names(dataset)
  col_types <- output_types(dataset)
  col_shapes <- output_shapes(dataset)
  # One Keras input layer per dataset column, named after the column.
  inputs <- lapply(seq_along(col_names), function(i) {
    keras::layer_input(
      name = col_names[i],
      shape = col_shapes[[i]]$as_list()[-1],  # drop the batch dimension
      dtype = col_types[[i]]$name
    )
  })
  names(inputs) <- col_names
  # Keras expects a dict mapping column name -> input layer.
  reticulate::dict(inputs)
}
#' Dense Features
#'
#' Retrieves the dense features from a spec.
#'
#' @inheritParams step_numeric_column
#'
#' @return A list of feature columns.
#'
#' @export
# Thin public wrapper around the R6 method: returns the list of dense
# (non-categorical) feature columns of a *fitted* FeatureSpec. The method
# errors if the spec has not been fitted yet.
dense_features <- function(spec) {
  spec$dense_features()
}
| /R/feature_spec.R | no_license | Athospd/tfdatasets | R | false | false | 50,460 | r | name_step <- function(step) {
  # Wrap `step` in a one-element list named after `step$name`, so it can be
  # appended to the spec's named step list.
  # NOTE(review): when `step$name` is NULL the step is returned unwrapped —
  # callers appear to always set a name; confirm before relying on this.
  if (!is.null(step$name)) {
    nm <- step$name
    step <- list(step)
    names(step) <- nm
  }
  step
}
# TRUE when `feature` is a dense feature column (directly consumable by
# `keras::layer_dense_features()`), FALSE for categorical columns.
is_dense_column <- function(feature) {
  dense_class <- "tensorflow.python.feature_column.feature_column._DenseColumn"
  inherits(feature, dense_class)
}
# Character name of a TF dtype object, e.g. "float32". Uses `$` (attribute
# access on reticulate objects), which is not interchangeable with `[[`.
dtype_chr <- function(x) {
  x$name
}
# Selectors ---------------------------------------------------------------
#' Selectors
#'
#' List of selectors that can be used to specify variables inside
#' steps.
#'
#' @section Selectors:
#'
#' * [has_type()]
#' * [all_numeric()]
#' * [all_nominal()]
#' * [starts_with()]
#' * [ends_with()]
#' * [one_of()]
#' * [matches()]
#' * [contains()]
#' * [everything()]
#'
#' @name selectors
#' @rdname selectors
# Mutable environment holding the feature names/types of the spec currently
# being processed; read by the selector helpers (e.g. `has_type()`).
cur_info_env <- rlang::child_env(rlang::env_parent(rlang::env()))
# Install `x` (a list with $feature_names / $feature_types) as the current
# selector context and return, invisibly, the previous context so callers
# can restore it (see `terms_select()`).
set_current_info <- function(x) {
  # Fix: snapshot the previous *values* before mutating. The original
  # returned `cur_info_env` itself, which aliases the single environment —
  # after mutation the "old" object held the new values, so restoring it
  # was a no-op.
  old <- list(
    feature_names = cur_info_env$feature_names,
    feature_types = cur_info_env$feature_types
  )
  cur_info_env$feature_names <- x$feature_names
  cur_info_env$feature_types <- x$feature_types
  invisible(old)
}
# Return the selector context environment, erroring if it is unset.
current_info <- function() {
  if (is.null(cur_info_env)) {
    stop("Variable context not set", call. = FALSE)
  }
  cur_info_env
}
#' Identify the type of the variable.
#'
#' Can only be used inside the [steps] specifications to find
#' variables by type.
#'
#' @param match A list of types to match.
#'
#' @family Selectors
#' @export
#' Identify the type of the variable.
#'
#' Can only be used inside the [steps] specifications to find
#' variables by type.
#'
#' @param match A list of types to match.
#'
#' @family Selectors
#' @export
has_type <- function(match = "float32") {
  info <- current_info()
  # Fix: use type-stable vapply — sapply returns list() for an empty
  # feature set, and which() then errors on a non-logical argument.
  lgl_matches <- vapply(
    info$feature_types,
    function(x) any(x %in% match),
    logical(1)
  )
  info$feature_names[which(lgl_matches)]
}
# Resolve tidyselect expressions (`terms`) against the spec's feature
# names/types. The selector context is installed for the duration of the
# call and restored on exit.
terms_select <- function(feature_names, feature_types, terms) {
  previous_info <- set_current_info(
    list(feature_names = feature_names, feature_types = feature_types)
  )
  on.exit(set_current_info(previous_info), add = TRUE)
  tidyselect::vars_select(feature_names, !!!terms)
}
#' Specify all numeric variables.
#'
#' Find all the variables with the following types:
#' "float16", "float32", "float64", "int16", "int32", "int64",
#' "half", "double".
#'
#' @family Selectors
#' @export
all_numeric <- function() {
  # Every TF floating-point and integer dtype counts as numeric.
  numeric_types <- c(
    "float16", "float32", "float64", "int16", "int32", "int64",
    "half", "double"
  )
  has_type(numeric_types)
}
#' Find all nominal variables.
#'
#' Currently we only consider "string" type as nominal.
#'
#' @family Selectors
#' @export
all_nominal <- function() {
  # "string" is currently the only dtype treated as nominal.
  has_type("string")
}
#' @importFrom tidyselect starts_with
#' @export
tidyselect::starts_with
#' @importFrom tidyselect ends_with
#' @export
tidyselect::ends_with
#' @importFrom tidyselect contains
#' @export
tidyselect::contains
#' @importFrom tidyselect everything
#' @export
tidyselect::everything
#' @importFrom tidyselect matches
#' @export
tidyselect::matches
#' @importFrom tidyselect num_range
#' @export
tidyselect::num_range
#' @importFrom tidyselect one_of
#' @export
tidyselect::one_of
# FeatureSpec ------------------------------------------------------------------
# R6 class backing `feature_spec()`. Holds the dataset, the ordered list of
# steps, and — after `fit()` — the state each step accumulated while
# streaming over the data.
FeatureSpec <- R6::R6Class(
  "FeatureSpec",
  public = list(
    steps = list(),          # named list of Step objects, applied in order
    formula = NULL,
    column_names = NULL,
    column_types = NULL,
    dataset = NULL,          # x-only dataset used while fitting
    fitted = FALSE,
    prepared_dataset = NULL, # dataset yielding named (x, y) structures
    x = NULL,                # quosure selecting the predictors
    y = NULL,                # quosure selecting the response (optional)

    # dataset: a TF dataset or a data.frame; x/y: tidy-eval selections.
    initialize = function(dataset, x, y = NULL) {
      if (inherits(dataset, "data.frame")) {
        dataset <- tensors_dataset(dataset)
      }
      # Fix: the original `self$formula <- formula` stored the base
      # `formula()` *function* (there is no `formula` argument in scope);
      # the field is intentionally left NULL.
      self$x <- rlang::enquo(x)
      self$y <- rlang::enquo(y)
      self$set_dataset(dataset)
      self$column_names <- column_names(self$dataset)
      self$column_types <- output_types(self$dataset)
    },

    # Re-point the spec at `dataset`, rebuilding both the prepared (x, y)
    # dataset and the x-only view used during fitting.
    set_dataset = function(dataset) {
      self$prepared_dataset <- dataset_prepare(dataset, !!self$x, !!self$y, named_features = TRUE)
      self$dataset <- dataset_map(self$prepared_dataset, function(x) x$x)
      invisible(self)
    },

    add_step = function(step) {
      self$steps <- append(self$steps, name_step(step))
    },

    # Stream over the dataset once, feeding every batch to every step so
    # steps can accumulate statistics (vocabularies, scaler moments, ...),
    # then let each step finalize via `fit_resume()`.
    fit = function() {
      if (self$fitted)
        stop("FeatureSpec is already fitted.")
      if (tf$executing_eagerly()) {
        ds <- reticulate::as_iterator(self$dataset)
        nxt <- reticulate::iter_next(ds)
      } else {
        # graph mode: drive the iterator through a throwaway v1 session
        ds <- make_iterator_one_shot(self$dataset)
        nxt_it <- ds$get_next()
        sess <- tf$compat$v1$Session()
        nxt <- sess$run(nxt_it)
      }
      pb <- progress::progress_bar$new(
        format = ":spin Preparing :tick_rate batches/s [:current batches in :elapsedfull]",
        total = Inf)
      while (!is.null(nxt)) {
        pb$tick(1)
        for (i in seq_along(self$steps)) {
          self$steps[[i]]$fit_batch(nxt)
        }
        if (tf$executing_eagerly()) {
          nxt <- reticulate::iter_next(ds)
        } else {
          # out_of_range_handler converts end-of-dataset errors to NULL
          nxt <- tryCatch({sess$run(nxt_it)}, error = out_of_range_handler)
        }
      }
      for (i in seq_along(self$steps)) {
        self$steps[[i]]$fit_resume()
      }
      self$fitted <- TRUE
      if (!tf$executing_eagerly())
        sess$close()
    },

    # Materialize the TF feature columns, applying steps in order; a
    # RemoveStep deletes a previously created feature from the result.
    features = function() {
      if (!self$fitted)
        stop("Only available after fitting the feature_spec.")
      feats <- NULL
      for (i in seq_along(self$steps)) {
        stp <- self$steps[i]
        if (inherits(stp[[1]], "RemoveStep")) {
          feats <- feats[-which(names(feats) == stp[[1]]$var)]
        } else {
          # lapply over the one-element named list keeps the step's name
          feature <- lapply(stp, function(x) x$feature(feats))
          feats <- append(feats, feature)
          feats <- unlist(feats)
        }
      }
      feats
    },

    # Subset of `features()` usable by keras::layer_dense_features().
    dense_features = function() {
      if (!self$fitted)
        stop("Only available after fitting the feature_spec.")
      Filter(is_dense_column, self$features())
    },

    # Step names first so derived columns shadow raw dataset columns.
    feature_names = function() {
      unique(c(names(self$steps), self$column_names))
    },

    # dtype (as a string) of each feature, falling back to the source
    # column's dtype when a step does not declare `column_type`.
    feature_types = function() {
      feature_names <- self$feature_names()
      feature_types <- character(length = length(feature_names))
      for (i in seq_along(feature_names)) {
        ft <- feature_names[i]
        if (is.null(self$steps[[ft]])) {
          feature_types[i] <- dtype_chr(self$column_types[[which(self$column_names == ft)]])
        } else if (is.null(self$steps[[ft]]$column_type)) {
          feature_types[i] <- dtype_chr(self$column_types[[which(self$column_names == ft)]])
        } else {
          feature_types[i] <- self$steps[[ft]]$column_type
        }
      }
      feature_types
    },

    print = function() {
      cat(cli::rule(left = "Feature Spec"), "\n")
      cat(cli::style_bold(paste("A feature_spec with", length(self$steps), "steps.\n")))
      cat(cli::style_bold("Fitted:"), self$fitted, "\n")
      cat(cli::rule(left = "Steps"), "\n")
      if (self$fitted)
        # Fix: `self$dense_features` (the function object) always has
        # length 1 — call it to count the actual dense features.
        cat("The feature_spec has", length(self$dense_features()), "dense features.\n")
      if (length(self$steps) > 0) {
        step_types <- sapply(self$steps, function(x) class(x)[1])
        for (step_type in sort(unique(step_types))) {
          cat(
            paste0(cli::style_bold(step_type), ":"),
            paste(
              names(step_types[step_types == step_type]),
              collapse = ", "
            ),
            "\n"
          )
        }
      }
      cat(cli::rule(left = "Dense features"), "\n")
      if (self$fitted) {
        # Fix: this branch previously printed nothing; list the names.
        cat(paste(names(self$dense_features()), collapse = ", "), "\n")
      } else {
        cat("Feature spec must be fitted before we can detect the dense features.\n")
      }
    }
  ),
  private = list(
    # Clone R6 members (and lists of steps) recursively so a cloned spec
    # never shares mutable step state with the original.
    deep_clone = function(name, value) {
      if (inherits(value, "R6")) {
        value$clone(deep = TRUE)
      } else if (name == "steps" || name == "base_steps" ||
                 name == "derived_steps") {
        lapply(value, function(x) x$clone(deep = TRUE))
      } else {
        value
      }
    }
  )
)
# Step --------------------------------------------------------------------
# Abstract base class for all feature-spec steps. `fit_batch()` is called
# once per dataset batch during fitting; `fit_resume()` once at the end so
# a step can finalize its accumulated state. Both are no-ops by default.
Step <- R6::R6Class(
  classname = "Step",
  public = list(
    name = NULL,
    fit_batch = function (batch) {
    },
    fit_resume = function () {
    }
  ),
  private = list(
    # Deep-clone R6 members but pass Python objects through by reference —
    # reticulate proxies cannot (and need not) be cloned.
    deep_clone = function(name, value) {
      if (inherits(value, "python.builtin.object")) {
        value
      } else if (inherits(value, "R6")) {
        value$clone(deep = TRUE)
      } else {
        value
      }
    }
  )
)
# Marker subclass for steps that produce categorical feature columns.
CategoricalStep <- R6::R6Class(
  classname = "CategoricalStep",
  inherit = Step
)
# Step that removes a previously created feature named `var`; handled
# specially by `FeatureSpec$features()`.
RemoveStep <- R6::R6Class(
  "RemoveStep",
  inherit = Step,
  public = list(
    var = NULL,
    initialize = function(var) {
      self$var <- var
      self$name <- var
    }
  )
)
# Marker subclass for steps derived from other steps' columns
# (bucketized, crossed, shared embeddings).
DerivedStep <- R6::R6Class(
  "DerivedStep",
  inherit = Step
)
# Scalers -----------------------------------------------------------------
# Abstract normalizer: accumulates statistics batch-by-batch, then `fun()`
# returns a tensor -> tensor normalizer function usable as `normalizer_fn`.
Scaler <- R6::R6Class(
  "Scaler",
  public = list(
    fit_batch = function(batch) {
    },
    fit_resume = function() {
    },
    fun = function() {
    }
  )
)
# http://notmatthancock.github.io/2017/03/23/simple-batch-stat-updates.html
# batch updates for mean and variance.
# http://notmatthancock.github.io/2017/03/23/simple-batch-stat-updates.html
# batch updates for mean and variance.
# Accumulates a running (population) mean/sd over batches via the pooled
# two-sample update, then converts to the sample sd in `fit_resume()`.
StandardScaler <- R6::R6Class(
  "StandardScaler",
  inherit = Scaler,
  public = list(
    m = 0,     # number of observations seen so far
    sd = 0,    # running *population* sd until fit_resume() is called
    mean = 0,  # running mean
    fit_batch = function (batch) {
      m <- self$m
      mu_m <- self$mean
      sd_m <- self$sd
      n <- length(batch)
      mu_n <- mean(batch)
      # var() is the sample variance; rescale by (n-1)/n to the
      # population variance used by the pooled-update formula.
      # NOTE(review): a length-1 batch makes var() return NA — presumably
      # batches always have > 1 element; confirm.
      sd_n <- sqrt(var(batch)*(n-1)/(n))
      self$mean <- (m*mu_m + n*mu_n)/(n + m)
      # pooled population variance of the two groups plus the
      # between-group mean-shift term
      self$sd <- sqrt((m*(sd_m^2) + n*(sd_n^2))/(m+n) + m*n/((m+n)^2)*((mu_m - mu_n)^2))
      self$m <- m + n
    },
    fit_resume = function() {
      # convert population sd back to the sample sd (Bessel's correction)
      self$sd <- sqrt((self$sd^2)*self$m/(self$m -1))
    },
    # Returns the tensor normalizer (x - mean) / sd, casting ints to float.
    fun = function() {
      mean_ <- self$mean
      sd_ <- self$sd
      function(x) {
        if (!x$dtype$is_floating)
          x <- tf$cast(x, tf$float32)
        (x - tf$cast(mean_, x$dtype))/tf$cast(sd_, x$dtype)
      }
    }
  )
)
# Tracks the global min/max over batches and normalizes to [0, 1] via
# (x - min) / (max - min), casting integer tensors to float first.
MinMaxScaler <- R6::R6Class(
  "MinMaxScaler",
  inherit = Scaler,
  public = list(
    min = Inf,
    max = -Inf,
    fit_batch = function (batch) {
      self$min <- min(c(self$min, min(batch)))
      self$max <- max(c(self$max, max(batch)))
    },
    fun = function() {
      min_ <- self$min
      max_ <- self$max
      function(x) {
        if (!x$dtype$is_floating)
          x <- tf$cast(x, tf$float32)
        (x - tf$cast(min_, x$dtype))/(tf$cast(max_, x$dtype) - tf$cast(min_, x$dtype))
      }
    }
  )
)
#' List of pre-made scalers
#'
#' * [scaler_standard]: mean and standard deviation normalizer.
#' * [scaler_min_max]: min max normalizer
#'
#' @seealso [step_numeric_column]
#' @name scaler
#' @rdname scaler
NULL
#' Creates an instance of a standard scaler
#'
#' This scaler will learn the mean and the standard deviation
#' and use this to create a `normalizer_fn`.
#'
#' @seealso [scaler] to a complete list of normalizers
#' @family scaler
#' @export
# Construct a fresh (unfitted) standard scaler; its mean/sd are learned
# when the feature spec is fitted.
scaler_standard <- function() StandardScaler$new()
#' Creates an instance of a min max scaler
#'
#' This scaler will learn the min and max of the numeric variable
#' and use this to create a `normalizer_fn`.
#'
#' @seealso [scaler] to a complete list of normalizers
#' @family scaler
#' @export
# Construct a fresh (unfitted) min-max scaler; its min/max are learned
# when the feature spec is fitted.
scaler_min_max <- function() MinMaxScaler$new()
# StepNumericColumn -------------------------------------------------------
# Step producing a tf numeric_column. When `normalizer_fn` is a Scaler, the
# scaler is trained batch-by-batch during fitting and then replaced by the
# plain normalizer function it yields.
StepNumericColumn <- R6::R6Class(
  "StepNumericColumn",
  inherit = Step,
  public = list(
    key = NULL,
    shape = NULL,
    default_value = NULL,
    dtype = NULL,
    normalizer_fn = NULL,
    column_type = NULL,
    initialize = function(key, shape, default_value, dtype, normalizer_fn, name) {
      self$key <- key
      self$shape <- shape
      self$default_value <- default_value
      self$dtype <- dtype
      self$normalizer_fn <- normalizer_fn
      self$name <- name
      self$column_type = dtype_chr(dtype)
    },
    fit_batch = function(batch) {
      # only Scaler normalizers need to see the data
      if (inherits(self$normalizer_fn, "Scaler")) {
        self$normalizer_fn$fit_batch(as.numeric(batch[[self$key]]))
      }
    },
    fit_resume = function() {
      if (inherits(self$normalizer_fn, "Scaler")) {
        self$normalizer_fn$fit_resume()
        # swap the Scaler object for the closure TF will actually call
        self$normalizer_fn <- self$normalizer_fn$fun()
      }
    },
    feature = function (base_features) {
      tf$feature_column$numeric_column(
        key = self$key, shape = self$shape,
        default_value = self$default_value,
        dtype = self$dtype,
        normalizer_fn = self$normalizer_fn
      )
    }
  )
)
# StepCategoricalColumnWithVocabularyList ---------------------------------
# Step producing a tf categorical_column_with_vocabulary_list. If no
# vocabulary is supplied, one is learned during fitting by accumulating the
# sorted unique values seen across batches.
StepCategoricalColumnWithVocabularyList <- R6::R6Class(
  "StepCategoricalColumnWithVocabularyList",
  inherit = CategoricalStep,
  public = list(
    key = NULL,
    vocabulary_list = NULL,
    dtype = NULL,
    default_value = -1L,
    num_oov_buckets = 0L,
    # scratch accumulator for the learned vocabulary
    vocabulary_list_aux = NULL,
    column_type = NULL,
    initialize = function(key, vocabulary_list = NULL, dtype = NULL, default_value = -1L,
                          num_oov_buckets = 0L, name) {
      self$key <- key
      self$vocabulary_list <- vocabulary_list
      self$dtype = dtype
      self$default_value <- default_value
      self$num_oov_buckets <- num_oov_buckets
      self$name <- name
      if (!is.null(dtype)) {
        self$column_type = dtype_chr(dtype)
      }
    },
    fit_batch = function(batch) {
      # nothing to learn when the user fixed the vocabulary up-front
      if (is.null(self$vocabulary_list)) {
        values <- batch[[self$key]]
        if (inherits(values, "tensorflow.tensor")) {
          # add shape to tensor with no shape
          if (identical(values$shape$as_list(), list()))
            values <- tf$constant(values, shape = 1L)
          # get unique values before converting to R.
          values <- tensorflow::tf$unique(values)$y
          if (!is.atomic(values))
            values <- values$numpy()
        }
        # converts from bytes to an R string. Need in python >= 3.6
        # special case when values is a single value of type string
        if (inherits(values, "python.builtin.bytes"))
          values <- values$decode()
        if (inherits(values[[1]], "python.builtin.bytes"))
          values <- sapply(values, function(x) x$decode())
        unq <- unique(values)
        # keep the accumulated vocabulary sorted and duplicate-free
        self$vocabulary_list_aux <- sort(unique(c(self$vocabulary_list_aux, unq)))
      }
    },
    fit_resume = function() {
      # promote the learned vocabulary to the real one
      if (is.null(self$vocabulary_list)) {
        self$vocabulary_list <- self$vocabulary_list_aux
      }
    },
    feature = function(base_features) {
      tf$feature_column$categorical_column_with_vocabulary_list(
        key = self$key,
        vocabulary_list = self$vocabulary_list,
        dtype = self$dtype,
        default_value = self$default_value,
        num_oov_buckets = self$num_oov_buckets
      )
    }
  )
)
# StepCategoricalColumnWithHashBucket -------------------------------------
# Step producing a tf categorical_column_with_hash_bucket: category ids are
# computed by hashing the input into `hash_bucket_size` buckets, so no
# vocabulary needs to be learned (no fit_batch override).
StepCategoricalColumnWithHashBucket <- R6::R6Class(
  "StepCategoricalColumnWithHashBucket",
  inherit = CategoricalStep,
  public = list(
    key = NULL,
    hash_bucket_size = NULL,
    dtype = NULL,
    column_type = NULL,
    initialize = function(key, hash_bucket_size, dtype = tf$string, name) {
      self$key <- key
      self$hash_bucket_size <- hash_bucket_size
      self$dtype <- dtype
      self$name <- name
      if (!is.null(dtype)) {
        self$column_type = dtype_chr(dtype)
      }
    },
    feature = function (base_features) {
      tf$feature_column$categorical_column_with_hash_bucket(
        key = self$key,
        hash_bucket_size = self$hash_bucket_size,
        dtype = self$dtype
      )
    }
  )
)
# StepCategoricalColumnWithIdentity -------------------------------------
# Step producing a tf categorical_column_with_identity for integer inputs
# already in [0, num_buckets). Unlike the other categorical steps this one
# declares no `column_type`, so the source column's dtype is reported.
StepCategoricalColumnWithIdentity <- R6::R6Class(
  "StepCategoricalColumnWithIdentity",
  inherit = CategoricalStep,
  public = list(
    key = NULL,
    num_buckets = NULL,
    default_value = NULL,
    initialize = function(key, num_buckets, default_value = NULL, name) {
      self$key <- key
      self$num_buckets <- num_buckets
      self$default_value <- default_value
      self$name <- name
    },
    feature = function (base_features) {
      tf$feature_column$categorical_column_with_identity(
        key = self$key,
        num_buckets = self$num_buckets,
        default_value = self$default_value
      )
    }
  )
)
# StepCategoricalColumnWithVocabularyFile -------------------------------------
# Step producing a tf categorical_column_with_vocabulary_file; the
# vocabulary lives in a file, one entry per line.
StepCategoricalColumnWithVocabularyFile <- R6::R6Class(
  "StepCategoricalColumnWithVocabularyFile",
  inherit = CategoricalStep,
  public = list(
    key = NULL,
    vocabulary_file = NULL,
    vocabulary_size = NULL,
    dtype = NULL,
    default_value = NULL,
    num_oov_buckets = NULL,
    column_type = NULL,
    initialize = function(key, vocabulary_file, vocabulary_size = NULL, dtype = tf$string,
                          default_value = NULL, num_oov_buckets = 0L, name) {
      self$key <- key
      # resolve to an absolute path so later working-directory changes
      # don't break the TF column; normalizePath warns if the file is
      # missing at this point.
      self$vocabulary_file <- normalizePath(vocabulary_file)
      self$vocabulary_size <- vocabulary_size
      self$dtype <- dtype
      self$default_value <- default_value
      self$num_oov_buckets <- num_oov_buckets
      self$name <- name
      if (!is.null(dtype)) {
        self$column_type = dtype_chr(dtype)
      }
    },
    feature = function (base_features) {
      tf$feature_column$categorical_column_with_vocabulary_file(
        key = self$key,
        vocabulary_file = self$vocabulary_file,
        vocabulary_size = self$vocabulary_size,
        dtype = self$dtype,
        default_value = self$default_value,
        num_oov_buckets = self$num_oov_buckets
      )
    }
  )
)
# StepIndicatorColumn -----------------------------------------------------
# Step producing an indicator (one/multi-hot) column wrapping a previously
# defined categorical column, looked up by name in `base_features`.
StepIndicatorColumn <- R6::R6Class(
  "StepIndicatorColumn",
  inherit = Step,
  public = list(
    categorical_column = NULL,  # name of the source categorical step
    base_features = NULL,
    column_type = "float32",
    initialize = function(categorical_column, name) {
      self$categorical_column = categorical_column
      self$name <- name
    },
    feature = function(base_features) {
      tf$feature_column$indicator_column(base_features[[self$categorical_column]])
    }
  )
)
# StepEmbeddingColumn -----------------------------------------------------
# Step producing an embedding column wrapping a previously defined
# categorical column. `dimension` may be a function of the vocabulary size,
# resolved lazily when the feature is built.
StepEmbeddingColumn <- R6::R6Class(
  "StepEmbeddingColumn",
  inherit = Step,
  public = list(
    categorical_column = NULL,  # name of the source categorical step
    dimension = NULL,
    combiner = NULL,
    initializer = NULL,
    ckpt_to_load_from = NULL,
    tensor_name_in_ckpt = NULL,
    max_norm = NULL,
    trainable = NULL,
    column_type = "float32",
    initialize = function(categorical_column, dimension = NULL, combiner = "mean", initializer = NULL,
                          ckpt_to_load_from = NULL, tensor_name_in_ckpt = NULL, max_norm = NULL,
                          trainable = TRUE, name) {
      self$categorical_column <- categorical_column
      self$dimension <- dimension
      self$combiner <- combiner
      self$initializer <- initializer
      self$ckpt_to_load_from <- ckpt_to_load_from
      self$tensor_name_in_ckpt <- tensor_name_in_ckpt
      self$max_norm <- max_norm
      self$trainable <- trainable
      self$name <- name
    },
    feature = function(base_features) {
      categorical_column <- base_features[[self$categorical_column]]
      if (is.function(self$dimension)) {
        # NOTE(review): this assumes the source column has a
        # `vocabulary_list` attribute; for hash-bucket/identity columns its
        # length would be 0 — confirm function dimensions are only used
        # with vocabulary-list columns.
        dimension <- self$dimension(length(categorical_column$vocabulary_list))
      } else {
        dimension <- self$dimension
      }
      tf$feature_column$embedding_column(
        categorical_column = categorical_column,
        dimension = as.integer(dimension),  # TF requires an int dimension
        combiner = self$combiner,
        initializer = self$initializer,
        ckpt_to_load_from = self$ckpt_to_load_from,
        tensor_name_in_ckpt = self$tensor_name_in_ckpt,
        max_norm = self$max_norm,
        trainable = self$trainable
      )
    }
  )
)
# StepCrossedColumn -------------------------------------------------------
# Step producing a crossed column over several previously defined
# categorical columns (looked up by name in `base_features`).
StepCrossedColumn <- R6::R6Class(
  "StepCrossedColumn",
  inherit = DerivedStep,
  public = list(
    keys = NULL,  # names of the source categorical steps
    hash_bucket_size = NULL,
    hash_key = NULL,
    column_type = "string",
    initialize = function (keys, hash_bucket_size, hash_key = NULL, name = NULL) {
      self$keys <- keys
      self$hash_bucket_size <- hash_bucket_size
      self$hash_key <- hash_key
      self$name <- name
    },
    feature = function(base_features) {
      keys <- lapply(self$keys, function(x) base_features[[x]])
      # strip names: TF expects a plain list of columns
      names(keys) <- NULL
      tf$feature_column$crossed_column(
        keys = keys,
        hash_bucket_size = self$hash_bucket_size,
        hash_key = self$hash_key
      )
    }
  )
)
# StepBucketizedColumn ----------------------------------------------------
# Step producing a bucketized column from a previously defined numeric
# column (looked up by name in `base_features`).
StepBucketizedColumn <- R6::R6Class(
  "StepBucketizedColumn",
  inherit = DerivedStep,
  public = list(
    source_column = NULL,  # name of the source numeric step
    boundaries = NULL,     # sorted bucket boundaries
    column_type = "float32",
    initialize = function(source_column, boundaries, name) {
      self$source_column <- source_column
      self$boundaries <- boundaries
      self$name <- name
    },
    feature = function(base_features) {
      tf$feature_column$bucketized_column(
        source_column = base_features[[self$source_column]],
        boundaries = self$boundaries
      )
    }
  )
)
# StepSharedEmbeddings ----------------------------------------------------
# Step producing a list of embedding columns that share one weight matrix,
# over several previously defined categorical columns.
StepSharedEmbeddings <- R6::R6Class(
  "StepSharedEmbeddings",
  inherit = DerivedStep,
  public = list(
    categorical_columns = NULL,  # names of the source categorical steps
    dimension = NULL,
    combiner = NULL,
    initializer = NULL,
    shared_embedding_collection_name = NULL,
    ckpt_to_load_from = NULL,
    tensor_name_in_ckpt = NULL,
    max_norm = NULL,
    trainable = NULL,
    column_type = "float32",
    initialize = function(categorical_columns, dimension, combiner = "mean",
                          initializer = NULL, shared_embedding_collection_name = NULL,
                          ckpt_to_load_from = NULL, tensor_name_in_ckpt = NULL,
                          max_norm = NULL, trainable = TRUE, name = NULL) {
      self$categorical_columns <- categorical_columns
      self$dimension <- dimension
      self$combiner <- combiner
      self$initializer <- initializer
      self$shared_embedding_collection_name <- shared_embedding_collection_name
      self$ckpt_to_load_from <- ckpt_to_load_from
      self$tensor_name_in_ckpt <- tensor_name_in_ckpt
      self$max_norm <- max_norm
      self$trainable <- trainable
      self$name <- name
    },
    feature = function(base_features) {
      categorical_columns <- lapply(self$categorical_columns, function(x) {
        base_features[[x]]
      })
      # strip names: TF expects a plain list of columns
      names(categorical_columns) <- NULL
      tf$feature_column$shared_embeddings(
        categorical_columns = categorical_columns,
        # Fix: coerce to integer (consistent with StepEmbeddingColumn) —
        # R numeric literals are doubles, but TF requires an int dimension.
        dimension = as.integer(self$dimension),
        combiner = self$combiner,
        initializer = self$initializer,
        shared_embedding_collection_name = self$shared_embedding_collection_name,
        ckpt_to_load_from = self$ckpt_to_load_from,
        tensor_name_in_ckpt = self$tensor_name_in_ckpt,
        max_norm = self$max_norm,
        trainable = self$trainable
      )
    }
  )
)
# Wrappers ----------------------------------------------------------------
#' Creates a feature specification.
#'
#' Used to create initialize a feature columns specification.
#'
#' @param dataset A TensorFlow dataset.
#' @param x Features to include can use [tidyselect::select_helpers()] or
#' a `formula`.
#' @param y (Optional) The response variable. Can also be specified using
#' a `formula` in the `x` argument.
#'
#' @details
#' After creating the `feature_spec` object you can add steps using the
#' `step` functions.
#'
#' @return a `FeatureSpec` object.
#'
#' @seealso
#' * [fit.FeatureSpec()] to fit the FeatureSpec
#' * [dataset_use_spec()] to create a tensorflow dataset prepared to modeling.
#' * [steps] to a list of all implemented steps.
#'
#' @examples
#' \dontrun{
#' library(tfdatasets)
#' data(hearts)
#' hearts <- tensor_slices_dataset(hearts) %>% dataset_batch(32)
#'
#' # use the formula interface
#' spec <- feature_spec(hearts, target ~ .)
#'
#' # select using `tidyselect` helpers
#' spec <- feature_spec(hearts, x = c(thal, age), y = target)
#' }
#' @family Feature Spec Functions
#' @export
feature_spec <- function(dataset, x, y = NULL) {
  # Feature columns are only supported with TF >= 2.0 due to an upstream
  # bug; see https://github.com/tensorflow/tensorflow/issues/30307
  if (tensorflow::tf_version() < "2.0")
    stop("Feature spec is only available with TensorFlow >= 2.0", call. = FALSE)
  # capture the selections unevaluated and forward them to the R6 class
  quo_x <- rlang::enquo(x)
  quo_y <- rlang::enquo(y)
  FeatureSpec$new(dataset, x = !!quo_x, y = !!quo_y)
}
#' Fits a feature specification.
#'
#' This function will `fit` the specification. Depending
#' on the steps added to the specification it will compute
#' for example, the levels of categorical features, normalization
#' constants, etc.
#'
#' @param object A feature specification created with [feature_spec()].
#' @param dataset (Optional) A TensorFlow dataset. If `NULL` it will use
#'   the dataset provided when initializing the `feature_spec`.
#' @param ... (unused)
#'
#' @seealso
#' * [feature_spec()] to initialize the feature specification.
#' * [dataset_use_spec()] to create a tensorflow dataset prepared to modeling.
#' * [steps] to a list of all implemented steps.
#'
#' @return a fitted `FeatureSpec` object.
#'
#' @examples
#' \dontrun{
#' library(tfdatasets)
#' data(hearts)
#' hearts <- tensor_slices_dataset(hearts) %>% dataset_batch(32)
#'
#' # use the formula interface
#' spec <- feature_spec(hearts, target ~ age) %>%
#' step_numeric_column(age)
#'
#' spec_fit <- fit(spec)
#' spec_fit
#' }
#' @family Feature Spec Functions
#' @export
fit.FeatureSpec <- function(object, dataset = NULL, ...) {
  # never fit the user's spec in place: work on a deep clone
  fitted_spec <- object$clone(deep = TRUE)
  if (!is.null(dataset)) {
    fitted_spec$set_dataset(dataset)
  }
  fitted_spec$fit()
  fitted_spec
}
#' Transform the dataset using the provided spec.
#'
#' Prepares the dataset to be used directly in a model.The transformed
#' dataset is prepared to return tuples (x,y) that can be used directly
#' in Keras.
#'
#' @param dataset A TensorFlow dataset.
#' @param spec A feature specification created with [feature_spec()].
#' @seealso
#' * [feature_spec()] to initialize the feature specification.
#'  * [fit.FeatureSpec()] to fit the FeatureSpec.
#' * [steps] to a list of all implemented steps.
#'
#' @return A TensorFlow dataset.
#'
#' @examples
#' \dontrun{
#' library(tfdatasets)
#' data(hearts)
#' hearts <- tensor_slices_dataset(hearts) %>% dataset_batch(32)
#'
#' # use the formula interface
#' spec <- feature_spec(hearts, target ~ age) %>%
#' step_numeric_column(age)
#'
#' spec_fit <- fit(spec)
#' final_dataset <- hearts %>% dataset_use_spec(spec_fit)
#' }
#' @family Feature Spec Functions
#' @export
dataset_use_spec <- function(dataset, spec) {
  if (!inherits(dataset, "tensorflow.python.data.ops.dataset_ops.DatasetV2"))
    stop("`dataset` must be a TensorFlow dataset.")
  if (!spec$fitted)
    stop("FeatureSpec must be prepared before juicing.")
  # clone so re-pointing the spec at this dataset has no side effects
  spec_copy <- spec$clone(deep = TRUE)
  spec_copy$set_dataset(dataset)
  # emit (x, y) tuples, the shape Keras expects
  dataset_map(
    spec_copy$prepared_dataset,
    function(x) reticulate::tuple(x$x, x$y)
  )
}
#' Steps for feature columns specification.
#'
#' List of steps that can be used to specify columns in the `feature_spec` interface.
#'
#' @section Steps:
#'
#' * [step_numeric_column()] to define numeric columns.
#' * [step_categorical_column_with_vocabulary_list()] to define categorical columns.
#' * [step_categorical_column_with_hash_bucket()] to define categorical columns
#' where ids are set by hashing.
#' * [step_categorical_column_with_identity()] to define categorical columns
#' represented by integers in the range `[0-num_buckets)`.
#' * [step_categorical_column_with_vocabulary_file()] to define categorical columns
#' when their vocabulary is available in a file.
#' * [step_indicator_column()] to create indicator columns from categorical columns.
#' * [step_embedding_column()] to create embeddings columns from categorical columns.
#' * [step_bucketized_column()] to create bucketized columns from numeric columns.
#' * [step_crossed_column()] to perform crosses of categorical columns.
#' * [step_shared_embeddings_column()] to share embeddings between a list of
#' categorical columns.
#' * [step_remove_column()] to remove columns from the specification.
#'
#' @seealso
#' * [selectors] for a list of selectors that can be used to specify variables.
#'
#' @name steps
#' @rdname steps
#' @family Feature Spec Functions
NULL
#' Creates a numeric column specification
#'
#' `step_numeric_column` creates a numeric column specification. It can also be
#' used to normalize numeric columns.
#'
#' @param spec A feature specification created with [feature_spec()].
#' @param ... Comma separated list of variable names to apply the step. [selectors] can also be used.
#' @param shape An iterable of integers specifies the shape of the Tensor. An integer can be given
#' which means a single dimension Tensor with given width. The Tensor representing the column will
#' have the shape of `batch_size` + `shape`.
#' @param default_value A single value compatible with `dtype` or an iterable of values compatible
#' with `dtype` which the column takes on during `tf.Example` parsing if data is missing. A
#' default value of `NULL` will cause `tf.parse_example` to fail if an example does not contain
#' this column. If a single value is provided, the same value will be applied as
#' the default value for every item. If an iterable of values is provided, the shape
#' of the default_value should be equal to the given shape.
#' @param dtype defines the type of values. Default value is `tf$float32`. Must be a non-quantized,
#' real integer or floating point type.
#' @param normalizer_fn If not `NULL`, a function that can be used to normalize the value
#' of the tensor after default_value is applied for parsing. Normalizer function takes the
#' input Tensor as its argument, and returns the output Tensor. (e.g. `function(x) (x - 3.0) / 4.2)`.
#' Please note that even though the most common use case of this function is normalization, it
#' can be used for any kind of Tensorflow transformations. You can also use a pre-made [scaler], in
#' this case a function will be created after [fit.FeatureSpec] is called on the feature specification.
#'
#' @return a `FeatureSpec` object.
#'
#' @examples
#' \dontrun{
#' library(tfdatasets)
#' data(hearts)
#' hearts <- tensor_slices_dataset(hearts) %>% dataset_batch(32)
#'
#' # use the formula interface
#' spec <- feature_spec(hearts, target ~ age) %>%
#' step_numeric_column(age, normalizer_fn = standard_scaler())
#'
#' spec_fit <- fit(spec)
#' final_dataset <- hearts %>% dataset_use_spec(spec_fit)
#' }
#'
#' @seealso [steps] for a complete list of allowed steps.
#'
#' @family Feature Spec Functions
#' @export
step_numeric_column <- function(spec, ..., shape = 1L, default_value = NULL,
                                dtype = tf$float32, normalizer_fn = NULL) {
  # Work on a deep copy so the caller's spec is never mutated.
  updated_spec <- spec$clone(deep = TRUE)
  selected <- terms_select(
    updated_spec$feature_names(),
    updated_spec$feature_types(),
    quos(...)
  )
  # Register one numeric-column step per selected feature, named after it.
  for (feature in selected) {
    updated_spec$add_step(
      StepNumericColumn$new(feature, shape, default_value, dtype, normalizer_fn,
                            name = feature)
    )
  }
  updated_spec
}
#' Creates a step that can remove columns
#'
#' Removes features of the feature specification.
#'
#' @inheritParams step_numeric_column
#'
#' @return a `FeatureSpec` object.
#'
#' @examples
#' \dontrun{
#' library(tfdatasets)
#' data(hearts)
#' hearts <- tensor_slices_dataset(hearts) %>% dataset_batch(32)
#'
#' # use the formula interface
#' spec <- feature_spec(hearts, target ~ age) %>%
#' step_numeric_column(age, normalizer_fn = scaler_standard()) %>%
#' step_bucketized_column(age, boundaries = c(20, 50)) %>%
#' step_remove_column(age)
#'
#' spec_fit <- fit(spec)
#' final_dataset <- hearts %>% dataset_use_spec(spec_fit)
#' }
#'
#' @seealso [steps] for a complete list of allowed steps.
#' @family Feature Spec Functions
#'
#' @export
step_remove_column <- function(spec, ...) {
  # Clone first: feature specs are modified only through returned copies.
  updated_spec <- spec$clone(deep = TRUE)
  selected <- terms_select(
    updated_spec$feature_names(),
    updated_spec$feature_types(),
    quos(...)
  )
  # Queue a removal step for every matched feature.
  for (feature in selected) {
    updated_spec$add_step(RemoveStep$new(feature))
  }
  updated_spec
}
#' Creates a categorical column specification
#'
#' @inheritParams step_numeric_column
#' @param vocabulary_list An ordered iterable defining the vocabulary. Each
#' feature is mapped to the index of its value (if present) in vocabulary_list.
#' Must be castable to `dtype`. If `NULL` the vocabulary will be defined as
#' all unique values in the dataset provided when fitting the specification.
#' @param dtype The type of features. Only string and integer types are supported.
#' If `NULL`, it will be inferred from `vocabulary_list`.
#' @param default_value The integer ID value to return for out-of-vocabulary feature
#' values, defaults to `-1`. This can not be specified with a positive
#' num_oov_buckets.
#' @param num_oov_buckets Non-negative integer, the number of out-of-vocabulary buckets.
#' All out-of-vocabulary inputs will be assigned IDs in the range
#' `[length(vocabulary_list), length(vocabulary_list)+num_oov_buckets)` based on a hash of
#' the input value. A positive num_oov_buckets can not be specified with
#' default_value.
#'
#' @examples
#' \dontrun{
#' library(tfdatasets)
#' data(hearts)
#' hearts <- tensor_slices_dataset(hearts) %>% dataset_batch(32)
#'
#' # use the formula interface
#' spec <- feature_spec(hearts, target ~ thal) %>%
#' step_categorical_column_with_vocabulary_list(thal)
#'
#' spec_fit <- fit(spec)
#' final_dataset <- hearts %>% dataset_use_spec(spec_fit)
#' }
#'
#' @return a `FeatureSpec` object.
#' @seealso [steps] for a complete list of allowed steps.
#'
#' @family Feature Spec Functions
#' @export
step_categorical_column_with_vocabulary_list <- function(spec, ..., vocabulary_list = NULL,
                                                         dtype = NULL, default_value = -1L,
                                                         num_oov_buckets = 0L) {
  # Operate on a deep copy so the input spec stays untouched.
  updated_spec <- spec$clone(deep = TRUE)
  selected <- terms_select(
    updated_spec$feature_names(),
    updated_spec$feature_types(),
    quos(...)
  )
  # One vocabulary-list step per feature; a NULL vocabulary is resolved at fit().
  for (feature in selected) {
    updated_spec$add_step(
      StepCategoricalColumnWithVocabularyList$new(
        feature, vocabulary_list, dtype,
        default_value, num_oov_buckets,
        name = feature
      )
    )
  }
  updated_spec
}
#' Creates a categorical column with hash buckets specification
#'
#' Represents sparse feature where ids are set by hashing.
#'
#' @inheritParams step_numeric_column
#' @param hash_bucket_size An int > 1. The number of buckets.
#' @param dtype The type of features. Only string and integer types are supported.
#'
#' @examples
#' \dontrun{
#' library(tfdatasets)
#' data(hearts)
#' hearts <- tensor_slices_dataset(hearts) %>% dataset_batch(32)
#'
#' # use the formula interface
#' spec <- feature_spec(hearts, target ~ thal) %>%
#' step_categorical_column_with_hash_bucket(thal, hash_bucket_size = 3)
#'
#' spec_fit <- fit(spec)
#' final_dataset <- hearts %>% dataset_use_spec(spec_fit)
#' }
#'
#' @return a `FeatureSpec` object.
#' @seealso [steps] for a complete list of allowed steps.
#'
#' @family Feature Spec Functions
#' @export
step_categorical_column_with_hash_bucket <- function(spec, ..., hash_bucket_size,
                                                     dtype = tf$string) {
  # Deep-clone so the step is added to a fresh copy of the spec.
  updated_spec <- spec$clone(deep = TRUE)
  selected <- terms_select(
    updated_spec$feature_names(),
    updated_spec$feature_types(),
    quos(...)
  )
  # Each selected feature gets its own hash-bucket categorical step.
  for (feature in selected) {
    updated_spec$add_step(
      StepCategoricalColumnWithHashBucket$new(
        feature,
        hash_bucket_size = hash_bucket_size,
        dtype = dtype,
        name = feature
      )
    )
  }
  updated_spec
}
#' Create a categorical column with identity
#'
#' Use this when your inputs are integers in the range `[0-num_buckets)`.
#'
#' @inheritParams step_numeric_column
#' @param num_buckets Range of inputs and outputs is `[0, num_buckets)`.
#' @param default_value If `NULL`, this column's graph operations will fail
#' for out-of-range inputs. Otherwise, this value must be in the range
#' `[0, num_buckets)`, and will replace inputs in that range.
#'
#' @examples
#' \dontrun{
#' library(tfdatasets)
#' data(hearts)
#'
#' hearts$thal <- as.integer(as.factor(hearts$thal)) - 1L
#'
#' hearts <- tensor_slices_dataset(hearts) %>% dataset_batch(32)
#'
#' # use the formula interface
#' spec <- feature_spec(hearts, target ~ thal) %>%
#' step_categorical_column_with_identity(thal, num_buckets = 5)
#'
#' spec_fit <- fit(spec)
#' final_dataset <- hearts %>% dataset_use_spec(spec_fit)
#' }
#'
#' @return a `FeatureSpec` object.
#' @seealso [steps] for a complete list of allowed steps.
#'
#' @family Feature Spec Functions
#' @export
step_categorical_column_with_identity <- function(spec, ..., num_buckets,
                                                  default_value = NULL) {
  # Never mutate the caller's spec; clone and return the modified copy.
  updated_spec <- spec$clone(deep = TRUE)
  selected <- terms_select(
    updated_spec$feature_names(),
    updated_spec$feature_types(),
    quos(...)
  )
  # Identity columns assume integer inputs in [0, num_buckets).
  for (feature in selected) {
    updated_spec$add_step(
      StepCategoricalColumnWithIdentity$new(
        key = feature,
        num_buckets = num_buckets,
        default_value = default_value,
        name = feature
      )
    )
  }
  updated_spec
}
#' Creates a categorical column with vocabulary file
#'
#' Use this function when the vocabulary of a categorical variable
#' is written to a file.
#'
#' @inheritParams step_numeric_column
#' @param vocabulary_file The vocabulary file name.
#' @param vocabulary_size Number of the elements in the vocabulary. This
#' must be no greater than length of `vocabulary_file`, if less than
#' length, later values are ignored. If `NULL`, it is set to the length of
#' `vocabulary_file`.
#' @param dtype The type of features. Only string and integer types are
#' supported.
#' @param default_value The integer ID value to return for out-of-vocabulary
#' feature values, defaults to `-1`. This can not be specified with a
#' positive `num_oov_buckets`.
#' @param num_oov_buckets Non-negative integer, the number of out-of-vocabulary
#' buckets. All out-of-vocabulary inputs will be assigned IDs in the range
#' `[vocabulary_size, vocabulary_size+num_oov_buckets)` based on a hash of
#' the input value. A positive `num_oov_buckets` can not be specified with
#' default_value.
#'
#' @examples
#' \dontrun{
#' library(tfdatasets)
#' data(hearts)
#' file <- tempfile()
#' writeLines(unique(hearts$thal), file)
#' hearts <- tensor_slices_dataset(hearts) %>% dataset_batch(32)
#'
#' # use the formula interface
#' spec <- feature_spec(hearts, target ~ thal) %>%
#' step_categorical_column_with_vocabulary_file(thal, vocabulary_file = file)
#'
#' spec_fit <- fit(spec)
#' final_dataset <- hearts %>% dataset_use_spec(spec_fit)
#' }
#'
#' @return a `FeatureSpec` object.
#' @seealso [steps] for a complete list of allowed steps.
#'
#' @family Feature Spec Functions
#' @export
step_categorical_column_with_vocabulary_file <- function(spec, ..., vocabulary_file,
                                                         vocabulary_size = NULL,
                                                         dtype = tf$string,
                                                         default_value = NULL,
                                                         num_oov_buckets = 0L) {
  # Clone so the returned spec is independent of the input spec.
  updated_spec <- spec$clone(deep = TRUE)
  selected <- terms_select(
    updated_spec$feature_names(),
    updated_spec$feature_types(),
    quos(...)
  )
  # All selected features share the same vocabulary-file configuration.
  for (feature in selected) {
    updated_spec$add_step(
      StepCategoricalColumnWithVocabularyFile$new(
        key = feature,
        vocabulary_file = vocabulary_file,
        vocabulary_size = vocabulary_size,
        dtype = dtype,
        default_value = default_value,
        num_oov_buckets = num_oov_buckets,
        name = feature
      )
    )
  }
  updated_spec
}
# Pick the name for a single generated step: the user-supplied name on the
# quosure when there is exactly one non-empty name, otherwise "<step>_<variable>".
make_step_name <- function(quosure, variable, step) {
  user_name <- names(quosure)
  has_user_name <- !is.null(user_name) &&
    length(user_name) == 1 &&
    !is.na(user_name) &&
    nzchar(user_name)
  if (has_user_name) user_name else paste0(step, "_", variable)
}
# Shared builder for single-column steps: resolves the tidyselect expressions,
# validates user-supplied names, and adds one step per selected feature.
step_ <- function(spec, ..., step, args, prefix) {
  updated_spec <- spec$clone(deep = TRUE)
  quosures <- quos(...)
  selected <- terms_select(
    updated_spec$feature_names(),
    updated_spec$feature_types(),
    quosures
  )
  # Explicit names only make sense when each quosure maps to one feature;
  # a selector can expand to several, so naming is rejected in that case.
  supplied_names <- names(quosures)
  if (!is.null(supplied_names) && any(supplied_names != "") &&
      length(supplied_names) != length(selected)) {
    stop("Can't name feature if using a selector.")
  }
  for (i in seq_along(selected)) {
    step_args <- c(
      list(
        selected[i],
        name = make_step_name(quosures[i], selected[i], prefix)
      ),
      args
    )
    updated_spec$add_step(do.call(step, step_args))
  }
  updated_spec
}
#' Creates Indicator Columns
#'
#' Use this step to create indicator columns from categorical columns.
#'
#' @inheritParams step_numeric_column
#'
#' @examples
#' \dontrun{
#' library(tfdatasets)
#' data(hearts)
#' file <- tempfile()
#' writeLines(unique(hearts$thal), file)
#' hearts <- tensor_slices_dataset(hearts) %>% dataset_batch(32)
#'
#' # use the formula interface
#' spec <- feature_spec(hearts, target ~ thal) %>%
#' step_categorical_column_with_vocabulary_list(thal) %>%
#' step_indicator_column(thal)
#' spec_fit <- fit(spec)
#' final_dataset <- hearts %>% dataset_use_spec(spec_fit)
#' }
#' @return a `FeatureSpec` object.
#' @seealso [steps] for a complete list of allowed steps.
#'
#' @family Feature Spec Functions
#' @export
step_indicator_column <- function(spec, ...) {
  # Thin wrapper: delegate to the generic single-column builder with no
  # extra constructor arguments.
  step_(
    spec, ...,
    step = StepIndicatorColumn$new,
    args = list(),
    prefix = "indicator"
  )
}
#' Creates embeddings columns
#'
#' Use this step to create embeddings columns from categorical
#' columns.
#'
#' @inheritParams step_numeric_column
#' @param dimension An integer specifying dimension of the embedding, must be > 0.
#' Can also be a function of the size of the vocabulary.
#' @param combiner A string specifying how to reduce if there are multiple entries in
#' a single row. Currently 'mean', 'sqrtn' and 'sum' are supported, with 'mean' the
#' default. 'sqrtn' often achieves good accuracy, in particular with bag-of-words
#' columns. Each of this can be thought as example level normalizations on
#' the column. For more information, see `tf.embedding_lookup_sparse`.
#' @param initializer A variable initializer function to be used in embedding
#' variable initialization. If not specified, defaults to
#' `tf.truncated_normal_initializer` with mean `0.0` and standard deviation
#' `1/sqrt(dimension)`.
#' @param ckpt_to_load_from String representing checkpoint name/pattern from
#' which to restore column weights. Required if `tensor_name_in_ckpt` is
#' not `NULL`.
#' @param tensor_name_in_ckpt Name of the Tensor in ckpt_to_load_from from which to
#' restore the column weights. Required if `ckpt_to_load_from` is not `NULL`.
#' @param max_norm If not `NULL`, embedding values are l2-normalized to this value.
#' @param trainable Whether or not the embedding is trainable. Default is `TRUE`.
#'
#' @examples
#' \dontrun{
#' library(tfdatasets)
#' data(hearts)
#' file <- tempfile()
#' writeLines(unique(hearts$thal), file)
#' hearts <- tensor_slices_dataset(hearts) %>% dataset_batch(32)
#'
#' # use the formula interface
#' spec <- feature_spec(hearts, target ~ thal) %>%
#' step_categorical_column_with_vocabulary_list(thal) %>%
#' step_embedding_column(thal, dimension = 3)
#' spec_fit <- fit(spec)
#' final_dataset <- hearts %>% dataset_use_spec(spec_fit)
#' }
#' @return a `FeatureSpec` object.
#' @seealso [steps] for a complete list of allowed steps.
#'
#' @family Feature Spec Functions
#' @export
step_embedding_column <- function(spec, ..., dimension = function(x) {as.integer(x^0.25)},
                                  combiner = "mean",
                                  initializer = NULL, ckpt_to_load_from = NULL,
                                  tensor_name_in_ckpt = NULL, max_norm = NULL,
                                  trainable = TRUE) {
  # Bundle every embedding option and hand off to the generic step builder.
  # `dimension` may be an integer or a function of the vocabulary size,
  # resolved later when the spec is fitted.
  embedding_args <- list(
    dimension = dimension,
    combiner = combiner,
    initializer = initializer,
    ckpt_to_load_from = ckpt_to_load_from,
    tensor_name_in_ckpt = tensor_name_in_ckpt,
    max_norm = max_norm,
    trainable = trainable
  )
  step_(spec, ..., step = StepEmbeddingColumn$new, args = embedding_args,
        prefix = "embedding")
}
#' Creates bucketized columns
#'
#' Use this step to create bucketized columns from numeric columns.
#'
#' @inheritParams step_numeric_column
#' @param boundaries A sorted list or tuple of floats specifying the boundaries.
#'
#' @examples
#' \dontrun{
#' library(tfdatasets)
#' data(hearts)
#' file <- tempfile()
#' writeLines(unique(hearts$thal), file)
#' hearts <- tensor_slices_dataset(hearts) %>% dataset_batch(32)
#'
#' # use the formula interface
#' spec <- feature_spec(hearts, target ~ age) %>%
#' step_numeric_column(age) %>%
#' step_bucketized_column(age, boundaries = c(10, 20, 30))
#' spec_fit <- fit(spec)
#' final_dataset <- hearts %>% dataset_use_spec(spec_fit)
#' }
#' @return a `FeatureSpec` object.
#' @seealso [steps] for a complete list of allowed steps.
#'
#' @family Feature Spec Functions
#' @export
step_bucketized_column <- function(spec, ..., boundaries) {
  # Delegate to the generic builder; `boundaries` is the only extra
  # constructor argument for a bucketized column.
  step_(
    spec, ...,
    step = StepBucketizedColumn$new,
    args = list(boundaries = boundaries),
    prefix = "bucketized"
  )
}
# Name a multi-column step: prefer the user-supplied quosure name, otherwise
# join the prefix and all involved variable names with underscores.
make_multiple_columns_step_name <- function(quosure, variables, step) {
  user_name <- names(quosure)
  if (is.null(user_name) || identical(user_name, "")) {
    paste0(step, "_", paste(variables, collapse = "_"))
  } else {
    user_name
  }
}
# Shared builder for steps that operate on a *group* of columns per quosure
# (crossed columns, shared embeddings). Each quosure is resolved separately
# so one step is created per group of selected features.
step_multiple_ <- function(spec, ..., step, args, prefix) {
  spec <- spec$clone(deep = TRUE)
  quosures <- quos(...)
  for (i in seq_along(quosures)) {
    variables <- terms_select(spec$feature_names(), spec$feature_types(), quosures[i])
    args_ <- append(
      list(
        variables,
        # Bug fix: the prefix was previously hard-coded to "crossed", so steps
        # created through other callers (e.g. step_shared_embeddings_column,
        # prefix = "shared_embeddings") were misnamed "crossed_...".
        name = make_multiple_columns_step_name(quosures[i], variables, prefix)
      ),
      args
    )
    stp <- do.call(step, args_)
    spec$add_step(stp)
  }
  spec
}
#' Creates crosses of categorical columns
#'
#' Use this step to create crosses between categorical columns.
#'
#' @inheritParams step_numeric_column
#' @param hash_bucket_size An int > 1. The number of buckets.
#' @param hash_key (optional) Specify the hash_key that will be used by the
#' FingerprintCat64 function to combine the crosses fingerprints on
#' SparseCrossOp.
#'
#' @examples
#' \dontrun{
#' library(tfdatasets)
#' data(hearts)
#' file <- tempfile()
#' writeLines(unique(hearts$thal), file)
#' hearts <- tensor_slices_dataset(hearts) %>% dataset_batch(32)
#'
#' # use the formula interface
#' spec <- feature_spec(hearts, target ~ age) %>%
#' step_numeric_column(age) %>%
#' step_bucketized_column(age, boundaries = c(10, 20, 30))
#' spec_fit <- fit(spec)
#' final_dataset <- hearts %>% dataset_use_spec(spec_fit)
#' }
#' @return a `FeatureSpec` object.
#' @seealso [steps] for a complete list of allowed steps.
#'
#' @family Feature Spec Functions
#' @export
step_crossed_column <- function(spec, ..., hash_bucket_size, hash_key = NULL) {
  # Each quosure passed through ... describes one group of columns to cross;
  # the multi-column builder creates one crossed step per group.
  step_multiple_(
    spec, ...,
    step = StepCrossedColumn$new,
    args = list(hash_bucket_size = hash_bucket_size, hash_key = hash_key),
    prefix = "crossed"
  )
}
#' Creates shared embeddings for categorical columns
#'
#' This is similar to [step_embedding_column], except that it produces a list of
#' embedding columns that share the same embedding weights.
#'
#' @inheritParams step_embedding_column
#' @param shared_embedding_collection_name Optional collective name of
#' these columns. If not given, a reasonable name will be chosen based on
#' the names of categorical_columns.
#'
#' @note Does not work in the eager mode.
#'
#' @return a `FeatureSpec` object.
#' @seealso [steps] for a complete list of allowed steps.
#'
#' @family Feature Spec Functions
#' @export
step_shared_embeddings_column <- function(spec, ..., dimension, combiner = "mean",
                                          initializer = NULL, shared_embedding_collection_name = NULL,
                                          ckpt_to_load_from = NULL, tensor_name_in_ckpt = NULL,
                                          max_norm = NULL, trainable = TRUE) {
  # Collect the embedding options once; every group of categorical columns
  # selected through ... will share the same embedding weights.
  shared_args <- list(
    dimension = dimension,
    combiner = combiner,
    initializer = initializer,
    shared_embedding_collection_name = shared_embedding_collection_name,
    ckpt_to_load_from = ckpt_to_load_from,
    tensor_name_in_ckpt = tensor_name_in_ckpt,
    max_norm = max_norm,
    trainable = trainable
  )
  step_multiple_(
    spec, ...,
    step = StepSharedEmbeddings$new,
    args = shared_args,
    prefix = "shared_embeddings"
  )
}
# Input from spec ---------------------------------------------------------
#' Creates a list of inputs from a dataset
#'
#' Creates a list of Keras input layers that can be used together
#' with [keras::layer_dense_features()].
#'
#' @param dataset a TensorFlow dataset or a data.frame
#' @return a list of Keras input layers
#'
#' @examples
#' \dontrun{
#' library(tfdatasets)
#' data(hearts)
#' hearts <- tensor_slices_dataset(hearts) %>% dataset_batch(32)
#'
#' # use the formula interface
#' spec <- feature_spec(hearts, target ~ age + slope) %>%
#' step_numeric_column(age, slope) %>%
#' step_bucketized_column(age, boundaries = c(10, 20, 30))
#'
#' spec <- fit(spec)
#' dataset <- hearts %>% dataset_use_spec(spec)
#'
#' input <- layer_input_from_dataset(dataset)
#' }
#'
#' @export
layer_input_from_dataset <- function(dataset) {
  # Only the head is needed: column names, dtypes and shapes are inferred,
  # never the full data.
  if (inherits(dataset, "data.frame") || inherits(dataset, "list"))
    dataset <- tensor_slices_dataset(utils::head(dataset))
  dataset <- dataset_map(dataset, ~.x)
  col_names <- column_names(dataset)
  col_types <- output_types(dataset)
  col_shapes <- output_shapes(dataset)
  # Build one Keras input layer per column, keyed by column name.
  inputs <- lapply(seq_along(col_names), function(i) {
    keras::layer_input(
      name = col_names[i],
      shape = col_shapes[[i]]$as_list()[-1],  # drop the batch dimension
      dtype = col_types[[i]]$name
    )
  })
  names(inputs) <- col_names
  reticulate::dict(inputs)
}
#' Dense Features
#'
#' Retrieves the Dense Features from a spec.
#'
#' @inheritParams step_numeric_column
#'
#' @return A list of feature columns.
#'
#' @export
dense_features <- function(spec) {
  # Delegate to the FeatureSpec object, which knows which of its steps
  # produce dense (numeric/indicator/embedding) feature columns.
  spec$dense_features()
}
|
# Automatically generated by openapi-generator (https://openapi-generator.tech)
# Please update as you see appropriate
#
# NOTE(review): generated stub tests for the `Team` model. Every expectation
# below is commented out, so this file currently verifies nothing beyond the
# fact that `Team$new()` can be constructed; fill in the "EXPECTED_RESULT"
# placeholders (or build real fixtures) before relying on this suite.
context("Test Team")
# Shared model instance exercised by all property tests below.
model.instance <- Team$new()
test_that("canPublicJoin", {
  # tests for the property `canPublicJoin` (character)
  # true for teams which members can join without an invitation or approval
  # uncomment below to test the property
  #expect_equal(model.instance$`canPublicJoin`, "EXPECTED_RESULT")
})
test_that("createdBy", {
  # tests for the property `createdBy` (character)
  # The ID of the user that created this Team.
  # uncomment below to test the property
  #expect_equal(model.instance$`createdBy`, "EXPECTED_RESULT")
})
test_that("createdOn", {
  # tests for the property `createdOn` (character)
  # The date this Team was created.
  # uncomment below to test the property
  #expect_equal(model.instance$`createdOn`, "EXPECTED_RESULT")
})
test_that("description", {
  # tests for the property `description` (character)
  # A short description of this Team.
  # uncomment below to test the property
  #expect_equal(model.instance$`description`, "EXPECTED_RESULT")
})
test_that("etag", {
  # tests for the property `etag` (character)
  # Synapse employs an Optimistic Concurrency Control (OCC) scheme to handle concurrent updates. Since the E-Tag changes every time a Team is updated it is used to detect when a client's current representation of a Team is out-of-date.
  # uncomment below to test the property
  #expect_equal(model.instance$`etag`, "EXPECTED_RESULT")
})
test_that("icon", {
  # tests for the property `icon` (character)
  # fileHandleId for icon image of the Team
  # uncomment below to test the property
  #expect_equal(model.instance$`icon`, "EXPECTED_RESULT")
})
test_that("id", {
  # tests for the property `id` (character)
  # The id of the Team.
  # uncomment below to test the property
  #expect_equal(model.instance$`id`, "EXPECTED_RESULT")
})
test_that("modifiedBy", {
  # tests for the property `modifiedBy` (character)
  # The ID of the user that last modified this Team.
  # uncomment below to test the property
  #expect_equal(model.instance$`modifiedBy`, "EXPECTED_RESULT")
})
test_that("modifiedOn", {
  # tests for the property `modifiedOn` (character)
  # The date this Team was last modified.
  # uncomment below to test the property
  #expect_equal(model.instance$`modifiedOn`, "EXPECTED_RESULT")
})
test_that("name", {
  # tests for the property `name` (character)
  # The name of the Team.
  # uncomment below to test the property
  #expect_equal(model.instance$`name`, "EXPECTED_RESULT")
})
| /tests/testthat/test_team.R | no_license | thomasyu888/synr-sdk-client | R | false | false | 2,617 | r | # Automatically generated by openapi-generator (https://openapi-generator.tech)
# Please update as you see appropriate
#
# NOTE(review): duplicated copy of the generated `Team` stub tests. Every
# expectation below is commented out, so this file currently verifies nothing
# beyond the fact that `Team$new()` can be constructed; fill in the
# "EXPECTED_RESULT" placeholders before relying on this suite.
context("Test Team")
# Shared model instance exercised by all property tests below.
model.instance <- Team$new()
test_that("canPublicJoin", {
  # tests for the property `canPublicJoin` (character)
  # true for teams which members can join without an invitation or approval
  # uncomment below to test the property
  #expect_equal(model.instance$`canPublicJoin`, "EXPECTED_RESULT")
})
test_that("createdBy", {
  # tests for the property `createdBy` (character)
  # The ID of the user that created this Team.
  # uncomment below to test the property
  #expect_equal(model.instance$`createdBy`, "EXPECTED_RESULT")
})
test_that("createdOn", {
  # tests for the property `createdOn` (character)
  # The date this Team was created.
  # uncomment below to test the property
  #expect_equal(model.instance$`createdOn`, "EXPECTED_RESULT")
})
test_that("description", {
  # tests for the property `description` (character)
  # A short description of this Team.
  # uncomment below to test the property
  #expect_equal(model.instance$`description`, "EXPECTED_RESULT")
})
test_that("etag", {
  # tests for the property `etag` (character)
  # Synapse employs an Optimistic Concurrency Control (OCC) scheme to handle concurrent updates. Since the E-Tag changes every time a Team is updated it is used to detect when a client's current representation of a Team is out-of-date.
  # uncomment below to test the property
  #expect_equal(model.instance$`etag`, "EXPECTED_RESULT")
})
test_that("icon", {
  # tests for the property `icon` (character)
  # fileHandleId for icon image of the Team
  # uncomment below to test the property
  #expect_equal(model.instance$`icon`, "EXPECTED_RESULT")
})
test_that("id", {
  # tests for the property `id` (character)
  # The id of the Team.
  # uncomment below to test the property
  #expect_equal(model.instance$`id`, "EXPECTED_RESULT")
})
test_that("modifiedBy", {
  # tests for the property `modifiedBy` (character)
  # The ID of the user that last modified this Team.
  # uncomment below to test the property
  #expect_equal(model.instance$`modifiedBy`, "EXPECTED_RESULT")
})
test_that("modifiedOn", {
  # tests for the property `modifiedOn` (character)
  # The date this Team was last modified.
  # uncomment below to test the property
  #expect_equal(model.instance$`modifiedOn`, "EXPECTED_RESULT")
})
test_that("name", {
  # tests for the property `name` (character)
  # The name of the Team.
  # uncomment below to test the property
  #expect_equal(model.instance$`name`, "EXPECTED_RESULT")
})
# Fit one model type over every parameter setting that applies to dataset `i`,
# recording per setting whether fitting/prediction failed plus basic fit stats.
#
# i          - index of the input dataset to run (also indexes Model.Out).
# use.list   - list mapping each parameter setting to the dataset indices it
#              applies to; settings whose list contains `i` are run.
# model.list - list of named parameter lists, one per candidate setting.
# model      - model name used to build the "fit.<model>.fct(...)" call string.
# outpur.dir - NOTE(review): apparently a typo for "output.dir"; this argument
#              is never used -- the body reads a global `output.dir` instead.
#              TODO confirm before renaming (callers pass it positionally).
# rc         - name of the response column.
#
# NOTE(review): this function depends on several globals (`input.file`,
# `output.dir`, `Debug`, `parameter.list`) and builds calls via
# eval(parse(text = ...)), so it is only safe to run from the scripted
# environment that defines them. The bare sink() calls presumably close
# sinks opened inside the fit/predict functions -- verify against those.
unit.test<-function(i,use.list,model.list,model,outpur.dir,rc){
  # TRUE count of matches of i in one element of use.list.
  list.fct<-function(x,i) sum(!is.na(match(x=i,table=x)))
  # Indices of the parameter settings that apply to dataset i.
  valid.params<-which(unlist(lapply(use.list,list.fct,i=i))!=0,arr.ind=TRUE)
  Model.Out<-list()
  if(model.list[[1]]$UnitTest==FALSE){
    # One result row per applicable parameter setting.
    Model.Out[[i]]<-data.frame(list("FctFailed"=rep(FALSE,times=length(valid.params)),
        "PredictFailed"=rep(FALSE,times=length(valid.params)),
        "NullDev"=rep(0,times=length(valid.params)),
        "FitDev"=rep(0,times=length(valid.params)),
        "Correlation"=rep(0,times=length(valid.params)),
        "ErrorMessage"=rep("No Error",times=length(valid.params))),row.names=valid.params)
    class(Model.Out[[i]]$ErrorMessage)<-"character"
  } else Model.Out<-list()
  if(length(valid.params)!=0){
    for(j in 1:length(valid.params)){
      k<-valid.params[j]
      if(Debug==TRUE) parameter.list$brt.list[[k]]$debug.mode=TRUE
      # Serialize the k-th parameter list as "name=value,..." for the call string.
      a<-paste(paste(names(model.list[[k]]),model.list[[k]],sep="="),collapse=",")
      call.fct<-paste("fit.",model,".fct(ma.name=\"",input.file[i],
          "\", tif.dir=NULL,output.dir=\"",output.dir,"\", response.col=\"",rc,"\",",a,")",sep="")
      #Right here I should remove almost everything since all output is written to the global environment
      call.predict<-"PredictModel(workspace=paste(output.dir,\"modelWorkspace\",sep=\"/\"),out.dir=output.dir)"
      # Outside debug mode, wrap both calls in try() so one failing setting
      # does not abort the whole grid.
      if(Debug==FALSE) {call.fct<-paste("try(",call.fct,",silent=TRUE)",sep="")
        call.predict<-paste("try(",call.predict,")",sep="")
        #so that we don't predict on an old copy in the next loop
        try(file.remove(paste(output.dir,"modelWorkspace",sep="/")))
      }
      fct.output<-eval(parse(text=call.fct))
      sink()
      if(model.list[[1]]$UnitTest==FALSE){
        if(class(fct.output)=="try-error") {
          Model.Out[[i]][j,1]=TRUE
          Model.Out[[i]][j,6]=fct.output[1]
        } else {
          # Fit succeeded: run prediction and record fit statistics.
          pred.output<-eval(parse(text=call.predict))
          sink()
          Model.Out[[i]][j,1]=FALSE
          Model.Out[[i]][j,3]<-fct.output$mods$auc.output$null.dev
          Model.Out[[i]][j,4]<-fct.output$mods$auc.output$dev.fit
          Model.Out[[i]][j,5]<-fct.output$mods$auc.output$correlation
          if(class(pred.output)=="try-error") Model.Out[[i]][j,2]=TRUE
        }
      } else {if(class(fct.output)=="try-error") Model.Out[[i]]<-fct.output
        # In unit-test mode return the model-array data instead of statistics.
        else Model.Out[[i]]<-fct.output$dat$ma}
    }
  } ## END BRT
  return(Model.Out)
}
} | /pySAHM/Resources/R_Modules/Testing/unit.test.r | no_license | jpocom/sahm | R | false | false | 3,158 | r | unit.test<-function(i,use.list,model.list,model,outpur.dir,rc){
# NOTE(review): this span is the body of unit.test(); the opening
# `unit.test<-function(...)` header line is not part of this chunk.
# `i` indexes the current input file; `use.list`/`model.list` hold per-model
# parameter sets; `model` is a model-name string; `rc` is the response column.
# `Debug`, `parameter.list`, `input.file` and `output.dir` are read from the
# calling/global environment — confirm against the driver script.
# Helper: number of entries of x equal to i (i is a scalar, so this is 0 or 1);
# used to find which parameter sets apply to input file i.
list.fct<-function(x,i) sum(!is.na(match(x=i,table=x)))
# Indices of parameter sets in use.list that mention file i.
valid.params<-which(unlist(lapply(use.list,list.fct,i=i))!=0,arr.ind=TRUE)
Model.Out<-list()
# When not unit-testing, pre-build a result table: one row per parameter set,
# recording fit/predict failures, deviances, correlation and an error message.
if(model.list[[1]]$UnitTest==FALSE){
Model.Out[[i]]<-data.frame(list("FctFailed"=rep(FALSE,times=length(valid.params)),
"PredictFailed"=rep(FALSE,times=length(valid.params)),
"NullDev"=rep(0,times=length(valid.params)),
"FitDev"=rep(0,times=length(valid.params)),
"Correlation"=rep(0,times=length(valid.params)),
"ErrorMessage"=rep("No Error",times=length(valid.params))),row.names=valid.params)
# Force the message column to character (guards against factor coercion).
class(Model.Out[[i]]$ErrorMessage)<-"character"
} else Model.Out<-list()
if(length(valid.params)!=0){
for(j in 1:length(valid.params)){
k<-valid.params[j]
if(Debug==TRUE) parameter.list$brt.list[[k]]$debug.mode=TRUE
# Serialize the k-th parameter set as "name=value,name=value,..." text.
a<-paste(paste(names(model.list[[k]]),model.list[[k]],sep="="),collapse=",")
# Build a call string like fit.<model>.fct(ma.name="<file>", ...) to be
# parsed and evaluated below.
call.fct<-paste("fit.",model,".fct(ma.name=\"",input.file[i],
"\", tif.dir=NULL,output.dir=\"",output.dir,"\", response.col=\"",rc,"\",",a,")",sep="")
#Right here I should remove almost everything since all output is written to the global environment
call.predict<-"PredictModel(workspace=paste(output.dir,\"modelWorkspace\",sep=\"/\"),out.dir=output.dir)"
# Outside debug mode, wrap both calls in try() so one failure does not abort
# the sweep, and delete any stale model workspace first.
if(Debug==FALSE) {call.fct<-paste("try(",call.fct,",silent=TRUE)",sep="")
call.predict<-paste("try(",call.predict,")",sep="")
#so that we don't predict on an old copy in the next loop
try(file.remove(paste(output.dir,"modelWorkspace",sep="/")))
}
# NOTE(review): eval(parse(text=...)) on a built string is fragile but is the
# established pattern in this test harness; kept as-is.
fct.output<-eval(parse(text=call.fct))
# The fit functions redirect output with sink(); restore the console here.
sink()
if(model.list[[1]]$UnitTest==FALSE){
if(class(fct.output)=="try-error") {
Model.Out[[i]][j,1]=TRUE
Model.Out[[i]][j,6]=fct.output[1]
} else {
pred.output<-eval(parse(text=call.predict))
sink()
Model.Out[[i]][j,1]=FALSE
# Record goodness-of-fit statistics from the fitted model object.
Model.Out[[i]][j,3]<-fct.output$mods$auc.output$null.dev
Model.Out[[i]][j,4]<-fct.output$mods$auc.output$dev.fit
Model.Out[[i]][j,5]<-fct.output$mods$auc.output$correlation
if(class(pred.output)=="try-error") Model.Out[[i]][j,2]=TRUE
}
} else {if(class(fct.output)=="try-error") Model.Out[[i]]<-fct.output
else Model.Out[[i]]<-fct.output$dat$ma}
}
} ## END BRT
return(Model.Out)
}
# Compute mean administered-test Fisher information per condition for Study 2,
# then write the per-replication values and per-target summary stats as CSVs.
library(tools)
library(progress)
library(TestDesign)

# Root folder of the simulation results.
# Needs to be replaced with the path of your own simulation results folder.
# Run the simulation first using: simulation/study2.R
fp <- "~/Box Sync/Behaviormetrika_Special_Issue/Article_2_PassageCAT/Results"

fs <- list.files(file.path(fp, "Study2"))

# One row per result file: four design factors plus the mean test information.
o <- as.data.frame(matrix(NA, length(fs), 4))
colnames(o) <- c("weave", "exposure", "target", "replication")
oo <- o
oo$info <- NA

pb <- progress_bar$new(format = "[:bar] :current / :total | :eta", total = length(fs))
for (i in seq_along(fs)) {                    # seq_along: safe when fs is empty
  pb$tick(1)  # BUG FIX: tick first, so files skipped below still advance the bar
  f <- fs[i]
  solution <- NULL
  load(file.path(fp, "Study2", f))            # loads `solution`
  # The file name encodes the design cell after two leading tokens:
  # <weave>_<exposure>_<target>_<overlap?>_<replication>
  IVs <- strsplit(file_path_sans_ext(f), "_")[[1]][-c(1:2)]
  if (IVs[4] == "always") next                # condition excluded from analysis
  oo[i, 1:4] <- IVs[c(1:3, 5)]
  item_pool <- solution@constraints@pool
  # Fisher information of every pool item at each examinee's true theta.
  info_all_items <- calcFisher(item_pool, solution@true_theta)
  # Per examinee, sum information over the items actually administered.
  # vapply (not sapply) guarantees a numeric vector; the former inner
  # variable `i` shadowed the outer loop index, so it is renamed `ex`.
  info_list <- vapply(
    seq_along(solution@output),
    function(ex) {
      sum(info_all_items[ex, solution@output[[ex]]@administered_item_index])
    },
    numeric(1)
  )
  oo$info[i] <- mean(info_list)
}

oo$weave    <- factor(oo$weave,    c("interspersed", "ds", "sd", "setbased"))
oo$exposure <- factor(oo$exposure, c("none", "bigm"))
oo$target   <- factor(oo$target,   c("maxinfo", "goalinfo8", "goalinfo7", "goalinfo6"))
write.csv(oo, "analysis/study2_testinfo.csv")

# Summary statistics of mean test information, grouped by target condition.
agg_by_target <- function(f) aggregate(oo$info, by = list(oo$target), f)$x
o <- aggregate(oo$info, by = list(oo$target), mean)
names(o)[2] <- "mean"
o[["sd"]]  <- agg_by_target(sd)
o[["min"]] <- agg_by_target(min)
o[["max"]] <- agg_by_target(max)
# Deciles 0.1 ... 0.9, each as its own column named "0.1", "0.2", ...
for (q in seq(.1, .9, .1)) {
  o[[sprintf("%s", q)]] <- agg_by_target(function(x) quantile(x, q))
}
write.csv(o, "analysis/study2_testinfo_average.csv")
| /analysis/study2_averageinfo.R | no_license | FinkAr/mix_setbased_discrete | R | false | false | 1,935 | r | library(tools)
library(progress)
library(TestDesign)
fp <- "~/Box Sync/Behaviormetrika_Special_Issue/Article_2_PassageCAT/Results"
# needs to be replaced with the path of your own simulation results folder
# run the simulation first using: simulation/study2.R
fs <- list.files(file.path(fp, "Study2"))
o <- as.data.frame(matrix(NA, length(fs), 4))
colnames(o) <- c("weave", "exposure", "target", "replication")
oo <- o
oo$info <- NA
pb <- progress_bar$new(format = "[:bar] :current / :total | :eta", total = length(fs))
for (i in 1:length(fs)) {
f <- fs[i]
solution <- NULL
load(file.path(fp, "Study2", f))
IVs <- strsplit(file_path_sans_ext(f), "_")[[1]][-c(1:2)]
if (IVs[4] == "always") next
oo[i, 1:4] <- IVs[c(1:3, 5)]
item_pool <- solution@constraints@pool
info_all_items <- calcFisher(item_pool, solution@true_theta)
info_list <-
sapply(
1:length(solution@output),
function(i) {
info_administered_items <- info_all_items[i, solution@output[[i]]@administered_item_index]
testinfo <- sum(info_administered_items)
return(testinfo)
}
)
info_list <- unlist(info_list)
oo$info[i] <- mean(info_list)
pb$tick(1)
}
oo$weave <- factor(oo$weave , c("interspersed", "ds", "sd", "setbased"))
oo$exposure <- factor(oo$exposure, c("none", "bigm"))
oo$target <- factor(oo$target , c("maxinfo", "goalinfo8", "goalinfo7", "goalinfo6"))
write.csv(oo, "analysis/study2_testinfo.csv")
o <- aggregate(oo$info, by = list(oo$target), mean)
names(o)[2] <- "mean"
x <- aggregate(oo$info, by = list(oo$target), sd)$x
o[["sd"]] <- x
x <- aggregate(oo$info, by = list(oo$target), min)$x
o[["min"]] <- x
x <- aggregate(oo$info, by = list(oo$target), max)$x
o[["max"]] <- x
for (q in seq(.1, .9, .1)) {
x <- aggregate(oo$info, by = list(oo$target), function(x) quantile(x, q))$x
o[[sprintf("%s", q)]] <- x
}
write.csv(o, "analysis/study2_testinfo_average.csv")
|
\name{dbqa.get.idparam}
\alias{dbqa.get.idparam}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Restituisce l'ID numerico di un parametro
}
\description{
Restituisce l'ID numerico di un parametro (inquinante)
}
\usage{
dbqa.get.idparam(poll, con = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{poll}{
nome o sigla del parametro (stringa)
}
\item{con}{
connessione
}
}
\details{
Prima cerca in una lista di poche sigle usate di frequente. Se non trova corrispondenza, cerca una stringa simile tra i nomi nel DB.
}
\value{
ID numerico del parametro (inquinante). Se la stringa è ambigua o priva di corrispondenze, restituisce \code{NULL}, ma dà qualche messaggio utile.
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
| /man/dbqa.get.idparam.Rd | no_license | Arpae-it/arpautils | R | false | false | 1,095 | rd | \name{dbqa.get.idparam}
\alias{dbqa.get.idparam}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Restituisce l'ID numerico di un parametro
}
\description{
Restituisce l'ID numerico di un parametro (inquinante)
}
\usage{
dbqa.get.idparam(poll, con = NULL)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{poll}{
nome o sigla del parametro (stringa)
}
\item{con}{
connessione
}
}
\details{
Prima cerca in una lista di poche sigle usate di frequente. Se non trova corrispondenza, cerca una stringa simile tra i nomi nel DB.
}
\value{
ID numerico del parametro (inquinante). Se la stringa è ambigua o priva di corrispondenze, restituisce \code{NULL}, ma dà qualche messaggio utile.
}
\references{
%% ~put references to the literature/web site here ~
}
\author{
%% ~~who you are~~
}
\note{
%% ~~further notes~~
}
%% ~Make other sections like Warning with \section{Warning }{....} ~
\seealso{
%% ~~objects to See Also as \code{\link{help}}, ~~~
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
|
#' @include fitbitr.R
#' @include common.R
# Constants
url_activity <- paste0(url_api, "activities/")
#' @title Get Daily Activity Summary
#'
#' @description
#' \code{get_activity_summary()} fetches the summary and the list of a user's
#' activities and activity log entries for a single day.
#'
#' @inheritParams inheritparams_token
#' @inheritParams inheritparams_date
#' @inheritParams inheritparams_simplify
#'
#' @details
#' See \url{https://dev.fitbit.com/reference/web-api/activity/#get-daily-activity-summary} for more details.
#'
#' @export
get_activity_summary <- function(token, date, simplify=TRUE)
{
  endpoint <- sprintf("%sdate/%s.json", url_activity, format_date(date))
  # The response is a deeply nested list, so it is always returned
  # unsimplified regardless of the `simplify` argument.
  tidy_output(get(endpoint, token), simplify = FALSE)
}
#' @title Get Activity Time Series
#'
#' @description
#' \code{get_activity_time_series()} returns time series data in the specified range for a given resource.
#' Supply either \code{date} + \code{period}, or \code{base_date} + \code{end_date}.
#'
#' @inheritParams inheritparams_token
#' @param resource_path The resource path. See details below.
#' @param base_date The range start date. A Date class object or a string in the format yyyy-MM-dd or today.
#' @param end_date The end date of the range. A Date class object or a string in the format yyyy-MM-dd.
#' @param date The end date of the period specified. A Date class object or a string in the format yyyy-MM-dd.
#' @param period The range for which data will be returned. Options are "1d", "7d", "30d", "1w", "1m", "3m", "6m", "1y", or "max".
#' @inheritParams inheritparams_simplify
#'
#' @details
#' Available resource_path are
#' \itemize{
#' \item calories
#' \item caloriesBMR
#' \item steps
#' \item distance
#' \item floors
#' \item elevation
#' \item minutesSedentary
#' \item minutesLightlyActive
#' \item minutesFairlyActive
#' \item minutesVeryActive
#' \item activityCalories
#' }
#'
#' See \url{https://dev.fitbit.com/reference/web-api/activity/#get-activity-time-series} for more details.
#'
#' @export
get_activity_time_series <- function(token, resource_path, date="", period="", base_date="", end_date="", simplify=TRUE)
{
  url <- if (date != "" && period != "") {
    paste0(url_activity, sprintf("%s/date/%s/%s.json", resource_path, format_date(date), period))
  } else if (base_date != "" && end_date != "") {
    # BUG FIX: was the elementwise `&`; a scalar `if` condition should use the
    # short-circuiting `&&`, matching the branch above.
    paste0(url_activity, sprintf("%s/date/%s/%s.json", resource_path, format_date(base_date), format_date(end_date)))
  } else {
    # stop() already prefixes "Error: ", so the message no longer repeats it.
    stop("Need to enter combination of date/period or base_date/end_date", call. = FALSE)
  }
  tidy_output(get(url, token), simplify)
}
#' @title Get Activity Intraday Time Series
#'
#' @description
#' \code{get_activity_intraday_time_series()} returns intraday time series data in the specified range for a given resource.
#' Access to the Intraday Time Series for personal use (accessing your own data) is available through the "Personal" App Type.
#'
#' @inheritParams inheritparams_token
#' @param resource_path The resource path of the desired data
#' @inheritParams inheritparams_date
#' @param detail_level Number of data points to include. Either 1min or 15min. Optional.
#' @param start_time The start of the period, in the format HH:mm. Optional.
#' @param end_time The end of the period, in the format HH:mm. Optional.
#' @inheritParams inheritparams_simplify
#'
#' @details
#' Available resource_path are
#' \itemize{
#' \item calories
#' \item steps
#' \item distance
#' \item floors
#' \item elevation
#' }
#'
#' See \url{https://dev.fitbit.com/reference/web-api/activity/#get-activity-intraday-time-series} for more details.
#'
#' @export
get_activity_intraday_time_series <- function(token, resource_path, date, detail_level="15min", start_time=NULL, end_time=NULL, simplify=TRUE)
{
  date <- format_date(date)
  url <- if (!is.null(start_time) && !is.null(end_time)) {
    # "HH:mm" strings compare correctly lexicographically. When the window
    # wraps past midnight (start >= end) the range must end on the next day;
    # otherwise the second date field is the literal "1d".
    # (Cleaned up a redundant nested `date2 <-` assignment inside the else
    # branch of the original.)
    date2 <- if (start_time < end_time) "1d" else as.Date(date) + 1
    paste0(url_activity, sprintf("%s/date/%s/%s/%s/time/%s/%s.json",
                                 resource_path, date, date2, detail_level,
                                 start_time, end_time))
  } else {
    paste0(url_activity, sprintf("%s/date/%s/1d/%s.json",
                                 resource_path, date, detail_level))
  }
  tidy_output(get(url, token), simplify)
}
#' @title Get Activity Types
#'
#' @description
#' Retrieve the full tree of valid public Fitbit activities from the
#' activities catalog, together with any private custom activities the
#' user has created.
#'
#' @inheritParams inheritparams_token
#' @inheritParams inheritparams_simplify
#'
#' @export
get_activity_types <- function(token, simplify=TRUE)
{
  endpoint <- paste0(url_activity, "activities.json")
  # The catalog is a deeply nested tree, so it is always returned
  # unsimplified regardless of the `simplify` argument.
  tidy_output(get(endpoint, token), simplify = FALSE)
}
#' @title Get Activity Type
#'
#' @description
#' \code{get_activity_type()} returns the details of a specific activity in the Fitbit activities database.
#'
#' @inheritParams inheritparams_token
#' @param activity_id The activity ID.
#' @inheritParams inheritparams_simplify
#'
#' @export
get_activity_type <- function(token, activity_id, simplify=TRUE)
{
# NOTE(review): this is the only endpoint here built from `url_base` instead
# of `url_api`/`url_activity`. The activity-type catalog is not user-scoped,
# which would explain the difference, but confirm `url_base` in common.R.
url <- paste0(url_base, sprintf("activities/%s.json", activity_id))
tidy_output(get(url, token), simplify)
}
# The three accessors below are produced by make_get_function() (defined
# elsewhere in the package, presumably common.R): each appears to be a
# ready-made function(token, simplify) wrapper around a fixed GET endpoint.
# NOTE(review): behavior inferred from the factory's name and sibling usage
# of get()/tidy_output() — confirm against make_get_function()'s definition.
#' @title Get Frequent Activities
#'
#' @description
#' \code{get_frequent_activities()} retrieves a list of a user's frequent activities.
#'
#' @inheritParams inheritparams_token
#' @inheritParams inheritparams_simplify
#'
#' @details
#' See \url{https://dev.fitbit.com/reference/web-api/activity/#get-frequent-activities} for more details.
#'
#' @export
get_frequent_activities <- make_get_function(paste0(url_activity, "frequent.json"))
#' @title Get Recent Activity Types
#'
#' @description
#' \code{get_recent_activity_types()} retrieves a list of a user's recent activities types logged with some details of the last activity log of that type.
#'
#' @inheritParams inheritparams_token
#' @inheritParams inheritparams_simplify
#'
#' @details
#' See \url{https://dev.fitbit.com/reference/web-api/activity/#get-recent-activity-types} for more details.
#'
#' @export
get_recent_activity_types <- make_get_function(paste0(url_activity, "recent.json"))
#' @title Get Favorite Activities
#'
#' @description
#' \code{get_favorite_activities()} returns a list of a user's favorite activities.
#'
#' @inheritParams inheritparams_token
#' @inheritParams inheritparams_simplify
#'
#' @details
#' See \url{https://dev.fitbit.com/reference/web-api/activity/#get-favorite-activities} for more details.
#'
#' @export
get_favorite_activities <- make_get_function(paste0(url_activity, "favorite.json"))
#' @title Add Favorite Activity
#'
#' @description
#' \code{add_favorite_activity()} adds the activity with the given ID to the user's list of favorite activities.
#'
#' @inheritParams inheritparams_token
#' @param activity_id The ID of the activity to add to user's favorites.
#'
#' @details
#' See \url{https://dev.fitbit.com/reference/web-api/activity/#add-favorite-activity} for more details.
#'
#' @export
add_favorite_activity <- function(token, activity_id)
{
  # POST https://api.fitbit.com/1/user/-/activities/favorite/[activity-id].json
  url <- paste0(url_activity, sprintf("favorite/%s.json", activity_id))
  # No useful response body on success, so the result is returned invisibly.
  # (Roxygen fixed for consistency: every sibling documents `token` via
  # @inheritParams inheritparams_token; this one hand-wrote the @param.)
  invisible(post(url, token, body=NULL))
}
#' @title Delete Favorite Activity
#'
#' @description
#' \code{delete_favorite_activity()} drops the activity identified by
#' \code{activity_id} from the user's list of favorite activities.
#'
#' @inheritParams inheritparams_token
#' @param activity_id The ID of the activity to be removed.
#'
#' @details
#' See \url{https://dev.fitbit.com/reference/web-api/activity/#delete-favorite-activity} for more details.
#'
#' @export
delete_favorite_activity <- function(token, activity_id)
{
  endpoint <- sprintf("%sfavorite/%s.json", url_activity, activity_id)
  # DELETE returns no useful body; return its result invisibly.
  invisible(delete(endpoint, token))
}
#' @title Get Activity Goals
#'
#' @description
#' \code{get_activity_goals()} fetches the user's current activity goals for
#' the requested period.
#'
#' @inheritParams inheritparams_token
#' @param period "daily" or "weekly"
#' @inheritParams inheritparams_simplify
#'
#' @details
#' See \url{https://dev.fitbit.com/reference/web-api/activity/#get-activity-goals} for more details.
#'
#' @export
get_activity_goals <- function(token, period, simplify=TRUE)
{
  # Reject anything other than the two goal periods the API supports.
  stop_if_x_is_not_in(period, c("daily", "weekly"))
  endpoint <- sprintf("%sgoals/%s.json", url_activity, period)
  tidy_output(get(endpoint, token), simplify)
}
# Generated accessor: make_get_function() (defined elsewhere, presumably
# common.R) appears to return a function(token, simplify) that GETs this
# fixed endpoint and tidies the result.
# NOTE(review): behavior inferred from the factory's name — confirm.
#' @title Get Lifetime Stats
#'
#' @description
#' \code{get_lifetime_stats()} retrieves the user's activity statistics.
#' Activity statistics includes Lifetime and Best achievement values from the My Achievements tile on the website dashboard.
#' Response contains both statistics from the tracker device and total numbers including tracker data and manual activity log entries as seen on the Fitbit website dashboard.
#'
#' @inheritParams inheritparams_token
#' @inheritParams inheritparams_simplify
#'
#' @details
#' See \url{https://dev.fitbit.com/reference/web-api/activity/#get-lifetime-stats} for more details.
#'
#' @export
get_lifetime_stats <- make_get_function(paste0(url_api, "activities.json"))
| /R/activity.R | permissive | vcannataro/fitbitr | R | false | false | 9,267 | r | #' @include fitbitr.R
#' @include common.R
# Constants
url_activity <- paste0(url_api, "activities/")
#' @title Get Daily Activity Summary
#'
#' @description
#' \code{get_activity_summary()} retrieves a summary and list of a user's activities and activity log entries for a given day.
#'
#' @inheritParams inheritparams_token
#' @inheritParams inheritparams_date
#' @inheritParams inheritparams_simplify
#'
#' @details
#' See \url{https://dev.fitbit.com/reference/web-api/activity/#get-daily-activity-summary} for more details.
#'
#' @export
get_activity_summary <- function(token, date, simplify=TRUE)
{
url <- paste0(url_activity, sprintf("date/%s.json", format_date(date)))
# We can not simplify this output because it is so complicated nested list
tidy_output(get(url, token), simplify=FALSE)
}
#' @title Get Activity Time Series
#'
#' @description
#' \code{get_activity_time_series()} returns time series data in the specified range for a given resource.
#'
#' @inheritParams inheritparams_token
#' @param resource_path The resource path. see details below.
#' @param base_date The range start date. A Date class object or a string in the format yyyy-MM-dd or today.
#' @param end_date The end date of the range. A Date class object or a string in the format yyyy-MM-dd.
#' @param date The end date of the period specified. A Date class object or a string in the format yyyy-MM-dd.
#' @param period The range for which data will be returned. Options are "1d", "7d", "30d", "1w", "1m", "3m", "6m", "1y", or "max".
#' @inheritParams inheritparams_simplify
#'
#' @details
#' Available resource_path are
#' \itemize{
#' \item calories
#' \item caloriesBMR
#' \item steps
#' \item distance
#' \item floors
#' \item elevation
#' \item minutesSedentary
#' \item minutesLightlyActive
#' \item minutesFairlyActive
#' \item minutesVeryActive
#' \item activityCalories
#' }
#'
#' See \url{https://dev.fitbit.com/reference/web-api/activity/#get-activity-time-series} for more details.
#'
#' @export
get_activity_time_series <- function(token, resource_path, date="", period="", base_date="", end_date="", simplify=TRUE)
{
url <- if(date != "" && period != ""){
paste0(url_activity, sprintf("%s/date/%s/%s.json", resource_path, format_date(date), period))
} else if(base_date != "" & end_date != ""){
paste0(url_activity, sprintf("%s/date/%s/%s.json", resource_path, format_date(base_date), format_date(end_date)))
} else{
stop("Error: Need to enter combination of date/period or base_date/end_date")
}
tidy_output(get(url, token), simplify)
}
#' @title Get Activity Intraday Time Series
#'
#' @description
#' \code{get_activity_intraday_time_series()} returns intraday time series data in the specified range for a given resource.
#' Access to the Intraday Time Series for personal use (accessing your own data) is available through the "Personal" App Type.
#'
#' @inheritParams inheritparams_token
#' @param resource_path The resource path of the desired data
#' @inheritParams inheritparams_date
#' @param detail_level Number of data points to include. Either 1min or 15min. Optional.
#' @param start_time The start of the period, in the format HH:mm. Optional.
#' @param end_time The end of the period, in the format HH:mm. Optional.
#' @inheritParams inheritparams_simplify
#'
#' @details
#' Available resource_path are
#' \itemize{
#' \item calories
#' \item steps
#' \item distance
#' \item floors
#' \item elevation
#' }
#'
#' See \url{https://dev.fitbit.com/reference/web-api/activity/#get-activity-intraday-time-series} for more details.
#'
#' @export
get_activity_intraday_time_series <- function(token, resource_path, date, detail_level="15min", start_time=NULL, end_time=NULL, simplify=TRUE)
{
date <- format_date(date)
url <- if(!is.null(start_time) && !is.null(end_time)){
date2 <- if(start_time < end_time){
"1d"
} else{
date2 <- as.Date(date) + 1
}
paste0(url_activity, sprintf("%s/date/%s/%s/%s/time/%s/%s.json", resource_path, date, date2, detail_level, start_time, end_time))
} else{
paste0(url_activity, sprintf("%s/date/%s/1d/%s.json", resource_path, date, detail_level))
}
tidy_output(get(url, token), simplify)
}
#' @title Get Activity Types
#'
#' @description
#' Get a tree of all valid Fitbit public activities from the activities catalog as well as private custom activities the user created
#'
#' @inheritParams inheritparams_token
#' @inheritParams inheritparams_simplify
#'
#' @export
get_activity_types <- function(token, simplify=TRUE)
{
url <- paste0(url_activity, "activities.json")
# We can not simplify this output because it is so complicated nested list
tidy_output(get(url, token), simplify=FALSE)
}
#' @title Get Activity Type
#'
#' @description
#' \code{get_activity_type()} returns the details of a specific activity in the Fitbit activities database.
#'
#' @inheritParams inheritparams_token
#' @param activity_id The activity ID.
#' @inheritParams inheritparams_simplify
#'
#' @export
get_activity_type <- function(token, activity_id, simplify=TRUE)
{
url <- paste0(url_base, sprintf("activities/%s.json", activity_id))
tidy_output(get(url, token), simplify)
}
#' @title Get Frequent Activities
#'
#' @description
#' \code{get_frequent_activities()} retrieves a list of a user's frequent activities.
#'
#' @inheritParams inheritparams_token
#' @inheritParams inheritparams_simplify
#'
#' @details
#' See \url{https://dev.fitbit.com/reference/web-api/activity/#get-frequent-activities} for more details.
#'
#' @export
get_frequent_activities <- make_get_function(paste0(url_activity, "frequent.json"))
#' @title Get Recent Activity Types
#'
#' @description
#' \code{get_recent_activity_types()} retrieves a list of a user's recent activities types logged with some details of the last activity log of that type.
#'
#' @inheritParams inheritparams_token
#' @inheritParams inheritparams_simplify
#'
#' @details
#' See \url{https://dev.fitbit.com/reference/web-api/activity/#get-recent-activity-types} for more details.
#'
#' @export
get_recent_activity_types <- make_get_function(paste0(url_activity, "recent.json"))
#' @title Get Favorite Activities
#'
#' @description
#' \code{get_favorite_activities()} returns a list of a user's favorite activities.
#'
#' @inheritParams inheritparams_token
#' @inheritParams inheritparams_simplify
#'
#' @details
#' See \url{https://dev.fitbit.com/reference/web-api/activity/#get-favorite-activities} for more details.
#'
#' @export
get_favorite_activities <- make_get_function(paste0(url_activity, "favorite.json"))
#' @title Add Favorite Activity
#'
#' @description
#' \code{add_favorite_activity()} adds the activity with the given ID to user's list of favorite activities.
#'
#' @param token An OAuth 2.0 token generated by oauth_token()
#' @param activity_id The ID of the activity to add to user's favorites.
#'
#' @details
#' See \url{https://dev.fitbit.com/reference/web-api/activity/#add-favorite-activity} for more details.
#'
#' @export
add_favorite_activity <- function(token, activity_id)
{
#POST https://api.fitbit.com/1/user/-/activities/favorite/[activity-id].json
url <- paste0(url_activity, sprintf("favorite/%s.json", activity_id))
invisible(post(url, token, body=NULL))
}
#' @title Delete Favorite Activity
#'
#' @description
#' \code{delete_favorite_activity()} removes the activity with the given ID (activity_id) from a user's list of favorite activities.
#'
#' @inheritParams inheritparams_token
#' @param activity_id The ID of the activity to be removed.
#'
#' @details
#' See \url{https://dev.fitbit.com/reference/web-api/activity/#delete-favorite-activity} for more details.
#'
#' @export
delete_favorite_activity <- function(token, activity_id)
{
url <- paste0(url_activity, sprintf("favorite/%s.json", activity_id))
invisible(delete(url, token))
}
#' @title Get Activity Goals
#'
#' @description
#' \code{get_activity_goals()} retrieves a user's current daily or weekly activity goals
#'
#' @inheritParams inheritparams_token
#' @param period "daily" or "weekly"
#' @inheritParams inheritparams_simplify
#'
#' @details
#' See \url{https://dev.fitbit.com/reference/web-api/activity/#get-activity-goals} for more details.
#'
#' @export
get_activity_goals <- function(token, period, simplify=TRUE)
{
stop_if_x_is_not_in(period, c("daily", "weekly"))
url <- paste0(url_activity, sprintf("goals/%s.json", period))
tidy_output(get(url, token), simplify)
}
#' @title Get Lifetime Stats
#'
#' @description
#' \code{get_lifetime_stats()} retrieves the user's activity statistics.
#' Activity statistics includes Lifetime and Best achievement values from the My Achievements tile on the website dashboard.
#' Response contains both statistics from the tracker device and total numbers including tracker data and manual activity log entries as seen on the Fitbit website dashboard.
#'
#' @inheritParams inheritparams_token
#' @inheritParams inheritparams_simplify
#'
#' @details
#' See \url{https://dev.fitbit.com/reference/web-api/activity/#get-lifetime-stats} for more details.
#'
#' @export
get_lifetime_stats <- make_get_function(paste0(url_api, "activities.json"))
|
# Run the external NACCESS program on a pdb object and parse its .rsa/.asa
# output into per-residue solvent accessibility values.
#
# Args:
#   pdb     : a pdb object as returned by read.pdb() (bio3d-style); it is
#             written to a temporary file and fed to the NACCESS executable.
#   exepath : path to the naccess executable.
#   prefix  : basename used for the temporary .pdb/.rsa/.asa/.log files.
#
# Returns "SORRY" on any failure; otherwise a list with
#   asa     : character matrix, one row per residue (chain, residue number,
#             residue id, then absolute/relative ASA for all-atom, side-chain,
#             main-chain, non-polar and polar surfaces),
#   tot     : whitespace-split tokens of the TOTAL line of the .rsa file,
#   atomacc : per-atom accessibilities parsed from the .asa file via read.pdb().
naccess <- function(pdb, exepath = "", prefix = "")
{
  print("Running NACCESS")
  print(unique(pdb$atom[, "chain"]))
  infile  <- paste0(prefix, ".pdb")
  rsafile <- paste0(prefix, ".rsa")
  asafile <- paste0(prefix, ".asa")
  logfile <- paste0(prefix, ".log")
  write.pdb(pdb, file = infile)
  # BUG FIX: the original assigned inside the tryCatch() braces and put
  # `return("SORRY")` in the error handler -- that return only exits the
  # handler, and `error` stayed unbound whenever system() itself threw,
  # crashing the attributes() check below. Capture the tryCatch() value.
  error <- tryCatch(
    system(paste0(exepath, " ", infile),
           ignore.stderr = FALSE, ignore.stdout = FALSE, intern = TRUE),
    error = function(e) e
  )
  if (inherits(error, "error")) return("SORRY")
  # NACCESS can fail (e.g. "STOP SOLVA_ERROR: max cubes exceeded", seen with
  # 1VTZ.pdb) while the R call itself succeeds. With intern=TRUE a non-zero
  # exit status appears as a "status" attribute on the captured output, so a
  # non-NULL attribute list signals failure. The missing-output check below
  # is the second line of defense.
  if (is.list(attributes(error))) return("SORRY")
  if (!(file.exists(rsafile) && file.exists(asafile)))
  {
    return("SORRY")
  }
  raw.lines <- readLines(rsafile)
  atom_acc  <- read.pdb(asafile)
  unlink(c(infile, logfile, asafile))  # .rsa intentionally kept on disk
  id  <- substring(raw.lines, 1, 3)
  TOT <- unlist(strsplit(raw.lines[which(id == "TOT")], " +", perl = TRUE))
  RES <- raw.lines[which(id == "RES")]
  # Fixed-width fields of the RES records (NACCESS .rsa format).
  resid <- substring(RES, 5, 7)
  ch    <- substring(RES, 9, 9)
  resno <- gsub(" ", "", substring(RES, 10, 14))  # resno + insert code, spaces stripped
  asa_r_abs  <- as.numeric(substring(RES, 16, 22))  # all-atom, absolute
  asa_r_rel  <- as.numeric(substring(RES, 23, 28))  # all-atom, relative
  asa_s_abs  <- as.numeric(substring(RES, 29, 35))  # side-chain
  asa_s_rel  <- as.numeric(substring(RES, 36, 41))
  asa_m_abs  <- as.numeric(substring(RES, 42, 48))  # main-chain
  asa_m_rel  <- as.numeric(substring(RES, 49, 54))
  asa_np_abs <- as.numeric(substring(RES, 55, 61))  # non-polar
  asa_np_rel <- as.numeric(substring(RES, 62, 67))
  asa_p_abs  <- as.numeric(substring(RES, 68, 74))  # polar
  asa_p_rel  <- as.numeric(substring(RES, 75, 80))
  # One cbind instead of twelve incremental ones. Mixing character and
  # numeric columns coerces the whole matrix to character, exactly as the
  # original chained cbind calls did, and the column names match.
  finalasa <- cbind(Chain = ch, resno, resid,
                    asa_r_abs, asa_r_rel,
                    asa_s_abs, asa_s_rel,
                    asa_m_abs, asa_m_rel,
                    asa_np_abs, asa_np_rel,
                    asa_p_abs, asa_p_rel)
  print("Finished NACCESS")
  if (length(finalasa) == 0) {
    return("SORRY")
  } else {
    return(list(asa = finalasa, tot = TOT, atomacc = atom_acc))
  }
}
| /Script_dir/naccess.r | no_license | prpanigrahi/iRDP | R | false | false | 2,752 | r | naccess=function(pdb,exepath="",prefix="")
{
# Runs the external NACCESS program on `pdb` to compute solvent accessibility.
# Writes <prefix>.pdb, invokes the executable at `exepath`, then parses the
# fixed-width .rsa output (per-residue records) and the .asa output
# (per-atom records, read back as a PDB object).
# Returns "SORRY" on any failure, otherwise list(asa=, tot=, atomacc=).
print("Running NACCESS")
print(unique(pdb$atom[,"chain"]))
# Filenames NACCESS produces alongside the input <prefix>.pdb.
infile=paste(prefix,".pdb",sep="",collapse="");
rsafile=paste(prefix,".rsa",sep="",collapse="");
asafile=paste(prefix,".asa",sep="",collapse="");
logfile=paste(prefix,".log",sep="",collapse="");
write.pdb(pdb, file = infile);
# Run NACCESS; intern=TRUE captures its stdout into `error`.
# NOTE(review): the handler's `errorflag=1` assigns a local that is never
# read, and its return("SORRY") only exits the handler, not naccess().
tryCatch({error=system(paste(exepath, " ",infile, sep = ""),ignore.stderr = FALSE,ignore.stdout =FALSE,intern=TRUE)},error=function(e){errorflag=1;return("SORRY");});
# For some inputs (e.g. 1VTZ.pdb) NACCESS aborts with "STOP SOLVA_ERROR: max
# cubes exceeded statement executed" without this surfacing as an R error;
# instead `system()` returns output with a status attribute, and the .asa
# output carries no information.
# Two ways the failure is detected here:
# 1) `error` carrying attributes (status) -- checked immediately below;
# 2) missing output files, or an empty `finalasa` -- checked at the end.
#print(attributes(error));
if(is.list(attributes(error))){return("SORRY");}
if(!(file.exists(rsafile) & file.exists(asafile)))
{
# print("sorry")
return("SORRY");
}
raw.lines <- readLines(rsafile);
atom_acc=read.pdb(asafile);
# Remove intermediates; the .rsa file content is already in raw.lines.
unlink(c(infile,logfile,asafile))
# Record type is the first three characters of each .rsa line
# ("RES" = per-residue record, "TOT" = totals line).
id=substring(raw.lines,1,3);
TOT=unlist(strsplit(raw.lines[which(id=="TOT")]," +",perl=TRUE));
RES=raw.lines[which(id=="RES")];
# Fixed-width fields of the RES records: residue name, chain id,
# residue number (with insertion code).
resid=substring(RES, 5, 7)
ch=substring(RES, 9, 9)
resno=substring(RES, 10, 14) #resno.insert
resno=gsub(" ","",resno) # strip padding spaces
# All-atoms (residue) ASA, absolute and relative
asa_r_abs=as.numeric(substring(RES, 16, 22))
asa_r_rel=as.numeric(substring(RES, 23, 28))
# Side-chain ASA
asa_s_abs=as.numeric(substring(RES, 29, 35))
asa_s_rel=as.numeric(substring(RES, 36, 41))
# Main-chain ASA
asa_m_abs=as.numeric(substring(RES, 42, 48))
asa_m_rel=as.numeric(substring(RES, 49, 54))
# Non-polar ASA
asa_np_abs=as.numeric(substring(RES, 55, 61))
asa_np_rel=as.numeric(substring(RES, 62, 67))
# Polar ASA
asa_p_abs=as.numeric(substring(RES, 68, 74))
asa_p_rel=as.numeric(substring(RES, 75, 80))
# Assemble the per-residue table, one column at a time.
finalasa= ch;
finalasa=cbind(finalasa,resno) ;
finalasa=cbind(finalasa,resid) ;
finalasa=cbind(finalasa,asa_r_abs) ;
finalasa=cbind(finalasa,asa_r_rel) ;
finalasa=cbind(finalasa,asa_s_abs) ;
finalasa=cbind(finalasa,asa_s_rel) ;
finalasa=cbind(finalasa,asa_m_abs) ;
finalasa=cbind(finalasa,asa_m_rel) ;
finalasa=cbind(finalasa,asa_np_abs) ;
finalasa=cbind(finalasa,asa_np_rel) ;
finalasa=cbind(finalasa,asa_p_abs) ;
finalasa=cbind(finalasa,asa_p_rel) ;
colnames(finalasa)[1]="Chain";
print("Finished NACCESS")
# Empty finalasa means NACCESS produced no RES records (failure path 2).
if(length(finalasa)==0){return("SORRY")}else{return(list(asa=finalasa,tot=TOT,atomacc=atom_acc));}
}
|
#' Initialize enumpart
#'
#'This ensures that the enumerate partitions programs is prepared to run.
#'This must be run once per install of the redist package.
#'
#' @return 0 on success
#' @export
#' @references
#' Benjamin Fifield, Kosuke Imai, Jun Kawahara, and Christopher T Kenny.
#' "The Essential Role of Empirical Validation in Legislative Redistricting Simulation."
#' Forthcoming, Statistics and Public Policy.
#'
#' @concept enumerate
#' @examples \dontrun{
#' redist.init.enumpart()
#' }
redist.init.enumpart <- function() {
  on_windows <- Sys.info()[["sysname"]] == "Windows"
  # On Windows the bundled Makefile needs an extra linker flag (-lpsapi),
  # so patch line 7 of the shipped Makefile in place before building.
  if (on_windows) {
    makefile <- system.file("enumpart/Makefile", package = "redist")
    make_lines <- readLines(makefile)
    make_lines[7] <- "\tg++ enumpart.cpp SAPPOROBDD/bddc.o SAPPOROBDD/BDD.o SAPPOROBDD/ZBDD.o -o enumpart -I$(TDZDD_DIR) -std=c++11 -O3 -DB_64 -DNDEBUG -lpsapi"
    writeLines(text = make_lines, con = makefile)
  }
  # Compile the enumpart binary shipped with the package.
  servr::make(dir = system.file("enumpart", package = "redist"), verbose = FALSE)
  # Install the networkx dependency of the helper scripts; Windows exposes
  # the interpreter as "python", other platforms as "python3".
  python <- if (on_windows) "python" else "python3"
  sys::exec_wait(python, args = c("-m", "pip", "install", "networkx", "--user"))
  return(0)
}
#' Prepares a run of the enumpart algorithm by ordering edges
#'
#' @param adj zero indexed adjacency list
#' @param unordered_path valid path to output the unordered adjacency map to
#' @param ordered_path valid path to output the ordered adjacency map to
#' @param adjlist Deprecated, use adj. zero indexed adjacency list
#'
#' @return 0 on success
#' @export
#' @importFrom sys exec_wait
#'
#' @references
#' Benjamin Fifield, Kosuke Imai, Jun Kawahara, and Christopher T Kenny.
#' "The Essential Role of Empirical Validation in Legislative Redistricting Simulation."
#' Forthcoming, Statistics and Public Policy.
#' @concept enumerate
#' @examples \dontrun{
#' temp <- tempdir()
#' data(fl25)
#' adj <- redist.adjacency(fl25)
#' redist.prep.enumpart(adj = adj, unordered_path = paste0(temp, '/unordered'),
#' ordered_path = paste0(temp, '/ordered'))
#' }
redist.prep.enumpart <- function(adj, unordered_path, ordered_path, adjlist) {
  # Handle the deprecated `adjlist` argument for backward compatibility.
  if (!missing(adjlist)) {
    adj <- adjlist
    .Deprecated(new = "adj", old = "adjlist")
  }
  # Shift the zero-indexed adjacency list back to 1-indexing.
  adj <- lapply(adj, function(x) x + 1)
  # Build the edge list, keeping each undirected edge once (smaller index
  # first). Collecting per-vertex blocks and rbind-ing once avoids the
  # quadratic cost of growing a matrix inside a loop.
  edge_blocks <- lapply(seq_along(adj), function(k) {
    nbrs <- adj[[k]]
    nbrs <- nbrs[nbrs > k]
    if (length(nbrs) > 0L) {
      cbind(rep(k, length(nbrs)), nbrs, deparse.level = 0)
    } else {
      NULL
    }
  })
  adj_map <- do.call(rbind, edge_blocks)
  # Write the unordered edge list; enumpart expects a plain space-delimited
  # file with no header or row names.
  utils::write.table(data.frame(adj_map), file = paste0(unordered_path, ".dat"),
                     quote = FALSE, row.names = FALSE, col.names = FALSE)
  # Order the edges with the bundled ndscut.py script, reading the unordered
  # file on stdin and writing the ordered file on stdout. Windows exposes the
  # interpreter as "python", other platforms as "python3".
  python <- if (Sys.info()[["sysname"]] == "Windows") "python" else "python3"
  res <- sys::exec_wait(python,
                        args = system.file("python/ndscut.py", package = "redist"),
                        std_in = paste0(unordered_path, ".dat"),
                        std_out = paste0(ordered_path, ".dat"))
  return(res)
}
#' Runs the enumpart algorithm
#'
#' @param ordered_path Path used in redist.prep.enumpart (not including ".dat")
#' @param out_path Valid path to output the enumerated districts
#' @param ndists number of districts to enumerate
#' @param all boolean. TRUE outputs all districts. FALSE samples n districts.
#' @param n integer. Number of districts to output if all is FALSE. Returns
#' districts selected from uniform random distribution.
#' @param weight_path A path (not including ".dat") to a space-delimited file containing a vector of
#' vertex weights, to be used along with \code{lower} and \code{upper}.
#' @param lower A lower bound on each partition's total weight, implemented by rejection sampling.
#' @param upper An upper bound on each partition's total weight.
#' @param options Additional enumpart arguments. Not recommended for use.
#' @param ndist Deprecated, use ndists. number of districts to enumerate
#'
#' @references
#' Benjamin Fifield, Kosuke Imai, Jun Kawahara, and Christopher T Kenny.
#' "The Essential Role of Empirical Validation in Legislative Redistricting Simulation."
#' Forthcoming, Statistics and Public Policy.
#'
#' @return 0 on success
#' @export
#' @concept enumerate
#'
#' @examples \dontrun{
#' temp <- tempdir()
#' redist.run.enumpart(ordered_path = paste0(temp, '/ordered'),
#' out_path = paste0(temp, '/enumerated'))
#' }
redist.run.enumpart <- function(ordered_path, out_path, ndists = 2,
                                all = TRUE, n = NULL, weight_path = NULL,
                                lower = NULL, upper = NULL, options = NULL, ndist) {
  # Handle the deprecated `ndist` argument for backward compatibility.
  if (!missing(ndist)) {
    ndists <- ndist
    .Deprecated(new = "ndists", old = "ndist")
  }
  ndists <- as.integer(ndists)
  # Coerce n only when supplied: as.integer(NULL) is integer(0), not NULL,
  # which would defeat the is.null() guard below and emit "-sample" with no
  # value instead of raising the intended error.
  if (!is.null(n)) {
    n <- as.integer(n)
  }
  # Assemble the enumpart command-line flags unless the caller overrode them.
  if (is.null(options)) {
    if (all) {
      options <- c("-k", ndists, "-comp", "-allsols")
    } else {
      if (is.null(n)) {
        stop("n must be specified when all is FALSE.")
      }
      options <- c("-k", ndists, "-comp", "-sample", n)
    }
  }
  if (!is.null(lower)) {
    options <- c(options, "-lower", as.character(lower))
  }
  if (!is.null(upper)) {
    options <- c(options, "-upper", as.character(upper))
  }
  # The ordered edge file (and the optional vertex-weight file) precede the flags.
  if (is.null(weight_path)) {
    options <- c(paste0(ordered_path, ".dat"), options)
  } else {
    options <- c(paste0(ordered_path, ".dat"), paste0(weight_path, ".dat"), options)
  }
  ## Run enumpart, capturing its stdout into the output file.
  res <- sys::exec_wait(paste0(system.file("enumpart", package = "redist"), "/enumpart"),
                        args = options,
                        std_out = paste0(out_path, ".dat"), std_err = TRUE)
  return(res)
}
#' Read Results from enumpart
#'
#' @param out_path out_path specified in redist.run.enumpart
#' @param skip number of lines to skip
#' @param n_max max number of lines to read
#'
#' @return district_membership matrix
#' @export
#' @references
#' Benjamin Fifield, Kosuke Imai, Jun Kawahara, and Christopher T Kenny.
#' "The Essential Role of Empirical Validation in Legislative Redistricting Simulation."
#' Forthcoming, Statistics and Public Policy.
#'
#' @importFrom readr read_lines
#' @concept enumerate
#' @examples \dontrun{
#' temp <- tempdir()
#' cds <- redist.read.enumpart(out_path = paste0(temp,'/enumerated'))
#' }
redist.read.enumpart <- function(out_path, skip = 0, n_max = -1L) {
  # Read the raw enumeration output and split each line into its entries.
  lines <- read_lines(paste0(out_path, ".dat"), skip = skip, n_max = n_max)
  pieces <- strsplit(lines, " ")
  # One column per enumerated plan, one row per unit.
  plans <- apply(do.call("cbind", pieces), 2, as.numeric)
  # Shift from zero- to one-indexed district labels.
  plans + 1L
}
# check if last edge
#
# @param i integer, current frontier
# @param v integer, vertex to search for
# @param edges edgelist matrix
#
# @return bool
#
is_last <- function(i, v, edges) {
  # Returns TRUE when vertex `v` does not occur in any edge after row `i`
  # of the two-column edge matrix `edges`.
  if (i == nrow(edges)) {
    return(TRUE)
  }
  # Vectorized scan of the remaining edges replaces the original row-by-row
  # loop; drop = FALSE keeps the matrix shape when exactly one row remains.
  remaining <- edges[(i + 1):nrow(edges), , drop = FALSE]
  !any(remaining == v)
}
#' Calculate Frontier Size
#'
#' @param ordered_path path to ordered path created by redist.prep.enumpart
#'
#' @return List, four objects
#' \itemize{
#' \item{max}{numeric, maximum frontier size}
#' \item{average}{numeric, average frontier size}
#' \item{average_sq}{numeric, average((frontier size)^2)}
#' \item{sequence}{numeric vector, lists out all sizes for every frontier}
#' }
#' @export
#' @concept enumerate
#'
#' @importFrom stringr str_split
#' @examples \dontrun{
#' data(fl25)
#' adj <- redist.adjacency(fl25)
#' redist.prep.enumpart(adj, 'unordered', 'ordered')
#' redist.calc.frontier.size('ordered')
#' }
redist.calc.frontier.size <- function(ordered_path) {
  # Read the ordered edge list produced by redist.prep.enumpart.
  lines_in <- readLines(paste0(ordered_path, ".dat"))
  n <- length(lines_in)
  edges_unsort <- apply(
    stringr::str_split(string = lines_in, pattern = " ", simplify = TRUE),
    2, as.integer
  )
  # Normalize each edge to (min, max) vertex order.
  edges <- cbind(apply(edges_unsort, 1, min), apply(edges_unsort, 1, max))
  # frontier_sizes[i + 1] holds the frontier size after processing edge i.
  frontier_sizes <- rep(NA_real_, 1 + n)
  # The tracker is indexed by vertex id, so size it by the largest vertex id
  # (the original sized it by the edge count n, which silently grows the
  # vector -- potentially with NA gaps -- whenever a vertex id exceeds n).
  frontier <- rep(FALSE, max(edges))
  frontier_sizes[1] <- 0
  for (i in seq_len(n)) {
    e1 <- edges[i, 1]
    e2 <- edges[i, 2]
    # A vertex enters the frontier when first touched by an edge ...
    frontier[e1] <- TRUE
    frontier[e2] <- TRUE
    # ... and leaves once no later edge references it.
    if (is_last(i, e1, edges)) {
      frontier[e1] <- FALSE
    }
    if (is_last(i, e2, edges)) {
      frontier[e2] <- FALSE
    }
    frontier_sizes[i + 1] <- sum(frontier)
  }
  return(
    list(max = max(frontier_sizes),
         average = mean(frontier_sizes),
         average_sq = mean(frontier_sizes^2),
         sequence = frontier_sizes)
  )
}
#' Enumerate All Partitions
#'
#' Single function for standard enumeration analysis.
#'
#' @param adj zero indexed adjacency list.
#' @param unordered_path valid path to output the unordered adjacency map to
#' @param ordered_path valid path to output the ordered adjacency map to
#' @param out_path Valid path to output the enumerated districts
#' @param ndists number of districts to enumerate
#' @param all boolean. TRUE outputs all districts. FALSE samples n districts.
#' @param n integer. Number of districts to output if all is FALSE. Returns
#' districts selected from uniform random distribution.
#' @param weight_path A path (not including ".dat") to a space-delimited file containing a vector of
#' vertex weights, to be used along with \code{lower} and \code{upper}.
#' @param lower A lower bound on each partition's total weight, implemented by rejection sampling.
#' @param upper An upper bound on each partition's total weight.
#' @param init Runs redist.init.enumpart. Defaults to false. Should be run on first use.
#' @param read boolean. Defaults to TRUE. If TRUE, reads the enumerated plans back into R.
#' @param total_pop Integer Vector. Defaults to NULL. If supplied, computes the parity.
#' @param adjlist Deprecated, use adj. zero indexed adjacency list
#' @param ndist Deprecated, use ndists. number of districts to enumerate
#' @param population Deprecated, use total_pop. Integer Vector. Defaults to NULL. If supplied, computes the parity.
#'
#' @return List with entries district_membership and parity.
#'
#' @concept enumerate
#' @export
redist.enumpart <- function(adj, unordered_path, ordered_path,
out_path, ndists = 2, all = TRUE, n = NULL,
weight_path=NULL, lower=NULL, upper=NULL,
init = FALSE, read = TRUE,
total_pop = NULL, adjlist, population, ndist){
# Remap the deprecated argument names onto their replacements.
if(!missing(ndist)){
ndists <- ndist
.Deprecated(new = 'ndists', old = 'ndist')
}
if(!missing(population)){
total_pop <- population
.Deprecated(new = 'total_pop', old = 'population')
}
if(!missing(adjlist)){
adj <- adjlist
.Deprecated(new = 'adj', old = 'adjlist')
}
# Optionally compile enumpart and install its python dependency (first use).
if(init){
redist.init.enumpart()
}
# Write and order the edge list; prep returns 0 (falsy) on success.
prep <- redist.prep.enumpart(adj = adj,
unordered_path = unordered_path,
ordered_path = ordered_path)
if(!prep){
run <- redist.run.enumpart(ordered_path = ordered_path,
out_path = out_path,
ndists = ndists,
all = all,
n = n,
weight_path = weight_path,
lower = lower,
upper = upper)
}
# NOTE(review): if prep failed, the read below still runs against a possibly
# stale or missing output file -- confirm this is intended.
if(read){
cds <- redist.read.enumpart(out_path = out_path)
# Parity is only computable when a population vector is supplied.
if(!is.null(total_pop)){
par <- redist.parity(plans = cds, total_pop = total_pop)
} else{
par <- rep(NA_real_, ncol(cds))
}
out <- list(plans = cds, parity = par)
} else{
return(0)
}
return(out)
}
| /R/enumpart.R | no_license | deonizm/redist | R | false | false | 11,865 | r | #' Initialize enumpart
#'
#'This ensures that the enumerate partitions programs is prepared to run.
#'This must be run once per install of the redist package.
#'
#' @return 0 on success
#' @export
#' @references
#' Benjamin Fifield, Kosuke Imai, Jun Kawahara, and Christopher T Kenny.
#' "The Essential Role of Empirical Validation in Legislative Redistricting Simulation."
#' Forthcoming, Statistics and Public Policy.
#'
#' @concept enumerate
#' @examples \dontrun{
#' redist.init.enumpart()
#' }
redist.init.enumpart <- function() {
  on_windows <- Sys.info()[["sysname"]] == "Windows"
  # On Windows the bundled Makefile needs an extra linker flag (-lpsapi),
  # so patch line 7 of the shipped Makefile in place before building.
  if (on_windows) {
    makefile <- system.file("enumpart/Makefile", package = "redist")
    make_lines <- readLines(makefile)
    make_lines[7] <- "\tg++ enumpart.cpp SAPPOROBDD/bddc.o SAPPOROBDD/BDD.o SAPPOROBDD/ZBDD.o -o enumpart -I$(TDZDD_DIR) -std=c++11 -O3 -DB_64 -DNDEBUG -lpsapi"
    writeLines(text = make_lines, con = makefile)
  }
  # Compile the enumpart binary shipped with the package.
  servr::make(dir = system.file("enumpart", package = "redist"), verbose = FALSE)
  # Install the networkx dependency of the helper scripts; Windows exposes
  # the interpreter as "python", other platforms as "python3".
  python <- if (on_windows) "python" else "python3"
  sys::exec_wait(python, args = c("-m", "pip", "install", "networkx", "--user"))
  return(0)
}
#' Prepares a run of the enumpart algorithm by ordering edges
#'
#' @param adj zero indexed adjacency list
#' @param unordered_path valid path to output the unordered adjacency map to
#' @param ordered_path valid path to output the ordered adjacency map to
#' @param adjlist Deprecated, use adj. zero indexed adjacency list
#'
#' @return 0 on success
#' @export
#' @importFrom sys exec_wait
#'
#' @references
#' Benjamin Fifield, Kosuke Imai, Jun Kawahara, and Christopher T Kenny.
#' "The Essential Role of Empirical Validation in Legislative Redistricting Simulation."
#' Forthcoming, Statistics and Public Policy.
#' @concept enumerate
#' @examples \dontrun{
#' temp <- tempdir()
#' data(fl25)
#' adj <- redist.adjacency(fl25)
#' redist.prep.enumpart(adj = adj, unordered_path = paste0(temp, '/unordered'),
#' ordered_path = paste0(temp, '/ordered'))
#' }
redist.prep.enumpart <- function(adj, unordered_path, ordered_path, adjlist) {
  # Handle the deprecated `adjlist` argument for backward compatibility.
  if (!missing(adjlist)) {
    adj <- adjlist
    .Deprecated(new = "adj", old = "adjlist")
  }
  # Shift the zero-indexed adjacency list back to 1-indexing.
  adj <- lapply(adj, function(x) x + 1)
  # Build the edge list, keeping each undirected edge once (smaller index
  # first). Collecting per-vertex blocks and rbind-ing once avoids the
  # quadratic cost of growing a matrix inside a loop.
  edge_blocks <- lapply(seq_along(adj), function(k) {
    nbrs <- adj[[k]]
    nbrs <- nbrs[nbrs > k]
    if (length(nbrs) > 0L) {
      cbind(rep(k, length(nbrs)), nbrs, deparse.level = 0)
    } else {
      NULL
    }
  })
  adj_map <- do.call(rbind, edge_blocks)
  # Write the unordered edge list; enumpart expects a plain space-delimited
  # file with no header or row names.
  utils::write.table(data.frame(adj_map), file = paste0(unordered_path, ".dat"),
                     quote = FALSE, row.names = FALSE, col.names = FALSE)
  # Order the edges with the bundled ndscut.py script, reading the unordered
  # file on stdin and writing the ordered file on stdout. Windows exposes the
  # interpreter as "python", other platforms as "python3".
  python <- if (Sys.info()[["sysname"]] == "Windows") "python" else "python3"
  res <- sys::exec_wait(python,
                        args = system.file("python/ndscut.py", package = "redist"),
                        std_in = paste0(unordered_path, ".dat"),
                        std_out = paste0(ordered_path, ".dat"))
  return(res)
}
#' Runs the enumpart algorithm
#'
#' @param ordered_path Path used in redist.prep.enumpart (not including ".dat")
#' @param out_path Valid path to output the enumerated districts
#' @param ndists number of districts to enumerate
#' @param all boolean. TRUE outputs all districts. FALSE samples n districts.
#' @param n integer. Number of districts to output if all is FALSE. Returns
#' districts selected from uniform random distribution.
#' @param weight_path A path (not including ".dat") to a space-delimited file containing a vector of
#' vertex weights, to be used along with \code{lower} and \code{upper}.
#' @param lower A lower bound on each partition's total weight, implemented by rejection sampling.
#' @param upper An upper bound on each partition's total weight.
#' @param options Additional enumpart arguments. Not recommended for use.
#' @param ndist Deprecated, use ndists. number of districts to enumerate
#'
#' @references
#' Benjamin Fifield, Kosuke Imai, Jun Kawahara, and Christopher T Kenny.
#' "The Essential Role of Empirical Validation in Legislative Redistricting Simulation."
#' Forthcoming, Statistics and Public Policy.
#'
#' @return 0 on success
#' @export
#' @concept enumerate
#'
#' @examples \dontrun{
#' temp <- tempdir()
#' redist.run.enumpart(ordered_path = paste0(temp, '/ordered'),
#' out_path = paste0(temp, '/enumerated'))
#' }
redist.run.enumpart <- function(ordered_path, out_path, ndists = 2,
                                all = TRUE, n = NULL, weight_path = NULL,
                                lower = NULL, upper = NULL, options = NULL, ndist) {
  # Handle the deprecated `ndist` argument for backward compatibility.
  if (!missing(ndist)) {
    ndists <- ndist
    .Deprecated(new = "ndists", old = "ndist")
  }
  ndists <- as.integer(ndists)
  # Coerce n only when supplied: as.integer(NULL) is integer(0), not NULL,
  # which would defeat the is.null() guard below and emit "-sample" with no
  # value instead of raising the intended error.
  if (!is.null(n)) {
    n <- as.integer(n)
  }
  # Assemble the enumpart command-line flags unless the caller overrode them.
  if (is.null(options)) {
    if (all) {
      options <- c("-k", ndists, "-comp", "-allsols")
    } else {
      if (is.null(n)) {
        stop("n must be specified when all is FALSE.")
      }
      options <- c("-k", ndists, "-comp", "-sample", n)
    }
  }
  if (!is.null(lower)) {
    options <- c(options, "-lower", as.character(lower))
  }
  if (!is.null(upper)) {
    options <- c(options, "-upper", as.character(upper))
  }
  # The ordered edge file (and the optional vertex-weight file) precede the flags.
  if (is.null(weight_path)) {
    options <- c(paste0(ordered_path, ".dat"), options)
  } else {
    options <- c(paste0(ordered_path, ".dat"), paste0(weight_path, ".dat"), options)
  }
  ## Run enumpart, capturing its stdout into the output file.
  res <- sys::exec_wait(paste0(system.file("enumpart", package = "redist"), "/enumpart"),
                        args = options,
                        std_out = paste0(out_path, ".dat"), std_err = TRUE)
  return(res)
}
#' Read Results from enumpart
#'
#' @param out_path out_path specified in redist.run.enumpart
#' @param skip number of lines to skip
#' @param n_max max number of lines to read
#'
#' @return district_membership matrix
#' @export
#' @references
#' Benjamin Fifield, Kosuke Imai, Jun Kawahara, and Christopher T Kenny.
#' "The Essential Role of Empirical Validation in Legislative Redistricting Simulation."
#' Forthcoming, Statistics and Public Policy.
#'
#' @importFrom readr read_lines
#' @concept enumerate
#' @examples \dontrun{
#' temp <- tempdir()
#' cds <- redist.read.enumpart(out_path = paste0(temp,'/enumerated'))
#' }
redist.read.enumpart <- function(out_path, skip = 0, n_max = -1L) {
  # Read the raw enumeration output and split each line into its entries.
  lines <- read_lines(paste0(out_path, ".dat"), skip = skip, n_max = n_max)
  pieces <- strsplit(lines, " ")
  # One column per enumerated plan, one row per unit.
  plans <- apply(do.call("cbind", pieces), 2, as.numeric)
  # Shift from zero- to one-indexed district labels.
  plans + 1L
}
# check if last edge
#
# @param i integer, current frontier
# @param v integer, vertex to search for
# @param edges edgelist matrix
#
# @return bool
#
is_last <- function(i, v, edges) {
  # Returns TRUE when vertex `v` does not occur in any edge after row `i`
  # of the two-column edge matrix `edges`.
  if (i == nrow(edges)) {
    return(TRUE)
  }
  # Vectorized scan of the remaining edges replaces the original row-by-row
  # loop; drop = FALSE keeps the matrix shape when exactly one row remains.
  remaining <- edges[(i + 1):nrow(edges), , drop = FALSE]
  !any(remaining == v)
}
#' Calculate Frontier Size
#'
#' @param ordered_path path to ordered path created by redist.prep.enumpart
#'
#' @return List, four objects
#' \itemize{
#' \item{max}{numeric, maximum frontier size}
#' \item{average}{numeric, average frontier size}
#' \item{average_sq}{numeric, average((frontier size)^2)}
#' \item{sequence}{numeric vector, lists out all sizes for every frontier}
#' }
#' @export
#' @concept enumerate
#'
#' @importFrom stringr str_split
#' @examples \dontrun{
#' data(fl25)
#' adj <- redist.adjacency(fl25)
#' redist.prep.enumpart(adj, 'unordered', 'ordered')
#' redist.calc.frontier.size('ordered')
#' }
redist.calc.frontier.size <- function(ordered_path) {
  # Read the ordered edge list produced by redist.prep.enumpart.
  lines_in <- readLines(paste0(ordered_path, ".dat"))
  n <- length(lines_in)
  edges_unsort <- apply(
    stringr::str_split(string = lines_in, pattern = " ", simplify = TRUE),
    2, as.integer
  )
  # Normalize each edge to (min, max) vertex order.
  edges <- cbind(apply(edges_unsort, 1, min), apply(edges_unsort, 1, max))
  # frontier_sizes[i + 1] holds the frontier size after processing edge i.
  frontier_sizes <- rep(NA_real_, 1 + n)
  # The tracker is indexed by vertex id, so size it by the largest vertex id
  # (the original sized it by the edge count n, which silently grows the
  # vector -- potentially with NA gaps -- whenever a vertex id exceeds n).
  frontier <- rep(FALSE, max(edges))
  frontier_sizes[1] <- 0
  for (i in seq_len(n)) {
    e1 <- edges[i, 1]
    e2 <- edges[i, 2]
    # A vertex enters the frontier when first touched by an edge ...
    frontier[e1] <- TRUE
    frontier[e2] <- TRUE
    # ... and leaves once no later edge references it.
    if (is_last(i, e1, edges)) {
      frontier[e1] <- FALSE
    }
    if (is_last(i, e2, edges)) {
      frontier[e2] <- FALSE
    }
    frontier_sizes[i + 1] <- sum(frontier)
  }
  return(
    list(max = max(frontier_sizes),
         average = mean(frontier_sizes),
         average_sq = mean(frontier_sizes^2),
         sequence = frontier_sizes)
  )
}
#' Enumerate All Partitions
#'
#' Single function for standard enumeration analysis.
#'
#' @param adj zero indexed adjacency list.
#' @param unordered_path valid path to output the unordered adjacency map to
#' @param ordered_path valid path to output the ordered adjacency map to
#' @param out_path Valid path to output the enumerated districts
#' @param ndists number of districts to enumerate
#' @param all boolean. TRUE outputs all districts. FALSE samples n districts.
#' @param n integer. Number of districts to output if all is FALSE. Returns
#' districts selected from uniform random distribution.
#' @param weight_path A path (not including ".dat") to a space-delimited file containing a vector of
#' vertex weights, to be used along with \code{lower} and \code{upper}.
#' @param lower A lower bound on each partition's total weight, implemented by rejection sampling.
#' @param upper An upper bound on each partition's total weight.
#' @param init Runs redist.init.enumpart. Defaults to false. Should be run on first use.
#' @param read boolean. Defaults to TRUE. If TRUE, reads the enumerated plans back into R.
#' @param total_pop Integer Vector. Defaults to NULL. If supplied, computes the parity.
#' @param adjlist Deprecated, use adj. zero indexed adjacency list
#' @param ndist Deprecated, use ndists. number of districts to enumerate
#' @param population Deprecated, use total_pop. Integer Vector. Defaults to NULL. If supplied, computes the parity.
#'
#' @return List with entries district_membership and parity.
#'
#' @concept enumerate
#' @export
redist.enumpart <- function(adj, unordered_path, ordered_path,
out_path, ndists = 2, all = TRUE, n = NULL,
weight_path=NULL, lower=NULL, upper=NULL,
init = FALSE, read = TRUE,
total_pop = NULL, adjlist, population, ndist){
# Remap the deprecated argument names onto their replacements.
if(!missing(ndist)){
ndists <- ndist
.Deprecated(new = 'ndists', old = 'ndist')
}
if(!missing(population)){
total_pop <- population
.Deprecated(new = 'total_pop', old = 'population')
}
if(!missing(adjlist)){
adj <- adjlist
.Deprecated(new = 'adj', old = 'adjlist')
}
# Optionally compile enumpart and install its python dependency (first use).
if(init){
redist.init.enumpart()
}
# Write and order the edge list; prep returns 0 (falsy) on success.
prep <- redist.prep.enumpart(adj = adj,
unordered_path = unordered_path,
ordered_path = ordered_path)
if(!prep){
run <- redist.run.enumpart(ordered_path = ordered_path,
out_path = out_path,
ndists = ndists,
all = all,
n = n,
weight_path = weight_path,
lower = lower,
upper = upper)
}
# NOTE(review): if prep failed, the read below still runs against a possibly
# stale or missing output file -- confirm this is intended.
if(read){
cds <- redist.read.enumpart(out_path = out_path)
# Parity is only computable when a population vector is supplied.
if(!is.null(total_pop)){
par <- redist.parity(plans = cds, total_pop = total_pop)
} else{
par <- rep(NA_real_, ncol(cds))
}
out <- list(plans = cds, parity = par)
} else{
return(0)
}
return(out)
}
|
# This file is generated by make.paws. Please do not edit here.
#' @importFrom paws.common get_config new_operation new_request send_request
#' @include iotanalytics_service.R
NULL
#' Sends messages to a channel
#'
#' Sends messages to a channel.
#'
#' @usage
#' iotanalytics_batch_put_message(channelName, messages)
#'
#' @param channelName [required] The name of the channel where the messages are sent.
#' @param messages [required] The list of messages to be sent. Each message has format: \'\{
#' \"messageId\": \"string\", \"payload\": \"string\"\}\'.
#'
#' Note that the field names of message payloads (data) that you send to
#' AWS IoT Analytics:
#'
#' - Must contain only alphanumeric characters and underscores (\\_); no
#' other special characters are allowed.
#'
#' - Must begin with an alphabetic character or single underscore (\\_).
#'
#' - Cannot contain hyphens (-).
#'
#' - In regular expression terms:
#' \"\\^\[A-Za-z\\_\](\[A-Za-z0-9\]*\\|\[A-Za-z0-9\]\[A-Za-z0-9\\_\]*)\\$\".
#'
#' - Cannot be greater than 255 characters.
#'
#' - Are case-insensitive. (Fields named \"foo\" and \"FOO\" in the same
#' payload are considered duplicates.)
#'
#' For example, \{\"temp\\_01\": 29\} or \{\"\\_temp\\_01\": 29\} are valid, but
#' \{\"temp-01\": 29\}, \{\"01\\_temp\": 29\} or \{\"\\_\\_temp\\_01\": 29\} are
#' invalid in message payloads.
#'
#' @section Request syntax:
#' ```
#' svc$batch_put_message(
#' channelName = "string",
#' messages = list(
#' list(
#' messageId = "string",
#' payload = raw
#' )
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_batch_put_message
iotanalytics_batch_put_message <- function(channelName, messages) {
  # Describe the BatchPutMessage REST endpoint.
  op <- new_operation(
    name = "BatchPutMessage",
    http_method = "POST",
    http_path = "/messages/batch",
    paginator = list()
  )
  # Marshal the arguments, build the signed request, and execute it.
  input <- .iotanalytics$batch_put_message_input(channelName = channelName, messages = messages)
  output <- .iotanalytics$batch_put_message_output()
  svc <- .iotanalytics$service(get_config())
  request <- new_request(svc, op, input, output)
  send_request(request)
}
.iotanalytics$operations$batch_put_message <- iotanalytics_batch_put_message
#' Cancels the reprocessing of data through the pipeline
#'
#' Cancels the reprocessing of data through the pipeline.
#'
#' @usage
#' iotanalytics_cancel_pipeline_reprocessing(pipelineName, reprocessingId)
#'
#' @param pipelineName [required] The name of pipeline for which data reprocessing is canceled.
#' @param reprocessingId [required] The ID of the reprocessing task (returned by
#' \"StartPipelineReprocessing\").
#'
#' @section Request syntax:
#' ```
#' svc$cancel_pipeline_reprocessing(
#' pipelineName = "string",
#' reprocessingId = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_cancel_pipeline_reprocessing
iotanalytics_cancel_pipeline_reprocessing <- function(pipelineName, reprocessingId) {
  # Describe the CancelPipelineReprocessing REST endpoint.
  op <- new_operation(
    name = "CancelPipelineReprocessing",
    http_method = "DELETE",
    http_path = "/pipelines/{pipelineName}/reprocessing/{reprocessingId}",
    paginator = list()
  )
  # Marshal the arguments, build the signed request, and execute it.
  input <- .iotanalytics$cancel_pipeline_reprocessing_input(pipelineName = pipelineName, reprocessingId = reprocessingId)
  output <- .iotanalytics$cancel_pipeline_reprocessing_output()
  svc <- .iotanalytics$service(get_config())
  request <- new_request(svc, op, input, output)
  send_request(request)
}
.iotanalytics$operations$cancel_pipeline_reprocessing <- iotanalytics_cancel_pipeline_reprocessing
#' Creates a channel
#'
#' Creates a channel. A channel collects data from an MQTT topic and
#' archives the raw, unprocessed messages before publishing the data to a
#' pipeline.
#'
#' @usage
#' iotanalytics_create_channel(channelName, channelStorage,
#' retentionPeriod, tags)
#'
#' @param channelName [required] The name of the channel.
#' @param channelStorage Where channel data is stored. You may choose one of \"serviceManagedS3\"
#' or \"customerManagedS3\" storage. If not specified, the default is
#' \"serviceManagedS3\". This cannot be changed after creation of the
#' channel.
#' @param retentionPeriod How long, in days, message data is kept for the channel. When
#' \"customerManagedS3\" storage is selected, this parameter is ignored.
#' @param tags Metadata which can be used to manage the channel.
#'
#' @section Request syntax:
#' ```
#' svc$create_channel(
#' channelName = "string",
#' channelStorage = list(
#' serviceManagedS3 = list(),
#' customerManagedS3 = list(
#' bucket = "string",
#' keyPrefix = "string",
#' roleArn = "string"
#' )
#' ),
#' retentionPeriod = list(
#' unlimited = TRUE|FALSE,
#' numberOfDays = 123
#' ),
#' tags = list(
#' list(
#' key = "string",
#' value = "string"
#' )
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_create_channel
iotanalytics_create_channel <- function(channelName, channelStorage = NULL, retentionPeriod = NULL, tags = NULL) {
  # Describe the CreateChannel REST endpoint.
  op <- new_operation(
    name = "CreateChannel",
    http_method = "POST",
    http_path = "/channels",
    paginator = list()
  )
  # Marshal the arguments, build the signed request, and execute it.
  input <- .iotanalytics$create_channel_input(channelName = channelName, channelStorage = channelStorage, retentionPeriod = retentionPeriod, tags = tags)
  output <- .iotanalytics$create_channel_output()
  svc <- .iotanalytics$service(get_config())
  request <- new_request(svc, op, input, output)
  send_request(request)
}
.iotanalytics$operations$create_channel <- iotanalytics_create_channel
#' Creates a data set
#'
#' Creates a data set. A data set stores data retrieved from a data store
#' by applying a \"queryAction\" (a SQL query) or a \"containerAction\"
#' (executing a containerized application). This operation creates the
#' skeleton of a data set. The data set can be populated manually by
#' calling \"CreateDatasetContent\" or automatically according to a
#' \"trigger\" you specify.
#'
#' @usage
#' iotanalytics_create_dataset(datasetName, actions, triggers,
#' contentDeliveryRules, retentionPeriod, versioningConfiguration, tags)
#'
#' @param datasetName [required] The name of the data set.
#' @param actions [required] A list of actions that create the data set contents.
#' @param triggers A list of triggers. A trigger causes data set contents to be populated
#' at a specified time interval or when another data set\'s contents are
#' created. The list of triggers can be empty or contain up to five
#' **DataSetTrigger** objects.
#' @param contentDeliveryRules When data set contents are created they are delivered to destinations
#' specified here.
#' @param retentionPeriod \[Optional\] How long, in days, versions of data set contents are kept
#' for the data set. If not specified or set to null, versions of data set
#' contents are retained for at most 90 days. The number of versions of
#' data set contents retained is determined by the
#' `versioningConfiguration` parameter. (For more information, see
#' https://docs.aws.amazon.com/iotanalytics/latest/userguide/getting-started.html\\#aws-iot-analytics-dataset-versions)
#' @param versioningConfiguration \[Optional\] How many versions of data set contents are kept. If not
#' specified or set to null, only the latest version plus the latest
#' succeeded version (if they are different) are kept for the time period
#' specified by the \"retentionPeriod\" parameter. (For more information,
#' see
#' https://docs.aws.amazon.com/iotanalytics/latest/userguide/getting-started.html\\#aws-iot-analytics-dataset-versions)
#' @param tags Metadata which can be used to manage the data set.
#'
#' @section Request syntax:
#' ```
#' svc$create_dataset(
#' datasetName = "string",
#' actions = list(
#' list(
#' actionName = "string",
#' queryAction = list(
#' sqlQuery = "string",
#' filters = list(
#' list(
#' deltaTime = list(
#' offsetSeconds = 123,
#' timeExpression = "string"
#' )
#' )
#' )
#' ),
#' containerAction = list(
#' image = "string",
#' executionRoleArn = "string",
#' resourceConfiguration = list(
#' computeType = "ACU_1"|"ACU_2",
#' volumeSizeInGB = 123
#' ),
#' variables = list(
#' list(
#' name = "string",
#' stringValue = "string",
#' doubleValue = 123.0,
#' datasetContentVersionValue = list(
#' datasetName = "string"
#' ),
#' outputFileUriValue = list(
#' fileName = "string"
#' )
#' )
#' )
#' )
#' )
#' ),
#' triggers = list(
#' list(
#' schedule = list(
#' expression = "string"
#' ),
#' dataset = list(
#' name = "string"
#' )
#' )
#' ),
#' contentDeliveryRules = list(
#' list(
#' entryName = "string",
#' destination = list(
#' iotEventsDestinationConfiguration = list(
#' inputName = "string",
#' roleArn = "string"
#' ),
#' s3DestinationConfiguration = list(
#' bucket = "string",
#' key = "string",
#' glueConfiguration = list(
#' tableName = "string",
#' databaseName = "string"
#' ),
#' roleArn = "string"
#' )
#' )
#' )
#' ),
#' retentionPeriod = list(
#' unlimited = TRUE|FALSE,
#' numberOfDays = 123
#' ),
#' versioningConfiguration = list(
#' unlimited = TRUE|FALSE,
#' maxVersions = 123
#' ),
#' tags = list(
#' list(
#' key = "string",
#' value = "string"
#' )
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_create_dataset
iotanalytics_create_dataset <- function(datasetName, actions, triggers = NULL, contentDeliveryRules = NULL, retentionPeriod = NULL, versioningConfiguration = NULL, tags = NULL) {
  # Describe the CreateDataset REST operation (method, path, no pagination).
  operation <- new_operation(
    name = "CreateDataset",
    http_method = "POST",
    http_path = "/datasets",
    paginator = list()
  )
  # Marshal the caller-supplied arguments into the request input structure
  # and prepare the corresponding output structure.
  req_input <- .iotanalytics$create_dataset_input(datasetName = datasetName, actions = actions, triggers = triggers, contentDeliveryRules = contentDeliveryRules, retentionPeriod = retentionPeriod, versioningConfiguration = versioningConfiguration, tags = tags)
  resp_output <- .iotanalytics$create_dataset_output()
  # Build a service client from the active configuration and dispatch.
  client <- .iotanalytics$service(get_config())
  send_request(new_request(client, operation, req_input, resp_output))
}
.iotanalytics$operations$create_dataset <- iotanalytics_create_dataset
#' Creates the content of a data set by applying a "queryAction" (a SQL
#' query) or a "containerAction" (executing a containerized application)
#'
#' Creates the content of a data set by applying a \"queryAction\" (a SQL
#' query) or a \"containerAction\" (executing a containerized application).
#'
#' @usage
#' iotanalytics_create_dataset_content(datasetName)
#'
#' @param datasetName [required] The name of the data set.
#'
#' @section Request syntax:
#' ```
#' svc$create_dataset_content(
#' datasetName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_create_dataset_content
iotanalytics_create_dataset_content <- function(datasetName) {
  # Describe the CreateDatasetContent REST operation.
  operation <- new_operation(
    name = "CreateDatasetContent",
    http_method = "POST",
    http_path = "/datasets/{datasetName}/content",
    paginator = list()
  )
  # Marshal arguments into the request input and prepare the output shape.
  req_input <- .iotanalytics$create_dataset_content_input(datasetName = datasetName)
  resp_output <- .iotanalytics$create_dataset_content_output()
  # Build a service client from the active configuration and dispatch.
  client <- .iotanalytics$service(get_config())
  send_request(new_request(client, operation, req_input, resp_output))
}
.iotanalytics$operations$create_dataset_content <- iotanalytics_create_dataset_content
#' Creates a data store, which is a repository for messages
#'
#' Creates a data store, which is a repository for messages.
#'
#' @usage
#' iotanalytics_create_datastore(datastoreName, datastoreStorage,
#' retentionPeriod, tags)
#'
#' @param datastoreName [required] The name of the data store.
#' @param datastoreStorage Where data store data is stored. You may choose one of
#' \"serviceManagedS3\" or \"customerManagedS3\" storage. If not specified,
#' the default is \"serviceManagedS3\". This cannot be changed after the
#' data store is created.
#' @param retentionPeriod How long, in days, message data is kept for the data store. When
#' \"customerManagedS3\" storage is selected, this parameter is ignored.
#' @param tags Metadata which can be used to manage the data store.
#'
#' @section Request syntax:
#' ```
#' svc$create_datastore(
#' datastoreName = "string",
#' datastoreStorage = list(
#' serviceManagedS3 = list(),
#' customerManagedS3 = list(
#' bucket = "string",
#' keyPrefix = "string",
#' roleArn = "string"
#' )
#' ),
#' retentionPeriod = list(
#' unlimited = TRUE|FALSE,
#' numberOfDays = 123
#' ),
#' tags = list(
#' list(
#' key = "string",
#' value = "string"
#' )
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_create_datastore
iotanalytics_create_datastore <- function(datastoreName, datastoreStorage = NULL, retentionPeriod = NULL, tags = NULL) {
  # Describe the CreateDatastore REST operation.
  operation <- new_operation(
    name = "CreateDatastore",
    http_method = "POST",
    http_path = "/datastores",
    paginator = list()
  )
  # Marshal arguments into the request input and prepare the output shape.
  req_input <- .iotanalytics$create_datastore_input(datastoreName = datastoreName, datastoreStorage = datastoreStorage, retentionPeriod = retentionPeriod, tags = tags)
  resp_output <- .iotanalytics$create_datastore_output()
  # Build a service client from the active configuration and dispatch.
  client <- .iotanalytics$service(get_config())
  send_request(new_request(client, operation, req_input, resp_output))
}
.iotanalytics$operations$create_datastore <- iotanalytics_create_datastore
#' Creates a pipeline
#'
#' Creates a pipeline. A pipeline consumes messages from a channel and
#' allows you to process the messages before storing them in a data store.
#' You must specify both a `channel` and a `datastore` activity and,
#' optionally, as many as 23 additional activities in the
#' `pipelineActivities` array.
#'
#' @usage
#' iotanalytics_create_pipeline(pipelineName, pipelineActivities, tags)
#'
#' @param pipelineName [required] The name of the pipeline.
#' @param pipelineActivities [required] A list of \"PipelineActivity\" objects. Activities perform
#' transformations on your messages, such as removing, renaming or adding
#' message attributes; filtering messages based on attribute values;
#' invoking your Lambda functions on messages for advanced processing; or
#' performing mathematical transformations to normalize device data.
#'
#' The list can be 2-25 **PipelineActivity** objects and must contain both
#' a `channel` and a `datastore` activity. Each entry in the list must
#' contain only one activity, for example:
#'
#' `pipelineActivities = \\[ \{ "channel": \{ ... \} \}, \{ "lambda": \{ ... \} \}, ... \\]`
#' @param tags Metadata which can be used to manage the pipeline.
#'
#' @section Request syntax:
#' ```
#' svc$create_pipeline(
#' pipelineName = "string",
#' pipelineActivities = list(
#' list(
#' channel = list(
#' name = "string",
#' channelName = "string",
#' next = "string"
#' ),
#' lambda = list(
#' name = "string",
#' lambdaName = "string",
#' batchSize = 123,
#' next = "string"
#' ),
#' datastore = list(
#' name = "string",
#' datastoreName = "string"
#' ),
#' addAttributes = list(
#' name = "string",
#' attributes = list(
#' "string"
#' ),
#' next = "string"
#' ),
#' removeAttributes = list(
#' name = "string",
#' attributes = list(
#' "string"
#' ),
#' next = "string"
#' ),
#' selectAttributes = list(
#' name = "string",
#' attributes = list(
#' "string"
#' ),
#' next = "string"
#' ),
#' filter = list(
#' name = "string",
#' filter = "string",
#' next = "string"
#' ),
#' math = list(
#' name = "string",
#' attribute = "string",
#' math = "string",
#' next = "string"
#' ),
#' deviceRegistryEnrich = list(
#' name = "string",
#' attribute = "string",
#' thingName = "string",
#' roleArn = "string",
#' next = "string"
#' ),
#' deviceShadowEnrich = list(
#' name = "string",
#' attribute = "string",
#' thingName = "string",
#' roleArn = "string",
#' next = "string"
#' )
#' )
#' ),
#' tags = list(
#' list(
#' key = "string",
#' value = "string"
#' )
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_create_pipeline
iotanalytics_create_pipeline <- function(pipelineName, pipelineActivities, tags = NULL) {
  # Describe the CreatePipeline REST operation.
  operation <- new_operation(
    name = "CreatePipeline",
    http_method = "POST",
    http_path = "/pipelines",
    paginator = list()
  )
  # Marshal arguments into the request input and prepare the output shape.
  req_input <- .iotanalytics$create_pipeline_input(pipelineName = pipelineName, pipelineActivities = pipelineActivities, tags = tags)
  resp_output <- .iotanalytics$create_pipeline_output()
  # Build a service client from the active configuration and dispatch.
  client <- .iotanalytics$service(get_config())
  send_request(new_request(client, operation, req_input, resp_output))
}
.iotanalytics$operations$create_pipeline <- iotanalytics_create_pipeline
#' Deletes the specified channel
#'
#' Deletes the specified channel.
#'
#' @usage
#' iotanalytics_delete_channel(channelName)
#'
#' @param channelName [required] The name of the channel to delete.
#'
#' @section Request syntax:
#' ```
#' svc$delete_channel(
#' channelName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_delete_channel
iotanalytics_delete_channel <- function(channelName) {
  # Describe the DeleteChannel REST operation.
  operation <- new_operation(
    name = "DeleteChannel",
    http_method = "DELETE",
    http_path = "/channels/{channelName}",
    paginator = list()
  )
  # Marshal arguments into the request input and prepare the output shape.
  req_input <- .iotanalytics$delete_channel_input(channelName = channelName)
  resp_output <- .iotanalytics$delete_channel_output()
  # Build a service client from the active configuration and dispatch.
  client <- .iotanalytics$service(get_config())
  send_request(new_request(client, operation, req_input, resp_output))
}
.iotanalytics$operations$delete_channel <- iotanalytics_delete_channel
#' Deletes the specified data set
#'
#' Deletes the specified data set.
#'
#' You do not have to delete the content of the data set before you perform
#' this operation.
#'
#' @usage
#' iotanalytics_delete_dataset(datasetName)
#'
#' @param datasetName [required] The name of the data set to delete.
#'
#' @section Request syntax:
#' ```
#' svc$delete_dataset(
#' datasetName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_delete_dataset
iotanalytics_delete_dataset <- function(datasetName) {
  # Describe the DeleteDataset REST operation.
  operation <- new_operation(
    name = "DeleteDataset",
    http_method = "DELETE",
    http_path = "/datasets/{datasetName}",
    paginator = list()
  )
  # Marshal arguments into the request input and prepare the output shape.
  req_input <- .iotanalytics$delete_dataset_input(datasetName = datasetName)
  resp_output <- .iotanalytics$delete_dataset_output()
  # Build a service client from the active configuration and dispatch.
  client <- .iotanalytics$service(get_config())
  send_request(new_request(client, operation, req_input, resp_output))
}
.iotanalytics$operations$delete_dataset <- iotanalytics_delete_dataset
#' Deletes the content of the specified data set
#'
#' Deletes the content of the specified data set.
#'
#' @usage
#' iotanalytics_delete_dataset_content(datasetName, versionId)
#'
#' @param datasetName [required] The name of the data set whose content is deleted.
#' @param versionId The version of the data set whose content is deleted. You can also use
#' the strings \"\\$LATEST\" or \"\\$LATEST\\_SUCCEEDED\" to delete the latest
#' or latest successfully completed data set. If not specified,
#' \"\\$LATEST\\_SUCCEEDED\" is the default.
#'
#' @section Request syntax:
#' ```
#' svc$delete_dataset_content(
#' datasetName = "string",
#' versionId = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_delete_dataset_content
iotanalytics_delete_dataset_content <- function(datasetName, versionId = NULL) {
  # Describe the DeleteDatasetContent REST operation.
  operation <- new_operation(
    name = "DeleteDatasetContent",
    http_method = "DELETE",
    http_path = "/datasets/{datasetName}/content",
    paginator = list()
  )
  # Marshal arguments into the request input and prepare the output shape.
  req_input <- .iotanalytics$delete_dataset_content_input(datasetName = datasetName, versionId = versionId)
  resp_output <- .iotanalytics$delete_dataset_content_output()
  # Build a service client from the active configuration and dispatch.
  client <- .iotanalytics$service(get_config())
  send_request(new_request(client, operation, req_input, resp_output))
}
.iotanalytics$operations$delete_dataset_content <- iotanalytics_delete_dataset_content
#' Deletes the specified data store
#'
#' Deletes the specified data store.
#'
#' @usage
#' iotanalytics_delete_datastore(datastoreName)
#'
#' @param datastoreName [required] The name of the data store to delete.
#'
#' @section Request syntax:
#' ```
#' svc$delete_datastore(
#' datastoreName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_delete_datastore
iotanalytics_delete_datastore <- function(datastoreName) {
  # Describe the DeleteDatastore REST operation.
  operation <- new_operation(
    name = "DeleteDatastore",
    http_method = "DELETE",
    http_path = "/datastores/{datastoreName}",
    paginator = list()
  )
  # Marshal arguments into the request input and prepare the output shape.
  req_input <- .iotanalytics$delete_datastore_input(datastoreName = datastoreName)
  resp_output <- .iotanalytics$delete_datastore_output()
  # Build a service client from the active configuration and dispatch.
  client <- .iotanalytics$service(get_config())
  send_request(new_request(client, operation, req_input, resp_output))
}
.iotanalytics$operations$delete_datastore <- iotanalytics_delete_datastore
#' Deletes the specified pipeline
#'
#' Deletes the specified pipeline.
#'
#' @usage
#' iotanalytics_delete_pipeline(pipelineName)
#'
#' @param pipelineName [required] The name of the pipeline to delete.
#'
#' @section Request syntax:
#' ```
#' svc$delete_pipeline(
#' pipelineName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_delete_pipeline
iotanalytics_delete_pipeline <- function(pipelineName) {
  # Describe the DeletePipeline REST operation.
  operation <- new_operation(
    name = "DeletePipeline",
    http_method = "DELETE",
    http_path = "/pipelines/{pipelineName}",
    paginator = list()
  )
  # Marshal arguments into the request input and prepare the output shape.
  req_input <- .iotanalytics$delete_pipeline_input(pipelineName = pipelineName)
  resp_output <- .iotanalytics$delete_pipeline_output()
  # Build a service client from the active configuration and dispatch.
  client <- .iotanalytics$service(get_config())
  send_request(new_request(client, operation, req_input, resp_output))
}
.iotanalytics$operations$delete_pipeline <- iotanalytics_delete_pipeline
#' Retrieves information about a channel
#'
#' Retrieves information about a channel.
#'
#' @usage
#' iotanalytics_describe_channel(channelName, includeStatistics)
#'
#' @param channelName [required] The name of the channel whose information is retrieved.
#' @param includeStatistics If true, additional statistical information about the channel is
#' included in the response. This feature cannot be used with a channel
#' whose S3 storage is customer-managed.
#'
#' @section Request syntax:
#' ```
#' svc$describe_channel(
#' channelName = "string",
#' includeStatistics = TRUE|FALSE
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_describe_channel
iotanalytics_describe_channel <- function(channelName, includeStatistics = NULL) {
  # Describe the DescribeChannel REST operation.
  operation <- new_operation(
    name = "DescribeChannel",
    http_method = "GET",
    http_path = "/channels/{channelName}",
    paginator = list()
  )
  # Marshal arguments into the request input and prepare the output shape.
  req_input <- .iotanalytics$describe_channel_input(channelName = channelName, includeStatistics = includeStatistics)
  resp_output <- .iotanalytics$describe_channel_output()
  # Build a service client from the active configuration and dispatch.
  client <- .iotanalytics$service(get_config())
  send_request(new_request(client, operation, req_input, resp_output))
}
.iotanalytics$operations$describe_channel <- iotanalytics_describe_channel
#' Retrieves information about a data set
#'
#' Retrieves information about a data set.
#'
#' @usage
#' iotanalytics_describe_dataset(datasetName)
#'
#' @param datasetName [required] The name of the data set whose information is retrieved.
#'
#' @section Request syntax:
#' ```
#' svc$describe_dataset(
#' datasetName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_describe_dataset
iotanalytics_describe_dataset <- function(datasetName) {
  # Describe the DescribeDataset REST operation.
  operation <- new_operation(
    name = "DescribeDataset",
    http_method = "GET",
    http_path = "/datasets/{datasetName}",
    paginator = list()
  )
  # Marshal arguments into the request input and prepare the output shape.
  req_input <- .iotanalytics$describe_dataset_input(datasetName = datasetName)
  resp_output <- .iotanalytics$describe_dataset_output()
  # Build a service client from the active configuration and dispatch.
  client <- .iotanalytics$service(get_config())
  send_request(new_request(client, operation, req_input, resp_output))
}
.iotanalytics$operations$describe_dataset <- iotanalytics_describe_dataset
#' Retrieves information about a data store
#'
#' Retrieves information about a data store.
#'
#' @usage
#' iotanalytics_describe_datastore(datastoreName, includeStatistics)
#'
#' @param datastoreName [required] The name of the data store
#' @param includeStatistics If true, additional statistical information about the data store is
#' included in the response. This feature cannot be used with a data store
#' whose S3 storage is customer-managed.
#'
#' @section Request syntax:
#' ```
#' svc$describe_datastore(
#' datastoreName = "string",
#' includeStatistics = TRUE|FALSE
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_describe_datastore
iotanalytics_describe_datastore <- function(datastoreName, includeStatistics = NULL) {
  # Describe the DescribeDatastore REST operation.
  operation <- new_operation(
    name = "DescribeDatastore",
    http_method = "GET",
    http_path = "/datastores/{datastoreName}",
    paginator = list()
  )
  # Marshal arguments into the request input and prepare the output shape.
  req_input <- .iotanalytics$describe_datastore_input(datastoreName = datastoreName, includeStatistics = includeStatistics)
  resp_output <- .iotanalytics$describe_datastore_output()
  # Build a service client from the active configuration and dispatch.
  client <- .iotanalytics$service(get_config())
  send_request(new_request(client, operation, req_input, resp_output))
}
.iotanalytics$operations$describe_datastore <- iotanalytics_describe_datastore
#' Retrieves the current settings of the AWS IoT Analytics logging options
#'
#' Retrieves the current settings of the AWS IoT Analytics logging options.
#'
#' @usage
#' iotanalytics_describe_logging_options()
#'
#' @section Request syntax:
#' ```
#' svc$describe_logging_options()
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_describe_logging_options
iotanalytics_describe_logging_options <- function() {
  # Describe the DescribeLoggingOptions REST operation (takes no arguments).
  operation <- new_operation(
    name = "DescribeLoggingOptions",
    http_method = "GET",
    http_path = "/logging",
    paginator = list()
  )
  # Prepare the (empty) request input and the expected output shape.
  req_input <- .iotanalytics$describe_logging_options_input()
  resp_output <- .iotanalytics$describe_logging_options_output()
  # Build a service client from the active configuration and dispatch.
  client <- .iotanalytics$service(get_config())
  send_request(new_request(client, operation, req_input, resp_output))
}
.iotanalytics$operations$describe_logging_options <- iotanalytics_describe_logging_options
#' Retrieves information about a pipeline
#'
#' Retrieves information about a pipeline.
#'
#' @usage
#' iotanalytics_describe_pipeline(pipelineName)
#'
#' @param pipelineName [required] The name of the pipeline whose information is retrieved.
#'
#' @section Request syntax:
#' ```
#' svc$describe_pipeline(
#' pipelineName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_describe_pipeline
iotanalytics_describe_pipeline <- function(pipelineName) {
  # Describe the DescribePipeline REST operation.
  operation <- new_operation(
    name = "DescribePipeline",
    http_method = "GET",
    http_path = "/pipelines/{pipelineName}",
    paginator = list()
  )
  # Marshal arguments into the request input and prepare the output shape.
  req_input <- .iotanalytics$describe_pipeline_input(pipelineName = pipelineName)
  resp_output <- .iotanalytics$describe_pipeline_output()
  # Build a service client from the active configuration and dispatch.
  client <- .iotanalytics$service(get_config())
  send_request(new_request(client, operation, req_input, resp_output))
}
.iotanalytics$operations$describe_pipeline <- iotanalytics_describe_pipeline
#' Retrieves the contents of a data set as pre-signed URIs
#'
#' Retrieves the contents of a data set as pre-signed URIs.
#'
#' @usage
#' iotanalytics_get_dataset_content(datasetName, versionId)
#'
#' @param datasetName [required] The name of the data set whose contents are retrieved.
#' @param versionId The version of the data set whose contents are retrieved. You can also
#' use the strings \"\\$LATEST\" or \"\\$LATEST\\_SUCCEEDED\" to retrieve the
#' contents of the latest or latest successfully completed data set. If not
#' specified, \"\\$LATEST\\_SUCCEEDED\" is the default.
#'
#' @section Request syntax:
#' ```
#' svc$get_dataset_content(
#' datasetName = "string",
#' versionId = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_get_dataset_content
iotanalytics_get_dataset_content <- function(datasetName, versionId = NULL) {
  # Describe the GetDatasetContent REST operation.
  operation <- new_operation(
    name = "GetDatasetContent",
    http_method = "GET",
    http_path = "/datasets/{datasetName}/content",
    paginator = list()
  )
  # Marshal arguments into the request input and prepare the output shape.
  req_input <- .iotanalytics$get_dataset_content_input(datasetName = datasetName, versionId = versionId)
  resp_output <- .iotanalytics$get_dataset_content_output()
  # Build a service client from the active configuration and dispatch.
  client <- .iotanalytics$service(get_config())
  send_request(new_request(client, operation, req_input, resp_output))
}
.iotanalytics$operations$get_dataset_content <- iotanalytics_get_dataset_content
#' Retrieves a list of channels
#'
#' Retrieves a list of channels.
#'
#' @usage
#' iotanalytics_list_channels(nextToken, maxResults)
#'
#' @param nextToken The token for the next set of results.
#' @param maxResults The maximum number of results to return in this request.
#'
#' The default value is 100.
#'
#' @section Request syntax:
#' ```
#' svc$list_channels(
#' nextToken = "string",
#' maxResults = 123
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_list_channels
iotanalytics_list_channels <- function(nextToken = NULL, maxResults = NULL) {
  # Describe the ListChannels REST operation.
  operation <- new_operation(
    name = "ListChannels",
    http_method = "GET",
    http_path = "/channels",
    paginator = list()
  )
  # Marshal arguments into the request input and prepare the output shape.
  req_input <- .iotanalytics$list_channels_input(nextToken = nextToken, maxResults = maxResults)
  resp_output <- .iotanalytics$list_channels_output()
  # Build a service client from the active configuration and dispatch.
  client <- .iotanalytics$service(get_config())
  send_request(new_request(client, operation, req_input, resp_output))
}
.iotanalytics$operations$list_channels <- iotanalytics_list_channels
#' Lists information about data set contents that have been created
#'
#' Lists information about data set contents that have been created.
#'
#' @usage
#' iotanalytics_list_dataset_contents(datasetName, nextToken, maxResults,
#' scheduledOnOrAfter, scheduledBefore)
#'
#' @param datasetName [required] The name of the data set whose contents information you want to list.
#' @param nextToken The token for the next set of results.
#' @param maxResults The maximum number of results to return in this request.
#' @param scheduledOnOrAfter A filter to limit results to those data set contents whose creation is
#' scheduled on or after the given time. See the field `triggers.schedule`
#' in the CreateDataset request. (timestamp)
#' @param scheduledBefore A filter to limit results to those data set contents whose creation is
#' scheduled before the given time. See the field `triggers.schedule` in
#' the CreateDataset request. (timestamp)
#'
#' @section Request syntax:
#' ```
#' svc$list_dataset_contents(
#' datasetName = "string",
#' nextToken = "string",
#' maxResults = 123,
#' scheduledOnOrAfter = as.POSIXct(
#' "2015-01-01"
#' ),
#' scheduledBefore = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_list_dataset_contents
iotanalytics_list_dataset_contents <- function(datasetName, nextToken = NULL, maxResults = NULL, scheduledOnOrAfter = NULL, scheduledBefore = NULL) {
  # Describe the ListDatasetContents REST operation.
  operation <- new_operation(
    name = "ListDatasetContents",
    http_method = "GET",
    http_path = "/datasets/{datasetName}/contents",
    paginator = list()
  )
  # Marshal arguments into the request input and prepare the output shape.
  req_input <- .iotanalytics$list_dataset_contents_input(datasetName = datasetName, nextToken = nextToken, maxResults = maxResults, scheduledOnOrAfter = scheduledOnOrAfter, scheduledBefore = scheduledBefore)
  resp_output <- .iotanalytics$list_dataset_contents_output()
  # Build a service client from the active configuration and dispatch.
  client <- .iotanalytics$service(get_config())
  send_request(new_request(client, operation, req_input, resp_output))
}
.iotanalytics$operations$list_dataset_contents <- iotanalytics_list_dataset_contents
#' Retrieves information about data sets
#'
#' Retrieves information about data sets.
#'
#' @usage
#' iotanalytics_list_datasets(nextToken, maxResults)
#'
#' @param nextToken The token for the next set of results.
#' @param maxResults The maximum number of results to return in this request.
#'
#' The default value is 100.
#'
#' @section Request syntax:
#' ```
#' svc$list_datasets(
#' nextToken = "string",
#' maxResults = 123
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_list_datasets
iotanalytics_list_datasets <- function(nextToken = NULL, maxResults = NULL) {
  # Describe the ListDatasets REST operation.
  operation <- new_operation(
    name = "ListDatasets",
    http_method = "GET",
    http_path = "/datasets",
    paginator = list()
  )
  # Marshal arguments into the request input and prepare the output shape.
  req_input <- .iotanalytics$list_datasets_input(nextToken = nextToken, maxResults = maxResults)
  resp_output <- .iotanalytics$list_datasets_output()
  # Build a service client from the active configuration and dispatch.
  client <- .iotanalytics$service(get_config())
  send_request(new_request(client, operation, req_input, resp_output))
}
.iotanalytics$operations$list_datasets <- iotanalytics_list_datasets
#' Retrieves a list of data stores
#'
#' Retrieves a list of data stores.
#'
#' @usage
#' iotanalytics_list_datastores(nextToken, maxResults)
#'
#' @param nextToken The token for the next set of results.
#' @param maxResults The maximum number of results to return in this request.
#'
#' The default value is 100.
#'
#' @section Request syntax:
#' ```
#' svc$list_datastores(
#' nextToken = "string",
#' maxResults = 123
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_list_datastores
iotanalytics_list_datastores <- function(nextToken = NULL, maxResults = NULL) {
  # Describe the ListDatastores REST operation.
  operation <- new_operation(
    name = "ListDatastores",
    http_method = "GET",
    http_path = "/datastores",
    paginator = list()
  )
  # Marshal arguments into the request input and prepare the output shape.
  req_input <- .iotanalytics$list_datastores_input(nextToken = nextToken, maxResults = maxResults)
  resp_output <- .iotanalytics$list_datastores_output()
  # Build a service client from the active configuration and dispatch.
  client <- .iotanalytics$service(get_config())
  send_request(new_request(client, operation, req_input, resp_output))
}
.iotanalytics$operations$list_datastores <- iotanalytics_list_datastores
#' Retrieves a list of pipelines
#'
#' Retrieves a list of pipelines.
#'
#' @usage
#' iotanalytics_list_pipelines(nextToken, maxResults)
#'
#' @param nextToken The token for the next set of results.
#' @param maxResults The maximum number of results to return in this request.
#'
#' The default value is 100.
#'
#' @section Request syntax:
#' ```
#' svc$list_pipelines(
#' nextToken = "string",
#' maxResults = 123
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_list_pipelines
iotanalytics_list_pipelines <- function(nextToken = NULL, maxResults = NULL) {
  # Describe the ListPipelines REST operation.
  operation <- new_operation(
    name = "ListPipelines",
    http_method = "GET",
    http_path = "/pipelines",
    paginator = list()
  )
  # Marshal arguments into the request input and prepare the output shape.
  req_input <- .iotanalytics$list_pipelines_input(nextToken = nextToken, maxResults = maxResults)
  resp_output <- .iotanalytics$list_pipelines_output()
  # Build a service client from the active configuration and dispatch.
  client <- .iotanalytics$service(get_config())
  send_request(new_request(client, operation, req_input, resp_output))
}
.iotanalytics$operations$list_pipelines <- iotanalytics_list_pipelines
#' Lists the tags (metadata) which you have assigned to the resource
#'
#' Lists the tags (metadata) which you have assigned to the resource.
#'
#' @usage
#' iotanalytics_list_tags_for_resource(resourceArn)
#'
#' @param resourceArn [required] The ARN of the resource whose tags you want to list.
#'
#' @section Request syntax:
#' ```
#' svc$list_tags_for_resource(
#' resourceArn = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_list_tags_for_resource
iotanalytics_list_tags_for_resource <- function(resourceArn) {
  # Describe the ListTagsForResource REST operation.
  operation <- new_operation(
    name = "ListTagsForResource",
    http_method = "GET",
    http_path = "/tags",
    paginator = list()
  )
  # Marshal arguments into the request input and prepare the output shape.
  req_input <- .iotanalytics$list_tags_for_resource_input(resourceArn = resourceArn)
  resp_output <- .iotanalytics$list_tags_for_resource_output()
  # Build a service client from the active configuration and dispatch.
  client <- .iotanalytics$service(get_config())
  send_request(new_request(client, operation, req_input, resp_output))
}
.iotanalytics$operations$list_tags_for_resource <- iotanalytics_list_tags_for_resource
#' Sets or updates the AWS IoT Analytics logging options
#'
#' Sets or updates the AWS IoT Analytics logging options.
#'
#' Note that if you update the value of any `loggingOptions` field, it
#' takes up to one minute for the change to take effect. Also, if you
#' change the policy attached to the role you specified in the roleArn
#' field (for example, to correct an invalid policy) it takes up to 5
#' minutes for that change to take effect.
#'
#' @usage
#' iotanalytics_put_logging_options(loggingOptions)
#'
#' @param loggingOptions [required] The new values of the AWS IoT Analytics logging options.
#'
#' @section Request syntax:
#' ```
#' svc$put_logging_options(
#' loggingOptions = list(
#' roleArn = "string",
#' level = "ERROR",
#' enabled = TRUE|FALSE
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_put_logging_options
iotanalytics_put_logging_options <- function(loggingOptions) {
  # Describe the PutLoggingOptions REST operation.
  operation <- new_operation(
    name = "PutLoggingOptions",
    http_method = "PUT",
    http_path = "/logging",
    paginator = list()
  )
  # Marshal arguments into the request input and prepare the output shape.
  req_input <- .iotanalytics$put_logging_options_input(loggingOptions = loggingOptions)
  resp_output <- .iotanalytics$put_logging_options_output()
  # Build a service client from the active configuration and dispatch.
  client <- .iotanalytics$service(get_config())
  send_request(new_request(client, operation, req_input, resp_output))
}
.iotanalytics$operations$put_logging_options <- iotanalytics_put_logging_options
#' Simulates the results of running a pipeline activity on a message
#' payload
#'
#' Simulates the results of running a pipeline activity on a message
#' payload.
#'
#' @usage
#' iotanalytics_run_pipeline_activity(pipelineActivity, payloads)
#'
#' @param pipelineActivity [required] The pipeline activity that is run. This must not be a \'channel\'
#' activity or a \'datastore\' activity because these activities are used
#' in a pipeline only to load the original message and to store the
#' (possibly) transformed message. If a \'lambda\' activity is specified,
#' only short-running Lambda functions (those with a timeout of 30 seconds
#' or less) can be used.
#' @param payloads [required] The sample message payloads on which the pipeline activity is run.
#'
#' @section Request syntax:
#' ```
#' svc$run_pipeline_activity(
#' pipelineActivity = list(
#' channel = list(
#' name = "string",
#' channelName = "string",
#' next = "string"
#' ),
#' lambda = list(
#' name = "string",
#' lambdaName = "string",
#' batchSize = 123,
#' next = "string"
#' ),
#' datastore = list(
#' name = "string",
#' datastoreName = "string"
#' ),
#' addAttributes = list(
#' name = "string",
#' attributes = list(
#' "string"
#' ),
#' next = "string"
#' ),
#' removeAttributes = list(
#' name = "string",
#' attributes = list(
#' "string"
#' ),
#' next = "string"
#' ),
#' selectAttributes = list(
#' name = "string",
#' attributes = list(
#' "string"
#' ),
#' next = "string"
#' ),
#' filter = list(
#' name = "string",
#' filter = "string",
#' next = "string"
#' ),
#' math = list(
#' name = "string",
#' attribute = "string",
#' math = "string",
#' next = "string"
#' ),
#' deviceRegistryEnrich = list(
#' name = "string",
#' attribute = "string",
#' thingName = "string",
#' roleArn = "string",
#' next = "string"
#' ),
#' deviceShadowEnrich = list(
#' name = "string",
#' attribute = "string",
#' thingName = "string",
#' roleArn = "string",
#' next = "string"
#' )
#' ),
#' payloads = list(
#' raw
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_run_pipeline_activity
iotanalytics_run_pipeline_activity <- function(pipelineActivity, payloads) {
  # Describe the RunPipelineActivity REST operation.
  operation <- new_operation(
    name = "RunPipelineActivity",
    http_method = "POST",
    http_path = "/pipelineactivities/run",
    paginator = list()
  )
  # Marshal arguments into the request input and prepare the output shape.
  req_input <- .iotanalytics$run_pipeline_activity_input(pipelineActivity = pipelineActivity, payloads = payloads)
  resp_output <- .iotanalytics$run_pipeline_activity_output()
  # Build a service client from the active configuration and dispatch.
  client <- .iotanalytics$service(get_config())
  send_request(new_request(client, operation, req_input, resp_output))
}
.iotanalytics$operations$run_pipeline_activity <- iotanalytics_run_pipeline_activity
#' Retrieves a sample of messages from the specified channel ingested
#' during the specified timeframe
#'
#' Retrieves a sample of messages from the specified channel ingested
#' during the specified timeframe. Up to 10 messages can be retrieved.
#'
#' @usage
#' iotanalytics_sample_channel_data(channelName, maxMessages, startTime,
#' endTime)
#'
#' @param channelName [required] The name of the channel whose message samples are retrieved.
#' @param maxMessages The number of sample messages to be retrieved. The limit is 10, the
#' default is also 10.
#' @param startTime The start of the time window from which sample messages are retrieved.
#' @param endTime The end of the time window from which sample messages are retrieved.
#'
#' @section Request syntax:
#' ```
#' svc$sample_channel_data(
#' channelName = "string",
#' maxMessages = 123,
#' startTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' endTime = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_sample_channel_data
iotanalytics_sample_channel_data <- function(channelName, maxMessages = NULL, startTime = NULL, endTime = NULL) {
  # Describe the SampleChannelData REST operation.
  operation <- new_operation(
    name = "SampleChannelData",
    http_method = "GET",
    http_path = "/channels/{channelName}/sample",
    paginator = list()
  )
  # Marshal arguments into the request input and prepare the output shape.
  req_input <- .iotanalytics$sample_channel_data_input(channelName = channelName, maxMessages = maxMessages, startTime = startTime, endTime = endTime)
  resp_output <- .iotanalytics$sample_channel_data_output()
  # Build a service client from the active configuration and dispatch.
  client <- .iotanalytics$service(get_config())
  send_request(new_request(client, operation, req_input, resp_output))
}
.iotanalytics$operations$sample_channel_data <- iotanalytics_sample_channel_data
#' Starts the reprocessing of raw message data through the pipeline
#'
#' Starts the reprocessing of raw message data through the pipeline.
#'
#' @usage
#' iotanalytics_start_pipeline_reprocessing(pipelineName, startTime,
#' endTime)
#'
#' @param pipelineName [required] The name of the pipeline on which to start reprocessing.
#' @param startTime The start time (inclusive) of raw message data that is reprocessed.
#' @param endTime The end time (exclusive) of raw message data that is reprocessed.
#'
#' @section Request syntax:
#' ```
#' svc$start_pipeline_reprocessing(
#' pipelineName = "string",
#' startTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' endTime = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_start_pipeline_reprocessing
iotanalytics_start_pipeline_reprocessing <- function(pipelineName, startTime = NULL, endTime = NULL) {
  # Start reprocessing of raw message data via POST /pipelines/{pipelineName}/reprocessing.
  operation <- new_operation(
    name = "StartPipelineReprocessing",
    http_method = "POST",
    http_path = "/pipelines/{pipelineName}/reprocessing",
    paginator = list()
  )
  # Marshal arguments into the request shape; the output shape drives
  # deserialization of the service response.
  req_params <- .iotanalytics$start_pipeline_reprocessing_input(pipelineName = pipelineName, startTime = startTime, endTime = endTime)
  resp_shape <- .iotanalytics$start_pipeline_reprocessing_output()
  # Build the service client from the active configuration and issue the call.
  svc <- .iotanalytics$service(get_config())
  send_request(new_request(svc, operation, req_params, resp_shape))
}
.iotanalytics$operations$start_pipeline_reprocessing <- iotanalytics_start_pipeline_reprocessing
#' Adds to or modifies the tags of the given resource
#'
#' Adds to or modifies the tags of the given resource. Tags are metadata
#' which can be used to manage a resource.
#'
#' @usage
#' iotanalytics_tag_resource(resourceArn, tags)
#'
#' @param resourceArn [required] The ARN of the resource whose tags you want to modify.
#' @param tags [required] The new or modified tags for the resource.
#'
#' @section Request syntax:
#' ```
#' svc$tag_resource(
#' resourceArn = "string",
#' tags = list(
#' list(
#' key = "string",
#' value = "string"
#' )
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_tag_resource
iotanalytics_tag_resource <- function(resourceArn, tags) {
  # Add or modify tags on a resource via POST /tags.
  operation <- new_operation(
    name = "TagResource",
    http_method = "POST",
    http_path = "/tags",
    paginator = list()
  )
  # Marshal arguments into the request shape; the output shape drives
  # deserialization of the service response.
  req_params <- .iotanalytics$tag_resource_input(resourceArn = resourceArn, tags = tags)
  resp_shape <- .iotanalytics$tag_resource_output()
  # Build the service client from the active configuration and issue the call.
  svc <- .iotanalytics$service(get_config())
  send_request(new_request(svc, operation, req_params, resp_shape))
}
.iotanalytics$operations$tag_resource <- iotanalytics_tag_resource
#' Removes the given tags (metadata) from the resource
#'
#' Removes the given tags (metadata) from the resource.
#'
#' @usage
#' iotanalytics_untag_resource(resourceArn, tagKeys)
#'
#' @param resourceArn [required] The ARN of the resource whose tags you want to remove.
#' @param tagKeys [required] The keys of those tags which you want to remove.
#'
#' @section Request syntax:
#' ```
#' svc$untag_resource(
#' resourceArn = "string",
#' tagKeys = list(
#' "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_untag_resource
iotanalytics_untag_resource <- function(resourceArn, tagKeys) {
  # Remove the given tags from a resource via DELETE /tags.
  operation <- new_operation(
    name = "UntagResource",
    http_method = "DELETE",
    http_path = "/tags",
    paginator = list()
  )
  # Marshal arguments into the request shape; the output shape drives
  # deserialization of the service response.
  req_params <- .iotanalytics$untag_resource_input(resourceArn = resourceArn, tagKeys = tagKeys)
  resp_shape <- .iotanalytics$untag_resource_output()
  # Build the service client from the active configuration and issue the call.
  svc <- .iotanalytics$service(get_config())
  send_request(new_request(svc, operation, req_params, resp_shape))
}
.iotanalytics$operations$untag_resource <- iotanalytics_untag_resource
#' Updates the settings of a channel
#'
#' Updates the settings of a channel.
#'
#' @usage
#' iotanalytics_update_channel(channelName, channelStorage,
#' retentionPeriod)
#'
#' @param channelName [required] The name of the channel to be updated.
#' @param channelStorage Where channel data is stored. You may choose one of \"serviceManagedS3\"
#' or \"customerManagedS3\" storage. If not specified, the default is
#' \"serviceManagedS3\". This cannot be changed after creation of the
#' channel.
#' @param retentionPeriod How long, in days, message data is kept for the channel. The retention
#' period cannot be updated if the channel\'s S3 storage is
#' customer-managed.
#'
#' @section Request syntax:
#' ```
#' svc$update_channel(
#' channelName = "string",
#' channelStorage = list(
#' serviceManagedS3 = list(),
#' customerManagedS3 = list(
#' bucket = "string",
#' keyPrefix = "string",
#' roleArn = "string"
#' )
#' ),
#' retentionPeriod = list(
#' unlimited = TRUE|FALSE,
#' numberOfDays = 123
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_update_channel
iotanalytics_update_channel <- function(channelName, channelStorage = NULL, retentionPeriod = NULL) {
  # Update channel settings via PUT /channels/{channelName}.
  operation <- new_operation(
    name = "UpdateChannel",
    http_method = "PUT",
    http_path = "/channels/{channelName}",
    paginator = list()
  )
  # Marshal arguments into the request shape; the output shape drives
  # deserialization of the service response.
  req_params <- .iotanalytics$update_channel_input(channelName = channelName, channelStorage = channelStorage, retentionPeriod = retentionPeriod)
  resp_shape <- .iotanalytics$update_channel_output()
  # Build the service client from the active configuration and issue the call.
  svc <- .iotanalytics$service(get_config())
  send_request(new_request(svc, operation, req_params, resp_shape))
}
.iotanalytics$operations$update_channel <- iotanalytics_update_channel
#' Updates the settings of a data set
#'
#' Updates the settings of a data set.
#'
#' @usage
#' iotanalytics_update_dataset(datasetName, actions, triggers,
#' contentDeliveryRules, retentionPeriod, versioningConfiguration)
#'
#' @param datasetName [required] The name of the data set to update.
#' @param actions [required] A list of \"DatasetAction\" objects.
#' @param triggers A list of \"DatasetTrigger\" objects. The list can be empty or can
#' contain up to five **DataSetTrigger** objects.
#' @param contentDeliveryRules When data set contents are created they are delivered to destinations
#' specified here.
#' @param retentionPeriod How long, in days, data set contents are kept for the data set.
#' @param versioningConfiguration \[Optional\] How many versions of data set contents are kept. If not
#' specified or set to null, only the latest version plus the latest
#' succeeded version (if they are different) are kept for the time period
#' specified by the \"retentionPeriod\" parameter. (For more information,
#' see
#' https://docs.aws.amazon.com/iotanalytics/latest/userguide/getting-started.html\\#aws-iot-analytics-dataset-versions)
#'
#' @section Request syntax:
#' ```
#' svc$update_dataset(
#' datasetName = "string",
#' actions = list(
#' list(
#' actionName = "string",
#' queryAction = list(
#' sqlQuery = "string",
#' filters = list(
#' list(
#' deltaTime = list(
#' offsetSeconds = 123,
#' timeExpression = "string"
#' )
#' )
#' )
#' ),
#' containerAction = list(
#' image = "string",
#' executionRoleArn = "string",
#' resourceConfiguration = list(
#' computeType = "ACU_1"|"ACU_2",
#' volumeSizeInGB = 123
#' ),
#' variables = list(
#' list(
#' name = "string",
#' stringValue = "string",
#' doubleValue = 123.0,
#' datasetContentVersionValue = list(
#' datasetName = "string"
#' ),
#' outputFileUriValue = list(
#' fileName = "string"
#' )
#' )
#' )
#' )
#' )
#' ),
#' triggers = list(
#' list(
#' schedule = list(
#' expression = "string"
#' ),
#' dataset = list(
#' name = "string"
#' )
#' )
#' ),
#' contentDeliveryRules = list(
#' list(
#' entryName = "string",
#' destination = list(
#' iotEventsDestinationConfiguration = list(
#' inputName = "string",
#' roleArn = "string"
#' ),
#' s3DestinationConfiguration = list(
#' bucket = "string",
#' key = "string",
#' glueConfiguration = list(
#' tableName = "string",
#' databaseName = "string"
#' ),
#' roleArn = "string"
#' )
#' )
#' )
#' ),
#' retentionPeriod = list(
#' unlimited = TRUE|FALSE,
#' numberOfDays = 123
#' ),
#' versioningConfiguration = list(
#' unlimited = TRUE|FALSE,
#' maxVersions = 123
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_update_dataset
iotanalytics_update_dataset <- function(datasetName, actions, triggers = NULL, contentDeliveryRules = NULL, retentionPeriod = NULL, versioningConfiguration = NULL) {
  # Update data set settings via PUT /datasets/{datasetName}.
  operation <- new_operation(
    name = "UpdateDataset",
    http_method = "PUT",
    http_path = "/datasets/{datasetName}",
    paginator = list()
  )
  # Marshal arguments into the request shape; the output shape drives
  # deserialization of the service response.
  req_params <- .iotanalytics$update_dataset_input(datasetName = datasetName, actions = actions, triggers = triggers, contentDeliveryRules = contentDeliveryRules, retentionPeriod = retentionPeriod, versioningConfiguration = versioningConfiguration)
  resp_shape <- .iotanalytics$update_dataset_output()
  # Build the service client from the active configuration and issue the call.
  svc <- .iotanalytics$service(get_config())
  send_request(new_request(svc, operation, req_params, resp_shape))
}
.iotanalytics$operations$update_dataset <- iotanalytics_update_dataset
#' Updates the settings of a data store
#'
#' Updates the settings of a data store.
#'
#' @usage
#' iotanalytics_update_datastore(datastoreName, retentionPeriod,
#' datastoreStorage)
#'
#' @param datastoreName [required] The name of the data store to be updated.
#' @param retentionPeriod How long, in days, message data is kept for the data store. The
#' retention period cannot be updated if the data store\'s S3 storage is
#' customer-managed.
#' @param datastoreStorage Where data store data is stored. You may choose one of
#' \"serviceManagedS3\" or \"customerManagedS3\" storage. If not specified,
#' the default is \"serviceManagedS3\". This cannot be changed after the
#' data store is created.
#'
#' @section Request syntax:
#' ```
#' svc$update_datastore(
#' datastoreName = "string",
#' retentionPeriod = list(
#' unlimited = TRUE|FALSE,
#' numberOfDays = 123
#' ),
#' datastoreStorage = list(
#' serviceManagedS3 = list(),
#' customerManagedS3 = list(
#' bucket = "string",
#' keyPrefix = "string",
#' roleArn = "string"
#' )
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_update_datastore
iotanalytics_update_datastore <- function(datastoreName, retentionPeriod = NULL, datastoreStorage = NULL) {
  # Update data store settings via PUT /datastores/{datastoreName}.
  operation <- new_operation(
    name = "UpdateDatastore",
    http_method = "PUT",
    http_path = "/datastores/{datastoreName}",
    paginator = list()
  )
  # Marshal arguments into the request shape; the output shape drives
  # deserialization of the service response.
  req_params <- .iotanalytics$update_datastore_input(datastoreName = datastoreName, retentionPeriod = retentionPeriod, datastoreStorage = datastoreStorage)
  resp_shape <- .iotanalytics$update_datastore_output()
  # Build the service client from the active configuration and issue the call.
  svc <- .iotanalytics$service(get_config())
  send_request(new_request(svc, operation, req_params, resp_shape))
}
.iotanalytics$operations$update_datastore <- iotanalytics_update_datastore
#' Updates the settings of a pipeline
#'
#' Updates the settings of a pipeline. You must specify both a `channel`
#' and a `datastore` activity and, optionally, as many as 23 additional
#' activities in the `pipelineActivities` array.
#'
#' @usage
#' iotanalytics_update_pipeline(pipelineName, pipelineActivities)
#'
#' @param pipelineName [required] The name of the pipeline to update.
#' @param pipelineActivities [required] A list of \"PipelineActivity\" objects. Activities perform
#' transformations on your messages, such as removing, renaming or adding
#' message attributes; filtering messages based on attribute values;
#' invoking your Lambda functions on messages for advanced processing; or
#' performing mathematical transformations to normalize device data.
#'
#' The list can be 2-25 **PipelineActivity** objects and must contain both
#' a `channel` and a `datastore` activity. Each entry in the list must
#' contain only one activity, for example:
#'
#' `pipelineActivities = \\[ \{ "channel": \{ ... \} \}, \{ "lambda": \{ ... \} \}, ... \\]`
#'
#' @section Request syntax:
#' ```
#' svc$update_pipeline(
#' pipelineName = "string",
#' pipelineActivities = list(
#' list(
#' channel = list(
#' name = "string",
#' channelName = "string",
#' next = "string"
#' ),
#' lambda = list(
#' name = "string",
#' lambdaName = "string",
#' batchSize = 123,
#' next = "string"
#' ),
#' datastore = list(
#' name = "string",
#' datastoreName = "string"
#' ),
#' addAttributes = list(
#' name = "string",
#' attributes = list(
#' "string"
#' ),
#' next = "string"
#' ),
#' removeAttributes = list(
#' name = "string",
#' attributes = list(
#' "string"
#' ),
#' next = "string"
#' ),
#' selectAttributes = list(
#' name = "string",
#' attributes = list(
#' "string"
#' ),
#' next = "string"
#' ),
#' filter = list(
#' name = "string",
#' filter = "string",
#' next = "string"
#' ),
#' math = list(
#' name = "string",
#' attribute = "string",
#' math = "string",
#' next = "string"
#' ),
#' deviceRegistryEnrich = list(
#' name = "string",
#' attribute = "string",
#' thingName = "string",
#' roleArn = "string",
#' next = "string"
#' ),
#' deviceShadowEnrich = list(
#' name = "string",
#' attribute = "string",
#' thingName = "string",
#' roleArn = "string",
#' next = "string"
#' )
#' )
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_update_pipeline
iotanalytics_update_pipeline <- function(pipelineName, pipelineActivities) {
  # Update pipeline settings via PUT /pipelines/{pipelineName}.
  operation <- new_operation(
    name = "UpdatePipeline",
    http_method = "PUT",
    http_path = "/pipelines/{pipelineName}",
    paginator = list()
  )
  # Marshal arguments into the request shape; the output shape drives
  # deserialization of the service response.
  req_params <- .iotanalytics$update_pipeline_input(pipelineName = pipelineName, pipelineActivities = pipelineActivities)
  resp_shape <- .iotanalytics$update_pipeline_output()
  # Build the service client from the active configuration and issue the call.
  svc <- .iotanalytics$service(get_config())
  send_request(new_request(svc, operation, req_params, resp_shape))
}
.iotanalytics$operations$update_pipeline <- iotanalytics_update_pipeline
| /cran/paws.internet.of.things/R/iotanalytics_operations.R | permissive | johnnytommy/paws | R | false | false | 59,690 | r | # This file is generated by make.paws. Please do not edit here.
#' @importFrom paws.common get_config new_operation new_request send_request
#' @include iotanalytics_service.R
NULL
#' Sends messages to a channel
#'
#' Sends messages to a channel.
#'
#' @usage
#' iotanalytics_batch_put_message(channelName, messages)
#'
#' @param channelName [required] The name of the channel where the messages are sent.
#' @param messages [required] The list of messages to be sent. Each message has format: \'\{
#' \"messageId\": \"string\", \"payload\": \"string\"\}\'.
#'
#' Note that the field names of message payloads (data) that you send to
#' AWS IoT Analytics:
#'
#' - Must contain only alphanumeric characters and undescores (\\_); no
#' other special characters are allowed.
#'
#' - Must begin with an alphabetic character or single underscore (\\_).
#'
#' - Cannot contain hyphens (-).
#'
#' - In regular expression terms:
#' \"\\^\[A-Za-z\\_\](\[A-Za-z0-9\]*\\|\[A-Za-z0-9\]\[A-Za-z0-9\\_\]*)\\$\".
#'
#' - Cannot be greater than 255 characters.
#'
#' - Are case-insensitive. (Fields named \"foo\" and \"FOO\" in the same
#' payload are considered duplicates.)
#'
#' For example, \{\"temp\\_01\": 29\} or \{\"\\_temp\\_01\": 29\} are valid, but
#' \{\"temp-01\": 29\}, \{\"01\\_temp\": 29\} or \{\"\\_\\_temp\\_01\": 29\} are
#' invalid in message payloads.
#'
#' @section Request syntax:
#' ```
#' svc$batch_put_message(
#' channelName = "string",
#' messages = list(
#' list(
#' messageId = "string",
#' payload = raw
#' )
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_batch_put_message
iotanalytics_batch_put_message <- function(channelName, messages) {
  # Send a batch of messages to a channel via POST /messages/batch.
  operation <- new_operation(
    name = "BatchPutMessage",
    http_method = "POST",
    http_path = "/messages/batch",
    paginator = list()
  )
  # Marshal arguments into the request shape; the output shape drives
  # deserialization of the service response.
  req_params <- .iotanalytics$batch_put_message_input(channelName = channelName, messages = messages)
  resp_shape <- .iotanalytics$batch_put_message_output()
  # Build the service client from the active configuration and issue the call.
  svc <- .iotanalytics$service(get_config())
  send_request(new_request(svc, operation, req_params, resp_shape))
}
.iotanalytics$operations$batch_put_message <- iotanalytics_batch_put_message
#' Cancels the reprocessing of data through the pipeline
#'
#' Cancels the reprocessing of data through the pipeline.
#'
#' @usage
#' iotanalytics_cancel_pipeline_reprocessing(pipelineName, reprocessingId)
#'
#' @param pipelineName [required] The name of pipeline for which data reprocessing is canceled.
#' @param reprocessingId [required] The ID of the reprocessing task (returned by
#' \"StartPipelineReprocessing\").
#'
#' @section Request syntax:
#' ```
#' svc$cancel_pipeline_reprocessing(
#' pipelineName = "string",
#' reprocessingId = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_cancel_pipeline_reprocessing
iotanalytics_cancel_pipeline_reprocessing <- function(pipelineName, reprocessingId) {
  # Cancel a reprocessing task via DELETE /pipelines/{pipelineName}/reprocessing/{reprocessingId}.
  operation <- new_operation(
    name = "CancelPipelineReprocessing",
    http_method = "DELETE",
    http_path = "/pipelines/{pipelineName}/reprocessing/{reprocessingId}",
    paginator = list()
  )
  # Marshal arguments into the request shape; the output shape drives
  # deserialization of the service response.
  req_params <- .iotanalytics$cancel_pipeline_reprocessing_input(pipelineName = pipelineName, reprocessingId = reprocessingId)
  resp_shape <- .iotanalytics$cancel_pipeline_reprocessing_output()
  # Build the service client from the active configuration and issue the call.
  svc <- .iotanalytics$service(get_config())
  send_request(new_request(svc, operation, req_params, resp_shape))
}
.iotanalytics$operations$cancel_pipeline_reprocessing <- iotanalytics_cancel_pipeline_reprocessing
#' Creates a channel
#'
#' Creates a channel. A channel collects data from an MQTT topic and
#' archives the raw, unprocessed messages before publishing the data to a
#' pipeline.
#'
#' @usage
#' iotanalytics_create_channel(channelName, channelStorage,
#' retentionPeriod, tags)
#'
#' @param channelName [required] The name of the channel.
#' @param channelStorage Where channel data is stored. You may choose one of \"serviceManagedS3\"
#' or \"customerManagedS3\" storage. If not specified, the default is
#' \"serviceManagedS3\". This cannot be changed after creation of the
#' channel.
#' @param retentionPeriod How long, in days, message data is kept for the channel. When
#' \"customerManagedS3\" storage is selected, this parameter is ignored.
#' @param tags Metadata which can be used to manage the channel.
#'
#' @section Request syntax:
#' ```
#' svc$create_channel(
#' channelName = "string",
#' channelStorage = list(
#' serviceManagedS3 = list(),
#' customerManagedS3 = list(
#' bucket = "string",
#' keyPrefix = "string",
#' roleArn = "string"
#' )
#' ),
#' retentionPeriod = list(
#' unlimited = TRUE|FALSE,
#' numberOfDays = 123
#' ),
#' tags = list(
#' list(
#' key = "string",
#' value = "string"
#' )
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_create_channel
iotanalytics_create_channel <- function(channelName, channelStorage = NULL, retentionPeriod = NULL, tags = NULL) {
  # Create a channel via POST /channels.
  operation <- new_operation(
    name = "CreateChannel",
    http_method = "POST",
    http_path = "/channels",
    paginator = list()
  )
  # Marshal arguments into the request shape; the output shape drives
  # deserialization of the service response.
  req_params <- .iotanalytics$create_channel_input(channelName = channelName, channelStorage = channelStorage, retentionPeriod = retentionPeriod, tags = tags)
  resp_shape <- .iotanalytics$create_channel_output()
  # Build the service client from the active configuration and issue the call.
  svc <- .iotanalytics$service(get_config())
  send_request(new_request(svc, operation, req_params, resp_shape))
}
.iotanalytics$operations$create_channel <- iotanalytics_create_channel
#' Creates a data set
#'
#' Creates a data set. A data set stores data retrieved from a data store
#' by applying a \"queryAction\" (a SQL query) or a \"containerAction\"
#' (executing a containerized application). This operation creates the
#' skeleton of a data set. The data set can be populated manually by
#' calling \"CreateDatasetContent\" or automatically according to a
#' \"trigger\" you specify.
#'
#' @usage
#' iotanalytics_create_dataset(datasetName, actions, triggers,
#' contentDeliveryRules, retentionPeriod, versioningConfiguration, tags)
#'
#' @param datasetName [required] The name of the data set.
#' @param actions [required] A list of actions that create the data set contents.
#' @param triggers A list of triggers. A trigger causes data set contents to be populated
#' at a specified time interval or when another data set\'s contents are
#' created. The list of triggers can be empty or contain up to five
#' **DataSetTrigger** objects.
#' @param contentDeliveryRules When data set contents are created they are delivered to destinations
#' specified here.
#' @param retentionPeriod \[Optional\] How long, in days, versions of data set contents are kept
#' for the data set. If not specified or set to null, versions of data set
#' contents are retained for at most 90 days. The number of versions of
#' data set contents retained is determined by the
#' `versioningConfiguration` parameter. (For more information, see
#' https://docs.aws.amazon.com/iotanalytics/latest/userguide/getting-started.html\\#aws-iot-analytics-dataset-versions)
#' @param versioningConfiguration \[Optional\] How many versions of data set contents are kept. If not
#' specified or set to null, only the latest version plus the latest
#' succeeded version (if they are different) are kept for the time period
#' specified by the \"retentionPeriod\" parameter. (For more information,
#' see
#' https://docs.aws.amazon.com/iotanalytics/latest/userguide/getting-started.html\\#aws-iot-analytics-dataset-versions)
#' @param tags Metadata which can be used to manage the data set.
#'
#' @section Request syntax:
#' ```
#' svc$create_dataset(
#' datasetName = "string",
#' actions = list(
#' list(
#' actionName = "string",
#' queryAction = list(
#' sqlQuery = "string",
#' filters = list(
#' list(
#' deltaTime = list(
#' offsetSeconds = 123,
#' timeExpression = "string"
#' )
#' )
#' )
#' ),
#' containerAction = list(
#' image = "string",
#' executionRoleArn = "string",
#' resourceConfiguration = list(
#' computeType = "ACU_1"|"ACU_2",
#' volumeSizeInGB = 123
#' ),
#' variables = list(
#' list(
#' name = "string",
#' stringValue = "string",
#' doubleValue = 123.0,
#' datasetContentVersionValue = list(
#' datasetName = "string"
#' ),
#' outputFileUriValue = list(
#' fileName = "string"
#' )
#' )
#' )
#' )
#' )
#' ),
#' triggers = list(
#' list(
#' schedule = list(
#' expression = "string"
#' ),
#' dataset = list(
#' name = "string"
#' )
#' )
#' ),
#' contentDeliveryRules = list(
#' list(
#' entryName = "string",
#' destination = list(
#' iotEventsDestinationConfiguration = list(
#' inputName = "string",
#' roleArn = "string"
#' ),
#' s3DestinationConfiguration = list(
#' bucket = "string",
#' key = "string",
#' glueConfiguration = list(
#' tableName = "string",
#' databaseName = "string"
#' ),
#' roleArn = "string"
#' )
#' )
#' )
#' ),
#' retentionPeriod = list(
#' unlimited = TRUE|FALSE,
#' numberOfDays = 123
#' ),
#' versioningConfiguration = list(
#' unlimited = TRUE|FALSE,
#' maxVersions = 123
#' ),
#' tags = list(
#' list(
#' key = "string",
#' value = "string"
#' )
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_create_dataset
iotanalytics_create_dataset <- function(datasetName, actions, triggers = NULL, contentDeliveryRules = NULL, retentionPeriod = NULL, versioningConfiguration = NULL, tags = NULL) {
  # Create a data set via POST /datasets.
  operation <- new_operation(
    name = "CreateDataset",
    http_method = "POST",
    http_path = "/datasets",
    paginator = list()
  )
  # Marshal arguments into the request shape; the output shape drives
  # deserialization of the service response.
  req_params <- .iotanalytics$create_dataset_input(datasetName = datasetName, actions = actions, triggers = triggers, contentDeliveryRules = contentDeliveryRules, retentionPeriod = retentionPeriod, versioningConfiguration = versioningConfiguration, tags = tags)
  resp_shape <- .iotanalytics$create_dataset_output()
  # Build the service client from the active configuration and issue the call.
  svc <- .iotanalytics$service(get_config())
  send_request(new_request(svc, operation, req_params, resp_shape))
}
.iotanalytics$operations$create_dataset <- iotanalytics_create_dataset
#' Creates the content of a data set by applying a "queryAction" (a SQL
#' query) or a "containerAction" (executing a containerized application)
#'
#' Creates the content of a data set by applying a \"queryAction\" (a SQL
#' query) or a \"containerAction\" (executing a containerized application).
#'
#' @usage
#' iotanalytics_create_dataset_content(datasetName)
#'
#' @param datasetName [required] The name of the data set.
#'
#' @section Request syntax:
#' ```
#' svc$create_dataset_content(
#' datasetName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_create_dataset_content
iotanalytics_create_dataset_content <- function(datasetName) {
  # Trigger creation of data set content via POST /datasets/{datasetName}/content.
  operation <- new_operation(
    name = "CreateDatasetContent",
    http_method = "POST",
    http_path = "/datasets/{datasetName}/content",
    paginator = list()
  )
  # Marshal arguments into the request shape; the output shape drives
  # deserialization of the service response.
  req_params <- .iotanalytics$create_dataset_content_input(datasetName = datasetName)
  resp_shape <- .iotanalytics$create_dataset_content_output()
  # Build the service client from the active configuration and issue the call.
  svc <- .iotanalytics$service(get_config())
  send_request(new_request(svc, operation, req_params, resp_shape))
}
.iotanalytics$operations$create_dataset_content <- iotanalytics_create_dataset_content
#' Creates a data store, which is a repository for messages
#'
#' Creates a data store, which is a repository for messages.
#'
#' @usage
#' iotanalytics_create_datastore(datastoreName, datastoreStorage,
#' retentionPeriod, tags)
#'
#' @param datastoreName [required] The name of the data store.
#' @param datastoreStorage Where data store data is stored. You may choose one of
#' \"serviceManagedS3\" or \"customerManagedS3\" storage. If not specified,
#' the default is \"serviceManagedS3\". This cannot be changed after the
#' data store is created.
#' @param retentionPeriod How long, in days, message data is kept for the data store. When
#' \"customerManagedS3\" storage is selected, this parameter is ignored.
#' @param tags Metadata which can be used to manage the data store.
#'
#' @section Request syntax:
#' ```
#' svc$create_datastore(
#' datastoreName = "string",
#' datastoreStorage = list(
#' serviceManagedS3 = list(),
#' customerManagedS3 = list(
#' bucket = "string",
#' keyPrefix = "string",
#' roleArn = "string"
#' )
#' ),
#' retentionPeriod = list(
#' unlimited = TRUE|FALSE,
#' numberOfDays = 123
#' ),
#' tags = list(
#' list(
#' key = "string",
#' value = "string"
#' )
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_create_datastore
iotanalytics_create_datastore <- function(datastoreName, datastoreStorage = NULL, retentionPeriod = NULL, tags = NULL) {
  # Create a data store via POST /datastores.
  operation <- new_operation(
    name = "CreateDatastore",
    http_method = "POST",
    http_path = "/datastores",
    paginator = list()
  )
  # Marshal arguments into the request shape; the output shape drives
  # deserialization of the service response.
  req_params <- .iotanalytics$create_datastore_input(datastoreName = datastoreName, datastoreStorage = datastoreStorage, retentionPeriod = retentionPeriod, tags = tags)
  resp_shape <- .iotanalytics$create_datastore_output()
  # Build the service client from the active configuration and issue the call.
  svc <- .iotanalytics$service(get_config())
  send_request(new_request(svc, operation, req_params, resp_shape))
}
.iotanalytics$operations$create_datastore <- iotanalytics_create_datastore
#' Creates a pipeline
#'
#' Creates a pipeline. A pipeline consumes messages from a channel and
#' allows you to process the messages before storing them in a data store.
#' You must specify both a `channel` and a `datastore` activity and,
#' optionally, as many as 23 additional activities in the
#' `pipelineActivities` array.
#'
#' @usage
#' iotanalytics_create_pipeline(pipelineName, pipelineActivities, tags)
#'
#' @param pipelineName [required] The name of the pipeline.
#' @param pipelineActivities [required] A list of \"PipelineActivity\" objects. Activities perform
#' transformations on your messages, such as removing, renaming or adding
#' message attributes; filtering messages based on attribute values;
#' invoking your Lambda functions on messages for advanced processing; or
#' performing mathematical transformations to normalize device data.
#'
#' The list can be 2-25 **PipelineActivity** objects and must contain both
#' a `channel` and a `datastore` activity. Each entry in the list must
#' contain only one activity, for example:
#'
#' `pipelineActivities = \\[ \{ "channel": \{ ... \} \}, \{ "lambda": \{ ... \} \}, ... \\]`
#' @param tags Metadata which can be used to manage the pipeline.
#'
#' @section Request syntax:
#' ```
#' svc$create_pipeline(
#' pipelineName = "string",
#' pipelineActivities = list(
#' list(
#' channel = list(
#' name = "string",
#' channelName = "string",
#' next = "string"
#' ),
#' lambda = list(
#' name = "string",
#' lambdaName = "string",
#' batchSize = 123,
#' next = "string"
#' ),
#' datastore = list(
#' name = "string",
#' datastoreName = "string"
#' ),
#' addAttributes = list(
#' name = "string",
#' attributes = list(
#' "string"
#' ),
#' next = "string"
#' ),
#' removeAttributes = list(
#' name = "string",
#' attributes = list(
#' "string"
#' ),
#' next = "string"
#' ),
#' selectAttributes = list(
#' name = "string",
#' attributes = list(
#' "string"
#' ),
#' next = "string"
#' ),
#' filter = list(
#' name = "string",
#' filter = "string",
#' next = "string"
#' ),
#' math = list(
#' name = "string",
#' attribute = "string",
#' math = "string",
#' next = "string"
#' ),
#' deviceRegistryEnrich = list(
#' name = "string",
#' attribute = "string",
#' thingName = "string",
#' roleArn = "string",
#' next = "string"
#' ),
#' deviceShadowEnrich = list(
#' name = "string",
#' attribute = "string",
#' thingName = "string",
#' roleArn = "string",
#' next = "string"
#' )
#' )
#' ),
#' tags = list(
#' list(
#' key = "string",
#' value = "string"
#' )
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_create_pipeline
iotanalytics_create_pipeline <- function(pipelineName, pipelineActivities, tags = NULL) {
  # Create a pipeline via POST /pipelines.
  operation <- new_operation(
    name = "CreatePipeline",
    http_method = "POST",
    http_path = "/pipelines",
    paginator = list()
  )
  # Marshal arguments into the request shape; the output shape drives
  # deserialization of the service response.
  req_params <- .iotanalytics$create_pipeline_input(pipelineName = pipelineName, pipelineActivities = pipelineActivities, tags = tags)
  resp_shape <- .iotanalytics$create_pipeline_output()
  # Build the service client from the active configuration and issue the call.
  svc <- .iotanalytics$service(get_config())
  send_request(new_request(svc, operation, req_params, resp_shape))
}
.iotanalytics$operations$create_pipeline <- iotanalytics_create_pipeline
#' Deletes the specified channel
#'
#' Deletes the specified channel.
#'
#' @usage
#' iotanalytics_delete_channel(channelName)
#'
#' @param channelName [required] The name of the channel to delete.
#'
#' @section Request syntax:
#' ```
#' svc$delete_channel(
#' channelName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_delete_channel
iotanalytics_delete_channel <- function(channelName) {
  # Delete the specified channel via DELETE /channels/{channelName}.
  operation <- new_operation(
    name = "DeleteChannel",
    http_method = "DELETE",
    http_path = "/channels/{channelName}",
    paginator = list()
  )
  # Marshal arguments into the request shape; the output shape drives
  # deserialization of the service response.
  req_params <- .iotanalytics$delete_channel_input(channelName = channelName)
  resp_shape <- .iotanalytics$delete_channel_output()
  # Build the service client from the active configuration and issue the call.
  svc <- .iotanalytics$service(get_config())
  send_request(new_request(svc, operation, req_params, resp_shape))
}
.iotanalytics$operations$delete_channel <- iotanalytics_delete_channel
#' Deletes the specified data set
#'
#' Deletes the specified data set.
#'
#' You do not have to delete the content of the data set before you perform
#' this operation.
#'
#' @usage
#' iotanalytics_delete_dataset(datasetName)
#'
#' @param datasetName [required] The name of the data set to delete.
#'
#' @section Request syntax:
#' ```
#' svc$delete_dataset(
#' datasetName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_delete_dataset
iotanalytics_delete_dataset <- function(datasetName) {
  # Describe the DeleteDataset API call: an HTTP DELETE against the
  # named data set resource.
  operation <- new_operation(
    name = "DeleteDataset",
    http_method = "DELETE",
    http_path = "/datasets/{datasetName}",
    paginator = list()
  )
  # Serialize the caller-supplied arguments and prepare the output shape.
  request_input <- .iotanalytics$delete_dataset_input(datasetName = datasetName)
  request_output <- .iotanalytics$delete_dataset_output()
  # Resolve the service client from the active configuration and send.
  service <- .iotanalytics$service(get_config())
  request <- new_request(service, operation, request_input, request_output)
  response <- send_request(request)
  response
}
.iotanalytics$operations$delete_dataset <- iotanalytics_delete_dataset
#' Deletes the content of the specified data set
#'
#' Deletes the content of the specified data set.
#'
#' @usage
#' iotanalytics_delete_dataset_content(datasetName, versionId)
#'
#' @param datasetName [required] The name of the data set whose content is deleted.
#' @param versionId The version of the data set whose content is deleted. You can also use
#' the strings \"\\$LATEST\" or \"\\$LATEST\\_SUCCEEDED\" to delete the latest
#' or latest successfully completed data set. If not specified,
#' \"\\$LATEST\\_SUCCEEDED\" is the default.
#'
#' @section Request syntax:
#' ```
#' svc$delete_dataset_content(
#' datasetName = "string",
#' versionId = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_delete_dataset_content
iotanalytics_delete_dataset_content <- function(datasetName, versionId = NULL) {
  # Describe the DeleteDatasetContent API call: an HTTP DELETE against
  # the content sub-resource of the named data set.
  operation <- new_operation(
    name = "DeleteDatasetContent",
    http_method = "DELETE",
    http_path = "/datasets/{datasetName}/content",
    paginator = list()
  )
  # Serialize the caller-supplied arguments and prepare the output shape.
  request_input <- .iotanalytics$delete_dataset_content_input(
    datasetName = datasetName,
    versionId = versionId
  )
  request_output <- .iotanalytics$delete_dataset_content_output()
  # Resolve the service client from the active configuration and send.
  service <- .iotanalytics$service(get_config())
  request <- new_request(service, operation, request_input, request_output)
  response <- send_request(request)
  response
}
.iotanalytics$operations$delete_dataset_content <- iotanalytics_delete_dataset_content
#' Deletes the specified data store
#'
#' Deletes the specified data store.
#'
#' @usage
#' iotanalytics_delete_datastore(datastoreName)
#'
#' @param datastoreName [required] The name of the data store to delete.
#'
#' @section Request syntax:
#' ```
#' svc$delete_datastore(
#' datastoreName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_delete_datastore
iotanalytics_delete_datastore <- function(datastoreName) {
  # Describe the DeleteDatastore API call: an HTTP DELETE against the
  # named data store resource.
  operation <- new_operation(
    name = "DeleteDatastore",
    http_method = "DELETE",
    http_path = "/datastores/{datastoreName}",
    paginator = list()
  )
  # Serialize the caller-supplied arguments and prepare the output shape.
  request_input <- .iotanalytics$delete_datastore_input(datastoreName = datastoreName)
  request_output <- .iotanalytics$delete_datastore_output()
  # Resolve the service client from the active configuration and send.
  service <- .iotanalytics$service(get_config())
  request <- new_request(service, operation, request_input, request_output)
  response <- send_request(request)
  response
}
.iotanalytics$operations$delete_datastore <- iotanalytics_delete_datastore
#' Deletes the specified pipeline
#'
#' Deletes the specified pipeline.
#'
#' @usage
#' iotanalytics_delete_pipeline(pipelineName)
#'
#' @param pipelineName [required] The name of the pipeline to delete.
#'
#' @section Request syntax:
#' ```
#' svc$delete_pipeline(
#' pipelineName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_delete_pipeline
iotanalytics_delete_pipeline <- function(pipelineName) {
  # Describe the DeletePipeline API call: an HTTP DELETE against the
  # named pipeline resource.
  operation <- new_operation(
    name = "DeletePipeline",
    http_method = "DELETE",
    http_path = "/pipelines/{pipelineName}",
    paginator = list()
  )
  # Serialize the caller-supplied arguments and prepare the output shape.
  request_input <- .iotanalytics$delete_pipeline_input(pipelineName = pipelineName)
  request_output <- .iotanalytics$delete_pipeline_output()
  # Resolve the service client from the active configuration and send.
  service <- .iotanalytics$service(get_config())
  request <- new_request(service, operation, request_input, request_output)
  response <- send_request(request)
  response
}
.iotanalytics$operations$delete_pipeline <- iotanalytics_delete_pipeline
#' Retrieves information about a channel
#'
#' Retrieves information about a channel.
#'
#' @usage
#' iotanalytics_describe_channel(channelName, includeStatistics)
#'
#' @param channelName [required] The name of the channel whose information is retrieved.
#' @param includeStatistics If true, additional statistical information about the channel is
#' included in the response. This feature cannot be used with a channel
#' whose S3 storage is customer-managed.
#'
#' @section Request syntax:
#' ```
#' svc$describe_channel(
#' channelName = "string",
#' includeStatistics = TRUE|FALSE
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_describe_channel
iotanalytics_describe_channel <- function(channelName, includeStatistics = NULL) {
  # Describe the DescribeChannel API call: an HTTP GET on the named
  # channel resource, optionally including statistics.
  operation <- new_operation(
    name = "DescribeChannel",
    http_method = "GET",
    http_path = "/channels/{channelName}",
    paginator = list()
  )
  # Serialize the caller-supplied arguments and prepare the output shape.
  request_input <- .iotanalytics$describe_channel_input(
    channelName = channelName,
    includeStatistics = includeStatistics
  )
  request_output <- .iotanalytics$describe_channel_output()
  # Resolve the service client from the active configuration and send.
  service <- .iotanalytics$service(get_config())
  request <- new_request(service, operation, request_input, request_output)
  response <- send_request(request)
  response
}
.iotanalytics$operations$describe_channel <- iotanalytics_describe_channel
#' Retrieves information about a data set
#'
#' Retrieves information about a data set.
#'
#' @usage
#' iotanalytics_describe_dataset(datasetName)
#'
#' @param datasetName [required] The name of the data set whose information is retrieved.
#'
#' @section Request syntax:
#' ```
#' svc$describe_dataset(
#' datasetName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_describe_dataset
iotanalytics_describe_dataset <- function(datasetName) {
  # Describe the DescribeDataset API call: an HTTP GET on the named
  # data set resource.
  operation <- new_operation(
    name = "DescribeDataset",
    http_method = "GET",
    http_path = "/datasets/{datasetName}",
    paginator = list()
  )
  # Serialize the caller-supplied arguments and prepare the output shape.
  request_input <- .iotanalytics$describe_dataset_input(datasetName = datasetName)
  request_output <- .iotanalytics$describe_dataset_output()
  # Resolve the service client from the active configuration and send.
  service <- .iotanalytics$service(get_config())
  request <- new_request(service, operation, request_input, request_output)
  response <- send_request(request)
  response
}
.iotanalytics$operations$describe_dataset <- iotanalytics_describe_dataset
#' Retrieves information about a data store
#'
#' Retrieves information about a data store.
#'
#' @usage
#' iotanalytics_describe_datastore(datastoreName, includeStatistics)
#'
#' @param datastoreName [required] The name of the data store
#' @param includeStatistics If true, additional statistical information about the data store is
#' included in the response. This feature cannot be used with a data store
#' whose S3 storage is customer-managed.
#'
#' @section Request syntax:
#' ```
#' svc$describe_datastore(
#' datastoreName = "string",
#' includeStatistics = TRUE|FALSE
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_describe_datastore
iotanalytics_describe_datastore <- function(datastoreName, includeStatistics = NULL) {
  # Describe the DescribeDatastore API call: an HTTP GET on the named
  # data store resource, optionally including statistics.
  operation <- new_operation(
    name = "DescribeDatastore",
    http_method = "GET",
    http_path = "/datastores/{datastoreName}",
    paginator = list()
  )
  # Serialize the caller-supplied arguments and prepare the output shape.
  request_input <- .iotanalytics$describe_datastore_input(
    datastoreName = datastoreName,
    includeStatistics = includeStatistics
  )
  request_output <- .iotanalytics$describe_datastore_output()
  # Resolve the service client from the active configuration and send.
  service <- .iotanalytics$service(get_config())
  request <- new_request(service, operation, request_input, request_output)
  response <- send_request(request)
  response
}
.iotanalytics$operations$describe_datastore <- iotanalytics_describe_datastore
#' Retrieves the current settings of the AWS IoT Analytics logging options
#'
#' Retrieves the current settings of the AWS IoT Analytics logging options.
#'
#' @usage
#' iotanalytics_describe_logging_options()
#'
#' @section Request syntax:
#' ```
#' svc$describe_logging_options()
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_describe_logging_options
iotanalytics_describe_logging_options <- function() {
  # Describe the DescribeLoggingOptions API call: an HTTP GET on the
  # service-wide /logging resource; it takes no arguments.
  operation <- new_operation(
    name = "DescribeLoggingOptions",
    http_method = "GET",
    http_path = "/logging",
    paginator = list()
  )
  # Build the (empty) input and prepare the output shape.
  request_input <- .iotanalytics$describe_logging_options_input()
  request_output <- .iotanalytics$describe_logging_options_output()
  # Resolve the service client from the active configuration and send.
  service <- .iotanalytics$service(get_config())
  request <- new_request(service, operation, request_input, request_output)
  response <- send_request(request)
  response
}
.iotanalytics$operations$describe_logging_options <- iotanalytics_describe_logging_options
#' Retrieves information about a pipeline
#'
#' Retrieves information about a pipeline.
#'
#' @usage
#' iotanalytics_describe_pipeline(pipelineName)
#'
#' @param pipelineName [required] The name of the pipeline whose information is retrieved.
#'
#' @section Request syntax:
#' ```
#' svc$describe_pipeline(
#' pipelineName = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_describe_pipeline
iotanalytics_describe_pipeline <- function(pipelineName) {
  # Describe the DescribePipeline API call: an HTTP GET on the named
  # pipeline resource.
  operation <- new_operation(
    name = "DescribePipeline",
    http_method = "GET",
    http_path = "/pipelines/{pipelineName}",
    paginator = list()
  )
  # Serialize the caller-supplied arguments and prepare the output shape.
  request_input <- .iotanalytics$describe_pipeline_input(pipelineName = pipelineName)
  request_output <- .iotanalytics$describe_pipeline_output()
  # Resolve the service client from the active configuration and send.
  service <- .iotanalytics$service(get_config())
  request <- new_request(service, operation, request_input, request_output)
  response <- send_request(request)
  response
}
.iotanalytics$operations$describe_pipeline <- iotanalytics_describe_pipeline
#' Retrieves the contents of a data set as pre-signed URIs
#'
#' Retrieves the contents of a data set as pre-signed URIs.
#'
#' @usage
#' iotanalytics_get_dataset_content(datasetName, versionId)
#'
#' @param datasetName [required] The name of the data set whose contents are retrieved.
#' @param versionId The version of the data set whose contents are retrieved. You can also
#' use the strings \"\\$LATEST\" or \"\\$LATEST\\_SUCCEEDED\" to retrieve the
#' contents of the latest or latest successfully completed data set. If not
#' specified, \"\\$LATEST\\_SUCCEEDED\" is the default.
#'
#' @section Request syntax:
#' ```
#' svc$get_dataset_content(
#' datasetName = "string",
#' versionId = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_get_dataset_content
iotanalytics_get_dataset_content <- function(datasetName, versionId = NULL) {
  # Describe the GetDatasetContent API call: an HTTP GET on the content
  # sub-resource of the named data set.
  operation <- new_operation(
    name = "GetDatasetContent",
    http_method = "GET",
    http_path = "/datasets/{datasetName}/content",
    paginator = list()
  )
  # Serialize the caller-supplied arguments and prepare the output shape.
  request_input <- .iotanalytics$get_dataset_content_input(
    datasetName = datasetName,
    versionId = versionId
  )
  request_output <- .iotanalytics$get_dataset_content_output()
  # Resolve the service client from the active configuration and send.
  service <- .iotanalytics$service(get_config())
  request <- new_request(service, operation, request_input, request_output)
  response <- send_request(request)
  response
}
.iotanalytics$operations$get_dataset_content <- iotanalytics_get_dataset_content
#' Retrieves a list of channels
#'
#' Retrieves a list of channels.
#'
#' @usage
#' iotanalytics_list_channels(nextToken, maxResults)
#'
#' @param nextToken The token for the next set of results.
#' @param maxResults The maximum number of results to return in this request.
#'
#' The default value is 100.
#'
#' @section Request syntax:
#' ```
#' svc$list_channels(
#' nextToken = "string",
#' maxResults = 123
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_list_channels
iotanalytics_list_channels <- function(nextToken = NULL, maxResults = NULL) {
  # Describe the ListChannels API call: an HTTP GET on the /channels
  # collection with optional pagination arguments.
  operation <- new_operation(
    name = "ListChannels",
    http_method = "GET",
    http_path = "/channels",
    paginator = list()
  )
  # Serialize the caller-supplied arguments and prepare the output shape.
  request_input <- .iotanalytics$list_channels_input(
    nextToken = nextToken,
    maxResults = maxResults
  )
  request_output <- .iotanalytics$list_channels_output()
  # Resolve the service client from the active configuration and send.
  service <- .iotanalytics$service(get_config())
  request <- new_request(service, operation, request_input, request_output)
  response <- send_request(request)
  response
}
.iotanalytics$operations$list_channels <- iotanalytics_list_channels
#' Lists information about data set contents that have been created
#'
#' Lists information about data set contents that have been created.
#'
#' @usage
#' iotanalytics_list_dataset_contents(datasetName, nextToken, maxResults,
#' scheduledOnOrAfter, scheduledBefore)
#'
#' @param datasetName [required] The name of the data set whose contents information you want to list.
#' @param nextToken The token for the next set of results.
#' @param maxResults The maximum number of results to return in this request.
#' @param scheduledOnOrAfter A filter to limit results to those data set contents whose creation is
#' scheduled on or after the given time. See the field `triggers.schedule`
#' in the CreateDataset request. (timestamp)
#' @param scheduledBefore A filter to limit results to those data set contents whose creation is
#' scheduled before the given time. See the field `triggers.schedule` in
#' the CreateDataset request. (timestamp)
#'
#' @section Request syntax:
#' ```
#' svc$list_dataset_contents(
#' datasetName = "string",
#' nextToken = "string",
#' maxResults = 123,
#' scheduledOnOrAfter = as.POSIXct(
#' "2015-01-01"
#' ),
#' scheduledBefore = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_list_dataset_contents
iotanalytics_list_dataset_contents <- function(datasetName, nextToken = NULL, maxResults = NULL, scheduledOnOrAfter = NULL, scheduledBefore = NULL) {
  # Describe the ListDatasetContents API call: an HTTP GET on the
  # contents collection of the named data set, with optional pagination
  # and schedule-time filters.
  operation <- new_operation(
    name = "ListDatasetContents",
    http_method = "GET",
    http_path = "/datasets/{datasetName}/contents",
    paginator = list()
  )
  # Serialize the caller-supplied arguments and prepare the output shape.
  request_input <- .iotanalytics$list_dataset_contents_input(
    datasetName = datasetName,
    nextToken = nextToken,
    maxResults = maxResults,
    scheduledOnOrAfter = scheduledOnOrAfter,
    scheduledBefore = scheduledBefore
  )
  request_output <- .iotanalytics$list_dataset_contents_output()
  # Resolve the service client from the active configuration and send.
  service <- .iotanalytics$service(get_config())
  request <- new_request(service, operation, request_input, request_output)
  response <- send_request(request)
  response
}
.iotanalytics$operations$list_dataset_contents <- iotanalytics_list_dataset_contents
#' Retrieves information about data sets
#'
#' Retrieves information about data sets.
#'
#' @usage
#' iotanalytics_list_datasets(nextToken, maxResults)
#'
#' @param nextToken The token for the next set of results.
#' @param maxResults The maximum number of results to return in this request.
#'
#' The default value is 100.
#'
#' @section Request syntax:
#' ```
#' svc$list_datasets(
#' nextToken = "string",
#' maxResults = 123
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_list_datasets
iotanalytics_list_datasets <- function(nextToken = NULL, maxResults = NULL) {
  # Describe the ListDatasets API call: an HTTP GET on the /datasets
  # collection with optional pagination arguments.
  operation <- new_operation(
    name = "ListDatasets",
    http_method = "GET",
    http_path = "/datasets",
    paginator = list()
  )
  # Serialize the caller-supplied arguments and prepare the output shape.
  request_input <- .iotanalytics$list_datasets_input(
    nextToken = nextToken,
    maxResults = maxResults
  )
  request_output <- .iotanalytics$list_datasets_output()
  # Resolve the service client from the active configuration and send.
  service <- .iotanalytics$service(get_config())
  request <- new_request(service, operation, request_input, request_output)
  response <- send_request(request)
  response
}
.iotanalytics$operations$list_datasets <- iotanalytics_list_datasets
#' Retrieves a list of data stores
#'
#' Retrieves a list of data stores.
#'
#' @usage
#' iotanalytics_list_datastores(nextToken, maxResults)
#'
#' @param nextToken The token for the next set of results.
#' @param maxResults The maximum number of results to return in this request.
#'
#' The default value is 100.
#'
#' @section Request syntax:
#' ```
#' svc$list_datastores(
#' nextToken = "string",
#' maxResults = 123
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_list_datastores
iotanalytics_list_datastores <- function(nextToken = NULL, maxResults = NULL) {
  # Describe the ListDatastores API call: an HTTP GET on the
  # /datastores collection with optional pagination arguments.
  operation <- new_operation(
    name = "ListDatastores",
    http_method = "GET",
    http_path = "/datastores",
    paginator = list()
  )
  # Serialize the caller-supplied arguments and prepare the output shape.
  request_input <- .iotanalytics$list_datastores_input(
    nextToken = nextToken,
    maxResults = maxResults
  )
  request_output <- .iotanalytics$list_datastores_output()
  # Resolve the service client from the active configuration and send.
  service <- .iotanalytics$service(get_config())
  request <- new_request(service, operation, request_input, request_output)
  response <- send_request(request)
  response
}
.iotanalytics$operations$list_datastores <- iotanalytics_list_datastores
#' Retrieves a list of pipelines
#'
#' Retrieves a list of pipelines.
#'
#' @usage
#' iotanalytics_list_pipelines(nextToken, maxResults)
#'
#' @param nextToken The token for the next set of results.
#' @param maxResults The maximum number of results to return in this request.
#'
#' The default value is 100.
#'
#' @section Request syntax:
#' ```
#' svc$list_pipelines(
#' nextToken = "string",
#' maxResults = 123
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_list_pipelines
iotanalytics_list_pipelines <- function(nextToken = NULL, maxResults = NULL) {
  # Describe the ListPipelines API call: an HTTP GET on the /pipelines
  # collection with optional pagination arguments.
  operation <- new_operation(
    name = "ListPipelines",
    http_method = "GET",
    http_path = "/pipelines",
    paginator = list()
  )
  # Serialize the caller-supplied arguments and prepare the output shape.
  request_input <- .iotanalytics$list_pipelines_input(
    nextToken = nextToken,
    maxResults = maxResults
  )
  request_output <- .iotanalytics$list_pipelines_output()
  # Resolve the service client from the active configuration and send.
  service <- .iotanalytics$service(get_config())
  request <- new_request(service, operation, request_input, request_output)
  response <- send_request(request)
  response
}
.iotanalytics$operations$list_pipelines <- iotanalytics_list_pipelines
#' Lists the tags (metadata) which you have assigned to the resource
#'
#' Lists the tags (metadata) which you have assigned to the resource.
#'
#' @usage
#' iotanalytics_list_tags_for_resource(resourceArn)
#'
#' @param resourceArn [required] The ARN of the resource whose tags you want to list.
#'
#' @section Request syntax:
#' ```
#' svc$list_tags_for_resource(
#' resourceArn = "string"
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_list_tags_for_resource
iotanalytics_list_tags_for_resource <- function(resourceArn) {
  # Describe the ListTagsForResource API call: an HTTP GET on /tags
  # filtered by the resource ARN.
  operation <- new_operation(
    name = "ListTagsForResource",
    http_method = "GET",
    http_path = "/tags",
    paginator = list()
  )
  # Serialize the caller-supplied arguments and prepare the output shape.
  request_input <- .iotanalytics$list_tags_for_resource_input(resourceArn = resourceArn)
  request_output <- .iotanalytics$list_tags_for_resource_output()
  # Resolve the service client from the active configuration and send.
  service <- .iotanalytics$service(get_config())
  request <- new_request(service, operation, request_input, request_output)
  response <- send_request(request)
  response
}
.iotanalytics$operations$list_tags_for_resource <- iotanalytics_list_tags_for_resource
#' Sets or updates the AWS IoT Analytics logging options
#'
#' Sets or updates the AWS IoT Analytics logging options.
#'
#' Note that if you update the value of any `loggingOptions` field, it
#' takes up to one minute for the change to take effect. Also, if you
#' change the policy attached to the role you specified in the roleArn
#' field (for example, to correct an invalid policy) it takes up to 5
#' minutes for that change to take effect.
#'
#' @usage
#' iotanalytics_put_logging_options(loggingOptions)
#'
#' @param loggingOptions [required] The new values of the AWS IoT Analytics logging options.
#'
#' @section Request syntax:
#' ```
#' svc$put_logging_options(
#' loggingOptions = list(
#' roleArn = "string",
#' level = "ERROR",
#' enabled = TRUE|FALSE
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_put_logging_options
iotanalytics_put_logging_options <- function(loggingOptions) {
  # Describe the PutLoggingOptions API call: an HTTP PUT on the
  # service-wide /logging resource.
  operation <- new_operation(
    name = "PutLoggingOptions",
    http_method = "PUT",
    http_path = "/logging",
    paginator = list()
  )
  # Serialize the caller-supplied arguments and prepare the output shape.
  request_input <- .iotanalytics$put_logging_options_input(loggingOptions = loggingOptions)
  request_output <- .iotanalytics$put_logging_options_output()
  # Resolve the service client from the active configuration and send.
  service <- .iotanalytics$service(get_config())
  request <- new_request(service, operation, request_input, request_output)
  response <- send_request(request)
  response
}
.iotanalytics$operations$put_logging_options <- iotanalytics_put_logging_options
#' Simulates the results of running a pipeline activity on a message
#' payload
#'
#' Simulates the results of running a pipeline activity on a message
#' payload.
#'
#' @usage
#' iotanalytics_run_pipeline_activity(pipelineActivity, payloads)
#'
#' @param pipelineActivity [required] The pipeline activity that is run. This must not be a \'channel\'
#' activity or a \'datastore\' activity because these activities are used
#' in a pipeline only to load the original message and to store the
#' (possibly) transformed message. If a \'lambda\' activity is specified,
#' only short-running Lambda functions (those with a timeout of less than
#' 30 seconds or less) can be used.
#' @param payloads [required] The sample message payloads on which the pipeline activity is run.
#'
#' @section Request syntax:
#' ```
#' svc$run_pipeline_activity(
#' pipelineActivity = list(
#' channel = list(
#' name = "string",
#' channelName = "string",
#' next = "string"
#' ),
#' lambda = list(
#' name = "string",
#' lambdaName = "string",
#' batchSize = 123,
#' next = "string"
#' ),
#' datastore = list(
#' name = "string",
#' datastoreName = "string"
#' ),
#' addAttributes = list(
#' name = "string",
#' attributes = list(
#' "string"
#' ),
#' next = "string"
#' ),
#' removeAttributes = list(
#' name = "string",
#' attributes = list(
#' "string"
#' ),
#' next = "string"
#' ),
#' selectAttributes = list(
#' name = "string",
#' attributes = list(
#' "string"
#' ),
#' next = "string"
#' ),
#' filter = list(
#' name = "string",
#' filter = "string",
#' next = "string"
#' ),
#' math = list(
#' name = "string",
#' attribute = "string",
#' math = "string",
#' next = "string"
#' ),
#' deviceRegistryEnrich = list(
#' name = "string",
#' attribute = "string",
#' thingName = "string",
#' roleArn = "string",
#' next = "string"
#' ),
#' deviceShadowEnrich = list(
#' name = "string",
#' attribute = "string",
#' thingName = "string",
#' roleArn = "string",
#' next = "string"
#' )
#' ),
#' payloads = list(
#' raw
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_run_pipeline_activity
iotanalytics_run_pipeline_activity <- function(pipelineActivity, payloads) {
  # Describe the RunPipelineActivity API call: an HTTP POST to
  # /pipelineactivities/run that simulates the activity on the given
  # sample payloads.
  operation <- new_operation(
    name = "RunPipelineActivity",
    http_method = "POST",
    http_path = "/pipelineactivities/run",
    paginator = list()
  )
  # Serialize the caller-supplied arguments and prepare the output shape.
  request_input <- .iotanalytics$run_pipeline_activity_input(
    pipelineActivity = pipelineActivity,
    payloads = payloads
  )
  request_output <- .iotanalytics$run_pipeline_activity_output()
  # Resolve the service client from the active configuration and send.
  service <- .iotanalytics$service(get_config())
  request <- new_request(service, operation, request_input, request_output)
  response <- send_request(request)
  response
}
.iotanalytics$operations$run_pipeline_activity <- iotanalytics_run_pipeline_activity
#' Retrieves a sample of messages from the specified channel ingested
#' during the specified timeframe
#'
#' Retrieves a sample of messages from the specified channel ingested
#' during the specified timeframe. Up to 10 messages can be retrieved.
#'
#' @usage
#' iotanalytics_sample_channel_data(channelName, maxMessages, startTime,
#' endTime)
#'
#' @param channelName [required] The name of the channel whose message samples are retrieved.
#' @param maxMessages The number of sample messages to be retrieved. The limit is 10, the
#' default is also 10.
#' @param startTime The start of the time window from which sample messages are retrieved.
#' @param endTime The end of the time window from which sample messages are retrieved.
#'
#' @section Request syntax:
#' ```
#' svc$sample_channel_data(
#' channelName = "string",
#' maxMessages = 123,
#' startTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' endTime = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_sample_channel_data
iotanalytics_sample_channel_data <- function(channelName, maxMessages = NULL, startTime = NULL, endTime = NULL) {
  # Describe the SampleChannelData API call: an HTTP GET on the sample
  # sub-resource of the named channel, with optional count and time
  # window arguments.
  operation <- new_operation(
    name = "SampleChannelData",
    http_method = "GET",
    http_path = "/channels/{channelName}/sample",
    paginator = list()
  )
  # Serialize the caller-supplied arguments and prepare the output shape.
  request_input <- .iotanalytics$sample_channel_data_input(
    channelName = channelName,
    maxMessages = maxMessages,
    startTime = startTime,
    endTime = endTime
  )
  request_output <- .iotanalytics$sample_channel_data_output()
  # Resolve the service client from the active configuration and send.
  service <- .iotanalytics$service(get_config())
  request <- new_request(service, operation, request_input, request_output)
  response <- send_request(request)
  response
}
.iotanalytics$operations$sample_channel_data <- iotanalytics_sample_channel_data
#' Starts the reprocessing of raw message data through the pipeline
#'
#' Starts the reprocessing of raw message data through the pipeline.
#'
#' @usage
#' iotanalytics_start_pipeline_reprocessing(pipelineName, startTime,
#' endTime)
#'
#' @param pipelineName [required] The name of the pipeline on which to start reprocessing.
#' @param startTime The start time (inclusive) of raw message data that is reprocessed.
#' @param endTime The end time (exclusive) of raw message data that is reprocessed.
#'
#' @section Request syntax:
#' ```
#' svc$start_pipeline_reprocessing(
#' pipelineName = "string",
#' startTime = as.POSIXct(
#' "2015-01-01"
#' ),
#' endTime = as.POSIXct(
#' "2015-01-01"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_start_pipeline_reprocessing
iotanalytics_start_pipeline_reprocessing <- function(pipelineName, startTime = NULL, endTime = NULL) {
  # Describe the StartPipelineReprocessing API call: an HTTP POST on
  # the reprocessing sub-resource of the named pipeline.
  operation <- new_operation(
    name = "StartPipelineReprocessing",
    http_method = "POST",
    http_path = "/pipelines/{pipelineName}/reprocessing",
    paginator = list()
  )
  # Serialize the caller-supplied arguments and prepare the output shape.
  request_input <- .iotanalytics$start_pipeline_reprocessing_input(
    pipelineName = pipelineName,
    startTime = startTime,
    endTime = endTime
  )
  request_output <- .iotanalytics$start_pipeline_reprocessing_output()
  # Resolve the service client from the active configuration and send.
  service <- .iotanalytics$service(get_config())
  request <- new_request(service, operation, request_input, request_output)
  response <- send_request(request)
  response
}
.iotanalytics$operations$start_pipeline_reprocessing <- iotanalytics_start_pipeline_reprocessing
#' Adds to or modifies the tags of the given resource
#'
#' Adds to or modifies the tags of the given resource. Tags are metadata
#' which can be used to manage a resource.
#'
#' @usage
#' iotanalytics_tag_resource(resourceArn, tags)
#'
#' @param resourceArn [required] The ARN of the resource whose tags you want to modify.
#' @param tags [required] The new or modified tags for the resource.
#'
#' @section Request syntax:
#' ```
#' svc$tag_resource(
#' resourceArn = "string",
#' tags = list(
#' list(
#' key = "string",
#' value = "string"
#' )
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_tag_resource
iotanalytics_tag_resource <- function(resourceArn, tags) {
  # Describe the TagResource API call: an HTTP POST to /tags that adds
  # or modifies tags on the resource identified by its ARN.
  operation <- new_operation(
    name = "TagResource",
    http_method = "POST",
    http_path = "/tags",
    paginator = list()
  )
  # Serialize the caller-supplied arguments and prepare the output shape.
  request_input <- .iotanalytics$tag_resource_input(
    resourceArn = resourceArn,
    tags = tags
  )
  request_output <- .iotanalytics$tag_resource_output()
  # Resolve the service client from the active configuration and send.
  service <- .iotanalytics$service(get_config())
  request <- new_request(service, operation, request_input, request_output)
  response <- send_request(request)
  response
}
.iotanalytics$operations$tag_resource <- iotanalytics_tag_resource
#' Removes the given tags (metadata) from the resource
#'
#' Removes the given tags (metadata) from the resource.
#'
#' @usage
#' iotanalytics_untag_resource(resourceArn, tagKeys)
#'
#' @param resourceArn [required] The ARN of the resource whose tags you want to remove.
#' @param tagKeys [required] The keys of those tags which you want to remove.
#'
#' @section Request syntax:
#' ```
#' svc$untag_resource(
#' resourceArn = "string",
#' tagKeys = list(
#' "string"
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_untag_resource
iotanalytics_untag_resource <- function(resourceArn, tagKeys) {
  # Describe the UntagResource API call: an HTTP DELETE on /tags that
  # removes the given tag keys from the resource identified by its ARN.
  operation <- new_operation(
    name = "UntagResource",
    http_method = "DELETE",
    http_path = "/tags",
    paginator = list()
  )
  # Serialize the caller-supplied arguments and prepare the output shape.
  request_input <- .iotanalytics$untag_resource_input(
    resourceArn = resourceArn,
    tagKeys = tagKeys
  )
  request_output <- .iotanalytics$untag_resource_output()
  # Resolve the service client from the active configuration and send.
  service <- .iotanalytics$service(get_config())
  request <- new_request(service, operation, request_input, request_output)
  response <- send_request(request)
  response
}
.iotanalytics$operations$untag_resource <- iotanalytics_untag_resource
#' Updates the settings of a channel
#'
#' Updates the settings of a channel.
#'
#' @usage
#' iotanalytics_update_channel(channelName, channelStorage,
#' retentionPeriod)
#'
#' @param channelName [required] The name of the channel to be updated.
#' @param channelStorage Where channel data is stored. You may choose one of \"serviceManagedS3\"
#' or \"customerManagedS3\" storage. If not specified, the default is
#' \"serviceManagedS3\". This cannot be changed after creation of the
#' channel.
#' @param retentionPeriod How long, in days, message data is kept for the channel. The retention
#' period cannot be updated if the channel\'s S3 storage is
#' customer-managed.
#'
#' @section Request syntax:
#' ```
#' svc$update_channel(
#' channelName = "string",
#' channelStorage = list(
#' serviceManagedS3 = list(),
#' customerManagedS3 = list(
#' bucket = "string",
#' keyPrefix = "string",
#' roleArn = "string"
#' )
#' ),
#' retentionPeriod = list(
#' unlimited = TRUE|FALSE,
#' numberOfDays = 123
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_update_channel
iotanalytics_update_channel <- function(channelName, channelStorage = NULL, retentionPeriod = NULL) {
  # Describe the UpdateChannel API call: an HTTP PUT on the named
  # channel resource.
  operation <- new_operation(
    name = "UpdateChannel",
    http_method = "PUT",
    http_path = "/channels/{channelName}",
    paginator = list()
  )
  # Serialize the caller-supplied arguments and prepare the output shape.
  request_input <- .iotanalytics$update_channel_input(
    channelName = channelName,
    channelStorage = channelStorage,
    retentionPeriod = retentionPeriod
  )
  request_output <- .iotanalytics$update_channel_output()
  # Resolve the service client from the active configuration and send.
  service <- .iotanalytics$service(get_config())
  request <- new_request(service, operation, request_input, request_output)
  response <- send_request(request)
  response
}
.iotanalytics$operations$update_channel <- iotanalytics_update_channel
#' Updates the settings of a data set
#'
#' Updates the settings of a data set.
#'
#' @usage
#' iotanalytics_update_dataset(datasetName, actions, triggers,
#' contentDeliveryRules, retentionPeriod, versioningConfiguration)
#'
#' @param datasetName [required] The name of the data set to update.
#' @param actions [required] A list of \"DatasetAction\" objects.
#' @param triggers A list of \"DatasetTrigger\" objects. The list can be empty or can
#' contain up to five **DataSetTrigger** objects.
#' @param contentDeliveryRules When data set contents are created they are delivered to destinations
#' specified here.
#' @param retentionPeriod How long, in days, data set contents are kept for the data set.
#' @param versioningConfiguration \[Optional\] How many versions of data set contents are kept. If not
#' specified or set to null, only the latest version plus the latest
#' succeeded version (if they are different) are kept for the time period
#' specified by the \"retentionPeriod\" parameter. (For more information,
#' see
#' https://docs.aws.amazon.com/iotanalytics/latest/userguide/getting-started.html\\#aws-iot-analytics-dataset-versions)
#'
#' @section Request syntax:
#' ```
#' svc$update_dataset(
#' datasetName = "string",
#' actions = list(
#' list(
#' actionName = "string",
#' queryAction = list(
#' sqlQuery = "string",
#' filters = list(
#' list(
#' deltaTime = list(
#' offsetSeconds = 123,
#' timeExpression = "string"
#' )
#' )
#' )
#' ),
#' containerAction = list(
#' image = "string",
#' executionRoleArn = "string",
#' resourceConfiguration = list(
#' computeType = "ACU_1"|"ACU_2",
#' volumeSizeInGB = 123
#' ),
#' variables = list(
#' list(
#' name = "string",
#' stringValue = "string",
#' doubleValue = 123.0,
#' datasetContentVersionValue = list(
#' datasetName = "string"
#' ),
#' outputFileUriValue = list(
#' fileName = "string"
#' )
#' )
#' )
#' )
#' )
#' ),
#' triggers = list(
#' list(
#' schedule = list(
#' expression = "string"
#' ),
#' dataset = list(
#' name = "string"
#' )
#' )
#' ),
#' contentDeliveryRules = list(
#' list(
#' entryName = "string",
#' destination = list(
#' iotEventsDestinationConfiguration = list(
#' inputName = "string",
#' roleArn = "string"
#' ),
#' s3DestinationConfiguration = list(
#' bucket = "string",
#' key = "string",
#' glueConfiguration = list(
#' tableName = "string",
#' databaseName = "string"
#' ),
#' roleArn = "string"
#' )
#' )
#' )
#' ),
#' retentionPeriod = list(
#' unlimited = TRUE|FALSE,
#' numberOfDays = 123
#' ),
#' versioningConfiguration = list(
#' unlimited = TRUE|FALSE,
#' maxVersions = 123
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_update_dataset
iotanalytics_update_dataset <- function(datasetName, actions, triggers = NULL, contentDeliveryRules = NULL, retentionPeriod = NULL, versioningConfiguration = NULL) {
  # REST operation descriptor for UpdateDataset (no pagination defined).
  operation <- new_operation(
    name = "UpdateDataset",
    http_method = "PUT",
    http_path = "/datasets/{datasetName}",
    paginator = list()
  )
  # Marshal the caller's arguments into the request shape, then dispatch
  # the call through the shared service/request pipeline.
  request_input <- .iotanalytics$update_dataset_input(
    datasetName = datasetName,
    actions = actions,
    triggers = triggers,
    contentDeliveryRules = contentDeliveryRules,
    retentionPeriod = retentionPeriod,
    versioningConfiguration = versioningConfiguration
  )
  response_shape <- .iotanalytics$update_dataset_output()
  client <- .iotanalytics$service(get_config())
  send_request(new_request(client, operation, request_input, response_shape))
}
.iotanalytics$operations$update_dataset <- iotanalytics_update_dataset
#' Updates the settings of a data store
#'
#' Updates the settings of a data store.
#'
#' @usage
#' iotanalytics_update_datastore(datastoreName, retentionPeriod,
#' datastoreStorage)
#'
#' @param datastoreName [required] The name of the data store to be updated.
#' @param retentionPeriod How long, in days, message data is kept for the data store. The
#' retention period cannot be updated if the data store\'s S3 storage is
#' customer-managed.
#' @param datastoreStorage Where data store data is stored. You may choose one of
#' \"serviceManagedS3\" or \"customerManagedS3\" storage. If not specified,
#' the default is \"serviceManagedS3\". This cannot be changed after the
#' data store is created.
#'
#' @section Request syntax:
#' ```
#' svc$update_datastore(
#' datastoreName = "string",
#' retentionPeriod = list(
#' unlimited = TRUE|FALSE,
#' numberOfDays = 123
#' ),
#' datastoreStorage = list(
#' serviceManagedS3 = list(),
#' customerManagedS3 = list(
#' bucket = "string",
#' keyPrefix = "string",
#' roleArn = "string"
#' )
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_update_datastore
iotanalytics_update_datastore <- function(datastoreName, retentionPeriod = NULL, datastoreStorage = NULL) {
  # REST operation descriptor for UpdateDatastore (no pagination defined).
  operation <- new_operation(
    name = "UpdateDatastore",
    http_method = "PUT",
    http_path = "/datastores/{datastoreName}",
    paginator = list()
  )
  # Marshal the caller's arguments into the request shape, then dispatch
  # the call through the shared service/request pipeline.
  request_input <- .iotanalytics$update_datastore_input(
    datastoreName = datastoreName,
    retentionPeriod = retentionPeriod,
    datastoreStorage = datastoreStorage
  )
  response_shape <- .iotanalytics$update_datastore_output()
  client <- .iotanalytics$service(get_config())
  send_request(new_request(client, operation, request_input, response_shape))
}
.iotanalytics$operations$update_datastore <- iotanalytics_update_datastore
#' Updates the settings of a pipeline
#'
#' Updates the settings of a pipeline. You must specify both a `channel`
#' and a `datastore` activity and, optionally, as many as 23 additional
#' activities in the `pipelineActivities` array.
#'
#' @usage
#' iotanalytics_update_pipeline(pipelineName, pipelineActivities)
#'
#' @param pipelineName [required] The name of the pipeline to update.
#' @param pipelineActivities [required] A list of \"PipelineActivity\" objects. Activities perform
#' transformations on your messages, such as removing, renaming or adding
#' message attributes; filtering messages based on attribute values;
#' invoking your Lambda functions on messages for advanced processing; or
#' performing mathematical transformations to normalize device data.
#'
#' The list can be 2-25 **PipelineActivity** objects and must contain both
#' a `channel` and a `datastore` activity. Each entry in the list must
#' contain only one activity, for example:
#'
#' `pipelineActivities = \\[ \{ "channel": \{ ... \} \}, \{ "lambda": \{ ... \} \}, ... \\]`
#'
#' @section Request syntax:
#' ```
#' svc$update_pipeline(
#' pipelineName = "string",
#' pipelineActivities = list(
#' list(
#' channel = list(
#' name = "string",
#' channelName = "string",
#' next = "string"
#' ),
#' lambda = list(
#' name = "string",
#' lambdaName = "string",
#' batchSize = 123,
#' next = "string"
#' ),
#' datastore = list(
#' name = "string",
#' datastoreName = "string"
#' ),
#' addAttributes = list(
#' name = "string",
#' attributes = list(
#' "string"
#' ),
#' next = "string"
#' ),
#' removeAttributes = list(
#' name = "string",
#' attributes = list(
#' "string"
#' ),
#' next = "string"
#' ),
#' selectAttributes = list(
#' name = "string",
#' attributes = list(
#' "string"
#' ),
#' next = "string"
#' ),
#' filter = list(
#' name = "string",
#' filter = "string",
#' next = "string"
#' ),
#' math = list(
#' name = "string",
#' attribute = "string",
#' math = "string",
#' next = "string"
#' ),
#' deviceRegistryEnrich = list(
#' name = "string",
#' attribute = "string",
#' thingName = "string",
#' roleArn = "string",
#' next = "string"
#' ),
#' deviceShadowEnrich = list(
#' name = "string",
#' attribute = "string",
#' thingName = "string",
#' roleArn = "string",
#' next = "string"
#' )
#' )
#' )
#' )
#' ```
#'
#' @keywords internal
#'
#' @rdname iotanalytics_update_pipeline
iotanalytics_update_pipeline <- function(pipelineName, pipelineActivities) {
  # REST operation descriptor for UpdatePipeline (no pagination defined).
  operation <- new_operation(
    name = "UpdatePipeline",
    http_method = "PUT",
    http_path = "/pipelines/{pipelineName}",
    paginator = list()
  )
  # Marshal the caller's arguments into the request shape, then dispatch
  # the call through the shared service/request pipeline.
  request_input <- .iotanalytics$update_pipeline_input(
    pipelineName = pipelineName,
    pipelineActivities = pipelineActivities
  )
  response_shape <- .iotanalytics$update_pipeline_output()
  client <- .iotanalytics$service(get_config())
  send_request(new_request(client, operation, request_input, response_shape))
}
.iotanalytics$operations$update_pipeline <- iotanalytics_update_pipeline
|
#' @title Add values
#' @description A function that adds two values.
#' @param x one value (numeric scalar or vector)
#' @param y another value (numeric scalar or vector)
#' @details Uses R's vectorized `+`, so vector inputs are summed
#' elementwise with the usual recycling rules.
#' @return numeric value: the sum of `x` and `y`
#' @examples
#' val <- addvalue(4, 3)
#' @export
addvalue <- function(x, y) {
  # Last expression is returned; explicit return() is unnecessary.
  x + y
}
addvalue(4, 3)
| /R/addValue.R | no_license | ummehanihassi/newpackage | R | false | false | 276 | r | #' @title Add values
#' @description A function that add values
#' @param x one value
#' @param y another value
#' @details
#'
#' @return numeric value
#'
#' @examples val <- addvalue(4,3)
#' @export
addvalue <- function(x,y){
  # Delegate to R's vectorized addition; the last expression is returned.
  x + y
}
addvalue(4,3)
|
# Exploratory factor analysis / PCA of four palm-oil production indicators.
# NOTE(review): install.packages() inside a script re-installs on every run;
# consider installing once interactively instead.
install.packages("psych")
library(psych)
# NOTE(review): hard-coded absolute Windows path -- only runs on the
# author's machine; a relative path would be more portable.
setwd("C:\\Users\\DELL\\Desktop\\working files\\Project files\\Analytics Model developement\\LSTM")
Rawdata<- read.csv("Rawdata.csv")
# Keep only the four numeric indicator columns used in the analysis.
data<-Rawdata[,c("MatureAreaHabyMonthly","FFBProductionYieldperHectareTonnesHectare","CPOProductionTONNES",
                 "CrudePalmOilExportCPO")]
# str((data))
# Correlation matrix feeds the scree plot and the eigenvalue inspection.
corrm<- cor(data)
require(psych)
require(GPArotation)
# Scree plot over both factors and principal components, to decide how many
# to retain.
scree(corrm, factors=T, pc=T, main="scree plot", hline=NULL, add=FALSE)
# Eigenvalues of the correlation matrix (often compared against 1 when
# choosing the number of components).
eigen(corrm)$values
require(dplyr)
# Principal Components Analysis
# entering raw data and extracting PCs
# from the correlation matrix
fit <- princomp(data, cor=TRUE)
summary(fit) # print variance accounted for
loadings(fit) # pc loadings
plot(fit,type="lines") # scree plot
fit$scores # the principal components
biplot(fit)
| /Factor Analysis.R | no_license | lkvi8686/Factor-Analysis | R | false | false | 877 | r | install.packages("psych")
# Exploratory factor analysis / PCA of four palm-oil production indicators.
library(psych)
# NOTE(review): hard-coded absolute Windows path -- only runs on the
# author's machine; a relative path would be more portable.
setwd("C:\\Users\\DELL\\Desktop\\working files\\Project files\\Analytics Model developement\\LSTM")
Rawdata<- read.csv("Rawdata.csv")
# Keep only the four numeric indicator columns used in the analysis.
data<-Rawdata[,c("MatureAreaHabyMonthly","FFBProductionYieldperHectareTonnesHectare","CPOProductionTONNES",
                 "CrudePalmOilExportCPO")]
# str((data))
# Correlation matrix feeds the scree plot and the eigenvalue inspection.
corrm<- cor(data)
require(psych)
require(GPArotation)
# Scree plot over both factors and principal components, to decide how many
# to retain.
scree(corrm, factors=T, pc=T, main="scree plot", hline=NULL, add=FALSE)
# Eigenvalues of the correlation matrix (often compared against 1 when
# choosing the number of components).
eigen(corrm)$values
require(dplyr)
# Principal Components Analysis
# entering raw data and extracting PCs
# from the correlation matrix
fit <- princomp(data, cor=TRUE)
summary(fit) # print variance accounted for
loadings(fit) # pc loadings
plot(fit,type="lines") # scree plot
fit$scores # the principal components
biplot(fit)
|
/Biseccion.R | no_license | davidvanegas2/Analisis-n-merico | R | false | false | 1,130 | r | ||
# This code will calculate the mean and max number
# of species for each degree of latitude and
# plot it against degrees latitude
# Rows of nagrid are assumed to be one-degree latitude bands (24-49 N, in
# order) -- TODO confirm against the source data.
nagrid <- read.csv('http://mtaylor4.semo.edu/~goby/biogeo/nagrid.csv', row.names=1)
# Row-wise mean and maximum species richness.
meansp <- apply(nagrid,1,mean)
maxsp <- apply(nagrid,1,max)
lat <- 24:49
op <- par(mfrow=c(1,2)) # to plot the two graphs side by side
plot(lat~meansp, xlab = 'Mean Species Richness', ylab='Latitude °N', main = 'Mean Species Richness per Degree Latitude\nfor U.S. Freshwater Fishes')
plot(lat~maxsp, xlab = 'Maximum Species Richness', ylab='Latitude °N', main='Maximum Species Richness per Degree Latitude\n for U.S. Freshwater Fishes')
par(op) # restore the previous plotting layout
#### AREA
fishArea <- read.csv('http://mtaylor4.semo.edu/~goby/biogeo/fish_area.csv', row.names=1)
plot(fishArea$lat ~ fishArea$area, xlab = 'Area', ylab = 'Latitude (degrees N)', main='Relative Area Occupied by U.S Freshwater Fishes')
### Champaign
# NOTE(review): source() executes remote code fetched over plain HTTP --
# trusted source only.
source('http://mtaylor4.semo.edu/~goby/biogeo/rapo_champaign.r')
| /units/2_geographic_range/2c_rapoport/code_data/rapoport_cmds.r | no_license | mtaylor-semo/438 | R | false | false | 1,019 | r | # This code will calculate the mean and max number
# of species for each degree of latitude and
# plot it against degrees latitude
# Rows of nagrid are assumed to be one-degree latitude bands (24-49 N, in
# order) -- TODO confirm against the source data.
nagrid <- read.csv('http://mtaylor4.semo.edu/~goby/biogeo/nagrid.csv', row.names=1)
# Row-wise mean and maximum species richness.
meansp <- apply(nagrid,1,mean)
maxsp <- apply(nagrid,1,max)
lat <- 24:49
op <- par(mfrow=c(1,2)) # to plot the two graphs side by side
plot(lat~meansp, xlab = 'Mean Species Richness', ylab='Latitude °N', main = 'Mean Species Richness per Degree Latitude\nfor U.S. Freshwater Fishes')
plot(lat~maxsp, xlab = 'Maximum Species Richness', ylab='Latitude °N', main='Maximum Species Richness per Degree Latitude\n for U.S. Freshwater Fishes')
par(op) # restore the previous plotting layout
#### AREA
fishArea <- read.csv('http://mtaylor4.semo.edu/~goby/biogeo/fish_area.csv', row.names=1)
plot(fishArea$lat ~ fishArea$area, xlab = 'Area', ylab = 'Latitude (degrees N)', main='Relative Area Occupied by U.S Freshwater Fishes')
### Champaign
# NOTE(review): source() executes remote code fetched over plain HTTP --
# trusted source only.
source('http://mtaylor4.semo.edu/~goby/biogeo/rapo_champaign.r')
|
# Validation tests for the May load data set `mloads`.
# Assumes `mloads`, `aloads`, and the project's helper expectations
# (expect_has_names, expect_no_errors, expect_no_duplicates) plus
# count_sig_figs() are provided by the test setup.
library(testthat)
library(validate)
library(dplyr)
options(scipen=999) # avoid scientific notation when values are printed
context("may load")
# Working copy with numeric versions of the character load columns.
temp_mloads <- mloads
temp_mloads$TONS_N<-as.numeric(temp_mloads$TONS)
temp_mloads$TONS_L95_N<-as.numeric(temp_mloads$TONS_L95)
temp_mloads$TONS_U95_N<-as.numeric(temp_mloads$TONS_U95)
# Subset containing only the most recent water year.
temp_mloads_recent<-temp_mloads[temp_mloads$WY %in% max(temp_mloads$WY),]
#length(unique(temp_mloads_recent$SITE_ABB))
#looking for more thorough explanation of the 'validate' library capabilities?
#Run:
# vignette("intro", package="validate")
test_that("may load has the correct columns", {
  expect_has_names(mloads, c(
    "SITE_ABB",
    "SITE_FLOW_ID",
    "SITE_QW_ID",
    "CONSTIT",
    "WY",
    "MODTYPE",
    "TONS",
    "TONS_L95",
    "TONS_U95"
  ))
})
# NOTE(review): MONTH is referenced below but is not among the columns
# asserted above -- confirm mloads actually carries a MONTH column.
test_that("may load's columns are correctly typed", {
  result <- validate::check_that(mloads,
                                 is.integer(c(WY, MONTH)),
                                 is.character(c(
                                   SITE_ABB,
                                   SITE_QW_ID,
                                   SITE_FLOW_ID,TONS, TONS_L95, TONS_U95
                                 )),
                                 is.factor(CONSTIT),
                                 is.factor(MODTYPE)
  )
  expect_no_errors(result)
})
# Sanity bounds on the numeric load values, site-code format, and water year.
test_that("may load has a reasonable range of values", {
  result <- validate::check_that(temp_mloads,
                                 TONS_N > 0,
                                 TONS_N < 5E8,
                                 TONS_L95_N < TONS_U95_N,
                                 TONS_L95_N < TONS_N,
                                 TONS_N < TONS_U95_N,
                                 nchar(SITE_ABB) == 4,
                                 WY < 2020,
                                 WY > 1950
  )
  expect_no_errors(result)
})
test_that("may loads for the MISS site are included", {
  miss_sites <- subset(mloads, SITE_ABB == 'MISS')
  expect_gt(nrow(miss_sites), 0)
})
# Monthly totals should never exceed the matching annual totals (TONS.y is
# the annual value picked up by the join with aloads).
test_that("may loads are less than corresponding annual loads for a given site/water year/constituent", {
  tt<-left_join(temp_mloads, aloads, by = c("SITE_ABB" = "SITE_ABB", "WY" = "WY","CONSTIT"="CONSTIT"))
  tt<-tt[!is.na(tt$TONS.y),]
  result <- validate::check_that(tt,
                                 TONS_N < as.numeric(TONS.y)
  )
  expect_no_errors(result)
})
test_that("Most recent water year has all of the necessary sites ", {
  expected <- sort(c("HAZL","PADU","GRAN","CLIN","WAPE","KEOS","VALL","GRAF","SIDN","OMAH","ELKH","LOUI","DESO","HERM","THEB","SEDG","HARR","KERS","BELL","MORG",
                     "STFR","MELV","VICK","SUMN","STTH","GULF","NEWH","CANN","MISS","HAST","LITT","LONG"))
  actual <- sort(unique(temp_mloads_recent$SITE_ABB))
  expect_equal(actual, expected)
})
test_that("Load data have the correct number of significant digits", {
  result <- validate::check_that(temp_mloads,
                                 count_sig_figs(temp_mloads$TONS_N/1E5) <= 3,
                                 count_sig_figs(temp_mloads$TONS_L95_N/1E5) <= 3,
                                 count_sig_figs(temp_mloads$TONS_U95_N/1E5) <= 3
  )
  expect_no_errors(result)
})
# Rows with MODTYPE 'COMP' or 'CONTIN' are excluded before checking
# uniqueness of (SITE_QW_ID, CONSTIT, WY).
test_that("There are no duplicate values", {
  mloads_without_ignored_modtypes <- subset(temp_mloads, !(MODTYPE %in% c('COMP', 'CONTIN')))
  unique_columns <- mloads_without_ignored_modtypes[c('SITE_QW_ID', 'CONSTIT', 'WY')]
  expect_no_duplicates(unique_columns)
})
| /tests/testthat/test_may_load.R | permissive | supercasey/nar_data | R | false | false | 3,172 | r | library(testthat)
# Validation tests for the May load data set `mloads`.
# Assumes `mloads`, `aloads`, and the project's helper expectations
# (expect_has_names, expect_no_errors, expect_no_duplicates) plus
# count_sig_figs() are provided by the test setup.
library(validate)
library(dplyr)
options(scipen=999) # avoid scientific notation when values are printed
context("may load")
# Working copy with numeric versions of the character load columns.
temp_mloads <- mloads
temp_mloads$TONS_N<-as.numeric(temp_mloads$TONS)
temp_mloads$TONS_L95_N<-as.numeric(temp_mloads$TONS_L95)
temp_mloads$TONS_U95_N<-as.numeric(temp_mloads$TONS_U95)
# Subset containing only the most recent water year.
temp_mloads_recent<-temp_mloads[temp_mloads$WY %in% max(temp_mloads$WY),]
#length(unique(temp_mloads_recent$SITE_ABB))
#looking for more thorough explanation of the 'validate' library capabilities?
#Run:
# vignette("intro", package="validate")
test_that("may load has the correct columns", {
  expect_has_names(mloads, c(
    "SITE_ABB",
    "SITE_FLOW_ID",
    "SITE_QW_ID",
    "CONSTIT",
    "WY",
    "MODTYPE",
    "TONS",
    "TONS_L95",
    "TONS_U95"
  ))
})
# NOTE(review): MONTH is referenced below but is not among the columns
# asserted above -- confirm mloads actually carries a MONTH column.
test_that("may load's columns are correctly typed", {
  result <- validate::check_that(mloads,
                                 is.integer(c(WY, MONTH)),
                                 is.character(c(
                                   SITE_ABB,
                                   SITE_QW_ID,
                                   SITE_FLOW_ID,TONS, TONS_L95, TONS_U95
                                 )),
                                 is.factor(CONSTIT),
                                 is.factor(MODTYPE)
  )
  expect_no_errors(result)
})
# Sanity bounds on the numeric load values, site-code format, and water year.
test_that("may load has a reasonable range of values", {
  result <- validate::check_that(temp_mloads,
                                 TONS_N > 0,
                                 TONS_N < 5E8,
                                 TONS_L95_N < TONS_U95_N,
                                 TONS_L95_N < TONS_N,
                                 TONS_N < TONS_U95_N,
                                 nchar(SITE_ABB) == 4,
                                 WY < 2020,
                                 WY > 1950
  )
  expect_no_errors(result)
})
test_that("may loads for the MISS site are included", {
  miss_sites <- subset(mloads, SITE_ABB == 'MISS')
  expect_gt(nrow(miss_sites), 0)
})
# Monthly totals should never exceed the matching annual totals (TONS.y is
# the annual value picked up by the join with aloads).
test_that("may loads are less than corresponding annual loads for a given site/water year/constituent", {
  tt<-left_join(temp_mloads, aloads, by = c("SITE_ABB" = "SITE_ABB", "WY" = "WY","CONSTIT"="CONSTIT"))
  tt<-tt[!is.na(tt$TONS.y),]
  result <- validate::check_that(tt,
                                 TONS_N < as.numeric(TONS.y)
  )
  expect_no_errors(result)
})
test_that("Most recent water year has all of the necessary sites ", {
  expected <- sort(c("HAZL","PADU","GRAN","CLIN","WAPE","KEOS","VALL","GRAF","SIDN","OMAH","ELKH","LOUI","DESO","HERM","THEB","SEDG","HARR","KERS","BELL","MORG",
                     "STFR","MELV","VICK","SUMN","STTH","GULF","NEWH","CANN","MISS","HAST","LITT","LONG"))
  actual <- sort(unique(temp_mloads_recent$SITE_ABB))
  expect_equal(actual, expected)
})
test_that("Load data have the correct number of significant digits", {
  result <- validate::check_that(temp_mloads,
                                 count_sig_figs(temp_mloads$TONS_N/1E5) <= 3,
                                 count_sig_figs(temp_mloads$TONS_L95_N/1E5) <= 3,
                                 count_sig_figs(temp_mloads$TONS_U95_N/1E5) <= 3
  )
  expect_no_errors(result)
})
# Rows with MODTYPE 'COMP' or 'CONTIN' are excluded before checking
# uniqueness of (SITE_QW_ID, CONSTIT, WY).
test_that("There are no duplicate values", {
  mloads_without_ignored_modtypes <- subset(temp_mloads, !(MODTYPE %in% c('COMP', 'CONTIN')))
  unique_columns <- mloads_without_ignored_modtypes[c('SITE_QW_ID', 'CONSTIT', 'WY')]
  expect_no_duplicates(unique_columns)
})
|
# https://www.flickr.com/photos/stringrbelle/49258162383 (bright)
# https://www.flickr.com/photos/stringrbelle/49258162383 (flat)
#' @rdname rosemary
#' @export
wispy_heart <- function(dir = NULL, version = 0, ...) {
  dir <- check_dir(dir)
  # Render one heart-shaped ribbon image to `file` using `palette`.
  wsp_hrt <- function(palette, file) {
    jasmines::entity_heart(200) %>%
      dplyr::mutate(x = x * 5, y = y * 5) %>%
      jasmines::unfold_meander(iterations = 20, output1 = "space") %>%
      jasmines::unfold_tempest(
        seed = 100,
        iterations = 300,
        scale = .001
      ) %>%
      dplyr::mutate(order = time) %>%
      jasmines::style_ribbon(
        palette = palette,
        alpha_init = .8,
        alpha_decay = .015
      ) %>%  # fixed: trailing comma previously passed an empty argument
      jasmines::export_image(file)
    cat("image written to:", file, "\n")
  }
  # version 0 renders both variants; 1 = bright (rainbow), 2 = flat (white).
  if(version %in% c(0, 1)) {
    wsp_hrt(
      palette = grDevices::rainbow,
      file = file.path(dir, "wispy_heart_bright.png")
    )
  }
  if(version %in% c(0, 2)) {
    wsp_hrt(
      palette = jasmines::palette_manual("white"),
      file = file.path(dir, "wispy_heart_flat.png")
    )
  }
  return(invisible(NULL))
}
| /R/wispy_heart.R | permissive | enderakay/rosemary | R | false | false | 1,119 | r | # https://www.flickr.com/photos/stringrbelle/49258162383 (bright)
# https://www.flickr.com/photos/stringrbelle/49258162383 (flat)
#' @rdname rosemary
#' @export
wispy_heart <- function(dir = NULL, version = 0, ...) {
  dir <- check_dir(dir)
  # Render one heart-shaped ribbon image to `file` using `palette`.
  wsp_hrt <- function(palette, file) {
    jasmines::entity_heart(200) %>%
      dplyr::mutate(x = x * 5, y = y * 5) %>%
      jasmines::unfold_meander(iterations = 20, output1 = "space") %>%
      jasmines::unfold_tempest(
        seed = 100,
        iterations = 300,
        scale = .001
      ) %>%
      dplyr::mutate(order = time) %>%
      jasmines::style_ribbon(
        palette = palette,
        alpha_init = .8,
        alpha_decay = .015
      ) %>%  # fixed: trailing comma previously passed an empty argument
      jasmines::export_image(file)
    cat("image written to:", file, "\n")
  }
  # version 0 renders both variants; 1 = bright (rainbow), 2 = flat (white).
  if(version %in% c(0, 1)) {
    wsp_hrt(
      palette = grDevices::rainbow,
      file = file.path(dir, "wispy_heart_bright.png")
    )
  }
  if(version %in% c(0, 2)) {
    wsp_hrt(
      palette = jasmines::palette_manual("white"),
      file = file.path(dir, "wispy_heart_flat.png")
    )
  }
  return(invisible(NULL))
}
|
# Base R facet grid
# data needs to be in a long format
# Toy data: three observations per group level ('a', 'b', 'c').
dat <- data.frame(
  position = c(1,2,3,2,3,5,2,3,10),
  score = c(450,220,330,333,423,988,333,423,988),
  z = c('a','a','a','b','b','b','c','c','c') # grouping variable
)
# Draw one base-graphics scatter panel per level of the grouping column `z`.
# horiz = TRUE lays the panels side by side (shared y range); otherwise they
# are stacked vertically (shared x range). Extra arguments are forwarded to
# par(). Graphics state is restored when the function exits.
facet_wrap <- function(data, x, y, z, horiz = TRUE, ...) {
  # Snapshot the current par settings and guarantee they are restored.
  saved_par <- par(no.readonly = TRUE)
  on.exit(par(saved_par))
  groups <- unique(data[, z])
  # Shared axis limits: fix y across a horizontal strip, x across a
  # vertical one; the other axis is left free per panel.
  xlim <- if (horiz) NULL else range(data[, x])
  ylim <- if (horiz) range(data[, y]) else NULL
  layout <- if (horiz) c(1, length(groups)) else c(length(groups), 1)
  par(mfrow = layout, ...)
  # One scatter plot per group level.
  for (level in groups) {
    panel <- data[data[, z] %in% level, ]
    plot(panel[, x], panel[, y], xlim = xlim, ylim = ylim)
  }
}
facet_wrap(dat, 'position', 'score', 'z', horiz = FALSE)
# ggplot ------------------------------------------------------------------------------------------
library("tidyverse")
# Downloads the TidyTuesday (2019-08-20) nuclear explosions data over the network.
nuclear_explosions <- readr::read_csv("https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2019/2019-08-20/nuclear_explosions.csv")
# Basic count plot
# Lump all but the 15 most frequent regions into "Other", count rows per
# region, and draw horizontal bars ordered by count.
nuclear_explosions %>%
  mutate(region = fct_lump(region %>% as.factor, n = 15)) %>%
  count(region) %>%
  ggplot(aes(x = reorder(region, n), y = n)) +
  geom_col() +
  coord_flip()
# Count with color
# Per-(type, country) counts; keep the 5 most frequent types and stack
# bars filled by country.
# NOTE(review): the default scale_fill_brewer() palette supports a limited
# number of levels -- confirm the number of countries fits.
nuclear_explosions %>%
  group_by(type, country) %>%
  summarise(count = n()) %>%
  ungroup() %>%
  mutate(type = fct_lump(type %>% as.factor, n = 5)) %>%
  ggplot(aes(x = reorder(type, count), y = count, fill = country)) +
  geom_bar(stat = "identity") +
  scale_fill_brewer() +
  coord_flip()
| /ggplot_snippets.R | no_license | Brent-Morrison/Misc_scripts | R | false | false | 1,816 | r | # Base R facet grid
# data needs to be in a long format
# Toy data: three observations per group level ('a', 'b', 'c').
dat <- data.frame(
  position = c(1,2,3,2,3,5,2,3,10),
  score = c(450,220,330,333,423,988,333,423,988),
  z = c('a','a','a','b','b','b','c','c','c') # grouping variable
)
# Draws one base-graphics scatter panel per level of grouping column `z`.
# horiz = TRUE lays panels side by side (shared y range); otherwise they are
# stacked (shared x range). par() state is restored when the function exits.
facet_wrap <- function(data, x, y, z, horiz = TRUE, ...) {
  # save current par settings and return after finished
  op <- par(no.readonly = TRUE)
  on.exit(par(op))
  zz <- unique(data[, z])
  # sets up the layout to cascade horizontally or vertically
  # and sets xlim and ylim appropriately
  if (horiz) {
    par(mfrow = c(1, length(zz)), ...)
    ylim <- range(data[, y])
    xlim <- NULL
  } else {
    par(mfrow = c(length(zz), 1), ...)
    xlim <- range(data[, x])
    ylim <- NULL
  }
  # make a subset of data for each unique by variable
  # and draw a basic plot for each one
  for (ii in zz) {
    tmp <- data[data[, z] %in% ii, ]
    plot(tmp[, x], tmp[, y], xlim = xlim, ylim = ylim)
  }
}
facet_wrap(dat, 'position', 'score', 'z', horiz = FALSE)
# ggplot ------------------------------------------------------------------------------------------
library("tidyverse")
# Downloads the TidyTuesday (2019-08-20) nuclear explosions data over the network.
nuclear_explosions <- readr::read_csv("https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2019/2019-08-20/nuclear_explosions.csv")
# Basic count plot: top-15 regions (rest lumped into "Other"), horizontal bars.
nuclear_explosions %>%
  mutate(region = fct_lump(region %>% as.factor, n = 15)) %>%
  count(region) %>%
  ggplot(aes(x = reorder(region, n), y = n)) +
  geom_col() +
  coord_flip()
# Count with color: per-(type, country) counts, top-5 types, bars filled by country.
nuclear_explosions %>%
  group_by(type, country) %>%
  summarise(count = n()) %>%
  ungroup() %>%
  mutate(type = fct_lump(type %>% as.factor, n = 5)) %>%
  ggplot(aes(x = reorder(type, count), y = count, fill = country)) +
  geom_bar(stat = "identity") +
  scale_fill_brewer() +
  coord_flip()
|
#### Regression exercises for seminar 2 ####
#### Preparations ####
## Set the working directory, download the data from GitHub, and save it in the wd.
setwd("")
load("Seminar2.RData")
## Load packages
## NOTE(review): dplyr is used further down (%>%, select, filter) but is
## never loaded here -- confirm it is attached elsewhere.
library(stargazer)
library(ggplot2)
library(ggthemes)
#### Exercise 2.1 ####
## Bivariate regression: trust (tillit) on social status (skala10).
reg1 <- lm(tillit~skala10, data=tillit)
summary(reg1)
nobs(reg1)
stargazer(reg1, type="text") # Shows all the output you need for this exercise
#### Exercise 2.2 ####
## Scatter plot with the fitted regression line, in base graphics and ggplot2.
plot(tillit$skala10, tillit$tillit)
abline(reg1, col="red")
ggplot(tillit, aes(x = skala10, y = tillit)) +
  geom_point() +
  geom_smooth(method="lm") +
  theme_bw()
#### Exercise 2.3 ####
## Multiple regression: add education (utdanning) as a control.
reg2 <- lm(tillit~skala10+utdanning, data=tillit)
summary(reg2)
#### Exercise 2.4 ####
mean(tillit$skala10, na.rm=TRUE)
sd(tillit$skala10, na.rm=TRUE)
#### Exercise 2.5 ####
## Mean-center skala10 (centering only, no rescaling of the variance).
tillit$sosstat.ms <- scale(tillit$skala10, center=TRUE, scale=FALSE)
summary(tillit$sosstat.ms)
sd(tillit$sosstat.ms, na.rm=TRUE)
#### Exercise 2.6 ####
#1. Define a new variable: tillit$utd3
#2. When utdanning has the value 1, the new variable should also have the value 1
#3. When utdanning is greater than 1 but less than 5, the new variable should have the value 2
#4. When utdanning is greater than 4, the new variable should have the value 3
#5. The rest are defined as missing (NA)
attributes(tillit$utdanning)
tillit$utd3 <- ifelse(tillit$utdanning==1, 1, NA)
tillit$utd3 <- ifelse(tillit$utdanning>1 & tillit$utdanning<5, 2, tillit$utd3)
tillit$utd3 <- ifelse(tillit$utdanning>=5, 3, tillit$utd3)
# everything in a single nested statement
tillit$utd3 <- ifelse(tillit$utdanning==1, 1,
                      ifelse(tillit$utdanning>1 & tillit$utdanning<5, 2,
                             ifelse(tillit$utdanning>=5, 3, NA)))
table(tillit$utd3, tillit$utdanning)
str(tillit)
#### Exercise 2.7 ####
## Interaction terms: status x education level 2 and status x education level 3.
tillit$samspill.status.vgs <- tillit$skala10*ifelse(tillit$utd3 == 2, 1, 0)
tillit$samspill.status.uni <- tillit$skala10*ifelse(tillit$utd3 == 3, 1, 0)
#### Exercise 2.8 ####
reg3 <- lm(tillit~skala10+as.factor(utd3)+samspill.status.vgs+samspill.status.uni,data=tillit)
summary(reg3)
## Alternatively: make a factor from utd3 and interact it with skala10:
reg3b <- lm(tillit ~ skala10+as.factor(utd3)*skala10, data=tillit)
summary(reg3b)
stargazer(reg3, reg3b, type="text")
#### Exercise 2.9 ####
save(tillit,
     file = "Seminar2_ed.RData")
names(tillit)
summary(tillit$skala10)
## Build a data set containing only the observations used in reg3b:
reg3b_data <- tillit %>%
  select(c("skala10", "utd3" , "tillit"))
reg3b_data <- reg3b_data %>%
  filter(complete.cases(reg3b_data)==T)
### Solution to the extra exercise
## Prediction grid: skala10 from its min to max in steps of .1, repeated for
## each of the three education levels (91 grid points per level assumed --
## TODO confirm the step count matches the observed skala10 range).
data_for_prediction <- data.frame(skala10 = rep(seq(min(reg3b_data$skala10),
                                                    max(reg3b_data$skala10), .1),3),
                                  utd3 = as.factor(c(rep(1, 91), rep(2, 91), rep(3, 91))))
## Step 3: new data set with predicted values for the dependent variable, plus standard errors:
predicted_data <- predict(reg3b, newdata = data_for_prediction,
                          se=TRUE)
## Step 4: combine the data from steps 2 and 3:
plot_data <- cbind(predicted_data, data_for_prediction)
## Step 5: compute confidence intervals from the step-3 standard errors and add
## them to plot_data. Here: 95% CI using ordinary standard errors.
## NOTE(review): predict() names the column `se.fit`; `plot_data$se` relies on
## partial name matching -- confirm this picks up se.fit as intended.
plot_data$low <- plot_data$fit - 1.96*plot_data$se
plot_data$high <- plot_data$fit + 1.96*plot_data$se
## Step 6: plot
p <- ggplot(reg3b_data, aes(x = skala10, y = tillit)) +
  geom_rangeframe() +
  ggtitle("Tillit") +
  theme_tufte() +
  scale_x_continuous(breaks = extended_range_breaks()(reg3b_data$skala10)) +
  scale_y_continuous(breaks = extended_range_breaks()(reg3b_data$tillit)) +
  ylab("Tillit") +
  xlab("Sosial status") +
  geom_point() +
  geom_ribbon(data=plot_data, aes(y=fit, ymin=low, ymax=high, fill=utd3), alpha=.2) +
  geom_line(data=plot_data, aes(y=fit, colour=utd3))
p
| /Materiell fra tidl semestre/h18/Gruppe 1/scripts/Seminar 2.R | no_license | liserodland/stv4020aR | R | false | false | 3,908 | r | #### Regresjonsoppgaver til seminar 2 ####
#### Forberedelser ####
## Sett working directory, last ned data fra github, og lagre i wd.
setwd("")
load("Seminar2.RData")
## Laster inn pakker
library(stargazer)
library(ggplot2)
library(ggthemes)
#### Oppgave 2.1 ####
reg1 <- lm(tillit~skala10, data=tillit)
summary(reg1)
nobs(reg1)
stargazer(reg1, type="text") # Viser all output du trenger til denne oppgaven
#### Oppgave 2.2 ####
plot(tillit$skala10, tillit$tillit)
abline(reg1, col="red")
ggplot(tillit, aes(x = skala10, y = tillit)) +
geom_point() +
geom_smooth(method="lm") +
theme_bw()
#### Oppgave 2.3 ####
reg2 <- lm(tillit~skala10+utdanning, data=tillit)
summary(reg2)
#### Oppgave 2.4 ####
mean(tillit$skala10, na.rm=TRUE)
sd(tillit$skala10, na.rm=TRUE)
#### Oppgave 2.5 ####
tillit$sosstat.ms <- scale(tillit$skala10, center=TRUE, scale=FALSE)
summary(tillit$sosstat.ms)
sd(tillit$sosstat.ms, na.rm=TRUE)
#### Exercise 2.6 ####
# Recode education into three levels (tillit$utd3):
# 1. Define the new variable tillit$utd3
# 2. When utdanning has value 1, the new variable also gets value 1
# 3. When utdanning is greater than 1 but less than 5, the new variable gets value 2
# 4. When utdanning is greater than 4, the new variable gets value 3
# 5. Everything else is set to missing (NA)
attributes(tillit$utdanning)
tillit$utd3 <- ifelse(tillit$utdanning==1, 1, NA)
tillit$utd3 <- ifelse(tillit$utdanning>1 & tillit$utdanning<5, 2, tillit$utd3)
tillit$utd3 <- ifelse(tillit$utdanning>=5, 3, tillit$utd3)
# same recode in a single nested ifelse (overwrites the step-by-step version)
tillit$utd3 <- ifelse(tillit$utdanning==1, 1,
ifelse(tillit$utdanning>1 & tillit$utdanning<5, 2,
ifelse(tillit$utdanning>=5, 3, NA)))
table(tillit$utd3, tillit$utdanning)  # cross-tabulate to verify the recode
str(tillit)
#### Exercise 2.7 ####
# Manual interaction terms: status x dummy for each education level above 1
tillit$samspill.status.vgs <- tillit$skala10*ifelse(tillit$utd3 == 2, 1, 0)
tillit$samspill.status.uni <- tillit$skala10*ifelse(tillit$utd3 == 3, 1, 0)
#### Exercise 2.8 ####
reg3 <- lm(tillit~skala10+as.factor(utd3)+samspill.status.vgs+samspill.status.uni,data=tillit)
summary(reg3)
## Alternatively: make a factor from utd3 and interact it with skala10
## (equivalent to the manual interaction terms above):
reg3b <- lm(tillit ~ skala10+as.factor(utd3)*skala10, data=tillit)
summary(reg3b)
stargazer(reg3, reg3b, type="text")
#### Exercise 2.9 ####
# Save the edited data for later use
save(tillit,
file = "Seminar2_ed.RData")
names(tillit)
summary(tillit$skala10)
## Build a dataset with only the observations used in reg3b (requires dplyr):
reg3b_data <- tillit %>%
select(c("skala10", "utd3" , "tillit"))
reg3b_data <- reg3b_data %>%
filter(complete.cases(reg3b_data)==T)  # NOTE(review): ==T is redundant; complete.cases() is already logical
### Solution to the extra exercise
# Prediction grid: skala10 from min to max in steps of .1, repeated once per
# education level. NOTE(review): rep(..., 91) hard-codes the grid length -
# confirm it equals length(seq(min, max, .1)) for these data.
data_for_prediction <- data.frame(skala10 = rep(seq(min(reg3b_data$skala10),
max(reg3b_data$skala10), .1),3),
utd3 = as.factor(c(rep(1, 91), rep(2, 91), rep(3, 91))))
## Step 3: New dataset with predicted values of the dependent variable, and standard errors:
predicted_data <- predict(reg3b, newdata = data_for_prediction,
se=TRUE)
## Step 4: Combine the data from steps 2 and 3:
plot_data <- cbind(predicted_data, data_for_prediction)
## Step 5: Compute 95% confidence intervals from the standard errors (step 3)
## and add them to plot_data (step 4). Normal-approximation bounds with plain
## standard errors: fit +/- 1.96 * se.
plot_data$low <- plot_data$fit - 1.96*plot_data$se
plot_data$high <- plot_data$fit + 1.96*plot_data$se
## Step 6: Plot observed points plus fitted lines and CI ribbons,
## one colour/fill per education group (utd3).
p <- ggplot(reg3b_data, aes(x = skala10, y = tillit)) +
geom_rangeframe() +
ggtitle("Tillit") +
theme_tufte() +
scale_x_continuous(breaks = extended_range_breaks()(reg3b_data$skala10)) +
scale_y_continuous(breaks = extended_range_breaks()(reg3b_data$tillit)) +
ylab("Tillit") +
xlab("Sosial status") +
geom_point() +
# fitted values and CI come from plot_data (model predictions), not raw data
geom_ribbon(data=plot_data, aes(y=fit, ymin=low, ymax=high, fill=utd3), alpha=.2) +
geom_line(data=plot_data, aes(y=fit, colour=utd3))
p
|
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584291863748e+77, 9.70776725605997e+295, 2.10736587628522e+101, 5.78517196954163e+98, 2.02410200510026e-79, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 5L)))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result) | /CNull/inst/testfiles/communities_individual_based_sampling_alpha/AFL_communities_individual_based_sampling_alpha/communities_individual_based_sampling_alpha_valgrind_files/1615772444-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 350 | r | testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584291863748e+77, 9.70776725605997e+295, 2.10736587628522e+101, 5.78517196954163e+98, 2.02410200510026e-79, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 5L)))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result) |
## Getting & Cleaning Data course project: build a tidy data set from the
## UCI HAR (smartphone accelerometer) raw files.
## NOTE(review): install.packages() calls removed - installing on every run is
## an unwanted side effect; install the packages once, interactively.
library(plyr)
library(knitr)

## Download and unpack the raw data into ./data.
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
if (!dir.exists("./data")) dir.create("./data")  # download.file fails if the folder is missing
download.file(fileUrl, destfile = "./data/Dataset.zip", method = "curl")
unzip(zipfile = "./data/Dataset.zip", exdir = "./data")

## Root of the unpacked data set; used below but never defined in the original script.
path_rf <- file.path("./data", "UCI HAR Dataset")

## Read activity labels, subject ids and feature measurements for both splits.
dataActivityTest  <- read.table(file.path(path_rf, "test",  "Y_test.txt"),  header = FALSE)
dataActivityTrain <- read.table(file.path(path_rf, "train", "Y_train.txt"), header = FALSE)
dataSubjectTrain  <- read.table(file.path(path_rf, "train", "subject_train.txt"), header = FALSE)
dataSubjectTest   <- read.table(file.path(path_rf, "test",  "subject_test.txt"),  header = FALSE)
dataFeaturesTest  <- read.table(file.path(path_rf, "test",  "X_test.txt"),  header = FALSE)
dataFeaturesTrain <- read.table(file.path(path_rf, "train", "X_train.txt"), header = FALSE)

## Merge train and test sets, then label the columns.
dataSubject  <- rbind(dataSubjectTrain, dataSubjectTest)
dataActivity <- rbind(dataActivityTrain, dataActivityTest)
dataFeatures <- rbind(dataFeaturesTrain, dataFeaturesTest)
names(dataSubject)  <- c("subject")
names(dataActivity) <- c("activity")
dataFeaturesNames <- read.table(file.path(path_rf, "features.txt"), head = FALSE)
names(dataFeatures) <- dataFeaturesNames$V2
dataCombine <- cbind(dataSubject, dataActivity)
Data <- cbind(dataFeatures, dataCombine)

## Keep only mean() and std() measurements (plus subject/activity).
subdataFeaturesNames <- dataFeaturesNames$V2[grep("mean\\(\\)|std\\(\\)", dataFeaturesNames$V2)]
selectedNames <- c(as.character(subdataFeaturesNames), "subject", "activity")
Data <- subset(Data, select = selectedNames)

## Expand the terse feature names into descriptive ones.
names(Data) <- gsub("^t", "Time_", names(Data))
names(Data) <- gsub("^f", "Frequency_", names(Data))
names(Data) <- gsub("Acc", "Accelerometer", names(Data))
names(Data) <- gsub("Gyro", "Gyroscope", names(Data))
names(Data) <- gsub("Mag", "_Magnitude", names(Data))
names(Data) <- gsub("Jerk", "_Jerk", names(Data))
names(Data) <- gsub("BodyBody", "Body_", names(Data))
## fixed = TRUE so "()" is matched literally; as a regex "-std()" matches only
## "-std" (empty group) and leaves a stray "()" in every renamed column.
names(Data) <- gsub("-std()", "_Std", names(Data), fixed = TRUE)
names(Data) <- gsub("-mean()", "_Mean", names(Data), fixed = TRUE)
names(Data)

## Tidy data set: the mean of every variable per subject and activity.
Data2 <- aggregate(. ~ subject + activity, Data, mean)
Data2 <- Data2[order(Data2$subject, Data2$activity), ]
write.table(Data2, file = "tidydata.txt",row.name=FALSE) | /run_analysis.R | no_license | SH414/CourseraDataCleaning_Week4Project | R | false | false | 2,181 | r | install.packages("plyr")
library(plyr)
install.packages("knitr")
library(knitr)
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileUrl,destfile="./data/Dataset.zip",method="curl")
unzip(zipfile="./data/Dataset.zip",exdir="./data")
dataActivityTest <- read.table(file.path(path_rf, "test" , "Y_test.txt" ),header = FALSE)
dataActivityTrain <- read.table(file.path(path_rf, "train", "Y_train.txt"),header = FALSE)
dataSubjectTrain <- read.table(file.path(path_rf, "train", "subject_train.txt"),header = FALSE)
dataSubjectTest <- read.table(file.path(path_rf, "test" , "subject_test.txt"),header = FALSE)
dataFeaturesTest <- read.table(file.path(path_rf, "test" , "X_test.txt" ),header = FALSE)
dataFeaturesTrain <- read.table(file.path(path_rf, "train", "X_train.txt"),header = FALSE)
dataSubject <- rbind(dataSubjectTrain, dataSubjectTest)
dataActivity<- rbind(dataActivityTrain, dataActivityTest)
dataFeatures<- rbind(dataFeaturesTrain, dataFeaturesTest)
names(dataSubject)<-c("subject")
names(dataActivity)<- c("activity")
dataFeaturesNames <- read.table(file.path(path_rf, "features.txt"),head=FALSE)
names(dataFeatures)<- dataFeaturesNames$V2
dataCombine <- cbind(dataSubject, dataActivity)
Data <- cbind(dataFeatures, dataCombine)
subdataFeaturesNames<-dataFeaturesNames$V2[grep("mean\\(\\)|std\\(\\)", dataFeaturesNames$V2)]
selectedNames<-c(as.character(subdataFeaturesNames), "subject", "activity" )
Data<-subset(Data,select=selectedNames)
names(Data)<-gsub("^t", "Time_", names(Data))
names(Data)<-gsub("^f", "Frequency_", names(Data))
names(Data)<-gsub("Acc", "Accelerometer", names(Data))
names(Data)<-gsub("Gyro", "Gyroscope", names(Data))
names(Data)<-gsub("Mag", "_Magnitude", names(Data))
names(Data)<-gsub("Jerk", "_Jerk", names(Data))
names(Data)<-gsub("BodyBody", "Body_", names(Data))
names(Data)<-gsub("-std()", "_Std", names(Data))
names(Data)<-gsub("-mean()", "_Mean", names(Data))
names(Data)
Data2<-aggregate(. ~subject + activity, Data, mean)
Data2<-Data2[order(Data2$subject,Data2$activity),]
write.table(Data2, file = "tidydata.txt",row.name=FALSE) |
####**********************************************************************
####**********************************************************************
####
#### ----------------------------------------------------------------
#### Written by:
#### John Ehrlinger, Ph.D.
#### Assistant Staff
#### Dept of Quantitative Health Sciences
####          Lerner Research Institute
#### Cleveland Clinic Foundation
####
#### email: john.ehrlinger@gmail.com
#### URL: https://github.com/ehrlinger/ggRandomForests
#### ----------------------------------------------------------------
####
####**********************************************************************
####**********************************************************************
#'
#' randomForestSRC error rate data object
#'
#' Extract the cumulative (OOB) randomForestSRC error rate as a function of
#' number of trees.
#'
#' @details The gg_error function simply returns the rfsrc$err.rate object as
#' a data.frame, and assigns the class for connecting to the \code{\link{plot.gg_error}}
#' function.
#'
#' @param object randomForestSRC object.
#' @param ... optional arguments (not used).
#'
#' @return gg_error data.frame with one column indicating the tree number,
#' and the remaining columns from the rfsrc$err.rate return value.
#'
#' @export gg_error.ggRandomForests gg_error
#'
#' @seealso \code{\link{plot.gg_error}} \code{rfsrc} \code{plot.rfsrc}
#'
#' @references
#' Breiman L. (2001). Random forests, Machine Learning, 45:5-32.
#'
#' Ishwaran H. and Kogalur U.B. (2007). Random survival forests for R, Rnews, 7(2):25-31.
#'
#' Ishwaran H. and Kogalur U.B. (2013). Random Forests for Survival, Regression
#' and Classification (RF-SRC), R package version 1.4.
#'
#' @aliases gg_error gg_error.ggRandomForests
#'
#' @examples
#' ## Examples from RFSRC package...
#' ## ------------------------------------------------------------
#' ## classification example
#' ## ------------------------------------------------------------
#' ## You can build a randomForest
#' # iris_rf <- rfsrc(Species ~ ., data = iris)
#' # ... or load a cached randomForestSRC object
#' data(iris_rf, package="ggRandomForests")
#'
#' # Get a data.frame containing error rates
#' ggrf.obj<- gg_error(iris_rf)
#'
#' # Plot the gg_error object
#' plot(ggrf.obj)
#'
#' ## ------------------------------------------------------------
#' ## Regression example
#' ## ------------------------------------------------------------
#' # airq_rf <- rfsrc(Ozone ~ ., data = airquality, na.action = "na.impute")
#' # ... or load a cached randomForestSRC object
#' data(airq_rf, package="ggRandomForests")
#'
#' # Get a data.frame containing error rates
#' ggrf.obj<- gg_error(airq_rf)
#'
#' # Plot the gg_error object
#' plot(ggrf.obj)
#'
#' ## ------------------------------------------------------------
#' ## Survival example
#' ## ------------------------------------------------------------
#' ## veteran data
#' ## randomized trial of two treatment regimens for lung cancer
#' # data(veteran, package = "randomForestSRC")
#' # veteran_rf <- rfsrc(Surv(time, status) ~ ., data = veteran, ntree = 100)
#'
#' # Load a cached randomForestSRC object
#' data(veteran_rf, package="ggRandomForests")
#'
#' ggrf.obj <- gg_error(veteran_rf)
#' plot(ggrf.obj)
#'
#'
### Error rate extraction: convert the cumulative (OOB) error rates of an
### rfsrc forest into a tidy data.frame (one row per tree) for plot.gg_error.
gg_error.ggRandomForests <- function(object, ...) {
  ## Check that the input object is of the correct type.
  if (!inherits(object, "rfsrc")) {
    stop("This function only works for Forests grown with the randomForestSRC package.")
  }
  if (is.null(object$err.rate)) {
    stop("Performance values are not available for this forest.")
  }

  error <- data.frame(object$err.rate)

  ## Regression/survival forests return a vector; wrap it in a one-column frame.
  if (is.null(dim(error))) {
    error <- data.frame(error = cbind(error))
  }

  ## data.frame() deparses the vector case's name; normalise it to "error".
  if ("object.err.rate" %in% colnames(error)) {
    colnames(error)[which(colnames(error) == "object.err.rate")] <- "error"
  }

  ## One row per tree grown, in order (seq_len is safe for zero rows,
  ## unlike the original 1:dim(error)[1]).
  error$ntree <- seq_len(nrow(error))

  class(error) <- c("gg_error", class(error))
  invisible(error)
}
gg_error <- gg_error.ggRandomForests
| /R/gg_error.ggRandomForests.R | no_license | RmeanyMAN/ggRandomForests | R | false | false | 4,080 | r | ####**********************************************************************
####**********************************************************************
####
#### ----------------------------------------------------------------
#### Written by:
#### John Ehrlinger, Ph.D.
#### Assistant Staff
#### Dept of Quantitative Health Sciences
####          Lerner Research Institute
#### Cleveland Clinic Foundation
####
#### email: john.ehrlinger@gmail.com
#### URL: https://github.com/ehrlinger/ggRandomForests
#### ----------------------------------------------------------------
####
####**********************************************************************
####**********************************************************************
#'
#' randomForestSRC error rate data object
#'
#' Extract the cumulative (OOB) randomForestSRC error rate as a function of
#' number of trees.
#'
#' @details The gg_error function simply returns the rfsrc$err.rate object as
#' a data.frame, and assigns the class for connecting to the \code{\link{plot.gg_error}}
#' function.
#'
#' @param object randomForestSRC object.
#' @param ... optional arguments (not used).
#'
#' @return gg_error data.frame with one column indicating the tree number,
#' and the remaining columns from the rfsrc$err.rate return value.
#'
#' @export gg_error.ggRandomForests gg_error
#'
#' @seealso \code{\link{plot.gg_error}} \code{rfsrc} \code{plot.rfsrc}
#'
#' @references
#' Breiman L. (2001). Random forests, Machine Learning, 45:5-32.
#'
#' Ishwaran H. and Kogalur U.B. (2007). Random survival forests for R, Rnews, 7(2):25-31.
#'
#' Ishwaran H. and Kogalur U.B. (2013). Random Forests for Survival, Regression
#' and Classification (RF-SRC), R package version 1.4.
#'
#' @aliases gg_error gg_error.ggRandomForests
#'
#' @examples
#' ## Examples from RFSRC package...
#' ## ------------------------------------------------------------
#' ## classification example
#' ## ------------------------------------------------------------
#' ## You can build a randomForest
#' # iris_rf <- rfsrc(Species ~ ., data = iris)
#' # ... or load a cached randomForestSRC object
#' data(iris_rf, package="ggRandomForests")
#'
#' # Get a data.frame containing error rates
#' ggrf.obj<- gg_error(iris_rf)
#'
#' # Plot the gg_error object
#' plot(ggrf.obj)
#'
#' ## ------------------------------------------------------------
#' ## Regression example
#' ## ------------------------------------------------------------
#' # airq_rf <- rfsrc(Ozone ~ ., data = airquality, na.action = "na.impute")
#' # ... or load a cached randomForestSRC object
#' data(airq_rf, package="ggRandomForests")
#'
#' # Get a data.frame containing error rates
#' ggrf.obj<- gg_error(airq_rf)
#'
#' # Plot the gg_error object
#' plot(ggrf.obj)
#'
#' ## ------------------------------------------------------------
#' ## Survival example
#' ## ------------------------------------------------------------
#' ## veteran data
#' ## randomized trial of two treatment regimens for lung cancer
#' # data(veteran, package = "randomForestSRC")
#' # veteran_rf <- rfsrc(Surv(time, status) ~ ., data = veteran, ntree = 100)
#'
#' # Load a cached randomForestSRC object
#' data(veteran_rf, package="ggRandomForests")
#'
#' ggrf.obj <- gg_error(veteran_rf)
#' plot(ggrf.obj)
#'
#'
### Error rate extraction: convert the cumulative (OOB) error rates of an
### rfsrc forest into a tidy data.frame (one row per tree) for plot.gg_error.
gg_error.ggRandomForests <- function(object, ...) {
  ## Check that the input object is of the correct type.
  if (!inherits(object, "rfsrc")) {
    stop("This function only works for Forests grown with the randomForestSRC package.")
  }
  if (is.null(object$err.rate)) {
    stop("Performance values are not available for this forest.")
  }

  error <- data.frame(object$err.rate)

  ## Regression/survival forests return a vector; wrap it in a one-column frame.
  if (is.null(dim(error))) {
    error <- data.frame(error = cbind(error))
  }

  ## data.frame() deparses the vector case's name; normalise it to "error".
  if ("object.err.rate" %in% colnames(error)) {
    colnames(error)[which(colnames(error) == "object.err.rate")] <- "error"
  }

  ## One row per tree grown, in order (seq_len is safe for zero rows,
  ## unlike the original 1:dim(error)[1]).
  error$ntree <- seq_len(nrow(error))

  class(error) <- c("gg_error", class(error))
  invisible(error)
}
gg_error <- gg_error.ggRandomForests
|
# Shiny dashboard for US tornado data (Illinois focus): package loads and raw data.
library(leaflet)
library(ggplot2)
library(lubridate)
library(shiny)
library(shinydashboard)
library(maps)
library(reshape2)
library(DT)
library(scales)
library(plyr)
library(maps)  # NOTE(review): duplicate of the library(maps) call above
library(plotly)
library(RColorBrewer)
# Tornado records, one row per tornado track; columns follow the SPC layout
# used below (mag/inj/fat/loss etc.) - confirm against the CSV header.
tornadoes <- read.csv("tornadoes.csv")
# Magnitude codes used by the UI (-9 = unknown magnitude)
magnitudes <-c("-9", "0", "1", "2", "3", "4", "5")
# Hour of day (0-23) each tornado touched down, parsed from the time column
hours <- hour(strptime(tornadoes$time, "%H:%M:%S"))
# Maybe add in Thunderforest.SpinalMap for fun....
provider_tiles <- c("Stamen Toner", "Open Topo Map", "Thunderforest Landscape", "Esri World Imagery", "Stamen Watercolor")
############################## Some of Jasons data ##########################################
counties_names <- read.csv("counties.csv")
IL_Code <- 17
# Get IL tornadoes from file
illinois_tornadoes <- subset(tornadoes, stf == IL_Code)
#combine all the tornadoes from the f1-f4 code counts excluding the 0th county
illinois_counties <- as.data.frame(table(a = c(illinois_tornadoes[,"f1"], illinois_tornadoes[,"f2"], illinois_tornadoes[,"f3"], illinois_tornadoes[,"f4"])))
illinois_counties <- illinois_counties[-c(1), ]
names(illinois_counties) <- c("Code", "Frequency")
countyInfo <- data.frame(County=counties_names$County, Frequency= illinois_counties$Frequency)
############################################
#sorting by largest magnitude tornadoes
magnitude_sorted <- illinois_tornadoes[order(-illinois_tornadoes[,11]),]
magnitude_sorted10 <- head(magnitude_sorted,10)
injuries_sorted <- illinois_tornadoes[order(-illinois_tornadoes[,12]),]
injuries_sorted10 <- head(injuries_sorted,10)
fatalities_sorted <- illinois_tornadoes[order(-illinois_tornadoes[,13]),]
fatalities_sorted10 <-head(fatalities_sorted, 10)
#dania's code
fips <- read.csv(file="US_FIPS_Codes.csv", header=TRUE, sep=",")
fipsIllinois <- subset(fips, State == "Illinois")
illinois <- map_data("county") %>%
filter(region == 'illinois')
tornadoesIL <- subset(tornadoes, st == "IL")
tornadoesIL$hr <- as.POSIXlt(tornadoesIL$time, format="%H:%M")$hour
tornadoesIL$countyNamef1 <- fipsIllinois$County.Name[match(tornadoesIL$f1, fipsIllinois$FIPS.County)]
tornadoesIL$countyNamef1 <- tolower(tornadoesIL$countyNamef1)
tornadoesIL$countyNamef2 <- fipsIllinois$County.Name[match(tornadoesIL$f2, fipsIllinois$FIPS.County)]
tornadoesIL$countyNamef2 <- tolower(tornadoesIL$countyNamef2)
tornadoesIL$countyNamef3 <- fipsIllinois$County.Name[match(tornadoesIL$f3, fipsIllinois$FIPS.County)]
tornadoesIL$countyNamef3 <- tolower(tornadoesIL$countyNamef3)
tornadoesIL$countyNamef4 <- fipsIllinois$County.Name[match(tornadoesIL$f4, fipsIllinois$FIPS.County)]
tornadoesIL$countyNamef4 <- tolower(tornadoesIL$countyNamef4)
# Recode tornadoesIL$loss onto one categorical 0-7 damage scale
# (0 = unknown ... 7 = over $500M). The raw encoding changed over time:
#   yr >= 2016 : loss recorded in whole dollars
#   1996-2015  : loss recorded in millions of dollars
#   pre-1996   : already categorical (offset by 2 above category 1)
# BUG FIX: the original 2016+ branch tested
#   loss >= 50000000 & loss < 50000000   (always FALSE, so category 6 was
# unreachable) and used 50000000 instead of 500000000 as the category-7
# cutoff. The bin edges below mirror the 1996-2015 boundaries (x 1e6).
dollar_category <- function(loss, scale = 1) {
  # breaks: $5K, $50K, $500K, $5M, $50M, $500M (scaled to the unit in use)
  breaks <- c(5e3, 5e4, 5e5, 5e6, 5e7, 5e8) * scale
  # findInterval maps (0, 5K) -> 1, [5K, 50K) -> 2, ..., >= 500M -> 7;
  # zero/negative/unknown loss stays category 0, NA propagates
  ifelse(loss > 0, findInterval(loss, breaks) + 1, 0)
}
tornadoesIL$loss <- ifelse(tornadoesIL$yr >= 2016,
                           dollar_category(tornadoesIL$loss),
                           ifelse(tornadoesIL$yr >= 1996,
                                  dollar_category(tornadoesIL$loss, scale = 1e-6),
                                  # pre-1996: keep 0, map codes <= 3 with any damage
                                  # to 1, otherwise shift the code down by 2
                                  ifelse(tornadoesIL$loss <= 3,
                                         ifelse(tornadoesIL$loss > 0, 1, 0),
                                         tornadoesIL$loss - 2)))
# Helper: contributes one count per tornado record when summed, so
# sum(countLoss(loss)) counts the tornadoes in a loss category. The original
# expression ifelse(loss != 0, loss/loss, 1) always evaluated to 1 anyway
# (with NA propagating); this spells that intent out directly.
countLoss <- function(loss){
  ifelse(is.na(loss), NA_real_, 1)
}
#get the yearly, monthly, and hourly loss tables
yearlyloss <- tornadoesIL %>%
group_by(yr, loss) %>%
summarise(yrloss= sum(countLoss(loss)))
#to present categorically in the legend
yearlyloss$loss[yearlyloss$loss == 0] <- "0:Unknown"
yearlyloss$loss[yearlyloss$loss == 1] <- "1:Between 0 and 5,000"
yearlyloss$loss[yearlyloss$loss == 2] <- "2:Between 5,000 and 50,000"
yearlyloss$loss[yearlyloss$loss == 3] <- "3:Between 50,000 and 500,000"
yearlyloss$loss[yearlyloss$loss == 4] <- "4:Between 500,000 and 5,000,000"
yearlyloss$loss[yearlyloss$loss == 5] <- "5:Between 5,000,000 and 50,000,000"
yearlyloss$loss[yearlyloss$loss == 6] <- "6:Between 50,000,000 and 500,000,000"
yearlyloss$loss[yearlyloss$loss == 7] <- "7:Greater than 500,000,000"
monthlyloss <- tornadoesIL %>%
group_by(mo, loss) %>%
summarise(moloss= sum(countLoss(loss)))
monthlyloss$loss[monthlyloss$loss == 0] <- "0:Unknown"
monthlyloss$loss[monthlyloss$loss == 1] <- "1:Between 0 and 5,000"
monthlyloss$loss[monthlyloss$loss == 2] <- "2:Between 5,000 and 50,000"
monthlyloss$loss[monthlyloss$loss == 3] <- "3:Between 50,000 and 500,000"
monthlyloss$loss[monthlyloss$loss == 4] <- "4:Between 500,000 and 5,000,000"
monthlyloss$loss[monthlyloss$loss == 5] <- "5:Between 5,000,000 and 50,000,000"
monthlyloss$loss[monthlyloss$loss == 6] <- "6:Between 50,000,000 and 500,000,000"
monthlyloss$loss[monthlyloss$loss == 7] <- "7:Greater than 500,000,000"
hourlyloss <- tornadoesIL %>%
group_by(hr, loss) %>%
summarise(hrloss=sum(countLoss(loss)))
hourlyloss$loss[hourlyloss$loss == 0] <- "0:Unknown"
hourlyloss$loss[hourlyloss$loss == 1] <- "1:Between 0 and 5,000"
hourlyloss$loss[hourlyloss$loss == 2] <- "2:Between 5,000 and 50,000"
hourlyloss$loss[hourlyloss$loss == 3] <- "3:Between 50,000 and 500,000"
hourlyloss$loss[hourlyloss$loss == 4] <- "4:Between 500,000 and 5,000,000"
hourlyloss$loss[hourlyloss$loss == 5] <- "5:Between 5,000,000 and 50,000,000"
hourlyloss$loss[hourlyloss$loss == 6] <- "6:Between 50,000,000 and 500,000,000"
hourlyloss$loss[hourlyloss$loss == 7] <- "7:Greater than 500,000,000"
tornadoesWithFips1 <- merge(illinois, tornadoesIL, by.x = "subregion", by.y = "countyNamef1")
tornadoesWithFips2 <- merge(illinois, tornadoesIL, by.x = "subregion", by.y = "countyNamef2")
tornadoesWithFips3 <- merge(illinois, tornadoesIL, by.x = "subregion", by.y = "countyNamef3")
tornadoesWithFips4 <- merge(illinois, tornadoesIL, by.x = "subregion", by.y = "countyNamef4")
#part A bullet 1 code
# --------------------------- PART A BULLET POINTS -----------------------------
# 1.) allow user to see data (injuries, fatalities, loss, total number of
# tornadoes of each magnitude) on a per county basis for all the Illinois
# counties on the map
countyInj <- aggregate(inj ~ subregion + lat + long + group + order, tornadoesWithFips1, sum)
countyInj2 <- aggregate(inj ~ subregion + lat + long + group + order, tornadoesWithFips2, sum)
names(countyInj2)[names(countyInj2) == "inj"] = "inj2"
countyInj2 <- countyInj2[ , -which(names(countyInj2) %in% c("subregion", "lat", "long", "group"))]
countyInj3 <- aggregate(inj ~ subregion + lat + long + group + order, tornadoesWithFips3, sum)
names(countyInj3)[names(countyInj3) == "inj"] = "inj3"
countyInj3 <- countyInj3[ , -which(names(countyInj3) %in% c("subregion", "lat", "long", "group"))]
countyInj4 <- aggregate(inj ~ subregion + lat + long + group + order, tornadoesWithFips4, sum)
names(countyInj4)[names(countyInj4) == "inj"] = "inj4"
countyInj4 <- countyInj4[ , -which(names(countyInj4) %in% c("subregion", "lat", "long", "group"))]
countyInj <- merge(x = countyInj, y = countyInj2, by = "order", all.x=TRUE)
countyInj <- merge(x = countyInj, y = countyInj3, by = "order", all.x=TRUE)
countyInj <- merge(x = countyInj, y = countyInj4, by = "order", all.x=TRUE)
countyInj$inj2[is.na(countyInj$inj2)] <- 0
countyInj$inj3[is.na(countyInj$inj3)] <- 0
countyInj$inj4[is.na(countyInj$inj4)] <- 0
countyInj$inj <- countyInj$inj + countyInj$inj2 + countyInj$inj3 + countyInj$inj4
countyInj<- countyInj[order(countyInj$order),]
names(countyInj)[names(countyInj) == "inj"] = "Injury"
countyInjuriesMap <- ggplot(countyInj, aes(x = countyInj$long, y = countyInj$lat, group = group, fill = Injury, text = paste('County: ', countyInj$subregion))) + geom_polygon(color='black') +
theme_bw() + theme(panel.border = element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.line = element_line(colour = "black")) +
scale_fill_gradient(high = "#132B43", low = "#56B1F7") +
theme(plot.title = element_blank(),
axis.title.x = element_blank(),
axis.title.y = element_blank())
#ggplotly(countyInjuriesMap)
#county deaths
countyDeaths <- aggregate(fat ~ subregion + lat + long + group + order, tornadoesWithFips1, sum)
countyDeaths2 <- aggregate(fat ~ subregion + lat + long + group + order, tornadoesWithFips2, sum)
names(countyDeaths2)[names(countyDeaths2) == "fat"] = "fat2"
countyDeaths2 <- countyDeaths2[ , -which(names(countyDeaths2) %in% c("subregion", "lat", "long", "group"))]
countyDeaths3 <- aggregate(fat ~ subregion + lat + long + group + order, tornadoesWithFips3, sum)
names(countyDeaths3)[names(countyDeaths3) == "fat"] = "fat3"
countyDeaths3 <- countyDeaths3[ , -which(names(countyDeaths3) %in% c("subregion", "lat", "long", "group"))]
countyDeaths4 <- aggregate(fat ~ subregion + lat + long + group + order, tornadoesWithFips4, sum)
names(countyDeaths4)[names(countyDeaths4) == "fat"] = "fat4"
countyDeaths4 <- countyDeaths4[ , -which(names(countyDeaths4) %in% c("subregion", "lat", "long", "group"))]
countyDeaths <- merge(x = countyDeaths, y = countyDeaths2, by = "order", all.x=TRUE)
countyDeaths <- merge(x = countyDeaths, y = countyDeaths3, by = "order", all.x=TRUE)
countyDeaths <- merge(x = countyDeaths, y = countyDeaths4, by = "order", all.x=TRUE)
countyDeaths$fat2[is.na(countyDeaths$fat2)] <- 0
countyDeaths$fat3[is.na(countyDeaths$fat3)] <- 0
countyDeaths$fat4[is.na(countyDeaths$fat4)] <- 0
countyDeaths$fat <- countyDeaths$fat + countyDeaths$fat2 + countyDeaths$fat3 + countyDeaths$fat4
countyDeaths <- countyDeaths[order(countyDeaths$order),]
names(countyDeaths)[names(countyDeaths) == "fat"] = "Fatalities"
countyDeathsMap <- ggplot(countyDeaths, aes(x = countyDeaths$long, y = countyDeaths$lat, group = group, fill = Fatalities, text = paste('County: ', countyDeaths$subregion))) + geom_polygon(color='black') +
theme_bw() + theme(panel.border = element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.line = element_line(colour = "black")) +
scale_fill_gradient(high = "#132B43", low = "#56B1F7") +
theme(plot.title = element_blank(),
axis.title.x = element_blank(),
axis.title.y = element_blank())
#ggplotly(countyDeathsMap)
#county loss
countyLoss <- tornadoesWithFips1
countyLoss2 <- tornadoesWithFips2
names(countyLoss2)[names(countyLoss2) == "loss"] = "loss2"
countyLoss2 <- countyLoss2[,c("order","loss2")]
countyLoss3 <- tornadoesWithFips3
names(countyLoss3)[names(countyLoss3) == "loss"] = "loss3"
countyLoss3 <- countyLoss3[,c("order","loss3")]
countyLoss4 <- tornadoesWithFips4
names(countyLoss4)[names(countyLoss4) == "loss"] = "loss4"
countyLoss4 <- countyLoss4[,c("order","loss4")]
countyLoss <- merge(x = countyLoss, y = countyLoss2, by = "order", all.x=TRUE)
countyLoss <- merge(x = countyLoss, y = countyLoss3, by = "order", all.x=TRUE)
countyLoss <- merge(x = countyLoss, y = countyLoss4, by = "order", all.x=TRUE)
countyLoss$loss2[is.na(countyLoss$loss2)] <- 0
countyLoss$loss3[is.na(countyLoss$loss3)] <- 0
countyLoss$loss4[is.na(countyLoss$loss4)] <- 0
countyLoss$loss <- pmax(countyLoss$loss, countyLoss$loss2, countyLoss$loss3, countyLoss$loss4)
countyLoss <- countyLoss[order(countyLoss$order),]
countyLoss$loss[countyLoss$loss == 0] <- "0: Unknown"
countyLoss$loss[countyLoss$loss == 1] <- "1: Between 0 and 5,000"
countyLoss$loss[countyLoss$loss == 2] <- "2: Between 5,000 and 50,000"
countyLoss$loss[countyLoss$loss == 3] <- "3: Between 50,000 and 500,000"
countyLoss$loss[countyLoss$loss == 4] <- "4: Between 500,000 and 5,000,000"
countyLoss$loss[countyLoss$loss == 5] <- "5: Between 5,000,000 and 50,000,000"
countyLoss$loss[countyLoss$loss == 6] <- "6: Between 50,000,000 and 500,000,000"
countyLoss$loss[countyLoss$loss == 7] <- "7: Greater than 500,000,000"
names(countyLoss)[names(countyLoss) == "loss"] = "Losses"
countyLossMap <- ggplot(countyLoss, aes(x = countyLoss$long, y = countyLoss$lat, group = group, fill = Losses, text = paste('County: ', countyLoss$subregion))) + geom_polygon(color='black') +
theme_bw() + theme(panel.border = element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.line = element_line(colour = "black")) +
scale_fill_brewer(palette="Blues")+ theme(
plot.title = element_blank(),
axis.title.x = element_blank(),
axis.title.y = element_blank())
#ggplotly(countyLossMap)
#graphing colors and functions
# Shared palette for the plotly charts below: light/dark pairs for the
# blue (single-series) and red (grouped/stacked) bar charts.
blueColorLight <- "#67b8df"
blueColorDark <- "#3d6e85"
redColorLight <- "#ec5757"
redColorDark <- "#762b2b"
# Single-series bar chart: one light-red bar per x value with a hover
# tooltip showing the bar's total. Returns a plotly htmlwidget.
dynamic_bar_graph <- function(data,x_axis, y_axis,x_label, y_label, title){
  # build the bar trace first, then apply the axis/title layout explicitly
  bars <- plot_ly(data, x = x_axis, y = y_axis, type = 'bar',
                  color = I(redColorLight),
                  hoverinfo = 'text', text = ~paste('Total: ', y_axis))
  layout(bars,
         title = title,
         xaxis = list(title = x_label, dtick = 1, tickangle = 45),
         yaxis = list(title = y_label))
}
# Grouped (side-by-side) two-series bar chart; series 1 light red, series 2
# dark red. Returns a plotly htmlwidget.
# NOTE(review): the hover text hard-codes "Total Injuries"/"Total Fatalities"
# even though the y-series parameters are generic, and legend_title is unused.
dynamic_bar_graph_grouped <- function(data, x_axis, y_axis1, label1, y_axis2, label2,
x_axis_label, y_axis_label, title, legend_title = "Legend"){
plot_ly(data, x = x_axis, y = y_axis1, type = 'bar', name = label1, marker = list(color = redColorLight),
hoverinfo='text', text = ~paste('Total Injuries: ', y_axis1,
'<br> Total Fatalities', y_axis2)) %>%
add_trace(data=data, x = x_axis, y = y_axis2, name = label2, marker = list(color = redColorDark)) %>%
layout(xaxis = list(title = x_axis_label, dtick=1, tickangle=45),
yaxis = list(title = y_axis_label),
title = title,
margin = list(b = 100),
barmode = 'group')
}
# Stacked bar chart: one stack segment per group (loss category), coloured on
# a red scale, with a detailed hover tooltip. Returns a plotly htmlwidget.
# BUG FIX: the original passed name = twice to plot_ly (name = label and
# name = group), which R rejects ("formal argument matched by multiple actual
# arguments"); the legend comes from group, and `label`/`legend_title` are
# kept only for interface compatibility.
dynamic_bar_graph_stacked <- function(data, x_axis, y_axis, group, label,
                                      x_axis_label, y_axis_label, title, legend_title = "Legend"){
  plot_ly(data, x = x_axis, y = y_axis, type = 'bar', name = group, color = group,
          colors = 'Reds',
          legendgroup = ~group,
          hoverinfo = 'text',
          text = ~paste(x_axis,
                        '<br> Number of Tornadoes: ', y_axis,
                        '<br> Loss Category: ', group)) %>%
    layout(xaxis = list(title = x_axis_label, dtick = 1, tickangle = 45),
           yaxis = list(title = y_axis_label),
           title = title,
           margin = list(b = 100),
           barmode = 'stack')
}
#tables and plots
# Sum the injury (col 12) and fatality (col 13) counts per year.
# Returns a data.frame with columns Year, Injuries, Fatalities.
getTornadoInjFatPerYearTable <- function(tornadoesIL){
  yearly <- aggregate(
    tornadoesIL[, 12:13],
    by = list(tornadoesIL$yr),
    FUN = sum
  )
  colnames(yearly)[1:3] <- c("Year", "Injuries", "Fatalities")
  yearly
}
# Relabel the yearly-loss summary's first three columns for display
# in the DataTable; the data itself is passed through unchanged.
getTornadoLossPerYearTable <- function(yearlyloss){
  renamed <- yearlyloss
  colnames(renamed)[1:3] <- c("Year", "Loss Category", "Number of Tornadoes")
  renamed
}
# Sum the injury (col 12) and fatality (col 13) counts per month.
# Returns a data.frame with columns Month, Injuries, Fatalities.
getTornadoInjFatPerMonthTable <- function(tornadoesIL){
  monthly <- aggregate(
    tornadoesIL[, 12:13],
    by = list(tornadoesIL$mo),
    FUN = sum
  )
  colnames(monthly)[1:3] <- c("Month", "Injuries", "Fatalities")
  monthly
}
# Relabel the monthly-loss summary's first three columns for display
# in the DataTable; the data itself is passed through unchanged.
getTornadoLossPerMonthTable <- function(monthlyloss){
  renamed <- monthlyloss
  colnames(renamed)[1:3] <- c("Month", "Loss Category", "Number of Tornadoes")
  renamed
}
# Sum the injury (col 12) and fatality (col 13) counts per hour of day.
# Returns a data.frame with columns Hour, Injuries, Fatalities.
getTornadoInjFatPerHourTable <- function(tornadoesIL){
  hourly <- aggregate(
    tornadoesIL[, 12:13],
    by = list(tornadoesIL$hr),
    FUN = sum
  )
  colnames(hourly)[1:3] <- c("Hour", "Injuries", "Fatalities")
  hourly
}
# Relabel the hourly-loss summary's first three columns for display
# in the DataTable; the data itself is passed through unchanged.
getTornadoLossPerHourTable <- function(hourlyloss){
  renamed <- hourlyloss
  colnames(renamed)[1:3] <- c("Hour", "Loss Category", "Number of Tornadoes")
  renamed
}
# Shiny dashboard UI: sidebar with About, per-dimension tornado plots,
# damages views, Illinois summaries, a leaflet sandbox, and heatmaps.
# Each tabItem below pairs with output$<id> render functions in server().
ui <- dashboardPage(skin="black",
dashboardHeader(title = "You Spin me Round"),
dashboardSidebar(
sidebarMenu(
menuItem("About", tabName = "About"),
menuItem("Tornadoes", tabName="Tornadoes",
menuSubItem("Year",
tabName="Year",
icon=icon("line-chart")),
menuSubItem("Month",
tabName="Month",
icon=icon("calendar")),
menuSubItem("Hour",
tabName="Hour",
icon=icon("hourglass")),
menuSubItem("Distance",
tabName="Distance",
icon=icon("plane"))
),
menuItem("Damages", tabName="Damages",
menuSubItem("Year",
tabName="YearDamages",
icon=icon("line-chart")),
menuSubItem("Month",
tabName="MonthDamages",
icon=icon("calendar")),
menuSubItem("Hour",
tabName="HourDamages",
icon=icon("hourglass")),
menuSubItem("County",
tabName="CountyDamages",
icon=icon("map"))
),
menuItem("Illinois", tabName="Illinois"),
menuItem("TestLeaf", tabName = "TestLeaf"),
menuItem("Heatmap", tabName="Heatmap")
)
),
dashboardBody(
tabItems(
# Landing page: project credits and external links.
tabItem(tabName = "About",
h1(style = "font-size: 300%","Project 3: You Spin me Round"),
h4(style = "font-size: 100%","by: Daria Azhari, Nigel Flower, Jason Guo, Ryan Nishimoto"),
h4(style = "font-size: 150%",a(href = "https://sites.google.com/uic.edu/nishimo1/cs424/project03", "Project Website")),
h2(style = "font-size: 200%","CS 424: Visualization and Visual Analytics"),
h4(style = "font-size: 150%",a(href = "https://www.evl.uic.edu/aej/424/", "Course website"))
),
# Magnitude counts and per-magnitude percentages by year.
tabItem(tabName="Year",
fluidRow(
box(title="Tornado Magnitudes by Year",
plotOutput("year_magnitude"), width=12)
),
fluidRow(
box(title="Percentage of Magnitudes by Year",
plotOutput("year_magnitude_percentage"), width=12)
)
),
tabItem(tabName="Month",
fluidRow(
box(title="Tornado Magnitudes by Month",
plotOutput("month_magnitude"), width=12)
),
fluidRow(
box(title="Percentage of Magnitudes by Month",
plotOutput("month_magnitude_percentage"), width=12)
)
),
tabItem(tabName="Hour",
fluidRow(
# NOTE(review): hour_radio is not read anywhere in the visible server
# code — confirm whether AM/PM mode was meant to be wired to the hour plots.
radioButtons("hour_radio", h4("Time Selection"),
choices=list("24 Hours" = 1, "AM/PM" = 2),
selected=1),
box(title="Tornado Magnitudes by Hour",
plotOutput("hour_magnitude"), width=12)
),
fluidRow(
box(title="Percentage of Magnitudes by Hour",
plotOutput("hour_magnitude_percentage"), width=12)
)
),
tabItem(tabName="Distance",
fluidRow(
box(title="Tornado Magnitude by Distance",
plotOutput("distance_magnitude"), width=12)
),
fluidRow(
box(title="Percentage of Magnitudes by Distance",
plotOutput("distance_magnitude_percentage"), width=12)
),
fluidRow(
# Range slider on track length; consumed by the two distance plots.
box(title = "Distance of Tornado in Miles",
sliderInput("slider", "Number of observations:", 0, 234, c(0, 100))
)
)
),
# Damages tabs: a DataTable pair plus grouped/stacked plotly charts each.
tabItem(tabName="YearDamages",
fluidRow(
box(title = "Tornado Injuries Per Year in Illinois", solidHeader = TRUE, status = "primary", width = 6,
dataTableOutput("yearInjFatTable")),
box(title = "Tornado Loss Per Year in Illinois", solidHeader = TRUE, status = "primary", width = 6,
dataTableOutput("yearLossTable"))
),
fluidRow(
box(title="Tornado Injuries and Fatalities Per Year",
plotlyOutput("yearInjFatPlot"), width=12)
),
fluidRow(
box(title="Tornado Monetary Loss Range Per Year",
plotlyOutput("yearLossPlot"), width=12)
)
),
tabItem(tabName="MonthDamages",
fluidRow(
box(title = "Tornado Injuries Per Month in Illinois", solidHeader = TRUE, status = "primary", width = 6,
dataTableOutput("monthInjFatTable")),
box(title = "Tornado Loss Per Month in Illinois", solidHeader = TRUE, status = "primary", width = 6,
dataTableOutput("monthLossTable"))
),
fluidRow(
box(title="Tornado Injuries and Fatalities Per Month",
plotlyOutput("monthInjFatPlot"), width=12)
),
fluidRow(
box(title="Tornado Monetary Loss Range Per Month",
plotlyOutput("monthLossPlot"), width=12)
)
),
tabItem(tabName="HourDamages",
fluidRow(
# 24-hour vs AM/PM axis labels; read by hourInjFatPlot/hourLossPlot.
radioButtons("hour_damages_radio", h4("Time Selection"),
choices=list("24 Hours" = 1, "AM/PM" = 2),
selected=2),
box(title = "Tornado Injuries Per Hour in Illinois", solidHeader = TRUE, status = "primary", width = 6,
dataTableOutput("hourInjFatTable")),
box(title = "Tornado Loss Per Hour in Illinois", solidHeader = TRUE, status = "primary", width = 6,
dataTableOutput("hourLossTable"))
),
fluidRow(
box(title="Tornado Injuries and Fatalities Per Hour",
plotlyOutput("hourInjFatPlot"), width=12)
),
fluidRow(
box(title="Tornado Monetary Loss Range Per Hour",
plotlyOutput("hourLossPlot"), width=12)
)
),
# Three side-by-side county choropleths (injuries / fatalities / loss).
tabItem(tabName="CountyDamages",
fluidRow(
box(title="Illinois Injuries Per County",
plotlyOutput("injuryCountyPlot", height = "1500px"), width=4),
box(title="Illinois Fatalities Per County",
plotlyOutput("fatalityCountyPlot", height = "1500px"), width=4),
box(title="Illinois Loss Per County",
plotlyOutput("lossCountyPlot", height = "1500px"), width=4)
)
),
tabItem(tabName="Illinois",
fluidRow(
box(title = "Tornado County Table", solidHeader = TRUE, status = "primary", width = 12,
dataTableOutput("countyTable"))
),
fluidRow(
box(title = "Tornado Counties Graph", solidHeader = TRUE, status = "primary", width = 12,
plotOutput("countyChart"))
),
fluidRow(
box(title = "Illinois 10 Most Powerful/Destructive tornadoes", solidHeader = TRUE, status = "primary", width = 12,
selectInput("top10", "Choose to view by criteria:", choices = c('Magnitude'='1','Fatality'='2', 'Injury' = '3'), selected = 'Magnitude'),
uiOutput("reset2"),
leafletOutput("Leaf10Most")
)
)
),
# Sandbox: two leaflet maps with year/magnitude/width/length/injury/loss filters.
tabItem(tabName="TestLeaf",
h2("Testing area for Leaflet Plotting"),
fluidRow(
box(width = 12,
# NOTE(review): value = 0 is below min = 1950 — confirm the intended
# starting year for this slider.
sliderInput(inputId = "Slider0", label = "Year", min = 1950, max = 2016, value = 0, step = 1, animate = TRUE, sep = "")
)
),
fluidRow(
# Filter by Magnitude
column(2,
checkboxGroupInput("magnitudeFilter",
h3("Filter by Magnitude"),
choices = list("-9" = -9,
"0" = 0,
"1" = 1,
"2" = 2,
"3" = 3,
"4" = 4,
"5" = 5))
),
# Filter by Width
column(2,
box(sliderInput("widthSlider", "Filter By Width", 0, 4576, 4576))
),
# Filter by Length
column(2,
sliderInput("lengthSlider", "Filter By Length", 0, 234, 234)
),
# Filter by Injuries
column(2,
sliderInput("injurySlider", "Filter By Injuries", 0, 1740, 1740)
),
# Filter by Loss
column(2,
sliderInput("lossSlider", "Filter By Losses", 0, 22000000, 22000000)
)
),
fluidRow(
box(width = 6,
selectInput(inputId = "SelectState0", label = "State", choices = state.abb, selected = "IL"),
selectInput(inputId = "MapSelect", label="Select Map Type", choices = provider_tiles, selected="Stamen Toner"),
uiOutput("reset0"),
leafletOutput("Leaf0")
),
box(width = 6,
selectInput(inputId = "SelectState1", label = "State", choices = state.abb, selected = "IL"),
uiOutput("reset1"),
leafletOutput("Leaf1")
)
)
),
tabItem(tabName="Heatmap",
h2("Heatmap Plots for Illinois Tornadoes"),
fluidRow(
box(title="Heatmap of Illinois Tornadoes Starting Point",
selectInput(inputId="HeatmapState0", label="Select State", choices=state.abb, selected="IL"),
leafletOutput("heatmap0"), width=6),
box(title="Heatmap of Illinois Tornadoes Ending Point",
selectInput(inputId="HeatmapState1", label="Select State", choices=state.abb, selected="IL"),
leafletOutput("heatmap1"), width=6)
)
)
)
)
)
# Ryan's variables pre-server
# One row per US state: name, abbreviation, and map-center coordinates.
# state.center[1]/state.center[2] are the single-element list slices of the
# datasets::state.center list, so the resulting columns are named "x" and "y" —
# the names the state0()/state1() reactives later index with [,"x"]/[,"y"].
states <- data.frame(state.name,state.abb,state.center[1],state.center[2])
fips <- state.fips
server <- function(input, output, session){
output$year_magnitude <- renderPlot({
  # Stacked bars: tornado count per year, one fill colour per magnitude.
  year_mag <- data.frame(table(tornadoes$yr, tornadoes$mag))
  ggplot(data = year_mag, aes(x = Var1, y = Freq, fill = Var2)) +
    geom_bar(stat = 'identity') +
    theme(axis.text.x = element_text(angle = 55, hjust = 1)) +
    # FIX: y-axis label previously read "Total Earthquakes"; this app plots
    # tornado data (every sibling plot says "Total Tornadoes").
    xlab("Year") + ylab("Total Tornadoes") +
    guides(fill = guide_legend(title = "Magnitude")) +
    scale_fill_brewer(palette = "Set3")
})
output$year_magnitude_percentage <- renderPlot({
  # Share of each magnitude within every year, one line per magnitude.
  counts <- table(tornadoes$yr, tornadoes$mag)
  # Row-normalise: divide each year's row by that year's total count.
  shares <- data.frame(t(apply(counts, 1, function(row) row / sum(row))))
  colnames(shares) <- magnitudes
  long_shares <- melt(as.matrix(shares))
  ggplot(long_shares, aes(x = Var1, y = value, color = factor(Var2))) +
    geom_line(size = 3) +
    xlab("Year") +
    ylab("Percentage of Magnitudes") +
    scale_color_brewer(palette = "Set3")
})
output$month_magnitude <- renderPlot({
  # Stacked bars: tornado count per month, one fill colour per magnitude.
  month_counts <- as.data.frame(table(tornadoes$mo, tornadoes$mag))
  ggplot(month_counts, aes(x = Var1, y = Freq, fill = Var2)) +
    geom_bar(stat = 'identity') +
    theme(axis.text.x = element_text(angle = 55, hjust = 1)) +
    xlab("Month") +
    ylab("Total Tornadoes") +
    guides(fill = guide_legend(title = "Magnitude")) +
    scale_fill_brewer(palette = "Set3")
})
output$month_magnitude_percentage <- renderPlot({
  # Share of each magnitude within every month, one line per magnitude.
  counts <- table(tornadoes$mo, tornadoes$mag)
  # Row-normalise: divide each month's row by that month's total count.
  shares <- data.frame(t(apply(counts, 1, function(row) row / sum(row))))
  colnames(shares) <- magnitudes
  long_shares <- melt(as.matrix(shares))
  ggplot(long_shares, aes(x = Var1, y = value, color = factor(Var2))) +
    geom_line(size = 3) +
    xlab("Month") +
    ylab("Percentage of Magnitudes") +
    scale_color_brewer(palette = "Set3")
})
# Stacked bars: tornado count per hour of day (global `hours` vector,
# precomputed from tornadoes$time), filled by magnitude.
# NOTE(review): the UI's hour_radio (24h vs AM/PM) input is not consulted
# here — confirm whether AM/PM labelling was meant to be applied.
output$hour_magnitude <- renderPlot({
# hours <- hour(strptime(tornadoes$time, "%H:%M:%S"))
hour_mag <- data.frame(table(hours, tornadoes$mag))
ggplot(data=hour_mag, aes(x=hours, y=Freq, fill=Var2)) + geom_bar(stat="identity") +
theme(axis.text.x = element_text(angle = 55, hjust = 1)) +
xlab("Hour of Day") + ylab("Total Tornadoes") +
guides(fill=guide_legend(title="Magnitude")) + scale_fill_brewer(palette="Set3")
})
output$hour_magnitude_percentage <- renderPlot({
  # Share of each magnitude within every hour of day (global `hours` vector),
  # drawn as one line per magnitude.
  counts <- table(hours, tornadoes$mag)
  shares <- data.frame(t(apply(counts, 1, function(row) row / sum(row))))
  colnames(shares) <- magnitudes
  long_shares <- melt(as.matrix(shares))
  ggplot(long_shares, aes(x = Var1, y = value, color = factor(Var2))) +
    geom_line(size = 3) +
    xlab("Hours") +
    ylab("Percentage of Magnitudes") +
    guides(fill = guide_legend(title = "Magnitude")) +
    scale_color_brewer(palette = "Set3")
})
output$distance_magnitude <- renderPlot({
  # Yearly magnitude counts restricted to tornadoes whose track length
  # falls inside the [min, max] range of the distance slider.
  rng <- input$slider
  in_range <- subset(tornadoes, len >= rng[1] & len <= rng[2])
  filt_counts <- as.data.frame(table(in_range$yr, in_range$mag))
  ggplot(filt_counts, aes(x = Var1, y = Freq, fill = Var2)) +
    geom_bar(stat = 'identity') +
    theme(axis.text.x = element_text(angle = 55, hjust = 1)) +
    xlab("Year") +
    ylab("Total Tornadoes") +
    guides(fill = guide_legend(title = "Magnitude")) +
    scale_fill_brewer(palette = "Set3")
})
# Ryan Leaflet Server Code
# TODO: clean Reactive Variables
# NOTE(review): this reactive is unfinished — subset() is called with no
# arguments and would error if the reactive were ever read. It appears to be
# unused by the outputs below; confirm before wiring it up or deleting it.
reactiveData <- reactive({
# Things to constrain by:
# Year
# width
# length
# injury
# fatalities
# Loss
dataset <- subset()
})
# Variables for selecting state and lat/lon (separate from tornado dataset)
# Each reactive returns the single `states` row for the selected abbreviation.
# NOTE: `state.abb` here resolves to the datasets::state.abb vector from the
# calling environment, not the column of `states`; the two are identical
# because the column was built from that same vector (see `states` above).
state0 <- reactive({
states[state.abb == input$SelectState0,]
})
state1 <- reactive({
states[state.abb == input$SelectState1,]
})
# Plot output
# Filterable leaflet map: tornado start/end markers plus start-to-end tracks
# for the selected state, years up to the slider value, and the magnitude /
# width / length / injury / loss filters.
output$Leaf0 <- renderLeaflet({
  # Subset by state and by year (cumulative: everything up to the slider year).
  dataset <- subset(tornadoes, st == input$SelectState0)
  dataset <- subset(dataset, yr <= input$Slider0)
  # Subset by magnitude only when at least one checkbox is ticked.
  mag_filter <- input$magnitudeFilter
  if (!is.null(mag_filter)) {
    dataset <- subset(dataset, mag %in% mag_filter)
  }
  # Apply the numeric slider filters (debug print() calls removed).
  dataset <- subset(dataset, wid < input$widthSlider)
  dataset <- subset(dataset, len < input$lengthSlider)
  dataset <- subset(dataset, inj < input$injurySlider)
  dataset <- subset(dataset, loss < input$lossSlider)
  # Map the UI label to a leaflet provider tile set; default to Stamen Toner.
  tiles <- switch(input$MapSelect,
    "Stamen Toner" = providers$Stamen.Toner,
    "Open Topo Map" = providers$OpenTopoMap,
    "Thunderforest Landscape" = providers$Thunderforest.Landscape,
    "Esri World Imagery" = providers$Esri.WorldImagery,
    "Stamen Watercolor" = providers$Stamen.Watercolor,
    providers$Stamen.Toner)
  # FIX: the original passed `map` as an extra argument to setView() inside
  # the pipe; the piped value already supplies the map argument, so the stray
  # argument has been removed.
  map <- leaflet(options = leafletOptions(zoomControl = FALSE)) %>% #, dragging = FALSE, minZoom = 6, maxZoom = 6)) %>%
    addTiles() %>%
    addProviderTiles(tiles) %>%
    setView(lng = state0()[, "x"], lat = state0()[, "y"], zoom = 6) %>%
    addCircleMarkers(lng = dataset[, "slon"], lat = dataset[, "slat"], popup = "start", radius = 5, color = 'red') %>%
    addCircleMarkers(lng = dataset[, "elon"], lat = dataset[, "elat"], popup = "end", radius = 5, color = 'red')
  # Draw a start-to-end polyline for each tornado with a recorded end point.
  dataset <- subset(dataset, elat != 0.00 & elon != 0.00)
  # FIX: seq_len() instead of 1:nrow() — an empty subset now draws no lines
  # instead of erroring on the 1:0 sequence.
  for (i in seq_len(nrow(dataset))) {
    map <- addPolylines(map, lat = as.numeric(dataset[i, c(16, 18)]), lng = as.numeric(dataset[i, c(17, 19)]), weight = 1)
  }
  map
})
# Companion map: tornadoes in the selected state for exactly the slider year
# (Leaf0 uses yr <= slider; this one shows a single year).
output$Leaf1 <- renderLeaflet({
  dataset <- subset(tornadoes, st == input$SelectState1)
  dataset <- subset(dataset, yr == input$Slider0)
  # FIX: removed the stray `map` argument to setView() inside the pipe — the
  # piped value already supplies the map argument.
  map <- leaflet(options = leafletOptions(zoomControl = FALSE)) %>% #, dragging = FALSE, minZoom = 6, maxZoom = 6)) %>%
    addTiles() %>%
    addProviderTiles(providers$Stamen.TonerLite) %>%
    setView(lng = state1()[, "x"], lat = state1()[, "y"], zoom = 6) %>%
    addCircleMarkers(lng = dataset[, "slon"], lat = dataset[, "slat"], popup = "start", radius = 5, color = 'red') %>%
    addCircleMarkers(lng = dataset[, "elon"], lat = dataset[, "elat"], popup = "end", radius = 5, color = 'red')
  map
})
# Map of the precomputed top-10 Illinois tornadoes for the selected criterion
# (magnitude / fatalities / injuries), with start/end markers and tracks.
output$Leaf10Most <- renderLeaflet({
  # Pick the precomputed top-10 table; injuries is the fall-through default.
  dataset <- switch(input$top10,
    "1" = magnitude_sorted10,
    "2" = fatalities_sorted10,
    injuries_sorted10)
  # FIX: removed the stray `map` argument to setView() inside the pipe — the
  # piped value already supplies the map argument.
  map <- leaflet(options = leafletOptions(zoomControl = FALSE)) %>% #, dragging = FALSE, minZoom = 6, maxZoom = 6)) %>%
    addTiles() %>%
    addProviderTiles(providers$Stamen.TonerLite) %>%
    setView(lng = state1()[, "x"], lat = state1()[, "y"], zoom = 6) %>%
    addCircleMarkers(lng = dataset[, "slon"], lat = dataset[, "slat"], popup = "start", radius = 5, color = 'red') %>%
    addCircleMarkers(lng = dataset[, "elon"], lat = dataset[, "elat"], popup = "end", radius = 5, color = 'red')
  # Draw tracks only for rows with a recorded end point.
  dataset <- subset(dataset, elat != 0.00 & elon != 0.00)
  # FIX: seq_len() instead of 1:nrow() so an empty subset draws no lines
  # instead of erroring.
  for (i in seq_len(nrow(dataset))) {
    map <- addPolylines(map, lat = as.numeric(dataset[i, c(16, 18)]), lng = as.numeric(dataset[i, c(17, 19)]), weight = 1)
  }
  map
})
output$distance_magnitude_percentage <- renderPlot({
  # Per-year magnitude shares for tornadoes inside the distance-slider range.
  rng <- input$slider
  in_range <- subset(tornadoes, len >= rng[1] & len <= rng[2])
  counts <- table(in_range$yr, in_range$mag)
  shares <- data.frame(t(apply(counts, 1, function(row) row / sum(row))))
  # Column names intentionally left as the X-prefixed defaults
  # (the magnitudes relabel is commented out in the original).
  #colnames(shares) <- magnitudes
  long_shares <- melt(as.matrix(shares))
  ggplot(long_shares, aes(x = Var1, y = value, color = factor(Var2))) +
    geom_line(size = 3) +
    xlab("Year") +
    ylab("Percentage of Magnitudes") +
    scale_color_brewer(palette = "Set3")
})
output$countyTable <- renderDataTable({
  # Per-county tornado counts; search box and page-size selector disabled.
  table_opts <- list(searching = FALSE, pageLength = 8, lengthChange = FALSE)
  datatable(countyInfo, options = table_opts)
})
output$countyChart <- renderPlot({
  # Bar chart of tornado counts per Illinois county.
  # FIX: aes() now uses bare column names — countyInfo$County inside aes()
  # bypasses ggplot's data masking and is a known anti-pattern; the rendered
  # output is identical.
  ggplot(data = countyInfo, aes(x = County, y = Frequency)) +
    geom_bar(position = "dodge", stat = "identity", fill = "orange") +
    labs(x = "County ", y = "# of Tornadoes") +
    theme(axis.text.x = element_text(angle = 90, vjust = 0.5))
})
#Dania's output
#data tables for part c bullets 6-8 for years, months, and hours
# Six DataTables backed by the getTornado*Table() helpers above.
# dom = 'tp' shows only the table body and pagination controls.
output$yearInjFatTable <- renderDataTable(
getTornadoInjFatPerYearTable(tornadoesIL),
options = list(orderClasses = TRUE,
pageLength = 10, dom = 'tp')
)
output$yearLossTable <- renderDataTable(
getTornadoLossPerYearTable(yearlyloss),
options = list(orderClasses = TRUE,
pageLength = 10, dom = 'tp')
)
output$monthInjFatTable <- renderDataTable(
getTornadoInjFatPerMonthTable(tornadoesIL),
options = list(orderClasses = TRUE,
pageLength = 10, dom = 'tp')
)
output$monthLossTable <- renderDataTable(
getTornadoLossPerMonthTable(monthlyloss),
options = list(orderClasses = TRUE,
pageLength = 10, dom = 'tp')
)
output$hourInjFatTable <- renderDataTable(
getTornadoInjFatPerHourTable(tornadoesIL),
options = list(orderClasses = TRUE,
pageLength = 10, dom = 'tp')
)
output$hourLossTable <- renderDataTable(
getTornadoLossPerHourTable(hourlyloss),
options = list(orderClasses = TRUE,
pageLength = 10, dom = 'tp')
)
#plots for part c bullets 6-8 for years, months, and hours
output$yearInjFatPlot <- renderPlotly({
  # Grouped bars: total injuries and fatalities per year in Illinois.
  per_year <- aggregate(tornadoesIL[, 12:13], by = list(tornadoesIL$yr), FUN = sum)
  names(per_year)[1] <- "year"
  dynamic_bar_graph_grouped(per_year, per_year$year,
                            per_year$inj, "Injuries",
                            per_year$fat, "Fatalities",
                            "Year", "Total Damages", "", "Type of Damage")
})
# Stacked bars: tornado counts per year split by loss category.
output$yearLossPlot <- renderPlotly({
dynamic_bar_graph_stacked(yearlyloss, yearlyloss$yr, yearlyloss$yrloss, yearlyloss$loss,
"Loss", "Year", "Tornadoes Per Year", "", "Year")
})
# Grouped bars: total injuries and fatalities per month in Illinois.
output$monthInjFatPlot <- renderPlotly({
tornadoData <- aggregate(tornadoesIL[,12:13],by=list(tornadoesIL$mo), FUN=sum)
names(tornadoData )[1]<-"month"
dynamic_bar_graph_grouped(tornadoData, tornadoData$month,
tornadoData$inj, "Injuries",
tornadoData$fat, "Fatalities",
"Month", "Total Damages", "", "Type of Damage")
})
# Stacked bars: tornado counts per month split by loss category.
output$monthLossPlot <- renderPlotly({
dynamic_bar_graph_stacked(monthlyloss, monthlyloss$mo, monthlyloss$moloss, monthlyloss$loss,
"Loss", "Month", "Tornadoes Per Month", "", "Month")
})
# Grouped bars of injuries/fatalities per hour; the radio input switches
# between AM/PM labels (custom ordered axis) and plain 24-hour labels.
output$hourInjFatPlot <- renderPlotly({
if(input$hour_damages_radio == 2){
# 12-hour mode: relabel hours as "01 AM".."12 AM". This assignment modifies
# a render-local copy of tornadoesIL, not the global data frame.
tornadoesIL$hr <- format(strptime(tornadoesIL$hr, "%H"),"%I %p" )
tornadoData <- aggregate(tornadoesIL[,12:13],by=list(tornadoesIL$hr), FUN=sum)
names(tornadoData )[1]<-"hour"
plot_ly(tornadoData, x =~hour, y = ~inj, type = 'bar', name = "Injuries", marker = list(color = redColorLight),
hoverinfo='text', text = ~paste('Total Injuries: ', tornadoData$inj,
'<br> Total Fatalities', tornadoData$fat)) %>%
# FIX: the fatalities trace previously passed tornadoData and ~hour as
# unnamed positional arguments, which add_trace() swallows into `...`;
# they are now bound explicitly to data= and x=.
add_trace(data = tornadoData, x = ~hour, y = ~fat, name = "Fatalities", marker = list(color = redColorDark)) %>%
layout(xaxis = list(title = "Hour of Day", dtick=1, tickangle=45, categoryorder = "array",
categoryarray = c("01 AM", "02 AM", "03 AM", "04 AM", "05 AM", "06 AM","07 AM", "08 AM", "09 AM", "10 AM", "11 AM", "12 PM",
"01 PM", "02 PM", "03 PM", "04 PM", "05 PM", "06 PM","07 PM", "08 PM", "09 PM", "10 PM", "11 PM", "12 AM")),
yaxis = list(title = "Total Damages"),
title = "",
margin = list(b = 100),
barmode = 'group')
}
else{
# 24-hour mode: aggregate on the numeric hour and reuse the shared helper.
tornadoData <- aggregate(tornadoesIL[,12:13],by=list(tornadoesIL$hr), FUN=sum)
names(tornadoData )[1]<-"hour"
dynamic_bar_graph_grouped(tornadoData, tornadoData$hour,
tornadoData$inj, "Injuries",
tornadoData$fat, "Fatalities",
"Hour of Day", "Total Damages", "", "Type of Damage")
}
})
# Stacked bars of tornado counts per hour split by loss category; the radio
# input switches between AM/PM labels (custom ordered axis) and the plain
# 24-hour helper.
# NOTE(review): as in dynamic_bar_graph_stacked, plot_ly() receives `name`
# twice (name = "Loss" and name = hourlyloss$loss); verify which one plotly
# honors and drop the redundant argument.
output$hourLossPlot <- renderPlotly({
if(input$hour_damages_radio == 2){
# 12-hour mode: relabels a render-local copy of hourlyloss$hr.
hourlyloss$hr <- format(strptime(hourlyloss$hr, "%H"),"%I %p" )
plot_ly(hourlyloss, x = hourlyloss$hr, y = hourlyloss$hrloss, type = 'bar', name = "Loss", color= hourlyloss$loss, name = hourlyloss$loss,colors = 'Reds',
legendgroup = ~hourlyloss$loss,
hoverinfo = 'text',
text = ~paste(hourlyloss$hr,
'<br> Number of Tornadoes: ', hourlyloss$hrloss,
'<br> Loss Category: ', hourlyloss$loss)) %>%
layout(xaxis = list(title = "Hour", dtick=1, tickangle=45, categoryorder = "array",
categoryarray = c("01 AM", "02 AM", "03 AM", "04 AM", "05 AM", "06 AM","07 AM", "08 AM", "09 AM", "10 AM", "11 AM", "12 PM",
"01 PM", "02 PM", "03 PM", "04 PM", "05 PM", "06 PM","07 PM", "08 PM", "09 PM", "10 PM", "11 PM", "12 AM")),
yaxis = list(title = "Tornadoes Per Hour"),
title = "",
margin = list(b = 100),
barmode = 'stack')
}
else{
# 24-hour mode: reuse the shared stacked-bar helper.
dynamic_bar_graph_stacked(hourlyloss, hourlyloss$hr, hourlyloss$hrloss, hourlyloss$loss,
"Loss", "Hour", "Tornadoes Per Hour", "", "Hour")
}
})
# Interactive versions of the three precomputed county choropleths
# (built at load time as countyInjuriesMap / countyDeathsMap / countyLossMap).
output$injuryCountyPlot <- renderPlotly({
ggplotly(countyInjuriesMap)
})
output$fatalityCountyPlot <- renderPlotly({
ggplotly(countyDeathsMap)
})
output$lossCountyPlot <- renderPlotly({
ggplotly(countyLossMap)
})
}
shinyApp(ui, server) | /daniaApp/app.R | no_license | nigelflower/you_spin_me_round | R | false | false | 48,152 | r | library(leaflet)
library(ggplot2)
library(lubridate)
library(shiny)
library(shinydashboard)
library(maps)
library(reshape2)
library(DT)
library(scales)
library(plyr)
library(maps)
library(plotly)
library(RColorBrewer)
# Full national tornado dataset; most views subset it by state/year.
tornadoes <- read.csv("tornadoes.csv")
# Magnitude levels used to label percentage-plot columns (-9 = unknown).
magnitudes <-c("-9", "0", "1", "2", "3", "4", "5")
# Hour of day for every tornado, parsed once from the HH:MM:SS time column.
hours <- hour(strptime(tornadoes$time, "%H:%M:%S"))
# Maybe add in Thunderforest.SpinalMap for fun....
# Display names offered in the map-type dropdown; mapped to leaflet
# provider tiles in the Leaf0 renderer.
provider_tiles <- c("Stamen Toner", "Open Topo Map", "Thunderforest Landscape", "Esri World Imagery", "Stamen Watercolor")
############################## Some of Jasons data ##########################################
counties_names <- read.csv("counties.csv")
# Illinois state FIPS code.
IL_Code <- 17
# Get IL tornadoes from file
illinois_tornadoes <- subset(tornadoes, stf == IL_Code)
#combine all the tornadoes from the f1-f4 code counts excluding the 0th county
illinois_counties <- as.data.frame(table(a = c(illinois_tornadoes[,"f1"], illinois_tornadoes[,"f2"], illinois_tornadoes[,"f3"], illinois_tornadoes[,"f4"])))
# Drop the first row (the 0 county code, i.e. "no county recorded").
illinois_counties <- illinois_counties[-c(1), ]
names(illinois_counties) <- c("Code", "Frequency")
# Assumes counties.csv rows line up one-to-one with the county codes above —
# TODO confirm the ordering matches.
countyInfo <- data.frame(County=counties_names$County, Frequency= illinois_counties$Frequency)
############################################
#sorting by largest magnitude tornadoes
# Columns 11/12/13 are taken to be magnitude/injuries/fatalities, matching the
# tornadoesIL[,12:13] usage elsewhere — TODO confirm against tornadoes.csv.
magnitude_sorted <- illinois_tornadoes[order(-illinois_tornadoes[,11]),]
magnitude_sorted10 <- head(magnitude_sorted,10)
injuries_sorted <- illinois_tornadoes[order(-illinois_tornadoes[,12]),]
injuries_sorted10 <- head(injuries_sorted,10)
fatalities_sorted <- illinois_tornadoes[order(-illinois_tornadoes[,13]),]
fatalities_sorted10 <-head(fatalities_sorted, 10)
#dania's code
# FIPS code -> county name lookup, restricted to Illinois.
fips <- read.csv(file="US_FIPS_Codes.csv", header=TRUE, sep=",")
fipsIllinois <- subset(fips, State == "Illinois")
# Illinois county polygons from the maps package (subregion = county name).
illinois <- map_data("county") %>%
filter(region == 'illinois')
tornadoesIL <- subset(tornadoes, st == "IL")
# Hour of day for each Illinois tornado, parsed from the HH:MM time string.
tornadoesIL$hr <- as.POSIXlt(tornadoesIL$time, format="%H:%M")$hour
# Resolve each of the four FIPS county columns (f1-f4) to a lower-case county
# name, lower-cased to match map_data()'s subregion values for the merges below.
tornadoesIL$countyNamef1 <- fipsIllinois$County.Name[match(tornadoesIL$f1, fipsIllinois$FIPS.County)]
tornadoesIL$countyNamef1 <- tolower(tornadoesIL$countyNamef1)
tornadoesIL$countyNamef2 <- fipsIllinois$County.Name[match(tornadoesIL$f2, fipsIllinois$FIPS.County)]
tornadoesIL$countyNamef2 <- tolower(tornadoesIL$countyNamef2)
tornadoesIL$countyNamef3 <- fipsIllinois$County.Name[match(tornadoesIL$f3, fipsIllinois$FIPS.County)]
tornadoesIL$countyNamef3 <- tolower(tornadoesIL$countyNamef3)
tornadoesIL$countyNamef4 <- fipsIllinois$County.Name[match(tornadoesIL$f4, fipsIllinois$FIPS.County)]
tornadoesIL$countyNamef4 <- tolower(tornadoesIL$countyNamef4)
#Changing the data for loss column of tornadoes to be categorical 0-7
# Categories: 0 unknown, 1 (<5k), 2 (5k-50k), 3 (50k-500k), 4 (500k-5M),
# 5 (5M-50M), 6 (50M-500M), 7 (>=500M).
# The raw loss encoding changed over time: 2016+ records appear to be in
# dollars, 1996-2015 in millions of dollars (hence 0.005 = $5k), and earlier
# records already categorical (values above 3 shifted down by 2) — TODO
# confirm against the dataset documentation.
# FIX: in the 2016+ branch, category 6 previously tested
# ">= 50000000 & < 50000000" (always FALSE) and category 7 used ">= 50000000";
# both bounds are now 500000000, matching the 50-500 (millions) range of the
# 1996 branch.
tornadoesIL$loss <- ifelse(tornadoesIL$yr >= 2016,
(ifelse(tornadoesIL$loss > 0 & tornadoesIL$loss < 5000,1,
(ifelse(tornadoesIL$loss >= 5000 & tornadoesIL$loss < 50000,2,
(ifelse(tornadoesIL$loss >= 50000 & tornadoesIL$loss < 500000,3,
(ifelse(tornadoesIL$loss >= 500000 & tornadoesIL$loss < 5000000,4,
(ifelse(tornadoesIL$loss >= 5000000 & tornadoesIL$loss < 50000000,5,
(ifelse(tornadoesIL$loss >= 50000000 & tornadoesIL$loss < 500000000,6,
(ifelse(tornadoesIL$loss >= 500000000,7 , 0)))))))))))))),
ifelse(tornadoesIL$yr >= 1996,
(ifelse(tornadoesIL$loss > 0 & tornadoesIL$loss < 0.005, 1,
(ifelse(tornadoesIL$loss >= 0.005 & tornadoesIL$loss < 0.05,2,
(ifelse(tornadoesIL$loss >= 0.05 & tornadoesIL$loss < 0.5,3,
(ifelse(tornadoesIL$loss >= 0.5 & tornadoesIL$loss < 5,4,
(ifelse(tornadoesIL$loss >= 5 & tornadoesIL$loss < 50,5,
(ifelse(tornadoesIL$loss >= 50 & tornadoesIL$loss < 500,6,
(ifelse(tornadoesIL$loss >= 500,7 , 0)))))))))))))),
(ifelse(tornadoesIL$loss <= 3,
(ifelse(tornadoesIL$loss > 0, 1, 0)), tornadoesIL$loss - 2))))
#helper function to calculate the amount of tornadoes that had a certain loss range
# Yields 1 for every element (the loss == 0 branch guards the 0/0 = NaN case),
# so sum(countLoss(x)) is simply the number of rows in the group.
countLoss <- function(loss){
  ifelse(loss == 0, 1, loss / loss)
}
#get the yearly, monthly, and hourly loss tables
# Count tornadoes per (period, loss-category) pair, then replace the numeric
# category with its display label.
# REFACTOR: the 0-7 labels were previously written out three times (24 nearly
# identical assignment lines); they now live in one lookup vector where
# loss_category_labels[k + 1] labels category k.
loss_category_labels <- c(
"0:Unknown",
"1:Between 0 and 5,000",
"2:Between 5,000 and 50,000",
"3:Between 50,000 and 500,000",
"4:Between 500,000 and 5,000,000",
"5:Between 5,000,000 and 50,000,000",
"6:Between 50,000,000 and 500,000,000",
"7:Greater than 500,000,000")
yearlyloss <- tornadoesIL %>%
group_by(yr, loss) %>%
summarise(yrloss= sum(countLoss(loss)))
#to present categorically in the legend
yearlyloss$loss <- loss_category_labels[yearlyloss$loss + 1]
monthlyloss <- tornadoesIL %>%
group_by(mo, loss) %>%
summarise(moloss= sum(countLoss(loss)))
monthlyloss$loss <- loss_category_labels[monthlyloss$loss + 1]
hourlyloss <- tornadoesIL %>%
group_by(hr, loss) %>%
summarise(hrloss=sum(countLoss(loss)))
hourlyloss$loss <- loss_category_labels[hourlyloss$loss + 1]
# Join the county polygons to the tornado rows once per FIPS-name column
# (f1-f4 hold up to four counties per tornado, per the comment above);
# each merge duplicates tornado rows across every vertex of the matching
# county polygon, keyed on the lower-cased county name.
tornadoesWithFips1 <- merge(illinois, tornadoesIL, by.x = "subregion", by.y = "countyNamef1")
tornadoesWithFips2 <- merge(illinois, tornadoesIL, by.x = "subregion", by.y = "countyNamef2")
tornadoesWithFips3 <- merge(illinois, tornadoesIL, by.x = "subregion", by.y = "countyNamef3")
tornadoesWithFips4 <- merge(illinois, tornadoesIL, by.x = "subregion", by.y = "countyNamef4")
#part A bullet 1 code
# --------------------------- PART A BULLET POINTS -----------------------------
# 1.) allow user to see data (injuries, fatalities, loss, total number of
# tornadoes of each magnitude) on a per county basis for all the Illinois
# counties on the map
# Sum injuries per polygon vertex for each of the four f-column joins, then
# left-join the four sums on the vertex key `order` and add them together.
countyInj <- aggregate(inj ~ subregion + lat + long + group + order, tornadoesWithFips1, sum)
countyInj2 <- aggregate(inj ~ subregion + lat + long + group + order, tornadoesWithFips2, sum)
names(countyInj2)[names(countyInj2) == "inj"] = "inj2"
countyInj2 <- countyInj2[ , -which(names(countyInj2) %in% c("subregion", "lat", "long", "group"))]
countyInj3 <- aggregate(inj ~ subregion + lat + long + group + order, tornadoesWithFips3, sum)
names(countyInj3)[names(countyInj3) == "inj"] = "inj3"
countyInj3 <- countyInj3[ , -which(names(countyInj3) %in% c("subregion", "lat", "long", "group"))]
countyInj4 <- aggregate(inj ~ subregion + lat + long + group + order, tornadoesWithFips4, sum)
names(countyInj4)[names(countyInj4) == "inj"] = "inj4"
countyInj4 <- countyInj4[ , -which(names(countyInj4) %in% c("subregion", "lat", "long", "group"))]
countyInj <- merge(x = countyInj, y = countyInj2, by = "order", all.x=TRUE)
countyInj <- merge(x = countyInj, y = countyInj3, by = "order", all.x=TRUE)
countyInj <- merge(x = countyInj, y = countyInj4, by = "order", all.x=TRUE)
# Vertices with no match in a given f-column come through the left join as NA;
# treat them as zero before summing.
countyInj$inj2[is.na(countyInj$inj2)] <- 0
countyInj$inj3[is.na(countyInj$inj3)] <- 0
countyInj$inj4[is.na(countyInj$inj4)] <- 0
countyInj$inj <- countyInj$inj + countyInj$inj2 + countyInj$inj3 + countyInj$inj4
# Restore polygon drawing order (merge() reorders rows).
countyInj<- countyInj[order(countyInj$order),]
names(countyInj)[names(countyInj) == "inj"] = "Injury"
# NOTE(review): aes() references countyInj$long etc. directly, bypassing
# ggplot's data masking; bare column names would be the idiomatic form.
countyInjuriesMap <- ggplot(countyInj, aes(x = countyInj$long, y = countyInj$lat, group = group, fill = Injury, text = paste('County: ', countyInj$subregion))) + geom_polygon(color='black') +
theme_bw() + theme(panel.border = element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.line = element_line(colour = "black")) +
scale_fill_gradient(high = "#132B43", low = "#56B1F7") +
theme(plot.title = element_blank(),
axis.title.x = element_blank(),
axis.title.y = element_blank())
#ggplotly(countyInjuriesMap)
#county deaths
# Same four-way join-and-sum pattern as countyInj, but over fatalities.
countyDeaths <- aggregate(fat ~ subregion + lat + long + group + order, tornadoesWithFips1, sum)
countyDeaths2 <- aggregate(fat ~ subregion + lat + long + group + order, tornadoesWithFips2, sum)
names(countyDeaths2)[names(countyDeaths2) == "fat"] = "fat2"
countyDeaths2 <- countyDeaths2[ , -which(names(countyDeaths2) %in% c("subregion", "lat", "long", "group"))]
countyDeaths3 <- aggregate(fat ~ subregion + lat + long + group + order, tornadoesWithFips3, sum)
names(countyDeaths3)[names(countyDeaths3) == "fat"] = "fat3"
countyDeaths3 <- countyDeaths3[ , -which(names(countyDeaths3) %in% c("subregion", "lat", "long", "group"))]
countyDeaths4 <- aggregate(fat ~ subregion + lat + long + group + order, tornadoesWithFips4, sum)
names(countyDeaths4)[names(countyDeaths4) == "fat"] = "fat4"
countyDeaths4 <- countyDeaths4[ , -which(names(countyDeaths4) %in% c("subregion", "lat", "long", "group"))]
countyDeaths <- merge(x = countyDeaths, y = countyDeaths2, by = "order", all.x=TRUE)
countyDeaths <- merge(x = countyDeaths, y = countyDeaths3, by = "order", all.x=TRUE)
countyDeaths <- merge(x = countyDeaths, y = countyDeaths4, by = "order", all.x=TRUE)
# NA from the left joins means "no tornado via that f-column": count as zero.
countyDeaths$fat2[is.na(countyDeaths$fat2)] <- 0
countyDeaths$fat3[is.na(countyDeaths$fat3)] <- 0
countyDeaths$fat4[is.na(countyDeaths$fat4)] <- 0
countyDeaths$fat <- countyDeaths$fat + countyDeaths$fat2 + countyDeaths$fat3 + countyDeaths$fat4
# Restore polygon drawing order (merge() reorders rows).
countyDeaths <- countyDeaths[order(countyDeaths$order),]
names(countyDeaths)[names(countyDeaths) == "fat"] = "Fatalities"
countyDeathsMap <- ggplot(countyDeaths, aes(x = countyDeaths$long, y = countyDeaths$lat, group = group, fill = Fatalities, text = paste('County: ', countyDeaths$subregion))) + geom_polygon(color='black') +
theme_bw() + theme(panel.border = element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.line = element_line(colour = "black")) +
scale_fill_gradient(high = "#132B43", low = "#56B1F7") +
theme(plot.title = element_blank(),
axis.title.x = element_blank(),
axis.title.y = element_blank())
#ggplotly(countyDeathsMap)
# --- County-level property-loss category for the Illinois choropleth ---
# "loss" here is an ordinal category code 0-7 (recoded to labels below),
# so the four county slots are combined with pmax() - take the WORST
# category seen in any slot - rather than summed.
countyLoss <- tornadoesWithFips1
countyLoss2 <- tornadoesWithFips2
names(countyLoss2)[names(countyLoss2) == "loss"] = "loss2"
countyLoss2 <- countyLoss2[,c("order","loss2")]
countyLoss3 <- tornadoesWithFips3
names(countyLoss3)[names(countyLoss3) == "loss"] = "loss3"
countyLoss3 <- countyLoss3[,c("order","loss3")]
countyLoss4 <- tornadoesWithFips4
names(countyLoss4)[names(countyLoss4) == "loss"] = "loss4"
countyLoss4 <- countyLoss4[,c("order","loss4")]
# Left-join per-slot loss codes onto the base polygon rows; absent slots
# become NA and are treated as 0 (Unknown).
countyLoss <- merge(x = countyLoss, y = countyLoss2, by = "order", all.x=TRUE)
countyLoss <- merge(x = countyLoss, y = countyLoss3, by = "order", all.x=TRUE)
countyLoss <- merge(x = countyLoss, y = countyLoss4, by = "order", all.x=TRUE)
countyLoss$loss2[is.na(countyLoss$loss2)] <- 0
countyLoss$loss3[is.na(countyLoss$loss3)] <- 0
countyLoss$loss4[is.na(countyLoss$loss4)] <- 0
countyLoss$loss <- pmax(countyLoss$loss, countyLoss$loss2, countyLoss$loss3, countyLoss$loss4)
# Restore polygon vertex drawing order.
countyLoss <- countyLoss[order(countyLoss$order),]
# Recode the numeric category into human-readable dollar ranges; the
# leading digit keeps the legend sorted.
countyLoss$loss[countyLoss$loss == 0] <- "0: Unknown"
countyLoss$loss[countyLoss$loss == 1] <- "1: Between 0 and 5,000"
countyLoss$loss[countyLoss$loss == 2] <- "2: Between 5,000 and 50,000"
countyLoss$loss[countyLoss$loss == 3] <- "3: Between 50,000 and 500,000"
countyLoss$loss[countyLoss$loss == 4] <- "4: Between 500,000 and 5,000,000"
countyLoss$loss[countyLoss$loss == 5] <- "5: Between 5,000,000 and 50,000,000"
countyLoss$loss[countyLoss$loss == 6] <- "6: Between 50,000,000 and 500,000,000"
countyLoss$loss[countyLoss$loss == 7] <- "7: Greater than 500,000,000"
names(countyLoss)[names(countyLoss) == "loss"] = "Losses"
# Choropleth of worst loss category per county; "text" feeds the plotly
# tooltip once the map is run through ggplotly().
countyLossMap <- ggplot(countyLoss, aes(x = countyLoss$long, y = countyLoss$lat, group = group, fill = Losses, text = paste('County: ', countyLoss$subregion))) + geom_polygon(color='black') +
  theme_bw() + theme(panel.border = element_blank(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), axis.line = element_line(colour = "black")) +
  scale_fill_brewer(palette="Blues")+ theme(
    plot.title = element_blank(),
    axis.title.x = element_blank(),
    axis.title.y = element_blank())
#ggplotly(countyLossMap)
# --- Shared palette for the plotly bar-graph helpers ---
# The red pair is used by the dynamic_bar_graph* functions below; the blue
# pair is not referenced in the code visible here (presumably used
# elsewhere in the app - TODO confirm).
blueColorLight <- "#67b8df"
blueColorDark <- "#3d6e85"
redColorLight <- "#ec5757"
redColorDark <- "#762b2b"
# Build a single-series plotly bar chart.
# data:   data source handed to plot_ly
# x_axis / y_axis: vectors (or formulas) for the bar positions and heights
# x_label / y_label / title: axis and chart captions
# Hover text shows the bar's total value.
dynamic_bar_graph <- function(data,x_axis, y_axis,x_label, y_label, title){
  bars <- plot_ly(
    data,
    x = x_axis,
    y = y_axis,
    type = 'bar',
    color = I(redColorLight),
    hoverinfo = 'text',
    text = ~paste('Total: ', y_axis)
  )
  layout(
    bars,
    title = title,
    xaxis = list(title = x_label, dtick = 1, tickangle = 45),
    yaxis = list(title = y_label)
  )
}
# Build a two-series grouped plotly bar chart (used for injuries vs
# fatalities per year/month/hour).
# data:      data source handed to plot_ly
# x_axis:    shared x positions for both series
# y_axis1/label1, y_axis2/label2: values and legend names of each series
# x_axis_label / y_axis_label / title: captions
# legend_title: kept for interface compatibility; currently unused.
# NOTE(review): the hover strings are hard-coded as "Injuries"/"Fatalities"
# even though the function is otherwise generic.
# Fix: the second hover line was missing its ': ' separator
# ('<br> Total Fatalities' vs '<br> Total Injuries: ').
dynamic_bar_graph_grouped <- function(data, x_axis, y_axis1, label1, y_axis2, label2,
                                      x_axis_label, y_axis_label, title, legend_title = "Legend"){
  plot_ly(data, x = x_axis, y = y_axis1, type = 'bar', name = label1, marker = list(color = redColorLight),
          hoverinfo='text', text = ~paste('Total Injuries: ', y_axis1,
                                          '<br> Total Fatalities: ', y_axis2)) %>%
    add_trace(data=data, x = x_axis, y = y_axis2, name = label2, marker = list(color = redColorDark)) %>%
    layout(xaxis = list(title = x_axis_label, dtick=1, tickangle=45),
           yaxis = list(title = y_axis_label),
           title = title,
           margin = list(b = 100),
           barmode = 'group')
}
# Build a stacked plotly bar chart, one stack segment per level of 'group'
# (used for loss categories per year/month/hour).
# data:    data source handed to plot_ly
# x_axis:  x positions; y_axis: segment heights; group: stacking factor
# label:   kept for interface compatibility; see note below.
# legend_title: kept for interface compatibility; currently unused.
# Fix: 'name' was passed twice to plot_ly (once as 'label', once as
# 'group'); the per-group names are the ones that drive the legend, so the
# duplicate scalar 'name = label' is dropped.
dynamic_bar_graph_stacked <- function(data, x_axis, y_axis,group, label,
                                      x_axis_label, y_axis_label, title, legend_title = "Legend"){
  plot_ly(data, x = x_axis, y = y_axis, type = 'bar', color= group, name = group, colors = 'Reds',
          legendgroup = ~group,
          hoverinfo = 'text',
          text = ~paste(x_axis,
                        '<br> Number of Tornadoes: ', y_axis,
                        '<br> Loss Category: ', group)) %>%
    layout(xaxis = list(title = x_axis_label, dtick=1, tickangle=45),
           yaxis = list(title = y_axis_label),
           title = title,
           margin = list(b = 100),
           barmode = 'stack')
}
#tables and plots
# Sum the injury/fatality columns (positions 12:13 of the tornado data)
# grouped by year, and relabel for display in the UI data table.
# NOTE(review): relies on column POSITIONS 12:13 being inj/fat.
getTornadoInjFatPerYearTable <- function(tornadoesIL){
  totals <- aggregate(tornadoesIL[, 12:13], by = list(tornadoesIL$yr), FUN = sum)
  # aggregate() of two columns by one key always yields exactly 3 columns
  setNames(totals, c("Year", "Injuries", "Fatalities"))
}
# Relabel the first three columns of the yearly-loss summary for display
# in the UI data table; values pass through untouched.
getTornadoLossPerYearTable <- function(yearlyloss){
  out <- yearlyloss
  colnames(out)[1:3] <- c("Year", "Loss Category", "Number of Tornadoes")
  out
}
# Sum the injury/fatality columns (positions 12:13 of the tornado data)
# grouped by month, and relabel for display in the UI data table.
# NOTE(review): relies on column POSITIONS 12:13 being inj/fat.
getTornadoInjFatPerMonthTable <- function(tornadoesIL){
  totals <- aggregate(tornadoesIL[, 12:13], by = list(tornadoesIL$mo), FUN = sum)
  setNames(totals, c("Month", "Injuries", "Fatalities"))
}
# Relabel the first three columns of the monthly-loss summary for display
# in the UI data table; values pass through untouched.
getTornadoLossPerMonthTable <- function(monthlyloss){
  out <- monthlyloss
  colnames(out)[1:3] <- c("Month", "Loss Category", "Number of Tornadoes")
  out
}
# Sum the injury/fatality columns (positions 12:13 of the tornado data)
# grouped by hour of day, and relabel for display in the UI data table.
# NOTE(review): relies on column POSITIONS 12:13 being inj/fat.
getTornadoInjFatPerHourTable <- function(tornadoesIL){
  totals <- aggregate(tornadoesIL[, 12:13], by = list(tornadoesIL$hr), FUN = sum)
  setNames(totals, c("Hour", "Injuries", "Fatalities"))
}
# Relabel the first three columns of the hourly-loss summary for display
# in the UI data table; values pass through untouched.
getTornadoLossPerHourTable <- function(hourlyloss){
  out <- hourlyloss
  colnames(out)[1:3] <- c("Hour", "Loss Category", "Number of Tornadoes")
  out
}
# --- Shiny dashboard UI ---
# Sidebar: About page, tornado-magnitude views (Year/Month/Hour/Distance),
# damage views (Year/Month/Hour/County), an Illinois summary, a leaflet
# sandbox tab ("TestLeaf"), and heatmaps. Tab names here must match the
# output IDs rendered in server().
ui <- dashboardPage(skin="black",
  dashboardHeader(title = "You Spin me Round"),
  dashboardSidebar(
    sidebarMenu(
      menuItem("About", tabName = "About"),
      menuItem("Tornadoes", tabName="Tornadoes",
        menuSubItem("Year",
          tabName="Year",
          icon=icon("line-chart")),
        menuSubItem("Month",
          tabName="Month",
          icon=icon("calendar")),
        menuSubItem("Hour",
          tabName="Hour",
          icon=icon("hourglass")),
        menuSubItem("Distance",
          tabName="Distance",
          icon=icon("plane"))
      ),
      menuItem("Damages", tabName="Damages",
        menuSubItem("Year",
          tabName="YearDamages",
          icon=icon("line-chart")),
        menuSubItem("Month",
          tabName="MonthDamages",
          icon=icon("calendar")),
        menuSubItem("Hour",
          tabName="HourDamages",
          icon=icon("hourglass")),
        menuSubItem("County",
          tabName="CountyDamages",
          icon=icon("map"))
      ),
      menuItem("Illinois", tabName="Illinois"),
      menuItem("TestLeaf", tabName = "TestLeaf"),
      menuItem("Heatmap", tabName="Heatmap")
    )
  ),
  dashboardBody(
    tabItems(
      # About: project credits and links.
      tabItem(tabName = "About",
        h1(style = "font-size: 300%","Project 3: You Spin me Round"),
        h4(style = "font-size: 100%","by: Daria Azhari, Nigel Flower, Jason Guo, Ryan Nishimoto"),
        h4(style = "font-size: 150%",a(href = "https://sites.google.com/uic.edu/nishimo1/cs424/project03", "Project Website")),
        h2(style = "font-size: 200%","CS 424: Visualization and Visual Analytics"),
        h4(style = "font-size: 150%",a(href = "https://www.evl.uic.edu/aej/424/", "Course website"))
      ),
      # Tornado magnitude distributions by year.
      tabItem(tabName="Year",
        fluidRow(
          box(title="Tornado Magnitudes by Year",
            plotOutput("year_magnitude"), width=12)
        ),
        fluidRow(
          box(title="Percentage of Magnitudes by Year",
            plotOutput("year_magnitude_percentage"), width=12)
        )
      ),
      # Tornado magnitude distributions by month.
      tabItem(tabName="Month",
        fluidRow(
          box(title="Tornado Magnitudes by Month",
            plotOutput("month_magnitude"), width=12)
        ),
        fluidRow(
          box(title="Percentage of Magnitudes by Month",
            plotOutput("month_magnitude_percentage"), width=12)
        )
      ),
      # Tornado magnitude distributions by hour, with 24h/AM-PM toggle.
      tabItem(tabName="Hour",
        fluidRow(
          radioButtons("hour_radio", h4("Time Selection"),
                       choices=list("24 Hours" = 1, "AM/PM" = 2),
                       selected=1),
          box(title="Tornado Magnitudes by Hour",
            plotOutput("hour_magnitude"), width=12)
        ),
        fluidRow(
          box(title="Percentage of Magnitudes by Hour",
            plotOutput("hour_magnitude_percentage"), width=12)
        )
      ),
      # Magnitude distributions filtered by track length ("Distance").
      tabItem(tabName="Distance",
        fluidRow(
          box(title="Tornado Magnitude by Distance",
            plotOutput("distance_magnitude"), width=12)
        ),
        fluidRow(
          box(title="Percentage of Magnitudes by Distance",
            plotOutput("distance_magnitude_percentage"), width=12)
        ),
        fluidRow(
          box(title = "Distance of Tornado in Miles",
            sliderInput("slider", "Number of observations:", 0, 234, c(0, 100))
          )
        )
      ),
      # Damages (injuries/fatalities/loss) per year.
      tabItem(tabName="YearDamages",
        fluidRow(
          box(title = "Tornado Injuries Per Year in Illinois", solidHeader = TRUE, status = "primary", width = 6,
            dataTableOutput("yearInjFatTable")),
          box(title = "Tornado Loss Per Year in Illinois", solidHeader = TRUE, status = "primary", width = 6,
            dataTableOutput("yearLossTable"))
        ),
        fluidRow(
          box(title="Tornado Injuries and Fatalities Per Year",
            plotlyOutput("yearInjFatPlot"), width=12)
        ),
        fluidRow(
          box(title="Tornado Monetary Loss Range Per Year",
            plotlyOutput("yearLossPlot"), width=12)
        )
      ),
      # Damages per month.
      tabItem(tabName="MonthDamages",
        fluidRow(
          box(title = "Tornado Injuries Per Month in Illinois", solidHeader = TRUE, status = "primary", width = 6,
            dataTableOutput("monthInjFatTable")),
          box(title = "Tornado Loss Per Month in Illinois", solidHeader = TRUE, status = "primary", width = 6,
            dataTableOutput("monthLossTable"))
        ),
        fluidRow(
          box(title="Tornado Injuries and Fatalities Per Month",
            plotlyOutput("monthInjFatPlot"), width=12)
        ),
        fluidRow(
          box(title="Tornado Monetary Loss Range Per Month",
            plotlyOutput("monthLossPlot"), width=12)
        )
      ),
      # Damages per hour, with 24h/AM-PM toggle.
      tabItem(tabName="HourDamages",
        fluidRow(
          radioButtons("hour_damages_radio", h4("Time Selection"),
                       choices=list("24 Hours" = 1, "AM/PM" = 2),
                       selected=2),
          box(title = "Tornado Injuries Per Hour in Illinois", solidHeader = TRUE, status = "primary", width = 6,
            dataTableOutput("hourInjFatTable")),
          box(title = "Tornado Loss Per Hour in Illinois", solidHeader = TRUE, status = "primary", width = 6,
            dataTableOutput("hourLossTable"))
        ),
        fluidRow(
          box(title="Tornado Injuries and Fatalities Per Hour",
            plotlyOutput("hourInjFatPlot"), width=12)
        ),
        fluidRow(
          box(title="Tornado Monetary Loss Range Per Hour",
            plotlyOutput("hourLossPlot"), width=12)
        )
      ),
      # Three side-by-side county choropleths (injuries/fatalities/loss).
      tabItem(tabName="CountyDamages",
        fluidRow(
          box(title="Illinois Injuries Per County",
            plotlyOutput("injuryCountyPlot", height = "1500px"), width=4),
          box(title="Illinois Fatalities Per County",
            plotlyOutput("fatalityCountyPlot", height = "1500px"), width=4),
          box(title="Illinois Loss Per County",
            plotlyOutput("lossCountyPlot", height = "1500px"), width=4)
        )
      ),
      # Illinois summary: county table/chart and a "top 10" leaflet map.
      tabItem(tabName="Illinois",
        fluidRow(
          box(title = "Tornado County Table", solidHeader = TRUE, status = "primary", width = 12,
            dataTableOutput("countyTable"))
        ),
        fluidRow(
          box(title = "Tornado Counties Graph", solidHeader = TRUE, status = "primary", width = 12,
            plotOutput("countyChart"))
        ),
        fluidRow(
          box(title = "Illinois 10 Most Powerful/Destructive tornadoes", solidHeader = TRUE, status = "primary", width = 12,
            selectInput("top10", "Choose to view by criteria:", choices = c('Magnitude'='1','Fatality'='2', 'Injury' = '3'), selected = 'Magnitude'),
            uiOutput("reset2"),
            leafletOutput("Leaf10Most")
          )
        )
      ),
      # Sandbox tab: two leaflet maps with year/state/attribute filters.
      tabItem(tabName="TestLeaf",
        h2("Testing area for Leaflet Plotting"),
        fluidRow(
          box(width = 12,
            # NOTE(review): value = 0 is below min = 1950 - confirm the
            # intended initial year.
            sliderInput(inputId = "Slider0", label = "Year", min = 1950, max = 2016, value = 0, step = 1, animate = TRUE, sep = "")
          )
        ),
        fluidRow(
          # Filter by Magnitude
          column(2,
            checkboxGroupInput("magnitudeFilter",
                               h3("Filter by Magnitude"),
                               choices = list("-9" = -9,
                                              "0" = 0,
                                              "1" = 1,
                                              "2" = 2,
                                              "3" = 3,
                                              "4" = 4,
                                              "5" = 5))
          ),
          # Filter by Width
          column(2,
            box(sliderInput("widthSlider", "Filter By Width", 0, 4576, 4576))
          ),
          # Filter by Length
          column(2,
            sliderInput("lengthSlider", "Filter By Length", 0, 234, 234)
          ),
          # Filter by Injuries
          column(2,
            sliderInput("injurySlider", "Filter By Injuries", 0, 1740, 1740)
          ),
          # Filter by Loss
          column(2,
            sliderInput("lossSlider", "Filter By Losses", 0, 22000000, 22000000)
          )
        ),
        fluidRow(
          box(width = 6,
            selectInput(inputId = "SelectState0", label = "State", choices = state.abb, selected = "IL"),
            # 'provider_tiles' is a global defined elsewhere in the app.
            selectInput(inputId = "MapSelect", label="Select Map Type", choices = provider_tiles, selected="Stamen Toner"),
            uiOutput("reset0"),
            leafletOutput("Leaf0")
          ),
          box(width = 6,
            selectInput(inputId = "SelectState1", label = "State", choices = state.abb, selected = "IL"),
            uiOutput("reset1"),
            leafletOutput("Leaf1")
          )
        )
      ),
      # Heatmaps of tornado start/end points for a selected state.
      tabItem(tabName="Heatmap",
        h2("Heatmap Plots for Illinois Tornadoes"),
        fluidRow(
          box(title="Heatmap of Illinois Tornadoes Starting Point",
            selectInput(inputId="HeatmapState0", label="Select State", choices=state.abb, selected="IL"),
            leafletOutput("heatmap0"), width=6),
          box(title="Heatmap of Illinois Tornadoes Ending Point",
            selectInput(inputId="HeatmapState1", label="Select State", choices=state.abb, selected="IL"),
            leafletOutput("heatmap1"), width=6)
        )
      )
    )
  )
)
# Ryan's variables pre-server
# Lookup table of state names/abbreviations plus state center coordinates
# (x = longitude, y = latitude); used by the leaflet outputs to center the
# map on the selected state.
states <- data.frame(state.name,state.abb,state.center[1],state.center[2])
# State FIPS codes; NOTE(review): 'fips' is not referenced in the code
# visible here - confirm it is used elsewhere before removing.
fips <- state.fips
# --- Shiny server ---
# Renders all plots/tables declared in the UI. Relies on globals built
# earlier in the file: tornadoes, tornadoesIL, yearlyloss, monthlyloss,
# hourlyloss, countyInfo, magnitudes, hours, the *_sorted10 data frames,
# and the county*Map ggplot objects.
server <- function(input, output, session){
  # Stacked bars: tornado count per year, split by magnitude.
  output$year_magnitude <- renderPlot({
    year_mag <- data.frame(table(tornadoes$yr, tornadoes$mag))
    ggplot(data=year_mag, aes(x=Var1, y=Freq, fill=Var2)) + geom_bar(stat='identity') +
      theme(axis.text.x = element_text(angle = 55, hjust = 1)) +
      # NOTE(review): y label says "Total Earthquakes" but the data are
      # tornadoes (every sibling plot says "Total Tornadoes") - likely a
      # copy/paste leftover.
      xlab("Year") + ylab("Total Earthquakes") +
      guides(fill=guide_legend(title="Magnitude")) + scale_fill_brewer(palette="Set3")
  })
  # Line chart: within-year share of each magnitude (rows normalized to 1).
  output$year_magnitude_percentage <- renderPlot({
    year_mag_per <- data.frame(t(apply(table(tornadoes$yr, tornadoes$mag), 1, function(i) i / sum(i))))
    colnames(year_mag_per) <- magnitudes
    melted_ymp <- melt(as.matrix(year_mag_per))
    ggplot(data=melted_ymp, aes(x=Var1, y=value, color=factor(Var2))) + geom_line(size=3) +
      xlab("Year") + ylab("Percentage of Magnitudes") + scale_color_brewer(palette="Set3")
  })
  # Stacked bars: tornado count per month, split by magnitude.
  output$month_magnitude <- renderPlot({
    mo_mag <- data.frame(table(tornadoes$mo, tornadoes$mag))
    ggplot(data=mo_mag, aes(x=Var1, y=Freq, fill=Var2)) + geom_bar(stat='identity') +
      theme(axis.text.x = element_text(angle = 55, hjust = 1)) +
      xlab("Month") + ylab("Total Tornadoes") +
      guides(fill=guide_legend(title="Magnitude")) + scale_fill_brewer(palette="Set3")
  })
  # Line chart: within-month share of each magnitude.
  output$month_magnitude_percentage <- renderPlot({
    mo_mag_per <- data.frame(t(apply(table(tornadoes$mo, tornadoes$mag), 1, function(i) i / sum(i))))
    colnames(mo_mag_per) <- magnitudes
    melted_mmp <- melt(as.matrix(mo_mag_per))
    ggplot(data=melted_mmp, aes(x=Var1, y=value, color=factor(Var2))) + geom_line(size=3) +
      xlab("Month") + ylab("Percentage of Magnitudes") + scale_color_brewer(palette="Set3")
  })
  # Stacked bars: tornado count per hour of day, split by magnitude.
  # NOTE(review): depends on a global 'hours' vector; the local definition
  # is commented out - confirm 'hours' is built before the app starts.
  output$hour_magnitude <- renderPlot({
    # hours <- hour(strptime(tornadoes$time, "%H:%M:%S"))
    hour_mag <- data.frame(table(hours, tornadoes$mag))
    ggplot(data=hour_mag, aes(x=hours, y=Freq, fill=Var2)) + geom_bar(stat="identity") +
      theme(axis.text.x = element_text(angle = 55, hjust = 1)) +
      xlab("Hour of Day") + ylab("Total Tornadoes") +
      guides(fill=guide_legend(title="Magnitude")) + scale_fill_brewer(palette="Set3")
  })
  # Line chart: within-hour share of each magnitude.
  output$hour_magnitude_percentage <- renderPlot({
    # hours <- hour(strptime(tornadoes$time, "%H:%M:%S"))
    hour_mag_per <- data.frame(t(apply(table(hours, tornadoes$mag), 1, function(i) i / sum(i))))
    colnames(hour_mag_per) <- magnitudes
    melted_hmp <- melt(as.matrix(hour_mag_per))
    ggplot(data=melted_hmp, aes(x=Var1, y=value, color=factor(Var2))) + geom_line(size=3) +
      xlab("Hours") + ylab("Percentage of Magnitudes") +
      guides(fill=guide_legend(title="Magnitude")) + scale_color_brewer(palette="Set3")
  })
  # Stacked bars per year, restricted to tracks within the length slider.
  output$distance_magnitude <- renderPlot({
    filtered_tornadoes <- subset(tornadoes, len >= input$slider[1] & len <= input$slider[2])
    filt_year_mag <- data.frame(table(filtered_tornadoes$yr, filtered_tornadoes$mag))
    ggplot(data=filt_year_mag, aes(x=Var1, y=Freq, fill=Var2)) + geom_bar(stat='identity') +
      theme(axis.text.x = element_text(angle = 55, hjust = 1)) +
      xlab("Year") + ylab("Total Tornadoes") +
      guides(fill=guide_legend(title="Magnitude")) + scale_fill_brewer(palette="Set3")
  })
  # Ryan Leaflet Server Code
  # TODO: clean Reactive Variables
  # NOTE(review): this reactive is a stub - subset() is called with no
  # arguments and 'dataset' is never returned/used.
  reactiveData <- reactive({
    # Things to constrain by:
    # Year
    # width
    # length
    # injury
    # fatalities
    # Loss
    dataset <- subset()
  })
  # Variables for selecting state and lat/lon (separate from tornado dataset)
  state0 <- reactive({
    states[state.abb == input$SelectState0,]
  })
  state1 <- reactive({
    states[state.abb == input$SelectState1,]
  })
  # Left sandbox map: tornadoes filtered by state, cumulative year, and all
  # attribute sliders, drawn as start/end markers plus connecting tracks.
  output$Leaf0 <- renderLeaflet({
    # Subset by Year And State
    dataset <- subset(tornadoes, st == input$SelectState0)
    dataset <- subset(dataset, yr <= input$Slider0)
    # Subset by Magnitude
    mag_filter <- input$magnitudeFilter
    if(!is.null(mag_filter)){
      dataset <- subset(dataset, mag %in% mag_filter)
      print(strtoi(input$magnitudeFilter))
    }
    # Subset by Width
    wid_filter <- input$widthSlider
    dataset <- subset(dataset, wid < wid_filter)
    # Subset by Length
    len_filter <- input$lengthSlider
    dataset <- subset(dataset, len < len_filter)
    print(len_filter)
    # Subset by Injuries
    inj_filter <- input$injurySlider
    dataset <- subset(dataset, inj < inj_filter)
    # Subset by Loss
    loss_filter <- input$lossSlider
    dataset <- subset(dataset, loss < loss_filter)
    # Select Provider Tiles
    if(input$MapSelect == "Stamen Toner"){
      tiles <- providers$Stamen.Toner
    }
    else if(input$MapSelect == "Open Topo Map"){
      tiles <- providers$OpenTopoMap
    }
    else if(input$MapSelect == "Thunderforest Landscape"){
      tiles <- providers$Thunderforest.Landscape
    }
    else if(input$MapSelect == "Esri World Imagery"){
      tiles <- providers$Esri.WorldImagery
    }
    else if(input$MapSelect == "Stamen Watercolor"){
      tiles <- providers$Stamen.Watercolor
    }
    else{
      tiles <- providers$Stamen.Toner
    }
    # NOTE(review): 'setView(map, ...)' inside the pipe passes 'map' as an
    # extra argument while 'map' is still being assigned - setView only
    # needs the piped value; the stray 'map' argument looks like a bug.
    map <- leaflet(options = leafletOptions(zoomControl= FALSE)) %>% #, dragging = FALSE, minZoom = 6, maxZoom = 6)) %>%
      addTiles() %>%
      addProviderTiles(tiles) %>%
      setView(map,
              lng = state0()[,"x"],
              lat = state0()[,"y"],
              zoom = 6) %>%
      addCircleMarkers(lng = dataset[,"slon"], lat = dataset[,"slat"], popup = "start", radius = 5, color = 'red') %>%
      addCircleMarkers(lng = dataset[,"elon"], lat = dataset[,"elat"], popup = "end", radius = 5, color = 'red')
    # Drop rows with no recorded end point before drawing tracks.
    dataset <- subset(dataset, elat != 0.00 & elon != 0.00)
    # Columns 16-19 are assumed to be slat/slon/elat/elon - TODO confirm.
    for(i in 1:nrow(dataset)){
      map <- addPolylines(map, lat = as.numeric(dataset[i, c(16, 18)]), lng = as.numeric(dataset[i, c(17, 19)]), weight=1)
    }
    map
  })
  # Right sandbox map: single-year snapshot for the second state selector.
  # NOTE(review): uses 'yr ==' while Leaf0 uses 'yr <=' - confirm which is
  # intended.
  output$Leaf1 <- renderLeaflet({
    dataset <- subset(tornadoes, st == input$SelectState1)
    dataset <- subset(dataset, yr == input$Slider0)
    map <- leaflet(options = leafletOptions(zoomControl= FALSE)) %>% #, dragging = FALSE, minZoom = 6, maxZoom = 6)) %>%
      addTiles() %>%
      # Select leaflet provider tiles from user input
      addProviderTiles(providers$Stamen.TonerLite) %>%
      setView(map,
              lng = state1()[,"x"],
              lat = state1()[,"y"],
              zoom = 6) %>%
      addCircleMarkers(lng = dataset[,"slon"], lat = dataset[,"slat"], popup = "start", radius = 5, color = 'red') %>%
      addCircleMarkers(lng = dataset[,"elon"], lat = dataset[,"elat"], popup = "end", radius = 5, color = 'red')
    map
  })
  # Top-10 Illinois tornadoes by the selected criterion.
  # NOTE(review): centers on state1() (the TestLeaf selector), not on
  # Illinois specifically - confirm that is intended.
  output$Leaf10Most <- renderLeaflet({
    # Select dataset by critera
    if(input$top10 == "1"){ ## if it is the magnitude
      dataset <- magnitude_sorted10
    }
    else if(input$top10 == "2"){ ## if it is the fatalities
      dataset <- fatalities_sorted10
    }
    else{ ## if it is injuries
      dataset <- injuries_sorted10
    }
    map <- leaflet(options = leafletOptions(zoomControl= FALSE)) %>% #, dragging = FALSE, minZoom = 6, maxZoom = 6)) %>%
      addTiles() %>%
      # Select leaflet provider tiles from user input
      addProviderTiles(providers$Stamen.TonerLite) %>%
      setView(map,
              lng = state1()[,"x"],
              lat = state1()[,"y"],
              zoom = 6) %>%
      addCircleMarkers(lng = dataset[,"slon"], lat = dataset[,"slat"], popup = "start", radius = 5, color = 'red') %>%
      addCircleMarkers(lng = dataset[,"elon"], lat = dataset[,"elat"], popup = "end", radius = 5, color = 'red')
    dataset <- subset(dataset, elat != 0.00 & elon != 0.00)
    for(i in 1:nrow(dataset)){
      map <- addPolylines(map, lat = as.numeric(dataset[i, c(16, 18)]), lng = as.numeric(dataset[i, c(17, 19)]), weight=1)
    }
    map
  })
  # Percentage-of-magnitudes line chart for the length-filtered subset.
  output$distance_magnitude_percentage <- renderPlot({
    filtered_tornadoes <- subset(tornadoes, len >= input$slider[1] & len <= input$slider[2])
    filt_year_mag_per <- data.frame(t(apply(table(filtered_tornadoes$yr, filtered_tornadoes$mag), 1, function(i) i / sum(i))))
    #colnames(filt_year_mag_per) <- magnitudes
    melted_fymp <- melt(as.matrix(filt_year_mag_per))
    ggplot(data=melted_fymp, aes(x=Var1, y=value, color=factor(Var2))) +
      geom_line(size=3) + xlab("Year") + ylab("Percentage of Magnitudes") + scale_color_brewer(palette="Set3")
  })
  # County summary table and bar chart.
  output$countyTable <- renderDataTable({
    datatable(countyInfo,
              options = list(searching = FALSE, pageLength = 8, lengthChange = FALSE))
  })
  output$countyChart <- renderPlot({
    ggplot(data = countyInfo, aes(x=countyInfo$County, y=countyInfo$Frequency)) +
      geom_bar(position="dodge", stat="identity", fill = "orange") + labs(x="County ", y = "# of Tornadoes") + theme(axis.text.x = element_text(angle = 90, vjust=0.5))
  })
  #Dania's output
  #data tables for part c bullets 6-8 for years, months, and hours
  output$yearInjFatTable <- renderDataTable(
    getTornadoInjFatPerYearTable(tornadoesIL),
    options = list(orderClasses = TRUE,
                   pageLength = 10, dom = 'tp')
  )
  output$yearLossTable <- renderDataTable(
    getTornadoLossPerYearTable(yearlyloss),
    options = list(orderClasses = TRUE,
                   pageLength = 10, dom = 'tp')
  )
  output$monthInjFatTable <- renderDataTable(
    getTornadoInjFatPerMonthTable(tornadoesIL),
    options = list(orderClasses = TRUE,
                   pageLength = 10, dom = 'tp')
  )
  output$monthLossTable <- renderDataTable(
    getTornadoLossPerMonthTable(monthlyloss),
    options = list(orderClasses = TRUE,
                   pageLength = 10, dom = 'tp')
  )
  output$hourInjFatTable <- renderDataTable(
    getTornadoInjFatPerHourTable(tornadoesIL),
    options = list(orderClasses = TRUE,
                   pageLength = 10, dom = 'tp')
  )
  output$hourLossTable <- renderDataTable(
    getTornadoLossPerHourTable(hourlyloss),
    options = list(orderClasses = TRUE,
                   pageLength = 10, dom = 'tp')
  )
  #plots for part c bullets 6-8 for years, months, and hours
  output$yearInjFatPlot <- renderPlotly({
    tornadoData <- aggregate(tornadoesIL[,12:13],by=list(tornadoesIL$yr), FUN=sum)
    names(tornadoData )[1]<-"year"
    dynamic_bar_graph_grouped(tornadoData, tornadoData$year,
                              tornadoData$inj, "Injuries",
                              tornadoData$fat, "Fatalities",
                              "Year", "Total Damages", "", "Type of Damage")
  })
  output$yearLossPlot <- renderPlotly({
    dynamic_bar_graph_stacked(yearlyloss, yearlyloss$yr, yearlyloss$yrloss, yearlyloss$loss,
                              "Loss", "Year", "Tornadoes Per Year", "", "Year")
  })
  output$monthInjFatPlot <- renderPlotly({
    tornadoData <- aggregate(tornadoesIL[,12:13],by=list(tornadoesIL$mo), FUN=sum)
    names(tornadoData )[1]<-"month"
    dynamic_bar_graph_grouped(tornadoData, tornadoData$month,
                              tornadoData$inj, "Injuries",
                              tornadoData$fat, "Fatalities",
                              "Month", "Total Damages", "", "Type of Damage")
  })
  output$monthLossPlot <- renderPlotly({
    dynamic_bar_graph_stacked(monthlyloss, monthlyloss$mo, monthlyloss$moloss, monthlyloss$loss,
                              "Loss", "Month", "Tornadoes Per Month", "", "Month")
  })
  # Hourly injuries/fatalities; AM/PM mode reformats the hour labels and
  # forces the category order on the x axis.
  output$hourInjFatPlot <- renderPlotly({
    if(input$hour_damages_radio == 2){
      tornadoesIL$hr <- format(strptime(tornadoesIL$hr, "%H"),"%I %p" )
      tornadoData <- aggregate(tornadoesIL[,12:13],by=list(tornadoesIL$hr), FUN=sum)
      names(tornadoData )[1]<-"hour"
      # NOTE(review): in add_trace below, '~hour' is passed positionally
      # rather than as 'x = ~hour' - confirm the second trace aligns.
      plot_ly(tornadoData, x =~hour, y = ~inj, type = 'bar', name = "Injuries", marker = list(color = redColorLight),
              hoverinfo='text', text = ~paste('Total Injuries: ', tornadoData$inj,
                                              '<br> Total Fatalities', tornadoData$fat)) %>%
        add_trace(tornadoData, ~hour, y = ~fat, name = "Fatalities", marker = list(color = redColorDark)) %>%
        layout(xaxis = list(title = "Hour of Day", dtick=1, tickangle=45, categoryorder = "array",
                            categoryarray = c("01 AM", "02 AM", "03 AM", "04 AM", "05 AM", "06 AM","07 AM", "08 AM", "09 AM", "10 AM", "11 AM", "12 PM",
                                              "01 PM", "02 PM", "03 PM", "04 PM", "05 PM", "06 PM","07 PM", "08 PM", "09 PM", "10 PM", "11 PM", "12 AM")),
               yaxis = list(title = "Total Damages"),
               title = "",
               margin = list(b = 100),
               barmode = 'group')
    }
    else{
      tornadoData <- aggregate(tornadoesIL[,12:13],by=list(tornadoesIL$hr), FUN=sum)
      names(tornadoData )[1]<-"hour"
      dynamic_bar_graph_grouped(tornadoData, tornadoData$hour,
                                tornadoData$inj, "Injuries",
                                tornadoData$fat, "Fatalities",
                                "Hour of Day", "Total Damages", "", "Type of Damage")
    }
  })
  # Hourly loss categories; AM/PM branch mirrors dynamic_bar_graph_stacked
  # inline so it can force the AM/PM category order.
  output$hourLossPlot <- renderPlotly({
    if(input$hour_damages_radio == 2){
      hourlyloss$hr <- format(strptime(hourlyloss$hr, "%H"),"%I %p" )
      # NOTE(review): 'name' is passed twice to plot_ly here (scalar then
      # per-group vector) - same issue as dynamic_bar_graph_stacked.
      plot_ly(hourlyloss, x = hourlyloss$hr, y = hourlyloss$hrloss, type = 'bar', name = "Loss", color= hourlyloss$loss, name = hourlyloss$loss,colors = 'Reds',
              legendgroup = ~hourlyloss$loss,
              hoverinfo = 'text',
              text = ~paste(hourlyloss$hr,
                            '<br> Number of Tornadoes: ', hourlyloss$hrloss,
                            '<br> Loss Category: ', hourlyloss$loss)) %>%
        layout(xaxis = list(title = "Hour", dtick=1, tickangle=45, categoryorder = "array",
                            categoryarray = c("01 AM", "02 AM", "03 AM", "04 AM", "05 AM", "06 AM","07 AM", "08 AM", "09 AM", "10 AM", "11 AM", "12 PM",
                                              "01 PM", "02 PM", "03 PM", "04 PM", "05 PM", "06 PM","07 PM", "08 PM", "09 PM", "10 PM", "11 PM", "12 AM")),
               yaxis = list(title = "Tornadoes Per Hour"),
               title = "",
               margin = list(b = 100),
               barmode = 'stack')
    }
    else{
      dynamic_bar_graph_stacked(hourlyloss, hourlyloss$hr, hourlyloss$hrloss, hourlyloss$loss,
                                "Loss", "Hour", "Tornadoes Per Hour", "", "Hour")
    }
  })
  # Interactive versions of the precomputed county choropleths.
  output$injuryCountyPlot <- renderPlotly({
    ggplotly(countyInjuriesMap)
  })
  output$fatalityCountyPlot <- renderPlotly({
    ggplotly(countyDeathsMap)
  })
  output$lossCountyPlot <- renderPlotly({
    ggplotly(countyLossMap)
  })
}
shinyApp(ui, server) |
#' Create edges for Graphviz graphs
#' @description Combine several named vectors for edges and their attributes.
#'   Scalar attribute values are recycled to the number of edges, vectors
#'   shorter than the number of edges are padded with "", and longer vectors
#'   are truncated.
#' @param ... one or more named vectors for edges and associated attributes;
#'   the edge endpoints must be supplied as \code{from}/\code{edge_from} and
#'   \code{to}/\code{edge_to}, and both endpoint vectors must have the same
#'   length.
#' @return an edge data frame (character columns, no factors)
#' @export create_edges
create_edges <- function(...){
  edges <- list(...)
  # Stop function if there are no list components
  stopifnot(!is.null(names(edges)))
  # Track the endpoint lengths; NULL until an endpoint column is seen, so a
  # missing endpoint can be reported clearly below.
  number_of_edges_from <- NULL
  number_of_edges_to <- NULL
  # Attempt to obtain the number of edges from the 'edge_from' column
  # If 'edge_from' column exists, ensure that it is classed as character
  if ("edge_from" %in% names(edges)){
    number_of_edges_from <- length(edges$edge_from)
    edges$edge_from <- as.character(edges$edge_from)
  }
  # Attempt to obtain the number of edges from the 'from' column
  # If 'from' column exists, ensure that it is classed as character
  if ("from" %in% names(edges)){
    number_of_edges_from <- length(edges$from)
    edges$from <- as.character(edges$from)
  }
  # Attempt to obtain the number of edges from the 'edge_to' column
  # If 'edge_to' column exists, ensure that it is classed as character
  if ("edge_to" %in% names(edges)){
    number_of_edges_to <- length(edges$edge_to)
    edges$edge_to <- as.character(edges$edge_to)
  }
  # Attempt to obtain the number of edges from the 'to' column
  # If 'to' column exists, ensure that it is classed as character
  if ("to" %in% names(edges)){
    number_of_edges_to <- length(edges$to)
    edges$to <- as.character(edges$to)
  }
  # Fail with a clear message when an endpoint column is absent; previously
  # this surfaced as an obscure "object 'number_of_edges_from' not found"
  if (is.null(number_of_edges_from) || is.null(number_of_edges_to)){
    stop("Both a 'from'/'edge_from' and a 'to'/'edge_to' vector must be supplied",
         call. = FALSE)
  }
  stopifnot(number_of_edges_from == number_of_edges_to)
  number_of_edges <- number_of_edges_from
  for (i in seq_along(edges)){
    # Expand vectors with single values to fill to number of edges
    if (length(edges[[i]]) == 1){
      edges[[i]] <- rep(edges[[i]], number_of_edges)
    }
    # Expand vectors with length > 1 and length < 'number_of_edges'
    if (length(edges[[i]]) > 1 & length(edges[[i]]) < number_of_edges){
      edges[[i]] <- c(edges[[i]], rep("", (number_of_edges - length(edges[[i]]))))
    }
    # Trim vectors with number of values exceeding number of edges
    if (length(edges[[i]]) > number_of_edges){
      edges[[i]] <- edges[[i]][1:number_of_edges]
    }
  }
  edges_df <- as.data.frame(edges, stringsAsFactors = FALSE)
  return(edges_df)
}
| /R/create_edges.R | no_license | alforj/DiagrammeR | R | false | false | 2,242 | r | #' Create edges for Graphviz graphs
#' Create edges for Graphviz graphs
#' @description Combine several named vectors for edges and their attributes.
#'   Scalar attribute values are recycled to the number of edges, vectors
#'   shorter than the number of edges are padded with "", and longer vectors
#'   are truncated.
#' @param ... one or more named vectors for edges and associated attributes;
#'   the edge endpoints must be supplied as \code{from}/\code{edge_from} and
#'   \code{to}/\code{edge_to}, and both endpoint vectors must have the same
#'   length.
#' @return an edge data frame (character columns, no factors)
#' @export create_edges
create_edges <- function(...){
  edges <- list(...)
  # Stop function if there are no list components
  stopifnot(!is.null(names(edges)))
  # Track the endpoint lengths; NULL until an endpoint column is seen, so a
  # missing endpoint can be reported clearly below.
  number_of_edges_from <- NULL
  number_of_edges_to <- NULL
  # Attempt to obtain the number of edges from the 'edge_from' column
  # If 'edge_from' column exists, ensure that it is classed as character
  if ("edge_from" %in% names(edges)){
    number_of_edges_from <- length(edges$edge_from)
    edges$edge_from <- as.character(edges$edge_from)
  }
  # Attempt to obtain the number of edges from the 'from' column
  # If 'from' column exists, ensure that it is classed as character
  if ("from" %in% names(edges)){
    number_of_edges_from <- length(edges$from)
    edges$from <- as.character(edges$from)
  }
  # Attempt to obtain the number of edges from the 'edge_to' column
  # If 'edge_to' column exists, ensure that it is classed as character
  if ("edge_to" %in% names(edges)){
    number_of_edges_to <- length(edges$edge_to)
    edges$edge_to <- as.character(edges$edge_to)
  }
  # Attempt to obtain the number of edges from the 'to' column
  # If 'to' column exists, ensure that it is classed as character
  if ("to" %in% names(edges)){
    number_of_edges_to <- length(edges$to)
    edges$to <- as.character(edges$to)
  }
  # Fail with a clear message when an endpoint column is absent; previously
  # this surfaced as an obscure "object 'number_of_edges_from' not found"
  if (is.null(number_of_edges_from) || is.null(number_of_edges_to)){
    stop("Both a 'from'/'edge_from' and a 'to'/'edge_to' vector must be supplied",
         call. = FALSE)
  }
  stopifnot(number_of_edges_from == number_of_edges_to)
  number_of_edges <- number_of_edges_from
  for (i in seq_along(edges)){
    # Expand vectors with single values to fill to number of edges
    if (length(edges[[i]]) == 1){
      edges[[i]] <- rep(edges[[i]], number_of_edges)
    }
    # Expand vectors with length > 1 and length < 'number_of_edges'
    if (length(edges[[i]]) > 1 & length(edges[[i]]) < number_of_edges){
      edges[[i]] <- c(edges[[i]], rep("", (number_of_edges - length(edges[[i]]))))
    }
    # Trim vectors with number of values exceeding number of edges
    if (length(edges[[i]]) > number_of_edges){
      edges[[i]] <- edges[[i]][1:number_of_edges]
    }
  }
  edges_df <- as.data.frame(edges, stringsAsFactors = FALSE)
  return(edges_df)
}
|
# Loading plyr for join()
library(plyr)
# Reading the NEI emissions data and the source classification table
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Identifying the coal-combustion-related sources: combustion processes
# (SCC.Level.One) whose emission sector mentions coal (EI.Sector).
# Fix: the original combined the two patterns with '|', which also selects
# every non-coal combustion source; the inline comment ("... and ...")
# calls for the intersection, so '&' is used here.
CoalSCC <- SCC[grepl("coal",SCC$EI.Sector,ignore.case=TRUE) & grepl("combustion",SCC$SCC.Level.One,ignore.case=TRUE),]
Mergeddata <- join(NEI,CoalSCC,by="SCC",type="inner")
# Opening the PNG device
png(filename="plot4.png",width = 480, height = 480, units = "px")
# Splitting the merged records by year
splityear <- split(Mergeddata,Mergeddata$year)
# Summarizing the emissions by year (vapply keeps the result numeric)
summarizeemission <- vapply(splityear, function(x) sum(x$Emissions), numeric(1))
# Plotting total emissions over time
plot(names(summarizeemission),summarizeemission,type="l",xlab="Calendar Year",ylab="Total Emissions (in Tons)")
title(main = "US Total emissions from coal combustion related sources over years")
# Close device
dev.off() | /plot4.R | no_license | kirgub/courseproject2 | R | false | false | 924 | r | #loading plyr
# Loading plyr for join()
library(plyr)
# Reading the NEI emissions data and the source classification table
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Identifying the coal-combustion-related sources: combustion processes
# (SCC.Level.One) whose emission sector mentions coal (EI.Sector).
# Fix: the original combined the two patterns with '|', which also selects
# every non-coal combustion source; the inline comment ("... and ...")
# calls for the intersection, so '&' is used here.
CoalSCC <- SCC[grepl("coal",SCC$EI.Sector,ignore.case=TRUE) & grepl("combustion",SCC$SCC.Level.One,ignore.case=TRUE),]
Mergeddata <- join(NEI,CoalSCC,by="SCC",type="inner")
# Opening the PNG device
png(filename="plot4.png",width = 480, height = 480, units = "px")
# Splitting the merged records by year
splityear <- split(Mergeddata,Mergeddata$year)
# Summarizing the emissions by year (vapply keeps the result numeric)
summarizeemission <- vapply(splityear, function(x) sum(x$Emissions), numeric(1))
# Plotting total emissions over time
plot(names(summarizeemission),summarizeemission,type="l",xlab="Calendar Year",ylab="Total Emissions (in Tons)")
title(main = "US Total emissions from coal combustion related sources over years")
# Close device
dev.off() |
# libFuzzer/valgrind regression input for MGDrivE::calcCos: a 3x1 matrix of
# pathological lat/long values (a denormal double, NA, -Inf) with r = 0.
testlist <- list(latLongs = structure(c(1.60605955906252e-314, NA, -Inf), .Dim = c(3L, 1L)), r = 0)
# Invoke calcCos with the fuzzed argument list.
result <- do.call(MGDrivE::calcCos,testlist)
str(result) | /MGDrivE/inst/testfiles/calcCos/libFuzzer_calcCos/calcCos_valgrind_files/1612726820-test.R | no_license | akhikolla/updatedatatype-list2 | R | false | false | 157 | r | testlist <- list(latLongs = structure(c(1.60605955906252e-314, NA, -Inf), .Dim = c(3L, 1L)), r = 0)
# Invoke calcCos again with the same fuzzed argument list.
result <- do.call(MGDrivE::calcCos,testlist)
str(result) |
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/download.PalEON.R
\name{download.PalEON}
\alias{download.PalEON}
\title{download.PalEON}
\usage{
download.PalEON(sitename, outfolder, start_date, end_date, overwrite = FALSE,
...)
}
\arguments{
\item{sitename}{name of the site whose PalEON files should be downloaded}
\item{outfolder}{directory in which to store the downloaded files}
\item{start_date}{start of the date range to download}
\item{end_date}{end of the date range to download}
\item{overwrite}{logical; overwrite existing files? Defaults to \code{FALSE}}
\item{...}{further arguments, currently unused}
}
\description{
Download PalEON files
}
\author{
Betsy Cowdery
}
| /modules/data.atmosphere/man/download.PalEON.Rd | permissive | Kah5/pecan | R | false | true | 357 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/download.PalEON.R
\name{download.PalEON}
\alias{download.PalEON}
\title{download.PalEON}
\usage{
download.PalEON(sitename, outfolder, start_date, end_date, overwrite = FALSE,
...)
}
\arguments{
\item{sitename}{name of the site whose PalEON files should be downloaded}
\item{outfolder}{directory in which to store the downloaded files}
\item{start_date}{start of the date range to download}
\item{end_date}{end of the date range to download}
\item{overwrite}{logical; overwrite existing files? Defaults to \code{FALSE}}
\item{...}{further arguments, currently unused}
}
\description{
Download PalEON files
}
\author{
Betsy Cowdery
}
|
# vcd provides the Arthritis data set used throughout these examples.
library(vcd)
# Basic bar charts: counts of a raw variable, then of a pre-tabulated one.
ggplot(data=ToothGrowth, mapping=aes(x=dose))+
  geom_bar(stat="count")
mytable <- with(Arthritis,table(Improved))
df <- as.data.frame(mytable)
ggplot(data=df, mapping=aes(x=Improved,y=Freq))+
  geom_bar(stat="identity")
# Changing the graphical attributes of the bars (width, outline, fill).
ggplot(data=Arthritis, mapping=aes(x=Improved))+
  geom_bar(stat="count",width=0.5, color='red',fill='steelblue')
ggplot(data=Arthritis, mapping=aes(x=Improved))+
  geom_bar(stat="count",width=0.5, color='red',fill='steelblue')+
  geom_text(stat='count',aes(label= ..count..), vjust=1.6, color="white", size=3.5)+
  theme_minimal()
# This errors on purpose: 'count' is not a column; the computed statistic
# must be referenced as ..count.. (as in the example above).
ggplot(data=Arthritis, mapping=aes(x=Improved))+
  geom_bar(stat="count",width=0.5, color='red',fill='steelblue')+
  geom_text(stat='count',aes(label= count), vjust=1.6, color="white", size=3.5)+
  theme_minimal()
# NOTE(review): the fill aesthetic is mapped below, but scale_color_manual
# modifies the COLOUR scale, so these values have no visible effect;
# scale_fill_manual is presumably what was intended.
ggplot(data=Arthritis, mapping=aes(x=Improved,fill=Improved))+
  geom_bar(stat="count",width=0.5)+
  scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9"))+
  geom_text(stat='count',aes(label=..count..), vjust=1.6, color="white", size=3.5)+
  theme_minimal()
# Changing the legend position.
p <- ggplot(data=Arthritis, mapping=aes(x=Improved,fill=Improved))+
  geom_bar(stat="count",width=0.5)+
  scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9"))+
  geom_text(stat='count',aes(label=..count..), vjust=1.6, color="white", size=3.5)+
  theme_minimal()
p + theme(legend.position="top")
p + theme(legend.position="bottom")
# Remove legend
p + theme(legend.position="none")
# Changing the order of the bars via explicit x-axis limits.
p <- ggplot(data=Arthritis, mapping=aes(x=Improved,fill=Improved))+
  geom_bar(stat="count",width=0.5)+
  scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9"))+
  geom_text(stat='count',aes(label=..count..), vjust=1.6, color="white", size=3.5)+
  theme_minimal()
p + scale_x_discrete(limits=c("Marked","Some", "None"))
# Grouped bar charts
# Stacked
ggplot(data=Arthritis, mapping=aes(x=Improved,fill=Sex))+
geom_bar(stat="count",width=0.5,position='stack')+
scale_fill_manual(values=c('#999999','#E69F00'))+
geom_text(stat='count',aes(label=..count..), color="white", size=3.5,position=position_stack(0.5))+
theme_minimal()
#并行
y_max <- max(aggregate(ID~Improved+Sex,data=Arthritis,length)$ID)
ggplot(data=Arthritis, mapping=aes(x=Improved,fill=Sex))+
geom_bar(stat="count",width=0.5,position='dodge')+
scale_fill_manual(values=c('#999999','#E69F00'))+
ylim(0,y_max+5)+
geom_text(stat='count',aes(label=..count..), color="black", size=3.5,position=position_dodge(0.5),vjust=-0.5)+
theme_minimal()
#按照比例堆叠
ggplot(data=Arthritis, mapping=aes(x=Improved,fill=Sex))+
geom_bar(stat="count",width=0.5,position='fill')+
scale_fill_manual(values=c('#999999','#E69F00'))+
geom_text(stat='count',aes(label=..count..), color="white", size=3.5,position=position_fill(0.5))+
theme_minimal()
#百分比
ggplot(data=Arthritis, mapping=aes(x=Improved,fill=Sex))+
geom_bar(stat="count",width=0.5,position='fill')+
scale_fill_manual(values=c('#999999','#E69F00'))+
geom_text(stat='count',aes(label=scales::percent(..count../sum(..count..)))
, color="white", size=3.5,position=position_fill(0.5))+
theme_minimal()
library("ggplot2")
library("dplyr")
library("scales")
#win.graph(width=6, height=5,pointsize=8)
#data
df <- data.frame(
rate_cut=rep(c("0 Change", "0 - 10", "10 - 20", "20 - 30", "30 - 40","40 - 50", "50 - 60", "60 - 70","70 - 80", "80 - 90", "90 - 100", ">100"),2)
,freq=c(1,3,5,7,9,11,51,61,71,13,17,9,
5,7,9,11,15,19,61,81,93,17,21,13)
,product=c(rep('ProductA',12),rep('ProductB',12))
)
#set order
labels_order <- c("0 Change", "0 - 10", "10 - 20", "20 - 30", "30 - 40","40 - 50", "50 - 60", "60 - 70","70 - 80", "80 - 90", "90 - 100", ">100")
#set plot text
plot_legend <- c("Product A", "Product B")
plot_title <- paste0("Increase % Distribution")
annotate_title <-"Top % Increase"
annotate_prefix_1 <-"Product A = "
annotate_prefix_2 <-"Product B = "
df_sum <- df %>%
group_by(product) %>%
summarize(sumFreq=sum(freq))%>%
ungroup()%>%
select(product,sumFreq)
df <- merge(df,df_sum,by.x = 'product',by.y='product')
df <- within(df,{rate <- round(freq/sumFreq,digits=4)*100})
df <- subset(df,select=c(product,rate_cut,rate))
#set order
df$rate_cut <- factor(df$rate_cut,levels=labels_order,ordered = TRUE)
df <- df[order(df$product,df$rate_cut),]
#set position
annotate.y <- ceiling(max(round(df$rate,digits = 0))/4*2.5)
text.offset <- max(round(df$rate,digits = 0))/25
annotation <- df %>%
mutate(indicator = ifelse(substr(rate_cut,1,2) %in% c("70","80","90",'>1'),'top','increase' )) %>%
filter(indicator=='top') %>%
dplyr::group_by(product) %>%
dplyr::summarise(total = sum(rate)) %>%
select(product, total)
mytheme <- theme_classic() +
theme(
panel.background = element_blank(),
strip.background = element_blank(),
panel.grid = element_blank(),
axis.line = element_line(color = "gray95"),
axis.ticks = element_blank(),
text = element_text(family = "sans"),
axis.title = element_text(color = "gray30", size = 12),
axis.text = element_text(size = 10, color = "gray30"),
plot.title = element_text(size = 14, hjust = .5, color = "gray30"),
strip.text = element_text(color = "gray30", size = 12),
axis.line.y = element_line(size=1,linetype = 'dotted'),
axis.line.x = element_blank(),
axis.text.x = element_text(vjust = 0),
plot.margin = unit(c(0.5,0.5,0.5,0.5), "cm"),
legend.position = c(0.7, 0.9),
legend.text = element_text(color = "gray30")
)
##ggplot
ggplot(df,aes(x=rate_cut, y=rate)) +
geom_bar(stat = "identity", aes(fill = product), position = "dodge", width = 0.5) +
guides(fill = guide_legend(reverse = TRUE)) +
scale_fill_manual(values = c("#00188F","#00BCF2")
,breaks = c("ProductA","ProductB")
,labels = plot_legend
,name = "") +
geom_text(data = df
, aes(label = comma(rate), y = rate +text.offset, color = product)
,position = position_dodge(width =1)
, size = 3) +
scale_color_manual(values = c("#00BCF2", "#00188F"), guide = FALSE) +
annotate("text", x = 3, y = annotate.y, hjust = 0, color = "gray30", label = annotate_title) +
annotate("text", x = 2.5, y = annotate.y, hjust = 0, color = "gray30", label = paste0(annotate_prefix_1, annotation$total[1])) +
annotate("text", x = 2, y = annotate.y, hjust = 0, color = "gray30", label = paste0(annotate_prefix_2, annotation$total[2])) +
labs(x="Increase Percentage",y="Percent of freq",title=plot_title) +
mytheme +
coord_flip()
#添加标注
mpg %>%
group_by(class, drv) %>%
summarise(count = n()) %>%
ggplot(aes(class, count))+
geom_col(aes(fill = drv),
position = position_dodge2(preserve = 'single'))+
geom_text(aes(label = count),
position = position_dodge2(width = 0.9,
preserve = 'single'),
vjust = -0.2,
hjust = 0.5)
#堆叠的例子
mpg %>%
group_by(class, drv) %>%
summarise(count = n()) %>%
mutate(cumcount = cumsum(count)) %>%
ggplot(aes(class, count))+
geom_col(aes(fill = drv),
position = position_stack(reverse = T))+
geom_text(aes(label = cumcount),
position = position_stack(),
vjust = 0.5,
hjust = 0.5)
mpg %>%
group_by(class, drv) %>%
summarise(count = n()) %>%
mutate(cumcount = cumsum(count),
midcount = cumcount - count/2) %>%
ggplot(aes(class, count))+
geom_col(aes(fill = drv),
position = position_stack(reverse = T))+
geom_text(aes(y = midcount,
label = cumcount),
#position = position_stack(),
hjust = 0.5)
df <- tibble(
gene = factor(paste0("gene_",rep(1:16, 2)),
levels = paste0("gene_", 16:1)),
stat = c(seq(-10, -100, -10),
seq(-90, -40, 10),
seq(10, 100, 10),
seq(90, 40, -10)),
direct = rep(c("down", "up"), each = 16)
)
df
ggplot(df,
aes(gene, stat,
fill = direct))+
geom_col()+
coord_flip()+
scale_y_continuous(breaks = seq(-100, 100, 20),
labels = c(seq(100, 0, -20),
seq(20, 100, 20)))
#偏差图
df <- tibble(
gene = factor(paste0("gene_", rep(1:20)),
levels = paste0("gene_", 20:1)),
stat = c(seq(100, 10, -10),
seq(-10, -100, -10)),
direct = factor(rep(c("down", "up"), each = 10),
levels = c("up", "down"))
)
df
ggplot(df,
aes(gene, stat,
fill = direct))+
geom_col()+
coord_flip()
| /条形图.R | no_license | chenbingshun98/bsdzp | R | false | false | 8,698 | r | library(vcd)
#基本
ggplot(data=ToothGrowth, mapping=aes(x=dose))+
geom_bar(stat="count")
mytable <- with(Arthritis,table(Improved))
df <- as.data.frame(mytable)
ggplot(data=df, mapping=aes(x=Improved,y=Freq))+
geom_bar(stat="identity")
#修改条形图的图形属性
ggplot(data=Arthritis, mapping=aes(x=Improved))+
geom_bar(stat="count",width=0.5, color='red',fill='steelblue')
ggplot(data=Arthritis, mapping=aes(x=Improved))+
geom_bar(stat="count",width=0.5, color='red',fill='steelblue')+
geom_text(stat='count',aes(label= ..count..), vjust=1.6, color="white", size=3.5)+
theme_minimal()
#报错
ggplot(data=Arthritis, mapping=aes(x=Improved))+
geom_bar(stat="count",width=0.5, color='red',fill='steelblue')+
geom_text(stat='count',aes(label= count), vjust=1.6, color="white", size=3.5)+
theme_minimal()
ggplot(data=Arthritis, mapping=aes(x=Improved,fill=Improved))+
geom_bar(stat="count",width=0.5)+
scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9"))+
geom_text(stat='count',aes(label=..count..), vjust=1.6, color="white", size=3.5)+
theme_minimal()
#修改图例的位置
p <- ggplot(data=Arthritis, mapping=aes(x=Improved,fill=Improved))+
geom_bar(stat="count",width=0.5)+
scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9"))+
geom_text(stat='count',aes(label=..count..), vjust=1.6, color="white", size=3.5)+
theme_minimal()
p + theme(legend.position="top")
p + theme(legend.position="bottom")
# Remove legend
p + theme(legend.position="none")
#修改条形图的顺序
p <- ggplot(data=Arthritis, mapping=aes(x=Improved,fill=Improved))+
geom_bar(stat="count",width=0.5)+
scale_color_manual(values=c("#999999", "#E69F00", "#56B4E9"))+
geom_text(stat='count',aes(label=..count..), vjust=1.6, color="white", size=3.5)+
theme_minimal()
p + scale_x_discrete(limits=c("Marked","Some", "None"))
#包含分组的条形图
#堆叠
ggplot(data=Arthritis, mapping=aes(x=Improved,fill=Sex))+
geom_bar(stat="count",width=0.5,position='stack')+
scale_fill_manual(values=c('#999999','#E69F00'))+
geom_text(stat='count',aes(label=..count..), color="white", size=3.5,position=position_stack(0.5))+
theme_minimal()
#并行
y_max <- max(aggregate(ID~Improved+Sex,data=Arthritis,length)$ID)
ggplot(data=Arthritis, mapping=aes(x=Improved,fill=Sex))+
geom_bar(stat="count",width=0.5,position='dodge')+
scale_fill_manual(values=c('#999999','#E69F00'))+
ylim(0,y_max+5)+
geom_text(stat='count',aes(label=..count..), color="black", size=3.5,position=position_dodge(0.5),vjust=-0.5)+
theme_minimal()
#按照比例堆叠
ggplot(data=Arthritis, mapping=aes(x=Improved,fill=Sex))+
geom_bar(stat="count",width=0.5,position='fill')+
scale_fill_manual(values=c('#999999','#E69F00'))+
geom_text(stat='count',aes(label=..count..), color="white", size=3.5,position=position_fill(0.5))+
theme_minimal()
#百分比
ggplot(data=Arthritis, mapping=aes(x=Improved,fill=Sex))+
geom_bar(stat="count",width=0.5,position='fill')+
scale_fill_manual(values=c('#999999','#E69F00'))+
geom_text(stat='count',aes(label=scales::percent(..count../sum(..count..)))
, color="white", size=3.5,position=position_fill(0.5))+
theme_minimal()
library("ggplot2")
library("dplyr")
library("scales")
#win.graph(width=6, height=5,pointsize=8)
#data
df <- data.frame(
rate_cut=rep(c("0 Change", "0 - 10", "10 - 20", "20 - 30", "30 - 40","40 - 50", "50 - 60", "60 - 70","70 - 80", "80 - 90", "90 - 100", ">100"),2)
,freq=c(1,3,5,7,9,11,51,61,71,13,17,9,
5,7,9,11,15,19,61,81,93,17,21,13)
,product=c(rep('ProductA',12),rep('ProductB',12))
)
#set order
labels_order <- c("0 Change", "0 - 10", "10 - 20", "20 - 30", "30 - 40","40 - 50", "50 - 60", "60 - 70","70 - 80", "80 - 90", "90 - 100", ">100")
#set plot text
plot_legend <- c("Product A", "Product B")
plot_title <- paste0("Increase % Distribution")
annotate_title <-"Top % Increase"
annotate_prefix_1 <-"Product A = "
annotate_prefix_2 <-"Product B = "
df_sum <- df %>%
group_by(product) %>%
summarize(sumFreq=sum(freq))%>%
ungroup()%>%
select(product,sumFreq)
df <- merge(df,df_sum,by.x = 'product',by.y='product')
df <- within(df,{rate <- round(freq/sumFreq,digits=4)*100})
df <- subset(df,select=c(product,rate_cut,rate))
#set order
df$rate_cut <- factor(df$rate_cut,levels=labels_order,ordered = TRUE)
df <- df[order(df$product,df$rate_cut),]
#set position
annotate.y <- ceiling(max(round(df$rate,digits = 0))/4*2.5)
text.offset <- max(round(df$rate,digits = 0))/25
annotation <- df %>%
mutate(indicator = ifelse(substr(rate_cut,1,2) %in% c("70","80","90",'>1'),'top','increase' )) %>%
filter(indicator=='top') %>%
dplyr::group_by(product) %>%
dplyr::summarise(total = sum(rate)) %>%
select(product, total)
mytheme <- theme_classic() +
theme(
panel.background = element_blank(),
strip.background = element_blank(),
panel.grid = element_blank(),
axis.line = element_line(color = "gray95"),
axis.ticks = element_blank(),
text = element_text(family = "sans"),
axis.title = element_text(color = "gray30", size = 12),
axis.text = element_text(size = 10, color = "gray30"),
plot.title = element_text(size = 14, hjust = .5, color = "gray30"),
strip.text = element_text(color = "gray30", size = 12),
axis.line.y = element_line(size=1,linetype = 'dotted'),
axis.line.x = element_blank(),
axis.text.x = element_text(vjust = 0),
plot.margin = unit(c(0.5,0.5,0.5,0.5), "cm"),
legend.position = c(0.7, 0.9),
legend.text = element_text(color = "gray30")
)
##ggplot
ggplot(df,aes(x=rate_cut, y=rate)) +
geom_bar(stat = "identity", aes(fill = product), position = "dodge", width = 0.5) +
guides(fill = guide_legend(reverse = TRUE)) +
scale_fill_manual(values = c("#00188F","#00BCF2")
,breaks = c("ProductA","ProductB")
,labels = plot_legend
,name = "") +
geom_text(data = df
, aes(label = comma(rate), y = rate +text.offset, color = product)
,position = position_dodge(width =1)
, size = 3) +
scale_color_manual(values = c("#00BCF2", "#00188F"), guide = FALSE) +
annotate("text", x = 3, y = annotate.y, hjust = 0, color = "gray30", label = annotate_title) +
annotate("text", x = 2.5, y = annotate.y, hjust = 0, color = "gray30", label = paste0(annotate_prefix_1, annotation$total[1])) +
annotate("text", x = 2, y = annotate.y, hjust = 0, color = "gray30", label = paste0(annotate_prefix_2, annotation$total[2])) +
labs(x="Increase Percentage",y="Percent of freq",title=plot_title) +
mytheme +
coord_flip()
#添加标注
mpg %>%
group_by(class, drv) %>%
summarise(count = n()) %>%
ggplot(aes(class, count))+
geom_col(aes(fill = drv),
position = position_dodge2(preserve = 'single'))+
geom_text(aes(label = count),
position = position_dodge2(width = 0.9,
preserve = 'single'),
vjust = -0.2,
hjust = 0.5)
#堆叠的例子
mpg %>%
group_by(class, drv) %>%
summarise(count = n()) %>%
mutate(cumcount = cumsum(count)) %>%
ggplot(aes(class, count))+
geom_col(aes(fill = drv),
position = position_stack(reverse = T))+
geom_text(aes(label = cumcount),
position = position_stack(),
vjust = 0.5,
hjust = 0.5)
mpg %>%
group_by(class, drv) %>%
summarise(count = n()) %>%
mutate(cumcount = cumsum(count),
midcount = cumcount - count/2) %>%
ggplot(aes(class, count))+
geom_col(aes(fill = drv),
position = position_stack(reverse = T))+
geom_text(aes(y = midcount,
label = cumcount),
#position = position_stack(),
hjust = 0.5)
df <- tibble(
gene = factor(paste0("gene_",rep(1:16, 2)),
levels = paste0("gene_", 16:1)),
stat = c(seq(-10, -100, -10),
seq(-90, -40, 10),
seq(10, 100, 10),
seq(90, 40, -10)),
direct = rep(c("down", "up"), each = 16)
)
df
ggplot(df,
aes(gene, stat,
fill = direct))+
geom_col()+
coord_flip()+
scale_y_continuous(breaks = seq(-100, 100, 20),
labels = c(seq(100, 0, -20),
seq(20, 100, 20)))
#偏差图
df <- tibble(
gene = factor(paste0("gene_", rep(1:20)),
levels = paste0("gene_", 20:1)),
stat = c(seq(100, 10, -10),
seq(-10, -100, -10)),
direct = factor(rep(c("down", "up"), each = 10),
levels = c("up", "down"))
)
df
ggplot(df,
aes(gene, stat,
fill = direct))+
geom_col()+
coord_flip()
|
#' Run GSEA PreRanked (java GSEA v3.0 jar) on a preranked statistic matrix.
#'
#' Writes \code{preRank.matrix} (rownames = gene IDs, values = ranking
#' statistic, in descending numerical order) to a temporary .rnk file,
#' shells out to the GSEA jar, and leaves results under preRankResults/.
#'
#' @param preRank.matrix named ranking statistics (one value per gene)
#' @param gmt.file path to the .gmt gene-set file
#' @param outname report label used for the GSEA output
#' @return NULL; called for its side effects (result files on disk)
runGSEA_preRank<-function(preRank.matrix,gmt.file,outname){
  #descending numerical order
  #dump preRank into a tab-delimited txt file
  write.table(preRank.matrix,
              file='prerank.rnk',
              quote=F,
              sep='\t',
              col.names=F,
              row.names=T)
  #call java gsea version
  command <- paste('java -Xmx512m -cp ../gsea-3.0.jar xtools.gsea.GseaPreranked -gmx ', gmt.file, ' -norm meandiv -nperm 1000 -rnk prerank.rnk ',
                   ' -scoring_scheme weighted -make_sets true -rnd_seed 123456 -set_max 500 -set_min 15 -zip_report false ',
                   ' -out preRankResults -create_svgs true -gui false -rpt_label ',outname, sep='')
  # On Windows suppress console output; elsewhere run the command as-is.
  if(get_os() == "win"){
    system(command,show.output.on.console=F)
  }else{
    system(command)
  }
  # Bug fix: remove the ranking file actually written above.
  # (Previously unlink('prerank.txt'), a file that never existed, so
  # prerank.rnk was left behind after every run.)
  unlink('prerank.rnk')
}
| /6-PathwayHeterogeneity/runGSEA_preRank.R | permissive | zhengtaoxiao/Single-Cell-Metabolic-Landscape | R | false | false | 836 | r | runGSEA_preRank<-function(preRank.matrix,gmt.file,outname){
#descending numerical order
#dump preRank into a tab-delimited txt file
write.table(preRank.matrix,
file='prerank.rnk',
quote=F,
sep='\t',
col.names=F,
row.names=T)
#call java gsea version
command <- paste('java -Xmx512m -cp ../gsea-3.0.jar xtools.gsea.GseaPreranked -gmx ', gmt.file, ' -norm meandiv -nperm 1000 -rnk prerank.rnk ',
' -scoring_scheme weighted -make_sets true -rnd_seed 123456 -set_max 500 -set_min 15 -zip_report false ',
' -out preRankResults -create_svgs true -gui false -rpt_label ',outname, sep='')
if(get_os() == "win"){
system(command,show.output.on.console=F)
}else{
system(command)
}
unlink(c('prerank.txt'))
}
|
#' Split a string and retain the nth value
#'
#' Retrieve the automated Heidelberg segmentation data from an OCT object
#'
#' @param x a character vector
#' @param split a character string to use as the split pattern
#' @param n either "last" or an integer position
#' @param ... parameters to pass to strsplit
#'
#' @export
#' @return a tbl_df of the segmentation data
strsplit_nth <- function(x, split, n="last", ...) {
  # Split every string in the vector on `split` (extra args, e.g.
  # fixed = TRUE, are forwarded to strsplit).
  pieces <- strsplit(x, split = split, ...)
  # Pick the requested piece from each split result. vapply (instead of
  # lapply + unlist) guarantees exactly one character value per element,
  # so a malformed pick fails loudly rather than silently changing the
  # result's length or type.
  vapply(pieces, function(p) {
    if (identical(n, "last")) {
      # Last piece of this string's split
      p[[length(p)]]
    } else {
      # n-th piece (n may arrive as character or numeric)
      p[[as.numeric(n)]]
    }
  }, character(1))
}
| /R/strsplit_nth.R | permissive | barefootbiology/heyexr | R | false | false | 824 | r | #' Split a string and retain the nth value
#'
#' Retrieve the automated Heidelberg segmentation data from an OCT object
#'
#' @param x a character vector
#' @param split a character string to use as the split pattern
#' @param n either "last" or an integer position
#' @param ... parameters to pass to strsplit
#'
#' @export
#' @return a tbl_df of the segmentation data
strsplit_nth <- function(x, split, n="last", ...) {
# First, split each character string in the vector
result <- strsplit(x, split = split, ...)
# Next, select the nth piece
result_2 <- lapply(result, function(x) {
if(n == "last") {
y <- x[[length(x)]]
} else {
y <- x[[as.numeric(n)]]
}
return(y)
})
# Return a vector of character strings
return(unlist(result_2))
}
|
################################################################################
## Code to fit various outcome models
################################################################################
#' Use a separate regularized regression for each post period
#' to fit E[Y(0)|X]
#' @importFrom stats poly
#' @importFrom stats coef
#'
#' @param X Matrix of covariates/lagged outcomes
#' @param y Matrix of post-period outcomes
#' @param trt Vector of treatment indicator
#' @param alpha Mixing between L1 and L2, default: 1 (LASSO)
#' @param lambda Regularization hyperparameter, if null then CV
#' @param poly_order Order of polynomial to fit, default 1
#' @param type How to fit outcome model(s)
#' \itemize{
#' \item{sep }{Separate outcome models}
#' \item{avg }{Average responses into 1 outcome}
#' \item{multi }{Use multi response regression in glmnet}}
#' @param ... optional arguments for outcome model
#' @noRd
#' @return \itemize{
#' \item{y0hat }{Predicted outcome under control}
#' \item{params }{Regression parameters}}
fit_prog_reg <- function(X, y, trt, alpha=1, lambda=NULL,
                         poly_order=1, type="sep", ...) {
    # glmnet is an optional dependency; fail early with a clear message.
    if(!requireNamespace("glmnet", quietly = TRUE)) {
        stop("In order to fit an elastic net outcome model, you must install the glmnet package.")
    }
    # Warn about (rather than silently drop) unrecognized ... arguments.
    extra_params = list(...)
    if (length(extra_params) > 0) {
        warning("Unused parameters when using elastic net: ", paste(names(extra_params), collapse = ", "))
    }
    # Expand covariates with orthogonal polynomial terms.
    # NOTE(review): poly() is applied to the whole matrix and reshaped back
    # to n rows; for poly_order > 1 confirm this yields the intended
    # column-wise polynomial features.
    X <- matrix(poly(matrix(X),degree=poly_order), nrow=dim(X)[1])
    ## helper function to fit regression with CV
    ## (cross-validates lambda unless one was supplied) and return the
    ## coefficient vector, intercept included
    outfit <- function(x, y) {
        if(is.null(lambda)) {
            lam <- glmnet::cv.glmnet(x, y, alpha=alpha, grouped=FALSE)$lambda.min
        } else {
            lam <- lambda
        }
        fit <- glmnet::glmnet(x, y, alpha=alpha,
                              lambda=lam)
        return(as.matrix(coef(fit)))
    }
    if(type=="avg") {
        ## if fitting the average post period value, stack post periods together
        ## (one long response vector, with X replicated once per post period),
        ## then fit on control units only
        stacky <- c(y)
        stackx <- do.call(rbind,
                          lapply(1:dim(y)[2],
                                 function(x) X))
        stacktrt <- rep(trt, dim(y)[2])
        regweights <- outfit(stackx[stacktrt==0,],
                             stacky[stacktrt==0])
    } else if(type=="sep"){
        ## fit separate regressions for each post period
        ## (control units only; one coefficient column per post period)
        regweights <- apply(as.matrix(y), 2,
                            function(yt) outfit(X[trt==0,],
                                                yt[trt==0]))
    } else {
        ## fit multi response regression
        ## NOTE(review): unlike the other branches this fits on ALL units
        ## (treated included, no trt==0 subset) -- confirm intended.
        lam <- glmnet::cv.glmnet(X, y, family="mgaussian",
                                 alpha=alpha, grouped=FALSE)$lambda.min
        fit <- glmnet::glmnet(X, y, family="mgaussian",
                              alpha=alpha,
                              lambda=lam)
        regweights <- as.matrix(do.call(cbind, coef(fit)))
    }
    ## Get predicted values
    ## (prepend a column of ones to match the intercept row in coef())
    y0hat <- cbind(rep(1, dim(X)[1]),
                   X) %*% regweights
    return(list(y0hat = y0hat,
                params = regweights))
}
#' Use a separate random forest regression for each post period
#' to fit E[Y(0)|X]
#'
#' @param X Matrix of covariates/lagged outcomes
#' @param y Matrix of post-period outcomes
#' @param trt Vector of treatment indicator
#' @param avg Predict the average post-treatment outcome
#' @param ... optional arguments for outcome model
#' @noRd
#' @return \itemize{
#' \item{y0hat }{Predicted outcome under control}
#' \item{params }{Regression parameters}}
fit_prog_rf <- function(X, y, trt, avg=FALSE, ...) {
    # randomForest is an optional dependency; fail early with a clear message.
    if(!requireNamespace("randomForest", quietly = TRUE)) {
        stop("In order to fit a random forest outcome model, you must install the randomForest package.")
    }
    # Warn about (rather than silently drop) unrecognized ... arguments.
    extra_params = list(...)
    if (length(extra_params) > 0) {
        warning("Unused parameters when using random forest: ", paste(names(extra_params), collapse = ", "))
    }
    ## helper function to fit RF
    outfit <- function(x, y) {
        fit <- randomForest::randomForest(x, y)
        return(fit)
    }
    # Single-model branch when averaging or when there is only one post
    # period. (Scalar condition written with vectorized `|`; works here but
    # `||` would be conventional.)
    if(avg | dim(y)[2] == 1) {
        ## if fitting the average post period value, stack post periods together
        stacky <- c(y)
        stackx <- do.call(rbind,
                          lapply(1:dim(y)[2],
                                 function(x) X))
        stacktrt <- rep(trt, dim(y)[2])
        # Fit on control units only
        fit <- outfit(stackx[stacktrt==0,],
                      stacky[stacktrt==0])
        ## predict outcome
        y0hat <- matrix(predict(fit, X), ncol=1)
        ## keep feature importances
        imports <- randomForest::importance(fit)
    } else {
        ## fit separate regressions for each post period
        fits <- apply(as.matrix(y), 2,
                      function(yt) outfit(X[trt==0,],
                                          yt[trt==0]))
        ## predict outcome
        ## NOTE(review): per-period prediction matrices are combined with
        ## bind_rows (row-stacking); if y0hat is meant to be n x T with one
        ## column per post period, a column-bind may be intended -- confirm.
        y0hat <- lapply(fits, function(fit) as.matrix(predict(fit,X))) %>%
            bind_rows() %>%
            as.matrix()
        ## keep feature importances
        imports <- lapply(fits, function(fit) randomForest::importance(fit)) %>%
            bind_rows() %>%
            as.matrix()
    }
    return(list(y0hat=y0hat,
                params=imports))
}
#' Use gsynth to fit factor model for E[Y(0)|X]
#'
#' @param X Matrix of covariates/lagged outcomes
#' @param y Matrix of post-period outcomes
#' @param trt Vector of treatment indicator
#' @param r Number of factors to use (or start with if CV==1)
#' @param r.end Max number of factors to consider if CV==1
#' @param force Fixed effects (0=none, 1=unit, 2=time, 3=two-way)
#' @param CV Whether to do CV (0=no CV, 1=yes CV)
#' @param ... optional arguments for outcome model
#' @noRd
#' @return \itemize{
#' \item{y0hat }{Predicted outcome under control}
#' \item{params }{Regression parameters}}
fit_prog_gsynth <- function(X, y, trt, r=0, r.end=5, force=3, CV=1, ...) {
  # gsynth is an optional dependency; fail early with a clear message.
  # NOTE(review): r.end is accepted but never passed to gsynth() below --
  # confirm whether it should be forwarded when CV == 1.
  if(!requireNamespace("gsynth", quietly = TRUE)) {
    stop("In order to fit generalized synthetic controls, you must install the gsynth package.")
  }
  # Warn about (rather than silently drop) unrecognized ... arguments.
  extra_params = list(...)
  if (length(extra_params) > 0) {
    warning("Unused parameters when using gSynth: ", paste(names(extra_params), collapse = ", "))
  }
  # Reshape the pre-period matrix X into long format (unit, trt, time, obs);
  # pre-period rows are marked untreated (trt = 0).
  df_x = data.frame(X, check.names=FALSE)
  df_x$unit = rownames(df_x)
  df_x$trt = rep(0, nrow(df_x))
  df_x <- df_x %>% select(unit, trt, everything())
  long_df_x = gather(df_x, time, obs, -c(unit,trt))
  # Same reshaping for the post-period outcomes, carrying the real
  # treatment indicator.
  df_y = data.frame(y, check.names=FALSE)
  df_y$unit = rownames(df_y)
  df_y$trt = trt
  df_y <- df_y %>% select(unit, trt, everything())
  long_df_y = gather(df_y, time, obs, -c(unit,trt))
  long_df = rbind(long_df_x, long_df_y)
  # NOTE(review): transform() returns a modified copy and these two results
  # are discarded, so time/unit remain character in long_df -- confirm
  # whether `long_df <- transform(...)` was intended.
  transform(long_df, time = as.numeric(time))
  transform(long_df, unit = as.numeric(unit))
  # Fit the interactive fixed-effects (generalized synthetic control) model.
  gsyn <- gsynth::gsynth(data = long_df, Y = "obs", D = "trt",
                         index = c("unit", "time"), force = force, CV = CV, r = r)
  t0 <- dim(X)[2]
  t_final <- t0 + dim(y)[2]
  n <- dim(X)[1]
  ## get predicted outcomes
  ## (controls: observed minus residual; treated: gsynth's counterfactual)
  y0hat <- matrix(0, nrow=n, ncol=(t_final-t0))
  y0hat[trt==0,] <- t(gsyn$Y.co[(t0+1):t_final,,drop=FALSE] -
                        gsyn$est.co$residuals[(t0+1):t_final,,drop=FALSE])
  y0hat[trt==1,] <- gsyn$Y.ct[(t0+1):t_final,]
  ## add treated prediction for whole pre-period
  gsyn$est.co$Y.ct <- gsyn$Y.ct
  ## control and treated residuals
  ## (treated residual = mean observed trajectory minus mean counterfactual)
  gsyn$est.co$ctrl_resids <- gsyn$est.co$residuals
  gsyn$est.co$trt_resids <- colMeans(cbind(X[trt==1,,drop=FALSE],
                                           y[trt==1,,drop=FALSE])) -
    rowMeans(gsyn$est.co$Y.ct)
  return(list(y0hat=y0hat,
              params=gsyn$est.co))
}
#' Use Athey (2017) matrix completion panel data code
#'
#' @param X Matrix of covariates/lagged outcomes
#' @param y Matrix of post-period outcomes
#' @param trt Vector of treatment indicator
#' @param unit_fixed Whether to estimate unit fixed effects
#' @param time_fixed Whether to estimate time fixed effects
#' @param ... optional arguments for outcome model
#' @noRd
#' @return \itemize{
#' \item{y0hat }{Predicted outcome under control}
#' \item{params }{Regression parameters}}
fit_prog_mcpanel <- function(X, y, trt, unit_fixed=1, time_fixed=1, ...) {
if(!requireNamespace("MCPanel", quietly = TRUE)) {
stop("In order to fit matrix completion, you must install the MCPanel package.")
}
extra_params = list(...)
if (length(extra_params) > 0) {
warning("Unused parameters when using MCPanel: ", paste(names(extra_params), collapse = ", "))
}
## create matrix and missingness matrix
t0 <- dim(X)[2]
t_final <- t0 + dim(y)[2]
n <- dim(X)[1]
fullmat <- cbind(X, y)
maskmat <- matrix(1, nrow=nrow(fullmat), ncol=ncol(fullmat))
maskmat[trt==1, (t0+1):t_final] <- 0
## estimate matrix
mcp <- MCPanel::mcnnm_cv(fullmat, maskmat,
to_estimate_u=unit_fixed, to_estimate_v=time_fixed)
## impute matrix
imp_mat <- mcp$L +
sweep(matrix(0, nrow=nrow(fullmat), ncol=ncol(fullmat)), 1, mcp$u, "+") + # unit fixed
sweep(matrix(0, nrow=nrow(fullmat), ncol=ncol(fullmat)), 2, mcp$v, "+") # time fixed
trtmat <- matrix(0, ncol=n, nrow=t_final)
trtmat[t0:t_final, trt == 1] <- 1
## get predicted outcomes
y0hat <- imp_mat[,(t0+1):t_final,drop=FALSE]
params <- mcp
params$trt_resids <- colMeans(cbind(X[trt==1,,drop=FALSE],
y[trt==1,,drop=FALSE])) -
rowMeans(imp_mat[trt==1,,drop=FALSE])
params$ctrl_resids <- t(cbind(X[trt==0,,drop=FALSE],
y[trt==0,,drop=FALSE]) - imp_mat[trt==0,,drop=FALSE])
params$Y.ct <- t(imp_mat[trt==1,,drop=FALSE])
return(list(y0hat=y0hat,
params=params))
}
#' Fit a Comparitive interupted time series
#' to fit E[Y(0)|X]
#' @importFrom stats lm
#' @importFrom stats predict
#'
#' @param X Matrix of covariates/lagged outcomes
#' @param y Matrix of post-period outcomes
#' @param trt Vector of treatment indicator
#' @param poly_order Order of time trend polynomial to fit, default 1
#' @param weights Weights to use in WLS, default is no weights
#' @param ... optional arguments for outcome model
#' @noRd
#' @return \itemize{
#' \item{y0hat }{Predicted outcome under control}
#' \item{params }{Regression parameters}}
fit_prog_cits <- function(X, y, trt, poly_order=1, weights=NULL, ...) {
extra_params = list(...)
if (length(extra_params) > 0) {
warning("Unused parameters when using CITS: ", paste(names(extra_params), collapse = ", "))
}
## combine back into a panel structure
ids <- 1:nrow(X)
t0 <- dim(X)[2]
t_final <- t0 + dim(y)[2]
n <- nrow(X)
if(is.null(weights)) {
weights <- rep(1, n)
}
pnl1 <- data.frame(X)
colnames(pnl1) <- 1:(t0)
pnl1 <- pnl1 %>% mutate(trt=trt, post=0, id=ids, weight=weights) %>%
gather(time, val, -trt, -post, -id, -weight) %>%
mutate(time=as.numeric(time))
pnl2 <- data.frame(y)
colnames(pnl2) <- (t0+1):t_final
pnl2 <- pnl2 %>% mutate(trt=trt, post=1, id=ids, weight=weights) %>%
gather(time, val, -trt, -post, -id, -weight) %>%
mutate(time=as.numeric(time))
pnl <- bind_rows(pnl1, pnl2)
## fit regression
if(poly_order == "fixed") {
fit <- pnl %>%
filter(!((post==1) & (trt==1))) %>% ## filter out post-period treated outcomes
lm(val ~ as.factor(id) + as.factor(time),
.,
weights = .$weight
)
} else if(poly_order > 0) {
fit <- pnl %>%
filter(!((post==1) & (trt==1))) %>% ## filter out post-period treated outcomes
lm(val ~ poly(time, poly_order) + post + trt + poly(time * trt, poly_order),
.,
weights = .$weight
)
} else {
fit <- pnl %>%
filter(!((post==1) & (trt==1))) %>% ## filter out post-period treated outcomes
lm(val ~ post + trt,
.,
weights = .$weight
)
}
## get predicted post-period outcomes
y0hat <- matrix(0, nrow=n, ncol=(t_final-t0))
y0hat[trt==0,] <- matrix(predict(fit,
pnl %>% filter(post==1 & trt==0)),
ncol=ncol(y))
y0hat[trt==1,] <- matrix(predict(fit,
pnl %>% filter(post==1 & trt==1)),
ncol=ncol(y))
params <- list()
## add treated prediction for whole pre-period
params$Y.ct <- matrix(predict(fit,
pnl %>% filter(trt==1),
ncol=(ncol(X) + ncol(y))))
## and control prediction
ctrl_pred <- matrix(predict(fit,
pnl %>% filter(trt==0)),
ncol=(ncol(X) + ncol(y)))
## control and treated residuals
params$ctrl_resids <- t(cbind(X[trt==0,,drop=FALSE],
y[trt==0,,drop=FALSE])) -
t(ctrl_pred)
params$trt_resids <- colMeans(cbind(X[trt==1,,drop=FALSE],
y[trt==1,,drop=FALSE])) -
rowMeans(params$Y.ct)
return(list(y0hat=y0hat,
params=params))
}
#' Fit a bayesian structural time series
#' to fit E[Y(0)|X]
#'
#' @param X Matrix of covariates/lagged outcomes
#' @param y Matrix of post-period outcomes
#' @param trt Vector of treatment indicator
#' @param ... optional arguments for outcome model
#' @noRd
#' @return \itemize{
#' \item{y0hat }{Predicted outcome under control}
#' \item{params }{Model parameters}}
fit_prog_causalimpact <- function(X, y, trt, ...) {
if(!requireNamespace("CausalImpact", quietly = TRUE)) {
stop("In order to fit bayesian structural time series, you must install the CausalImpact package.")
}
extra_params = list(...)
if (length(extra_params) > 0) {
warning("Unused parameters using Bayesian structural time series with CausalImpact: ", paste(names(extra_params), collapse = ", "))
}
## structure data accordingly
ids <- 1:nrow(X)
t0 <- dim(X)[2]
t_final <- t0 + dim(y)[2]
n <- nrow(X)
comb <- cbind(X, y)
imp_dat <- t(rbind(colMeans(comb[trt==1,,drop=F]), comb[trt==0,,drop=F]))
## get predicted post-period outcomes
## TODO: is this the way to use CausalImpact??
ci_func <- function(i) {
## fit causal impact using controls
CausalImpact::CausalImpact(t(rbind(comb[i,], comb[-i,][trt[-i]==0,])),
pre.period=c(1, t0), post.period=c(t0+1, t_final)
)$series$point.pred
}
y0hat <- t(sapply(1:n, ci_func))
params <- list()
## add treated prediction for whole pre-period
params$Y.ct <- t(y0hat[trt==1,,drop=F])
## and control prediction
ctrl_pred <- y0hat[trt==0,,drop=F]
## control and treated residuals
params$ctrl_resids <- t(cbind(X[trt==0,,drop=FALSE],
y[trt==0,,drop=FALSE])) -
t(ctrl_pred)
params$trt_resids <- colMeans(cbind(X[trt==1,,drop=FALSE],
y[trt==1,,drop=FALSE])) -
rowMeans(params$Y.ct)
return(list(y0hat=y0hat[,(t0+1):t_final, drop=F],
params=params))
}
#' Fit a seq2seq model with a feedforward net
#' to fit E[Y(0)|X]
#'
#' @param X Matrix of covariates/lagged outcomes
#' @param y Matrix of post-period outcomes
#' @param trt Vector of treatment indicator
#' @param layers List of (n_hidden_units, activation function) pairs to define layers
#' @param epochs Number of epochs for training
#' @param patience Number of epochs to wait before early stopping
#' @param val_split Proportion of control units to use for validation
#' @param verbose Whether to print training progress
#' @param ... optional arguments for outcome model
#' @noRd
#' @return \itemize{
#' \item{y0hat }{Predicted outcome under control}
#' \item{params }{Model parameters}}
fit_prog_seq2seq <- function(X, y, trt,
                             layers=list(c(50, "relu"), c(5, "relu")),
                             epochs=500,
                             patience=5,
                             val_split=0.2,
                             verbose=FALSE, ...) {
    ## keras is an optional dependency
    if (!requireNamespace("keras", quietly = TRUE)) {
        stop("In order to fit a neural network, you must install the keras package.")
    }
    ## warn about (and ignore) any extra arguments
    extra_params <- list(...)
    if (length(extra_params) > 0) {
        warning("Unused parameters when building sequence to sequence learning with feedforward nets: ", paste(names(extra_params), collapse = ", "))
    }
    ## train only on control units: map pre-period outcomes to post-period outcomes
    Xctrl <- X[trt == 0, , drop = FALSE]
    yctrl <- y[trt == 0, , drop = FALSE]
    ## input layer
    model <- keras::keras_model_sequential() %>%
        keras::layer_dense(units = layers[[1]][1], activation = layers[[1]][2],
                           input_shape = ncol(Xctrl))
    ## remaining hidden layers
    for (layer in layers[-1]) {
        model %>% keras::layer_dense(units = layer[1], activation = layer[2])
    }
    ## output layer: one unit per post-period outcome
    model %>% keras::layer_dense(units = ncol(yctrl))
    ## compile
    model %>% keras::compile(optimizer = "rmsprop", loss = "mse", metrics = c("mae"))
    ## fit model with early stopping on a held-out validation split
    learn <- model %>%
        keras::fit(x = Xctrl, y = yctrl,
                   epochs = epochs,
                   batch_size = nrow(Xctrl),
                   validation_split = val_split,
                   callbacks = list(keras::callback_early_stopping(patience = patience)),
                   verbose = verbose)
    ## predict counterfactual outcomes for all units (treated and control)
    y0hat <- model %>% predict(X)
    params <- list(model = model, learn = learn)
    return(list(y0hat = y0hat,
                params = params))
}
| /R/outcome_models.R | permissive | ebenmichael/augsynth | R | false | false | 18,480 | r | ################################################################################
## Code to fit various outcome models
################################################################################
#' Use a separate regularized regression for each post period
#' to fit E[Y(0)|X]
#' @importFrom stats poly
#' @importFrom stats coef
#'
#' @param X Matrix of covariates/lagged outcomes
#' @param y Matrix of post-period outcomes
#' @param trt Vector of treatment indicator
#' @param alpha Mixing between L1 and L2, default: 1 (LASSO)
#' @param lambda Regularization hyperparameter, if null then CV
#' @param poly_order Order of polynomial to fit, default 1
#' @param type How to fit outcome model(s)
#' \itemize{
#' \item{sep }{Separate outcome models}
#' \item{avg }{Average responses into 1 outcome}
#' \item{multi }{Use multi response regression in glmnet}}
#' @param ... optional arguments for outcome model
#' @noRd
#' @return \itemize{
#' \item{y0hat }{Predicted outcome under control}
#' \item{params }{Regression parameters}}
fit_prog_reg <- function(X, y, trt, alpha=1, lambda=NULL,
                         poly_order=1, type="sep", ...) {
    ## glmnet is an optional dependency
    if(!requireNamespace("glmnet", quietly = TRUE)) {
        stop("In order to fit an elastic net outcome model, you must install the glmnet package.")
    }
    ## warn about (and ignore) any extra arguments
    extra_params = list(...)
    if (length(extra_params) > 0) {
        warning("Unused parameters when using elastic net: ", paste(names(extra_params), collapse = ", "))
    }
    ## Polynomial expansion of the lagged outcomes.
    ## NOTE(review): poly() is applied to the flattened matrix and then reshaped
    ## back with nrow(X) rows; for poly_order > 1 this silently changes the
    ## number of columns -- confirm this is the intended feature expansion.
    X <- matrix(poly(matrix(X),degree=poly_order), nrow=dim(X)[1])
    ## helper function to fit regression with CV
    ## (cross-validates the penalty only when `lambda` is not supplied)
    outfit <- function(x, y) {
        if(is.null(lambda)) {
            lam <- glmnet::cv.glmnet(x, y, alpha=alpha, grouped=FALSE)$lambda.min
        } else {
            lam <- lambda
        }
        fit <- glmnet::glmnet(x, y, alpha=alpha,
                              lambda=lam)
        ## return intercept + coefficients as a column vector
        return(as.matrix(coef(fit)))
    }
    if(type=="avg") {
        ## if fitting the average post period value, stack post periods together
        ## (one long response vector with X repeated once per post period)
        stacky <- c(y)
        stackx <- do.call(rbind,
                          lapply(1:dim(y)[2],
                                 function(x) X))
        stacktrt <- rep(trt, dim(y)[2])
        ## fit a single coefficient vector on control units only
        regweights <- outfit(stackx[stacktrt==0,],
                             stacky[stacktrt==0])
    } else if(type=="sep"){
        ## fit separate regressions for each post period (one coef column each)
        regweights <- apply(as.matrix(y), 2,
                            function(yt) outfit(X[trt==0,],
                                                yt[trt==0]))
    } else {
        ## fit multi response regression (all post periods jointly)
        lam <- glmnet::cv.glmnet(X, y, family="mgaussian",
                                 alpha=alpha, grouped=FALSE)$lambda.min
        fit <- glmnet::glmnet(X, y, family="mgaussian",
                              alpha=alpha,
                              lambda=lam)
        regweights <- as.matrix(do.call(cbind, coef(fit)))
    }
    ## Get predicted values (prepend a column of ones for the intercept)
    y0hat <- cbind(rep(1, dim(X)[1]),
                   X) %*% regweights
    return(list(y0hat = y0hat,
                params = regweights))
}
#' Use a separate random forest regression for each post period
#' to fit E[Y(0)|X]
#'
#' @param X Matrix of covariates/lagged outcomes
#' @param y Matrix of post-period outcomes
#' @param trt Vector of treatment indicator
#' @param avg Predict the average post-treatment outcome
#' @param ... optional arguments for outcome model
#' @noRd
#' @return \itemize{
#' \item{y0hat }{Predicted outcome under control}
#' \item{params }{Regression parameters}}
fit_prog_rf <- function(X, y, trt, avg=FALSE, ...) {
    ## randomForest is an optional dependency
    if(!requireNamespace("randomForest", quietly = TRUE)) {
        stop("In order to fit a random forest outcome model, you must install the randomForest package.")
    }
    ## warn about (and ignore) any extra arguments
    extra_params = list(...)
    if (length(extra_params) > 0) {
        warning("Unused parameters when using random forest: ", paste(names(extra_params), collapse = ", "))
    }
    ## helper function to fit RF
    outfit <- function(x, y) {
        fit <- randomForest::randomForest(x, y)
        return(fit)
    }
    if(avg | dim(y)[2] == 1) {
        ## if fitting the average post period value, stack post periods together
        ## (X repeated once per post period, responses concatenated)
        stacky <- c(y)
        stackx <- do.call(rbind,
                          lapply(1:dim(y)[2],
                                 function(x) X))
        stacktrt <- rep(trt, dim(y)[2])
        ## fit on control units only
        fit <- outfit(stackx[stacktrt==0,],
                      stacky[stacktrt==0])
        ## predict outcome (single column: average/only post period)
        y0hat <- matrix(predict(fit, X), ncol=1)
        ## keep feature importances
        imports <- randomForest::importance(fit)
    } else {
        ## fit separate regressions for each post period
        fits <- apply(as.matrix(y), 2,
                      function(yt) outfit(X[trt==0,],
                                          yt[trt==0]))
        ## predict outcome
        ## NOTE(review): bind_rows() expects data frames; passing matrices from
        ## as.matrix(predict(...)) may error with recent dplyr -- confirm.
        y0hat <- lapply(fits, function(fit) as.matrix(predict(fit,X))) %>%
            bind_rows() %>%
            as.matrix()
        ## keep feature importances
        imports <- lapply(fits, function(fit) randomForest::importance(fit)) %>%
            bind_rows() %>%
            as.matrix()
    }
    return(list(y0hat=y0hat,
                params=imports))
}
#' Use gsynth to fit factor model for E[Y(0)|X]
#'
#' @param X Matrix of covariates/lagged outcomes
#' @param y Matrix of post-period outcomes
#' @param trt Vector of treatment indicator
#' @param r Number of factors to use (or start with if CV==1)
#' @param r.end Max number of factors to consider if CV==1
#' @param force Fixed effects (0=none, 1=unit, 2=time, 3=two-way)
#' @param CV Whether to do CV (0=no CV, 1=yes CV)
#' @param ... optional arguments for outcome model
#' @noRd
#' @return \itemize{
#' \item{y0hat }{Predicted outcome under control}
#' \item{params }{Regression parameters}}
fit_prog_gsynth <- function(X, y, trt, r=0, r.end=5, force=3, CV=1, ...) {
    ## gsynth is an optional dependency
    if (!requireNamespace("gsynth", quietly = TRUE)) {
        stop("In order to fit generalized synthetic controls, you must install the gsynth package.")
    }
    ## warn about (and ignore) any extra arguments
    extra_params <- list(...)
    if (length(extra_params) > 0) {
        warning("Unused parameters when using gSynth: ", paste(names(extra_params), collapse = ", "))
    }
    ## reshape pre-period outcomes to long format (unit, trt, time, obs);
    ## trt is 0 for every pre-period row so gsynth sees treatment turn on post-t0
    df_x = data.frame(X, check.names=FALSE)
    df_x$unit = rownames(df_x)
    df_x$trt = rep(0, nrow(df_x))
    df_x <- df_x %>% select(unit, trt, everything())
    long_df_x = gather(df_x, time, obs, -c(unit,trt))
    ## reshape post-period outcomes, carrying the actual treatment indicator
    df_y = data.frame(y, check.names=FALSE)
    df_y$unit = rownames(df_y)
    df_y$trt = trt
    df_y <- df_y %>% select(unit, trt, everything())
    long_df_y = gather(df_y, time, obs, -c(unit,trt))
    long_df = rbind(long_df_x, long_df_y)
    ## NOTE: earlier code called transform(long_df, time = as.numeric(time)) and
    ## transform(long_df, unit = as.numeric(unit)) without assigning the result,
    ## so the conversions never took effect; the no-op calls are dropped here.
    ## If numeric index columns are actually required, assign the result of
    ## transform() back to long_df.
    gsyn <- gsynth::gsynth(data = long_df, Y = "obs", D = "trt",
                           index = c("unit", "time"), force = force, CV = CV, r = r)
    t0 <- dim(X)[2]
    t_final <- t0 + dim(y)[2]
    n <- dim(X)[1]
    ## get predicted post-period outcomes
    y0hat <- matrix(0, nrow=n, ncol=(t_final-t0))
    ## controls: observed minus residual = fitted value
    y0hat[trt==0,] <- t(gsyn$Y.co[(t0+1):t_final,,drop=FALSE] -
                        gsyn$est.co$residuals[(t0+1):t_final,,drop=FALSE])
    ## treated: imputed counterfactual
    y0hat[trt==1,] <- gsyn$Y.ct[(t0+1):t_final,]
    ## add treated prediction for whole pre-period (time x treated units)
    gsyn$est.co$Y.ct <- gsyn$Y.ct
    ## control and treated residuals
    gsyn$est.co$ctrl_resids <- gsyn$est.co$residuals
    gsyn$est.co$trt_resids <- colMeans(cbind(X[trt==1,,drop=FALSE],
                                             y[trt==1,,drop=FALSE])) -
        rowMeans(gsyn$est.co$Y.ct)
    return(list(y0hat=y0hat,
                params=gsyn$est.co))
}
#' Use Athey (2017) matrix completion panel data code
#'
#' @param X Matrix of covariates/lagged outcomes
#' @param y Matrix of post-period outcomes
#' @param trt Vector of treatment indicator
#' @param unit_fixed Whether to estimate unit fixed effects
#' @param time_fixed Whether to estimate time fixed effects
#' @param ... optional arguments for outcome model
#' @noRd
#' @return \itemize{
#' \item{y0hat }{Predicted outcome under control}
#' \item{params }{Regression parameters}}
fit_prog_mcpanel <- function(X, y, trt, unit_fixed=1, time_fixed=1, ...) {
    ## MCPanel is an optional dependency
    if (!requireNamespace("MCPanel", quietly = TRUE)) {
        stop("In order to fit matrix completion, you must install the MCPanel package.")
    }
    ## warn about (and ignore) any extra arguments
    extra_params <- list(...)
    if (length(extra_params) > 0) {
        warning("Unused parameters when using MCPanel: ", paste(names(extra_params), collapse = ", "))
    }
    ## create full outcome matrix and missingness mask:
    ## 0 marks the (treated, post-period) cells to impute
    t0 <- dim(X)[2]
    t_final <- t0 + dim(y)[2]
    n <- dim(X)[1]
    fullmat <- cbind(X, y)
    maskmat <- matrix(1, nrow=nrow(fullmat), ncol=ncol(fullmat))
    maskmat[trt==1, (t0+1):t_final] <- 0
    ## estimate low-rank matrix with cross-validated penalty
    mcp <- MCPanel::mcnnm_cv(fullmat, maskmat,
                             to_estimate_u=unit_fixed, to_estimate_v=time_fixed)
    ## impute matrix: low-rank component plus unit and time fixed effects
    imp_mat <- mcp$L +
        sweep(matrix(0, nrow=nrow(fullmat), ncol=ncol(fullmat)), 1, mcp$u, "+") + # unit fixed
        sweep(matrix(0, nrow=nrow(fullmat), ncol=ncol(fullmat)), 2, mcp$v, "+")   # time fixed
    ## get predicted post-period outcomes
    y0hat <- imp_mat[,(t0+1):t_final,drop=FALSE]
    params <- mcp
    ## BUG FIX: the average treated residual at each time period compares two
    ## length-t_final vectors; the previous code used rowMeans over the treated
    ## rows (length = number of treated units), causing silent recycling.
    params$trt_resids <- colMeans(cbind(X[trt==1,,drop=FALSE],
                                        y[trt==1,,drop=FALSE])) -
        colMeans(imp_mat[trt==1,,drop=FALSE])
    ## per-unit control residuals (time x control units)
    params$ctrl_resids <- t(cbind(X[trt==0,,drop=FALSE],
                                  y[trt==0,,drop=FALSE]) - imp_mat[trt==0,,drop=FALSE])
    ## treated prediction for the whole period (time x treated units)
    params$Y.ct <- t(imp_mat[trt==1,,drop=FALSE])
    return(list(y0hat=y0hat,
                params=params))
}
#' Fit a comparative interrupted time series
#' to fit E[Y(0)|X]
#' @importFrom stats lm
#' @importFrom stats predict
#'
#' @param X Matrix of covariates/lagged outcomes
#' @param y Matrix of post-period outcomes
#' @param trt Vector of treatment indicator
#' @param poly_order Order of time trend polynomial to fit, default 1
#' @param weights Weights to use in WLS, default is no weights
#' @param ... optional arguments for outcome model
#' @noRd
#' @return \itemize{
#' \item{y0hat }{Predicted outcome under control}
#' \item{params }{Regression parameters}}
fit_prog_cits <- function(X, y, trt, poly_order=1, weights=NULL, ...) {
    ## warn about (and ignore) any extra arguments
    extra_params <- list(...)
    if (length(extra_params) > 0) {
        warning("Unused parameters when using CITS: ", paste(names(extra_params), collapse = ", "))
    }
    ## combine back into a panel structure
    ids <- seq_len(nrow(X))
    t0 <- dim(X)[2]
    t_final <- t0 + dim(y)[2]
    n <- nrow(X)
    ## default to equal weights
    if (is.null(weights)) {
        weights <- rep(1, n)
    }
    ## long-format pre-period data (time-major: all units for time 1, then 2, ...)
    pnl1 <- data.frame(X)
    colnames(pnl1) <- 1:(t0)
    pnl1 <- pnl1 %>% mutate(trt=trt, post=0, id=ids, weight=weights) %>%
        gather(time, val, -trt, -post, -id, -weight) %>%
        mutate(time=as.numeric(time))
    ## long-format post-period data
    pnl2 <- data.frame(y)
    colnames(pnl2) <- (t0+1):t_final
    pnl2 <- pnl2 %>% mutate(trt=trt, post=1, id=ids, weight=weights) %>%
        gather(time, val, -trt, -post, -id, -weight) %>%
        mutate(time=as.numeric(time))
    pnl <- bind_rows(pnl1, pnl2)
    ## fit regression, always excluding post-period treated outcomes
    if(poly_order == "fixed") {
        ## two-way (unit and time) fixed effects specification
        fit <- pnl %>%
            filter(!((post==1) & (trt==1))) %>%
            lm(val ~ as.factor(id) + as.factor(time),
               .,
               weights = .$weight
               )
    } else if(poly_order > 0) {
        ## polynomial time trend with a treatment-specific trend
        fit <- pnl %>%
            filter(!((post==1) & (trt==1))) %>%
            lm(val ~ poly(time, poly_order) + post + trt + poly(time * trt, poly_order),
               .,
               weights = .$weight
               )
    } else {
        ## intercept shifts only
        fit <- pnl %>%
            filter(!((post==1) & (trt==1))) %>%
            lm(val ~ post + trt,
               .,
               weights = .$weight
               )
    }
    ## get predicted post-period outcomes
    y0hat <- matrix(0, nrow=n, ncol=(t_final-t0))
    y0hat[trt==0,] <- matrix(predict(fit,
                                     pnl %>% filter(post==1 & trt==0)),
                             ncol=ncol(y))
    y0hat[trt==1,] <- matrix(predict(fit,
                                     pnl %>% filter(post==1 & trt==1)),
                             ncol=ncol(y))
    params <- list()
    ## treated prediction for the whole pre + post period.
    ## BUG FIX: `ncol=` was previously inside the predict() call (silently
    ## swallowed by predict's `...`), so matrix() produced a single column and
    ## rowMeans(params$Y.ct) below had the wrong length. The prediction is now
    ## reshaped to (treated units x time) and transposed to the (time x unit)
    ## Y.ct convention used by the other outcome models.
    params$Y.ct <- t(matrix(predict(fit, pnl %>% filter(trt==1)),
                            ncol=(ncol(X) + ncol(y))))
    ## and control prediction (control units x time)
    ctrl_pred <- matrix(predict(fit,
                                pnl %>% filter(trt==0)),
                        ncol=(ncol(X) + ncol(y)))
    ## per-unit control residuals (time x control units)
    params$ctrl_resids <- t(cbind(X[trt==0,,drop=FALSE],
                                  y[trt==0,,drop=FALSE])) -
        t(ctrl_pred)
    ## average treated residual at each time period
    params$trt_resids <- colMeans(cbind(X[trt==1,,drop=FALSE],
                                        y[trt==1,,drop=FALSE])) -
        rowMeans(params$Y.ct)
    return(list(y0hat=y0hat,
                params=params))
}
#' Fit a Bayesian structural time series
#' to fit E[Y(0)|X]
#'
#' @param X Matrix of covariates/lagged outcomes
#' @param y Matrix of post-period outcomes
#' @param trt Vector of treatment indicator
#' @param ... optional arguments for outcome model
#' @noRd
#' @return \itemize{
#' \item{y0hat }{Predicted outcome under control}
#' \item{params }{Model parameters}}
fit_prog_causalimpact <- function(X, y, trt, ...) {
    ## CausalImpact is an optional dependency
    if(!requireNamespace("CausalImpact", quietly = TRUE)) {
        stop("In order to fit bayesian structural time series, you must install the CausalImpact package.")
    }
    ## warn about (and ignore) any extra arguments
    extra_params = list(...)
    if (length(extra_params) > 0) {
        warning("Unused parameters using Bayesian structural time series with CausalImpact: ", paste(names(extra_params), collapse = ", "))
    }
    ## structure data accordingly
    ids <- 1:nrow(X)     # NOTE(review): unused below
    t0 <- dim(X)[2]      # number of pre-treatment periods
    t_final <- t0 + dim(y)[2]  # total number of periods
    n <- nrow(X)
    comb <- cbind(X, y)  # full n x t_final outcome matrix
    ## NOTE(review): imp_dat is computed but never used
    imp_dat <- t(rbind(colMeans(comb[trt==1,,drop=F]), comb[trt==0,,drop=F]))
    ## get predicted post-period outcomes
    ## TODO: is this the way to use CausalImpact??
    ci_func <- function(i) {
        ## fit causal impact for unit i using the other control units as
        ## covariate series; returns the predicted counterfactual over all periods
        CausalImpact::CausalImpact(t(rbind(comb[i,], comb[-i,][trt[-i]==0,])),
                                   pre.period=c(1, t0), post.period=c(t0+1, t_final)
                                   )$series$point.pred
    }
    y0hat <- t(sapply(1:n, ci_func))  # n x t_final matrix of predictions
    params <- list()
    ## add treated prediction for whole pre-period (time x treated units)
    params$Y.ct <- t(y0hat[trt==1,,drop=F])
    ## and control prediction (control units x time)
    ctrl_pred <- y0hat[trt==0,,drop=F]
    ## control and treated residuals
    params$ctrl_resids <- t(cbind(X[trt==0,,drop=FALSE],
                                  y[trt==0,,drop=FALSE])) -
        t(ctrl_pred)
    params$trt_resids <- colMeans(cbind(X[trt==1,,drop=FALSE],
                                        y[trt==1,,drop=FALSE])) -
        rowMeans(params$Y.ct)
    ## return only the post-period predictions, plus fit internals
    return(list(y0hat=y0hat[,(t0+1):t_final, drop=F],
                params=params))
}
#' Fit a seq2seq model with a feedforward net
#' to fit E[Y(0)|X]
#'
#' @param X Matrix of covariates/lagged outcomes
#' @param y Matrix of post-period outcomes
#' @param trt Vector of treatment indicator
#' @param layers List of (n_hidden_units, activation function) pairs to define layers
#' @param epochs Number of epochs for training
#' @param patience Number of epochs to wait before early stopping
#' @param val_split Proportion of control units to use for validation
#' @param verbose Whether to print training progress
#' @param ... optional arguments for outcome model
#' @noRd
#' @return \itemize{
#' \item{y0hat }{Predicted outcome under control}
#' \item{params }{Model parameters}}
fit_prog_seq2seq <- function(X, y, trt,
                             layers=list(c(50, "relu"), c(5, "relu")),
                             epochs=500,
                             patience=5,
                             val_split=0.2,
                             verbose=F, ...) {
    ## keras is an optional dependency
    if(!requireNamespace("keras", quietly = TRUE)) {
        stop("In order to fit a neural network, you must install the keras package.")
    }
    ## warn about (and ignore) any extra arguments
    extra_params = list(...)
    if (length(extra_params) > 0) {
        warning("Unused parameters when building sequence to sequence learning with feedforward nets: ", paste(names(extra_params), collapse = ", "))
    }
    ## structure data accordingly
    ## NOTE(review): ids, t0, t_final, and n are unused below
    ids <- 1:nrow(X)
    t0 <- dim(X)[2]
    t_final <- t0 + dim(y)[2]
    n <- nrow(X)
    ## train only on control units
    Xctrl <- X[trt==0,,drop=F]
    yctrl <- y[trt==0,,drop=F]
    ## create first layer
    model <- keras::keras_model_sequential() %>%
        keras::layer_dense(units = layers[[1]][1], activation = layers[[1]][2],
                           input_shape = ncol(Xctrl))
    ## add remaining hidden layers
    for(layer in layers[-1]) {
        model %>% keras::layer_dense(units = layer[1], activation = layer[2])
    }
    ## output layer: one unit per post-period outcome
    model %>% keras::layer_dense(units=ncol(yctrl))
    ## compile
    model %>% keras::compile(optimizer="rmsprop", loss="mse", metrics=c("mae"))
    ## fit model with early stopping on the validation split
    learn <- model %>%
        keras::fit(x=Xctrl, y=yctrl,
                   epochs=epochs,
                   batch_size=nrow(Xctrl),
                   validation_split=val_split,
                   callbacks=list(keras::callback_early_stopping(patience=patience)),
                   verbose=verbose)
    ## predict counterfactual outcomes for everything (treated and control)
    y0hat <- model %>% predict(X)
    params=list(model=model, learn=learn)
    return(list(y0hat=y0hat,
                params=params))
}
|
# Calculate job loss by LODES industry sector using NY state data
library(tidyverse)
library(jsonlite)
library(testit)
library(readxl)
##----Set Parameters----------------------------------------------------
# start_quarter: quarter from which to compare job change
# past_unemployment_weeks: # of past weeks of unemployment data to use
# filename: filename of NY unemployment data (downloaded from
#   download-data.R)
start_quarter <- 3
past_unemployment_weeks <- 4
filename <- "ny-manual-input-data.xlsx"
##----Read in data------------------------------------------------------
# Read in QCEW employment data
qcew_all <- read_csv("data/raw-data/big/ny_qcew.csv")
# Read in crosswalk from NAICS supersector to LODES.
lodes_crosswalk <- fromJSON("data/raw-data/small/naics-to-led.json")
# Read in crosswalk from NAICS sector to LODES
lodes_crosswalk_sector <- fromJSON("data/raw-data/small/naics-sector-to-led.json")
# Read in industry to lodes xwalk, to put names to lodes industries
lehd_types <- read_csv("data/raw-data/small/lehd_types.csv")
# Above two crosswalks were constructed manually from pg 7 of
# https://lehd.ces.census.gov/data/lodes/LODES7/LODESTechDoc7.4.pdf
##----Get total_employment by LODES industry code-------------------------
# Subset to NAICS supersector and the chosen quarter
qcew_sub <- qcew_all %>%
  filter(agglvl_code == 54,
         qtr == start_quarter)
# Aggregate all employment, taking the last month of the quarter,
# by industry supersector
qcew_agg <- qcew_sub %>%
  group_by(industry_code) %>%
  summarize(total_employment = sum(month3_emplvl)) %>%
  filter(industry_code %in% names(lodes_crosswalk))
# Attach LODES codes "CNS{number}"
add_lodes_code <- function(industry_code) {
  str_glue("CNS", lodes_crosswalk[industry_code][[1]])
}
qcew_led <- qcew_agg %>%
  mutate(lodes_var =
           qcew_agg$industry_code
         %>% map_chr(add_lodes_code))
##----Get unemployment claims by LODES industry code-----------------------
# Read in weekly unemployment claims, allocate Construction/Utilities based on
# WA split
# Calculate WA ratio of job loss
wa_data <- read_csv("data/processed-data/job_change_wa_most_recent.csv")
utilities_wa <- wa_data %>%
  filter(industry_code == "22") %>%
  select(unemployment_totals) %>%
  pull()
construction_wa <- wa_data %>%
  filter(industry_code == "23") %>%
  select(unemployment_totals) %>%
  pull()
# Share of combined Utilities+Construction claims attributable to Utilities
cu_ratio <- utilities_wa / (utilities_wa + construction_wa)
# Add separate NY rows for construction and utilities.
# BUG FIX: interpolate the `filename` parameter set above (the path previously
# contained a corrupted literal and `filename` was never used).
weekly_unemployment <- read_excel(str_glue("data/raw-data/small/{filename}"),
                                  sheet = "Sheet1")
# Combined "22, 23" row, split by the WA-derived ratio
split_base <- weekly_unemployment %>%
  filter(`2 Digit NAICS` == "22, 23") %>%
  select_if(is.numeric) %>%
  unname()
utilities_ny <- split_base %>%
  map_dbl(~ .x * cu_ratio)
construction_ny <- split_base %>%
  map_dbl(~ .x * (1 - cu_ratio))
weekly_unemployment <- weekly_unemployment %>%
  rbind(c(c("Utilities", "22"), utilities_ny)) %>%
  rbind(c(c("Construction", "23"), construction_ny)) %>%
  filter(`2 Digit NAICS` != "22, 23")
# Map a 2-digit NAICS sector to its LODES code "CNS{number}"
add_naics_super_lodes <- function(naics) {
  str_glue("CNS", lodes_crosswalk_sector[naics][[1]])
}
weekly_unemployment_sub <- weekly_unemployment %>%
  mutate(lodes_var = weekly_unemployment$`2 Digit NAICS` %>%
           map_chr(add_naics_super_lodes)) %>%
  filter(lodes_var != "CNS00")
cols <- length(colnames(weekly_unemployment_sub))
weekly_unemployment_sub <- weekly_unemployment_sub %>%
  # only keep unemployment claims from the last n weeks (plus lodes_var,
  # which is the final column)
  select(c((cols - past_unemployment_weeks):cols)) %>%
  mutate_at(vars(-lodes_var), as.numeric)
# Sum across rows to get total unemployment over past n weeks
# Then summarize by LODES code
weekly_unemployment_totals <- weekly_unemployment_sub %>%
  data.frame(unemployment = rowSums(weekly_unemployment_sub[1:past_unemployment_weeks])) %>%
  select(lodes_var, unemployment) %>%
  # AN: Any reason you're selecting by index rather than by
  # select(c((past_unemployment_weeks+1):(past_unemployment_weeks+2))) %>%
  group_by(lodes_var) %>%
  summarize(unemployment_totals = sum(unemployment)) %>%
  left_join(lehd_types %>%
              transmute(lodes_var = toupper(lehd_var),
                        lehd_name),
            by = c("lodes_var"))
##----Get % change in employment by LODES industry code-----------------------
# Note: assumes no hires, which is not true, but should generally show relative
# job change in the short term until we get BLS CES data
percent_change_industry <- qcew_led %>%
  left_join(weekly_unemployment_totals, by = "lodes_var") %>%
  select(lodes_var, everything()) %>%
  # merge(weekly_unemployment_totals, by = "lodes_var") %>%
  mutate(percent_change_employment = -unemployment_totals / total_employment) %>%
  arrange(lodes_var) %>%
  select(lehd_name, lodes_var, everything()) %>%
  write_csv(str_glue("data/processed-data/job_change_ny_last_{past_unemployment_weeks}_weeks.csv"))
# Also write a "most recent" copy with a stable name
percent_change_industry %>%
  write_csv("data/processed-data/job_change_ny_most_recent.csv")
| /scripts/update/2c-job-loss-by-industry-ny.R | no_license | rross0/covid-neighborhood-job-analysis | R | false | false | 5,046 | r | # Calculate Job loss by LODES Industry Sector using NY state data
library(tidyverse)
library(jsonlite)
library(testit)
library(readxl)
##----Set Parameters----------------------------------------------------
# start_quarter: quarter from which to compare job change and
# past_unemp_weeks: # of past weeks of unemployment data to use
# filename: filename of WA unemployment data (downloaded from
# download-data.R)
start_quarter <- 3
past_unemployment_weeks <- 4
filename <- "ny-manual-input-data.xlsx"
##----Read in data------------------------------------------------------
# Read in BLS CES data
qcew_all <- read_csv("data/raw-data/big/ny_qcew.csv")
# Read in crosswalk from NAICS supersector to LODES.
lodes_crosswalk <- fromJSON("data/raw-data/small/naics-to-led.json")
# Read in crosswalk from NAICS sector to LODES
lodes_crosswalk_sector <- fromJSON("data/raw-data/small/naics-sector-to-led.json")
# Read in industry to lodes xwalk, to put names to lodes industries
lehd_types <- read_csv("data/raw-data/small/lehd_types.csv")
# Above two crosswalks were constructed manually from pg 7 of
# https://lehd.ces.census.gov/data/lodes/LODES7/LODESTechDoc7.4.pdf
##----Get total_employment by LODES industry code-------------------------
# Subset to NAICS supersector and latest quarter
qcew_sub <- qcew_all %>%
  filter(agglvl_code == 54,
         qtr == start_quarter)
# Aggregate all employment, taking last month of the quarter,
# By industry by supersector
qcew_agg <- qcew_sub %>%
  group_by(industry_code) %>%
  summarize(total_employment = sum(month3_emplvl)) %>%
  filter(industry_code %in% names(lodes_crosswalk))
# Attach LODES codes "CNS{number}" and write out
add_lodes_code <- function(industry_code) {
  str_glue("CNS", lodes_crosswalk[industry_code][[1]])
}
qcew_led <- qcew_agg %>%
  mutate(lodes_var =
           qcew_agg$industry_code
         %>% map_chr(add_lodes_code))
##----Get unemploygment claims by LODES industry code-----------------------
# Read in weekly unemployment claims, allocate Construction/Utilities based on
# WA split
# Calculate WA ratio of job loss
wa_data <- read_csv("data/processed-data/job_change_wa_most_recent.csv")
utilities_wa <- wa_data %>%
  filter(industry_code == "22") %>%
  select(unemployment_totals) %>%
  pull()
construction_wa <- wa_data %>%
  filter(industry_code == "23") %>%
  select(unemployment_totals) %>%
  pull()
# Share of combined Utilities+Construction claims attributable to Utilities
cu_ratio <- utilities_wa / (utilities_wa + construction_wa)
# Add separate NY rows for constrution and utilities
# NOTE(review): the path below appears corrupted -- the otherwise-unused
# `filename` parameter defined above is presumably meant to be interpolated
# here via str_glue; confirm against the original script.
weekly_unemployment <- read_excel(str_glue("data/raw-data/small/(unknown)"),
                                  sheet = "Sheet1")
split_base <- weekly_unemployment %>%
  filter(`2 Digit NAICS` == "22, 23") %>%
  select_if(is.numeric) %>%
  unname()
utilities_ny <- split_base %>%
  map_dbl(~ .x * cu_ratio)
construction_ny <- split_base %>%
  map_dbl(~ .x * (1 - cu_ratio))
weekly_unemployment <- weekly_unemployment %>%
  rbind(c(c("Utilities", "22"), utilities_ny)) %>%
  rbind(c(c("Construction", "23"), construction_ny)) %>%
  filter(`2 Digit NAICS` != "22, 23")
# Map a 2-digit NAICS sector to its LODES code "CNS{number}"
add_naics_super_lodes <- function(naics) {
  str_glue("CNS", lodes_crosswalk_sector[naics][[1]])
}
weekly_unemployment_sub <- weekly_unemployment %>%
  mutate(lodes_var = weekly_unemployment$`2 Digit NAICS` %>%
           map_chr(add_naics_super_lodes)) %>%
  filter(lodes_var != "CNS00")
cols <- length(colnames(weekly_unemployment_sub))
weekly_unemployment_sub <- weekly_unemployment_sub %>%
  #only keep unemp claims from last n weeks
  select(c((cols - past_unemployment_weeks):cols)) %>%
  mutate_at(vars(-lodes_var), as.numeric)
# Sum across rows to get total unemployment over past n weeks
# Then summarize by LODES code
weekly_unemployment_totals <- weekly_unemployment_sub %>%
  data.frame(unemployment = rowSums(weekly_unemployment_sub[1:past_unemployment_weeks])) %>%
  select(lodes_var, unemployment) %>%
  # AN: Any reasom you're selecting by index rather than by
  # select(c((past_unemployment_weeks+1):(past_unemployment_weeks+2))) %>%
  group_by(lodes_var) %>%
  summarize(unemployment_totals = sum(unemployment)) %>%
  left_join(lehd_types %>%
              transmute(lodes_var = toupper(lehd_var),
                        lehd_name),
            by = c("lodes_var"))
##----Get % change in employment by LODES industry code-----------------------
# Note: assumes no hires, which is not true, but should generally show relative
# job change in the short term until we get BLS CES data
percent_change_industry <- qcew_led %>%
  left_join(weekly_unemployment_totals, by = "lodes_var") %>%
  select(lodes_var, everything()) %>%
  # merge(weekly_unemployment_totals, by = "lodes_var") %>%
  mutate(percent_change_employment = -unemployment_totals / total_employment) %>%
  arrange(lodes_var) %>%
  select(lehd_name, lodes_var, everything()) %>%
  write_csv(str_glue("data/processed-data/job_change_ny_last_{past_unemployment_weeks}_weeks.csv"))
# Also write a "most recent" copy with a stable name
percent_change_industry %>%
  write_csv("data/processed-data/job_change_ny_most_recent.csv")
|
#---scalar variables
A=10
B="String"
C=1:10
# NOTE(review): Python syntax, not valid R -- use library(sqldf)
import sqldf
#-----vector - Columner data
B = c(A, 8, 34, 43)
#---List
B = list(1:3,2)
#---Array
#---Seq functions
A = seq (0,100,by = 2)
#----Matrix repesentation of data
a = matrix(1:20, nrow=3, ncol=4, byrow=TRUE)
#--- Data frames, tabular representation of data
# NOTE(review): eid, fn, sal, ln, did are never defined in this script
employees = data.frame(eid, fn, sal, ln)
employees = data.frame(eid, fn, sal, ln, stringsAsFactors = FALSE)
str(employees)
employees[1,]
employees[c(1,2),]
employees[,]
# NOTE(review): "c:\abc.csv" contains an invalid escape; use "c:/abc.csv"
write.csv(employees,"c:\abc.csv")
# NOTE(review): graphics.off() takes no data argument
graphics.off(employees)
# NOTE(review): employees is a data frame, not a function -- this errors
employees = employees(employees, did)
employees = data.frame(employees, did)
graphics.off(employees)
A = read.csv("c:/abc2.csv")
# NOTE(review): the viewer function is View(), capital V
view(A)
#------If else---------
# NOTE(review): i is never defined before this check
if ( i %% 2 == 1 ) {
print("Number if odd")
print("if psrt got executed")
} else {
print("number is even")
print("else part got executed")
}
#-------nested if else------
a=10
b=20
c=30
if ( a > b & a > c ) print("a is greater") else
if ( b > c & b > a ) print("b is greater") else
if ( c > a & c > b) print("c is greater") else
if ( a == b & b== c ) print ("all are equeal")
#---ifelse statement
ifelse(a>b,a,b)
ifelse(a>b,{print("1st stmt");print("2nd stmt")},b)
#--read what is sampling with replacement
#---
# interactively pick a CSV and split rows ~60/40 into train/test
A= read.csv(file.choose())
sf=sample(2,nrow(A), replace=TRUE, prob=c(0.6,0.4))
trd = A[sf==1,]
tsd = A[sf==2,]
nrow(A)
ncol(A)
# NOTE(review): set.seed() after sample() does not make the split reproducible
set.seed(1122)
length(sf)
B=read.csv(file.choose(), col.names = c("C1","C2", "C3", "C4", "C5"), header=FALSE)
B[-2,]
#--- Install packages
install.packages("sqldf")
library(sqldf)
#--- few more packages installed with datasets
install.packages("MASS")
install.packages("ISLR")
sqldf
# NOTE(review): the next four lines are loose notes, not valid R code
dplyr
2 teams
data manupuation reporting
reprting and further
sqldf("select income, student from A where student = 'Yes' and income > 100")
A = ISLR::Credit
sqldf("select * from trees")
sqldf("select `limit`, income, student from A where student = 'Yes' and income > 100")
#----------------------------
sqldf("select Income,student from A where student = 'Yes'")
sqldf("select Income,student from A where student = 'Yes' and income > 100")
sqldf("select * from A where student = 'Yes' and income > 100")
sqldf("select * from A where student = 'Yes' and income > 100")
sqldf("select `Limit`,Rating from A where student = 'Yes' and income > 100")
sqldf("select * from A where student = 'Yes' and income > 100")
sqldf("select * from A where Ethnicity like 'A%'")
#-------------AGRREGATES---------------
sqldf("select AVG(Income) from A")
sqldf("select AVG(Age) from A")
sqldf("select AVG(Balance) from A")
# NOTE(review): Limit must be backquoted (reserved word), as done on the next line
sqldf("select AVG(Limit) from A")
sqldf("select AVG(`Limit`) from A")
sqldf("select COUNT() from A")
sqldf("select COUNT() from A GROUP BY ethnicity")
sqldf("select ethnicity,COUNT() from A GROUP BY ethnicity")
sqldf("select ethnicity,avg(income) from A GROUP BY ethnicity")
sqldf("select ethnicity,sum(income) from A GROUP BY ethnicity")
sqldf("select ethnicity,max(income) from A GROUP BY ethnicity")
sqldf("select ethnicity,COUNT() from A GROUP BY ethnicity")
# five-number summary (min, lower hinge, median, upper hinge, max)
fivenum(A$Income)
library(MASS)
A=data.frame(Cars93)
str(A)
# NOTE(review): hist() needs numeric input; Manufacturer is a factor
hist(A$Manufacturer)
hist(A$RPM)
A1=A[1:20,]
hist(A1$RPM)
table(A1$RPM)
A1$RPM
# NOTE(review): stray token, errors (object A1a not found)
A1a
# NOTE(review): trailing "s" on the next line is a syntax error
hist(A1$RPM, col = 2:10)s
hist(A1$RPM, col = 2)
hist(A1$RPM, col = 1:5)
hist(A1$RPM, col = c(2,2,2,2,4) )
# NOTE(review): trailing "s" inside the call is a syntax error
par(mfrow=c(2,2)s)
hist(A1$RPM, col = c(2,2,2,2,4) )
hist(A1$RPM, col = 1:5)
hist(A1$RPM, col = 2)
hist(A1$RPM, col = 2, main="Histogram of RPMs with repeat frequency")
hist(A1$RPM, col = 2, main="Histogram of RPMs with repeat frequency", xlab = " RPM of Engines", ylab = "Repeatation Frequency")
# NOTE(review): the next four lines are loose notes, not valid R code
regression analysis = numeric output
predictio - method, modle test data
model = profit can be predicted from rnd = formula
data frame kaunsi hai jisme naam HairEyeColor
install.packages("psych")
library(psych)
A=read.csv(file.choose())
A=na.omit(A)
sf=sample(2,nrow(A), replace=TRUE, prob=c(0.8,0.2))
trd=A[sf==1,]
tsd=A[sf==2,]
pairs.panels(A)
model1=lm(PROFIT~RND,data=trd)
# NOTE(review): "tsd" is broken across two lines -- syntax error
prd=predict(model1,ts
d)
cbind(prd,tsd$PROFIT)
prd
model1
tsd
library(ISLR)
library(psych)
A = data.frame(Credit)
str(A)
head(A)
pairs.panels(A)
# keep only the numeric columns for the correlation panel
numcols = unlist(lapply(A,is.numeric))
B = A[,numcols]
pairs.panels(B)
cor(B)
sf = sample(2,nrow(A),replace = TRUE,prob = c(0.7,0.3))
trd = A[sf == 1,]
tsd = A[sf == 2,]
model1_Inc = lm(Income ~ Limit,data=trd)
#model2_Inc = lm(Income ~ Rating,data=trd)
#model3_Inc = lm(Income ~ Balance,data=trd)
#model1_Limit = lm(Limit ~ Balance,data=trd)
model2_Limit = lm(Limit ~ Rating,data=trd)
#model3_Limit = lm(Limit ~ Income,data=trd)
model1_Rating = lm(Rating ~ Balance,data=trd)
#model1_Bal = lm(Balance ~ Income,data=trd)
#model2_Bal = lm(Balance ~ Limit,data=trd)
model3_Bal = lm(Balance ~ Rating,data=trd)
pred_Inc = predict(model1_Inc,tsd)
cbind(tsd$Limit,pred_Inc,tsd$Income)
pred_Rating = predict(model1_Rating,tsd)
pred_Bal = predict(model3_Bal,tsd)
pred_Lim = predict(model2_Limit,tsd) | /new23.R | no_license | subhambare/RLearning2 | R | false | false | 5,142 | r | #---scalar variables
# Scalar variables
A = 10
B = "String"
C = 1:10
library(sqldf)   # was `import sqldf` -- Python syntax, not valid R
#-----vector - columnar data
B = c(A, 8, 34, 43)
#---List
B = list(1:3,2)
#---Array
#---Seq functions
A = seq (0,100,by = 2)
#----Matrix representation of data
a = matrix(1:20, nrow=3, ncol=4, byrow=TRUE)
#--- Data frames, tabular representation of data
# NOTE(review): eid, fn, sal, ln and did are never defined in this file; these
# calls only work if those vectors already exist in the session. TODO confirm.
employees = data.frame(eid, fn, sal, ln)
employees = data.frame(eid, fn, sal, ln, stringsAsFactors = FALSE)
str(employees)
employees[1,]
employees[c(1,2),]
employees[,]
write.csv(employees,"c:/abc.csv")   # was "c:\abc.csv": `\a` is the bell escape, so the path was mangled
graphics.off()                      # graphics.off() takes no arguments (was graphics.off(employees))
# employees = employees(employees, did)   # broken: `employees` is not a function; superseded by the next line
employees = data.frame(employees, did)
graphics.off()
A = read.csv("c:/abc2.csv")
View(A)   # R is case-sensitive: the base data viewer is View(), not view()
#------If else---------
# NOTE(review): `i` is not defined at this point in the file; this block only
# runs if `i` already exists in the session.
if ( i %% 2 == 1 ) {
print("Number if odd")
print("if psrt got executed")
} else {
print("number is even")
print("else part got executed")
}
#-------nested if else------
a=10
b=20
c=30
# Find the maximum of a, b, c via chained scalar comparisons.
if ( a > b & a > c ) print("a is greater") else
if ( b > c & b > a ) print("b is greater") else
if ( c > a & c > b) print("c is greater") else
if ( a == b & b== c ) print ("all are equeal")
#---ifelse statement
# ifelse() is the vectorised conditional; here it picks the larger of a and b.
ifelse(a>b,a,b)
ifelse(a>b,{print("1st stmt");print("2nd stmt")},b)
#--read what is sampling with replacement
#---
# Train/test split demo: read a CSV, split rows ~60/40 via sampling with replacement.
A= read.csv(file.choose())
sf=sample(2,nrow(A), replace=TRUE, prob=c(0.6,0.4))
trd = A[sf==1,]
tsd = A[sf==2,]
nrow(A)
ncol(A)
set.seed(1122)   # NOTE(review): seeding *after* sample() does not make the split above reproducible
length(sf)
B=read.csv(file.choose(), col.names = c("C1","C2", "C3", "C4", "C5"), header=FALSE)
B[-2,]
#--- Install packages
install.packages("sqldf")
library(sqldf)
#--- few more packages installed with datasets
install.packages("MASS")
install.packages("ISLR")
sqldf
# Class notes (were bare prose / stray tokens -- parse errors; kept as comments):
# dplyr
# 2 teams
# data manipulation reporting
# reporting and further
# NOTE(review): this first query runs against whatever `A` currently holds;
# `A` only becomes the Credit data on the following line. TODO confirm ordering.
sqldf("select income, student from A where student = 'Yes' and income > 100")
A = ISLR::Credit
sqldf("select * from trees")
sqldf("select `limit`, income, student from A where student = 'Yes' and income > 100")
#----------------------------
# sqldf practice queries against the Credit data (A).
sqldf("select Income,student from A where student = 'Yes'")
sqldf("select Income,student from A where student = 'Yes' and income > 100")
sqldf("select * from A where student = 'Yes' and income > 100")
sqldf("select * from A where student = 'Yes' and income > 100")
# `Limit` must be backquoted: LIMIT is an SQL keyword.
sqldf("select `Limit`,Rating from A where student = 'Yes' and income > 100")
sqldf("select * from A where student = 'Yes' and income > 100")
sqldf("select * from A where Ethnicity like 'A%'")
#-------------AGRREGATES---------------
sqldf("select AVG(Income) from A")
sqldf("select AVG(Age) from A")
sqldf("select AVG(Balance) from A")
# NOTE(review): unquoted LIMIT below is an SQL keyword -- this query fails;
# the backquoted version on the next line works.
sqldf("select AVG(Limit) from A")
sqldf("select AVG(`Limit`) from A")
sqldf("select COUNT() from A")
sqldf("select COUNT() from A GROUP BY ethnicity")
sqldf("select ethnicity,COUNT() from A GROUP BY ethnicity")
sqldf("select ethnicity,avg(income) from A GROUP BY ethnicity")
sqldf("select ethnicity,sum(income) from A GROUP BY ethnicity")
sqldf("select ethnicity,max(income) from A GROUP BY ethnicity")
sqldf("select ethnicity,COUNT() from A GROUP BY ethnicity")
fivenum(A$Income)
library(MASS)
A=data.frame(Cars93)
str(A)
hist(A$Manufacturer)
hist(A$RPM)
# Explore RPM for the first 20 rows of Cars93 with histograms.
A1=A[1:20,]
hist(A1$RPM)
table(A1$RPM)
A1$RPM
A1   # was `A1a` -- no such object (typo); print the subset instead
hist(A1$RPM, col = 2:10)   # stray trailing `s` removed (parse error)
hist(A1$RPM, col = 2)
hist(A1$RPM, col = 1:5)
hist(A1$RPM, col = c(2,2,2,2,4) )
par(mfrow=c(2,2))          # stray `s` inside the call removed (parse error)
hist(A1$RPM, col = c(2,2,2,2,4) )
hist(A1$RPM, col = 1:5)
hist(A1$RPM, col = 2)
hist(A1$RPM, col = 2, main="Histogram of RPMs with repeat frequency")
hist(A1$RPM, col = 2, main="Histogram of RPMs with repeat frequency", xlab = " RPM of Engines", ylab = "Repeatation Frequency")
# Lecture notes -- these were bare prose (not valid R, parse errors); kept as comments:
# regression analysis = numeric output
# prediction - method, model, test data
# model = profit can be predicted from rnd = formula
# which data frame is named HairEyeColor ("data frame kaunsi hai jisme naam HairEyeColor")
# Simple linear regression demo: 80/20 train/test split, fit PROFIT ~ RND,
# then predict profit on the held-out rows.
install.packages("psych")   # NOTE(review): installing inside a script reruns every time; run once interactively
library(psych)
A=read.csv(file.choose())
A=na.omit(A)
sf=sample(2,nrow(A), replace=TRUE, prob=c(0.8,0.2))
trd=A[sf==1,]
tsd=A[sf==2,]
pairs.panels(A)
model1=lm(PROFIT~RND,data=trd)
prd=predict(model1,tsd)   # was split across two lines ("ts" / "d)") -- parse error fixed
cbind(prd,tsd$PROFIT)
prd
model1
tsd
library(ISLR)
library(psych)
A = data.frame(Credit)
str(A)
head(A)
pairs.panels(A)
numcols = unlist(lapply(A,is.numeric))
B = A[,numcols]
pairs.panels(B)
cor(B)
sf = sample(2,nrow(A),replace = TRUE,prob = c(0.7,0.3))
trd = A[sf == 1,]
tsd = A[sf == 2,]
model1_Inc = lm(Income ~ Limit,data=trd)
#model2_Inc = lm(Income ~ Rating,data=trd)
#model3_Inc = lm(Income ~ Balance,data=trd)
#model1_Limit = lm(Limit ~ Balance,data=trd)
model2_Limit = lm(Limit ~ Rating,data=trd)
#model3_Limit = lm(Limit ~ Income,data=trd)
model1_Rating = lm(Rating ~ Balance,data=trd)
#model1_Bal = lm(Balance ~ Income,data=trd)
#model2_Bal = lm(Balance ~ Limit,data=trd)
model3_Bal = lm(Balance ~ Rating,data=trd)
pred_Inc = predict(model1_Inc,tsd)
cbind(tsd$Limit,pred_Inc,tsd$Income)
pred_Rating = predict(model1_Rating,tsd)
pred_Bal = predict(model3_Bal,tsd)
pred_Lim = predict(model2_Limit,tsd) |
# # # Governance in socio-economic pathways and its role # # #
# # # for future adaptive capacity # # #
# # # (Andrijevic et al., 2019) # # #
# # # # # #
# # # Data management # # #
rm(list=ls())
library(ggplot2)
library(tidyverse)
library(broom)
library(countrycode)
library(readstata13)
library(sandwich)
library(lmtest)
library(zoo)
# library(RCurl)
# library(gdata)
setwd('/Users/marinaandrijevic/PhD/Governance Projections/GitHub')
# Historical data
# World Governance Indicators
# Function to standardize the values from 0 to 1
range01 <- function(x){(x - min(x, na.rm = T))/(max(x, na.rm = T) - min(x, na.rm = T))}
# World Governance Indicators: keep the six WGI *estimate* columns, rescale
# each to [0, 1], and average them into a single `governance` score per
# country-year.
# NOTE(review): contains('e') matches any column name containing "e"; the
# explicit -ge* drops are what narrow it to the estimate columns -- TODO
# confirm against the wgidataset column list.
wgi <- read.dta13('data/wgidataset.dta') %>%
  select(code, countryname, year, contains('e'), -ges, -gen, -ger, -gel, -geu) %>%
  rename(voic.ac = vae,      # voice & accountability
         pol.stab = pve,     # political stability
         gov.eff = gee,      # government effectiveness
         reg.qual = rqe,     # regulatory quality
         ru.law = rle,       # rule of law
         corr.cont = cce,    # control of corruption
         countrycode = code) %>%
  gather(var, value, -countrycode, -countryname, -year) %>%
  group_by(var) %>%
  # min-max rescale each indicator separately (range01 defined above)
  mutate(value = range01(value)) %>%
  ungroup() %>%
  mutate(year = as.integer(year)) %>%
  spread(var, value) %>%
  mutate(governance = rowMeans(select(., voic.ac, pol.stab, gov.eff, reg.qual, ru.law, corr.cont))) %>% # # Governance variable as the arithmetic average of the six components of WGI
  mutate(scenario = 'Observed')
levels(wgi$year)[levels(wgi$year)== 1996] <- 1995
# Replace outdated ISO3 codes with their current equivalents.
wgi$countrycode <- recode(wgi$countrycode,
                          "ROM" = "ROU",
                          "ZAR" = "COD") #Romania and Democratic Republic of the Congo had outdated codes
# ND GAIN Readiness component
# Wide (one column per year, "X"-prefixed) -> long country-year rows.
nd.readiness <- read.csv('data/ndgain_readiness.csv') %>%
  rename(countrycode = ISO3,
         country = Name) %>%
  gather(year, readiness, -countrycode, -country) %>%
  mutate(year = year %>% str_replace("X", "") %>% as.numeric,
         scenario = 'Observed') %>%
  filter(year <= 2015)
# GDP from Penn World Tables 7.0 (until 2010) and SSP projections (2010 - 2015) (Crespo Cuaresma, 2017)
gdp.pwt <- read.csv("data/pwt70.csv", header = T, sep = ",") %>%
  select(isocode, year, rgdpl) %>%     # rgdpl = real GDP per capita
  rename(gdppc = rgdpl, countrycode = isocode) %>%
  mutate(scenario = 'Observed')
gdp.pwt$countrycode <- recode(gdp.pwt$countrycode, "ROM" = "ROU", "ZAR" = "COD", "GER" = "DEU") #Correct for outdated country codes
# SSP2 GDP at the 2010 and 2015 grid points, spread wide for interpolation.
gdp.15 <- read.csv("data/gdp_ssp_5year.csv", header = T, sep = ";", dec = ",") %>%
  gather(scenario, gdppc, -iso3, -year) %>%
  filter((year == 2010 | year == 2015) & scenario == 'SSP2') %>%
  select(-scenario) %>%
  mutate(scenario = "Observed") %>%
  spread(year, gdppc)
# Interpolation function from Burke et al. (2018).
# Linearly interpolates 5-yearly GDP columns to annual values (2010-2015).
# Expects the first two columns to be identifiers and the remaining columns
# to be named by year; returns the identifiers plus one column per year.
# Years beyond the last observed column are carried forward unchanged.
ipolate <- function(mat) {
  target_years <- 2010:2015
  known_years <- as.numeric(unlist(names(mat)))  # non-year names coerce to NA (warning is harmless)
  grid <- seq(2010, 2015, 5)
  filled <- array(dim = c(dim(mat)[1], length(target_years)))
  for (idx in seq_along(target_years)) {
    yr <- target_years[idx]
    col <- which(names(mat) == yr)
    if (length(col) > 0) {
      # Year is present in the input: copy it through unchanged.
      filled[, idx] <- as.numeric(mat[, col])
    } else {
      # Interpolate between the bracketing 5-year grid points.
      gaps <- yr - grid
      lower <- grid[which(gaps == min(gaps[gaps > 0]))]
      upper <- lower + 5
      lo_vals <- as.numeric(mat[, which(names(mat) == lower)])
      hi_vals <- as.numeric(mat[, which(names(mat) == upper)])
      if (yr > max(known_years, na.rm = TRUE)) {
        filled[, idx] <- lo_vals   # past the last observed year: carry forward
      } else {
        filled[, idx] <- lo_vals + (hi_vals - lo_vals) * (yr - lower) / 5
      }
    }
  }
  out <- data.frame(mat[, 1:2], filled)
  names(out)[3:dim(out)[2]] <- target_years
  out
}
# Annualised 2010-2015 GDP, reshaped back to long country-year rows.
gdp.15.ipol <- ipolate(gdp.15) %>%
  gather(year, gdppc, -iso3, -scenario) %>%
  rename(countrycode = iso3) %>%
  mutate(year = as.integer(year))
# Stack PWT (to 2010) and interpolated SSP2 (2010-2015) into one yearly series.
gdp.yearly <- gdp.pwt %>%
  bind_rows(gdp.15.ipol) %>%
  arrange(countrycode)
# Education data: share of population by educational attainment and gender gap in education measured by the difference in mean years of schooling (MYS) between women and men (source: Wittgenstein Centre for Demography and Global Human Capital)
edu.prep <- read.csv("data/wic_eduatt.csv", skip = 8, sep = ",") %>%
  spread(Year, Distribution)
mys.gap.prep <- read.csv("data/wic_mys_gap.csv", skip = 8) %>%
  spread(Year, Years)
# Variant of ipolate() for the education tables: linearly interpolates
# 5-yearly columns to annual values over 1970-2099. Expects the first four
# columns to be identifiers (Area, Scenario, ISOCode, Education) and the
# remaining columns to be named by year. Years beyond the last observed
# column are carried forward unchanged.
ipolate2 <- function(mat) {
  target_years <- 1970:2099
  known_years <- as.numeric(unlist(names(mat)))  # non-year names coerce to NA (warning is harmless)
  grid <- seq(1970, 2100, 5)
  filled <- array(dim = c(dim(mat)[1], length(target_years)))
  for (idx in seq_along(target_years)) {
    yr <- target_years[idx]
    col <- which(names(mat) == yr)
    if (length(col) > 0) {
      filled[, idx] <- as.numeric(mat[, col])
    } else {
      gaps <- yr - grid
      lower <- grid[which(gaps == min(gaps[gaps > 0]))]
      upper <- lower + 5
      lo_vals <- as.numeric(mat[, which(names(mat) == lower)])
      hi_vals <- as.numeric(mat[, which(names(mat) == upper)])
      if (yr > max(known_years, na.rm = TRUE)) {
        filled[, idx] <- lo_vals   # past the last observed year: carry forward
      } else {
        filled[, idx] <- lo_vals + (hi_vals - lo_vals) * (yr - lower) / 5
      }
    }
  }
  out <- data.frame(mat[, 1:4], filled)
  names(out)[5:dim(out)[2]] <- target_years
  out
}
# Annualised attainment shares, long format; education labels lower-cased and
# space -> "." (e.g. "Post Secondary" -> "post.secondary") before spreading.
edu.ipol <- ipolate2(edu.prep) %>%
  gather(year, distribution, -Area, -Scenario, -ISOCode, -Education) %>%
  mutate(Education = str_replace(str_to_lower(Education), fixed(" "), ".")) %>%
  spread(Education, distribution)
# Annualised MYS gender gap.
# NOTE(review): year becomes a *factor* here while edu.ipol's year stays
# character; the join below therefore coerces types -- TODO confirm the join
# keys line up as intended.
mys.gap.ipol <- ipolate2(mys.gap.prep) %>%
  gather(year, mys.gap, -Area, -Scenario, -ISOCode) %>%
  mutate(year = year %>% str_replace("X", "") %>% as.factor)
# Combine education variables; keep SSP2 relabelled as the observed scenario.
edu.master <- edu.ipol %>%
  left_join(mys.gap.ipol) %>%
  mutate(countrycode = countrycode(Area, 'country.name', 'iso3c')) %>%
  mutate(Scenario = recode(Scenario, 'SSP2' = 'Observed'),
         year = as.integer(year)) %>%
  rename(scenario = Scenario) %>%
  filter(scenario == 'Observed')
# Merge into one dataset
observed.yr <- wgi %>%
  left_join(gdp.yearly, by = c('countrycode', 'year', 'scenario')) %>%
  left_join(edu.master, by = c('countrycode', 'year', 'scenario')) %>%
  left_join(nd.readiness, by = c('countrycode', 'year', 'scenario')) %>%
  select(-Area) %>%
  mutate_if(is.numeric, list(~na_if(., Inf))) %>%  # Inf -> NA in numeric columns
  mutate(year = as.integer(year),
         lngdp = log(gdppc))
observed.yr$countrycode <- recode(observed.yr$countrycode, "ROM" = "ROU", "ZAR" = "COD") # Fix outdated country names
#write.csv(observed.yr, 'data/observed_yr.csv')
# Projections data
# 2015 SSP2 GDP, used as the common starting point for every scenario below.
gdp15.2 <- read.csv("data/gdp_ssp_5year.csv", header = T, sep = ";", dec = ",") %>%
  gather(scenario, gdppc, -iso3, -year) %>%
  filter(year == 2015 & scenario == "SSP2") %>%
  rename(countrycode = iso3) %>%
  select(countrycode, year, gdppc)
# Post-2015 GDP projections, all scenarios, long format.
gdp.proj <- read.csv("data/gdp_ssp_5year.csv", header = T, sep = ";", dec = ",") %>%
  gather(scenario, gdppc, -iso3, -year) %>%
  filter(year > 2015) %>%
  rename(countrycode = iso3)
# Attach the shared 2015 starting value to every country-scenario pair.
prep <- gdp.proj %>%
  select(countrycode, scenario) %>%
  merge(gdp15.2, all.x = T)
gdp.proj <- gdp.proj %>%
  bind_rows(prep)
# Education projections
edu.proj <- read.csv("data/wic_eduatt.csv", skip = 8, sep = ",") %>%
  select(-ISOCode) %>%
  mutate(Education = str_replace(str_to_lower(Education), fixed(" "), ".")) %>%
  spread(Education, Distribution)
mys.gap.proj <- read.csv("data/wic_mys_gap.csv", skip = 8, sep = ',') %>%
  select(-ISOCode) %>%
  rename(mys.gap = Years)
edu.proj.master <- inner_join(edu.proj, mys.gap.proj, by = c('Area', 'Year', 'Scenario')) %>%
  mutate(countrycode = countrycode(Area, 'country.name', 'iso3c')) %>%
  rename(scenario = Scenario,
         country = Area,
         year = Year) %>%
  filter(year > 2005)
# Merge all data into a master dataset for projections
projections <- gdp.proj %>%
  inner_join(edu.proj.master, by=c('countrycode', 'year', 'scenario')) %>%
  mutate(countrycode = countrycode %>% as.factor) %>%
  mutate(lngdp = log(gdppc)) %>%
  arrange(countrycode, year, scenario) %>%
  mutate(ID = paste(countrycode, year, scenario))
# Drop duplicate country-year-scenario rows, keep 2015 onwards.
projections <- projections[!duplicated(projections$ID), ] %>%
  filter(year >= 2015)
# Interpolate for yearly values
# Variant of ipolate() for the projections: linearly interpolates 5-yearly
# columns to annual values over 2015-2099. Expects the first three columns
# to be identifiers (countrycode, scenario, variable) and the remaining
# columns to be named by year. Years beyond the last observed column are
# carried forward unchanged.
ipolate3 <- function(mat) {
  target_years <- 2015:2099
  known_years <- as.numeric(unlist(names(mat)))  # non-year names coerce to NA (warning is harmless)
  grid <- seq(2015, 2100, 5)
  filled <- array(dim = c(dim(mat)[1], length(target_years)))
  for (idx in seq_along(target_years)) {
    yr <- target_years[idx]
    col <- which(names(mat) == yr)
    if (length(col) > 0) {
      filled[, idx] <- as.numeric(mat[, col])
    } else {
      gaps <- yr - grid
      lower <- grid[which(gaps == min(gaps[gaps > 0]))]
      upper <- lower + 5
      lo_vals <- as.numeric(mat[, which(names(mat) == lower)])
      hi_vals <- as.numeric(mat[, which(names(mat) == upper)])
      if (yr > max(known_years, na.rm = TRUE)) {
        filled[, idx] <- lo_vals   # past the last observed year: carry forward
      } else {
        filled[, idx] <- lo_vals + (hi_vals - lo_vals) * (yr - lower) / 5
      }
    }
  }
  out <- data.frame(mat[, 1:3], filled)
  names(out)[4:dim(out)[2]] <- target_years
  out
}
# Reshape to one row per country-scenario-variable with year columns, then
# interpolate 5-yearly values to annual (2015-2099).
projections.ipol <- projections %>%
  select(-ID, -country) %>%
  gather(variable, value, -countrycode, -scenario, -year) %>%
  spread(year, value) %>%
  ipolate3()
# Back to long country-scenario-year rows, one column per variable.
projections.yr <- projections.ipol %>%
  gather(year, value, -countrycode, -scenario, -variable) %>%
  spread(variable, value) %>%
  mutate(year = year %>% as.numeric())
#write.csv(projections.yr, 'data/projections_yr.csv')
| /code/data_mgmt.R | no_license | marina-andrijevic/governance2019 | R | false | false | 9,034 | r |
# # # Governance in socio-economic pathways and its role # # #
# # # for future adaptive capacity # # #
# # # (Andrijevic et al., 2019) # # #
# # # # # #
# # # Data management # # #
rm(list=ls())
library(ggplot2)
library(tidyverse)
library(broom)
library(countrycode)
library(readstata13)
library(sandwich)
library(lmtest)
library(zoo)
# library(RCurl)
# library(gdata)
setwd('/Users/marinaandrijevic/PhD/Governance Projections/GitHub')
# Historical data
# World Governance Indicators
# Function to standardize the values from 0 to 1
range01 <- function(x){(x - min(x, na.rm = T))/(max(x, na.rm = T) - min(x, na.rm = T))}
wgi <- read.dta13('data/wgidataset.dta') %>%
select(code, countryname, year, contains('e'), -ges, -gen, -ger, -gel, -geu) %>%
rename(voic.ac = vae,
pol.stab = pve,
gov.eff = gee,
reg.qual = rqe,
ru.law = rle,
corr.cont = cce,
countrycode = code) %>%
gather(var, value, -countrycode, -countryname, -year) %>%
group_by(var) %>%
mutate(value = range01(value)) %>%
ungroup() %>%
mutate(year = as.integer(year)) %>%
spread(var, value) %>%
mutate(governance = rowMeans(select(., voic.ac, pol.stab, gov.eff, reg.qual, ru.law, corr.cont))) %>% # # Governance variable as the arithmetic average of the six components of WGI
mutate(scenario = 'Observed')
levels(wgi$year)[levels(wgi$year)== 1996] <- 1995
wgi$countrycode <- recode(wgi$countrycode,
"ROM" = "ROU",
"ZAR" = "COD") #Romania and Democratic Republic of the Congo had outdated codes
# ND GAIN Readiness component
nd.readiness <- read.csv('data/ndgain_readiness.csv') %>%
rename(countrycode = ISO3,
country = Name) %>%
gather(year, readiness, -countrycode, -country) %>%
mutate(year = year %>% str_replace("X", "") %>% as.numeric,
scenario = 'Observed') %>%
filter(year <= 2015)
# GDP from Penn World Tables 7.0 (until 2010) and SSP projections (2010 - 2015) (Crespo Cuaresma, 2017)
gdp.pwt <- read.csv("data/pwt70.csv", header = T, sep = ",") %>%
select(isocode, year, rgdpl) %>%
rename(gdppc = rgdpl, countrycode = isocode) %>%
mutate(scenario = 'Observed')
gdp.pwt$countrycode <- recode(gdp.pwt$countrycode, "ROM" = "ROU", "ZAR" = "COD", "GER" = "DEU") #Correct for outdated country codes
gdp.15 <- read.csv("data/gdp_ssp_5year.csv", header = T, sep = ";", dec = ",") %>%
gather(scenario, gdppc, -iso3, -year) %>%
filter((year == 2010 | year == 2015) & scenario == 'SSP2') %>%
select(-scenario) %>%
mutate(scenario = "Observed") %>%
spread(year, gdppc)
# Interpolation function from Burke et al. (2018).
# Linearly interpolates 5-yearly GDP columns to annual values (2010-2015).
# Expects the first two columns to be identifiers and the remaining columns
# to be named by year. Years beyond the last observed column are carried
# forward unchanged.
ipolate <- function(mat) {
  target_years <- 2010:2015
  known_years <- as.numeric(unlist(names(mat)))  # non-year names coerce to NA (warning is harmless)
  grid <- seq(2010, 2015, 5)
  filled <- array(dim = c(dim(mat)[1], length(target_years)))
  for (idx in seq_along(target_years)) {
    yr <- target_years[idx]
    col <- which(names(mat) == yr)
    if (length(col) > 0) {
      filled[, idx] <- as.numeric(mat[, col])
    } else {
      gaps <- yr - grid
      lower <- grid[which(gaps == min(gaps[gaps > 0]))]
      upper <- lower + 5
      lo_vals <- as.numeric(mat[, which(names(mat) == lower)])
      hi_vals <- as.numeric(mat[, which(names(mat) == upper)])
      if (yr > max(known_years, na.rm = TRUE)) {
        filled[, idx] <- lo_vals   # past the last observed year: carry forward
      } else {
        filled[, idx] <- lo_vals + (hi_vals - lo_vals) * (yr - lower) / 5
      }
    }
  }
  out <- data.frame(mat[, 1:2], filled)
  names(out)[3:dim(out)[2]] <- target_years
  out
}
gdp.15.ipol <- ipolate(gdp.15) %>%
gather(year, gdppc, -iso3, -scenario) %>%
rename(countrycode = iso3) %>%
mutate(year = as.integer(year))
gdp.yearly <- gdp.pwt %>%
bind_rows(gdp.15.ipol) %>%
arrange(countrycode)
# Education data: share of population by educational attainment and gender gap in education measured by the difference in mean years of schooling (MYS) between women and men (source: Wittgenstein Centre for Demography and Global Human Capital)
edu.prep <- read.csv("data/wic_eduatt.csv", skip = 8, sep = ",") %>%
spread(Year, Distribution)
mys.gap.prep <- read.csv("data/wic_mys_gap.csv", skip = 8) %>%
spread(Year, Years)
# Variant of ipolate() for the education tables: linearly interpolates
# 5-yearly columns to annual values over 1970-2099. Expects the first four
# columns to be identifiers and the remaining columns to be named by year.
# Years beyond the last observed column are carried forward unchanged.
ipolate2 <- function(mat) {
  target_years <- 1970:2099
  known_years <- as.numeric(unlist(names(mat)))  # non-year names coerce to NA (warning is harmless)
  grid <- seq(1970, 2100, 5)
  filled <- array(dim = c(dim(mat)[1], length(target_years)))
  for (idx in seq_along(target_years)) {
    yr <- target_years[idx]
    col <- which(names(mat) == yr)
    if (length(col) > 0) {
      filled[, idx] <- as.numeric(mat[, col])
    } else {
      gaps <- yr - grid
      lower <- grid[which(gaps == min(gaps[gaps > 0]))]
      upper <- lower + 5
      lo_vals <- as.numeric(mat[, which(names(mat) == lower)])
      hi_vals <- as.numeric(mat[, which(names(mat) == upper)])
      if (yr > max(known_years, na.rm = TRUE)) {
        filled[, idx] <- lo_vals   # past the last observed year: carry forward
      } else {
        filled[, idx] <- lo_vals + (hi_vals - lo_vals) * (yr - lower) / 5
      }
    }
  }
  out <- data.frame(mat[, 1:4], filled)
  names(out)[5:dim(out)[2]] <- target_years
  out
}
edu.ipol <- ipolate2(edu.prep) %>%
gather(year, distribution, -Area, -Scenario, -ISOCode, -Education) %>%
mutate(Education = str_replace(str_to_lower(Education), fixed(" "), ".")) %>%
spread(Education, distribution)
mys.gap.ipol <- ipolate2(mys.gap.prep) %>%
gather(year, mys.gap, -Area, -Scenario, -ISOCode) %>%
mutate(year = year %>% str_replace("X", "") %>% as.factor)
edu.master <- edu.ipol %>%
left_join(mys.gap.ipol) %>%
mutate(countrycode = countrycode(Area, 'country.name', 'iso3c')) %>%
mutate(Scenario = recode(Scenario, 'SSP2' = 'Observed'),
year = as.integer(year)) %>%
rename(scenario = Scenario) %>%
filter(scenario == 'Observed')
# Merge into one dataset
observed.yr <- wgi %>%
left_join(gdp.yearly, by = c('countrycode', 'year', 'scenario')) %>%
left_join(edu.master, by = c('countrycode', 'year', 'scenario')) %>%
left_join(nd.readiness, by = c('countrycode', 'year', 'scenario')) %>%
select(-Area) %>%
mutate_if(is.numeric, list(~na_if(., Inf))) %>%
mutate(year = as.integer(year),
lngdp = log(gdppc))
observed.yr$countrycode <- recode(observed.yr$countrycode, "ROM" = "ROU", "ZAR" = "COD") # Fix outdated country names
#write.csv(observed.yr, 'data/observed_yr.csv')
# Projections data
gdp15.2 <- read.csv("data/gdp_ssp_5year.csv", header = T, sep = ";", dec = ",") %>%
gather(scenario, gdppc, -iso3, -year) %>%
filter(year == 2015 & scenario == "SSP2") %>%
rename(countrycode = iso3) %>%
select(countrycode, year, gdppc)
gdp.proj <- read.csv("data/gdp_ssp_5year.csv", header = T, sep = ";", dec = ",") %>%
gather(scenario, gdppc, -iso3, -year) %>%
filter(year > 2015) %>%
rename(countrycode = iso3)
prep <- gdp.proj %>%
select(countrycode, scenario) %>%
merge(gdp15.2, all.x = T)
gdp.proj <- gdp.proj %>%
bind_rows(prep)
# Education projections
edu.proj <- read.csv("data/wic_eduatt.csv", skip = 8, sep = ",") %>%
select(-ISOCode) %>%
mutate(Education = str_replace(str_to_lower(Education), fixed(" "), ".")) %>%
spread(Education, Distribution)
mys.gap.proj <- read.csv("data/wic_mys_gap.csv", skip = 8, sep = ',') %>%
select(-ISOCode) %>%
rename(mys.gap = Years)
edu.proj.master <- inner_join(edu.proj, mys.gap.proj, by = c('Area', 'Year', 'Scenario')) %>%
mutate(countrycode = countrycode(Area, 'country.name', 'iso3c')) %>%
rename(scenario = Scenario,
country = Area,
year = Year) %>%
filter(year > 2005)
# Merge all data into a master dataset for projections
projections <- gdp.proj %>%
inner_join(edu.proj.master, by=c('countrycode', 'year', 'scenario')) %>%
mutate(countrycode = countrycode %>% as.factor) %>%
mutate(lngdp = log(gdppc)) %>%
arrange(countrycode, year, scenario) %>%
mutate(ID = paste(countrycode, year, scenario))
projections <- projections[!duplicated(projections$ID), ] %>%
filter(year >= 2015)
# Interpolate for yearly values
# Variant of ipolate() for the projections: linearly interpolates 5-yearly
# columns to annual values over 2015-2099. Expects the first three columns
# to be identifiers and the remaining columns to be named by year. Years
# beyond the last observed column are carried forward unchanged.
ipolate3 <- function(mat) {
  target_years <- 2015:2099
  known_years <- as.numeric(unlist(names(mat)))  # non-year names coerce to NA (warning is harmless)
  grid <- seq(2015, 2100, 5)
  filled <- array(dim = c(dim(mat)[1], length(target_years)))
  for (idx in seq_along(target_years)) {
    yr <- target_years[idx]
    col <- which(names(mat) == yr)
    if (length(col) > 0) {
      filled[, idx] <- as.numeric(mat[, col])
    } else {
      gaps <- yr - grid
      lower <- grid[which(gaps == min(gaps[gaps > 0]))]
      upper <- lower + 5
      lo_vals <- as.numeric(mat[, which(names(mat) == lower)])
      hi_vals <- as.numeric(mat[, which(names(mat) == upper)])
      if (yr > max(known_years, na.rm = TRUE)) {
        filled[, idx] <- lo_vals   # past the last observed year: carry forward
      } else {
        filled[, idx] <- lo_vals + (hi_vals - lo_vals) * (yr - lower) / 5
      }
    }
  }
  out <- data.frame(mat[, 1:3], filled)
  names(out)[4:dim(out)[2]] <- target_years
  out
}
projections.ipol <- projections %>%
select(-ID, -country) %>%
gather(variable, value, -countrycode, -scenario, -year) %>%
spread(year, value) %>%
ipolate3()
projections.yr <- projections.ipol %>%
gather(year, value, -countrycode, -scenario, -variable) %>%
spread(variable, value) %>%
mutate(year = year %>% as.numeric())
#write.csv(projections.yr, 'data/projections_yr.csv')
|
library(shiny)
library(plotly)
library(ggplot2)
library(tidyr)
library(dplyr)
library(grid)
library(gridExtra)
# Delivery-payment figures: attach titles/axis labels to the pre-built plotly
# objects (del_ed / del_age are created upstream, outside this chunk).
del_ed <- del_ed %>% layout(title = "Delivery Payment Option Based on Average Parental Education",
                            barmode = 'group',
                            xaxis = list(title = "Delivery Payment Options"),
                            yaxis = list(title = "Average Education Level"))
del_ed
del_age <- del_age %>% layout(title = "Delivery Payment Option Based on Average Parental Age",
                              barmode = 'group',
                              xaxis = list(title = "Delivery Payment Options"),
                              yaxis = list(title = "Average Age"))
del_age
# Grouped-bar layouts for the infection variables (plotly objects built
# upstream). FIX(review): "realtion" -> "relation" in the displayed titles.
gonorrhea_age <- gonorrhea_age %>% layout(title = "Infection with Gonorrhea in relation to the Average age of the parents",
                                          barmode = 'group',
                                          xaxis = list(title = "Report on Gonorrhea; No; Yes; Unknown"),
                                          yaxis = list(title = "Average Age"))
gonorrhea_age
syphilis_age <- syphilis_age %>% layout(title = "Infection with Syphilis in relation to the Average age of the parents",
                                        barmode = 'group',
                                        xaxis = list(title = "Mothers report on Syphilis; No; Yes; Unknown"),
                                        yaxis = list(title = "Average Age"))
syphilis_age
chlamydia_age <- chlamydia_age %>% layout(title = "Infection with Chlamydia in relation to the Average age of the parents",
                                          barmode = 'group',
                                          xaxis = list(title = "Mothers report on Chlamydia; No; Yes; Unknown"),
                                          yaxis = list(title = "Average Age"))
chlamydia_age
hepB_age <- hepB_age %>% layout(title = "Infection with HepB in relation to the Average age of the parents",
                                barmode = 'group',
                                xaxis = list(title = "Mothers report on HepB; No; Yes; Unknown"),
                                yaxis = list(title = "Average Age"))
hepB_age
hepC_age <- hepC_age %>% layout(title = "Infection with HepC in relation to the Average age of the parents",
                                barmode = 'group',
                                xaxis = list(title = "Mothers report on HepC; No; Yes; Unknown"),
                                yaxis = list(title = "Average Age"))
hepC_age
gonorrhea_bmi <- gonorrhea_bmi %>% layout(title = "Infection with Gonorrhea in relation to the average BMI/Weight of the Mother",
                                          barmode = 'group',
                                          xaxis = list(title = "Mothers report on Gonorrhea; No; Yes; Unknown"),
                                          yaxis = list(title = "Average BMI/Weight"))
gonorrhea_bmi
syphilis_bmi <- syphilis_bmi %>% layout(title = "Infection with Syphilis in relation to the average BMI/Weight of the Mother",
                                        barmode = 'group',
                                        xaxis = list(title = "Mothers report on Syphilis; No; Yes; Unknown"),
                                        yaxis = list(title = "Average BMI/Weight"))
syphilis_bmi
chlamydia_bmi <- chlamydia_bmi %>% layout(title = "Infection with Chlamydia in relation to the average BMI/Weight of the Mother",
                                          barmode = 'group',
                                          xaxis = list(title = "Mothers report on Chlamydia; No; Yes; Unknown"),
                                          yaxis = list(title = "Average BMI/Weight"))
chlamydia_bmi
hepB_bmi <- hepB_bmi %>% layout(title = "Infection with HepB in relation to the average BMI/Weight of the Mother",
                                barmode = 'group',
                                xaxis = list(title = "Mothers report on HepB; No; Yes; Unknown"),
                                yaxis = list(title = "Average BMI/Weight"))
hepB_bmi
hepC_bmi <- hepC_bmi %>% layout(title = "Infection with HepC in relation to the average BMI/Weight of the Mother",
                                barmode = 'group',
                                xaxis = list(title = "Mothers report on HepC; No; Yes; Unknown"),
                                yaxis = list(title = "Average BMI/Weight"))
hepC_bmi
# no_infection_reported_ed
#
# no_infection_reported_age
#
# no_infection_bmi
# success_ed
#
# fail_ed
# induced_ed
# aug_ed
# steroids_ed
# antibiotics_ed
# chorioamnionitis_ed
# anesthesia_ed
# success_age
# fail_age
# induced_age
# aug_age
# Grouped-bar layouts for the medication variables (plotly objects built
# upstream). FIX(review): "Antobiotics" -> "Antibiotics" in the displayed title.
steroids_age <- steroids_age %>% layout(title = "Use of Steroids medication in relation to parents age",
                                        barmode = 'group',
                                        xaxis = list(title = "Mothers report on using Steroids; No; Yes; Unknown"),
                                        yaxis = list(title = "Average Age"))
steroids_age
antibiotics_age <- antibiotics_age %>% layout(title = "Use of Antibiotics medication in relation to parents age",
                                              barmode = 'group',
                                              xaxis = list(title = "Mothers report on using Antibiotics; No; Yes; Unknown"),
                                              yaxis = list(title = "Average Age"))
antibiotics_age
# chorioamnionitis_age
anesthesia_age <- anesthesia_age %>% layout(title = "Use of Anesthesia medication in relation to parents age",
                                            barmode = 'group',
                                            xaxis = list(title = "Mothers report on using Anesthesia; No; Yes; Unknown"),
                                            yaxis = list(title = "Average Age"))
anesthesia_age
# success_bmi
# fail_bmi
# induced_bmi
# aug_bmi
steroids_bmi <- steroids_bmi %>% layout(title = "Use of Steroids medication in relation to mothers BMI/Weight",
                                        barmode = 'group',
                                        xaxis = list(title = "Mothers report on using Steroids; No; Yes; Unknown"),
                                        yaxis = list(title = "Average BMI/Weight"))
steroids_bmi
antibiotics_bmi <- antibiotics_bmi %>% layout(title = "Use of Antibiotics medication in relation to mothers BMI/Weight",
                                              barmode = 'group',
                                              xaxis = list(title = "Mothers report on using Antibiotics; No; Yes; Unknown"),
                                              yaxis = list(title = "Average BMI/Weight"))
antibiotics_bmi
# chorioamnionitis_bmi
anesthesia_bmi <- anesthesia_bmi %>% layout(title = "Use of Anesthesia medication in relation to mothers BMI/Weight",
                                            barmode = 'group',
                                            xaxis = list(title = "Mothers report on using Anesthesia; No; Yes; Unknown"),
                                            yaxis = list(title = "Average BMI/Weight"))
anesthesia_bmi
# final_del_ed
# Final delivery method figures (plotly objects built upstream).
final_del_age <- final_del_age %>% layout(title = "Final Delivery Method in relation to the average age of the parents",
                                          barmode = 'group',
                                          xaxis = list(title = "Final Delivery Options"),
                                          yaxis = list(title = "Average Age"))
final_del_age
final_del_bmi <- final_del_bmi %>% layout(title = "Final Delivery Method in relation to the average BMI/Weight of the mother",
                                          barmode = 'group',
                                          xaxis = list(title = "Final Delivery Options"),
                                          yaxis = list(title = "Average BMI/Weight"))
final_del_bmi
# prenatal_ed_mother
# prenatal_ed_father
# Line charts of average prenatal visits by parental age (ggplot2).
# FIX(review): "Prenetal" -> "Prenatal" in the displayed titles.
# NOTE(review): `size=` for lines is deprecated in newer ggplot2 (use
# `linewidth=`) -- kept as-is to avoid a behaviour change on old versions.
prenatal_age_mother= ggplot(prenatal_age_mother18, aes(x=mothers_age, y=n_prenatal_visits_average, group=1)) +
  geom_line(linetype="dashed", color="blue", size=0.2)+
  labs(title ="Average Prenatal Visits in Relation to the Age of the Mother")+
  xlab("Mothers Age")+
  ylab("Average Prenatal Visits")+
  geom_point()
prenatal_age_mother
prenatal_age_father= ggplot(prenatal_age_father18, aes(x=fathers_age, y=n_prenatal_visits_average, group=1)) +
  geom_line(linetype="dashed", color="blue", size=0.2)+
  labs(title ="Average Prenatal Visits in Relation to the Age of the Father")+
  xlab("Fathers Age")+
  ylab("Average Prenatal Visits")+
  geom_point()
prenatal_age_father
# prenatal_bmi #has outliers
#
# prenatal_pre_preg #has outlier
#
# prenatal_delivery #has outlier
# pat_ed
# Paternity-acknowledgment and Hispanic-origin figures (plotly objects built upstream).
pat_age <- pat_age %>% layout(title = "Paternity Acknowledgment in relation to the average of the parents age",
                              barmode = 'group',
                              xaxis = list(title = "Paternity Acknowledgment; No, Yes, Unknown, Not Reported"),
                              yaxis = list(title = "Average Age"))
pat_age
pat_bmi <- pat_bmi %>% layout(title = "Paternity Acknowledgment in relation to the average BMI/Weight of the Mother",
                              barmode = 'group',
                              xaxis = list(title = "Paternity Acknowledgment; No, Yes, Unknown, Not Reported"),
                              yaxis = list(title = "Average BMI/Weight"))
pat_bmi
# hisp_ed
hisp_age <- hisp_age %>% layout(title = "Hispanic Origin in relation to the Average age of the Parents",
                                barmode = 'group',
                                xaxis = list(title = "Hispanic Origin"),
                                yaxis = list(title = "Average Age"))
hisp_age
hisp_bmi <- hisp_bmi %>% layout(title = "Hispanic Origin in relation to the Average BMI/Weight of the Mother",
                                barmode = 'group',
                                xaxis = list(title = "Hispanic Origin"),
                                yaxis = list(title = "Average BMI/Weight"))
hisp_bmi
# fhisp_ed
# fhisp_age
# fhisp_bmi
# mtobacco_ed1
#
# mtobacco_ed2
#
# mtobacco_ed3
# Average tobacco use per trimester vs mother's age (ggplot2 line charts).
mtobacco_age1= ggplot(mtobacco_age, aes(x=mothers_age, y=cigs_tri1_average, group=1)) +
  geom_line(linetype="dashed", color="blue", size=0.2)+
  labs(title ="Average Tobacco Use in the 1st Trimester in Relation to the Age of the Mother")+
  xlab("Mothers Age")+
  ylab("Average Tobacco Use in Trimester 1")+
  geom_point()
mtobacco_age1
mtobacco_age2= ggplot(mtobacco_age, aes(x=mothers_age, y=cigs_tri2_average, group=1)) +
  geom_line(linetype="dashed", color="blue", size=0.2)+
  labs(title ="Average Tobacco Use in the 2nd Trimester in Relation to the Age of the Mother")+
  xlab("Mothers Age")+
  ylab("Average Tobacco Use in Trimester 2")+
  geom_point()
mtobacco_age2
mtobacco_age3= ggplot(mtobacco_age, aes(x=mothers_age, y=cigs_tri3_average, group=1)) +
  geom_line(linetype="dashed", color="blue", size=0.2)+
  labs(title ="Average Tobacco Use in the 3rd Trimester in Relation to the Age of the Mother")+
  xlab("Mothers Age")+
  ylab("Average Tobacco Use in Trimester 3")+
  geom_point()
mtobacco_age3
# ftobacco_ed1
#
# ftobacco_ed2
#
# ftobacco_ed3
# Same three charts, but vs father's age.
ftobacco_age1= ggplot(ftobacco_age, aes(x=fathers_age, y=cigs_tri1_average, group=1)) +
  geom_line(linetype="dashed", color="blue", size=0.2)+
  labs(title ="Average Tobacco Use in the 1st Trimester in Relation to the Age of the father")+
  xlab("Fathers Age")+
  ylab("Average Tobacco Use in Trimester 1")+
  geom_point()
ftobacco_age1
ftobacco_age2= ggplot(ftobacco_age, aes(x=fathers_age, y=cigs_tri2_average, group=1)) +
  geom_line(linetype="dashed", color="blue", size=0.2)+
  labs(title ="Average Tobacco Use in the 2nd Trimester in Relation to the Age of the father")+
  xlab("Fathers Age")+
  ylab("Average Tobacco Use in Trimester 2")+
  geom_point()
ftobacco_age2
ftobacco_age3= ggplot(ftobacco_age, aes(x=fathers_age, y=cigs_tri3_average, group=1)) +
  geom_line(linetype="dashed", color="blue", size=0.2)+
  labs(title ="Average Tobacco Use in the 3rd Trimester in Relation to the Age of the father")+
  xlab("Fathers Age")+
  ylab("Average Tobacco Use in Trimester 3")+
  geom_point()
ftobacco_age3
########################
# Server: push the pre-built figures into the Shiny `output` object.
# FIX(review): `rendorPlot` is not a Shiny function (typo) -- corrected to
# renderPlot(). Output IDs (the quoted names) are kept byte-identical so they
# still match the UI. NOTE(review): the plotly-built objects (del_ed, del_age,
# the *_age/*_bmi bars) would normally need plotly::renderPlotly() with a
# matching plotlyOutput() in the UI -- TODO confirm against ui.R.
#Graph 1
output$"Delivery Payment Option Based on Average Parental Education" <- renderPlot({
  del_ed
})
#Graph 2
output$"Delivery Payment Option Based on Average Parental Age" <- renderPlot({
  del_age
})
#################
#Graph 2
output$"Infection with Gonorrhea in realtion to the Average age of the parents" <- renderPlot({
  gonorrhea_age
})
#Graph 3
output$"Infection with Syphilis in realtion to the Average age of the parents" <- renderPlot({
  syphilis_age
})
#Graph 4
output$"Infection with Chlamydia in realtion to the Average age of the parents" <- renderPlot({
  chlamydia_age
})
#Graph 5
output$"Infection with HepB in realtion to the Average age of the parents" <- renderPlot({
  hepB_age
})
#Graph 6
output$"Infection with HepC in realtion to the Average age of the parents" <- renderPlot({
  hepC_age
})
######################
#Graph 7
output$"Infection with Gonorrhea in realtion to the average BMI/Weight of the Mother" <- renderPlot({
  gonorrhea_bmi
})
#Graph 8
output$"Infection with Syphilis in realtion to the average BMI/Weight of the Mother" <- renderPlot({
  syphilis_bmi
})
#Graph 9
output$"Infection with Chlamydia in realtion to the average BMI/Weight of the Mother" <- renderPlot({
  chlamydia_bmi
})
#Graph 10
output$"Infection with HepB in realtion to the average BMI/Weight of the Mother" <- renderPlot({
  hepB_bmi
})
#Graph 11
output$"Infection with HepC in realtion to the average BMI/Weight of the Mother" <- renderPlot({
  hepC_bmi
})
#################
#Graph 12
output$"Use of Steroids medication in relation to parents age" <- renderPlot({
  steroids_age
})
#Graph 13
output$"Use of Antobiotics medication in relation to parents age" <- renderPlot({
  antibiotics_age
})
#Graph 14
output$"Use of Anesthesia medication in relation to parents age" <- renderPlot({
  anesthesia_age
})
##################
#Graph 15
output$"Use of Steroids medication in relation to mothers BMI/Weight" <- renderPlot({
  steroids_bmi
})
#Graph 16
output$"Use of Antibiotics medication in relation to mothers BMI/Weight" <- renderPlot({
  antibiotics_bmi
})
#Graph 17
output$"Use of Anesthesia medication in relation to mothers BMI/Weight" <- renderPlot({
  anesthesia_bmi
})
#################
#Graph 18
output$"Final Delivery Method in relation to the average age of the parents" <- renderPlot({
  final_del_age
})
#Graph 19
output$"Final Delivery Method in relation to the average BMI/Weight of the mother" <- renderPlot({
  final_del_bmi
})
##################
#Graph 20
output$"Average Prenetal Visits in Relation to the Age of the Mother" <- renderPlot({
  prenatal_age_mother
})
#Graph 21
output$"Average Prenetal Visits in Relation to the Age of the Father" <- renderPlot({
  prenatal_age_father
})
###################
#Graph 22
output$"Paternity Acknowledgment in relation to the average of the parents age" <- renderPlot({
  pat_age
})
#Graph 23
output$"Paternity Acknowledgment in relation to the average BMI/Weight of the Mother" <- renderPlot({
  pat_bmi
})
##################
#Graph 24
output$"Hispanic Origin in relation to the Average age of the Parents" <- renderPlot({
  hisp_age
})
#Graph 25
output$"Hispanic Origin in relation to the Average BMI/Weight of the Mother" <- renderPlot({
  hisp_bmi
})
#################
#Graph 26
output$"Average Tobacco Use in the 1st Trimester in Relation to the Age of the Mother" <- renderPlot({
  mtobacco_age1
})
#Graph 27
output$"Average Tobacco Use in the 2nd Trimester in Relation to the Age of the Mother" =rendorPlot({
mtobacco_age2
})
#Graph 28
output$"Average Tobacco Use in the 3rd Trimester in Relation to the Age of the Mother" =rendorPlot({
mtobacco_age3
})
#################
#Graph 29
output$"Average Tobacco Use in the 1st Trimester in Relation to the Age of the father" =rendorPlot({
ftobacco_age1
})
#Graph 30
output$"Average Tobacco Use in the 2nd Trimester in Relation to the Age of the father" =rendorPlot({
ftobacco_age2
})
#Graph 31
output$"Average Tobacco Use in the 3rd Trimester in Relation to the Age of the father" =rendorPlot({
ftobacco_age3
})
| /Jason/Capstone Code/Plots to use.R | no_license | jhoffme1/capstone-CDC-infants | R | false | false | 15,900 | r | library(shiny)
library(plotly)
library(ggplot2)
library(tidyr)
library(dplyr)
library(grid)
library(gridExtra)
del_ed <- del_ed %>% layout(title = "Delivery Payment Option Based on Average Parental Education",
barmode = 'group',
xaxis = list(title = "Delivery Payment Options"),
yaxis = list(title = "Average Education Level"))
del_ed
del_age <- del_age %>% layout(title = "Delivery Payment Option Based on Average Parental Age",
barmode = 'group',
xaxis = list(title = "Delivery Payment Options"),
yaxis = list(title = "Average Age"))
del_age
gonorrhea_age <- gonorrhea_age %>% layout(title = "Infection with Gonorrhea in realtion to the Average age of the parents",
barmode = 'group',
xaxis = list(title = "Report on Gonorrhea; No; Yes; Unknown"),
yaxis = list(title = "Average Age"))
gonorrhea_age
syphilis_age <- syphilis_age %>% layout(title = "Infection with Syphilis in realtion to the Average age of the parents",
barmode = 'group',
xaxis = list(title = "Mothers report on Syphilis; No; Yes; Unknown"),
yaxis = list(title = "Average Age"))
syphilis_age
chlamydia_age <- chlamydia_age %>% layout(title = "Infection with Chlamydia in realtion to the Average age of the parents",
barmode = 'group',
xaxis = list(title = "Mothers report on Chlamydia; No; Yes; Unknown"),
yaxis = list(title = "Average Age"))
chlamydia_age
hepB_age <- hepB_age %>% layout(title = "Infection with HepB in realtion to the Average age of the parents",
barmode = 'group',
xaxis = list(title = "Mothers report on HepB; No; Yes; Unknown"),
yaxis = list(title = "Average Age"))
hepB_age
hepC_age <- hepC_age %>% layout(title = "Infection with HepC in realtion to the Average age of the parents",
barmode = 'group',
xaxis = list(title = "Mothers report on HepC; No; Yes; Unknown"),
yaxis = list(title = "Average Age"))
hepC_age
gonorrhea_bmi <- gonorrhea_bmi %>% layout(title = "Infection with Gonorrhea in realtion to the average BMI/Weight of the Mother",
barmode = 'group',
xaxis = list(title = "Mothers report on Gonorrhea; No; Yes; Unknown"),
yaxis = list(title = "Average BMI/Weight"))
gonorrhea_bmi
syphilis_bmi <- syphilis_bmi %>% layout(title = "Infection with Syphilis in realtion to the average BMI/Weight of the Mother",
barmode = 'group',
xaxis = list(title = "Mothers report on Syphilis; No; Yes; Unknown"),
yaxis = list(title = "Average BMI/Weight"))
syphilis_bmi
chlamydia_bmi <- chlamydia_bmi %>% layout(title = "Infection with Chlamydia in realtion to the average BMI/Weight of the Mother",
barmode = 'group',
xaxis = list(title = "Mothers report on Chlamydia; No; Yes; Unknown"),
yaxis = list(title = "Average BMI/Weight"))
chlamydia_bmi
hepB_bmi <- hepB_bmi %>% layout(title = "Infection with HepB in realtion to the average BMI/Weight of the Mother",
barmode = 'group',
xaxis = list(title = "Mothers report on HepB; No; Yes; Unknown"),
yaxis = list(title = "Average BMI/Weight"))
hepB_bmi
hepC_bmi <- hepC_bmi %>% layout(title = "Infection with HepC in realtion to the average BMI/Weight of the Mother",
barmode = 'group',
xaxis = list(title = "Mothers report on HepC; No; Yes; Unknown"),
yaxis = list(title = "Average BMI/Weight"))
hepC_bmi
# no_infection_reported_ed
#
# no_infection_reported_age
#
# no_infection_bmi
# success_ed
#
# fail_ed
# induced_ed
# aug_ed
# steroids_ed
# antibiotics_ed
# chorioamnionitis_ed
# anesthesia_ed
# success_age
# fail_age
# induced_age
# aug_age
steroids_age <- steroids_age %>% layout(title = "Use of Steroids medication in relation to parents age",
barmode = 'group',
xaxis = list(title = "Mothers report on using Steroids; No; Yes; Unknown"),
yaxis = list(title = "Average Age"))
steroids_age
antibiotics_age <- antibiotics_age %>% layout(title = "Use of Antobiotics medication in relation to parents age",
barmode = 'group',
xaxis = list(title = "Mothers report on using Antibiotics; No; Yes; Unknown"),
yaxis = list(title = "Average Age"))
antibiotics_age
# chorioamnionitis_age
anesthesia_age <- anesthesia_age %>% layout(title = "Use of Anesthesia medication in relation to parents age",
barmode = 'group',
xaxis = list(title = "Mothers report on using Anesthesia; No; Yes; Unknown"),
yaxis = list(title = "Average Age"))
anesthesia_age
# success_bmi
# fail_bmi
# induced_bmi
# aug_bmi
steroids_bmi <- steroids_bmi %>% layout(title = "Use of Steroids medication in relation to mothers BMI/Weight",
barmode = 'group',
xaxis = list(title = "Mothers report on using Steroids; No; Yes; Unknown"),
yaxis = list(title = "Average BMI/Weight"))
steroids_bmi
antibiotics_bmi <- antibiotics_bmi %>% layout(title = "Use of Antibiotics medication in relation to mothers BMI/Weight",
barmode = 'group',
xaxis = list(title = "Mothers report on using Antibiotics; No; Yes; Unknown"),
yaxis = list(title = "Average BMI/Weight"))
antibiotics_bmi
# chorioamnionitis_bmi
anesthesia_bmi <- anesthesia_bmi %>% layout(title = "Use of Anesthesia medication in relation to mothers BMI/Weight",
barmode = 'group',
xaxis = list(title = "Mothers report on using Anesthesia; No; Yes; Unknown"),
yaxis = list(title = "Average BMI/Weight"))
anesthesia_bmi
# final_del_ed
final_del_age <- final_del_age %>% layout(title = "Final Delivery Method in relation to the average age of the parents",
barmode = 'group',
xaxis = list(title = "Final Delivery Options"),
yaxis = list(title = "Average Age"))
final_del_age
final_del_bmi <- final_del_bmi %>% layout(title = "Final Delivery Method in relation to the average BMI/Weight of the mother",
barmode = 'group',
xaxis = list(title = "Final Delivery Options"),
yaxis = list(title = "Average BMI/Weight"))
final_del_bmi
# prenatal_ed_mother
# prenatal_ed_father
# Mother's age vs. mean number of prenatal visits: dashed trend line with the
# observed points layered on top.
prenatal_age_mother <- ggplot(prenatal_age_mother18, aes(x = mothers_age, y = n_prenatal_visits_average, group = 1)) +
  geom_line(linetype = "dashed", color = "blue", size = 0.2) +
  labs(
    title = "Average Prenetal Visits in Relation to the Age of the Mother",
    x = "Mothers Age",
    y = "Average Prenatal Visits"
  ) +
  geom_point()
prenatal_age_mother

# Same plot for the father's age.
prenatal_age_father <- ggplot(prenatal_age_father18, aes(x = fathers_age, y = n_prenatal_visits_average, group = 1)) +
  geom_line(linetype = "dashed", color = "blue", size = 0.2) +
  labs(
    title = "Average Prenetal Visits in Relation to the Age of the Father",
    x = "Fathers Age",
    y = "Average Prenatal Visits"
  ) +
  geom_point()
prenatal_age_father
# prenatal_bmi #has outliers
#
# prenatal_pre_preg #has outlier
#
# prenatal_delivery #has outlier
# pat_ed
pat_age <- pat_age %>% layout(title = "Paternity Acknowledgment in relation to the average of the parents age",
barmode = 'group',
xaxis = list(title = "Paternity Acknowledgment; No, Yes, Unknown, Not Reported"),
yaxis = list(title = "Average Age"))
pat_age
pat_bmi <- pat_bmi %>% layout(title = "Paternity Acknowledgment in relation to the average BMI/Weight of the Mother",
barmode = 'group',
xaxis = list(title = "Paternity Acknowledgment; No, Yes, Unknown, Not Reported"),
yaxis = list(title = "Average BMI/Weight"))
pat_bmi
# hisp_ed
hisp_age <- hisp_age %>% layout(title = "Hispanic Origin in relation to the Average age of the Parents",
barmode = 'group',
xaxis = list(title = "Hispanic Origin"),
yaxis = list(title = "Average Age"))
hisp_age
hisp_bmi <- hisp_bmi %>% layout(title = "Hispanic Origin in relation to the Average BMI/Weight of the Mother",
barmode = 'group',
xaxis = list(title = "Hispanic Origin"),
yaxis = list(title = "Average BMI/Weight"))
hisp_bmi
# fhisp_ed
# fhisp_age
# fhisp_bmi
# mtobacco_ed1
#
# mtobacco_ed2
#
# mtobacco_ed3
mtobacco_age1= ggplot(mtobacco_age, aes(x=mothers_age, y=cigs_tri1_average, group=1)) +
geom_line(linetype="dashed", color="blue", size=0.2)+
labs(title ="Average Tobacco Use in the 1st Trimester in Relation to the Age of the Mother")+
xlab("Mothers Age")+
ylab("Average Tobacco Use in Trimester 1")+
geom_point()
mtobacco_age1
mtobacco_age2= ggplot(mtobacco_age, aes(x=mothers_age, y=cigs_tri2_average, group=1)) +
geom_line(linetype="dashed", color="blue", size=0.2)+
labs(title ="Average Tobacco Use in the 2nd Trimester in Relation to the Age of the Mother")+
xlab("Mothers Age")+
ylab("Average Tobacco Use in Trimester 2")+
geom_point()
mtobacco_age2
mtobacco_age3= ggplot(mtobacco_age, aes(x=mothers_age, y=cigs_tri3_average, group=1)) +
geom_line(linetype="dashed", color="blue", size=0.2)+
labs(title ="Average Tobacco Use in the 3rd Trimester in Relation to the Age of the Mother")+
xlab("Mothers Age")+
ylab("Average Tobacco Use in Trimester 3")+
geom_point()
mtobacco_age3
# ftobacco_ed1
#
# ftobacco_ed2
#
# ftobacco_ed3
ftobacco_age1= ggplot(ftobacco_age, aes(x=fathers_age, y=cigs_tri1_average, group=1)) +
geom_line(linetype="dashed", color="blue", size=0.2)+
labs(title ="Average Tobacco Use in the 1st Trimester in Relation to the Age of the father")+
xlab("Fathers Age")+
ylab("Average Tobacco Use in Trimester 1")+
geom_point()
ftobacco_age1
ftobacco_age2= ggplot(ftobacco_age, aes(x=fathers_age, y=cigs_tri2_average, group=1)) +
geom_line(linetype="dashed", color="blue", size=0.2)+
labs(title ="Average Tobacco Use in the 2nd Trimester in Relation to the Age of the father")+
xlab("Fathers Age")+
ylab("Average Tobacco Use in Trimester 2")+
geom_point()
ftobacco_age2
ftobacco_age3= ggplot(ftobacco_age, aes(x=fathers_age, y=cigs_tri3_average, group=1)) +
geom_line(linetype="dashed", color="blue", size=0.2)+
labs(title ="Average Tobacco Use in the 3rd Trimester in Relation to the Age of the father")+
xlab("Fathers Age")+
ylab("Average Tobacco Use in Trimester 3")+
geom_point()
ftobacco_age3
########################
# server-side render bindings
#
# FIX(review): the original called `rendorPlot()`, which is not a function in
# shiny (or base R); every assignment below would fail at runtime with
# "could not find function \"rendorPlot\"". Corrected to shiny::renderPlot().
# The output IDs are preserved byte-for-byte -- including their original
# spelling (e.g. "realtion", "Prenetal", "Antobiotics") -- because each id
# must match the corresponding *Output() id in the UI definition.
# Non-syntactic ids are assigned via output[[ ]], the idiomatic form for
# names that are not valid R symbols.
#
# NOTE(review): the objects built with plotly's layout() (del_ed, del_age,
# the infection/medication/delivery/paternity/hispanic plots) are plotly
# htmlwidgets, not base/ggplot graphics; rendering them properly requires
# plotly::renderPlotly() plus plotlyOutput() in the UI -- confirm against the
# UI file before changing the binding type. renderPlot() is kept here as the
# minimal name fix. The ggplot objects (prenatal_*, mtobacco_*, ftobacco_*)
# render correctly with renderPlot().
#Graph 1
output[["Delivery Payment Option Based on Average Parental Education"]] <- renderPlot({
  del_ed
})
#Graph 2
output[["Delivery Payment Option Based on Average Parental Age"]] <- renderPlot({
  del_age
})
#################
#Graph 3
output[["Infection with Gonorrhea in realtion to the Average age of the parents"]] <- renderPlot({
  gonorrhea_age
})
#Graph 4
output[["Infection with Syphilis in realtion to the Average age of the parents"]] <- renderPlot({
  syphilis_age
})
#Graph 5
output[["Infection with Chlamydia in realtion to the Average age of the parents"]] <- renderPlot({
  chlamydia_age
})
#Graph 6
output[["Infection with HepB in realtion to the Average age of the parents"]] <- renderPlot({
  hepB_age
})
#Graph 7
output[["Infection with HepC in realtion to the Average age of the parents"]] <- renderPlot({
  hepC_age
})
######################
#Graph 8
output[["Infection with Gonorrhea in realtion to the average BMI/Weight of the Mother"]] <- renderPlot({
  gonorrhea_bmi
})
#Graph 9
output[["Infection with Syphilis in realtion to the average BMI/Weight of the Mother"]] <- renderPlot({
  syphilis_bmi
})
#Graph 10
output[["Infection with Chlamydia in realtion to the average BMI/Weight of the Mother"]] <- renderPlot({
  chlamydia_bmi
})
#Graph 11
output[["Infection with HepB in realtion to the average BMI/Weight of the Mother"]] <- renderPlot({
  hepB_bmi
})
#Graph 12
output[["Infection with HepC in realtion to the average BMI/Weight of the Mother"]] <- renderPlot({
  hepC_bmi
})
#################
#Graph 13
output[["Use of Steroids medication in relation to parents age"]] <- renderPlot({
  steroids_age
})
#Graph 14
output[["Use of Antobiotics medication in relation to parents age"]] <- renderPlot({
  antibiotics_age
})
#Graph 15
output[["Use of Anesthesia medication in relation to parents age"]] <- renderPlot({
  anesthesia_age
})
##################
#Graph 16
output[["Use of Steroids medication in relation to mothers BMI/Weight"]] <- renderPlot({
  steroids_bmi
})
#Graph 17
output[["Use of Antibiotics medication in relation to mothers BMI/Weight"]] <- renderPlot({
  antibiotics_bmi
})
#Graph 18
output[["Use of Anesthesia medication in relation to mothers BMI/Weight"]] <- renderPlot({
  anesthesia_bmi
})
#################
#Graph 19
output[["Final Delivery Method in relation to the average age of the parents"]] <- renderPlot({
  final_del_age
})
#Graph 20
output[["Final Delivery Method in relation to the average BMI/Weight of the mother"]] <- renderPlot({
  final_del_bmi
})
##################
#Graph 21
output[["Average Prenetal Visits in Relation to the Age of the Mother"]] <- renderPlot({
  prenatal_age_mother
})
#Graph 22
output[["Average Prenetal Visits in Relation to the Age of the Father"]] <- renderPlot({
  prenatal_age_father
})
###################
#Graph 23
output[["Paternity Acknowledgment in relation to the average of the parents age"]] <- renderPlot({
  pat_age
})
#Graph 24
output[["Paternity Acknowledgment in relation to the average BMI/Weight of the Mother"]] <- renderPlot({
  pat_bmi
})
##################
#Graph 25
output[["Hispanic Origin in relation to the Average age of the Parents"]] <- renderPlot({
  hisp_age
})
#Graph 26
output[["Hispanic Origin in relation to the Average BMI/Weight of the Mother"]] <- renderPlot({
  hisp_bmi
})
#################
#Graph 27
output[["Average Tobacco Use in the 1st Trimester in Relation to the Age of the Mother"]] <- renderPlot({
  mtobacco_age1
})
#Graph 28
output[["Average Tobacco Use in the 2nd Trimester in Relation to the Age of the Mother"]] <- renderPlot({
  mtobacco_age2
})
#Graph 29
output[["Average Tobacco Use in the 3rd Trimester in Relation to the Age of the Mother"]] <- renderPlot({
  mtobacco_age3
})
#################
#Graph 30
output[["Average Tobacco Use in the 1st Trimester in Relation to the Age of the father"]] <- renderPlot({
  ftobacco_age1
})
#Graph 31
output[["Average Tobacco Use in the 2nd Trimester in Relation to the Age of the father"]] <- renderPlot({
  ftobacco_age2
})
#Graph 32
output[["Average Tobacco Use in the 3rd Trimester in Relation to the Age of the father"]] <- renderPlot({
  ftobacco_age3
})
|
/analises_ano_a_ano_novas.R | no_license | FMAndrade/Scripts-Dissertacao | R | false | false | 4,844 | r | ||
library(dpa)
### Name: dpa.analysis.performDPA
### Title: Perfrom DPA analysis
### Aliases: dpa.analysis.performDPA
### ** Examples
#dpa.analysis.performDPA()
| /data/genthat_extracted_code/dpa/examples/dpa.analysis.performDPA.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 166 | r | library(dpa)
### Name: dpa.analysis.performDPA
### Title: Perfrom DPA analysis
### Aliases: dpa.analysis.performDPA
### ** Examples
#dpa.analysis.performDPA()
|
library(shinycssloaders)
addResourcePath("res", snap_res())
ht <- "512px"
dashboardPage(skin = "red",
dashboardHeader(
title = "JFSP",
tags$li(class = "dropdown",
tags$a(href = "http://snap.uaf.edu", target="_blank",
tags$img(src = "res/snap_acronym_white.png", width="100%", height="30px"), style = "padding: 10px; margin: 0px;"))
),
dashboardSidebar(
use_apputils(),
sidebarMenu(
id = "tabs",
menuItem("Management Cost", icon = icon("dollar"), tabName = "mc"),
menuItem("Burn Area", icon = icon("fire", lib = "glyphicon"), tabName = "ba"),
menuItem("Fire Size", icon = icon("fire", lib = "glyphicon"), tabName = "fs"),
menuItem("Vegetation", icon = icon("tree-conifer", lib = "glyphicon"), tabName = "veg"),
menuItem("Information", icon = icon("info-circle"), tabName = "info")
),
div(hr(), h4("Global options"), style = "margin:10px;"),
checkboxInput("by_rcp", "Condition on RCPs", TRUE),
checkboxInput("by_tx", "Include treatment", FALSE),
div(hr(), em("This app shows summarized results. Visit the alternate JFSP app for finer detail."),
actionButton("jfsp_full", "Detail View", onclick ="window.open('https://uasnap.shinyapps.io/jfsp/', '_blank')", width = "180px"),
style = "margin:10px;"),
dashboard_footer("http://snap.uaf.edu/", "res/snap_white.svg", "SNAP Dashboards")
),
dashboardBody(
tabItems(
tabItem(tabName = "mc",
fluidRow(column(12, em("This website was developed as part of project (#16-1-01-18) funded by the Joint Fire Science Program. If you would be interested in participating in an interview to guide the direction of the management scenarios implemented as part of this work, please contact the project PI, Courtney Schultz (courtney.schultz@colostate.edu)"))), br(),
uiOutput("mc_box"),
fluidRow(
column(3, div(selectInput("mc_domain", "Spatial domain", regions, width = "100%"), style = "height:260px;")),
column(3, checkboxGroupInput("fmo", "Fire management options", fmos[2:5], inline = TRUE, width = "100%")),
conditionalPanel("input.mc_domain === 'Alaska' && input.fmo == ''", column(3, checkboxInput("mc_obs", "Overlay mean historical cost", FALSE)))
)
),
tabItem(tabName = "ba",
fluidRow(column(12, em("This website was developed as part of project (#16-1-01-18) funded by the Joint Fire Science Program. If you would be interested in participating in an interview to guide the direction of the management scenarios implemented as part of this work, please contact the project PI, Courtney Schultz (courtney.schultz@colostate.edu)"))), br(),
fluidRow(
tabBox(
tabPanel("Variability", plotOutput("plot_bavar", height = "512px")),
tabPanel("Totals", plotOutput("plot_babox", height = ht)),
selected = "Totals", title = "Burn area totals and inter-annual variability", side = "right", width = 12, id = "tb_ba"
)
),
fluidRow(
column(3, selectInput("ba_domain", "Spatial domain", c("Alaska (ALFRESCO)", "Full vs. Critical FMO"), width = "100%")),
column(3, checkboxInput("log_bp", "Log scale box plots", TRUE)),
conditionalPanel("input.ba_domain === 'Alaska (ALFRESCO)' && input.tb_ba === 'Variability'",
column(3, checkboxInput("basd_obs", "Overlay mean historical SD", FALSE)))
)
),
tabItem(tabName = "fs",
fluidRow(column(12, em("This website was developed as part of project (#16-1-01-18) funded by the Joint Fire Science Program. If you would be interested in participating in an interview to guide the direction of the management scenarios implemented as part of this work, please contact the project PI, Courtney Schultz (courtney.schultz@colostate.edu)"))), br(),
fluidRow(
box(plotOutput("plot_fs", height = ht),
title = "Fire size distributions", width = 12)#,
#box(withSpinner(rglwidgetOutput("plot_fsrgl", width = "100%")),
# title = "Mean Fire size (Interactive 3D)", width = 4)
),
fluidRow(
column(3, checkboxInput("log_fs", "Log scale", TRUE))
)
),
tabItem(tabName = "veg",
fluidRow(column(12, em("This website was developed as part of project (#16-1-01-18) funded by the Joint Fire Science Program. If you would be interested in participating in an interview to guide the direction of the management scenarios implemented as part of this work, please contact the project PI, Courtney Schultz (courtney.schultz@colostate.edu)"))), br(),
fluidRow(
tabBox(
tabPanel("Veg map", img(src = "https://github.com/leonawicz/jfsp-archive/blob/master/plots/ak_cdratio_2000-2040.png?raw=true", width = "100%", height = "auto")),
tabPanel("Veg ratio", plotOutput("plot_cdratio", height = "512px")),
tabPanel("Burn area", plotOutput("plot_cdba", height = "512px")),
selected = "Burn area", title = "Alaska coniferous:deciduous ratios and burn area", side = "right", width = 12
)
)
),
tabItem(tabName = "info",
h2("About this application"),
HTML("This app shows summary outputs for an overview of ALFRESCO wildfire simulations. There is also a more detailed application <a href='https://uasnap.shinyapps.io/jfsp/' >here</a>, which offers less aggregated data as well as greater user customization and app features."),
#about_app,
h2("Frequently asked questions"),
h4("Placeholder"),
h5("This is where"),
h6("FAQ information goes..."),
"Other widgets available but not shown.",
#faq(faqs, bscollapse_args = list(id = "faq", open = "apps"), showcase_args = list(drop = "climdist")),
contactinfo(snap = "res/snap_color.svg", iarc = "res/iarc.jpg", uaf = "res/uaf.png"), br()
)
)
),
title = "JFSP"
)
| /docs/jfsp/ui.R | no_license | leonawicz/jfsp-archive | R | false | false | 6,321 | r | library(shinycssloaders)
addResourcePath("res", snap_res())
ht <- "512px"
dashboardPage(skin = "red",
dashboardHeader(
title = "JFSP",
tags$li(class = "dropdown",
tags$a(href = "http://snap.uaf.edu", target="_blank",
tags$img(src = "res/snap_acronym_white.png", width="100%", height="30px"), style = "padding: 10px; margin: 0px;"))
),
dashboardSidebar(
use_apputils(),
sidebarMenu(
id = "tabs",
menuItem("Management Cost", icon = icon("dollar"), tabName = "mc"),
menuItem("Burn Area", icon = icon("fire", lib = "glyphicon"), tabName = "ba"),
menuItem("Fire Size", icon = icon("fire", lib = "glyphicon"), tabName = "fs"),
menuItem("Vegetation", icon = icon("tree-conifer", lib = "glyphicon"), tabName = "veg"),
menuItem("Information", icon = icon("info-circle"), tabName = "info")
),
div(hr(), h4("Global options"), style = "margin:10px;"),
checkboxInput("by_rcp", "Condition on RCPs", TRUE),
checkboxInput("by_tx", "Include treatment", FALSE),
div(hr(), em("This app shows summarized results. Visit the alternate JFSP app for finer detail."),
actionButton("jfsp_full", "Detail View", onclick ="window.open('https://uasnap.shinyapps.io/jfsp/', '_blank')", width = "180px"),
style = "margin:10px;"),
dashboard_footer("http://snap.uaf.edu/", "res/snap_white.svg", "SNAP Dashboards")
),
dashboardBody(
tabItems(
tabItem(tabName = "mc",
fluidRow(column(12, em("This website was developed as part of project (#16-1-01-18) funded by the Joint Fire Science Program. If you would be interested in participating in an interview to guide the direction of the management scenarios implemented as part of this work, please contact the project PI, Courtney Schultz (courtney.schultz@colostate.edu)"))), br(),
uiOutput("mc_box"),
fluidRow(
column(3, div(selectInput("mc_domain", "Spatial domain", regions, width = "100%"), style = "height:260px;")),
column(3, checkboxGroupInput("fmo", "Fire management options", fmos[2:5], inline = TRUE, width = "100%")),
conditionalPanel("input.mc_domain === 'Alaska' && input.fmo == ''", column(3, checkboxInput("mc_obs", "Overlay mean historical cost", FALSE)))
)
),
tabItem(tabName = "ba",
fluidRow(column(12, em("This website was developed as part of project (#16-1-01-18) funded by the Joint Fire Science Program. If you would be interested in participating in an interview to guide the direction of the management scenarios implemented as part of this work, please contact the project PI, Courtney Schultz (courtney.schultz@colostate.edu)"))), br(),
fluidRow(
tabBox(
tabPanel("Variability", plotOutput("plot_bavar", height = "512px")),
tabPanel("Totals", plotOutput("plot_babox", height = ht)),
selected = "Totals", title = "Burn area totals and inter-annual variability", side = "right", width = 12, id = "tb_ba"
)
),
fluidRow(
column(3, selectInput("ba_domain", "Spatial domain", c("Alaska (ALFRESCO)", "Full vs. Critical FMO"), width = "100%")),
column(3, checkboxInput("log_bp", "Log scale box plots", TRUE)),
conditionalPanel("input.ba_domain === 'Alaska (ALFRESCO)' && input.tb_ba === 'Variability'",
column(3, checkboxInput("basd_obs", "Overlay mean historical SD", FALSE)))
)
),
tabItem(tabName = "fs",
fluidRow(column(12, em("This website was developed as part of project (#16-1-01-18) funded by the Joint Fire Science Program. If you would be interested in participating in an interview to guide the direction of the management scenarios implemented as part of this work, please contact the project PI, Courtney Schultz (courtney.schultz@colostate.edu)"))), br(),
fluidRow(
box(plotOutput("plot_fs", height = ht),
title = "Fire size distributions", width = 12)#,
#box(withSpinner(rglwidgetOutput("plot_fsrgl", width = "100%")),
# title = "Mean Fire size (Interactive 3D)", width = 4)
),
fluidRow(
column(3, checkboxInput("log_fs", "Log scale", TRUE))
)
),
tabItem(tabName = "veg",
fluidRow(column(12, em("This website was developed as part of project (#16-1-01-18) funded by the Joint Fire Science Program. If you would be interested in participating in an interview to guide the direction of the management scenarios implemented as part of this work, please contact the project PI, Courtney Schultz (courtney.schultz@colostate.edu)"))), br(),
fluidRow(
tabBox(
tabPanel("Veg map", img(src = "https://github.com/leonawicz/jfsp-archive/blob/master/plots/ak_cdratio_2000-2040.png?raw=true", width = "100%", height = "auto")),
tabPanel("Veg ratio", plotOutput("plot_cdratio", height = "512px")),
tabPanel("Burn area", plotOutput("plot_cdba", height = "512px")),
selected = "Burn area", title = "Alaska coniferous:deciduous ratios and burn area", side = "right", width = 12
)
)
),
tabItem(tabName = "info",
h2("About this application"),
HTML("This app shows summary outputs for an overview of ALFRESCO wildfire simulations. There is also a more detailed application <a href='https://uasnap.shinyapps.io/jfsp/' >here</a>, which offers less aggregated data as well as greater user customization and app features."),
#about_app,
h2("Frequently asked questions"),
h4("Placeholder"),
h5("This is where"),
h6("FAQ information goes..."),
"Other widgets available but not shown.",
#faq(faqs, bscollapse_args = list(id = "faq", open = "apps"), showcase_args = list(drop = "climdist")),
contactinfo(snap = "res/snap_color.svg", iarc = "res/iarc.jpg", uaf = "res/uaf.png"), br()
)
)
),
title = "JFSP"
)
|
library(shiny)
library(bs4Dash)
library(highcharter)
library(tidyverse)
library(lubridate)
library(RcppRoll)
library(scales)
library(shinyWidgets) # spinner
library(geojsonio)
library(scales)
library(mindicador) # insta
library(markdown)
library(forecast)
# library(here)
source("R/helpers-shiny.R", encoding = "utf-8")
source("R/helpers-data.R", encoding = "utf-8")
source("R/helpers-graficos.R", encoding = "UTF-8")
source("R/helpers-vb.R", encoding = "utf-8")
source("R/helpers-series.R", encoding = "utf-8")
PARS <- list(
debug = FALSE,
classcol = "col-xg-2 col-lg-2 col-md-6 col-sm-12",
color = list(
sparkline = "#F4F6F9", # color de fondo de value boxes "blancos"
primary = "#007bff",
danger = "#DC3545",
gray = "#C0C0C0"
),
hc = list(
duration = 2500
),
font = '-apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Helvetica, Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol"'
)
Sys.setlocale("LC_ALL", "Spanish_Spain.1252")
# Sys.setlocale("LC_ALL","English")
# f <- Sys.Date()
# dias <- weekdays((f - lubridate::days(lubridate::wday(f) - 1)) + lubridate::days(0:6))
newlang_opts <- getOption("highcharter.lang")
newlang_opts$weekdays <- c("domingo", "lunes", "martes", "miércoles", "jueves", "viernes", "sábado")
newlang_opts$months <- c("enero", "febrero", "marzo", "abril", "mayo", "junio", "julio",
"agosto", "septiembre", "octubre", "noviembre", "diciembre")
newlang_opts$shortMonths <- c("ene", "feb", "mar", "abr", "may", "jun", "jul", "ago", "sep",
"oct", "nov", "dic")
newlang_opts$thousandsSep <- "."
newlang_opts$decimalPoint <- ","
options(
highcharter.lang = newlang_opts,
highcharter.google_fonts = FALSE,
highcharter.theme =
hc_theme_smpl(
title = list(
style = list(fontSize = "1.2em", fontFamily = PARS$font)
),
subtitle = list(
style = list(fontFamily = PARS$font, fontSize = "0.85em")
),
xAxis = list(
title = list(
align = "high",
style = list(
fontSize = "0.85em"
)
)
),
yAxis = list(
title = list(
align = "high",
style = list(
fontSize = "0.85em"
)
)
),
chart = list(
backgroundColor = "white",
style = list(fontFamily = PARS$font, fontSize = "1.0em")
),
plotOptions = list(
series = list(
dataLabels = list(color = "#222d32", style = list(fontWeight = "normal", textShadow = FALSE, textOutline = FALSE)),
animation = list(duration = PARS$hc$duration)
),
line = list(
lineWidth = 4
),
arearange = list(
lineWidth = 1,
fillOpacity = 0.25
)
),
exporting = list(
buttons = list(
contextButton = list(
symbol = 'url(https://www.iconsdb.com/icons/preview/gray/download-2-xxl.png)',
symbolSize = 18,
symbolX = 21,
symbolY = 20,
titleKey = "Descargar",
y = -05
)
)
),
tooltip = list(
useHTML = TRUE
),
legend = list(
verticalAlign = "top",
align = "left",
itemStyle = list(
fontWeight = "normal"
)
)
)
)
| /global.R | no_license | alonsosilvaallende/Dashboard-Covid19 | R | false | false | 3,466 | r | library(shiny)
library(bs4Dash)
library(highcharter)
library(tidyverse)
library(lubridate)
library(RcppRoll)
library(scales)
library(shinyWidgets) # spinner
library(geojsonio)
library(scales)
library(mindicador) # insta
library(markdown)
library(forecast)
# library(here)
source("R/helpers-shiny.R", encoding = "utf-8")
source("R/helpers-data.R", encoding = "utf-8")
source("R/helpers-graficos.R", encoding = "UTF-8")
source("R/helpers-vb.R", encoding = "utf-8")
source("R/helpers-series.R", encoding = "utf-8")
PARS <- list(
debug = FALSE,
classcol = "col-xg-2 col-lg-2 col-md-6 col-sm-12",
color = list(
sparkline = "#F4F6F9", # color de fondo de value boxes "blancos"
primary = "#007bff",
danger = "#DC3545",
gray = "#C0C0C0"
),
hc = list(
duration = 2500
),
font = '-apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Helvetica, Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol"'
)
Sys.setlocale("LC_ALL", "Spanish_Spain.1252")
# Sys.setlocale("LC_ALL","English")
# f <- Sys.Date()
# dias <- weekdays((f - lubridate::days(lubridate::wday(f) - 1)) + lubridate::days(0:6))
newlang_opts <- getOption("highcharter.lang")
newlang_opts$weekdays <- c("domingo", "lunes", "martes", "miércoles", "jueves", "viernes", "sábado")
newlang_opts$months <- c("enero", "febrero", "marzo", "abril", "mayo", "junio", "julio",
"agosto", "septiembre", "octubre", "noviembre", "diciembre")
newlang_opts$shortMonths <- c("ene", "feb", "mar", "abr", "may", "jun", "jul", "ago", "sep",
"oct", "nov", "dic")
newlang_opts$thousandsSep <- "."
newlang_opts$decimalPoint <- ","
options(
highcharter.lang = newlang_opts,
highcharter.google_fonts = FALSE,
highcharter.theme =
hc_theme_smpl(
title = list(
style = list(fontSize = "1.2em", fontFamily = PARS$font)
),
subtitle = list(
style = list(fontFamily = PARS$font, fontSize = "0.85em")
),
xAxis = list(
title = list(
align = "high",
style = list(
fontSize = "0.85em"
)
)
),
yAxis = list(
title = list(
align = "high",
style = list(
fontSize = "0.85em"
)
)
),
chart = list(
backgroundColor = "white",
style = list(fontFamily = PARS$font, fontSize = "1.0em")
),
plotOptions = list(
series = list(
dataLabels = list(color = "#222d32", style = list(fontWeight = "normal", textShadow = FALSE, textOutline = FALSE)),
animation = list(duration = PARS$hc$duration)
),
line = list(
lineWidth = 4
),
arearange = list(
lineWidth = 1,
fillOpacity = 0.25
)
),
exporting = list(
buttons = list(
contextButton = list(
symbol = 'url(https://www.iconsdb.com/icons/preview/gray/download-2-xxl.png)',
symbolSize = 18,
symbolX = 21,
symbolY = 20,
titleKey = "Descargar",
y = -05
)
)
),
tooltip = list(
useHTML = TRUE
),
legend = list(
verticalAlign = "top",
align = "left",
itemStyle = list(
fontWeight = "normal"
)
)
)
)
|
library(shiny)
# Single-page inventory UI: product picker on top, forecast-technique controls
# and derived inventory numbers on the left, demand plot beside them, and a
# tabbed panel (forecasts / error rates / plots / data) underneath.
shinyUI(fluidPage(
  headerPanel(span("Inventory Management System", style = "color:blue")),br(),
  # Product catalogue; the values are the numeric product ids used server-side.
  selectInput(inputId = "product_id", label = h4("Select Product:"),
              c("Apple Juice" = 1,
                "Mango" = 2,
                "Strawberry Candy" = 3,
                "Coke" = 4,
                "Potato" = 5,
                "Basketball" = 6,
                "Chair"= 7,
                "Macbook" = 8,
                "Iphone6" = 9)), br(),
  titlePanel(h3(textOutput("product_text"))), br(),
  # Left panel: forecasting technique choice plus server-computed
  # lead time, safety stock and reorder point.
  sidebarPanel(
    radioButtons("method", h4( "Forecast Technique: ", style = "color:blue"),
                 c("Naive" = "naive",
                   "Moving Average" = "ma",
                   "Exponential Smoothing" = "es")),
    br(),
    h4("Calculation", style = "color:blue"), br(),
    strong(textOutput("lead_time")),br(),
    strong(textOutput("safety_stock")),br(),
    strong(textOutput("reorder_point")),br(),
    class = 'leftAlign'
  ),
  # Main demand plot for the currently selected product.
  sidebarPanel(
    plotOutput("product_plot"),
    width = 8,
    class = 'leftAlign'
  ),
  mainPanel(
    # Each forecasting technique is always shown in the Forecast / Error
    # rates / Plots tabs, regardless of the radio-button selection.
    tabsetPanel(type = "tabs",
                tabPanel("Forecast",
                         strong("Naive"),verbatimTextOutput("forecast_naive_output"),
                         strong("Moving Average"),verbatimTextOutput("forecast_sma_output"),
                         strong("Exponential Smoothing"),verbatimTextOutput("forecast_es_output")
                ),
                tabPanel("Error rates",
                         strong("Naive"),verbatimTextOutput("forecast_naive_accuracy"),
                         strong("Moving Average"), verbatimTextOutput("forecast_sma_accuracy"),
                         strong("Exponential Smoothing"), verbatimTextOutput("forecast_es_accuracy")
                ),
                tabPanel("Plots",
                         plotOutput("naive_plot"),
                         plotOutput("sma_plot"),
                         plotOutput("es_plot"),
                         class = 'rightAlign'),
                tabPanel("Data",
                         dataTableOutput("product_dataHead")
                )
    )
  )
)
) | /ui.R | no_license | jamesliao2016/Inventory-Management-System-DEMO-3 | R | false | false | 2,219 | r | library(shiny)
shinyUI(fluidPage(
headerPanel(span("Inventory Management System", style = "color:blue")),br(),
selectInput(inputId = "product_id", label = h4("Select Product:"),
c("Apple Juice" = 1,
"Mango" = 2,
"Strawberry Candy" = 3,
"Coke" = 4,
"Potato" = 5,
"Basketball" = 6,
"Chair"= 7,
"Macbook" = 8,
"Iphone6" = 9)), br(),
titlePanel(h3(textOutput("product_text"))), br(),
sidebarPanel(
radioButtons("method", h4( "Forecast Technique: ", style = "color:blue"),
c("Naive" = "naive",
"Moving Average" = "ma",
"Exponential Smoothing" = "es")),
br(),
h4("Calculation", style = "color:blue"), br(),
strong(textOutput("lead_time")),br(),
strong(textOutput("safety_stock")),br(),
strong(textOutput("reorder_point")),br(),
class = 'leftAlign'
),
sidebarPanel(
plotOutput("product_plot"),
width = 8,
class = 'leftAlign'
),
mainPanel(
tabsetPanel(type = "tabs",
tabPanel("Forecast",
strong("Naive"),verbatimTextOutput("forecast_naive_output"),
strong("Moving Average"),verbatimTextOutput("forecast_sma_output"),
strong("Exponential Smoothing"),verbatimTextOutput("forecast_es_output")
),
tabPanel("Error rates",
strong("Naive"),verbatimTextOutput("forecast_naive_accuracy"),
strong("Moving Average"), verbatimTextOutput("forecast_sma_accuracy"),
strong("Exponential Smoothing"), verbatimTextOutput("forecast_es_accuracy")
),
tabPanel("Plots",
plotOutput("naive_plot"),
plotOutput("sma_plot"),
plotOutput("es_plot"),
class = 'rightAlign'),
tabPanel("Data",
dataTableOutput("product_dataHead")
)
)
)
)
) |
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 32651
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 31643
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 31643
c
c Input Parameter (command line, file):
c input filename QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/nreachq_query10_1344.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 6547
c no.of clauses 32651
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 31643
c
c QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/nreachq_query10_1344.qdimacs 6547 32651 E1 [19 62 104 6015 6016 6017 6018 6019 6020 6021 6022 6023 6024 6025 6026 6027 6028 6029 6030 6031 6032 6033 6034 6035 6036 6037 6038 6039 6040 6041 6042 6043 6044 6045 6046 6047 6048 6049 6050 6051 6052 6053 6054 6055 6056 6057 6058 6059 6060 6061 6062 6063 6064 6065 6066 6067 6068 6069 6070 6071 6072 6073 6074 6075 6076 6077 6078 6079 6080 6081 6082 6083 6084 6085 6086 6087 6088 6089 6090 6091 6092 6093 6094 6095 6096 6097 6098 6099 6100 6101 6102 6104 6105 6106 6107 6108 6109 6110 6111 6113 6114 6115 6117 6118 6119 6120 6121 6123 6124 6126 6127 6129 6130 6132 6133 6135 6136 6138 6139 6141 6142 6144 6145 6148 6149 6150 6151 6152 6153 6154 6155 6156 6157 6158 6159 6160 6161 6162 6163 6164 6165 6166 6167 6168 6169 6170 6171 6172 6173 6174 6175 6176 6177 6178 6179 6180 6181 6182 6183 6184 6185 6186 6187 6188 6189 6190 6191 6192 6193 6194 6195 6196 6197 6198 6199 6200 6201 6202 6203 6204 6205 6206 6207 6208 6209 6210 6211 6212 6213 6214 6215 6216 6217 6218 6219 6220 6221 6222 6223 6224 6225 6226 6227 6228 6229 6230 6231 6232 6233 6234 6235 6237 6238 6239 6240 6241 6242 6243 6244 6246 6247 6248 6250 6251 6252 6253 6254 6256 6257 6259 6260 6262 6263 6265 6266 6268 6269 6271 6272 6274 6275 6277 6278 6281 6282 6283 6284 6285 6286 6287 6288 6289 6290 6291 6292 6293 6294 6295 6296 6297 6298 6299 6300 6301 6302 6303 6304 6305 6306 6307 6308 6309 6310 6311 6312 6313 6314 6315 6316 6317 6318 6319 6320 6321 6322 6323 6324 6325 6326 6327 6328 6329 6330 6331 6332 6333 6334 6335 6336 6337 6338 6339 6340 6341 6342 6343 6344 6345 6346 6347 6348 6349 6350 6351 6352 6353 6354 6355 6356 6357 6358 6359 6360 6361 6362 6363 6364 6365 6366 6367 6368 6370 6371 6372 6373 6374 6375 6376 6377 6379 6380 6381 6383 6384 6385 6386 6387 6389 6390 6392 6393 6395 6396 6398 6399 6401 6402 6404 6405 6407 6408 6410 6411 6414 6415 6416 6417 6418 6419 6420 6421 6422 6423 6424 6425 6426 6427 6428 6429 
6430 6431 6432 6433 6434 6435 6436 6437 6438 6439 6440 6441 6442 6443 6444 6445 6446 6447 6448 6449 6450 6451 6452 6453 6454 6455 6456 6457 6458 6459 6460 6461 6462 6463 6464 6465 6466 6467 6468 6469 6470 6471 6472 6473 6474 6475 6476 6477 6478 6479 6480 6481 6482 6483 6484 6485 6486 6487 6488 6489 6490 6491 6492 6493 6494 6495 6496 6497 6498 6499 6500 6501 6503 6504 6505 6506 6507 6508 6509 6510 6512 6513 6514 6516 6517 6518 6519 6520 6522 6523 6525 6526 6528 6529 6531 6532 6534 6535 6537 6538 6540 6541 6543 6544] 0 16 6045 31643 RED
| /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/nreachq_query10_1344/nreachq_query10_1344.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 3,213 | r | c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 32651
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 31643
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 31643
c
c Input Parameter (command line, file):
c input filename QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/nreachq_query10_1344.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 6547
c no.of clauses 32651
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 31643
c
c QBFLIB/Jordan-Kaiser/reduction-finding-full-set-params-k1c3n4/nreachq_query10_1344.qdimacs 6547 32651 E1 [19 62 104 6015 6016 6017 6018 6019 6020 6021 6022 6023 6024 6025 6026 6027 6028 6029 6030 6031 6032 6033 6034 6035 6036 6037 6038 6039 6040 6041 6042 6043 6044 6045 6046 6047 6048 6049 6050 6051 6052 6053 6054 6055 6056 6057 6058 6059 6060 6061 6062 6063 6064 6065 6066 6067 6068 6069 6070 6071 6072 6073 6074 6075 6076 6077 6078 6079 6080 6081 6082 6083 6084 6085 6086 6087 6088 6089 6090 6091 6092 6093 6094 6095 6096 6097 6098 6099 6100 6101 6102 6104 6105 6106 6107 6108 6109 6110 6111 6113 6114 6115 6117 6118 6119 6120 6121 6123 6124 6126 6127 6129 6130 6132 6133 6135 6136 6138 6139 6141 6142 6144 6145 6148 6149 6150 6151 6152 6153 6154 6155 6156 6157 6158 6159 6160 6161 6162 6163 6164 6165 6166 6167 6168 6169 6170 6171 6172 6173 6174 6175 6176 6177 6178 6179 6180 6181 6182 6183 6184 6185 6186 6187 6188 6189 6190 6191 6192 6193 6194 6195 6196 6197 6198 6199 6200 6201 6202 6203 6204 6205 6206 6207 6208 6209 6210 6211 6212 6213 6214 6215 6216 6217 6218 6219 6220 6221 6222 6223 6224 6225 6226 6227 6228 6229 6230 6231 6232 6233 6234 6235 6237 6238 6239 6240 6241 6242 6243 6244 6246 6247 6248 6250 6251 6252 6253 6254 6256 6257 6259 6260 6262 6263 6265 6266 6268 6269 6271 6272 6274 6275 6277 6278 6281 6282 6283 6284 6285 6286 6287 6288 6289 6290 6291 6292 6293 6294 6295 6296 6297 6298 6299 6300 6301 6302 6303 6304 6305 6306 6307 6308 6309 6310 6311 6312 6313 6314 6315 6316 6317 6318 6319 6320 6321 6322 6323 6324 6325 6326 6327 6328 6329 6330 6331 6332 6333 6334 6335 6336 6337 6338 6339 6340 6341 6342 6343 6344 6345 6346 6347 6348 6349 6350 6351 6352 6353 6354 6355 6356 6357 6358 6359 6360 6361 6362 6363 6364 6365 6366 6367 6368 6370 6371 6372 6373 6374 6375 6376 6377 6379 6380 6381 6383 6384 6385 6386 6387 6389 6390 6392 6393 6395 6396 6398 6399 6401 6402 6404 6405 6407 6408 6410 6411 6414 6415 6416 6417 6418 6419 6420 6421 6422 6423 6424 6425 6426 6427 6428 6429 
6430 6431 6432 6433 6434 6435 6436 6437 6438 6439 6440 6441 6442 6443 6444 6445 6446 6447 6448 6449 6450 6451 6452 6453 6454 6455 6456 6457 6458 6459 6460 6461 6462 6463 6464 6465 6466 6467 6468 6469 6470 6471 6472 6473 6474 6475 6476 6477 6478 6479 6480 6481 6482 6483 6484 6485 6486 6487 6488 6489 6490 6491 6492 6493 6494 6495 6496 6497 6498 6499 6500 6501 6503 6504 6505 6506 6507 6508 6509 6510 6512 6513 6514 6516 6517 6518 6519 6520 6522 6523 6525 6526 6528 6529 6531 6532 6534 6535 6537 6538 6540 6541 6543 6544] 0 16 6045 31643 RED
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MultiGSEAResult-methods.R
\name{geneSetsStats}
\alias{geneSetsStats}
\title{Summarizes useful statistics per gene set from a MultiGSEAResult}
\usage{
geneSetsStats(
x,
feature.min.logFC = 1,
feature.max.padj = 0.1,
trim = 0.1,
reannotate.significance = FALSE,
as.dt = FALSE
)
}
\arguments{
\item{x}{A \code{MultiGSEAResult} object}
\item{feature.min.logFC}{used with \code{feature.max.padj} to identify
the individual features that are to be considered differentially
expressed.}
\item{feature.max.padj}{used with \code{feature.min.logFC} to identify
the individual features that are to be considered differentially
expressed.}
\item{trim}{The amount to trim when calculating trimmed \code{t} and
\code{logFC} statistics for each geneset.}
}
\value{
A data.table with statistics at the gene set level across the
prescribed contrast run on \code{x}. These statistics are independent
of any particular GSEA method, but rather summarize aggregate shifts
of the gene sets individual features. The columns included in the output
are summarized below:
\itemize{
\item \code{n.sig}: The number of individual features whose \code{abs(logFC)} and padj
thresholds satisfy the criteria of the \code{feature.min.logFC} and
\code{feature.max.padj} parameters of the original \code{\link[=multiGSEA]{multiGSEA()}} call
\item \code{n.neutral}: The number of individual features whose abs(logFC) and padj
thresholds do not satisfy the \verb{feature.*} criteria named above.
\item \verb{n.up, n.down}: The number of individual features with \code{logFC > 0} or
\code{logFC < 0}, respectively, irrespective of the \verb{feature.*} thresholds
referenced above.
\item \verb{n.sig.up, n.sig.down}: The number of individual features that pass the
\verb{feature.*} thresholds and have logFC > 0 or logFC < 0, respectively.
\item \verb{mean.logFC, mean.logFC.trim}: The mean (or trimmed mean) of the individual
logFC estimates for the features in the gene set. The amount of trim is
specified in the \code{trim} parameter of the \code{\link[=multiGSEA]{multiGSEA()}} call.
\item \verb{mean.t, mean.t.trim}: The mean (or trimmed mean) of the individual
t-statistics for the features in the gene sets. These are \code{NA} if the input
expression object was a \code{DGEList}.
}
}
\description{
This function calculates the number of genes that move up/down for the
given contrasts, as well as mean and trimmed mean of the logFC and
t-statistics. Note that the statistics calculated and returned here are
purely a function of the statistics generated at the gene-level stage
of the analysis.
}
\examples{
vm <- exampleExpressionSet(do.voom=TRUE)
gdb <- exampleGeneSetDb()
mg <- multiGSEA(gdb, vm, vm$design, 'tumor')
head(geneSetsStats(mg))
}
| /man/geneSetsStats.Rd | permissive | lianos/multiGSEA | R | false | true | 2,807 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/MultiGSEAResult-methods.R
\name{geneSetsStats}
\alias{geneSetsStats}
\title{Summarizes useful statistics per gene set from a MultiGSEAResult}
\usage{
geneSetsStats(
x,
feature.min.logFC = 1,
feature.max.padj = 0.1,
trim = 0.1,
reannotate.significance = FALSE,
as.dt = FALSE
)
}
\arguments{
\item{x}{A \code{MultiGSEAResult} object}
\item{feature.min.logFC}{used with \code{feature.max.padj} to identify
the individual features that are to be considered differentially
expressed.}
\item{feature.max.padj}{used with \code{feature.min.logFC} to identify
the individual features that are to be considered differentially
expressed.}
\item{trim}{The amount to trim when calculating trimmed \code{t} and
\code{logFC} statistics for each geneset.}
}
\value{
A data.table with statistics at the gene set level across the
prescribed contrast run on \code{x}. These statistics are independent
of any particular GSEA method, but rather summarize aggregate shifts
of the gene sets individual features. The columns included in the output
are summarized below:
\itemize{
\item \code{n.sig}: The number of individual features whose \code{abs(logFC)} and padj
thresholds satisfy the criteria of the \code{feature.min.logFC} and
\code{feature.max.padj} parameters of the original \code{\link[=multiGSEA]{multiGSEA()}} call
\item \code{n.neutral}: The number of individual features whose abs(logFC) and padj
thresholds do not satisfy the \verb{feature.*} criteria named above.
\item \verb{n.up, n.down}: The number of individual features with \code{logFC > 0} or
\code{logFC < 0}, respectively, irrespective of the \verb{feature.*} thresholds
referenced above.
\item \verb{n.sig.up, n.sig.down}: The number of individual features that pass the
\verb{feature.*} thresholds and have logFC > 0 or logFC < 0, respectively.
\item \verb{mean.logFC, mean.logFC.trim}: The mean (or trimmed mean) of the individual
logFC estimates for the features in the gene set. The amount of trim is
specified in the \code{trim} parameter of the \code{\link[=multiGSEA]{multiGSEA()}} call.
\item \verb{mean.t, mean.t.trim}: The mean (or trimmed mean) of the individual
t-statistics for the features in the gene sets. These are \code{NA} if the input
expression object was a \code{DGEList}.
}
}
\description{
This function calculates the number of genes that move up/down for the
given contrasts, as well as mean and trimmed mean of the logFC and
t-statistics. Note that the statistics calculated and returned here are
purely a function of the statistics generated at the gene-level stage
of the analysis.
}
\examples{
vm <- exampleExpressionSet(do.voom=TRUE)
gdb <- exampleGeneSetDb()
mg <- multiGSEA(gdb, vm, vm$design, 'tumor')
head(geneSetsStats(mg))
}
|
# Combine and Tidy Data Files
# Eliana Marostica, Maria Nakhoul, Sunny Mahesh
# May 4, 2019
#
# Reads the CDC occupant-death-rate CSV (and, below, several IIHS
# spreadsheets), reshapes everything into tidy long format, and saves the
# derived tables into data/deathrate.Rdata for the visualization app.
#Might need to install this package
#install.packages("rio")
library(tidyverse)
library(rio)
## CDC DEATH RATES DATA
#change this
ocdr_orig <- read_csv("data/Motor_Vehicle_Occupant_Death_Rate__by_Age_and_Gender__2012___2014__All_States.csv")
# The plotly ag-exports CSV is used only as a state-name -> two-letter-code
# lookup table (fetched twice: once kept whole as `df`, once trimmed).
df <- read.csv("https://raw.githubusercontent.com/plotly/datasets/master/2011_us_ag_exports.csv")
codes <- read.csv("https://raw.githubusercontent.com/plotly/datasets/master/2011_us_ag_exports.csv") %>%
  dplyr::select(code, state)
# Long format: one row per state x (age-or-gender group, year), with the
# "Group, Year" column headers split into separate Var and Year columns.
ocdr <- ocdr_orig %>%
  mutate(code = codes$code[match(ocdr_orig$State,codes$state)]) %>%
  gather(`All Ages, 2012`, `All Ages, 2014`, `Age 0-20, 2012`,`Age 0-20, 2014`,`Age 21-34, 2012`,`Age 21-34, 2014`,
         `Age 35-54, 2012`,`Age 35-54, 2014`,`Age 55+, 2012`,`Age 55+, 2014`, `Male, 2012`,`Male, 2014`,
         `Female, 2012`,`Female, 2014`, key="Var and Year", value="Death_Rate") %>%
  separate(`Var and Year`, into = c("Var", "Year"), sep=", ") %>%
  filter(!is.na(State))
# Drop the national aggregate rows; only state-level values are plotted.
us_ind <- which(ocdr$State=="United States")
ocdr <- ocdr[-us_ind,]
# Gender-only view (Age fixed to "All Ages"); appended to the age view below.
ocdr_gender <- ocdr_orig %>%
  mutate(code = codes$code[match(ocdr_orig$State,codes$state)]) %>%
  filter(!is.na(State)) %>%
  gather(`Male, 2012`,`Male, 2014`, `Female, 2012`,`Female, 2014`, key="GenderYr",value="Death_Rate") %>%
  separate(GenderYr, into = c("Gender", "Year"), sep=", ") %>%
  mutate(Age = "All Ages") %>%
  dplyr::select(State,Location,code,Age,Gender,Year,Death_Rate)
# Age view (Gender fixed to "All Genders") stacked with the gender view:
# one tidy table keyed by State/Age/Year/Gender.
ocdr_tidy <- ocdr_orig %>%
  mutate(code = codes$code[match(ocdr_orig$State,codes$state)]) %>%
  filter(!is.na(State)) %>%
  gather(`All Ages, 2012`, `All Ages, 2014`, `Age 0-20, 2012`,`Age 0-20, 2014`,`Age 21-34, 2012`,`Age 21-34, 2014`,
         `Age 35-54, 2012`,`Age 35-54, 2014`,`Age 55+, 2012`,`Age 55+, 2014`, key="AgeYr",value="Death_Rate") %>%
  separate(AgeYr, into = c("Age", "Year"), sep=", ") %>%
  mutate(Gender = "All Genders") %>%
  dplyr::select(State,Location,code,Age,Year,Gender,Death_Rate) %>%
  rbind(ocdr_gender)
##---
## Insurance Institute for Highway Safety Highway Loss Data Institute DATA
# Each .xlsx holds one sheet per year (2017 down to 2005); import_list()
# stacks the sheets row-wise, so (by assumption) every 52nd row is a sheet's
# "U.S. total" line -- TODO confirm against the raw spreadsheets.
fatal_car_crashes<-import_list("data/fatal car_crash.xlsx",setclass = "tbl",rbind = TRUE)
deaths_by_road_users<-import_list("data/Deaths by road users.xlsx",setclass="tbl",rbind=T)
# Positions of the 13 per-year totals rows (52, 104, ..., 676).
indicies=c()
for( i in 1:13){
  indicies[i]=52*i
}
# Split into state-level rows vs. the national totals used for the line graph.
deaths_car_crashes=fatal_car_crashes[-indicies,]
line_graph<-fatal_car_crashes[indicies,]
colnames(line_graph)=c("State","Population","Deaths","Deaths_per_100000_Population","Year")
line_graph$Year=c("2017","2016","2015","2014","2013","2012","2011","2010","2009","2008","2007","2006","2005")
na_indicies=which(is.na(deaths_by_road_users$State))
deaths_by_road_users=deaths_by_road_users[-na_indicies,]
# NOTE(review): `indicies` was computed for the original 52-rows-per-sheet
# layout, but NA rows were removed on the line above, which shifts row
# positions -- verify the totals rows are still the ones being dropped here.
road_users_deaths=deaths_by_road_users[-indicies,]
# 51 jurisdictions (50 states + DC) per year, 2017 back to 2005.
year=c(rep(2017,51),rep(2016,51),rep(2015,51),rep(2014,51),rep(2013,51),rep(2012,51),rep(2011,51),rep(2010,51),rep(2009,51),rep(2008,51),rep(2007,51),rep(2006,51),rep(2005,51))
deaths_car_crashes=deaths_car_crashes[,1:4]
deaths_car_crashes$Year=year
seatbelt<-import_list("data/percent_of_seatbelt_use.xlsx",setclass = "tbl",rbind = TRUE)
# Bare expression: prints the tibble when run interactively; no other effect.
seatbelt
# Reassigned with the identical value as above.
year=c(rep(2017,51),rep(2016,51),rep(2015,51),rep(2014,51),rep(2013,51),rep(2012,51),rep(2011,51),rep(2010,51),rep(2009,51),rep(2008,51),rep(2007,51),rep(2006,51),rep(2005,51))
# The seatbelt and DUI files only cover 2017-2009, hence the shorter vector.
year2=c(rep(2017,51),rep(2016,51),rep(2015,51),rep(2014,51),rep(2013,51),rep(2012,51),rep(2011,51),rep(2010,51),rep(2009,51))
seatbelt$`_file`=year2
colnames(seatbelt)=c("State","Percentage_of_observed_seatbelt_use","Year")
deaths<-deaths_car_crashes%>%filter(Year!="2008",Year!="2007",Year!="2006",Year!="2005")%>%dplyr::select(`Deaths`,`Year`,`State`)
seatbelt_with_deaths<-inner_join(seatbelt,deaths,by=c("Year","State"))
dui<-import_list("data/DUI.xlsx",setclass = "tbl",rbind = TRUE)
# NOTE(review): `indicies` assumes 13 sheets of 52 rows, but the DUI years
# assigned below suggest only 9 sheets; out-of-range subscripts yield NA rows
# which the filter below then discards -- confirm this is intended.
US_total_DUI<-dui[indicies,]
dui<-dui[-indicies,]
dui$`_file`<-year2
US_total_DUI<-US_total_DUI%>%filter(!is.na(State))
US_total_DUI$`_file`<-c("2017","2016","2015","2014","2013","2012","2011","2010","2009")
colnames(US_total_DUI)<-c("State","Total","Year")
colnames(dui)<-c("State","Total","Year")
state_code<-ocdr_tidy%>%dplyr::select(`State`,`code`)%>%unique()
# Insert "DC" (absent from the plotly lookup) at its alphabetical position;
# the 51-element vector is recycled across the per-year row blocks below.
codes<-c(as.character(df$code[1:8]),"DC",as.character(df$code[9:50]))
road_users_deaths$code=codes
deaths_car_crashes$code=codes
# Pre-built HTML hover text (with <br> line breaks) for the map tooltip.
deaths_car_crashes$hover <- with(deaths_car_crashes, paste(State, '<br>', "Population",Population, "<br>","Deaths", Deaths, "<br>",
                           "Year", Year,"<br>", "Deaths per 100,000 population", `Deaths per 100,000 population`))
road_users_deaths_column_names=c("State","Car_Occupant_Death_Number","Car_Occupant_Death_Percent","Pickup_and_SUV_Occupant_Death_Number","Pickup_and_SUV_Occupant_Death_Percent","Large_Truck_Occupant_Death_Number","Large_Truck_Occupant_Death_Percent","Motorcyclists_Occupant_Death_Number","Motorcyclists_Occupant_Death_Percent","Pedestrians_Occupant_Death_Number","Pedestrians_Occupant_Death_Percent","Bicyclists_Occupant_Death_Number","Bicyclists_Occupant_Death_Percent","Total_Occupant_Death_Number","Total_Occupant_Death_Percent","Year","Code")
colnames(road_users_deaths)=road_users_deaths_column_names
road_users_deaths$Year=year
colnames(deaths_car_crashes)=c("State","Population","Deaths","Deaths_per_100000_Population","Year","Code","Hover")
##---
# Everything the downstream app needs, bundled into one .Rdata file.
save(ocdr,ocdr_gender,ocdr_orig,ocdr_tidy,state_code,road_users_deaths,deaths_car_crashes,seatbelt_with_deaths,line_graph,dui,US_total_DUI, file="data/deathrate.Rdata")
| /src/tidy_data.R | no_license | emarosti/bmi706-visualization-project | R | false | false | 5,550 | r | # Combine and Tidy Data Files
# Eliana Marostica, Maria Nakhoul, Sunny Mahesh
# May 4, 2019
#Might need to install this package
#install.packages("rio")
library(tidyverse)
library(rio)
## CDC DEATH RATES DATA
#change this
ocdr_orig <- read_csv("data/Motor_Vehicle_Occupant_Death_Rate__by_Age_and_Gender__2012___2014__All_States.csv")
df <- read.csv("https://raw.githubusercontent.com/plotly/datasets/master/2011_us_ag_exports.csv")
codes <- read.csv("https://raw.githubusercontent.com/plotly/datasets/master/2011_us_ag_exports.csv") %>%
dplyr::select(code, state)
ocdr <- ocdr_orig %>%
mutate(code = codes$code[match(ocdr_orig$State,codes$state)]) %>%
gather(`All Ages, 2012`, `All Ages, 2014`, `Age 0-20, 2012`,`Age 0-20, 2014`,`Age 21-34, 2012`,`Age 21-34, 2014`,
`Age 35-54, 2012`,`Age 35-54, 2014`,`Age 55+, 2012`,`Age 55+, 2014`, `Male, 2012`,`Male, 2014`,
`Female, 2012`,`Female, 2014`, key="Var and Year", value="Death_Rate") %>%
separate(`Var and Year`, into = c("Var", "Year"), sep=", ") %>%
filter(!is.na(State))
us_ind <- which(ocdr$State=="United States")
ocdr <- ocdr[-us_ind,]
ocdr_gender <- ocdr_orig %>%
mutate(code = codes$code[match(ocdr_orig$State,codes$state)]) %>%
filter(!is.na(State)) %>%
gather(`Male, 2012`,`Male, 2014`, `Female, 2012`,`Female, 2014`, key="GenderYr",value="Death_Rate") %>%
separate(GenderYr, into = c("Gender", "Year"), sep=", ") %>%
mutate(Age = "All Ages") %>%
dplyr::select(State,Location,code,Age,Gender,Year,Death_Rate)
ocdr_tidy <- ocdr_orig %>%
mutate(code = codes$code[match(ocdr_orig$State,codes$state)]) %>%
filter(!is.na(State)) %>%
gather(`All Ages, 2012`, `All Ages, 2014`, `Age 0-20, 2012`,`Age 0-20, 2014`,`Age 21-34, 2012`,`Age 21-34, 2014`,
`Age 35-54, 2012`,`Age 35-54, 2014`,`Age 55+, 2012`,`Age 55+, 2014`, key="AgeYr",value="Death_Rate") %>%
separate(AgeYr, into = c("Age", "Year"), sep=", ") %>%
mutate(Gender = "All Genders") %>%
dplyr::select(State,Location,code,Age,Year,Gender,Death_Rate) %>%
rbind(ocdr_gender)
##---
## Insurance Institute for Highway Safety Highway Loss Data Institute DATA
fatal_car_crashes<-import_list("data/fatal car_crash.xlsx",setclass = "tbl",rbind = TRUE)
deaths_by_road_users<-import_list("data/Deaths by road users.xlsx",setclass="tbl",rbind=T)
indicies=c()
for( i in 1:13){
indicies[i]=52*i
}
deaths_car_crashes=fatal_car_crashes[-indicies,]
line_graph<-fatal_car_crashes[indicies,]
colnames(line_graph)=c("State","Population","Deaths","Deaths_per_100000_Population","Year")
line_graph$Year=c("2017","2016","2015","2014","2013","2012","2011","2010","2009","2008","2007","2006","2005")
na_indicies=which(is.na(deaths_by_road_users$State))
deaths_by_road_users=deaths_by_road_users[-na_indicies,]
road_users_deaths=deaths_by_road_users[-indicies,]
year=c(rep(2017,51),rep(2016,51),rep(2015,51),rep(2014,51),rep(2013,51),rep(2012,51),rep(2011,51),rep(2010,51),rep(2009,51),rep(2008,51),rep(2007,51),rep(2006,51),rep(2005,51))
deaths_car_crashes=deaths_car_crashes[,1:4]
deaths_car_crashes$Year=year
seatbelt<-import_list("data/percent_of_seatbelt_use.xlsx",setclass = "tbl",rbind = TRUE)
seatbelt
year=c(rep(2017,51),rep(2016,51),rep(2015,51),rep(2014,51),rep(2013,51),rep(2012,51),rep(2011,51),rep(2010,51),rep(2009,51),rep(2008,51),rep(2007,51),rep(2006,51),rep(2005,51))
year2=c(rep(2017,51),rep(2016,51),rep(2015,51),rep(2014,51),rep(2013,51),rep(2012,51),rep(2011,51),rep(2010,51),rep(2009,51))
seatbelt$`_file`=year2
colnames(seatbelt)=c("State","Percentage_of_observed_seatbelt_use","Year")
deaths<-deaths_car_crashes%>%filter(Year!="2008",Year!="2007",Year!="2006",Year!="2005")%>%dplyr::select(`Deaths`,`Year`,`State`)
seatbelt_with_deaths<-inner_join(seatbelt,deaths,by=c("Year","State"))
dui<-import_list("data/DUI.xlsx",setclass = "tbl",rbind = TRUE)
US_total_DUI<-dui[indicies,]
dui<-dui[-indicies,]
dui$`_file`<-year2
US_total_DUI<-US_total_DUI%>%filter(!is.na(State))
US_total_DUI$`_file`<-c("2017","2016","2015","2014","2013","2012","2011","2010","2009")
colnames(US_total_DUI)<-c("State","Total","Year")
colnames(dui)<-c("State","Total","Year")
state_code<-ocdr_tidy%>%dplyr::select(`State`,`code`)%>%unique()
codes<-c(as.character(df$code[1:8]),"DC",as.character(df$code[9:50]))
road_users_deaths$code=codes
deaths_car_crashes$code=codes
deaths_car_crashes$hover <- with(deaths_car_crashes, paste(State, '<br>', "Population",Population, "<br>","Deaths", Deaths, "<br>",
"Year", Year,"<br>", "Deaths per 100,000 population", `Deaths per 100,000 population`))
road_users_deaths_column_names=c("State","Car_Occupant_Death_Number","Car_Occupant_Death_Percent","Pickup_and_SUV_Occupant_Death_Number","Pickup_and_SUV_Occupant_Death_Percent","Large_Truck_Occupant_Death_Number","Large_Truck_Occupant_Death_Percent","Motorcyclists_Occupant_Death_Number","Motorcyclists_Occupant_Death_Percent","Pedestrians_Occupant_Death_Number","Pedestrians_Occupant_Death_Percent","Bicyclists_Occupant_Death_Number","Bicyclists_Occupant_Death_Percent","Total_Occupant_Death_Number","Total_Occupant_Death_Percent","Year","Code")
colnames(road_users_deaths)=road_users_deaths_column_names
road_users_deaths$Year=year
colnames(deaths_car_crashes)=c("State","Population","Deaths","Deaths_per_100000_Population","Year","Code","Hover")
##---
save(ocdr,ocdr_gender,ocdr_orig,ocdr_tidy,state_code,road_users_deaths,deaths_car_crashes,seatbelt_with_deaths,line_graph,dui,US_total_DUI, file="data/deathrate.Rdata")
|
# Auto-generated fuzzing input: every argument (hi, lo, mu, sig) pinned to
# the same extreme double (~1.2e132) to stress the compiled routine.
testlist <- list(hi = 1.2136247081529e+132, lo = 1.2136247081529e+132, mu = 1.2136247081529e+132, sig = 1.2136247081529e+132)
# Invoke gjam's unexported compiled function (hence `:::`) with the list
# entries spread as named arguments; presumably a truncated-normal sampler
# given the name tnormRcpp -- TODO confirm against the gjam sources.
result <- do.call(gjam:::tnormRcpp,testlist)
str(result) | /gjam/inst/testfiles/tnormRcpp/libFuzzer_tnormRcpp/tnormRcpp_valgrind_files/1610044977-test.R | no_license | akhikolla/updated-only-Issues | R | false | false | 187 | r | testlist <- list(hi = 1.2136247081529e+132, lo = 1.2136247081529e+132, mu = 1.2136247081529e+132, sig = 1.2136247081529e+132)
result <- do.call(gjam:::tnormRcpp,testlist)
str(result) |
##' Data Constructor
##'
##' Collects the supplied components into a list and tags it with the
##' \code{"ExpRdata"} S3 class.
##'
##' @param ... data components to store in the object (e.g. \code{Y}, the
##'   output matrix)
##' @return an object of class \code{ExpRdata}
##' @author cayek
##' @export
ExpRdata <- function(...) {
  # structure() builds the list and assigns the class in a single step.
  structure(list(...), class = "ExpRdata")
}
| /R/data.R | permissive | cayek/ExpRiment | R | false | false | 212 | r | ##' Data Constructor
##'
##' Collects the components passed via \code{...} into a list and assigns
##' the S3 class "ExpRdata".
##'
##' @param ... data components to store (e.g. \code{Y}, the output matrix)
##' @return an object of class ExpRdata
##' @author cayek
##' @export
ExpRdata <- function(...)
{
  res <- list(...)
  # Tag the list with the S3 class other package methods dispatch on.
  class(res) <- c("ExpRdata")
  res
}
|
context("landscape level lsm_l_split metric")

# Compute the metric once up front; the structural tests below all inspect
# this shared result.
landscapemetrics_landscape_landscape_value <- lsm_l_split(landscape)

test_that("lsm_l_split is typestable", {
    # expect_s3_class() replaces expect_is(), which is deprecated in
    # testthat 3e; the metric must return a tibble for every input form.
    expect_s3_class(lsm_l_split(landscape), "tbl_df")
    expect_s3_class(lsm_l_split(landscape_stack), "tbl_df")
    expect_s3_class(lsm_l_split(landscape_brick), "tbl_df")
    expect_s3_class(lsm_l_split(landscape_list), "tbl_df")
})

test_that("lsm_l_split returns the desired number of columns", {
    # Standard landscapemetrics layout: layer/level/class/id/metric/value.
    expect_equal(ncol(landscapemetrics_landscape_landscape_value), 6)
})

test_that("lsm_l_split returns in every column the correct type", {
    expect_type(landscapemetrics_landscape_landscape_value$layer, "integer")
    expect_type(landscapemetrics_landscape_landscape_value$level, "character")
    expect_type(landscapemetrics_landscape_landscape_value$class, "integer")
    expect_type(landscapemetrics_landscape_landscape_value$id, "integer")
    expect_type(landscapemetrics_landscape_landscape_value$metric, "character")
    expect_type(landscapemetrics_landscape_landscape_value$value, "double")
})
| /tests/testthat/test-lsm-l-split.R | no_license | cran/landscapemetrics | R | false | false | 1,069 | r | context("landscape level lsm_l_split metric")
# Compute the metric once up front; the column-count and column-type tests
# below both inspect this shared result.
landscapemetrics_landscape_landscape_value <- lsm_l_split(landscape)

test_that("lsm_l_split is typestable", {
    # The metric must return a tibble for every supported input form.
    expect_is(lsm_l_split(landscape), "tbl_df")
    expect_is(lsm_l_split(landscape_stack), "tbl_df")
    expect_is(lsm_l_split(landscape_brick), "tbl_df")
    expect_is(lsm_l_split(landscape_list), "tbl_df")
})

test_that("lsm_l_split returns the desired number of columns", {
    # Standard landscapemetrics layout: layer/level/class/id/metric/value.
    expect_equal(ncol(landscapemetrics_landscape_landscape_value), 6)
})

test_that("lsm_l_split returns in every column the correct type", {
    expect_type(landscapemetrics_landscape_landscape_value$layer, "integer")
    expect_type(landscapemetrics_landscape_landscape_value$level, "character")
    expect_type(landscapemetrics_landscape_landscape_value$class, "integer")
    expect_type(landscapemetrics_landscape_landscape_value$id, "integer")
    expect_type(landscapemetrics_landscape_landscape_value$metric, "character")
    expect_type(landscapemetrics_landscape_landscape_value$value, "double")
})
|
require("data.table")
require("dplyr")
run_analysis <- function()
{
setwd("./UCI HAR Dataset/")
#Loads information from my 'variables' file where I define which rows to use
features <- read.table("../variables.txt", header=TRUE)
#Loads all of the data sets
print("Loading Data Sets...")
subjectTrain <- read.table("./train/subject_train.txt")
subjectTest <- read.table("./test/subject_test.txt")
yTrain <- read.table("./train/y_train.txt")
yTest <- read.table("./test/y_test.txt")
#subsetting right away to the variables with mead or std in the name
xTrain <- read.table("./train/X_train.txt")[, features$colNumber]
xTest <- read.table("./test/X_test.txt")[, features$colNumber]
print("Done.")
print("Merging, reshaping, and summarizing data")
#Merges the datasets which share columns
xMerged <- rbind(xTrain, xTest)
subjectMerged <- rbind(subjectTrain, subjectTest)
yMerged <- rbind(yTrain, yTest)
#Grabs the final row names as defined in my variables file
finalRowNames = as.character(features$finalName)
#converts the activity numbers into descriptive names
activityNames <- c("WALKING", "WALKING_UPSTAIRS", "WALKING_DOWNSTAIRS", "SITTING", "STANDING", "LAYING")
yMerged[,] <- as.factor(activityNames[yMerged[,]])
#converts the subject numbers into descriptive names
subjectNames <- vector(mode = "character", length=30)
for (i in 1:30)
{
subjectNames[i] <- paste("SUBJECT", i, sep=" ")
}
subjectMerged[,] <- as.factor(subjectNames[subjectMerged[,]])
#Applies the variable names from the features file
setnames(xMerged, as.character(features$feature))
setnames(subjectMerged, "subject")
setnames(yMerged, "activity")
#Finally puts all the data together
mergedDataSet <- data.table(cbind(xMerged, subjectMerged, yMerged))
#Cleans up variables that are no longer used
rm(list=setdiff(ls(), c("mergedDataSet", "finalRowNames")))
#Now that we have the data entirely merged, we can build the desired tidy data set.
#First we group the data by subject and activity
bySubAct <- group_by(mergedDataSet, subject, activity)
#Now we get the mean of each of the 79 variables by subject and activity
tidySummary <- summarise_each(bySubAct, funs(mean))
#Finally we apply the appropriate variable names, completing the tidy dataset
setnames(tidySummary, c("subject", "activity", finalRowNames))
print("Done")
#Outputs the summary file to the parent directory(i.e. the directory with the run_analysis script)
print("Outputing summary file")
write.table(tidySummary, file="../tidySummary.txt", row.name = FALSE)
print("Done.")
} | /run_analysis.R | no_license | EvanOman/Coursera-GCD-Project | R | false | false | 2,795 | r | require("data.table")
require("dplyr")
run_analysis <- function()
{
  # Builds ../tidySummary.txt: the mean of every selected mean/std feature
  # of the UCI HAR data set, averaged per subject and per activity.
  # Assumes the working directory contains "UCI HAR Dataset/" and that a
  # variables.txt file (columns: colNumber, feature, finalName) sits next to
  # this script, one level above the data set directory.
  old_wd <- setwd("./UCI HAR Dataset/")
  # Restore the caller's working directory even if an error occurs below.
  on.exit(setwd(old_wd), add = TRUE)

  #Loads information from the 'variables' file which defines which columns to use
  features <- read.table("../variables.txt", header=TRUE)

  #Loads all of the data sets (message() is the idiomatic channel for status text)
  message("Loading Data Sets...")
  subjectTrain <- read.table("./train/subject_train.txt")
  subjectTest <- read.table("./test/subject_test.txt")
  yTrain <- read.table("./train/y_train.txt")
  yTest <- read.table("./test/y_test.txt")
  #subsetting right away to the variables with mean or std in the name
  xTrain <- read.table("./train/X_train.txt")[, features$colNumber]
  xTest <- read.table("./test/X_test.txt")[, features$colNumber]
  message("Done.")

  message("Merging, reshaping, and summarizing data")
  #Merges the train/test partitions, which share columns
  xMerged <- rbind(xTrain, xTest)
  subjectMerged <- rbind(subjectTrain, subjectTest)
  yMerged <- rbind(yTrain, yTest)

  #Grabs the final variable names as defined in the variables file
  finalRowNames <- as.character(features$finalName)

  #converts the activity numbers into descriptive names
  activityNames <- c("WALKING", "WALKING_UPSTAIRS", "WALKING_DOWNSTAIRS", "SITTING", "STANDING", "LAYING")
  yMerged[,] <- as.factor(activityNames[yMerged[,]])

  #converts the subject numbers into descriptive names (vectorized; no loop needed)
  subjectNames <- paste("SUBJECT", seq_len(30), sep=" ")
  subjectMerged[,] <- as.factor(subjectNames[subjectMerged[,]])

  #Applies the variable names from the features file
  setnames(xMerged, as.character(features$feature))
  setnames(subjectMerged, "subject")
  setnames(yMerged, "activity")

  #Finally puts all the data together
  mergedDataSet <- data.table(cbind(xMerged, subjectMerged, yMerged))

  #Cleans up variables that are no longer used (old_wd must survive for on.exit)
  rm(list=setdiff(ls(), c("mergedDataSet", "finalRowNames", "old_wd")))

  #Now that we have the data entirely merged, we can build the desired tidy data set.
  #First we group the data by subject and activity
  bySubAct <- group_by(mergedDataSet, subject, activity)

  #Now we get the mean of each feature column by subject and activity.
  #across(everything(), mean) replaces summarise_each(funs(mean)), which is
  #defunct in current dplyr; everything() excludes the grouping columns.
  tidySummary <- summarise(bySubAct, across(everything(), mean))

  #Finally we apply the appropriate variable names, completing the tidy dataset
  setnames(tidySummary, c("subject", "activity", finalRowNames))
  message("Done")

  #Outputs the summary file to the parent directory (i.e. the directory with the run_analysis script)
  message("Outputing summary file")
  #row.names (not the partially matched 'row.name') suppresses row numbers
  write.table(tidySummary, file="../tidySummary.txt", row.names = FALSE)
  message("Done.")
}
library(logconcens)
### Name: lc.control
### Title: Set the control parameters for logcon.
### Aliases: lc.control control
### ** Examples
## See the examples for logcon
| /data/genthat_extracted_code/logconcens/examples/lc.control.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 177 | r | library(logconcens)
### Name: lc.control
### Title: Set the control parameters for logcon.
### Aliases: lc.control control
### ** Examples
## See the examples for logcon
|
#'
#'@title Server for the shiny Cohort Progression app
#'
#'@description Server for the shiny Cohort Progression app.
#'
#' @param input - the usual shiny input variable
#' @param output - the usual shiny output variable
#' @param session - the usual shiny session variable
#'
#'@return the html UI for the app
#'
#'@details Creates the UI for the app.
#'
#'@import shiny
#'
#shinyServer(
shinyServer<-function(input, output, session) {
  ## Wire together the app's server-side modules.  Each callModule()
  ## returns a reactive result that feeds the modules below it.

  ## model configuration info (reactive variable)
  cfg <- callModule(configServer, "config", session = session)
  ## natural mortality rates
  nm <- callModule(natmortServer, "natmort", configInfo = cfg, session = session)
  ## molt probability
  mlt <- callModule(moltServer, "molt", configInfo = cfg, session = session)
  ## growth
  grw <- callModule(growthServer, "growth", configInfo = cfg, session = session)
  ## probability of molt-to-maturity
  m2m <- callModule(m2mServer, "m2m", configInfo = cfg, session = session)
  ## recruitment size distribution
  rec <- callModule(recServer, "rec", configInfo = cfg, session = session)
  ## cohort progression (consumes all of the results above)
  cp <- callModule(cohortServer, "cohort",
                   configInfo = cfg,
                   nmResults = nm,
                   mltResults = mlt,
                   grwResults = grw,
                   m2mResults = m2m,
                   recResults = rec,
                   session = session)
  ## equilibrium size distribution (also consumes the cohort progression)
  eqz <- callModule(eqzServer, "eqz",
                    configInfo = cfg,
                    nmResults = nm,
                    mltResults = mlt,
                    grwResults = grw,
                    m2mResults = m2m,
                    recResults = rec,
                    cpResults = cp,
                    session = session)
}
#) #shinyServer
| /R/shinyServer.R | permissive | wStockhausen/shinyTC.CohortProgression | R | false | false | 2,121 | r | #'
#'@title Server for the shiny Cohort Progression app
#'
#'@description Server for the shiny Cohort Progression app.
#'
#' @param input - the usual shiny input variable
#' @param output - the usual shiny output variable
#' @param session - the usual shiny session variable
#'
#'@return the html UI for the app
#'
#'@details Creates the UI for the app.
#'
#'@import shiny
#'
#shinyServer(
shinyServer<-function(input, output, session) {
  ## Wire together the app's server-side modules.  Each callModule()
  ## returns a reactive result that feeds the modules below it.

  ## model configuration info (reactive variable)
  cfg <- callModule(configServer, "config", session = session)
  ## natural mortality rates
  nm <- callModule(natmortServer, "natmort", configInfo = cfg, session = session)
  ## molt probability
  mlt <- callModule(moltServer, "molt", configInfo = cfg, session = session)
  ## growth
  grw <- callModule(growthServer, "growth", configInfo = cfg, session = session)
  ## probability of molt-to-maturity
  m2m <- callModule(m2mServer, "m2m", configInfo = cfg, session = session)
  ## recruitment size distribution
  rec <- callModule(recServer, "rec", configInfo = cfg, session = session)
  ## cohort progression (consumes all of the results above)
  cp <- callModule(cohortServer, "cohort",
                   configInfo = cfg,
                   nmResults = nm,
                   mltResults = mlt,
                   grwResults = grw,
                   m2mResults = m2m,
                   recResults = rec,
                   session = session)
  ## equilibrium size distribution (also consumes the cohort progression)
  eqz <- callModule(eqzServer, "eqz",
                    configInfo = cfg,
                    nmResults = nm,
                    mltResults = mlt,
                    grwResults = grw,
                    m2mResults = m2m,
                    recResults = rec,
                    cpResults = cp,
                    session = session)
}
#) #shinyServer
|
#' Yanai's Generalized Coefficient of Determination (GCD).
#'
#' Computes the GCD between a subset ("indices") of variables and a subset
#' ("pcindices") of the Principal Components of all the variables, whose
#' covariance (or correlation) matrix is "mat".
#'
#' @param mat covariance/correlation matrix of the full variable set; a
#'   rectangular matrix is treated as raw data and replaced by cor(mat).
#' @param indices variable indices: a vector, a matrix (one zero-padded
#'   subset per row), or a 3-d array of such matrices.
#' @param pcindices indices of the PCs to use; NULL means "the first k PCs"
#'   where k is the subset size.  Only one PC set is allowed per call.
#' @return the GCD value(s), one per variable subset.
gcd.coef<-function(mat, indices, pcindices = NULL)
{
  # ---- input validation (error/warning messages kept unchanged) ----
  if (sum(!(as.integer(indices) == indices)) > 0) stop("\n The variable indices must be integers")
  if (!is.null(pcindices) && (sum(!(as.integer(pcindices) == pcindices)) > 0)) stop("\n The PC indices must be integers")
  if (!is.matrix(mat)) {
    stop("Data is missing or is not given in matrix form")
  }
  if (dim(mat)[1] != dim(mat)[2]) {
    mat<-cor(mat)
    warning("Data must be given as a covariance or correlation matrix. \n It has been assumed that you wanted the correlation matrix of the \n data matrix which was supplied.")
  }
  if (!is.null(pcindices)) {
    if (!is.vector(pcindices)) stop("If Principal Components are user-specified, only one set of PCs is allowed for each function call")
  }
  # SVD of the full matrix, shared by every subset evaluated below.
  dvsmat <- svd(mat)
  # GCD for one subset of variables (zero entries in `indices` are padding).
  gcd.1d<-function(mat,indices, pcindices){
    if (is.null(pcindices)) {pcindices <- 1:sum(indices != 0)}
    indices<-indices[indices != 0]
    # Rank-restricted SVD reconstruction of mat from the requested PCs.
    svdapprox <- function(mat, indices) {
      t(dvsmat$v[, indices] %*% (t(dvsmat$u[, indices]) * dvsmat$d[indices]))
    }
    sum(diag(solve(mat[indices, indices]) %*% svdapprox(mat,
        pcindices)[indices, indices]))/sqrt(length(indices) *
        length(pcindices))
  }
  # Dispatch on the shape of `indices`: vector, matrix (one subset per
  # row), or 3-d array (a stack of such matrices).
  dimension<-length(dim(indices))
  if (dimension > 1){
    gcd.2d<-function(mat,subsets,pcindices){
      apply(subsets,1,function(indices){gcd.1d(mat,indices,pcindices)})
    }
    if (dimension > 2) {
      gcd.3d<-function(mat,array3d,pcindices){
        apply(array3d,3,function(subsets){gcd.2d(mat,subsets,pcindices)})
      }
      output<-gcd.3d(mat,indices,pcindices)
    }
    if (dimension == 2) {output<-gcd.2d(mat,indices,pcindices)}
  }
  if (dimension < 2) {output<-gcd.1d(mat,indices,pcindices)}
  output
}
| /R/gcd.R | no_license | cran/subselect | R | false | false | 2,323 | r | gcd.coef<-function(mat, indices, pcindices = NULL)
{
#
# calcula o GCD entre um subconjunto ("indices") de variaveis
# e um subconjunto ("pcindices") das CPs de todas as variaveis,
# cuja matriz de covariancias e "mat".
# error checking
if (sum(!(as.integer(indices) == indices)) > 0) stop("\n The variable indices must be integers")
if (!is.null(pcindices) & (sum(!(as.integer(pcindices) == pcindices)) > 0)) stop("\n The PC indices must be integers")
if (!is.matrix(mat)) {
stop("Data is missing or is not given in matrix form")}
if (dim(mat)[1] != dim(mat)[2]) {
mat<-cor(mat)
warning("Data must be given as a covariance or correlation matrix. \n It has been assumed that you wanted the correlation matrix of the \n data matrix which was supplied.")
}
# body of function
# initializations
if (!is.null(pcindices)) {
if (!is.vector(pcindices)) stop("If Principal Components are user-specified, only one set of PCs is allowed for each function call")
}
dvsmat <- svd(mat)
tr<-function(mat){sum(diag(mat))}
gcd.1d<-function(mat,indices, pcindices){
if (is.null(pcindices)) {pcindices <- 1:sum(!indices==0)}
indices<-indices[!indices == 0]
svdapprox <- function(mat, indices) {
t(dvsmat$v[, indices] %*% (t(dvsmat$u[, indices]) * dvsmat$d[indices]))
}
sum(diag(solve(mat[indices, indices]) %*% svdapprox(mat,
pcindices)[indices, indices]))/sqrt(length(indices) *
length(pcindices))
}
dimension<-length(dim(indices))
# output for each dimension of input array
if (dimension > 1){
gcd.2d<-function(mat,subsets,pcindices){
apply(subsets,1,function(indices){gcd.1d(mat,indices,pcindices)})
}
if (dimension > 2) {
gcd.3d<-function(mat,array3d,pcindices){
apply(array3d,3,function(subsets){gcd.2d(mat,subsets,pcindices)})
}
output<-gcd.3d(mat,indices,pcindices)
}
if (dimension == 2) {output<-gcd.2d(mat,indices,pcindices)}
}
if (dimension < 2) {output<-gcd.1d(mat,indices,pcindices)}
output
}
|
# shp file downloaded from:
# https://earthworks.stanford.edu/catalog/stanford-gs418bw0551
# I downloaded the shapefile under the "Generated" heading...
# because that data has latitude and longitude in degrees, rather than
# WGS84 (I think) in the not-generated shapefile data.
# Note on the parcel number format (apnnodash):
# Each Parcel is identified by an Assessor's Parcel Number (APN), which
# is an 8 digit number separated by dashes (e.g. 049-103-12). The first
# 3 digits represent the Assessor's mapbook containing the Parcel (Book
# 049 in the above example). The next 2 digits represent the page number
# within that mapbook (Page 10 in the example). The next digit represents
# the block number on that page (Block 3 in the example). The last 2
# digits represent the Assessor's Parcel Number on that block (12 in the
# example)
library(rvest)
# Build (or load from cache) the "fortified" parcel data for one assessor
# map-book area, ready for choropleth plotting with ggplot2.
#
# Args:
#   area: assessor book prefix of the APNs to include (default "006").
#
# Returns a data frame of shapefile polygon coordinates joined with the
# scraped per-parcel tax/characteristics data.
#
# Side effects: caches the shapefile in data/shp.rda and the merged result
# in data/final.plot.<area>.rda; scrapes the assessor website on a cache
# miss.  NOTE(review): readShapeSpatial() comes from the maptools package,
# which must be attached by the caller.
get_plot_data <- function(area = "006") {
  # Load the parcel shapefile, caching it as an .rda for faster reloads.
  if (file.exists("data/shp.rda")) {
    load("data/shp.rda")
  } else {
    shpfile <- "gs418bw0551/gs418bw0551.shp"
    shp <- readShapeSpatial(shpfile)
    save(shp, file="data/shp.rda")
  }
  plot_file <- paste0("data/final.plot.", area, ".rda")
  if (file.exists(plot_file)) {
    print(paste0("Using cached plot data from ", plot_file))
    load(plot_file)
  } else {
    # Keep only the parcels whose APN starts with the requested area code.
    area_regexp <- paste0("^", area)
    sm <- shp[grepl(area_regexp, lapply(shp$apnnodash, as.character)),]
    # Make a df for parcel data
    len <- length(sm)
    parcel_data <- data.frame(
      apnnodash = sm$apnnodash,
      id = sm$objectid,
      tax = numeric(len),
      exemption = numeric(len),
      assessment = numeric(len),
      addr = character(len),
      type = character(len),
      year_built = integer(len),
      effective_year = integer(len),
      num_units = integer(len),
      num_rooms = integer(len),
      bedrooms = integer(len),
      bathrooms = integer(len),
      roof = character(len),
      heat = character(len),
      fireplaces = integer(len),
      pools = integer(len)
    )
    # hack - a few fields shouldn't be factor
    parcel_data$addr <- as.character(parcel_data$addr)
    parcel_data$type <- as.character(parcel_data$type)
    parcel_data$roof <- as.character(parcel_data$roof)
    parcel_data$heat <- as.character(parcel_data$heat)
    active_record <- logical(len)
    inactive_apns <- character(len)
    # Scrape each parcel's characteristics and tax pages.
    # seq_len() (not 1:len) so an empty selection skips the loop entirely.
    for (i in seq_len(len)) {
      apn <- parcel_data[i,]$apnnodash
      print(paste("scraping data", "(", i, "of", len, ") for parcel", apn))
      char_html <- get_apn_characteristics_html(apn)
      char_data <- get_apn_characteristics_data(apn, char_html)
      if (is.null(char_data)) {
        # Inactive parcel, or other form of "no data"
        active_record[i] <- FALSE
        inactive_apns[i] <- as.character(apn)
        next
      }
      active_record[i] <- TRUE
      print(paste("parsing data", "(", i, "of", len, ") for parcel", apn))
      tax_html <- get_apn_tax_html(apn)
      apn_data <- get_apn_tax_data(apn, tax_html)
      parcel_data[i,]$tax <- apn_data$tax
      parcel_data[i,]$addr <- apn_data$addr
      parcel_data[i,]$type <- apn_data$type
      parcel_data[i,]$exemption <- apn_data$exemption
      parcel_data[i,]$assessment <- apn_data$assessment
      parcel_data[i,]$year_built <- char_data$year_built
      parcel_data[i,]$effective_year <- char_data$effective_year
      parcel_data[i,]$num_units <- char_data$num_units
      parcel_data[i,]$num_rooms <- char_data$num_rooms
      parcel_data[i,]$bedrooms <- char_data$bedrooms
      parcel_data[i,]$bathrooms <- char_data$bathrooms
      parcel_data[i,]$roof <- char_data$roof
      parcel_data[i,]$heat <- char_data$heat
      parcel_data[i,]$fireplaces <- char_data$fireplaces
      parcel_data[i,]$pools <- char_data$pools
    }
    inactive_apns <- inactive_apns[inactive_apns != ""]
    # Fixed typo: was length(inactive_apns > 0), which only worked by
    # coincidence (length of the comparison vector).
    if (length(inactive_apns) > 0) {
      print("Omitting these inactive apns:")
      print(inactive_apns)
      # Trim the inactives
      parcel_data <- parcel_data[active_record,]
      sm <- sm[active_record,]
    }
    # some derived parcel data: full $7000 homeowner's exemption present?
    parcel_data$homeowner <- parcel_data$exemption == 7000
    # sm and parcel_data must describe the same parcels, in the same order
    # (was a bare, unchecked all() expression).
    stopifnot(all(sm$objectid == parcel_data$id))
    library(ggplot2)
    library(rgdal)
    library(rgeos)
    # Flatten the spatial polygons into a plain data frame keyed by id.
    sm.f <- fortify(sm, region="objectid")
    merge.shp.coef <- merge(sm.f, parcel_data, by="id", all.x=TRUE)
    # Restore polygon drawing order after the merge.
    final.plot <- merge.shp.coef[order(merge.shp.coef$order), ]
    save(final.plot, file=plot_file)
  }
  return(final.plot)
}
# Build an assessor-site URL for the given page ("Characteristics" or
# "ParcelList") and parcel number (APN).  The query string carries the APN
# plus the "outSide=true" flag the website itself suggests.
get_url <- function(page, apn) {
  base <- paste0("http://sccounty01.co.santa-cruz.ca.us/ASR/", page, "/linkHREF")
  query <- paste0("txtAPN=", apn, "&", "outSide=true")
  paste0(base, "?", query)
}
# URL of the assessor "Characteristics" page for the given parcel number.
get_apn_characteristics_url <- function(apn) {
  page <- "Characteristics"
  get_url(page, apn)
}
# URL of the assessor "ParcelList" (tax) page for the given parcel number.
get_apn_tax_url <- function(apn) {
  page <- "ParcelList"
  get_url(page, apn)
}
# Fetch and parse the characteristics page for a parcel (rvest::read_html).
get_apn_characteristics_html <- function(apn) {
  read_html(get_apn_characteristics_url(apn))
}
# Fetch and parse the tax (ParcelList) page for a parcel (rvest::read_html).
get_apn_tax_html <- function(apn) {
  read_html(get_apn_tax_url(apn))
}
# To do: what did apn 00654127 look like previously? It's "inactive"
# Scrape the raw "Characteristics" table for one parcel and return it as a
# named list of field-name -> value strings.
#
# Args:
#   apn:       parcel number; used to build the URL and to validate the page.
#   char_html: optional pre-fetched, parsed page (avoids re-hitting the
#              website, e.g. while debugging).
#
# Returns an empty list when the page does not match the expected layout,
# and also list() when the parcel is marked "(Inactive)".
get_apn_characteristics_data_raw <- function(apn, char_html=NULL) {
  char_url <- get_apn_characteristics_url(apn)
  if (is.null(char_html)) {
    char_html <- read_html(char_url)
  }
  # Each table row becomes one character vector of its cell texts.
  all_nodes <- html_nodes(char_html, ".tablePrintOnly .trPrintOnly")
  data <- sapply(all_nodes, function(n) strsplit(html_text(n), "\\r\\n\\s*", perl=T))
  # expect a list of lists, like this:
  # [[1]]
  # [1] "Parcel Info"
  #
  # [[2]]
  # [1] "APN" "Situs Address" "Class"
  #
  # [[3]]
  # [1] "" "00649409"
  # [3] "105 WEEKS AVE, SANTA CRUZ , 95060-4247 " "020-SINGLE RESIDENCE"
  #
  # [[4]]
  # [1] "Parcel #" "00649409"
  #
  # [[5]]
  # [1] "View" "NO VIEW"
  #
  # [[6]]
  # [1] "Topography" "LEVEL"
  # ...
  # Validate the fixed page header before trusting any of the fields.
  tidy <- list()
  if (data[[1]] != "Parcel Info") {
    print(paste0("Failed to find 'Parcel Info' in 1st element for apn ", apn))
    return(tidy)
  }
  if (!all(data[[2]] == list("APN", "Situs Address", "Class"))) {
    print(paste0("Didn't find expected keys in 2nd element"))
    print(data[[2]])
    return(tidy)
  }
  if (data[[3]][2] != apn) {
    print(paste0("Didn't find apn as data[[3]][2] (found:", data[[3]][2], ")"))
    return(tidy)
  }
  # NOTE(review): the parentheses in this pattern are a regex group, so this
  # matches the bare word "Inactive" anywhere; fixed = TRUE was likely meant.
  if (grepl("(Inactive)", data[[3]][3])) {
    print(paste0("apn ", apn, " is (Inactive); omitting it."))
    return(list())
  }
  tidy[["Situs Address"]] <- data[[3]][3]
  tidy[["Class"]] <- data[[3]][4]
  # list elements 4 and up are more regular: "name", "value" pairs.
  if (length(data) >= 4) {
    for (i in 4:length(data)) {
      item <- data[[i]]
      if (length(item) == 2)
        tidy[[item[1]]] <- item[2]
    }
  }
  # Keep the source URL alongside the scraped fields.
  tidy[['url']] <- char_url
  return(tidy)
}
# Convert a scraped field to an integer, mapping "missing" markers to NA.
#
# The scraper yields NULL when a field is absent from the page, and the
# website uses the literal strings "None" and "N/A" for unavailable values;
# all of these (plus an NA input, which previously crashed the if()) become
# NA.  Anything else is coerced with as.integer(), which still yields NA
# with a warning for non-numeric text, as before.
get_int_or_NA <- function(val) {
  if (is.null(val) || is.na(val) || val %in% c("None", "N/A"))
    return(NA_integer_)
  as.integer(val)
}
# Tidy the scraped characteristics for one parcel into a fixed-shape list.
#
# Returns NULL for parcels with no usable data (e.g. inactive parcels);
# otherwise a list of building attributes.  Integer-ish fields go through
# get_int_or_NA(); free-text fields are kept as scraped and may be NULL
# when absent from the page.
get_apn_characteristics_data <- function(apn, html=NULL) {
  raw_data <- get_apn_characteristics_data_raw(apn, html)
  if (length(raw_data) == 0) {
    # Inactive parcel, or the page did not match the expected layout.
    return(NULL)
  }
  year_built <- get_int_or_NA(raw_data[["Year Built"]])
  effective_year <- get_int_or_NA(raw_data[["Effective Year"]])
  num_units <- get_int_or_NA(raw_data[["# of Units"]])
  num_rooms <- get_int_or_NA(raw_data[["Room Count"]])
  bedrooms <- get_int_or_NA(raw_data[["Bedrooms"]])
  # Kept as raw text: e.g. "Bathrooms (F/H)" is a full/half count pair.
  bathrooms <- raw_data[["Bathrooms (F/H)"]]
  roof <- raw_data[["Roof"]]
  heat <- raw_data[["Heat"]]
  fireplaces <- get_int_or_NA(raw_data[["Fireplaces"]])
  pools <- raw_data[["Pool"]]
  return(
    list(
      year_built=year_built,
      effective_year=effective_year,
      num_units=num_units,
      num_rooms=num_rooms,
      bedrooms=bedrooms,
      bathrooms=bathrooms,
      roof=roof,
      heat=heat,
      fireplaces=fireplaces,
      pools=pools
    )
  )
}
# Usually, send in just the apn; pre-compute and provide the tax_html to avoid
# hitting the web site repeatedly during debugging.
# The apn must be always be provided: it's used to verify the returned data.
# Scrape the raw tax ("ParcelList") table for one parcel and return it as a
# named list of field-name -> value strings.
#
# Args:
#   apn:      parcel number; used to build the URL and to validate the page.
#   tax_html: optional pre-fetched, parsed page (avoids re-hitting the
#             website while debugging).
#
# Returns an empty list when the page layout is not recognized.
get_apn_tax_data_raw <- function(apn, tax_html=NULL) {
  tax_url <- get_apn_tax_url(apn)
  if (is.null(tax_html)) {
    tax_html <- read_html(tax_url)
  }
  # Each table row becomes one character vector of its cell texts.
  all_nodes <- html_nodes(tax_html, ".tablePrintOnly .trPrintOnly")
  data <- sapply(all_nodes, function(n) strsplit(html_text(n), "\\r\\n\\s*", perl=T))
  # I expect to have a list of lists, like
  # [[1]]
  # [1] "Parcel Info"
  #
  # [[2]]
  # [1] "APN" "Situs Address" "Class"
  #
  # [[3]]
  # [1] ""
  # [2] "00649103"
  # [3] "127 OTIS ST, SANTA CRUZ , 95060-4245 "
  # [4] "711-OTHER CHURCH PROPERTY"
  #
  # [[4]]
  # [1] "Assessed Value"
  #
  # [[5]]
  # [1] "Year" "2015/2016"
  # ...
  # Unfortunately there are two years' worth of data munged in here. I'm going to assume
  # that the most recent year appears last.
  # Validate the fixed page header before trusting any of the fields.
  tidy <- list()
  if (data[[1]] != "Parcel Info") {
    print(paste0("Failed to find 'Parcel Info' in 1st element for apn ", apn))
    return(tidy)
  }
  if (!all(data[[2]] == list("APN", "Situs Address", "Class"))) {
    print(paste0("Didn't find expected keys in 2nd element"))
    print(data[[2]])
    return(tidy)
  }
  if (data[[3]][2] != apn) {
    print(paste0("Didn't find apn as data[[3]][2] (found:", data[[3]][2], ")"))
    return(tidy)
  }
  tidy[["Situs Address"]] <- data[[3]][3]
  tidy[["Class"]] <- data[[3]][4]
  # After list element 4, things get more regular.
  # "name"/"value" pairs; later years overwrite earlier ones in tidy[],
  # which is what makes the "most recent year appears last" assumption work.
  if (data[[4]] == "Assessed Value") {
    for (i in 5:length(data)) {
      item <- data[[i]]
      if (length(item) == 2)
        tidy[[item[1]]] <- item[2]
    }
  }
  # Keep the source URL alongside the scraped fields.
  tidy[['url']] <- tax_url
  return(tidy)
}
# Scrape and tidy the tax record for one parcel.  Returns a list with tax,
# addr, type, exemption and assessment; dollar fields are parsed to
# numerics, with missing dollar fields reported as 0.
get_apn_tax_data <- function(apn, html=NULL) {
  raw <- get_apn_tax_data_raw(apn, html)
  list(
    tax = getDollarsToNumeric(raw, "Total"),
    addr = trim_address(trim_info(raw[["Situs Address"]])),
    type = raw[["Class"]],
    exemption = getDollarsToNumeric(raw, "Homeowners Exemption"),
    assessment = getDollarsToNumeric(raw, "Net Assessment")
  )
}
# Parse a dollar string such as "$1,234.56" into a numeric (1234.56) by
# stripping the leading "$" and any thousands separators.
dollarsToNumeric <- function(x) {
  stripped <- sub("$", "", x, fixed = TRUE)
  stripped <- gsub(",", "", stripped, fixed = TRUE)
  as.numeric(stripped)
}
# Look up `title` in a scraped record and parse it as dollars; a missing
# entry counts as 0.
getDollarsToNumeric <- function(data, title) {
  value <- data[[title]]
  if (is.null(value)) {
    return(0)
  }
  dollarsToNumeric(value)
}
# Normalize whitespace in scraped text: trim the ends, drop spaces before
# commas, and collapse internal runs of whitespace to a single space.
trim_info <- function(x) {
  trimmed <- gsub("^\\s+|\\s+$", "", x)
  no_space_before_comma <- gsub("\\s+,", ",", trimmed)
  gsub("\\s+", " ", no_space_before_comma)
}
# Drop the ", SANTA CRUZ, <zip>" tail from an address, keeping the street part.
trim_address <- function(addr) {
  gsub(", SANTA CRUZ,.*$", "", addr)
}
| /utils.R | no_license | aaronferrucci/proptaxchoropleth | R | false | false | 11,574 | r | # shp file downloaded from:
# https://earthworks.stanford.edu/catalog/stanford-gs418bw0551
# I downloaded the shapefile under the "Generated" heading...
# because that data has latitude and longitude in degrees, rather than
# WGS84 (I think) in the not-generated shapefile data.
# Note on the parcel number format (apnnodash):
# Each Parcel is identified by an Assessor's Parcel Number (APN), which
# is an 8 digit number separated by dashes (e.g. 049-103-12). The first
# 3 digits represent the Assessor's mapbook containing the Parcel (Book
# 049 in the above example). The next 2 digits represent the page number
# within that mapbook (Page 10 in the example). The next digit represents
# the block number on that page (Block 3 in the example). The last 2
# digits represent the Assessor's Parcel Number on that block (12 in the
# example)
library(rvest)
# Build (or load from cache) the "fortified" parcel data for one assessor
# map-book area, ready for choropleth plotting with ggplot2.
#
# Args:
#   area: assessor book prefix of the APNs to include (default "006").
#
# Returns a data frame of shapefile polygon coordinates joined with the
# scraped per-parcel tax/characteristics data.
#
# Side effects: caches the shapefile in data/shp.rda and the merged result
# in data/final.plot.<area>.rda; scrapes the assessor website on a cache
# miss.  NOTE(review): readShapeSpatial() comes from the maptools package,
# which must be attached by the caller.
get_plot_data <- function(area = "006") {
  # Load the parcel shapefile, caching it as an .rda for faster reloads.
  if (file.exists("data/shp.rda")) {
    load("data/shp.rda")
  } else {
    shpfile <- "gs418bw0551/gs418bw0551.shp"
    shp <- readShapeSpatial(shpfile)
    save(shp, file="data/shp.rda")
  }
  plot_file <- paste0("data/final.plot.", area, ".rda")
  if (file.exists(plot_file)) {
    print(paste0("Using cached plot data from ", plot_file))
    load(plot_file)
  } else {
    # Keep only the parcels whose APN starts with the requested area code.
    area_regexp <- paste0("^", area)
    sm <- shp[grepl(area_regexp, lapply(shp$apnnodash, as.character)),]
    # Make a df for parcel data
    len <- length(sm)
    parcel_data <- data.frame(
      apnnodash = sm$apnnodash,
      id = sm$objectid,
      tax = numeric(len),
      exemption = numeric(len),
      assessment = numeric(len),
      addr = character(len),
      type = character(len),
      year_built = integer(len),
      effective_year = integer(len),
      num_units = integer(len),
      num_rooms = integer(len),
      bedrooms = integer(len),
      bathrooms = integer(len),
      roof = character(len),
      heat = character(len),
      fireplaces = integer(len),
      pools = integer(len)
    )
    # hack - a few fields shouldn't be factor
    parcel_data$addr <- as.character(parcel_data$addr)
    parcel_data$type <- as.character(parcel_data$type)
    parcel_data$roof <- as.character(parcel_data$roof)
    parcel_data$heat <- as.character(parcel_data$heat)
    active_record <- logical(len)
    inactive_apns <- character(len)
    # Scrape each parcel's characteristics and tax pages.
    # seq_len() (not 1:len) so an empty selection skips the loop entirely.
    for (i in seq_len(len)) {
      apn <- parcel_data[i,]$apnnodash
      print(paste("scraping data", "(", i, "of", len, ") for parcel", apn))
      char_html <- get_apn_characteristics_html(apn)
      char_data <- get_apn_characteristics_data(apn, char_html)
      if (is.null(char_data)) {
        # Inactive parcel, or other form of "no data"
        active_record[i] <- FALSE
        inactive_apns[i] <- as.character(apn)
        next
      }
      active_record[i] <- TRUE
      print(paste("parsing data", "(", i, "of", len, ") for parcel", apn))
      tax_html <- get_apn_tax_html(apn)
      apn_data <- get_apn_tax_data(apn, tax_html)
      parcel_data[i,]$tax <- apn_data$tax
      parcel_data[i,]$addr <- apn_data$addr
      parcel_data[i,]$type <- apn_data$type
      parcel_data[i,]$exemption <- apn_data$exemption
      parcel_data[i,]$assessment <- apn_data$assessment
      parcel_data[i,]$year_built <- char_data$year_built
      parcel_data[i,]$effective_year <- char_data$effective_year
      parcel_data[i,]$num_units <- char_data$num_units
      parcel_data[i,]$num_rooms <- char_data$num_rooms
      parcel_data[i,]$bedrooms <- char_data$bedrooms
      parcel_data[i,]$bathrooms <- char_data$bathrooms
      parcel_data[i,]$roof <- char_data$roof
      parcel_data[i,]$heat <- char_data$heat
      parcel_data[i,]$fireplaces <- char_data$fireplaces
      parcel_data[i,]$pools <- char_data$pools
    }
    inactive_apns <- inactive_apns[inactive_apns != ""]
    # Fixed typo: was length(inactive_apns > 0), which only worked by
    # coincidence (length of the comparison vector).
    if (length(inactive_apns) > 0) {
      print("Omitting these inactive apns:")
      print(inactive_apns)
      # Trim the inactives
      parcel_data <- parcel_data[active_record,]
      sm <- sm[active_record,]
    }
    # some derived parcel data: full $7000 homeowner's exemption present?
    parcel_data$homeowner <- parcel_data$exemption == 7000
    # sm and parcel_data must describe the same parcels, in the same order
    # (was a bare, unchecked all() expression).
    stopifnot(all(sm$objectid == parcel_data$id))
    library(ggplot2)
    library(rgdal)
    library(rgeos)
    # Flatten the spatial polygons into a plain data frame keyed by id.
    sm.f <- fortify(sm, region="objectid")
    merge.shp.coef <- merge(sm.f, parcel_data, by="id", all.x=TRUE)
    # Restore polygon drawing order after the merge.
    final.plot <- merge.shp.coef[order(merge.shp.coef$order), ]
    save(final.plot, file=plot_file)
  }
  return(final.plot)
}
# Build an assessor-site URL for the given page ("Characteristics" or
# "ParcelList") and parcel number (APN).  The query string carries the APN
# plus the "outSide=true" flag the website itself suggests.
get_url <- function(page, apn) {
  base <- paste0("http://sccounty01.co.santa-cruz.ca.us/ASR/", page, "/linkHREF")
  query <- paste0("txtAPN=", apn, "&", "outSide=true")
  paste0(base, "?", query)
}
# URL of the assessor "Characteristics" page for the given parcel number.
get_apn_characteristics_url <- function(apn) {
  page <- "Characteristics"
  get_url(page, apn)
}
# URL of the assessor "ParcelList" (tax) page for the given parcel number.
get_apn_tax_url <- function(apn) {
  page <- "ParcelList"
  get_url(page, apn)
}
# Fetch and parse the characteristics page for a parcel (rvest::read_html).
get_apn_characteristics_html <- function(apn) {
  read_html(get_apn_characteristics_url(apn))
}
# Fetch and parse the tax (ParcelList) page for a parcel (rvest::read_html).
get_apn_tax_html <- function(apn) {
  read_html(get_apn_tax_url(apn))
}
# To do: what did apn 00654127 look like previously? It's "inactive"
# Scrape the raw "Characteristics" table for one parcel and return it as a
# named list of field-name -> value strings.
#
# Args:
#   apn:       parcel number; used to build the URL and to validate the page.
#   char_html: optional pre-fetched, parsed page (avoids re-hitting the
#              website, e.g. while debugging).
#
# Returns an empty list when the page does not match the expected layout,
# and also list() when the parcel is marked "(Inactive)".
get_apn_characteristics_data_raw <- function(apn, char_html=NULL) {
  char_url <- get_apn_characteristics_url(apn)
  if (is.null(char_html)) {
    char_html <- read_html(char_url)
  }
  # Each table row becomes one character vector of its cell texts.
  all_nodes <- html_nodes(char_html, ".tablePrintOnly .trPrintOnly")
  data <- sapply(all_nodes, function(n) strsplit(html_text(n), "\\r\\n\\s*", perl=T))
  # expect a list of lists, like this:
  # [[1]]
  # [1] "Parcel Info"
  #
  # [[2]]
  # [1] "APN" "Situs Address" "Class"
  #
  # [[3]]
  # [1] "" "00649409"
  # [3] "105 WEEKS AVE, SANTA CRUZ , 95060-4247 " "020-SINGLE RESIDENCE"
  #
  # [[4]]
  # [1] "Parcel #" "00649409"
  #
  # [[5]]
  # [1] "View" "NO VIEW"
  #
  # [[6]]
  # [1] "Topography" "LEVEL"
  # ...
  # Validate the fixed page header before trusting any of the fields.
  tidy <- list()
  if (data[[1]] != "Parcel Info") {
    print(paste0("Failed to find 'Parcel Info' in 1st element for apn ", apn))
    return(tidy)
  }
  if (!all(data[[2]] == list("APN", "Situs Address", "Class"))) {
    print(paste0("Didn't find expected keys in 2nd element"))
    print(data[[2]])
    return(tidy)
  }
  if (data[[3]][2] != apn) {
    print(paste0("Didn't find apn as data[[3]][2] (found:", data[[3]][2], ")"))
    return(tidy)
  }
  # NOTE(review): the parentheses in this pattern are a regex group, so this
  # matches the bare word "Inactive" anywhere; fixed = TRUE was likely meant.
  if (grepl("(Inactive)", data[[3]][3])) {
    print(paste0("apn ", apn, " is (Inactive); omitting it."))
    return(list())
  }
  tidy[["Situs Address"]] <- data[[3]][3]
  tidy[["Class"]] <- data[[3]][4]
  # list elements 4 and up are more regular: "name", "value" pairs.
  if (length(data) >= 4) {
    for (i in 4:length(data)) {
      item <- data[[i]]
      if (length(item) == 2)
        tidy[[item[1]]] <- item[2]
    }
  }
  # Keep the source URL alongside the scraped fields.
  tidy[['url']] <- char_url
  return(tidy)
}
# Convert a scraped field to an integer, mapping "missing" markers to NA.
#
# The scraper yields NULL when a field is absent from the page, and the
# website uses the literal strings "None" and "N/A" for unavailable values;
# all of these (plus an NA input, which previously crashed the if()) become
# NA.  Anything else is coerced with as.integer(), which still yields NA
# with a warning for non-numeric text, as before.
get_int_or_NA <- function(val) {
  if (is.null(val) || is.na(val) || val %in% c("None", "N/A"))
    return(NA_integer_)
  as.integer(val)
}
# Tidy the scraped characteristics for one parcel into a fixed-shape list.
#
# Returns NULL for parcels with no usable data (e.g. inactive parcels);
# otherwise a list of building attributes.  Integer-ish fields go through
# get_int_or_NA(); free-text fields are kept as scraped and may be NULL
# when absent from the page.
get_apn_characteristics_data <- function(apn, html=NULL) {
  raw_data <- get_apn_characteristics_data_raw(apn, html)
  if (length(raw_data) == 0) {
    # Inactive parcel, or the page did not match the expected layout.
    return(NULL)
  }
  year_built <- get_int_or_NA(raw_data[["Year Built"]])
  effective_year <- get_int_or_NA(raw_data[["Effective Year"]])
  num_units <- get_int_or_NA(raw_data[["# of Units"]])
  num_rooms <- get_int_or_NA(raw_data[["Room Count"]])
  bedrooms <- get_int_or_NA(raw_data[["Bedrooms"]])
  # Kept as raw text: e.g. "Bathrooms (F/H)" is a full/half count pair.
  bathrooms <- raw_data[["Bathrooms (F/H)"]]
  roof <- raw_data[["Roof"]]
  heat <- raw_data[["Heat"]]
  fireplaces <- get_int_or_NA(raw_data[["Fireplaces"]])
  pools <- raw_data[["Pool"]]
  return(
    list(
      year_built=year_built,
      effective_year=effective_year,
      num_units=num_units,
      num_rooms=num_rooms,
      bedrooms=bedrooms,
      bathrooms=bathrooms,
      roof=roof,
      heat=heat,
      fireplaces=fireplaces,
      pools=pools
    )
  )
}
# Usually, send in just the apn; pre-compute and provide the tax_html to avoid
# hitting the web site repeatedly during debugging.
# The apn must be always be provided: it's used to verify the returned data.
# Scrape the raw tax ("ParcelList") table for one parcel and return it as a
# named list of field-name -> value strings.
#
# Args:
#   apn:      parcel number; used to build the URL and to validate the page.
#   tax_html: optional pre-fetched, parsed page (avoids re-hitting the
#             website while debugging).
#
# Returns an empty list when the page layout is not recognized.
get_apn_tax_data_raw <- function(apn, tax_html=NULL) {
  tax_url <- get_apn_tax_url(apn)
  if (is.null(tax_html)) {
    tax_html <- read_html(tax_url)
  }
  # Each table row becomes one character vector of its cell texts.
  all_nodes <- html_nodes(tax_html, ".tablePrintOnly .trPrintOnly")
  data <- sapply(all_nodes, function(n) strsplit(html_text(n), "\\r\\n\\s*", perl=T))
  # I expect to have a list of lists, like
  # [[1]]
  # [1] "Parcel Info"
  #
  # [[2]]
  # [1] "APN" "Situs Address" "Class"
  #
  # [[3]]
  # [1] ""
  # [2] "00649103"
  # [3] "127 OTIS ST, SANTA CRUZ , 95060-4245 "
  # [4] "711-OTHER CHURCH PROPERTY"
  #
  # [[4]]
  # [1] "Assessed Value"
  #
  # [[5]]
  # [1] "Year" "2015/2016"
  # ...
  # Unfortunately there are two years' worth of data munged in here. I'm going to assume
  # that the most recent year appears last.
  # Validate the fixed page header before trusting any of the fields.
  tidy <- list()
  if (data[[1]] != "Parcel Info") {
    print(paste0("Failed to find 'Parcel Info' in 1st element for apn ", apn))
    return(tidy)
  }
  if (!all(data[[2]] == list("APN", "Situs Address", "Class"))) {
    print(paste0("Didn't find expected keys in 2nd element"))
    print(data[[2]])
    return(tidy)
  }
  if (data[[3]][2] != apn) {
    print(paste0("Didn't find apn as data[[3]][2] (found:", data[[3]][2], ")"))
    return(tidy)
  }
  tidy[["Situs Address"]] <- data[[3]][3]
  tidy[["Class"]] <- data[[3]][4]
  # After list element 4, things get more regular.
  # "name"/"value" pairs; later years overwrite earlier ones in tidy[],
  # which is what makes the "most recent year appears last" assumption work.
  if (data[[4]] == "Assessed Value") {
    for (i in 5:length(data)) {
      item <- data[[i]]
      if (length(item) == 2)
        tidy[[item[1]]] <- item[2]
    }
  }
  # Keep the source URL alongside the scraped fields.
  tidy[['url']] <- tax_url
  return(tidy)
}
# Scrape and tidy the tax record for one parcel.  Returns a list with tax,
# addr, type, exemption and assessment; dollar fields are parsed to
# numerics, with missing dollar fields reported as 0.
get_apn_tax_data <- function(apn, html=NULL) {
  raw <- get_apn_tax_data_raw(apn, html)
  list(
    tax = getDollarsToNumeric(raw, "Total"),
    addr = trim_address(trim_info(raw[["Situs Address"]])),
    type = raw[["Class"]],
    exemption = getDollarsToNumeric(raw, "Homeowners Exemption"),
    assessment = getDollarsToNumeric(raw, "Net Assessment")
  )
}
# Parse a dollar string such as "$1,234.56" into a numeric (1234.56) by
# stripping the leading "$" and any thousands separators.
dollarsToNumeric <- function(x) {
  stripped <- sub("$", "", x, fixed = TRUE)
  stripped <- gsub(",", "", stripped, fixed = TRUE)
  as.numeric(stripped)
}
# Look up `title` in a scraped record and parse it as dollars; a missing
# entry counts as 0.
getDollarsToNumeric <- function(data, title) {
  value <- data[[title]]
  if (is.null(value)) {
    return(0)
  }
  dollarsToNumeric(value)
}
# Normalize whitespace in scraped text: trim the ends, drop spaces before
# commas, and collapse internal runs of whitespace to a single space.
trim_info <- function(x) {
  trimmed <- gsub("^\\s+|\\s+$", "", x)
  no_space_before_comma <- gsub("\\s+,", ",", trimmed)
  gsub("\\s+", " ", no_space_before_comma)
}
# Drop the ", SANTA CRUZ, <zip>" tail from an address, keeping the street part.
trim_address <- function(addr) {
  gsub(", SANTA CRUZ,.*$", "", addr)
}
|
# Exploratory Data Analysis Project John Hopkins Coursera
# Author: Mehdi BENYAHIA
# Packages and Data
# NOTE(review): installs the packages unconditionally on every run;
# consider guarding with requireNamespace() instead.
install.packages("data.table")
install.packages("dplyr")
library(data.table)
library(dplyr)
# Download and unpack the household power consumption data set into the
# current working directory.
path <- getwd()
url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(url, file.path(path, "dataFiles.zip"))
unzip(zipfile = "dataFiles.zip")
#Load Data
# Keep only the two days of interest (2007-02-01 .. 2007-02-02) and the
# columns needed for plotting; DateTime combines the Date and Time fields.
powerdata <- read.table("./household_power_consumption.txt", header = TRUE, sep = ";")
powerdata$Date <- as.Date(powerdata$Date, "%d/%m/%Y")
powerdata <- filter(powerdata,(Date >= "2007-02-01") & (Date <= "2007-02-02"))
powerdata <- mutate(powerdata, DateTime = paste (Date, Time))
powerdata <- select(powerdata, DateTime, Global_active_power:Sub_metering_3)
powerdata$DateTime <- as.POSIXct(powerdata$DateTime)
# Plot 2
# Global_active_power is read as a non-numeric column (the file marks
# missing values with "?"), so coerce via character before plotting.
powerdata$Global_active_power <- as.numeric(as.character(powerdata$Global_active_power))
png("plot2.png", width=480, height=480)
plot(powerdata$DateTime , powerdata$Global_active_power, type="l", xlab="", ylab="Global Active Power (kilowatts)")
dev.off() | /Data Science Specialization/Exploratory Data Analysis/Project 1/Plot2.R | no_license | m-benyahia/Learning-Projects | R | false | false | 1,081 | r | # Exploratory Data Analysis Project John Hopkins Coursera
# Author: Mehdi BENYAHIA
# Packages and Data
# NOTE(review): installs the packages unconditionally on every run;
# consider guarding with requireNamespace() instead.
install.packages("data.table")
install.packages("dplyr")
library(data.table)
library(dplyr)
# Download and unpack the household power consumption data set into the
# current working directory.
path <- getwd()
url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(url, file.path(path, "dataFiles.zip"))
unzip(zipfile = "dataFiles.zip")
#Load Data
# Keep only the two days of interest (2007-02-01 .. 2007-02-02) and the
# columns needed for plotting; DateTime combines the Date and Time fields.
powerdata <- read.table("./household_power_consumption.txt", header = TRUE, sep = ";")
powerdata$Date <- as.Date(powerdata$Date, "%d/%m/%Y")
powerdata <- filter(powerdata,(Date >= "2007-02-01") & (Date <= "2007-02-02"))
powerdata <- mutate(powerdata, DateTime = paste (Date, Time))
powerdata <- select(powerdata, DateTime, Global_active_power:Sub_metering_3)
powerdata$DateTime <- as.POSIXct(powerdata$DateTime)
# Plot 2
# Global_active_power is read as a non-numeric column (the file marks
# missing values with "?"), so coerce via character before plotting.
powerdata$Global_active_power <- as.numeric(as.character(powerdata$Global_active_power))
png("plot2.png", width=480, height=480)
plot(powerdata$DateTime , powerdata$Global_active_power, type="l", xlab="", ylab="Global Active Power (kilowatts)")
dev.off()
#Dot plot & Box plot
dotchart(WC_AT$AT, main = "Dot Plot of AT data",color = "RED")
dotchart(WC_AT$Waist, main = "Dot Plot of Waist data",color = "RED")
boxplot(WC_AT$AT,col = "RED")
boxplot(WC_AT$Waist,col = "RED")
#Scatter plot
plot(WC_AT$Waist,WC_AT$AT,main = "Scatter plot",col = "RED",col.main = "RED",col.lab = "RED",
xlab = "Waist",ylab = "AT")
#Regression model & its summary
mymodel<-lm(AT~Waist,data = WC_AT)
summary(mymodel)
#Prediction Value
predict(mymodel,data.frame(Waist=60))
pred<-predict(mymodel)
pred
#Error
finaldata<-data.frame(WC_AT,pred,"Error" = WC_AT$AT-pred)
| /Class_Exercise.R | no_license | Rohan-hub/DataScience | R | false | false | 595 | r |
# Exploratory plots and a simple linear regression of AT on Waist.
# Assumes a data frame WC_AT with numeric columns Waist (waist circumference)
# and AT (adipose tissue) is already loaded in the session -- TODO confirm
# where it is read in.
#Dot plot & Box plot
dotchart(WC_AT$AT, main = "Dot Plot of AT data",color = "RED")
dotchart(WC_AT$Waist, main = "Dot Plot of Waist data",color = "RED")
boxplot(WC_AT$AT,col = "RED")
boxplot(WC_AT$Waist,col = "RED")
#Scatter plot
plot(WC_AT$Waist,WC_AT$AT,main = "Scatter plot",col = "RED",col.main = "RED",col.lab = "RED",
     xlab = "Waist",ylab = "AT")
#Regression model & its summary
# Simple linear model: AT explained by Waist.
mymodel<-lm(AT~Waist,data = WC_AT)
summary(mymodel)
#Prediction Value
# Point prediction for a new observation with Waist = 60.
predict(mymodel,data.frame(Waist=60))
# Fitted values for the training data.
pred<-predict(mymodel)
pred
#Error
# Residuals computed by hand (observed minus fitted), kept next to the data.
finaldata<-data.frame(WC_AT,pred,"Error" = WC_AT$AT-pred)
|
## TF motif enrichment
## Loads the Egr1 ChIP-seq peak table annotated with methylation clusters and
## reshapes it to one row per (peak, cluster) pair.
rm(list=ls())  # NOTE(review): clearing the workspace inside a script is fragile; consider removing.
library(pheatmap)
library(plyr)
library(dplyr)
library(tidyverse)
library(stringr)
library(Rtsne)
# Fixed: the plotting package is 'ggplot2'; there is no CRAN package called
# 'ggplot', so library(ggplot) aborted the script here.
library(ggplot2)
## define fpath
#fpath = '/home/bsharmi6/NA_TF_project/scMethylome/ETRMs/Enrichment_motifs_in_neurons/all_motif_locations/DMS/'
fpath = paste0('/home/bsharmi6/NA_TF_project/scMethylome/ETRMS_dev_cell_sc/ChIP_seq_analysis/Egr1/')
## read annotated table. it is annotated with clusters and TF peaks
## Fixed: list.files() interprets 'pattern' as a regular expression, not a
## shell glob; '*annotated.txt' has an invalid leading '*', so anchor on the
## real file suffix instead.
tmp = read.delim(paste0(fpath, list.files(fpath, pattern = 'annotated\\.txt$')), header = T)
## remove _dms suffix from the per-cluster columns
colnames(tmp) <- gsub('_dms', '', colnames(tmp))
## create a column with cluster annotation: comma-separated names of every
## cluster column (from column 4 on) with a non-zero entry for that peak
tmp$DMS <- apply(tmp[,4:ncol(tmp)], 1, function(x) paste0(names(which(x!=0)),collapse = ',', sep = ''))
## remove peaks not enriched in any cluster (empty annotation string)
tmp <- tmp[apply(tmp, 1, function(x) nchar(x['DMS'])>1),]
## rename column
tmp <- rename(tmp, cluster_annotated = DMS)
## split by the comma-separated annotation so each (peak, cluster) pair gets its own row
tmp <- tmp %>% separate_rows(cluster_annotated, sep = ',')
## rearrange: coordinates first, then the cluster label, then everything else
tmp <- tmp %>% select(chrom, start, end, cluster_annotated,everything())
## set excitatory
#tmp$cluster_annotated = gsub("mDL-2|mDL-1|mDL-3|mL23|mL4|mL6-2|mL6-1|mL5-1|mL5-2|mIn-1", "excitatory", tmp$cluster_annotated)
#tmp$cluster_annotated = gsub("mPv|mSst-1|mNdnf-2|mNdnf-1|mSst-2|mVip", "inhibitory", tmp$cluster_annotated)
# ## set names for each
# for(iTRM in 1:length(trm_cols)){
# tmp$cluster_annotated[! tmp[trm_cols[iTRM]] == 0] = paste0(str_to_title(strsplit(trm_cols[iTRM], '\\.')[[1]][1]), '_', 'ETRM')
# }
# ## remove no TMRs
# tmp = tmp[! tmp$cluster_annotated %in% 'Non-TRMs',]
## delete trm columns
#tmp = tmp[,c(1:3,64,4:55)]
#tmp <- tmp[, -grep('.trm', colnames(tmp), ignore.case = FALSE)]
#tmp = tmp %>% select(1:3, ncol(tmp), everything())
## rename 1:3
#colnames(tmp)[1:3] = c('chrom', 'start', 'end')
## remove .motifs
#colnames(tmp) <- gsub('.motifs.HOMER', '', colnames(tmp))
## remove end space
#colnames(tmp) <- gsub('\\.$', '', colnames(tmp))
## capitalize
#colnames(tmp) <- str_to_title(colnames(tmp))
## add ETRM extenstion to name
#colnames(tmp)[5:ncol(tmp)] <- paste0(colnames(tmp)[5:ncol(tmp)], '_ETRM')
#tmp$cluster_annotated[! tmp$cluster_5_dms %in% 0] = 'cluster_5'
## remove cluster 0
#tmp = tmp[! tmp$cluster_annotated %in% '0',]
#tmp = tmp[,c(1:3,52,4:48)]
# if(TF =='Tet1'){
# ## order for Tet1
# tmp = tmp[,c(1:3,60,4:55)]
# }else{
# ## order for Tet1
# tmp = tmp[,c(1:3,59,4:55)]
# }
## enrichment
# Per-feature binomial enrichment test.
#
# x: named integer vector of observed hit counts per feature.
# N: total number of trials (rows) shared by all features.
# p: expected hit rate per feature under the null (same length as x).
# alternative: passed straight to binom.test().
# name: label used in the output file name "Enrichment.stat(<name>).txt".
#
# Returns a matrix with one row per feature and columns
# expected / estimated / OR / OR_0.025 / OR_0.975 / p.value, and writes the
# same table (plus a 'feature' column) to a tab-separated file as a side effect.
.binom.test <- function(x, N, p, alternative="two.sided", name=NA) {
  # Preallocate instead of growing with rbind() inside the loop; this also
  # makes the function return a valid 0-row matrix for empty input, where the
  # old 1:length(x) loop would have crashed.
  test <- matrix(NA_real_, nrow = length(x), ncol = 6,
                 dimnames = list(names(x),
                                 c("expected", "estimated", "OR", "OR_0.025", "OR_0.975", "p.value")))
  for(i in seq_along(x)) {
    t <- binom.test(x[i], N, p[i], alternative = alternative)
    # Use the full component names; the old t$null / t$p.val relied on
    # partial matching of htest components (null.value / p.value).
    null <- t$null.value   # expected success probability under H0
    est <- t$estimate      # observed success probability
    test[i, ] <- c(null, est, est/null, t$conf.int[1]/null, t$conf.int[2]/null, t$p.value)
  }
  write.table(data.frame(feature=row.names(test), test),
              file=sprintf("Enrichment.stat(%s).txt", name), quote=FALSE, sep="\t", row.names=FALSE)
  test
}
# Draw the enrichment odds-ratio bar chart and save it as a TIFF.
#
# pdat: matrix/data frame from .binom.test (columns OR, p.value, ...).
# labSize: base font size; is.single selects a one-comparison layout (no
# legend) vs. a grouped layout colored by the 'label' column.
# x.limits/x.label: bar order and display names; when left as NA the default
# genomic-feature ordering below is used.
# fwidth/fheight: device size; x.anno/y.anno: position of the significance
# legend text; ylim.max: upper y-axis limit.
# Side effects only (writes "Enrichment(<name>, Binomal.test).tiff");
# no meaningful return value.
.ggplot.bar <- function(pdat, labSize=25, no.names=TRUE, is.single=FALSE, name=NA, x.limits=NA, x.label=NA, fwidth=1000, fheight=500, x.anno=1, y.anno=10, ylim.max=2) {
  # NOTE(review): is.na() on a vector argument yields a length > 1 condition
  # here; with custom x.limits this errors on modern R ("the condition has
  # length > 1"). Consider anyNA()/is.null() instead.
  if(is.na(x.limits) || is.na(x.label)) {
    x.limits <- c("Distal_promoter", "Promoter", "5'UTR", "Exon", "Intron", "3'UTR", "Intergenic",
                  "CGI", "CGI_shore", "CGI_shelf", "LINE", "SINE", "LTR", "Low_complexity", "Simple_repeat", "DNA", "Satellite")
    x.label <- c("Distal promoter", "Promoter", "5'UTR", "Exon", "Intron", "3'UTR", "Intergenic",
                 "CGI", "CGI_shore", "CGI_shelf", "LINE", "SINE", "LTR", "Low_complexity", "Simple_repeat", "DNA", "Satellite")
  }
  library(ggplot2)
  # NOTE(review): the next two self-assignments are no-ops (possibly intended
  # to force the promises); they can be removed.
  ylim.max <- ylim.max
  labSize <- labSize
  # Maps p-values to significance stars; the default argument is evaluated
  # lazily at call time, so it picks up df.dat defined later in this function.
  label <- function(p.value=as.double(paste(df.dat[,"p.value"]))) {
    labs <- rep("", length(p.value))
    labs[p.value < 0.05] <- "*"
    labs[p.value < 0.01] <- "**"
    labs[p.value < 1e-3] <- "***"
    labs
  }
  if(is.single) {
    # Single-comparison layout: one bar per feature, colored by feature, no legend.
    # (Note: "Binomal" typo is preserved in the output file name.)
    tiff(sprintf('Enrichment(%s, Binomal.test).tiff', name), width=fwidth, height=fheight)
    par(mfrow=c(1,1), mai=c(1,1,1,1))
    # NOTE(review): the else branch assigns to no.names instead of df.dat,
    # which looks like a bug -- with no.names=FALSE, df.dat is never created
    # and the ggplot call below fails. Presumably it should be df.dat <- pdat.
    if(no.names) df.dat <- data.frame(feature=row.names(pdat), pdat) else no.names <- pdat
    p <- ggplot(df.dat, aes(x=factor(feature), y=as.double(OR), fill= factor(feature))) + #factor(label, levels=sort(levels(df.dat[,"label"]))))) +
      geom_bar(stat="identity", position=position_dodge(width=0.95)) +
      labs(title="", x="", y = "Odds ratio") + theme_classic() + scale_fill_discrete(name="Feature") +
      scale_x_discrete(limits=x.limits, label=x.label) +
      theme(axis.text.y = element_text(size=labSize, color = "black"), axis.text.x = element_text(size=labSize, color = "black", vjust=0.5, hjust=1, angle=90)) +
      theme(axis.title.y = element_text(size=labSize)) + theme(legend.text = element_text(size = labSize)) + theme(legend.title = element_text(size=labSize)) +
      theme(legend.position="none", legend.key.size = unit(1, "cm")) + ylim(0,ylim.max) +
      geom_hline(yintercept=1, linetype="dashed", color = "grey30", size=1.2) +
      geom_text(aes(x=factor(feature), y=as.double(OR), label = label()), position = position_dodge(width=0.95), vjust = -0.1, size = labSize*0.2) +
      annotate(geom="text", x=x.anno, y=y.anno, label="*: p-value<0.05, **: p-value<0.01, ***: p-value<0.001", color="black", size=labSize*0.3)
    print(p)
    dev.off()
  } else {
    # Grouped layout: bars dodged by comparison ('label' column), legend on the right.
    tiff(sprintf('Enrichment(%s, Binomal.test).tiff', name), width=fwidth, height=fheight)
    par(mfrow=c(1,1), mai=c(1,1,1,1))
    # NOTE(review): same suspect else branch as above.
    if(no.names) df.dat <- data.frame(feature=row.names(pdat), pdat) else no.names <- pdat
    p <- ggplot(df.dat, aes(x=factor(feature), y=as.double(OR), fill=factor(label))) +
      geom_bar(stat="identity", position=position_dodge(width=0.95)) +
      labs(title="", x="", y = "Odds ratio") + theme_classic() + scale_fill_discrete(name="Comparison") +
      scale_x_discrete(limits=x.limits, label=x.label) +
      theme(axis.text.y = element_text(size=labSize, color = "black"), axis.text.x = element_text(size=labSize, color = "black", vjust=0, hjust=1, angle=90)) +
      theme(axis.title.y = element_text(size=labSize)) + theme(legend.text = element_text(size = labSize)) + theme(legend.title = element_text(size=labSize)) +
      theme(legend.position="right", legend.key.size = unit(1, "cm")) + ylim(0,ylim.max) +
      geom_hline(yintercept=1, linetype="dashed", color = "grey30", size=1.2) +
      geom_text(aes(x=factor(feature), y=as.double(OR), label = label()), position = position_dodge(width=0.95), vjust = -0.1, size = labSize*0.2) +
      annotate(geom="text", x=x.anno, y=y.anno, label="*: p-value<0.05, **: p-value<0.01, ***: p-value<0.001", color="black", size=labSize*0.35)
    print(p)
    dev.off()
  }
}
# Compare per-feature hit rates between a case table and a control table.
# Both files are whitespace-separated tables whose feature columns start at
# the column matching "X3.UTR" and end one column before the last; cells equal
# to "null" mean "no overlap". The case counts are tested against the control
# rates via .binom.test(), whose matrix result is returned (and written to
# disk by .binom.test itself).
.get.pdat <- function(case, control, name = NA) {
  # Summarise one annotation table: per feature column, how many rows carry a
  # non-"null" entry, plus the corresponding rate.
  summarise_hits <- function(dat) {
    first_col <- which(grepl("X3.UTR", colnames(dat)))
    feat <- dat[, first_col:(ncol(dat) - 1)]
    colnames(feat)[1:2] <- c("3'UTR", "5'UTR")
    hits <- feat != "null"
    n_rows <- nrow(hits)
    list(x = colSums(hits), n = n_rows, rate = colSums(hits) / n_rows)
  }
  observed <- summarise_hits(read.table(case, header = TRUE))
  background <- summarise_hits(read.table(control, header = TRUE))
  .binom.test(observed$x, observed$n, background$rate, name = name)
}
# Genome-feature enrichment of DMSs: for each comparison ("type"), count how
# many DMSs overlap each annotation column and test the observed rates against
# the genome-wide CpG background with .binom.test(), then plot with .ggplot.bar().
file <- "DMS.all.annotated.txt"
x <- read.table(file, h=T)
# Index of the first annotation column; features run from here to the
# next-to-last column (the last column is the comparison label "type").
idx <- which(grepl("X3.UTR", colnames(x)))
# Human-readable captions for each comparison code.
anno <- list(CD1P23_Math5P23="P23 WT vs. P23 Math5KO", CD1P6_CD1P23="P6 WT vs. P23 WT", CD1P6_Math5P6="P6 WT vs. P6 Math5KO",
             Math5P6_Math5P23="P6 Math5KO vs. P23 Math5KO")
# single case
for(type in levels(x[,"type"])) {
  y <- x[x[,"type"] %in% type,idx:(ncol(x)-1)]
  colnames(y)[1:2] <- c("3'UTR", "5'UTR")
  # "null" marks no overlap; anything else counts as a hit for that feature.
  z <- y!="null"; r <- colSums(z)/nrow(z)
  # Genome-wide CpG feature distribution used as the expected rate.
  # NOTE(review): this file is re-read on every iteration; it could be loaded
  # once before the loop.
  t <- read.table('Distribution.CpG(17million).txt.gz', h=T)
  pdat <- .binom.test(colSums(z), nrow(z), t[,2], name=anno[[type]])
  df.dat <- data.frame(feature=rownames(pdat), pdat, label="All")
  .ggplot.bar(df.dat, is.single=T, name=anno[[type]], x.anno=6, y.anno=1.8, ylim.max=2)
}
# combination
# Same test for the four comparisons, stacked into one data frame so the bars
# can be drawn side by side in a single grouped figure.
df.dat <- c()
for(type in c("CD1P6_CD1P23", "Math5P6_Math5P23", "CD1P6_Math5P6", "CD1P23_Math5P23")) {
  y <- x[x[,"type"] %in% type,idx:(ncol(x)-1)]
  colnames(y)[1:2] <- c("3'UTR", "5'UTR")
  z <- y!="null"; r <- colSums(z)/nrow(z)
  t <- read.table('Distribution.CpG(17million).txt.gz', h=T)
  pdat <- .binom.test(colSums(z), nrow(z), t[,2], name=type)
  df.dat <- rbind(df.dat, data.frame(feature=rownames(pdat), pdat, label=anno[[type]]))
}
# Combined table and grouped figure across all four comparisons.
write.table(data.frame(feature=row.names(df.dat), df.dat), file=sprintf("Enrichment.stat(All).txt"), quote=F, sep="\t", row.names=F)
.ggplot.bar(df.dat, labSize=30, no.names=F, is.single=F, name="All", x.anno=6, y.anno=1.8, ylim.max=2, fwidth=2000, fheight=1300)
################################################# simple binomial test ############################
# One-sided binomial test per cluster label: is the share of rows annotated to
# that cluster greater than 0.0625 (= 1/16 -- presumably one of 16 clusters;
# TODO confirm)? Results are collected as named lists of p-values and estimates.
dat.pval <- vector('list', length(unique(tmp$cluster_annotated)))
names(dat.pval) <- unique(tmp$cluster_annotated)
dat.est <- vector('list', length(unique(tmp$cluster_annotated)))
names(dat.est) <- unique(tmp$cluster_annotated)
for(itype in 1:length(dat.pval)){
  # NOTE(review): grepl() treats the cluster name as a regex; names containing
  # metacharacters (e.g. '-') could over-match -- consider fixed = TRUE.
  r <- sum(grepl(names(dat.pval)[itype],tmp$cluster_annotated))
  N <- nrow(tmp)
  binom.res <- binom.test(r,N,0.0625,alternative="greater")
  dat.pval[[itype]] <- binom.res$p.value
  dat.est[[itype]] <- binom.res$estimate
}
| /Figure4/code/Enrichment_of_Egr1_peakclusters_genomic_regions.R | no_license | BSharmi/Neuronal-Activity- | R | false | false | 9,350 | r | ## TF motif enrichment
## Loads the Egr1 ChIP-seq peak table annotated with methylation clusters and
## reshapes it to one row per (peak, cluster) pair.
rm(list=ls())  # NOTE(review): clearing the workspace inside a script is fragile; consider removing.
library(pheatmap)
library(plyr)
library(dplyr)
library(tidyverse)
library(stringr)
library(Rtsne)
# Fixed: the plotting package is 'ggplot2'; there is no CRAN package called
# 'ggplot', so library(ggplot) aborted the script here.
library(ggplot2)
## define fpath
#fpath = '/home/bsharmi6/NA_TF_project/scMethylome/ETRMs/Enrichment_motifs_in_neurons/all_motif_locations/DMS/'
fpath = paste0('/home/bsharmi6/NA_TF_project/scMethylome/ETRMS_dev_cell_sc/ChIP_seq_analysis/Egr1/')
## read annotated table. it is annotated with clusters and TF peaks
## Fixed: list.files() interprets 'pattern' as a regular expression, not a
## shell glob; '*annotated.txt' has an invalid leading '*', so anchor on the
## real file suffix instead.
tmp = read.delim(paste0(fpath, list.files(fpath, pattern = 'annotated\\.txt$')), header = T)
## remove _dms suffix from the per-cluster columns
colnames(tmp) <- gsub('_dms', '', colnames(tmp))
## create a column with cluster annotation: comma-separated names of every
## cluster column (from column 4 on) with a non-zero entry for that peak
tmp$DMS <- apply(tmp[,4:ncol(tmp)], 1, function(x) paste0(names(which(x!=0)),collapse = ',', sep = ''))
## remove peaks not enriched in any cluster (empty annotation string)
tmp <- tmp[apply(tmp, 1, function(x) nchar(x['DMS'])>1),]
## rename column
tmp <- rename(tmp, cluster_annotated = DMS)
## split by the comma-separated annotation so each (peak, cluster) pair gets its own row
tmp <- tmp %>% separate_rows(cluster_annotated, sep = ',')
## rearrange: coordinates first, then the cluster label, then everything else
tmp <- tmp %>% select(chrom, start, end, cluster_annotated,everything())
## set excitatory
#tmp$cluster_annotated = gsub("mDL-2|mDL-1|mDL-3|mL23|mL4|mL6-2|mL6-1|mL5-1|mL5-2|mIn-1", "excitatory", tmp$cluster_annotated)
#tmp$cluster_annotated = gsub("mPv|mSst-1|mNdnf-2|mNdnf-1|mSst-2|mVip", "inhibitory", tmp$cluster_annotated)
# ## set names for each
# for(iTRM in 1:length(trm_cols)){
# tmp$cluster_annotated[! tmp[trm_cols[iTRM]] == 0] = paste0(str_to_title(strsplit(trm_cols[iTRM], '\\.')[[1]][1]), '_', 'ETRM')
# }
# ## remove no TMRs
# tmp = tmp[! tmp$cluster_annotated %in% 'Non-TRMs',]
## delete trm columns
#tmp = tmp[,c(1:3,64,4:55)]
#tmp <- tmp[, -grep('.trm', colnames(tmp), ignore.case = FALSE)]
#tmp = tmp %>% select(1:3, ncol(tmp), everything())
## rename 1:3
#colnames(tmp)[1:3] = c('chrom', 'start', 'end')
## remove .motifs
#colnames(tmp) <- gsub('.motifs.HOMER', '', colnames(tmp))
## remove end space
#colnames(tmp) <- gsub('\\.$', '', colnames(tmp))
## capitalize
#colnames(tmp) <- str_to_title(colnames(tmp))
## add ETRM extenstion to name
#colnames(tmp)[5:ncol(tmp)] <- paste0(colnames(tmp)[5:ncol(tmp)], '_ETRM')
#tmp$cluster_annotated[! tmp$cluster_5_dms %in% 0] = 'cluster_5'
## remove cluster 0
#tmp = tmp[! tmp$cluster_annotated %in% '0',]
#tmp = tmp[,c(1:3,52,4:48)]
# if(TF =='Tet1'){
# ## order for Tet1
# tmp = tmp[,c(1:3,60,4:55)]
# }else{
# ## order for Tet1
# tmp = tmp[,c(1:3,59,4:55)]
# }
## enrichment
# Per-feature binomial enrichment test.
#
# x: named integer vector of observed hit counts per feature.
# N: total number of trials (rows) shared by all features.
# p: expected hit rate per feature under the null (same length as x).
# alternative: passed straight to binom.test().
# name: label used in the output file name "Enrichment.stat(<name>).txt".
#
# Returns a matrix with one row per feature and columns
# expected / estimated / OR / OR_0.025 / OR_0.975 / p.value, and writes the
# same table (plus a 'feature' column) to a tab-separated file as a side effect.
.binom.test <- function(x, N, p, alternative="two.sided", name=NA) {
  # Preallocate instead of growing with rbind() inside the loop; this also
  # makes the function return a valid 0-row matrix for empty input, where the
  # old 1:length(x) loop would have crashed.
  test <- matrix(NA_real_, nrow = length(x), ncol = 6,
                 dimnames = list(names(x),
                                 c("expected", "estimated", "OR", "OR_0.025", "OR_0.975", "p.value")))
  for(i in seq_along(x)) {
    t <- binom.test(x[i], N, p[i], alternative = alternative)
    # Use the full component names; the old t$null / t$p.val relied on
    # partial matching of htest components (null.value / p.value).
    null <- t$null.value   # expected success probability under H0
    est <- t$estimate      # observed success probability
    test[i, ] <- c(null, est, est/null, t$conf.int[1]/null, t$conf.int[2]/null, t$p.value)
  }
  write.table(data.frame(feature=row.names(test), test),
              file=sprintf("Enrichment.stat(%s).txt", name), quote=FALSE, sep="\t", row.names=FALSE)
  test
}
# Draw the enrichment odds-ratio bar chart and save it as a TIFF.
#
# pdat: matrix/data frame from .binom.test (columns OR, p.value, ...).
# labSize: base font size; is.single selects a one-comparison layout (no
# legend) vs. a grouped layout colored by the 'label' column.
# x.limits/x.label: bar order and display names; when left as NA the default
# genomic-feature ordering below is used.
# fwidth/fheight: device size; x.anno/y.anno: position of the significance
# legend text; ylim.max: upper y-axis limit.
# Side effects only (writes "Enrichment(<name>, Binomal.test).tiff");
# no meaningful return value.
.ggplot.bar <- function(pdat, labSize=25, no.names=TRUE, is.single=FALSE, name=NA, x.limits=NA, x.label=NA, fwidth=1000, fheight=500, x.anno=1, y.anno=10, ylim.max=2) {
  # NOTE(review): is.na() on a vector argument yields a length > 1 condition
  # here; with custom x.limits this errors on modern R ("the condition has
  # length > 1"). Consider anyNA()/is.null() instead.
  if(is.na(x.limits) || is.na(x.label)) {
    x.limits <- c("Distal_promoter", "Promoter", "5'UTR", "Exon", "Intron", "3'UTR", "Intergenic",
                  "CGI", "CGI_shore", "CGI_shelf", "LINE", "SINE", "LTR", "Low_complexity", "Simple_repeat", "DNA", "Satellite")
    x.label <- c("Distal promoter", "Promoter", "5'UTR", "Exon", "Intron", "3'UTR", "Intergenic",
                 "CGI", "CGI_shore", "CGI_shelf", "LINE", "SINE", "LTR", "Low_complexity", "Simple_repeat", "DNA", "Satellite")
  }
  library(ggplot2)
  # NOTE(review): the next two self-assignments are no-ops (possibly intended
  # to force the promises); they can be removed.
  ylim.max <- ylim.max
  labSize <- labSize
  # Maps p-values to significance stars; the default argument is evaluated
  # lazily at call time, so it picks up df.dat defined later in this function.
  label <- function(p.value=as.double(paste(df.dat[,"p.value"]))) {
    labs <- rep("", length(p.value))
    labs[p.value < 0.05] <- "*"
    labs[p.value < 0.01] <- "**"
    labs[p.value < 1e-3] <- "***"
    labs
  }
  if(is.single) {
    # Single-comparison layout: one bar per feature, colored by feature, no legend.
    # (Note: "Binomal" typo is preserved in the output file name.)
    tiff(sprintf('Enrichment(%s, Binomal.test).tiff', name), width=fwidth, height=fheight)
    par(mfrow=c(1,1), mai=c(1,1,1,1))
    # NOTE(review): the else branch assigns to no.names instead of df.dat,
    # which looks like a bug -- with no.names=FALSE, df.dat is never created
    # and the ggplot call below fails. Presumably it should be df.dat <- pdat.
    if(no.names) df.dat <- data.frame(feature=row.names(pdat), pdat) else no.names <- pdat
    p <- ggplot(df.dat, aes(x=factor(feature), y=as.double(OR), fill= factor(feature))) + #factor(label, levels=sort(levels(df.dat[,"label"]))))) +
      geom_bar(stat="identity", position=position_dodge(width=0.95)) +
      labs(title="", x="", y = "Odds ratio") + theme_classic() + scale_fill_discrete(name="Feature") +
      scale_x_discrete(limits=x.limits, label=x.label) +
      theme(axis.text.y = element_text(size=labSize, color = "black"), axis.text.x = element_text(size=labSize, color = "black", vjust=0.5, hjust=1, angle=90)) +
      theme(axis.title.y = element_text(size=labSize)) + theme(legend.text = element_text(size = labSize)) + theme(legend.title = element_text(size=labSize)) +
      theme(legend.position="none", legend.key.size = unit(1, "cm")) + ylim(0,ylim.max) +
      geom_hline(yintercept=1, linetype="dashed", color = "grey30", size=1.2) +
      geom_text(aes(x=factor(feature), y=as.double(OR), label = label()), position = position_dodge(width=0.95), vjust = -0.1, size = labSize*0.2) +
      annotate(geom="text", x=x.anno, y=y.anno, label="*: p-value<0.05, **: p-value<0.01, ***: p-value<0.001", color="black", size=labSize*0.3)
    print(p)
    dev.off()
  } else {
    # Grouped layout: bars dodged by comparison ('label' column), legend on the right.
    tiff(sprintf('Enrichment(%s, Binomal.test).tiff', name), width=fwidth, height=fheight)
    par(mfrow=c(1,1), mai=c(1,1,1,1))
    # NOTE(review): same suspect else branch as above.
    if(no.names) df.dat <- data.frame(feature=row.names(pdat), pdat) else no.names <- pdat
    p <- ggplot(df.dat, aes(x=factor(feature), y=as.double(OR), fill=factor(label))) +
      geom_bar(stat="identity", position=position_dodge(width=0.95)) +
      labs(title="", x="", y = "Odds ratio") + theme_classic() + scale_fill_discrete(name="Comparison") +
      scale_x_discrete(limits=x.limits, label=x.label) +
      theme(axis.text.y = element_text(size=labSize, color = "black"), axis.text.x = element_text(size=labSize, color = "black", vjust=0, hjust=1, angle=90)) +
      theme(axis.title.y = element_text(size=labSize)) + theme(legend.text = element_text(size = labSize)) + theme(legend.title = element_text(size=labSize)) +
      theme(legend.position="right", legend.key.size = unit(1, "cm")) + ylim(0,ylim.max) +
      geom_hline(yintercept=1, linetype="dashed", color = "grey30", size=1.2) +
      geom_text(aes(x=factor(feature), y=as.double(OR), label = label()), position = position_dodge(width=0.95), vjust = -0.1, size = labSize*0.2) +
      annotate(geom="text", x=x.anno, y=y.anno, label="*: p-value<0.05, **: p-value<0.01, ***: p-value<0.001", color="black", size=labSize*0.35)
    print(p)
    dev.off()
  }
}
# Compare per-feature hit rates between a case table and a control table.
# Both files are whitespace-separated tables whose feature columns start at
# the column matching "X3.UTR" and end one column before the last; cells equal
# to "null" mean "no overlap". The case counts are tested against the control
# rates via .binom.test(), whose matrix result is returned (and written to
# disk by .binom.test itself).
.get.pdat <- function(case, control, name = NA) {
  # Summarise one annotation table: per feature column, how many rows carry a
  # non-"null" entry, plus the corresponding rate.
  summarise_hits <- function(dat) {
    first_col <- which(grepl("X3.UTR", colnames(dat)))
    feat <- dat[, first_col:(ncol(dat) - 1)]
    colnames(feat)[1:2] <- c("3'UTR", "5'UTR")
    hits <- feat != "null"
    n_rows <- nrow(hits)
    list(x = colSums(hits), n = n_rows, rate = colSums(hits) / n_rows)
  }
  observed <- summarise_hits(read.table(case, header = TRUE))
  background <- summarise_hits(read.table(control, header = TRUE))
  .binom.test(observed$x, observed$n, background$rate, name = name)
}
# Genome-feature enrichment of DMSs: for each comparison ("type"), count how
# many DMSs overlap each annotation column and test the observed rates against
# the genome-wide CpG background with .binom.test(), then plot with .ggplot.bar().
file <- "DMS.all.annotated.txt"
x <- read.table(file, h=T)
# Index of the first annotation column; features run from here to the
# next-to-last column (the last column is the comparison label "type").
idx <- which(grepl("X3.UTR", colnames(x)))
# Human-readable captions for each comparison code.
anno <- list(CD1P23_Math5P23="P23 WT vs. P23 Math5KO", CD1P6_CD1P23="P6 WT vs. P23 WT", CD1P6_Math5P6="P6 WT vs. P6 Math5KO",
             Math5P6_Math5P23="P6 Math5KO vs. P23 Math5KO")
# single case
for(type in levels(x[,"type"])) {
  y <- x[x[,"type"] %in% type,idx:(ncol(x)-1)]
  colnames(y)[1:2] <- c("3'UTR", "5'UTR")
  # "null" marks no overlap; anything else counts as a hit for that feature.
  z <- y!="null"; r <- colSums(z)/nrow(z)
  # Genome-wide CpG feature distribution used as the expected rate.
  # NOTE(review): this file is re-read on every iteration; it could be loaded
  # once before the loop.
  t <- read.table('Distribution.CpG(17million).txt.gz', h=T)
  pdat <- .binom.test(colSums(z), nrow(z), t[,2], name=anno[[type]])
  df.dat <- data.frame(feature=rownames(pdat), pdat, label="All")
  .ggplot.bar(df.dat, is.single=T, name=anno[[type]], x.anno=6, y.anno=1.8, ylim.max=2)
}
# combination
# Same test for the four comparisons, stacked into one data frame so the bars
# can be drawn side by side in a single grouped figure.
df.dat <- c()
for(type in c("CD1P6_CD1P23", "Math5P6_Math5P23", "CD1P6_Math5P6", "CD1P23_Math5P23")) {
  y <- x[x[,"type"] %in% type,idx:(ncol(x)-1)]
  colnames(y)[1:2] <- c("3'UTR", "5'UTR")
  z <- y!="null"; r <- colSums(z)/nrow(z)
  t <- read.table('Distribution.CpG(17million).txt.gz', h=T)
  pdat <- .binom.test(colSums(z), nrow(z), t[,2], name=type)
  df.dat <- rbind(df.dat, data.frame(feature=rownames(pdat), pdat, label=anno[[type]]))
}
# Combined table and grouped figure across all four comparisons.
write.table(data.frame(feature=row.names(df.dat), df.dat), file=sprintf("Enrichment.stat(All).txt"), quote=F, sep="\t", row.names=F)
.ggplot.bar(df.dat, labSize=30, no.names=F, is.single=F, name="All", x.anno=6, y.anno=1.8, ylim.max=2, fwidth=2000, fheight=1300)
################################################# simple binomial test ############################
# One-sided binomial test per cluster label: is the share of rows annotated to
# that cluster greater than 0.0625 (= 1/16 -- presumably one of 16 clusters;
# TODO confirm)? Results are collected as named lists of p-values and estimates.
dat.pval <- vector('list', length(unique(tmp$cluster_annotated)))
names(dat.pval) <- unique(tmp$cluster_annotated)
dat.est <- vector('list', length(unique(tmp$cluster_annotated)))
names(dat.est) <- unique(tmp$cluster_annotated)
for(itype in 1:length(dat.pval)){
  # NOTE(review): grepl() treats the cluster name as a regex; names containing
  # metacharacters (e.g. '-') could over-match -- consider fixed = TRUE.
  r <- sum(grepl(names(dat.pval)[itype],tmp$cluster_annotated))
  N <- nrow(tmp)
  binom.res <- binom.test(r,N,0.0625,alternative="greater")
  dat.pval[[itype]] <- binom.res$p.value
  dat.est[[itype]] <- binom.res$estimate
}
|
# Mark the locations and magnitudes of earthquakes on a satellite map
# (runs the 'eqMaps' demo shipped with the MSG package).
demo("eqMaps", package = "MSG")
| /inst/examples/eqMaps-demo.R | no_license | minghao2016/MSG | R | false | false | 89 | r | # 在卫星地图上标记地震发生的地点和震级
demo("eqMaps", package = "MSG")
|
#' Path to example file
#'
#' Returns the full path to an example file shipped in the `extdata`
#' directory of the RMODFLOW package.
#'
#' @param filename filename of the example file
#'
#' @return path to example file
#' @export
#'
#' @examples
#' \dontrun{
#' rmf_example_file("example.dis")
#' }
rmf_example_file <- function(filename = NULL) {
  # Fail early with a clear message: previously a NULL filename made paste0()
  # return character(0), and the "" comparison below then errored with
  # "missing value where TRUE/FALSE needed".
  if (is.null(filename)) {
    stop("Please supply a filename. Check the list of example files with rmf_example_files().")
  }
  filename <- system.file(paste0("extdata/", filename), package = "RMODFLOW")
  if (filename[1] == "") {
    stop("Example file not found. Please check the list of example files with rmf_example_files().")
  } else {
    return(filename)
  }
}
| /R/rmf-example-file.R | no_license | CasillasMX/RMODFLOW | R | false | false | 428 | r | #' Path to example file
#'
#' Returns the full path to an example file shipped in the `extdata`
#' directory of the RMODFLOW package.
#'
#' @param filename filename of the example file
#'
#' @return path to example file
#' @export
#'
#' @examples
#' \dontrun{
#' rmf_example_file("example.dis")
#' }
rmf_example_file <- function(filename = NULL) {
  # Fail early with a clear message: previously a NULL filename made paste0()
  # return character(0), and the "" comparison below then errored with
  # "missing value where TRUE/FALSE needed".
  if (is.null(filename)) {
    stop("Please supply a filename. Check the list of example files with rmf_example_files().")
  }
  filename <- system.file(paste0("extdata/", filename), package = "RMODFLOW")
  if (filename[1] == "") {
    stop("Example file not found. Please check the list of example files with rmf_example_files().")
  } else {
    return(filename)
  }
}
|
# Extracted example for MESS::ks_cumtest (Kolmogorov-Smirnov goodness-of-fit
# test for cumulative discrete data), run against the toy vector 1:6.
library(MESS)
### Name: ks_cumtest
### Title: Kolmogorov-Smirnov goodness of fit test for cumulative discrete
###   data
### Aliases: ks_cumtest
### ** Examples
x <- 1:6
ks_cumtest(x)
| /data/genthat_extracted_code/MESS/examples/ks_cumtest.Rd.R | no_license | surayaaramli/typeRrh | R | false | false | 193 | r | library(MESS)
### Name: ks_cumtest
### Title: Kolmogorov-Smirnov goodness of fit test for cumulative discrete
### data
### Aliases: ks_cumtest
### ** Examples
x <- 1:6
ks_cumtest(x)
|
# Shiny dashboard of US hospital bed capacity (COVID-19 context).
library(leaflet)
library(shiny)
library(ggplot2)
library(tidyverse)
library(gifski)
library(png)
library(gganimate)
library(gapminder)
library(plotly)
# Import Data Set
# Expects USHospitalBeds.csv next to the app, with at least columns
# state, type (room category) and beds -- TODO confirm schema.
USHospitalBeds <- read.csv("USHospitalBeds.csv")
# Total beds per (state, room type).
ByState <- group_by(USHospitalBeds, state, type)
ByStatetotal <- summarize(ByState,
                          TotalBeds = sum(beds))
ByStatetotal$state <- as.factor(ByStatetotal$state)
SelectStates<-c("CA","NY","FL")
#Select CA, NY, FL
TriStates<-subset(ByStatetotal, subset = state%in%SelectStates)
# Example data frame
# Hard-coded marker positions and labels for the leaflet map.
Trimap <- data.frame(Site = c("California: Total 173,787 Beds",
                              "New York: Total 151,678 Beds",
                              "Florida: Total 250,486 Beds"),
                     Latitude = c(36.951968, 43.29943, 27.66483),
                     Longitude = c(-122.064873, -74.21793, -81.51575))
# Define UI for application that draws a histogram
# Top-level UI: four tabs (map, per-state bars, tri-state comparison, animations).
ui <- navbarPage(title = "Covid-19 US Hospital Beds",
                 tabPanel(title = "Maps",
                          column(7, leafletOutput("Trimap", height = "600px")),
                          br(),
                          br(),
                          # Fixed: the original used plotOutput("output$RoomType2"),
                          # which can never match the server-side assignment
                          # output$RoomType2 <- renderPlotly(...): output IDs must
                          # not include the "output$" prefix, and a plotly render
                          # needs plotlyOutput.
                          plotlyOutput(outputId = "RoomType2")
                 ),
                 tabPanel(title = "Individual States Information",
                          # Multi-select of room categories shared by the three
                          # per-state plots below.
                          selectInput("SelectRoomType",
                                      "Select a Room Type:",
                                      c( "ACUTE" = "ACUTE",
                                         "ICU"= "ICU",
                                         "OTHER"= "OTHER",
                                         "PSYCHIATRIC"= "PSYCHIATRIC"),multiple = TRUE),
                          plotlyOutput(outputId = "RoomType3"),
                          plotlyOutput(outputId = "RoomType4"),
                          plotlyOutput(outputId = "RoomType5")
                 ),
                 tabPanel(title = "CA, NY, FL Information",
                          plotlyOutput(outputId = "RoomType6"),
                          plotlyOutput(outputId = "RoomType7"),
                          plotOutput(outputId = "RoomType8")
                 ),
                 tabPanel(title = "Animations",
                          imageOutput("plot9"),
                          br(),
                          br(),
                          imageOutput("plot10")
                 )
)
# Define server logic required to draw a histogram
# Server logic: renders the leaflet map, the per-state and tri-state bar
# charts, a pie chart, and two gganimate GIFs from the data frames built at
# startup (USHospitalBeds, ByStatetotal, TriStates, Trimap).
server <- function(input, output) {
  ## leaflet map
  # One circle marker per site from the hard-coded Trimap data frame.
  output$Trimap <- renderLeaflet({
    leaflet() %>%
      addTiles() %>%
      addCircleMarkers(data = Trimap, ~unique(Longitude), ~unique(Latitude),
                       layerId = ~unique(Site),
                       popup = ~unique(Site))
  })
  # generate data in reactive
  # NOTE(review): the map's output ID is "Trimap", so marker clicks arrive on
  # input$Trimap_marker_click, not input$wsmap_marker_click; as written this
  # reactive always sees NULL. It is also never used elsewhere -- confirm intent.
  ggplot_data <- reactive({
    site <- input$wsmap_marker_click$id
    Trimap[Trimap$Site %in% site,]
  })
  #-------------------------------------------------------------------------------------------------------------------
  ## Static map of California with the marker position highlighted.
  # NOTE(review): the ggplot chain ends at labs() (no trailing "+"), so the
  # value returned by this expression is the bare guides() spec, not the plot;
  # the map is never drawn. guides(fill = FALSE) should be "+"-chained.
  output$RoomType2 <- renderPlotly({
    states <- map_data("state")
    CA <- subset(states, region == "california")
    counties <- map_data("county")
    CA_county <- subset(counties, region == "california")
    # Data Frame of points(2)
    CAlabs <- data.frame(
      long = -122.064873,
      lat = 36.951968,
      names = "CA",
      stringsAsFactors = FALSE)
    ggplot(data = CA, mapping = aes(x = long, y = lat)) +
      geom_polygon(color = "black", fill = "hotpink") +
      coord_fixed(1) +
      geom_point(data = CAlabs, aes(x = long, y = lat), color = "red", size = 4) +
      geom_point(data = CAlabs, aes(x = long, y = lat), color = "blue", size = 3) +
      geom_polygon(color = "black", fill = NA) +
      theme(legend.position = 'none') +
      labs(title = "Hospital Beds in California States",
           x = "Longitude",
           y = "Latitude")
    guides(fill = FALSE) # do this to leave off the color legend
  })
  #-------------------------------------------------------------------------------------------------------------------
  ## California: bed totals for the room types chosen in SelectRoomType.
  output$RoomType3 <- renderPlotly({
    California <- subset (ByStatetotal,state == "CA")
    option <- subset(California,
                     type %in% input$SelectRoomType)
    ggplot(option,
           aes(x = type,y = TotalBeds,fill = type)) +
      geom_bar(stat = "identity") +
      theme(legend.position = 'none') +
      labs(title = "Room Type VS. Availability",
           subtitle = "Acute Unit have the widest range concerning the availability in State of California",
           x = "Room Type",
           y = "Total beds in California per 1000 HAB")
  })
  #-------------------------------------------------------------------------------------------------------------------
  ## New York: same chart filtered to NY.
  output$RoomType4 <- renderPlotly({
    NewYork <- subset (ByStatetotal,state == "NY")
    option <- subset(NewYork,type %in% input$SelectRoomType)
    ggplot(option,
           aes(x = type,y = TotalBeds, fill = type)) +
      geom_bar(stat = "identity") +
      theme(legend.position = 'none') +
      labs(title = "Room Type VS. Availability",
           subtitle = "Acute Unit have the widest range concerning the availability in State of New York",
           x = "Room Type",
           y = "Total beds in New York per 1000 HAB")
  })
  #-------------------------------------------------------------------------------------------------------------------
  ## Florida: same chart filtered to FL.
  output$RoomType5 <- renderPlotly({
    Florida <- subset (ByStatetotal,state == "FL")
    option <- subset(Florida,type %in% input$SelectRoomType)
    ggplot(option,
           aes(x = type,y = TotalBeds, fill = type)) +
      geom_bar(stat = "identity") +
      theme(legend.position = 'none') +
      labs(title = "Room Type VS. Availability",
           subtitle = "Acute Unit have the widest range concerning the availability in State of Florida",
           x = "Room Type",
           y = "Total beds in Florida per 1000 HAB")
  })
  #-------------------------------------------------------------------------------------------------------------------
  ## Tri-state comparison, faceted by state. Rebuilds TriStates locally
  ## (shadowing the globals); the 'option' variable here is unused.
  output$RoomType6 <- renderPlotly({
    ByStatetotal$state <- as.factor(ByStatetotal$state)
    SelectStates<-c("CA","NY","FL")
    #Select CA, NY, FL
    TriStates<-subset(ByStatetotal, subset = state%in%SelectStates)
    option <- subset(TriStates)
    ggplot(TriStates,
           aes(x = type,y = TotalBeds, fill = type)) +
      geom_bar(stat = "identity") +
      facet_wrap(~ state) +
      theme(legend.position = 'none',axis.text.x = element_text(angle = 45,size=8)) +
      labs(title = "Room Type VS. Availability",
           subtitle = "Acute Unit have the widest range concerning the availability in State of CA,NY,FL",
           x = "Room Type",
           y = "Total beds in CA,NY,FL per 1000 HAB")
  })
  #-------------------------------------------------------------------------------------------------------------------
  ## Boxplot of raw ACUTE bed counts for the three states.
  output$RoomType7 <- renderPlotly({
    Acute <- subset(USHospitalBeds, type == "ACUTE")
    AcuteTriStates <- subset(Acute, subset = state%in%SelectStates)
    option <- subset(AcuteTriStates)
    ggplot(option,
           aes(x = type,y = beds)) +
      geom_boxplot(mapping = aes(color = state)) +
      theme(legend.position = 'none') +
      labs(title = "Room Type VS. Availability",
           subtitle = "Acute Unit have the widest range concerning the availability in State of CA,NY,FL",
           x = "ACUTE",
           y = "Total beds in CA,NY,FL per 1000 HAB")
  })
  #-------------------------------------------------------------------------------------------------------------------
  ## Pie chart of the tri-state TotalBeds distribution (bar chart folded into
  ## polar coordinates).
  output$RoomType8 <- renderPlot({
    StatePercentage <- TriStates %>% count(TotalBeds)
    p8 <- ggplot(StatePercentage,
                 aes(x = "",y = n, fill = TotalBeds)) +
      geom_bar(stat="identity", width=1)
    pie = p8+coord_polar("y",start=0)
    pie
  })
  #-------------------------------------------------------------------------------------------------------------------
  ## Animated boxplot GIF, rendered with gganimate and served via renderImage.
  output$plot9 <- renderImage({
    # A temp file to save the output.
    # This file will be removed later by renderImage
    outfile <- tempfile(fileext='.gif')
    # now make the animation
    #option <- subset(TriStates, type %in% input$AnimationSelection)
    p9 <- ggplot(TriStates,
                 aes(x = type,y = TotalBeds,fill = type)) +
      geom_boxplot() +
      theme(legend.position = 'none') +
      labs(title = "Room Type VS. Availability",
           subtitle = "Acute Unit have the widest range concerning the availability in State of CA,NY,FL",
           x = "Room Type",
           y = "Total Beds in CA,NY,FL per 1000 HAB") +
      #Here comes the gganimate code
      transition_states(type, transition_length = 1, state_length = 2) +
      enter_grow() + enter_drift(x_mod = -1) +
      exit_shrink() + exit_drift(x_mod = 6)
    # NOTE(review): the next two expressions are evaluated and discarded --
    # ease_aes() is not "+"-chained into p9 (so the easing never applies) and
    # the bare p9 has no effect inside renderImage.
    ease_aes('sine-in-out')
    p9 # New
    # NOTE(review): outfile (the tempfile above) is unused; the GIF is written
    # to "outfile.gif" in the working directory instead.
    anim_save("outfile.gif", animate(p9)) # New
    # Return a list containing the filename
    list(src = "outfile.gif",
         contentType = 'image/gif'
         # width = 400,
         # height = 300,
         # alt = "This is alternate text"
    )}, deleteFile = TRUE)
  #-------------------------------------------------------------------------------------------------------------------
  ## Animated faceted bar chart GIF (same pipeline as plot9).
  output$plot10 <- renderImage({
    # A temp file to save the output.
    # This file will be removed later by renderImage
    outfile <- tempfile(fileext='.gif')
    # now make the animation
    p10 <- ggplot(TriStates,
                  aes(x = type,y = TotalBeds, fill = type)) +
      geom_bar(stat = "identity") +
      facet_wrap(~ state) +
      theme(legend.position = 'none',axis.text.x = element_text(angle = 45,size=8)) +
      labs(title = "Room Type VS. Availability",
           subtitle = "Acute Unit have the widest range concerning the availability in State of CA,NY,FL",
           x = "Room Type",
           y = "Total beds in CA,NY,FL per 1000 HAB") +
      #Here comes the gganimate code
      transition_states(type, transition_length = 1, state_length = 2) +
      enter_grow() +
      exit_shrink() +
      ease_aes('sine-in-out')
    p10 # New
    anim_save("outfile.gif", animate(p10)) # New
    # Return a list containing the filename
    list(src = "outfile.gif",
         contentType = 'image/gif'
         # width = 400,
         # height = 300,
         # alt = "This is alternate text"
    )}, deleteFile = TRUE)
}
# Run the application
# Entry point: wires the UI definition to the server logic above.
shinyApp(ui = ui, server = server)
| /02_Codes/app.R | no_license | athenal8/TermProjectDataVisualization | R | false | false | 11,574 | r | library(leaflet)
# Shiny dashboard of US hospital bed capacity (COVID-19 context).
library(shiny)
library(ggplot2)
library(tidyverse)
library(gifski)
library(png)
library(gganimate)
library(gapminder)
library(plotly)
# Import Data Set
# Expects USHospitalBeds.csv next to the app, with at least columns
# state, type (room category) and beds -- TODO confirm schema.
USHospitalBeds <- read.csv("USHospitalBeds.csv")
# Total beds per (state, room type).
ByState <- group_by(USHospitalBeds, state, type)
ByStatetotal <- summarize(ByState,
                          TotalBeds = sum(beds))
ByStatetotal$state <- as.factor(ByStatetotal$state)
SelectStates<-c("CA","NY","FL")
#Select CA, NY, FL
TriStates<-subset(ByStatetotal, subset = state%in%SelectStates)
# Example data frame
# Hard-coded marker positions and labels for the leaflet map.
Trimap <- data.frame(Site = c("California: Total 173,787 Beds",
                              "New York: Total 151,678 Beds",
                              "Florida: Total 250,486 Beds"),
                     Latitude = c(36.951968, 43.29943, 27.66483),
                     Longitude = c(-122.064873, -74.21793, -81.51575))
# Define UI for application that draws a histogram
# Top-level UI: four tabs (map, per-state bars, tri-state comparison, animations).
ui <- navbarPage(title = "Covid-19 US Hospital Beds",
                 tabPanel(title = "Maps",
                          column(7, leafletOutput("Trimap", height = "600px")),
                          br(),
                          br(),
                          # Fixed: the original used plotOutput("output$RoomType2"),
                          # which can never match the server-side assignment
                          # output$RoomType2 <- renderPlotly(...): output IDs must
                          # not include the "output$" prefix, and a plotly render
                          # needs plotlyOutput.
                          plotlyOutput(outputId = "RoomType2")
                 ),
                 tabPanel(title = "Individual States Information",
                          # Multi-select of room categories shared by the three
                          # per-state plots below.
                          selectInput("SelectRoomType",
                                      "Select a Room Type:",
                                      c( "ACUTE" = "ACUTE",
                                         "ICU"= "ICU",
                                         "OTHER"= "OTHER",
                                         "PSYCHIATRIC"= "PSYCHIATRIC"),multiple = TRUE),
                          plotlyOutput(outputId = "RoomType3"),
                          plotlyOutput(outputId = "RoomType4"),
                          plotlyOutput(outputId = "RoomType5")
                 ),
                 tabPanel(title = "CA, NY, FL Information",
                          plotlyOutput(outputId = "RoomType6"),
                          plotlyOutput(outputId = "RoomType7"),
                          plotOutput(outputId = "RoomType8")
                 ),
                 tabPanel(title = "Animations",
                          imageOutput("plot9"),
                          br(),
                          br(),
                          imageOutput("plot10")
                 )
)
# Define server logic required to draw a histogram
server <- function(input, output) {
  ## Leaflet map: one circle marker per site; a marker click populates
  ## input$wsmap_marker_click, which the reactive below consumes.
  output$Trimap <- renderLeaflet({
    leaflet() %>%
      addTiles() %>%
      addCircleMarkers(data = Trimap, ~unique(Longitude), ~unique(Latitude),
                       layerId = ~unique(Site),
                       popup = ~unique(Site))
  })
  # generate data in reactive: the Trimap rows for the clicked site
  ggplot_data <- reactive({
    site <- input$wsmap_marker_click$id
    Trimap[Trimap$Site %in% site, ]
  })
  #-------------------------------------------------------------------------------------------------------------------
  output$RoomType2 <- renderPlotly({
    states <- map_data("state")
    CA <- subset(states, region == "california")
    counties <- map_data("county")
    CA_county <- subset(counties, region == "california")
    # Data Frame of points(2)
    CAlabs <- data.frame(
      long = -122.064873,
      lat = 36.951968,
      names = "CA",
      stringsAsFactors = FALSE)
    # BUG FIX: guides(fill = FALSE) used to be a *stand-alone* statement after
    # the ggplot chain, so this reactive returned the guides spec instead of
    # the plot. It is now attached to the chain with "+".
    ggplot(data = CA, mapping = aes(x = long, y = lat)) +
      geom_polygon(color = "black", fill = "hotpink") +
      coord_fixed(1) +
      geom_point(data = CAlabs, aes(x = long, y = lat), color = "red", size = 4) +
      geom_point(data = CAlabs, aes(x = long, y = lat), color = "blue", size = 3) +
      geom_polygon(color = "black", fill = NA) +
      theme(legend.position = 'none') +
      labs(title = "Hospital Beds in California States",
           x = "Longitude",
           y = "Latitude") +
      guides(fill = FALSE) # do this to leave off the color legend
  })
  #-------------------------------------------------------------------------------------------------------------------
  output$RoomType3 <- renderPlotly({
    # Bar chart of total beds per room type for California, restricted to the
    # room types picked in the UI (input$SelectRoomType).
    California <- subset(ByStatetotal, state == "CA")
    option <- subset(California,
                     type %in% input$SelectRoomType)
    ggplot(option,
           aes(x = type, y = TotalBeds, fill = type)) +
      geom_bar(stat = "identity") +
      theme(legend.position = 'none') +
      labs(title = "Room Type VS. Availability",
           subtitle = "Acute Unit have the widest range concerning the availability in State of California",
           x = "Room Type",
           y = "Total beds in California per 1000 HAB")
  })
  #-------------------------------------------------------------------------------------------------------------------
  output$RoomType4 <- renderPlotly({
    # Same chart as RoomType3, for New York.
    NewYork <- subset(ByStatetotal, state == "NY")
    option <- subset(NewYork, type %in% input$SelectRoomType)
    ggplot(option,
           aes(x = type, y = TotalBeds, fill = type)) +
      geom_bar(stat = "identity") +
      theme(legend.position = 'none') +
      labs(title = "Room Type VS. Availability",
           subtitle = "Acute Unit have the widest range concerning the availability in State of New York",
           x = "Room Type",
           y = "Total beds in New York per 1000 HAB")
  })
  #-------------------------------------------------------------------------------------------------------------------
  output$RoomType5 <- renderPlotly({
    # Same chart as RoomType3, for Florida.
    Florida <- subset(ByStatetotal, state == "FL")
    option <- subset(Florida, type %in% input$SelectRoomType)
    ggplot(option,
           aes(x = type, y = TotalBeds, fill = type)) +
      geom_bar(stat = "identity") +
      theme(legend.position = 'none') +
      labs(title = "Room Type VS. Availability",
           subtitle = "Acute Unit have the widest range concerning the availability in State of Florida",
           x = "Room Type",
           y = "Total beds in Florida per 1000 HAB")
  })
  #-------------------------------------------------------------------------------------------------------------------
  output$RoomType6 <- renderPlotly({
    # Faceted bar chart across CA / NY / FL.
    ByStatetotal$state <- as.factor(ByStatetotal$state) # modifies a local copy only
    SelectStates <- c("CA","NY","FL")
    #Select CA, NY, FL
    TriStates <- subset(ByStatetotal, subset = state %in% SelectStates)
    ggplot(TriStates,
           aes(x = type, y = TotalBeds, fill = type)) +
      geom_bar(stat = "identity") +
      facet_wrap(~ state) +
      theme(legend.position = 'none', axis.text.x = element_text(angle = 45, size = 8)) +
      labs(title = "Room Type VS. Availability",
           subtitle = "Acute Unit have the widest range concerning the availability in State of CA,NY,FL",
           x = "Room Type",
           y = "Total beds in CA,NY,FL per 1000 HAB")
  })
  #-------------------------------------------------------------------------------------------------------------------
  output$RoomType7 <- renderPlotly({
    # BUG FIX: SelectStates was previously created only inside RoomType6's
    # reactive (a function-local variable) and is not visible here; define it
    # locally so this output is self-contained.
    SelectStates <- c("CA","NY","FL")
    Acute <- subset(USHospitalBeds, type == "ACUTE")
    AcuteTriStates <- subset(Acute, subset = state %in% SelectStates)
    option <- subset(AcuteTriStates)
    ggplot(option,
           aes(x = type, y = beds)) +
      geom_boxplot(mapping = aes(color = state)) +
      theme(legend.position = 'none') +
      labs(title = "Room Type VS. Availability",
           subtitle = "Acute Unit have the widest range concerning the availability in State of CA,NY,FL",
           x = "ACUTE",
           y = "Total beds in CA,NY,FL per 1000 HAB")
  })
  #-------------------------------------------------------------------------------------------------------------------
  output$RoomType8 <- renderPlot({
    # BUG FIX: TriStates was previously created only inside RoomType6's
    # reactive; rebuild it locally so this output is self-contained.
    SelectStates <- c("CA","NY","FL")
    TriStates <- subset(ByStatetotal, subset = state %in% SelectStates)
    StatePercentage <- TriStates %>% count(TotalBeds)
    p8 <- ggplot(StatePercentage,
                 aes(x = "", y = n, fill = TotalBeds)) +
      geom_bar(stat = "identity", width = 1)
    pie <- p8 + coord_polar("y", start = 0)
    pie
  })
  #-------------------------------------------------------------------------------------------------------------------
  output$plot9 <- renderImage({
    # A temp file to save the output.
    # This file will be removed later by renderImage (deleteFile = TRUE).
    outfile <- tempfile(fileext = '.gif')
    # BUG FIX: TriStates is rebuilt locally (it used to leak in from
    # RoomType6's reactive scope).
    SelectStates <- c("CA","NY","FL")
    TriStates <- subset(ByStatetotal, subset = state %in% SelectStates)
    # now make the animation
    p9 <- ggplot(TriStates,
                 aes(x = type, y = TotalBeds, fill = type)) +
      geom_boxplot() +
      theme(legend.position = 'none') +
      labs(title = "Room Type VS. Availability",
           subtitle = "Acute Unit have the widest range concerning the availability in State of CA,NY,FL",
           x = "Room Type",
           y = "Total Beds in CA,NY,FL per 1000 HAB") +
      #Here comes the gganimate code
      transition_states(type, transition_length = 1, state_length = 2) +
      enter_grow() + enter_drift(x_mod = -1) +
      exit_shrink() + exit_drift(x_mod = 6) +
      ease_aes('sine-in-out') # BUG FIX: was a dangling statement (missing "+")
    # BUG FIX: render into the tempfile instead of "outfile.gif" in the
    # working directory (the tempfile was created but never used before).
    anim_save(outfile, animate(p9))
    # Return a list containing the filename
    list(src = outfile,
         contentType = 'image/gif')
  }, deleteFile = TRUE)
  #-------------------------------------------------------------------------------------------------------------------
  output$plot10 <- renderImage({
    # A temp file to save the output.
    # This file will be removed later by renderImage (deleteFile = TRUE).
    outfile <- tempfile(fileext = '.gif')
    # BUG FIX: TriStates is rebuilt locally (see plot9).
    SelectStates <- c("CA","NY","FL")
    TriStates <- subset(ByStatetotal, subset = state %in% SelectStates)
    # now make the animation
    p10 <- ggplot(TriStates,
                  aes(x = type, y = TotalBeds, fill = type)) +
      geom_bar(stat = "identity") +
      facet_wrap(~ state) +
      theme(legend.position = 'none', axis.text.x = element_text(angle = 45, size = 8)) +
      labs(title = "Room Type VS. Availability",
           subtitle = "Acute Unit have the widest range concerning the availability in State of CA,NY,FL",
           x = "Room Type",
           y = "Total beds in CA,NY,FL per 1000 HAB") +
      #Here comes the gganimate code
      transition_states(type, transition_length = 1, state_length = 2) +
      enter_grow() +
      exit_shrink() +
      ease_aes('sine-in-out')
    anim_save(outfile, animate(p10)) # BUG FIX: write to the tempfile
    # Return a list containing the filename
    list(src = outfile,
         contentType = 'image/gif')
  }, deleteFile = TRUE)
}
# Run the application
# (expects `ui` and `server` objects to be defined earlier in this file)
shinyApp(ui = ui, server = server)
|
#' Dynamic Time Warping (DTW) Plot
#'
#' This function plots temporal alignment of query and reference
#'
#' @import segmented dtw
#'
#' @param query A vector containing temporal gene expression values of query
#' @param timePoints_query A vector containing time points of query
#' @param reference A vector containing temporal gene expression values of reference
#' @param timePoints_reference A vector containing time points of reference
#' @param alignment The output from dtw() function
#' @param title Title of the figure
#' @param ref_type Reference plot type: "p": Points, "l": Lines, "b": Both. "l"=default
#' @param ref_lty Reference plot line type: 1:solid, 2:dashed, 3:dotted, 4:dotdash, 5:longdash, 6:twodash. 1=default
#' @param ref_lwd Reference plot line line width. 1.5=default
#' @param ref_col Reference plot color. "black"=default
#' @param query_type Query plot type: "p": Points, "l": Lines, "b": Both. "l"=default
#' @param query_lty Query plot line type: 1:solid, 2:dashed, 3:dotted, 4:dotdash, 5:longdash, 6:twodash. 1=default
#' @param query_lwd Query plot line line width. Default:1.5
#' @param query_col Query plot color. "red"=default
#' @param cex_main The title fond size. 1=default, 1.5 is 50\% larger, 0.5 is 50\% smaller, etc.
#' @param cex_lab Magnification of x and y labels relative to cex
#' @param cex_axis Magnification of axis annotation relative to cex
#' @param xlabText X-axis label text. "Time"=default
#' @param ylabText Y-axis label text. "Gene Expression Values"=default
#'
#' @return It returns a plot.
#'
#' @examples
#' data(simData)
#' data=simdata$TimeShift_10
#' gene=data$gene
#' query=data$query
#' timePoints_query=data$timePoints_query
#' reference=data$reference
#' timePoints_reference=data$timePoints_reference
#' alignment=dtw(query,reference)
#' dtw_results=list(alignment$index1,alignment$index2)
#' index_1=dtw_results[[1]]
#' index_2=dtw_results[[2]]
#' aligned_values_query=query[index_1]
#' aligned_values_reference=reference[index_2]
#' aligned_timePoints_query=timePoints_query[index_1]
#' aligned_timePoints_reference=timePoints_reference[index_2]
#' index_alignableRegion=alignableRegionIndex(aligned_timePoints_query,aligned_timePoints_reference)
#' alignableRegion_values_query=aligned_values_query[index_alignableRegion]
#' alignableRegion_values_reference=aligned_values_reference[index_alignableRegion]
#' alignableRegion_timePoints_query=aligned_timePoints_query[index_alignableRegion]
#' alignableRegion_timePoints_reference=aligned_timePoints_reference[index_alignableRegion]
#' percentageAlignmentQuery=percentageAlignment(timePoints_query,
#' timePoints_reference,
#' alignableRegion_timePoints_query,
#' alignableRegion_timePoints_reference)['percentage_alignment_query']
#' percentageAlignmentReference=percentageAlignment(timePoints_query,
#' timePoints_reference,
#' alignableRegion_timePoints_query,
#' alignableRegion_timePoints_reference)['percentage_alignment_reference']
#' Rho=spearmanCorrelation(alignableRegion_values_query,alignableRegion_values_reference)
#' pValueRho=getPValueRho(Rho,query,timePoints_query,reference,timePoints_reference)
#' alignableRegion_timePoints_query_merged=mergeReferencePoints(alignableRegion_timePoints_query,
#' alignableRegion_timePoints_reference)["aligned_timePoints_query_merged"][[1]]
#' alignableRegion_timePoints_reference_merged=mergeReferencePoints(alignableRegion_timePoints_query,
#' alignableRegion_timePoints_reference)["aligned_timePoints_reference_merged"][[1]]
#' segmentedRegression_out=segmentedRegression(alignableRegion_timePoints_query_merged,
#' alignableRegion_timePoints_reference_merged)
#' breakPointsMatrix=fetchBreakPoints(segmentedRegression_out)
#' PAS=getPAS(segmentedRegression_out)$PAS
#' PASVector=getPAS(segmentedRegression_out)$PASVector
#' plotDTW(query,timePoints_query,reference,timePoints_reference,alignment,title="DTW")
#'
#'
#'
#' @export
#' @author Peng Jiang \email{PJiang@morgridge.org}
plotDTW <- function(query,
                    timePoints_query,
                    reference,
                    timePoints_reference,
                    alignment,
                    title,
                    ref_type="l",
                    ref_lty=1,
                    ref_lwd=1.5,
                    ref_col="black",
                    query_type="l",
                    query_lty=1,
                    query_lwd=1.5,
                    query_col="red",
                    cex_main=1,
                    cex_lab=1.2,
                    cex_axis=1.2,
                    xlabText="Time",
                    ylabText="Gene Expression Values") {
	## DTW warping path: index1/index2 are parallel vectors of matched positions
	## in query and reference.  (Removed the unused dtw_results/aligned_* locals.)
	index_1 <- alignment$index1
	index_2 <- alignment$index2
	## Shared axis limits so the two curves overlay on the same scale.
	x_min <- min(c(timePoints_reference, timePoints_query))
	x_max <- max(c(timePoints_reference, timePoints_query))
	y_min <- min(c(reference, query))
	y_max <- max(c(reference, query))
	## Reference curve first, with axes/labels suppressed (the query plot below
	## draws the axis annotation and titles).
	plot(x=timePoints_reference, y=reference, xlim=c(x_min,x_max), ylim=c(y_min,y_max),
	     type=ref_type, lty=ref_lty, lwd=ref_lwd, xlab='', ylab='',
	     cex.axis=cex_axis, cex.lab=cex_lab, xaxt='n', yaxt='n', col=ref_col)
	par(new=TRUE)  # overlay the query on the same plotting region (was par(new=T))
	plot(x=timePoints_query, y=query, xlim=c(x_min,x_max), ylim=c(y_min,y_max),
	     type=query_type, lty=query_lty, lwd=query_lwd, xlab=xlabText, ylab=ylabText,
	     cex.lab=cex_lab, col=query_col, main=title, cex.main=cex_main)
	## Grey segments connect each matched (query, reference) pair along the
	## warping path.  points(type='l') draws onto the current plot, so the
	## original trailing par(new=T) was removed: it leaked new=TRUE device
	## state, making the caller's *next* plot() overlay this figure.
	for (i in seq_along(index_1)) {
		x_vector <- c(timePoints_query[index_1[i]], timePoints_reference[index_2[i]])
		y_vector <- c(query[index_1[i]], reference[index_2[i]])
		points(x_vector, y_vector, type='l', lty=1, col='grey')
	}
	invisible(NULL)
}
| /R/plotDTW.R | no_license | pjiang1105/TimeMeter | R | false | false | 6,068 | r | #' Dynamic Time Warping (DTW) Plot
#'
#' This function plots temporal alignment of query and reference
#'
#' @import segmented dtw
#'
#' @param query A vector containing temporal gene expression values of query
#' @param timePoints_query A vector containing time points of query
#' @param reference A vector containing temporal gene expression values of reference
#' @param timePoints_reference A vector containing time points of reference
#' @param alignment The output from dtw() function
#' @param title Title of the figure
#' @param ref_type Reference plot type: "p": Points, "l": Lines, "b": Both. "l"=default
#' @param ref_lty Reference plot line type: 1:solid, 2:dashed, 3:dotted, 4:dotdash, 5:longdash, 6:twodash. 1=default
#' @param ref_lwd Reference plot line line width. 1.5=default
#' @param ref_col Reference plot color. "black"=default
#' @param query_type Query plot type: "p": Points, "l": Lines, "b": Both. "l"=default
#' @param query_lty Query plot line type: 1:solid, 2:dashed, 3:dotted, 4:dotdash, 5:longdash, 6:twodash. 1=default
#' @param query_lwd Query plot line line width. Default:1.5
#' @param query_col Query plot color. "red"=default
#' @param cex_main The title fond size. 1=default, 1.5 is 50\% larger, 0.5 is 50\% smaller, etc.
#' @param cex_lab Magnification of x and y labels relative to cex
#' @param cex_axis Magnification of axis annotation relative to cex
#' @param xlabText X-axis label text. "Time"=default
#' @param ylabText Y-axis label text. "Gene Expression Values"=default
#'
#' @return It returns a plot.
#'
#' @examples
#' data(simData)
#' data=simdata$TimeShift_10
#' gene=data$gene
#' query=data$query
#' timePoints_query=data$timePoints_query
#' reference=data$reference
#' timePoints_reference=data$timePoints_reference
#' alignment=dtw(query,reference)
#' dtw_results=list(alignment$index1,alignment$index2)
#' index_1=dtw_results[[1]]
#' index_2=dtw_results[[2]]
#' aligned_values_query=query[index_1]
#' aligned_values_reference=reference[index_2]
#' aligned_timePoints_query=timePoints_query[index_1]
#' aligned_timePoints_reference=timePoints_reference[index_2]
#' index_alignableRegion=alignableRegionIndex(aligned_timePoints_query,aligned_timePoints_reference)
#' alignableRegion_values_query=aligned_values_query[index_alignableRegion]
#' alignableRegion_values_reference=aligned_values_reference[index_alignableRegion]
#' alignableRegion_timePoints_query=aligned_timePoints_query[index_alignableRegion]
#' alignableRegion_timePoints_reference=aligned_timePoints_reference[index_alignableRegion]
#' percentageAlignmentQuery=percentageAlignment(timePoints_query,
#' timePoints_reference,
#' alignableRegion_timePoints_query,
#' alignableRegion_timePoints_reference)['percentage_alignment_query']
#' percentageAlignmentReference=percentageAlignment(timePoints_query,
#' timePoints_reference,
#' alignableRegion_timePoints_query,
#' alignableRegion_timePoints_reference)['percentage_alignment_reference']
#' Rho=spearmanCorrelation(alignableRegion_values_query,alignableRegion_values_reference)
#' pValueRho=getPValueRho(Rho,query,timePoints_query,reference,timePoints_reference)
#' alignableRegion_timePoints_query_merged=mergeReferencePoints(alignableRegion_timePoints_query,
#' alignableRegion_timePoints_reference)["aligned_timePoints_query_merged"][[1]]
#' alignableRegion_timePoints_reference_merged=mergeReferencePoints(alignableRegion_timePoints_query,
#' alignableRegion_timePoints_reference)["aligned_timePoints_reference_merged"][[1]]
#' segmentedRegression_out=segmentedRegression(alignableRegion_timePoints_query_merged,
#' alignableRegion_timePoints_reference_merged)
#' breakPointsMatrix=fetchBreakPoints(segmentedRegression_out)
#' PAS=getPAS(segmentedRegression_out)$PAS
#' PASVector=getPAS(segmentedRegression_out)$PASVector
#' plotDTW(query,timePoints_query,reference,timePoints_reference,alignment,title="DTW")
#'
#'
#'
#' @export
#' @author Peng Jiang \email{PJiang@morgridge.org}
plotDTW <- function(query,
                    timePoints_query,
                    reference,
                    timePoints_reference,
                    alignment,
                    title,
                    ref_type="l",
                    ref_lty=1,
                    ref_lwd=1.5,
                    ref_col="black",
                    query_type="l",
                    query_lty=1,
                    query_lwd=1.5,
                    query_col="red",
                    cex_main=1,
                    cex_lab=1.2,
                    cex_axis=1.2,
                    xlabText="Time",
                    ylabText="Gene Expression Values") {
	## DTW warping path: index1/index2 are parallel vectors of matched positions
	## in query and reference.  (Removed the unused dtw_results/aligned_* locals.)
	index_1 <- alignment$index1
	index_2 <- alignment$index2
	## Shared axis limits so the two curves overlay on the same scale.
	x_min <- min(c(timePoints_reference, timePoints_query))
	x_max <- max(c(timePoints_reference, timePoints_query))
	y_min <- min(c(reference, query))
	y_max <- max(c(reference, query))
	## Reference curve first, with axes/labels suppressed (the query plot below
	## draws the axis annotation and titles).
	plot(x=timePoints_reference, y=reference, xlim=c(x_min,x_max), ylim=c(y_min,y_max),
	     type=ref_type, lty=ref_lty, lwd=ref_lwd, xlab='', ylab='',
	     cex.axis=cex_axis, cex.lab=cex_lab, xaxt='n', yaxt='n', col=ref_col)
	par(new=TRUE)  # overlay the query on the same plotting region (was par(new=T))
	plot(x=timePoints_query, y=query, xlim=c(x_min,x_max), ylim=c(y_min,y_max),
	     type=query_type, lty=query_lty, lwd=query_lwd, xlab=xlabText, ylab=ylabText,
	     cex.lab=cex_lab, col=query_col, main=title, cex.main=cex_main)
	## Grey segments connect each matched (query, reference) pair along the
	## warping path.  points(type='l') draws onto the current plot, so the
	## original trailing par(new=T) was removed: it leaked new=TRUE device
	## state, making the caller's *next* plot() overlay this figure.
	for (i in seq_along(index_1)) {
		x_vector <- c(timePoints_query[index_1[i]], timePoints_reference[index_2[i]])
		y_vector <- c(query[index_1[i]], reference[index_2[i]])
		points(x_vector, y_vector, type='l', lty=1, col='grey')
	}
	invisible(NULL)
}
|
##---------------------------------------------------------------------------
##
## list
##
##---------------------------------------------------------------------------
#' @param pedigree an object of class \code{Pedigree}
#' @aliases mad2,list-method
#' @rdname mad2
setMethod("mad2", signature(object = "list"),
          function(object, byrow, pedigree, ...) madList(object, byrow, pedigree, ...))
#' @aliases mad2,TrioSetList-method
#' @rdname mad2
setMethod("mad2", signature(object = "TrioSetList"),
          function(object, byrow, ...) madTrioSetList(object, byrow))
## TrioSetList convenience wrapper: extract the log R ratios and the pedigree
## from the container, then delegate to the list worker.
madTrioSetList <- function(object, byrow) {
  madList(lrr(object), byrow = byrow, pedigree = pedigree(object))
}
#' @aliases mad2,matrix-method
#' @rdname mad2
setMethod("mad2", signature(object = "matrix"),
          function(object, byrow, pedigree, ...) madList(list(object), byrow, pedigree, ...))
#' @aliases mad2,array-method
#' @rdname mad2
setMethod("mad2", signature(object = "array"),
          function(object, byrow, pedigree, ...) madList(list(object), byrow, pedigree, ...))
## Worker: median absolute deviations for a list of matrices (samples in
## columns) or 3-d arrays (marker x trio x {father=1, mother=2, offspring=3}).
## byrow=FALSE -> one MAD per column (per sample); byrow=TRUE -> per row.
## NOTE(review): values are divided by 100 before mad(), so inputs appear to be
## integer-scaled log R ratios -- confirm.
madList <- function(object, byrow, pedigree, ...){
	dims <- dim(object[[1]])
	if(length(dims) != 2 && length(dims) != 3)
		stop("Elements of list must be a matrix or an array")
	## ff-backed objects must be opened before reads and closed on exit (below)
	isff <- is(object[[1]], "ff")
	if(isff) lapply(object, open)
	is.matrix <- ifelse(length(dims) == 2, TRUE, FALSE)
	if(!byrow){ ## by column
		if(is.matrix){
			mads <- madFromMatrixList(object, byrow=FALSE)
		} else { ## array
			## for parallelization, it would be better to
			## pass the ff object to the worker nodes,
			## calculate the mad, and return the mad.
			if(!isff){
				## in-memory arrays: split into father/mother/offspring matrices
				F <- lapply(object, function(x) as.matrix(x[, , 1]))
				M <- lapply(object, function(x) as.matrix(x[, , 2]))
				O <- lapply(object, function(x) as.matrix(x[, , 3]))
				mads.father <- madFromMatrixList(F, byrow=FALSE)
				mads.mother <- madFromMatrixList(M, byrow=FALSE)
				mads.offspr <- madFromMatrixList(O, byrow=FALSE)
				if(!missing(pedigree)){
					## label results with the sample names from the pedigree
					names(mads.father) <- fatherNames(pedigree)
					names(mads.mother) <- motherNames(pedigree)
					names(mads.offspr) <- offspringNames(pedigree)
					mads <- data.frame(F=I(mads.father),
							   M=I(mads.mother),
							   O=I(mads.offspr))
				} else {
					mads <- cbind(mads.father, mads.mother, mads.offspr)
					colnames(mads) <- c("F", "M", "O")
				}
			} else { ## big data
				## read ff-backed slices in chunks of 50 columns and compute
				## MADs on the value/100 scale
				madForFFmatrix <- function(xlist, i, j){
					## j=1 father
					## j=2 mother
					## j=3 offspring
					res <- lapply(xlist, function(x, i, j){
						m <- as.matrix(x[, i, j])
					}, i=i, j=j)
					res <- do.call("rbind", res)/100
					mads <- apply(res, 2, mad, na.rm=TRUE)
					mads
				}
				indexList <- splitIndicesByLength(seq_len(ncol(object[[1]])), 50)
				i <- NULL
				mads.father <- foreach(i=indexList, .combine="c") %do% madForFFmatrix(xlist=object, i=i, j=1)
				mads.mother <- foreach(i=indexList, .combine="c") %do% madForFFmatrix(xlist=object, i=i, j=2)
				mads.offspr <- foreach(i=indexList, .combine="c") %do% madForFFmatrix(xlist=object, i=i, j=3)
				if(!missing(pedigree)){
					names(mads.father) <- fatherNames(pedigree)
					names(mads.mother) <- motherNames(pedigree)
					names(mads.offspr) <- offspringNames(pedigree)
					mads <- data.frame(F=I(mads.father),
							   M=I(mads.mother),
							   O=I(mads.offspr))
				} else {
					mads <- cbind(mads.father, mads.mother, mads.offspr)
					colnames(mads) <- c("F", "M", "O")
				}
			}
		}
	} else {## by row
		if(is.matrix){
			## NOTE(review): this is the by-row branch, yet byrow=FALSE is
			## passed (per-column MADs).  Looks like a copy-paste bug from the
			## branch above -- confirm whether byrow=TRUE was intended.
			mads <- madFromMatrixList(object, byrow=FALSE)
		} else {
			if(ncol(object[[1]]) > 2){
				## for parallelization, it would be better to
				## pass the ff object to the worker nodes,
				## calculate the mad, and return the mad.
				stopifnot(!missing(pedigree))
				## keep one column per unique father/mother pair; only the
				## offspring slice (j=3) is actually used below
				colindex <- which(!duplicated(fatherNames(pedigree)) & !duplicated(motherNames(pedigree)))
				##mindex <- which(!duplicated(motherNames(pedigree)))
				##F <- lapply(object, function(x) x[, findex, 1])
				##M <- lapply(object, function(x) x[, mindex, 2])
				O <- lapply(object, function(x) as.matrix(x[, colindex, 3]))
				##mads.f <- madFromMatrixList(F, byrow=TRUE)
				##mads.m <- madFromMatrixList(M, byrow=TRUE)
				mads <- madFromMatrixList(O, byrow=TRUE)
				names(mads) <- names(object)
				##mads <- cbind(mads.f, mads.m, mads.o)
				##colnames(mads) <- c("F", "M", "O")
			} else {
				warning("Too few samples to calculate across sample variance. Returning NULL.")
				mads <- NULL
			}
		}
	}
	if(isff) lapply(object, close)
	return(mads)
}
## MADs for a list of matrices, on the value/100 scale.
## byrow=FALSE: columns are stacked across list elements and one MAD per column
##   is returned (parallel via %dopar%).
## byrow=TRUE: one rowMAD result per list element is returned (sequential %do%).
madFromMatrixList <- function(object, byrow=TRUE){
	## worker packages for the foreach backend
	if(isPackageLoaded("ff")) pkgs <- c("ff", "MinimumDistance") else pkgs <- "MinimumDistance"
	if(!byrow){
		## this could be done more efficiently by following the
		## apply example in the foreach documentation...
		## chunk columns in groups of 100 for the parallel workers
		ilist <- splitIndicesByLength(seq_len(ncol(object[[1]])), 100)
		i <- NULL
		Xlist <- foreach(i=ilist, .packages=pkgs) %dopar% stackListByColIndex(object, i)
		mads <- foreach(i = Xlist, .packages=pkgs) %dopar% apply(i/100, 2, mad, na.rm=TRUE)
		mads <- unlist(mads)
		names(mads) <- colnames(object[[1]])
		mads
	} else {
		x <- NULL
		mads <- foreach(x = object, .packages=pkgs) %do% rowMAD(x/100, na.rm=TRUE)
		## carry the row (marker) names through when available
		## NOTE(review): scalar condition uses elementwise '&'; '&&' would be
		## conventional here.
		if( !is.null(dim(mads[[1]])) & !is.null(rownames(object[[1]]))){
			labelrows <- function(x, fns) {
				rownames(x) <- fns
				return(x)
			}
			fns <- NULL
			mads <- foreach(x=mads, fns=lapply(object, rownames)) %do% labelrows(x=x, fns=fns)
		}
	}
	return(mads)
}
| /R/mad-methods.R | no_license | rscharpf/MinimumDistance-release | R | false | false | 5,516 | r | ##---------------------------------------------------------------------------
##
## list
##
##---------------------------------------------------------------------------
#' @param pedigree an object of class \code{Pedigree}
#' @aliases mad2,list-method
#' @rdname mad2
setMethod("mad2", signature(object = "list"),
          function(object, byrow, pedigree, ...) madList(object, byrow, pedigree, ...))
#' @aliases mad2,TrioSetList-method
#' @rdname mad2
setMethod("mad2", signature(object = "TrioSetList"),
          function(object, byrow, ...) madTrioSetList(object, byrow))
## TrioSetList convenience wrapper: extract the log R ratios and the pedigree
## from the container, then delegate to the list worker.
madTrioSetList <- function(object, byrow) {
  madList(lrr(object), byrow = byrow, pedigree = pedigree(object))
}
#' @aliases mad2,matrix-method
#' @rdname mad2
setMethod("mad2", signature(object = "matrix"),
          function(object, byrow, pedigree, ...) madList(list(object), byrow, pedigree, ...))
#' @aliases mad2,array-method
#' @rdname mad2
setMethod("mad2", signature(object = "array"),
          function(object, byrow, pedigree, ...) madList(list(object), byrow, pedigree, ...))
## Worker: median absolute deviations for a list of matrices (samples in
## columns) or 3-d arrays (marker x trio x {father=1, mother=2, offspring=3}).
## byrow=FALSE -> one MAD per column (per sample); byrow=TRUE -> per row.
## NOTE(review): values are divided by 100 before mad(), so inputs appear to be
## integer-scaled log R ratios -- confirm.
madList <- function(object, byrow, pedigree, ...){
	dims <- dim(object[[1]])
	if(length(dims) != 2 && length(dims) != 3)
		stop("Elements of list must be a matrix or an array")
	## ff-backed objects must be opened before reads and closed on exit (below)
	isff <- is(object[[1]], "ff")
	if(isff) lapply(object, open)
	is.matrix <- ifelse(length(dims) == 2, TRUE, FALSE)
	if(!byrow){ ## by column
		if(is.matrix){
			mads <- madFromMatrixList(object, byrow=FALSE)
		} else { ## array
			## for parallelization, it would be better to
			## pass the ff object to the worker nodes,
			## calculate the mad, and return the mad.
			if(!isff){
				## in-memory arrays: split into father/mother/offspring matrices
				F <- lapply(object, function(x) as.matrix(x[, , 1]))
				M <- lapply(object, function(x) as.matrix(x[, , 2]))
				O <- lapply(object, function(x) as.matrix(x[, , 3]))
				mads.father <- madFromMatrixList(F, byrow=FALSE)
				mads.mother <- madFromMatrixList(M, byrow=FALSE)
				mads.offspr <- madFromMatrixList(O, byrow=FALSE)
				if(!missing(pedigree)){
					## label results with the sample names from the pedigree
					names(mads.father) <- fatherNames(pedigree)
					names(mads.mother) <- motherNames(pedigree)
					names(mads.offspr) <- offspringNames(pedigree)
					mads <- data.frame(F=I(mads.father),
							   M=I(mads.mother),
							   O=I(mads.offspr))
				} else {
					mads <- cbind(mads.father, mads.mother, mads.offspr)
					colnames(mads) <- c("F", "M", "O")
				}
			} else { ## big data
				## read ff-backed slices in chunks of 50 columns and compute
				## MADs on the value/100 scale
				madForFFmatrix <- function(xlist, i, j){
					## j=1 father
					## j=2 mother
					## j=3 offspring
					res <- lapply(xlist, function(x, i, j){
						m <- as.matrix(x[, i, j])
					}, i=i, j=j)
					res <- do.call("rbind", res)/100
					mads <- apply(res, 2, mad, na.rm=TRUE)
					mads
				}
				indexList <- splitIndicesByLength(seq_len(ncol(object[[1]])), 50)
				i <- NULL
				mads.father <- foreach(i=indexList, .combine="c") %do% madForFFmatrix(xlist=object, i=i, j=1)
				mads.mother <- foreach(i=indexList, .combine="c") %do% madForFFmatrix(xlist=object, i=i, j=2)
				mads.offspr <- foreach(i=indexList, .combine="c") %do% madForFFmatrix(xlist=object, i=i, j=3)
				if(!missing(pedigree)){
					names(mads.father) <- fatherNames(pedigree)
					names(mads.mother) <- motherNames(pedigree)
					names(mads.offspr) <- offspringNames(pedigree)
					mads <- data.frame(F=I(mads.father),
							   M=I(mads.mother),
							   O=I(mads.offspr))
				} else {
					mads <- cbind(mads.father, mads.mother, mads.offspr)
					colnames(mads) <- c("F", "M", "O")
				}
			}
		}
	} else {## by row
		if(is.matrix){
			## NOTE(review): this is the by-row branch, yet byrow=FALSE is
			## passed (per-column MADs).  Looks like a copy-paste bug from the
			## branch above -- confirm whether byrow=TRUE was intended.
			mads <- madFromMatrixList(object, byrow=FALSE)
		} else {
			if(ncol(object[[1]]) > 2){
				## for parallelization, it would be better to
				## pass the ff object to the worker nodes,
				## calculate the mad, and return the mad.
				stopifnot(!missing(pedigree))
				## keep one column per unique father/mother pair; only the
				## offspring slice (j=3) is actually used below
				colindex <- which(!duplicated(fatherNames(pedigree)) & !duplicated(motherNames(pedigree)))
				##mindex <- which(!duplicated(motherNames(pedigree)))
				##F <- lapply(object, function(x) x[, findex, 1])
				##M <- lapply(object, function(x) x[, mindex, 2])
				O <- lapply(object, function(x) as.matrix(x[, colindex, 3]))
				##mads.f <- madFromMatrixList(F, byrow=TRUE)
				##mads.m <- madFromMatrixList(M, byrow=TRUE)
				mads <- madFromMatrixList(O, byrow=TRUE)
				names(mads) <- names(object)
				##mads <- cbind(mads.f, mads.m, mads.o)
				##colnames(mads) <- c("F", "M", "O")
			} else {
				warning("Too few samples to calculate across sample variance. Returning NULL.")
				mads <- NULL
			}
		}
	}
	if(isff) lapply(object, close)
	return(mads)
}
## MADs for a list of matrices, on the value/100 scale.
## byrow=FALSE: columns are stacked across list elements and one MAD per column
##   is returned (parallel via %dopar%).
## byrow=TRUE: one rowMAD result per list element is returned (sequential %do%).
madFromMatrixList <- function(object, byrow=TRUE){
	## worker packages for the foreach backend
	if(isPackageLoaded("ff")) pkgs <- c("ff", "MinimumDistance") else pkgs <- "MinimumDistance"
	if(!byrow){
		## this could be done more efficiently by following the
		## apply example in the foreach documentation...
		## chunk columns in groups of 100 for the parallel workers
		ilist <- splitIndicesByLength(seq_len(ncol(object[[1]])), 100)
		i <- NULL
		Xlist <- foreach(i=ilist, .packages=pkgs) %dopar% stackListByColIndex(object, i)
		mads <- foreach(i = Xlist, .packages=pkgs) %dopar% apply(i/100, 2, mad, na.rm=TRUE)
		mads <- unlist(mads)
		names(mads) <- colnames(object[[1]])
		mads
	} else {
		x <- NULL
		mads <- foreach(x = object, .packages=pkgs) %do% rowMAD(x/100, na.rm=TRUE)
		## carry the row (marker) names through when available
		## NOTE(review): scalar condition uses elementwise '&'; '&&' would be
		## conventional here.
		if( !is.null(dim(mads[[1]])) & !is.null(rownames(object[[1]]))){
			labelrows <- function(x, fns) {
				rownames(x) <- fns
				return(x)
			}
			fns <- NULL
			mads <- foreach(x=mads, fns=lapply(object, rownames)) %do% labelrows(x=x, fns=fns)
		}
	}
	return(mads)
}
|
# Course exercise template: the `___` blanks are intentionally left for the
# student to fill in, so this file is not runnable as-is.
# (The blanks presumably take group_by() and then summarize() -- confirm with
# the accompanying lesson.)
library(tidyverse)
voters <- read_csv("data/voters.csv")
# How do the responses on the survey vary with voting behavior?
voters %>%
    ___(turnout16_2016) %>%
    ___(`Elections don't matter` = mean(RIGGED_SYSTEM_1_2016 <= 2),
        `Economy is getting better` = mean(econtrend_2016 == 1),
        `Crime is very important` = mean(imiss_a_2016 == 2))
| /exercises/exc_03_03_2.R | permissive | snowdj/supervised-ML-case-studies-course | R | false | false | 369 | r | library(tidyverse)
# Course exercise template: the `___` blanks are intentionally left for the
# student to fill in, so this file is not runnable as-is.
# (The blanks presumably take group_by() and then summarize() -- confirm with
# the accompanying lesson.)
voters <- read_csv("data/voters.csv")
# How do the responses on the survey vary with voting behavior?
voters %>%
    ___(turnout16_2016) %>%
    ___(`Elections don't matter` = mean(RIGGED_SYSTEM_1_2016 <= 2),
        `Economy is getting better` = mean(econtrend_2016 == 1),
        `Crime is very important` = mean(imiss_a_2016 == 2))
|
#' the response to a textDocument/signatureHelp Request
#'
#' If the symbol at the current position is a function, return its arguments
#' (as with [base::args()]).
#'
#' @keywords internal
signature_reply <- function(id, uri, workspace, document, point) {
    # No signature help outside an R scope (e.g. inside a comment or string).
    if (!check_scope(uri, document, point)) {
        return(Response$new(id, list(signatures = NULL)))
    }

    call_info <- document$detect_call(point)

    # Defaults for "nothing found": empty signature list, activeSignature = -1.
    signatures <- list()
    active <- -1

    if (nzchar(call_info$token)) {
        sig_label <- workspace$get_signature(
            call_info$token, call_info$package,
            exported_only = call_info$accessor != ":::")
        logger$info("sig: ", sig_label)
        if (!is.null(sig_label)) {
            doc_entry <- workspace$get_documentation(
                call_info$token, call_info$package, isf = TRUE)
            # The documentation lookup yields either a plain string or a list
            # with a `description` field; anything else maps to "".
            description <- ""
            if (is.character(doc_entry)) {
                description <- doc_entry
            } else if (is.list(doc_entry)) {
                description <- doc_entry$description
            }
            signatures <- list(list(
                label = sig_label,
                documentation = list(kind = "markdown", value = description)
            ))
            active <- 0
        }
    }

    Response$new(
        id,
        result = list(
            signatures = signatures,
            activeSignature = active
        )
    )
}
| /R/signature.R | no_license | hongooi73/languageserver | R | false | false | 1,405 | r | #' the response to a textDocument/signatureHelp Request
#' the response to a textDocument/signatureHelp Request
#'
#' If the symbol at the current position is a function, return its arguments
#' (as with [base::args()]).
#'
#' @keywords internal
signature_reply <- function(id, uri, workspace, document, point) {
    # No signature help outside an R scope (e.g. inside a comment or string).
    if (!check_scope(uri, document, point)) {
        return(Response$new(id, list(signatures = NULL)))
    }

    call_info <- document$detect_call(point)

    # Defaults for "nothing found": empty signature list, activeSignature = -1.
    signatures <- list()
    active <- -1

    if (nzchar(call_info$token)) {
        sig_label <- workspace$get_signature(
            call_info$token, call_info$package,
            exported_only = call_info$accessor != ":::")
        logger$info("sig: ", sig_label)
        if (!is.null(sig_label)) {
            doc_entry <- workspace$get_documentation(
                call_info$token, call_info$package, isf = TRUE)
            # The documentation lookup yields either a plain string or a list
            # with a `description` field; anything else maps to "".
            description <- ""
            if (is.character(doc_entry)) {
                description <- doc_entry
            } else if (is.list(doc_entry)) {
                description <- doc_entry$description
            }
            signatures <- list(list(
                label = sig_label,
                documentation = list(kind = "markdown", value = description)
            ))
            active <- 0
        }
    }

    Response$new(
        id,
        result = list(
            signatures = signatures,
            activeSignature = active
        )
    )
}
}
|
#
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(shinythemes)
# UI definition.
# BUG FIX: the original called fluidPage() without assigning the result, so
# `shinyApp(ui = ui, ...)` at the bottom failed with "object 'ui' not found".
ui <- fluidPage(theme = shinytheme("cerulean"),
   # Application title
   titlePanel("Old Faithful Geyser Data"),
   # Sidebar with a slider input for number of bins
   sidebarLayout(
      sidebarPanel(
         sliderInput("bins",
                     "Number of bins:",
                     min = 1,
                     max = 50,
                     value = 30)
      ),
      # Show a plot of the generated distribution
      mainPanel(
         plotOutput("distPlot")
      )
   )
)
# Server logic: histogram of Old Faithful waiting times, with a user-chosen
# number of bins.
server <- function(input, output) {
   output$distPlot <- renderPlot({
      # Column 2 of `faithful` holds the waiting times between eruptions.
      waiting <- faithful[, 2]
      # Bin edges spanning the observed range; input$bins sets the resolution.
      edges <- seq(min(waiting), max(waiting), length.out = input$bins + 1)
      hist(waiting, breaks = edges, col = 'darkgray', border = 'white')
   })
}
# Run the application
# (launches the app using the `ui` and `server` objects in scope)
shinyApp(ui = ui, server = server)
| /app.R | no_license | sahilsangani98/VizClean | R | false | false | 1,304 | r | #
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(shinythemes)
# UI definition.
# BUG FIX: the original called fluidPage() without assigning the result, so
# `shinyApp(ui = ui, ...)` at the bottom failed with "object 'ui' not found".
ui <- fluidPage(theme = shinytheme("cerulean"),
   # Application title
   titlePanel("Old Faithful Geyser Data"),
   # Sidebar with a slider input for number of bins
   sidebarLayout(
      sidebarPanel(
         sliderInput("bins",
                     "Number of bins:",
                     min = 1,
                     max = 50,
                     value = 30)
      ),
      # Show a plot of the generated distribution
      mainPanel(
         plotOutput("distPlot")
      )
   )
)
# Server logic: histogram of Old Faithful waiting times, with a user-chosen
# number of bins.
server <- function(input, output) {
   output$distPlot <- renderPlot({
      # Column 2 of `faithful` holds the waiting times between eruptions.
      waiting <- faithful[, 2]
      # Bin edges spanning the observed range; input$bins sets the resolution.
      edges <- seq(min(waiting), max(waiting), length.out = input$bins + 1)
      hist(waiting, breaks = edges, col = 'darkgray', border = 'white')
   })
}
# Run the application
# (launches the app using the `ui` and `server` objects in scope)
shinyApp(ui = ui, server = server)
|
#!/usr/bin/Rscript
## Tree-ring (ring-width) data preparation for the NORTHROUND site: reads
## Tucson-format .rwl files, attaches tree metadata, and builds the long-format
## increment table used by the statistical models below.
library(plotrix)
library(dplR)
library(fields)
library(reshape2)
library(plyr)
# source("config_HMC")
## Configuration constants.
## NOTE(review): dataDir is defined but the reads below use relative
## "data/..." paths instead -- confirm which is intended.
dataDir = '~/Documents/projects/npp-stat-model/data'
site = 'NORTHROUND'
dvers = "v0.1"  # data version
mvers = "v0.1"  # model version
nPlots <- 4
ftPerMeter <- 3.2808399  # feet per meter (unit conversion factor)
lastYear <- 2015
firstYear <- 1940
years <- firstYear:lastYear  # modeled calendar years
nT <- length(years)          # number of time steps
# Ring-width (.rwl) files for this site, one list entry per file, keyed by
# the file name without its extension.
rwFiles <- list.files(paste0("data/", site, '/', "rwl"))
rwFiles <- rwFiles[grep("\\.rwl$", rwFiles)]
rwData <- list()
for(fn in rwFiles) {
  # Fix: strip the literal ".rwl" suffix. The old gsub(".rw", "", fn)
  # treated the dot as a regex wildcard and only removed ".rw", leaving a
  # trailing "l" in the list name (e.g. "plot1.rwl" -> "plot1l").
  id <- gsub("\\.rwl$", "", fn)
  rwData[[id]] <- t(read.tucson(file.path("data", site, "rwl", fn))) # rows are tree, cols are times
}
# Tree metadata for all plots (the first 3 lines of the csv are a preamble)
treeMeta = read.csv("data/NORTHROUND/NorthRoundPondAllPlots.csv", skip=3)
# One wide data frame of increments: rows = cores, columns = years
incr = ldply(rwData, rbind)
# Keep the .id column first and sort the year columns in ascending order
incr = incr[,c(".id", sort(colnames(incr)[2:ncol(incr)]))]
rownames(incr) = as.vector(unlist(lapply(rwData, rownames)))
# Replace the file-level .id with the per-core rowname identifier
incr[,1] = rownames(incr)
######################################################################################################################################
## make nimble data
######################################################################################################################################
if (!dir.exists('data/dump')){
  dir.create('data/dump', recursive = TRUE)
}

# Long format: one row per (tree, year) ring-width increment
incr_data = melt(incr)
colnames(incr_data) = c('id', 'year', 'incr')

# Decode plot number and within-plot tree number from the core ID string
incr_data$plot = as.numeric(substr(incr_data$id, 4, 4))
incr_data$TreeID = incr_data$id
incr_data$id = as.numeric(substr(incr_data$id, 5, 7))
incr_data$year = as.vector(incr_data$year)

# Sequential statistical ID for each distinct tree (first 7 chars of core ID)
tree_ids = unique(substr(incr_data$TreeID, 1, 7))
N_trees = length(tree_ids)
stat_ids = seq(1, N_trees)
incr_data$stat_id = stat_ids[match(substr(incr_data$TreeID, 1, 7), tree_ids)]

# Species lookup from the plot metadata; seq_len is safe for an empty table
# (the old 1:nrow(...) would iterate over c(1, 0)). Per-row debug prints removed.
for (n in seq_len(nrow(incr_data))){
  incr_data$taxon[n] = as.vector(treeMeta$Species[which((as.numeric(substr(treeMeta$Site, 4, 4))==incr_data$plot[n])&
                                                          (treeMeta$Tree.Number == incr_data$id[n]))])
}

N_taxa = length(unique(incr_data$taxon))
taxaMatch = data.frame(species=sort(unique(incr_data$taxon)), number=seq(1, N_taxa))
taxon = aggregate(taxon~stat_id, incr_data, unique)[,2]
taxon = taxaMatch$number[match(taxon, taxaMatch$species)]
plot_id = aggregate(plot~stat_id, incr_data, unique)[,2]

##########################################################################################################################
## STAN DATA
##########################################################################################################################
year_end = max(as.numeric(incr_data$year), na.rm=TRUE)
year_start = min(as.numeric(incr_data$year), na.rm=TRUE)
N_years = year_end - year_start + 1
years = seq(year_start, year_end)

# Order by tree and year, drop missing increments, shift years to a 1-based index
incr_data = incr_data[order(incr_data$stat_id, incr_data$year),]
incr_data = incr_data[which(!is.na(incr_data$incr)),]
incr_data$year = as.numeric(incr_data$year) - year_start + 1

N_inc = nrow(incr_data) # number of measurements
m2t = incr_data$year
m2tree = incr_data$stat_id
m2plot = incr_data$plot
m2taxon = taxaMatch$number[match(incr_data$taxon, taxaMatch$species)]

# Log ring widths; exact zeros replaced by a small positive value before log
Xobs = incr_data$incr
Xobs[Xobs==0] = 0.0001
logXobs = log(Xobs)

# First and last observed (index) year per tree
year_idx = data.frame(year_start=as.numeric(aggregate(year~stat_id, data=incr_data, FUN=min, na.rm=TRUE)[,2]), year_end=as.numeric(aggregate(year~stat_id, incr_data, max)[,2]))

# Last measured year per tree, with DBH and distance pulled from the metadata
pdbh = aggregate(year~stat_id+plot+id, incr_data, max, na.rm=TRUE)
pdbh = pdbh[order(pdbh$stat_id),]
for (n in seq_len(nrow(pdbh))){
  pdbh$dbh[n] = treeMeta$DBH[which((as.numeric(substr(treeMeta$Site, 4, 4))==pdbh$plot[n])&
                                     (treeMeta$Tree.Number == pdbh$id[n]))]
  pdbh$distance[n] = treeMeta[which((as.numeric(substr(treeMeta$Site, 4, 4))==pdbh$plot[n])&
                                      (treeMeta$Tree.Number == pdbh$id[n])), 'Distance..base.']
}
N_pdbh = nrow(pdbh)
logPDobs = log(pdbh$dbh)
pdbh_tree_id = pdbh$stat_id
pdbh_year_id = pdbh$year
distance = pdbh$distance

# One latent state per (tree, year) inside each tree's observation window.
# Built per tree and bound once: the old rbind-inside-the-loop grew the data
# frame quadratically.
idx_per_tree = vector("list", N_trees)
for (tree in seq_len(N_trees)){
  yrs = seq(year_idx[tree, 1], year_idx[tree, 2])
  idx_per_tree[[tree]] = data.frame(tree_id = rep(tree, length(yrs)), year = yrs)
}
idx_stack = do.call(rbind, idx_per_tree)
idx_stack = data.frame(meas = seq_len(nrow(idx_stack)), idx_stack)

idx_tree = which(!duplicated(idx_stack$tree_id))
idx_tree = data.frame(idx_tree, c(idx_tree[-1]-1, nrow(idx_stack)))
x2tree = idx_stack$tree_id
x2year = idx_stack$year
N_vals = nrow(idx_stack)

# Map each increment measurement / final DBH to its latent-state row.
# (tree_id, year) pairs are unique in idx_stack by construction, so a single
# vectorized match() reproduces the old per-row which() lookups.
meas2x = match(paste(incr_data$stat_id, incr_data$year),
               paste(idx_stack$tree_id, idx_stack$year))
pdbh2val = match(paste(pdbh$stat_id, pdbh$year),
                 paste(idx_stack$tree_id, idx_stack$year))
# Output directory layout: sites/<site>/{data,output,figures}
site_dir <- file.path('sites', site)
if (!dir.exists(site_dir)){
  # dir.exists (not file.exists) so a stray regular file named like the
  # directory is not mistaken for it; recursive covers a missing 'sites/'.
  dir.create(site_dir, recursive = TRUE)
}
dir.create(file.path(site_dir, 'data'), showWarnings = FALSE)
dir.create(file.path(site_dir, 'output'), showWarnings = FALSE)
dir.create(file.path(site_dir, 'figures'), showWarnings = FALSE)

# Bundle every quantity the STAN model needs into a single RDS file
saveRDS(list(N_trees=N_trees,
             N_years=N_years,
             N_vals=N_vals,
             N_inc = N_inc,
             logXobs=logXobs,
             logPDobs=logPDobs,
             year_idx=year_idx,
             N_taxa=N_taxa,
             pdbh_year=pdbh_year_id,
             idx_tree =idx_tree,
             pdbh2val=pdbh2val,
             x2tree=x2tree,
             x2year=x2year,
             meas2x = meas2x,
             m2taxon = m2taxon,
             taxon = taxon,
             taxaMatch=taxaMatch,
             plot_id = plot_id,
             years = years,
             m2tree = m2tree,
             m2t = m2t,
             distance=distance),
        file=paste0('sites/', site, '/data/tree_data_', site ,'_STAN_', dvers, '.RDS'))
| /r/build_data_NORTHROUND.R | no_license | andydawson/npp-stat-model | R | false | false | 6,036 | r | #!/usr/bin/Rscript
library(plotrix)
library(dplR)
library(fields)
library(reshape2)
library(plyr)
# source("config_HMC")
# --- Site configuration -----------------------------------------------------
dataDir = '~/Documents/projects/npp-stat-model/data'
site = 'NORTHROUND'
# Data and model version tags, used in output file names
dvers = "v0.1"
mvers = "v0.1"
nPlots <- 4
# Feet per meter, for converting field measurements
ftPerMeter <- 3.2808399
# Year range of the analysis (note: `years`/`nT` are recomputed from the
# observed data further down; these are the configured defaults)
lastYear <- 2015
firstYear <- 1940
years <- firstYear:lastYear
nT <- length(years)
# Ring-width (.rwl) files for this site, one list entry per file, keyed by
# the file name without its extension.
rwFiles <- list.files(paste0("data/", site, '/', "rwl"))
rwFiles <- rwFiles[grep("\\.rwl$", rwFiles)]
rwData <- list()
for(fn in rwFiles) {
  # Fix: strip the literal ".rwl" suffix. The old gsub(".rw", "", fn)
  # treated the dot as a regex wildcard and only removed ".rw", leaving a
  # trailing "l" in the list name (e.g. "plot1.rwl" -> "plot1l").
  id <- gsub("\\.rwl$", "", fn)
  rwData[[id]] <- t(read.tucson(file.path("data", site, "rwl", fn))) # rows are tree, cols are times
}
# Tree metadata for all plots (the first 3 lines of the csv are a preamble)
treeMeta = read.csv("data/NORTHROUND/NorthRoundPondAllPlots.csv", skip=3)
# One wide data frame of increments: rows = cores, columns = years
incr = ldply(rwData, rbind)
# Keep the .id column first and sort the year columns in ascending order
incr = incr[,c(".id", sort(colnames(incr)[2:ncol(incr)]))]
rownames(incr) = as.vector(unlist(lapply(rwData, rownames)))
# Replace the file-level .id with the per-core rowname identifier
incr[,1] = rownames(incr)
######################################################################################################################################
## make nimble data
######################################################################################################################################
if (!dir.exists('data/dump')){
  dir.create('data/dump', recursive = TRUE)
}

# Long format: one row per (tree, year) ring-width increment
incr_data = melt(incr)
colnames(incr_data) = c('id', 'year', 'incr')

# Decode plot number and within-plot tree number from the core ID string
incr_data$plot = as.numeric(substr(incr_data$id, 4, 4))
incr_data$TreeID = incr_data$id
incr_data$id = as.numeric(substr(incr_data$id, 5, 7))
incr_data$year = as.vector(incr_data$year)

# Sequential statistical ID for each distinct tree (first 7 chars of core ID)
tree_ids = unique(substr(incr_data$TreeID, 1, 7))
N_trees = length(tree_ids)
stat_ids = seq(1, N_trees)
incr_data$stat_id = stat_ids[match(substr(incr_data$TreeID, 1, 7), tree_ids)]

# Species lookup from the plot metadata; seq_len is safe for an empty table
# (the old 1:nrow(...) would iterate over c(1, 0)). Per-row debug prints removed.
for (n in seq_len(nrow(incr_data))){
  incr_data$taxon[n] = as.vector(treeMeta$Species[which((as.numeric(substr(treeMeta$Site, 4, 4))==incr_data$plot[n])&
                                                          (treeMeta$Tree.Number == incr_data$id[n]))])
}

N_taxa = length(unique(incr_data$taxon))
taxaMatch = data.frame(species=sort(unique(incr_data$taxon)), number=seq(1, N_taxa))
taxon = aggregate(taxon~stat_id, incr_data, unique)[,2]
taxon = taxaMatch$number[match(taxon, taxaMatch$species)]
plot_id = aggregate(plot~stat_id, incr_data, unique)[,2]

##########################################################################################################################
## STAN DATA
##########################################################################################################################
year_end = max(as.numeric(incr_data$year), na.rm=TRUE)
year_start = min(as.numeric(incr_data$year), na.rm=TRUE)
N_years = year_end - year_start + 1
years = seq(year_start, year_end)

# Order by tree and year, drop missing increments, shift years to a 1-based index
incr_data = incr_data[order(incr_data$stat_id, incr_data$year),]
incr_data = incr_data[which(!is.na(incr_data$incr)),]
incr_data$year = as.numeric(incr_data$year) - year_start + 1

N_inc = nrow(incr_data) # number of measurements
m2t = incr_data$year
m2tree = incr_data$stat_id
m2plot = incr_data$plot
m2taxon = taxaMatch$number[match(incr_data$taxon, taxaMatch$species)]

# Log ring widths; exact zeros replaced by a small positive value before log
Xobs = incr_data$incr
Xobs[Xobs==0] = 0.0001
logXobs = log(Xobs)

# First and last observed (index) year per tree
year_idx = data.frame(year_start=as.numeric(aggregate(year~stat_id, data=incr_data, FUN=min, na.rm=TRUE)[,2]), year_end=as.numeric(aggregate(year~stat_id, incr_data, max)[,2]))

# Last measured year per tree, with DBH and distance pulled from the metadata
pdbh = aggregate(year~stat_id+plot+id, incr_data, max, na.rm=TRUE)
pdbh = pdbh[order(pdbh$stat_id),]
for (n in seq_len(nrow(pdbh))){
  pdbh$dbh[n] = treeMeta$DBH[which((as.numeric(substr(treeMeta$Site, 4, 4))==pdbh$plot[n])&
                                     (treeMeta$Tree.Number == pdbh$id[n]))]
  pdbh$distance[n] = treeMeta[which((as.numeric(substr(treeMeta$Site, 4, 4))==pdbh$plot[n])&
                                      (treeMeta$Tree.Number == pdbh$id[n])), 'Distance..base.']
}
N_pdbh = nrow(pdbh)
logPDobs = log(pdbh$dbh)
pdbh_tree_id = pdbh$stat_id
pdbh_year_id = pdbh$year
distance = pdbh$distance

# One latent state per (tree, year) inside each tree's observation window.
# Built per tree and bound once: the old rbind-inside-the-loop grew the data
# frame quadratically.
idx_per_tree = vector("list", N_trees)
for (tree in seq_len(N_trees)){
  yrs = seq(year_idx[tree, 1], year_idx[tree, 2])
  idx_per_tree[[tree]] = data.frame(tree_id = rep(tree, length(yrs)), year = yrs)
}
idx_stack = do.call(rbind, idx_per_tree)
idx_stack = data.frame(meas = seq_len(nrow(idx_stack)), idx_stack)

idx_tree = which(!duplicated(idx_stack$tree_id))
idx_tree = data.frame(idx_tree, c(idx_tree[-1]-1, nrow(idx_stack)))
x2tree = idx_stack$tree_id
x2year = idx_stack$year
N_vals = nrow(idx_stack)

# Map each increment measurement / final DBH to its latent-state row.
# (tree_id, year) pairs are unique in idx_stack by construction, so a single
# vectorized match() reproduces the old per-row which() lookups.
meas2x = match(paste(incr_data$stat_id, incr_data$year),
               paste(idx_stack$tree_id, idx_stack$year))
pdbh2val = match(paste(pdbh$stat_id, pdbh$year),
                 paste(idx_stack$tree_id, idx_stack$year))
# Output directory layout: sites/<site>/{data,output,figures}
site_dir <- file.path('sites', site)
if (!dir.exists(site_dir)){
  # dir.exists (not file.exists) so a stray regular file named like the
  # directory is not mistaken for it; recursive covers a missing 'sites/'.
  dir.create(site_dir, recursive = TRUE)
}
dir.create(file.path(site_dir, 'data'), showWarnings = FALSE)
dir.create(file.path(site_dir, 'output'), showWarnings = FALSE)
dir.create(file.path(site_dir, 'figures'), showWarnings = FALSE)

# Bundle every quantity the STAN model needs into a single RDS file
saveRDS(list(N_trees=N_trees,
             N_years=N_years,
             N_vals=N_vals,
             N_inc = N_inc,
             logXobs=logXobs,
             logPDobs=logPDobs,
             year_idx=year_idx,
             N_taxa=N_taxa,
             pdbh_year=pdbh_year_id,
             idx_tree =idx_tree,
             pdbh2val=pdbh2val,
             x2tree=x2tree,
             x2year=x2year,
             meas2x = meas2x,
             m2taxon = m2taxon,
             taxon = taxon,
             taxaMatch=taxaMatch,
             plot_id = plot_id,
             years = years,
             m2tree = m2tree,
             m2t = m2t,
             distance=distance),
        file=paste0('sites/', site, '/data/tree_data_', site ,'_STAN_', dvers, '.RDS'))
|
makePhen <- function(allphenfile, idfile, row, phenfile, filesplits=1000)
{
  # Phenotypes are split across chunk files of `filesplits` rows each
  # (suffix ".000", ".001", ...). Locate the chunk and the row inside it.
  chunk_tag <- sprintf("%03d", floor(row / filesplits))
  row_in_chunk <- row %% filesplits
  chunk_file <- paste0(allphenfile, ".", chunk_tag)
  print(c(chunk_file, row_in_chunk))
  # Extract the selected row as a one-value-per-line column.
  extract_cmd <- paste0("head -n ", row_in_chunk, " ", chunk_file,
                        " | tail -n 1 | tr ' ' '\n' > ", phenfile, ".temp")
  system(extract_cmd)
  # Prepend the sample IDs to produce the final phenotype file.
  merge_cmd <- paste0("paste -d ' ' ", idfile, " ", phenfile, ".temp > ", phenfile)
  system(merge_cmd)
  # system(paste("rm ", phenfile, ".temp", sep=""))
}
getCisChr <- function(probeinfo, cpg)
{
	# First annotated chromosome for the probe; returns NA if the probe
	# is absent from the annotation table.
	hits <- probeinfo$CHR[probeinfo$TargetID == cpg]
	hits[1]
}
runGcta <- function(cpg, grmroot, phenfile, outfile, flags="")
{
	# Run a GCTA REML fit for one CpG, using the multi-GRM file for the
	# probe's cis chromosome.
	# NOTE(review): relies on a global `probeinfo` table being loaded
	# before this is called -- confirm with the driver script.
	chr <- getCisChr(probeinfo, cpg)
	mgrmfile <- paste0(grmroot, chr, ".mgrm")
	cmd <- paste0("gcta64 ", flags, " --mgrm ", mgrmfile,
	              " --reml --reml-no-lrt --pheno ", phenfile,
	              " --reml-pred-rand --out ", outfile)
	system(cmd)
}
readPreds <- function(rootname)
{
	# Read GCTA BLUP predictions from <rootname>.indi.blp; returns a data
	# frame with a `probe` column appended, or NULL when the file is absent
	# (explicit NULL for consistency with readHsqs).
	filename <- paste0(rootname, ".indi.blp")
	if(!file.exists(filename))
	{
		return(NULL)
	}
	# Fix: spell out `colClasses`; the original `colClass=` only worked via
	# partial argument matching.
	a <- read.table(filename, colClasses=c("character", "character", "numeric", "numeric", "numeric", "numeric"))
	a$probe <- basename(rootname)
	a
}
readHsqs <- function(rootname)
{
	# Read a GCTA REML variance-component table from <rootname>.hsq;
	# returns a data frame with a `probe` column appended, or NULL when
	# the file is absent.
	filename <- paste0(rootname, ".hsq")
	if(file.exists(filename))
	{
		# Fix: `header=TRUE` spelled out (original used partial match `he=T`,
		# and `T` is a reassignable name).
		a <- read.table(filename, header=TRUE, fill=TRUE)
		a$probe <- basename(rootname)
		return(a)
	} else {
		return(NULL)
	}
}
removeFiles <- function(rootname)
{
	# Build the list of per-probe intermediate files; the actual deletion
	# is deliberately disabled (unlink left commented out).
	suffixes <- c(".phen", ".hsq", ".indi.blp", ".log", ".grm.id",
	              ".grm.bin", ".grm.N.bin", ".snplist", ".mgrm")
	files <- paste0(rootname, suffixes)
	# unlink(files)
	invisible(files)
}
# --- Driver: run GCTA REML for one slab of (probe, timepoint) parameters ----
arguments <- commandArgs(T)
jid <- as.numeric(arguments[1])   # 1-based job index
nrun <- as.numeric(arguments[2])  # number of parameter rows per job
savefile1 <- paste0("~/repo/methylation_residuals/res_ct_chr/results_ct_hsq", jid, ".RData")
savefile2 <- paste0("~/repo/methylation_residuals/res_ct_chr/results_ct_pred", jid, ".RData")
# Skip jobs whose results already exist
if(all(file.exists(c(savefile1, savefile2)))) q()
# Loads `params` (and the code below also expects a `probeinfo` table in scope)
load("~/repo/methylation_residuals/data/parameters.RData")
win <- 1000000  # cis window size in bp (kept for reference; unused below)
first <- (jid - 1) * nrun + 1
last <- min(nrow(params), jid * nrun)
print(c(first, last))
preds <- list()
hsqs <- list()
nom <- list()
j <- 1
# Guard: when jid points past the parameter table, last < first and the
# original `first:last` would have counted DOWNWARDS over invalid rows.
if (last >= first) for(i in first:last)
{
	cat("Running param", i, "\n")
	outfile <- paste0("~/repo/methylation_residuals/res_ct_chr/", params$timepoint[i], "/", params$cpg[i])
	# `&&` (scalar, short-circuiting) instead of elementwise `&` in this if()
	if(!file.exists(paste0(outfile, ".hsq")) && params$cpg[i] %in% probeinfo$TargetID)
	{
		grmroot <- paste0("~/repo/methylation_residuals/grms/", params$group[i])
		allphenfile <- paste0("~/repo/methylation_residuals/data/splitfiles/", params$timepoint[i], "norm.phen")
		idfile <- paste0("~/repo/methylation_residuals/data/", params$timepoint[i], ".id")
		cisgrmfile <- outfile  # (unused here; kept for parity with outfile naming)
		phenfile <- paste0(outfile, ".phen")
		makePhen(allphenfile, idfile, params$index[i], phenfile)
		runGcta(params$cpg[i], grmroot, phenfile, outfile)
		nom[[j]] <- c(params$cpg[i], params$timepoint[i])
		preds[[j]] <- readPreds(outfile)
		hsqs[[j]] <- readHsqs(outfile)
		j <- j + 1
		removeFiles(outfile)
	}
}
save(nom, hsqs, file=savefile1)
save(nom, preds, file=savefile2)
| /run_gcta_ct_chr.R | no_license | explodecomputer/methylation_residuals | R | false | false | 3,115 | r | makePhen <- function(allphenfile, idfile, row, phenfile, filesplits=1000)
{
split <- sprintf("%03d", floor(row / filesplits))
newrow <- row %% filesplits
newfile <- paste(allphenfile, split, sep=".")
print(c(newfile, newrow))
cmd <- paste("head -n ", newrow, " ", newfile, " | tail -n 1 | tr ' ' '\n' > ", phenfile, ".temp", sep="")
system(cmd)
cmd <- paste("paste -d ' ' ", idfile, " ", phenfile, ".temp > ", phenfile, sep="")
system(cmd)
# system(paste("rm ", phenfile, ".temp", sep=""))
}
getCisChr <- function(probeinfo, cpg)
{
	# First annotated chromosome for the probe; returns NA if the probe
	# is absent from the annotation table.
	hits <- probeinfo$CHR[probeinfo$TargetID == cpg]
	hits[1]
}
runGcta <- function(cpg, grmroot, phenfile, outfile, flags="")
{
	# Run a GCTA REML fit for one CpG, using the multi-GRM file for the
	# probe's cis chromosome.
	# NOTE(review): relies on a global `probeinfo` table being loaded
	# before this is called -- confirm with the driver script.
	chr <- getCisChr(probeinfo, cpg)
	mgrmfile <- paste0(grmroot, chr, ".mgrm")
	cmd <- paste0("gcta64 ", flags, " --mgrm ", mgrmfile,
	              " --reml --reml-no-lrt --pheno ", phenfile,
	              " --reml-pred-rand --out ", outfile)
	system(cmd)
}
readPreds <- function(rootname)
{
	# Read GCTA BLUP predictions from <rootname>.indi.blp; returns a data
	# frame with a `probe` column appended, or NULL when the file is absent
	# (explicit NULL for consistency with readHsqs).
	filename <- paste0(rootname, ".indi.blp")
	if(!file.exists(filename))
	{
		return(NULL)
	}
	# Fix: spell out `colClasses`; the original `colClass=` only worked via
	# partial argument matching.
	a <- read.table(filename, colClasses=c("character", "character", "numeric", "numeric", "numeric", "numeric"))
	a$probe <- basename(rootname)
	a
}
readHsqs <- function(rootname)
{
	# Read a GCTA REML variance-component table from <rootname>.hsq;
	# returns a data frame with a `probe` column appended, or NULL when
	# the file is absent.
	filename <- paste0(rootname, ".hsq")
	if(file.exists(filename))
	{
		# Fix: `header=TRUE` spelled out (original used partial match `he=T`,
		# and `T` is a reassignable name).
		a <- read.table(filename, header=TRUE, fill=TRUE)
		a$probe <- basename(rootname)
		return(a)
	} else {
		return(NULL)
	}
}
removeFiles <- function(rootname)
{
	# Build the list of per-probe intermediate files; the actual deletion
	# is deliberately disabled (unlink left commented out).
	suffixes <- c(".phen", ".hsq", ".indi.blp", ".log", ".grm.id",
	              ".grm.bin", ".grm.N.bin", ".snplist", ".mgrm")
	files <- paste0(rootname, suffixes)
	# unlink(files)
	invisible(files)
}
# --- Driver: run GCTA REML for one slab of (probe, timepoint) parameters ----
arguments <- commandArgs(T)
jid <- as.numeric(arguments[1])   # 1-based job index
nrun <- as.numeric(arguments[2])  # number of parameter rows per job
savefile1 <- paste0("~/repo/methylation_residuals/res_ct_chr/results_ct_hsq", jid, ".RData")
savefile2 <- paste0("~/repo/methylation_residuals/res_ct_chr/results_ct_pred", jid, ".RData")
# Skip jobs whose results already exist
if(all(file.exists(c(savefile1, savefile2)))) q()
# Loads `params` (and the code below also expects a `probeinfo` table in scope)
load("~/repo/methylation_residuals/data/parameters.RData")
win <- 1000000  # cis window size in bp (kept for reference; unused below)
first <- (jid - 1) * nrun + 1
last <- min(nrow(params), jid * nrun)
print(c(first, last))
preds <- list()
hsqs <- list()
nom <- list()
j <- 1
# Guard: when jid points past the parameter table, last < first and the
# original `first:last` would have counted DOWNWARDS over invalid rows.
if (last >= first) for(i in first:last)
{
	cat("Running param", i, "\n")
	outfile <- paste0("~/repo/methylation_residuals/res_ct_chr/", params$timepoint[i], "/", params$cpg[i])
	# `&&` (scalar, short-circuiting) instead of elementwise `&` in this if()
	if(!file.exists(paste0(outfile, ".hsq")) && params$cpg[i] %in% probeinfo$TargetID)
	{
		grmroot <- paste0("~/repo/methylation_residuals/grms/", params$group[i])
		allphenfile <- paste0("~/repo/methylation_residuals/data/splitfiles/", params$timepoint[i], "norm.phen")
		idfile <- paste0("~/repo/methylation_residuals/data/", params$timepoint[i], ".id")
		cisgrmfile <- outfile  # (unused here; kept for parity with outfile naming)
		phenfile <- paste0(outfile, ".phen")
		makePhen(allphenfile, idfile, params$index[i], phenfile)
		runGcta(params$cpg[i], grmroot, phenfile, outfile)
		nom[[j]] <- c(params$cpg[i], params$timepoint[i])
		preds[[j]] <- readPreds(outfile)
		hsqs[[j]] <- readHsqs(outfile)
		j <- j + 1
		removeFiles(outfile)
	}
}
save(nom, hsqs, file=savefile1)
save(nom, preds, file=savefile2)
|
#' Returns draws for random parameters in a latent class model
#'
#' Returns draws (unconditionals) for random parameters in model, including interactions with deterministic covariates
#'
#' This functions is only meant for use with continuous distributions
#' @param model Model object. Estimated model object as returned by function \link{apollo_estimate}.
#' @param apollo_probabilities Function. Returns probabilities of the model to be estimated. Must receive three arguments:
#' \itemize{
#' \item apollo_beta: Named numeric vector. Names and values of model parameters.
#' \item apollo_inputs: List containing options of the model. See \link{apollo_validateInputs}.
#' \item functionality: Character. Can be either "estimate" (default), "prediction", "validate", "conditionals", "zero_LL", or "raw".
#' }
#' @param apollo_inputs List grouping most common inputs. Created by function \link{apollo_validateInputs}.
#' @return List of object, one per random coefficient.
#' With inter-individual draws only, this will be a matrix, with one row per individual, and one column per draw.
#' With intra-individual draws, this will be a three-dimensional array, with one row per observation, inter-individual draws in the second dimension, and intra-individual draws in the third dimension.
#' @export
apollo_unconditionals <- function(model, apollo_probabilities, apollo_inputs){
if(is.null(apollo_inputs$silent)) silent = FALSE else silent = apollo_inputs$silent
apollo_beta = model$estimate
apollo_fixed = model$apollo_fixed
#if(!silent) apollo_print("Updating inputs...")
#apollo_inputs <- apollo_validateInputs(silent=TRUE, recycle=TRUE)
### Warn the user in case elements in apollo_inputs are different from those in the global environment
apollo_compareInputs(apollo_inputs)
apollo_control = apollo_inputs[["apollo_control"]]
database = apollo_inputs[["database"]]
draws = apollo_inputs[["draws"]]
apollo_randCoeff = apollo_inputs[["apollo_randCoeff"]]
apollo_draws = apollo_inputs[["apollo_draws"]]
apollo_lcPars = apollo_inputs[["apollo_lcPars"]]
apollo_checkArguments(apollo_probabilities,apollo_randCoeff,apollo_lcPars)
if(is.null(apollo_control$HB)) apollo_control$HB=FALSE
if(apollo_control$HB) stop("The function \"apollo_unconditionals\" is not applicables for models estimated using HB!")
if(is.function(apollo_inputs$apollo_lcPars)) stop("The function \"apollo_unconditionals\" is not applicables for models containing latent class components!")
### Validate input
if(!apollo_control$mixing) stop("Sample level random parameters can only be produced for mixture models!")
if(anyNA(draws)) stop("Random draws have not been specified despite setting mixing=TRUE")
### Run apollo_randCoeff
env <- list2env( c(as.list(apollo_beta), apollo_inputs$database, apollo_inputs$draws),
hash=TRUE, parent=parent.frame() )
environment(apollo_randCoeff) <- env
randcoeff <- apollo_randCoeff(apollo_beta, apollo_inputs)
if(any(sapply(randcoeff, is.function))){
randcoeff = lapply(randcoeff,
function(f) if(is.function(f)){ environment(f) <- env; return(f()) } else { return(f) })
}
if(apollo_draws$intraNDraws==0){
nObsPerIndiv <- as.vector(table(database[,apollo_control$indivID]))
nIndiv <- length(nObsPerIndiv)
firstRows <- rep(1, nIndiv)
for(i in 2:nIndiv) firstRows[i] <- firstRows[i-1] + nObsPerIndiv[i-1]
j=1
for(j in 1:length(randcoeff)){
randcoeff[[j]]=randcoeff[[j]][firstRows,]
}
}
if(!silent) apollo_print("Unconditional distributions computed")
return(randcoeff)
} | /apollo/R/apollo_unconditionals.R | no_license | akhikolla/TestedPackages-NoIssues | R | false | false | 3,839 | r | #' Returns draws for random parameters in a latent class model model
#'
#' Returns draws (unconditionals) for random parameters in model, including interactions with deterministic covariates
#'
#' This functions is only meant for use with continuous distributions
#' @param model Model object. Estimated model object as returned by function \link{apollo_estimate}.
#' @param apollo_probabilities Function. Returns probabilities of the model to be estimated. Must receive three arguments:
#' \itemize{
#' \item apollo_beta: Named numeric vector. Names and values of model parameters.
#' \item apollo_inputs: List containing options of the model. See \link{apollo_validateInputs}.
#' \item functionality: Character. Can be either "estimate" (default), "prediction", "validate", "conditionals", "zero_LL", or "raw".
#' }
#' @param apollo_inputs List grouping most common inputs. Created by function \link{apollo_validateInputs}.
#' @return List of object, one per random coefficient.
#' With inter-individual draws only, this will be a matrix, with one row per individual, and one column per draw.
#' With intra-individual draws, this will be a three-dimensional array, with one row per observation, inter-individual draws in the second dimension, and intra-individual draws in the third dimension.
#' @export
apollo_unconditionals <- function(model, apollo_probabilities, apollo_inputs){
  # Honour the silent flag stored in apollo_inputs (default: verbose)
  if(is.null(apollo_inputs$silent)) silent <- FALSE else silent <- apollo_inputs$silent
  apollo_beta  <- model$estimate
  apollo_fixed <- model$apollo_fixed
  ### Warn the user in case elements in apollo_inputs are different from those in the global environment
  apollo_compareInputs(apollo_inputs)
  apollo_control   <- apollo_inputs[["apollo_control"]]
  database         <- apollo_inputs[["database"]]
  draws            <- apollo_inputs[["draws"]]
  apollo_randCoeff <- apollo_inputs[["apollo_randCoeff"]]
  apollo_draws     <- apollo_inputs[["apollo_draws"]]
  apollo_lcPars    <- apollo_inputs[["apollo_lcPars"]]
  apollo_checkArguments(apollo_probabilities, apollo_randCoeff, apollo_lcPars)
  ### Validate input (fix: "applicables" -> "applicable" in the error messages)
  if(is.null(apollo_control$HB)) apollo_control$HB <- FALSE
  if(apollo_control$HB) stop("The function \"apollo_unconditionals\" is not applicable for models estimated using HB!")
  if(is.function(apollo_inputs$apollo_lcPars)) stop("The function \"apollo_unconditionals\" is not applicable for models containing latent class components!")
  if(!apollo_control$mixing) stop("Sample level random parameters can only be produced for mixture models!")
  if(anyNA(draws)) stop("Random draws have not been specified despite setting mixing=TRUE")
  ### Run apollo_randCoeff with beta, database and draws visible in its environment
  env <- list2env(c(as.list(apollo_beta), apollo_inputs$database, apollo_inputs$draws),
                  hash=TRUE, parent=parent.frame())
  environment(apollo_randCoeff) <- env
  randcoeff <- apollo_randCoeff(apollo_beta, apollo_inputs)
  # Elements may themselves be functions; evaluate each in the same environment
  if(any(sapply(randcoeff, is.function))){
    randcoeff <- lapply(randcoeff,
                        function(f) if(is.function(f)){ environment(f) <- env; return(f()) } else { return(f) })
  }
  ### With inter-individual draws only, keep one row per individual
  if(apollo_draws$intraNDraws==0){
    nObsPerIndiv <- as.vector(table(database[,apollo_control$indivID]))
    # First row index of each individual. cumsum is robust for a single
    # individual, where the original `for(i in 2:nIndiv)` loop would have
    # iterated backwards over c(2, 1).
    firstRows <- cumsum(c(1, head(nObsPerIndiv, -1)))
    for(j in seq_along(randcoeff)){
      randcoeff[[j]] <- randcoeff[[j]][firstRows,]
    }
  }
  if(!silent) apollo_print("Unconditional distributions computed")
  return(randcoeff)
}
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/regressions.R
\name{reg}
\alias{reg}
\title{Run regression}
\usage{
reg(method = "lm", depvar = "1", covars = list(c("cyl", "disp"), c("drat",
"vs", "am")), data, stepwise = FALSE, stargazer = FALSE, ...)
}
\arguments{
\item{...}{Other parameters passed into the regression function}
}
\description{
Fits a regression of \code{depvar} on the covariate sets given in \code{covars}, using the estimator named in \code{method}.
}
\section{Integration into ggplot2}{
You can simply add to a plot created by \code{reg} by using the usual \code{ggplot2}-syntax.
}
\examples{
\dontrun{
reg(method = "lm",
depvar = "mpg",
covars = list(c("cyl","disp"),
c("drat","vs","am")),
data = "mtcars",
stepwise = T,
stargazer = T)
}
}
| /man/reg.Rd | permissive | schliebs/rforceone | R | false | true | 735 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/regressions.R
\name{reg}
\alias{reg}
\title{Run regression}
\usage{
reg(method = "lm", depvar = "1", covars = list(c("cyl", "disp"), c("drat",
"vs", "am")), data, stepwise = FALSE, stargazer = FALSE, ...)
}
\arguments{
\item{...}{Other parameters passed into the regression function}
}
\description{
Fits a regression of \code{depvar} on the covariate sets given in \code{covars}, using the estimator named in \code{method}.
}
\section{Integration into ggplot2}{
You can simply add to a plot created by \code{reg} by using the usual \code{ggplot2}-syntax.
}
\examples{
\dontrun{
reg(method = "lm",
depvar = "mpg",
covars = list(c("cyl","disp"),
c("drat","vs","am")),
data = "mtcars",
stepwise = T,
stargazer = T)
}
}
|
convert.header <- function(header1, header2){
	# Case-insensitive header harmonisation: every entry of header1 whose
	# upper-cased form also appears in header2 is replaced by header2's
	# spelling. header1 is returned unchanged (but named by its upper-cased
	# values) when its upper-cased entries are not unique, or when nothing
	# matches.
	names(header1) <- toupper(header1)
	names(header2) <- toupper(header2)
	keys1 <- names(header1)
	keys2 <- names(header2)
	if(any(duplicated(keys1))){
	  return(header1)
	}
	shared <- keys1[keys1 %in% keys2]
	if(length(shared) > 0){
	  # Name-based assignment: keys1 are unique, so this equals the
	  # positional assignment of the original implementation.
	  header1[shared] <- header2[shared]
	}
	header1
}
| /R/convert.header.R | no_license | zhangh12/SCAT | R | false | false | 358 | r |
convert.header <- function(header1, header2){
names(header1) <- toupper(header1)
names(header2) <- toupper(header2)
hd1 <- names(header1)
hd2 <- names(header2)
if(any(duplicated(hd1))){
return(header1)
}
id <- which(hd1 %in% hd2)
if(length(id) == 0){
return(header1)
}
hd <- hd1[id]
header1[id] <- header2[hd]
header1
}
|
## Create an object that stores a matrix and caches its inverse
## This script contains two functions "makeCacheMatrix" and "cacheSolve"
## "makeCacheMatrix" creates a matrix which caches its inverse
## "cacheSolve" computes the inverse of the matrix created by "makeCacheMatrix"
## Function "makeCacheMatrix" creates a matrix which can cache its inverse, which is a list containing
## a function to
## 1 set the value of the matrix
## 2 get the value of the matrix
## 3 set the value of the inverse of the matrix
## 4 get the value of the inverse of the matrix
makeCacheMatrix <- function(x = matrix()) {
        ## Wraps a matrix together with a memoised inverse. The cached
        ## inverse is cleared whenever a new matrix is stored via set().
        cachedInverse <- NULL
        set <- function(newMatrix) {
                x <<- newMatrix
                cachedInverse <<- NULL
        }
        get <- function() x
        setinverse <- function(inverse) cachedInverse <<- inverse
        getinverse <- function() cachedInverse
        ## Expose the four accessors as a named list
        list(set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}
## Function "cacheSolve" computes the inverse of the special "matrix" returned
## by function "makeCacheMatrix" above. If the inverse has already been calculated
## (and the matrix has not changed), then "cacheSolve" should retrieve the inverse
## from the cache.
cacheSolve <- function(x, ...) {
        ## Return a matrix that is the inverse of 'x', reusing the cached
        ## inverse from makeCacheMatrix when one has already been computed.
        inverseMatrix <- x$getinverse()
        if(!is.null(inverseMatrix)){
                message("getting cached data")
                ## Fix: was `retrun(...)`, a typo that errored at run time
                return(inverseMatrix)
        }
        data <- x$get()
        inverseMatrix <- solve(data, ...)
        ## Fix: was x$setInverse(...); the list element created by
        ## makeCacheMatrix is named `setinverse` (lower case)
        x$setinverse(inverseMatrix)
        inverseMatrix
}
| /cachematrix.R | no_license | jiajun6/ProgrammingAssignment2 | R | false | false | 1,437 | r | ## Creat a object stores a matrix and cache its inverse
## This script contains two functions "makeCacheMatrix" and "cacheSolve"
## "makeCacheMatrix" creates a matrix which caches its inverse
## "cacheSolve" computes the inverse of the matrix created by "makeCacheMatrix"
## Function "makeCacheMatrix" creates a matrix which can cache its inverse, which is a list containing
## a function to
## 1 set the value of the matrix
## 2 get the value of the matrix
## 3 set the value of the inverse of the matrix
## 4 get the value of the inverse of the matrix
makeCacheMatrix <- function(x = matrix()) {
        ## Wraps a matrix together with a memoised inverse. The cached
        ## inverse is cleared whenever a new matrix is stored via set().
        cachedInverse <- NULL
        set <- function(newMatrix) {
                x <<- newMatrix
                cachedInverse <<- NULL
        }
        get <- function() x
        setinverse <- function(inverse) cachedInverse <<- inverse
        getinverse <- function() cachedInverse
        ## Expose the four accessors as a named list
        list(set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}
## Function "cacheSolve" computes the inverse of the special "matrix" returned
## by function "makeCacheMatrix" above. If the inverse has already been calculated
## (and the matrix has not changed), then "cacheSolve" should retrieve the inverse
## from the cache.
cacheSolve <- function(x, ...) {
        ## Return a matrix that is the inverse of 'x', reusing the cached
        ## inverse from makeCacheMatrix when one has already been computed.
        inverseMatrix <- x$getinverse()
        if(!is.null(inverseMatrix)){
                message("getting cached data")
                ## Fix: was `retrun(...)`, a typo that errored at run time
                return(inverseMatrix)
        }
        data <- x$get()
        inverseMatrix <- solve(data, ...)
        ## Fix: was x$setInverse(...); the list element created by
        ## makeCacheMatrix is named `setinverse` (lower case)
        x$setinverse(inverseMatrix)
        inverseMatrix
}
|
# Generate an MA plot that compares the expected isoform-wise quantification
# based on short reads to the actual values measured in PacBio.
# Since we are looking at GENCODE-annotated transcripts only, no filtering
# of the PacBio data is needed.
main <- function() {
    # Orchestrates the MA-plot analysis: read abundances, test each known
    # transcript's observed PacBio count against the Illumina expectation,
    # plot, and write the table of significant transcripts.
    set.seed(100)
    load_packages()
    opt <- parse_options()

    # Map the requested color scheme to a concrete fill color
    if (opt$color_scheme == "red") {
        fill_color <- "red2"
    } else if (opt$color_scheme == "blue") {
        fill_color <- "navy"
    } else if (opt$color_scheme == "green") {
        fill_color <- "#009E73"
    }

    # Names of the two PacBio datasets to combine
    data_names <- str_split(opt$datasets, ",")[[1]]
    dataset1 <- data_names[1]
    dataset2 <- data_names[2]

    # Transcripts expressed in the Illumina data according to Kallisto
    illumina_table <- filter_kallisto_illumina_transcripts(opt$illumina_kallisto)
    illumina_table <- illumina_table[, c(1, ncol(illumina_table))]
    colnames(illumina_table) <- c("annot_transcript_name", "illumina_TPM")

    # PacBio abundances, restricted to GENCODE-known transcripts
    pb_abundance <- as.data.frame(read_delim(opt$infile, "\t", escape_double = FALSE,
                                             col_names = TRUE, trim_ws = TRUE, na = "NA"))
    pb_abundance <- subset(pb_abundance, transcript_status == "KNOWN")
    pb_abundance <- pb_abundance[, c("annot_transcript_name", dataset1, dataset2)]

    # Pool the two PacBio replicates
    pb_abundance$both_pacbio <- pb_abundance[, dataset1] + pb_abundance[, dataset2]
    total_pacbio_reads <- sum(pb_abundance[, dataset1]) + sum(pb_abundance[, dataset2])

    # Outer-join PacBio with Illumina on transcript name; missing values -> 0
    merged_illumina_pacbio <- merge(illumina_table, pb_abundance,
                                    by = "annot_transcript_name",
                                    all.x = T, all.y = T)
    merged_illumina_pacbio[is.na(merged_illumina_pacbio)] <- 0
    print(nrow(merged_illumina_pacbio))

    # Fix: removed a stray trailing `z` that made this line a syntax error
    final_table <- merged_illumina_pacbio[, c("both_pacbio", "illumina_TPM")]

    # Chi-square test per transcript: PacBio count vs the Illumina expectation
    final_table$illumina_TPM <- round(final_table$illumina_TPM)
    total_illumina <- sum(final_table$illumina_TPM)
    final_table[, c("pvals", "expected")] <- t(apply(final_table, 1,
                                                     run_chisquare_test,
                                                     total_pacbio_reads,
                                                     total_illumina))
    final_table$transcript_name <- merged_illumina_pacbio$annot_transcript_name
    final_table$pvals <- p.adjust(final_table$pvals, method = "bonferroni")

    data <- plot_MA_observed_expected(final_table, fill_color, opt$outdir)

    # Write out the significant transcripts
    printable <- subset(data, status == "Bonf. p-value <= 0.01")
    printable <- printable[, c("transcript_name", "illumina_TPM", "expected",
                               "observed", "pvals", "A", "M")]
    colnames(printable) <- c("transcript_name", "illumina_TPM", "expected_TPM",
                             "observed_pacbio_TPM", "corrected_p-value", "A", "M")
    write.table(printable, paste(opt$outdir, "/MA_plot_gene_table.tsv", sep=""),
                row.names=F, col.names=T, quote=F, sep="\t")
}
# Quantile-normalize observed vs expected PacBio counts, compute MA
# coordinates, render the MA plot to a PNG in `outdir`, and return the
# augmented data frame (with observed/expected/M/A/status columns).
plot_MA_observed_expected <- function(data, fillcolor, outdir) {
    # Perform quantile normalization
    counts <- as.matrix(data[, c("both_pacbio", "expected")])
    counts <- as.data.frame(normalize.quantiles(counts))
    # +1 pseudocount so the log2 transforms below are defined at zero
    data$observed <- counts[,1] + 1
    data$expected <- counts[,2] + 1
    # M = log-ratio, A = mean log intensity (standard MA-plot axes)
    data$M <- log(data$observed, base=2) - log(data$expected, base=2)
    data$A <- 0.5*(log(data$observed, base=2) + log(data$expected, base=2))
    data$fold_change <- (data$observed - data$expected) / data$expected
    # Significant = |log2 ratio| >= 1 AND Bonferroni-adjusted p <= 0.01
    data$status <- as.factor(ifelse(abs(data$M) >= 1 & data$pvals <= 0.01, "Bonf. p-value <= 0.01", "Bonf. p-value > 0.01"))
    print(nrow(data))
    print(nrow(subset(data, status == "Bonf. p-value <= 0.01")))
    fname <- paste(outdir, "/transcript_obs_expected_MA_plot.png", sep="")
    xlabel <- "0.5*(log2(observed*expected PacBio counts))"
    ylabel <- "log2(ratio of observed to expected PacBio counts)"
    png(filename = fname,
        width = 2500, height = 2500, units = "px",
        bg = "white", res = 300)
    p = ggplot(data, aes(x = A, y = M, color = status)) +
        geom_jitter(alpha = 0.4, size = 2.5) +
        xlab(xlabel) + ylab(ylabel) + theme_bw() +
        scale_color_manual(values = c("orange", fillcolor),
                           labels = c("Significant", "Not significant")) +
        #labels = c("Bonf. p-value <= 0.01 \nor log2 fold change > 1", "Bonf. p-value > 0.01")) +
        guides(colour = guide_legend(override.aes = list(alpha=1, size=2.5))) +
        theme(axis.text.x = element_text(color="black", size=20),
              axis.text.y = element_text(color="black", size=20),
              axis.title.x = element_text(color="black", size=16),
              axis.title.y = element_text(color="black", size=16)) +
        theme(legend.position=c(0.8,0.2),
              legend.title = element_blank(),
              legend.background = element_rect(fill="white", color = "black"),
              legend.key = element_rect(fill="transparent"),
              legend.text = element_text(colour = 'black', size = 16))
    print(p)
    dev.off()
    return(data)
}
# Chi-square test of independence for one transcript: does the fraction of
# reads assigned to this transcript differ between PacBio and Illumina?
#
# reads_vector: c(pacbio_reads_for_transcript, illumina_reads_for_transcript)
# total_pacbio / total_illumina: library-wide read totals per platform.
# Returns c(p_value, expected_pacbio_count_for_this_transcript).
run_chisquare_test <- function(reads_vector, total_pacbio, total_illumina) {
  pacbio_hits <- reads_vector[1]
  illumina_hits <- reads_vector[2]
  # 2x2 contingency table: rows = platform, columns = query vs. everything else.
  counts <- matrix(c(pacbio_hits, illumina_hits,
                     total_pacbio - pacbio_hits, total_illumina - illumina_hits),
                   nrow = 2,
                   dimnames = list(platform = c("PacBio", "Illumina"),
                                   transcript = c("query_transcript", "not_query_transcript")))
  test_res <- chisq.test(as.table(counts))
  c(test_res$p.value, test_res$expected[1, 1])
}
# Attach all packages this script needs, silencing their startup chatter,
# and source the shared Kallisto transcript-filtering helper.
# Called once from main() purely for its side effects; returns NULL invisibly.
load_packages <- function() {
  suppressPackageStartupMessages(library("ggplot2"))
  suppressPackageStartupMessages(library("plyr"))
  suppressPackageStartupMessages(library("Hmisc"))
  suppressPackageStartupMessages(library("optparse"))
  suppressPackageStartupMessages(library("readr"))
  suppressPackageStartupMessages(library("reshape"))
  suppressPackageStartupMessages(library("stringr"))
  suppressPackageStartupMessages(library("data.table"))
  suppressPackageStartupMessages(library("preprocessCore"))
  # Load my custom functions
  # NOTE(review): hard-coded absolute path; breaks outside the original host.
  #source("/pub/dwyman/TALON-paper-2019/analysis_scripts/filter_kallisto_illumina_genes.R")
  source("/pub/dwyman/TALON-paper-2019/analysis_scripts/filter_kallisto_illumina_transcripts.R")
  # BUG FIX: previously ended with a bare `return`, which (confusingly)
  # returned the `return` function object itself.
  invisible(NULL)
}
# Define and parse the command-line interface via optparse.
# Returns the parsed options list (fields: infile, datasets,
# illumina_kallisto, color_scheme, outdir).
parse_options <- function() {
  opts <- list(
    make_option(c("--f"), action = "store", dest = "infile",
                default = NULL, help = "TALON abundance file (filtered)"),
    make_option(c("--datasets"), action = "store", dest = "datasets",
                default = NULL, help = "Comma-delimited list of two dataset names to include in the analysis."),
    make_option(c("--ik"), action = "store", dest = "illumina_kallisto",
                default = NULL, help = "Illumina Kallisto file."),
    make_option(c("--color"), action = "store", dest = "color_scheme",
                default = NULL, help = "blue, red, or green"),
    make_option(c("-o", "--outdir"), action = "store", dest = "outdir",
                default = NULL, help = "Output directory for plots and outfiles"))
  parse_args(OptionParser(option_list = opts))
}
main()
| /analysis_scripts/MA_plot_for_transcripts.R | permissive | dewyman/TALON-paper-2019 | R | false | false | 7,708 | r | # Generate an MA plot that compares the expected isoform-wise quantification
# base on short reads to the actual values measured in PacBio.
# Since we are looking at GENCODE-annotated transcripts only, no filtering
# of the PacBio data is needed.
# Entry point: read PacBio and Illumina quantifications, test each known
# transcript for differential representation between the two platforms
# (chi-square, Bonferroni-corrected), draw an MA plot of observed vs.
# expected PacBio counts, and write the table of significant transcripts.
main <- function() {
  set.seed(100)
  load_packages()
  opt <- parse_options()

  # Map the requested color scheme onto a concrete point color.
  if (opt$color_scheme == "red") {
    fill_color <- "red2"
  } else if (opt$color_scheme == "blue") {
    fill_color <- "navy"
  } else if (opt$color_scheme == "green") {
    fill_color <- "#009E73"
  }

  # Get the names of the first and second dataset that we will be working with
  data_names <- str_split(opt$datasets, ",")[[1]]
  dataset1 <- data_names[1]
  dataset2 <- data_names[2]

  # Transcripts expressed in the Illumina data according to Kallisto;
  # keep only the transcript name and its TPM (first and last columns).
  illumina_table <- filter_kallisto_illumina_transcripts(opt$illumina_kallisto)
  illumina_table <- illumina_table[, c(1, ncol(illumina_table))]
  colnames(illumina_table) <- c("annot_transcript_name", "illumina_TPM")

  # Read PacBio abundance file; known transcripts only.
  pb_abundance <- as.data.frame(read_delim(opt$infile, "\t", escape_double = FALSE,
                                           col_names = TRUE, trim_ws = TRUE, na = "NA"))
  pb_abundance <- subset(pb_abundance, transcript_status == "KNOWN")
  pb_abundance <- pb_abundance[, c("annot_transcript_name", dataset1, dataset2)]

  # Sum the PacBio abundances of the two datasets.
  pb_abundance$both_pacbio <- pb_abundance[, dataset1] + pb_abundance[, dataset2]
  total_pacbio_reads <- sum(pb_abundance[, dataset1]) + sum(pb_abundance[, dataset2])

  # Merge PacBio with Illumina on annot_transcript_name; keep transcripts
  # seen by either platform and treat missing abundances as zero.
  merged_illumina_pacbio <- merge(illumina_table, pb_abundance,
                                  by = "annot_transcript_name",
                                  all.x = T, all.y = T)
  merged_illumina_pacbio[is.na(merged_illumina_pacbio)] <- 0
  print(nrow(merged_illumina_pacbio))

  # BUG FIX: the original line carried a stray trailing "z", a syntax error.
  final_table <- merged_illumina_pacbio[, c("both_pacbio", "illumina_TPM")]

  # Compute the p-values (counts must be integers for the chi-square test).
  final_table$illumina_TPM <- round(final_table$illumina_TPM)
  total_illumina <- sum(final_table$illumina_TPM)
  final_table[, c("pvals", "expected")] <- t(apply(final_table, 1,
                                                   run_chisquare_test,
                                                   total_pacbio_reads,
                                                   total_illumina))
  final_table$transcript_name <- merged_illumina_pacbio$annot_transcript_name
  final_table$pvals <- p.adjust(final_table$pvals, method = "bonferroni")

  # MA plot plus table of the transcripts flagged significant.
  data <- plot_MA_observed_expected(final_table, fill_color, opt$outdir)
  printable <- subset(data, status == "Bonf. p-value <= 0.01")
  printable <- printable[, c("transcript_name", "illumina_TPM", "expected",
                             "observed", "pvals", "A", "M")]
  colnames(printable) <- c("transcript_name", "illumina_TPM", "expected_TPM",
                           "observed_pacbio_TPM", "corrected_p-value", "A", "M")
  write.table(printable, paste(opt$outdir, "/MA_plot_gene_table.tsv", sep = ""),
              row.names = F, col.names = T, quote = F, sep = "\t")
}
# Draw an MA plot of observed vs. expected PacBio counts and return the
# annotated data.
#
# data:      data.frame with columns both_pacbio, expected and pvals
#            (p-values are Bonferroni-corrected upstream in main()).
# fillcolor: point color for the non-significant category.
# outdir:    directory the PNG is written to.
#
# Side effects: writes <outdir>/transcript_obs_expected_MA_plot.png and
# prints the total and significant row counts to stdout.
# Returns `data` with added columns observed, expected (quantile normalized,
# +1 pseudocount), M, A, fold_change and status.
plot_MA_observed_expected <- function(data, fillcolor, outdir) {
# Perform quantile normalization
# normalize.quantiles() comes from preprocessCore (attached by load_packages()).
counts <- as.matrix(data[, c("both_pacbio", "expected")])
counts <- as.data.frame(normalize.quantiles(counts))
# +1 pseudocount so the log2 transforms below are defined for zero counts.
data$observed <- counts[,1] + 1
data$expected <- counts[,2] + 1
# Classic MA quantities: M = log2 ratio, A = mean log2 intensity.
data$M <- log(data$observed, base=2) - log(data$expected, base=2)
data$A <- 0.5*(log(data$observed, base=2) + log(data$expected, base=2))
data$fold_change <- (data$observed - data$expected) / data$expected
# NOTE(review): the "significant" class also requires |M| >= 1, although the
# factor label only mentions the p-value cutoff.
data$status <- as.factor(ifelse(abs(data$M) >= 1 & data$pvals <= 0.01, "Bonf. p-value <= 0.01", "Bonf. p-value > 0.01"))
# Report total points and how many are flagged significant.
print(nrow(data))
print(nrow(subset(data, status == "Bonf. p-value <= 0.01")))
fname <- paste(outdir, "/transcript_obs_expected_MA_plot.png", sep="")
xlabel <- "0.5*(log2(observed*expected PacBio counts))"
ylabel <- "log2(ratio of observed to expected PacBio counts)"
png(filename = fname,
width = 2500, height = 2500, units = "px",
bg = "white", res = 300)
p = ggplot(data, aes(x = A, y = M, color = status)) +
geom_jitter(alpha = 0.4, size = 2.5) +
xlab(xlabel) + ylab(ylabel) + theme_bw() +
scale_color_manual(values = c("orange", fillcolor),
labels = c("Significant", "Not significant")) +
#labels = c("Bonf. p-value <= 0.01 \nor log2 fold change > 1", "Bonf. p-value > 0.01")) +
guides(colour = guide_legend(override.aes = list(alpha=1, size=2.5))) +
theme(axis.text.x = element_text(color="black", size=20),
axis.text.y = element_text(color="black", size=20),
axis.title.x = element_text(color="black", size=16),
axis.title.y = element_text(color="black", size=16)) +
theme(legend.position=c(0.8,0.2),
legend.title = element_blank(),
legend.background = element_rect(fill="white", color = "black"),
legend.key = element_rect(fill="transparent"),
legend.text = element_text(colour = 'black', size = 16))
print(p)
dev.off()
return(data)
}
# Chi-square test of independence for one transcript: does the fraction of
# reads assigned to this transcript differ between PacBio and Illumina?
#
# reads_vector: c(pacbio_reads_for_transcript, illumina_reads_for_transcript)
# total_pacbio / total_illumina: library-wide read totals per platform.
# Returns c(p_value, expected_pacbio_count_for_this_transcript).
run_chisquare_test <- function(reads_vector, total_pacbio, total_illumina) {
  pacbio_hits <- reads_vector[1]
  illumina_hits <- reads_vector[2]
  # 2x2 contingency table: rows = platform, columns = query vs. everything else.
  counts <- matrix(c(pacbio_hits, illumina_hits,
                     total_pacbio - pacbio_hits, total_illumina - illumina_hits),
                   nrow = 2,
                   dimnames = list(platform = c("PacBio", "Illumina"),
                                   transcript = c("query_transcript", "not_query_transcript")))
  test_res <- chisq.test(as.table(counts))
  c(test_res$p.value, test_res$expected[1, 1])
}
# Attach all packages this script needs, silencing their startup chatter,
# and source the shared Kallisto transcript-filtering helper.
# Called once from main() purely for its side effects; returns NULL invisibly.
load_packages <- function() {
  suppressPackageStartupMessages(library("ggplot2"))
  suppressPackageStartupMessages(library("plyr"))
  suppressPackageStartupMessages(library("Hmisc"))
  suppressPackageStartupMessages(library("optparse"))
  suppressPackageStartupMessages(library("readr"))
  suppressPackageStartupMessages(library("reshape"))
  suppressPackageStartupMessages(library("stringr"))
  suppressPackageStartupMessages(library("data.table"))
  suppressPackageStartupMessages(library("preprocessCore"))
  # Load my custom functions
  # NOTE(review): hard-coded absolute path; breaks outside the original host.
  #source("/pub/dwyman/TALON-paper-2019/analysis_scripts/filter_kallisto_illumina_genes.R")
  source("/pub/dwyman/TALON-paper-2019/analysis_scripts/filter_kallisto_illumina_transcripts.R")
  # BUG FIX: previously ended with a bare `return`, which (confusingly)
  # returned the `return` function object itself.
  invisible(NULL)
}
# Define and parse the command-line interface via optparse.
# Returns the parsed options list (fields: infile, datasets,
# illumina_kallisto, color_scheme, outdir).
parse_options <- function() {
  opts <- list(
    make_option(c("--f"), action = "store", dest = "infile",
                default = NULL, help = "TALON abundance file (filtered)"),
    make_option(c("--datasets"), action = "store", dest = "datasets",
                default = NULL, help = "Comma-delimited list of two dataset names to include in the analysis."),
    make_option(c("--ik"), action = "store", dest = "illumina_kallisto",
                default = NULL, help = "Illumina Kallisto file."),
    make_option(c("--color"), action = "store", dest = "color_scheme",
                default = NULL, help = "blue, red, or green"),
    make_option(c("-o", "--outdir"), action = "store", dest = "outdir",
                default = NULL, help = "Output directory for plots and outfiles"))
  parse_args(OptionParser(option_list = opts))
}
main()
|
# Run the external ALISCORE alignment-masking tool on alignment `x` and
# return the alignment with the flagged columns removed.
#
# x:    an alignment object accepted by write.fas() and column-subsettable
#       with x[, -id] -- presumably a DNAbin-style matrix; verify at call site.
# gaps: "5state" treats gaps as a fifth character state; anything else
#       passes -N to ALISCORE.
# w:    sliding window size (-w).
# r, o: optional ALISCORE arguments; t, l, s are reserved but unimplemented.
# path: directory containing the Aliscore.02.2.pl Perl script.
#
# Side effects: changes into `path`, writes input.fas there, runs Perl, and
# prints a summary via cat().
# NOTE(review): if the Perl call or scan() fails, the working directory is
# not restored (no on.exit()).
aliscore <- function(x, gaps = "5state", w = 6, r, t, l, s, o,
path = "/Applications/Aliscore_v.2.0"){
# Remember the caller's working directory; ALISCORE runs inside `path`.
rwd <- getwd()
setwd(path)
# Write the alignment to the temporary FASTA file the Perl script reads.
write.fas(x, "input.fas")
## parse options
## -------------
N <- ifelse( gaps == "5state", "", "-N") # treatment of gaps
w <- paste("-w", w) # window size
r <- ifelse( missing(r), "", paste("-r", r ))
if ( !missing(t) ) stop("option -t not yet implemented")
if ( !missing(l) ) stop("option -l not yet implemented")
if ( !missing(s) ) stop("option -s not yet implemented")
o <- ifelse( missing(o), "", paste("-o", paste(o, collapse = ",") ))
# Invoke the Perl implementation of ALISCORE on the temporary file.
call <- paste("perl Aliscore.02.2.pl -i input.fas",
N, w, r, o)
system(call)
# ALISCORE writes the indices of problematic columns to this list file.
id <- scan("input.fas_List_l_all.txt",
sep = " ", quiet = TRUE)
id <- as.numeric(id)
nid <- length(id)
if ( nid == 0 ) {
cat("\nALISCORE did not remove any characters.")
} else {
# Drop the flagged alignment columns.
x <- x[, -id]
cat("\nALISCORE removed", nid, "characters.")
}
setwd(rwd)
x
} | /R/aliscore.R | no_license | richelbilderbeek/ips | R | false | false | 1,010 | r | aliscore <- function(x, gaps = "5state", w = 6, r, t, l, s, o,
path = "/Applications/Aliscore_v.2.0"){
# Remember the caller's working directory; ALISCORE runs inside `path`.
# NOTE(review): if the Perl call or scan() fails, the working directory is
# not restored (no on.exit()).
rwd <- getwd()
setwd(path)
# Write the alignment to the temporary FASTA file the Perl script reads.
write.fas(x, "input.fas")
## parse options
## -------------
N <- ifelse( gaps == "5state", "", "-N") # treatment of gaps
w <- paste("-w", w) # window size
r <- ifelse( missing(r), "", paste("-r", r ))
if ( !missing(t) ) stop("option -t not yet implemented")
if ( !missing(l) ) stop("option -l not yet implemented")
if ( !missing(s) ) stop("option -s not yet implemented")
o <- ifelse( missing(o), "", paste("-o", paste(o, collapse = ",") ))
# Invoke the Perl implementation of ALISCORE on the temporary file.
call <- paste("perl Aliscore.02.2.pl -i input.fas",
N, w, r, o)
system(call)
# ALISCORE writes the indices of problematic columns to this list file.
id <- scan("input.fas_List_l_all.txt",
sep = " ", quiet = TRUE)
id <- as.numeric(id)
nid <- length(id)
if ( nid == 0 ) {
cat("\nALISCORE did not remove any characters.")
} else {
# Drop the flagged alignment columns.
x <- x[, -id]
cat("\nALISCORE removed", nid, "characters.")
}
setwd(rwd)
x
} |
# creating
x <- matrix(c(1:4), nrow=2)
x
# allocate an empty (all-NA) matrix and fill it element by element
x <- matrix(nrow=2, ncol=2)
x[1,1] <- 1
x[1,2] <- 0
x[2,1] <- 0
x[2,2] <- 1
x
# byrow=TRUE fills row by row instead of the default column-major order
x <- matrix(c(1:6), nrow=2, byrow=TRUE)
x
# operations
x <- matrix(c(1:4), nrow=2)
x %*% x
3 * x
x * 3
x + x
# indexing
x <- matrix(c(1:9), nrow=3)
x
x[,2:3]
x[2:3,]
x[c(2,3), c(1,2)]
x[c(2,3), c(1,2)] <- matrix(c(0,0,1,0), nrow=2)
x
# assigning submatrices
x <- matrix(nrow=3, ncol=3)
y <- matrix(c(1,0,0,1), nrow=2)
x[2:3,2:3] <- y
x
# a negative index drops the given row(s)/column(s)
x <- matrix(1:9, nrow=3)
x[-2,]
# filtering
x <- matrix(c(1:3,2:4), ncol=2)
x
x[x[,2] >= 3,]
v <- x[,2] >= 3 # second column of `x`, elements >= 3
v # output [1] FALSE TRUE TRUE
x[v,] # keep the rows of `x` where `v` is TRUE
# x[v,] == rows of `x` specified by `v` element is `TRUE` or `FALSE`
# first element of `v` is `FALSE`, thus first row of `x` is skipped
# second and third element of `v` are `TRUE`, thus second and third row of `x` used
x <- matrix(c(1:6), nrow=3)
x[x[,1] > 1 & x[,2] > 5,] # output [1] 3 6
x <- matrix(c(5,2,10,-3,10,23), ncol=2)
x
which(x > 3) # output [1] 1 3 5 6
# row()/col()
# covariance matrix
# Build an n x n equicorrelation matrix: 1 on the diagonal, rho elsewhere.
# NOTE(review): this definition masks stats::cov() in the workspace.
cov <- function(rho, n) {
  out <- matrix(rho, nrow = n, ncol = n)
  diag(out) <- 1
  out
}
# 3x3 matrix: 1 on the diagonal, 0.2 everywhere else
x <- cov(0.2, 3)
x
# apply()
x <- matrix(c(1:6), nrow=3)
# Divide x element-wise by the recycled vector (2, 8); the last expression
# is returned implicitly (no return()).
f <- function(x) {
  divisors <- c(2, 8)
  x / divisors
}
# if f() returns a vector of `k` elements, the matrix returned by `apply()` will have k rows
# if f() returns a scalar, the result of `apply()` will be a vector
x <- apply(x, 1, f) # returns a matrix of 2 rows and 3 columns
x
t(x) # transpose matrix
# a matrix of `0` and `1`; check whether the majority of the first d elements in a row are `1`
# Return 1 when more than half of the first d entries of m are 1, else 0.
# Assumes d >= 1 and m holds 0/1 values without NA (as in the demo below).
f <- function(m, d) {
  share_of_ones <- sum(m[1:d]) / d
  if (share_of_ones > 0.5) 1 else 0
}
x <- matrix(c(1,0,1,1,0,1,1,1,1,0,1,0,0,1,1), nrow=3, byrow=TRUE)
x
# extra arguments after the function are forwarded to f() (here: d)
apply(x, 1, f, 3) # output [1] 1 1 0
apply(x, 1, f, 2) # output [1] 0 1 0
# find outliers
# For each row of m, return the column index of the entry farthest from the
# row median (ties resolved to the first such entry, per which.max()).
f <- function(m) {
  farthest_from_median <- function(row_vals) {
    which.max(abs(row_vals - median(row_vals)))
  }
  apply(m, 1, farthest_from_median)
}
x <- matrix(c(1:6), nrow=2)
x
f(x) # output [1] 1 1
# changing size of a matrix
x <- rep(1,4)
x
y <- matrix(c(1:12), nrow=4)
y
# cbind() glues the vector onto the matrix as an extra column
cbind(x, y)
x <- cbind(c(1, 2), c(3, 4))
x
x <- matrix(1:6, nrow=3)
x
# "shrink" a matrix by reassigning a row subset of it
x <- x[c(1,3),]
x
# vector/matrix distinction
x <- matrix(1:8, nrow=4)
x
length(x)
class(x)
attributes(x) # attribute `dim` containing the number of rows and columns
nrow(x) # number of rows of `x`
ncol(x) # number of columns of `x`
# avoiding dimension reduction
x <- matrix(1:8, nrow=4)
x
v <- x[2,]
v # output [1] 2 6 , `v` is a vector
v <- x[2,, drop=FALSE] # avoid dimension reduction
v # `v` is a 1x2 matrix
x <- 1:4
x
attributes(x) # output NULL
y <- as.matrix(x) # treat vector as matrix
attributes(y) # output $dim [1] 4 1
# naming matrix rows and columns
x <- matrix(1:8, nrow=4)
x
colnames(x) # output NULL
colnames(x) <- c("x","y")
x
colnames(x) # output x y
rownames(x) <- c("a","b","c","d")
x
# array == tensor
t1 <- matrix(1:6, nrow=3)
t1
t2 <- matrix(10:15, nrow=3)
t2
tens <- array(data=c(t1,t2), dim=c(3,2,2))
attributes(tens)
tens
| /matrix.R | no_license | olk/examples_R | R | false | false | 3,100 | r | # creating
x <- matrix(c(1:4), nrow=2)
x
# allocate an empty (all-NA) matrix and fill it element by element
x <- matrix(nrow=2, ncol=2)
x[1,1] <- 1
x[1,2] <- 0
x[2,1] <- 0
x[2,2] <- 1
x
# byrow=TRUE fills row by row instead of the default column-major order
x <- matrix(c(1:6), nrow=2, byrow=TRUE)
x
# operations
x <- matrix(c(1:4), nrow=2)
x %*% x
3 * x
x * 3
x + x
# indexing
x <- matrix(c(1:9), nrow=3)
x
x[,2:3]
x[2:3,]
x[c(2,3), c(1,2)]
x[c(2,3), c(1,2)] <- matrix(c(0,0,1,0), nrow=2)
x
# assigning submatrices
x <- matrix(nrow=3, ncol=3)
y <- matrix(c(1,0,0,1), nrow=2)
x[2:3,2:3] <- y
x
# a negative index drops the given row(s)/column(s)
x <- matrix(1:9, nrow=3)
x[-2,]
# filtering
x <- matrix(c(1:3,2:4), ncol=2)
x
x[x[,2] >= 3,]
v <- x[,2] >= 3 # second column of `x`, elements >= 3
v # output [1] FALSE TRUE TRUE
x[v,] # keep the rows of `x` where `v` is TRUE
# x[v,] == rows of `x` specified by `v` element is `TRUE` or `FALSE`
# first element of `v` is `FALSE`, thus first row of `x` is skipped
# second and third element of `v` are `TRUE`, thus second and third row of `x` used
x <- matrix(c(1:6), nrow=3)
x[x[,1] > 1 & x[,2] > 5,] # output [1] 3 6
x <- matrix(c(5,2,10,-3,10,23), ncol=2)
x
which(x > 3) # output [1] 1 3 5 6
# row()/col()
# covariance matrix
# Build an n x n equicorrelation matrix: 1 on the diagonal, rho elsewhere.
# NOTE(review): this definition masks stats::cov() in the workspace.
cov <- function(rho, n) {
  out <- matrix(rho, nrow = n, ncol = n)
  diag(out) <- 1
  out
}
# 3x3 matrix: 1 on the diagonal, 0.2 everywhere else
x <- cov(0.2, 3)
x
# apply()
x <- matrix(c(1:6), nrow=3)
# Divide x element-wise by the recycled vector (2, 8); the last expression
# is returned implicitly (no return()).
f <- function(x) {
  divisors <- c(2, 8)
  x / divisors
}
# if f() returns a vector of `k` elements, the matrix returned by `apply()` will have k rows
# if f() returns a scalar, the result of `apply()` will be a vector
x <- apply(x, 1, f) # returns a matrix of 2 rows and 3 columns
x
t(x) # transpose matrix
# a matrix of `0` and `1`; check whether the majority of the first d elements in a row are `1`
# Return 1 when more than half of the first d entries of m are 1, else 0.
# Assumes d >= 1 and m holds 0/1 values without NA (as in the demo below).
f <- function(m, d) {
  share_of_ones <- sum(m[1:d]) / d
  if (share_of_ones > 0.5) 1 else 0
}
x <- matrix(c(1,0,1,1,0,1,1,1,1,0,1,0,0,1,1), nrow=3, byrow=TRUE)
x
# extra arguments after the function are forwarded to f() (here: d)
apply(x, 1, f, 3) # output [1] 1 1 0
apply(x, 1, f, 2) # output [1] 0 1 0
# find outliers
# For each row of m, return the column index of the entry farthest from the
# row median (ties resolved to the first such entry, per which.max()).
f <- function(m) {
  farthest_from_median <- function(row_vals) {
    which.max(abs(row_vals - median(row_vals)))
  }
  apply(m, 1, farthest_from_median)
}
x <- matrix(c(1:6), nrow=2)
x
f(x) # output [1] 1 1
# changing size of a matrix
x <- rep(1,4)
x
y <- matrix(c(1:12), nrow=4)
y
# cbind() glues the vector onto the matrix as an extra column
cbind(x, y)
x <- cbind(c(1, 2), c(3, 4))
x
x <- matrix(1:6, nrow=3)
x
# "shrink" a matrix by reassigning a row subset of it
x <- x[c(1,3),]
x
# vector/matrix distinction
x <- matrix(1:8, nrow=4)
x
length(x)
class(x)
attributes(x) # attribute `dim` containing the number of rows and columns
nrow(x) # number of rows of `x`
ncol(x) # number of columns of `x`
# avoiding dimension reduction
x <- matrix(1:8, nrow=4)
x
v <- x[2,]
v # output [1] 2 6 , `v` is a vector
v <- x[2,, drop=FALSE] # avoid dimension reduction
v # `v` is a 1x2 matrix
x <- 1:4
x
attributes(x) # output NULL
y <- as.matrix(x) # treat vector as matrix
attributes(y) # output $dim [1] 4 1
# naming matrix rows and columns
x <- matrix(1:8, nrow=4)
x
colnames(x) # output NULL
colnames(x) <- c("x","y")
x
colnames(x) # output x y
rownames(x) <- c("a","b","c","d")
x
# array == tensor
t1 <- matrix(1:6, nrow=3)
t1
t2 <- matrix(10:15, nrow=3)
t2
tens <- array(data=c(t1,t2), dim=c(3,2,2))
attributes(tens)
tens
|
# ObsDiurnalCycleQg.R
# Variable requirements: Qg
#
# This script plots the average diurnal cycle of Qg for
# an observation time series.
# Four plots are produced, one for each season, over an
# entire, integer-year single-site data set. Dataset
# **MUST START AT JAN 1**
#
# Gab Abramowitz UNSW 2010 (palshelp at gmail dot com)
library(pals)
# Analysis mode passed through to ObsDiurnalCycle().
analysisType = 'ObsAnalysis'
# Other arguments to DiurnalCycle:
# NOTE(review): QgNames/QgUnits are not defined in this script -- presumably
# exported by the pals package; verify.
varname=QgNames
units=QgUnits
# y-axis label: ground heat flux in W/m^2
ytext = expression("Ground heat flux W/"~m^{2})
legendtext=c('Observed')
ObsDiurnalCycle(analysisType,varname,units,ytext,legendtext) | /palsweb/WebContent/r/ObsDiurnalCycleQg.R | no_license | edenduthie/pals | R | false | false | 578 | r | # ObsDiurnalCycleQg.R
# Variable requirements: Qg
#
# This script plots the average diurnal cycle of Qg for
# an observation time series.
# Four plots are produced, one for each season, over an
# entire, integer-year single-site data set. Dataset
# **MUST START AT JAN 1**
#
# Gab Abramowitz UNSW 2010 (palshelp at gmail dot com)
library(pals)
# Analysis mode passed through to ObsDiurnalCycle().
analysisType = 'ObsAnalysis'
# Other arguments to DiurnalCycle:
# NOTE(review): QgNames/QgUnits are not defined in this script -- presumably
# exported by the pals package; verify.
varname=QgNames
units=QgUnits
# y-axis label: ground heat flux in W/m^2
ytext = expression("Ground heat flux W/"~m^{2})
legendtext=c('Observed')
ObsDiurnalCycle(analysisType,varname,units,ytext,legendtext) |
# March Madness submission script: fit a logistic regression on mean Massey
# ordinal rankings and predict win probabilities for every matchup in the
# example submission file.
# NOTE(review): rm(list=ls()) wipes the user's workspace; avoid in shared scripts.
rm(list=ls())
#library(tidyverse)
library(dplyr)
library(tidyr)
#library(data.table)
sub_results <- read.csv("example_sub.csv", stringsAsFactors = FALSE)
# Widen massey.csv so there is one column per ranking system.
rankings <- read.csv("massey.csv",stringsAsFactors = FALSE)%>%
spread(key = SystemName, value = OrdinalRank)
teams <- read.csv("teams2019.csv", stringsAsFactors = FALSE)
tourney <- read.csv("tourney.csv", stringsAsFactors = FALSE)
season <- read.csv("season.csv", stringsAsFactors = FALSE)
colnames(rankings)[2] <- "DayNum"
# Keep only rankings from DayNum 133 (presumably the final pre-tournament
# snapshot -- verify against the data dictionary).
rankings <- rankings[rankings$DayNum == 133, ]
# Mean ordinal rank across all systems, ignoring systems with no rank.
rankings$Mean <- rowMeans(rankings[, 3:ncol(rankings)], na.rm=TRUE)
rankings <- rankings[,c("Season","TeamID","Mean")]
# Re-encode each game so Team1 is always the lower TeamID; result is 1 when
# Team1 won. Seasons before 2003 are dropped.
train <- season %>%
select(Season, WTeamID, LTeamID) %>%
mutate(team_id_diff = WTeamID - LTeamID,
Team1 = if_else(team_id_diff < 0, WTeamID, LTeamID),
Team2 = if_else(team_id_diff > 0, WTeamID, LTeamID),
result = if_else(WTeamID == Team1, 1, 0)) %>%
subset(Season >= 2003) -> NULL # (placeholder removed)
| /Stat380_Midterm(2ndresult).R | no_license | HungMai5391/STAT380_Projects | R | false | false | 2,078 | r | rm(list=ls())
# (March Madness submission script: logistic regression on mean Massey
# ordinal rankings; predicts win probabilities for every matchup.)
#library(tidyverse)
library(dplyr)
library(tidyr)
#library(data.table)
sub_results <- read.csv("example_sub.csv", stringsAsFactors = FALSE)
# Widen massey.csv so there is one column per ranking system.
rankings <- read.csv("massey.csv",stringsAsFactors = FALSE)%>%
spread(key = SystemName, value = OrdinalRank)
teams <- read.csv("teams2019.csv", stringsAsFactors = FALSE)
tourney <- read.csv("tourney.csv", stringsAsFactors = FALSE)
season <- read.csv("season.csv", stringsAsFactors = FALSE)
colnames(rankings)[2] <- "DayNum"
# Keep only rankings from DayNum 133 (presumably the final pre-tournament
# snapshot -- verify against the data dictionary).
rankings <- rankings[rankings$DayNum == 133, ]
# Mean ordinal rank across all systems, ignoring systems with no rank.
rankings$Mean <- rowMeans(rankings[, 3:ncol(rankings)], na.rm=TRUE)
rankings <- rankings[,c("Season","TeamID","Mean")]
# Re-encode each game so Team1 is always the lower TeamID; result is 1 when
# Team1 won. Seasons before 2003 are dropped.
train <- season %>%
select(Season, WTeamID, LTeamID) %>%
mutate(team_id_diff = WTeamID - LTeamID,
Team1 = if_else(team_id_diff < 0, WTeamID, LTeamID),
Team2 = if_else(team_id_diff > 0, WTeamID, LTeamID),
result = if_else(WTeamID == Team1, 1, 0)) %>%
select(Season, Team1, Team2, result) %>%
subset(Season >= 2003)
# Attach each team's mean rank (Mean.x = Team1, Mean.y = Team2).
train <- train %>%
left_join(rankings, by = c("Season", "Team1" = "TeamID")) %>%
left_join(rankings, by = c("Season", "Team2" = "TeamID"))
train <- subset(train, Season < 2014)
#linearMod <- lm(result ~ Mean.x + Mean.y,data = train)
# Logistic regression: P(Team1 wins) from the two teams' mean ranks.
fit <- glm(result ~ Mean.x + Mean.y,
data = train,
family = "binomial")
# NOTE(review): these joins omit Season, so every (team, season) ranking row
# matches; the filter(Season == Season.y) afterwards restricts to the right
# season -- verify the Season.x/Season.y suffixes against the joined frame.
sub_results <- read.csv("example_sub.csv", stringsAsFactors = FALSE) %>%
select(id) %>%
separate(id, sep = "_", into = c("Season","id", "Team1", "Team2"), convert = TRUE) %>%
left_join(rankings, by = c("Team1" = "TeamID")) %>%
left_join(rankings, by = c("Team2" = "TeamID")) %>% filter(Season == Season.y)
sub_results$Pred <- predict(fit, sub_results, type = "response")
# Rebuild the submission id and average predictions per matchup.
submit <- sub_results %>%
select(Season.x, id, Team1, Team2, Pred) %>%
unite("id", Season.x, id, Team1, Team2, sep = "_") %>%
group_by(id) %>% summarise(result = mean(Pred)) %>%
write.csv("submit.csv", row.names = FALSE)
#2019_122_1410_1465
#2019_124_1308_1465
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/term_embeddings.R
\name{embed_terms}
\alias{embed_terms}
\title{Generate Embeddings of Terms}
\usage{
embed_terms(merged_terms, embedding_size = 20L, term_count_min = 5L,
x_max = 10L, n_iter = 15L)
}
\arguments{
\item{merged_terms}{A character vector of visits' descriptions with terms
separated by \code{", "}}
\item{embedding_size}{An integer (default: 20)}
\item{term_count_min}{The minimum number of occurrences of a term required for it to be embedded (default: 5)}
\item{x_max}{A \code{x_max} parameter of GloVe, see \code{?text2vec::GlobalVectors} (default: 10)}
\item{n_iter}{A number of epochs of GloVe (default: 15)}
}
\value{
A matrix of embeddings of the terms.
}
\description{
Generate embeddings of terms based on descriptions of visits
using the GloVe algorithm.
By default the order of the terms is skipped
(all weights in the term coocurrence matrix are equal to 1) and
only terms occurring at least 5 times are embedded.
}
\examples{
inter_term_vectors <- embed_terms(interviews,
term_count_min = 1L)
inter_term_vectors
inter_term_vectors <- embed_terms(interviews,
term_count_min = 1L, embedding_size = 10L)
inter_term_vectors
inter_term_vectors <- embed_terms(interviews, embedding_size = 10L,
term_count_min = 1, n_iter = 50, x_max = 20)
inter_term_vectors
}
| /man/embed_terms.Rd | permissive | karthik/memr | R | false | true | 1,353 | rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/term_embeddings.R
\name{embed_terms}
\alias{embed_terms}
\title{Generate Embeddings of Terms}
\usage{
embed_terms(merged_terms, embedding_size = 20L, term_count_min = 5L,
x_max = 10L, n_iter = 15L)
}
\arguments{
\item{merged_terms}{A character vector of visits' descriptions with terms
separated by \code{", "}}
\item{embedding_size}{An integer (default: 20)}
\item{term_count_min}{The minimum number of occurrences of a term required for it to be embedded (default: 5)}
\item{x_max}{A \code{x_max} parameter of GloVe, see \code{?text2vec::GlobalVectors} (default: 10)}
\item{n_iter}{A number of epochs of GloVe (default: 15)}
}
\value{
A matrix of embeddings of the terms.
}
\description{
Generate embeddings of terms based on descriptions of visits
using the GloVe algorithm.
By default the order of the terms is skipped
(all weights in the term coocurrence matrix are equal to 1) and
only terms occurring at least 5 times are embedded.
}
\examples{
inter_term_vectors <- embed_terms(interviews,
term_count_min = 1L)
inter_term_vectors
inter_term_vectors <- embed_terms(interviews,
term_count_min = 1L, embedding_size = 10L)
inter_term_vectors
inter_term_vectors <- embed_terms(interviews, embedding_size = 10L,
term_count_min = 1, n_iter = 50, x_max = 20)
inter_term_vectors
}
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 58694
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 58408
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 58408
c
c Input Parameter (command line, file):
c input filename QBFLIB/Herbstritt/blackbox-01X-QBF/biu.mv.xl_ao.bb-b003-p020-IPF02-c05.blif-biu.inv.prop.bb-bmc.conf02.01X-QBF.BB1-01X.BB2-Zi.BB3-01X.with-IOC.unfold-008.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 24270
c no.of clauses 58694
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 58408
c
c QBFLIB/Herbstritt/blackbox-01X-QBF/biu.mv.xl_ao.bb-b003-p020-IPF02-c05.blif-biu.inv.prop.bb-bmc.conf02.01X-QBF.BB1-01X.BB2-Zi.BB3-01X.with-IOC.unfold-008.qdimacs 24270 58694 E1 [1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1066 1067 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1120 1121 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1596 1597 1610 1611 1644 1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1664 1665 2130 2131 2132 2133 2134 2135 2136 2137 2138 2139 2140 2141 2154 2155 2188 2189 2190 2191 2192 2193 2194 2195 2196 2197 2198 2199 2200 2201 2202 2203 2208 2209 2674 2675 2676 2677 2678 2679 2680 2681 2682 2683 2684 2685 2698 2699 2732 2733 2734 2735 2736 2737 2738 2739 2740 2741 2742 2743 2744 2745 2746 2747 2752 2753 3218 3219 3220 3221 3222 3223 3224 3225 3226 3227 3228 3229 3242 3243 3276 3277 3278 3279 3280 3281 3282 3283 3284 3285 3286 3287 3288 3289 3290 3291 3296 3297 3762 3763 3764 3765 3766 3767 3768 3769 3770 3771 3772 3773 3786 3787 3820 3821 3822 3823 3824 3825 3826 3827 3828 3829 3830 3831 3832 3833 3834 3835 3840 3841 4306 4307 4308 4309 4310 4311 4312 4313 4314 4315 4316 4317 4330 4331 4364 4365 4366 4367 4368 4369 4370 4371 4372 4373 4374 4375 4376 4377 4378 4379 4384 4385 4850 4851 4852 4853 4854 4855 4856 4857 4858 4859 4860 4861 4874 4875 4877 4879 4883 4885 4887 4889 4891 4893 4895 4897 4899 4901 4903 4905 4907 4908 4909 4910 4911 4912 4913 4914 4915 4916 4917 4918 4919 4920 4921 4922 4923 4928 4929] 0 113 20810 58408 RED
| /code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Herbstritt/blackbox-01X-QBF/biu.mv.xl_ao.bb-b003-p020-IPF02-c05.blif-biu.inv.prop.bb-bmc.conf02.01X-QBF.BB1-01X.BB2-Zi.BB3-01X.with-IOC.unfold-008/biu.mv.xl_ao.bb-b003-p020-IPF02-c05.blif-biu.inv.prop.bb-bmc.conf02.01X-QBF.BB1-01X.BB2-Zi.BB3-01X.with-IOC.unfold-008.R | no_license | arey0pushpa/dcnf-autarky | R | false | false | 2,304 | r | c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 58694
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 58408
c
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 58408
c
c Input Parameter (command line, file):
c input filename QBFLIB/Herbstritt/blackbox-01X-QBF/biu.mv.xl_ao.bb-b003-p020-IPF02-c05.blif-biu.inv.prop.bb-bmc.conf02.01X-QBF.BB1-01X.BB2-Zi.BB3-01X.with-IOC.unfold-008.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 24270
c no.of clauses 58694
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 58408
c
c QBFLIB/Herbstritt/blackbox-01X-QBF/biu.mv.xl_ao.bb-b003-p020-IPF02-c05.blif-biu.inv.prop.bb-bmc.conf02.01X-QBF.BB1-01X.BB2-Zi.BB3-01X.with-IOC.unfold-008.qdimacs 24270 58694 E1 [1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1066 1067 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1120 1121 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1596 1597 1610 1611 1644 1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1664 1665 2130 2131 2132 2133 2134 2135 2136 2137 2138 2139 2140 2141 2154 2155 2188 2189 2190 2191 2192 2193 2194 2195 2196 2197 2198 2199 2200 2201 2202 2203 2208 2209 2674 2675 2676 2677 2678 2679 2680 2681 2682 2683 2684 2685 2698 2699 2732 2733 2734 2735 2736 2737 2738 2739 2740 2741 2742 2743 2744 2745 2746 2747 2752 2753 3218 3219 3220 3221 3222 3223 3224 3225 3226 3227 3228 3229 3242 3243 3276 3277 3278 3279 3280 3281 3282 3283 3284 3285 3286 3287 3288 3289 3290 3291 3296 3297 3762 3763 3764 3765 3766 3767 3768 3769 3770 3771 3772 3773 3786 3787 3820 3821 3822 3823 3824 3825 3826 3827 3828 3829 3830 3831 3832 3833 3834 3835 3840 3841 4306 4307 4308 4309 4310 4311 4312 4313 4314 4315 4316 4317 4330 4331 4364 4365 4366 4367 4368 4369 4370 4371 4372 4373 4374 4375 4376 4377 4378 4379 4384 4385 4850 4851 4852 4853 4854 4855 4856 4857 4858 4859 4860 4861 4874 4875 4877 4879 4883 4885 4887 4889 4891 4893 4895 4897 4899 4901 4903 4905 4907 4908 4909 4910 4911 4912 4913 4914 4915 4916 4917 4918 4919 4920 4921 4922 4923 4928 4929] 0 113 20810 58408 RED
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.