content
large_stringlengths 0
6.46M
| path
large_stringlengths 3
331
| license_type
large_stringclasses 2
values | repo_name
large_stringlengths 5
125
| language
large_stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 4
6.46M
| extension
large_stringclasses 75
values | text
stringlengths 0
6.46M
|
|---|---|---|---|---|---|---|---|---|---|
# Build the member-year "who heeds" House dataset (congresses 93-112) from
# LEP files, Jacobson election returns, and committee assignment data.
library(partycalls)
# L'Ecuyer RNG kind so downstream (possibly parallel) draws are reproducible
set.seed(1634538933, kind = "L'Ecuyer")
# load party calls data
load("test_data/house_party_calls_lm.RData")
# one list element per congress: hou93 ... hou112
names(house_party_calls) <- paste0("hou", 93:112)
# use LEP data from website to build baseline dataset
lep_data_93_110 <-
readstata13::read.dta13("inst/extdata/LEPData93to110Congresses.dta")
setDT(lep_data_93_110)
lep_data_111_112 <-
readstata13::read.dta13("inst/extdata/LEPData111to113Congresses.dta")
setDT(lep_data_111_112)
# drop congress 113
lep_data_111_112 <- lep_data_111_112[congress <= 112, ]
# load aggregate legislative effectiveness data for some missing variables
lep_aggregate <- readstata13::read.dta13("inst/extdata/LEP93to113.dta")
# drop Tim Ryan's first entry (shorter of two)
lep_data_93_110 <- subset(lep_data_93_110, !(congress == 108 & icpsr == 20343 &
thomas_num == 7031))
lep_aggregate <- subset(lep_aggregate, !(congress == 108 & icpsr == 20343 &
thomas_num == 7031))
# prep data for merge
setDT(lep_aggregate)
lep_aggregate <- lep_aggregate[congress %in% c(111:112), ]
# territories and DC are excluded throughout the build
stabb_to_drop <- c("PR", "DC", "GU", "VI", "AS", "MP")
lep_data_93_110 <- subset(lep_data_93_110, !(st_name %in% stabb_to_drop))
lep_data_111_112 <- subset(lep_data_111_112, !(st_name %in% stabb_to_drop))
lep_aggregate <- subset(lep_aggregate, !(st_name %in% stabb_to_drop))
# keep only the columns the 111-112 file is missing; merged in below
lep_aggregate <- lep_aggregate[, .(congress, icpsr, afam, latino,
freshman, sophomore, south, leader)]
lep_data_111_112 <- merge(lep_data_111_112, lep_aggregate,
by = c("congress", "icpsr"))
# select variables for analysis
lep_data_93_110 <- lep_data_93_110[, .(thomas_name, icpsr, congress, st_name,
cd, dem, majority, female, afam, latino, votepct, speaker, chair, subchr,
power, seniority, maj_leader, min_leader, south, les)]
lep_data_111_112 <- lep_data_111_112[, .(thomas_name, icpsr, congress, st_name,
cd, dem, majority, female, afam, latino, votepct, speaker, chair, subchr,
power, seniority, maj_leader, min_leader, south, les)]
# merge data sets
lep_data <- rbind(lep_data_93_110, lep_data_111_112)
# clean data and add variables needed (hand fixes for individual members)
lep_data[thomas_name == "Albert, Carl", icpsr := 62]
lep_data[thomas_name == "Lambert, Blanche", icpsr := 29305]
lep_data[thomas_name == "Sekula Gibbs, Shelley", icpsr := 20541]
lep_data[icpsr == 62, dem := 1]
lep_data[icpsr == 20301, latino := 0]
lep_data[icpsr == 20301, afam := 0]
# fix south variable
south_stabb <- c("OK", "AR", "NC", "TX", "FL", "TN", "AL", "GA", "LA", "MS",
"KY", "VA", "SC")
lep_data[is.na(south) == TRUE, south := 0]
lep_data[st_name %in% south_stabb, south := 1]
# missing votepct means appointee
# mark these as drop
lep_data[, drop := 0]
lep_data[is.na(votepct), drop := 1]
# derived flags: first-term member, and party leadership of either party
lep_data[, freshman := 0]
lep_data[seniority == 1, freshman := 1]
lep_data[, leader := 0]
lep_data[maj_leader == 1, leader := 1]
lep_data[min_leader == 1, leader := 1]
# create state_cd
state_fips <- fread("inst/extdata/statefips.csv")
setnames(lep_data, "st_name", "stabb")
state_fips <- state_fips[order(statename), ]
state_fips <- state_fips[stabb != "DC", ]
# alphabetical state index 1..50, used to build the Jacobson-style district id
state_fips[, state_alphabetical_order := c(1:50)]
lep_data <- merge(lep_data, state_fips, by = "stabb")
# state_cd = <state alphabetical index><2-digit district>, e.g. 3212
lep_data[, state_cd := as.numeric(paste0(state_alphabetical_order,
sprintf("%02.f", cd)))]
# load jacobson presidential vote data
jacobson <- gdata::read.xls("inst/extdata/HR4614.xls")
setDT(jacobson)
# prep data for merge
jacobson[, congress := calc_congress(year) + 1]
setnames(jacobson, "stcd", "state_cd")
# district returns for the sitting congress (93-112)
jacobson1 <- jacobson[congress >= 93 & congress <= 112, ]
# BUG FIX: select from jacobson1, not jacobson -- the original re-read the
# unfiltered table, silently discarding the congress filter above
jacobson1 <- jacobson1[, .(congress, state_cd, dv, dpres, po1, po2.)]
# previous-election vote (dvp), shifted back one congress so each row
# lines up with the congress it describes
jacobson2 <- jacobson[congress >= 94 & congress <= 113, ]
jacobson2[, congress := congress - 1]
# BUG FIX: select from jacobson2, not jacobson -- the original dropped both
# the filter and the congress - 1 shift, so dvp merged on the wrong congress
jacobson2 <- jacobson2[, .(congress, state_cd, dvp)]
member_year_data <- merge(lep_data, jacobson1,
by = c("congress", "state_cd"),
all.x = TRUE)
member_year_data <- merge(member_year_data, jacobson2,
by = c("congress", "state_cd"),
all.x = TRUE)
# where current dem vote share is missing, fall back to the shifted dvp
member_year_data[is.na(dv) == TRUE, dv := dvp]
# # find missing dpres values
# member_year_data[is.na(dpres) == TRUE, .(icpsr, thomas_name, congress, state_cd)]
# # replace these with previous values
# member_year_data[state_cd == 3212 & congress == 93, dpres]
# member_year_data[state_cd == 3213 & congress == 93, dpres]
# member_year_data[state_cd == 3214 & congress == 93, dpres]
# member_year_data[state_cd == 3215 & congress == 93, dpres]
# hand-fill dpres for four districts missing from the source data
member_year_data[state_cd == 3212 & congress == 94, dpres := 84.42]
member_year_data[state_cd == 3213 & congress == 94, dpres := 51.45]
member_year_data[state_cd == 3214 & congress == 94, dpres := 52.22]
member_year_data[state_cd == 3215 & congress == 94, dpres := 32.29]
# fix dem and majority variables for analysis
setnames(member_year_data, "icpsr", "icpsrLegis")
# Eugene Atkinson, party changer
member_year_data[icpsrLegis == 94602 & congress == 97, dem := 0]
member_year_data[icpsrLegis == 94602 & congress == 97, majority := 0]
# Phil Gramm, party changer
member_year_data[icpsrLegis == 14628 & congress == 98, dem := 0]
member_year_data[icpsrLegis == 14628 & congress == 98, majority := 0]
# Bill Grant, party changer
member_year_data[icpsrLegis == 15415 & congress == 101, dem := 0]
member_year_data[icpsrLegis == 15415 & congress == 101, majority := 0]
# Bill Redmond, miscoded
member_year_data[icpsrLegis == 29772 & congress == 105, dem := 0]
member_year_data[icpsrLegis == 29772 & congress == 105, majority := 1]
# J. Randy Forbes, miscoded
member_year_data[icpsrLegis == 20143 & congress == 107, dem := 0]
member_year_data[icpsrLegis == 20143 & congress == 107, majority := 1]
# John Moakley, miscoded
member_year_data[icpsrLegis == 14039 & congress == 93, dem := 1]
member_year_data[icpsrLegis == 14039 & congress == 93, majority := 1]
# Joseph Smith, miscoded
member_year_data[icpsrLegis == 14876 & congress == 97, dem := 1]
member_year_data[icpsrLegis == 14876 & congress == 97, majority := 1]
# Jill Long, miscoded
member_year_data[icpsrLegis == 15631 & congress == 101, dem := 1]
member_year_data[icpsrLegis == 15631 & congress == 101, majority := 1]
# John Olver, miscoded
member_year_data[icpsrLegis == 29123 & congress == 102, dem := 1]
# BUG FIX: this line previously repeated dem := 1; every other miscode fix
# sets dem and majority as a pair, so set majority here
member_year_data[icpsrLegis == 29123 & congress == 102, majority := 1]
# Bernie Sanders, independent who we don't want to count as Republican
member_year_data[icpsrLegis == 29147 & congress >= 102, dem := 1]
# flip the majority flag to match the dem recode above
member_year_data[icpsrLegis == 29147 & congress >= 102,
majority := abs(majority - 1)]
# there are minority party members listed as chairs, fix this
member_year_data[icpsrLegis == 11036 & congress == 100, chair := 0]
member_year_data[icpsrLegis == 14829 & congress == 102, chair := 0]
member_year_data[icpsrLegis == 14248 & congress == 107, chair := 0]
# create presidential vote share for same party candidate
member_year_data[dem == 1, pres_vote_share := dpres]
member_year_data[dem == 0, pres_vote_share := 100 - dpres]
member_year_data[dem == 1, vote_share := dv]
member_year_data[dem == 0, vote_share := 100 - dv]
# load replication data for committee data
old_committee <- foreign::read.dta("inst/extdata/who-heeds-replication-archive.dta")
setDT(old_committee)
# best committee value per member-congress from the original archive
old_best_committee <- old_committee[, .(congress, icpsr, bestgrosswart)]
setnames(old_best_committee, "icpsr", "icpsrLegis")
# get stewart committee data for congress 110-112
new_committee <- fread("inst/extdata/house_assignments_103-114-1.csv")
setnames(new_committee, "Congress", "congress")
setnames(new_committee, "Committee code", "code")
setnames(new_committee, "ID #", "icpsrLegis")
setnames(new_committee, "Committee Name", "committee_name")
setnames(new_committee, "State Name", "stabb")
setnames(new_committee, "CD", "cd")
setnames(new_committee, "Maj/Min", "maj")
new_committee_value <- fread("inst/extdata/committee_values_house_110-112.csv")
new_committee <- merge(new_committee, new_committee_value, by = "code",
all.x = TRUE)
new_committee <- new_committee[is.na(congress) == FALSE,]
new_committee <- new_committee[congress >= 110, ]
new_committee <- new_committee[congress <= 112, ]
new_committee[, drop := 1 * (stabb %in% stabb_to_drop)]
new_committee <- new_committee[drop != 1, ]
# fix icpsrLegis numbers
lep_new_data <- lep_data[congress >= 110,]
new_committee[, in_lep_data := 1 * (icpsrLegis %in% lep_new_data$icpsr)]
# printed for inspection only: assignments whose id is unknown to the LEP data
new_committee[in_lep_data == 0, .(congress, icpsrLegis, Name, stabb, cd)]
new_committee[icpsrLegis == 21169, icpsrLegis := 20524] # mike fitzpatrick
new_committee[icpsrLegis == 21144, icpsrLegis := 20725] # tim walburg
new_committee[icpsrLegis == 90901, icpsrLegis := 20901] # parker griffith
new_committee[icpsrLegis == 29335, icpsrLegis := 20959] # theodore deutch
new_committee[icpsrLegis == 21161, icpsrLegis := 29550] # steve chabot
new_committee[icpsrLegis == 39310, icpsrLegis := 20917] # ahn cao
new_committee[icpsrLegis == 15006, icpsrLegis := 20758] # gus bilirakis
# dan miller was not in congress at this time
# correct NA values
# no committee takes lower rank than worst committee
new_committee[is.na(committee) == TRUE, rank := 22]
new_committee[is.na(rank) == TRUE, rank := 22]
# get best committee for mc
new_best_committee <- new_committee[,
.(bestgrosswart = min(rank, na.rm = TRUE)), .(congress, icpsrLegis, Name)]
# NOTE(review): best_grosswart (22 - rank) is computed here but never used --
# the next line keeps the raw bestgrosswart rank. Confirm which scale
# old_best_committee$bestgrosswart is on before trusting the rbind below.
new_best_committee[, best_grosswart := 22 - bestgrosswart]
new_best_committee <- new_best_committee[, .(congress, icpsrLegis, bestgrosswart)]
# merge in bestgrosswart data
best_committee <- rbind(old_best_committee, new_best_committee)
member_year_data <- merge(member_year_data, best_committee,
by = c("icpsrLegis", "congress"), all.x = TRUE)
# members with no committee record get 0
member_year_data[is.na(bestgrosswart) == TRUE, bestgrosswart := 0]
# get responsiveness rates
# compute responsiveness one congress at a time and stack the results
new_responsiveness <- rbindlist(lapply(93:112, function(congress) {
cat(congress, " ")
rc <- make_member_year_data(congress, house_party_calls)
DATA <- rc$member_year_data
DATA[, .(congress,
icpsrLegis,
party_free_ideal_point = pf_ideal,
pirate100 = 100 * responsiveness_party_calls,
pfrate100 = 100 * responsiveness_noncalls,
ideological_extremism)]
}))
new_whoheeds13 <- merge(member_year_data, new_responsiveness,
by = c("congress", "icpsrLegis"), all = TRUE)
setDT(new_whoheeds13)
# # correct some values
# check_dem <- new_whoheeds13[dem == 1 &
# ideological_extremism != -party_free_ideal_point, ]
# check_rep <- new_whoheeds13[dem == 0 &
# ideological_extremism != party_free_ideal_point, ]
# enforce extremism = party-free ideal point signed toward each party's pole
new_whoheeds13[dem == 1 & ideological_extremism != -party_free_ideal_point,
ideological_extremism := -party_free_ideal_point]
new_whoheeds13[dem == 0 & ideological_extremism != party_free_ideal_point,
ideological_extremism := party_free_ideal_point]
# drop members with missing values in variables used for analysis
new_whoheeds13[, drop := 0]
new_whoheeds13[is.na(majority) == TRUE, drop := 1]
new_whoheeds13[is.na(pirate100) == TRUE, drop := 1]
new_whoheeds13[is.na(pfrate100) == TRUE, drop := 1]
new_whoheeds13[is.na(ideological_extremism) == TRUE, drop := 1]
new_whoheeds13[is.na(party_free_ideal_point) == TRUE, drop := 1]
# drop uneeded variables
new_whoheeds13[, `:=`(c("fips", "statename", "dvp", "po1", "po2.",
"state_alphabetical_order"), NULL)]
# drop appointees
new_whoheeds13[is.na(votepct) == TRUE, drop := 1]
# party changers and special elections miscoded; correct them
new_whoheeds13[vote_share < 50 & drop == 0, vote_share := 100 - vote_share]
save(new_whoheeds13,
file = "test_data/new_whoheeds13_lm.RData")
house_data <- new_whoheeds13[drop == 0, ]
# NOTE(review): devtools::use_data() is defunct in current devtools; the
# modern equivalent is usethis::use_data(house_data, overwrite = TRUE)
devtools::use_data(house_data, overwrite = TRUE)
|
/old/dev/make_new_whoheeds13_lm.R
|
no_license
|
Hershberger/partycalls
|
R
| false
| false
| 11,531
|
r
|
# Build the member-year "who heeds" House dataset (congresses 93-112) from
# LEP files, Jacobson election returns, and committee assignment data.
library(partycalls)
# L'Ecuyer RNG kind so downstream (possibly parallel) draws are reproducible
set.seed(1634538933, kind = "L'Ecuyer")
# load party calls data
load("test_data/house_party_calls_lm.RData")
# one list element per congress: hou93 ... hou112
names(house_party_calls) <- paste0("hou", 93:112)
# use LEP data from website to build baseline dataset
lep_data_93_110 <-
readstata13::read.dta13("inst/extdata/LEPData93to110Congresses.dta")
setDT(lep_data_93_110)
lep_data_111_112 <-
readstata13::read.dta13("inst/extdata/LEPData111to113Congresses.dta")
setDT(lep_data_111_112)
# drop congress 113
lep_data_111_112 <- lep_data_111_112[congress <= 112, ]
# load aggregate legislative effectiveness data for some missing variables
lep_aggregate <- readstata13::read.dta13("inst/extdata/LEP93to113.dta")
# drop Tim Ryan's first entry (shorter of two)
lep_data_93_110 <- subset(lep_data_93_110, !(congress == 108 & icpsr == 20343 &
thomas_num == 7031))
lep_aggregate <- subset(lep_aggregate, !(congress == 108 & icpsr == 20343 &
thomas_num == 7031))
# prep data for merge
setDT(lep_aggregate)
lep_aggregate <- lep_aggregate[congress %in% c(111:112), ]
# territories and DC are excluded throughout the build
stabb_to_drop <- c("PR", "DC", "GU", "VI", "AS", "MP")
lep_data_93_110 <- subset(lep_data_93_110, !(st_name %in% stabb_to_drop))
lep_data_111_112 <- subset(lep_data_111_112, !(st_name %in% stabb_to_drop))
lep_aggregate <- subset(lep_aggregate, !(st_name %in% stabb_to_drop))
# keep only the columns the 111-112 file is missing; merged in below
lep_aggregate <- lep_aggregate[, .(congress, icpsr, afam, latino,
freshman, sophomore, south, leader)]
lep_data_111_112 <- merge(lep_data_111_112, lep_aggregate,
by = c("congress", "icpsr"))
# select variables for analysis
lep_data_93_110 <- lep_data_93_110[, .(thomas_name, icpsr, congress, st_name,
cd, dem, majority, female, afam, latino, votepct, speaker, chair, subchr,
power, seniority, maj_leader, min_leader, south, les)]
lep_data_111_112 <- lep_data_111_112[, .(thomas_name, icpsr, congress, st_name,
cd, dem, majority, female, afam, latino, votepct, speaker, chair, subchr,
power, seniority, maj_leader, min_leader, south, les)]
# merge data sets
lep_data <- rbind(lep_data_93_110, lep_data_111_112)
# clean data and add variables needed (hand fixes for individual members)
lep_data[thomas_name == "Albert, Carl", icpsr := 62]
lep_data[thomas_name == "Lambert, Blanche", icpsr := 29305]
lep_data[thomas_name == "Sekula Gibbs, Shelley", icpsr := 20541]
lep_data[icpsr == 62, dem := 1]
lep_data[icpsr == 20301, latino := 0]
lep_data[icpsr == 20301, afam := 0]
# fix south variable
south_stabb <- c("OK", "AR", "NC", "TX", "FL", "TN", "AL", "GA", "LA", "MS",
"KY", "VA", "SC")
lep_data[is.na(south) == TRUE, south := 0]
lep_data[st_name %in% south_stabb, south := 1]
# missing votepct means appointee
# mark these as drop
lep_data[, drop := 0]
lep_data[is.na(votepct), drop := 1]
# derived flags: first-term member, and party leadership of either party
lep_data[, freshman := 0]
lep_data[seniority == 1, freshman := 1]
lep_data[, leader := 0]
lep_data[maj_leader == 1, leader := 1]
lep_data[min_leader == 1, leader := 1]
# create state_cd
state_fips <- fread("inst/extdata/statefips.csv")
setnames(lep_data, "st_name", "stabb")
state_fips <- state_fips[order(statename), ]
state_fips <- state_fips[stabb != "DC", ]
# alphabetical state index 1..50, used to build the Jacobson-style district id
state_fips[, state_alphabetical_order := c(1:50)]
lep_data <- merge(lep_data, state_fips, by = "stabb")
# state_cd = <state alphabetical index><2-digit district>, e.g. 3212
lep_data[, state_cd := as.numeric(paste0(state_alphabetical_order,
sprintf("%02.f", cd)))]
# load jacobson presidential vote data
jacobson <- gdata::read.xls("inst/extdata/HR4614.xls")
setDT(jacobson)
# prep data for merge
jacobson[, congress := calc_congress(year) + 1]
setnames(jacobson, "stcd", "state_cd")
# district returns for the sitting congress (93-112)
jacobson1 <- jacobson[congress >= 93 & congress <= 112, ]
# BUG FIX: select from jacobson1, not jacobson -- the original re-read the
# unfiltered table, silently discarding the congress filter above
jacobson1 <- jacobson1[, .(congress, state_cd, dv, dpres, po1, po2.)]
# previous-election vote (dvp), shifted back one congress so each row
# lines up with the congress it describes
jacobson2 <- jacobson[congress >= 94 & congress <= 113, ]
jacobson2[, congress := congress - 1]
# BUG FIX: select from jacobson2, not jacobson -- the original dropped both
# the filter and the congress - 1 shift, so dvp merged on the wrong congress
jacobson2 <- jacobson2[, .(congress, state_cd, dvp)]
member_year_data <- merge(lep_data, jacobson1,
by = c("congress", "state_cd"),
all.x = TRUE)
member_year_data <- merge(member_year_data, jacobson2,
by = c("congress", "state_cd"),
all.x = TRUE)
# where current dem vote share is missing, fall back to the shifted dvp
member_year_data[is.na(dv) == TRUE, dv := dvp]
# # find missing dpres values
# member_year_data[is.na(dpres) == TRUE, .(icpsr, thomas_name, congress, state_cd)]
# # replace these with previous values
# member_year_data[state_cd == 3212 & congress == 93, dpres]
# member_year_data[state_cd == 3213 & congress == 93, dpres]
# member_year_data[state_cd == 3214 & congress == 93, dpres]
# member_year_data[state_cd == 3215 & congress == 93, dpres]
# hand-fill dpres for four districts missing from the source data
member_year_data[state_cd == 3212 & congress == 94, dpres := 84.42]
member_year_data[state_cd == 3213 & congress == 94, dpres := 51.45]
member_year_data[state_cd == 3214 & congress == 94, dpres := 52.22]
member_year_data[state_cd == 3215 & congress == 94, dpres := 32.29]
# fix dem and majority variables for analysis
setnames(member_year_data, "icpsr", "icpsrLegis")
# Eugene Atkinson, party changer
member_year_data[icpsrLegis == 94602 & congress == 97, dem := 0]
member_year_data[icpsrLegis == 94602 & congress == 97, majority := 0]
# Phil Gramm, party changer
member_year_data[icpsrLegis == 14628 & congress == 98, dem := 0]
member_year_data[icpsrLegis == 14628 & congress == 98, majority := 0]
# Bill Grant, party changer
member_year_data[icpsrLegis == 15415 & congress == 101, dem := 0]
member_year_data[icpsrLegis == 15415 & congress == 101, majority := 0]
# Bill Redmond, miscoded
member_year_data[icpsrLegis == 29772 & congress == 105, dem := 0]
member_year_data[icpsrLegis == 29772 & congress == 105, majority := 1]
# J. Randy Forbes, miscoded
member_year_data[icpsrLegis == 20143 & congress == 107, dem := 0]
member_year_data[icpsrLegis == 20143 & congress == 107, majority := 1]
# John Moakley, miscoded
member_year_data[icpsrLegis == 14039 & congress == 93, dem := 1]
member_year_data[icpsrLegis == 14039 & congress == 93, majority := 1]
# Joseph Smith, miscoded
member_year_data[icpsrLegis == 14876 & congress == 97, dem := 1]
member_year_data[icpsrLegis == 14876 & congress == 97, majority := 1]
# Jill Long, miscoded
member_year_data[icpsrLegis == 15631 & congress == 101, dem := 1]
member_year_data[icpsrLegis == 15631 & congress == 101, majority := 1]
# John Olver, miscoded
member_year_data[icpsrLegis == 29123 & congress == 102, dem := 1]
# BUG FIX: this line previously repeated dem := 1; every other miscode fix
# sets dem and majority as a pair, so set majority here
member_year_data[icpsrLegis == 29123 & congress == 102, majority := 1]
# Bernie Sanders, independent who we don't want to count as Republican
member_year_data[icpsrLegis == 29147 & congress >= 102, dem := 1]
# flip the majority flag to match the dem recode above
member_year_data[icpsrLegis == 29147 & congress >= 102,
majority := abs(majority - 1)]
# there are minority party members listed as chairs, fix this
member_year_data[icpsrLegis == 11036 & congress == 100, chair := 0]
member_year_data[icpsrLegis == 14829 & congress == 102, chair := 0]
member_year_data[icpsrLegis == 14248 & congress == 107, chair := 0]
# create presidential vote share for same party candidate
member_year_data[dem == 1, pres_vote_share := dpres]
member_year_data[dem == 0, pres_vote_share := 100 - dpres]
member_year_data[dem == 1, vote_share := dv]
member_year_data[dem == 0, vote_share := 100 - dv]
# load replication data for committee data
old_committee <- foreign::read.dta("inst/extdata/who-heeds-replication-archive.dta")
setDT(old_committee)
# best committee value per member-congress from the original archive
old_best_committee <- old_committee[, .(congress, icpsr, bestgrosswart)]
setnames(old_best_committee, "icpsr", "icpsrLegis")
# get stewart committee data for congress 110-112
new_committee <- fread("inst/extdata/house_assignments_103-114-1.csv")
setnames(new_committee, "Congress", "congress")
setnames(new_committee, "Committee code", "code")
setnames(new_committee, "ID #", "icpsrLegis")
setnames(new_committee, "Committee Name", "committee_name")
setnames(new_committee, "State Name", "stabb")
setnames(new_committee, "CD", "cd")
setnames(new_committee, "Maj/Min", "maj")
new_committee_value <- fread("inst/extdata/committee_values_house_110-112.csv")
new_committee <- merge(new_committee, new_committee_value, by = "code",
all.x = TRUE)
new_committee <- new_committee[is.na(congress) == FALSE,]
new_committee <- new_committee[congress >= 110, ]
new_committee <- new_committee[congress <= 112, ]
new_committee[, drop := 1 * (stabb %in% stabb_to_drop)]
new_committee <- new_committee[drop != 1, ]
# fix icpsrLegis numbers
lep_new_data <- lep_data[congress >= 110,]
new_committee[, in_lep_data := 1 * (icpsrLegis %in% lep_new_data$icpsr)]
# printed for inspection only: assignments whose id is unknown to the LEP data
new_committee[in_lep_data == 0, .(congress, icpsrLegis, Name, stabb, cd)]
new_committee[icpsrLegis == 21169, icpsrLegis := 20524] # mike fitzpatrick
new_committee[icpsrLegis == 21144, icpsrLegis := 20725] # tim walburg
new_committee[icpsrLegis == 90901, icpsrLegis := 20901] # parker griffith
new_committee[icpsrLegis == 29335, icpsrLegis := 20959] # theodore deutch
new_committee[icpsrLegis == 21161, icpsrLegis := 29550] # steve chabot
new_committee[icpsrLegis == 39310, icpsrLegis := 20917] # ahn cao
new_committee[icpsrLegis == 15006, icpsrLegis := 20758] # gus bilirakis
# dan miller was not in congress at this time
# correct NA values
# no committee takes lower rank than worst committee
new_committee[is.na(committee) == TRUE, rank := 22]
new_committee[is.na(rank) == TRUE, rank := 22]
# get best committee for mc
new_best_committee <- new_committee[,
.(bestgrosswart = min(rank, na.rm = TRUE)), .(congress, icpsrLegis, Name)]
# NOTE(review): best_grosswart (22 - rank) is computed here but never used --
# the next line keeps the raw bestgrosswart rank. Confirm which scale
# old_best_committee$bestgrosswart is on before trusting the rbind below.
new_best_committee[, best_grosswart := 22 - bestgrosswart]
new_best_committee <- new_best_committee[, .(congress, icpsrLegis, bestgrosswart)]
# merge in bestgrosswart data
best_committee <- rbind(old_best_committee, new_best_committee)
member_year_data <- merge(member_year_data, best_committee,
by = c("icpsrLegis", "congress"), all.x = TRUE)
# members with no committee record get 0
member_year_data[is.na(bestgrosswart) == TRUE, bestgrosswart := 0]
# get responsiveness rates
# compute responsiveness one congress at a time and stack the results
new_responsiveness <- rbindlist(lapply(93:112, function(congress) {
cat(congress, " ")
rc <- make_member_year_data(congress, house_party_calls)
DATA <- rc$member_year_data
DATA[, .(congress,
icpsrLegis,
party_free_ideal_point = pf_ideal,
pirate100 = 100 * responsiveness_party_calls,
pfrate100 = 100 * responsiveness_noncalls,
ideological_extremism)]
}))
new_whoheeds13 <- merge(member_year_data, new_responsiveness,
by = c("congress", "icpsrLegis"), all = TRUE)
setDT(new_whoheeds13)
# # correct some values
# check_dem <- new_whoheeds13[dem == 1 &
# ideological_extremism != -party_free_ideal_point, ]
# check_rep <- new_whoheeds13[dem == 0 &
# ideological_extremism != party_free_ideal_point, ]
# enforce extremism = party-free ideal point signed toward each party's pole
new_whoheeds13[dem == 1 & ideological_extremism != -party_free_ideal_point,
ideological_extremism := -party_free_ideal_point]
new_whoheeds13[dem == 0 & ideological_extremism != party_free_ideal_point,
ideological_extremism := party_free_ideal_point]
# drop members with missing values in variables used for analysis
new_whoheeds13[, drop := 0]
new_whoheeds13[is.na(majority) == TRUE, drop := 1]
new_whoheeds13[is.na(pirate100) == TRUE, drop := 1]
new_whoheeds13[is.na(pfrate100) == TRUE, drop := 1]
new_whoheeds13[is.na(ideological_extremism) == TRUE, drop := 1]
new_whoheeds13[is.na(party_free_ideal_point) == TRUE, drop := 1]
# drop uneeded variables
new_whoheeds13[, `:=`(c("fips", "statename", "dvp", "po1", "po2.",
"state_alphabetical_order"), NULL)]
# drop appointees
new_whoheeds13[is.na(votepct) == TRUE, drop := 1]
# party changers and special elections miscoded; correct them
new_whoheeds13[vote_share < 50 & drop == 0, vote_share := 100 - vote_share]
save(new_whoheeds13,
file = "test_data/new_whoheeds13_lm.RData")
house_data <- new_whoheeds13[drop == 0, ]
# NOTE(review): devtools::use_data() is defunct in current devtools; the
# modern equivalent is usethis::use_data(house_data, overwrite = TRUE)
devtools::use_data(house_data, overwrite = TRUE)
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/fsRankCoxAUC_fct.R
\name{fsRankCoxAUC_fct}
\alias{fsRankCoxAUC_fct}
\title{Wrapper function using the univariate Cox models as ranking method}
\usage{
fsRankCoxAUC_fct(data, fold, ncl, cv.out, cv.in, nr.var, t = 1, sd1 = 0.9,
c.time, ...)
}
\arguments{
\item{...}{other arguments, not used now}
\item{input}{see details in \code{\link{CVrankSurv_fct}}}
}
\description{
This wrapper function passes the ranking method to further functions. Setting up for parallel computing of different folds via foreach.
}
\keyword{internal}
|
/man/fsRankCoxAUC_fct.Rd
|
no_license
|
krumsieklab/SurvRank
|
R
| false
| false
| 635
|
rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/fsRankCoxAUC_fct.R
\name{fsRankCoxAUC_fct}
\alias{fsRankCoxAUC_fct}
\title{Wrapper function using the univariate Cox models as ranking method}
\usage{
fsRankCoxAUC_fct(data, fold, ncl, cv.out, cv.in, nr.var, t = 1, sd1 = 0.9,
c.time, ...)
}
\arguments{
\item{...}{other arguments, not used now}
\item{input}{see details in \code{\link{CVrankSurv_fct}}}
}
\description{
This wrapper function passes the ranking method to further functions. Setting up for parallel computing of different folds via foreach.
}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nn-loss.R
\name{nn_bce_with_logits_loss}
\alias{nn_bce_with_logits_loss}
\title{BCE with logits loss}
\usage{
nn_bce_with_logits_loss(weight = NULL, reduction = "mean", pos_weight = NULL)
}
\arguments{
\item{weight}{(Tensor, optional): a manual rescaling weight given to the loss
of each batch element. If given, has to be a Tensor of size \code{nbatch}.}
\item{reduction}{(string, optional): Specifies the reduction to apply to the output:
\code{'none'} | \code{'mean'} | \code{'sum'}. \code{'none'}: no reduction will be applied,
\code{'mean'}: the sum of the output will be divided by the number of
elements in the output, \code{'sum'}: the output will be summed. Note: \code{size_average}
and \code{reduce} are in the process of being deprecated, and in the meantime,
specifying either of those two args will override \code{reduction}. Default: \code{'mean'}}
\item{pos_weight}{(Tensor, optional): a weight of positive examples.
Must be a vector with length equal to the number of classes.}
}
\description{
This loss combines a \code{Sigmoid} layer and the \code{BCELoss} in one single
class. This version is more numerically stable than using a plain \code{Sigmoid}
followed by a \code{BCELoss} as, by combining the operations into one layer,
we take advantage of the log-sum-exp trick for numerical stability.
}
\details{
The unreduced (i.e. with \code{reduction} set to \code{'none'}) loss can be described as:
\deqn{
\ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
l_n = - w_n \left[ y_n \cdot \log \sigma(x_n)
+ (1 - y_n) \cdot \log (1 - \sigma(x_n)) \right],
}
where \eqn{N} is the batch size. If \code{reduction} is not \code{'none'}
(default \code{'mean'}), then
\deqn{
\ell(x, y) = \begin{array}{ll}
\mbox{mean}(L), & \mbox{if reduction} = \mbox{'mean';}\\
\mbox{sum}(L), & \mbox{if reduction} = \mbox{'sum'.}
\end{array}
}
This is used for measuring the error of a reconstruction in for example
an auto-encoder. Note that the targets \code{t[i]} should be numbers
between 0 and 1.
It's possible to trade off recall and precision by adding weights to positive examples.
In the case of multi-label classification the loss can be described as:
\deqn{
\ell_c(x, y) = L_c = \{l_{1,c},\dots,l_{N,c}\}^\top, \quad
l_{n,c} = - w_{n,c} \left[ p_c y_{n,c} \cdot \log \sigma(x_{n,c})
+ (1 - y_{n,c}) \cdot \log (1 - \sigma(x_{n,c})) \right],
}
where \eqn{c} is the class number (\eqn{c > 1} for multi-label binary
classification,
\eqn{c = 1} for single-label binary classification),
\eqn{n} is the number of the sample in the batch and
\eqn{p_c} is the weight of the positive answer for the class \eqn{c}.
\eqn{p_c > 1} increases the recall, \eqn{p_c < 1} increases the precision.
For example, if a dataset contains 100 positive and 300 negative examples of a single class,
then \code{pos_weight} for the class should be equal to \eqn{\frac{300}{100}=3}.
The loss would act as if the dataset contains \eqn{3\times 100=300} positive examples.
}
\section{Shape}{
\itemize{
\item Input: \eqn{(N, *)} where \eqn{*} means, any number of additional dimensions
\item Target: \eqn{(N, *)}, same shape as the input
\item Output: scalar. If \code{reduction} is \code{'none'}, then \eqn{(N, *)}, same
shape as input.
}
}
\examples{
if (torch_is_installed()) {
loss <- nn_bce_with_logits_loss()
input <- torch_randn(3, requires_grad=TRUE)
target <- torch_empty(3)$random_(1, 2)
output <- loss(input, target)
output$backward()
target <- torch_ones(10, 64, dtype=torch_float32()) # 64 classes, batch size = 10
output <- torch_full(c(10, 64), 1.5) # A prediction (logit)
pos_weight <- torch_ones(64) # All weights are equal to 1
criterion <- nn_bce_with_logits_loss(pos_weight=pos_weight)
criterion(output, target) # -log(sigmoid(1.5))
}
}
|
/man/nn_bce_with_logits_loss.Rd
|
permissive
|
krzjoa/torch
|
R
| false
| true
| 3,845
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/nn-loss.R
\name{nn_bce_with_logits_loss}
\alias{nn_bce_with_logits_loss}
\title{BCE with logits loss}
\usage{
nn_bce_with_logits_loss(weight = NULL, reduction = "mean", pos_weight = NULL)
}
\arguments{
\item{weight}{(Tensor, optional): a manual rescaling weight given to the loss
of each batch element. If given, has to be a Tensor of size \code{nbatch}.}
\item{reduction}{(string, optional): Specifies the reduction to apply to the output:
\code{'none'} | \code{'mean'} | \code{'sum'}. \code{'none'}: no reduction will be applied,
\code{'mean'}: the sum of the output will be divided by the number of
elements in the output, \code{'sum'}: the output will be summed. Note: \code{size_average}
and \code{reduce} are in the process of being deprecated, and in the meantime,
specifying either of those two args will override \code{reduction}. Default: \code{'mean'}}
\item{pos_weight}{(Tensor, optional): a weight of positive examples.
Must be a vector with length equal to the number of classes.}
}
\description{
This loss combines a \code{Sigmoid} layer and the \code{BCELoss} in one single
class. This version is more numerically stable than using a plain \code{Sigmoid}
followed by a \code{BCELoss} as, by combining the operations into one layer,
we take advantage of the log-sum-exp trick for numerical stability.
}
\details{
The unreduced (i.e. with \code{reduction} set to \code{'none'}) loss can be described as:
\deqn{
\ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
l_n = - w_n \left[ y_n \cdot \log \sigma(x_n)
+ (1 - y_n) \cdot \log (1 - \sigma(x_n)) \right],
}
where \eqn{N} is the batch size. If \code{reduction} is not \code{'none'}
(default \code{'mean'}), then
\deqn{
\ell(x, y) = \begin{array}{ll}
\mbox{mean}(L), & \mbox{if reduction} = \mbox{'mean';}\\
\mbox{sum}(L), & \mbox{if reduction} = \mbox{'sum'.}
\end{array}
}
This is used for measuring the error of a reconstruction in for example
an auto-encoder. Note that the targets \code{t[i]} should be numbers
between 0 and 1.
It's possible to trade off recall and precision by adding weights to positive examples.
In the case of multi-label classification the loss can be described as:
\deqn{
\ell_c(x, y) = L_c = \{l_{1,c},\dots,l_{N,c}\}^\top, \quad
l_{n,c} = - w_{n,c} \left[ p_c y_{n,c} \cdot \log \sigma(x_{n,c})
+ (1 - y_{n,c}) \cdot \log (1 - \sigma(x_{n,c})) \right],
}
where \eqn{c} is the class number (\eqn{c > 1} for multi-label binary
classification,
\eqn{c = 1} for single-label binary classification),
\eqn{n} is the number of the sample in the batch and
\eqn{p_c} is the weight of the positive answer for the class \eqn{c}.
\eqn{p_c > 1} increases the recall, \eqn{p_c < 1} increases the precision.
For example, if a dataset contains 100 positive and 300 negative examples of a single class,
then \code{pos_weight} for the class should be equal to \eqn{\frac{300}{100}=3}.
The loss would act as if the dataset contains \eqn{3\times 100=300} positive examples.
}
\section{Shape}{
\itemize{
\item Input: \eqn{(N, *)} where \eqn{*} means, any number of additional dimensions
\item Target: \eqn{(N, *)}, same shape as the input
\item Output: scalar. If \code{reduction} is \code{'none'}, then \eqn{(N, *)}, same
shape as input.
}
}
\examples{
if (torch_is_installed()) {
loss <- nn_bce_with_logits_loss()
input <- torch_randn(3, requires_grad=TRUE)
target <- torch_empty(3)$random_(1, 2)
output <- loss(input, target)
output$backward()
target <- torch_ones(10, 64, dtype=torch_float32()) # 64 classes, batch size = 10
output <- torch_full(c(10, 64), 1.5) # A prediction (logit)
pos_weight <- torch_ones(64) # All weights are equal to 1
criterion <- nn_bce_with_logits_loss(pos_weight=pos_weight)
criterion(output, target) # -log(sigmoid(1.5))
}
}
|
## The two functions below cache the inverse of a matrix so that repeated
## requests for the inverse of an unchanged matrix are served from the cache.
### The following function creates a special "matrix" object (a list) containing the following functions:
# set the value of the matrix
# get the value of the matrix
# set the value of its inverse
# get the value of its inverse
makeCacheMatrix <- function(x = matrix()) {
  # Wrap matrix `x` in a list of accessors that also manage a cached inverse.
  # Returns a list with set/get (the matrix) and setinv/getinv (the cache).
  inv <- NULL  # cached inverse; NULL means "not computed yet"
  list(
    # replacing the matrix invalidates any previously cached inverse
    set = function(y) {
      x <<- y
      inv <<- NULL
    },
    get = function() x,
    setinv = function(inverse) inv <<- inverse,
    getinv = function() inv
  )
}
### The following function returns the inverse of a matrix if it has not already been computed.
### On the contrary it returned the cached result without computing it again.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  # `x` is a cache wrapper as produced by makeCacheMatrix; extra arguments
  # are forwarded to solve(). On a cache hit the stored inverse is returned
  # without recomputation.
  cached <- x$getinv()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setinv(cached)  # remember the result for subsequent calls
  } else {
    message("getting cached data")
  }
  cached
}
|
/cachematrix.R
|
no_license
|
costa-11/ProgrammingAssignment2
|
R
| false
| false
| 1,011
|
r
|
## The two functions below cache the inverse of a matrix so that repeated
## requests for the inverse of an unchanged matrix are served from the cache.
### The following function creates a special "matrix" object (a list) containing the following functions:
# set the value of the matrix
# get the value of the matrix
# set the value of its inverse
# get the value of its inverse
makeCacheMatrix <- function(x = matrix()) {
  # Wrap matrix `x` in a list of accessors that also manage a cached inverse.
  # Returns a list with set/get (the matrix) and setinv/getinv (the cache).
  inv <- NULL  # cached inverse; NULL means "not computed yet"
  list(
    # replacing the matrix invalidates any previously cached inverse
    set = function(y) {
      x <<- y
      inv <<- NULL
    },
    get = function() x,
    setinv = function(inverse) inv <<- inverse,
    getinv = function() inv
  )
}
### The following function returns the inverse of a matrix if it has not already been computed.
### On the contrary it returned the cached result without computing it again.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  # `x` is a cache wrapper as produced by makeCacheMatrix; extra arguments
  # are forwarded to solve(). On a cache hit the stored inverse is returned
  # without recomputation.
  cached <- x$getinv()
  if (is.null(cached)) {
    cached <- solve(x$get(), ...)
    x$setinv(cached)  # remember the result for subsequent calls
  } else {
    message("getting cached data")
  }
  cached
}
|
#!/usr/bin/Rscript
# Plots descriptive statistics from the biomass survey data.
# Reads Data/Trees.csv and Data/Canopy.csv (skipping one header line each),
# saves the cleaned data frames as .Rdata files, and writes a series of PNG
# plots (histograms, boxplots, scatterplots) to the working directory.
library(ggplot2)
# Output settings shared by every ggsave() call (inches / dots-per-inch).
DPI <- 300
#WIDTH <- 8.33
#HEIGHT <- 5.53
WIDTH <- 6.5
HEIGHT <- 4
# Load the survey data; the first line of each CSV is skipped.
trees <- read.csv("Data/Trees.csv", skip=1)
canopy <- read.csv("Data/Canopy.csv", skip=1)
# Treat the plot / strata / row identifiers as categorical, not numeric.
trees$ID.Plot <- factor(trees$ID.Plot)
trees$ID.Strata <- factor(trees$ID.Strata)
trees$ID.Row <- factor(trees$ID.Row)
save(trees, file="Data/tree_data.Rdata")
# Overall distributions of diameter-at-breast-height (DBH) and height.
qplot(DBH, data=trees)
ggsave("tree_dbh.png", width=WIDTH, height=HEIGHT, dpi=DPI)
qplot(Height, data=trees)
ggsave("tree_height.png", width=WIDTH, height=HEIGHT, dpi=DPI)
# Height and DBH broken down by strata and by row (boxplots).
qplot(ID.Strata, Height, geom="boxplot", data=trees,
      ylab="Height (meters)", xlab="Strata")
ggsave("tree_height_strata.png", width=WIDTH, height=HEIGHT, dpi=DPI)
qplot(ID.Row, Height, geom="boxplot", data=trees,
      ylab="Height (meters)", xlab="Row")
ggsave("tree_height_row.png", width=WIDTH, height=HEIGHT, dpi=DPI)
qplot(ID.Strata, DBH, geom="boxplot", data=trees,
      ylab="Diameter at Breast Height (cm)", xlab="Strata")
ggsave("tree_dbh_strata.png", width=WIDTH, height=HEIGHT, dpi=DPI)
qplot(ID.Row, DBH, geom="boxplot", data=trees,
      ylab="Diameter at Breast Height (cm)", xlab="Row")
ggsave("tree_dbh_row.png", width=WIDTH, height=HEIGHT, dpi=DPI)
# Same factor conversion for the canopy table, then overstory density plots.
canopy$ID.Plot <- factor(canopy$ID.Plot)
canopy$ID.Strata <- factor(canopy$ID.Strata)
canopy$ID.Row <- factor(canopy$ID.Row)
save(canopy, file="Data/canopy_data.Rdata")
qplot(ID.Strata, Overstory.Density, geom="boxplot", data=canopy)
ggsave("canopy_density_strata.png", width=WIDTH, height=HEIGHT, dpi=DPI)
qplot(ID.Row, Overstory.Density, geom="boxplot", data=canopy)
ggsave("canopy_density_row.png", width=WIDTH, height=HEIGHT, dpi=DPI)
# Relationship between height and DBH, coloured by strata.
qplot(Height, DBH, colour=ID.Strata, data=trees)
ggsave("dbh_vs_height.png", width=WIDTH, height=HEIGHT, dpi=DPI)
# Species frequencies, overall and faceted by strata / row; the axis text is
# shrunk in the faceted plots so species labels remain legible.
qplot(Species, geom="histogram", data=trees, ylab="Frequency")
ggsave("tree_species.png", width=WIDTH, height=HEIGHT, dpi=DPI)
p <- qplot(Species, geom="histogram", facets=.~ID.Strata, data=trees,
           ylab="Frequency")
#p + theme(axis.text.x=element_text(size=10))
#ggsave("tree_species_strata.png", width=12, height=5, dpi=DPI)
p + theme(axis.text.x=element_text(size=8))
ggsave("tree_species_strata.png", width=WIDTH, height=HEIGHT, dpi=DPI)
p <- qplot(Species, geom="histogram", facets=.~ID.Row, data=trees,
           ylab="Frequency")
#p + theme(axis.text.x=element_text(angle=90, hjust=1, size=8))
#ggsave("tree_species_row.png", width=12, height=5, dpi=DPI)
p + theme(axis.text.x=element_text(angle=90, hjust=1, size=6))
ggsave("tree_species_row.png", width=WIDTH, height=HEIGHT, dpi=DPI)
|
/Biomass_Prediction/1_plot_biomass_data.R
|
no_license
|
azvoleff/Biomass_Mapping
|
R
| false
| false
| 2,606
|
r
|
#!/usr/bin/Rscript
# Plots descriptive statistics from the biomass survey data.
# Reads Data/Trees.csv and Data/Canopy.csv, converts the ID columns to
# factors, saves both tables as .Rdata, and writes PNG plots via ggsave().
library(ggplot2)
# Image settings shared by every ggsave() call below.
DPI <- 300
#WIDTH <- 8.33
#HEIGHT <- 5.53
WIDTH <- 6.5
HEIGHT <- 4
trees <- read.csv("Data/Trees.csv", skip=1)
canopy <- read.csv("Data/Canopy.csv", skip=1)
# Identifier columns are categorical, not numeric.
trees$ID.Plot <- factor(trees$ID.Plot)
trees$ID.Strata <- factor(trees$ID.Strata)
trees$ID.Row <- factor(trees$ID.Row)
save(trees, file="Data/tree_data.Rdata")
# Histograms of DBH and height over all trees.
qplot(DBH, data=trees)
ggsave("tree_dbh.png", width=WIDTH, height=HEIGHT, dpi=DPI)
qplot(Height, data=trees)
ggsave("tree_height.png", width=WIDTH, height=HEIGHT, dpi=DPI)
# Boxplots of height / DBH by strata and by row.
qplot(ID.Strata, Height, geom="boxplot", data=trees,
      ylab="Height (meters)", xlab="Strata")
ggsave("tree_height_strata.png", width=WIDTH, height=HEIGHT, dpi=DPI)
qplot(ID.Row, Height, geom="boxplot", data=trees,
      ylab="Height (meters)", xlab="Row")
ggsave("tree_height_row.png", width=WIDTH, height=HEIGHT, dpi=DPI)
qplot(ID.Strata, DBH, geom="boxplot", data=trees,
      ylab="Diameter at Breast Height (cm)", xlab="Strata")
ggsave("tree_dbh_strata.png", width=WIDTH, height=HEIGHT, dpi=DPI)
qplot(ID.Row, DBH, geom="boxplot", data=trees,
      ylab="Diameter at Breast Height (cm)", xlab="Row")
ggsave("tree_dbh_row.png", width=WIDTH, height=HEIGHT, dpi=DPI)
# Canopy table: same factor conversion, then overstory density boxplots.
canopy$ID.Plot <- factor(canopy$ID.Plot)
canopy$ID.Strata <- factor(canopy$ID.Strata)
canopy$ID.Row <- factor(canopy$ID.Row)
save(canopy, file="Data/canopy_data.Rdata")
qplot(ID.Strata, Overstory.Density, geom="boxplot", data=canopy)
ggsave("canopy_density_strata.png", width=WIDTH, height=HEIGHT, dpi=DPI)
qplot(ID.Row, Overstory.Density, geom="boxplot", data=canopy)
ggsave("canopy_density_row.png", width=WIDTH, height=HEIGHT, dpi=DPI)
# Height vs DBH scatterplot, coloured by strata.
qplot(Height, DBH, colour=ID.Strata, data=trees)
ggsave("dbh_vs_height.png", width=WIDTH, height=HEIGHT, dpi=DPI)
# Species frequencies, overall and faceted; axis labels shrunk for legibility.
qplot(Species, geom="histogram", data=trees, ylab="Frequency")
ggsave("tree_species.png", width=WIDTH, height=HEIGHT, dpi=DPI)
p <- qplot(Species, geom="histogram", facets=.~ID.Strata, data=trees,
           ylab="Frequency")
#p + theme(axis.text.x=element_text(size=10))
#ggsave("tree_species_strata.png", width=12, height=5, dpi=DPI)
p + theme(axis.text.x=element_text(size=8))
ggsave("tree_species_strata.png", width=WIDTH, height=HEIGHT, dpi=DPI)
p <- qplot(Species, geom="histogram", facets=.~ID.Row, data=trees,
           ylab="Frequency")
#p + theme(axis.text.x=element_text(angle=90, hjust=1, size=8))
#ggsave("tree_species_row.png", width=12, height=5, dpi=DPI)
p + theme(axis.text.x=element_text(angle=90, hjust=1, size=6))
ggsave("tree_species_row.png", width=WIDTH, height=HEIGHT, dpi=DPI)
|
# Data handling: analysis of the Auto MPG dataset (autompg.txt).
# Data source: http://archive.ics.uci.edu/ml/datasets/Auto+MPG
# Steps: read the data, inspect its structure, summarise it, then draw a few
# basic plots (barplot, histogram, 3-D scatterplot).
# 1. Data reading in R (the file has a header row; " " entries become NA)
car<-read.table(file="autompg.txt", na=" ", header=TRUE)
#car<-read.csv(file="autompg.csv")
head(car)
dim(car)
# 2. Data checking: type of every column (numeric / factor / integer)
str(car)
# 3. Data summary => five-number summary + mean for each column
summary(car)
# 4. basic statistics & graph
# NOTE(review): attach() places the columns on the search path; all bare
# names below (mpg, hp, wt, cyl, origin, year) refer to columns of 'car'.
attach(car)
# frequency tables of the categorical variables
table(origin)
table(year)
# mean and standard deviation
mean(mpg)
mean(hp)
mean(wt)
# column means of the first six variables => apply(data, 2, mean)
apply (car[, 1:6], 2, mean)
# barplot of the cylinder-count frequencies, with readable labels
freq_cyl<-table(cyl)
names(freq_cyl) <- c ("3cyl", "4cyl", "5cyl", "6cyl",
"8cyl")
barplot(freq_cyl)
barplot(freq_cyl, main="Cylinders Distribution") #main title
# histogram of MPG
hist(mpg, main="Mile per gallon:1970-1982",
col="lightblue")
# 3-D scatterplot of weight, horsepower and mpg
# install.packages("scatterplot3d")
library(scatterplot3d)
scatterplot3d(wt,hp,mpg, type="h", highlight.3d=TRUE,
angle=55, scale.y=0.7, pch=16, main="3dimensional plot for autompg data")
# apply a function over a list: per-column summary statistics
lapply (car[, 1:6], mean)
a1<-lapply (car[, 1:6], mean)
a2<-lapply (car[, 1:6], sd)
a3<-lapply (car[, 1:6], min)
a4<-lapply (car[, 1:6], max)
# combine into one summary table: one row per variable
table1<-cbind(a1,a2,a3,a4)
colnames(table1) <- c("mean", "sd", "min", "max")
table1
#################################
|
/lec3_3.R
|
no_license
|
wjdtpghk96/R-start
|
R
| false
| false
| 1,446
|
r
|
# Data handling: analysis of the Auto MPG dataset (autompg.txt).
# http://archive.ics.uci.edu/ml/datasets/Auto+MPG
# 1. Data reading in R (" " entries are treated as NA)
car<-read.table(file="autompg.txt", na=" ", header=TRUE)
#car<-read.csv(file="autompg.csv")
head(car)
dim(car)
# 2. Data checking: column types (numeric / factor / integer)
str(car)
# 3. Data summary => five-number summary + mean per column
summary(car)
# 4. basic statistics & graph
# attach() puts the columns on the search path for the bare names below.
attach(car)
# frequency tables
table(origin)
table(year)
# means of selected variables
mean(mpg)
mean(hp)
mean(wt)
# column means => apply(data, 2, mean)
apply (car[, 1:6], 2, mean)
# barplot of cylinder-count frequencies
freq_cyl<-table(cyl)
names(freq_cyl) <- c ("3cyl", "4cyl", "5cyl", "6cyl",
"8cyl")
barplot(freq_cyl)
barplot(freq_cyl, main="Cylinders Distribution") #main title
# histogram of MPG
hist(mpg, main="Mile per gallon:1970-1982",
col="lightblue")
# 3-D scatterplot of weight, horsepower and mpg
# install.packages("scatterplot3d")
library(scatterplot3d)
scatterplot3d(wt,hp,mpg, type="h", highlight.3d=TRUE,
angle=55, scale.y=0.7, pch=16, main="3dimensional plot for autompg data")
# per-column summary statistics via lapply
lapply (car[, 1:6], mean)
a1<-lapply (car[, 1:6], mean)
a2<-lapply (car[, 1:6], sd)
a3<-lapply (car[, 1:6], min)
a4<-lapply (car[, 1:6], max)
# combined summary table: one row per variable
table1<-cbind(a1,a2,a3,a4)
colnames(table1) <- c("mean", "sd", "min", "max")
table1
#################################
|
# Dev quadratic approximation v2 blocks, like in H&Y 2009
# Development driver: loads the in-development package from source, builds
# test set 1, and times a single glbin_lcd() fit over a 100-value lambda path.
library(devtools)
load_all(".")  # load the package from the current working directory
set.seed(20)   # reproducible test data / fit
source("tests/0-make-test-set-1.R")  # defines X, y and index used below
verb <- 1      # verbosity level passed to the fitter
lmin <- 0.001  # smallest lambda on the path (lambda.min)
eps <- 1e-3    # convergence tolerance
# t0 holds the elapsed timings; f0 holds the fitted object.
t0 <- system.time( f0 <- glbin_lcd(X, y, eps=eps, nlambda=100, index = index, verb=verb, lambda.min = lmin) )
|
/tests/1-quadratic-with-blocks-r.R
|
no_license
|
jeliason/glbinc
|
R
| false
| false
| 291
|
r
|
# Dev quadratic approximation v2 blocks, like in H&Y 2009
# Development driver: load the package from source, build test set 1,
# and time one glbin_lcd() fit.
library(devtools)
load_all(".")  # load the in-development package from the working directory
set.seed(20)
source("tests/0-make-test-set-1.R")  # defines X, y and index
verb <- 1      # verbosity flag for the fitter
lmin <- 0.001  # lambda.min for the regularisation path
eps <- 1e-3    # convergence tolerance
t0 <- system.time( f0 <- glbin_lcd(X, y, eps=eps, nlambda=100, index = index, verb=verb, lambda.min = lmin) )
|
#########################################################
##
## N-folds based cross validation data split mechanism
##
## Parameters:
##   input, output : predictor and response tables (coerced to data.frame)
##   totalFold     : total number of folds the shuffled data is cut into
##   trainFoldNum  : number of folds used for training
##   cvIdx         : from 1 to choose(totalFold, trainFoldNum); index of one
##                   specific fold combination in the cross validation
##   seed          : RNG seed, so a given split is reproducible
##   test          : if TRUE, carve off a leading test set before folding
##   testPer       : percentage of test dataset, e.g. testPer = 20
##
## Return:
##   List of training data x,y and validation data x,y
##   (plus testX / testY when test = TRUE)
##
#########################################################
CvDataSplit = function(input, output, totalFold, trainFoldNum, cvIdx, seed, test = FALSE, testPer = NULL) {
  input <- as.data.frame(input); output <- as.data.frame(output)
  set.seed(seed)
  # Get training folds index: all fold combinations in a random column order.
  # combn() is computed once and reused (the original called it twice); the
  # sample() call consumes the same RNG draw, so results match for a seed.
  allCombs = combn(totalFold, trainFoldNum)
  foldComb = allCombs[, sample(ncol(allCombs))]
  trainFoldIdx = foldComb[, cvIdx]
  # Shuffle the observation index of input (seq_len is safe for 0 rows).
  indexShuffled = sample(seq_len(nrow(input)), nrow(input), replace = FALSE)
  input = input[indexShuffled, ]
  output = as.data.frame(output[indexShuffled, ])
  if (test) {
    # Hold out the first testPer percent of the shuffled rows as a test set.
    testNum <- trunc(nrow(input) * (testPer / 100))
    testX <- input[1:testNum, ]
    input <- input[(testNum + 1):nrow(input), ]
    testY <- output[1:testNum, ]
    output <- output[(testNum + 1):nrow(output), ]
  }
  # Generate the training slicing indicator vector: each row gets a fold id;
  # remainder rows (nrow %% totalFold) are assigned to the last fold.
  foldIdx <- rep(1:totalFold, each = nrow(input) %/% totalFold)
  foldIdx <- c(foldIdx, rep(totalFold, times = nrow(input) %% totalFold))
  # A row trains iff its fold id is one of the training folds. %in% is
  # vectorised and, unlike the original sapply(), stays logical on empty input.
  TrainIndicator <- foldIdx %in% trainFoldIdx
  input <- as.data.frame(input); output <- as.data.frame(output)
  if (test) {
    return(list(trainX = input[TrainIndicator, ], trainY = output[TrainIndicator, ],
                validX = input[!TrainIndicator, ], validY = output[!TrainIndicator, ],
                testX = testX, testY = testY))
  } else {
    return(list(trainX = input[TrainIndicator, ], trainY = output[TrainIndicator, ],
                validX = input[!TrainIndicator, ], validY = output[!TrainIndicator, ]))
  }
}
|
/CvDataSplit.R
|
no_license
|
Zhenshan-Jin/Machine_Learning_Tool_Box
|
R
| false
| false
| 2,094
|
r
|
#########################################################
##
## N-folds based cross validation data split mechanism
## Parameters:
##  cvIdx: from 1 to choose(totalFold, trainFoldNum); index of one specific
##         data split (fold combination) in cross validation
##  testPer: percentage of test dataset, e.g. testPer = 20
##
## Return:
##  List of training data x,y and validation data x,y
##  (plus testX / testY when test = TRUE)
##
#########################################################
CvDataSplit = function(input, output, totalFold, trainFoldNum, cvIdx, seed, test = FALSE, testPer = NULL) {
  input <- as.data.frame(input); output <- as.data.frame(output)
  set.seed(seed)  # reproducible shuffling for a given seed
  # Get training folds index: all fold combinations in a random column order,
  # then pick column cvIdx.
  foldComb = combn(totalFold, trainFoldNum)[, sample(ncol(combn(totalFold, trainFoldNum)))]
  trainFoldIdx = foldComb[,cvIdx]
  # Shuffle the observation index of input
  indexShuffled = sample(1:nrow(input),nrow(input), replace = FALSE)
  input = input[indexShuffled,]
  output = as.data.frame(output[indexShuffled,])
  if(test){
    # Hold out the first testPer percent of the shuffled rows as a test set.
    testNum <- trunc(nrow(input) * (testPer/100),0)
    testX <- input[1:testNum,]
    input <- input[(testNum + 1):nrow(input), ]
    testY <- output[1:testNum,]
    output <- output[(testNum + 1):nrow(output), ]
  }
  # Generate the training slicing indicator vector: each row gets a fold id;
  # remainder rows (nrow %% totalFold) go to the last fold.
  foldIdx <- rep(1:totalFold, each = nrow(input)/totalFold)
  foldIdx <- c(foldIdx, rep(totalFold, times = nrow(input)%%totalFold))
  #foldIdx <- append(foldIdx, rep(totalFold, nrow(input) - length(foldIdx)))
  # TRUE for rows whose fold id belongs to one of the training folds.
  TrainIndicator <- sapply(foldIdx, function(foldIdxx) if(foldIdxx %in% trainFoldIdx) return(TRUE)
                           else return(FALSE))
  input <- as.data.frame(input); output <- as.data.frame(output)
  if(test){
    return(list(trainX = input[TrainIndicator,], trainY = output[TrainIndicator,],
                validX = input[!TrainIndicator,], validY = output[!TrainIndicator,],
                testX = testX, testY = testY))
  }else{
    return(list(trainX = input[TrainIndicator,], trainY = output[TrainIndicator,],
                validX = input[!TrainIndicator,], validY = output[!TrainIndicator,]))
  }
}
|
# Fit a 10-fold cross-validated elastic net (alpha = 0.6, gaussian family)
# on the lung_other training set and append the fit summary to a log file.
library(glmnet)
mydata = read.table("./TrainingSet/RF/lung_other.csv",head=T,sep=",")
# Column 1 is the response; predictors start at column 4.
x = as.matrix(mydata[,4:ncol(mydata)])
y = as.matrix(mydata[,1])
set.seed(123)  # reproducible CV fold assignment
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.6,family="gaussian",standardize=TRUE)
# Append the per-lambda fit path to the model log.
# NOTE(review): the file name says 065 but alpha is 0.6 -- confirm intended.
sink('./Model/EN/Classifier/lung_other/lung_other_065.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
/Model/EN/Classifier/lung_other/lung_other_065.R
|
no_license
|
leon1003/QSMART
|
R
| false
| false
| 361
|
r
|
# 10-fold CV elastic net (alpha = 0.6, gaussian) for the lung_other set;
# the fit summary is appended to the corresponding log under Model/EN.
library(glmnet)
mydata = read.table("./TrainingSet/RF/lung_other.csv",head=T,sep=",")
x = as.matrix(mydata[,4:ncol(mydata)])  # predictors start at column 4
y = as.matrix(mydata[,1])               # response is column 1
set.seed(123)  # reproducible fold assignment
glm = cv.glmnet(x,y,nfolds=10,type.measure="mse",alpha=0.6,family="gaussian",standardize=TRUE)
sink('./Model/EN/Classifier/lung_other/lung_other_065.txt',append=TRUE)
print(glm$glmnet.fit)
sink()
|
# Scrape transparentcalifornia.com salary data: gather links to every
# agency / school-district / special-district / charter-school page, extract
# each page's CSV export link, then download every CSV into data/.
library(httr)
library(XML)
library(magrittr)
library(rvest)
library(tidyverse)
# Retrieve links to the lists of agencies and schools.
agency_links <- read_html("http://transparentcalifornia.com/agencies/salaries/") %>% html_nodes("td:nth-child(1) a") %>% html_attr("href")
school_dis_links <- read_html("http://transparentcalifornia.com/agencies/salaries/school-districts/") %>% html_nodes("td:nth-child(1) a") %>% html_attr("href")
spec_dis_links <- read_html("http://transparentcalifornia.com/agencies/salaries/special-districts/") %>% html_nodes("td:nth-child(1) a") %>% html_attr("href")
charter_sch_links <- read_html("http://transparentcalifornia.com/agencies/salaries/charter-schools/") %>% html_nodes("td:nth-child(1) a") %>% html_attr("href")
all_links <- c(agency_links, school_dis_links, spec_dis_links, charter_sch_links)
# Visit every page and collect the unique CSV export hrefs, keeping track of
# the page each link came from ('orgin' column).
# NOTE(review): 1:length(all_links) misbehaves when all_links is empty;
# seq_along(all_links) would be safer.
other_sal_export_links <- data.frame()
for(i in 1:length(all_links)) {
  export_link <- read_html(paste0("http://transparentcalifornia.com", all_links[i])) %>%
    html_nodes(".export-link .export-link") %>%
    html_attr("href") %>%
    data.frame() %>% unique() %>% mutate(orgin = all_links[i])
  # Growing a data.frame with rbind in a loop is O(n^2), but tolerable here.
  other_sal_export_links <- rbind(other_sal_export_links, export_link)
  print(i)  # progress indicator
}
# Download every CSV; URLs that error are appended to log_error.txt, skipped.
for(p in 1:length(other_sal_export_links$.)) {
  downloadCSV <- paste0("http://transparentcalifornia.com", as.character(other_sal_export_links$.[p]))
  # download.file returns 0 on success; the error handler returns TRUE and
  # logs the failing URL instead of aborting the whole loop.
  fail <- tryCatch({
    download.file(url = downloadCSV, destfile = paste0("data/", basename(as.character(other_sal_export_links$.[p]))), method='libcurl')},
    error=function(err) {
      write(downloadCSV, file="log_error.txt", append = TRUE)
      return(TRUE)
    })
  if(fail == TRUE){
    next
  }
  print(p)  # progress indicator
}
|
/scripts/core_scrape.R
|
no_license
|
brettkobo/open-state-salaries
|
R
| false
| false
| 1,825
|
r
|
# Scrape transparentcalifornia.com: collect agency/school salary-page links,
# find each page's CSV export link, and download all CSVs into data/.
library(httr)
library(XML)
library(magrittr)
library(rvest)
library(tidyverse)
# Retrieve links to the lists of agencies and schools.
agency_links <- read_html("http://transparentcalifornia.com/agencies/salaries/") %>% html_nodes("td:nth-child(1) a") %>% html_attr("href")
school_dis_links <- read_html("http://transparentcalifornia.com/agencies/salaries/school-districts/") %>% html_nodes("td:nth-child(1) a") %>% html_attr("href")
spec_dis_links <- read_html("http://transparentcalifornia.com/agencies/salaries/special-districts/") %>% html_nodes("td:nth-child(1) a") %>% html_attr("href")
charter_sch_links <- read_html("http://transparentcalifornia.com/agencies/salaries/charter-schools/") %>% html_nodes("td:nth-child(1) a") %>% html_attr("href")
all_links <- c(agency_links, school_dis_links, spec_dis_links, charter_sch_links)
# Collect the unique CSV export hrefs from every page, tagging each with
# its origin page.
other_sal_export_links <- data.frame()
for(i in 1:length(all_links)) {
  export_link <- read_html(paste0("http://transparentcalifornia.com", all_links[i])) %>%
    html_nodes(".export-link .export-link") %>%
    html_attr("href") %>%
    data.frame() %>% unique() %>% mutate(orgin = all_links[i])
  other_sal_export_links <- rbind(other_sal_export_links, export_link)
  print(i)  # progress indicator
}
# Download every CSV; failing URLs are logged to log_error.txt and skipped.
for(p in 1:length(other_sal_export_links$.)) {
  downloadCSV <- paste0("http://transparentcalifornia.com", as.character(other_sal_export_links$.[p]))
  # On error the handler logs the URL and yields TRUE; on success
  # download.file returns 0, so the skip below does not trigger.
  fail <- tryCatch({
    download.file(url = downloadCSV, destfile = paste0("data/", basename(as.character(other_sal_export_links$.[p]))), method='libcurl')},
    error=function(err) {
      write(downloadCSV, file="log_error.txt", append = TRUE)
      return(TRUE)
    })
  if(fail == TRUE){
    next
  }
  print(p)  # progress indicator
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wflow_start.R
\name{wflow_start}
\alias{wflow_start}
\title{Start a new workflowr project}
\usage{
wflow_start(directory, name = NULL, git = TRUE, existing = FALSE,
overwrite = FALSE, change_wd = TRUE, user.name = NULL,
user.email = NULL)
}
\arguments{
\item{directory}{character. The directory for the project, e.g.
"~/new-project". When \code{existing = FALSE}, the directory will
be created.}
\item{name}{character (default: NULL). Project name, e.g. "My Project". When
\code{name = NULL}, the project name is automatically set based on the
argument \code{directory}. For example, if \code{directory =
"~/projects/myproject"}, then \code{name} is set to \code{"myproject"}.
\code{name} is displayed on the site's navigation bar and the README.md.}
\item{git}{logical (default: TRUE). Should Git be used for version
control? If \code{directory} is a new Git repository and \code{git
= TRUE}, \code{wflow_start} will initialize the repository and make
an initial commit. If \code{git = TRUE} and \code{directory} is
already a Git repository, \code{wflow_start} will make an
additional commit. In both cases, only files needed for the
workflowr project will be included in the commit.}
\item{existing}{logical (default: FALSE). Indicate if the specified
\code{directory} already exists. The default prevents injecting the
workflowr files into an unwanted location. Only set to TRUE if you wish to
add the workflowr files to an existing project.}
\item{overwrite}{logical (default: FALSE). Control whether to overwrite
existing files. Only relevant if \code{existing = TRUE}.}
\item{change_wd}{logical (default: TRUE). Change the working directory to the
\code{directory}.}
\item{user.name}{character (default: NULL). The user name used by Git to sign
commits, e.g. "My Name". This setting will only apply to this specific
workflowr project being created. To create a Git user name to apply to all
workflowr projects (and Git repositories) on this computer, instead use
\code{\link{wflow_git_config}}.}
\item{user.email}{character (default: NULL). The email address used by Git
to sign commits, e.g. "email@domain". This setting will only apply to this
specific workflowr project being created. To create a Git email address to
apply to all workflowr projects (and Git repositories) on this computer,
instead use \code{\link{wflow_git_config}}.}
}
\value{
Invisibly returns absolute path to workflowr project.
}
\description{
\code{wflow_start} creates a minimal workflowr project. The default
behaviour is to add these files to a new directory, but it is also
possible to populate an already existing project. By default, it
also changes the working directory to the workflowr project.
}
\details{
This is the initial function that organizes the infrastructure to
create a research website for your project. Note that while you do
not need to use RStudio with workflowr, do not delete the Rproj
file because it is required by other functions.
}
\examples{
\dontrun{
wflow_start("path/to/new-project")
# Provide a custom name for the project.
wflow_start("path/to/new-project", name = "My Project")
# Add workflowr files to an existing project.
wflow_start("path/to/current-project", existing = TRUE)
# Add workflowr files to an existing project, but do not automatically
# commit them.
wflow_start("path/to/current-project", git = FALSE, existing = TRUE)
}
}
\seealso{
vignette("wflow-01-getting-started")
}
|
/man/wflow_start.Rd
|
permissive
|
pcarbo/workflowr
|
R
| false
| true
| 3,502
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/wflow_start.R
\name{wflow_start}
\alias{wflow_start}
\title{Start a new workflowr project}
\usage{
wflow_start(directory, name = NULL, git = TRUE, existing = FALSE,
overwrite = FALSE, change_wd = TRUE, user.name = NULL,
user.email = NULL)
}
\arguments{
\item{directory}{character. The directory for the project, e.g.
"~/new-project". When \code{existing = FALSE}, the directory will
be created.}
\item{name}{character (default: NULL). Project name, e.g. "My Project". When
\code{name = NULL}, the project name is automatically set based on the
argument \code{directory}. For example, if \code{directory =
"~/projects/myproject"}, then \code{name} is set to \code{"myproject"}.
\code{name} is displayed on the site's navigation bar and the README.md.}
\item{git}{logical (default: TRUE). Should Git be used for version
control? If \code{directory} is a new Git repository and \code{git
= TRUE}, \code{wflow_start} will initialize the repository and make
an initial commit. If \code{git = TRUE} and \code{directory} is
already a Git repository, \code{wflow_start} will make an
additional commit. In both cases, only files needed for the
workflowr project will be included in the commit.}
\item{existing}{logical (default: FALSE). Indicate if the specified
\code{directory} already exists. The default prevents injecting the
workflowr files into an unwanted location. Only set to TRUE if you wish to
add the workflowr files to an existing project.}
\item{overwrite}{logical (default: FALSE). Control whether to overwrite
existing files. Only relevant if \code{existing = TRUE}.}
\item{change_wd}{logical (default: TRUE). Change the working directory to the
\code{directory}.}
\item{user.name}{character (default: NULL). The user name used by Git to sign
commits, e.g. "My Name". This setting will only apply to this specific
workflowr project being created. To create a Git user name to apply to all
workflowr projects (and Git repositories) on this computer, instead use
\code{\link{wflow_git_config}}.}
\item{user.email}{character (default: NULL). The email address used by Git
to sign commits, e.g. "email@domain". This setting will only apply to this
specific workflowr project being created. To create a Git email address to
apply to all workflowr projects (and Git repositories) on this computer,
instead use \code{\link{wflow_git_config}}.}
}
\value{
Invisibly returns absolute path to workflowr project.
}
\description{
\code{wflow_start} creates a minimal workflowr project. The default
behaviour is to add these files to a new directory, but it is also
possible to populate an already existing project. By default, it
also changes the working directory to the workflowr project.
}
\details{
This is the initial function that organizes the infrastructure to
create a research website for your project. Note that while you do
not need to use RStudio with workflowr, do not delete the Rproj
file because it is required by other functions.
}
\examples{
\dontrun{
wflow_start("path/to/new-project")
# Provide a custom name for the project.
wflow_start("path/to/new-project", name = "My Project")
# Add workflowr files to an existing project.
wflow_start("path/to/current-project", existing = TRUE)
# Add workflowr files to an existing project, but do not automatically
# commit them.
wflow_start("path/to/current-project", git = FALSE, existing = TRUE)
}
}
\seealso{
vignette("wflow-01-getting-started")
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/acmpca_operations.R
\name{acmpca_import_certificate_authority_certificate}
\alias{acmpca_import_certificate_authority_certificate}
\title{Imports a signed private CA certificate into Amazon Web Services Private
CA}
\usage{
acmpca_import_certificate_authority_certificate(
CertificateAuthorityArn,
Certificate,
CertificateChain = NULL
)
}
\arguments{
\item{CertificateAuthorityArn}{[required] The Amazon Resource Name (ARN) that was returned when you called
\code{\link[=acmpca_create_certificate_authority]{create_certificate_authority}}.
This must be of the form:
\code{arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012 }}
\item{Certificate}{[required] The PEM-encoded certificate for a private CA. This may be a self-signed
certificate in the case of a root CA, or it may be signed by another CA
that you control.}
\item{CertificateChain}{A PEM-encoded file that contains all of your certificates, other than
the certificate you're importing, chaining up to your root CA. Your
Amazon Web Services Private CA-hosted or on-premises root certificate is
the last in the chain, and each certificate in the chain signs the one
preceding.
This parameter must be supplied when you import a subordinate CA. When
you import a root CA, there is no chain.}
}
\description{
Imports a signed private CA certificate into Amazon Web Services Private CA. This action is used when you are using a chain of trust whose root is located outside Amazon Web Services Private CA. Before you can call this action, the following preparations must be in place:
See \url{https://www.paws-r-sdk.com/docs/acmpca_import_certificate_authority_certificate/} for full documentation.
}
\keyword{internal}
|
/cran/paws.security.identity/man/acmpca_import_certificate_authority_certificate.Rd
|
permissive
|
paws-r/paws
|
R
| false
| true
| 1,798
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/acmpca_operations.R
\name{acmpca_import_certificate_authority_certificate}
\alias{acmpca_import_certificate_authority_certificate}
\title{Imports a signed private CA certificate into Amazon Web Services Private
CA}
\usage{
acmpca_import_certificate_authority_certificate(
CertificateAuthorityArn,
Certificate,
CertificateChain = NULL
)
}
\arguments{
\item{CertificateAuthorityArn}{[required] The Amazon Resource Name (ARN) that was returned when you called
\code{\link[=acmpca_create_certificate_authority]{create_certificate_authority}}.
This must be of the form:
\code{arn:aws:acm-pca:region:account:certificate-authority/12345678-1234-1234-1234-123456789012 }}
\item{Certificate}{[required] The PEM-encoded certificate for a private CA. This may be a self-signed
certificate in the case of a root CA, or it may be signed by another CA
that you control.}
\item{CertificateChain}{A PEM-encoded file that contains all of your certificates, other than
the certificate you're importing, chaining up to your root CA. Your
Amazon Web Services Private CA-hosted or on-premises root certificate is
the last in the chain, and each certificate in the chain signs the one
preceding.
This parameter must be supplied when you import a subordinate CA. When
you import a root CA, there is no chain.}
}
\description{
Imports a signed private CA certificate into Amazon Web Services Private CA. This action is used when you are using a chain of trust whose root is located outside Amazon Web Services Private CA. Before you can call this action, the following preparations must be in place:
See \url{https://www.paws-r-sdk.com/docs/acmpca_import_certificate_authority_certificate/} for full documentation.
}
\keyword{internal}
|
### nohup R CMD BATCH --vanilla /home/gmatthews1/shapeAnalysis/R/simulation_script2.R &
### tail -f /home/gmatthews1/shapeAnalysis/R/simulation_script2.Rout
### nohup R CMD BATCH --vanilla /home/gmatthews1/shapeAnalysis/R/simulation_script1.R &
### tail -f /home/gmatthews1/shapeAnalysis/R/simulation_script1.Rout
### nohup R CMD BATCH --vanilla /home/gmatthew/Work/shapeanalysis/R/simulation_script_for_server.R /home/gmatthew/Work/shapeanalysis/simulation_script_for_server_side1.Rout &
### tail -f /home/gmatthew/Work/shapeanalysis/simulation_script_for_server_side2.Rout
# nohup R CMD BATCH --vanilla R/simulation_script_for_server.R simulation_script_for_server_side2.Rout &
# tail -f simulation_script_for_server_side2.Rout
# chmod +x /home/gmatthew/Work/shapeanalysis/shape_script.sh
# qsub -A SE_HPC -t 720 -n 1 -q pubnet /home/gmatthew/Work/shapeanalysis/shape_script.sh
#!/usr/bin/bash
#nohup R CMD BATCH --vanilla /home/gmatthews1/Work/shapeanalysis/R/simulation_script_for_server_LM2_side2_20190610_k5_M5_scaled.R /home/gmatthews1/Work/shapeanalysis/simulation_script_for_server_LM2_side2_20190610_k_M5_scaled.Rout
# chmod +x /home/gmatthew/Work/shapeanalysis/shape_script_LM1_1_k10_M10_scaled.sh
# qsub -A SE_HPC -t 720 -n 1 -q pubnet /home/gmatthew/Work/shapeanalysis/shape_script_LM1_1_k10_M10_scaled.sh
# cd /home/gmatthews1/Work/shapeanalysis
# nohup R CMD BATCH --vanilla /home/gmatthews1/Work/shapeanalysis/R/simulation_script_for_server_LM2_side2_20190610_k5_M5scaled.R /home/gmatthews1/Work/shapeanalysis/R/simulation_script_for_server_LM2_side2_20190610_k5_M5scaled.Rout &
# 4825: UM1
# 4980: LM3
# 4983: LM3
# 4990: LM3
# 5139:LM3
# 9973: LM3
# 5514: maxillary molar.. probably UM2 but could me UM1.
# This is what I asked you about the other day if you could tell the tooth type. It is just a single lobe and in 2009 I did not look hard enough. I could likely tell you now but I did not look at other features at the time
# Accumulators for classification results, filled in by the loop further down.
classifier_tribe <- list()
classifier_species <- list()
classifier_imputations <- list()
# These tooth type classifications are from Juliet.
# Keys are specimen photo IDs (DSCN numbers, used to locate the CSV files
# below); values are tooth-type codes (UM*/LM* appear to denote upper /
# maxillary vs lower molar positions -- see the notes earlier in this file).
tooth_type_list <- list()
tooth_type_list[["4825"]] <- "UM1"
tooth_type_list[["4980"]] <- "LM3"
tooth_type_list[["4983"]] <- "LM3"
tooth_type_list[["4990"]] <- "LM3"
tooth_type_list[["5139"]] <- "LM3"
tooth_type_list[["9973"]] <- "LM3"
tooth_type_list[["5514"]] <- "UM2"
start_all <- Sys.time()
library(fdasrvf)
library(parallel)
#setwd("/home/gmatthews1/shapeAnalysis")
source("./R/utility.R")
source("./R/curve_functions.R")
source("./R/calc_shape_dist_partial.R")
source("./R/calc_shape_dist_complete.R")
source("./R/complete_partial_shape.R")
source("./R/impute_partial_shape.R")
source("./R/tooth_cutter.R")
#Loading in the full teeth
ref_file <- read.csv("./data/reference_db.csv")
load("./data/data_set_of_full_teeth.RData")
load("./data/ptsTrainList.RData")
#save(ptsTrainList, file = "/Users/gregorymatthews/Dropbox/shapeanalysisgit/data/ptsTrainList.RData")
#partial_shape2 <- t(tooth_cutter(ptsTrainList[[tooth]][[d]])[[side]])
#Note: Traveling from start to stop should always be in a clockwise direction!
#Which tooth it is
#Loop over each unknown partial specimen (named by its DSCN image number).
for (ggg in 1:length(tooth_type_list)){print(ggg)
#Tooth type (e.g. "UM1", "LM3") selects which training set is used below.
tooth <- tooth_type_list[[names(tooth_type_list)[ggg]]]
#Load the actual partial tooth (x,y outline traced from the b/w image).
partial_shape <- read.csv(paste0("/Users/gregorymatthews/Dropbox/shapeanalysisgit/partial_teeth/bw_images_data/DSCN",names(tooth_type_list)[ggg],"bw.csv"), header = FALSE)
partial_shape <- as.matrix(partial_shape)
#Hand-marked start/stop points delimiting the observed portion of the outline.
start_stop <- read.csv(paste0("/Users/gregorymatthews/Dropbox/shapeanalysisgit/partial_teeth/bw_images_data/DSCN",names(tooth_type_list)[ggg],"bwstart_stop.csv"), header = FALSE)
#points(start_stop[1,1],start_stop[1,2],pch = 16, col = "green")
#points(start_stop[2,1],start_stop[2,2],pch = 16, col = "red")
start_stop <- as.matrix(start_stop)
#Ok now cut off the part i don't need.
start <- start_stop[1,]
stop <- start_stop[2,]
#Squared distances from every outline point to the marked start/stop points;
#the closest outline points become the cut indices.
d_start <- (partial_shape[,1] - start[1])^2 + (partial_shape[,2] - start[2])^2
d_end <- (partial_shape[,1] - stop[1])^2 + (partial_shape[,2] - stop[2])^2
if(which.min(d_start) < which.min(d_end)){
partial_shape <- partial_shape[which.min(d_start):which.min(d_end),]
} else {
#The start index is past the end index, so the kept arc wraps around the end
#of the point list (the outline is a closed loop; traversal is clockwise).
partial_shape <- partial_shape[c(which.min(d_start):nrow(partial_shape),1:which.min(d_end)),]
}
#check partial shape
#plot((partial_shape))
#points(start_stop[1,1],start_stop[1,2], col = "green", pch = 16)
#points(start_stop[2,1],start_stop[2,2], col = "red", pch = 16)
#Now store it wide (2 x n) rather than long (n x 2).
partial_shape <- t(partial_shape)
#Resample the partial outline to 40 points, mode "O" = open curve.
#(The original comment said 250 points, which did not match the code.)
partial_shape <- resamplecurve(partial_shape, 40, mode = "O")
#Remember N_partial must be less than or equal to N_complete
#partial_shape <- t(ptsTrainList[[1]][[1]][11:42,])
#Training set: every complete tooth of the same tooth type, transposed to 2 x n.
complete_shape_list <- lapply(ptsTrainList[[tooth]], t)
#Resampling so that each complete shape has N points
#complete_shape_list <- lapply(complete_shape_list, resamplecurve, N = 250, mode = "C")
##complete_shape_list <- list(complete_shape_list[[1]],complete_shape_list[[2]],complete_shape_list[[3]],complete_shape_list[[4]],complete_shape_list[[5]],complete_shape_list[[6]],complete_shape_list[[7]],complete_shape_list[[8]],complete_shape_list[[9]],complete_shape_list[[10]])
#names(complete_shape_list) <- names(ptsTrainList[[tooth]])[1:10]
#I can't impute the partial shape with itself!
#complete_shape_list[[d]]<-NULL
# M <- 5
# k <- 5
# scale <- TRUE
#Grid of imputation settings: M = number of multiple imputations, k = number
#of nearest neighbours used by the imputation, scale = whether curves are
#scale-normalised before distance computation.
for (M in c(20)){print(paste0("M = ",M))
for (k in c(20)){ print(paste0("k = ",k)) #BUG FIX: originally printed M here, mislabelling the log
for (scale in c(TRUE,FALSE)){ print(paste0("scale = ",scale))
library(parallel) #already attached during setup; harmless no-op kept for robustness
start1 <- Sys.time()
#Complete the partial outline M times against the complete-tooth training set.
imputed_partial_shape <- impute_partial_shape(complete_shape_list,partial_shape, M = M, k = k, scale = scale)
end1 <- Sys.time()
end1-start1 #1.4 minutes with 4 cores on server. Using detectCores()-1 it takes
#Visual sanity check: one imputed completion (red) over the observed partial
#outline (blue).
plot(t(imputed_partial_shape$imputed[[4]]), col = "red")
points(t(imputed_partial_shape$partial_obs), col = "blue")
#Now classify the completed shapes by distance to every complete training
#tooth. (ref_file was already read during setup; the re-read is redundant but
#kept so this chunk also works standalone.)
ref_file <- read.csv("./data/reference_db.csv")
#Elastic shape distance between one complete training tooth and one imputed
#(completed) shape.
#
#Arguments:
#  whole - 2 x N matrix, a complete tooth outline (closed curve).
#  part  - 2 x n matrix, an imputed completion; `whole` is resampled to n
#          columns so the two curves are comparable.
#  scale - passed through to calc_shape_dist_complete (scale-normalise?).
#BUG FIX: the original default `scale = scale` was self-referential and would
#raise "promise already under evaluation" if `scale` were ever omitted; every
#call in this script passes `scale` explicitly, so behaviour is unchanged.
dist_imputed_to_whole <- function(whole, part, scale = TRUE){
  whole <- resamplecurve(whole, N = dim(part)[2], mode = "C")
  print(Sys.time())  #coarse progress logging for long parallel runs
  calc_shape_dist_complete(whole, part, scale)
}
#out <- mclapply(complete_shape_list, dist_imputed_to_whole, part = imputed_partial_shape[[1]][[1]]) #3.183962 minutes with lapply. #2.110835 with mclapply #With 4 cores:1.751686 minutes
#Distances from one imputed shape to EVERY complete training shape, computed
#in parallel. Reads complete_shape_list and scale from the script environment.
#
#Arguments:
#  part  - 2 x n matrix, one imputed completion.
#  cores - number of worker processes; previously hard-coded to 12, now a
#          parameter with the same default so behaviour is unchanged.
#Returns a named list of distances (one per complete training tooth).
dist_imputed_to_whole2 <- function(part, cores = 12){
  mclapply(complete_shape_list, dist_imputed_to_whole,
           part = part, scale = scale, mc.cores = cores)
}
#NOTE(review): `start` is reused here, clobbering the start point taken from
#start_stop above; harmless because that value is no longer needed.
start <- Sys.time()
#Distances from each of the M imputed completions to every complete tooth.
dist_list <- lapply(imputed_partial_shape$imputed,dist_imputed_to_whole2)
end <- Sys.time()
end-start
print(Sys.time())
#Rows = complete training teeth; columns V1..VM = one column per imputation.
dist <- t(do.call(rbind,lapply(dist_list,unlist)))
row.names(dist) <- names(complete_shape_list)
dist <- as.data.frame(dist)
dist$DSCN <- row.names(dist)
################################################################################################################################
# whole <- resamplecurve(whole,N = dim(part)[2], mode = "C")
# out <- calc_shape_dist(whole,part,mode="C")
#
#
#
# calc_shape_dist(whole,imputed_partial_shape$imputed[[3]], mode = "C")
#
# part <- imputed_partial_shape$imputed[[1]]
# whole <- resamplecurve(t(ptsTrainList[[1]][["DSCN5630"]]),N = dim(imputed_partial_shape$imputed[[1]])[2], mode = "C")
# whole <- resamplecurve(t(ptsTrainList[[1]][["DSCN2879"]]),N = dim(imputed_partial_shape$imputed[[1]])[2], mode = "C")
# calc_shape_dist(whole,part,mode="C")
#
# plot(t(whole))
# points(t(part+c(150,-100)))
################################################################################################################################
#Attach tribe/species metadata to each training tooth by image name.
dist <- merge(dist, ref_file, by.x = "DSCN", by.y = "Image.Name", all.x = TRUE)
#Smallest to largest
#knn <- 5
# table(as.character(dist$tribe[order(dist$V1)][1:knn]))
# table(as.character(dist$tribe[order(dist$V2)][1:knn]))
# table(as.character(dist$tribe[order(dist$V3)][1:knn]))
# table(as.character(dist$tribe[order(dist$V4)][1:knn]))
# table(as.character(dist$tribe[order(dist$V5)][1:knn]))
#Classify based on closest match between partial and all full teeth.
#fret <- mclapply(complete_shape_list,calc_shape_dist_partial,partial_shape = partial_shape)
#dist_partial <- data.frame(DSCN = names(unlist(fret)), dist = unlist(fret))
#dist_vec holds the partial-matching distances computed during imputation.
dist_partial <- data.frame(DSCN = names(unlist(imputed_partial_shape$dist_vec)), dist = unlist(imputed_partial_shape$dist_vec))
dist_partial <- merge(dist_partial,ref_file,by.x = "DSCN",by.y = "Image.Name", all.x = TRUE)
#Bundle both distance tables and the imputations for downstream classifiers.
results <- list(dist = dist , dist_partial = dist_partial, imputed_partial_shape = imputed_partial_shape)
end_all <- Sys.time()
end_all-start_all
#kNN tribe classifier based on direct partial-matching distances.
#
#Arguments:
#  knn     - number of nearest complete teeth that vote.
#  dist_df - data.frame with numeric column `dist` and column `Tribe`;
#            defaults to the script-level results$dist_partial so existing
#            calls knn_partial_matching(i_knn) behave exactly as before.
#Returns a 1-row data.frame of tribe vote proportions (one column per tribe).
#(An unused inv_dist column from an abandoned weighted-kNN variant was removed.)
knn_partial_matching <- function(knn, dist_df = results$dist_partial){
  temp <- dist_df
  #Fix factor levels so every tribe gets a (possibly zero) proportion column.
  temp$Tribe <- factor(temp$Tribe, levels = unique(sort(temp$Tribe)))
  #Vote shares among the knn smallest distances.
  dat <- data.frame(t(data.frame(c(table(temp$Tribe[order(temp$dist)][1:knn]) / knn))))
  row.names(dat) <- NULL
  dat
}
#plot(ptsTrainList[[1]][["DSCN2879"]])
#plot(t(results_list[[DSCN]]$imputed_partial_shape$imputed[[1]]))
#DSCN <- "DSCN2871"
#temp <- results_list[[DSCN]]$dist
#temp[order(temp[[paste0("V",i)]]),]
#full <- resamplecurve(t(ptsTrainList[[1]][["DSCN3753"]]),199)
#calc_shape_dist(full,(results_list[[DSCN]]$imputed_partial_shape$imputed[[4]]))
#1199.961
#Now for the imputed teeth
#kNN tribe classifier on the imputed (completed) shapes: one kNN vote per
#imputation column V1..Vn, averaged across imputations.
#
#Arguments:
#  knn     - number of nearest complete teeth that vote.
#  dist_df - data.frame with columns V1..Vn (distances per imputation) and
#            `Tribe`; defaults to the script-level results$dist.
#  n_imp   - number of imputation columns to use; defaults to script-level M.
#Returns a 1-row data.frame of mean tribe vote proportions.
knn_imputed <- function(knn, dist_df = results$dist, n_imp = M){
  temp <- dist_df
  temp$Tribe <- factor(temp$Tribe, levels = unique(sort(temp$Tribe)))
  dat_list <- vector("list", n_imp)  #preallocate instead of growing
  for (i in seq_len(n_imp)) {
    pro <- data.frame(t(data.frame(c(table(temp$Tribe[order(temp[[paste0("V", i)]])][1:knn]) / knn))))
    row.names(pro) <- NULL
    dat_list[[i]] <- pro
  }
  votes <- do.call(rbind, dat_list)
  #Average vote shares over the n_imp imputations.
  dat <- data.frame(t(data.frame(unlist(apply(votes, 2, mean)))))
  row.names(dat) <- NULL
  dat
}
#Now classify Species
#kNN SPECIES classifier based on direct partial-matching distances.
#Same logic as knn_partial_matching but votes over the `Species` column.
#
#Arguments:
#  knn     - number of nearest complete teeth that vote.
#  dist_df - data.frame with numeric column `dist` and column `Species`;
#            defaults to the script-level results$dist_partial.
#Returns a 1-row data.frame of species vote proportions.
knn_partial_matching_species <- function(knn, dist_df = results$dist_partial){
  temp <- dist_df
  temp$Species <- factor(temp$Species, levels = unique(sort(temp$Species)))
  dat <- data.frame(t(data.frame(c(table(temp$Species[order(temp$dist)][1:knn]) / knn))))
  row.names(dat) <- NULL
  dat
}
#plot(ptsTrainList[[1]][["DSCN2879"]])
#plot(t(results_list[[DSCN]]$imputed_partial_shape$imputed[[1]]))
#DSCN <- "DSCN2871"
#temp <- results_list[[DSCN]]$dist
#temp[order(temp[[paste0("V",i)]]),]
#full <- resamplecurve(t(ptsTrainList[[1]][["DSCN3753"]]),199)
#calc_shape_dist(full,(results_list[[DSCN]]$imputed_partial_shape$imputed[[4]]))
#1199.961
#Now for the imputed teeth
#kNN SPECIES classifier on the imputed shapes: one kNN vote per imputation
#column V1..Vn, averaged across imputations.
#
#Arguments:
#  knn     - number of nearest complete teeth that vote.
#  dist_df - data.frame with columns V1..Vn and `Species`; defaults to the
#            script-level results$dist.
#  n_imp   - number of imputation columns to use; defaults to script-level M.
#Returns a 1-row data.frame of mean species vote proportions.
knn_imputed_species <- function(knn, dist_df = results$dist, n_imp = M){
  temp <- dist_df
  temp$Species <- factor(temp$Species, levels = unique(sort(temp$Species)))
  dat_list <- vector("list", n_imp)  #preallocate instead of growing
  for (i in seq_len(n_imp)) {
    pro <- data.frame(t(data.frame(c(table(temp$Species[order(temp[[paste0("V", i)]])][1:knn]) / knn))))
    row.names(pro) <- NULL
    dat_list[[i]] <- pro
  }
  votes <- do.call(rbind, dat_list)
  dat <- data.frame(t(data.frame(unlist(apply(votes, 2, mean)))))
  row.names(dat) <- NULL
  dat
}
#Unique key for this (specimen, M, k, scale) configuration.
nam <- paste0(names(tooth_type_list)[ggg],"_M_",M,"_k_",k,"_scale_",scale)
classifier_tribe[[nam]] <- list()
classifier_species[[nam]] <- list()
#10 rows are because I'm using different choices of knn (knn = 1..10).
#NOTE(review): ncol = 7 assumes exactly 7 tribe levels and ncol = 20 assumes
#exactly 20 species levels in the reference data -- confirm against ref_file.
classifier_tribe[[nam]]$partial_matching <- matrix(NA, nrow = 10, ncol = 7)
classifier_tribe[[nam]]$imputed <- matrix(NA, nrow = 10, ncol = 7)
classifier_species[[nam]]$partial_matching <- matrix(NA, nrow = 10, ncol = 20)
classifier_species[[nam]]$imputed <- matrix(NA, nrow = 10, ncol = 20)
for (i_knn in 1:10){
#Tribe classification
classifier_tribe[[nam]]$partial_matching[i_knn,] <- unlist(knn_partial_matching(i_knn))
classifier_tribe[[nam]]$imputed[i_knn,] <- unlist(knn_imputed(i_knn))
#Species classification
classifier_species[[nam]]$partial_matching[i_knn,] <- unlist(knn_partial_matching_species(i_knn))
classifier_species[[nam]]$imputed[i_knn,] <- unlist(knn_imputed_species(i_knn))
}
classifier_tribe[[nam]]$partial_matching <- as.data.frame(classifier_tribe[[nam]]$partial_matching)
classifier_tribe[[nam]]$imputed <- as.data.frame(classifier_tribe[[nam]]$imputed)
names(classifier_tribe[[nam]]$partial_matching) <- names(classifier_tribe[[nam]]$imputed) <- c("Alcelaphini","Antilopini","Bovini","Hippotragini","Neotragini","Reduncini","Tragelaphini")
classifier_species[[nam]]$partial_matching <- as.data.frame(classifier_species[[nam]]$partial_matching)
classifier_species[[nam]]$imputed <- as.data.frame(classifier_species[[nam]]$imputed)
#NOTE(review): relies on i_knn (= 10) leaking out of the loop above; one extra
#classifier call is made solely to recover the species column names.
names(classifier_species[[nam]]$partial_matching) <- names(classifier_species[[nam]]$imputed) <- names(unlist(knn_imputed_species(i_knn)))
#Store the actual imputations
classifier_imputations[[nam]] <- results
print(classifier_tribe)
print(classifier_species)
#Checkpoint: results are re-saved after every configuration so an interrupted
#long run keeps all completed work.
save(classifier_tribe, file = "/Users/gregorymatthews/Dropbox/shapeanalysisgit/results/classifier_tribe.RData")
save(classifier_species, file = "/Users/gregorymatthews/Dropbox/shapeanalysisgit/results/classifier_species.RData")
save(classifier_imputations, file = "/Users/gregorymatthews/Dropbox/shapeanalysisgit/results/classifier_imputations.RData")
}}}
}
#Results tables: row [5,] extracts the knn = 5 classifier for each stored
#configuration. Partial-matching tribe classifier first.
tab <- classifier_tribe[[1]]$partial_matching[5,]
for (i in 2:length(classifier_tribe)){
tab <- rbind(tab,classifier_tribe[[i]]$partial_matching[5,])
}
library(xtable)
xtable(tab, caption = "here")
#Imputation-based tribe classifier.
tab_imp <- classifier_tribe[[1]]$imputed[5,]
for (i in 2:length(classifier_tribe)){
tab_imp <- rbind(tab_imp,classifier_tribe[[i]]$imputed[5,])
}
#NOTE(review): classifier_tribe holds one entry per (specimen, scale)
#combination (2 per tooth with scale in {TRUE, FALSE}), so this length-7
#rowname vector may not match nrow(tab_imp) -- confirm before running.
row.names(tab_imp) <- paste0("IMG",names(tooth_type_list))
xtable(tab_imp, caption = "here")
#Species classifier
#Partial-matching species classifier.
tab_species <- classifier_species[[1]]$partial_matching[5,]
for (i in 2:length(classifier_species)){
tab_species <- rbind(tab_species,classifier_species[[i]]$partial_matching[5,])
}
row.names(tab_species) <- names(tooth_type_list)
#Drop species columns that never received any votes.
keep <- apply(tab_species,2,function(x){sum(x)>0})
tab_species[,keep]
library(xtable)
xtable(tab_species, caption = "here")
#Imputation-based species classifier.
tab_species_imp <- classifier_species[[1]]$imputed[5,]
for (i in 2:length(classifier_species)){
tab_species_imp <- rbind(tab_species_imp,classifier_species[[i]]$imputed[5,])
}
row.names(tab_species_imp) <- paste0("IMG",names(tooth_type_list))
keep <- apply(tab_species_imp,2,function(x){sum(x)>0})
tab_species_imp[,keep]
xtable(tab_species_imp, caption = "here")
#Plots of completed shapes: observed partial outline in black, the M imputed
#completions overlaid in red.
#NOTE(review): indices [[1]]..[[5]] into classifier_imputations assume one
#entry per specimen, but entries exist per (specimen, scale) combination --
#confirm the indexing. Also the "LM1" in the first title disagrees with
#tooth_type_list, which records 4825 as "UM1" -- confirm which is correct.
png("/Users/gregorymatthews/Dropbox/shapeanalysisgit/IMG4825_imputed.png", h = 5, w = 8, res = 300, units = "in")
plot(t(classifier_imputations[[1]]$imputed_partial_shape$imputed[[1]]), col = "white", type = "l", xlim = c(-450,250), ylim = c(-250, 250), xlab = "", ylab = "", main = "IMG4825 - LM1")
for (i in 1:length(classifier_imputations[[1]]$imputed_partial_shape$imputed)){
points(t(classifier_imputations[[1]]$imputed_partial_shape$imputed[[i]]), col = "red", type = "l")
}
points(t(classifier_imputations[[1]]$imputed_partial_shape$partial_obs), col = "black", type = "l")
dev.off()
#IMG2
plot(t(classifier_imputations[[2]]$imputed_partial_shape$imputed[[1]]), col = "white", type = "l", xlim = c(-450,660), ylim = c(-250, 250), xlab = "", ylab = "", main = paste0("IMG",names(tooth_type_list)[2]," - LM3"))
for (i in 1:length(classifier_imputations[[2]]$imputed_partial_shape$imputed)){
points(t(classifier_imputations[[2]]$imputed_partial_shape$imputed[[i]]), col = "red", type = "l")
}
points(t(classifier_imputations[[2]]$imputed_partial_shape$partial_obs), col = "black", type = "l")
#IMG3
plot(t(classifier_imputations[[3]]$imputed_partial_shape$imputed[[1]]), col = "white", type = "l", xlim = c(-450,500), ylim = c(-250, 250), xlab = "", ylab = "", main = paste0("IMG",names(tooth_type_list)[3]," - LM3"))
for (i in 1:length(classifier_imputations[[3]]$imputed_partial_shape$imputed)){
points(t(classifier_imputations[[3]]$imputed_partial_shape$imputed[[i]]), col = "red", type = "l")
}
points(t(classifier_imputations[[3]]$imputed_partial_shape$partial_obs), col = "black", type = "l")
#IMG4
plot(t(classifier_imputations[[4]]$imputed_partial_shape$imputed[[1]]), col = "white", type = "l", xlim = c(-1050,500), ylim = c(-250, 450), xlab = "", ylab = "", main = paste0("IMG",names(tooth_type_list)[4]," - LM3"))
for (i in 1:length(classifier_imputations[[4]]$imputed_partial_shape$imputed)){
points(t(classifier_imputations[[4]]$imputed_partial_shape$imputed[[i]]), col = "red", type = "l")
}
points(t(classifier_imputations[[4]]$imputed_partial_shape$partial_obs), col = "black", type = "l")
#IMG5
plot(t(classifier_imputations[[5]]$imputed_partial_shape$imputed[[1]]), col = "white", type = "l", xlim = c(-250,600), ylim = c(-250, 250), xlab = "", ylab = "", main = paste0("IMG",names(tooth_type_list)[5]," - LM3"))
for (i in 1:length(classifier_imputations[[5]]$imputed_partial_shape$imputed)){
points(t(classifier_imputations[[5]]$imputed_partial_shape$imputed[[i]]), col = "red", type = "l")
}
points(t(classifier_imputations[[5]]$imputed_partial_shape$partial_obs), col = "black", type = "l")
|
/R/classify_unknown_tooth.R
|
no_license
|
gjm112/shapeanalysis
|
R
| false
| false
| 21,734
|
r
|
### nohup R CMD BATCH --vanilla /home/gmatthews1/shapeAnalysis/R/simulation_script2.R &
### tail -f /home/gmatthews1/shapeAnalysis/R/simulation_script2.Rout
### nohup R CMD BATCH --vanilla /home/gmatthews1/shapeAnalysis/R/simulation_script1.R &
### tail -f /home/gmatthews1/shapeAnalysis/R/simulation_script1.Rout
### nohup R CMD BATCH --vanilla /home/gmatthew/Work/shapeanalysis/R/simulation_script_for_server.R /home/gmatthew/Work/shapeanalysis/simulation_script_for_server_side1.Rout &
### tail -f /home/gmatthew/Work/shapeanalysis/simulation_script_for_server_side2.Rout
# nohup R CMD BATCH --vanilla R/simulation_script_for_server.R simulation_script_for_server_side2.Rout &
# tail -f simulation_script_for_server_side2.Rout
# chmod +x /home/gmatthew/Work/shapeanalysis/shape_script.sh
# qsub -A SE_HPC -t 720 -n 1 -q pubnet /home/gmatthew/Work/shapeanalysis/shape_script.sh
#!/usr/bin/bash
#nohup R CMD BATCH --vanilla /home/gmatthews1/Work/shapeanalysis/R/simulation_script_for_server_LM2_side2_20190610_k5_M5_scaled.R /home/gmatthews1/Work/shapeanalysis/simulation_script_for_server_LM2_side2_20190610_k_M5_scaled.Rout
# chmod +x /home/gmatthew/Work/shapeanalysis/shape_script_LM1_1_k10_M10_scaled.sh
# qsub -A SE_HPC -t 720 -n 1 -q pubnet /home/gmatthew/Work/shapeanalysis/shape_script_LM1_1_k10_M10_scaled.sh
# cd /home/gmatthews1/Work/shapeanalysis
# nohup R CMD BATCH --vanilla /home/gmatthews1/Work/shapeanalysis/R/simulation_script_for_server_LM2_side2_20190610_k5_M5scaled.R /home/gmatthews1/Work/shapeanalysis/R/simulation_script_for_server_LM2_side2_20190610_k5_M5scaled.Rout &
# 4825: UM1
# 4980: LM3
# 4983: LM3
# 4990: LM3
# 5139:LM3
# 9973: LM3
# 5514: maxillary molar.. probably UM2 but could me UM1.
# Classify partially observed teeth of unknown tribe/species: each partial
# outline is completed by multiple imputation against the full-tooth training
# set, then kNN classifiers are run on tribe and species.
classifier_tribe <- list()
classifier_species <- list()
classifier_imputations <- list()

# Tooth-type determinations for each partial specimen, keyed by DSCN image
# number (classifications provided by Juliet).
tooth_type_list <- as.list(c(
  "4825" = "UM1",
  "4980" = "LM3",
  "4983" = "LM3",
  "4990" = "LM3",
  "5139" = "LM3",
  "9973" = "LM3",
  "5514" = "UM2"
))

start_all <- Sys.time()

library(fdasrvf)
library(parallel)

# Shape-analysis helper functions from this repository.
source("./R/utility.R")
source("./R/curve_functions.R")
source("./R/calc_shape_dist_partial.R")
source("./R/calc_shape_dist_complete.R")
source("./R/complete_partial_shape.R")
source("./R/impute_partial_shape.R")
source("./R/tooth_cutter.R")

# Reference metadata (tribe/species per image) and complete-tooth outlines.
# Note: when tracing outlines, travel from start to stop is always clockwise.
ref_file <- read.csv("./data/reference_db.csv")
load("./data/data_set_of_full_teeth.RData")
load("./data/ptsTrainList.RData")
#Loop over each unknown partial specimen (named by its DSCN image number).
for (ggg in 1:length(tooth_type_list)){print(ggg)
#Tooth type (e.g. "UM1", "LM3") selects which training set is used below.
tooth <- tooth_type_list[[names(tooth_type_list)[ggg]]]
#Load the actual partial tooth (x,y outline traced from the b/w image).
partial_shape <- read.csv(paste0("/Users/gregorymatthews/Dropbox/shapeanalysisgit/partial_teeth/bw_images_data/DSCN",names(tooth_type_list)[ggg],"bw.csv"), header = FALSE)
partial_shape <- as.matrix(partial_shape)
#Hand-marked start/stop points delimiting the observed portion of the outline.
start_stop <- read.csv(paste0("/Users/gregorymatthews/Dropbox/shapeanalysisgit/partial_teeth/bw_images_data/DSCN",names(tooth_type_list)[ggg],"bwstart_stop.csv"), header = FALSE)
#points(start_stop[1,1],start_stop[1,2],pch = 16, col = "green")
#points(start_stop[2,1],start_stop[2,2],pch = 16, col = "red")
start_stop <- as.matrix(start_stop)
#Ok now cut off the part i don't need.
start <- start_stop[1,]
stop <- start_stop[2,]
#Squared distances from every outline point to the marked start/stop points;
#the closest outline points become the cut indices.
d_start <- (partial_shape[,1] - start[1])^2 + (partial_shape[,2] - start[2])^2
d_end <- (partial_shape[,1] - stop[1])^2 + (partial_shape[,2] - stop[2])^2
if(which.min(d_start) < which.min(d_end)){
partial_shape <- partial_shape[which.min(d_start):which.min(d_end),]
} else {
#The start index is past the end index, so the kept arc wraps around the end
#of the point list (the outline is a closed loop; traversal is clockwise).
partial_shape <- partial_shape[c(which.min(d_start):nrow(partial_shape),1:which.min(d_end)),]
}
#check partial shape
#plot((partial_shape))
#points(start_stop[1,1],start_stop[1,2], col = "green", pch = 16)
#points(start_stop[2,1],start_stop[2,2], col = "red", pch = 16)
#Now store it wide (2 x n) rather than long (n x 2).
partial_shape <- t(partial_shape)
#Resample the partial outline to 40 points, mode "O" = open curve.
#(The original comment said 250 points, which did not match the code.)
partial_shape <- resamplecurve(partial_shape, 40, mode = "O")
#Remember N_partial must be less than or equal to N_complete
#partial_shape <- t(ptsTrainList[[1]][[1]][11:42,])
#Training set: every complete tooth of the same tooth type, transposed to 2 x n.
complete_shape_list <- lapply(ptsTrainList[[tooth]], t)
#Resampling so that each complete shape has N points
#complete_shape_list <- lapply(complete_shape_list, resamplecurve, N = 250, mode = "C")
##complete_shape_list <- list(complete_shape_list[[1]],complete_shape_list[[2]],complete_shape_list[[3]],complete_shape_list[[4]],complete_shape_list[[5]],complete_shape_list[[6]],complete_shape_list[[7]],complete_shape_list[[8]],complete_shape_list[[9]],complete_shape_list[[10]])
#names(complete_shape_list) <- names(ptsTrainList[[tooth]])[1:10]
#I can't impute the partial shape with itself!
#complete_shape_list[[d]]<-NULL
# M <- 5
# k <- 5
# scale <- TRUE
#Grid of imputation settings: M = number of multiple imputations, k = number
#of nearest neighbours used by the imputation, scale = whether curves are
#scale-normalised before distance computation.
for (M in c(20)){print(paste0("M = ",M))
for (k in c(20)){ print(paste0("k = ",k)) #BUG FIX: originally printed M here, mislabelling the log
for (scale in c(TRUE,FALSE)){ print(paste0("scale = ",scale))
library(parallel) #already attached during setup; harmless no-op kept for robustness
start1 <- Sys.time()
#Complete the partial outline M times against the complete-tooth training set.
imputed_partial_shape <- impute_partial_shape(complete_shape_list,partial_shape, M = M, k = k, scale = scale)
end1 <- Sys.time()
end1-start1 #1.4 minutes with 4 cores on server. Using detectCores()-1 it takes
#Visual sanity check: one imputed completion (red) over the observed partial
#outline (blue).
plot(t(imputed_partial_shape$imputed[[4]]), col = "red")
points(t(imputed_partial_shape$partial_obs), col = "blue")
#Now classify the completed shapes by distance to every complete training
#tooth. (ref_file was already read during setup; the re-read is redundant but
#kept so this chunk also works standalone.)
ref_file <- read.csv("./data/reference_db.csv")
#Elastic shape distance between one complete training tooth and one imputed
#(completed) shape.
#
#Arguments:
#  whole - 2 x N matrix, a complete tooth outline (closed curve).
#  part  - 2 x n matrix, an imputed completion; `whole` is resampled to n
#          columns so the two curves are comparable.
#  scale - passed through to calc_shape_dist_complete (scale-normalise?).
#BUG FIX: the original default `scale = scale` was self-referential and would
#raise "promise already under evaluation" if `scale` were ever omitted; every
#call in this script passes `scale` explicitly, so behaviour is unchanged.
dist_imputed_to_whole <- function(whole, part, scale = TRUE){
  whole <- resamplecurve(whole, N = dim(part)[2], mode = "C")
  print(Sys.time())  #coarse progress logging for long parallel runs
  calc_shape_dist_complete(whole, part, scale)
}
#out <- mclapply(complete_shape_list, dist_imputed_to_whole, part = imputed_partial_shape[[1]][[1]]) #3.183962 minutes with lapply. #2.110835 with mclapply #With 4 cores:1.751686 minutes
#Distances from one imputed shape to EVERY complete training shape, computed
#in parallel. Reads complete_shape_list and scale from the script environment.
#
#Arguments:
#  part  - 2 x n matrix, one imputed completion.
#  cores - number of worker processes; previously hard-coded to 12, now a
#          parameter with the same default so behaviour is unchanged.
#Returns a named list of distances (one per complete training tooth).
dist_imputed_to_whole2 <- function(part, cores = 12){
  mclapply(complete_shape_list, dist_imputed_to_whole,
           part = part, scale = scale, mc.cores = cores)
}
#NOTE(review): `start` is reused here, clobbering the start point taken from
#start_stop above; harmless because that value is no longer needed.
start <- Sys.time()
#Distances from each of the M imputed completions to every complete tooth.
dist_list <- lapply(imputed_partial_shape$imputed,dist_imputed_to_whole2)
end <- Sys.time()
end-start
print(Sys.time())
#Rows = complete training teeth; columns V1..VM = one column per imputation.
dist <- t(do.call(rbind,lapply(dist_list,unlist)))
row.names(dist) <- names(complete_shape_list)
dist <- as.data.frame(dist)
dist$DSCN <- row.names(dist)
################################################################################################################################
# whole <- resamplecurve(whole,N = dim(part)[2], mode = "C")
# out <- calc_shape_dist(whole,part,mode="C")
#
#
#
# calc_shape_dist(whole,imputed_partial_shape$imputed[[3]], mode = "C")
#
# part <- imputed_partial_shape$imputed[[1]]
# whole <- resamplecurve(t(ptsTrainList[[1]][["DSCN5630"]]),N = dim(imputed_partial_shape$imputed[[1]])[2], mode = "C")
# whole <- resamplecurve(t(ptsTrainList[[1]][["DSCN2879"]]),N = dim(imputed_partial_shape$imputed[[1]])[2], mode = "C")
# calc_shape_dist(whole,part,mode="C")
#
# plot(t(whole))
# points(t(part+c(150,-100)))
################################################################################################################################
#Attach tribe/species metadata to each training tooth by image name.
dist <- merge(dist, ref_file, by.x = "DSCN", by.y = "Image.Name", all.x = TRUE)
#Smallest to largest
#knn <- 5
# table(as.character(dist$tribe[order(dist$V1)][1:knn]))
# table(as.character(dist$tribe[order(dist$V2)][1:knn]))
# table(as.character(dist$tribe[order(dist$V3)][1:knn]))
# table(as.character(dist$tribe[order(dist$V4)][1:knn]))
# table(as.character(dist$tribe[order(dist$V5)][1:knn]))
#Classify based on closest match between partial and all full teeth.
#fret <- mclapply(complete_shape_list,calc_shape_dist_partial,partial_shape = partial_shape)
#dist_partial <- data.frame(DSCN = names(unlist(fret)), dist = unlist(fret))
#dist_vec holds the partial-matching distances computed during imputation.
dist_partial <- data.frame(DSCN = names(unlist(imputed_partial_shape$dist_vec)), dist = unlist(imputed_partial_shape$dist_vec))
dist_partial <- merge(dist_partial,ref_file,by.x = "DSCN",by.y = "Image.Name", all.x = TRUE)
#Bundle both distance tables and the imputations for downstream classifiers.
results <- list(dist = dist , dist_partial = dist_partial, imputed_partial_shape = imputed_partial_shape)
end_all <- Sys.time()
end_all-start_all
#kNN tribe classifier based on direct partial-matching distances.
#
#Arguments:
#  knn     - number of nearest complete teeth that vote.
#  dist_df - data.frame with numeric column `dist` and column `Tribe`;
#            defaults to the script-level results$dist_partial so existing
#            calls knn_partial_matching(i_knn) behave exactly as before.
#Returns a 1-row data.frame of tribe vote proportions (one column per tribe).
#(An unused inv_dist column from an abandoned weighted-kNN variant was removed.)
knn_partial_matching <- function(knn, dist_df = results$dist_partial){
  temp <- dist_df
  #Fix factor levels so every tribe gets a (possibly zero) proportion column.
  temp$Tribe <- factor(temp$Tribe, levels = unique(sort(temp$Tribe)))
  #Vote shares among the knn smallest distances.
  dat <- data.frame(t(data.frame(c(table(temp$Tribe[order(temp$dist)][1:knn]) / knn))))
  row.names(dat) <- NULL
  dat
}
#plot(ptsTrainList[[1]][["DSCN2879"]])
#plot(t(results_list[[DSCN]]$imputed_partial_shape$imputed[[1]]))
#DSCN <- "DSCN2871"
#temp <- results_list[[DSCN]]$dist
#temp[order(temp[[paste0("V",i)]]),]
#full <- resamplecurve(t(ptsTrainList[[1]][["DSCN3753"]]),199)
#calc_shape_dist(full,(results_list[[DSCN]]$imputed_partial_shape$imputed[[4]]))
#1199.961
#Now for the imputed teeth
#kNN tribe classifier on the imputed (completed) shapes: one kNN vote per
#imputation column V1..Vn, averaged across imputations.
#
#Arguments:
#  knn     - number of nearest complete teeth that vote.
#  dist_df - data.frame with columns V1..Vn (distances per imputation) and
#            `Tribe`; defaults to the script-level results$dist.
#  n_imp   - number of imputation columns to use; defaults to script-level M.
#Returns a 1-row data.frame of mean tribe vote proportions.
knn_imputed <- function(knn, dist_df = results$dist, n_imp = M){
  temp <- dist_df
  temp$Tribe <- factor(temp$Tribe, levels = unique(sort(temp$Tribe)))
  dat_list <- vector("list", n_imp)  #preallocate instead of growing
  for (i in seq_len(n_imp)) {
    pro <- data.frame(t(data.frame(c(table(temp$Tribe[order(temp[[paste0("V", i)]])][1:knn]) / knn))))
    row.names(pro) <- NULL
    dat_list[[i]] <- pro
  }
  votes <- do.call(rbind, dat_list)
  #Average vote shares over the n_imp imputations.
  dat <- data.frame(t(data.frame(unlist(apply(votes, 2, mean)))))
  row.names(dat) <- NULL
  dat
}
#Now classify Species
#kNN SPECIES classifier based on direct partial-matching distances.
#Same logic as knn_partial_matching but votes over the `Species` column.
#
#Arguments:
#  knn     - number of nearest complete teeth that vote.
#  dist_df - data.frame with numeric column `dist` and column `Species`;
#            defaults to the script-level results$dist_partial.
#Returns a 1-row data.frame of species vote proportions.
knn_partial_matching_species <- function(knn, dist_df = results$dist_partial){
  temp <- dist_df
  temp$Species <- factor(temp$Species, levels = unique(sort(temp$Species)))
  dat <- data.frame(t(data.frame(c(table(temp$Species[order(temp$dist)][1:knn]) / knn))))
  row.names(dat) <- NULL
  dat
}
#plot(ptsTrainList[[1]][["DSCN2879"]])
#plot(t(results_list[[DSCN]]$imputed_partial_shape$imputed[[1]]))
#DSCN <- "DSCN2871"
#temp <- results_list[[DSCN]]$dist
#temp[order(temp[[paste0("V",i)]]),]
#full <- resamplecurve(t(ptsTrainList[[1]][["DSCN3753"]]),199)
#calc_shape_dist(full,(results_list[[DSCN]]$imputed_partial_shape$imputed[[4]]))
#1199.961
#Now for the imputed teeth
#kNN SPECIES classifier on the imputed shapes: one kNN vote per imputation
#column V1..Vn, averaged across imputations.
#
#Arguments:
#  knn     - number of nearest complete teeth that vote.
#  dist_df - data.frame with columns V1..Vn and `Species`; defaults to the
#            script-level results$dist.
#  n_imp   - number of imputation columns to use; defaults to script-level M.
#Returns a 1-row data.frame of mean species vote proportions.
knn_imputed_species <- function(knn, dist_df = results$dist, n_imp = M){
  temp <- dist_df
  temp$Species <- factor(temp$Species, levels = unique(sort(temp$Species)))
  dat_list <- vector("list", n_imp)  #preallocate instead of growing
  for (i in seq_len(n_imp)) {
    pro <- data.frame(t(data.frame(c(table(temp$Species[order(temp[[paste0("V", i)]])][1:knn]) / knn))))
    row.names(pro) <- NULL
    dat_list[[i]] <- pro
  }
  votes <- do.call(rbind, dat_list)
  dat <- data.frame(t(data.frame(unlist(apply(votes, 2, mean)))))
  row.names(dat) <- NULL
  dat
}
#Unique key for this (specimen, M, k, scale) configuration.
nam <- paste0(names(tooth_type_list)[ggg],"_M_",M,"_k_",k,"_scale_",scale)
classifier_tribe[[nam]] <- list()
classifier_species[[nam]] <- list()
#10 rows are because I'm using different choices of knn (knn = 1..10).
#NOTE(review): ncol = 7 assumes exactly 7 tribe levels and ncol = 20 assumes
#exactly 20 species levels in the reference data -- confirm against ref_file.
classifier_tribe[[nam]]$partial_matching <- matrix(NA, nrow = 10, ncol = 7)
classifier_tribe[[nam]]$imputed <- matrix(NA, nrow = 10, ncol = 7)
classifier_species[[nam]]$partial_matching <- matrix(NA, nrow = 10, ncol = 20)
classifier_species[[nam]]$imputed <- matrix(NA, nrow = 10, ncol = 20)
for (i_knn in 1:10){
#Tribe classification
classifier_tribe[[nam]]$partial_matching[i_knn,] <- unlist(knn_partial_matching(i_knn))
classifier_tribe[[nam]]$imputed[i_knn,] <- unlist(knn_imputed(i_knn))
#Species classification
classifier_species[[nam]]$partial_matching[i_knn,] <- unlist(knn_partial_matching_species(i_knn))
classifier_species[[nam]]$imputed[i_knn,] <- unlist(knn_imputed_species(i_knn))
}
classifier_tribe[[nam]]$partial_matching <- as.data.frame(classifier_tribe[[nam]]$partial_matching)
classifier_tribe[[nam]]$imputed <- as.data.frame(classifier_tribe[[nam]]$imputed)
names(classifier_tribe[[nam]]$partial_matching) <- names(classifier_tribe[[nam]]$imputed) <- c("Alcelaphini","Antilopini","Bovini","Hippotragini","Neotragini","Reduncini","Tragelaphini")
classifier_species[[nam]]$partial_matching <- as.data.frame(classifier_species[[nam]]$partial_matching)
classifier_species[[nam]]$imputed <- as.data.frame(classifier_species[[nam]]$imputed)
#NOTE(review): relies on i_knn (= 10) leaking out of the loop above; one extra
#classifier call is made solely to recover the species column names.
names(classifier_species[[nam]]$partial_matching) <- names(classifier_species[[nam]]$imputed) <- names(unlist(knn_imputed_species(i_knn)))
#Store the actual imputations
classifier_imputations[[nam]] <- results
print(classifier_tribe)
print(classifier_species)
#Checkpoint: results are re-saved after every configuration so an interrupted
#long run keeps all completed work.
save(classifier_tribe, file = "/Users/gregorymatthews/Dropbox/shapeanalysisgit/results/classifier_tribe.RData")
save(classifier_species, file = "/Users/gregorymatthews/Dropbox/shapeanalysisgit/results/classifier_species.RData")
save(classifier_imputations, file = "/Users/gregorymatthews/Dropbox/shapeanalysisgit/results/classifier_imputations.RData")
}}}
}
#Results tables: row [5,] extracts the knn = 5 classifier for each stored
#configuration. Partial-matching tribe classifier first.
tab <- classifier_tribe[[1]]$partial_matching[5,]
for (i in 2:length(classifier_tribe)){
tab <- rbind(tab,classifier_tribe[[i]]$partial_matching[5,])
}
library(xtable)
xtable(tab, caption = "here")
#Imputation-based tribe classifier.
tab_imp <- classifier_tribe[[1]]$imputed[5,]
for (i in 2:length(classifier_tribe)){
tab_imp <- rbind(tab_imp,classifier_tribe[[i]]$imputed[5,])
}
#NOTE(review): classifier_tribe holds one entry per (specimen, scale)
#combination (2 per tooth with scale in {TRUE, FALSE}), so this length-7
#rowname vector may not match nrow(tab_imp) -- confirm before running.
row.names(tab_imp) <- paste0("IMG",names(tooth_type_list))
xtable(tab_imp, caption = "here")
#Species classifier
#Partial-matching species classifier.
tab_species <- classifier_species[[1]]$partial_matching[5,]
for (i in 2:length(classifier_species)){
tab_species <- rbind(tab_species,classifier_species[[i]]$partial_matching[5,])
}
row.names(tab_species) <- names(tooth_type_list)
#Drop species columns that never received any votes.
keep <- apply(tab_species,2,function(x){sum(x)>0})
tab_species[,keep]
library(xtable)
xtable(tab_species, caption = "here")
#Imputation-based species classifier.
tab_species_imp <- classifier_species[[1]]$imputed[5,]
for (i in 2:length(classifier_species)){
tab_species_imp <- rbind(tab_species_imp,classifier_species[[i]]$imputed[5,])
}
row.names(tab_species_imp) <- paste0("IMG",names(tooth_type_list))
keep <- apply(tab_species_imp,2,function(x){sum(x)>0})
tab_species_imp[,keep]
xtable(tab_species_imp, caption = "here")
#Plots of completed shapes
png("/Users/gregorymatthews/Dropbox/shapeanalysisgit/IMG4825_imputed.png", h = 5, w = 8, res = 300, units = "in")
plot(t(classifier_imputations[[1]]$imputed_partial_shape$imputed[[1]]), col = "white", type = "l", xlim = c(-450,250), ylim = c(-250, 250), xlab = "", ylab = "", main = "IMG4825 - LM1")
for (i in 1:length(classifier_imputations[[1]]$imputed_partial_shape$imputed)){
points(t(classifier_imputations[[1]]$imputed_partial_shape$imputed[[i]]), col = "red", type = "l")
}
points(t(classifier_imputations[[1]]$imputed_partial_shape$partial_obs), col = "black", type = "l")
dev.off()
#IMG2
plot(t(classifier_imputations[[2]]$imputed_partial_shape$imputed[[1]]), col = "white", type = "l", xlim = c(-450,660), ylim = c(-250, 250), xlab = "", ylab = "", main = paste0("IMG",names(tooth_type_list)[2]," - LM3"))
for (i in 1:length(classifier_imputations[[2]]$imputed_partial_shape$imputed)){
points(t(classifier_imputations[[2]]$imputed_partial_shape$imputed[[i]]), col = "red", type = "l")
}
points(t(classifier_imputations[[2]]$imputed_partial_shape$partial_obs), col = "black", type = "l")
#IMG3
plot(t(classifier_imputations[[3]]$imputed_partial_shape$imputed[[1]]), col = "white", type = "l", xlim = c(-450,500), ylim = c(-250, 250), xlab = "", ylab = "", main = paste0("IMG",names(tooth_type_list)[3]," - LM3"))
for (i in 1:length(classifier_imputations[[3]]$imputed_partial_shape$imputed)){
points(t(classifier_imputations[[3]]$imputed_partial_shape$imputed[[i]]), col = "red", type = "l")
}
points(t(classifier_imputations[[3]]$imputed_partial_shape$partial_obs), col = "black", type = "l")
#IMG4
plot(t(classifier_imputations[[4]]$imputed_partial_shape$imputed[[1]]), col = "white", type = "l", xlim = c(-1050,500), ylim = c(-250, 450), xlab = "", ylab = "", main = paste0("IMG",names(tooth_type_list)[4]," - LM3"))
for (i in 1:length(classifier_imputations[[4]]$imputed_partial_shape$imputed)){
points(t(classifier_imputations[[4]]$imputed_partial_shape$imputed[[i]]), col = "red", type = "l")
}
points(t(classifier_imputations[[4]]$imputed_partial_shape$partial_obs), col = "black", type = "l")
#IMG5
plot(t(classifier_imputations[[5]]$imputed_partial_shape$imputed[[1]]), col = "white", type = "l", xlim = c(-250,600), ylim = c(-250, 250), xlab = "", ylab = "", main = paste0("IMG",names(tooth_type_list)[5]," - LM3"))
for (i in 1:length(classifier_imputations[[5]]$imputed_partial_shape$imputed)){
points(t(classifier_imputations[[5]]$imputed_partial_shape$imputed[[i]]), col = "red", type = "l")
}
points(t(classifier_imputations[[5]]$imputed_partial_shape$partial_obs), col = "black", type = "l")
|
#Pipeline Part I: This part takes in the raw data and outputs the quality profile. The user should
#use this information to decide on filtering parameters. Filtering is the first step of Part II.
library(optparse)
library(dada2)

####This is the flag function####
# -f / --file : path to the working directory holding the raw fastq files.
option_list <- list(
  make_option(c("-f", "--file"), type = "character", default = NULL,
              help = "Path to working directory folder", metavar = "character")
)
opt_parser <- OptionParser(option_list = option_list)
opt <- parse_args(opt_parser)
if (is.null(opt$file)) {
  print_help(opt_parser)
  stop("At least one argument must be supplied (input file).\n", call. = FALSE)
}

# Record the start time so total run time can be reported at the end.
now <- Sys.time()
now

####Getting Set Up####
# BUG FIX: the script previously took the path from
# commandArgs(trailingOnly = TRUE)[1], which is the literal flag "-f" when the
# script is invoked as documented. Use the value optparse parsed and validated.
path <- opt$file
setwd(path)
print("Here is what we're working with: ")
#this lets us see the contents of the working directory to make sure we're looking at the right
#files and they're all reading in
list.files(path)

# All outputs (quality-profile plots) are written to Output/.
dir.create("Output")
print("An Output folder has been created in the working directory.")

# Forward and reverse fastq filenames have format: SAMPLENAME_R1_001.fastq and SAMPLENAME_R2_001.fastq
fnFs <- sort(list.files(path, pattern = "R1_001.fastq", full.names = TRUE))
fnRs <- sort(list.files(path, pattern = "R2_001.fastq", full.names = TRUE))
# Sample name = everything before the first underscore in the file name.
# (vapply with a declared return type instead of sapply, whose output type
# silently changes on empty/ragged input.)
sample.names <- vapply(strsplit(basename(fnFs), "_"), function(parts) parts[[1]], character(1))
#now we have all our forward and reverse files grouped

###Next we take a look at quality###
print("Assessing read quality.")
png(filename = "Output/fqual.png")
plotQualityProfile(fnFs) #some trash reads but most are alright
#plots the quality profiles for all forward reads and outputs to Output
dev.off()
print("Forward read quality assessed.")
png(filename = "Output/rqual.png")
plotQualityProfile(fnRs)
#plots the quality profiles for all reverse reads. Outputs to Output
dev.off()
print("Reverse read quality assessed.")
print("The quality profiles for the forward and reverse reads have been saved in Output. Use this quality information to choose parameters for filtering and trimming.")

# Elapsed-time marker: lets us know the run time for part I.
now <- Sys.time()
now
|
/Pipeline/part1.R
|
no_license
|
cprintzis/hot_METS
|
R
| false
| false
| 2,260
|
r
|
#Pipeline Part I: This part takes in the raw data and outputs the quality profile. The user should
#use this information to decide on filtering parameters. Filtering is the first step of Part II.
library(optparse)
library(dada2)

####This is the flag function####
# -f / --file : path to the working directory holding the raw fastq files.
option_list <- list(
  make_option(c("-f", "--file"), type = "character", default = NULL,
              help = "Path to working directory folder", metavar = "character")
)
opt_parser <- OptionParser(option_list = option_list)
opt <- parse_args(opt_parser)
if (is.null(opt$file)) {
  print_help(opt_parser)
  stop("At least one argument must be supplied (input file).\n", call. = FALSE)
}

# Record the start time so total run time can be reported at the end.
now <- Sys.time()
now

####Getting Set Up####
# BUG FIX: the script previously took the path from
# commandArgs(trailingOnly = TRUE)[1], which is the literal flag "-f" when the
# script is invoked as documented. Use the value optparse parsed and validated.
path <- opt$file
setwd(path)
print("Here is what we're working with: ")
#this lets us see the contents of the working directory to make sure we're looking at the right
#files and they're all reading in
list.files(path)

# All outputs (quality-profile plots) are written to Output/.
dir.create("Output")
print("An Output folder has been created in the working directory.")

# Forward and reverse fastq filenames have format: SAMPLENAME_R1_001.fastq and SAMPLENAME_R2_001.fastq
fnFs <- sort(list.files(path, pattern = "R1_001.fastq", full.names = TRUE))
fnRs <- sort(list.files(path, pattern = "R2_001.fastq", full.names = TRUE))
# Sample name = everything before the first underscore in the file name.
# (vapply with a declared return type instead of sapply, whose output type
# silently changes on empty/ragged input.)
sample.names <- vapply(strsplit(basename(fnFs), "_"), function(parts) parts[[1]], character(1))
#now we have all our forward and reverse files grouped

###Next we take a look at quality###
print("Assessing read quality.")
png(filename = "Output/fqual.png")
plotQualityProfile(fnFs) #some trash reads but most are alright
#plots the quality profiles for all forward reads and outputs to Output
dev.off()
print("Forward read quality assessed.")
png(filename = "Output/rqual.png")
plotQualityProfile(fnRs)
#plots the quality profiles for all reverse reads. Outputs to Output
dev.off()
print("Reverse read quality assessed.")
print("The quality profiles for the forward and reverse reads have been saved in Output. Use this quality information to choose parameters for filtering and trimming.")

# Elapsed-time marker: lets us know the run time for part I.
now <- Sys.time()
now
|
#' Helper for creating small images for each rule
library(dynbenchmark)
library(esfiji)
# Set the dynbenchmark experiment context so raw_file()/result_file() below
# resolve paths relative to the 02-metric_conformity experiment directory.
experiment("02-metrics/02-metric_conformity")
svg_location <- raw_file("perturbations.svg")
folder <- result_file("images")
# dir.create() warns (but does not stop) if the folder already exists.
dir.create(folder)
# Split the grouped SVG into one image per group/rule, written into `folder`.
svg_groups_split(svg_location, folder = folder)
|
/scripts/02-metrics/02-metric_conformity/helper-create_perturbation_images.R
|
permissive
|
dynverse/dynbenchmark
|
R
| false
| false
| 283
|
r
|
#' Helper for creating small images for each rule
library(dynbenchmark)
library(esfiji)
experiment("02-metrics/02-metric_conformity")
svg_location <- raw_file("perturbations.svg")
folder <- result_file("images")
dir.create(folder)
svg_groups_split(svg_location, folder = folder)
|
#' Compute a single sparse-PCA loading by alternating u/v updates.
#'
#' Starting from column `k` of `V_init`, alternately updates the left vector
#' `u` (optionally orthogonalized against previously found loadings) and the
#' sparse right vector `v` until `v` changes by less than `tolerance` in
#' max-norm, or `max_iter` is reached.
#'
#' @param X data matrix.
#' @param c penalty parameter forwarded to update_v().
#' @param k index of the loading being computed (1-based).
#' @param V_init matrix whose column `k` seeds the iteration.
#' @param U_result matrix of previously computed u vectors; columns
#'   1..(k-1) are used when `orth` is TRUE.
#' @param orth logical; orthogonalize u against earlier loadings?
#' @param tolerance convergence threshold on max(abs(v_new - v_old)).
#' @param max_iter iteration cap (the loop runs at most max_iter - 1 times).
#' @return list(u = final u, d = crossprod(u, X) %*% v, v = final v).
compute_single_loading = function(X, c, k, V_init, U_result, orth, tolerance, max_iter) {
  v_old = V_init[, k, drop = TRUE]
  # Seed diff above the tolerance so the loop body runs at least once.
  diff = tolerance * 10
  iter = 1
  # Scalar conditions: use short-circuit && rather than elementwise &.
  while ((iter < max_iter) && (diff > tolerance)) {
    if (orth && (k > 1)) {
      # Project out the k-1 previously found directions before updating u.
      u_new = update_orthogonal_u(X, v_old, U_result[, 1:(k - 1), drop = FALSE])
    } else {
      u_new = update_u(X, v_old)
    }
    v_new = update_v(X, u_new, c)
    diff = max(abs(v_new - v_old))
    v_old = v_new
    iter = iter + 1
  }
  # d depends only on the final u/v, so compute it once after convergence
  # instead of on every iteration (same value as the original's last pass).
  d = crossprod(u_new, X) %*% v_new
  return(list(u = u_new, d = d, v = v_new))
}
|
/R/compute_single_loading.R
|
no_license
|
keshav-motwani/sparsePCA
|
R
| false
| false
| 572
|
r
|
#' Compute a single sparse-PCA loading by alternating u/v updates.
#'
#' Starting from column `k` of `V_init`, alternately updates the left vector
#' `u` (optionally orthogonalized against previously found loadings) and the
#' sparse right vector `v` until `v` changes by less than `tolerance` in
#' max-norm, or `max_iter` is reached.
#'
#' @param X data matrix.
#' @param c penalty parameter forwarded to update_v().
#' @param k index of the loading being computed (1-based).
#' @param V_init matrix whose column `k` seeds the iteration.
#' @param U_result matrix of previously computed u vectors; columns
#'   1..(k-1) are used when `orth` is TRUE.
#' @param orth logical; orthogonalize u against earlier loadings?
#' @param tolerance convergence threshold on max(abs(v_new - v_old)).
#' @param max_iter iteration cap (the loop runs at most max_iter - 1 times).
#' @return list(u = final u, d = crossprod(u, X) %*% v, v = final v).
compute_single_loading = function(X, c, k, V_init, U_result, orth, tolerance, max_iter) {
  v_old = V_init[, k, drop = TRUE]
  # Seed diff above the tolerance so the loop body runs at least once.
  diff = tolerance * 10
  iter = 1
  # Scalar conditions: use short-circuit && rather than elementwise &.
  while ((iter < max_iter) && (diff > tolerance)) {
    if (orth && (k > 1)) {
      # Project out the k-1 previously found directions before updating u.
      u_new = update_orthogonal_u(X, v_old, U_result[, 1:(k - 1), drop = FALSE])
    } else {
      u_new = update_u(X, v_old)
    }
    v_new = update_v(X, u_new, c)
    diff = max(abs(v_new - v_old))
    v_old = v_new
    iter = iter + 1
  }
  # d depends only on the final u/v, so compute it once after convergence
  # instead of on every iteration (same value as the original's last pass).
  d = crossprod(u_new, X) %*% v_new
  return(list(u = u_new, d = d, v = v_new))
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/testStatistics.R
\name{A4TempStatistic}
\alias{A4TempStatistic}
\title{generate the randomization statistic based on A4}
\usage{
A4TempStatistic(theData, S)
}
\arguments{
\item{theData}{the data}
}
\value{
ranStat a function of random group transformation
}
\description{
generate the randomization statistic based on A4
}
|
/man/A4TempStatistic.Rd
|
no_license
|
kfeng123/randomizationTest
|
R
| false
| true
| 401
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/testStatistics.R
\name{A4TempStatistic}
\alias{A4TempStatistic}
\title{generate the randomization statistic based on A4}
\usage{
A4TempStatistic(theData, S)
}
\arguments{
\item{theData}{the data}
}
\value{
ranStat a function of random group transformation
}
\description{
generate the randomization statistic based on A4
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/api_tags_queries.R
\name{query_tag_samples}
\alias{query_tag_samples}
\title{Tag Samples}
\usage{
query_tag_samples(
datasets,
parent_tags,
tags = NA,
features = NA,
feature_classes = NA,
samples = NA,
...
)
}
\arguments{
\item{datasets}{A vector of strings}
\item{parent_tags}{A vector of strings}
\item{tags}{A vector of strings}
\item{features}{A vector of strings}
\item{feature_classes}{A vector of strings}
\item{samples}{A vector of strings}
\item{...}{Arguments to create_result_from_api_query}
}
\description{
Tag Samples
}
|
/man/query_tag_samples.Rd
|
permissive
|
jaybee84/iatlas.api.client
|
R
| false
| true
| 631
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/api_tags_queries.R
\name{query_tag_samples}
\alias{query_tag_samples}
\title{Tag Samples}
\usage{
query_tag_samples(
datasets,
parent_tags,
tags = NA,
features = NA,
feature_classes = NA,
samples = NA,
...
)
}
\arguments{
\item{datasets}{A vector of strings}
\item{parent_tags}{A vector of strings}
\item{tags}{A vector of strings}
\item{features}{A vector of strings}
\item{feature_classes}{A vector of strings}
\item{samples}{A vector of strings}
\item{...}{Arguments to create_result_from_api_query}
}
\description{
Tag Samples
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/big.municipios.R
\docType{data}
\name{big.municipios}
\alias{big.municipios}
\title{Big Municipios not part of a Metro Area}
\format{
A data frame with 66 observations on the following 4 variables.
}
\usage{
big.municipios
}
\description{
This dataset contains all municipios which were not part of a metro area in 2010 but had
a larger population than the smallest metro area (> 110,000)
\url{http://www.conapo.gob.mx/es/CONAPO/Delimitacion_de_Zonas_Metropolitanas}
}
\section{Variables}{
\itemize{
\item{\code{state_code}}{a numeric vector}
\item{\code{mun_code}}{a numeric vector}
\item{\code{population}}{a numeric vector}
\item{\code{name}}{a character vector}
}
}
\examples{
head(big.municipios)
}
|
/man/big.municipios.Rd
|
permissive
|
diegovalle/mxmortalitydb
|
R
| false
| true
| 785
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/big.municipios.R
\docType{data}
\name{big.municipios}
\alias{big.municipios}
\title{Big Municipios not part of a Metro Area}
\format{
A data frame with 66 observations on the following 4 variables.
}
\usage{
big.municipios
}
\description{
This dataset contains all municipios which were not part of a metro area in 2010 but had
a larger population than the smallest metro area (> 110,000)
\url{http://www.conapo.gob.mx/es/CONAPO/Delimitacion_de_Zonas_Metropolitanas}
}
\section{Variables}{
\itemize{
\item{\code{state_code}}{a numeric vector}
\item{\code{mun_code}}{a numeric vector}
\item{\code{population}}{a numeric vector}
\item{\code{name}}{a character vector}
}
}
\examples{
head(big.municipios)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/writeDABOM.R
\name{writeDABOM}
\alias{writeDABOM}
\title{Write DABOM JAGS model}
\usage{
writeDABOM(
file_name = NULL,
parent_child = NULL,
configuration = NULL,
time_varying = FALSE
)
}
\arguments{
\item{file_name}{name (with file path) to save the model as}
\item{parent_child}{data frame with at least `parent` and `child` columns.
Can be created with `buildParentChild()` function in the `PITcleanr` package.}
\item{configuration}{is a data frame which assigns node names to unique SiteID, AntennaID, and
site configuration ID combinations. One example can be built with the function `buildConfig`}
\item{time_varying}{Should the initial movement probabilities be time-varying? Default value is `FALSE`}
}
\description{
This writes the overall JAGS model for a generic DABOM as a text file. It can then be modified depending on the observations for a particular valid tag list.
}
\examples{
writeDABOM()
}
\author{
Kevin See
}
|
/man/writeDABOM.Rd
|
permissive
|
KevinSee/DABOM
|
R
| false
| true
| 1,020
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/writeDABOM.R
\name{writeDABOM}
\alias{writeDABOM}
\title{Write DABOM JAGS model}
\usage{
writeDABOM(
file_name = NULL,
parent_child = NULL,
configuration = NULL,
time_varying = FALSE
)
}
\arguments{
\item{file_name}{name (with file path) to save the model as}
\item{parent_child}{data frame with at least `parent` and `child` columns.
Can be created with `buildParentChild()` function in the `PITcleanr` package.}
\item{configuration}{is a data frame which assigns node names to unique SiteID, AntennaID, and
site configuration ID combinations. One example can be built with the function `buildConfig`}
\item{time_varying}{Should the initial movement probabilities be time-varying? Default value is `FALSE`}
}
\description{
This writes the overall JAGS model for a generic DABOM as a text file. It can then be modified depending on the observations for a particular valid tag list.
}
\examples{
writeDABOM()
}
\author{
Kevin See
}
|
library(glmnet)

# Load the ReliefF-filtered training set.
# (header = TRUE spelled out: the original relied on partial argument
# matching via `head` and the reassignable alias T.)
mydata = read.table("../../../../TrainingSet/FullSet/ReliefF/autonomic_ganglia.csv",
                    header = TRUE, sep = ",")

# Response is column 1; predictors are columns 4..ncol.
# NOTE(review): assumes columns 2-3 are identifiers -- confirm against the CSV.
x = as.matrix(mydata[, 4:ncol(mydata)])
y = as.matrix(mydata[, 1])

# Fix the CV fold assignment for reproducibility.
set.seed(123)

# Elastic net (alpha = 0.5), 10-fold CV, Gaussian response, MSE loss,
# predictors used as-is (standardize = FALSE).
glm = cv.glmnet(x, y, nfolds = 10, type.measure = "mse", alpha = 0.5,
                family = "gaussian", standardize = FALSE)

# Append the fitted coefficient-path summary to the results text file.
sink('./autonomic_ganglia_058.txt', append = TRUE)
print(glm$glmnet.fit)
sink()
|
/Model/EN/ReliefF/autonomic_ganglia/autonomic_ganglia_058.R
|
no_license
|
esbgkannan/QSMART
|
R
| false
| false
| 368
|
r
|
library(glmnet)

# Load the ReliefF-filtered training set.
# (header = TRUE spelled out: the original relied on partial argument
# matching via `head` and the reassignable alias T.)
mydata = read.table("../../../../TrainingSet/FullSet/ReliefF/autonomic_ganglia.csv",
                    header = TRUE, sep = ",")

# Response is column 1; predictors are columns 4..ncol.
# NOTE(review): assumes columns 2-3 are identifiers -- confirm against the CSV.
x = as.matrix(mydata[, 4:ncol(mydata)])
y = as.matrix(mydata[, 1])

# Fix the CV fold assignment for reproducibility.
set.seed(123)

# Elastic net (alpha = 0.5), 10-fold CV, Gaussian response, MSE loss,
# predictors used as-is (standardize = FALSE).
glm = cv.glmnet(x, y, nfolds = 10, type.measure = "mse", alpha = 0.5,
                family = "gaussian", standardize = FALSE)

# Append the fitted coefficient-path summary to the results text file.
sink('./autonomic_ganglia_058.txt', append = TRUE)
print(glm$glmnet.fit)
sink()
|
## Chandler Lutz
## Questions/comments: cl.eco@cbs.dk
## $Revisions: 1.0.0 $Date: 2019-08-06

##Clear the workspace
##Delete all objects and detach packages
## NOTE(review): rm(list = ls()) in scripts is generally discouraged; kept
## here to preserve the original behavior of this batch runner.
rm(list = ls())

# Collect all *.R scripts in the R/ directory in one call: list.files()
# applies `pattern` to the file names itself, so the original's separate
# grepl() filtering pass is unnecessary.
R.files <- list.files("R", pattern = "\\.R$", full.names = TRUE)

# Source a single script, echoing its path first so progress is visible.
# chdir = TRUE makes relative paths inside each script resolve correctly.
f_run <- function(file) {
  print(file)
  source(file, chdir = TRUE)
  return(invisible())
}

lapply(R.files, f_run)
|
/CFPL_Border/_RunAll_.R
|
no_license
|
Allisterh/Replication_CFPLCode
|
R
| false
| false
| 410
|
r
|
## Chandler Lutz
## Questions/comments: cl.eco@cbs.dk
## $Revisions: 1.0.0 $Date: 2019-08-06

##Clear the workspace
##Delete all objects and detach packages
## NOTE(review): rm(list = ls()) in scripts is generally discouraged; kept
## here to preserve the original behavior of this batch runner.
rm(list = ls())

# Collect all *.R scripts in the R/ directory in one call: list.files()
# applies `pattern` to the file names itself, so the original's separate
# grepl() filtering pass is unnecessary.
R.files <- list.files("R", pattern = "\\.R$", full.names = TRUE)

# Source a single script, echoing its path first so progress is visible.
# chdir = TRUE makes relative paths inside each script resolve correctly.
f_run <- function(file) {
  print(file)
  source(file, chdir = TRUE)
  return(invisible())
}

lapply(R.files, f_run)
|
#Get data from particular source which is mentioned in project
data <- read.csv("G:/Data Scientist/Coursera/4. Exploratory Data analysis/project 1/household_power_consumption.txt", sep=";")
#convert date column into character format in order to transform into date format
data$Date <- as.character(data$Date)
data$Date <- as.Date(data$Date,format = "%d/%m/%Y")
#select data which is required to be captured and processing
#(keep only the two target days: 2007-02-01 and 2007-02-02)
subdate <- subset(data, Date=="2007-02-01" | Date == "2007-02-02")
#convert Active power column into character format in order to transform into numeric format
#NOTE(review): this dataset encodes missing values as "?"; as.numeric() turns
#those into NA with a coercion warning. Consider na.strings = "?" in read.csv().
subdate$Global_active_power <- as.character(subdate$Global_active_power)
subdate$Global_active_power <- as.numeric(subdate$Global_active_power)
#ploting result: histogram of Global Active Power for the two selected days
hist(subdate$Global_active_power,main = "Global Active Power",xlab = "Global Active Power(Kilowatts)",col="red")
|
/plot1.r
|
no_license
|
Jineshpanchal/ExData_Plotting1
|
R
| false
| false
| 864
|
r
|
#Get data from particular source which is mentioned in project
data <- read.csv("G:/Data Scientist/Coursera/4. Exploratory Data analysis/project 1/household_power_consumption.txt", sep=";")
#convert date column into character format in order to transform into date format
data$Date <- as.character(data$Date)
data$Date <- as.Date(data$Date,format = "%d/%m/%Y")
#select data which is required to be captured and processing
#(keep only the two target days: 2007-02-01 and 2007-02-02)
subdate <- subset(data, Date=="2007-02-01" | Date == "2007-02-02")
#convert Active power column into character format in order to transform into numeric format
#NOTE(review): this dataset encodes missing values as "?"; as.numeric() turns
#those into NA with a coercion warning. Consider na.strings = "?" in read.csv().
subdate$Global_active_power <- as.character(subdate$Global_active_power)
subdate$Global_active_power <- as.numeric(subdate$Global_active_power)
#ploting result: histogram of Global Active Power for the two selected days
hist(subdate$Global_active_power,main = "Global Active Power",xlab = "Global Active Power(Kilowatts)",col="red")
|
dir <- "/home/julesy/workspace/ttp-results/"
file <- "IEEE.csv"

# Read the raw benchmark results and round to 4 decimals for stable tables.
csv <- read.csv(paste0(dir, file), sep = ",")
csv$result <- round(csv$result, 4)

# Re-level `problem` so its levels follow first-appearance order in the file.
# (Equivalent to the original levels(f)[unique(f)] indexing trick, but
# explicit about the intent.)
csv$problem <- factor(csv$problem, levels = unique(as.character(csv$problem)))

# Per-problem minimum and maximum, used for min-max normalization.
df_min <- aggregate(csv$result, by = list(csv$problem), FUN = min)
colnames(df_min) <- c("problem", "value")
df_max <- aggregate(csv$result, by = list(csv$problem), FUN = max)
colnames(df_max) <- c("problem", "value")

# Vectorized lookup via match() instead of a per-row apply() over the data
# frame: apply() coerces the frame to a character matrix and scans df_min/
# df_max once per row (O(n*m)); match() does the same join in one pass.
csv$min <- df_min$value[match(csv$problem, df_min$problem)]
csv$max <- df_max$value[match(csv$problem, df_max$problem)]
csv$norm <- (csv$result - csv$min) / (csv$max - csv$min)

library(reshape)
# Pivot: one row per problem, one column per algorithm, median in each cell;
# once on raw results and once on the normalized values.
pivot <- cast(csv, problem ~ algorithm, fun.aggregate = median, value = "result")
pivot_norm <- cast(csv, problem ~ algorithm, fun.aggregate = median, value = "norm")
write.csv(pivot, file = paste0(dir, substr(file, 1, nchar(file) - 4), "_pivot.csv"))
write.csv(pivot_norm, file = paste0(dir, substr(file, 1, nchar(file) - 4), "_pivot_norm.csv"))

library(ggplot2)
# Median normalized value per (problem, algorithm) pair for the scatter plot.
agg_norm <- aggregate(csv$norm, by = list(csv$problem, csv$algorithm), FUN = median)
colnames(agg_norm) <- c("problem", "algorithm", "norm")
p <- ggplot(agg_norm, aes(x = problem, y = norm, shape = algorithm, color = algorithm)) + geom_point(size = 3)
p <- p + theme(axis.text.x = element_text(angle = 90, hjust = 1)) + scale_shape_manual(values = c(15, 16, 17, 18, 4, 23, 6, 7, 8))
p <- p + xlab("problem") + ylab("normalized values")
print(p)
|
/scripts/pivot_table.R
|
no_license
|
blankjul/ttp-java
|
R
| false
| false
| 1,449
|
r
|
dir <- "/home/julesy/workspace/ttp-results/"
file <- "IEEE.csv"

# Read the raw benchmark results and round to 4 decimals for stable tables.
csv <- read.csv(paste0(dir, file), sep = ",")
csv$result <- round(csv$result, 4)

# Re-level `problem` so its levels follow first-appearance order in the file.
# (Equivalent to the original levels(f)[unique(f)] indexing trick, but
# explicit about the intent.)
csv$problem <- factor(csv$problem, levels = unique(as.character(csv$problem)))

# Per-problem minimum and maximum, used for min-max normalization.
df_min <- aggregate(csv$result, by = list(csv$problem), FUN = min)
colnames(df_min) <- c("problem", "value")
df_max <- aggregate(csv$result, by = list(csv$problem), FUN = max)
colnames(df_max) <- c("problem", "value")

# Vectorized lookup via match() instead of a per-row apply() over the data
# frame: apply() coerces the frame to a character matrix and scans df_min/
# df_max once per row (O(n*m)); match() does the same join in one pass.
csv$min <- df_min$value[match(csv$problem, df_min$problem)]
csv$max <- df_max$value[match(csv$problem, df_max$problem)]
csv$norm <- (csv$result - csv$min) / (csv$max - csv$min)

library(reshape)
# Pivot: one row per problem, one column per algorithm, median in each cell;
# once on raw results and once on the normalized values.
pivot <- cast(csv, problem ~ algorithm, fun.aggregate = median, value = "result")
pivot_norm <- cast(csv, problem ~ algorithm, fun.aggregate = median, value = "norm")
write.csv(pivot, file = paste0(dir, substr(file, 1, nchar(file) - 4), "_pivot.csv"))
write.csv(pivot_norm, file = paste0(dir, substr(file, 1, nchar(file) - 4), "_pivot_norm.csv"))

library(ggplot2)
# Median normalized value per (problem, algorithm) pair for the scatter plot.
agg_norm <- aggregate(csv$norm, by = list(csv$problem, csv$algorithm), FUN = median)
colnames(agg_norm) <- c("problem", "algorithm", "norm")
p <- ggplot(agg_norm, aes(x = problem, y = norm, shape = algorithm, color = algorithm)) + geom_point(size = 3)
p <- p + theme(axis.text.x = element_text(angle = 90, hjust = 1)) + scale_shape_manual(values = c(15, 16, 17, 18, 4, 23, 6, 7, 8))
p <- p + xlab("problem") + ylab("normalized values")
print(p)
|
# Unit tests for the geovctrs_rect vctrs class: construction/printing,
# combination via c()/vec_c(), and casting/coercion rules.
test_that("geo_rect class works", {
  rect <- geo_rect(xmin = 0, ymin = 0, xmax = 1, ymax = 1)
  # The class name should appear when printed bare and inside a tibble column.
  expect_output(print(rect), "geovctrs_rect")
  expect_output(print(tibble(rect)), "rect")
  expect_is(rect, "geovctrs_rect")
  expect_true(is_geovctrs_rect(rect))
  expect_true(vec_is(rect))
})
# Combining rects with wkt/wkb should promote to the richer geometry type;
# combining with plain numerics must fail with a vctrs incompatibility error.
test_that("geo_rect c() works", {
  rect <- geo_rect(xmin = 0:5, ymin = 0:5, xmax = 1:6, ymax = 1:6)
  expect_is(c(rect, geo_wkt("POINT (30 10)")), "geovctrs_wkt")
  expect_is(c(rect, as_geo_wkb(geo_wkt("POINT (30 10)"))), "geovctrs_wkb")
  expect_is(c(rect, rect), "geovctrs_rect")
  expect_error(vec_c(5, rect), class = "vctrs_error_incompatible_type")
})
# Casting a rect to data.frame/tibble should expose one column per field,
# including the default srid of 0.
test_that("geo_rect casting works", {
  rect <- geo_rect(xmin = 0:5, ymin = 0:5, xmax = 1:6, ymax = 1:6)
  expect_equal(
    as.data.frame(rect),
    data.frame(xmin = 0:5, ymin = 0:5, xmax = 1:6, ymax = 1:6, srid = 0)
  )
  expect_equal(
    tibble::as_tibble(rect),
    tibble(xmin = 0:5, ymin = 0:5, xmax = 1:6, ymax = 1:6, srid = 0)
  )
})
# NOTE(review): "coersion" below is a typo for "coercion" in the test
# description string (left unchanged here; fix belongs in a code change).
test_that("coersion to rect works", {
  # self-cast
  expect_identical(vec_cast(geo_rect(), geo_rect()), geo_rect())
  expect_identical(as_geo_rect(geo_rect()), geo_rect())
  # error cast
  expect_error(vec_cast(394, geo_rect()), class = "vctrs_error_incompatible_cast")
})
|
/tests/testthat/test-geo-rect.R
|
no_license
|
mdsumner/geovctrs
|
R
| false
| false
| 1,275
|
r
|
# Unit tests for the geovctrs_rect vctrs class: construction/printing,
# combination via c()/vec_c(), and casting/coercion rules.
test_that("geo_rect class works", {
  rect <- geo_rect(xmin = 0, ymin = 0, xmax = 1, ymax = 1)
  # The class name should appear when printed bare and inside a tibble column.
  expect_output(print(rect), "geovctrs_rect")
  expect_output(print(tibble(rect)), "rect")
  expect_is(rect, "geovctrs_rect")
  expect_true(is_geovctrs_rect(rect))
  expect_true(vec_is(rect))
})
# Combining rects with wkt/wkb should promote to the richer geometry type;
# combining with plain numerics must fail with a vctrs incompatibility error.
test_that("geo_rect c() works", {
  rect <- geo_rect(xmin = 0:5, ymin = 0:5, xmax = 1:6, ymax = 1:6)
  expect_is(c(rect, geo_wkt("POINT (30 10)")), "geovctrs_wkt")
  expect_is(c(rect, as_geo_wkb(geo_wkt("POINT (30 10)"))), "geovctrs_wkb")
  expect_is(c(rect, rect), "geovctrs_rect")
  expect_error(vec_c(5, rect), class = "vctrs_error_incompatible_type")
})
# Casting a rect to data.frame/tibble should expose one column per field,
# including the default srid of 0.
test_that("geo_rect casting works", {
  rect <- geo_rect(xmin = 0:5, ymin = 0:5, xmax = 1:6, ymax = 1:6)
  expect_equal(
    as.data.frame(rect),
    data.frame(xmin = 0:5, ymin = 0:5, xmax = 1:6, ymax = 1:6, srid = 0)
  )
  expect_equal(
    tibble::as_tibble(rect),
    tibble(xmin = 0:5, ymin = 0:5, xmax = 1:6, ymax = 1:6, srid = 0)
  )
})
# NOTE(review): "coersion" below is a typo for "coercion" in the test
# description string (left unchanged here; fix belongs in a code change).
test_that("coersion to rect works", {
  # self-cast
  expect_identical(vec_cast(geo_rect(), geo_rect()), geo_rect())
  expect_identical(as_geo_rect(geo_rect()), geo_rect())
  # error cast
  expect_error(vec_cast(394, geo_rect()), class = "vctrs_error_incompatible_cast")
})
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model.interface.ranger.r
\name{model.interface.ranger-class (ranger)}
\alias{model.interface.ranger-class (ranger)}
\alias{model.interface.ranger.class}
\title{(Internal) model.interface class for ranger}
\description{
This reference class contains methods for \code{\link[ranger]{ranger}} in
\emph{ranger} package.
}
\section{Super class}{
\code{\link[model.adapter:model.interface]{model.adapter::model.interface}} -> \code{model.interface.ranger}
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-model.interface.ranger-predict}{\code{model.interface.ranger.class$predict()}}
\item \href{#method-model.interface.ranger-get.formula}{\code{model.interface.ranger.class$get.formula()}}
\item \href{#method-model.interface.ranger-clone}{\code{model.interface.ranger.class$clone()}}
}
}
\if{html}{\out{
<details><summary>Inherited methods</summary>
<ul>
<li><span class="pkg-link" data-pkg="model.adapter" data-topic="model.interface" data-id="adjust.offset"><a href='../../model.adapter/html/model.interface.html#method-model.interface-adjust.offset'><code>model.adapter::model.interface$adjust.offset()</code></a></span></li>
<li><span class="pkg-link" data-pkg="model.adapter" data-topic="model.interface" data-id="expand.formula"><a href='../../model.adapter/html/model.interface.html#method-model.interface-expand.formula'><code>model.adapter::model.interface$expand.formula()</code></a></span></li>
<li><span class="pkg-link" data-pkg="model.adapter" data-topic="model.interface" data-id="get.call"><a href='../../model.adapter/html/model.interface.html#method-model.interface-get.call'><code>model.adapter::model.interface$get.call()</code></a></span></li>
<li><span class="pkg-link" data-pkg="model.adapter" data-topic="model.interface" data-id="get.data"><a href='../../model.adapter/html/model.interface.html#method-model.interface-get.data'><code>model.adapter::model.interface$get.data()</code></a></span></li>
<li><span class="pkg-link" data-pkg="model.adapter" data-topic="model.interface" data-id="get.family"><a href='../../model.adapter/html/model.interface.html#method-model.interface-get.family'><code>model.adapter::model.interface$get.family()</code></a></span></li>
<li><span class="pkg-link" data-pkg="model.adapter" data-topic="model.interface" data-id="get.link"><a href='../../model.adapter/html/model.interface.html#method-model.interface-get.link'><code>model.adapter::model.interface$get.link()</code></a></span></li>
<li><span class="pkg-link" data-pkg="model.adapter" data-topic="model.interface" data-id="get.linkinv"><a href='../../model.adapter/html/model.interface.html#method-model.interface-get.linkinv'><code>model.adapter::model.interface$get.linkinv()</code></a></span></li>
<li><span class="pkg-link" data-pkg="model.adapter" data-topic="model.interface" data-id="get.model.type"><a href='../../model.adapter/html/model.interface.html#method-model.interface-get.model.type'><code>model.adapter::model.interface$get.model.type()</code></a></span></li>
<li><span class="pkg-link" data-pkg="model.adapter" data-topic="model.interface" data-id="get.offset.names"><a href='../../model.adapter/html/model.interface.html#method-model.interface-get.offset.names'><code>model.adapter::model.interface$get.offset.names()</code></a></span></li>
<li><span class="pkg-link" data-pkg="model.adapter" data-topic="model.interface" data-id="initialize"><a href='../../model.adapter/html/model.interface.html#method-model.interface-initialize'><code>model.adapter::model.interface$initialize()</code></a></span></li>
</ul>
</details>
}}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-model.interface.ranger-predict"></a>}}
\if{latex}{\out{\hypertarget{method-model.interface.ranger-predict}{}}}
\subsection{Method \code{predict()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{model.interface.ranger.class$predict(object, newdata = NULL, type, ...)}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-model.interface.ranger-get.formula"></a>}}
\if{latex}{\out{\hypertarget{method-model.interface.ranger-get.formula}{}}}
\subsection{Method \code{get.formula()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{model.interface.ranger.class$get.formula(x, envir, package = "")}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-model.interface.ranger-clone"></a>}}
\if{latex}{\out{\hypertarget{method-model.interface.ranger-clone}{}}}
\subsection{Method \code{clone()}}{
The objects of this class are cloneable with this method.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{model.interface.ranger.class$clone(deep = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{deep}}{Whether to make a deep clone.}
}
\if{html}{\out{</div>}}
}
}
}
|
/man/model.interface.ranger-class-open-paren-ranger-close-paren.Rd
|
permissive
|
Marchen/model.adapter
|
R
| false
| true
| 4,957
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model.interface.ranger.r
\name{model.interface.ranger-class (ranger)}
\alias{model.interface.ranger-class (ranger)}
\alias{model.interface.ranger.class}
\title{(Internal) model.interface class for ranger}
\description{
This reference class contains methods for \code{\link[ranger]{ranger}} in
\emph{ranger} package.
}
\section{Super class}{
\code{\link[model.adapter:model.interface]{model.adapter::model.interface}} -> \code{model.interface.ranger}
}
\section{Methods}{
\subsection{Public methods}{
\itemize{
\item \href{#method-model.interface.ranger-predict}{\code{model.interface.ranger.class$predict()}}
\item \href{#method-model.interface.ranger-get.formula}{\code{model.interface.ranger.class$get.formula()}}
\item \href{#method-model.interface.ranger-clone}{\code{model.interface.ranger.class$clone()}}
}
}
\if{html}{\out{
<details><summary>Inherited methods</summary>
<ul>
<li><span class="pkg-link" data-pkg="model.adapter" data-topic="model.interface" data-id="adjust.offset"><a href='../../model.adapter/html/model.interface.html#method-model.interface-adjust.offset'><code>model.adapter::model.interface$adjust.offset()</code></a></span></li>
<li><span class="pkg-link" data-pkg="model.adapter" data-topic="model.interface" data-id="expand.formula"><a href='../../model.adapter/html/model.interface.html#method-model.interface-expand.formula'><code>model.adapter::model.interface$expand.formula()</code></a></span></li>
<li><span class="pkg-link" data-pkg="model.adapter" data-topic="model.interface" data-id="get.call"><a href='../../model.adapter/html/model.interface.html#method-model.interface-get.call'><code>model.adapter::model.interface$get.call()</code></a></span></li>
<li><span class="pkg-link" data-pkg="model.adapter" data-topic="model.interface" data-id="get.data"><a href='../../model.adapter/html/model.interface.html#method-model.interface-get.data'><code>model.adapter::model.interface$get.data()</code></a></span></li>
<li><span class="pkg-link" data-pkg="model.adapter" data-topic="model.interface" data-id="get.family"><a href='../../model.adapter/html/model.interface.html#method-model.interface-get.family'><code>model.adapter::model.interface$get.family()</code></a></span></li>
<li><span class="pkg-link" data-pkg="model.adapter" data-topic="model.interface" data-id="get.link"><a href='../../model.adapter/html/model.interface.html#method-model.interface-get.link'><code>model.adapter::model.interface$get.link()</code></a></span></li>
<li><span class="pkg-link" data-pkg="model.adapter" data-topic="model.interface" data-id="get.linkinv"><a href='../../model.adapter/html/model.interface.html#method-model.interface-get.linkinv'><code>model.adapter::model.interface$get.linkinv()</code></a></span></li>
<li><span class="pkg-link" data-pkg="model.adapter" data-topic="model.interface" data-id="get.model.type"><a href='../../model.adapter/html/model.interface.html#method-model.interface-get.model.type'><code>model.adapter::model.interface$get.model.type()</code></a></span></li>
<li><span class="pkg-link" data-pkg="model.adapter" data-topic="model.interface" data-id="get.offset.names"><a href='../../model.adapter/html/model.interface.html#method-model.interface-get.offset.names'><code>model.adapter::model.interface$get.offset.names()</code></a></span></li>
<li><span class="pkg-link" data-pkg="model.adapter" data-topic="model.interface" data-id="initialize"><a href='../../model.adapter/html/model.interface.html#method-model.interface-initialize'><code>model.adapter::model.interface$initialize()</code></a></span></li>
</ul>
</details>
}}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-model.interface.ranger-predict"></a>}}
\if{latex}{\out{\hypertarget{method-model.interface.ranger-predict}{}}}
\subsection{Method \code{predict()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{model.interface.ranger.class$predict(object, newdata = NULL, type, ...)}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-model.interface.ranger-get.formula"></a>}}
\if{latex}{\out{\hypertarget{method-model.interface.ranger-get.formula}{}}}
\subsection{Method \code{get.formula()}}{
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{model.interface.ranger.class$get.formula(x, envir, package = "")}\if{html}{\out{</div>}}
}
}
\if{html}{\out{<hr>}}
\if{html}{\out{<a id="method-model.interface.ranger-clone"></a>}}
\if{latex}{\out{\hypertarget{method-model.interface.ranger-clone}{}}}
\subsection{Method \code{clone()}}{
The objects of this class are cloneable with this method.
\subsection{Usage}{
\if{html}{\out{<div class="r">}}\preformatted{model.interface.ranger.class$clone(deep = FALSE)}\if{html}{\out{</div>}}
}
\subsection{Arguments}{
\if{html}{\out{<div class="arguments">}}
\describe{
\item{\code{deep}}{Whether to make a deep clone.}
}
\if{html}{\out{</div>}}
}
}
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGenerics.R, R/check-GSC.R
\docType{methods}
\name{check}
\alias{check}
\alias{check,GeneSetCollection-method}
\alias{geneIdType,GeneSetCollection-method}
\alias{collectionType,GeneSetCollection-method}
\title{Checks a GeneSetCollection}
\usage{
check(object)
\S4method{check}{GeneSetCollection}(object)
\S4method{geneIdType}{GeneSetCollection}(object)
\S4method{collectionType}{GeneSetCollection}(object)
}
\arguments{
\item{object}{A GeneSetCollection}
}
\value{
A geneSetCollection
}
\description{
Checks that all the collection types are the same. Issues a warning when a
GOCollection is detected. Checks that all the geneIdTypes are the same for
all the GeneSets. Checks that each GeneSet contains at least two genes.
}
\section{Methods (by class)}{
\itemize{
\item \code{GeneSetCollection}: Applies the checks
\item \code{GeneSetCollection}: Returns the geneIdType present in the GeneSetCollection
\item \code{GeneSetCollection}: Returns the collectionType present in the GeneSetCollection
}}
\examples{
isTRUE(check(Info))
data(sample.ExpressionSet)
ai <- AnnotationIdentifier(annotation(sample.ExpressionSet))
geneIds <- featureNames(sample.ExpressionSet)[100:109]
gs3 <- GeneSet(geneIds=geneIds, type=ai,
setName="sample1", setIdentifier="102")
uprotIds <- c("Q9Y6Q1", "A6NJZ7", "Q9BXI6", "Q15035", "A1X283",
"P55957")
gs4 <- GeneSet(uprotIds, geneIdType=UniprotIdentifier())
gsc <- GeneSetCollection(list(gs3, gs4))
gsc
\donttest{check(gsc)}
geneIdType(Info)
collectionType(Info)
}
|
/man/check.Rd
|
no_license
|
llrs/GSEAdv
|
R
| false
| true
| 1,605
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGenerics.R, R/check-GSC.R
\docType{methods}
\name{check}
\alias{check}
\alias{check,GeneSetCollection-method}
\alias{geneIdType,GeneSetCollection-method}
\alias{collectionType,GeneSetCollection-method}
\title{Checks a GeneSetCollection}
\usage{
check(object)
\S4method{check}{GeneSetCollection}(object)
\S4method{geneIdType}{GeneSetCollection}(object)
\S4method{collectionType}{GeneSetCollection}(object)
}
\arguments{
\item{object}{A GeneSetCollection}
}
\value{
A geneSetCollection
}
\description{
Checks that all the collection types are the same. Issues a warning when a
GOCollection is detected. Checks that all the geneIdTypes are the same for
all the GeneSets. Checks that each GeneSet contains at least two genes.
}
\section{Methods (by class)}{
\itemize{
\item \code{GeneSetCollection}: Applies the checks
\item \code{GeneSetCollection}: Returns the geneIdType present in the GeneSetCollection
\item \code{GeneSetCollection}: Returns the collectionType present in the GeneSetCollection
}}
\examples{
isTRUE(check(Info))
data(sample.ExpressionSet)
ai <- AnnotationIdentifier(annotation(sample.ExpressionSet))
geneIds <- featureNames(sample.ExpressionSet)[100:109]
gs3 <- GeneSet(geneIds=geneIds, type=ai,
setName="sample1", setIdentifier="102")
uprotIds <- c("Q9Y6Q1", "A6NJZ7", "Q9BXI6", "Q15035", "A1X283",
"P55957")
gs4 <- GeneSet(uprotIds, geneIdType=UniprotIdentifier())
gsc <- GeneSetCollection(list(gs3, gs4))
gsc
\donttest{check(gsc)}
geneIdType(Info)
collectionType(Info)
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/waterfall_palette_names.R
\name{waterfall_palette_names}
\alias{waterfall_palette_names}
\title{waterfall_palette_names}
\usage{
waterfall_palette_names(palette, file_type, data_frame)
}
\arguments{
\item{palette}{Named colour vector as input}
\item{file_type}{Which file type is involved?}
\item{data_frame}{Only used if file_type is "custom"}
}
\value{
a named list of "breaks" and "labels"
}
\description{
Make labels and breaks for palettes
}
\details{
waterfall_palette_names
}
|
/man/waterfall_palette_names.Rd
|
permissive
|
pradyumnasagar/GenVisR
|
R
| false
| true
| 564
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/waterfall_palette_names.R
\name{waterfall_palette_names}
\alias{waterfall_palette_names}
\title{waterfall_palette_names}
\usage{
waterfall_palette_names(palette, file_type, data_frame)
}
\arguments{
\item{palette}{Named colour vector as input}
\item{file_type}{Which file type is involved?}
\item{data_frame}{Only used if file_type is "custom"}
}
\value{
a named list of "breaks" and "labels"
}
\description{
Make labels and breaks for palettes
}
\details{
waterfall_palette_names
}
|
# Suma.R: evaluate and auto-print a sum, a product, and a large product.
a <- 5
b <- 3
a + b
a * b
56677 * 888888909
|
/Suma.R
|
no_license
|
JimmyReyesVelasco/Seminario
|
R
| false
| false
| 38
|
r
|
# Suma.R: evaluate and auto-print a sum, a product, and a large product.
a <- 5
b <- 3
a + b
a * b
56677 * 888888909
|
# Draw box plots of population and murder counts from `mergedData`, which
# must already exist in the workspace with columns `population` and `Murder`.
library(ggplot2)
# Use bare column names inside aes(): `df$col` inside aes() bypasses
# ggplot2's data masking and breaks with facets/grouping.
populationBoxPlot <- ggplot(mergedData, aes(y = population)) +
  geom_boxplot()
murderBoxPlot <- ggplot(mergedData, aes(y = Murder)) +
  geom_boxplot()
populationBoxPlot
murderBoxPlot
|
/boxplot.R
|
no_license
|
fall2018-wallace/snehab_dataviz
|
R
| false
| false
| 231
|
r
|
# Draw box plots of population and murder counts from `mergedData`, which
# must already exist in the workspace with columns `population` and `Murder`.
library(ggplot2)
# Use bare column names inside aes(): `df$col` inside aes() bypasses
# ggplot2's data masking and breaks with facets/grouping.
populationBoxPlot <- ggplot(mergedData, aes(y = population)) +
  geom_boxplot()
murderBoxPlot <- ggplot(mergedData, aes(y = Murder)) +
  geom_boxplot()
populationBoxPlot
murderBoxPlot
|
# plot1.R -- Exploratory Data Analysis, plot 1.
# Reads the UCI household power consumption data and draws a histogram of
# Global Active Power for 1-2 Feb 2007. Run from the directory that
# contains "household_power_consumption.txt".
# NOTE(review): the raw file encodes missing values as "?"; consider
# adding na.strings = "?" so they are parsed as NA up front.
watt <- read.csv("household_power_consumption.txt", sep = ";",
                 stringsAsFactors = FALSE)  # was misspelled "stringsAsFactor"
# Keep only the two target days (Date is stored as d/m/Y text).
watt1 <- subset(watt, watt[, 1] == '1/2/2007')
watt2 <- subset(watt, watt[, 1] == '2/2/2007')
# Combine Date and Time into a single POSIXct timestamp column.
watt1$newd <- as.POSIXct(paste(watt1$Date, watt1$Time), format = "%d/%m/%Y %T")
watt2$newd <- as.POSIXct(paste(watt2$Date, watt2$Time), format = "%d/%m/%Y %T")
watt3 <- rbind(watt1, watt2)
watt4 <- na.omit(watt3)
# Plot 1: histogram of Global Active Power (kilowatts).
png("plot1.png")
hist(as.numeric(watt4$Global_active_power), col = "red",
     main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)")
dev.off()
|
/plot1.R
|
no_license
|
kanudutta/ExData_Plotting1
|
R
| false
| false
| 655
|
r
|
# plot1.R -- Exploratory Data Analysis, plot 1.
# Reads the UCI household power consumption data and draws a histogram of
# Global Active Power for 1-2 Feb 2007. Run from the directory that
# contains "household_power_consumption.txt".
# NOTE(review): the raw file encodes missing values as "?"; consider
# adding na.strings = "?" so they are parsed as NA up front.
watt <- read.csv("household_power_consumption.txt", sep = ";",
                 stringsAsFactors = FALSE)  # was misspelled "stringsAsFactor"
# Keep only the two target days (Date is stored as d/m/Y text).
watt1 <- subset(watt, watt[, 1] == '1/2/2007')
watt2 <- subset(watt, watt[, 1] == '2/2/2007')
# Combine Date and Time into a single POSIXct timestamp column.
watt1$newd <- as.POSIXct(paste(watt1$Date, watt1$Time), format = "%d/%m/%Y %T")
watt2$newd <- as.POSIXct(paste(watt2$Date, watt2$Time), format = "%d/%m/%Y %T")
watt3 <- rbind(watt1, watt2)
watt4 <- na.omit(watt3)
# Plot 1: histogram of Global Active Power (kilowatts).
png("plot1.png")
hist(as.numeric(watt4$Global_active_power), col = "red",
     main = "Global Active Power",
     xlab = "Global Active Power (kilowatts)")
dev.off()
|
## Cross-validated performance estimates for a gbm classifier.
##
## x          data.frame of predictors
## y          0/1 response vector
## folds      list of test-row index vectors, one element per fold
## tree_inc   number of trees to grow per increment while searching for the
##            OOB-optimal tree count
## dist       gbm distribution (default "adaboost")
## id, bf, sh interaction.depth, bag.fraction and shrinkage passed to gbm()
## singlefold accepted for backward compatibility; currently unused
##
## Returns a matrix with one row per fold and columns: deviance, clamped
## deviance, MSE, misclassification rate, AUC and the selected tree count.
## Requires the gbm package and an auc() function (e.g. from pROC) to be
## available in the calling environment.
gbmCV <- function(x, y, folds, tree_inc, dist="adaboost", id=1, bf=.5, sh=.001, singlefold=F) {
  ## Per-fold loss helpers; all drop NAs.
  getDev <- function(p, y) -mean(ifelse(y==1, log(p), log(1-p)), na.rm=TRUE)
  ## Deviance with predictions clamped away from 0/1 to avoid log(0).
  getDevmod <- function(p, y) getDev(pmin(pmax(p, .001), .999), y)
  getMse <- function(p, y) mean((y-p)^2, na.rm=TRUE)
  getMisclass <- function(p, y) mean(abs((p>.5)-y), na.rm=TRUE)
  getAUC <- function(p, y) auc(y,p)
  ## Grow trees tree_inc at a time until the OOB-optimal count is interior
  ## (i.e. no longer within 1% of the total number grown).
  gbmInit2 <- function(y, x, tree_inc) {
    tree_ct <- tree_inc
    gbm_fit <- gbm(y~., data=x, distribution=dist, n.tree=tree_inc,
                   shrinkage=sh, interaction.depth=id, bag.fraction=bf)
    best_tree <- gbm.perf(gbm_fit, method="OOB")
    while (best_tree/tree_ct>.99) {
      tree_ct <- tree_ct + tree_inc
      gbm_fit <- gbm.more(gbm_fit, n.new.trees=tree_inc)
      best_tree <- gbm.perf(gbm_fit, method="OOB")
    }
    list(gbm_fit=gbm_fit, best_tree=best_tree)
  }
  ## Preallocate one slot per fold.
  n_folds <- length(folds)
  best_tree <- numeric(n_folds)
  dev <- numeric(n_folds)
  devmod <- numeric(n_folds)
  mse <- numeric(n_folds)
  misclass <- numeric(n_folds)
  auc1 <- numeric(n_folds)
  ## BUG FIX: the loop bound was the undefined global `cv_num`; iterate
  ## over the supplied folds instead.
  for (i in seq_along(folds)) {
    train_id <- Reduce(union, folds[-i])
    test_id <- folds[[i]]
    xtrain <- x[train_id,,drop=F]
    ytrain <- y[train_id]
    xtest <- x[test_id,,drop=F]
    ytest <- y[test_id]
    init <- gbmInit2(ytrain, xtrain, tree_inc)
    gbm_fit <- init$gbm_fit
    best_tree[i] <- init$best_tree
    ## predicted probability of class 1
    pred <- predict(gbm_fit, newdata=xtest, n.trees=best_tree[i], type="response")
    dev[i] <- getDev(pred, ytest)
    devmod[i] <- getDevmod(pred, ytest)
    mse[i] <- getMse(pred, ytest)
    misclass[i] <- getMisclass(pred, ytest)
    auc1[i] <- getAUC(pred, ytest)
  }
  cbind(dev, devmod, mse, misclass, auc=auc1, best_tree)
} # end gbmCV
|
/gbmcv.R
|
permissive
|
fboehm/stat998-project2
|
R
| false
| false
| 1,721
|
r
|
## Cross-validated performance estimates for a gbm classifier.
##
## x          data.frame of predictors
## y          0/1 response vector
## folds      list of test-row index vectors, one element per fold
## tree_inc   number of trees to grow per increment while searching for the
##            OOB-optimal tree count
## dist       gbm distribution (default "adaboost")
## id, bf, sh interaction.depth, bag.fraction and shrinkage passed to gbm()
## singlefold accepted for backward compatibility; currently unused
##
## Returns a matrix with one row per fold and columns: deviance, clamped
## deviance, MSE, misclassification rate, AUC and the selected tree count.
## Requires the gbm package and an auc() function (e.g. from pROC) to be
## available in the calling environment.
gbmCV <- function(x, y, folds, tree_inc, dist="adaboost", id=1, bf=.5, sh=.001, singlefold=F) {
  ## Per-fold loss helpers; all drop NAs.
  getDev <- function(p, y) -mean(ifelse(y==1, log(p), log(1-p)), na.rm=TRUE)
  ## Deviance with predictions clamped away from 0/1 to avoid log(0).
  getDevmod <- function(p, y) getDev(pmin(pmax(p, .001), .999), y)
  getMse <- function(p, y) mean((y-p)^2, na.rm=TRUE)
  getMisclass <- function(p, y) mean(abs((p>.5)-y), na.rm=TRUE)
  getAUC <- function(p, y) auc(y,p)
  ## Grow trees tree_inc at a time until the OOB-optimal count is interior
  ## (i.e. no longer within 1% of the total number grown).
  gbmInit2 <- function(y, x, tree_inc) {
    tree_ct <- tree_inc
    gbm_fit <- gbm(y~., data=x, distribution=dist, n.tree=tree_inc,
                   shrinkage=sh, interaction.depth=id, bag.fraction=bf)
    best_tree <- gbm.perf(gbm_fit, method="OOB")
    while (best_tree/tree_ct>.99) {
      tree_ct <- tree_ct + tree_inc
      gbm_fit <- gbm.more(gbm_fit, n.new.trees=tree_inc)
      best_tree <- gbm.perf(gbm_fit, method="OOB")
    }
    list(gbm_fit=gbm_fit, best_tree=best_tree)
  }
  ## Preallocate one slot per fold.
  n_folds <- length(folds)
  best_tree <- numeric(n_folds)
  dev <- numeric(n_folds)
  devmod <- numeric(n_folds)
  mse <- numeric(n_folds)
  misclass <- numeric(n_folds)
  auc1 <- numeric(n_folds)
  ## BUG FIX: the loop bound was the undefined global `cv_num`; iterate
  ## over the supplied folds instead.
  for (i in seq_along(folds)) {
    train_id <- Reduce(union, folds[-i])
    test_id <- folds[[i]]
    xtrain <- x[train_id,,drop=F]
    ytrain <- y[train_id]
    xtest <- x[test_id,,drop=F]
    ytest <- y[test_id]
    init <- gbmInit2(ytrain, xtrain, tree_inc)
    gbm_fit <- init$gbm_fit
    best_tree[i] <- init$best_tree
    ## predicted probability of class 1
    pred <- predict(gbm_fit, newdata=xtest, n.trees=best_tree[i], type="response")
    dev[i] <- getDev(pred, ytest)
    devmod[i] <- getDevmod(pred, ytest)
    mse[i] <- getMse(pred, ytest)
    misclass[i] <- getMisclass(pred, ytest)
    auc1[i] <- getAUC(pred, ytest)
  }
  cbind(dev, devmod, mse, misclass, auc=auc1, best_tree)
} # end gbmCV
|
# Market-basket analysis of Mar_Basket.csv with the apriori algorithm.
# Expects one row per (transaction Id, item).
library(readr)
library(arules)
marBasketData <- read_csv("D:/z_kaushal/ISIDMBA/DataSets/Mar_Basket.csv")
View(marBasketData)
target <- factor(marBasketData$items)
ident <- marBasketData$Id
# Group items by transaction id and coerce to an arules 'transactions'
# object. (A stray call `as(split(target, ident), )` with the target class
# missing used to error at run time; it has been removed.)
transactions <- as(split(target, ident), "transactions")
# Mine association rules with at least two items per rule.
rules <- apriori(transactions,
                 parameter = list(support = 0.25, confidence = 0.05, minlen = 2))
rules
# Strongest rules first.
rules <- sort(rules, decreasing = TRUE, by = "lift")
inspect(rules)
# NOTE(review): installing packages inside an analysis script is a
# one-time setup step better performed interactively.
install.packages("arulesViz")
install.packages("kernlab")
install.packages("grid")
library("arulesViz")
plot(rules)
|
/code/tutorial/Day6-multRsMBA.R
|
no_license
|
kd303/ML-Training
|
R
| false
| false
| 575
|
r
|
# Market-basket analysis of Mar_Basket.csv with the apriori algorithm.
# Expects one row per (transaction Id, item).
library(readr)
library(arules)
marBasketData <- read_csv("D:/z_kaushal/ISIDMBA/DataSets/Mar_Basket.csv")
View(marBasketData)
target <- factor(marBasketData$items)
ident <- marBasketData$Id
# Group items by transaction id and coerce to an arules 'transactions'
# object. (A stray call `as(split(target, ident), )` with the target class
# missing used to error at run time; it has been removed.)
transactions <- as(split(target, ident), "transactions")
# Mine association rules with at least two items per rule.
rules <- apriori(transactions,
                 parameter = list(support = 0.25, confidence = 0.05, minlen = 2))
rules
# Strongest rules first.
rules <- sort(rules, decreasing = TRUE, by = "lift")
inspect(rules)
# NOTE(review): installing packages inside an analysis script is a
# one-time setup step better performed interactively.
install.packages("arulesViz")
install.packages("kernlab")
install.packages("grid")
library("arulesViz")
plot(rules)
|
# Preppin' Data 2020 week 31: clean and reshape an Olympic medals workbook.
# Builds three tables: finalA (one row per medallist, labels normalised),
# finalB (medal counts per country/year), finalC (tidied host-city sheet).
library(dplyr)
library(readxl)
library(tidyr)
library(stringr)
xlsx <- "F:/Data/Medals PD WOW.xlsx"
# Lookup table mapping country names to country codes.
country_code_map <- read_excel(xlsx, sheet = 'Country Codes')
# finalA: medallist records. Country codes are resolved in two passes
# (fill missing codes from the sheet's own 'Country Code' column, then
# re-merge on 'Code' to pick up canonical country names), and event/sport/
# discipline labels are normalised via regex replacements.
finalA <- read_excel(xlsx, sheet = 'Medallists', col_types = c("text",
"text", "text", "text", "text", "text",
"text", "text", "text")) %>%
merge(country_code_map, by = 'Country', all.x = TRUE) %>%
mutate('Code' = if_else(is.na(`Code`),`Country Code`,`Code`)) %>%
rename('CountryDrop' = 'Country') %>%
merge(country_code_map, by = 'Code') %>%
# Normalise distance units ("metres" -> "m" unless preceded by "kilo",
# "kilometres" -> "km") and collapse label variants to one spelling each.
mutate('Event' = str_replace_all(`Event`,'(?<!kilo)(metre(s*))','m'),
'Event' = str_replace_all(`Event`,'(kilometre(s*))','km'),
'Sport' = str_replace_all(`Sport`,'^Canoe.*','Canoeing'),
'Sport' = str_replace_all(`Sport`,'^Swimming$','Aquatics'),
'Discipline' = str_replace_all(`Discipline`,'Beach volley.*','Beach Volleyball'),
'Discipline' = str_replace_all(`Discipline`,'Wrestling.*','Wrestling'),
'Discipline' = str_replace_all(`Discipline`,'Rhythmic.*','Rhythmic'),
'Discipline' = str_replace_all(`Discipline`,'Artistic.*','Artistic'),
'Discipline' = str_replace_all(`Discipline`,'Mountain (B|b)ik.*','Mountain Bike'),
'Discipline' = str_replace_all(`Discipline`,'Modern (P|p)en.*','Modern Pentath.'),
'Discipline' = str_replace_all(`Discipline`,'(.*) cycling','Cycling \\1')) %>%
select(c('Country', 'Code', 'Sport', 'Medal', 'Event', 'Athlete', 'Year', 'Event_Gender', 'Discipline'))
# finalB: medal tallies per country and year, one column per medal colour.
finalB <- finalA %>%
group_by(`Country`, `Year`, `Medal`) %>%
summarise('Value' = n()) %>%
pivot_wider(., names_from = `Medal`, values_from = `Value`, values_fn = list(Value=sum)) %>%
select(c('Country', 'Year', 'Gold', 'Silver', 'Bronze'))
# finalC: host sheet. Splits "City, Country", parses dates that arrive
# either as m/d/Y text or as Excel serial numbers (origin 1899-12-30),
# and derives the games year from the start date.
finalC <- read_excel(xlsx, sheet = 'Hosts',col_types = c("text",
"text", "text", "text", "numeric",
"numeric", "numeric")) %>%
separate(., `Host`, c('Host City', 'Host Country'), sep = ',\\s') %>%
mutate('Start Date' = if_else(str_detect(`Start Date`,'/'),
as.Date(`Start Date`,format='%m/%d/%Y'),
as.Date(as.numeric(`Start Date`), origin = '1899-12-30')),
'End Date' = if_else(str_detect(`End Date`,'/'),
as.Date(`End Date`,format='%m/%d/%Y'),
as.Date(as.numeric(`End Date`), origin = '1899-12-30')),
'Year' = as.integer(strftime(`Start Date`,format = '%Y'))) %>%
select(c('Year', 'Host Country', 'Host City', 'Start Date', 'End Date', 'Games', 'Nations', 'Sports', 'Events'))
View(finalA)
View(finalB)
View(finalC)
|
/2020/2020W31/preppindataw31.R
|
no_license
|
ArseneXie/Preppindata
|
R
| false
| false
| 2,867
|
r
|
# Preppin' Data 2020 week 31: clean and reshape an Olympic medals workbook.
# Builds three tables: finalA (one row per medallist, labels normalised),
# finalB (medal counts per country/year), finalC (tidied host-city sheet).
library(dplyr)
library(readxl)
library(tidyr)
library(stringr)
xlsx <- "F:/Data/Medals PD WOW.xlsx"
# Lookup table mapping country names to country codes.
country_code_map <- read_excel(xlsx, sheet = 'Country Codes')
# finalA: medallist records. Country codes are resolved in two passes
# (fill missing codes from the sheet's own 'Country Code' column, then
# re-merge on 'Code' to pick up canonical country names), and event/sport/
# discipline labels are normalised via regex replacements.
finalA <- read_excel(xlsx, sheet = 'Medallists', col_types = c("text",
"text", "text", "text", "text", "text",
"text", "text", "text")) %>%
merge(country_code_map, by = 'Country', all.x = TRUE) %>%
mutate('Code' = if_else(is.na(`Code`),`Country Code`,`Code`)) %>%
rename('CountryDrop' = 'Country') %>%
merge(country_code_map, by = 'Code') %>%
# Normalise distance units ("metres" -> "m" unless preceded by "kilo",
# "kilometres" -> "km") and collapse label variants to one spelling each.
mutate('Event' = str_replace_all(`Event`,'(?<!kilo)(metre(s*))','m'),
'Event' = str_replace_all(`Event`,'(kilometre(s*))','km'),
'Sport' = str_replace_all(`Sport`,'^Canoe.*','Canoeing'),
'Sport' = str_replace_all(`Sport`,'^Swimming$','Aquatics'),
'Discipline' = str_replace_all(`Discipline`,'Beach volley.*','Beach Volleyball'),
'Discipline' = str_replace_all(`Discipline`,'Wrestling.*','Wrestling'),
'Discipline' = str_replace_all(`Discipline`,'Rhythmic.*','Rhythmic'),
'Discipline' = str_replace_all(`Discipline`,'Artistic.*','Artistic'),
'Discipline' = str_replace_all(`Discipline`,'Mountain (B|b)ik.*','Mountain Bike'),
'Discipline' = str_replace_all(`Discipline`,'Modern (P|p)en.*','Modern Pentath.'),
'Discipline' = str_replace_all(`Discipline`,'(.*) cycling','Cycling \\1')) %>%
select(c('Country', 'Code', 'Sport', 'Medal', 'Event', 'Athlete', 'Year', 'Event_Gender', 'Discipline'))
# finalB: medal tallies per country and year, one column per medal colour.
finalB <- finalA %>%
group_by(`Country`, `Year`, `Medal`) %>%
summarise('Value' = n()) %>%
pivot_wider(., names_from = `Medal`, values_from = `Value`, values_fn = list(Value=sum)) %>%
select(c('Country', 'Year', 'Gold', 'Silver', 'Bronze'))
# finalC: host sheet. Splits "City, Country", parses dates that arrive
# either as m/d/Y text or as Excel serial numbers (origin 1899-12-30),
# and derives the games year from the start date.
finalC <- read_excel(xlsx, sheet = 'Hosts',col_types = c("text",
"text", "text", "text", "numeric",
"numeric", "numeric")) %>%
separate(., `Host`, c('Host City', 'Host Country'), sep = ',\\s') %>%
mutate('Start Date' = if_else(str_detect(`Start Date`,'/'),
as.Date(`Start Date`,format='%m/%d/%Y'),
as.Date(as.numeric(`Start Date`), origin = '1899-12-30')),
'End Date' = if_else(str_detect(`End Date`,'/'),
as.Date(`End Date`,format='%m/%d/%Y'),
as.Date(as.numeric(`End Date`), origin = '1899-12-30')),
'Year' = as.integer(strftime(`Start Date`,format = '%Y'))) %>%
select(c('Year', 'Host Country', 'Host City', 'Start Date', 'End Date', 'Games', 'Nations', 'Sports', 'Events'))
View(finalA)
View(finalB)
View(finalC)
|
# run_analysis.R -- Getting and Cleaning Data course project.
# Merges the UCI HAR train/test sets, keeps mean/std measurements, attaches
# descriptive activity and variable names, then writes the average of each
# variable per subject and activity to FinalData.txt.
library(dplyr)
# Unzip the dataset archive if the extracted folder is missing.
filename <- "UCI HAR Dataset"
if (!file.exists("UCI HAR Dataset")) {
  unzip(filename)
}
# Feature names, activity labels and the two data splits.
features <- read.table("UCI HAR Dataset/features.txt", col.names = c("n","functions"))
activities <- read.table("UCI HAR Dataset/activity_labels.txt", col.names = c("code", "activity"))
subject_test <- read.table("UCI HAR Dataset/test/subject_test.txt", col.names = "subject")
x_test <- read.table("UCI HAR Dataset/test/X_test.txt", col.names = features$functions)
y_test <- read.table("UCI HAR Dataset/test/y_test.txt", col.names = "code")
subject_train <- read.table("UCI HAR Dataset/train/subject_train.txt", col.names = "subject")
x_train <- read.table("UCI HAR Dataset/train/X_train.txt", col.names = features$functions)
y_train <- read.table("UCI HAR Dataset/train/y_train.txt", col.names = "code")
# Merge training and test sets into one data set.
X <- rbind(x_train, x_test)
Y <- rbind(y_train, y_test)
Subject <- rbind(subject_train, subject_test)
Merged_Data <- cbind(Subject, Y, X)
# Keep only the mean and standard deviation measurements.
TidyData <- Merged_Data %>% select(subject, code, contains("mean"), contains("std"))
# Replace activity codes with their descriptive names.
TidyData$code <- activities[TidyData$code, 2]
# Label the data set with descriptive variable names.
names(TidyData)[2] <- "activity"
names(TidyData) <- gsub("Acc", "Accelerometer", names(TidyData))
names(TidyData) <- gsub("Gyro", "Gyroscope", names(TidyData))
names(TidyData) <- gsub("BodyBody", "Body", names(TidyData))
names(TidyData) <- gsub("Mag", "Magnitude", names(TidyData))
names(TidyData) <- gsub("^t", "Time", names(TidyData))
names(TidyData) <- gsub("^f", "Frequency", names(TidyData))
names(TidyData) <- gsub("tBody", "TimeBody", names(TidyData))
# NOTE(review): read.table() passed the raw feature names through
# make.names(), so "-mean()" is stored as ".mean..."; the three patterns
# below therefore never match and are kept only to preserve the original
# output names unchanged.
names(TidyData) <- gsub("-mean()", "Mean", names(TidyData), ignore.case = TRUE)
names(TidyData) <- gsub("-std()", "STD", names(TidyData), ignore.case = TRUE)
names(TidyData) <- gsub("-freq()", "Frequency", names(TidyData), ignore.case = TRUE)
names(TidyData) <- gsub("angle", "Angle", names(TidyData))
names(TidyData) <- gsub("gravity", "Gravity", names(TidyData))
# Second, independent tidy data set: mean of each variable for each
# activity and subject. funs() was removed from dplyr; pass the function
# directly instead.
FinalData <- TidyData %>%
  group_by(subject, activity) %>%
  summarise_all(mean)
write.table(FinalData, "FinalData.txt", row.names = FALSE)
|
/run_analysis.R
|
no_license
|
neptunelaw/GACD-WK-4
|
R
| false
| false
| 2,445
|
r
|
# run_analysis.R -- Getting and Cleaning Data course project.
# Merges the UCI HAR train/test sets, keeps mean/std measurements, attaches
# descriptive activity and variable names, then writes the average of each
# variable per subject and activity to FinalData.txt.
library(dplyr)
# Unzip the dataset archive if the extracted folder is missing.
filename <- "UCI HAR Dataset"
if (!file.exists("UCI HAR Dataset")) {
  unzip(filename)
}
# Feature names, activity labels and the two data splits.
features <- read.table("UCI HAR Dataset/features.txt", col.names = c("n","functions"))
activities <- read.table("UCI HAR Dataset/activity_labels.txt", col.names = c("code", "activity"))
subject_test <- read.table("UCI HAR Dataset/test/subject_test.txt", col.names = "subject")
x_test <- read.table("UCI HAR Dataset/test/X_test.txt", col.names = features$functions)
y_test <- read.table("UCI HAR Dataset/test/y_test.txt", col.names = "code")
subject_train <- read.table("UCI HAR Dataset/train/subject_train.txt", col.names = "subject")
x_train <- read.table("UCI HAR Dataset/train/X_train.txt", col.names = features$functions)
y_train <- read.table("UCI HAR Dataset/train/y_train.txt", col.names = "code")
# Merge training and test sets into one data set.
X <- rbind(x_train, x_test)
Y <- rbind(y_train, y_test)
Subject <- rbind(subject_train, subject_test)
Merged_Data <- cbind(Subject, Y, X)
# Keep only the mean and standard deviation measurements.
TidyData <- Merged_Data %>% select(subject, code, contains("mean"), contains("std"))
# Replace activity codes with their descriptive names.
TidyData$code <- activities[TidyData$code, 2]
# Label the data set with descriptive variable names.
names(TidyData)[2] <- "activity"
names(TidyData) <- gsub("Acc", "Accelerometer", names(TidyData))
names(TidyData) <- gsub("Gyro", "Gyroscope", names(TidyData))
names(TidyData) <- gsub("BodyBody", "Body", names(TidyData))
names(TidyData) <- gsub("Mag", "Magnitude", names(TidyData))
names(TidyData) <- gsub("^t", "Time", names(TidyData))
names(TidyData) <- gsub("^f", "Frequency", names(TidyData))
names(TidyData) <- gsub("tBody", "TimeBody", names(TidyData))
# NOTE(review): read.table() passed the raw feature names through
# make.names(), so "-mean()" is stored as ".mean..."; the three patterns
# below therefore never match and are kept only to preserve the original
# output names unchanged.
names(TidyData) <- gsub("-mean()", "Mean", names(TidyData), ignore.case = TRUE)
names(TidyData) <- gsub("-std()", "STD", names(TidyData), ignore.case = TRUE)
names(TidyData) <- gsub("-freq()", "Frequency", names(TidyData), ignore.case = TRUE)
names(TidyData) <- gsub("angle", "Angle", names(TidyData))
names(TidyData) <- gsub("gravity", "Gravity", names(TidyData))
# Second, independent tidy data set: mean of each variable for each
# activity and subject. funs() was removed from dplyr; pass the function
# directly instead.
FinalData <- TidyData %>%
  group_by(subject, activity) %>%
  summarise_all(mean)
write.table(FinalData, "FinalData.txt", row.names = FALSE)
|
# Compute seasonal covariance of daily u and v winds from NCEP Reanalysis
# NetCDF files, per level/lon/lat/year/season, and save to datos/uv.Rds.
lat_subset <- list(lat = -90:10)   # renamed: `subset` shadowed base::subset
pressure_levels <- c(50, 200, 500) # renamed: `levels` shadowed base::levels
years <- seq(1979, 2018)
uv <- data.table::rbindlist(lapply(years, function(y) {
  cat("Processing year ", y, "\r")
  ufile <- paste0("datos/NCEP Reanalysis/daily/uwnd.", y, ".nc")
  vfile <- paste0("datos/NCEP Reanalysis/daily/vwnd.", y, ".nc")
  wnd <- metR::ReadNetCDF(ufile, "uwnd", subset = lat_subset)
  wnd[, vwnd := metR::ReadNetCDF(vfile, "vwnd", subset = lat_subset, out = "vector")[[1]]]
  wnd <- wnd[level %in% pressure_levels]
  # Label each timestep with its year and season (computed once per `time`
  # group). BUG FIX: `year` was called unqualified although the script
  # never attaches data.table; qualify it like the other calls.
  wnd[, year := data.table::year(time[1]), by = time]
  wnd[, season := metR::season(time[1]), by = time]
  wnd[, .(uv = cov(uwnd, vwnd)), by = .(level, lon, lat, year, season)]
}))
data.table::setnames(uv, "level", "lev")
uv[, dataset := "ncep"]
cat("Saving data.")
saveRDS(uv, "datos/uv.Rds")
|
/analysis/scripts/01-compute_uv.R
|
no_license
|
YTHsieh/shceof
|
R
| false
| false
| 772
|
r
|
# Compute seasonal covariance of daily u and v winds from NCEP Reanalysis
# NetCDF files, per level/lon/lat/year/season, and save to datos/uv.Rds.
lat_subset <- list(lat = -90:10)   # renamed: `subset` shadowed base::subset
pressure_levels <- c(50, 200, 500) # renamed: `levels` shadowed base::levels
years <- seq(1979, 2018)
uv <- data.table::rbindlist(lapply(years, function(y) {
  cat("Processing year ", y, "\r")
  ufile <- paste0("datos/NCEP Reanalysis/daily/uwnd.", y, ".nc")
  vfile <- paste0("datos/NCEP Reanalysis/daily/vwnd.", y, ".nc")
  wnd <- metR::ReadNetCDF(ufile, "uwnd", subset = lat_subset)
  wnd[, vwnd := metR::ReadNetCDF(vfile, "vwnd", subset = lat_subset, out = "vector")[[1]]]
  wnd <- wnd[level %in% pressure_levels]
  # Label each timestep with its year and season (computed once per `time`
  # group). BUG FIX: `year` was called unqualified although the script
  # never attaches data.table; qualify it like the other calls.
  wnd[, year := data.table::year(time[1]), by = time]
  wnd[, season := metR::season(time[1]), by = time]
  wnd[, .(uv = cov(uwnd, vwnd)), by = .(level, lon, lat, year, season)]
}))
data.table::setnames(uv, "level", "lev")
uv[, dataset := "ncep"]
cat("Saving data.")
saveRDS(uv, "datos/uv.Rds")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils-spatial.R
\name{get_rastervalue}
\alias{get_rastervalue}
\title{Function to extract directly the raster value of provided points}
\usage{
get_rastervalue(coords, env, ngb_fill = TRUE, rm.na = FALSE)
}
\arguments{
\item{coords}{A \code{\link{data.frame}}, \code{\link{matrix}} or \code{\link{sf}} object.}
\item{env}{A \code{\link{SpatRaster}} object with the provided predictors.}
\item{ngb_fill}{\code{\link{logical}} on whether cells should be interpolated from
neighbouring values.}
\item{rm.na}{\code{\link{logical}} parameter which - if set - removes all rows with a
missing data point (\code{NA}) from the result.}
}
\value{
A \code{\link{data.frame}} with the extracted covariate data from each provided
data point.
}
\description{
This function simply extracts the values from a provided
\code{\link{SpatRaster}}, \code{\link{SpatRasterDataset}} or \code{\link{SpatRasterCollection}} object. For
points where no or only NA values were extracted, a small buffer is applied to try and
obtain the remaining values.
}
\details{
It is essentially a wrapper for \code{\link[terra:extract]{terra::extract}}.
}
\examples{
\dontrun{
# Extract values
vals <- get_rastervalue(coords, env)
}
}
\keyword{utils}
|
/man/get_rastervalue.Rd
|
permissive
|
iiasa/ibis.iSDM
|
R
| false
| true
| 1,284
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils-spatial.R
\name{get_rastervalue}
\alias{get_rastervalue}
\title{Function to extract directly the raster value of provided points}
\usage{
get_rastervalue(coords, env, ngb_fill = TRUE, rm.na = FALSE)
}
\arguments{
\item{coords}{A \code{\link{data.frame}}, \code{\link{matrix}} or \code{\link{sf}} object.}
\item{env}{A \code{\link{SpatRaster}} object with the provided predictors.}
\item{ngb_fill}{\code{\link{logical}} on whether cells should be interpolated from
neighbouring values.}
\item{rm.na}{\code{\link{logical}} parameter which - if set - removes all rows with a
missing data point (\code{NA}) from the result.}
}
\value{
A \code{\link{data.frame}} with the extracted covariate data from each provided
data point.
}
\description{
This function simply extracts the values from a provided
\code{\link{SpatRaster}}, \code{\link{SpatRasterDataset}} or \code{\link{SpatRasterCollection}} object. For
points where no or only NA values were extracted, a small buffer is applied to try and
obtain the remaining values.
}
\details{
It is essentially a wrapper for \code{\link[terra:extract]{terra::extract}}.
}
\examples{
\dontrun{
# Extract values
vals <- get_rastervalue(coords, env)
}
}
\keyword{utils}
|
# Rectangling the repurrrsive::got_chars list of Game of Thrones
# characters into tidy data frames with tidyr's unnest_* helpers.
library(tidyr)
library(dplyr)
library(repurrrsive)
# there are 30 rows. And a named list for each one.
# each list has 18 elements.
# there is only one column
chars <- tibble(char = got_chars)
# now we have 30 rows by 18 columns.
# some columns are simple types while some are list types.
chars2 <- chars %>% unnest_wider(char)
chars2
# just show the list types.
chars2 %>% select_if(is.list)
# "books" is a list of character vectors. Uneven length.
# "tvSeries" is a list of character vectors. Uneven length. Nulls allowed.
# "name" is unique (simple type)
chars2 %>%
select(name, books, tvSeries) %>%
# this will result in "name", "media", "value" (i.e. list) about 60 rows.
# is unique by "name" and "media"
pivot_longer(c(books, tvSeries), names_to = "media", values_to = "value") %>%
# this will repeat "name" and "media" and then peel out "value" 180 rows.
unnest_longer(value)
|
/rectangularising_game_thrones.r
|
no_license
|
thefactmachine/tidy_json
|
R
| false
| false
| 911
|
r
|
# Rectangling the repurrrsive::got_chars list of Game of Thrones
# characters into tidy data frames with tidyr's unnest_* helpers.
library(tidyr)
library(dplyr)
library(repurrrsive)
# there are 30 rows. And a named list for each one.
# each list has 18 elements.
# there is only one column
chars <- tibble(char = got_chars)
# now we have 30 rows by 18 columns.
# some columns are simple types while some are list types.
chars2 <- chars %>% unnest_wider(char)
chars2
# just show the list types.
chars2 %>% select_if(is.list)
# "books" is a list of character vectors. Uneven length.
# "tvSeries" is a list of character vectors. Uneven length. Nulls allowed.
# "name" is unique (simple type)
chars2 %>%
select(name, books, tvSeries) %>%
# this will result in "name", "media", "value" (i.e. list) about 60 rows.
# is unique by "name" and "media"
pivot_longer(c(books, tvSeries), names_to = "media", values_to = "value") %>%
# this will repeat "name" and "media" and then peel out "value" 180 rows.
unnest_longer(value)
|
## Fetch-or-create constructor for a DFM data object.
##
## id          numeric monitor id; used to locate "DFM_<id>.csv" and to name
##             the cached object "DFM<id>" in the global workspace.
## parameters  parameters object; if a cached DFM exists with different
##             parameters, the cached copy is updated via
##             ChangeParameterObject().
##
## Returns a list of class "DFM" with ID, Parameters, RawData and the
## baseline-derived fields added by CalculateBaseline(). Side effect: the
## object is cached in the global environment (pos = 1).
## Relies on helpers defined elsewhere in this file: AreParametersEqual,
## ChangeParameterObject, GetElapsedSeconds, FindDataBreaks,
## CalculateBaseline.
DFMClass<-function(id,parameters) {
if (!is.numeric(id) || !all(is.finite(id)))
stop("invalid arguments")
## Check to determine whether the DFM object already exists
st<-paste("DFM",id,sep="")
found=0
if(exists(st,where=1)) {
data<-get(st)
found<-1
## Cached copy exists: refresh it only if the requested parameters differ.
if(AreParametersEqual(parameters,data$Parameters)==FALSE)
data<-ChangeParameterObject(data,parameters)
}
## If doesn't exist, get and create
if(found==0) {
file<-paste("DFM_",id,".csv",sep="")
dfm<-read.csv(file,header=TRUE)
## Get Minutes from Sample column only if ElapsedTime is not
## there
if('Seconds' %in% colnames(dfm)) {
Minutes<-dfm$Seconds/60
dfm<-data.frame(Minutes,dfm)
} else if(('Date' %in% colnames(dfm))&&('Time' %in% colnames(dfm))&&('MSec' %in% colnames(dfm))){
## No Seconds column: derive it from the Date/Time/MSec columns.
Seconds<-GetElapsedSeconds(dfm)
Minutes<-Seconds/60.0
dfm<-data.frame(Minutes,Seconds,dfm)
} else {
stop("Time information missing from DFM data.")
}
data=list(ID=id,Parameters=parameters,RawData=dfm)
class(data)="DFM"
## Warn on the console if the recording contains timing gaps.
if(!is.na(FindDataBreaks(data,multiplier=4,returnvals=FALSE))){
cat("Data lapses found. Use FindDataBreaks for details.")
flush.console()
}
data<-CalculateBaseline(data)
assign(st,data,pos=1)
}
data
}
## Locate gaps in the recorded time series.
## Scans consecutive entries of dfm$RawData$Seconds for spacings larger
## than `multiplier` times the nominal sampling interval
## (1 / Parameters$Samples.Per.Second).
## With returnvals = TRUE, returns the offending rows together with their
## index and gap length, or c(NA) when there are none. With
## returnvals = FALSE, returns c(1) as a flag when gaps exist, c(NA)
## otherwise.
FindDataBreaks<-function(dfm,multiplier=4,returnvals=TRUE){
  gaps <- c(0, diff(dfm$RawData$Seconds))
  limit <- (1.0 / dfm$Parameters$Samples.Per.Second) * multiplier
  keep <- gaps > limit
  hits <- seq_along(gaps)[keep]
  gaps <- gaps[keep]
  if (returnvals == TRUE) {
    if (length(gaps) == 0) {
      c(NA)
    } else {
      cbind(Index = hits, Interval = gaps, dfm$RawData[hits, ])
    }
  } else {
    if (length(gaps) == 0) c(NA) else c(1)
  }
}
## Convert the Date ("%m/%d/%Y"), Time (24-hour "%H:%M:%S") and MSec
## columns of a raw DFM table into seconds elapsed since the first sample,
## including the fractional millisecond part.
GetElapsedSeconds<-function(dfm){
  stamps <- as.POSIXct(paste(dfm$Date, dfm$Time), format = "%m/%d/%Y %H:%M:%S")
  elapsed <- c(difftime(stamps, stamps[1], units = "secs"))
  elapsed + (dfm$MSec / 1000)
}
## Subtract a running-median baseline from each of the 12 well signals
## (columns W1..W12), storing the result in dfm$BaselineData. Conflicting
## chamber signals are then removed and all threshold-derived quantities
## recalculated. Relies on CleanupChamberConflicts() and SetThreshold()
## defined elsewhere in this file.
CalculateBaseline=function(dfm){
window.min=dfm$Parameters$Baseline.Window.Minutes
newData<-dfm$RawData
# the number of samples in those minutes
# NOTE(review): assumes 5 samples per second; Parameters$Samples.Per.Second
# exists elsewhere in this file -- confirm whether it should be used here.
window<-window.min*60*5
## runmed() requires an odd window size.
if(window %% 2 ==0)
window=window+1
for(i in 1:12) {
cname <-paste("W",i,sep="")
tmp<-runmed(newData[,cname],window)
newData[,cname]<-newData[,cname]-tmp
}
dfm$BaselineData=newData
## Now remove conflicting signals
dfm=CleanupChamberConflicts(dfm)
## Everything else must be recalculated
dfm<-SetThreshold(dfm)
dfm
}
## Recompute detection thresholds and every quantity derived from them.
## Dispatches to the adaptive or fixed threshold routines according to
## Parameters$Use.Adaptive.Threshold, then refreshes feeding, tasting,
## PI (two-well chambers only) and duration/interval data.
## getStandard: when adaptive thresholds are in use, also recompute the
## adaptive standard first (TRUE by default).
## Relies on Set.Adaptive.Standard, Set.Adaptive.Threshold,
## Set.Fixed.Threshold, Set.Tasting.Data and Set.Durations.And.Intervals
## defined elsewhere in this file.
SetThreshold = function(dfm,getStandard=TRUE) {
## First set the threshold...
if(is.null(dfm$BaselineData)) {
stop("DFM must have baseline.")
}
if(dfm$Parameters$Use.Adaptive.Threshold) {
if(getStandard==TRUE)
dfm<-Set.Adaptive.Standard(dfm)
dfm<-Set.Adaptive.Threshold(dfm)
}
else
dfm<-Set.Fixed.Threshold(dfm)
## Now update the licks and PI
dfm<-Set.Feeding.Data(dfm)
dfm<-Set.Tasting.Data(dfm)
## PI is only meaningful for two-well chambers.
if(dfm$Parameters$Chamber.Size==2){
dfm<-Set.PI.Data(dfm)
}
#Other measures
dfm<-Set.Durations.And.Intervals(dfm)
dfm
}
## Classify feeding for all 12 wells. Stores dfm$LickData (logical lick
## vectors) and dfm$EventData (event-length encoding) as copies of the
## baselined data frame with each well column replaced.
Set.Feeding.Data<-function(dfm){
  if(is.null(dfm$BaselineData))
    stop("Baseline must be calculated")
  licks<-dfm$BaselineData
  events<-dfm$BaselineData
  for(w in 1:12) {
    col<-paste("W",w,sep="")
    res<-Set.Feeding.Data.Well(dfm,w)
    licks[,col]<-res[,1]
    events[,col]<-res[,2]
  }
  dfm$LickData<-licks
  dfm$EventData<-events
  dfm
}
## Classify feeding for one well. Candidate licks exceed the minimum
## feeding threshold; a contiguous event survives only if at least one of
## its samples also exceeds the maximum threshold, and events shorter than
## Feeding.Minevents are dropped. Returns a data.frame with columns
## FeedingLicks (logical) and Events (event-length encoding).
Set.Feeding.Data.Well<-function(dfm,well){
  thr<-Thresholds.Well(dfm,well)
  signal<-BaselinedData.Well(dfm,well)
  above.min<-(signal > thr$FeedingMin)
  above.max<-(signal > thr$FeedingMax)
  Events<-Get.Surviving.Events(above.min,above.max)
  ## Remove events that are too short.
  Events[Events<dfm$Parameters$Feeding.Minevents]<-0
  ## Expand the surviving events back to TRUE/FALSE licks.
  FeedingLicks<-Expand.Events(Events)
  data.frame(FeedingLicks,Events)
}
## Build per-chamber preference-index traces for two-well chambers.
## For each chamber set (wellA, wellB):
##   Feeding.PI      = feeding licks A - feeding licks B (per sample)
##   Feeding.EventPI = event indicator A - B (durations collapsed to 0/1)
##   Tasting.PI      = tasting licks A - tasting licks B
## Stored as dfm$PIData, a list of data.frames named C1, C2, ...
Set.PI.Data<-function(dfm){
## Get the Feeding.PI
## NOTE(review): cnames is computed but never used.
cnames<-paste("C",1:nrow(dfm$Parameters$Chamber.Sets),sep="")
Minutes<-dfm$BaselineData$Minutes
for(i in 1:nrow(dfm$Parameters$Chamber.Sets)) {
## Conflicts are defined as both pads with signal greater than the
## minimum value of all feeding and tasting thresholds
wellA<-dfm$Parameters$Chamber.Sets[i,1]
wellB<-dfm$Parameters$Chamber.Sets[i,2]
FeedingLicksA<-FeedingData.Well.Licks(dfm,wellA)
FeedingLicksB<-FeedingData.Well.Licks(dfm,wellB)
## Here it is the instantaneous PI
Feeding.PI<-FeedingLicksA - FeedingLicksB
## Temporarily eliminate duration information for EventPI.
tmpA<-FeedingData.Well.Events(dfm,wellA)
tmpA[tmpA>0]<-1
tmpB<-FeedingData.Well.Events(dfm,wellB)
tmpB[tmpB>0]<-1
Feeding.EventPI<-tmpA-tmpB
TastingLicksA<-TastingData.Well(dfm,wellA)
TastingLicksB<-TastingData.Well(dfm,wellB)
## Here it is the instantaneous PI
Tasting.PI<-TastingLicksA - TastingLicksB
results<-data.frame(Minutes,Feeding.PI,Feeding.EventPI,Tasting.PI)
names(results)<-c("Minutes","Feeding.PI", "Feeding.EventPI","Tasting.PI")
if(i==1){
PIData=list(C1=results)
}
else {
s<-paste("C",i,sep="")
PIData[[s]]<-results
}
}
## NOTE(review): row.names<- on a plain list only touches an attribute;
## confirm this is intentional.
row.names(PIData)<-NULL
dfm$PIData<-PIData
dfm
}
## This new function uses a new parameter, Signal.Threshold,
## to remove positive signals that conflict. The higher signal
## is kept. The lower one is set to baseline.
## For each two-well chamber, samples where BOTH wells exceed
## Signal.Threshold are conflicts; the smaller baselined value is zeroed in
## dfm$BaselineData. Chamber size 1 needs no cleanup; sizes >2 stop().
## Note: exact ties (dataA == dataB) zero neither well.
CleanupChamberConflicts<-function(dfm){
## This function normally takes baselined data.
if(is.null(dfm$BaselineData))
stop("Baseline must be calculated")
## Note that we don't need to do anything if the chamber size is 1
## because there is no conflict by definition.
if(dfm$Parameters$Chamber.Size==2) {
cat("\n")
flush.console()
for(i in 1:nrow(dfm$Parameters$Chamber.Sets)) {
wellA<-dfm$Parameters$Chamber.Sets[i,1]
wellB<-dfm$Parameters$Chamber.Sets[i,2]
dataA<-BaselinedData.Well(dfm,wellA)
dataB<-BaselinedData.Well(dfm,wellB)
signalA<-(dataA>dfm$Parameters$Signal.Threshold)
signalB<-(dataB>dfm$Parameters$Signal.Threshold)
awins<-dataA>dataB
bwins<-dataB>dataA
conflicts<-0
#Clean the feeding vectors
conflicts<-sum(signalA & signalB)
## conflict resolution involves accepting the plate with the larger value
## and setting the other to baseline.
## cat("DFM: ",dfm$ID," Chamber:",i," Cleaning ",conflicts," conflicts.\n")
flush.console()
if(conflicts>0) {
dataA[(signalA & signalB & bwins)]<-0
dataB[(signalA & signalB & awins)]<-0
## Correct the data
cname <-paste("W",wellA,sep="")
dfm$BaselineData[,cname]<-dataA
cname <-paste("W",wellB,sep="")
dfm$BaselineData[,cname]<-dataB
}
}
}
if(dfm$Parameters$Chamber.Size>2) {
stop("Clean chambers not implemented for chamber size >2.")
}
dfm
}
## DEPRECATED. This function does not change the baselined data; it only
## alters the feeding and tasting lick vectors to ensure that a single fly
## cannot feed from both wells simultaneously. It replaces the feeding and
## tasting data columns in place.
## NOTE(review): the masks below are applied sequentially, so later rules
## (e.g. tastingA[(feedingB & tastingA)]) see already-modified vectors --
## preserve the statement order if editing.
CleanupChamberConflictsOLD<-function(dfm){
## This function normally takes baselined data.
if(is.null(dfm$LickData))
stop("Feeding Lick Data must be calculated")
if(is.null(dfm$TastingData))
stop("TastingData must be calculated")
## Note that we don't need to do anything if the chamber size is 1
## because there is no conflict by definition.
if(dfm$Parameters$Chamber.Size==2) {
cat("\n")
flush.console()
for(i in 1:nrow(dfm$Parameters$Chamber.Sets)) {
wellA<-dfm$Parameters$Chamber.Sets[i,1]
wellB<-dfm$Parameters$Chamber.Sets[i,2]
dataA<-BaselinedData.Well(dfm,wellA)
dataB<-BaselinedData.Well(dfm,wellB)
feedingA<-FeedingData.Well.Licks(dfm,wellA)
feedingB<-FeedingData.Well.Licks(dfm,wellB)
tastingA<-TastingData.Well(dfm,wellA)
tastingB<-TastingData.Well(dfm,wellB)
awins<-dataA>dataB
bwins<-dataB>dataA
conflicts<-0
#Clean the feeding vectors
conflicts<-conflicts+sum(feedingA & feedingB & bwins)+sum(feedingA & feedingB & awins)
#Clean the tasting vectors
conflicts<-conflicts+sum(feedingB & tastingA)+sum(feedingA & tastingB)
conflicts<-conflicts+sum(tastingB & tastingA & bwins)+sum(tastingB & tastingA & awins)
## conflict resolution involves accepting the plate with the larger value
## and setting the other to baseline.
cat("DFM: ",dfm$ID," Chamber:",i," Cleaning ",conflicts," conflicts.\n")
flush.console()
if(conflicts>0) {
feedingA[(feedingA & feedingB & bwins)]<-FALSE
feedingB[(feedingA & feedingB & awins)]<-FALSE
tastingA[(feedingB & tastingA)]<-FALSE
tastingB[(feedingA & tastingB)]<-FALSE
tastingA[(tastingB & tastingA & bwins)]<-FALSE
tastingB[(tastingB & tastingA & awins)]<-FALSE
## Correct the feeding and tasting entries
## NOTE(review): results are written to dfm$FeedingData here, while the
## rest of the file stores licks in dfm$LickData -- confirm which field
## downstream code expects.
cname <-paste("W",wellA,sep="")
dfm$FeedingData[,cname]<-feedingA
dfm$TastingData[,cname]<-tastingA
cname <-paste("W",wellB,sep="")
dfm$FeedingData[,cname]<-feedingB
dfm$TastingData[,cname]<-tastingB
}
}
}
if(dfm$Parameters$Chamber.Size>2) {
stop("Clean chambers not implements for chamber size >2.")
}
dfm
}
## Compute the adaptive (running-quantile) standard for all 12 wells and
## store it as dfm$AdaptiveStandard, a data.frame with columns W1..W12.
## Bug fix: the result was previously assigned to the misspelled field
## "AdapativeStandard", so consumers reading dfm$AdaptiveStandard (the
## adaptive threshold code checks exactly that name) always saw NULL.
Set.Adaptive.Standard<-function(dfm){
  stand<-Set.Adaptive.Standard.Well(dfm,1)
  for(i in 2:12){
    tmp<-Set.Adaptive.Standard.Well(dfm,i)
    stand<-cbind(stand,tmp)
  }
  AdaptiveStandard<-data.frame(stand)
  names(AdaptiveStandard)<-paste("W",1:12,sep="")
  dfm$AdaptiveStandard<-AdaptiveStandard
  dfm
}
## Running-quantile standard for one well: for each sample, take the
## Adaptive.Threshold.Selection.Quan quantile of the baselined signal in a
## centered window of Adaptive.Threshold.Window.Minutes. Progress is
## printed every 10000 samples.
Set.Adaptive.Standard.Well<-function(dfm,well){
  sps<-dfm$Parameters$Samples.Per.Sec
  signal<-BaselinedData.Well(dfm,well)
  n<-length(signal)
  ## Full (two-sided) window width in samples; must be odd.
  win<-dfm$Parameters$Adaptive.Threshold.Window.Minutes*60*sps
  if(win %% 2 == 0)
    win<-win+1
  arm<-(win-1)/2
  q<-dfm$Parameters$Adaptive.Threshold.Selection.Quan
  out<-rep(-1,n)
  for(i in 1:n){
    lo<-max(1,(i-arm))
    hi<-min(n,(i+arm))
    out[i]<-quantile(signal[lo:hi],q)
    if(i%%10000==0) {
      print(paste(i,"of",n,"in well",well))
      flush.console()
    }
  }
  out
}
## Build the per-well threshold tables (W1..W12) from the adaptive standard
## and store them as dfm$Thresholds.
## Bug fix: `Thresholds` was used before being created (Thresholds$W1<-tmp
## on an unbound name raised an error); it is now initialized as a list.
## NOTE(review): callers (SetThreshold) invoke Set.Adaptive.Threshold with
## a capital S, which this lowercase definition does not satisfy -- confirm
## the intended name against the rest of the project.
set.Adaptive.Threshold<-function(dfm){
  if(is.null(dfm$AdaptiveStandard)) {
    stop("DFM must have standard.")
  }
  tmp<-Set.Adaptive.Threshold.Well(dfm,1)
  Thresholds<-list(W1=tmp)
  for(i in 2:12){
    s<-paste("W",i,sep="")
    Thresholds[[s]]<-Set.Adaptive.Threshold.Well(dfm,i)
  }
  dfm$Thresholds<-Thresholds
  dfm
}
## Scale the adaptive standard for one well into the four threshold
## columns (FeedingMax/FeedingMin/TastingMax/TastingMin), clamping each at
## Adaptive.Threshold.Minimum.
## Bug fixes: (1) parameters were read from an undefined object `chamber`;
## they are now read from `dfm` like every other function in this file.
## (2) Tasting.Threshold.Interval.Low fed TastingMax and .High fed
## TastingMin, inverted relative to Set.Fixed.Threshold.Well and to how
## Set.Tasting.Data.Well uses the band (TastingMin < signal < TastingMax);
## Low now feeds TastingMin and High feeds TastingMax.
Set.Adaptive.Threshold.Well<-function(dfm,well){
  cname<-paste("W",well,sep="")
  stand<-dfm$AdaptiveStandard[,cname]
  feeding.max.thresh<-dfm$Parameters$Feeding.Threshold.Value*stand
  feeding.min.thresh<-dfm$Parameters$Feeding.Interval.Minimum*stand
  tasting.min.thresh<-dfm$Parameters$Tasting.Threshold.Interval.Low*stand
  tasting.max.thresh<-dfm$Parameters$Tasting.Threshold.Interval.High*stand
  min.thresh<-dfm$Parameters$Adaptive.Threshold.Minimum
  ## Clamp: no threshold may fall below the configured floor.
  feeding.max.thresh[feeding.max.thresh<min.thresh]<-min.thresh
  feeding.min.thresh[feeding.min.thresh<min.thresh]<-min.thresh
  tasting.max.thresh[tasting.max.thresh<min.thresh]<-min.thresh
  tasting.min.thresh[tasting.min.thresh<min.thresh]<-min.thresh
  r.tmp<-data.frame(feeding.max.thresh,feeding.min.thresh,tasting.max.thresh,tasting.min.thresh)
  names(r.tmp)<-c("FeedingMax","FeedingMin","TastingMax","TastingMin")
  r.tmp
}
## Build fixed (non-adaptive) threshold tables for all 12 wells and store
## them as dfm$Thresholds (a list named W1..W12).
Set.Fixed.Threshold<-function(dfm){
  Thresholds<-list(W1=Set.Fixed.Threshold.Well(dfm,1))
  for(w in 2:12){
    Thresholds[[paste("W",w,sep="")]]<-Set.Fixed.Threshold.Well(dfm,w)
  }
  dfm$Thresholds<-Thresholds
  dfm
}
## Fixed threshold table for one well: one constant value per column,
## repeated for every sample. A negative Feeding.Threshold.Value switches
## all four parameters to "fraction of the well's maximum baselined
## reading" mode (absolute values, rounded to integers).
Set.Fixed.Threshold.Well<-function(dfm,well){
  n<-SampleCount(dfm)
  p<-dfm$Parameters
  if(p$Feeding.Threshold.Value<0){
    peak<-max(BaselinedData.Well(dfm,well))
    fmax <-round(peak*abs(p$Feeding.Threshold.Value),0)
    fmin <-round(peak*abs(p$Feeding.Interval.Minimum),0)
    tlow <-round(peak*abs(p$Tasting.Threshold.Interval.Low),0)
    thigh<-round(peak*abs(p$Tasting.Threshold.Interval.High),0)
  }
  else {
    fmax <-p$Feeding.Threshold.Value
    fmin <-p$Feeding.Interval.Minimum
    tlow <-p$Tasting.Threshold.Interval.Low
    thigh<-p$Tasting.Threshold.Interval.High
  }
  r.tmp<-data.frame(rep(fmax,n),rep(fmin,n),rep(thigh,n),rep(tlow,n))
  names(r.tmp)<-c("FeedingMax","FeedingMin","TastingMax","TastingMin")
  r.tmp
}
## Classify tasting licks for all 12 wells (feeding licks must already be
## set, since feeding takes precedence). Stores dfm$TastingData as a copy
## of the baselined frame with each well column replaced by logicals.
Set.Tasting.Data<-function(dfm){
  if(is.null(dfm$BaselineData))
    stop("Baseline must be calculated")
  if(is.null(dfm$LickData))
    stop("Feeding Licks must be calculated")
  tasting<-dfm$BaselineData
  for(w in 1:12) {
    tasting[,paste("W",w,sep="")]<-Set.Tasting.Data.Well(dfm,w)
  }
  dfm$TastingData<-tasting
  dfm
}
## Tasting licks for one well: samples strictly inside the tasting band
## (TastingMin, TastingMax) that are not already feeding licks -- feeding
## classification takes precedence and cancels tasting.
Set.Tasting.Data.Well<-function(dfm,well){
  thr<-Thresholds.Well(dfm,well)
  signal<-BaselinedData.Well(dfm,well)
  in.band<-(signal > thr$TastingMin) & (signal < thr$TastingMax)
  in.band[FeedingData.Well.Licks(dfm,well)]<-FALSE
  in.band
}
## Compute bout duration and inter-bout interval summaries for all 12
## wells, storing them as the lists dfm$Durations and dfm$Intervals
## (elements W1..W12).
Set.Durations.And.Intervals<-function(dfm){
  first<-Set.Durations.And.Intervals.Well(dfm,1)
  Durations<-list(W1=first$Durations)
  Intervals<-list(W1=first$Intervals)
  for(w in 2:12){
    s<-paste("W",w,sep="")
    res<-Set.Durations.And.Intervals.Well(dfm,w)
    Durations[[s]]<-res$Durations
    Intervals[[s]]<-res$Intervals
  }
  dfm$Durations<-Durations
  dfm$Intervals<-Intervals
  dfm
}
## Per-well feeding bout summary. Returns a list with:
##   $Durations - one row per feeding event: Minutes, Licks (samples),
##     Duration (sec), plus intensity statistics over the event's span of
##     the baselined signal; the scalar 0 when the well has no events.
##   $Intervals - one row per gap between events: Minutes, Sample,
##     IntervalSec; the scalar 0 when there are no gaps.
Set.Durations.And.Intervals.Well<-function(dfm,well){
data<-BaselineData.Well(dfm,well)
events<-FeedingData.Well.Events(dfm,well)
## Now we need to update the event durations
## Indices will be used for summary duration characteristics
indices<-1:length(events)
indices<-indices[events>0]
boutDurs<-events[events>0]
Durations<-0
if(length(boutDurs)>0) {
max.inten<-rep(0,length(indices))
min.inten<-rep(0,length(indices))
sum.inten<-rep(0,length(indices))
avg.inten<-rep(0,length(indices))
var.inten<-rep(0,length(indices))
for(i in 1:length(indices)){
dataindex<-indices[i]
eventlength<-boutDurs[i]
## Slice of the baselined signal covered by this event.
tmp2<-data[dataindex:(dataindex+(eventlength-1))]
max.inten[i]<-max(tmp2)
min.inten[i]<-min(tmp2)
sum.inten[i]<-sum(tmp2)
avg.inten[i]<-mean(tmp2)
var.inten[i]<-var(tmp2)
}
BoutData<-data.frame(min.inten,max.inten,sum.inten,avg.inten,var.inten)
names(BoutData)<-c("MinIntensity","MaxIntensity","SumIntensity","MeanIntensity","VarIntensity")
tmp<-BaselineData(dfm)
tmp<-tmp[indices,]
Minutes<-tmp$Minutes
Events<-boutDurs
## Event length in samples divided by the sampling rate gives seconds.
Duration<-Events/dfm$Parameters$Samples.Per.Sec
AvgInten<-BoutData$MeanIntensity
MaxInten<-BoutData$MaxIntensity
MinInten<-BoutData$MinIntensity
SumInten<-BoutData$SumIntensity
VarInten<-BoutData$VarIntensity
Durations<-data.frame(Minutes,Events,Duration,SumInten,AvgInten,MinInten,MaxInten,VarInten)
names(Durations)<-c("Minutes","Licks","Duration","TotalIntensity","AvgIntensity","MinIntensity","MaxIntensity","VarIntensity")
}
result<-list(Durations=Durations)
## Now intervals
## Collapse feeding data to time BETWEEN events.
boutInt<-Get.Intervals(FeedingData.Well.Licks(dfm,well))
indices<-1:length(boutInt)
indices<-indices[boutInt>0]
boutInt<-boutInt[boutInt>0]
## NOTE(review): despite the name, spm holds samples-per-second (it relies
## on $ partial matching against Samples.Per.Second elsewhere) -- confirm.
spm<-dfm$Parameters$Samples.Per.Sec
intA<-boutInt/spm
Ints<-0
if(length(intA)>0) {
tmp<-BaselineData(dfm)
tmp<-tmp[indices,]
Minutes<-tmp$Minutes
Sample<-tmp$Sample
IntervalSec<-intA
Ints<-data.frame(Minutes,Sample,IntervalSec)
}
result<-list(Durations=Durations,Intervals=Ints)
result
}
## Threshold table for one well, optionally restricted to the open minute
## interval (range[1], range[2]); range=c(0,0) means all samples.
Thresholds.Well<-function(dfm,well,range=c(0,0)){
  tbl<-dfm$Thresholds[[paste("W",well,sep="")]]
  if(sum(range)!=0) {
    keep<-(dfm$BaselineData$Minutes>range[1]) & (dfm$BaselineData$Minutes<range[2])
    tbl<-tbl[keep,]
  }
  tbl
}
## Baselined signal vector for one well, optionally restricted to the open
## minute interval (range[1], range[2]); range=c(0,0) means all samples.
BaselinedData.Well<-function(dfm,well,range=c(0,0)) {
  vals<-dfm$BaselineData[,paste("W",well,sep="")]
  if(sum(range)!=0) {
    keep<-(dfm$BaselineData$Minutes>range[1]) & (dfm$BaselineData$Minutes<range[2])
    vals<-vals[keep]
  }
  vals
}
## Full baselined data frame, optionally restricted to the open minute
## interval (range[1], range[2]); range=c(0,0) means all rows.
BaselinedData<-function(dfm,range=c(0,0)) {
  out<-dfm$BaselineData
  if(sum(range)!=0) {
    out<-out[(out$Minutes>range[1]) & (out$Minutes<range[2]),]
  }
  out
}
## Number of samples (rows) in the baselined data, optionally restricted to
## a minute range.
SampleCount<-function(dfm,range=c(0,0)){
  nrow(BaselinedData(dfm,range))
}
## Logical feeding-lick vector for one well, optionally range-limited.
FeedingData.Well.Licks<-function(dfm,well,range=c(0,0)){
  FeedingData.Licks(dfm,range)[,paste("W",well,sep="")]
}
## Event-length vector for one well, optionally range-limited. The first
## sample of each feeding event carries the event duration (in samples);
## all other samples are 0 -- set positives to 1 to count events.
FeedingData.Well.Events<-function(dfm,well,range=c(0,0)){
  FeedingData.Events(dfm,range)[,paste("W",well,sep="")]
}
## Tasting-lick logical vector for one well, optionally restricted to the
## open minute interval (range[1], range[2]).
## Bug fix: the range filter previously indexed `tmp$Minutes`, but `tmp` is
## a plain vector (one well column), so any nonzero range errored. The
## Minutes column is now read from dfm$TastingData, matching how
## BaselinedData.Well filters.
TastingData.Well<-function(dfm,well,range=c(0,0)){
  cname=paste("W",well,sep="")
  tmp<-dfm$TastingData[,cname]
  if(sum(range)!=0) {
    keep<-(dfm$TastingData$Minutes>range[1]) & (dfm$TastingData$Minutes<range[2])
    tmp<-tmp[keep]
  }
  tmp
}
## Feeding-lick data frame (logical well columns plus Minutes), optionally
## restricted to the open minute interval (range[1], range[2]).
FeedingData.Licks<-function(dfm,range=c(0,0)){
  out<-dfm$LickData
  if(sum(range)!=0) {
    out<-out[(out$Minutes>range[1] & out$Minutes<range[2]),]
  }
  out
}
## Event-length data frame (first sample of each feeding event carries its
## duration in samples, others 0), optionally restricted to the open minute
## interval (range[1], range[2]).
FeedingData.Events<-function(dfm,range=c(0,0)){
  out<-dfm$EventData
  if(sum(range)!=0) {
    out<-out[(out$Minutes>range[1] & out$Minutes<range[2]),]
  }
  out
}
## Tasting-lick data frame, optionally restricted to the open minute
## interval (range[1], range[2]).
TastingData<-function(dfm,range=c(0,0)){
  out<-dfm$TastingData
  if(sum(range)!=0) {
    out<-out[(out$Minutes>range[1] & out$Minutes<range[2]),]
  }
  out
}
## Total feeding licks per well over an optional minute range; returns a
## named numeric vector W1..W12.
Feeding.TotalLicks<-function(dfm,range=c(0,0)){
  licks<-FeedingData.Licks(dfm,range)
  totals<-rep(-1,12)
  for(w in 1:12) {
    totals[w]<-sum(licks[,paste("W",w,sep="")])
  }
  names(totals)<-paste("W",1:12,sep="")
  totals
}
## Total feeding licks for a single well over an optional minute range.
Feeding.TotalLicks.Well<-function(dfm,well,range=c(0,0)){
  Feeding.TotalLicks(dfm,range)[well]
}
## Count of feeding events per well (positive entries in the event-length
## encoding) over an optional minute range; named vector W1..W12.
Feeding.TotalEvents<-function(dfm,range=c(0,0)){
  events<-FeedingData.Events(dfm,range)
  totals<-rep(-1,12)
  for(w in 1:12) {
    totals[w]<-sum(events[,paste("W",w,sep="")]>0)
  }
  names(totals)<-paste("W",1:12,sep="")
  totals
}
## Count of feeding events for a single well over an optional minute range.
Feeding.TotalEvents.Well<-function(dfm,well,range=c(0,0)){
  Feeding.TotalEvents(dfm,range)[well]
}
## Total tasting licks per well over an optional minute range; named vector
## W1..W12.
Tasting.TotalLicks<-function(dfm,range=c(0,0)){
  tasting<-TastingData(dfm,range)
  totals<-rep(-1,12)
  for(w in 1:12) {
    totals[w]<-sum(tasting[,paste("W",w,sep="")])
  }
  names(totals)<-paste("W",1:12,sep="")
  totals
}
## Total tasting licks for a single well over an optional minute range.
Tasting.TotalLicks.Well<-function(dfm,well,range=c(0,0)){
  Tasting.TotalLicks(dfm,range)[well]
}
## Full baselined data frame, optionally restricted to the open minute
## interval (range[1], range[2]). (Duplicate accessor of BaselinedData.)
BaselineData<-function(dfm,range=c(0,0)){
  out<-dfm$BaselineData
  if(sum(range)!=0) {
    out<-out[(out$Minutes>range[1]) & (out$Minutes<range[2]),]
  }
  out
}
## Baselined signal vector for one well, optionally range-limited.
BaselineData.Well<-function(dfm,well,range=c(0,0)) {
  BaselineData(dfm,range)[,paste("W",well,sep="")]
}
## Raw data frame, optionally restricted to the open minute interval
## (range[1], range[2]).
RawData<-function(dfm,range=c(0,0)) {
  out<-dfm$RawData
  if(sum(range)!=0) {
    out<-out[(out$Minutes>range[1]) & (out$Minutes<range[2]),]
  }
  out
}
## Mean and median time (seconds) between feeding events for one well,
## optionally restricted to a minute range. Returns a one-row data.frame
## (MeanTimeBtw, MedTimeBtw); both are 0 when there are no intervals.
Feeding.IntervalSummary.Well<-function(dfm,well,range=c(0,0)){
  ints<-dfm$Intervals[[paste("W",well,sep="")]]
  ## A well with no intervals stores the scalar 0 instead of a data.frame.
  if(is.data.frame(ints) && sum(range)!=0){
    ints<-ints[(ints$Minutes>range[1]) & (ints$Minutes<range[2]),]
  }
  if(!is.data.frame(ints) || nrow(ints)==0){
    avg<-0
    med<-0
  }
  else {
    avg<-mean(ints$IntervalSec)
    med<-median(ints$IntervalSec)
  }
  ## Guard against NA/NaN from empty aggregates.
  if(is.na(avg)||is.nan(avg)) avg<-0
  if(is.na(med)||is.nan(med)) med<-0
  out<-data.frame(avg,med)
  names(out)<-c("MeanTimeBtw","MedTimeBtw")
  out
}
## Mean and median feeding-bout duration (seconds) for one well, optionally
## restricted to a minute range. Returns a one-row data.frame
## (MeanDur, MedianDur); both are 0 when there are no bouts.
Feeding.DurationSummary.Well<-function(dfm,well,range=c(0,0)){
  durs<-dfm$Durations[[paste("W",well,sep="")]]
  ## A well with no events stores the scalar 0 instead of a data.frame.
  if(is.data.frame(durs) && sum(range)!=0){
    durs<-durs[(durs$Minutes>range[1]) & (durs$Minutes<range[2]),]
  }
  if(!is.data.frame(durs) || nrow(durs)==0){
    avg<-0
    med<-0
  }
  else {
    avg<-mean(durs$Duration)
    med<-median(durs$Duration)
  }
  ## Guard against NA/NaN from empty aggregates.
  if(is.na(avg)||is.nan(avg)) avg<-0
  if(is.na(med)||is.nan(med)) med<-0
  out<-data.frame(avg,med)
  names(out)<-c("MeanDur","MedianDur")
  out
}
## Mean and median baselined signal intensity during feeding licks for one
## well over an optional minute range. Returns a one-row data.frame
## (MeanInt, MedianInt); both 0 when there are no feeding licks.
Feeding.IntensitySummary.Well<-function(dfm,well,range=c(0,0)){
  signal<-BaselineData.Well(dfm,well,range)
  vals<-signal[FeedingData.Well.Licks(dfm,well,range)]
  if(length(vals)==0){
    avg<-0
    med<-0
  }
  else {
    avg<-mean(vals)
    med<-median(vals)
  }
  out<-data.frame(avg,med)
  names(out)<-c("MeanInt","MedianInt")
  out
}
## Whether this DFM is configured to use the adaptive threshold.
IsThresholdAdaptive<-function(dfm) {
  dfm[["Parameters"]][["Use.Adaptive.Threshold"]]
}
## c(min, max) of the baselined signal for one well over an optional
## minute range.
BaselinedData.Range.Well<-function(dfm,well,range=c(0,0)){
  vals<-BaselinedData.Well(dfm,well,range)
  c(min(vals),max(vals))
}
## Minutes column of the baselined data.
Minutes<-function(dfm) {
  dfm[["BaselineData"]][["Minutes"]]
}
## Raw bout-duration table for one well (0 when the well has no events).
Feeding.Durations.Well<-function(dfm,well){
  dfm$Durations[[paste("W",well,sep="")]]
}
## Raw inter-bout interval table for one well (0 when there are no gaps).
Feeding.Intervals.Well<-function(dfm,well){
  dfm$Intervals[[paste("W",well,sep="")]]
}
## Last baselined sample value for one well.
LastSampleData.Well<-function(dfm,well){
  tail(BaselinedData.Well(dfm,well),1)
}
## First baselined sample value for one well.
FirstSampleData.Well<-function(dfm,well){
  vals<-BaselinedData.Well(dfm,well)
  vals[1]
}
## Last row of the baselined data frame.
LastSampleData<-function(dfm){
  all.rows<-BaselinedData(dfm)
  all.rows[nrow(all.rows),]
}
## First row of the baselined data frame.
FirstSampleData<-function(dfm){
  BaselinedData(dfm)[1,]
}
#########################
## Utilities
## Takes two logical vectors: minvec marks samples above the minimal
## threshold and maxvec samples above the stricter threshold. Contiguous
## TRUE-runs in minvec are kept only when they contain at least one TRUE
## in maxvec; the result uses the Get.Events run-length encoding.
## z  <- c(TRUE,TRUE,FALSE,FALSE,TRUE,FALSE,TRUE,TRUE,TRUE)
## zz <- c(FALSE,TRUE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,TRUE)
## Get.Surviving.Events(z,zz) -> (2 0 0 0 0 0 3 0 0)
Get.Surviving.Events<-function(minvec,maxvec){
  runs<-Get.Events(minvec)
  survivors<-runs
  for(start in (1:length(minvec))[runs>0]){
    span<-start:(start+(runs[start]-1))
    ## Drop runs that never cross the stricter threshold.
    if(sum(maxvec[span])==0)
      survivors[start]<-0
  }
  survivors
}
## Inverse of Get.Events: expand an event-length vector back into logicals.
## (2 0 0 0 1 0 3 0 0) -> c(TRUE,TRUE,FALSE,FALSE,TRUE,FALSE,TRUE,TRUE,TRUE)
Expand.Events<-function(eventvec){
  out<-rep(FALSE,length(eventvec))
  for(start in which(eventvec>0)){
    out[start:(start+eventvec[start]-1)]<-TRUE
  }
  out
}
## Run-length encode TRUE runs: the first sample of each contiguous TRUE
## run holds the run length; every other sample is 0.
## c(TRUE,TRUE,FALSE,FALSE,TRUE,FALSE,TRUE,TRUE,TRUE) -> (2 0 0 0 1 0 3 0 0)
Get.Events<-function(z){
  runs<-rle(z)
  out<-c(-1)
  for(i in seq_along(runs$lengths)){
    len<-runs$lengths[i]
    if(runs$values[i])
      out<-c(out,len,rep(0,len-1))
    else
      out<-c(out,rep(0,len))
  }
  ## Drop the -1 sentinel used to seed the accumulator.
  out[-1]
}
## Combine the Get.Events run-length encoding of z with per-event intensity
## statistics (min/max/sum/mean) computed from `data` over each event span.
## Non-event rows carry zeros.
Get.Events.And.Intensities<-function(z,data){
  ev<-Get.Events(z)
  n<-length(ev)
  max.inten<-rep(0,n)
  min.inten<-rep(0,n)
  sum.inten<-rep(0,n)
  avg.inten<-rep(0,n)
  for(start in (1:n)[ev>0]){
    span<-data[start:(start+(ev[start]-1))]
    max.inten[start]<-max(span)
    min.inten[start]<-min(span)
    sum.inten[start]<-sum(span)
    avg.inten[start]<-mean(span)
  }
  out<-data.frame(ev,min.inten,max.inten,sum.inten,avg.inten)
  names(out)<-c("FeedingEvent","MinIntensity","MaxIntensity","SumIntensity","MeanIntensity")
  out
}
## Run-length encode FALSE runs (the gaps between events): the first sample
## of each contiguous FALSE run holds the gap length; everything else is 0.
## c(TRUE,TRUE,FALSE,FALSE,TRUE,FALSE,TRUE,TRUE,TRUE) -> (0 0 2 0 0 1 0 0 0)
Get.Intervals<-function(z){
  runs<-rle(z)
  out<-c(-1)
  for(i in seq_along(runs$lengths)){
    len<-runs$lengths[i]
    if(!runs$values[i])
      out<-c(out,len,rep(0,len-1))
    else
      out<-c(out,rep(0,len))
  }
  ## Drop the -1 sentinel used to seed the accumulator.
  out[-1]
}
## Remove cached DFM objects (named DFM<id>) from position 1 of the search
## path, i.e. the user workspace.
## NOTE(review): ls(pattern=) takes a regex, so "DFM." matches "DFM" plus
## any one character anywhere in the name and "DFM.." any two; the second
## rm() is therefore largely redundant with the first. Confirm whether a
## literal dot (pattern "DFM\\.") was intended.
CleanDFM<-function(){
tmp<-ls(pattern="DFM.",pos=1)
rm(list=tmp,pos=1)
tmp<-ls(pattern="DFM..",pos=1)
rm(list=tmp,pos=1)
}
## Refresh the cached copy of this DFM, stored as DFM<ID> at position 1 of
## the search path (the user workspace).
UpdateHiddenDFMObject<-function(dfm){
  assign(paste("DFM",dfm$ID,sep=""),dfm,pos=1)
}
## Flatten this DFM's parameter object into a plain vector.
GetDFMParameterVector<-function(dfm){
  GetParameterVector(dfm[["Parameters"]])
}
## Replace dfm$Parameters with newP, then recompute only the statistics
## invalidated by the fields that actually changed. The flags escalate:
## baseline change forces a full CalculateBaseline; adaptive-standard
## changes force SetThreshold (standard included); threshold-scaling
## changes re-run SetThreshold without the standard; Feeding.Minevents
## changes only re-derive licks/tasting/PI/durations. Warnings are
## suppressed during the field comparisons and restored on exit.
ChangeParameterObject<-function(dfm,newP) {
p<-dfm$Parameters
baseline.flag<-FALSE
threshold.flag<-FALSE
adaptive.baseline.flag<-FALSE
eventpi.flag<-FALSE
tmp.O<-options()
options(warn=-1)
dfm$Parameters<-newP
## Change only those that are listed
if(p$Baseline.Window.Minutes!=newP$Baseline.Window.Minutes) {
baseline.flag<-TRUE
}
if(p$Feeding.Threshold.Value!=newP$Feeding.Threshold.Value) {
threshold.flag<-TRUE
}
if(p$Feeding.Interval.Minimum!=newP$Feeding.Interval.Minimum) {
threshold.flag<-TRUE
}
if(p$Tasting.Threshold.Interval.Low!=newP$Tasting.Threshold.Interval.Low) {
threshold.flag<-TRUE
}
if(p$Tasting.Threshold.Interval.High!=newP$Tasting.Threshold.Interval.High) {
threshold.flag<-TRUE
}
if(p$Adaptive.Threshold.Minimum!=newP$Adaptive.Threshold.Minimum){
threshold.flag<-TRUE
}
if(p$Adaptive.Threshold.Window.Minutes!=newP$Adaptive.Threshold.Window.Minutes){
adaptive.baseline.flag<-TRUE
}
## NOTE(review): this reads Adaptive.Threshold.Selection.Quant, while other
## code reads ...Selection.Quan (relying on $ partial matching) -- confirm
## the canonical field name.
if(p$Adaptive.Threshold.Selection.Quant!=newP$Adaptive.Threshold.Selection.Quant){
adaptive.baseline.flag<-TRUE
}
if(p$Use.Adaptive.Threshold!=newP$Use.Adaptive.Threshold){
adaptive.baseline.flag<-TRUE
}
if(p$Feeding.Minevents!=newP$Feeding.Minevents){
eventpi.flag<-TRUE
}
if(p$Samples.Per.Second!=newP$Samples.Per.Second){
adaptive.baseline.flag<-TRUE
}
if(p$Chamber.Size !=newP$Chamber.Size){
baseline.flag<-TRUE
}
if(sum(c(p$Chamber.Sets)!=c(newP$Chamber.Sets))!=0){
baseline.flag<-TRUE
}
if(p$Signal.Threshold!=newP$Signal.Threshold){
baseline.flag<-TRUE
}
## Now update the stats needed
if(baseline.flag==TRUE) {
dfm<-CalculateBaseline(dfm)
}
else if(adaptive.baseline.flag==TRUE){
dfm<-SetThreshold(dfm)
}
else if(threshold.flag==TRUE) {
dfm<-SetThreshold(dfm,getStandard=FALSE)
}
else if(eventpi.flag==TRUE) {
dfm<-Set.Feeding.Data(dfm)
dfm<-Set.Tasting.Data(dfm)
if(dfm$Parameters$Chamber.Size==2){
dfm<-Set.PI.Data(dfm)
}
dfm<-Set.Durations.And.Intervals(dfm)
}
## Restore the caller's options (warn level) and refresh the global cache.
options(tmp.O)
UpdateHiddenDFMObject(dfm)
dfm
}
|
/FLIC/FLIC R Code Files/DFM.R
|
no_license
|
jpinzonc/Sleep_in_Drosophila
|
R
| false
| false
| 30,319
|
r
|
## Load (or fetch from cache) the DFM object with the given numeric id.
## A cached copy named DFM<id> is looked up at position 1 of the search
## path; if present and the parameters differ, it is updated in place via
## ChangeParameterObject. Otherwise DFM_<id>.csv is read from the working
## directory, elapsed time is derived (from a Seconds column, or from
## Date/Time/MSec columns), the baseline pipeline runs, and the result is
## cached globally before being returned.
DFMClass<-function(id,parameters) {
if (!is.numeric(id) || !all(is.finite(id)))
stop("invalid arguments")
## Check to determine whether the DFM object already exists
st<-paste("DFM",id,sep="")
found=0
## NOTE(review): exists() checks position 1 but get() searches from the
## calling frame -- confirm both resolve to the same environment.
if(exists(st,where=1)) {
data<-get(st)
found<-1
if(AreParametersEqual(parameters,data$Parameters)==FALSE)
data<-ChangeParameterObject(data,parameters)
}
## If doesn't exist, get and create
if(found==0) {
file<-paste("DFM_",id,".csv",sep="")
dfm<-read.csv(file,header=TRUE)
## Derive Minutes from a Seconds column if present; otherwise build
## elapsed seconds from Date/Time/MSec.
if('Seconds' %in% colnames(dfm)) {
Minutes<-dfm$Seconds/60
dfm<-data.frame(Minutes,dfm)
} else if(('Date' %in% colnames(dfm))&&('Time' %in% colnames(dfm))&&('MSec' %in% colnames(dfm))){
Seconds<-GetElapsedSeconds(dfm)
Minutes<-Seconds/60.0
dfm<-data.frame(Minutes,Seconds,dfm)
} else {
stop("Time information missing from DFM data.")
}
data=list(ID=id,Parameters=parameters,RawData=dfm)
class(data)="DFM"
## Warn (non-fatally) when sampling gaps are detected.
if(!is.na(FindDataBreaks(data,multiplier=4,returnvals=FALSE))){
cat("Data lapses found. Use FindDataBreaks for details.")
flush.console()
}
data<-CalculateBaseline(data)
assign(st,data,pos=1)
}
data
}
## Detect gaps in the recording: any step between consecutive values of
## RawData$Seconds larger than `multiplier` sample periods is a data break.
## If returnvals=TRUE, returns the offending rows (Index, gap Interval, and
## the raw data at that index), or NA when there are none; otherwise returns
## 1 when breaks exist and NA when they do not.
FindDataBreaks<-function(dfm,multiplier=4,returnvals=TRUE){
  gaps<-c(0,diff(dfm$RawData$Seconds))
  limit<-(1.0/dfm$Parameters$Samples.Per.Second)*multiplier
  Index<-which(gaps>limit)
  Interval<-gaps[Index]
  if(returnvals==TRUE) {
    if(length(Interval)==0)
      c(NA)
    else
      cbind(Index,Interval,dfm$RawData[Index,])
  }
  else {
    if(length(Interval)==0)
      c(NA)
    else
      c(1)
  }
}
## Convert the Date ("%m/%d/%Y"), Time ("%H:%M:%S") and MSec columns of a
## raw DFM data frame into seconds elapsed since the first sample
## (millisecond component included).
GetElapsedSeconds<-function(dfm){
  stamps<-as.POSIXct(paste(dfm$Date,dfm$Time),format="%m/%d/%Y %H:%M:%S")
  elapsed<-as.numeric(difftime(stamps,stamps[1],units="secs"))
  elapsed+(dfm$MSec/1000)
}
## Baseline-correct all 12 well signals by subtracting a running median
## (window of Baseline.Window.Minutes) from the raw data, store the result
## as dfm$BaselineData, then clean conflicting chamber signals and
## recompute thresholds and every downstream statistic.
CalculateBaseline=function(dfm){
window.min=dfm$Parameters$Baseline.Window.Minutes
newData<-dfm$RawData
# the number of samples in those minutes
## NOTE(review): assumes 5 samples/sec here, while other functions read
## dfm$Parameters$Samples.Per.Second -- confirm the fixed 5 is intended.
window<-window.min*60*5
## runmed requires an odd window width.
if(window %% 2 ==0)
window=window+1
for(i in 1:12) {
cname <-paste("W",i,sep="")
tmp<-runmed(newData[,cname],window)
newData[,cname]<-newData[,cname]-tmp
}
dfm$BaselineData=newData
## Now remove conflicting signals
dfm=CleanupChamberConflicts(dfm)
## Everything else must be recalculated
dfm<-SetThreshold(dfm)
dfm
}
## Recompute per-well thresholds (adaptive or fixed per
## Use.Adaptive.Threshold), then refresh every statistic derived from them:
## feeding licks, tasting licks, PI (two-well chambers only) and
## durations/intervals. getStandard=FALSE skips recomputing the expensive
## adaptive standard.
SetThreshold = function(dfm,getStandard=TRUE) {
## First set the threshold...
if(is.null(dfm$BaselineData)) {
stop("DFM must have baseline.")
}
if(dfm$Parameters$Use.Adaptive.Threshold) {
if(getStandard==TRUE)
dfm<-Set.Adaptive.Standard(dfm)
## NOTE(review): only a lowercase set.Adaptive.Threshold is defined in this
## file; confirm where the capitalized version is defined.
dfm<-Set.Adaptive.Threshold(dfm)
}
else
dfm<-Set.Fixed.Threshold(dfm)
## Now update the licks and PI
dfm<-Set.Feeding.Data(dfm)
dfm<-Set.Tasting.Data(dfm)
if(dfm$Parameters$Chamber.Size==2){
dfm<-Set.PI.Data(dfm)
}
#Other measures
dfm<-Set.Durations.And.Intervals(dfm)
dfm
}
## Classify feeding for all 12 wells. Stores dfm$LickData (logical lick
## vectors) and dfm$EventData (event-length encoding) as copies of the
## baselined data frame with each well column replaced.
Set.Feeding.Data<-function(dfm){
  if(is.null(dfm$BaselineData))
    stop("Baseline must be calculated")
  licks<-dfm$BaselineData
  events<-dfm$BaselineData
  for(w in 1:12) {
    col<-paste("W",w,sep="")
    res<-Set.Feeding.Data.Well(dfm,w)
    licks[,col]<-res[,1]
    events[,col]<-res[,2]
  }
  dfm$LickData<-licks
  dfm$EventData<-events
  dfm
}
## Classify feeding for one well. Candidate licks exceed the minimum
## feeding threshold; a contiguous event survives only if at least one of
## its samples also exceeds the maximum threshold, and events shorter than
## Feeding.Minevents are dropped. Returns a data.frame with columns
## FeedingLicks (logical) and Events (event-length encoding).
Set.Feeding.Data.Well<-function(dfm,well){
  thr<-Thresholds.Well(dfm,well)
  signal<-BaselinedData.Well(dfm,well)
  above.min<-(signal > thr$FeedingMin)
  above.max<-(signal > thr$FeedingMax)
  Events<-Get.Surviving.Events(above.min,above.max)
  ## Remove events that are too short.
  Events[Events<dfm$Parameters$Feeding.Minevents]<-0
  ## Expand the surviving events back to TRUE/FALSE licks.
  FeedingLicks<-Expand.Events(Events)
  data.frame(FeedingLicks,Events)
}
## Build per-chamber preference-index traces for two-well chambers.
## For each chamber set (wellA, wellB):
##   Feeding.PI      = feeding licks A - feeding licks B (per sample)
##   Feeding.EventPI = event indicator A - B (durations collapsed to 0/1)
##   Tasting.PI      = tasting licks A - tasting licks B
## Stored as dfm$PIData, a list of data.frames named C1, C2, ...
Set.PI.Data<-function(dfm){
## Get the Feeding.PI
## NOTE(review): cnames is computed but never used.
cnames<-paste("C",1:nrow(dfm$Parameters$Chamber.Sets),sep="")
Minutes<-dfm$BaselineData$Minutes
for(i in 1:nrow(dfm$Parameters$Chamber.Sets)) {
## Conflicts are defined as both pads with signal greater than the
## minimum value of all feeding and tasting thresholds
wellA<-dfm$Parameters$Chamber.Sets[i,1]
wellB<-dfm$Parameters$Chamber.Sets[i,2]
FeedingLicksA<-FeedingData.Well.Licks(dfm,wellA)
FeedingLicksB<-FeedingData.Well.Licks(dfm,wellB)
## Here it is the instantaneous PI
Feeding.PI<-FeedingLicksA - FeedingLicksB
## Temporarily eliminate duration information for EventPI.
tmpA<-FeedingData.Well.Events(dfm,wellA)
tmpA[tmpA>0]<-1
tmpB<-FeedingData.Well.Events(dfm,wellB)
tmpB[tmpB>0]<-1
Feeding.EventPI<-tmpA-tmpB
TastingLicksA<-TastingData.Well(dfm,wellA)
TastingLicksB<-TastingData.Well(dfm,wellB)
## Here it is the instantaneous PI
Tasting.PI<-TastingLicksA - TastingLicksB
results<-data.frame(Minutes,Feeding.PI,Feeding.EventPI,Tasting.PI)
names(results)<-c("Minutes","Feeding.PI", "Feeding.EventPI","Tasting.PI")
if(i==1){
PIData=list(C1=results)
}
else {
s<-paste("C",i,sep="")
PIData[[s]]<-results
}
}
## NOTE(review): row.names<- on a plain list only touches an attribute;
## confirm this is intentional.
row.names(PIData)<-NULL
dfm$PIData<-PIData
dfm
}
## This new function uses a new parameter, Signal.Threshold,
## to remove positive signals that conflict. The higher signal
## is kept. The lower one is set to baseline.
## For each two-well chamber, samples where BOTH wells exceed
## Signal.Threshold are conflicts; the smaller baselined value is zeroed in
## dfm$BaselineData. Chamber size 1 needs no cleanup; sizes >2 stop().
## Note: exact ties (dataA == dataB) zero neither well.
CleanupChamberConflicts<-function(dfm){
## This function normally takes baselined data.
if(is.null(dfm$BaselineData))
stop("Baseline must be calculated")
## Note that we don't need to do anything if the chamber size is 1
## because there is no conflict by definition.
if(dfm$Parameters$Chamber.Size==2) {
cat("\n")
flush.console()
for(i in 1:nrow(dfm$Parameters$Chamber.Sets)) {
wellA<-dfm$Parameters$Chamber.Sets[i,1]
wellB<-dfm$Parameters$Chamber.Sets[i,2]
dataA<-BaselinedData.Well(dfm,wellA)
dataB<-BaselinedData.Well(dfm,wellB)
signalA<-(dataA>dfm$Parameters$Signal.Threshold)
signalB<-(dataB>dfm$Parameters$Signal.Threshold)
awins<-dataA>dataB
bwins<-dataB>dataA
conflicts<-0
#Clean the feeding vectors
conflicts<-sum(signalA & signalB)
## conflict resolution involves accepting the plate with the larger value
## and setting the other to baseline.
## cat("DFM: ",dfm$ID," Chamber:",i," Cleaning ",conflicts," conflicts.\n")
flush.console()
if(conflicts>0) {
dataA[(signalA & signalB & bwins)]<-0
dataB[(signalA & signalB & awins)]<-0
## Correct the data
cname <-paste("W",wellA,sep="")
dfm$BaselineData[,cname]<-dataA
cname <-paste("W",wellB,sep="")
dfm$BaselineData[,cname]<-dataB
}
}
}
if(dfm$Parameters$Chamber.Size>2) {
stop("Clean chambers not implemented for chamber size >2.")
}
dfm
}
## DEPRECATED. This function does not change the baselined data; it only
## alters the feeding and tasting lick vectors to ensure that a single fly
## cannot feed from both wells simultaneously. It replaces the feeding and
## tasting data columns in place.
## NOTE(review): the masks below are applied sequentially, so later rules
## (e.g. tastingA[(feedingB & tastingA)]) see already-modified vectors --
## preserve the statement order if editing.
CleanupChamberConflictsOLD<-function(dfm){
## This function normally takes baselined data.
if(is.null(dfm$LickData))
stop("Feeding Lick Data must be calculated")
if(is.null(dfm$TastingData))
stop("TastingData must be calculated")
## Note that we don't need to do anything if the chamber size is 1
## because there is no conflict by definition.
if(dfm$Parameters$Chamber.Size==2) {
cat("\n")
flush.console()
for(i in 1:nrow(dfm$Parameters$Chamber.Sets)) {
wellA<-dfm$Parameters$Chamber.Sets[i,1]
wellB<-dfm$Parameters$Chamber.Sets[i,2]
dataA<-BaselinedData.Well(dfm,wellA)
dataB<-BaselinedData.Well(dfm,wellB)
feedingA<-FeedingData.Well.Licks(dfm,wellA)
feedingB<-FeedingData.Well.Licks(dfm,wellB)
tastingA<-TastingData.Well(dfm,wellA)
tastingB<-TastingData.Well(dfm,wellB)
awins<-dataA>dataB
bwins<-dataB>dataA
conflicts<-0
#Clean the feeding vectors
conflicts<-conflicts+sum(feedingA & feedingB & bwins)+sum(feedingA & feedingB & awins)
#Clean the tasting vectors
conflicts<-conflicts+sum(feedingB & tastingA)+sum(feedingA & tastingB)
conflicts<-conflicts+sum(tastingB & tastingA & bwins)+sum(tastingB & tastingA & awins)
## conflict resolution involves accepting the plate with the larger value
## and setting the other to baseline.
cat("DFM: ",dfm$ID," Chamber:",i," Cleaning ",conflicts," conflicts.\n")
flush.console()
if(conflicts>0) {
feedingA[(feedingA & feedingB & bwins)]<-FALSE
feedingB[(feedingA & feedingB & awins)]<-FALSE
tastingA[(feedingB & tastingA)]<-FALSE
tastingB[(feedingA & tastingB)]<-FALSE
tastingA[(tastingB & tastingA & bwins)]<-FALSE
tastingB[(tastingB & tastingA & awins)]<-FALSE
## Correct the feeding and tasting entries
## NOTE(review): results are written to dfm$FeedingData here, while the
## rest of the file stores licks in dfm$LickData -- confirm which field
## downstream code expects.
cname <-paste("W",wellA,sep="")
dfm$FeedingData[,cname]<-feedingA
dfm$TastingData[,cname]<-tastingA
cname <-paste("W",wellB,sep="")
dfm$FeedingData[,cname]<-feedingB
dfm$TastingData[,cname]<-tastingB
}
}
}
if(dfm$Parameters$Chamber.Size>2) {
stop("Clean chambers not implements for chamber size >2.")
}
dfm
}
Set.Adaptive.Standard<-function(dfm){
  ## Build the per-well adaptive "standard" (a rolling-quantile trace of the
  ## baselined signal) for all 12 wells and store it on the DFM.
  ## Returns the modified dfm.
  stand<-Set.Adaptive.Standard.Well(dfm,1)
  for(i in 2:12){
    tmp<-Set.Adaptive.Standard.Well(dfm,i)
    stand<-cbind(stand,tmp)
  }
  AdaptiveStandard<-data.frame(stand)
  names(AdaptiveStandard)<-paste("W",1:12,sep="")
  ## Bug fix: the result was stored under the misspelled field
  ## "AdapativeStandard", so the NULL check in set.Adaptive.Threshold() and
  ## the reads in Set.Adaptive.Threshold.Well() (both use
  ## dfm$AdaptiveStandard) never saw it.
  dfm$AdaptiveStandard<-AdaptiveStandard
  dfm
}
Set.Adaptive.Standard.Well<-function(dfm,well){
  ## Compute the rolling-quantile standard for one well: for every sample,
  ## take the configured quantile of the baselined signal within a centered
  ## window of Adaptive.Threshold.Window.Minutes minutes.
  sps<-dfm$Parameters$Samples.Per.Sec
  signal<-BaselinedData.Well(dfm,well)
  n.obs<-length(signal)
  out<-rep(-1,n.obs)
  ## The window is two-sided; force an odd width so it centers on each sample.
  win<-dfm$Parameters$Adaptive.Threshold.Window.Minutes*60*sps
  if(win %% 2 == 0)
    win<-win+1
  half<-(win-1)/2
  q<-dfm$Parameters$Adaptive.Threshold.Selection.Quan
  for(k in seq_len(n.obs)){
    lo<-max(1,k-half)
    hi<-min(n.obs,k+half)
    out[k]<-quantile(signal[lo:hi],q)
    ## Progress report every 10k samples; this loop can be slow.
    if(k %% 10000 == 0) {
      print(paste(k,"of",n.obs,"in well",well))
      flush.console()
    }
  }
  out
}
set.Adaptive.Threshold<-function(dfm){
  ## Compute adaptive thresholds for all 12 wells from the previously stored
  ## adaptive standard and attach them as dfm$Thresholds.
  if(is.null(dfm$AdaptiveStandard)) {
    stop("DFM must have standard.")
  }
  tmp<-Set.Adaptive.Threshold.Well(dfm,1)
  ## Bug fix: Thresholds was assigned into (`Thresholds$W1<-tmp`) before the
  ## list existed; initialize it here, mirroring Set.Fixed.Threshold().
  Thresholds<-list(W1=tmp)
  for(i in 2:12){
    s<-paste("W",i,sep="")
    tmp<-Set.Adaptive.Threshold.Well(dfm,i)
    Thresholds[[s]]<-tmp
  }
  dfm$Thresholds<-Thresholds
  dfm
}
Set.Adaptive.Threshold.Well<-function(dfm,well){
  ## Scale one well's adaptive standard into the four detection thresholds,
  ## clamping each at the configured absolute minimum.
  ## Returns a data frame with FeedingMax/FeedingMin/TastingMax/TastingMin.
  cname<-paste("W",well,sep="")
  stand<-dfm$AdaptiveStandard[,cname]
  ## Bug fix: this function referenced an undefined object "chamber"; all
  ## parameters live on dfm.
  feeding.max.thresh<-dfm$Parameters$Feeding.Threshold.Value*stand
  feeding.min.thresh<-dfm$Parameters$Feeding.Interval.Minimum*stand
  ## Bug fix: the Low/High interval bounds were swapped relative to
  ## Set.Fixed.Threshold.Well(), which would have made TastingMin exceed
  ## TastingMax and suppress all tasting licks.
  tasting.min.thresh<-dfm$Parameters$Tasting.Threshold.Interval.Low*stand
  tasting.max.thresh<-dfm$Parameters$Tasting.Threshold.Interval.High*stand
  min.thresh<-dfm$Parameters$Adaptive.Threshold.Minimum
  feeding.max.thresh[feeding.max.thresh<min.thresh]<-min.thresh
  feeding.min.thresh[feeding.min.thresh<min.thresh]<-min.thresh
  tasting.max.thresh[tasting.max.thresh<min.thresh]<-min.thresh
  tasting.min.thresh[tasting.min.thresh<min.thresh]<-min.thresh
  r.tmp<-data.frame(feeding.max.thresh,feeding.min.thresh,tasting.max.thresh,tasting.min.thresh)
  names(r.tmp)<-c("FeedingMax","FeedingMin","TastingMax","TastingMin")
  r.tmp
}
Set.Fixed.Threshold<-function(dfm){
  ## Compute fixed (constant per-sample) thresholds for all 12 wells and
  ## attach them to the DFM as dfm$Thresholds, one data frame per well.
  Thresholds<-list()
  for(w in seq_len(12)){
    Thresholds[[paste("W",w,sep="")]]<-Set.Fixed.Threshold.Well(dfm,w)
  }
  dfm$Thresholds<-Thresholds
  dfm
}
Set.Fixed.Threshold.Well<-function(dfm,well){
  ## Build constant per-sample thresholds for one well.
  ## A negative Feeding.Threshold.Value switches to well-relative mode: every
  ## threshold parameter is treated as a fraction of the well's maximum
  ## baselined reading (rounded); otherwise the values are used directly.
  n<-SampleCount(dfm)
  if(dfm$Parameters$Feeding.Threshold.Value<0){
    peak<-max(BaselinedData.Well(dfm,well))
    feed.max<-round(peak*abs(dfm$Parameters$Feeding.Threshold.Value),0)
    feed.min<-round(peak*abs(dfm$Parameters$Feeding.Interval.Minimum),0)
    taste.min<-round(peak*abs(dfm$Parameters$Tasting.Threshold.Interval.Low),0)
    taste.max<-round(peak*abs(dfm$Parameters$Tasting.Threshold.Interval.High),0)
  } else {
    feed.max<-dfm$Parameters$Feeding.Threshold.Value
    feed.min<-dfm$Parameters$Feeding.Interval.Minimum
    taste.min<-dfm$Parameters$Tasting.Threshold.Interval.Low
    taste.max<-dfm$Parameters$Tasting.Threshold.Interval.High
  }
  ## Column order matches the adaptive version: FeedingMax, FeedingMin,
  ## TastingMax, TastingMin.
  r.tmp<-data.frame(rep(feed.max,n),rep(feed.min,n),rep(taste.max,n),rep(taste.min,n))
  names(r.tmp)<-c("FeedingMax","FeedingMin","TastingMax","TastingMin")
  r.tmp
}
Set.Tasting.Data<-function(dfm){
  ## Compute tasting licks for all 12 wells and store them as dfm$TastingData.
  ## Requires baselined data and feeding licks (tasting excludes feeding).
  if(is.null(dfm$BaselineData))
    stop("Baseline must be calculated")
  if(is.null(dfm$LickData))
    stop("Feeding Licks must be calculated")
  ## Start from the baseline frame so non-well columns (Minutes, etc.) carry over.
  tasting<-dfm$BaselineData
  for(w in seq_len(12)) {
    tasting[,paste("W",w,sep="")]<-Set.Tasting.Data.Well(dfm,w)
  }
  dfm$TastingData<-tasting
  dfm
}
Set.Tasting.Data.Well<-function(dfm,well){
  ## Tasting licks for one well: baselined samples strictly between the
  ## TastingMin and TastingMax thresholds. Feeding licks must already be
  ## computed because feeding takes precedence -- any sample that is a
  ## feeding lick is excluded from tasting.
  thresh<-Thresholds.Well(dfm,well)
  signal<-BaselinedData.Well(dfm,well)
  taste<-(signal > thresh$TastingMin) & (signal < thresh$TastingMax)
  taste[FeedingData.Well.Licks(dfm,well)]<-FALSE
  taste
}
Set.Durations.And.Intervals<-function(dfm){
  ## Build per-well feeding-bout duration and inter-bout interval tables for
  ## all 12 wells and attach them as dfm$Durations / dfm$Intervals.
  Durations<-list()
  Intervals<-list()
  for(w in seq_len(12)){
    key<-paste("W",w,sep="")
    per.well<-Set.Durations.And.Intervals.Well(dfm,w)
    Durations[[key]]<-per.well$Durations
    Intervals[[key]]<-per.well$Intervals
  }
  dfm$Durations<-Durations
  dfm$Intervals<-Intervals
  dfm
}
Set.Durations.And.Intervals.Well<-function(dfm,well){
  ## Summarize feeding behavior for one well.
  ## Returns list(Durations=..., Intervals=...): each element is a data frame
  ## when bouts/intervals exist, otherwise the scalar 0.
  data<-BaselineData.Well(dfm,well)
  events<-FeedingData.Well.Events(dfm,well)
  ## Event vector convention: the first sample of each feeding bout holds the
  ## bout length (in samples); everything else is 0.
  indices<-seq_along(events)           # seq_along is safe for length-0 input
  indices<-indices[events>0]
  boutDurs<-events[events>0]
  Durations<-0
  if(length(boutDurs)>0) {
    ## Per-bout intensity summaries over the baselined signal.
    max.inten<-rep(0,length(indices))
    min.inten<-rep(0,length(indices))
    sum.inten<-rep(0,length(indices))
    avg.inten<-rep(0,length(indices))
    var.inten<-rep(0,length(indices))
    for(i in seq_along(indices)){
      dataindex<-indices[i]
      eventlength<-boutDurs[i]
      tmp2<-data[dataindex:(dataindex+(eventlength-1))]
      max.inten[i]<-max(tmp2)
      min.inten[i]<-min(tmp2)
      sum.inten[i]<-sum(tmp2)
      avg.inten[i]<-mean(tmp2)
      var.inten[i]<-var(tmp2)
    }
    BoutData<-data.frame(min.inten,max.inten,sum.inten,avg.inten,var.inten)
    names(BoutData)<-c("MinIntensity","MaxIntensity","SumIntensity","MeanIntensity","VarIntensity")
    ## Timestamp each bout with the minute of its first sample.
    tmp<-BaselineData(dfm)
    tmp<-tmp[indices,]
    Minutes<-tmp$Minutes
    Events<-boutDurs
    Duration<-Events/dfm$Parameters$Samples.Per.Sec
    Durations<-data.frame(Minutes,Events,Duration,BoutData$SumIntensity,
                          BoutData$MeanIntensity,BoutData$MinIntensity,
                          BoutData$MaxIntensity,BoutData$VarIntensity)
    names(Durations)<-c("Minutes","Licks","Duration","TotalIntensity","AvgIntensity","MinIntensity","MaxIntensity","VarIntensity")
  }
  ## Now intervals: collapse feeding data to the gaps BETWEEN events and
  ## convert sample counts to seconds.
  boutInt<-Get.Intervals(FeedingData.Well.Licks(dfm,well))
  indices<-seq_along(boutInt)
  indices<-indices[boutInt>0]
  boutInt<-boutInt[boutInt>0]
  sps<-dfm$Parameters$Samples.Per.Sec
  intA<-boutInt/sps
  Ints<-0
  if(length(intA)>0) {
    tmp<-BaselineData(dfm)
    tmp<-tmp[indices,]
    Minutes<-tmp$Minutes
    Sample<-tmp$Sample
    IntervalSec<-intA
    Ints<-data.frame(Minutes,Sample,IntervalSec)
  }
  ## Note: a dead intermediate assignment (result<-list(Durations=Durations))
  ## that was immediately overwritten has been removed.
  list(Durations=Durations,Intervals=Ints)
}
Thresholds.Well<-function(dfm,well,range=c(0,0)){
  ## Threshold table for one well; when a non-zero range is given, keep only
  ## samples whose BaselineData$Minutes fall strictly inside (range[1], range[2]).
  result<-dfm$Thresholds[[paste("W",well,sep="")]]
  if(sum(range)!=0) {
    keep<-(dfm$BaselineData$Minutes>range[1]) & (dfm$BaselineData$Minutes<range[2])
    result<-result[keep,]
  }
  result
}
BaselinedData.Well<-function(dfm,well,range=c(0,0)) {
  ## Baselined signal vector for one well, optionally windowed to samples with
  ## Minutes strictly inside (range[1], range[2]).
  vals<-dfm$BaselineData[[paste("W",well,sep="")]]
  if(sum(range)!=0) {
    minutes<-dfm$BaselineData$Minutes
    vals<-vals[minutes>range[1] & minutes<range[2]]
  }
  vals
}
BaselinedData<-function(dfm,range=c(0,0)) {
  ## Full baselined data frame, optionally windowed to rows with Minutes
  ## strictly inside (range[1], range[2]).
  out<-dfm$BaselineData
  if(sum(range)!=0) {
    out<-out[out$Minutes>range[1] & out$Minutes<range[2],]
  }
  out
}
SampleCount<-function(dfm,range=c(0,0)){
  ## Number of baselined samples (rows) in the optional minute range.
  windowed<-BaselinedData(dfm,range)
  nrow(windowed)
}
FeedingData.Well.Licks<-function(dfm,well,range=c(0,0)){
  ## Feeding-lick vector for one well over the optional minute range.
  FeedingData.Licks(dfm,range)[[paste("W",well,sep="")]]
}
## Remember that this function returns a vector with
## duration of event information as well.
## Need to set these to 1 to get number of events.
FeedingData.Well.Events<-function(dfm,well,range=c(0,0)){
  ## Event-duration vector for one well: the first sample of each event holds
  ## its length in samples, other samples are 0 (see FeedingData.Events).
  FeedingData.Events(dfm,range)[[paste("W",well,sep="")]]
}
TastingData.Well<-function(dfm,well,range=c(0,0)){
  ## Tasting-lick vector for one well, optionally windowed by minutes.
  cname=paste("W",well,sep="")
  tmp<-dfm$TastingData[,cname]
  if(sum(range)!=0) {
    ## Bug fix: the original filtered on tmp$Minutes, but tmp is an atomic
    ## column vector ($ on it is an error). Filter on the data frame's
    ## Minutes column instead, matching BaselinedData.Well().
    minutes<-dfm$TastingData$Minutes
    tmp<- tmp[(minutes>range[1]) & (minutes<range[2])]
  }
  tmp
}
FeedingData.Licks<-function(dfm,range=c(0,0)){
  ## Feeding-lick data frame, optionally windowed by minutes.
  licks<-dfm$LickData
  if(sum(range)!=0) {
    licks<-licks[licks$Minutes>range[1] & licks$Minutes<range[2],]
  }
  licks
}
## Remember that this function returns a vector with
## duration of event information as well.
## Need to set these to 1 to get number of events.
FeedingData.Events<-function(dfm,range=c(0,0)){
  ## Event-duration data frame (first sample of each event carries its length
  ## in samples), optionally windowed by minutes.
  ev<-dfm$EventData
  if(sum(range)!=0) {
    ev<-ev[ev$Minutes>range[1] & ev$Minutes<range[2],]
  }
  ev
}
TastingData<-function(dfm,range=c(0,0)){
  ## Tasting-lick data frame, optionally windowed by minutes.
  taste<-dfm$TastingData
  if(sum(range)!=0) {
    taste<-taste[taste$Minutes>range[1] & taste$Minutes<range[2],]
  }
  taste
}
Feeding.TotalLicks<-function(dfm,range=c(0,0)){
  ## Total feeding licks per well over the optional minute range.
  ## Returns a named numeric vector W1..W12.
  licks<-FeedingData.Licks(dfm,range)
  totals<-vapply(seq_len(12),
                 function(w) as.numeric(sum(licks[,paste("W",w,sep="")])),
                 numeric(1))
  names(totals)<-paste("W",1:12,sep="")
  totals
}
Feeding.TotalLicks.Well<-function(dfm,well,range=c(0,0)){
  ## Total feeding licks for a single well (see Feeding.TotalLicks).
  Feeding.TotalLicks(dfm,range)[well]
}
Feeding.TotalEvents<-function(dfm,range=c(0,0)){
  ## Number of feeding events per well over the optional minute range (an
  ## event is any positive entry in the event-duration data).
  ## Returns a named numeric vector W1..W12.
  ev<-FeedingData.Events(dfm,range)
  totals<-vapply(seq_len(12),
                 function(w) as.numeric(sum(ev[,paste("W",w,sep="")]>0)),
                 numeric(1))
  names(totals)<-paste("W",1:12,sep="")
  totals
}
Feeding.TotalEvents.Well<-function(dfm,well,range=c(0,0)){
  ## Number of feeding events for a single well (see Feeding.TotalEvents).
  Feeding.TotalEvents(dfm,range)[well]
}
Tasting.TotalLicks<-function(dfm,range=c(0,0)){
  ## Total tasting licks per well over the optional minute range.
  ## Returns a named numeric vector W1..W12.
  taste<-TastingData(dfm,range)
  totals<-vapply(seq_len(12),
                 function(w) as.numeric(sum(taste[,paste("W",w,sep="")])),
                 numeric(1))
  names(totals)<-paste("W",1:12,sep="")
  totals
}
Tasting.TotalLicks.Well<-function(dfm,well,range=c(0,0)){
  ## Total tasting licks for a single well (see Tasting.TotalLicks).
  Tasting.TotalLicks(dfm,range)[well]
}
BaselineData<-function(dfm,range=c(0,0)){
  ## Baseline data frame, optionally windowed to rows with Minutes strictly
  ## inside (range[1], range[2]). (Companion of BaselinedData.)
  frame<-dfm$BaselineData
  if(sum(range)!=0) {
    frame<-frame[frame$Minutes>range[1] & frame$Minutes<range[2],]
  }
  frame
}
BaselineData.Well=function(dfm,well,range=c(0,0)) {
  ## One well's column of the (optionally windowed) baseline data frame.
  BaselineData(dfm,range)[[paste("W",well,sep="")]]
}
RawData=function(dfm,range=c(0,0)) {
  ## Raw (un-baselined) data frame, optionally windowed by minutes.
  raw<-dfm$RawData
  if(sum(range)!=0) {
    raw<-raw[raw$Minutes>range[1] & raw$Minutes<range[2],]
  }
  raw
}
Feeding.IntervalSummary.Well<-function(dfm,well,range=c(0,0)){
  ## Summarize the gaps between feeding events for one well.
  ## Returns a one-row data frame (MeanTimeBtw, MedTimeBtw) in seconds; both
  ## are 0 when the well has no interval table or no intervals in the range.
  ivs<-dfm$Intervals[[paste("W",well,sep="")]]
  a<-0
  aa<-0
  ## Wells without intervals store the scalar 0 rather than a data frame.
  if(is.data.frame(ivs)){
    if(sum(range)!=0)
      ivs<-ivs[(ivs$Minutes>range[1]) & (ivs$Minutes<range[2]),]
    if(nrow(ivs)>0){
      a<-mean(ivs$IntervalSec)
      aa<-median(ivs$IntervalSec)
    }
  }
  ## Guard against NA/NaN from degenerate interval sets.
  if(is.na(a)||is.nan(a)) a<-0
  if(is.na(aa)||is.nan(aa)) aa<-0
  out<-data.frame(a,aa)
  names(out)<-c("MeanTimeBtw","MedTimeBtw")
  out
}
Feeding.DurationSummary.Well<-function(dfm,well,range=c(0,0)){
  ## Summarize feeding-bout durations for one well.
  ## Returns a one-row data frame (MeanDur, MedianDur) in seconds; both are 0
  ## when the well has no duration table or no bouts in the range.
  durs<-dfm$Durations[[paste("W",well,sep="")]]
  a<-0
  aa<-0
  ## Wells without bouts store the scalar 0 rather than a data frame.
  if(is.data.frame(durs)){
    if(sum(range)!=0)
      durs<-durs[(durs$Minutes>range[1]) & (durs$Minutes<range[2]),]
    if(nrow(durs)>0){
      a<-mean(durs$Duration)
      aa<-median(durs$Duration)
    }
  }
  ## Guard against NA/NaN from degenerate duration sets.
  if(is.na(a)||is.nan(a)) a<-0
  if(is.na(aa)||is.nan(aa)) aa<-0
  out<-data.frame(a,aa)
  names(out)<-c("MeanDur","MedianDur")
  out
}
Feeding.IntensitySummary.Well<-function(dfm,well,range=c(0,0)){
  ## Mean/median baselined intensity over feeding-lick samples for one well.
  ## Both are 0 when there are no feeding licks in the range.
  signal<-BaselineData.Well(dfm,well,range)
  lick.samples<-signal[FeedingData.Well.Licks(dfm,well,range)]
  if(length(lick.samples)==0){
    a<-0
    aa<-0
  } else {
    a<-mean(lick.samples)
    aa<-median(lick.samples)
  }
  out<-data.frame(a,aa)
  names(out)<-c("MeanInt","MedianInt")
  out
}
IsThresholdAdaptive<-function(dfm) {
  ## TRUE when the DFM is configured to use adaptive thresholds.
  dfm[["Parameters"]][["Use.Adaptive.Threshold"]]
}
BaselinedData.Range.Well<-function(dfm,well,range=c(0,0)){
  ## c(min, max) of the baselined signal for one well over the given window.
  ## (Note: base::range() is shadowed here by the `range` argument.)
  vals<-BaselinedData.Well(dfm,well,range)
  c(min(vals),max(vals))
}
Minutes<-function(dfm) {
  ## Minute timestamps of the baselined samples.
  dfm[["BaselineData"]][["Minutes"]]
}
Feeding.Durations.Well<-function(dfm,well){
  ## Per-bout duration table for one well (built by Set.Durations.And.Intervals).
  dfm$Durations[[paste("W",well,sep="")]]
}
Feeding.Intervals.Well<-function(dfm,well){
  ## Per-gap interval table for one well (built by Set.Durations.And.Intervals).
  dfm$Intervals[[paste("W",well,sep="")]]
}
LastSampleData.Well<-function(dfm,well){
  ## Final baselined reading for one well.
  vals<-BaselinedData.Well(dfm,well)
  vals[length(vals)]
}
FirstSampleData.Well<-function(dfm,well){
  ## First baselined reading for one well.
  BaselinedData.Well(dfm,well)[1]
}
LastSampleData<-function(dfm){
  ## Final row of the baselined data frame.
  all.data<-BaselinedData(dfm)
  all.data[nrow(all.data),]
}
FirstSampleData<-function(dfm){
  ## First row of the baselined data frame.
  BaselinedData(dfm)[1,]
}
#########################
## Utilities
## This function takes 2 vectors, one with the events
## above a minimal threshold (minvec) and one that
## specifies events that pass a more stringent threshold (maxvec).
## Contiguous events are only kept if at least one
## value in the event, which is defined by minvec, is above
## the higher threshold, which is defined by max vec
## z <- c(TRUE,TRUE,FALSE,FALSE,TRUE,FALSE,TRUE,TRUE,TRUE)
## zz <- c(FALSE,TRUE,FALSE,FALSE,FALSE,FALSE,FALSE,FALSE,TRUE)
## Get.Surviving.Events(z,zz) -> (2 0 0 0 0 0 3 0 0)
Get.Surviving.Events<-function(minvec,maxvec){
  ## Keep only events (contiguous TRUE runs of minvec, encoded by Get.Events)
  ## that contain at least one sample passing the stricter maxvec threshold;
  ## the rest have their duration marker zeroed.
  ## Get.Surviving.Events(z, zz) -> (2 0 0 0 0 0 3 0 0) for the doc example.
  events<-Get.Events(minvec)
  result<-events
  for(start in which(events>0)){
    span<-start:(start+events[start]-1)
    if(sum(maxvec[span])==0)
      result[start]<-0
  }
  result
}
## This function is the reverse of Get.Events
## (2 0 0 0 1 0 3 0 0) -> c(TRUE,TRUE,FALSE,FALSE,TRUE,FALSE,TRUE,TRUE,TRUE)
Expand.Events<-function(eventvec){
  ## Inverse of Get.Events(): expand duration markers back into a per-sample
  ## logical vector. (2 0 0 0 1 0 3 0 0) -> (T T F F T F T T T)
  result<-rep(FALSE,length(eventvec))
  for(start in which(eventvec>0)){
    result[seq(start,length.out=eventvec[start])]<-TRUE
  }
  result
}
## These functions are helper functions for the basic calculations
# This function replaces continuing events with zero and make the first event of that
# episode equal to its duration.
## c(TRUE,TRUE,FALSE,FALSE,TRUE,FALSE,TRUE,TRUE,TRUE) -> (2 0 0 0 1 0 3 0 0)
Get.Events<-function(z){
  ## Encode a logical lick vector as event markers: the first sample of each
  ## TRUE run carries the run length; all other samples are 0.
  ## c(TRUE,TRUE,FALSE,FALSE,TRUE,FALSE,TRUE,TRUE,TRUE) -> (2 0 0 0 1 0 3 0 0)
  ## Rewritten to preallocate the result (the old version grew it with c(),
  ## which is quadratic) and to handle zero-length input (the old 1:length
  ## loop errored on it).
  runs<-rle(z)
  result<-rep(0,length(z))
  if(length(runs$lengths)>0){
    ## Start index of each run = cumulative length of the preceding runs + 1.
    starts<-cumsum(c(1,runs$lengths[-length(runs$lengths)]))
    keep<-runs$values
    result[starts[keep]]<-runs$lengths[keep]
  }
  result
}
Get.Events.And.Intensities<-function(z,data){
  ## For each event in z (see Get.Events), compute intensity summaries of the
  ## baselined signal over the event's samples. Non-event rows stay 0.
  ## Returns a data frame: FeedingEvent, Min/Max/Sum/MeanIntensity.
  ev<-Get.Events(z)
  n<-length(ev)
  mx<-rep(0,n)
  mn<-rep(0,n)
  sm<-rep(0,n)
  av<-rep(0,n)
  for(start in which(ev>0)){
    span<-data[start:(start+(ev[start]-1))]
    mx[start]<-max(span)
    mn[start]<-min(span)
    sm[start]<-sum(span)
    av[start]<-mean(span)
  }
  result<-data.frame(ev,mn,mx,sm,av)
  names(result)<-c("FeedingEvent","MinIntensity","MaxIntensity","SumIntensity","MeanIntensity")
  result
}
# This function replaces continuing events with zero and make the first event of that
# episode equal to its duration.
## c(TRUE,TRUE,FALSE,FALSE,TRUE,FALSE,TRUE,TRUE,TRUE) -> (0 0 2 0 0 1 0 0 0)
Get.Intervals<-function(z){
  ## Mirror of Get.Events() for the gaps: the first sample of each FALSE run
  ## carries the run length; all other samples are 0.
  ## c(TRUE,TRUE,FALSE,FALSE,TRUE,FALSE,TRUE,TRUE,TRUE) -> (0 0 2 0 0 1 0 0 0)
  ## Rewritten to preallocate (the old version grew the result with c(),
  ## which is quadratic) and to handle zero-length input.
  runs<-rle(z)
  result<-rep(0,length(z))
  if(length(runs$lengths)>0){
    starts<-cumsum(c(1,runs$lengths[-length(runs$lengths)]))
    keep<-!runs$values
    result[starts[keep]]<-runs$lengths[keep]
  }
  result
}
CleanDFM<-function(){
  ## Remove hidden per-monitor DFM objects (created by UpdateHiddenDFMObject)
  ## from position 1 of the search path (the global environment).
  ## NOTE(review): ls(pattern=) takes a regex, so "DFM." matches "DFM"
  ## followed by ANY character and the second "DFM.." pass is redundant with
  ## the first -- confirm whether a literal-dot or digit pattern was intended.
  tmp<-ls(pattern="DFM.",pos=1)
  rm(list=tmp,pos=1)
  tmp<-ls(pattern="DFM..",pos=1)
  rm(list=tmp,pos=1)
}
UpdateHiddenDFMObject<-function(dfm){
  ## Publish the DFM under the name DFM<ID> in position 1 of the search path
  ## (the global environment) so other routines can find it by monitor ID.
  assign(paste("DFM",dfm$ID,sep=""),dfm,pos=1)
}
GetDFMParameterVector<-function(dfm){
  ## Flatten the DFM's parameter object into a vector (see GetParameterVector).
  params<-dfm$Parameters
  GetParameterVector(params)
}
ChangeParameterObject<-function(dfm,newP) {
  ## Swap in a new parameter object and recompute only the downstream data the
  ## changed fields invalidate. Recomputation tiers, most to least expensive:
  ## baseline -> adaptive standard/threshold -> threshold rescale ->
  ## feeding/tasting/PI/duration tables. Returns the updated dfm and refreshes
  ## the hidden global DFM<ID> object.
  p<-dfm$Parameters
  baseline.flag<-FALSE
  threshold.flag<-FALSE
  adaptive.baseline.flag<-FALSE
  eventpi.flag<-FALSE
  ## Suppress warnings during the field-by-field comparisons; the previous
  ## options are restored before returning.
  tmp.O<-options()
  options(warn=-1)
  dfm$Parameters<-newP
  ## Change only those that are listed
  if(p$Baseline.Window.Minutes!=newP$Baseline.Window.Minutes) {
    baseline.flag<-TRUE
  }
  if(p$Feeding.Threshold.Value!=newP$Feeding.Threshold.Value) {
    threshold.flag<-TRUE
  }
  if(p$Feeding.Interval.Minimum!=newP$Feeding.Interval.Minimum) {
    threshold.flag<-TRUE
  }
  if(p$Tasting.Threshold.Interval.Low!=newP$Tasting.Threshold.Interval.Low) {
    threshold.flag<-TRUE
  }
  if(p$Tasting.Threshold.Interval.High!=newP$Tasting.Threshold.Interval.High) {
    threshold.flag<-TRUE
  }
  if(p$Adaptive.Threshold.Minimum!=newP$Adaptive.Threshold.Minimum){
    threshold.flag<-TRUE
  }
  if(p$Adaptive.Threshold.Window.Minutes!=newP$Adaptive.Threshold.Window.Minutes){
    adaptive.baseline.flag<-TRUE
  }
  ## NOTE(review): compares "Adaptive.Threshold.Selection.Quant", but
  ## Set.Adaptive.Standard.Well() reads "...Selection.Quan" -- confirm which
  ## spelling the parameter object actually uses.
  if(p$Adaptive.Threshold.Selection.Quant!=newP$Adaptive.Threshold.Selection.Quant){
    adaptive.baseline.flag<-TRUE
  }
  if(p$Use.Adaptive.Threshold!=newP$Use.Adaptive.Threshold){
    adaptive.baseline.flag<-TRUE
  }
  if(p$Feeding.Minevents!=newP$Feeding.Minevents){
    eventpi.flag<-TRUE
  }
  ## NOTE(review): compares "Samples.Per.Second", but other code in this file
  ## (e.g. Set.Adaptive.Standard.Well) reads "Samples.Per.Sec" -- confirm the
  ## field name; a mismatch would make this comparison NULL != NULL.
  if(p$Samples.Per.Second!=newP$Samples.Per.Second){
    adaptive.baseline.flag<-TRUE
  }
  if(p$Chamber.Size !=newP$Chamber.Size){
    baseline.flag<-TRUE
  }
  ## Element-wise comparison of the chamber layout matrices.
  if(sum(c(p$Chamber.Sets)!=c(newP$Chamber.Sets))!=0){
    baseline.flag<-TRUE
  }
  if(p$Signal.Threshold!=newP$Signal.Threshold){
    baseline.flag<-TRUE
  }
  ## Now update the stats needed
  if(baseline.flag==TRUE) {
    dfm<-CalculateBaseline(dfm)
  }
  else if(adaptive.baseline.flag==TRUE){
    dfm<-SetThreshold(dfm)
  }
  else if(threshold.flag==TRUE) {
    ## Thresholds can be rescaled without recomputing the adaptive standard.
    dfm<-SetThreshold(dfm,getStandard=FALSE)
  }
  else if(eventpi.flag==TRUE) {
    dfm<-Set.Feeding.Data(dfm)
    dfm<-Set.Tasting.Data(dfm)
    if(dfm$Parameters$Chamber.Size==2){
      dfm<-Set.PI.Data(dfm)
    }
    dfm<-Set.Durations.And.Intervals(dfm)
  }
  options(tmp.O)
  UpdateHiddenDFMObject(dfm)
  dfm
}
|
## Auto-generated fuzzing harness (valgrind test case): call DLMtool::LBSPRgen
## with an arbitrary argument list and print the structure of the result.
testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 7.0657915712477e-304, L50 = 0, L95 = 0, LenBins = c(2.0975686864138e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.07063092954708e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010211815e+31, 191.463561345044, 1.58785813294449e+217, 1.90326589719466e-118, -3.75494418025505e-296, -2.63346094087863e+200, -5.15510035957975e+44, 2.59028521047075e+149, 1.60517426337473e+72, 1.74851929178852e+35, 1.32201752290843e-186, -1.29599553894715e-227, 3.20314220604904e+207, 584155875718587, 1.71017833066717e-283, -3.96505607598107e+51, 5.04440990041945e-163, -5.09127626480085e+268, 2.88137633290038e+175, 6.22724404181897e-256, 4.94195713773372e-295, 5.80049493946414e+160, -5612008.23597089, -2.68347267272935e-262, 1.28861520348431e-305, -5.05455182157157e-136, 4.44386438170367e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38575911145229e-262, -1.19026551334786e-217, -3.54406233509625e-43, -4.15938611724176e-209, -3.06799941292011e-106, 1.78044357763692e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -1.47333345730049e-67, -2.92763930406321e+21 ), LenMids = c(-1.121210344879e+131, -1.121210344879e+131, NaN), Linf = 2.81991272491703e-308, MK = -2.08633459786369e-239, Ml = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), Prob = structure(c(4.48157192325537e-103, 2.43305969276274e+59, 6.5730975202806e-96, 2.03987918888949e-104, 4.61871336464985e-39, 1.10811931066926e+139), .Dim = c(1L, 6L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(4.74956174024781e+199, -7.42049538387034e+278, -5.82966399158032e-71, -6.07988133887702e-34, 4.62037926128924e-295, -8.48833146280612e+43, 2.71954993859316e-126 ))
## do.call spreads the list entries as named arguments of LBSPRgen.
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result)
|
/DLMtool/inst/testfiles/LBSPRgen/AFL_LBSPRgen/LBSPRgen_valgrind_files/1615837405-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false
| false
| 2,047
|
r
|
## Duplicate of the LBSPRgen fuzzing harness above: call DLMtool::LBSPRgen
## with an auto-generated argument list and print the result's structure.
testlist <- list(Beta = 0, CVLinf = 86341236051411296, FM = 7.0657915712477e-304, L50 = 0, L95 = 0, LenBins = c(2.0975686864138e+162, -2.68131210337361e-144, -1.11215735981244e+199, -4.48649879577108e+143, 1.6611802228813e+218, 900371.947279558, 1.07063092954708e+238, 2.88003257377011e-142, 1.29554141202795e-89, -1.87294312860528e-75, 3.04319010211815e+31, 191.463561345044, 1.58785813294449e+217, 1.90326589719466e-118, -3.75494418025505e-296, -2.63346094087863e+200, -5.15510035957975e+44, 2.59028521047075e+149, 1.60517426337473e+72, 1.74851929178852e+35, 1.32201752290843e-186, -1.29599553894715e-227, 3.20314220604904e+207, 584155875718587, 1.71017833066717e-283, -3.96505607598107e+51, 5.04440990041945e-163, -5.09127626480085e+268, 2.88137633290038e+175, 6.22724404181897e-256, 4.94195713773372e-295, 5.80049493946414e+160, -5612008.23597089, -2.68347267272935e-262, 1.28861520348431e-305, -5.05455182157157e-136, 4.44386438170367e+50, -2.07294901774837e+254, -3.56325845332496e+62, -1.38575911145229e-262, -1.19026551334786e-217, -3.54406233509625e-43, -4.15938611724176e-209, -3.06799941292011e-106, 1.78044357763692e+244, -1.24657398993838e+190, 1.14089212334828e-90, 136766.715673668, -1.47333345730049e-67, -2.92763930406321e+21 ), LenMids = c(-1.121210344879e+131, -1.121210344879e+131, NaN), Linf = 2.81991272491703e-308, MK = -2.08633459786369e-239, Ml = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), Prob = structure(c(4.48157192325537e-103, 2.43305969276274e+59, 6.5730975202806e-96, 2.03987918888949e-104, 4.61871336464985e-39, 1.10811931066926e+139), .Dim = c(1L, 6L)), SL50 = 9.97941197291525e-316, SL95 = 2.12248160522076e-314, nage = 682962941L, nlen = 1623851345L, rLens = c(4.74956174024781e+199, -7.42049538387034e+278, -5.82966399158032e-71, -6.07988133887702e-34, 4.62037926128924e-295, -8.48833146280612e+43, 2.71954993859316e-126 ))
result <- do.call(DLMtool::LBSPRgen,testlist)
str(result)
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
makeCacheMatrix <- function(x = matrix()) {
  ## Wrap a square invertible matrix together with a cache for its inverse.
  ## Returns a list of closures: set/get the matrix, setinv/getinv the cached
  ## inverse. Setting a new matrix clears the cache.
  inv <- NULL
  set <- function(y) {
    ## `<<-` updates the enclosing environment shared by all closures.
    x <<- y
    inv <<- NULL
  }
  get <- function() x
  setinv <- function(inverse) inv <<- inverse
  getinv <- function() inv
  list(set = set,
       get = get,
       setinv = setinv,
       getinv = getinv)
}
cacheSolve <- function(x, ...) {
  ## Return the inverse of the matrix held in a makeCacheMatrix() object,
  ## computing it at most once and serving the cached copy thereafter.
  ## Extra arguments are forwarded to solve().
  cached <- x$getinv()
  if (!is.null(cached)) {
    ## Cache hit: skip the computation entirely.
    return(cached)
  }
  inverse <- solve(x$get(), ...)
  x$setinv(inverse)
  inverse
}
|
/cachematrix.R
|
no_license
|
gaganarora1/ProgrammingAssignment2
|
R
| false
| false
| 1,370
|
r
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
makeCacheMatrix <- function(x = matrix()) {
  ## Holder for a square invertible matrix plus a memoized inverse.
  ## Exposes set/get for the matrix and setinv/getinv for the inverse;
  ## replacing the matrix invalidates the cached inverse.
  inv <- NULL
  set <- function(y) {
    x <<- y          # rebind in the shared enclosing environment
    inv <<- NULL     # stale inverse must be discarded
  }
  get <- function() x
  setinv <- function(inverse) inv <<- inverse
  getinv <- function() inv
  list(set = set, get = get,
       setinv = setinv, getinv = getinv)
}
cacheSolve <- function(x, ...) {
  ## Inverse of the matrix stored in a makeCacheMatrix() object; the first
  ## call computes and caches it, later calls return the cached value.
  memo <- x$getinv()
  if (is.null(memo)) {
    memo <- solve(x$get(), ...)
    x$setinv(memo)
  }
  memo
}
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
# Initializes the inverse to null
# Defines setters and getters function
# Returns the setters and getters
makeCacheMatrix <- function(m = matrix()) {
  ## Store a matrix plus a cached inverse; returns setter/getter closures.
  inverse <- NULL
  ## Bug fix: the closures referenced an undefined variable "x" although the
  ## constructor argument is named "m" -- get() would error (or silently pick
  ## up a global x) and set() wrote to the wrong binding. All closures now
  ## operate on "m".
  set <- function(y) {
    m <<- y              # different scope with <<-
    inverse <<- NULL     # different scope with <<-
  }
  get <- function() m
  setInverse <- function(inv) inverse <<- inv # different scope with <<-
  getInverse<- function() inverse
  # returns the list of available functions
  list(set = set,
       get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}
## Write a short comment describing this function
# Gets the inverse of the matrix then checks if it is null
# If it is not null, it get the matrix
# Solve for the inverse
# Set the inverse
# Return the inverse
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x' (a makeCacheMatrix object),
  ## serving the cached inverse when one exists.
  cachedInverse <- x$getInverse() # gets cached inverse
  if(!is.null(cachedInverse)) { # check if it is null
    message("getting cached data")
    ## Bug fix: previously returned the undefined object "m" instead of the
    ## cached inverse.
    return(cachedInverse)
  }
  data <- x$get() # gets the matrix data
  calculatedInverse <- solve(data, ...) # computes for the inverse
  x$setInverse(calculatedInverse) # sets the cached inverse
  calculatedInverse # return the inverse
}
|
/cachematrix.R
|
no_license
|
jbdelmundo/ProgrammingAssignment2
|
R
| false
| false
| 1,564
|
r
|
## Put comments here that give an overall description of what your
## functions do
## Write a short comment describing this function
# Initializes the inverse to null
# Defines setters and getters function
# Returns the setters and getters
makeCacheMatrix <- function(m = matrix()) {
  ## Store a matrix plus a cached inverse; returns setter/getter closures.
  inverse <- NULL
  ## Bug fix: the closures referenced an undefined variable "x" although the
  ## constructor argument is named "m"; they now consistently use "m".
  set <- function(y) {
    m <<- y              # different scope with <<-
    inverse <<- NULL     # different scope with <<-
  }
  get <- function() m
  setInverse <- function(inv) inverse <<- inv # different scope with <<-
  getInverse<- function() inverse
  # returns the list of available functions
  list(set = set,
       get = get,
       setInverse = setInverse,
       getInverse = getInverse)
}
## Write a short comment describing this function
# Gets the inverse of the matrix then checks if it is null
# If it is not null, it get the matrix
# Solve for the inverse
# Set the inverse
# Return the inverse
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x' (a makeCacheMatrix object),
  ## serving the cached inverse when one exists.
  cachedInverse <- x$getInverse() # gets cached inverse
  if(!is.null(cachedInverse)) { # check if it is null
    message("getting cached data")
    ## Bug fix: previously returned the undefined object "m" instead of the
    ## cached inverse.
    return(cachedInverse)
  }
  data <- x$get() # gets the matrix data
  calculatedInverse <- solve(data, ...) # computes for the inverse
  x$setInverse(calculatedInverse) # sets the cached inverse
  calculatedInverse # return the inverse
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{county_points}
\alias{county_points}
\title{Eastern U.S. county latitude and longitudes}
\format{
A dataframe with 2,396 rows and 4 variables:
\describe{
\item{fips}{A character vector giving the county's five-digit Federal
Information Processing Standard (FIPS) code}
\item{glat}{A numeric vector giving the latitude of the population mean
center of each county}
\item{glon}{A numeric vector giving the longitude of the population mean
center of each county}
\item{glandsea}{A logical vector specifying whether each grid point is over
land (TRUE) or over water (FALSE).}
}
}
\source{
\url{http://www2.census.gov/geo/docs/reference/cenpop2010/county/CenPop2010_Mean_CO.txt}
}
\usage{
county_points
}
\description{
A dataframe containing locations of population mean centers for counties in
the eastern United States. Each county is identified by its 5-digit Federal
Information Processing Standard (FIPS) code. This dataframe can be used to
model storm winds at each county center. This dataset was put together using
a dataframe from the U.S. Census Bureau, which was pulled from the website
listed in "Source".
}
\keyword{datasets}
|
/man/county_points.Rd
|
no_license
|
geanders/stormwindmodel
|
R
| false
| true
| 1,326
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{county_points}
\alias{county_points}
\title{Eastern U.S. county latitude and longitudes}
\format{
A dataframe with 2,396 rows and 4 variables:
\describe{
\item{fips}{A character vector giving the county's five-digit Federal
Information Processing Standard (FIPS) code}
\item{glat}{A numeric vector giving the latitude of the population mean
center of each county}
\item{glon}{A numeric vector giving the longitude of the population mean
center of each county}
\item{glandsea}{A logical vector specifying whether each grid point is over
land (TRUE) or over water (FALSE).}
}
}
\source{
\url{http://www2.census.gov/geo/docs/reference/cenpop2010/county/CenPop2010_Mean_CO.txt}
}
\usage{
county_points
}
\description{
A dataframe containing locations of population mean centers for counties in
the eastern United States. Each county is identified by its 5-digit Federal
Information Processing Standard (FIPS) code. This dataframe can be used to
model storm winds at each county center. This dataset was put together using
a dataframe from the U.S. Census Bureau, which was pulled from the website
listed in "Source".
}
\keyword{datasets}
|
## Auto-generated fuzzing harness (valgrind test case): call the internal
## CNull sampling routine with an arbitrary argument list and print the result.
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.45429485869326e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result)
|
/CNull/inst/testfiles/communities_individual_based_sampling_alpha/AFL_communities_individual_based_sampling_alpha/communities_individual_based_sampling_alpha_valgrind_files/1615771481-test.R
|
no_license
|
akhikolla/updatedatatype-list2
|
R
| false
| false
| 362
|
r
|
## Duplicate of the CNull fuzzing harness above.
testlist <- list(m = NULL, repetitions = 0L, in_m = structure(c(2.31584307392677e+77, 9.45429485869326e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), .Dim = c(5L, 7L)))
result <- do.call(CNull:::communities_individual_based_sampling_alpha,testlist)
str(result)
|
## Read a Newick tree, remove its root, and write the unrooted tree back out.
library(ape)
testtree <- read.tree("1238_9.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="1238_9_unrooted.txt")
|
/codeml_files/newick_trees_processed_and_cleaned/1238_9/rinput.R
|
no_license
|
DaniBoo/cyanobacteria_project
|
R
| false
| false
| 135
|
r
|
## Duplicate of the tree-unrooting script above: read a Newick tree, unroot
## it, and write the result to a new file.
library(ape)
testtree <- read.tree("1238_9.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="1238_9_unrooted.txt")
|
## Confidence-interval comparisons using the oilabs inference() helper.
library(oilabs)
library(readr)
## NOTE(review): hard-coded setwd() makes this script machine-specific.
setwd("~/GitHub/A-DA460/R")
# read data set
run <- read_csv("run10.csv")
# remove the NA in divTot
run <- run[!is.na(run$divTot), ]
run$gender <- as.factor(run$gender)
## Two-sided theoretical CI for the M-F difference in mean divTot.
inference(y = run$divTot, x = run$gender, est = "mean", type = "ci", null = 0,
          alternative = "twosided", method = "theoretical",
          order = c("M","F"))
load("nc.RData")
## Two-sided theoretical CI for the smoker-nonsmoker difference in mean weight.
inference(y = nc$weight, x = nc$habit, est = "mean", type = "ci", null = 0,
          alternative = "twosided", method = "theoretical",
          order = c("smoker","nonsmoker"))
|
/R/final.R
|
no_license
|
mnblanco/DA460
|
R
| false
| false
| 573
|
r
|
## Duplicate of the confidence-interval script above (oilabs inference()).
library(oilabs)
library(readr)
setwd("~/GitHub/A-DA460/R")
# read data set
run <- read_csv("run10.csv")
# remove the NA in divTot
run <- run[!is.na(run$divTot), ]
run$gender <- as.factor(run$gender)
## CI for the M-F difference in mean divTot.
inference(y = run$divTot, x = run$gender, est = "mean", type = "ci", null = 0,
          alternative = "twosided", method = "theoretical",
          order = c("M","F"))
load("nc.RData")
## CI for the smoker-nonsmoker difference in mean birth weight.
inference(y = nc$weight, x = nc$habit, est = "mean", type = "ci", null = 0,
          alternative = "twosided", method = "theoretical",
          order = c("smoker","nonsmoker"))
|
##
## Begin shankarz code
##
## The original consturct is not shankarz's
## Shankarz only made incremental changes
.onAttach <- function(libname, pkgname) {
  ## Package attach hook; intentionally a no-op (the startup message is
  ## commented out and kept only for debugging).
  # For debugging purpose
  # packageStartupMessage("shankarz.TexST package from Natarajan Shankar attached")
}
# .onLoad borrowed as-is from documentation
# This routine is NOT code written by shankarz
.onLoad <- function(libname, pkgname) {
  ## Load hook: seed devtools.* options with defaults without clobbering any
  ## values the user has already set (pattern from the R packages docs).
  defaults <- list(
    devtools.path = "~/R-dev",
    devtools.install.args = "",
    devtools.desc.suggests = NULL,
    devtools.desc = list()
  )
  missing.opts <- setdiff(names(defaults), names(options()))
  if (length(missing.opts) > 0) {
    options(defaults[missing.opts])
  }
  invisible()
}
.onUnload <- function(libpath) {
  ## Unload hook; intentionally a no-op (debug message kept commented out).
  # For debugging purpose
  #packageStartupMessage("shankarz.TexST package from Natarajan Shankar unloaded")
}
##
## End shankarz code
##
|
/shankarz.TexST/R/zzz.R
|
no_license
|
shankar2016/Stanford-Data-Mining
|
R
| false
| false
| 874
|
r
|
##
## Begin shankarz code
##
## The original consturct is not shankarz's
## Shankarz only made incremental changes
# Hook run when the package is attached to the search path.
# Intentionally a no-op; the startup message is kept commented out for
# debugging only.
.onAttach <- function(libname, pkgname) {
# For debugging purpose
# packageStartupMessage("shankarz.TexST package from Natarajan Shankar attached")
}
# .onLoad borrowed as-is from documentation
# This routine is NOT code written by shankarz
# Hook run when the package namespace is loaded.
# Seeds devtools-related options with defaults, but never overrides a
# value the user has already set. Pattern follows the standard .onLoad
# example from the R packages documentation.
.onLoad <- function(libname, pkgname) {
  defaults <- list(
    devtools.path = "~/R-dev",
    devtools.install.args = "",
    devtools.desc.suggests = NULL,
    devtools.desc = list()
  )
  # Only install the defaults whose names are absent from options().
  unset <- setdiff(names(defaults), names(options()))
  if (length(unset) > 0) {
    options(defaults[unset])
  }
  invisible()
}
# Hook run when the package namespace is unloaded.
# Intentionally a no-op; the message is kept commented out for debugging.
.onUnload <- function(libpath) {
# For debugging purpose
#packageStartupMessage("shankarz.TexST package from Natarajan Shankar unloaded")
}
##
## End shankarz code
##
|
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
% SnpInformation.R
%
% by the Rdoc compiler part of the R.oo package.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{SnpInformation}
\docType{class}
\alias{SnpInformation}
\title{The SnpInformation class}
\description{
Package: aroma.affymetrix \cr
\bold{Class SnpInformation}\cr
\code{\link[R.oo]{Object}}\cr
\code{~~|}\cr
\code{~~+--}\code{\link[R.filesets]{FullNameInterface}}\cr
\code{~~~~~~~|}\cr
\code{~~~~~~~+--}\code{\link[R.filesets]{GenericDataFile}}\cr
\code{~~~~~~~~~~~~|}\cr
\code{~~~~~~~~~~~~+--}\code{\link[aroma.core]{CacheKeyInterface}}\cr
\code{~~~~~~~~~~~~~~~~~|}\cr
\code{~~~~~~~~~~~~~~~~~+--}\code{\link[aroma.core]{FileCacheKeyInterface}}\cr
\code{~~~~~~~~~~~~~~~~~~~~~~|}\cr
\code{~~~~~~~~~~~~~~~~~~~~~~+--}\emph{\code{SnpInformation}}\cr
\bold{Directly known subclasses:}\cr
\emph{\link[aroma.affymetrix]{DChipSnpInformation}}, \emph{\link[aroma.affymetrix]{UflSnpInformation}}\cr
public abstract static class \bold{SnpInformation}\cr
extends \link[aroma.core]{FileCacheKeyInterface}\cr
}
\usage{
SnpInformation(...)
}
\arguments{
\item{...}{Arguments passed to \code{\link[R.filesets]{GenericDataFile}}.}
}
\section{Fields and Methods}{
\bold{Methods:}\cr
\tabular{rll}{
\tab \code{byChipType} \tab -\cr
\tab \code{getChipType} \tab -\cr
\tab \code{getData} \tab -\cr
\tab \code{getFragmentLengths} \tab -\cr
\tab \code{getFragmentStarts} \tab -\cr
\tab \code{getFragmentStops} \tab -\cr
\tab \code{nbrOfEnzymes} \tab -\cr
\tab \code{nbrOfUnits} \tab -\cr
\tab \code{readDataFrame} \tab -\cr
}
\bold{Methods inherited from FileCacheKeyInterface}:\cr
getCacheKey
\bold{Methods inherited from CacheKeyInterface}:\cr
getCacheKey
\bold{Methods inherited from GenericDataFile}:\cr
as.character, clone, compareChecksum, copyTo, equals, fromFile, getAttribute, getAttributes, getChecksum, getChecksumFile, getCreatedOn, getDefaultFullName, getExtension, getExtensionPattern, getFileSize, getFileType, getFilename, getFilenameExtension, getLastAccessedOn, getLastModifiedOn, getOutputExtension, getPath, getPathname, gunzip, gzip, hasBeenModified, is.na, isFile, isGzipped, linkTo, readChecksum, renameTo, renameToUpperCaseExt, setAttribute, setAttributes, setAttributesBy, setAttributesByTags, setExtensionPattern, testAttributes, validate, validateChecksum, writeChecksum, getParentName
\bold{Methods inherited from FullNameInterface}:\cr
appendFullNameTranslator, appendFullNameTranslatorByNULL, appendFullNameTranslatorByTabularTextFile, appendFullNameTranslatorByTabularTextFileSet, appendFullNameTranslatorBycharacter, appendFullNameTranslatorBydata.frame, appendFullNameTranslatorByfunction, appendFullNameTranslatorBylist, clearFullNameTranslator, clearListOfFullNameTranslators, getDefaultFullName, getFullName, getFullNameTranslator, getListOfFullNameTranslators, getName, getTags, hasTag, hasTags, resetFullName, setFullName, setFullNameTranslator, setListOfFullNameTranslators, setName, setTags, updateFullName
\bold{Methods inherited from Object}:\cr
$, $<-, [[, [[<-, as.character, attach, attachLocally, clearCache, clearLookupCache, clone, detach, equals, extend, finalize, getEnvironment, getFieldModifier, getFieldModifiers, getFields, getInstantiationTime, getStaticInstance, hasField, hashCode, ll, load, objectSize, print, save, asThis
}
\author{Henrik Bengtsson}
\keyword{classes}
|
/man/SnpInformation.Rd
|
no_license
|
microarray/aroma.affymetrix
|
R
| false
| false
| 3,552
|
rd
|
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Do not modify this file since it was automatically generated from:
%
% SnpInformation.R
%
% by the Rdoc compiler part of the R.oo package.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\name{SnpInformation}
\docType{class}
\alias{SnpInformation}
\title{The SnpInformation class}
\description{
Package: aroma.affymetrix \cr
\bold{Class SnpInformation}\cr
\code{\link[R.oo]{Object}}\cr
\code{~~|}\cr
\code{~~+--}\code{\link[R.filesets]{FullNameInterface}}\cr
\code{~~~~~~~|}\cr
\code{~~~~~~~+--}\code{\link[R.filesets]{GenericDataFile}}\cr
\code{~~~~~~~~~~~~|}\cr
\code{~~~~~~~~~~~~+--}\code{\link[aroma.core]{CacheKeyInterface}}\cr
\code{~~~~~~~~~~~~~~~~~|}\cr
\code{~~~~~~~~~~~~~~~~~+--}\code{\link[aroma.core]{FileCacheKeyInterface}}\cr
\code{~~~~~~~~~~~~~~~~~~~~~~|}\cr
\code{~~~~~~~~~~~~~~~~~~~~~~+--}\emph{\code{SnpInformation}}\cr
\bold{Directly known subclasses:}\cr
\emph{\link[aroma.affymetrix]{DChipSnpInformation}}, \emph{\link[aroma.affymetrix]{UflSnpInformation}}\cr
public abstract static class \bold{SnpInformation}\cr
extends \link[aroma.core]{FileCacheKeyInterface}\cr
}
\usage{
SnpInformation(...)
}
\arguments{
\item{...}{Arguments passed to \code{\link[R.filesets]{GenericDataFile}}.}
}
\section{Fields and Methods}{
\bold{Methods:}\cr
\tabular{rll}{
\tab \code{byChipType} \tab -\cr
\tab \code{getChipType} \tab -\cr
\tab \code{getData} \tab -\cr
\tab \code{getFragmentLengths} \tab -\cr
\tab \code{getFragmentStarts} \tab -\cr
\tab \code{getFragmentStops} \tab -\cr
\tab \code{nbrOfEnzymes} \tab -\cr
\tab \code{nbrOfUnits} \tab -\cr
\tab \code{readDataFrame} \tab -\cr
}
\bold{Methods inherited from FileCacheKeyInterface}:\cr
getCacheKey
\bold{Methods inherited from CacheKeyInterface}:\cr
getCacheKey
\bold{Methods inherited from GenericDataFile}:\cr
as.character, clone, compareChecksum, copyTo, equals, fromFile, getAttribute, getAttributes, getChecksum, getChecksumFile, getCreatedOn, getDefaultFullName, getExtension, getExtensionPattern, getFileSize, getFileType, getFilename, getFilenameExtension, getLastAccessedOn, getLastModifiedOn, getOutputExtension, getPath, getPathname, gunzip, gzip, hasBeenModified, is.na, isFile, isGzipped, linkTo, readChecksum, renameTo, renameToUpperCaseExt, setAttribute, setAttributes, setAttributesBy, setAttributesByTags, setExtensionPattern, testAttributes, validate, validateChecksum, writeChecksum, getParentName
\bold{Methods inherited from FullNameInterface}:\cr
appendFullNameTranslator, appendFullNameTranslatorByNULL, appendFullNameTranslatorByTabularTextFile, appendFullNameTranslatorByTabularTextFileSet, appendFullNameTranslatorBycharacter, appendFullNameTranslatorBydata.frame, appendFullNameTranslatorByfunction, appendFullNameTranslatorBylist, clearFullNameTranslator, clearListOfFullNameTranslators, getDefaultFullName, getFullName, getFullNameTranslator, getListOfFullNameTranslators, getName, getTags, hasTag, hasTags, resetFullName, setFullName, setFullNameTranslator, setListOfFullNameTranslators, setName, setTags, updateFullName
\bold{Methods inherited from Object}:\cr
$, $<-, [[, [[<-, as.character, attach, attachLocally, clearCache, clearLookupCache, clone, detach, equals, extend, finalize, getEnvironment, getFieldModifier, getFieldModifiers, getFields, getInstantiationTime, getStaticInstance, hasField, hashCode, ll, load, objectSize, print, save, asThis
}
\author{Henrik Bengtsson}
\keyword{classes}
|
#' Basic arithmetic
#'
#' Elementwise sum and product of two numeric vectors, with R's usual
#' recycling rules.
#'
#' @param x,y numeric vectors.
#' @return A numeric vector of the recycled common length.
add <- function(x, y) {
  x + y
}
#' @rdname add
times <- function(x, y) {
  x * y
}
|
/R/basic.R
|
no_license
|
kang-yu/visar
|
R
| false
| false
| 128
|
r
|
#' Basic arithmetic
#'
#' Elementwise sum and product of two numeric vectors, with R's usual
#' recycling rules.
#'
#' @param x,y numeric vectors.
#' @return A numeric vector of the recycled common length.
add <- function(x, y) {
  x + y
}
#' @rdname add
times <- function(x, y) {
  x * y
}
|
library(readxl)
library(tidyverse)
library(stringi)
library(readr)
library(sqldf)
# help match --------------
# program_cohort__c
# Lookup table mapping grant year (2006-2008) to the Salesforce
# PROGRAM_COHORT__C record id; joined into every year's proposal table below.
program_cohort <- data.frame(
"year" = 2006:2008,
"PROGRAM_COHORT__C" = c(
"a2C39000002zYsyEAE",
"a2C39000002zYt3EAE",
"a2C39000002zYt8EAE"
),
"RECORDTYPEID" = "01239000000Ap02AAC",
"PROPOSAL_FUNDER__C" = "The Lemelson Foundation"
)
## for commit
# Two views of the organization extract: one keyed by the canonical NAME,
# one keyed by the alias column, so institutions can be matched either way.
extract_c <- read_csv("/Volumes/GoogleDrive/My Drive/Sustainable_Vision/salesforce_examples/Organization_extract.csv") %>%
rename("GRANTED_INSTITUTION__C" = "NAME") %>%
select(-ORGANIZATION_ALIAS_NAME__C)
extract_alias_c <- read_csv("/Volumes/GoogleDrive/My Drive/Sustainable_Vision/salesforce_examples/Organization_extract.csv") %>%
rename("GRANTED_INSTITUTION__C" = "ORGANIZATION_ALIAS_NAME__C") %>%
select(-NAME)
## for proposal
# Same two views, renamed for joining against proposal tables.
extract_p <- read_csv("/Volumes/GoogleDrive/My Drive/Sustainable_Vision/salesforce_examples/Organization_extract.csv") %>%
rename("APPLYING_INSTITUTION_NAME__C" = "NAME") %>%
select(-ORGANIZATION_ALIAS_NAME__C)
extract_alias_p <- read_csv("/Volumes/GoogleDrive/My Drive/Sustainable_Vision/salesforce_examples/Organization_extract.csv") %>%
rename("APPLYING_INSTITUTION_NAME__C" = "ORGANIZATION_ALIAS_NAME__C") %>%
select(-NAME) %>%
na.omit()
## match Zenn ID and team name
match_2006 <- read_excel("~/Desktop/Sustainable_Vision/sustainable_vision_grants_2006_proposals.xlsx",
col_types = c("numeric", "text", "text",
"text", "text", "numeric", "text",
"text", "text", "text", "text", "text",
"text", "text", "numeric", "text",
"numeric", "text", "text", "text",
"numeric", "text", "text", "text",
"numeric", "text", "text", "numeric",
"text", "text", "numeric", "text",
"text", "text", "text", "text")) %>%
select(`Zenn ID`, `Grant Title`, `Institution Name`)
match_c_2006 <- match_2006 %>%
rename("GRANTED_INSTITUTION__C" = "Institution Name") %>%
select(-`Grant Title`)
match_p_2006 <- match_2006 %>%
rename("NAME" = "Grant Title") %>%
select(-`Institution Name`)
match_2007 <- read_excel("~/Desktop/Sustainable_Vision/sustainable_vision_grants_2007_proposals.xlsx",
col_types = c("numeric", "text", "text",
"text", "text", "numeric", "text",
"text", "text", "text", "text", "text",
"text", "text", "numeric", "text",
"numeric", "text", "text", "text",
"numeric", "text", "text", "text",
"numeric", "text", "text", "numeric",
"text", "text", "numeric", "text",
"text", "text", "text", "text")) %>%
select(`Zenn ID`, `Grant Title`, `Institution Name`)
match_c_2007 <- match_2007 %>%
rename("GRANTED_INSTITUTION__C" = "Institution Name") %>%
select(-`Grant Title`)
match_p_2007 <- match_2007 %>%
rename("NAME" = "Grant Title") %>%
select(-`Institution Name`)
match_2008 <- read_excel("~/Desktop/Sustainable_Vision/sustainable_vision_grants_2008_proposals.xlsx",
col_types = c("numeric", "text", "text",
"text", "text", "numeric", "text",
"text", "text", "text", "text", "text",
"text", "text", "numeric", "text",
"numeric", "text", "text", "text",
"numeric", "text", "text", "text",
"numeric", "text", "text", "numeric",
"text", "text", "numeric", "text",
"text", "text", "text", "text")) %>%
select(`Zenn ID`, `Grant Title`, `Institution Name`)
match_c_2008 <- match_2008 %>%
rename("GRANTED_INSTITUTION__C" = "Institution Name") %>%
select(-`Grant Title`)
match_p_2008 <- match_2008 %>%
rename("NAME" = "Grant Title") %>%
select(-`Institution Name`)
# for membership match
contacts <- read_csv("/Volumes/GoogleDrive/My Drive/Sustainable_Vision/salesforce_examples/Contact_Extract.csv") %>%
select(ID, EMAIL, NPE01__ALTERNATEEMAIL__C, NPE01__HOMEEMAIL__C,
NPE01__WORKEMAIL__C, PREVIOUS_EMAIL_ADDRESSES__C, BKUP_EMAIL_ADDRESS__C)
contacts_1 <- contacts %>%
select(ID, EMAIL)
# 2006 --------------
advisors_full_2006 <- read_excel("~/Desktop/Sustainable_Vision/sustainable_vision_grants_2006_advisors.xlsx") %>%
rename("ZENN_ID__C" = "Zenn ID",
"ROLE__C" = "Team Role",
"EMAIL" = "Email") %>%
mutate(EMAIL = tolower(EMAIL))
advisors_2006 <- advisors_full_2006 %>%
select(ZENN_ID__C, ROLE__C, EMAIL)
# proposal
proposal_2006 <- read_excel("~/Desktop/Sustainable_Vision/sustainable_vision_grants_2006_proposals.xlsx") %>%
rename(
"NAME" = "Grant Title",
"PROJECT_DESCRIPTION_PROPOSAL_ABSTRACT__C" = "Proposal Summary",
"EXTERNAL_PROPOSAL_ID__C" = "External Proposal ID"
) %>%
mutate(
"year" = as.numeric(format(as.Date(`Date Created`),'%Y')),
"STATUS__C" = ifelse(`Application Status` == "invite resubmit", "Invited Resubmit", stri_trans_totitle(`Application Status`)),
"PROPOSAL_NAME_LONG_VERSION__C" = as.character(NAME),
"DATE_CREATED__C" = as.Date(`Date Created`),
"DATE_SUBMITTED__C" = as.Date(`Date Application Submitted`),
"GRANT_PERIOD_END__C" = as.Date(`Actual Period End`),
"GRANT_PERIOD_START__C" = as.Date(`Actual Period Begin`),
"AMOUNT_REQUESTED__C" = as.double(`Amount Requested`),
"ZENN_ID__C" = as.double(`Zenn ID`),
"AWARD_AMOUNT__C" = as.double(`Amount Approved`),
"APPLYING_INSTITUTION_NAME__C" = ifelse(`Institution Name` == "University of Maryland, College Park", "University of Maryland-College Park",
ifelse(`Institution Name` == "Arizona State University at the Tempe Campus", "Arizona State University",
ifelse(`Institution Name` == "The City College of New York", "CUNY City College",
ifelse(`Institution Name` == "University of Oklahoma", "University of Oklahoma Norman Campus",
`Institution Name`))))
) %>%
select(
year, NAME, AMOUNT_REQUESTED__C, PROPOSAL_NAME_LONG_VERSION__C, APPLYING_INSTITUTION_NAME__C,
AWARD_AMOUNT__C, DATE_CREATED__C, DATE_SUBMITTED__C, GRANT_PERIOD_END__C,
GRANT_PERIOD_START__C,
PROJECT_DESCRIPTION_PROPOSAL_ABSTRACT__C, ZENN_ID__C, STATUS__C,
EXTERNAL_PROPOSAL_ID__C
) %>%
filter(is.na(APPLYING_INSTITUTION_NAME__C) == FALSE) %>%
left_join(extract_p) %>%
left_join(extract_alias_p, by = "APPLYING_INSTITUTION_NAME__C") %>%
mutate(ID = coalesce(ID.x, ID.y)) %>%
select(-ID.x, -ID.y) %>%
rename("APPLYING_INSTITUTION__C" = "ID") %>%
left_join(match_p_2006) %>%
left_join(program_cohort) %>%
select( - `Zenn ID`, -year) %>%
unique()
proposal_2006$PROJECT_DESCRIPTION_PROPOSAL_ABSTRACT__C <- str_replace_all(proposal_2006$PROJECT_DESCRIPTION_PROPOSAL_ABSTRACT__C, "[:cntrl:]", " ")
proposal_2006 <- sapply(proposal_2006, as.character)
proposal_2006[is.na(proposal_2006)] <- " "
proposal_2006 <- as.data.frame(proposal_2006)
write_csv(proposal_2006, "new/2006/proposal_2006.csv")
# Narrow proposal view used to attach team/proposal ids for memberships.
# NOTE(review): teamid_2006 is not defined until the membership section
# further down this script, so this statement forward-references it and
# fails on a clean top-to-bottom run. teamid_2006 also selects only
# ID/ZENN_ID__C/TEAM__C, so it carries no EXTERNAL_PROPOSAL_ID__C column
# for this join key. A corrected redefinition joining by ZENN_ID__C
# appears later -- confirm this first version is dead code.
proposal_2006_narrow <- proposal_2006 %>%
select(NAME, ZENN_ID__C, EXTERNAL_PROPOSAL_ID__C, PROGRAM_COHORT__C, RECORDTYPEID) %>%
rename("TEAM_NAME_TEXT_ONLY_HIDDEN__C" = "NAME") %>%
left_join(teamid_2006, by = "EXTERNAL_PROPOSAL_ID__C")
# team
team_2006 <- read_excel("~/Desktop/Sustainable_Vision/sustainable_vision_grants_2006_proposals.xlsx") %>%
rename(
"NAME" = "Grant Title"
) %>%
mutate(
"RECORDTYPEID" = "012390000009qKOAAY",
"ALIAS__C" = ifelse(nchar(NAME) > 80, NAME, "")
) %>%
select(
NAME, RECORDTYPEID, ALIAS__C
) %>%
left_join(match_p_2006) %>%
write_csv("new/2006/team_2006.csv")
# note_task
task_2006 <- read_excel("/Volumes/GoogleDrive/My Drive/Sustainable_Vision/sustainable_vision_grants_2006_post_award_notes.xlsx",
col_types = c("numeric", "text", "text",
"text")) %>%
set_names(c("Zenn ID", "Created Date", "Created by", "Note")) %>%
left_join(match_2006) %>%
rename("WHATID" = "Zenn ID",
"DESCRIPTION" = "Note") %>%
mutate(STATUS = "Completed",
PRIORITY = "Normal",
TYPE = "Internal Note",
TASKSUBTYPE = "Call",
ACTIVITYDATE = as.Date(`Created Date`),
SUBJECT = "Post Award Note--",
OWNER = ifelse(`Created by` == "Brenna Breeding", "00539000005UlQaAAK",
ifelse(`Created by` == "Michael Norton", "00539000004pukIAAQ",
ifelse(`Created by` == "Patricia Boynton", "00570000001K3bpAAC",
ifelse(`Created by` == "Rachel Agoglia", "00570000003QASWAA4",
"00570000004VlXPAA0"))
)
)
) %>%
unite("SUBJECT", c(SUBJECT, `Created Date`), sep = "", remove = FALSE) %>%
unite("SUBJECT", c(SUBJECT, `Created by`), sep = " ", remove = FALSE) %>%
select(
WHATID, ACTIVITYDATE, `Created by`, DESCRIPTION, TYPE, STATUS, PRIORITY, OWNER, SUBJECT
) %>%
write_csv("new/2006/note_task_2006.csv")
# memebrship
teamid_2006 <- read_csv("/Volumes/GoogleDrive/My Drive/Sustainable_Vision/new_dataset_migrate/2006/proposal_2006_extract.csv") %>%
select(ID, ZENN_ID__C, TEAM__C) %>%
rename("PROPOSAL__C" = "ID") %>%
mutate(ZENN_ID__C = as.character(ZENN_ID__C)) %>%
na.omit()
proposal_2006_narrow <- proposal_2006 %>%
select(NAME, ZENN_ID__C, EXTERNAL_PROPOSAL_ID__C, PROGRAM_COHORT__C, RECORDTYPEID) %>%
rename("TEAM_NAME_TEXT_ONLY_HIDDEN__C" = "NAME") %>%
mutate(ZENN_ID__C = as.character(ZENN_ID__C)) %>%
left_join(teamid_2006, by = "ZENN_ID__C")
membership_2006 <- read_excel("~/Desktop/Sustainable_Vision/sustainable_vision_grants_2006_proposals.xlsx") %>%
select(
`Zenn ID`, `External Proposal ID`, `Application Status`,
`Grant Title`, `Institution ID`, `Date Application Submitted`,
`Actual Period Begin`, `Actual Period End`
) %>%
mutate("Application Status" = ifelse(`Application Status` == "invite resubmit", "Invited Resubmit", stri_trans_totitle(`Application Status`)),
"Actual Period Begin" = ifelse(`Application Status` == "Funded", `Actual Period Begin`, `Date Application Submitted`),
"Actual Period End" = ifelse(`Application Status` == "Funded", `Actual Period End`, `Date Application Submitted`),
"STATUS__C" = ifelse(`Application Status` == "Funded", "Completed", "Inactive")
) %>%
rename("ZENN_ID__C" = "Zenn ID") %>%
left_join(proposal_2006_narrow, by = "ZENN_ID__C") %>%
rename(
"PROGRAM_COHORT_LOOKUP__C" = "PROGRAM_COHORT__C",
"START_DATE__C" = "Actual Period Begin",
"END_DATE__C" = "Actual Period End"
) %>%
right_join(advisors_2006) %>%
select(
EMAIL, ZENN_ID__C,
TEAM__C, PROPOSAL__C, PROGRAM_COHORT_LOOKUP__C,
ROLE__C, STATUS__C, START_DATE__C, END_DATE__C, RECORDTYPEID
) %>%
mutate(EMAIL = tolower(EMAIL),
ROLE__C = ifelse(ROLE__C == "Dean of Faculty", "Dean", ROLE__C)) %>%
na.omit() %>%
left_join(contacts_1, by = "EMAIL") %>%
rename("MEMBER__C" = "ID") %>%
na.omit() %>%
mutate(RECORDTYPEID = "012390000009qIDAAY") %>%
write_csv("new/2006/member_2006.csv")
membership_2006_small <- read_excel("~/Desktop/Sustainable_Vision/sustainable_vision_grants_2006_proposals.xlsx") %>%
select(
`Zenn ID`, `External Proposal ID`, `Application Status`,
`Grant Title`, `Institution ID`, `Date Application Submitted`,
`Actual Period Begin`, `Actual Period End`
) %>%
mutate("Application Status" = ifelse(`Application Status` == "invite resubmit", "Invited Resubmit", stri_trans_totitle(`Application Status`)),
"Actual Period Begin" = ifelse(`Application Status` == "Funded", `Actual Period Begin`, `Date Application Submitted`),
"Actual Period End" = ifelse(`Application Status` == "Funded", `Actual Period End`, `Date Application Submitted`),
"STATUS__C" = ifelse(`Application Status` == "Funded", "Completed", "Inactive")
) %>%
rename("ZENN_ID__C" = "Zenn ID") %>%
left_join(proposal_2006_narrow) %>%
rename(
"PROGRAM_COHORT_LOOKUP__C" = "PROGRAM_COHORT__C",
"START_DATE__C" = "Actual Period Begin",
"END_DATE__C" = "Actual Period End"
) %>%
right_join(advisors_2006) %>%
select(
EMAIL, ZENN_ID__C,
TEAM__C, PROPOSAL__C, PROGRAM_COHORT_LOOKUP__C,
ROLE__C, STATUS__C, START_DATE__C, END_DATE__C, RECORDTYPEID
) %>%
mutate(ROLE__C = ifelse(ROLE__C == "Dean of Faculty", "Dean", ROLE__C)) %>%
na.omit() %>%
left_join(contacts_1) %>%
rename("MEMBER__C" = "ID") %>%
select(-MEMBER__C)
membership_2006_big <- read_excel("~/Desktop/Sustainable_Vision/sustainable_vision_grants_2006_proposals.xlsx") %>%
select(
`Zenn ID`, `External Proposal ID`, `Application Status`,
`Grant Title`, `Institution ID`, `Date Application Submitted`,
`Actual Period Begin`, `Actual Period End`
) %>%
mutate("Application Status" = ifelse(`Application Status` == "invite resubmit", "Invited Resubmit", stri_trans_totitle(`Application Status`)),
"Actual Period Begin" = ifelse(`Application Status` == "Funded", `Actual Period Begin`, `Date Application Submitted`),
"Actual Period End" = ifelse(`Application Status` == "Funded", `Actual Period End`, `Date Application Submitted`),
"STATUS__C" = ifelse(`Application Status` == "Funded", "Completed", "Inactive")
) %>%
rename("ZENN_ID__C" = "Zenn ID") %>%
left_join(proposal_2006_narrow) %>%
rename(
"PROGRAM_COHORT_LOOKUP__C" = "PROGRAM_COHORT__C",
"START_DATE__C" = "Actual Period Begin",
"END_DATE__C" = "Actual Period End"
) %>%
right_join(advisors_2006) %>%
select(
EMAIL, ZENN_ID__C,
TEAM__C, PROPOSAL__C, PROGRAM_COHORT_LOOKUP__C,
ROLE__C, STATUS__C, START_DATE__C, END_DATE__C, RECORDTYPEID
) %>%
mutate(ROLE__C = ifelse(ROLE__C == "Dean of Faculty", "Dean", ROLE__C))
# Membership rows that did not resolve to a contact ID: anti_join keeps
# rows of membership_2006_big with no matching row in membership_2006_small,
# re-attaches the full advisor details, and writes them out for follow-up.
no_id_2006 <- dplyr::anti_join(membership_2006_big, membership_2006_small) %>%
left_join(advisors_full_2006) %>%
drop_na(TEAM__C) %>%
write_csv("new/2006/no_id_2006.csv")
# 2007 --------------
advisors_full_2007 <- read_excel("~/Desktop/Sustainable_Vision/sustainable_vision_grants_2007_advisors.xlsx") %>%
rename("ZENN_ID__C" = "Zenn ID",
"ROLE__C" = "Team Role",
"EMAIL" = "Email") %>%
mutate(EMAIL = tolower(EMAIL))
advisors_2007 <- advisors_full_2007 %>%
select(ZENN_ID__C, ROLE__C, EMAIL)
# proposal
proposal_2007 <- read_excel("~/Desktop/Sustainable_Vision/sustainable_vision_grants_2007_proposals.xlsx") %>%
rename(
"NAME" = "Grant Title",
"PROJECT_DESCRIPTION_PROPOSAL_ABSTRACT__C" = "Proposal Summary",
"EXTERNAL_PROPOSAL_ID__C" = "External Proposal ID"
) %>%
mutate(
"year" = as.numeric(format(as.Date(`Date Created`),'%Y')),
"STATUS__C" = ifelse(`Application Status` == "invite resubmit", "Invited Resubmit", stri_trans_totitle(`Application Status`)),
"PROPOSAL_NAME_LONG_VERSION__C" = as.character(NAME),
"DATE_CREATED__C" = as.Date(`Date Created`),
"DATE_SUBMITTED__C" = as.Date(`Date Application Submitted`),
"GRANT_PERIOD_END__C" = as.Date(`Actual Period End`),
"GRANT_PERIOD_START__C" = as.Date(`Actual Period Begin`),
"AMOUNT_REQUESTED__C" = as.double(`Amount Requested`),
"ZENN_ID__C" = as.double(`Zenn ID`),
"AWARD_AMOUNT__C" = as.double(`Amount Approved`),
"APPLYING_INSTITUTION_NAME__C" = ifelse(`Institution Name` == "University of Maryland, College Park", "University of Maryland-College Park",
ifelse(`Institution Name` == "Arizona State University at the Tempe Campus", "Arizona State University",
ifelse(`Institution Name` == "The City College of New York", "CUNY City College",
ifelse(`Institution Name` == "University of Oklahoma", "University of Oklahoma Norman Campus",
ifelse(`Institution Name` == "University of Texas at Arlington", "The University of Texas at Arlington",
ifelse(`Institution Name` == "University of Tennessee, Knoxville", "The University of Tennessee",
`Institution Name`))))))
) %>%
select(
year, NAME, AMOUNT_REQUESTED__C, PROPOSAL_NAME_LONG_VERSION__C, APPLYING_INSTITUTION_NAME__C,
AWARD_AMOUNT__C, DATE_CREATED__C, DATE_SUBMITTED__C, GRANT_PERIOD_END__C,
GRANT_PERIOD_START__C,
PROJECT_DESCRIPTION_PROPOSAL_ABSTRACT__C, ZENN_ID__C, STATUS__C,
EXTERNAL_PROPOSAL_ID__C
) %>%
filter(is.na(APPLYING_INSTITUTION_NAME__C) == FALSE) %>%
left_join(extract_p) %>%
left_join(extract_alias_p, by = "APPLYING_INSTITUTION_NAME__C") %>%
mutate(ID = coalesce(ID.x, ID.y)) %>%
select(-ID.x, -ID.y) %>%
rename("APPLYING_INSTITUTION__C" = "ID") %>%
left_join(match_p_2007) %>%
left_join(program_cohort) %>%
select( - `Zenn ID`, -year) %>%
unique()
proposal_2007$PROJECT_DESCRIPTION_PROPOSAL_ABSTRACT__C <- str_replace_all(proposal_2007$PROJECT_DESCRIPTION_PROPOSAL_ABSTRACT__C, "[:cntrl:]", " ")
proposal_2007 <- sapply(proposal_2007, as.character)
proposal_2007[is.na(proposal_2007)] <- " "
proposal_2007 <- as.data.frame(proposal_2007)
write_csv(proposal_2007, "new/2007/proposal_2007.csv")
# Narrow proposal view used to attach team/proposal ids for memberships.
# NOTE(review): teamid_2007 is not defined until the membership section
# further down this script, so this statement forward-references it and
# fails on a clean top-to-bottom run. teamid_2007 also selects only
# ID/ZENN_ID__C/TEAM__C, so it carries no EXTERNAL_PROPOSAL_ID__C column
# for this join key. A corrected redefinition joining by ZENN_ID__C
# appears later -- confirm this first version is dead code.
proposal_2007_narrow <- proposal_2007 %>%
select(NAME, ZENN_ID__C, EXTERNAL_PROPOSAL_ID__C, PROGRAM_COHORT__C, RECORDTYPEID) %>%
rename("TEAM_NAME_TEXT_ONLY_HIDDEN__C" = "NAME") %>%
left_join(teamid_2007, by = "EXTERNAL_PROPOSAL_ID__C")
# team
team_2007 <- read_excel("~/Desktop/Sustainable_Vision/sustainable_vision_grants_2007_proposals.xlsx") %>%
rename(
"NAME" = "Grant Title"
) %>%
mutate(
"RECORDTYPEID" = "012390000009qKOAAY",
"ALIAS__C" = ifelse(nchar(NAME) > 80, NAME, "")
) %>%
select(
NAME, RECORDTYPEID, ALIAS__C
) %>%
left_join(match_p_2007) %>%
write_csv("new/2007/team_2007.csv")
# note_task
task_2007 <- read_excel("/Volumes/GoogleDrive/My Drive/Sustainable_Vision/sustainable_vision_grants_2007_post_award_notes.xlsx",
col_types = c("numeric", "text", "text",
"text")) %>%
set_names(c("Zenn ID", "Created Date", "Created by", "Note")) %>%
left_join(match_2007) %>%
rename("WHATID" = "Zenn ID",
"DESCRIPTION" = "Note") %>%
mutate(STATUS = "Completed",
PRIORITY = "Normal",
TYPE = "Internal Note",
TASKSUBTYPE = "Call",
ACTIVITYDATE = as.Date(`Created Date`),
SUBJECT = "Post Award Note--",
OWNER = ifelse(`Created by` == "Brenna Breeding", "00539000005UlQaAAK",
ifelse(`Created by` == "Michael Norton", "00539000004pukIAAQ",
ifelse(`Created by` == "Patricia Boynton", "00570000001K3bpAAC",
ifelse(`Created by` == "Rachel Agoglia", "00570000003QASWAA4",
"00570000004VlXPAA0"))
)
)
) %>%
unite("SUBJECT", c(SUBJECT, `Created Date`), sep = "", remove = FALSE) %>%
unite("SUBJECT", c(SUBJECT, `Created by`), sep = " ", remove = FALSE) %>%
select(
WHATID, ACTIVITYDATE, `Created by`, DESCRIPTION, TYPE, STATUS, PRIORITY, OWNER, SUBJECT
) %>%
write_csv("new/2007/note_task_2007.csv")
# memebrship
teamid_2007 <- read_csv("/Volumes/GoogleDrive/My Drive/Sustainable_Vision/new_dataset_migrate/2007/proposal_2007_extract.csv") %>%
select(ID, ZENN_ID__C, TEAM__C) %>%
rename("PROPOSAL__C" = "ID") %>%
mutate(ZENN_ID__C = as.character(ZENN_ID__C)) %>%
na.omit()
proposal_2007_narrow <- proposal_2007 %>%
select(NAME, ZENN_ID__C, EXTERNAL_PROPOSAL_ID__C, PROGRAM_COHORT__C, RECORDTYPEID) %>%
rename("TEAM_NAME_TEXT_ONLY_HIDDEN__C" = "NAME") %>%
mutate(ZENN_ID__C = as.character(ZENN_ID__C)) %>%
left_join(teamid_2007, by = "ZENN_ID__C")
membership_2007 <- read_excel("~/Desktop/Sustainable_Vision/sustainable_vision_grants_2007_proposals.xlsx") %>%
select(
`Zenn ID`, `External Proposal ID`, `Application Status`,
`Grant Title`, `Institution ID`, `Date Application Submitted`,
`Actual Period Begin`, `Actual Period End`
) %>%
mutate("Application Status" = ifelse(`Application Status` == "invite resubmit", "Invited Resubmit", stri_trans_totitle(`Application Status`)),
"Actual Period Begin" = ifelse(`Application Status` == "Funded", `Actual Period Begin`, `Date Application Submitted`),
"Actual Period End" = ifelse(`Application Status` == "Funded", `Actual Period End`, `Date Application Submitted`),
"STATUS__C" = ifelse(`Application Status` == "Funded", "Completed", "Inactive")
) %>%
rename("ZENN_ID__C" = "Zenn ID") %>%
left_join(proposal_2007_narrow) %>%
rename(
"PROGRAM_COHORT_LOOKUP__C" = "PROGRAM_COHORT__C",
"START_DATE__C" = "Actual Period Begin",
"END_DATE__C" = "Actual Period End"
) %>%
right_join(advisors_2007) %>%
select(
EMAIL, ZENN_ID__C,
TEAM__C, PROPOSAL__C, PROGRAM_COHORT_LOOKUP__C,
ROLE__C, STATUS__C, START_DATE__C, END_DATE__C, RECORDTYPEID
) %>%
mutate(EMAIL = tolower(EMAIL),
ROLE__C = ifelse(ROLE__C == "Dean of Faculty", "Dean", ROLE__C)) %>%
na.omit() %>%
left_join(contacts_1, by = "EMAIL") %>%
rename("MEMBER__C" = "ID") %>%
na.omit() %>%
mutate(RECORDTYPEID = "012390000009qIDAAY") %>%
write_csv("new/2007/member_2007.csv")
membership_2007_small <- read_excel("~/Desktop/Sustainable_Vision/sustainable_vision_grants_2007_proposals.xlsx") %>%
select(
`Zenn ID`, `External Proposal ID`, `Application Status`,
`Grant Title`, `Institution ID`, `Date Application Submitted`,
`Actual Period Begin`, `Actual Period End`
) %>%
mutate("Application Status" = ifelse(`Application Status` == "invite resubmit", "Invited Resubmit", stri_trans_totitle(`Application Status`)),
"Actual Period Begin" = ifelse(`Application Status` == "Funded", `Actual Period Begin`, `Date Application Submitted`),
"Actual Period End" = ifelse(`Application Status` == "Funded", `Actual Period End`, `Date Application Submitted`),
"STATUS__C" = ifelse(`Application Status` == "Funded", "Completed", "Inactive")
) %>%
rename("ZENN_ID__C" = "Zenn ID") %>%
left_join(proposal_2007_narrow) %>%
rename(
"PROGRAM_COHORT_LOOKUP__C" = "PROGRAM_COHORT__C",
"START_DATE__C" = "Actual Period Begin",
"END_DATE__C" = "Actual Period End"
) %>%
right_join(advisors_2007) %>%
select(
EMAIL, ZENN_ID__C,
TEAM__C, PROPOSAL__C, PROGRAM_COHORT_LOOKUP__C,
ROLE__C, STATUS__C, START_DATE__C, END_DATE__C, RECORDTYPEID
) %>%
mutate(ROLE__C = ifelse(ROLE__C == "Dean of Faculty", "Dean", ROLE__C)) %>%
na.omit() %>%
left_join(contacts_1) %>%
rename("MEMBER__C" = "ID") %>%
select(-MEMBER__C)
membership_2007_big <- read_excel("~/Desktop/Sustainable_Vision/sustainable_vision_grants_2007_proposals.xlsx") %>%
select(
`Zenn ID`, `External Proposal ID`, `Application Status`,
`Grant Title`, `Institution ID`, `Date Application Submitted`,
`Actual Period Begin`, `Actual Period End`
) %>%
mutate("Application Status" = ifelse(`Application Status` == "invite resubmit", "Invited Resubmit", stri_trans_totitle(`Application Status`)),
"Actual Period Begin" = ifelse(`Application Status` == "Funded", `Actual Period Begin`, `Date Application Submitted`),
"Actual Period End" = ifelse(`Application Status` == "Funded", `Actual Period End`, `Date Application Submitted`),
"STATUS__C" = ifelse(`Application Status` == "Funded", "Completed", "Inactive")
) %>%
rename("ZENN_ID__C" = "Zenn ID") %>%
left_join(proposal_2007_narrow) %>%
rename(
"PROGRAM_COHORT_LOOKUP__C" = "PROGRAM_COHORT__C",
"START_DATE__C" = "Actual Period Begin",
"END_DATE__C" = "Actual Period End"
) %>%
right_join(advisors_2007) %>%
select(
EMAIL, ZENN_ID__C,
TEAM__C, PROPOSAL__C, PROGRAM_COHORT_LOOKUP__C,
ROLE__C, STATUS__C, START_DATE__C, END_DATE__C, RECORDTYPEID
) %>%
mutate(ROLE__C = ifelse(ROLE__C == "Dean of Faculty", "Dean", ROLE__C))
# Membership rows that did not resolve to a contact ID, 2007 edition.
# NOTE(review): the 2006 section computes the analogous table with
# dplyr::anti_join() (match by key), whereas this uses dplyr::setdiff()
# (exact row difference) -- the two years are not computed the same way;
# confirm which semantics is intended.
no_id_2007 <- dplyr::setdiff(membership_2007_big, membership_2007_small) %>%
left_join(advisors_full_2007) %>%
drop_na(TEAM__C) %>%
write_csv("new/2007/no_id_2007.csv")
# 2008 --------------
# Advisor roster for 2008: rename spreadsheet headers to Salesforce API names
# and lower-case emails so they join cleanly against the contact extract.
advisors_full_2008 <- read_excel("~/Desktop/Sustainable_Vision/sustainable_vision_grants_2008_advisors.xlsx") %>%
rename("ZENN_ID__C" = "Zenn ID",
"ROLE__C" = "Team Role",
"EMAIL" = "Email") %>%
mutate(EMAIL = tolower(EMAIL))
# Narrow copy carrying only the keys needed for the membership joins below.
advisors_2008 <- advisors_full_2008 %>%
select(ZENN_ID__C, ROLE__C, EMAIL)
# proposal
# Proposal load: rename raw headers to API names, derive typed columns,
# normalize institution names to the spellings used in the Organization
# extract, then resolve institution and program-cohort lookup IDs.
proposal_2008 <- read_excel("~/Desktop/Sustainable_Vision/sustainable_vision_grants_2008_proposals.xlsx") %>%
rename(
"NAME" = "Grant Title",
"PROJECT_DESCRIPTION_PROPOSAL_ABSTRACT__C" = "Proposal Summary",
"EXTERNAL_PROPOSAL_ID__C" = "External Proposal ID"
) %>%
mutate(
"year" = as.numeric(format(as.Date(`Date Created`),'%Y')),
# "invite resubmit" gets a custom label; everything else is title-cased.
"STATUS__C" = ifelse(`Application Status` == "invite resubmit", "Invited Resubmit", stri_trans_totitle(`Application Status`)),
"PROPOSAL_NAME_LONG_VERSION__C" = as.character(NAME),
"DATE_CREATED__C" = as.Date(`Date Created`),
"DATE_SUBMITTED__C" = as.Date(`Date Application Submitted`),
"GRANT_PERIOD_END__C" = as.Date(`Actual Period End`),
"GRANT_PERIOD_START__C" = as.Date(`Actual Period Begin`),
"AMOUNT_REQUESTED__C" = as.double(`Amount Requested`),
"ZENN_ID__C" = as.double(`Zenn ID`),
"AWARD_AMOUNT__C" = as.double(`Amount Approved`),
# Map spreadsheet institution spellings onto the canonical names in the
# Organization extract so the left_join below can find them.
"APPLYING_INSTITUTION_NAME__C" = ifelse(`Institution Name` == "University of Maryland, College Park", "University of Maryland-College Park",
ifelse(`Institution Name` == "Arizona State University at the Tempe Campus", "Arizona State University",
ifelse(`Institution Name` == "The City College of New York", "CUNY City College",
ifelse(`Institution Name` == "University of Oklahoma", "University of Oklahoma Norman Campus",
ifelse(`Institution Name` == "University of Texas at Arlington", "The University of Texas at Arlington",
ifelse(`Institution Name` == "University of Tennessee, Knoxville", "The University of Tennessee",
`Institution Name`))))))
) %>%
select(
year, NAME, AMOUNT_REQUESTED__C, PROPOSAL_NAME_LONG_VERSION__C, APPLYING_INSTITUTION_NAME__C,
AWARD_AMOUNT__C, DATE_CREATED__C, DATE_SUBMITTED__C, GRANT_PERIOD_END__C,
GRANT_PERIOD_START__C,
PROJECT_DESCRIPTION_PROPOSAL_ABSTRACT__C, ZENN_ID__C, STATUS__C,
EXTERNAL_PROPOSAL_ID__C
) %>%
filter(is.na(APPLYING_INSTITUTION_NAME__C) == FALSE) %>%
# Resolve the institution ID by exact name first, then by alias; coalesce
# keeps whichever join matched.
left_join(extract_p) %>%
left_join(extract_alias_p, by = "APPLYING_INSTITUTION_NAME__C") %>%
mutate(ID = coalesce(ID.x, ID.y)) %>%
select(-ID.x, -ID.y) %>%
rename("APPLYING_INSTITUTION__C" = "ID") %>%
left_join(match_p_2008) %>%
left_join(program_cohort) %>%
select( - `Zenn ID`, -year) %>%
unique()
# Strip control characters from the abstract so the CSV stays single-line.
proposal_2008$PROJECT_DESCRIPTION_PROPOSAL_ABSTRACT__C <- str_replace_all(proposal_2008$PROJECT_DESCRIPTION_PROPOSAL_ABSTRACT__C, "[:cntrl:]", " ")
# NOTE(review): sapply() collapses the data frame to a character matrix
# (dates/numerics become their as.character() form) before blanking NAs for
# the loader -- confirm the importer expects this formatting.
proposal_2008 <- sapply(proposal_2008, as.character)
proposal_2008[is.na(proposal_2008)] <- " "
proposal_2008 <- as.data.frame(proposal_2008)
write_csv(proposal_2008, "new/2008/proposal_2008.csv")
# Slim proposal view used to attach team/proposal IDs to memberships.
# NOTE(review): teamid_2008 is defined further down in this script, so this
# statement only works on a re-run after the proposal extract exists.
proposal_2008_narrow <- proposal_2008 %>%
select(NAME, ZENN_ID__C, EXTERNAL_PROPOSAL_ID__C, PROGRAM_COHORT__C, RECORDTYPEID) %>%
rename("TEAM_NAME_TEXT_ONLY_HIDDEN__C" = "NAME") %>%
left_join(teamid_2008, by = "EXTERNAL_PROPOSAL_ID__C")
# team
# Team records: one per grant title; ALIAS__C carries the full title when it
# exceeds 80 characters (otherwise it is blank).
team_2008 <- read_excel("~/Desktop/Sustainable_Vision/sustainable_vision_grants_2008_proposals.xlsx") %>%
rename(
"NAME" = "Grant Title"
) %>%
mutate(
"RECORDTYPEID" = "012390000009qKOAAY",
"ALIAS__C" = ifelse(nchar(NAME) > 80, NAME, "")
) %>%
select(
NAME, RECORDTYPEID, ALIAS__C
) %>%
left_join(match_p_2008) %>%
write_csv("new/2008/team_2008.csv")
# note_task
# Post-award notes become completed Salesforce Tasks; OWNER maps each note
# author to a Salesforce user ID, with unknown authors falling through to a
# default ID.
task_2008 <- read_excel("/Volumes/GoogleDrive/My Drive/Sustainable_Vision/sustainable_vision_grants_2008_post_award_notes.xlsx",
col_types = c("numeric", "text", "text",
"text")) %>%
set_names(c("Zenn ID", "Created Date", "Created by", "Note")) %>%
left_join(match_2008) %>%
rename("WHATID" = "Zenn ID",
"DESCRIPTION" = "Note") %>%
mutate(STATUS = "Completed",
PRIORITY = "Normal",
TYPE = "Internal Note",
TASKSUBTYPE = "Call",
ACTIVITYDATE = as.Date(`Created Date`),
SUBJECT = "Post Award Note--",
OWNER = ifelse(`Created by` == "Brenna Breeding", "00539000005UlQaAAK",
ifelse(`Created by` == "Michael Norton", "00539000004pukIAAQ",
ifelse(`Created by` == "Patricia Boynton", "00570000001K3bpAAC",
ifelse(`Created by` == "Rachel Agoglia", "00570000003QASWAA4",
"00570000004VlXPAA0"))
)
)
) %>%
# SUBJECT becomes "Post Award Note--<created date> <author>".
unite("SUBJECT", c(SUBJECT, `Created Date`), sep = "", remove = FALSE) %>%
unite("SUBJECT", c(SUBJECT, `Created by`), sep = " ", remove = FALSE) %>%
select(
WHATID, ACTIVITYDATE, `Created by`, DESCRIPTION, TYPE, STATUS, PRIORITY, OWNER, SUBJECT
) %>%
write_csv("new/2008/note_task_2008.csv")
# membership
# Proposal extract pulled back out of Salesforce (after the proposal load):
# supplies the Salesforce proposal and team IDs keyed by Zenn ID.
teamid_2008 <- read_csv("/Volumes/GoogleDrive/My Drive/Sustainable_Vision/new_dataset_migrate/2008/proposal_2008_extract.csv") %>%
select(ID, ZENN_ID__C, TEAM__C) %>%
rename("PROPOSAL__C" = "ID") %>%
mutate(ZENN_ID__C = as.character(ZENN_ID__C)) %>%
na.omit()
# Rebuild the narrow proposal view now that teamid_2008 exists, joining on
# Zenn ID as character (proposal_2008 was stringified above).
proposal_2008_narrow <- proposal_2008 %>%
select(NAME, ZENN_ID__C, EXTERNAL_PROPOSAL_ID__C, PROGRAM_COHORT__C, RECORDTYPEID) %>%
rename("TEAM_NAME_TEXT_ONLY_HIDDEN__C" = "NAME") %>%
mutate(ZENN_ID__C = as.character(ZENN_ID__C)) %>%
left_join(teamid_2008, by = "ZENN_ID__C")
# Membership rows for upload: one row per advisor x proposal. Non-funded
# proposals get the submission date as both start and end date and an
# Inactive status; right_join keeps only proposals that have advisors, and
# na.omit() drops advisors whose email has no Salesforce contact.
membership_2008 <- read_excel("~/Desktop/Sustainable_Vision/sustainable_vision_grants_2008_proposals.xlsx") %>%
select(
`Zenn ID`, `External Proposal ID`, `Application Status`,
`Grant Title`, `Institution ID`, `Date Application Submitted`,
`Actual Period Begin`, `Actual Period End`
) %>%
# NOTE(review): ifelse() strips the date class here, so the period columns
# come out as numeric serial values -- verify the loader accepts that.
mutate("Application Status" = ifelse(`Application Status` == "invite resubmit", "Invited Resubmit", stri_trans_totitle(`Application Status`)),
"Actual Period Begin" = ifelse(`Application Status` == "Funded", `Actual Period Begin`, `Date Application Submitted`),
"Actual Period End" = ifelse(`Application Status` == "Funded", `Actual Period End`, `Date Application Submitted`),
"STATUS__C" = ifelse(`Application Status` == "Funded", "Completed", "Inactive")
) %>%
rename("ZENN_ID__C" = "Zenn ID") %>%
left_join(proposal_2008_narrow) %>%
rename(
"PROGRAM_COHORT_LOOKUP__C" = "PROGRAM_COHORT__C",
"START_DATE__C" = "Actual Period Begin",
"END_DATE__C" = "Actual Period End"
) %>%
right_join(advisors_2008) %>%
select(
EMAIL, ZENN_ID__C,
TEAM__C, PROPOSAL__C, PROGRAM_COHORT_LOOKUP__C,
ROLE__C, STATUS__C, START_DATE__C, END_DATE__C, RECORDTYPEID
) %>%
mutate(EMAIL = tolower(EMAIL),
ROLE__C = ifelse(ROLE__C == "Dean of Faculty", "Dean", ROLE__C)) %>%
na.omit() %>%
# Attach the Salesforce contact ID by email; the second na.omit() drops
# advisors with no matching contact.
left_join(contacts_1, by = "EMAIL") %>%
rename("MEMBER__C" = "ID") %>%
na.omit() %>%
mutate(RECORDTYPEID = "012390000009qIDAAY") %>%
write_csv("new/2008/member_2008.csv")
# Same pipeline, but MEMBER__C is dropped at the end so the contact-matched
# rows can be compared with the unfiltered frame below.
membership_2008_small <- read_excel("~/Desktop/Sustainable_Vision/sustainable_vision_grants_2008_proposals.xlsx") %>%
select(
`Zenn ID`, `External Proposal ID`, `Application Status`,
`Grant Title`, `Institution ID`, `Date Application Submitted`,
`Actual Period Begin`, `Actual Period End`
) %>%
mutate("Application Status" = ifelse(`Application Status` == "invite resubmit", "Invited Resubmit", stri_trans_totitle(`Application Status`)),
"Actual Period Begin" = ifelse(`Application Status` == "Funded", `Actual Period Begin`, `Date Application Submitted`),
"Actual Period End" = ifelse(`Application Status` == "Funded", `Actual Period End`, `Date Application Submitted`),
"STATUS__C" = ifelse(`Application Status` == "Funded", "Completed", "Inactive")
) %>%
rename("ZENN_ID__C" = "Zenn ID") %>%
left_join(proposal_2008_narrow) %>%
rename(
"PROGRAM_COHORT_LOOKUP__C" = "PROGRAM_COHORT__C",
"START_DATE__C" = "Actual Period Begin",
"END_DATE__C" = "Actual Period End"
) %>%
right_join(advisors_2008) %>%
select(
EMAIL, ZENN_ID__C,
TEAM__C, PROPOSAL__C, PROGRAM_COHORT_LOOKUP__C,
ROLE__C, STATUS__C, START_DATE__C, END_DATE__C, RECORDTYPEID
) %>%
mutate(ROLE__C = ifelse(ROLE__C == "Dean of Faculty", "Dean", ROLE__C)) %>%
na.omit() %>%
left_join(contacts_1) %>%
rename("MEMBER__C" = "ID") %>%
select(-MEMBER__C)
# Full membership frame: no contact join and no na.omit() filtering, so it
# retains advisors the contact-matched pipeline dropped.
membership_2008_big <- read_excel("~/Desktop/Sustainable_Vision/sustainable_vision_grants_2008_proposals.xlsx") %>%
select(
`Zenn ID`, `External Proposal ID`, `Application Status`,
`Grant Title`, `Institution ID`, `Date Application Submitted`,
`Actual Period Begin`, `Actual Period End`
) %>%
mutate("Application Status" = ifelse(`Application Status` == "invite resubmit", "Invited Resubmit", stri_trans_totitle(`Application Status`)),
"Actual Period Begin" = ifelse(`Application Status` == "Funded", `Actual Period Begin`, `Date Application Submitted`),
"Actual Period End" = ifelse(`Application Status` == "Funded", `Actual Period End`, `Date Application Submitted`),
"STATUS__C" = ifelse(`Application Status` == "Funded", "Completed", "Inactive")
) %>%
rename("ZENN_ID__C" = "Zenn ID") %>%
left_join(proposal_2008_narrow) %>%
rename(
"PROGRAM_COHORT_LOOKUP__C" = "PROGRAM_COHORT__C",
"START_DATE__C" = "Actual Period Begin",
"END_DATE__C" = "Actual Period End"
) %>%
right_join(advisors_2008) %>%
select(
EMAIL, ZENN_ID__C,
TEAM__C, PROPOSAL__C, PROGRAM_COHORT_LOOKUP__C,
ROLE__C, STATUS__C, START_DATE__C, END_DATE__C, RECORDTYPEID
) %>%
mutate(ROLE__C = ifelse(ROLE__C == "Dean of Faculty", "Dean", ROLE__C))
# 2008 advisors with no matching Salesforce contact, exported for review.
no_id_2008 <- dplyr::setdiff(membership_2008_big, membership_2008_small) %>%
left_join(advisors_full_2008) %>%
drop_na(TEAM__C) %>%
write_csv("new/2008/no_id_2008.csv")
|
/yr06_08.R
|
no_license
|
Starryz/VW-Summer-Internship
|
R
| false
| false
| 36,021
|
r
|
library(readxl)
library(tidyverse)
library(stringi)
library(readr)
library(sqldf)
# help match --------------
# Program-cohort lookup: maps each grant year (2006-2008) to its Salesforce
# PROGRAM_COHORT__C record ID. RECORDTYPEID and PROPOSAL_FUNDER__C are the
# same for every cohort, so they are repeated explicitly rather than relying
# on data.frame() recycling.
program_cohort <- data.frame(
  year = 2006:2008,
  PROGRAM_COHORT__C = c(
    "a2C39000002zYsyEAE",
    "a2C39000002zYt3EAE",
    "a2C39000002zYt8EAE"
  ),
  RECORDTYPEID = rep("01239000000Ap02AAC", 3L),
  PROPOSAL_FUNDER__C = rep("The Lemelson Foundation", 3L)
)
## for commit
# Organization extract from Salesforce, loaded twice with different key
# columns: once keyed on the canonical NAME, once on the alias, so later
# joins can match either spelling. The "_c" pair is keyed for commit-style
# (granted institution) joins.
extract_c <- read_csv("/Volumes/GoogleDrive/My Drive/Sustainable_Vision/salesforce_examples/Organization_extract.csv") %>%
rename("GRANTED_INSTITUTION__C" = "NAME") %>%
select(-ORGANIZATION_ALIAS_NAME__C)
extract_alias_c <- read_csv("/Volumes/GoogleDrive/My Drive/Sustainable_Vision/salesforce_examples/Organization_extract.csv") %>%
rename("GRANTED_INSTITUTION__C" = "ORGANIZATION_ALIAS_NAME__C") %>%
select(-NAME)
## for proposal
# The "_p" pair is identical except keyed for proposal (applying institution)
# joins; the alias table drops rows with no alias so blank aliases never match.
extract_p <- read_csv("/Volumes/GoogleDrive/My Drive/Sustainable_Vision/salesforce_examples/Organization_extract.csv") %>%
rename("APPLYING_INSTITUTION_NAME__C" = "NAME") %>%
select(-ORGANIZATION_ALIAS_NAME__C)
extract_alias_p <- read_csv("/Volumes/GoogleDrive/My Drive/Sustainable_Vision/salesforce_examples/Organization_extract.csv") %>%
rename("APPLYING_INSTITUTION_NAME__C" = "ORGANIZATION_ALIAS_NAME__C") %>%
select(-NAME) %>%
na.omit()
## match Zenn ID and team name
# Per-year lookup tables linking Zenn ID <-> grant title <-> institution name.
# Explicit col_types pin every spreadsheet column so readxl does not guess.
match_2006 <- read_excel("~/Desktop/Sustainable_Vision/sustainable_vision_grants_2006_proposals.xlsx",
col_types = c("numeric", "text", "text",
"text", "text", "numeric", "text",
"text", "text", "text", "text", "text",
"text", "text", "numeric", "text",
"numeric", "text", "text", "text",
"numeric", "text", "text", "text",
"numeric", "text", "text", "numeric",
"text", "text", "numeric", "text",
"text", "text", "text", "text")) %>%
select(`Zenn ID`, `Grant Title`, `Institution Name`)
# "_c" variant keys on institution, "_p" variant keys on grant title.
match_c_2006 <- match_2006 %>%
rename("GRANTED_INSTITUTION__C" = "Institution Name") %>%
select(-`Grant Title`)
match_p_2006 <- match_2006 %>%
rename("NAME" = "Grant Title") %>%
select(-`Institution Name`)
match_2007 <- read_excel("~/Desktop/Sustainable_Vision/sustainable_vision_grants_2007_proposals.xlsx",
col_types = c("numeric", "text", "text",
"text", "text", "numeric", "text",
"text", "text", "text", "text", "text",
"text", "text", "numeric", "text",
"numeric", "text", "text", "text",
"numeric", "text", "text", "text",
"numeric", "text", "text", "numeric",
"text", "text", "numeric", "text",
"text", "text", "text", "text")) %>%
select(`Zenn ID`, `Grant Title`, `Institution Name`)
match_c_2007 <- match_2007 %>%
rename("GRANTED_INSTITUTION__C" = "Institution Name") %>%
select(-`Grant Title`)
match_p_2007 <- match_2007 %>%
rename("NAME" = "Grant Title") %>%
select(-`Institution Name`)
match_2008 <- read_excel("~/Desktop/Sustainable_Vision/sustainable_vision_grants_2008_proposals.xlsx",
col_types = c("numeric", "text", "text",
"text", "text", "numeric", "text",
"text", "text", "text", "text", "text",
"text", "text", "numeric", "text",
"numeric", "text", "text", "text",
"numeric", "text", "text", "text",
"numeric", "text", "text", "numeric",
"text", "text", "numeric", "text",
"text", "text", "text", "text")) %>%
select(`Zenn ID`, `Grant Title`, `Institution Name`)
match_c_2008 <- match_2008 %>%
rename("GRANTED_INSTITUTION__C" = "Institution Name") %>%
select(-`Grant Title`)
match_p_2008 <- match_2008 %>%
rename("NAME" = "Grant Title") %>%
select(-`Institution Name`)
# for membership match
# Salesforce contact extract; only ID + primary EMAIL are used for matching
# (contacts_1), although the alternate email columns are read as well.
contacts <- read_csv("/Volumes/GoogleDrive/My Drive/Sustainable_Vision/salesforce_examples/Contact_Extract.csv") %>%
select(ID, EMAIL, NPE01__ALTERNATEEMAIL__C, NPE01__HOMEEMAIL__C,
NPE01__WORKEMAIL__C, PREVIOUS_EMAIL_ADDRESSES__C, BKUP_EMAIL_ADDRESS__C)
contacts_1 <- contacts %>%
select(ID, EMAIL)
# 2006 --------------
# Advisor roster for 2006: rename spreadsheet headers to Salesforce API names
# and lower-case emails so they join cleanly against the contact extract.
advisors_full_2006 <- read_excel("~/Desktop/Sustainable_Vision/sustainable_vision_grants_2006_advisors.xlsx") %>%
rename("ZENN_ID__C" = "Zenn ID",
"ROLE__C" = "Team Role",
"EMAIL" = "Email") %>%
mutate(EMAIL = tolower(EMAIL))
# Narrow copy carrying only the keys needed for the membership joins below.
advisors_2006 <- advisors_full_2006 %>%
select(ZENN_ID__C, ROLE__C, EMAIL)
# proposal
# Proposal load: rename raw headers to API names, derive typed columns,
# normalize institution names to the spellings used in the Organization
# extract (2006 needs fewer corrections than 2007/2008), then resolve the
# institution and program-cohort lookup IDs.
proposal_2006 <- read_excel("~/Desktop/Sustainable_Vision/sustainable_vision_grants_2006_proposals.xlsx") %>%
rename(
"NAME" = "Grant Title",
"PROJECT_DESCRIPTION_PROPOSAL_ABSTRACT__C" = "Proposal Summary",
"EXTERNAL_PROPOSAL_ID__C" = "External Proposal ID"
) %>%
mutate(
"year" = as.numeric(format(as.Date(`Date Created`),'%Y')),
# "invite resubmit" gets a custom label; everything else is title-cased.
"STATUS__C" = ifelse(`Application Status` == "invite resubmit", "Invited Resubmit", stri_trans_totitle(`Application Status`)),
"PROPOSAL_NAME_LONG_VERSION__C" = as.character(NAME),
"DATE_CREATED__C" = as.Date(`Date Created`),
"DATE_SUBMITTED__C" = as.Date(`Date Application Submitted`),
"GRANT_PERIOD_END__C" = as.Date(`Actual Period End`),
"GRANT_PERIOD_START__C" = as.Date(`Actual Period Begin`),
"AMOUNT_REQUESTED__C" = as.double(`Amount Requested`),
"ZENN_ID__C" = as.double(`Zenn ID`),
"AWARD_AMOUNT__C" = as.double(`Amount Approved`),
"APPLYING_INSTITUTION_NAME__C" = ifelse(`Institution Name` == "University of Maryland, College Park", "University of Maryland-College Park",
ifelse(`Institution Name` == "Arizona State University at the Tempe Campus", "Arizona State University",
ifelse(`Institution Name` == "The City College of New York", "CUNY City College",
ifelse(`Institution Name` == "University of Oklahoma", "University of Oklahoma Norman Campus",
`Institution Name`))))
) %>%
select(
year, NAME, AMOUNT_REQUESTED__C, PROPOSAL_NAME_LONG_VERSION__C, APPLYING_INSTITUTION_NAME__C,
AWARD_AMOUNT__C, DATE_CREATED__C, DATE_SUBMITTED__C, GRANT_PERIOD_END__C,
GRANT_PERIOD_START__C,
PROJECT_DESCRIPTION_PROPOSAL_ABSTRACT__C, ZENN_ID__C, STATUS__C,
EXTERNAL_PROPOSAL_ID__C
) %>%
filter(is.na(APPLYING_INSTITUTION_NAME__C) == FALSE) %>%
# Resolve the institution ID by exact name first, then by alias; coalesce
# keeps whichever join matched.
left_join(extract_p) %>%
left_join(extract_alias_p, by = "APPLYING_INSTITUTION_NAME__C") %>%
mutate(ID = coalesce(ID.x, ID.y)) %>%
select(-ID.x, -ID.y) %>%
rename("APPLYING_INSTITUTION__C" = "ID") %>%
left_join(match_p_2006) %>%
left_join(program_cohort) %>%
select( - `Zenn ID`, -year) %>%
unique()
# Strip control characters from the abstract so the CSV stays single-line.
proposal_2006$PROJECT_DESCRIPTION_PROPOSAL_ABSTRACT__C <- str_replace_all(proposal_2006$PROJECT_DESCRIPTION_PROPOSAL_ABSTRACT__C, "[:cntrl:]", " ")
# NOTE(review): sapply() collapses the data frame to a character matrix
# (dates/numerics become their as.character() form) before blanking NAs for
# the loader -- confirm the importer expects this formatting.
proposal_2006 <- sapply(proposal_2006, as.character)
proposal_2006[is.na(proposal_2006)] <- " "
proposal_2006 <- as.data.frame(proposal_2006)
write_csv(proposal_2006, "new/2006/proposal_2006.csv")
# Slim proposal view used to attach team/proposal IDs to memberships.
# NOTE(review): teamid_2006 is defined further down in this script, so this
# statement only works on a re-run after the proposal extract exists.
proposal_2006_narrow <- proposal_2006 %>%
select(NAME, ZENN_ID__C, EXTERNAL_PROPOSAL_ID__C, PROGRAM_COHORT__C, RECORDTYPEID) %>%
rename("TEAM_NAME_TEXT_ONLY_HIDDEN__C" = "NAME") %>%
left_join(teamid_2006, by = "EXTERNAL_PROPOSAL_ID__C")
# team
# Team records: one per grant title; ALIAS__C carries the full title when it
# exceeds 80 characters (otherwise it is blank).
team_2006 <- read_excel("~/Desktop/Sustainable_Vision/sustainable_vision_grants_2006_proposals.xlsx") %>%
rename(
"NAME" = "Grant Title"
) %>%
mutate(
"RECORDTYPEID" = "012390000009qKOAAY",
"ALIAS__C" = ifelse(nchar(NAME) > 80, NAME, "")
) %>%
select(
NAME, RECORDTYPEID, ALIAS__C
) %>%
left_join(match_p_2006) %>%
write_csv("new/2006/team_2006.csv")
# note_task
# Post-award notes become completed Salesforce Tasks; OWNER maps each note
# author to a Salesforce user ID, with unknown authors falling through to a
# default ID.
task_2006 <- read_excel("/Volumes/GoogleDrive/My Drive/Sustainable_Vision/sustainable_vision_grants_2006_post_award_notes.xlsx",
col_types = c("numeric", "text", "text",
"text")) %>%
set_names(c("Zenn ID", "Created Date", "Created by", "Note")) %>%
left_join(match_2006) %>%
rename("WHATID" = "Zenn ID",
"DESCRIPTION" = "Note") %>%
mutate(STATUS = "Completed",
PRIORITY = "Normal",
TYPE = "Internal Note",
TASKSUBTYPE = "Call",
ACTIVITYDATE = as.Date(`Created Date`),
SUBJECT = "Post Award Note--",
OWNER = ifelse(`Created by` == "Brenna Breeding", "00539000005UlQaAAK",
ifelse(`Created by` == "Michael Norton", "00539000004pukIAAQ",
ifelse(`Created by` == "Patricia Boynton", "00570000001K3bpAAC",
ifelse(`Created by` == "Rachel Agoglia", "00570000003QASWAA4",
"00570000004VlXPAA0"))
)
)
) %>%
# SUBJECT becomes "Post Award Note--<created date> <author>".
unite("SUBJECT", c(SUBJECT, `Created Date`), sep = "", remove = FALSE) %>%
unite("SUBJECT", c(SUBJECT, `Created by`), sep = " ", remove = FALSE) %>%
select(
WHATID, ACTIVITYDATE, `Created by`, DESCRIPTION, TYPE, STATUS, PRIORITY, OWNER, SUBJECT
) %>%
write_csv("new/2006/note_task_2006.csv")
# membership
# Proposal extract pulled back out of Salesforce (after the proposal load):
# supplies the Salesforce proposal and team IDs keyed by Zenn ID.
teamid_2006 <- read_csv("/Volumes/GoogleDrive/My Drive/Sustainable_Vision/new_dataset_migrate/2006/proposal_2006_extract.csv") %>%
select(ID, ZENN_ID__C, TEAM__C) %>%
rename("PROPOSAL__C" = "ID") %>%
mutate(ZENN_ID__C = as.character(ZENN_ID__C)) %>%
na.omit()
# Rebuild the narrow proposal view now that teamid_2006 exists, joining on
# Zenn ID as character (proposal_2006 was stringified above).
proposal_2006_narrow <- proposal_2006 %>%
select(NAME, ZENN_ID__C, EXTERNAL_PROPOSAL_ID__C, PROGRAM_COHORT__C, RECORDTYPEID) %>%
rename("TEAM_NAME_TEXT_ONLY_HIDDEN__C" = "NAME") %>%
mutate(ZENN_ID__C = as.character(ZENN_ID__C)) %>%
left_join(teamid_2006, by = "ZENN_ID__C")
# Membership rows for upload: one row per advisor x proposal. Non-funded
# proposals get the submission date as both start and end date and an
# Inactive status; right_join keeps only proposals that have advisors, and
# na.omit() drops advisors whose email has no Salesforce contact.
membership_2006 <- read_excel("~/Desktop/Sustainable_Vision/sustainable_vision_grants_2006_proposals.xlsx") %>%
select(
`Zenn ID`, `External Proposal ID`, `Application Status`,
`Grant Title`, `Institution ID`, `Date Application Submitted`,
`Actual Period Begin`, `Actual Period End`
) %>%
# NOTE(review): ifelse() strips the date class here, so the period columns
# come out as numeric serial values -- verify the loader accepts that.
mutate("Application Status" = ifelse(`Application Status` == "invite resubmit", "Invited Resubmit", stri_trans_totitle(`Application Status`)),
"Actual Period Begin" = ifelse(`Application Status` == "Funded", `Actual Period Begin`, `Date Application Submitted`),
"Actual Period End" = ifelse(`Application Status` == "Funded", `Actual Period End`, `Date Application Submitted`),
"STATUS__C" = ifelse(`Application Status` == "Funded", "Completed", "Inactive")
) %>%
rename("ZENN_ID__C" = "Zenn ID") %>%
# NOTE(review): this join is on numeric ZENN_ID__C against the character
# column in proposal_2006_narrow -- confirm the join types line up.
left_join(proposal_2006_narrow, by = "ZENN_ID__C") %>%
rename(
"PROGRAM_COHORT_LOOKUP__C" = "PROGRAM_COHORT__C",
"START_DATE__C" = "Actual Period Begin",
"END_DATE__C" = "Actual Period End"
) %>%
right_join(advisors_2006) %>%
select(
EMAIL, ZENN_ID__C,
TEAM__C, PROPOSAL__C, PROGRAM_COHORT_LOOKUP__C,
ROLE__C, STATUS__C, START_DATE__C, END_DATE__C, RECORDTYPEID
) %>%
mutate(EMAIL = tolower(EMAIL),
ROLE__C = ifelse(ROLE__C == "Dean of Faculty", "Dean", ROLE__C)) %>%
na.omit() %>%
# Attach the Salesforce contact ID by email; the second na.omit() drops
# advisors with no matching contact.
left_join(contacts_1, by = "EMAIL") %>%
rename("MEMBER__C" = "ID") %>%
na.omit() %>%
mutate(RECORDTYPEID = "012390000009qIDAAY") %>%
write_csv("new/2006/member_2006.csv")
# Same pipeline, but MEMBER__C is dropped at the end so the contact-matched
# rows can be compared with the unfiltered frame below.
membership_2006_small <- read_excel("~/Desktop/Sustainable_Vision/sustainable_vision_grants_2006_proposals.xlsx") %>%
select(
`Zenn ID`, `External Proposal ID`, `Application Status`,
`Grant Title`, `Institution ID`, `Date Application Submitted`,
`Actual Period Begin`, `Actual Period End`
) %>%
mutate("Application Status" = ifelse(`Application Status` == "invite resubmit", "Invited Resubmit", stri_trans_totitle(`Application Status`)),
"Actual Period Begin" = ifelse(`Application Status` == "Funded", `Actual Period Begin`, `Date Application Submitted`),
"Actual Period End" = ifelse(`Application Status` == "Funded", `Actual Period End`, `Date Application Submitted`),
"STATUS__C" = ifelse(`Application Status` == "Funded", "Completed", "Inactive")
) %>%
rename("ZENN_ID__C" = "Zenn ID") %>%
left_join(proposal_2006_narrow) %>%
rename(
"PROGRAM_COHORT_LOOKUP__C" = "PROGRAM_COHORT__C",
"START_DATE__C" = "Actual Period Begin",
"END_DATE__C" = "Actual Period End"
) %>%
right_join(advisors_2006) %>%
select(
EMAIL, ZENN_ID__C,
TEAM__C, PROPOSAL__C, PROGRAM_COHORT_LOOKUP__C,
ROLE__C, STATUS__C, START_DATE__C, END_DATE__C, RECORDTYPEID
) %>%
mutate(ROLE__C = ifelse(ROLE__C == "Dean of Faculty", "Dean", ROLE__C)) %>%
na.omit() %>%
left_join(contacts_1) %>%
rename("MEMBER__C" = "ID") %>%
select(-MEMBER__C)
# Full membership frame: no contact join and no na.omit() filtering, so it
# retains advisors the contact-matched pipeline dropped.
membership_2006_big <- read_excel("~/Desktop/Sustainable_Vision/sustainable_vision_grants_2006_proposals.xlsx") %>%
select(
`Zenn ID`, `External Proposal ID`, `Application Status`,
`Grant Title`, `Institution ID`, `Date Application Submitted`,
`Actual Period Begin`, `Actual Period End`
) %>%
mutate("Application Status" = ifelse(`Application Status` == "invite resubmit", "Invited Resubmit", stri_trans_totitle(`Application Status`)),
"Actual Period Begin" = ifelse(`Application Status` == "Funded", `Actual Period Begin`, `Date Application Submitted`),
"Actual Period End" = ifelse(`Application Status` == "Funded", `Actual Period End`, `Date Application Submitted`),
"STATUS__C" = ifelse(`Application Status` == "Funded", "Completed", "Inactive")
) %>%
rename("ZENN_ID__C" = "Zenn ID") %>%
left_join(proposal_2006_narrow) %>%
rename(
"PROGRAM_COHORT_LOOKUP__C" = "PROGRAM_COHORT__C",
"START_DATE__C" = "Actual Period Begin",
"END_DATE__C" = "Actual Period End"
) %>%
right_join(advisors_2006) %>%
select(
EMAIL, ZENN_ID__C,
TEAM__C, PROPOSAL__C, PROGRAM_COHORT_LOOKUP__C,
ROLE__C, STATUS__C, START_DATE__C, END_DATE__C, RECORDTYPEID
) %>%
mutate(ROLE__C = ifelse(ROLE__C == "Dean of Faculty", "Dean", ROLE__C))
# 2006 advisors with no matching Salesforce contact, exported for review.
# Consistency fix: the 2007 and 2008 sections compute this with
# dplyr::setdiff(), but 2006 used dplyr::anti_join(). With identical columns
# the two agree except on duplicated rows (setdiff de-duplicates its result);
# setdiff is used here so all three years behave identically.
no_id_2006 <- dplyr::setdiff(membership_2006_big, membership_2006_small) %>%
left_join(advisors_full_2006) %>%
drop_na(TEAM__C) %>%
# write_csv() returns its input invisibly, so no_id_2006 also keeps the data.
write_csv("new/2006/no_id_2006.csv")
# 2007 --------------
# Advisor roster for 2007: rename spreadsheet headers to Salesforce API names
# and lower-case emails so they join cleanly against the contact extract.
advisors_full_2007 <- read_excel("~/Desktop/Sustainable_Vision/sustainable_vision_grants_2007_advisors.xlsx") %>%
rename("ZENN_ID__C" = "Zenn ID",
"ROLE__C" = "Team Role",
"EMAIL" = "Email") %>%
mutate(EMAIL = tolower(EMAIL))
# Narrow copy carrying only the keys needed for the membership joins below.
advisors_2007 <- advisors_full_2007 %>%
select(ZENN_ID__C, ROLE__C, EMAIL)
# proposal
# Proposal load: rename raw headers to API names, derive typed columns,
# normalize institution names to the spellings used in the Organization
# extract, then resolve institution and program-cohort lookup IDs.
proposal_2007 <- read_excel("~/Desktop/Sustainable_Vision/sustainable_vision_grants_2007_proposals.xlsx") %>%
rename(
"NAME" = "Grant Title",
"PROJECT_DESCRIPTION_PROPOSAL_ABSTRACT__C" = "Proposal Summary",
"EXTERNAL_PROPOSAL_ID__C" = "External Proposal ID"
) %>%
mutate(
"year" = as.numeric(format(as.Date(`Date Created`),'%Y')),
# "invite resubmit" gets a custom label; everything else is title-cased.
"STATUS__C" = ifelse(`Application Status` == "invite resubmit", "Invited Resubmit", stri_trans_totitle(`Application Status`)),
"PROPOSAL_NAME_LONG_VERSION__C" = as.character(NAME),
"DATE_CREATED__C" = as.Date(`Date Created`),
"DATE_SUBMITTED__C" = as.Date(`Date Application Submitted`),
"GRANT_PERIOD_END__C" = as.Date(`Actual Period End`),
"GRANT_PERIOD_START__C" = as.Date(`Actual Period Begin`),
"AMOUNT_REQUESTED__C" = as.double(`Amount Requested`),
"ZENN_ID__C" = as.double(`Zenn ID`),
"AWARD_AMOUNT__C" = as.double(`Amount Approved`),
# Map spreadsheet institution spellings onto the canonical names in the
# Organization extract so the left_join below can find them.
"APPLYING_INSTITUTION_NAME__C" = ifelse(`Institution Name` == "University of Maryland, College Park", "University of Maryland-College Park",
ifelse(`Institution Name` == "Arizona State University at the Tempe Campus", "Arizona State University",
ifelse(`Institution Name` == "The City College of New York", "CUNY City College",
ifelse(`Institution Name` == "University of Oklahoma", "University of Oklahoma Norman Campus",
ifelse(`Institution Name` == "University of Texas at Arlington", "The University of Texas at Arlington",
ifelse(`Institution Name` == "University of Tennessee, Knoxville", "The University of Tennessee",
`Institution Name`))))))
) %>%
select(
year, NAME, AMOUNT_REQUESTED__C, PROPOSAL_NAME_LONG_VERSION__C, APPLYING_INSTITUTION_NAME__C,
AWARD_AMOUNT__C, DATE_CREATED__C, DATE_SUBMITTED__C, GRANT_PERIOD_END__C,
GRANT_PERIOD_START__C,
PROJECT_DESCRIPTION_PROPOSAL_ABSTRACT__C, ZENN_ID__C, STATUS__C,
EXTERNAL_PROPOSAL_ID__C
) %>%
filter(is.na(APPLYING_INSTITUTION_NAME__C) == FALSE) %>%
# Resolve the institution ID by exact name first, then by alias; coalesce
# keeps whichever join matched.
left_join(extract_p) %>%
left_join(extract_alias_p, by = "APPLYING_INSTITUTION_NAME__C") %>%
mutate(ID = coalesce(ID.x, ID.y)) %>%
select(-ID.x, -ID.y) %>%
rename("APPLYING_INSTITUTION__C" = "ID") %>%
left_join(match_p_2007) %>%
left_join(program_cohort) %>%
select( - `Zenn ID`, -year) %>%
unique()
# Strip control characters from the abstract so the CSV stays single-line.
proposal_2007$PROJECT_DESCRIPTION_PROPOSAL_ABSTRACT__C <- str_replace_all(proposal_2007$PROJECT_DESCRIPTION_PROPOSAL_ABSTRACT__C, "[:cntrl:]", " ")
# NOTE(review): sapply() collapses the data frame to a character matrix
# (dates/numerics become their as.character() form) before blanking NAs for
# the loader -- confirm the importer expects this formatting.
proposal_2007 <- sapply(proposal_2007, as.character)
proposal_2007[is.na(proposal_2007)] <- " "
proposal_2007 <- as.data.frame(proposal_2007)
write_csv(proposal_2007, "new/2007/proposal_2007.csv")
# Slim proposal view used to attach team/proposal IDs to memberships.
# NOTE(review): teamid_2007 is defined further down in this script, so this
# statement only works on a re-run after the proposal extract exists.
proposal_2007_narrow <- proposal_2007 %>%
select(NAME, ZENN_ID__C, EXTERNAL_PROPOSAL_ID__C, PROGRAM_COHORT__C, RECORDTYPEID) %>%
rename("TEAM_NAME_TEXT_ONLY_HIDDEN__C" = "NAME") %>%
left_join(teamid_2007, by = "EXTERNAL_PROPOSAL_ID__C")
# team
# Team records: one per grant title; ALIAS__C carries the full title when it
# exceeds 80 characters (otherwise it is blank).
team_2007 <- read_excel("~/Desktop/Sustainable_Vision/sustainable_vision_grants_2007_proposals.xlsx") %>%
rename(
"NAME" = "Grant Title"
) %>%
mutate(
"RECORDTYPEID" = "012390000009qKOAAY",
"ALIAS__C" = ifelse(nchar(NAME) > 80, NAME, "")
) %>%
select(
NAME, RECORDTYPEID, ALIAS__C
) %>%
left_join(match_p_2007) %>%
write_csv("new/2007/team_2007.csv")
# note_task
# Post-award notes become completed Salesforce Tasks; OWNER maps each note
# author to a Salesforce user ID, with unknown authors falling through to a
# default ID.
task_2007 <- read_excel("/Volumes/GoogleDrive/My Drive/Sustainable_Vision/sustainable_vision_grants_2007_post_award_notes.xlsx",
col_types = c("numeric", "text", "text",
"text")) %>%
set_names(c("Zenn ID", "Created Date", "Created by", "Note")) %>%
left_join(match_2007) %>%
rename("WHATID" = "Zenn ID",
"DESCRIPTION" = "Note") %>%
mutate(STATUS = "Completed",
PRIORITY = "Normal",
TYPE = "Internal Note",
TASKSUBTYPE = "Call",
ACTIVITYDATE = as.Date(`Created Date`),
SUBJECT = "Post Award Note--",
OWNER = ifelse(`Created by` == "Brenna Breeding", "00539000005UlQaAAK",
ifelse(`Created by` == "Michael Norton", "00539000004pukIAAQ",
ifelse(`Created by` == "Patricia Boynton", "00570000001K3bpAAC",
ifelse(`Created by` == "Rachel Agoglia", "00570000003QASWAA4",
"00570000004VlXPAA0"))
)
)
) %>%
# SUBJECT becomes "Post Award Note--<created date> <author>".
unite("SUBJECT", c(SUBJECT, `Created Date`), sep = "", remove = FALSE) %>%
unite("SUBJECT", c(SUBJECT, `Created by`), sep = " ", remove = FALSE) %>%
select(
WHATID, ACTIVITYDATE, `Created by`, DESCRIPTION, TYPE, STATUS, PRIORITY, OWNER, SUBJECT
) %>%
write_csv("new/2007/note_task_2007.csv")
# membership
# Proposal extract pulled back out of Salesforce (after the proposal load):
# supplies the Salesforce proposal and team IDs keyed by Zenn ID.
teamid_2007 <- read_csv("/Volumes/GoogleDrive/My Drive/Sustainable_Vision/new_dataset_migrate/2007/proposal_2007_extract.csv") %>%
select(ID, ZENN_ID__C, TEAM__C) %>%
rename("PROPOSAL__C" = "ID") %>%
mutate(ZENN_ID__C = as.character(ZENN_ID__C)) %>%
na.omit()
# Rebuild the narrow proposal view now that teamid_2007 exists, joining on
# Zenn ID as character (proposal_2007 was stringified above).
proposal_2007_narrow <- proposal_2007 %>%
select(NAME, ZENN_ID__C, EXTERNAL_PROPOSAL_ID__C, PROGRAM_COHORT__C, RECORDTYPEID) %>%
rename("TEAM_NAME_TEXT_ONLY_HIDDEN__C" = "NAME") %>%
mutate(ZENN_ID__C = as.character(ZENN_ID__C)) %>%
left_join(teamid_2007, by = "ZENN_ID__C")
# Membership rows for upload: one row per advisor x proposal. Non-funded
# proposals get the submission date as both start and end date and an
# Inactive status; right_join keeps only proposals that have advisors, and
# na.omit() drops advisors whose email has no Salesforce contact.
membership_2007 <- read_excel("~/Desktop/Sustainable_Vision/sustainable_vision_grants_2007_proposals.xlsx") %>%
select(
`Zenn ID`, `External Proposal ID`, `Application Status`,
`Grant Title`, `Institution ID`, `Date Application Submitted`,
`Actual Period Begin`, `Actual Period End`
) %>%
# NOTE(review): ifelse() strips the date class here, so the period columns
# come out as numeric serial values -- verify the loader accepts that.
mutate("Application Status" = ifelse(`Application Status` == "invite resubmit", "Invited Resubmit", stri_trans_totitle(`Application Status`)),
"Actual Period Begin" = ifelse(`Application Status` == "Funded", `Actual Period Begin`, `Date Application Submitted`),
"Actual Period End" = ifelse(`Application Status` == "Funded", `Actual Period End`, `Date Application Submitted`),
"STATUS__C" = ifelse(`Application Status` == "Funded", "Completed", "Inactive")
) %>%
rename("ZENN_ID__C" = "Zenn ID") %>%
left_join(proposal_2007_narrow) %>%
rename(
"PROGRAM_COHORT_LOOKUP__C" = "PROGRAM_COHORT__C",
"START_DATE__C" = "Actual Period Begin",
"END_DATE__C" = "Actual Period End"
) %>%
right_join(advisors_2007) %>%
select(
EMAIL, ZENN_ID__C,
TEAM__C, PROPOSAL__C, PROGRAM_COHORT_LOOKUP__C,
ROLE__C, STATUS__C, START_DATE__C, END_DATE__C, RECORDTYPEID
) %>%
mutate(EMAIL = tolower(EMAIL),
ROLE__C = ifelse(ROLE__C == "Dean of Faculty", "Dean", ROLE__C)) %>%
na.omit() %>%
# Attach the Salesforce contact ID by email; the second na.omit() drops
# advisors with no matching contact.
left_join(contacts_1, by = "EMAIL") %>%
rename("MEMBER__C" = "ID") %>%
na.omit() %>%
mutate(RECORDTYPEID = "012390000009qIDAAY") %>%
write_csv("new/2007/member_2007.csv")
# Same pipeline, but MEMBER__C is dropped at the end so the contact-matched
# rows can be compared with the unfiltered frame below.
membership_2007_small <- read_excel("~/Desktop/Sustainable_Vision/sustainable_vision_grants_2007_proposals.xlsx") %>%
select(
`Zenn ID`, `External Proposal ID`, `Application Status`,
`Grant Title`, `Institution ID`, `Date Application Submitted`,
`Actual Period Begin`, `Actual Period End`
) %>%
mutate("Application Status" = ifelse(`Application Status` == "invite resubmit", "Invited Resubmit", stri_trans_totitle(`Application Status`)),
"Actual Period Begin" = ifelse(`Application Status` == "Funded", `Actual Period Begin`, `Date Application Submitted`),
"Actual Period End" = ifelse(`Application Status` == "Funded", `Actual Period End`, `Date Application Submitted`),
"STATUS__C" = ifelse(`Application Status` == "Funded", "Completed", "Inactive")
) %>%
rename("ZENN_ID__C" = "Zenn ID") %>%
left_join(proposal_2007_narrow) %>%
rename(
"PROGRAM_COHORT_LOOKUP__C" = "PROGRAM_COHORT__C",
"START_DATE__C" = "Actual Period Begin",
"END_DATE__C" = "Actual Period End"
) %>%
right_join(advisors_2007) %>%
select(
EMAIL, ZENN_ID__C,
TEAM__C, PROPOSAL__C, PROGRAM_COHORT_LOOKUP__C,
ROLE__C, STATUS__C, START_DATE__C, END_DATE__C, RECORDTYPEID
) %>%
mutate(ROLE__C = ifelse(ROLE__C == "Dean of Faculty", "Dean", ROLE__C)) %>%
na.omit() %>%
left_join(contacts_1) %>%
rename("MEMBER__C" = "ID") %>%
select(-MEMBER__C)
# Full membership frame: no contact join and no na.omit() filtering, so it
# retains advisors the contact-matched pipeline dropped.
membership_2007_big <- read_excel("~/Desktop/Sustainable_Vision/sustainable_vision_grants_2007_proposals.xlsx") %>%
select(
`Zenn ID`, `External Proposal ID`, `Application Status`,
`Grant Title`, `Institution ID`, `Date Application Submitted`,
`Actual Period Begin`, `Actual Period End`
) %>%
mutate("Application Status" = ifelse(`Application Status` == "invite resubmit", "Invited Resubmit", stri_trans_totitle(`Application Status`)),
"Actual Period Begin" = ifelse(`Application Status` == "Funded", `Actual Period Begin`, `Date Application Submitted`),
"Actual Period End" = ifelse(`Application Status` == "Funded", `Actual Period End`, `Date Application Submitted`),
"STATUS__C" = ifelse(`Application Status` == "Funded", "Completed", "Inactive")
) %>%
rename("ZENN_ID__C" = "Zenn ID") %>%
left_join(proposal_2007_narrow) %>%
rename(
"PROGRAM_COHORT_LOOKUP__C" = "PROGRAM_COHORT__C",
"START_DATE__C" = "Actual Period Begin",
"END_DATE__C" = "Actual Period End"
) %>%
right_join(advisors_2007) %>%
select(
EMAIL, ZENN_ID__C,
TEAM__C, PROPOSAL__C, PROGRAM_COHORT_LOOKUP__C,
ROLE__C, STATUS__C, START_DATE__C, END_DATE__C, RECORDTYPEID
) %>%
mutate(ROLE__C = ifelse(ROLE__C == "Dean of Faculty", "Dean", ROLE__C))
# 2007 advisors with no matching Salesforce contact: rows present in the
# unfiltered "big" membership frame but absent from the contact-matched
# "small" frame (setdiff compares on all columns and de-duplicates), joined
# back to the full advisor detail and exported for manual review.
no_id_2007 <- dplyr::setdiff(membership_2007_big, membership_2007_small) %>%
left_join(advisors_full_2007) %>%
drop_na(TEAM__C) %>%
# write_csv() returns its input invisibly, so no_id_2007 also keeps the data.
write_csv("new/2007/no_id_2007.csv")
# 2008 --------------
advisors_full_2008 <- read_excel("~/Desktop/Sustainable_Vision/sustainable_vision_grants_2008_advisors.xlsx") %>%
rename("ZENN_ID__C" = "Zenn ID",
"ROLE__C" = "Team Role",
"EMAIL" = "Email") %>%
mutate(EMAIL = tolower(EMAIL))
advisors_2008 <- advisors_full_2008 %>%
select(ZENN_ID__C, ROLE__C, EMAIL)
# proposal
# ---------------------------------------------------------------------------
# Build the 2008 proposal import table for Salesforce.
# Reads the raw 2008 grants spreadsheet, renames spreadsheet columns to the
# Salesforce API field names (*__C), normalizes statuses and institution
# names, and attaches institution / cohort lookup IDs.
# NOTE(review): depends on objects defined elsewhere in this script
# (extract_p, extract_alias_p, match_p_2008, program_cohort, teamid_2008).
# teamid_2008 as (re)defined later in this file has no
# EXTERNAL_PROPOSAL_ID__C column, so the final join below appears to rely
# on an earlier definition -- confirm execution order.
# ---------------------------------------------------------------------------
proposal_2008 <- read_excel("~/Desktop/Sustainable_Vision/sustainable_vision_grants_2008_proposals.xlsx") %>%
rename(
"NAME" = "Grant Title",
"PROJECT_DESCRIPTION_PROPOSAL_ABSTRACT__C" = "Proposal Summary",
"EXTERNAL_PROPOSAL_ID__C" = "External Proposal ID"
) %>%
# Map raw columns onto Salesforce fields: dates coerced to Date, amounts to
# double, and "invite resubmit" recoded before title-casing other statuses.
mutate(
"year" = as.numeric(format(as.Date(`Date Created`),'%Y')),
"STATUS__C" = ifelse(`Application Status` == "invite resubmit", "Invited Resubmit", stri_trans_totitle(`Application Status`)),
"PROPOSAL_NAME_LONG_VERSION__C" = as.character(NAME),
"DATE_CREATED__C" = as.Date(`Date Created`),
"DATE_SUBMITTED__C" = as.Date(`Date Application Submitted`),
"GRANT_PERIOD_END__C" = as.Date(`Actual Period End`),
"GRANT_PERIOD_START__C" = as.Date(`Actual Period Begin`),
"AMOUNT_REQUESTED__C" = as.double(`Amount Requested`),
"ZENN_ID__C" = as.double(`Zenn ID`),
"AWARD_AMOUNT__C" = as.double(`Amount Approved`),
# Normalize institution names to the canonical spellings used by the
# institution lookup tables joined below.
"APPLYING_INSTITUTION_NAME__C" = ifelse(`Institution Name` == "University of Maryland, College Park", "University of Maryland-College Park",
ifelse(`Institution Name` == "Arizona State University at the Tempe Campus", "Arizona State University",
ifelse(`Institution Name` == "The City College of New York", "CUNY City College",
ifelse(`Institution Name` == "University of Oklahoma", "University of Oklahoma Norman Campus",
ifelse(`Institution Name` == "University of Texas at Arlington", "The University of Texas at Arlington",
ifelse(`Institution Name` == "University of Tennessee, Knoxville", "The University of Tennessee",
`Institution Name`))))))
) %>%
select(
year, NAME, AMOUNT_REQUESTED__C, PROPOSAL_NAME_LONG_VERSION__C, APPLYING_INSTITUTION_NAME__C,
AWARD_AMOUNT__C, DATE_CREATED__C, DATE_SUBMITTED__C, GRANT_PERIOD_END__C,
GRANT_PERIOD_START__C,
PROJECT_DESCRIPTION_PROPOSAL_ABSTRACT__C, ZENN_ID__C, STATUS__C,
EXTERNAL_PROPOSAL_ID__C
) %>%
# Drop rows with no institution, then resolve the institution ID via two
# lookups (direct name and alias table), keeping whichever join matched.
filter(is.na(APPLYING_INSTITUTION_NAME__C) == FALSE) %>%
left_join(extract_p) %>%
left_join(extract_alias_p, by = "APPLYING_INSTITUTION_NAME__C") %>%
mutate(ID = coalesce(ID.x, ID.y)) %>%
select(-ID.x, -ID.y) %>%
rename("APPLYING_INSTITUTION__C" = "ID") %>%
left_join(match_p_2008) %>%
left_join(program_cohort) %>%
select( - `Zenn ID`, -year) %>%
unique()
# Scrub control characters from the abstract so they do not break CSV import.
proposal_2008$PROJECT_DESCRIPTION_PROPOSAL_ABSTRACT__C <- str_replace_all(proposal_2008$PROJECT_DESCRIPTION_PROPOSAL_ABSTRACT__C, "[:cntrl:]", " ")
# NOTE(review): sapply() returns a character *matrix* here; every column
# becomes character once re-wrapped by as.data.frame() below. NA cells are
# blanked to a single space for the importer.
proposal_2008 <- sapply(proposal_2008, as.character)
proposal_2008[is.na(proposal_2008)] <- " "
proposal_2008 <- as.data.frame(proposal_2008)
write_csv(proposal_2008, "new/2008/proposal_2008.csv")
# Narrow view used to attach team/proposal IDs to membership records.
proposal_2008_narrow <- proposal_2008 %>%
select(NAME, ZENN_ID__C, EXTERNAL_PROPOSAL_ID__C, PROGRAM_COHORT__C, RECORDTYPEID) %>%
rename("TEAM_NAME_TEXT_ONLY_HIDDEN__C" = "NAME") %>%
left_join(teamid_2008, by = "EXTERNAL_PROPOSAL_ID__C")
# team
# Build the 2008 team import table: one team per grant title, with the
# hard-coded Salesforce record-type ID for Team records.
# ALIAS__C keeps the full title when it exceeds 80 characters -- presumably
# because the Salesforce Name field is limited to 80 chars; confirm.
# NOTE(review): the pipeline ends in write_csv(), which returns its input
# invisibly, so team_2008 also holds the joined data frame.
team_2008 <- read_excel("~/Desktop/Sustainable_Vision/sustainable_vision_grants_2008_proposals.xlsx") %>%
rename(
"NAME" = "Grant Title"
) %>%
mutate(
"RECORDTYPEID" = "012390000009qKOAAY",
"ALIAS__C" = ifelse(nchar(NAME) > 80, NAME, "")
) %>%
select(
NAME, RECORDTYPEID, ALIAS__C
) %>%
left_join(match_p_2008) %>%
write_csv("new/2008/team_2008.csv")
# note_task
# Build the 2008 post-award note tasks for Salesforce: each spreadsheet row
# becomes a completed Task whose SUBJECT is
# "Post Award Note--<Created Date> <Created by>" (built by the two unite()
# calls below). OWNER maps known staff names to Salesforce user IDs, with a
# catch-all fallback ID for anyone else.
task_2008 <- read_excel("/Volumes/GoogleDrive/My Drive/Sustainable_Vision/sustainable_vision_grants_2008_post_award_notes.xlsx",
col_types = c("numeric", "text", "text",
"text")) %>%
set_names(c("Zenn ID", "Created Date", "Created by", "Note")) %>%
left_join(match_2008) %>%
rename("WHATID" = "Zenn ID",
"DESCRIPTION" = "Note") %>%
mutate(STATUS = "Completed",
PRIORITY = "Normal",
TYPE = "Internal Note",
TASKSUBTYPE = "Call",
ACTIVITYDATE = as.Date(`Created Date`),
SUBJECT = "Post Award Note--",
OWNER = ifelse(`Created by` == "Brenna Breeding", "00539000005UlQaAAK",
ifelse(`Created by` == "Michael Norton", "00539000004pukIAAQ",
ifelse(`Created by` == "Patricia Boynton", "00570000001K3bpAAC",
ifelse(`Created by` == "Rachel Agoglia", "00570000003QASWAA4",
"00570000004VlXPAA0"))
)
)
) %>%
# Append the creation date and author to the SUBJECT prefix set above.
unite("SUBJECT", c(SUBJECT, `Created Date`), sep = "", remove = FALSE) %>%
unite("SUBJECT", c(SUBJECT, `Created by`), sep = " ", remove = FALSE) %>%
select(
WHATID, ACTIVITYDATE, `Created by`, DESCRIPTION, TYPE, STATUS, PRIORITY, OWNER, SUBJECT
) %>%
write_csv("new/2008/note_task_2008.csv")
# memebrship
# Load the proposal IDs extracted from Salesforce after the proposal import,
# keyed by Zenn ID (coerced to character for a type-safe join).
teamid_2008 <- read_csv("/Volumes/GoogleDrive/My Drive/Sustainable_Vision/new_dataset_migrate/2008/proposal_2008_extract.csv") %>%
select(ID, ZENN_ID__C, TEAM__C) %>%
rename("PROPOSAL__C" = "ID") %>%
mutate(ZENN_ID__C = as.character(ZENN_ID__C)) %>%
na.omit()
# Redefine the narrow proposal view, this time joining the Salesforce IDs by
# ZENN_ID__C (the earlier definition joined by EXTERNAL_PROPOSAL_ID__C).
proposal_2008_narrow <- proposal_2008 %>%
select(NAME, ZENN_ID__C, EXTERNAL_PROPOSAL_ID__C, PROGRAM_COHORT__C, RECORDTYPEID) %>%
rename("TEAM_NAME_TEXT_ONLY_HIDDEN__C" = "NAME") %>%
mutate(ZENN_ID__C = as.character(ZENN_ID__C)) %>%
left_join(teamid_2008, by = "ZENN_ID__C")
# Build the 2008 team-membership import table: one row per advisor per
# proposal, with member contact IDs resolved by (lower-cased) email.
# Within the mutate(), columns are evaluated sequentially, so the "Funded"
# comparisons below already see the recoded/title-cased status.
# NOTE(review): ifelse() on the period columns strips the date/datetime
# class, so START/END become numeric -- confirm downstream expects that.
membership_2008 <- read_excel("~/Desktop/Sustainable_Vision/sustainable_vision_grants_2008_proposals.xlsx") %>%
select(
`Zenn ID`, `External Proposal ID`, `Application Status`,
`Grant Title`, `Institution ID`, `Date Application Submitted`,
`Actual Period Begin`, `Actual Period End`
) %>%
mutate("Application Status" = ifelse(`Application Status` == "invite resubmit", "Invited Resubmit", stri_trans_totitle(`Application Status`)),
"Actual Period Begin" = ifelse(`Application Status` == "Funded", `Actual Period Begin`, `Date Application Submitted`),
"Actual Period End" = ifelse(`Application Status` == "Funded", `Actual Period End`, `Date Application Submitted`),
"STATUS__C" = ifelse(`Application Status` == "Funded", "Completed", "Inactive")
) %>%
rename("ZENN_ID__C" = "Zenn ID") %>%
left_join(proposal_2008_narrow) %>%
rename(
"PROGRAM_COHORT_LOOKUP__C" = "PROGRAM_COHORT__C",
"START_DATE__C" = "Actual Period Begin",
"END_DATE__C" = "Actual Period End"
) %>%
# right_join keeps every advisor row, attaching proposal/team context.
right_join(advisors_2008) %>%
select(
EMAIL, ZENN_ID__C,
TEAM__C, PROPOSAL__C, PROGRAM_COHORT_LOOKUP__C,
ROLE__C, STATUS__C, START_DATE__C, END_DATE__C, RECORDTYPEID
) %>%
mutate(EMAIL = tolower(EMAIL),
ROLE__C = ifelse(ROLE__C == "Dean of Faculty", "Dean", ROLE__C)) %>%
na.omit() %>%
# Resolve the member's Salesforce contact ID by email; rows without a
# matching contact are dropped by the second na.omit().
left_join(contacts_1, by = "EMAIL") %>%
rename("MEMBER__C" = "ID") %>%
na.omit() %>%
mutate(RECORDTYPEID = "012390000009qIDAAY") %>%
write_csv("new/2008/member_2008.csv")
# Same pipeline as membership_2008 but WITHOUT lower-casing emails and
# without the final na.omit()/write: used as the "rows that did match a
# contact" baseline for the setdiff() that finds unmatched advisors below.
membership_2008_small <- read_excel("~/Desktop/Sustainable_Vision/sustainable_vision_grants_2008_proposals.xlsx") %>%
select(
`Zenn ID`, `External Proposal ID`, `Application Status`,
`Grant Title`, `Institution ID`, `Date Application Submitted`,
`Actual Period Begin`, `Actual Period End`
) %>%
mutate("Application Status" = ifelse(`Application Status` == "invite resubmit", "Invited Resubmit", stri_trans_totitle(`Application Status`)),
"Actual Period Begin" = ifelse(`Application Status` == "Funded", `Actual Period Begin`, `Date Application Submitted`),
"Actual Period End" = ifelse(`Application Status` == "Funded", `Actual Period End`, `Date Application Submitted`),
"STATUS__C" = ifelse(`Application Status` == "Funded", "Completed", "Inactive")
) %>%
rename("ZENN_ID__C" = "Zenn ID") %>%
left_join(proposal_2008_narrow) %>%
rename(
"PROGRAM_COHORT_LOOKUP__C" = "PROGRAM_COHORT__C",
"START_DATE__C" = "Actual Period Begin",
"END_DATE__C" = "Actual Period End"
) %>%
right_join(advisors_2008) %>%
select(
EMAIL, ZENN_ID__C,
TEAM__C, PROPOSAL__C, PROGRAM_COHORT_LOOKUP__C,
ROLE__C, STATUS__C, START_DATE__C, END_DATE__C, RECORDTYPEID
) %>%
mutate(ROLE__C = ifelse(ROLE__C == "Dean of Faculty", "Dean", ROLE__C)) %>%
# na.omit() here drops advisor rows with any missing field *before* the
# contact join, then the joined contact ID column is discarded.
na.omit() %>%
left_join(contacts_1) %>%
rename("MEMBER__C" = "ID") %>%
select(-MEMBER__C)
# Same pipeline again but stopping before the na.omit(): the full advisor
# set including rows with missing fields.
membership_2008_big <- read_excel("~/Desktop/Sustainable_Vision/sustainable_vision_grants_2008_proposals.xlsx") %>%
select(
`Zenn ID`, `External Proposal ID`, `Application Status`,
`Grant Title`, `Institution ID`, `Date Application Submitted`,
`Actual Period Begin`, `Actual Period End`
) %>%
mutate("Application Status" = ifelse(`Application Status` == "invite resubmit", "Invited Resubmit", stri_trans_totitle(`Application Status`)),
"Actual Period Begin" = ifelse(`Application Status` == "Funded", `Actual Period Begin`, `Date Application Submitted`),
"Actual Period End" = ifelse(`Application Status` == "Funded", `Actual Period End`, `Date Application Submitted`),
"STATUS__C" = ifelse(`Application Status` == "Funded", "Completed", "Inactive")
) %>%
rename("ZENN_ID__C" = "Zenn ID") %>%
left_join(proposal_2008_narrow) %>%
rename(
"PROGRAM_COHORT_LOOKUP__C" = "PROGRAM_COHORT__C",
"START_DATE__C" = "Actual Period Begin",
"END_DATE__C" = "Actual Period End"
) %>%
right_join(advisors_2008) %>%
select(
EMAIL, ZENN_ID__C,
TEAM__C, PROPOSAL__C, PROGRAM_COHORT_LOOKUP__C,
ROLE__C, STATUS__C, START_DATE__C, END_DATE__C, RECORDTYPEID
) %>%
mutate(ROLE__C = ifelse(ROLE__C == "Dean of Faculty", "Dean", ROLE__C))
# Advisors present in the full set but absent from the matched set, i.e.
# rows dropped for missing data; re-attach full advisor details and keep
# only those that at least have a team, then export for manual follow-up.
no_id_2008 <- dplyr::setdiff(membership_2008_big, membership_2008_small) %>%
left_join(advisors_full_2008) %>%
drop_na(TEAM__C) %>%
write_csv("new/2008/no_id_2008.csv")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/estimate_confidence.R
\name{estimate_confidence}
\alias{estimate_confidence}
\title{estimate_confidence
estimate confidence intervals for choc analysis}
\usage{
estimate_confidence(
mychoc,
method = "perm",
conf = 0.95,
nb_replicates = 500,
ncores = 1,
progressbar = TRUE
)
}
\arguments{
\item{mychoc}{a list as returned by \link{choc}}
\item{method}{either "perm" (default) or "kern", see details}
\item{conf}{size of the confidence interval}
\item{nb_replicates}{number of replicates used to assess confidence intervals}
\item{ncores}{Number of cores used. The parallelization will take place only if OpenMP is supported (default 1)}
\item{progressbar}{(default TRUE) show progressbar (might be a bit slower)}
}
\value{
an updated version of mychoc with two columns added to mychoc$grid which corresponds to the bounds of the confidence interval
}
\description{
estimate_confidence
estimate confidence intervals for choc analysis
}
\section{Details}{
Two methods are available: "perm" permutes the kernel per time step and estimates Kendall's tau on the permutations.
"kern" fits a kernel to the whole dataset (assuming that there is no time trend) and uses this overall kernel to
generate surrogate data sets on which Kendall's tau values are estimated. Permutation is a good solution when there is
seasonality within time steps that should be preserved; however, it requires more time steps. "kern" is a good solution when
there is no seasonality within time steps or when the number of observations per time step is large enough.
}
\examples{
#retrieve results of a choc function
data(res_choc)
#here we put a low number of replicates to limit computation time
#res_confid <- estimate_confidence(res_choc,"perm",0.95,50)
}
|
/man/estimate_confidence.Rd
|
permissive
|
Irstea/chocR
|
R
| false
| true
| 1,824
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/estimate_confidence.R
\name{estimate_confidence}
\alias{estimate_confidence}
\title{estimate_confidence
estimate confidence intervals for choc analysis}
\usage{
estimate_confidence(
mychoc,
method = "perm",
conf = 0.95,
nb_replicates = 500,
ncores = 1,
progressbar = TRUE
)
}
\arguments{
\item{mychoc}{a list as returned by \link{choc}}
\item{method}{either "perm" (default) or "kern", see details}
\item{conf}{size of the confidence interval}
\item{nb_replicates}{number of replicates used to assess confidence intervals}
\item{ncores}{Number of cores used. The parallelization will take place only if OpenMP is supported (default 1)}
\item{progressbar}{(default TRUE) show progressbar (might be a bit slower)}
}
\value{
an updated version of mychoc with two columns added to mychoc$grid which corresponds to the bounds of the confidence interval
}
\description{
estimate_confidence
estimate confidence intervals for choc analysis
}
\section{Details}{
Two methods are available: "perm" permutes the kernel per time step and estimates Kendall's tau on the permutations.
"kern" fits a kernel to the whole dataset (assuming that there is no time trend) and uses this overall kernel to
generate surrogate data sets on which Kendall's tau values are estimated. Permutation is a good solution when there is
seasonality within time steps that should be preserved; however, it requires more time steps. "kern" is a good solution when
there is no seasonality within time steps or when the number of observations per time step is large enough.
}
\examples{
#retrieve results of a choc function
data(res_choc)
#here we put a low number of replicates to limit computation time
#res_confid <- estimate_confidence(res_choc,"perm",0.95,50)
}
|
# Worked example for IDSpatialStats::est.transdist: simulate an epidemic
# with a known transmission kernel, then recover the kernel's mean/sd.
library(IDSpatialStats)
### Name: est.transdist
### Title: Estimate transmission distance
### Aliases: est.transdist
### ** Examples
set.seed(123)
# Exponentially distributed transmission kernel with mean and standard deviation = 100
# (alist keeps n and a unevaluated so sim.epidemic can supply/evaluate them)
dist.func <- alist(n=1, a=1/100, rexp(n, a))
# Simulate epidemic
a <- sim.epidemic(R=1.5,
gen.t.mean=7,
gen.t.sd=2,
min.cases=50,
tot.generations=12,
trans.kern.func=dist.func)
# Estimate mean and standard deviation of transmission kernel
# (max.sep/max.dist set very large so no case pairs are excluded;
# n.transtree.reps kept small to limit example run time)
b <- est.transdist(epi.data=a,
gen.t.mean=7,
gen.t.sd=2,
t1=0,
max.sep=1e10,
max.dist=1e10,
n.transtree.reps=10)
b
|
/data/genthat_extracted_code/IDSpatialStats/examples/est.transdist.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 803
|
r
|
# Worked example for IDSpatialStats::est.transdist: simulate an epidemic
# with a known transmission kernel, then recover the kernel's mean/sd.
library(IDSpatialStats)
### Name: est.transdist
### Title: Estimate transmission distance
### Aliases: est.transdist
### ** Examples
set.seed(123)
# Exponentially distributed transmission kernel with mean and standard deviation = 100
# (alist keeps n and a unevaluated so sim.epidemic can supply/evaluate them)
dist.func <- alist(n=1, a=1/100, rexp(n, a))
# Simulate epidemic
a <- sim.epidemic(R=1.5,
gen.t.mean=7,
gen.t.sd=2,
min.cases=50,
tot.generations=12,
trans.kern.func=dist.func)
# Estimate mean and standard deviation of transmission kernel
# (max.sep/max.dist set very large so no case pairs are excluded;
# n.transtree.reps kept small to limit example run time)
b <- est.transdist(epi.data=a,
gen.t.mean=7,
gen.t.sd=2,
t1=0,
max.sep=1e10,
max.dist=1e10,
n.transtree.reps=10)
b
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mod_map_countryUI.R
\name{mod_map_countryUI}
\alias{mod_map_countryUI}
\alias{mod_map_country}
\title{mod_map_countryUI and mod_map_country}
\usage{
mod_map_countryUI(id)
mod_map_country(input, output, session, dataframe)
}
\arguments{
\item{id}{shiny id}
\item{input}{internal}
\item{output}{internal}
\item{session}{internal}
\item{dataframe}{dataframe with columns named "prix-euros", "nom_commune", "carrosserie", "transmission", "brand", "date", "energie", "nb_places", "kilometrage_km" and "nb_portes"}
}
\description{
A shiny module that displays a map of the results in the whole country by region
}
\examples{
"No example to display"
}
|
/package/findyourdreamcar/man/mod_map_countryUI.Rd
|
permissive
|
ludmilaexbrayat/findyourcar
|
R
| false
| true
| 729
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/mod_map_countryUI.R
\name{mod_map_countryUI}
\alias{mod_map_countryUI}
\alias{mod_map_country}
\title{mod_map_countryUI and mod_map_country}
\usage{
mod_map_countryUI(id)
mod_map_country(input, output, session, dataframe)
}
\arguments{
\item{id}{shiny id}
\item{input}{internal}
\item{output}{internal}
\item{session}{internal}
\item{dataframe}{dataframe with columns named "prix-euros", "nom_commune", "carrosserie", "transmission", "brand", "date", "energie", "nb_places", "kilometrage_km" and "nb_portes"}
}
\description{
A shiny module that displays a map of the results in the whole country by region
}
\examples{
"No example to display"
}
|
## Plot 2 of the "Exploratory Data Analysis" course project:
## Global Active Power over time for 2007-02-01 .. 2007-02-02.
## Download the dataset
# setwd("./Project1")
# dataurl <- "https://github.com/rdpeng/ExData_Plotting1"
# datafile <- file.path(getwd(), "household_power_consumption.zip")
# download.file(dataurl, datafile, method = "curl")
##--unzip(datafile, exdir = "./Data")
## This file is for loading the large dataset.
## Getting full dataset
allData <- read.table("./Data/household_power_consumption.txt",
header = TRUE,
sep = ";",
colClasses = c("character", "character", rep("numeric", 7)),
na.strings = "?") # BUG FIX: was `na = "?"`, which relied on partial argument matching
allData$Date <- as.Date(allData$Date, format = "%d/%m/%Y")
## Subset the data to the two days 2007-02-01 .. 2007-02-02
## (character dates are coerced to Date by Ops.Date in the comparison)
data <- subset(allData, subset = (Date >= "2007-02-01" & Date <= "2007-02-02"))
# free up memory
rm(allData)
## Combine date and time into a single POSIXct timestamp for the x axis
datetime <- paste(as.Date(data$Date), data$Time)
data$Datetime <- as.POSIXct(datetime)
## Plot 2: draw directly to the PNG device instead of dev.copy(), which can
## fail or produce a partial copy when no screen device is open (e.g. Rscript)
png(file = "plot2.png", height = 480, width = 480)
plot(data$Global_active_power ~ data$Datetime, type = "l",
ylab = "Global Active Power (kilowatts)", xlab = "")
dev.off()
|
/plot2.R
|
no_license
|
bwrightprojects/ExData_Plotting1
|
R
| false
| false
| 1,102
|
r
|
## Plot 2 of the "Exploratory Data Analysis" course project:
## Global Active Power over time for 2007-02-01 .. 2007-02-02.
## Download the dataset
# setwd("./Project1")
# dataurl <- "https://github.com/rdpeng/ExData_Plotting1"
# datafile <- file.path(getwd(), "household_power_consumption.zip")
# download.file(dataurl, datafile, method = "curl")
##--unzip(datafile, exdir = "./Data")
## This file is for loading the large dataset.
## Getting full dataset
allData <- read.table("./Data/household_power_consumption.txt",
header = TRUE,
sep = ";",
colClasses = c("character", "character", rep("numeric", 7)),
na.strings = "?") # BUG FIX: was `na = "?"`, which relied on partial argument matching
allData$Date <- as.Date(allData$Date, format = "%d/%m/%Y")
## Subset the data to the two days 2007-02-01 .. 2007-02-02
## (character dates are coerced to Date by Ops.Date in the comparison)
data <- subset(allData, subset = (Date >= "2007-02-01" & Date <= "2007-02-02"))
# free up memory
rm(allData)
## Combine date and time into a single POSIXct timestamp for the x axis
datetime <- paste(as.Date(data$Date), data$Time)
data$Datetime <- as.POSIXct(datetime)
## Plot 2: draw directly to the PNG device instead of dev.copy(), which can
## fail or produce a partial copy when no screen device is open (e.g. Rscript)
png(file = "plot2.png", height = 480, width = 480)
plot(data$Global_active_power ~ data$Datetime, type = "l",
ylab = "Global Active Power (kilowatts)", xlab = "")
dev.off()
|
library(ggplot2)
## Plot 4: total US PM2.5 emissions from coal-combustion sources by year.
# load data
message('loading summarySCC_PM25.rds')
NEI <- readRDS("summarySCC_PM25.rds")
message('loading Source_Classification_Code.rds') # BUG FIX: message named the wrong file
SCC <- readRDS("Source_Classification_Code.rds")
NEI$type <- as.factor(NEI$type)
# create a logical vector of the entries in SCC that are coal combustion related
# (including lignite)
coalLog <- with(SCC,
grepl('[Cc]ombustion', SCC.Level.One) &
(grepl('[Cc]oal', SCC.Level.Three) | grepl('[Cc]oal', SCC.Level.Four) |
grepl('[Ll]ignite', SCC.Level.Three) | grepl('[Ll]ignite', SCC.Level.Four)
))
# get the source-classification codes for those entries
coalcodes <- SCC$SCC[coalLog]
### Point plot: total emissions per year across coal-related sources
coaled <- NEI[NEI$SCC %in% coalcodes, ]
totals <- aggregate(Emissions ~ year, coaled, sum)
# BUG FIX: original passed na.rm=true -- `true` is undefined in R (TRUE is
# the constant) and na.rm is not a ggplot() argument anyway; removed.
# Also renamed `labels` (shadowed base::labels) and dropped the unused
# scale_colour_discrete legend (no colour aesthetic is mapped).
g <- ggplot(totals, aes(year, Emissions))
plot_labels <- labs(x = 'year', y = 'Emissions (tons)', title = 'PM2.5 Emissions across the US from Coal Combustion')
theplot <- g + geom_point(size = 3, colour = 'red') + plot_labels + geom_smooth(method = 'lm', fill = NA, lty = 2, colour = 'red')
message('writing plot4.png')
png('plot4.png')
print(theplot)
dev.off()
## add a line to this
# boxplot
# hard to see a trend
# coaled<-NEI[NEI$SCC %in% coalcodes,]
# g<-ggplot(aes(factor(year),log10(Emissions)),data=coaled,na.rm=true)
# g + geom_boxplot()
|
/plot4.R
|
no_license
|
petethegreat/ExploratoryDataAnalysisProject
|
R
| false
| false
| 1,391
|
r
|
library(ggplot2)
## Plot 4: total US PM2.5 emissions from coal-combustion sources by year.
# load data
message('loading summarySCC_PM25.rds')
NEI <- readRDS("summarySCC_PM25.rds")
message('loading Source_Classification_Code.rds') # BUG FIX: message named the wrong file
SCC <- readRDS("Source_Classification_Code.rds")
NEI$type <- as.factor(NEI$type)
# create a logical vector of the entries in SCC that are coal combustion related
# (including lignite)
coalLog <- with(SCC,
grepl('[Cc]ombustion', SCC.Level.One) &
(grepl('[Cc]oal', SCC.Level.Three) | grepl('[Cc]oal', SCC.Level.Four) |
grepl('[Ll]ignite', SCC.Level.Three) | grepl('[Ll]ignite', SCC.Level.Four)
))
# get the source-classification codes for those entries
coalcodes <- SCC$SCC[coalLog]
### Point plot: total emissions per year across coal-related sources
coaled <- NEI[NEI$SCC %in% coalcodes, ]
totals <- aggregate(Emissions ~ year, coaled, sum)
# BUG FIX: original passed na.rm=true -- `true` is undefined in R (TRUE is
# the constant) and na.rm is not a ggplot() argument anyway; removed.
# Also renamed `labels` (shadowed base::labels) and dropped the unused
# scale_colour_discrete legend (no colour aesthetic is mapped).
g <- ggplot(totals, aes(year, Emissions))
plot_labels <- labs(x = 'year', y = 'Emissions (tons)', title = 'PM2.5 Emissions across the US from Coal Combustion')
theplot <- g + geom_point(size = 3, colour = 'red') + plot_labels + geom_smooth(method = 'lm', fill = NA, lty = 2, colour = 'red')
message('writing plot4.png')
png('plot4.png')
print(theplot)
dev.off()
## add a line to this
# boxplot
# hard to see a trend
# coaled<-NEI[NEI$SCC %in% coalcodes,]
# g<-ggplot(aes(factor(year),log10(Emissions)),data=coaled,na.rm=true)
# g + geom_boxplot()
|
#Apply BUSseq to the hematopoietic study.
# Pipeline: load two-batch count data -> run BUSseq MCMC with K = 7 cell
# types -> extract intrinsic genes, cell-type labels, and batch-corrected
# counts -> save the workspace.
rm(list=ls())
library(BUSseq)
###########################
# Load Hematopoietic Data #
###########################
# Working directory
# setwd("G:/scRNA/Journal/Github_reproduce/Mouse_Hematopoietic")
# Loading hematopoietic count data
load("./RawCountData/hemat_countdata.RData")
# dataA2 / dataF2 come from the .RData just loaded: one count matrix per
# batch, keyed here by their GEO accession numbers.
HematCounts <- list(GSE72857 = dataA2,
GSE81682 = dataF2)
##########################################
# Apply BUSseq to the Hematopoietic Data #
##########################################
# the seed is a randomly sampled integer between 1 and 10,000
seed.est <- 4011
# We ran BUSseq for the number of cell types K equal to
# 3, 4, 5, 6, 7, 8, 9 and 10, and select K = 7 according to BIC
K <- 7
# Conducting MCMC sampling
BUSseqfits_hemat <- BUSseq_MCMC(ObservedData = HematCounts, n.celltypes = K,
n.iterations = 8000, seed = seed.est,
hyper_slab = 50, hyper_tau0 = c(2,0.01))
# # BIC values of the other numbers of cell types are generated by the following codes
# # all seeds are randomly sampled between 1 and 10,000.
# # We strongly recommend running the BUSseq_MCMC in parallel.
#
# BUSseqfits_hemat_K3 <- BUSseq_MCMC(Data = HematCounts, n.celltypes = 3,
# n.iterations = 8000, seed = 6706,
# hyper_slab = 50, hyper_tau0 = c(2,0.01))
# BUSseqfits_hemat_K4 <- BUSseq_MCMC(Data = HematCounts, n.celltypes = 4,
# n.iterations = 8000, seed = 4693,
# hyper_slab = 50, hyper_tau0 = c(2,0.01))
# BUSseqfits_hemat_K5 <- BUSseq_MCMC(Data = HematCounts, n.celltypes = 5,
# n.iterations = 8000, seed = 4481,
# hyper_slab = 50, hyper_tau0 = c(2,0.01))
# BUSseqfits_hemat_K6 <- BUSseq_MCMC(Data = HematCounts, n.celltypes = 6,
# n.iterations = 8000, seed = 4078,
# hyper_slab = 50, hyper_tau0 = c(2,0.01))
# BUSseqfits_hemat_K8 <- BUSseq_MCMC(Data = HematCounts, n.celltypes = 8,
# n.iterations = 8000, seed = 1177,
# hyper_slab = 50, hyper_tau0 = c(2,0.01))
# BUSseqfits_hemat_K9 <- BUSseq_MCMC(Data = HematCounts, n.celltypes = 9,
# n.iterations = 8000, seed = 4654,
# hyper_slab = 50, hyper_tau0 = c(2,0.01))
# BUSseqfits_hemat_K10 <- BUSseq_MCMC(Data = HematCounts, n.celltypes = 10,
# n.iterations = 8000, seed = 7398,
# hyper_slab = 50, hyper_tau0 = c(2,0.01))
# BIC_values <- rep(NA,8)
# BIC_values[1] <- BIC_BUSseq(BUSseqfits_hemat_K3)
# BIC_values[2] <- BIC_BUSseq(BUSseqfits_hemat_K4)
# BIC_values[3] <- BIC_BUSseq(BUSseqfits_hemat_K5)
# BIC_values[4] <- BIC_BUSseq(BUSseqfits_hemat_K6)
# BIC_values[5] <- BIC_BUSseq(BUSseqfits_hemat)
# BIC_values[6] <- BIC_BUSseq(BUSseqfits_hemat_K8)
# BIC_values[7] <- BIC_BUSseq(BUSseqfits_hemat_K9)
# BIC_values[8] <- BIC_BUSseq(BUSseqfits_hemat_K10)
# names(BIC_values) <- paste0("K=",3:10)
# # As a result, the BIC values are
# # K=3 K=4 K=5 K=6 K=7 K=8 K=9 K=10
# # 48000531 47941792 47948555 47964609 47938554 48024762 48048079 48114358
# if(!dir.exists("Image")){
# dir.create("Image")
# }
# if(!dir.exists("./Image/Other")){
# dir.create("./Image/Other")
# }
# png("./Image/Other/BIC_values.png",width = 540, height = 720)
# par(mar = c(5.1,6.1,4.1,2.1))
# plot(3:10,BIC_values,xlab= "K",ylab = "BIC",type="n",cex.axis=3,cex.lab=3)
# points(3:10,BIC_values,type="b",pch=19,cex=3)
# dev.off()
#####################################
# Obtain the intrinsic gene indices #
#####################################
# Genes differentially expressed across cell types at FDR 0.05
intrinsic_gene_indices <- intrinsic_genes_BUSseq(BUSseqfits_hemat, fdr_threshold = 0.05)
##################################
# Obtain the cell type indicators #
##################################
w.est <- celltypes(BUSseqfits_hemat)
w_BUSseq <- unlist(w.est) # change the list of cell type indicators to a vector
########################################
# Obtain the corrected read count data #
########################################
# NOTE(review): set.seed suggests corrected_read_counts() involves random
# draws; seed fixed for reproducibility -- confirm in the BUSseq docs.
set.seed(12345)
B <- BUSseqfits_hemat$n.batch
corrected_count_est <- corrected_read_counts(BUSseqfits_hemat)
# Concatenate per-batch corrected counts column-wise on the log1p scale.
# (cbind in a loop grows the matrix each iteration; fine for two batches.)
log_corrected_count_est <- NULL
for(b in 1:B){
log_corrected_count_est <- cbind(log_corrected_count_est, log1p(corrected_count_est[[b]]))
}
# Store the workspace
if(!dir.exists("Workspace")){
dir.create("Workspace")
}
save.image("./Workspace/BUSseq_workspace.RData")
|
/Hematopoietic/run_BUSseq.R
|
no_license
|
songfd2018/BUSseq-0.99.0_implementation
|
R
| false
| false
| 4,801
|
r
|
#Apply BUSseq to the hematopoietic study.
# Pipeline: load two-batch count data -> run BUSseq MCMC with K = 7 cell
# types -> extract intrinsic genes, cell-type labels, and batch-corrected
# counts -> save the workspace.
rm(list=ls())
library(BUSseq)
###########################
# Load Hematopoietic Data #
###########################
# Working directory
# setwd("G:/scRNA/Journal/Github_reproduce/Mouse_Hematopoietic")
# Loading hematopoietic count data
load("./RawCountData/hemat_countdata.RData")
# dataA2 / dataF2 come from the .RData just loaded: one count matrix per
# batch, keyed here by their GEO accession numbers.
HematCounts <- list(GSE72857 = dataA2,
GSE81682 = dataF2)
##########################################
# Apply BUSseq to the Hematopoietic Data #
##########################################
# the seed is a randomly sampled integer between 1 and 10,000
seed.est <- 4011
# We ran BUSseq for the number of cell types K equal to
# 3, 4, 5, 6, 7, 8, 9 and 10, and select K = 7 according to BIC
K <- 7
# Conducting MCMC sampling
BUSseqfits_hemat <- BUSseq_MCMC(ObservedData = HematCounts, n.celltypes = K,
n.iterations = 8000, seed = seed.est,
hyper_slab = 50, hyper_tau0 = c(2,0.01))
# # BIC values of the other numbers of cell types are generated by the following codes
# # all seeds are randomly sampled between 1 and 10,000.
# # We strongly recommend running the BUSseq_MCMC in parallel.
#
# BUSseqfits_hemat_K3 <- BUSseq_MCMC(Data = HematCounts, n.celltypes = 3,
# n.iterations = 8000, seed = 6706,
# hyper_slab = 50, hyper_tau0 = c(2,0.01))
# BUSseqfits_hemat_K4 <- BUSseq_MCMC(Data = HematCounts, n.celltypes = 4,
# n.iterations = 8000, seed = 4693,
# hyper_slab = 50, hyper_tau0 = c(2,0.01))
# BUSseqfits_hemat_K5 <- BUSseq_MCMC(Data = HematCounts, n.celltypes = 5,
# n.iterations = 8000, seed = 4481,
# hyper_slab = 50, hyper_tau0 = c(2,0.01))
# BUSseqfits_hemat_K6 <- BUSseq_MCMC(Data = HematCounts, n.celltypes = 6,
# n.iterations = 8000, seed = 4078,
# hyper_slab = 50, hyper_tau0 = c(2,0.01))
# BUSseqfits_hemat_K8 <- BUSseq_MCMC(Data = HematCounts, n.celltypes = 8,
# n.iterations = 8000, seed = 1177,
# hyper_slab = 50, hyper_tau0 = c(2,0.01))
# BUSseqfits_hemat_K9 <- BUSseq_MCMC(Data = HematCounts, n.celltypes = 9,
# n.iterations = 8000, seed = 4654,
# hyper_slab = 50, hyper_tau0 = c(2,0.01))
# BUSseqfits_hemat_K10 <- BUSseq_MCMC(Data = HematCounts, n.celltypes = 10,
# n.iterations = 8000, seed = 7398,
# hyper_slab = 50, hyper_tau0 = c(2,0.01))
# BIC_values <- rep(NA,8)
# BIC_values[1] <- BIC_BUSseq(BUSseqfits_hemat_K3)
# BIC_values[2] <- BIC_BUSseq(BUSseqfits_hemat_K4)
# BIC_values[3] <- BIC_BUSseq(BUSseqfits_hemat_K5)
# BIC_values[4] <- BIC_BUSseq(BUSseqfits_hemat_K6)
# BIC_values[5] <- BIC_BUSseq(BUSseqfits_hemat)
# BIC_values[6] <- BIC_BUSseq(BUSseqfits_hemat_K8)
# BIC_values[7] <- BIC_BUSseq(BUSseqfits_hemat_K9)
# BIC_values[8] <- BIC_BUSseq(BUSseqfits_hemat_K10)
# names(BIC_values) <- paste0("K=",3:10)
# # As a result, the BIC values are
# # K=3 K=4 K=5 K=6 K=7 K=8 K=9 K=10
# # 48000531 47941792 47948555 47964609 47938554 48024762 48048079 48114358
# if(!dir.exists("Image")){
# dir.create("Image")
# }
# if(!dir.exists("./Image/Other")){
# dir.create("./Image/Other")
# }
# png("./Image/Other/BIC_values.png",width = 540, height = 720)
# par(mar = c(5.1,6.1,4.1,2.1))
# plot(3:10,BIC_values,xlab= "K",ylab = "BIC",type="n",cex.axis=3,cex.lab=3)
# points(3:10,BIC_values,type="b",pch=19,cex=3)
# dev.off()
#####################################
# Obtain the intrinsic gene indices #
#####################################
# Genes differentially expressed across cell types at FDR 0.05
intrinsic_gene_indices <- intrinsic_genes_BUSseq(BUSseqfits_hemat, fdr_threshold = 0.05)
##################################
# Obtain the cell type indicators #
##################################
w.est <- celltypes(BUSseqfits_hemat)
w_BUSseq <- unlist(w.est) # change the list of cell type indicators to a vector
########################################
# Obtain the corrected read count data #
########################################
# NOTE(review): set.seed suggests corrected_read_counts() involves random
# draws; seed fixed for reproducibility -- confirm in the BUSseq docs.
set.seed(12345)
B <- BUSseqfits_hemat$n.batch
corrected_count_est <- corrected_read_counts(BUSseqfits_hemat)
# Concatenate per-batch corrected counts column-wise on the log1p scale.
# (cbind in a loop grows the matrix each iteration; fine for two batches.)
log_corrected_count_est <- NULL
for(b in 1:B){
log_corrected_count_est <- cbind(log_corrected_count_est, log1p(corrected_count_est[[b]]))
}
# Store the workspace
if(!dir.exists("Workspace")){
dir.create("Workspace")
}
save.image("./Workspace/BUSseq_workspace.RData")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tb_cl.R
\name{cl_table}
\alias{cl_table}
\title{Generate a larval control table}
\usage{
cl_table(x, jur = NULL, mun)
}
\arguments{
\item{x}{is the dataset of control larvario.}
\item{jur}{is the Jurisdiccion.}
\item{mun}{is the municipio.}
}
\value{
a table.
}
\description{
Generate a larval control table
}
\details{
xxx
}
\examples{
1+1
}
\references{
xxxxx
}
\seealso{
\link[formattable]{formattable}
}
\author{
Felipe Antonio Dzul Manzanilla \email{felipe.dzul.m@gmail.com}
}
|
/man/cl_table.Rd
|
permissive
|
fdzul/boldenr
|
R
| false
| true
| 563
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tb_cl.R
\name{cl_table}
\alias{cl_table}
\title{Generate a larval control table}
\usage{
cl_table(x, jur = NULL, mun)
}
\arguments{
\item{x}{is the dataset of control larvario.}
\item{jur}{is the Jurisdiccion.}
\item{mun}{is the municipio.}
}
\value{
a table.
}
\description{
Generate a larval control table
}
\details{
xxx
}
\examples{
1+1
}
\references{
xxxxx
}
\seealso{
\link[formattable]{formattable}
}
\author{
Felipe Antonio Dzul Manzanilla \email{felipe.dzul.m@gmail.com}
}
|
## Assignment for Coursera's "R Programming" course
## The functions can be used to count and cache the inverse
## of an inversible square matrix
## This function creates a special "matrix" object that can cache its inverse.
## Create a cache-aware matrix container.
##
## Wraps a matrix together with a cached inverse inside a closure and
## returns a list of accessor functions. Setting a new matrix invalidates
## any previously cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached <- NULL  # cached inverse; NULL means "not computed yet"

  set <- function(y) {
    # Replace the stored matrix and drop the stale inverse.
    x <<- y
    cached <<- NULL
  }

  get <- function() {
    x
  }

  setinv <- function(solve) {
    # Store a precomputed inverse in the shared environment.
    cached <<- solve
  }

  getinv <- function() {
    cached
  }

  # Public interface: four accessors sharing one enclosing environment.
  list(set = set,
       get = get,
       setinv = setinv,
       getinv = getinv)
}
## This function computes the inverse of the special "matrix"
## returned by makeCacheMatrix above.
## If the inverse has already been calculated (and the matrix has not changed),
## then the cachesolve should retrieve the inverse from the cache.
## Compute (or fetch) the inverse of a cache-aware matrix.
##
## `x` must be an object created by makeCacheMatrix(). If an inverse is
## already cached it is returned immediately (with a message); otherwise
## it is computed with solve(), cached for next time, and returned.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinv()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  fresh <- solve(x$get(), ...)
  x$setinv(fresh)
  fresh
}
|
/cachematrix.R
|
no_license
|
dcsilla/ProgrammingAssignment2
|
R
| false
| false
| 1,619
|
r
|
## Assignment for Coursera's "R Programming" course
## The functions can be used to count and cache the inverse
## of an inversible square matrix
## This function creates a special "matrix" object that can cache its inverse.
## Create a cache-aware matrix container.
##
## Wraps a matrix together with a cached inverse inside a closure and
## returns a list of accessor functions. Setting a new matrix invalidates
## any previously cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached <- NULL  # cached inverse; NULL means "not computed yet"

  set <- function(y) {
    # Replace the stored matrix and drop the stale inverse.
    x <<- y
    cached <<- NULL
  }

  get <- function() {
    x
  }

  setinv <- function(solve) {
    # Store a precomputed inverse in the shared environment.
    cached <<- solve
  }

  getinv <- function() {
    cached
  }

  # Public interface: four accessors sharing one enclosing environment.
  list(set = set,
       get = get,
       setinv = setinv,
       getinv = getinv)
}
## This function computes the inverse of the special "matrix"
## returned by makeCacheMatrix above.
## If the inverse has already been calculated (and the matrix has not changed),
## then the cachesolve should retrieve the inverse from the cache.
## Compute (or fetch) the inverse of a cache-aware matrix.
##
## `x` must be an object created by makeCacheMatrix(). If an inverse is
## already cached it is returned immediately (with a message); otherwise
## it is computed with solve(), cached for next time, and returned.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinv()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  fresh <- solve(x$get(), ...)
  x$setinv(fresh)
  fresh
}
|
## Getting and Cleaning Data course project: build a tidy summary of the
## UCI HAR (Human Activity Recognition) dataset -- the mean of each
## mean()/std() measurement per subject and activity.
## NOTE(review): setwd() in a script is discouraged; kept to preserve the
## original workflow.
#Set work directory
setwd("/home/kun/Dropbox/Study/Coursera - Data Science/03 - Getting and Cleaning Data/Week4")
#Download and unzip Data
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileUrl, file.path(getwd(), "Dataset.zip"))
unzip(zipfile = "./Dataset.zip", exdir = "./data")
#Check file folder
path_rf <- file.path("./data", "UCI HAR Dataset")
files <- list.files(path_rf, recursive = TRUE)
files
#Read the Activity files (numeric activity codes, one row per observation)
dataActivityTrain <- read.table(file.path(path_rf, "train", "Y_train.txt"), header = FALSE)
dataActivityTest <- read.table(file.path(path_rf, "test", "Y_test.txt"), header = FALSE)
dim(dataActivityTest)
dim(dataActivityTrain)
#Read the Subject files (test-subject IDs)
dataSubjectTrain <- read.table(file.path(path_rf, "train", "subject_train.txt"), header = FALSE)
dataSubjectTest <- read.table(file.path(path_rf, "test", "subject_test.txt"), header = FALSE)
dim(dataSubjectTrain)
dim(dataSubjectTest)
#Read Features files (the 561 feature columns)
dataFeaturesTrain <- read.table(file.path(path_rf, "train", "X_train.txt"), header = FALSE)
dataFeaturesTest <- read.table(file.path(path_rf, "test", "X_test.txt"), header = FALSE)
dim(dataFeaturesTrain)
dim(dataFeaturesTest)
#Combine train and test sets (vertically)
dataSubject <- rbind(dataSubjectTrain, dataSubjectTest)
dataActivity <- rbind(dataActivityTrain, dataActivityTest)
dataFeatures <- rbind(dataFeaturesTrain, dataFeaturesTest)
dim(dataSubject)
dim(dataActivity)  # BUG FIX: was dim(dataActivity3) -- an undefined object that stopped the script
dim(dataFeatures)
#Adding labels
names(dataSubject) <- c("subject")
names(dataActivity) <- c("activity")
dataFeaturesNames <- read.table(file.path(path_rf, "features.txt"), header = FALSE)
names(dataFeatures) <- dataFeaturesNames$V2
names(dataFeatures)
#Combine files together (horizontally)
Data <- cbind(dataFeatures, dataSubject, dataActivity)
dim(Data)
names(Data)
#Select only mean() and std() measurements (plus subject & activity)
subdataFeaturesNames <- dataFeaturesNames$V2[grep("mean\\(\\)|std\\(\\)", dataFeaturesNames$V2)]
selectedNames <- c(as.character(subdataFeaturesNames), "subject", "activity")
Data2 <- subset(Data, select = selectedNames)
dim(Data2)
names(Data2)
table(Data2$activity, Data2$subject)
#Expand abbreviations in the variable names to make them descriptive
names(Data2) <- gsub("^t", "time", names(Data2))
names(Data2) <- gsub("^f", "frequency", names(Data2))
names(Data2) <- gsub("Acc", "Accelerometer", names(Data2))
names(Data2) <- gsub("Gyro", "Gyroscope", names(Data2))
names(Data2) <- gsub("Mag", "Magnitude", names(Data2))
names(Data2) <- gsub("BodyBody", "Body", names(Data2))
names(Data2)
library(plyr)  # NOTE(review): loaded but unused below; aggregate()/merge() are base R
#Average each variable per subject and activity, ordered for readability
Data3 <- aggregate(. ~ subject + activity, Data2, mean)
Data3 <- Data3[order(Data3$subject, Data3$activity), ]
names(Data3)
#Replace activity codes with their descriptive labels via merge on V1
activityLabels <- read.table(file.path(path_rf, "activity_labels.txt"), header = FALSE)
names(Data3)[names(Data3) == "activity"] <- "V1"
names(activityLabels)[names(activityLabels) == "V2"] <- "activity"
activityLabels
Data4 <- merge(Data3, activityLabels, by = "V1")
Data5 <- subset(Data4, select = -V1)
Data6 <- aggregate(. ~ subject + activity, Data5, mean)
#table(Data6$subject, Data6$activity)
# row.names spelled out (original's `row.name` relied on partial matching)
write.table(Data6, file = "tidydata - Getting and Cleaning Data Assignment.txt", row.names = FALSE)
install.packages("memisc")
library(memisc)
Data6 <- within(Data6,{
description(subject) <- "ID of the test subject"
description(activity) <- "The type of activity performed"
# measurement(subject) <- "norminal"
})
codebook(Data6)
Write(codebook(Data6),
file="Codebook.md")
#library(knitr)
#knit2html("codebook.Rmd")
|
/run_analysis_R V2.R
|
no_license
|
HKFORWARD/Assignment---Getting-and-Cleaning-Data
|
R
| false
| false
| 3,700
|
r
|
# run_analysis: build a tidy data set from the UCI HAR accelerometer data
# (Getting and Cleaning Data course project). Steps: download/unzip the raw
# data, merge the train and test sets, keep only mean()/std() measurements,
# attach descriptive activity/variable names, and write the per-subject,
# per-activity averages to a text file plus a memisc codebook.
# Set work directory (hard-coded to the original author's machine)
setwd("/home/kun/Dropbox/Study/Coursera - Data Science/03 - Getting and Cleaning Data/Week4")
# Download and unzip the data
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileUrl, file.path(getwd(), "Dataset.zip"))
unzip(zipfile = "./Dataset.zip", exdir = "./data")
# Check the extracted file folder
path_rf <- file.path("./data", "UCI HAR Dataset")
files <- list.files(path_rf, recursive = TRUE)
files
# Read the activity files (one activity id per observation)
dataActivityTrain <- read.table(file.path(path_rf, "train", "Y_train.txt"), header = FALSE)
dataActivityTest <- read.table(file.path(path_rf, "test", "Y_test.txt"), header = FALSE)
dim(dataActivityTest)
dim(dataActivityTrain)
# Read the subject files (one subject id per observation)
dataSubjectTrain <- read.table(file.path(path_rf, "train", "subject_train.txt"), header = FALSE)
dataSubjectTest <- read.table(file.path(path_rf, "test", "subject_test.txt"), header = FALSE)
dim(dataSubjectTrain)
dim(dataSubjectTest)
# Read the features files (the measurement columns)
dataFeaturesTrain <- read.table(file.path(path_rf, "train", "X_train.txt"), header = FALSE)
dataFeaturesTest <- read.table(file.path(path_rf, "test", "X_test.txt"), header = FALSE)
dim(dataFeaturesTrain)
dim(dataFeaturesTest)
# Combine train and test sets row-wise
dataSubject <- rbind(dataSubjectTrain, dataSubjectTest)
dataActivity <- rbind(dataActivityTrain, dataActivityTest)
dataFeatures <- rbind(dataFeaturesTrain, dataFeaturesTest)
dim(dataSubject)
dim(dataActivity)  # BUG FIX: was dim(dataActivity3), an undefined object
dim(dataFeatures)
# Add descriptive column names
names(dataSubject) <- c("subject")
names(dataActivity) <- c("activity")
dataFeaturesNames <- read.table(file.path(path_rf, "features.txt"), header = FALSE)  # was head= (partial match)
names(dataFeatures) <- dataFeaturesNames$V2
names(dataFeatures)
# Combine subject, activity, and features column-wise
Data <- cbind(dataFeatures, dataSubject, dataActivity)
dim(Data)
names(Data)
# Keep only the mean() and std() measurements (plus subject & activity)
subdataFeaturesNames <- dataFeaturesNames$V2[grep("mean\\(\\)|std\\(\\)", dataFeaturesNames$V2)]
dataFeaturesNames$V2
subdataFeaturesNames[70]
as.character(subdataFeaturesNames)
selectedNames <- c(as.character(subdataFeaturesNames), "subject", "activity")
selectedNames
Data2 <- subset(Data, select = selectedNames)
dim(Data2)
names(Data2)
table(Data2$activity, Data2$subject)
# Expand abbreviations in the variable names to full descriptions
names(Data2) <- gsub("^t", "time", names(Data2))
names(Data2) <- gsub("^f", "frequency", names(Data2))
names(Data2) <- gsub("Acc", "Accelerometer", names(Data2))
names(Data2) <- gsub("Gyro", "Gyroscope", names(Data2))
names(Data2) <- gsub("Mag", "Magnitude", names(Data2))
names(Data2) <- gsub("BodyBody", "Body", names(Data2))
names(Data2)
library(plyr)  # NOTE(review): loaded but unused below; aggregate() is base R
# Average every measurement per subject/activity pair
Data3 <- aggregate(. ~ subject + activity, Data2, mean)
Data3 <- Data3[order(Data3$subject, Data3$activity), ]
names(Data3)
# Replace the numeric activity id with its descriptive label
activityLabels <- read.table(file.path(path_rf, "activity_labels.txt"), header = FALSE)
names(Data3)[names(Data3) == "activity"] <- "V1"
names(activityLabels)[names(activityLabels) == "V2"] <- "activity"
activityLabels
Data4 <- merge(Data3, activityLabels, by = "V1")
Data5 <- subset(Data4, select = -V1)
Data6 <- aggregate(. ~ subject + activity, Data5, mean)
# table(Data6$subject, Data6$activity)
write.table(Data6, file = "tidydata - Getting and Cleaning Data Assignment.txt",
            row.names = FALSE)  # was row.name= (partial match)
# Install memisc only if it is missing (was an unconditional install.packages)
if (!requireNamespace("memisc", quietly = TRUE)) {
  install.packages("memisc")
}
library(memisc)
# Attach variable descriptions used by the memisc codebook
Data6 <- within(Data6, {
  description(subject) <- "ID of the test subject"
  description(activity) <- "The type of activity performed"
  # measurement(subject) <- "norminal"
})
codebook(Data6)
Write(codebook(Data6),
      file = "Codebook.md")
# library(knitr)
# knit2html("codebook.Rmd")
|
#---------------------------------------------------------------------------
#
# This file holds the S4 class definitions for the class that defines the
# minimal bounding grid at given resolution for an InclusionZone object.
# These objects may be used later in generating a sampling surface by
# "piling" or "heaping" them one on top of another within a "Tract"
# object.
#
#
#Author... Date: 17-Sept-2010
# Jeffrey H. Gove
# USDA Forest Service
# Northern Research Station
# 271 Mast Road
# Durham, NH 03824
# jhgove@unh.edu
# phone: 603-868-7667 fax: 603-868-7604
#---------------------------------------------------------------------------
#
#=================================================================================================
#
# define the InclusionZoneGrid class...
#
#
# InclusionZoneGrid couples an InclusionZone object with a raster grid laid
# over its minimal bounding box, plus a data frame of per-unit-area ("pua")
# estimates for the grid. The estimate column names come from the
# package-internal environment .StemEnv (defined elsewhere in the package,
# not visible in this file).
#
setClass('InclusionZoneGrid',
#
#  slots for the class and its subclasses...
#
         representation(description = 'character',
                        iz = 'InclusionZone',              #iz object
                        grid = 'RasterLayer',              #for the grid
                        data = 'data.frame',               #pua estimates over the grid
                        bbox = 'matrix'                    #overall bounding box
                       ),
    # prototype defaults: a zero-valued 2x2 bbox and a zero-row estimate
    # data frame carrying the expected pua column names
    prototype = list(description = 'gridded inclusion zone', #some defaults for validity checking
                     bbox = matrix(rep(0,4), nrow=2, dimnames=list(c('x','y'), c('min','max'))),
                     data = data.frame(matrix(NA, nr=0, nc=length(.StemEnv$puaEstimates),
                                       dimnames=list(character(0), names(.StemEnv$puaEstimates))) )
                    ),
    validity = function(object) {
                 #essentially the same checks as in bboxCheck()...
                 # bbox must be a 2x2 matrix with rownames 'x','y', colnames
                 # 'min','max', and min strictly less than max in each row
                 if(!nrow(object@bbox)==2 || !ncol(object@bbox)==2)
                   return('bbox slot must be a 2x2 matrix')
                 bboxNames = match(rownames(object@bbox), c('x','y'))
                 if(any(is.na(bboxNames)))
                   return('slot bbox rownames must be "x", "y"!')
                 bboxNames = match(colnames(object@bbox), c('min','max'))
                 if(any(is.na(bboxNames)))
                   return('slot bbox colnames must be "min", "max"!')
                 if(any( apply(object@bbox,1,function(x) if(x['min'] >= x['max']) TRUE else FALSE) ))
                   return('in slot bbox, "min" must be less than "max" for x and y!')
                 # every column of @data must be one of the known pua/pp
                 # estimate names from .StemEnv
                 dfNames = match(colnames(object@data), c(names(.StemEnv$puaEstimates),
                                                          names(.StemEnv$ppEstimates)) )
                 if(any(is.na(dfNames)))
                   return('slot data colnames must contain all the per unit area estimate names')
                 return(TRUE)
               } #validity check
) #class InclusionZoneGrid
#=================================================================================================
#
# define the InclusionZoneGrid class for the full chainsaw object where all possible
# cuts are made within the sausage inclusion zone--a very specific class, but related
# to the above; that is, for each grid cell within the inclusion zone, we apply the
# chainSawIZ method and record the value of that cell...
#
#
# csFullInclusionZoneGrid extends InclusionZoneGrid for the "full chainsaw"
# case: the chiz slot holds one entry per grid cell of the enclosing sausage
# inclusion zone -- an InclusionZoneGrid (built from a chainSawIZ) for cells
# inside the zone, or the value NA for cells outside it; the validity
# function below enforces exactly that structure.
#
setClass('csFullInclusionZoneGrid',
#
#  slots for the class; note that we need a list of "InclusionZoneGrid" objects, one for
#  each chainSaw estimate within the overall sausage inclusion zone...
#
         representation(chiz = 'list'          #a list of InclusionZoneGrid objects
                       ),
         contains = 'InclusionZoneGrid',
    # prototype defaults: empty cell list, zero-valued bbox, and a zero-row
    # estimate data frame covering both pua and pp estimate names
    prototype = list(description = 'full chainsaw-sausage gridded inclusion zone',
                     chiz = list(),
                     bbox = matrix(rep(0,4), nrow=2, dimnames=list(c('x','y'), c('min','max'))),
                     data = data.frame(matrix(NA,
                                              nrow = 0,
                                              ncol = length(c(.StemEnv$puaEstimates,.StemEnv$ppEstimates)),
                                              dimnames = list(character(0),
                                                              names(c(.StemEnv$puaEstimates,.StemEnv$ppEstimates)))
                                             ) #matrix
                                      ) #df
                    ),
    sealed = TRUE,                             #no further changes or subclasses
    validity = function(object) {
                 #a check for "sausageIZ" would work below, but force it to be "fullChainSawIZ"...
                 if(!is(object@iz, 'fullChainSawIZ'))
                   return('The underlying inclusion zone must be of class "fullChainSawIZ".')
                 chizLen = length(object@chiz)
                 if(chizLen > 0) {
                   # each cell is either an InclusionZoneGrid whose iz slot is
                   # a chainSawIZ (interior cell) or plain NA (exterior cell)
                   for(i in seq_len(chizLen)) {
                     if(isS4(object@chiz[[i]])) {          #can't check is.na on S4 objects!
                       if(!is(object@chiz[[i]], 'InclusionZoneGrid'))
                         return('All internal sausage grid cells must be InclusionZoneGrid objects!')
                       if(!is(object@chiz[[i]]@iz, 'chainSawIZ'))
                         return('Each internal sausage grid cell must be from a chainSawIZ object!')
                     }
                     else if(!is.na(object@chiz[[i]]))
                       return('External sausage grid cells must have value "NA".')
                   }
                 }
                 return(TRUE)
               } #validity check
) #class csFullInclusionZoneGrid
|
/sampSurf/R/InclusionZoneGridClass.R
|
no_license
|
ingted/R-Examples
|
R
| false
| false
| 5,636
|
r
|
#---------------------------------------------------------------------------
#
# This file holds the S4 class definitions for the class that defines the
# minimal bounding grid at given resolution for an InclusionZone object.
# These objects may be used later in generating a sampling surface by
# "piling" or "heaping" them one on top of another within a "Tract"
# object.
#
#
#Author... Date: 17-Sept-2010
# Jeffrey H. Gove
# USDA Forest Service
# Northern Research Station
# 271 Mast Road
# Durham, NH 03824
# jhgove@unh.edu
# phone: 603-868-7667 fax: 603-868-7604
#---------------------------------------------------------------------------
#
#=================================================================================================
#
# define the InclusionZoneGrid class...
#
#
# InclusionZoneGrid couples an InclusionZone object with a raster grid laid
# over its minimal bounding box, plus a data frame of per-unit-area ("pua")
# estimates for the grid. The estimate column names come from the
# package-internal environment .StemEnv (defined elsewhere in the package,
# not visible in this file).
#
setClass('InclusionZoneGrid',
#
#  slots for the class and its subclasses...
#
         representation(description = 'character',
                        iz = 'InclusionZone',              #iz object
                        grid = 'RasterLayer',              #for the grid
                        data = 'data.frame',               #pua estimates over the grid
                        bbox = 'matrix'                    #overall bounding box
                       ),
    # prototype defaults: a zero-valued 2x2 bbox and a zero-row estimate
    # data frame carrying the expected pua column names
    prototype = list(description = 'gridded inclusion zone', #some defaults for validity checking
                     bbox = matrix(rep(0,4), nrow=2, dimnames=list(c('x','y'), c('min','max'))),
                     data = data.frame(matrix(NA, nr=0, nc=length(.StemEnv$puaEstimates),
                                       dimnames=list(character(0), names(.StemEnv$puaEstimates))) )
                    ),
    validity = function(object) {
                 #essentially the same checks as in bboxCheck()...
                 # bbox must be a 2x2 matrix with rownames 'x','y', colnames
                 # 'min','max', and min strictly less than max in each row
                 if(!nrow(object@bbox)==2 || !ncol(object@bbox)==2)
                   return('bbox slot must be a 2x2 matrix')
                 bboxNames = match(rownames(object@bbox), c('x','y'))
                 if(any(is.na(bboxNames)))
                   return('slot bbox rownames must be "x", "y"!')
                 bboxNames = match(colnames(object@bbox), c('min','max'))
                 if(any(is.na(bboxNames)))
                   return('slot bbox colnames must be "min", "max"!')
                 if(any( apply(object@bbox,1,function(x) if(x['min'] >= x['max']) TRUE else FALSE) ))
                   return('in slot bbox, "min" must be less than "max" for x and y!')
                 # every column of @data must be one of the known pua/pp
                 # estimate names from .StemEnv
                 dfNames = match(colnames(object@data), c(names(.StemEnv$puaEstimates),
                                                          names(.StemEnv$ppEstimates)) )
                 if(any(is.na(dfNames)))
                   return('slot data colnames must contain all the per unit area estimate names')
                 return(TRUE)
               } #validity check
) #class InclusionZoneGrid
#=================================================================================================
#
# define the InclusionZoneGrid class for the full chainsaw object where all possible
# cuts are made within the sausage inclusion zone--a very specific class, but related
# to the above; that is, for each grid cell within the inclusion zone, we apply the
# chainSawIZ method and record the value of that cell...
#
#
# csFullInclusionZoneGrid extends InclusionZoneGrid for the "full chainsaw"
# case: the chiz slot holds one entry per grid cell of the enclosing sausage
# inclusion zone -- an InclusionZoneGrid (built from a chainSawIZ) for cells
# inside the zone, or the value NA for cells outside it; the validity
# function below enforces exactly that structure.
#
setClass('csFullInclusionZoneGrid',
#
#  slots for the class; note that we need a list of "InclusionZoneGrid" objects, one for
#  each chainSaw estimate within the overall sausage inclusion zone...
#
         representation(chiz = 'list'          #a list of InclusionZoneGrid objects
                       ),
         contains = 'InclusionZoneGrid',
    # prototype defaults: empty cell list, zero-valued bbox, and a zero-row
    # estimate data frame covering both pua and pp estimate names
    prototype = list(description = 'full chainsaw-sausage gridded inclusion zone',
                     chiz = list(),
                     bbox = matrix(rep(0,4), nrow=2, dimnames=list(c('x','y'), c('min','max'))),
                     data = data.frame(matrix(NA,
                                              nrow = 0,
                                              ncol = length(c(.StemEnv$puaEstimates,.StemEnv$ppEstimates)),
                                              dimnames = list(character(0),
                                                              names(c(.StemEnv$puaEstimates,.StemEnv$ppEstimates)))
                                             ) #matrix
                                      ) #df
                    ),
    sealed = TRUE,                             #no further changes or subclasses
    validity = function(object) {
                 #a check for "sausageIZ" would work below, but force it to be "fullChainSawIZ"...
                 if(!is(object@iz, 'fullChainSawIZ'))
                   return('The underlying inclusion zone must be of class "fullChainSawIZ".')
                 chizLen = length(object@chiz)
                 if(chizLen > 0) {
                   # each cell is either an InclusionZoneGrid whose iz slot is
                   # a chainSawIZ (interior cell) or plain NA (exterior cell)
                   for(i in seq_len(chizLen)) {
                     if(isS4(object@chiz[[i]])) {          #can't check is.na on S4 objects!
                       if(!is(object@chiz[[i]], 'InclusionZoneGrid'))
                         return('All internal sausage grid cells must be InclusionZoneGrid objects!')
                       if(!is(object@chiz[[i]]@iz, 'chainSawIZ'))
                         return('Each internal sausage grid cell must be from a chainSawIZ object!')
                     }
                     else if(!is.na(object@chiz[[i]]))
                       return('External sausage grid cells must have value "NA".')
                   }
                 }
                 return(TRUE)
               } #validity check
) #class csFullInclusionZoneGrid
|
# Build/check helper for the PCAmixdata package, intended to be run
# interactively from inside the package's inst/ directory.
# Render the PDF manual, build the source tarball, and run CRAN checks.
system("R CMD Rd2pdf --pdf ../PCAmixdata")
system("R CMD build ../PCAmixdata")
system("R CMD check --as-cran ../PCAmixdata")
library(PCAmixdata)
# FIX: was devtools::check(,cran=TRUE) -- drop the spurious empty first
# argument (it passed a missing value positionally; behavior is unchanged)
devtools::check(cran = TRUE)
# CRAN submission portal: http://xmpalantir.wu.ac.at/cransubmit/
dir <- "~/Seafile/R"
# Reverse-dependency checks for the packages found in 'dir'
tools::check_packages_in_dir(dir, reverse = list())
|
/inst/test_package.R
|
no_license
|
cran/PCAmixdata
|
R
| false
| false
| 286
|
r
|
# Build/check helper for the PCAmixdata package, intended to be run
# interactively from inside the package's inst/ directory.
# Render the PDF manual, build the source tarball, and run CRAN checks.
system("R CMD Rd2pdf --pdf ../PCAmixdata")
system("R CMD build ../PCAmixdata")
system("R CMD check --as-cran ../PCAmixdata")
library(PCAmixdata)
# FIX: was devtools::check(,cran=TRUE) -- drop the spurious empty first
# argument (it passed a missing value positionally; behavior is unchanged)
devtools::check(cran = TRUE)
# CRAN submission portal: http://xmpalantir.wu.ac.at/cransubmit/
dir <- "~/Seafile/R"
# Reverse-dependency checks for the packages found in 'dir'
tools::check_packages_in_dir(dir, reverse = list())
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/iot_operations.R
\name{iot_create_job}
\alias{iot_create_job}
\title{Creates a job}
\usage{
iot_create_job(jobId, targets, documentSource, document, description,
presignedUrlConfig, targetSelection, jobExecutionsRolloutConfig,
abortConfig, timeoutConfig, tags)
}
\arguments{
\item{jobId}{[required] A job identifier which must be unique for your AWS account. We recommend
using a UUID. Alpha-numeric characters, "-" and "\_" are valid for
use here.}
\item{targets}{[required] A list of things and thing groups to which the job should be sent.}
\item{documentSource}{An S3 link to the job document.}
\item{document}{The job document.
If the job document resides in an S3 bucket, you must use a placeholder
link when specifying the document.
The placeholder link is of the following form:
\code{$\{aws:iot:s3-presigned-url:https://s3.amazonaws.com/<i>bucket</i>/<i>key</i>\}}
where \emph{bucket} is your bucket name and \emph{key} is the object in the bucket
to which you are linking.}
\item{description}{A short text description of the job.}
\item{presignedUrlConfig}{Configuration information for pre-signed S3 URLs.}
\item{targetSelection}{Specifies whether the job will continue to run (CONTINUOUS), or will be
complete after all those things specified as targets have completed the
job (SNAPSHOT). If continuous, the job may also be run on a thing when a
change is detected in a target. For example, a job will run on a thing
when the thing is added to a target group, even after the job was
completed by all things originally in the group.}
\item{jobExecutionsRolloutConfig}{Allows you to create a staged rollout of the job.}
\item{abortConfig}{Allows you to create criteria to abort a job.}
\item{timeoutConfig}{Specifies the amount of time each device has to finish its execution of
the job. The timer is started when the job execution status is set to
\code{IN_PROGRESS}. If the job execution status is not set to another
terminal state before the time expires, it will be automatically set to
\code{TIMED_OUT}.}
\item{tags}{Metadata which can be used to manage the job.}
}
\description{
Creates a job.
}
\section{Request syntax}{
\preformatted{svc$create_job(
jobId = "string",
targets = list(
"string"
),
documentSource = "string",
document = "string",
description = "string",
presignedUrlConfig = list(
roleArn = "string",
expiresInSec = 123
),
targetSelection = "CONTINUOUS"|"SNAPSHOT",
jobExecutionsRolloutConfig = list(
maximumPerMinute = 123,
exponentialRate = list(
baseRatePerMinute = 123,
incrementFactor = 123.0,
rateIncreaseCriteria = list(
numberOfNotifiedThings = 123,
numberOfSucceededThings = 123
)
)
),
abortConfig = list(
criteriaList = list(
list(
failureType = "FAILED"|"REJECTED"|"TIMED_OUT"|"ALL",
action = "CANCEL",
thresholdPercentage = 123.0,
minNumberOfExecutedThings = 123
)
)
),
timeoutConfig = list(
inProgressTimeoutInMinutes = 123
),
tags = list(
list(
Key = "string",
Value = "string"
)
)
)
}
}
\keyword{internal}
|
/cran/paws.internet.of.things/man/iot_create_job.Rd
|
permissive
|
peoplecure/paws
|
R
| false
| true
| 3,224
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/iot_operations.R
\name{iot_create_job}
\alias{iot_create_job}
\title{Creates a job}
\usage{
iot_create_job(jobId, targets, documentSource, document, description,
presignedUrlConfig, targetSelection, jobExecutionsRolloutConfig,
abortConfig, timeoutConfig, tags)
}
\arguments{
\item{jobId}{[required] A job identifier which must be unique for your AWS account. We recommend
using a UUID. Alpha-numeric characters, "-" and "\_" are valid for
use here.}
\item{targets}{[required] A list of things and thing groups to which the job should be sent.}
\item{documentSource}{An S3 link to the job document.}
\item{document}{The job document.
If the job document resides in an S3 bucket, you must use a placeholder
link when specifying the document.
The placeholder link is of the following form:
\code{$\{aws:iot:s3-presigned-url:https://s3.amazonaws.com/<i>bucket</i>/<i>key</i>\}}
where \emph{bucket} is your bucket name and \emph{key} is the object in the bucket
to which you are linking.}
\item{description}{A short text description of the job.}
\item{presignedUrlConfig}{Configuration information for pre-signed S3 URLs.}
\item{targetSelection}{Specifies whether the job will continue to run (CONTINUOUS), or will be
complete after all those things specified as targets have completed the
job (SNAPSHOT). If continuous, the job may also be run on a thing when a
change is detected in a target. For example, a job will run on a thing
when the thing is added to a target group, even after the job was
completed by all things originally in the group.}
\item{jobExecutionsRolloutConfig}{Allows you to create a staged rollout of the job.}
\item{abortConfig}{Allows you to create criteria to abort a job.}
\item{timeoutConfig}{Specifies the amount of time each device has to finish its execution of
the job. The timer is started when the job execution status is set to
\code{IN_PROGRESS}. If the job execution status is not set to another
terminal state before the time expires, it will be automatically set to
\code{TIMED_OUT}.}
\item{tags}{Metadata which can be used to manage the job.}
}
\description{
Creates a job.
}
\section{Request syntax}{
\preformatted{svc$create_job(
jobId = "string",
targets = list(
"string"
),
documentSource = "string",
document = "string",
description = "string",
presignedUrlConfig = list(
roleArn = "string",
expiresInSec = 123
),
targetSelection = "CONTINUOUS"|"SNAPSHOT",
jobExecutionsRolloutConfig = list(
maximumPerMinute = 123,
exponentialRate = list(
baseRatePerMinute = 123,
incrementFactor = 123.0,
rateIncreaseCriteria = list(
numberOfNotifiedThings = 123,
numberOfSucceededThings = 123
)
)
),
abortConfig = list(
criteriaList = list(
list(
failureType = "FAILED"|"REJECTED"|"TIMED_OUT"|"ALL",
action = "CANCEL",
thresholdPercentage = 123.0,
minNumberOfExecutedThings = 123
)
)
),
timeoutConfig = list(
inProgressTimeoutInMinutes = 123
),
tags = list(
list(
Key = "string",
Value = "string"
)
)
)
}
}
\keyword{internal}
|
# plot4.R: draw the 2x2 panel of household power-consumption plots for the
# Exploratory Data Analysis course project and save it as plot4.png.
# Create the data directory if it does not exist
if (!file.exists("data")) {
  dir.create("data")
}
# Download, save, and extract the data file (skipped if already extracted)
if (!file.exists("./data/household_power_consumption.txt")) {
  fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
  download.file(fileUrl, "./data/household_power_consumption.zip")
  unzip("./data/household_power_consumption.zip", overwrite = TRUE, exdir = "./data")  # was overwrite = T
}
# Approximate memory required: 8 bytes x 9 columns x 2075259 rows ~= 142.5 MB
# rm = ((2075259 * 9) * 8) / 1048576
# print("Required Memory in MB:")
# print(rm)
# Read only the rows for 2007-02-01 and 2007-02-02 (raw dates are d/m/Y, so
# they match the pattern below; "?" is the file's NA marker).
# NOTE(review): the bracket in "^[1,2]" also contains a literal comma; it is
# harmless here because dates never start with ",".
df <- read.table(text = grep("^[1,2]/2/2007", readLines("./data//household_power_consumption.txt"), value = TRUE),
                 col.names = c("Date", "Time", "Global_active_power",
                               "Global_reactive_power", "Voltage",
                               "Global_intensity", "Sub_metering_1",
                               "Sub_metering_2", "Sub_metering_3"),
                 sep = ";", header = TRUE, na.strings = "?")
# Build a Datetime field from the Date and Time columns (was `=` assignment)
df$Datetime <- strptime(paste(df$Date, df$Time), "%d/%m/%Y %H:%M:%S")
# Convert the Date field from character to Date class
df$Date <- as.Date(df$Date, format = "%d/%m/%Y")
# Open the PNG device for plot 4
png("./ExData_Plotting1//plot4.png", width = 480, height = 480, units = "px")
# 2x2 panel layout
par(mfrow = c(2, 2))
# plot 1 (NW): global active power over time
plot(df$Datetime, df$Global_active_power, type = "l", ylab = "Global Active Power",
     xlab = "")
# plot 2 (NE): voltage over time
plot(df$Datetime, df$Voltage, type = "l", ylab = "Voltage", xlab = "datetime")
# plot 3 (SW): the three sub-metering series overlaid
plot(df$Datetime, df$Sub_metering_1, type = "l", ylab = "Energy sub metering",
     xlab = "", col = "black")
points(df$Datetime, df$Sub_metering_2, type = "l", col = "red")
points(df$Datetime, df$Sub_metering_3, type = "l", col = "blue")
# FIX: removed the trailing empty argument after bty = "n"
legend("topright", lty = 1, col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), bty = "n")
# plot 4 (SE): global reactive power over time
plot(df$Datetime, df$Global_reactive_power, type = "l", xlab = "datetime",
     ylab = "Global_reactive_power", ylim = c(0, 0.5))
# Close the PNG device
dev.off()
|
/plot4.R
|
no_license
|
mesbah/ExData_Plotting1
|
R
| false
| false
| 2,401
|
r
|
# plot4.R: draw the 2x2 panel of household power-consumption plots for the
# Exploratory Data Analysis course project and save it as plot4.png.
# Create the data directory if it does not exist
if (!file.exists("data")) {
  dir.create("data")
}
# Download, save, and extract the data file (skipped if already extracted)
if (!file.exists("./data/household_power_consumption.txt")) {
  fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
  download.file(fileUrl, "./data/household_power_consumption.zip")
  unzip("./data/household_power_consumption.zip", overwrite = TRUE, exdir = "./data")  # was overwrite = T
}
# Approximate memory required: 8 bytes x 9 columns x 2075259 rows ~= 142.5 MB
# rm = ((2075259 * 9) * 8) / 1048576
# print("Required Memory in MB:")
# print(rm)
# Read only the rows for 2007-02-01 and 2007-02-02 (raw dates are d/m/Y, so
# they match the pattern below; "?" is the file's NA marker).
# NOTE(review): the bracket in "^[1,2]" also contains a literal comma; it is
# harmless here because dates never start with ",".
df <- read.table(text = grep("^[1,2]/2/2007", readLines("./data//household_power_consumption.txt"), value = TRUE),
                 col.names = c("Date", "Time", "Global_active_power",
                               "Global_reactive_power", "Voltage",
                               "Global_intensity", "Sub_metering_1",
                               "Sub_metering_2", "Sub_metering_3"),
                 sep = ";", header = TRUE, na.strings = "?")
# Build a Datetime field from the Date and Time columns (was `=` assignment)
df$Datetime <- strptime(paste(df$Date, df$Time), "%d/%m/%Y %H:%M:%S")
# Convert the Date field from character to Date class
df$Date <- as.Date(df$Date, format = "%d/%m/%Y")
# Open the PNG device for plot 4
png("./ExData_Plotting1//plot4.png", width = 480, height = 480, units = "px")
# 2x2 panel layout
par(mfrow = c(2, 2))
# plot 1 (NW): global active power over time
plot(df$Datetime, df$Global_active_power, type = "l", ylab = "Global Active Power",
     xlab = "")
# plot 2 (NE): voltage over time
plot(df$Datetime, df$Voltage, type = "l", ylab = "Voltage", xlab = "datetime")
# plot 3 (SW): the three sub-metering series overlaid
plot(df$Datetime, df$Sub_metering_1, type = "l", ylab = "Energy sub metering",
     xlab = "", col = "black")
points(df$Datetime, df$Sub_metering_2, type = "l", col = "red")
points(df$Datetime, df$Sub_metering_3, type = "l", col = "blue")
# FIX: removed the trailing empty argument after bty = "n"
legend("topright", lty = 1, col = c("black", "red", "blue"),
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), bty = "n")
# plot 4 (SE): global reactive power over time
plot(df$Datetime, df$Global_reactive_power, type = "l", xlab = "datetime",
     ylab = "Global_reactive_power", ylim = c(0, 0.5))
# Close the PNG device
dev.off()
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/comprehend_operations.R
\name{comprehend_start_targeted_sentiment_detection_job}
\alias{comprehend_start_targeted_sentiment_detection_job}
\title{Starts an asynchronous targeted sentiment detection job for a collection
of documents}
\usage{
comprehend_start_targeted_sentiment_detection_job(
InputDataConfig,
OutputDataConfig,
DataAccessRoleArn,
JobName = NULL,
LanguageCode,
ClientRequestToken = NULL,
VolumeKmsKeyId = NULL,
VpcConfig = NULL,
Tags = NULL
)
}
\arguments{
\item{InputDataConfig}{[required]}
\item{OutputDataConfig}{[required] Specifies where to send the output files.}
\item{DataAccessRoleArn}{[required] The Amazon Resource Name (ARN) of the IAM role that grants Amazon
Comprehend read access to your input data. For more information, see
Role-based permissions.}
\item{JobName}{The identifier of the job.}
\item{LanguageCode}{[required] The language of the input documents. Currently, English is the only
supported language.}
\item{ClientRequestToken}{A unique identifier for the request. If you don't set the client request
token, Amazon Comprehend generates one.}
\item{VolumeKmsKeyId}{ID for the KMS key that Amazon Comprehend uses to encrypt data on the
storage volume attached to the ML compute instance(s) that process the
analysis job. The VolumeKmsKeyId can be either of the following formats:
\itemize{
\item KMS Key ID: \code{"1234abcd-12ab-34cd-56ef-1234567890ab"}
\item Amazon Resource Name (ARN) of a KMS Key:
\code{"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"}
}}
\item{VpcConfig}{}
\item{Tags}{Tags to associate with the targeted sentiment detection job. A tag is a
key-value pair that adds metadata to a resource used by Amazon
Comprehend. For example, a tag with "Sales" as the key might be added to
a resource to indicate its use by the sales department.}
}
\description{
Starts an asynchronous targeted sentiment detection job for a collection of documents. Use the \code{\link[=comprehend_describe_targeted_sentiment_detection_job]{describe_targeted_sentiment_detection_job}} operation to track the status of a job.
See \url{https://www.paws-r-sdk.com/docs/comprehend_start_targeted_sentiment_detection_job/} for full documentation.
}
\keyword{internal}
|
/cran/paws.machine.learning/man/comprehend_start_targeted_sentiment_detection_job.Rd
|
permissive
|
paws-r/paws
|
R
| false
| true
| 2,330
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/comprehend_operations.R
\name{comprehend_start_targeted_sentiment_detection_job}
\alias{comprehend_start_targeted_sentiment_detection_job}
\title{Starts an asynchronous targeted sentiment detection job for a collection
of documents}
\usage{
comprehend_start_targeted_sentiment_detection_job(
InputDataConfig,
OutputDataConfig,
DataAccessRoleArn,
JobName = NULL,
LanguageCode,
ClientRequestToken = NULL,
VolumeKmsKeyId = NULL,
VpcConfig = NULL,
Tags = NULL
)
}
\arguments{
\item{InputDataConfig}{[required]}
\item{OutputDataConfig}{[required] Specifies where to send the output files.}
\item{DataAccessRoleArn}{[required] The Amazon Resource Name (ARN) of the IAM role that grants Amazon
Comprehend read access to your input data. For more information, see
Role-based permissions.}
\item{JobName}{The identifier of the job.}
\item{LanguageCode}{[required] The language of the input documents. Currently, English is the only
supported language.}
\item{ClientRequestToken}{A unique identifier for the request. If you don't set the client request
token, Amazon Comprehend generates one.}
\item{VolumeKmsKeyId}{ID for the KMS key that Amazon Comprehend uses to encrypt data on the
storage volume attached to the ML compute instance(s) that process the
analysis job. The VolumeKmsKeyId can be either of the following formats:
\itemize{
\item KMS Key ID: \code{"1234abcd-12ab-34cd-56ef-1234567890ab"}
\item Amazon Resource Name (ARN) of a KMS Key:
\code{"arn:aws:kms:us-west-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab"}
}}
\item{VpcConfig}{}
\item{Tags}{Tags to associate with the targeted sentiment detection job. A tag is a
key-value pair that adds metadata to a resource used by Amazon
Comprehend. For example, a tag with "Sales" as the key might be added to
a resource to indicate its use by the sales department.}
}
\description{
Starts an asynchronous targeted sentiment detection job for a collection of documents. Use the \code{\link[=comprehend_describe_targeted_sentiment_detection_job]{describe_targeted_sentiment_detection_job}} operation to track the status of a job.
See \url{https://www.paws-r-sdk.com/docs/comprehend_start_targeted_sentiment_detection_job/} for full documentation.
}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GSE8671.R
\docType{data}
\name{GSE8671}
\alias{GSE8671}
\title{GSE8671}
\format{A Summarized Experiment object with 12482 genes and 64 samples (32 cases and 32 controls).
The column outcome in the colData corresponds to the outcome that was used in the paper.}
\usage{
data(GSE8671)
}
\description{
This is a preprocessed hallmark data set with WNT_BETA_CATENIN_SIGNALING as target pathway.
A Genome U133 Plus 2.0 Array is utilized to analyze colon cancer in colon tissue. The study was performed in a paired design.
}
\references{
Sabates-Bellver, J., Van der Flier, L. G., de Palo, M., Cattaneo, E., Maake, C., Rehrauer, H., Laczko, E., Kurowski, M. A., Bujnicki, J. M., Menigatti, M., et al. (2007). Transcriptome profile of human colorectal adenomas. Mol Cancer Res, 5, 1263–1275.
}
\keyword{datasets}
|
/man/GSE8671.Rd
|
no_license
|
szymczak-lab/DataPathwayGuidedRF
|
R
| false
| true
| 887
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GSE8671.R
\docType{data}
\name{GSE8671}
\alias{GSE8671}
\title{GSE8671}
\format{A Summarized Experiment object with 12482 genes and 64 samples (32 cases and 32 controls).
The column outcome in the colData corresponds to the outcome that was used in the paper.}
\usage{
data(GSE8671)
}
\description{
This is a preprocessed hallmark data set with WNT_BETA_CATENIN_SIGNALING as target pathway.
A Genome U133 Plus 2.0 Array is utilized to analyze colon cancer in colon tissue. The study was performed in a paired design.
}
\references{
Sabates-Bellver, J., Van der Flier, L. G., de Palo, M., Cattaneo, E., Maake, C., Rehrauer, H., Laczko, E., Kurowski, M. A., Bujnicki, J. M., Menigatti, M., et al. (2007). Transcriptome profile of human colorectal adenomas. Mol Cancer Res, 5, 1263–1275.
}
\keyword{datasets}
|
\name{Cluster_Example_3}
\docType{data}
\alias{Cluster_Example_3}
\title{An image file}
\description{
This is an \code{Image} object obtained using \code{EBImage::readImage}.
}
\usage{data(Cluster_Example_3,package="i2d")}
\keyword{datasets}
|
/man/Cluster_Example_3.Rd
|
no_license
|
XiaoyuLiang/i2d
|
R
| false
| false
| 253
|
rd
|
\name{Cluster_Example_3}
\docType{data}
\alias{Cluster_Example_3}
\title{An image file}
\description{
This is an \code{Image} object obtained using \code{EBImage::readImage}.
}
\usage{data(Cluster_Example_3,package="i2d")}
\keyword{datasets}
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# NOTE(review): this file is regenerated by Rcpp::compileAttributes(), so any
# hand-written comments below will be lost on the next regeneration.  Every
# function here is a thin forwarder to a compiled C++ routine registered by
# the epiAllele package; the actual semantics live in src/.
#' Vector logSumExpArma
#'
#' This function computes log(sum(e^x)) of a vector x without leaving log space
#'
#' @param x A numeric vector
NULL
#' Vector logSumExp
#'
#' This function computes log(sum(e^x)) of a vector x without leaving log space
#'
#' @param x A numeric vector
#' @export
logSumExp <- function(x) {
.Call('_epiAllele_logSumExp', PACKAGE = 'epiAllele', x)
}
# Internal: per-site marginal transition computation over a tree.  Argument
# conventions are shared with the other tree routines below (data, transition
# matrix, traversal order, number of tips, log prior, sibling index, thread
# count); exact semantics are defined by the C++ implementation -- see src/.
marginalTransitionsCpp <- function(data, tMat, traversal, nTips, logPi, siblings, ncores = 1L) {
.Call('_epiAllele_marginalTransitionsCpp', PACKAGE = 'epiAllele', data, tMat, traversal, nTips, logPi, siblings, ncores)
}
#' multiProbToStick
#'
#' Convert multiple sets of probabilities (each set summing to one) to
#' stick breaking parameters (multi-set counterpart of probToStick; the
#' original header appeared copy-pasted from multiStickToProb -- confirm in src/)
#' @param x the vector of probabilities
#' @param width the width of the stick breaking process
#' @name multiProbToStick
#' @return a vector of stick breaking parameters
multiProbToStick <- function(x, width) {
.Call('_epiAllele_multiProbToStick', PACKAGE = 'epiAllele', x, width)
}
#' multiStickToProb
#'
#' Convert multiple sets of stick breaking parameters to sets of
#' probabilities that each sum to one
#' @param x the vector of stick breaking parameters
#' @param width the width of the stick breaking process
#' @name multiStickToProb
#' @return a vector of probabilities that sum to one
multiStickToProb <- function(x, width) {
.Call('_epiAllele_multiStickToProb', PACKAGE = 'epiAllele', x, width)
}
# Internal: postorder (tips -> root) message passing over the tree --
# presumably the upward pass of a pruning-style likelihood computation;
# TODO confirm in src/.
postorderMessagePassing <- function(data, tMat, traversal, nTips, logPi, nNode) {
.Call('_epiAllele_postorderMessagePassing', PACKAGE = 'epiAllele', data, tMat, traversal, nTips, logPi, nNode)
}
# Internal: preorder (root -> tips) message passing; `alpha` presumably
# carries the postorder result downward -- TODO confirm in src/.
preorderMessagePassing <- function(data, tMat, traversal, nTips, logPi, alpha, siblings, nNode, root) {
.Call('_epiAllele_preorderMessagePassing', PACKAGE = 'epiAllele', data, tMat, traversal, nTips, logPi, alpha, siblings, nNode, root)
}
#' probToStick
#'
#' Convert a set of probabilities that sum to one to a set of stick breaking parameters
#' @param x parameter vector of probabilities
#' @name probToStick
#' @return a vector of parameters for a stick breaking process
probToStick <- function(x) {
.Call('_epiAllele_probToStick', PACKAGE = 'epiAllele', x)
}
# Internal: writes `val` into `x` at positions `ind` on the C++ side.
# Called purely for its side effect, hence the invisible() return.
setValues <- function(x, ind, val) {
invisible(.Call('_epiAllele_setValues', PACKAGE = 'epiAllele', x, ind, val))
}
# Internal: per-site gain/loss computation over the tree (same argument
# conventions as marginalTransitionsCpp); semantics defined in src/.
siteGainLossCpp <- function(data, tMat, traversal, nTips, logPi, siblings, ncores = 1L) {
.Call('_epiAllele_siteGainLossCpp', PACKAGE = 'epiAllele', data, tMat, traversal, nTips, logPi, siblings, ncores)
}
#' stickToProb
#'
#' Convert a set of stick breaking parameters to a set of probabilities that sum to one
#' @param x parameter vector of stick breaking process parameters
#' @name stickToProb
#' @return a vector of probabilities that sum to one
stickToProb <- function(x) {
.Call('_epiAllele_stickToProb', PACKAGE = 'epiAllele', x)
}
# Internal: tree log-likelihood ("LL" presumably log-likelihood -- TODO
# confirm) given the data, transition matrix, traversal order, tip count
# and log prior.
treeLL <- function(data, tMat, traversal, nTips, logPi) {
.Call('_epiAllele_treeLL', PACKAGE = 'epiAllele', data, tMat, traversal, nTips, logPi)
}
|
/R/RcppExports.R
|
no_license
|
ndukler/epiAllele
|
R
| false
| false
| 3,281
|
r
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# NOTE(review): this file is regenerated by Rcpp::compileAttributes(), so any
# hand-written comments below will be lost on the next regeneration.  Every
# function here is a thin forwarder to a compiled C++ routine registered by
# the epiAllele package; the actual semantics live in src/.
#' Vector logSumExpArma
#'
#' This function computes log(sum(e^x)) of a vector x without leaving log space
#'
#' @param x A numeric vector
NULL
#' Vector logSumExp
#'
#' This function computes log(sum(e^x)) of a vector x without leaving log space
#'
#' @param x A numeric vector
#' @export
logSumExp <- function(x) {
.Call('_epiAllele_logSumExp', PACKAGE = 'epiAllele', x)
}
# Internal: per-site marginal transition computation over a tree.  Argument
# conventions are shared with the other tree routines below (data, transition
# matrix, traversal order, number of tips, log prior, sibling index, thread
# count); exact semantics are defined by the C++ implementation -- see src/.
marginalTransitionsCpp <- function(data, tMat, traversal, nTips, logPi, siblings, ncores = 1L) {
.Call('_epiAllele_marginalTransitionsCpp', PACKAGE = 'epiAllele', data, tMat, traversal, nTips, logPi, siblings, ncores)
}
#' multiProbToStick
#'
#' Convert multiple sets of probabilities (each set summing to one) to
#' stick breaking parameters (multi-set counterpart of probToStick; the
#' original header appeared copy-pasted from multiStickToProb -- confirm in src/)
#' @param x the vector of probabilities
#' @param width the width of the stick breaking process
#' @name multiProbToStick
#' @return a vector of stick breaking parameters
multiProbToStick <- function(x, width) {
.Call('_epiAllele_multiProbToStick', PACKAGE = 'epiAllele', x, width)
}
#' multiStickToProb
#'
#' Convert multiple sets of stick breaking parameters to sets of
#' probabilities that each sum to one
#' @param x the vector of stick breaking parameters
#' @param width the width of the stick breaking process
#' @name multiStickToProb
#' @return a vector of probabilities that sum to one
multiStickToProb <- function(x, width) {
.Call('_epiAllele_multiStickToProb', PACKAGE = 'epiAllele', x, width)
}
# Internal: postorder (tips -> root) message passing over the tree --
# presumably the upward pass of a pruning-style likelihood computation;
# TODO confirm in src/.
postorderMessagePassing <- function(data, tMat, traversal, nTips, logPi, nNode) {
.Call('_epiAllele_postorderMessagePassing', PACKAGE = 'epiAllele', data, tMat, traversal, nTips, logPi, nNode)
}
# Internal: preorder (root -> tips) message passing; `alpha` presumably
# carries the postorder result downward -- TODO confirm in src/.
preorderMessagePassing <- function(data, tMat, traversal, nTips, logPi, alpha, siblings, nNode, root) {
.Call('_epiAllele_preorderMessagePassing', PACKAGE = 'epiAllele', data, tMat, traversal, nTips, logPi, alpha, siblings, nNode, root)
}
#' probToStick
#'
#' Convert a set of probabilities that sum to one to a set of stick breaking parameters
#' @param x parameter vector of probabilities
#' @name probToStick
#' @return a vector of parameters for a stick breaking process
probToStick <- function(x) {
.Call('_epiAllele_probToStick', PACKAGE = 'epiAllele', x)
}
# Internal: writes `val` into `x` at positions `ind` on the C++ side.
# Called purely for its side effect, hence the invisible() return.
setValues <- function(x, ind, val) {
invisible(.Call('_epiAllele_setValues', PACKAGE = 'epiAllele', x, ind, val))
}
# Internal: per-site gain/loss computation over the tree (same argument
# conventions as marginalTransitionsCpp); semantics defined in src/.
siteGainLossCpp <- function(data, tMat, traversal, nTips, logPi, siblings, ncores = 1L) {
.Call('_epiAllele_siteGainLossCpp', PACKAGE = 'epiAllele', data, tMat, traversal, nTips, logPi, siblings, ncores)
}
#' stickToProb
#'
#' Convert a set of stick breaking parameters to a set of probabilities that sum to one
#' @param x parameter vector of stick breaking process parameters
#' @name stickToProb
#' @return a vector of probabilities that sum to one
stickToProb <- function(x) {
.Call('_epiAllele_stickToProb', PACKAGE = 'epiAllele', x)
}
# Internal: tree log-likelihood ("LL" presumably log-likelihood -- TODO
# confirm) given the data, transition matrix, traversal order, tip count
# and log prior.
treeLL <- function(data, tMat, traversal, nTips, logPi) {
.Call('_epiAllele_treeLL', PACKAGE = 'epiAllele', data, tMat, traversal, nTips, logPi)
}
|
# global.R: objects created here are visible to both ui.R and server.R
# of the Shiny app.
library(shiny)
# shinyBS supplies Bootstrap components (tooltips, modals, collapses).
library(shinyBS)
# leaflet supplies the interactive map widget.
library(leaflet)
# Pre-computed app data; assumes appData.RData sits in the app directory.
# NOTE(review): the objects it provides are not visible from this file --
# confirm against ui.R/server.R.
load("appData.RData")
|
/jfsp-archive/other_example_apps/older_app_versions/jfsp-v02/global.R
|
no_license
|
ua-snap/snap-r-tools
|
R
| false
| false
| 72
|
r
|
# global.R: objects created here are visible to both ui.R and server.R
# of the Shiny app.
library(shiny)
# shinyBS supplies Bootstrap components (tooltips, modals, collapses).
library(shinyBS)
# leaflet supplies the interactive map widget.
library(leaflet)
# Pre-computed app data; assumes appData.RData sits in the app directory.
# NOTE(review): the objects it provides are not visible from this file --
# confirm against ui.R/server.R.
load("appData.RData")
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hmi_smallfunctions.R
\name{extract_varnames}
\alias{extract_varnames}
\title{Function to extract the different elements of a formula}
\usage{
extract_varnames(
model_formula = NULL,
constant_variables,
variable_names_in_data = colnames(data),
data
)
}
\arguments{
\item{model_formula}{A formula (from class \code{formula})}
\item{constant_variables}{A Boolean-vector of length equal to the number of columns in the data set
specifying whether a variable is a constant variable (eg. an intercept variable) or not.}
\item{variable_names_in_data}{A character-vector with the column names of the data set.}
\item{data}{The data.frame the formula belongs to.}
}
\value{
A list with the names of the target variable, the intercept variable,
the fixed and random effects covariates (which includes the name of the target variable),
the variables with interactions and the cluster id variable.\cr
If some of them don't exist, they get the value "".
}
\description{
The function searches for the target variable, fixed effects variables,
if there is a cluster ID: this and the random effects variables.\cr
The names of the fixed and random intercepts variable (if existent) are explicitly labeled.
In imputation models, the target variable can act as a covariate
for other covariates - so we treat the target variable as a fixed effects variable.
}
|
/man/extract_varnames.Rd
|
no_license
|
cran/hmi
|
R
| false
| true
| 1,421
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hmi_smallfunctions.R
\name{extract_varnames}
\alias{extract_varnames}
\title{Function to extract the different elements of a formula}
\usage{
extract_varnames(
model_formula = NULL,
constant_variables,
variable_names_in_data = colnames(data),
data
)
}
\arguments{
\item{model_formula}{A formula (from class \code{formula})}
\item{constant_variables}{A Boolean-vector of length equal to the number of columns in the data set
specifying whether a variable is a constant variable (eg. an intercept variable) or not.}
\item{variable_names_in_data}{A character-vector with the column names of the data set.}
\item{data}{The data.frame the formula belongs to.}
}
\value{
A list with the names of the target variable, the intercept variable,
the fixed and random effects covariates (which includes the name of the target variable),
the variables with interactions and the cluster id variable.\cr
If some of them don't exist, they get the value "".
}
\description{
The function searches for the target variable, fixed effects variables,
if there is a cluster ID: this and the random effects variables.\cr
The names of the fixed and random intercepts variable (if existent) are explicitly labeled.
In imputation models, the target variable can act as a covariate
for other covariates - so we treat the target variable as a fixed effects variable.
}
|
\name{layerplotargs}
\alias{layerplotargs}
\alias{layerplotargs<-}
\title{
Extract or Replace the Plot Arguments of a Layered Object
}
\description{
Extracts or replaces the plot arguments of a layered object.
}
\usage{
layerplotargs(L)
layerplotargs(L) <- value
}
\arguments{
\item{L}{
An object of class \code{"layered"}
created by the function \code{\link{layered}}.
}
\item{value}{
Replacement value.
A list, with the same length as \code{L},
whose elements are lists of plot arguments.
}
}
\details{
These commands extract or replace the \code{plotargs}
in a layered object. See \code{\link{layered}}.
The replacement \code{value} should normally have the same
length as the current value. However, it can also be a list with
\emph{one} element which is a list of parameters. This will be
replicated to the required length.
For the assignment function \code{layerplotargs<-},
the argument \code{L} can be any spatial object; it will be converted
to a \code{layered} object with a single layer.
}
\value{
\code{layerplotargs} returns a list of lists of plot arguments.
\code{"layerplotargs<-"} returns the updated object
of class \code{"layered"}.
}
\author{\adrian
and \rolf
}
\seealso{
\code{\link{layered}},
\code{\link{methods.layered}},
\code{\link{[.layered}}.
}
\examples{
W <- square(2)
L <- layered(W=W, X=cells)
## The following are equivalent
layerplotargs(L) <- list(list(), list(pch=16))
layerplotargs(L)[[2]] <- list(pch=16)
layerplotargs(L)$X <- list(pch=16)
## The following are equivalent
layerplotargs(L) <- list(list(cex=2), list(cex=2))
layerplotargs(L) <- list(list(cex=2))
}
\keyword{spatial}
\keyword{hplot}
|
/man/layerplotargs.Rd
|
no_license
|
rubak/spatstat
|
R
| false
| false
| 1,742
|
rd
|
\name{layerplotargs}
\alias{layerplotargs}
\alias{layerplotargs<-}
\title{
Extract or Replace the Plot Arguments of a Layered Object
}
\description{
Extracts or replaces the plot arguments of a layered object.
}
\usage{
layerplotargs(L)
layerplotargs(L) <- value
}
\arguments{
\item{L}{
An object of class \code{"layered"}
created by the function \code{\link{layered}}.
}
\item{value}{
Replacement value.
A list, with the same length as \code{L},
whose elements are lists of plot arguments.
}
}
\details{
These commands extract or replace the \code{plotargs}
in a layered object. See \code{\link{layered}}.
The replacement \code{value} should normally have the same
length as the current value. However, it can also be a list with
\emph{one} element which is a list of parameters. This will be
replicated to the required length.
For the assignment function \code{layerplotargs<-},
the argument \code{L} can be any spatial object; it will be converted
to a \code{layered} object with a single layer.
}
\value{
\code{layerplotargs} returns a list of lists of plot arguments.
\code{"layerplotargs<-"} returns the updated object
of class \code{"layered"}.
}
\author{\adrian
and \rolf
}
\seealso{
\code{\link{layered}},
\code{\link{methods.layered}},
\code{\link{[.layered}}.
}
\examples{
W <- square(2)
L <- layered(W=W, X=cells)
## The following are equivalent
layerplotargs(L) <- list(list(), list(pch=16))
layerplotargs(L)[[2]] <- list(pch=16)
layerplotargs(L)$X <- list(pch=16)
## The following are equivalent
layerplotargs(L) <- list(list(cex=2), list(cex=2))
layerplotargs(L) <- list(list(cex=2))
}
\keyword{spatial}
\keyword{hplot}
|
## Load packages
library(rvest)

## Background (translated from the original Chinese notes):
## Scraping the show's own YouTube channel page failed -- presumably blocked
## by YouTube itself, much like FB/Twitter.  Most online workarounds use
## Python; one Python tutorial scraped the YouTube *search results* page
## instead, and that approach also works from R.  Because some dates are
## reruns and nothing airs on weekends, each month is handled separately
## and non-broadcast days are dropped afterwards.  YouTube rate-limits the
## crawl after a few months ("unusual traffic"); the original author needed
## three different IPs to finish.

## Fetch the first search hit's meta text (view count line) for each date.
##
## @param dates integer vector of dates encoded as yyyymmdd; each is appended
##   to the URL-encoded show-name search query.
## @return character vector parallel to `dates`; an entry is NA when the
##   search page yields no ".yt-lockup-meta-info" node.
## Replaces 13 copy-pasted month loops that grew a list via rbind();
## the result vector is preallocated instead.
scrape_views <- function(dates) {
  query_base <- "https://www.youtube.com/results?search_query=%E7%B6%9C%E8%97%9D%E5%A4%A7%E7%86%B1%E9%96%80"
  views <- character(length(dates))
  for (j in seq_along(dates)) {
    page <- read_html(paste0(query_base, dates[j]))
    meta <- html_text(html_nodes(page, ".yt-lockup-meta-info"))
    views[j] <- meta[1]
  }
  views
}

## For each month: scrape, then keep only weekday first-run episodes
## (the index vectors are the original author's broadcast-day lists).

## 2018-05
viewlist05 <- scrape_views(20180501:20180524)
viewlist05 <- viewlist05[c(1:3, 7:10, 14:17, 21:24)]
## 2018-04
viewlist04 <- scrape_views(20180401:20180430)
viewlist04 <- viewlist04[c(2:5, 9:12, 16:19, 23:26)]
## 2018-03
viewlist03 <- scrape_views(20180301:20180331)
viewlist03 <- viewlist03[c(5:8, 12:15, 19:22, 26:29)]
## 2018-02 -- bug fix: the original looped to 20180230 although February
## 2018 has 28 days; the extra (invalid) dates were never kept, so
## trimming the range leaves the output unchanged.
viewlist02 <- scrape_views(20180201:20180228)
viewlist02 <- viewlist02[c(1, 5:8, 12:14, 16, 21, 22, 26:28)]
## 2018-01
viewlist01 <- scrape_views(20180101:20180131)
viewlist01 <- viewlist01[c(1:4, 8:11, 15:18, 22:25, 29:31)]
## 2017-12
viewlist1712 <- scrape_views(20171201:20171231)
viewlist1712 <- viewlist1712[c(4:7, 11:14, 18:21, 25:28)]
## 2017-11
viewlist1711 <- scrape_views(20171101:20171130)
viewlist1711 <- viewlist1711[c(1, 2, 6:9, 13:16, 20:23, 27:30)]
## 2017-10
viewlist1710 <- scrape_views(20171001:20171031)
viewlist1710 <- viewlist1710[c(2:5, 9:12, 16:19, 23:26, 30, 31)]
## 2017-09
viewlist1709 <- scrape_views(20170901:20170930)
viewlist1709 <- viewlist1709[c(4:7, 11:14, 18:21, 25:28)]
## 2017-08
viewlist1708 <- scrape_views(20170801:20170831)
viewlist1708 <- viewlist1708[c(1:3, 7:10, 14:17, 21:24, 28:31)]
## 2017-07
viewlist1707 <- scrape_views(20170701:20170731)
viewlist1707 <- viewlist1707[c(3:6, 10:13, 17:20, 24:27, 31)]
## 2017-06
viewlist1706 <- scrape_views(20170601:20170630)
viewlist1706 <- viewlist1706[c(1, 5:8, 12:15, 19:22, 26:29)]
## 2017-05
viewlist1705 <- scrape_views(20170501:20170522)
viewlist1705 <- viewlist1705[c(1:4, 8:11, 15:18, 22)]

## Combine May 2017 through May 2018 in broadcast order
youtubeview <- c(viewlist1705, viewlist1706, viewlist1707, viewlist1708,
                 viewlist1709, viewlist1710, viewlist1711, viewlist1712,
                 viewlist01, viewlist02, viewlist03, viewlist04, viewlist05)
## Output
write.csv(youtubeview, file = "youtubeview.csv", row.names = FALSE)
|
/final/youtubeview.R
|
no_license
|
nalol831123/R
|
R
| false
| false
| 11,136
|
r
|
## Load packages
library(rvest)

## Background (translated from the original Chinese notes):
## Scraping the show's own YouTube channel page failed -- presumably blocked
## by YouTube itself, much like FB/Twitter.  Most online workarounds use
## Python; one Python tutorial scraped the YouTube *search results* page
## instead, and that approach also works from R.  Because some dates are
## reruns and nothing airs on weekends, each month is handled separately
## and non-broadcast days are dropped afterwards.  YouTube rate-limits the
## crawl after a few months ("unusual traffic"); the original author needed
## three different IPs to finish.

## Fetch the first search hit's meta text (view count line) for each date.
##
## @param dates integer vector of dates encoded as yyyymmdd; each is appended
##   to the URL-encoded show-name search query.
## @return character vector parallel to `dates`; an entry is NA when the
##   search page yields no ".yt-lockup-meta-info" node.
## Replaces 13 copy-pasted month loops that grew a list via rbind();
## the result vector is preallocated instead.
scrape_views <- function(dates) {
  query_base <- "https://www.youtube.com/results?search_query=%E7%B6%9C%E8%97%9D%E5%A4%A7%E7%86%B1%E9%96%80"
  views <- character(length(dates))
  for (j in seq_along(dates)) {
    page <- read_html(paste0(query_base, dates[j]))
    meta <- html_text(html_nodes(page, ".yt-lockup-meta-info"))
    views[j] <- meta[1]
  }
  views
}

## For each month: scrape, then keep only weekday first-run episodes
## (the index vectors are the original author's broadcast-day lists).

## 2018-05
viewlist05 <- scrape_views(20180501:20180524)
viewlist05 <- viewlist05[c(1:3, 7:10, 14:17, 21:24)]
## 2018-04
viewlist04 <- scrape_views(20180401:20180430)
viewlist04 <- viewlist04[c(2:5, 9:12, 16:19, 23:26)]
## 2018-03
viewlist03 <- scrape_views(20180301:20180331)
viewlist03 <- viewlist03[c(5:8, 12:15, 19:22, 26:29)]
## 2018-02 -- bug fix: the original looped to 20180230 although February
## 2018 has 28 days; the extra (invalid) dates were never kept, so
## trimming the range leaves the output unchanged.
viewlist02 <- scrape_views(20180201:20180228)
viewlist02 <- viewlist02[c(1, 5:8, 12:14, 16, 21, 22, 26:28)]
## 2018-01
viewlist01 <- scrape_views(20180101:20180131)
viewlist01 <- viewlist01[c(1:4, 8:11, 15:18, 22:25, 29:31)]
## 2017-12
viewlist1712 <- scrape_views(20171201:20171231)
viewlist1712 <- viewlist1712[c(4:7, 11:14, 18:21, 25:28)]
## 2017-11
viewlist1711 <- scrape_views(20171101:20171130)
viewlist1711 <- viewlist1711[c(1, 2, 6:9, 13:16, 20:23, 27:30)]
## 2017-10
viewlist1710 <- scrape_views(20171001:20171031)
viewlist1710 <- viewlist1710[c(2:5, 9:12, 16:19, 23:26, 30, 31)]
## 2017-09
viewlist1709 <- scrape_views(20170901:20170930)
viewlist1709 <- viewlist1709[c(4:7, 11:14, 18:21, 25:28)]
## 2017-08
viewlist1708 <- scrape_views(20170801:20170831)
viewlist1708 <- viewlist1708[c(1:3, 7:10, 14:17, 21:24, 28:31)]
## 2017-07
viewlist1707 <- scrape_views(20170701:20170731)
viewlist1707 <- viewlist1707[c(3:6, 10:13, 17:20, 24:27, 31)]
## 2017-06
viewlist1706 <- scrape_views(20170601:20170630)
viewlist1706 <- viewlist1706[c(1, 5:8, 12:15, 19:22, 26:29)]
## 2017-05
viewlist1705 <- scrape_views(20170501:20170522)
viewlist1705 <- viewlist1705[c(1:4, 8:11, 15:18, 22)]

## Combine May 2017 through May 2018 in broadcast order
youtubeview <- c(viewlist1705, viewlist1706, viewlist1707, viewlist1708,
                 viewlist1709, viewlist1710, viewlist1711, viewlist1712,
                 viewlist01, viewlist02, viewlist03, viewlist04, viewlist05)
## Output
write.csv(youtubeview, file = "youtubeview.csv", row.names = FALSE)
|
# Script for HTML -- updating the website.
# First job is to convert data frames to HTML tables.
#http://stackoverflow.com/questions/17748566/how-can-i-turn-an-r-data-frame-into-a-simple-unstyled-html-table
# Packages
library(dplyr)
library(xtable)
# Data input: 2016 settlement overview and 2017 request figures,
# joined on the municipality identifier.
df_2016 <- read.csv("~/Indikatorprosjektet/Indikatorer og datagrunnlag/Dataleveranser/Bosettingsdata 2016/Kopi av Bosetting_kommuneoversikt 2014-2017. Per 04 10 16.csv", sep=";", stringsAsFactors=FALSE)
df_2017 <- read.csv("~/Indikatorprosjektet/Indikatorer og datagrunnlag/Dataleveranser/Bosettingsdata 2016/anmodning_2017_161006_2.csv", sep=";", stringsAsFactors=FALSE)
df <- full_join(df_2016,df_2017,by="Kommune")
# Data preprocessing: strip the leading 4-digit municipality code from the name,
# then build "total (of which unaccompanied minors)" display strings per year.
df$kommune_navn = gsub(paste0("[012][0123456789][0123456789][0123456789]"),"",df$Kommune)
df$anmodning_2016_inkl_em = NA
df$vedtak_2016_inkl_em = NA
df$anmodning_2017_inkl_em = NA
df$vedtak_2017_inkl_em = NA
# 2016 request: "total (EM)" where an EM figure exists, otherwise the bare total.
df$anmodning_2016_inkl_em[df$Anmodning_2016_EM>=0] = paste0(df$Anmodning_2016[df$Anmodning_2016_EM>=0]," (",df$Anmodning_2016_EM[df$Anmodning_2016_EM>=0],")")
df$anmodning_2016_inkl_em[is.na(df$Anmodning_2016_EM)==T] = df$Anmodning_2016[is.na(df$Anmodning_2016_EM)==T]
df$vedtak_2016_inkl_em[df$Vedtak_2016_EM>=0] = paste0(df$Vedtak_2016[df$Vedtak_2016_EM>=0]," (",df$Vedtak_2016_EM[df$Vedtak_2016_EM>=0],")")
df$vedtak_2016_inkl_em[df$Vedtak_2016_EM==""] = df$Vedtak_2016[df$Vedtak_2016_EM==""]
sum(is.na(df$Anmodning.totalt)) # Municipalities that are not requested
sum(is.na(df$Hvorav.anmodning.EM)) # Municipalities not requested for unaccompanied minors (EM)
df$anmodning_2017_inkl_em[is.na(df$Hvorav.anmodning.EM)==F] = paste0(df$Anmodning.totalt[is.na(df$Hvorav.anmodning.EM)==F]," (",df$Hvorav.anmodning.EM[is.na(df$Hvorav.anmodning.EM)==F],")")
df$anmodning_2017_inkl_em[is.na(df$Hvorav.anmodning.EM)==T] = paste0(df$Anmodning.totalt[is.na(df$Hvorav.anmodning.EM)==T]," (0)")
# Drop empty "()" artifacts from the pre-formatted 2017 decision column.
df$vedtak_2017_inkl_em = gsub("()","",df$Vedtak_2017_EM,fixed=T)
df = select(df,Kommune,kommune_navn,anmodning_2016_inkl_em,vedtak_2016_inkl_em,anmodning_2017_inkl_em,vedtak_2017_inkl_em)
sum(is.na(df))==0 # No remaining NA
names(df)=c("kode","Kommune","Anmodning 2016 (herav enslige mindreårige)","Vedtak 2016 (herav enslige mindreårige)","Anmodning 2017 (herav enslige mindreårige)","Vedtak 2017 (herav enslige mindreårige)")
# County overview: rows whose code does NOT contain a digit 0-2 are county rows.
df_fylke = df[-grep("[012]",df$kode),2:6]
names(df_fylke)=c("Fylke","Anmodning 2016 (herav enslige mindreårige)","Vedtak 2016 (herav enslige mindreårige)","Anmodning 2017 (herav enslige mindreårige)","Vedtak 2017 (herav enslige mindreårige)")
print(xtable(df_fylke, caption="Fylkestall", label="label"), type="html", file="test/fylker.html",include.rownames=FALSE)
# Municipality files: one HTML file per county (01-20), selecting the rows
# whose 4-digit code starts with that county's zero-padded number.
fylkenr = seq(01,20)
for(i in fylkenr){
if(i>9){t = as.character(fylkenr[i])}
if(i<10){t = paste0("0",fylkenr[i])}
t=strsplit(t,split="")
print(xtable(df[grep(paste0("[",t[[1]][[1]],"][",t[[1]][[2]],"][0123456789][0123456789]"),df$kode),2:6]), type="html", file=paste0("test/",fylkenr[i],".html"),include.rownames=FALSE)
}
|
/scripts/tabeller_til_HTML.R
|
no_license
|
gardenberg/imdikator-munch
|
R
| false
| false
| 3,038
|
r
|
# Script for HTML -- updating the website.
# First job is to convert data frames to HTML tables.
#http://stackoverflow.com/questions/17748566/how-can-i-turn-an-r-data-frame-into-a-simple-unstyled-html-table
# Packages
library(dplyr)
library(xtable)
# Data input: 2016 settlement overview and 2017 request figures,
# joined on the municipality identifier.
df_2016 <- read.csv("~/Indikatorprosjektet/Indikatorer og datagrunnlag/Dataleveranser/Bosettingsdata 2016/Kopi av Bosetting_kommuneoversikt 2014-2017. Per 04 10 16.csv", sep=";", stringsAsFactors=FALSE)
df_2017 <- read.csv("~/Indikatorprosjektet/Indikatorer og datagrunnlag/Dataleveranser/Bosettingsdata 2016/anmodning_2017_161006_2.csv", sep=";", stringsAsFactors=FALSE)
df <- full_join(df_2016,df_2017,by="Kommune")
# Data preprocessing: strip the leading 4-digit municipality code from the name,
# then build "total (of which unaccompanied minors)" display strings per year.
df$kommune_navn = gsub(paste0("[012][0123456789][0123456789][0123456789]"),"",df$Kommune)
df$anmodning_2016_inkl_em = NA
df$vedtak_2016_inkl_em = NA
df$anmodning_2017_inkl_em = NA
df$vedtak_2017_inkl_em = NA
# 2016 request: "total (EM)" where an EM figure exists, otherwise the bare total.
df$anmodning_2016_inkl_em[df$Anmodning_2016_EM>=0] = paste0(df$Anmodning_2016[df$Anmodning_2016_EM>=0]," (",df$Anmodning_2016_EM[df$Anmodning_2016_EM>=0],")")
df$anmodning_2016_inkl_em[is.na(df$Anmodning_2016_EM)==T] = df$Anmodning_2016[is.na(df$Anmodning_2016_EM)==T]
df$vedtak_2016_inkl_em[df$Vedtak_2016_EM>=0] = paste0(df$Vedtak_2016[df$Vedtak_2016_EM>=0]," (",df$Vedtak_2016_EM[df$Vedtak_2016_EM>=0],")")
df$vedtak_2016_inkl_em[df$Vedtak_2016_EM==""] = df$Vedtak_2016[df$Vedtak_2016_EM==""]
sum(is.na(df$Anmodning.totalt)) # Municipalities that are not requested
sum(is.na(df$Hvorav.anmodning.EM)) # Municipalities not requested for unaccompanied minors (EM)
df$anmodning_2017_inkl_em[is.na(df$Hvorav.anmodning.EM)==F] = paste0(df$Anmodning.totalt[is.na(df$Hvorav.anmodning.EM)==F]," (",df$Hvorav.anmodning.EM[is.na(df$Hvorav.anmodning.EM)==F],")")
df$anmodning_2017_inkl_em[is.na(df$Hvorav.anmodning.EM)==T] = paste0(df$Anmodning.totalt[is.na(df$Hvorav.anmodning.EM)==T]," (0)")
# Drop empty "()" artifacts from the pre-formatted 2017 decision column.
df$vedtak_2017_inkl_em = gsub("()","",df$Vedtak_2017_EM,fixed=T)
df = select(df,Kommune,kommune_navn,anmodning_2016_inkl_em,vedtak_2016_inkl_em,anmodning_2017_inkl_em,vedtak_2017_inkl_em)
sum(is.na(df))==0 # No remaining NA
names(df)=c("kode","Kommune","Anmodning 2016 (herav enslige mindreårige)","Vedtak 2016 (herav enslige mindreårige)","Anmodning 2017 (herav enslige mindreårige)","Vedtak 2017 (herav enslige mindreårige)")
# County overview: rows whose code does NOT contain a digit 0-2 are county rows.
df_fylke = df[-grep("[012]",df$kode),2:6]
names(df_fylke)=c("Fylke","Anmodning 2016 (herav enslige mindreårige)","Vedtak 2016 (herav enslige mindreårige)","Anmodning 2017 (herav enslige mindreårige)","Vedtak 2017 (herav enslige mindreårige)")
print(xtable(df_fylke, caption="Fylkestall", label="label"), type="html", file="test/fylker.html",include.rownames=FALSE)
# Municipality files: one HTML file per county (01-20), selecting the rows
# whose 4-digit code starts with that county's zero-padded number.
fylkenr = seq(01,20)
for(i in fylkenr){
if(i>9){t = as.character(fylkenr[i])}
if(i<10){t = paste0("0",fylkenr[i])}
t=strsplit(t,split="")
print(xtable(df[grep(paste0("[",t[[1]][[1]],"][",t[[1]][[2]],"][0123456789][0123456789]"),df$kode),2:6]), type="html", file=paste0("test/",fylkenr[i],".html"),include.rownames=FALSE)
}
|
# Explore the relationship between per-capita GDP and per-capita electricity
# consumption (World Development Indicators): log-log and linear fits, a
# sensitivity check excluding Iceland, and marginal histograms.
library(dplyr)
library(readr)
setwd("~/Desktop/world-development-indicators-2")
indicators <- read_csv("Indicators.csv")
# The dataset includes regional data, which is not of interest here since we're curious about specific countries
not_countries <- list("Arab World", "Caribbean small states", "Central Europe and the Baltics", "Channel Islands", "Dominica", "East Asia & Pacific (all income levels)", "East Asia & Pacific (developing only)", "Europe & Central Asia (all income levels)", "Europe & Central Asia (developing only)", "European Union", "Fragile and conflict affected situations", "Heavily indebted poor countries (HIPC)", "High income", "High income: nonOECD", "High income: OECD", "Latin America & Caribbean (all income levels)", "Latin America & Caribbean (developing only)", "Least developed countries: UN classification", "Low & middle income", "Low income", "Lower middle income", "Middle East & North Africa (all income levels)", "Middle East & North Africa (developing only)", "Middle income", "OECD members", "Other small states", "Sub-Saharan Africa (all income levels)", "Sub-Saharan Africa (developing only)", "West Bank and Gaza", "World", "Euro area", "North America", "Pacific island small states", "Small states", "South Asia", "Upper middle income")
country_indicators <- indicators[!indicators$CountryName %in% not_countries, ]
# 2012 was the most recent year that per capita electricity use was reported
country_indicators_2012 <- filter(country_indicators, Year == "2012")
country_idc_table_2012 <- split(country_indicators_2012, country_indicators_2012$IndicatorCode)
# Join two single-indicator tables on the ISO country code.
merge_tables <- function(idc1, idc2) {
  merge(idc1, idc2, by = "CountryCode")
}
# EG.USE.ELEC.KH.PC electric power consumption per capita
# NY.GDP.PCAP.CD GDP per capita
elec_gdppc <- merge_tables(country_idc_table_2012$NY.GDP.PCAP.CD, country_idc_table_2012$EG.USE.ELEC.KH.PC)
# Let's see what a double log plot of per capita GDP vs. per capita electricity use looks like...
# log10() is vectorized, so apply it to the whole column directly instead of
# the original lapply() + unlist() round-trip through a list.
log_elec <- log10(elec_gdppc$Value.y)
log_gdp <- log10(elec_gdppc$Value.x)
logelec_gdp <- data.frame(logelec = log_elec, loggdp = log_gdp)
plot(log_elec, log_gdp, xlab="log Per Capita Electricity Consumption (kWh)", ylab ="log Per Capita GDP (2015 USD)", pch=19)
log_regression <- lm(logelec_gdp$loggdp ~ logelec_gdp$logelec)
abline(log_regression, col="red")
summary(log_regression)
# What about a linear plot?
plot(elec_gdppc$Value.y, elec_gdppc$Value.x, xlab="Per Capita Electricity Consumption (kWh)", ylab ="Per Capita GDP (2015 USD)", pch=19)
regression <- lm(elec_gdppc$Value.x ~ elec_gdppc$Value.y)
abline(regression, col="red")
summary(regression)
# What happens if we throw out Iceland?
no_iceland <- filter(elec_gdppc, CountryCode != "ISL")
plot(no_iceland$Value.y, no_iceland$Value.x, xlab="Per Capita Electricity Consumption (kWh)", ylab ="Per Capita GDP (2015 USD)", pch=19)
noice_regress <- lm(no_iceland$Value.x ~ no_iceland$Value.y)
abline(noice_regress, col="red")
summary(noice_regress)
# Same log-log analysis without Iceland (vectorized log10, as above).
noice_logelecgdp <- data.frame(logelec = log10(no_iceland$Value.y), loggdp = log10(no_iceland$Value.x))
plot(noice_logelecgdp$logelec, noice_logelecgdp$loggdp, xlab="log Per Capita Electricity Consumption (kWh)", ylab ="log Per Capita GDP (2015 USD)", pch=19)
noice_log_regress <- lm(noice_logelecgdp$loggdp ~ noice_logelecgdp$logelec)
abline(noice_log_regress, col="red")
summary(noice_log_regress)
# Histograms of each variable on linear and log scales.
hist(elec_gdppc$Value.y, main="", xlab="Per Capita Electricity Consumption (kWh)", col="green", breaks=10)
hist(log_elec, main="", xlab="log Per Capita Electricity Consumption (kWh)", col="blue", breaks=10)
hist(elec_gdppc$Value.x, main="", xlab="Per Capita GDP (USD)", col="green", breaks=10)
hist(log_gdp, main="", xlab="log Per Capita GDP (USD)", col="blue", breaks=10)
|
/indicators.R
|
no_license
|
bkossmann/BDB
|
R
| false
| false
| 3,964
|
r
|
# Explore the relationship between per-capita GDP and per-capita electricity
# consumption (World Development Indicators): log-log and linear fits, a
# sensitivity check excluding Iceland, and marginal histograms.
library(dplyr)
library(readr)
setwd("~/Desktop/world-development-indicators-2")
indicators <- read_csv("Indicators.csv")
#The dataset includes regional data, which is not of interest here since we're curious about specific countries
not_countries <- list("Arab World", "Caribbean small states", "Central Europe and the Baltics", "Channel Islands", "Dominica", "East Asia & Pacific (all income levels)", "East Asia & Pacific (developing only)", "Europe & Central Asia (all income levels)", "Europe & Central Asia (developing only)", "European Union", "Fragile and conflict affected situations", "Heavily indebted poor countries (HIPC)", "High income", "High income: nonOECD", "High income: OECD", "Latin America & Caribbean (all income levels)", "Latin America & Caribbean (developing only)", "Least developed countries: UN classification", "Low & middle income", "Low income", "Lower middle income", "Middle East & North Africa (all income levels)", "Middle East & North Africa (developing only)", "Middle income", "OECD members", "Other small states", "Sub-Saharan Africa (all income levels)", "Sub-Saharan Africa (developing only)", "West Bank and Gaza", "World", "Euro area", "North America", "Pacific island small states", "Small states", "South Asia", "Upper middle income")
country_indicators <- indicators[ ! indicators$CountryName %in% not_countries, ]
#2012 was the most recent year that per capita electricity use was reported
country_indicators_2012 <- filter(country_indicators, Year == "2012")
# One data frame per indicator code, keyed by the code.
country_idc_table_2012 <- split(country_indicators_2012, country_indicators_2012$IndicatorCode)
# Join two single-indicator tables on the ISO country code.
merge_tables <- function(idc1, idc2){ new_table <- merge(idc1, idc2, by="CountryCode"); return(new_table)}
#EG.USE.ELEC.KH.PC electric power consumption per capita
#NY.GDP.PCAP.CD GDP per capita
elec_gdppc <- merge_tables(country_idc_table_2012$NY.GDP.PCAP.CD, country_idc_table_2012$EG.USE.ELEC.KH.PC)
#Let's see what a double log plot of per capita GDP vs. per capita electricity use looks like...
# NOTE(review): log10() is vectorized; lapply() + unlist() builds the same
# values via a list round-trip and could be replaced by log10(<column>).
log_elec = lapply(elec_gdppc$Value.y, log10)
log_gdp = lapply(elec_gdppc$Value.x, log10)
logelec_gdp = data.frame( logelec = unlist(log_elec), loggdp = unlist(log_gdp))
plot(log_elec, log_gdp, xlab="log Per Capita Electricity Consumption (kWh)", ylab ="log Per Capita GDP (2015 USD)", pch=19)
log_regression <- lm(logelec_gdp$loggdp ~ logelec_gdp$logelec)
abline(log_regression, col="red")
summary(log_regression)
#What about a linear plot?
plot(elec_gdppc$Value.y, elec_gdppc$Value.x, xlab="Per Capita Electricity Consumption (kWh)", ylab ="Per Capita GDP (2015 USD)", pch=19)
regression <- lm(elec_gdppc$Value.x ~ elec_gdppc$Value.y)
abline(regression, col="red")
summary(regression)
#What happens if we throw out Iceland?
no_iceland <- filter(elec_gdppc, CountryCode != "ISL")
plot(no_iceland$Value.y, no_iceland$Value.x, xlab="Per Capita Electricity Consumption (kWh)", ylab ="Per Capita GDP (2015 USD)", pch=19)
noice_regress <- lm(no_iceland$Value.x ~ no_iceland$Value.y)
abline(noice_regress, col="red")
summary(noice_regress)
# Repeat the log-log analysis without Iceland.
noice_logelec = lapply(no_iceland$Value.y, log10)
noice_loggdp = lapply(no_iceland$Value.x, log10)
noice_logelecgdp <- data.frame( logelec = unlist(noice_logelec), loggdp = unlist(noice_loggdp))
plot(noice_logelecgdp$logelec, noice_logelecgdp$loggdp, xlab="log Per Capita Electricity Consumption (kWh)", ylab ="log Per Capita GDP (2015 USD)", pch=19)
noice_log_regress <- lm(noice_logelecgdp$loggdp ~ noice_logelecgdp$logelec)
abline(noice_log_regress, col="red")
summary(noice_log_regress)
# Histograms of each variable on linear and log scales.
hist(elec_gdppc$Value.y, main="", xlab="Per Capita Electricity Consumption (kWh)", col="green", breaks=10)
hist(unlist(log_elec), main="", xlab="log Per Capita Electricity Consumption (kWh)", col="blue", breaks=10)
hist(elec_gdppc$Value.x, main="", xlab="Per Capita GDP (USD)", col="green", breaks=10)
hist(unlist(log_gdp), main="", xlab="log Per Capita GDP (USD)", col="blue", breaks=10)
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 18114
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 18114
c
c Input Parameter (command line, file):
c input filename QBFLIB/Tentrup/ltl2aig-comp/load_full_2_comp2_REAL.unsat.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 6081
c no.of clauses 18114
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 18114
c
c QBFLIB/Tentrup/ltl2aig-comp/load_full_2_comp2_REAL.unsat.qdimacs 6081 18114 E1 [] 0 2 6079 18114 NONE
|
/code/dcnf-ankit-optimized/Results/QBFLIB-2018/E1/Experiments/Tentrup/ltl2aig-comp/load_full_2_comp2_REAL.unsat/load_full_2_comp2_REAL.unsat.R
|
no_license
|
arey0pushpa/dcnf-autarky
|
R
| false
| false
| 664
|
r
|
c DCNF-Autarky [version 0.0.1].
c Copyright (c) 2018-2019 Swansea University.
c
c Input Clause Count: 18114
c Performing E1-Autarky iteration.
c Remaining clauses count after E-Reduction: 18114
c
c Input Parameter (command line, file):
c input filename QBFLIB/Tentrup/ltl2aig-comp/load_full_2_comp2_REAL.unsat.qdimacs
c output filename /tmp/dcnfAutarky.dimacs
c autarky level 1
c conformity level 0
c encoding type 2
c no.of var 6081
c no.of clauses 18114
c no.of taut cls 0
c
c Output Parameters:
c remaining no.of clauses 18114
c
c QBFLIB/Tentrup/ltl2aig-comp/load_full_2_comp2_REAL.unsat.qdimacs 6081 18114 E1 [] 0 2 6079 18114 NONE
|
#' 71996 Water Use, Secondary (Codes)
#'
#' A table containing the USGS Water Use, Secondary (Codes) parameter codes.
#'
#' @format A data frame with 131 rows and 3 variables:
#' \describe{
#' \item{Parameter Code}{USGS Parameter Code}
#' \item{Fixed Value}{Fixed Value}
#' \item{Fixed Text}{Fixed Text}
#' }
#'
#'
#' @references
#' This data is from Table 26. Parameter codes with fixed values (USGS Water Quality Samples for USA: Sample Data). See \url{https://help.waterdata.usgs.gov/codes-and-parameters/}.
#'
#'
#'
#'
"pmcode_71996"
#> [1] "pmcode_71996"
|
/R/pmcode_71996.R
|
permissive
|
cran/ie2miscdata
|
R
| false
| false
| 560
|
r
|
#' 71996 Water Use, Secondary (Codes)
#'
#' A table containing the USGS Water Use, Secondary (Codes) parameter codes.
#'
#' @format A data frame with 131 rows and 3 variables:
#' \describe{
#' \item{Parameter Code}{USGS Parameter Code}
#' \item{Fixed Value}{Fixed Value}
#' \item{Fixed Text}{Fixed Text}
#' }
#'
#'
#' @references
#' This data is from Table 26. Parameter codes with fixed values (USGS Water Quality Samples for USA: Sample Data). See \url{https://help.waterdata.usgs.gov/codes-and-parameters/}.
#'
#'
#'
#'
"pmcode_71996"
#> [1] "pmcode_71996"
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/iot_operations.R
\name{iot_get_behavior_model_training_summaries}
\alias{iot_get_behavior_model_training_summaries}
\title{Returns a Device Defender's ML Detect Security Profile training model's
status}
\usage{
iot_get_behavior_model_training_summaries(securityProfileName,
maxResults, nextToken)
}
\arguments{
\item{securityProfileName}{The name of the security profile.}
\item{maxResults}{The maximum number of results to return at one time. The default is 25.}
\item{nextToken}{The token for the next set of results.}
}
\value{
A list with the following syntax:\preformatted{list(
summaries = list(
list(
securityProfileName = "string",
behaviorName = "string",
trainingDataCollectionStartDate = as.POSIXct(
"2015-01-01"
),
modelStatus = "PENDING_BUILD"|"ACTIVE"|"EXPIRED",
datapointsCollectionPercentage = 123.0,
lastModelRefreshDate = as.POSIXct(
"2015-01-01"
)
)
),
nextToken = "string"
)
}
}
\description{
Returns a Device Defender's ML Detect Security Profile training model's
status.
}
\section{Request syntax}{
\preformatted{svc$get_behavior_model_training_summaries(
securityProfileName = "string",
maxResults = 123,
nextToken = "string"
)
}
}
\keyword{internal}
|
/cran/paws.internet.of.things/man/iot_get_behavior_model_training_summaries.Rd
|
permissive
|
paws-r/paws
|
R
| false
| true
| 1,341
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/iot_operations.R
\name{iot_get_behavior_model_training_summaries}
\alias{iot_get_behavior_model_training_summaries}
\title{Returns a Device Defender's ML Detect Security Profile training model's
status}
\usage{
iot_get_behavior_model_training_summaries(securityProfileName,
maxResults, nextToken)
}
\arguments{
\item{securityProfileName}{The name of the security profile.}
\item{maxResults}{The maximum number of results to return at one time. The default is 25.}
\item{nextToken}{The token for the next set of results.}
}
\value{
A list with the following syntax:\preformatted{list(
summaries = list(
list(
securityProfileName = "string",
behaviorName = "string",
trainingDataCollectionStartDate = as.POSIXct(
"2015-01-01"
),
modelStatus = "PENDING_BUILD"|"ACTIVE"|"EXPIRED",
datapointsCollectionPercentage = 123.0,
lastModelRefreshDate = as.POSIXct(
"2015-01-01"
)
)
),
nextToken = "string"
)
}
}
\description{
Returns a Device Defender's ML Detect Security Profile training model's
status.
}
\section{Request syntax}{
\preformatted{svc$get_behavior_model_training_summaries(
securityProfileName = "string",
maxResults = 123,
nextToken = "string"
)
}
}
\keyword{internal}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qpcr_analyser.R
\docType{methods}
\name{qpcr_analyser}
\alias{qpcr_analyser}
\alias{qpcr_analyser-methods}
\alias{qpcr_analyser,adpcr-method}
\alias{qpcr_analyser,data.frame-method}
\alias{qpcr_analyser,modlist-method}
\title{qPCR Analyser}
\arguments{
\item{input}{a dataframe containing the qPCR data or a result of function
\code{\link[qpcR]{modlist}} or an object of the class
\code{\linkS4class{adpcr}}.}
\item{cyc}{the column containing the cycle data. Defaults to first column.}
\item{fluo}{the column(s) (runs) to be analyzed. If NULL, all runs will be
considered. Use fluo = 2 to chose the second column for example.}
\item{model}{is the model to be used for the analysis for all runs. Defaults
to 'l5' (see \code{\link[qpcR]{pcrfit}}).}
\item{norm}{logical. Indicates if the raw data should be normalized within
[0, 1] before model fitting.}
\item{iter_tr}{\code{iter_tr} number of iteration to fit the curve.}
\item{type}{is the method for the crossing point/threshold cycle estimation
and efficiency estimation (\link[qpcR]{efficiency}). Defaults to 'Cy0'
(\code{\link[qpcR]{Cy0}}).}
\item{takeoff}{logical; if \code{TRUE} calculates the first significant
cycle of the exponential region (takeoff point). See
\code{\link[qpcR]{takeoff}} for details.}
}
\value{
A matrix where each column represents crossing point, efficiency,
the raw fluorescence value at the point defined by type and difference
between minimum and maximum of observed fluorescence. If takeoff parameter
is \code{TRUE}, additional two column represents start and the end of the
fluorescence growth.
}
\description{
Calculate statistics based on fluorescence. The function can be used to
analyze amplification curve data from quantitative real-time PCR
experiments. The analysis includes the fitting of the amplification curve by
a non-linear function and the calculation of a quantification point (often
referred to as Cp (crossing-point), Cq or Ct) based on a user defined
method. The function can be used to analyze data from chamber based dPCR
machines.
}
\details{
The \code{qpcRanalyzer} is a functions to automatize the analysis of
amplification curves from conventional quantitative real-time PCR (qPCR)
experiments and is adapted for the needs in dPCR. This function calls
instances of the \code{qpcR} package to calculate the quantification
points (cpD1, cpD2, Cy0 (default), TOP (optional)), the amplification
efficiency, fluorescence at the quantification point (Cq), the absolute
change of fluorescence and the take-off point (TOP). Most of the central
functionality of the \code{qpcR} package is accessible. The user can assign
concentrations to the samples. One column contains binary converted (pos (1)
and neg (0)) results for the amplification reaction based on a user defined
criteria (Cq-range, fluorescence cut-off, ...). \code{qpcr_analyser} tries
to detect cases where an amplification did not take place of was impossible
to analyze. By default \code{qpcr_analyser} analyses uses the Cy0 as
described in Guescini et al. (2008) for estimation of the quantification
point since method is considered to be better suited for many probe systems.
By default a 5-parameter model is used to fit the amplification curves. As
such \code{qpcr_analyser} is a function, which serves for preliminary data
inspection (see Example section) and as input for other R functions from the
\code{dpcR} package (e.g., \link{plot_panel}).
}
\examples{
# Take data of guescini1 data set from the qpcR R package.
library(qpcR)
# Use the first column containing the cycles and the second column for sample F1.1.
data(guescini1)
qpcr_analyser(guescini1, cyc = 1, fluo = 2)
# Use similar setting as before but set takeoff to true for an estimation of
# the first significant cycle of the exponential region.
qpcr_analyser(guescini1, cyc = 1, fluo = 2, takeoff = TRUE)
# Use similar setting as before but use qpcr_analyser in a loop to calculate the results for the
# first four columns containing the fluorescence in guescini1
print(qpcr_analyser(guescini1, cyc = 1, fluo = 2:5, takeoff = TRUE))
# Run qpcr_analyser on the list of models (finer control on fitting model process)
models <- modlist(guescini1)
qpcr_analyser(models)
}
\references{
Ritz C, Spiess An-N, \emph{qpcR: an R package for sigmoidal
model selection in quantitative real-time polymerase chain reaction
analysis}. Bioinformatics 24 (13), 2008.
Andrej-Nikolai Spiess (2013). qpcR: Modelling and analysis of real-time PCR
data.\cr \url{https://CRAN.R-project.org/package=qpcR}\cr
}
\seealso{
\link[qpcR]{modlist}.
}
\author{
Stefan Roediger, Andrej-Nikolai Spiess, Michal Burdukiewicz.
}
\keyword{Cy0}
\keyword{amplification}
\keyword{qPCR}
\keyword{quantification}
\keyword{real-time}
|
/man/qpcr_analyser.Rd
|
no_license
|
michbur/dpcR
|
R
| false
| true
| 4,823
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/qpcr_analyser.R
\docType{methods}
\name{qpcr_analyser}
\alias{qpcr_analyser}
\alias{qpcr_analyser-methods}
\alias{qpcr_analyser,adpcr-method}
\alias{qpcr_analyser,data.frame-method}
\alias{qpcr_analyser,modlist-method}
\title{qPCR Analyser}
\arguments{
\item{input}{a dataframe containing the qPCR data or a result of function
\code{\link[qpcR]{modlist}} or an object of the class
\code{\linkS4class{adpcr}}.}
\item{cyc}{the column containing the cycle data. Defaults to first column.}
\item{fluo}{the column(s) (runs) to be analyzed. If NULL, all runs will be
considered. Use fluo = 2 to chose the second column for example.}
\item{model}{is the model to be used for the analysis for all runs. Defaults
to 'l5' (see \code{\link[qpcR]{pcrfit}}).}
\item{norm}{logical. Indicates if the raw data should be normalized within
[0, 1] before model fitting.}
\item{iter_tr}{\code{iter_tr} number of iteration to fit the curve.}
\item{type}{is the method for the crossing point/threshold cycle estimation
and efficiency estimation (\link[qpcR]{efficiency}). Defaults to 'Cy0'
(\code{\link[qpcR]{Cy0}}).}
\item{takeoff}{logical; if \code{TRUE} calculates the first significant
cycle of the exponential region (takeoff point). See
\code{\link[qpcR]{takeoff}} for details.}
}
\value{
A matrix where each column represents crossing point, efficiency,
the raw fluorescence value at the point defined by type and difference
between minimum and maximum of observed fluorescence. If takeoff parameter
is \code{TRUE}, additional two column represents start and the end of the
fluorescence growth.
}
\description{
Calculate statistics based on fluorescence. The function can be used to
analyze amplification curve data from quantitative real-time PCR
experiments. The analysis includes the fitting of the amplification curve by
a non-linear function and the calculation of a quantification point (often
referred to as Cp (crossing-point), Cq or Ct) based on a user defined
method. The function can be used to analyze data from chamber based dPCR
machines.
}
\details{
The \code{qpcRanalyzer} is a functions to automatize the analysis of
amplification curves from conventional quantitative real-time PCR (qPCR)
experiments and is adapted for the needs in dPCR. This function calls
instances of the \code{qpcR} package to calculate the quantification
points (cpD1, cpD2, Cy0 (default), TOP (optional)), the amplification
efficiency, fluorescence at the quantification point (Cq), the absolute
change of fluorescence and the take-off point (TOP). Most of the central
functionality of the \code{qpcR} package is accessible. The user can assign
concentrations to the samples. One column contains binary converted (pos (1)
and neg (0)) results for the amplification reaction based on a user defined
criteria (Cq-range, fluorescence cut-off, ...). \code{qpcr_analyser} tries
to detect cases where an amplification did not take place of was impossible
to analyze. By default \code{qpcr_analyser} analyses uses the Cy0 as
described in Guescini et al. (2008) for estimation of the quantification
point since method is considered to be better suited for many probe systems.
By default a 5-parameter model is used to fit the amplification curves. As
such \code{qpcr_analyser} is a function, which serves for preliminary data
inspection (see Example section) and as input for other R functions from the
\code{dpcR} package (e.g., \link{plot_panel}).
}
\examples{
# Take data of guescini1 data set from the qpcR R package.
library(qpcR)
# Use the first column containing the cycles and the second column for sample F1.1.
data(guescini1)
qpcr_analyser(guescini1, cyc = 1, fluo = 2)
# Use similar setting as before but set takeoff to true for an estimation of
# the first significant cycle of the exponential region.
qpcr_analyser(guescini1, cyc = 1, fluo = 2, takeoff = TRUE)
# Use similar setting as before but use qpcr_analyser in a loop to calculate the results for the
# first four columns containing the fluorescence in guescini1
print(qpcr_analyser(guescini1, cyc = 1, fluo = 2:5, takeoff = TRUE))
# Run qpcr_analyser on the list of models (finer control on fitting model process)
models <- modlist(guescini1)
qpcr_analyser(models)
}
\references{
Ritz C, Spiess An-N, \emph{qpcR: an R package for sigmoidal
model selection in quantitative real-time polymerase chain reaction
analysis}. Bioinformatics 24 (13), 2008.
Andrej-Nikolai Spiess (2013). qpcR: Modelling and analysis of real-time PCR
data.\cr \url{https://CRAN.R-project.org/package=qpcR}\cr
}
\seealso{
\link[qpcR]{modlist}.
}
\author{
Stefan Roediger, Andrej-Nikolai Spiess, Michal Burdukiewicz.
}
\keyword{Cy0}
\keyword{amplification}
\keyword{qPCR}
\keyword{quantification}
\keyword{real-time}
|
# Register column names used via non-standard evaluation (e.g. in dplyr or
# ggplot2 calls) so R CMD check does not flag them as undefined globals.
utils::globalVariables(c("public", "support", "value"))
|
/R/globals.R
|
permissive
|
adsoncostanzifilho/CSGo
|
R
| false
| false
| 54
|
r
|
# Register column names used via non-standard evaluation (e.g. in dplyr or
# ggplot2 calls) so R CMD check does not flag them as undefined globals.
utils::globalVariables(c("public", "support", "value"))
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PropEdge2D.R
\name{PEdom.num}
\alias{PEdom.num}
\title{The domination number of Proportional Edge Proximity Catch Digraph
(PE-PCD) - multiple triangle case}
\usage{
PEdom.num(Xp, Yp, r, M = c(1, 1, 1))
}
\arguments{
\item{Xp}{A set of 2D points
which constitute the vertices of the PE-PCD.}
\item{Yp}{A set of 2D points
which constitute the vertices of the Delaunay triangles.}
\item{r}{A positive real number
which serves as the expansion parameter in PE proximity region;
must be \eqn{\ge 1}.}
\item{M}{A 3D point in barycentric coordinates
which serves as a center in the interior of each Delaunay
triangle or circumcenter of each Delaunay triangle
(for this, argument should be set as \code{M="CC"}),
default for \eqn{M=(1,1,1)}
which is the center of mass of each triangle.}
}
\value{
A \code{list} with three elements
\item{dom.num}{Domination number of the PE-PCD
whose vertices are \code{Xp} points.
PE proximity regions are constructed
with respect to the Delaunay triangles
based on the \code{Yp} points with expansion parameter \eqn{r \ge 1}.}
#\item{mds}{A minimum dominating set of the PE-PCD
whose vertices are \code{Xp} points}
\item{ind.mds}{The vector of data indices of the minimum dominating set
of the PE-PCD whose vertices are \code{Xp} points.}
\item{tri.dom.nums}{The vector of domination numbers
of the PE-PCD components
for the Delaunay triangles.}
}
\description{
Returns the domination number,
indices of a minimum dominating set of PE-PCD whose vertices are the data
points in \code{Xp} in the multiple triangle case
and domination numbers for the Delaunay triangles
based on \code{Yp} points.
PE proximity regions are defined
with respect to the Delaunay triangles based on \code{Yp} points
with expansion parameter \eqn{r \ge 1}
and vertex regions in each triangle are
based on the center \eqn{M=(\alpha,\beta,\gamma)}
in barycentric coordinates
in the interior of each Delaunay triangle or based on
circumcenter of each Delaunay triangle (default for \eqn{M=(1,1,1)}
which is the center of mass of the
triangle). Each Delaunay triangle is first converted to
an (nonscaled) basic triangle so that \code{M} will be the
same type of center for each Delaunay triangle
(this conversion is not necessary when \code{M} is \eqn{CM}).
Convex hull of \code{Yp} is partitioned
by the Delaunay triangles based on \code{Yp} points
(i.e., multiple triangles are the set of these Delaunay triangles
whose union constitutes the
convex hull of \code{Yp} points).
Loops are allowed for the domination number.
See (\insertCite{ceyhan:Phd-thesis,ceyhan:masa-2007,ceyhan:dom-num-NPE-Spat2011,ceyhan:mcap2012;textual}{pcds})
for more on the domination number of PE-PCDs.
Also, see (\insertCite{okabe:2000,ceyhan:comp-geo-2010,sinclair:2016;textual}{pcds})
for more on Delaunay triangulation and
the corresponding algorithm.
}
\examples{
\dontrun{
#nx is number of X points (target) and ny is number of Y points (nontarget)
nx<-20; ny<-5; #try also nx<-40; ny<-10 or nx<-1000; ny<-10;
set.seed(1)
Xp<-cbind(runif(nx,0,1),runif(nx,0,1))
Yp<-cbind(runif(ny,0,.25),
runif(ny,0,.25))+cbind(c(0,0,0.5,1,1),c(0,1,.5,0,1))
#try also Yp<-cbind(runif(ny,0,1),runif(ny,0,1))
M<-c(1,1,1) #try also M<-c(1,2,3)
r<-1.5 #try also r<-2
PEdom.num(Xp,Yp,r,M)
}
}
\references{
\insertAllCited{}
}
\seealso{
\code{\link{PEdom.num.tri}}, \code{\link{PEdom.num.tetra}},
\code{\link{dom.num.exact}}, and \code{\link{dom.num.greedy}}
}
\author{
Elvan Ceyhan
}
|
/man/PEdom.num.Rd
|
no_license
|
elvanceyhan/pcds
|
R
| false
| true
| 3,536
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PropEdge2D.R
\name{PEdom.num}
\alias{PEdom.num}
\title{The domination number of Proportional Edge Proximity Catch Digraph
(PE-PCD) - multiple triangle case}
\usage{
PEdom.num(Xp, Yp, r, M = c(1, 1, 1))
}
\arguments{
\item{Xp}{A set of 2D points
which constitute the vertices of the PE-PCD.}
\item{Yp}{A set of 2D points
which constitute the vertices of the Delaunay triangles.}
\item{r}{A positive real number
which serves as the expansion parameter in PE proximity region;
must be \eqn{\ge 1}.}
\item{M}{A 3D point in barycentric coordinates
which serves as a center in the interior of each Delaunay
triangle or circumcenter of each Delaunay triangle
(for this, argument should be set as \code{M="CC"}),
default for \eqn{M=(1,1,1)}
which is the center of mass of each triangle.}
}
\value{
A \code{list} with three elements
\item{dom.num}{Domination number of the PE-PCD
whose vertices are \code{Xp} points.
PE proximity regions are constructed
with respect to the Delaunay triangles
based on the \code{Yp} points with expansion parameter \eqn{r \ge 1}.}
% \item{mds}{A minimum dominating set of the PE-PCD
% whose vertices are \code{Xp} points}
\item{ind.mds}{The vector of data indices of the minimum dominating set
of the PE-PCD whose vertices are \code{Xp} points.}
\item{tri.dom.nums}{The vector of domination numbers
of the PE-PCD components
for the Delaunay triangles.}
}
\description{
Returns the domination number,
indices of a minimum dominating set of PE-PCD whose vertices are the data
points in \code{Xp} in the multiple triangle case
and domination numbers for the Delaunay triangles
based on \code{Yp} points.
PE proximity regions are defined
with respect to the Delaunay triangles based on \code{Yp} points
with expansion parameter \eqn{r \ge 1}
and vertex regions in each triangle are
based on the center \eqn{M=(\alpha,\beta,\gamma)}
in barycentric coordinates
in the interior of each Delaunay triangle or based on
circumcenter of each Delaunay triangle (default for \eqn{M=(1,1,1)}
which is the center of mass of the
triangle). Each Delaunay triangle is first converted to
a (non-scaled) basic triangle so that \code{M} will be the
same type of center for each Delaunay triangle
(this conversion is not necessary when \code{M} is \eqn{CM}).
Convex hull of \code{Yp} is partitioned
by the Delaunay triangles based on \code{Yp} points
(i.e., multiple triangles are the set of these Delaunay triangles
whose union constitutes the
convex hull of \code{Yp} points).
Loops are allowed for the domination number.
See (\insertCite{ceyhan:Phd-thesis,ceyhan:masa-2007,ceyhan:dom-num-NPE-Spat2011,ceyhan:mcap2012;textual}{pcds})
for more on the domination number of PE-PCDs.
Also, see (\insertCite{okabe:2000,ceyhan:comp-geo-2010,sinclair:2016;textual}{pcds})
for more on Delaunay triangulation and
the corresponding algorithm.
}
\examples{
\dontrun{
#nx is number of X points (target) and ny is number of Y points (nontarget)
nx<-20; ny<-5; #try also nx<-40; ny<-10 or nx<-1000; ny<-10;
set.seed(1)
Xp<-cbind(runif(nx,0,1),runif(nx,0,1))
Yp<-cbind(runif(ny,0,.25),
runif(ny,0,.25))+cbind(c(0,0,0.5,1,1),c(0,1,.5,0,1))
#try also Yp<-cbind(runif(ny,0,1),runif(ny,0,1))
M<-c(1,1,1) #try also M<-c(1,2,3)
r<-1.5 #try also r<-2
PEdom.num(Xp,Yp,r,M)
}
}
\references{
\insertAllCited{}
}
\seealso{
\code{\link{PEdom.num.tri}}, \code{\link{PEdom.num.tetra}},
\code{\link{dom.num.exact}}, and \code{\link{dom.num.greedy}}
}
\author{
Elvan Ceyhan
}
|
# Stacked-bar figure of bacterial order relative abundance (early April).
# NOTE(review): this reads like an interactive-session transcript; several
# steps appear to depend on state from earlier runs (see notes below).
require(ggplot2)
require(tidyr)
require(dplyr)
# Raw table, one row per taxon x sample; `X` holds the taxonomy string.
early <- read.csv("bacteria_order_april_early.csv")
head(early)
early <- subset(early, select=-c(X, total))
head(early)
# Keep taxa at >= 1% relative abundance.
early_filt <- subset(early, rel_abund >=1)
head(early_filt)
# Collapse low-abundance taxa into "other".
# NOTE(review): the boundary value 1 satisfies both `>= 1` above and `<= 1`
# here, so rows at exactly 1% are kept in early_filt AND relabelled "other";
# confirm which side the boundary should fall on.
early$Taxonomy<-ifelse(early$rel_abund <= 1, "other", early$Taxonomy)
early <- subset(early, select=-c(Taxonomy))
early <- merge(early, early_filt, all=TRUE)
write.csv(early, "early_april_filt.csv", row.names=FALSE)
early$Taxonomy[is.na(early$Taxonomy)] <- "other"
# Split the taxonomy string into ranks.
# NOTE(review): column `X` was dropped from `early` before `early_filt` was
# created, so `early_filt` should not have an `X` column at this point — this
# line probably relies on an earlier interactive state; verify.
early_filt <- separate(early_filt, X, c("domain", "phylum", "class", "order"), sep=";", remove=TRUE)
head(early_filt)
early_filt <- subset(early_filt, select=-c(domain, phylum))
# Drop Clostridia before relabelling.
early_filt <- subset(early_filt, class!="c__Clostridia")
# Strip rank prefixes (c__/o__) and build "class;order" labels.
early_filt$class <- sub("c__", "", early_filt$class)
early_filt$order <- sub("o__", "", early_filt$order)
early_filt$Taxonomy <- paste(early_filt$class, early_filt$order, sep=";")
early_filt <- subset(early_filt, select=-c(order, class))
write.csv(early_filt, "bacteria_april_early_filt.csv", row.names=FALSE)
early_filt <- read.csv("bacteria_april_early_filt.csv")
head(early_filt)
# Quick draft plot.
# NOTE(review): aes() uses `Sample`/`RelAbund`, but the data built above used
# `rel_abund` — presumably these columns exist in the CSV on disk; confirm.
p <- ggplot(early_filt, aes(Sample, RelAbund, fill=Taxonomy)) +
geom_bar(stat="identity") +
theme_classic()
print(p)
?geom_bar
# Final figure from the merged/filtered table.
april <- read.csv("early_april_filt.csv")
head(april)
april <- separate(april, Taxonomy, c("domain", "phylum", "class", "order"), sep=";", remove=TRUE)
april <- subset(april, select=-c(domain, phylum))
april$class <- sub("c__", "", april$class)
april$order <- sub("o__", "", april$order)
april$Taxonomy <- paste(april$class, april$order, sep=";")
head(april)
april <- subset(april, select=-c(class, order))
# One manually chosen colour per taxonomy level.
p <- ggplot(april, aes(Sample, RelAbund, fill=Taxonomy)) +
geom_bar(stat="identity") +
ylab("Relative Abundance") +
xlab("Time Point") +
scale_fill_manual(values=c("pink","#EE3E80", "red", "#FF8200","#FF9966",
"gold2", "yellow", "palegreen", "lawngreen",
"springgreen3", "darkgreen", "skyblue4", "navyblue",
"royalblue2", "darkturquoise", "violet", "darkmagenta", "deeppink4",
"rosybrown4")) +
theme_classic() +
theme(text = element_text(size=20))
print(p)
# NOTE(review): levels() returns NULL on a character column after read.csv
# (R >= 4.0 no longer auto-creates factors); confirm this check is needed.
levels(april$Taxonomy)
str(april)
# Repair a label that picked up a stray ';' from the paste() above.
april$Taxonomy[april$Taxonomy=="Under 1% of ;the community"] <- "Under 1% of the community"
|
/Filtration_Fig1_stackbar.R
|
no_license
|
lnmquigley/visualizations_JGI_GrovesCreek
|
R
| false
| false
| 2,400
|
r
|
require(ggplot2)
require(tidyr)
require(dplyr)
early <- read.csv("bacteria_order_april_early.csv")
head(early)
early <- subset(early, select=-c(X, total))
head(early)
early_filt <- subset(early, rel_abund >=1)
head(early_filt)
early$Taxonomy<-ifelse(early$rel_abund <= 1, "other", early$Taxonomy)
early <- subset(early, select=-c(Taxonomy))
early <- merge(early, early_filt, all=TRUE)
write.csv(early, "early_april_filt.csv", row.names=FALSE)
early$Taxonomy[is.na(early$Taxonomy)] <- "other"
early_filt <- separate(early_filt, X, c("domain", "phylum", "class", "order"), sep=";", remove=TRUE)
head(early_filt)
early_filt <- subset(early_filt, select=-c(domain, phylum))
early_filt <- subset(early_filt, class!="c__Clostridia")
early_filt$class <- sub("c__", "", early_filt$class)
early_filt$order <- sub("o__", "", early_filt$order)
early_filt$Taxonomy <- paste(early_filt$class, early_filt$order, sep=";")
early_filt <- subset(early_filt, select=-c(order, class))
write.csv(early_filt, "bacteria_april_early_filt.csv", row.names=FALSE)
early_filt <- read.csv("bacteria_april_early_filt.csv")
head(early_filt)
p <- ggplot(early_filt, aes(Sample, RelAbund, fill=Taxonomy)) +
geom_bar(stat="identity") +
theme_classic()
print(p)
?geom_bar
april <- read.csv("early_april_filt.csv")
head(april)
april <- separate(april, Taxonomy, c("domain", "phylum", "class", "order"), sep=";", remove=TRUE)
april <- subset(april, select=-c(domain, phylum))
april$class <- sub("c__", "", april$class)
april$order <- sub("o__", "", april$order)
april$Taxonomy <- paste(april$class, april$order, sep=";")
head(april)
april <- subset(april, select=-c(class, order))
p <- ggplot(april, aes(Sample, RelAbund, fill=Taxonomy)) +
geom_bar(stat="identity") +
ylab("Relative Abundance") +
xlab("Time Point") +
scale_fill_manual(values=c("pink","#EE3E80", "red", "#FF8200","#FF9966",
"gold2", "yellow", "palegreen", "lawngreen",
"springgreen3", "darkgreen", "skyblue4", "navyblue",
"royalblue2", "darkturquoise", "violet", "darkmagenta", "deeppink4",
"rosybrown4")) +
theme_classic() +
theme(text = element_text(size=20))
print(p)
levels(april$Taxonomy)
str(april)
april$Taxonomy[april$Taxonomy=="Under 1% of ;the community"] <- "Under 1% of the community"
|
#' Plot 1: histogram of Global Active Power for 2007-02-01/02.
#'
#' Reads the full household power-consumption file, keeps the two target
#' dates, and writes a 480x480 histogram PNG.
#'
#' @param fileIn  Path to the semicolon-separated source file
#'   (default preserves the original hard-coded location).
#' @param fileOut Path of the PNG to create (default preserves the original).
#' @return Invisibly, the result of `hist()`; called for its file side effect.
plot1 <- function(fileIn = "C:\\Users\\monicabm\\Documents\\machine_learning_class\\data_science\\data\\household_power_consumption.txt",
                  fileOut = "C:\\Users\\monicabm\\Documents\\machine_learning_class\\data_science\\results\\plot1.png") {
  # Read the entire data set; '?' marks missing values in this file.
  tableAll <- read.csv(fileIn, header = TRUE, sep = ';', na.strings = "?",
                       nrows = 2075259, check.names = FALSE,
                       stringsAsFactors = FALSE, comment.char = "", quote = '\"')
  # Dates are stored as day/month/year; parse before filtering.
  tableAll$Date <- as.Date(tableAll$Date, format = "%d/%m/%Y")
  # Keep only the two days of interest.
  dataToUse <- tableAll[tableAll$Date == "2007-02-01" | tableAll$Date == "2007-02-02", ]
  # Open the PNG device; on.exit guarantees it is closed even if hist() errors.
  png(fileOut, width = 480, height = 480)
  on.exit(dev.off(), add = TRUE)
  hist(dataToUse$Global_active_power, main = "Global Active Power",
       col = "red", xlab = "Global Active Power (kilowatts)",
       ylim = c(0, 1200))
}
|
/plot1.R
|
no_license
|
monicabm/ExData_Plotting1
|
R
| false
| false
| 881
|
r
|
plot1 <- function(){
#read the entire data
fileIn <- "C:\\Users\\monicabm\\Documents\\machine_learning_class\\data_science\\data\\household_power_consumption.txt"
tableAll <- read.csv(fileIn, header=T, sep=';', na.strings="?", nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
#reformat the date
tableAll$Date <- as.Date(tableAll$Date , format="%d/%m/%Y")
#subset the data
dataToUse <- tableAll[tableAll$Date == "2007-02-01" | tableAll$Date == "2007-02-02", ]
#set png file for writing
fileOut = "C:\\Users\\monicabm\\Documents\\machine_learning_class\\data_science\\results\\plot1.png"
png(fileOut, width=480, height=480)
#plot
lim <- c(0,1200)
hist(dataToUse$Global_active_power,main = paste("Global Active Power"), col="red", xlab="Global Active Power (kilowatts)", ylim=lim)
#close device
dev.off()
}
|
# Install any missing packages before loading them ----------------------
# NOTE(review): require()+install.packages at startup silently installs
# software at run time; fine for a classroom app, but consider a documented
# setup step or renv for reproducibility.
if(!require("DT")) install.packages("DT")
if(!require("shiny")) install.packages("shiny")
if(!require("leaflet")) install.packages("leaflet")
if(!require("tidyverse")) install.packages("tidyverse")
if(!require("geosphere")) install.packages("geosphere")
library(DT)
library(shiny)
library(leaflet)
library(tidyverse)
library(geosphere)
# Load shelter data in the background ----------------------------------------------------
# `dta`: cleaned shelter table (one row per shelter); the `num` column is dropped.
dta <- readRDS('../rds/data_clean.rds') %>%
select(-num)
# Longitude/latitude matrix reused by the vectorized distance computation below.
mtx_coord <- dta %>%
select(long, lat) %>%
as.matrix()
# Distance helper "calcula_dist" ------------------------------------------------------------
# Return the `nrows` shelters nearest to (long, lat), with their
# great-circle distance (metres) to the query point. Relies on the
# file-level `dta` table and `mtx_coord` coordinate matrix.
calcula_dist <- function(long, lat, nrows = 6) {
  punto_consulta <- c(long, lat)
  dta %>%
    mutate(distancia = distCosine(punto_consulta, mtx_coord)) %>%
    arrange(distancia) %>%
    # Columns shown to the user; drop duplicate coordinates, keep first row.
    select(refugio, municipio, direccion, tel, lat, long, distancia) %>%
    distinct(lat, long, .keep_all = TRUE) %>%
    head(nrows)
}
# Render `df` as a Spanish-localized DT datatable.
#
# Coordinate columns are rounded to 4 decimals; when `contains_dist` is TRUE
# the `distancia` column is additionally rounded to whole metres.
gen_tabla <- function(df, contains_dist = FALSE) {
  tabla <- DT::datatable(
    df %>% as_tibble(),
    # `escape` is an argument of datatable(), not a DataTables option; in the
    # original it sat inside `options` where the JS library ignores it, so the
    # effective value was the default TRUE — made explicit and moved here.
    escape = TRUE,
    options = list(
      pageLength = 10,
      language = list(url = '//cdn.datatables.net/plug-ins/1.10.11/i18n/Spanish.json'),
      autoWidth = TRUE,
      scrollX = TRUE)) %>%
    DT::formatRound(c("lat", "long"), 4)
  if (contains_dist) tabla <- tabla %>% DT::formatRound(c("distancia"), 0)
  tabla
}
# Creamos el mapa --------------------------------------------------------------------
# Build the base leaflet map: default tiles plus one awesome-marker per
# shelter in `df`; clicking a marker pops up the shelter name (`refugio`).
crea_mapa_base <- function(df) {
  leaflet() %>%
    addTiles() %>%
    addAwesomeMarkers(
      lng = ~long, lat = ~lat, data = df, popup = ~refugio,
      icon = awesomeIcons(),
      popupOptions = popupOptions(closeOnClick = TRUE)
    )
}
# Overlay a red circle marking the user's position on an existing map.
addUserMarker <- function(mapa_base, long = -105.1, lat = 22.5) {
  mapa_base %>%
    addCircleMarkers(lng = long, lat = lat, radius = 15, color = "red",
                     popup = "User Input")
}
# Overlay green circles on the `n_closest` shelters nearest to (long, lat).
addClosestMarkers <- function(mapa_base, long = -105.1, lat = 22.5, n_closest = 6) {
  refugios_cercanos <- calcula_dist(long, lat, n_closest)
  mapa_base %>%
    addCircleMarkers(lng = ~long, lat = ~lat, data = refugios_cercanos,
                     radius = 15, color = "green")
}
# Compose the full map: base shelter map, user-position marker, and the
# n_closest nearest shelters highlighted.
crea_mapa_closest <- function(df, long, lat, n_closest) {
  mapa <- crea_mapa_base(df)
  mapa <- addUserMarker(mapa, long, lat)
  addClosestMarkers(mapa, long, lat, n_closest)
}
|
/proyectos/RespuestaDesastre/equipo_JMM/shiny/global.R
|
no_license
|
mhnk77/Estadistica-Computacional-fall2021
|
R
| false
| false
| 2,774
|
r
|
# Verifica si se requiere instalar los paquetes ----------------------
if(!require("DT")) install.packages("DT")
if(!require("shiny")) install.packages("shiny")
if(!require("leaflet")) install.packages("leaflet")
if(!require("tidyverse")) install.packages("tidyverse")
if(!require("geosphere")) install.packages("geosphere")
library(DT)
library(shiny)
library(leaflet)
library(tidyverse)
library(geosphere)
# Cargar en background ----------------------------------------------------
dta <- readRDS('../rds/data_clean.rds') %>%
select(-num)
mtx_coord <- dta %>%
select(long, lat) %>%
as.matrix()
# Ingresamos la función de calcular distancia "calcula_dist" ------------------------------------------------------------
calcula_dist <- function(long,lat,nrows = 6){
input_point <- c(long, lat)
dta_dist <- dta %>%
mutate(distancia = distCosine(input_point, mtx_coord)) %>%
arrange(distancia) %>%
select(refugio,municipio, direccion, tel, lat, long, distancia) %>% #por definir info a mostrar
distinct(lat, long, .keep_all = TRUE) %>%
head(nrows) #por definir num de renglones a mostrar
dta_dist
}
gen_tabla <- function(df, contains_dist = FALSE){
tabla <- DT::datatable(df %>%
as_tibble(),
options = list(
pageLength = 10,
language = list(url = '//cdn.datatables.net/plug-ins/1.10.11/i18n/Spanish.json'),
autoWidth = TRUE,
scrollX = TRUE,
escape = T)) %>%
DT::formatRound(c("lat", "long"), 4)
if(contains_dist) tabla <- tabla %>% DT::formatRound(c("distancia"),0)
tabla
}
# Creamos el mapa --------------------------------------------------------------------
crea_mapa_base <- function(df){
m <- leaflet() %>%
addTiles() %>%
addAwesomeMarkers(lng = ~long, lat = ~lat, data = df, popup = ~refugio,
icon = awesomeIcons(),
popupOptions = popupOptions(closeOnClick = TRUE))
m
}
addUserMarker <- function(mapa_base, long = -105.1, lat = 22.5){
m <- mapa_base %>%
addCircleMarkers(lng = long, lat = lat, radius = 15, color = "red",
popup = "User Input")
m
}
addClosestMarkers <- function(mapa_base, long = -105.1, lat = 22.5, n_closest = 6){
df_closest <- calcula_dist(long, lat, n_closest)
m <- mapa_base %>%
addCircleMarkers(lng = ~long, lat = ~lat, data = df_closest, radius = 15, color = "green")
m
}
crea_mapa_closest <- function(df, long, lat, n_closest){
df %>%
crea_mapa_base() %>%
addUserMarker(long, lat) %>%
addClosestMarkers(long, lat, n_closest)
}
|
library(data.table)
# Shared fixtures for MOD13A1-based tests: the first site of the package's
# sample data, prepared for check_input().
data("MOD13A1")
dt <- tidy_MOD13.gee(MOD13A1$dt)
st <- MOD13A1$st
sitename <- dt$site[1]
d <- dt[site == sitename, ]  # first site's vegetation-index series
sp <- st[site == sitename, ] # matching station point
# Global parameters shared by the tests.
# T/F replaced by TRUE/FALSE: the single-letter aliases are ordinary
# variables and can be reassigned, silently flipping these flags.
IsPlot <- TRUE
print <- FALSE # NOTE(review): shadows base::print in the test env; consider renaming
nptperyear <- 23   # MOD13A1 is a 16-day composite -> 23 points per year
ypeak_min <- 0.05
dnew <- add_HeadTail(d) # pad one year at head and tail
INPUT <- check_input(dnew$t, dnew$y, dnew$w, maxgap = nptperyear/4, alpha = 0.02, wmin = 0.2)
INPUT$y0 <- dnew$y # raw series kept for visualization
|
/tests/testthat/helper_MOD13A1.R
|
permissive
|
hgbzzw/phenofit
|
R
| false
| false
| 516
|
r
|
library(data.table)
data("MOD13A1")
## test common used data
dt <- tidy_MOD13.gee(MOD13A1$dt)
st <- MOD13A1$st
sitename <- dt$site[1]
d <- dt[site == sitename, ] # get the first site data
sp <- st[site == sitename, ] # station point
# global parameter
IsPlot = T
print = F
nptperyear = 23
ypeak_min = 0.05
dnew <- add_HeadTail(d) # add one year in head and tail
INPUT <- check_input(dnew$t, dnew$y, dnew$w, maxgap = nptperyear/4, alpha = 0.02, wmin = 0.2)
INPUT$y0 <- dnew$y # for visualization
|
#' Classification statistics and table
#'
#' Produces a classification table and statistics given a binary response model.
#' @param model The regression model that was stored prior
#' @param dep.var The observed dependent variable (with data frame as prefix, "df$dep.var")
#' @param prob_cut Cut-off point at which the predicted probabilities are coded
#'   binary (0,1). Usually 0.5 is used to indicate >=0.5 as 1 and <0.5 as 0
#' @return A list: the 2x2 classification table followed by eight named
#'   statistics (TPR, TNR, FNR, FPR, PPV, FDR, accuracy, adjusted R²-count)
#' @export
estat_class <- function(model, dep.var, prob_cut){
  ## Refit with na.exclude so predictions keep the original row positions
  ## (rows dropped for missingness become NA instead of disappearing).
  estat_class_model <- update(model, na.action = na.exclude)
  yhat <- predict(estat_class_model, type = "response")
  ## Dichotomize predicted probabilities at the cut point.
  predictions <- ifelse(yhat < prob_cut, 0, 1)
  ## Fix both levels so the table stays 2x2 even when every prediction falls
  ## on one side of the cut-off; without this, the dimname assignments below
  ## would error on a 1-row table.
  predictions <- factor(predictions, levels = c(0, 1))
  # Classification table (rows = predicted, columns = observed)
  class_1 <- as.matrix(table(predictions, dep.var))
  rownames(class_1) <- c("Predic. 0", "Predic. 1")
  colnames(class_1) <- c("True 0", "True 1")
  # Sensitivity: TP / (TP + FN)
  class_2 <- (class_1[2,2]/(class_1[2,2]+class_1[1,2]))*100
  names(class_2) <- "Sensitivity or true positive rate (TPR) %"
  # Specificity: TN / (TN + FP)
  class_3 <- (class_1[1,1]/(class_1[1,1]+class_1[2,1]))*100
  names(class_3) <- "Specificity or true negative rate (TNR) %"
  # Miss rate = 100 - sensitivity
  class_4 <- (class_1[1,2]/(class_1[2,2]+class_1[1,2]))*100
  names(class_4) <- "Miss rate or false negative rate (FNR) %"
  # Fall-out = 100 - specificity
  class_5 <- (class_1[2,1]/(class_1[1,1]+class_1[2,1]))*100
  names(class_5) <- "Fall-out or false positive rate (FPR) %"
  # Precision: TP / (TP + FP)
  class_6 <- (class_1[2,2]/(class_1[2,2]+class_1[2,1]))*100
  names(class_6) <- "Precision or positive predictive value (PPV) %"
  # False discovery rate = 100 - precision
  class_7 <- 100 - class_6
  names(class_7) <- "False discovery rate (FDR) %"
  # Accuracy / R²-count: share correctly classified
  class_8 <- ((class_1[1,1]+class_1[2,2])/sum(class_1))*100
  names(class_8) <- "R²-Count or accuracy (ACC) %"
  # Adjusted R²-count: improvement over always predicting the modal outcome
  class_9 <- (((class_1[1,1]+class_1[2,2])-max(colSums(class_1)))/((sum(class_1))-max(colSums(class_1))))*100
  names(class_9) <- "Adj. R²-Count % (Long 1997: 108)"
  list(class_1, class_2, class_3, class_4, class_5, class_6, class_7,
       class_8, class_9)
}
#' Classification statistics and table
#'
#' Produces an extended classification table and statistics given a binary response model.
#' @param model The regression model that was stored prior
#' @param dep.var The observed dependent variable (with data frame as prefix, "df$dep.var")
#' @param prob_cut Cut-off point at which the predicted probabilities are coded
#'   binary (0,1). Usually 0.5 is used to indicate >=0.5 as 1 and <0.5 as 0
#' @return A list: the 2x2 classification table followed by fourteen named
#'   statistics (TPR, TNR, FNR, FPR, PPV, FDR, accuracy, adjusted R²-count,
#'   F1, balanced accuracy, MCC, Fowlkes-Mallows, informedness, markedness)
#' @export
extat_class <- function(model, dep.var, prob_cut){
  ## Refit with na.exclude so predictions keep the original row positions.
  estat_class_model <- update(model, na.action = na.exclude)
  yhat <- predict(estat_class_model, type = "response")
  ## Dichotomize predicted probabilities at the cut point.
  predictions <- ifelse(yhat < prob_cut, 0, 1)
  ## Fix both levels so the table stays 2x2 even when every prediction falls
  ## on one side of the cut-off; otherwise the dimname assignments below fail.
  predictions <- factor(predictions, levels = c(0, 1))
  # Classification table (rows = predicted, columns = observed)
  class_1 <- as.matrix(table(predictions, dep.var))
  rownames(class_1) <- c("Predic. 0", "Predic. 1")
  colnames(class_1) <- c("True 0", "True 1")
  # Sensitivity: TP / (TP + FN)
  class_2 <- (class_1[2,2]/(class_1[2,2]+class_1[1,2]))*100
  names(class_2) <- "Sensitivity or true positive rate (TPR) %"
  # Specificity: TN / (TN + FP)
  class_3 <- (class_1[1,1]/(class_1[1,1]+class_1[2,1]))*100
  names(class_3) <- "Specificity or true negative rate (TNR) %"
  # Miss rate = 100 - sensitivity
  class_4 <- (class_1[1,2]/(class_1[2,2]+class_1[1,2]))*100
  names(class_4) <- "miss rate or false negative rate (FNR) %"
  # Fall-out = 100 - specificity
  class_5 <- (class_1[2,1]/(class_1[1,1]+class_1[2,1]))*100
  names(class_5) <- "fall-out or false positive rate (FPR) %"
  # Precision: TP / (TP + FP)
  class_6 <- (class_1[2,2]/(class_1[2,2]+class_1[2,1]))*100
  names(class_6) <- "Precision or positive predictive value (PPV) %"
  # False discovery rate = 100 - precision
  class_7 <- 100 - class_6
  names(class_7) <- "false discovery rate (FDR) %"
  # Accuracy / R²-count: share correctly classified
  class_8 <- ((class_1[1,1]+class_1[2,2])/sum(class_1))*100
  names(class_8) <- "R²-Count or accuracy (ACC) %"
  # Adjusted R²-count: improvement over always predicting the modal outcome
  class_9 <- (((class_1[1,1]+class_1[2,2])-max(colSums(class_1)))/((sum(class_1))-max(colSums(class_1))))*100
  names(class_9) <- "Adj. R²-Count % (Long 1997: 108)"
  # F1: harmonic mean of precision and sensitivity
  class_10 <- 2*((class_6*class_2)/(class_6+class_2))
  names(class_10) <- "F1 score"
  # Balanced accuracy: mean of sensitivity and specificity
  class_11 <- (class_2 + class_3)/2
  names(class_11) <- "balanced accuracy (BA) or balanced R²-Count %"
  # MCC; as.numeric() avoids integer overflow in the products
  class_12 <- (as.numeric(class_1[1,1]*class_1[2,2])-as.numeric(class_1[1,2]*class_1[2,1]))/sqrt(as.numeric(class_1[1,1]+class_1[1,2])*as.numeric(class_1[1,1]+class_1[2,1])*as.numeric(class_1[2,2]+class_1[1,2])*as.numeric(class_1[2,2]+class_1[2,1]))
  names(class_12) <- "Matthews correlation coefficient (MCC)"
  # Fowlkes-Mallows: geometric mean of precision and sensitivity
  class_13 <- sqrt(class_6*class_2)
  names(class_13) <- "Fowlkes–Mallows index (FM)"
  # Informedness: TPR + TNR - 100
  class_14 <- class_2 + class_3 - 100
  names(class_14) <- "informedness or bookmaker informedness (BM)"
  # Markedness: PPV + NPV - 100
  class_15 <- class_6 + ((class_1[1,1]/(class_1[1,1]+class_1[1,2]))*100) - 100
  names(class_15) <- "markedness (MK) or deltaP"
  list(class_1, class_2, class_3, class_4, class_5, class_6, class_7,
       class_8, class_9, class_10, class_11, class_12, class_13, class_14,
       class_15)
}
|
/R/estatclass.R
|
no_license
|
nader-hotait/estatclass
|
R
| false
| false
| 6,292
|
r
|
#' Classification statistics and table
#'
#' Produces a classification table and statistics given a binary response model.
#' @param model The regression model that was stored prior
#' @param dep.var The observed dependent variable (with data frame as prefix, "df$dep.var")
#' @param prob_cut cut-off point at which the predicted probabilities should be coded binary (0,1). Usually 0.5 is used to indicate >0.5 as 1 and <0.5 as 0
#' @return Different class-values and a list of them
#' @export
estat_class <- function(model, dep.var, prob_cut){
## Predicting yhat whilst dealing with MV
estat_class_model <- update(model,na.action=na.exclude)
yhat <- predict(estat_class_model, type = "response")
## Creating indicator variable for yhat at cut point
predictions <- ifelse(yhat<prob_cut, 0, 1)
## Generating statistics
# Classification table
class_1 <- as.matrix(table(predictions, dep.var))
rownames(class_1) <- c("Predic. 0", "Predic. 1")
colnames(class_1) <- c("True 0", "True 1")
# Sensitivity (true positives)
class_2 <- (class_1[2,2]/(class_1[2,2]+class_1[1,2]))*100
names(class_2) <- "Sensitivity or true positive rate (TPR) %"
# Specificity (true negatives)
class_3 <- (class_1[1,1]/(class_1[1,1]+class_1[2,1]))*100
names(class_3) <- "Specificity or true negative rate (TNR) %"
# False Positives // Einfacher 100 - Sensitivity
class_4 <- (class_1[1,2]/(class_1[2,2]+class_1[1,2]))*100
names(class_4) <- "Miss rate or false negative rate (FNR) %"
# False Negatives // Einfacher 100 - Specificity
class_5 <- (class_1[2,1]/(class_1[1,1]+class_1[2,1]))*100
names(class_5) <- "Fall-out or false positive rate (FPR) %"
# Precision or positive predictive value (PPV) // Einfacher 100 - Specificity
class_6<- (class_1[2,2]/(class_1[2,2]+class_1[2,1]))*100
names(class_6) <- "Precision or positive predictive value (PPV) %"
# False Negatives // Einfacher 100 - Specificity
class_7 <- 100 - class_6
names(class_7) <- "False discovery rate (FDR) %"
# R²-Count - Correctly Classified or accuracy (ACC)
class_8 <- ((class_1[1,1]+class_1[2,2])/sum(class_1))*100
names(class_8) <- "R²-Count or accuracy (ACC) %"
# Adjusted R²-Count - Correctly Classified
class_9 <- (((class_1[1,1]+class_1[2,2])-max(colSums(class_1)))/((sum(class_1))-max(colSums(class_1))))*100
names(class_9) <- "Adj. R²-Count % (Long 1997: 108)"
estat_classification <- list(class_1,class_2,class_3, class_4, class_5, class_6, class_7, class_8, class_9)
estat_classification
}
#' Classification statistics and table
#'
#' Produces an extended classification table and statistics given a binary response model.
#' @param model The regression model that was stored prior
#' @param dep.var The observed dependent variable (with data frame as prefix, "df$dep.var")
#' @param prob_cut cut-off point at which the predicted probabilities should be coded binary (0,1). Usually 0.5 is used to indicate >0.5 as 1 and <0.5 as 0
#' @return Different class-values and a list of them
#' @export
extat_class <- function(model, dep.var, prob_cut){
## Predicting yhat whilst dealing with MV
estat_class_model <- update(model,na.action=na.exclude)
yhat <- predict(estat_class_model, type = "response")
## Creating indicator variable for yhat at cut point
predictions <- ifelse(yhat<prob_cut, 0, 1)
## Generating statistics
# Classification table
class_1 <- as.matrix(table(predictions, dep.var))
rownames(class_1) <- c("Predic. 0", "Predic. 1")
colnames(class_1) <- c("True 0", "True 1")
# Sensitivity (true positives)
class_2 <- (class_1[2,2]/(class_1[2,2]+class_1[1,2]))*100
names(class_2) <- "Sensitivity or true positive rate (TPR) %"
# Specificity (true negatives)
class_3 <- (class_1[1,1]/(class_1[1,1]+class_1[2,1]))*100
names(class_3) <- "Specificity or true negative rate (TNR) %"
# False Positives // Einfacher 100 - Sensitivity
class_4 <- (class_1[1,2]/(class_1[2,2]+class_1[1,2]))*100
names(class_4) <- "miss rate or false negative rate (FNR) %"
# False Negatives // Einfacher 100 - Specificity
class_5 <- (class_1[2,1]/(class_1[1,1]+class_1[2,1]))*100
names(class_5) <- "fall-out or false positive rate (FPR) %"
# Precision or positive predictive value (PPV)
class_6<- (class_1[2,2]/(class_1[2,2]+class_1[2,1]))*100
names(class_6) <- "Precision or positive predictive value (PPV) %"
# False Negatives // Einfacher 100 - Specificity
class_7 <- 100 - class_6
names(class_7) <- "false discovery rate (FDR) %"
# R²-Count - Correctly Classified or accuracy (ACC)
class_8 <- ((class_1[1,1]+class_1[2,2])/sum(class_1))*100
names(class_8) <- "R²-Count or accuracy (ACC) %"
# Adjusted R²-Count - Correctly Classified
class_9 <- (((class_1[1,1]+class_1[2,2])-max(colSums(class_1)))/((sum(class_1))-max(colSums(class_1))))*100
names(class_9) <- "Adj. R²-Count % (Long 1997: 108)"
# F1 score
class_10 <- 2*((class_6*class_2)/(class_6+class_2))
names(class_10) <- "F1 score"
# balanced accuracy (BA) or balanced R²-Count %
class_11 <- (class_2 + class_3)/2
names(class_11) <- "balanced accuracy (BA) or balanced R²-Count %"
# Matthews correlation coefficient (MCC)
class_12 <- (as.numeric(class_1[1,1]*class_1[2,2])-as.numeric(class_1[1,2]*class_1[2,1]))/sqrt(as.numeric(class_1[1,1]+class_1[1,2])*as.numeric(class_1[1,1]+class_1[2,1])*as.numeric(class_1[2,2]+class_1[1,2])*as.numeric(class_1[2,2]+class_1[2,1]))
names(class_12) <- "Matthews correlation coefficient (MCC)"
# Fowlkes–Mallows index (FM)
class_13 <- sqrt(class_6*class_2)
names(class_13) <- "Fowlkes–Mallows index (FM)"
# informedness or bookmaker informedness (BM)
class_14 <- class_2 + class_3 - 100
names(class_14) <- "informedness or bookmaker informedness (BM)"
# markedness (MK) or deltaP
class_15 <- class_6 + ((class_1[1,1]/(class_1[1,1]+class_1[1,2]))*100) - 100
names(class_15) <- "markedness (MK) or deltaP"
extat_classification <- list(class_1,class_2,class_3, class_4, class_5, class_6, class_7, class_8, class_9, class_10, class_11, class_12, class_13, class_14, class_15)
extat_classification
}
|
# @file MethodEvaluation.R
#
# Copyright 2017 Observational Health Data Sciences and Informatics
#
# This file is part of MethodEvaluation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' MethodEvaluation
#'
#' @docType package
#' @name MethodEvaluation
#' @importFrom SqlRender loadRenderTranslateSql translateSql
#' @importFrom grDevices rgb
#' @importFrom stats aggregate coef pnorm predict qnorm quantile rexp rpois
#' @importFrom utils write.csv
#' @import Cyclops
#' @import DatabaseConnector
#' @import FeatureExtraction
NULL
#' The OMOP reference set
#' A reference set of 165 drug-outcome pairs where we believe the drug causes the outcome ( positive
#' controls) and 234 drug-outcome pairs where we believe the drug does not cause the outcome (negative
#' controls). The controls involve 4 health outcomes of interest: acute liver injury, acute kidney
#' injury, acute myocardial infarction, and GI bleeding.
#'
#' @docType data
#' @keywords datasets
#' @name omopReferenceSet
#' @usage
#' data(omopReferenceSet)
#' @format
#' A data frame with 399 rows and 10 variables: \describe{ \item{exposureId}{Concept ID
#' identifying the exposure} \item{exposureName}{Name of the exposure}
#' \item{outcomeId}{Concept ID identifying the outcome} \item{outcomeName}{Name of the
#' outcome} \item{groundTruth}{0 = negative control, 1 = positive control}
#' \item{indicationId}{Concept Id identifying the (primary) indication of the drug. To be used
#' when one wants to nest the analysis within the indication} \item{indicationName}{Name of the
#' indication} \item{comparatorId}{Concept ID identifying a comparator drug that can be
#' used as a counterfactual} \item{comparatorName}{Name of the comparator drug}
#' \item{comparatorType}{How the comparator was selected} }
#' @references
#' Ryan PB, Schuemie MJ, Welebob E, Duke J, Valentine S, Hartzema AG. Defining a reference set to
#' support methodological research in drug safety. Drug Safety 36 Suppl 1:S33-47, 2013
NULL
#' The EU-ADR reference set
#'
#' A reference set of 43 drug-outcome pairs where we believe the drug causes the outcome (
#' positive controls) and 50 drug-outcome pairs where we believe the drug does not cause the
#' outcome (negative controls). The controls involve 10 health outcomes of interest. Note that
#' originally, there was an additional positive control (Nimesulide and acute liver injury), but
#' Nimesulide is not in RxNorm, and is not available in many countries.
#'
#' @docType data
#' @keywords datasets
#' @name euadrReferenceSet
#' @usage
#' data(euadrReferenceSet)
#' @format
#' A data frame with 93 rows and 10 variables: \describe{ \item{exposureId}{Concept ID
#' identifying the exposure} \item{exposureName}{Name of the exposure}
#' \item{outcomeId}{Concept ID identifying the outcome} \item{outcomeName}{Name of the
#' outcome} \item{groundTruth}{0 = negative control, 1 = positive control}
#' \item{indicationId}{Concept Id identifying the (primary) indication of the drug. To be used
#' when one wants to nest the analysis within the indication} \item{indicationName}{Name of the
#' indication} \item{comparatorId}{Concept ID identifying a comparator drug that can be
#' used as a counterfactual} \item{comparatorName}{Name of the comparator drug}
#' \item{comparatorType}{How the comparator was selected} }
#' @references
#' Coloma PM, Avillach P, Salvo F, Schuemie MJ, Ferrajolo C, Pariente A, Fourrier-Reglat A, Molokhia
#' M, Patadia V, van der Lei J, Sturkenboom M, Trifiro G. A reference standard for evaluation of
#' methods for drug safety signal detection using electronic healthcare record databases. Drug Safety
#' 36(1):13-23, 2013
NULL
|
/R/MethodEvaluation.R
|
permissive
|
solie/MethodEvaluation
|
R
| false
| false
| 4,251
|
r
|
# @file MethodEvaluation.R
#
# Copyright 2017 Observational Health Data Sciences and Informatics
#
# This file is part of MethodEvaluation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' MethodEvaluation
#'
#' @docType package
#' @name MethodEvaluation
#' @importFrom SqlRender loadRenderTranslateSql translateSql
#' @importFrom grDevices rgb
#' @importFrom stats aggregate coef pnorm predict qnorm quantile rexp rpois
#' @importFrom utils write.csv
#' @import Cyclops
#' @import DatabaseConnector
#' @import FeatureExtraction
NULL
#' The OMOP reference set
#'
#' A reference set of 165 drug-outcome pairs where we believe the drug causes the outcome ( positive
#' controls) and 234 drug-outcome pairs where we believe the drug does not cause the outcome (negative
#' controls). The controls involve 4 health outcomes of interest: acute liver injury, acute kidney
#' injury, acute myocardial infarction, and GI bleeding.
#'
#' @docType data
#' @keywords datasets
#' @name omopReferenceSet
#' @usage
#' data(omopReferenceSet)
#' @format
#' A data frame with 399 rows and 10 variables: \describe{ \item{exposureId}{Concept ID
#' identifying the exposure} \item{exposureName}{Name of the exposure}
#' \item{outcomeId}{Concept ID identifying the outcome} \item{outcomeName}{Name of the
#' outcome} \item{groundTruth}{0 = negative control, 1 = positive control}
#' \item{indicationId}{Concept Id identifying the (primary) indication of the drug. To be used
#' when one wants to nest the analysis within the indication} \item{indicationName}{Name of the
#' indication} \item{comparatorId}{Concept ID identifying a comparator drug that can be
#' used as a counterfactual} \item{comparatorName}{Name of the comparator drug}
#' \item{comparatorType}{How the comparator was selected} }
#' @references
#' Ryan PB, Schuemie MJ, Welebob E, Duke J, Valentine S, Hartzema AG. Defining a reference set to
#' support methodological research in drug safety. Drug Safety 36 Suppl 1:S33-47, 2013
NULL
#' The EU-ADR reference set
#'
#' A reference set of 43 drug-outcome pairs where we believe the drug causes the outcome (
#' positive controls) and 50 drug-outcome pairs where we believe the drug does not cause the
#' outcome (negative controls). The controls involve 10 health outcomes of interest. Note that
#' originally, there was an additional positive control (Nimesulide and acute liver injury), but
#' Nimesulide is not in RxNorm, and is not available in many countries.
#'
#' @docType data
#' @keywords datasets
#' @name euadrReferenceSet
#' @usage
#' data(euadrReferenceSet)
#' @format
#' A data frame with 93 rows and 10 variables: \describe{ \item{exposureId}{Concept ID
#' identifying the exposure} \item{exposureName}{Name of the exposure}
#' \item{outcomeId}{Concept ID identifying the outcome} \item{outcomeName}{Name of the
#' outcome} \item{groundTruth}{0 = negative control, 1 = positive control}
#' \item{indicationId}{Concept Id identifying the (primary) indication of the drug. To be used
#' when one wants to nest the analysis within the indication} \item{indicationName}{Name of the
#' indication} \item{comparatorId}{Concept ID identifying a comparator drug that can be
#' used as a counterfactual} \item{comparatorName}{Name of the comparator drug}
#' \item{comparatorType}{How the comparator was selected} }
#' @references
#' Coloma PM, Avillach P, Salvo F, Schuemie MJ, Ferrajolo C, Pariente A, Fourrier-Reglat A, Molokhia
#' M, Patadia V, van der Lei J, Sturkenboom M, Trifiro G. A reference standard for evaluation of
#' methods for drug safety signal detection using electronic healthcare record databases. Drug Safety
#' 36(1):13-23, 2013
NULL
|
# Hogaku Seminar "Introduction to Empirical Analysis for Lawyers", part 23 -- R source code
# (C) 2013 MORITA Hatsuru
rm(list=ls())
library(foreign)
library(sampleSelection)
# Read the Stata data set (presumably the Mroz (1987) female labour-supply data -- verify path)
mroz <- read.dta("../csv/mroz.dta")
# Baseline OLS log-wage regression, ignoring sample selection
result.ols <- lm(lwage~educ+exper+expersq, data=mroz)
# Heckman two-step ("heckit"): labour-force-participation selection equation + log-wage outcome equation
result.heckit <- heckit(inlf~educ+exper+expersq+nwifeinc+age+kidslt6+kidsge6,
lwage~educ+exper+expersq, method="2step", data=mroz)
# Same selection model estimated by full maximum likelihood
result.heckml <- selection(inlf~educ+exper+expersq+nwifeinc+age+kidslt6+kidsge6,
lwage~educ+exper+expersq, data=mroz)
summary(result.ols)
summary(result.heckit)
summary(result.heckml)
|
/R/IntEmpR23.r
|
no_license
|
Prunus1350/Empirical_Analysis
|
R
| false
| false
| 591
|
r
|
# Hogaku Seminar "Introduction to Empirical Analysis for Lawyers", part 23 -- R source code
# (C) 2013 MORITA Hatsuru
rm(list=ls())
library(foreign)
library(sampleSelection)
# Read the Stata data set (presumably the Mroz (1987) female labour-supply data -- verify path)
mroz <- read.dta("../csv/mroz.dta")
# Baseline OLS log-wage regression, ignoring sample selection
result.ols <- lm(lwage~educ+exper+expersq, data=mroz)
# Heckman two-step ("heckit"): labour-force-participation selection equation + log-wage outcome equation
result.heckit <- heckit(inlf~educ+exper+expersq+nwifeinc+age+kidslt6+kidsge6,
lwage~educ+exper+expersq, method="2step", data=mroz)
# Same selection model estimated by full maximum likelihood
result.heckml <- selection(inlf~educ+exper+expersq+nwifeinc+age+kidslt6+kidsge6,
lwage~educ+exper+expersq, data=mroz)
summary(result.ols)
summary(result.heckit)
summary(result.heckml)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cgBase.R
\name{cgBase}
\alias{cgBase}
\title{create CGDS object}
\usage{
cgBase(address = "https://www.cbioportal.org/")
}
\arguments{
\item{address}{string, web address}
}
\value{
cdgs object, and prints the first 2 columns of the cdgs object (the first column contains the IDs, to be used in later functions)
}
\description{
create CGDS object
}
\details{
uses cgdsr::CGDS(address)
}
\examples{
cgds <- cgBase()
}
|
/man/cgBase.Rd
|
no_license
|
ilwookkim/cgNetwork
|
R
| false
| true
| 495
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cgBase.R
\name{cgBase}
\alias{cgBase}
\title{create CGDS object}
\usage{
cgBase(address = "https://www.cbioportal.org/")
}
\arguments{
\item{address}{string, web address}
}
\value{
cdgs object, and prints the first 2 columns of the cdgs object (the first column contains the IDs, to be used in later functions)
}
\description{
create CGDS object
}
\details{
uses cgdsr::CGDS(address)
}
\examples{
cgds <- cgBase()
}
|
# Quick R script to
# Input: h, d
# Output: Exact p-value, write bounds, write correlation matrix ('newbounds/newsig.txt')
# Correlation matrix is random ~0.3
# Call it with Rscript test_iSample.R H D
library(mvtnorm)
# Inputs: h = HC tuning parameter, d = dimension (number of test statistics)
args <- commandArgs(trailingOnly=TRUE)
h <- as.numeric(args[1])
d <- as.numeric(args[2])
# Random correlation matrix: start from an exchangeable 0.3 matrix, then take the
# empirical correlation of 2*d multivariate-normal draws so the result is a valid
# (positive semi-definite) correlation matrix with off-diagonals near 0.3
start_sig <- matrix(data=0.3, nrow=d, ncol=d)
diag(start_sig) <- 1
temp_samp <- rmvnorm(n=2*d, sigma=start_sig)
random_sig <- cor(temp_samp)
# Explicit inverse of HC to find the p-value bounds
# (closed-form root of the Higher Criticism equation in the ordered p-values;
#  NOTE(review): assumes the discriminant under sqrt() is non-negative for the
#  supplied h and d -- confirm before use with extreme parameters)
i_vec <- 1:d
HC_p_bounds <- ((2*i_vec+h^2)/d - sqrt((2*i_vec/d+h^2/d)^2 - 4*i_vec^2/d^2 - 4*i_vec^2*h^2/d^3)) / (2*(1+h^2/d))
# Convert two-sided p-value bounds to z-value bounds, sorted smallest first
HC_z_bounds <- qnorm(1-HC_p_bounds/2)
HC_z_bounds <- sort(HC_z_bounds, decreasing=F)
# qnorm can't handle more precision than 10^-16
# so cap extreme bounds at 8.2 standard deviations
HC_z_bounds[which(HC_z_bounds > 8.2)]= 8.2
# Write the bounds and the upper triangle of the correlation matrix for the external program
write.table(HC_z_bounds, 'newbounds.txt', append=F, quote=F, row.names=F, col.names=F)
write.table(random_sig[upper.tri(random_sig)], 'newsig.txt', append=F, quote=F, row.names=F, col.names=F)
# Exact p-value: delegate the computation to the compiled GOF_exact_pvalue binary
system2(command="./GOF_exact_pvalue", args=c(d, 'newbounds.txt',
'newsig.txt', 0))
|
/test_iSample.R
|
no_license
|
ryanrsun/GOF_pvalue_iSample
|
R
| false
| false
| 1,163
|
r
|
# Quick R script to
# Input: h, d
# Output: Exact p-value, write bounds, write correlation matrix ('newbounds/newsig.txt')
# Correlation matrix is random ~0.3
# Call it with Rscript test_iSample.R H D
library(mvtnorm)
# Inputs: h = HC tuning parameter, d = dimension (number of test statistics)
args <- commandArgs(trailingOnly=TRUE)
h <- as.numeric(args[1])
d <- as.numeric(args[2])
# Random correlation matrix: start from an exchangeable 0.3 matrix, then take the
# empirical correlation of 2*d multivariate-normal draws so the result is a valid
# (positive semi-definite) correlation matrix with off-diagonals near 0.3
start_sig <- matrix(data=0.3, nrow=d, ncol=d)
diag(start_sig) <- 1
temp_samp <- rmvnorm(n=2*d, sigma=start_sig)
random_sig <- cor(temp_samp)
# Explicit inverse of HC to find the p-value bounds
# (closed-form root of the Higher Criticism equation in the ordered p-values;
#  NOTE(review): assumes the discriminant under sqrt() is non-negative for the
#  supplied h and d -- confirm before use with extreme parameters)
i_vec <- 1:d
HC_p_bounds <- ((2*i_vec+h^2)/d - sqrt((2*i_vec/d+h^2/d)^2 - 4*i_vec^2/d^2 - 4*i_vec^2*h^2/d^3)) / (2*(1+h^2/d))
# Convert two-sided p-value bounds to z-value bounds, sorted smallest first
HC_z_bounds <- qnorm(1-HC_p_bounds/2)
HC_z_bounds <- sort(HC_z_bounds, decreasing=F)
# qnorm can't handle more precision than 10^-16
# so cap extreme bounds at 8.2 standard deviations
HC_z_bounds[which(HC_z_bounds > 8.2)]= 8.2
# Write the bounds and the upper triangle of the correlation matrix for the external program
write.table(HC_z_bounds, 'newbounds.txt', append=F, quote=F, row.names=F, col.names=F)
write.table(random_sig[upper.tri(random_sig)], 'newsig.txt', append=F, quote=F, row.names=F, col.names=F)
# Exact p-value: delegate the computation to the compiled GOF_exact_pvalue binary
system2(command="./GOF_exact_pvalue", args=c(d, 'newbounds.txt',
'newsig.txt', 0))
|
# Download and extract the household power consumption data set
fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileUrl, destfile = "dataset.zip", method = "curl")
unzip("dataset.zip")
# Load the semicolon-separated data
# NOTE(review): the raw file codes missing values as "?" but no na.strings
# argument is given, so affected columns load as character/factor -- verify
data <- read.table("household_power_consumption.txt",header=TRUE, sep = ";")
# Convert Date to "YYYY-MM-DD" strings and Time to 12-hour clock strings
data$Date <- strptime(as.character(data$Date), format = "%d/%m/%Y")
data$Date <- format(as.Date(data$Date), "%Y-%m-%d")
data$Time <- strptime(as.character(data$Time), format = "%H:%M:%S")
data$Time <- format(data$Time, "%I:%M:%S %p")
# Keep only observations from the two target days, 2007-02-01 and 2007-02-02
data_subset <- subset(data, as.Date(data$Date) == "2007-02-01" | as.Date(data$Date) == "2007-02-02")
# Plot Global Active Power over time into plot2.png (480x480 px)
png("plot2.png", width = 480, height = 480)
# NOTE(review): row.names() returns a character vector; plot() with a character
# x relies on implicit coercion -- confirm as.numeric(rows) is not needed here
rows <- row.names(data_subset)
plot(rows, data_subset$Global_active_power,
type = "l", ylab = "Global Active Power (kilowatts)"
,xaxt = 'n', yaxt = 'n', xlab = '')
# Custom x axis: label the first, middle and last observation Thu/Fri/Sat
axis(1, at = c(rows[1], rows[length(rows) / 2],
rows[length(rows)]),labels = c("Thu", "Fri", "Sat"))
# Relabel y ticks 0..3000 as 0..6 kilowatts
# NOTE(review): presumably the power column was read as factor levels rather
# than numeric values -- verify against the loaded data
axis(2, at = seq(0, 3000, 1000), labels = seq(0, 6, 2))
dev.off()
|
/plot2.r
|
no_license
|
slam17/course4_week1
|
R
| false
| false
| 1,084
|
r
|
# Download and extract the household power consumption data set
fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileUrl, destfile = "dataset.zip", method = "curl")
unzip("dataset.zip")
# Load the semicolon-separated data
# NOTE(review): the raw file codes missing values as "?" but no na.strings
# argument is given, so affected columns load as character/factor -- verify
data <- read.table("household_power_consumption.txt",header=TRUE, sep = ";")
# Convert Date to "YYYY-MM-DD" strings and Time to 12-hour clock strings
data$Date <- strptime(as.character(data$Date), format = "%d/%m/%Y")
data$Date <- format(as.Date(data$Date), "%Y-%m-%d")
data$Time <- strptime(as.character(data$Time), format = "%H:%M:%S")
data$Time <- format(data$Time, "%I:%M:%S %p")
# Keep only observations from the two target days, 2007-02-01 and 2007-02-02
data_subset <- subset(data, as.Date(data$Date) == "2007-02-01" | as.Date(data$Date) == "2007-02-02")
# Plot Global Active Power over time into plot2.png (480x480 px)
png("plot2.png", width = 480, height = 480)
# NOTE(review): row.names() returns a character vector; plot() with a character
# x relies on implicit coercion -- confirm as.numeric(rows) is not needed here
rows <- row.names(data_subset)
plot(rows, data_subset$Global_active_power,
type = "l", ylab = "Global Active Power (kilowatts)"
,xaxt = 'n', yaxt = 'n', xlab = '')
# Custom x axis: label the first, middle and last observation Thu/Fri/Sat
axis(1, at = c(rows[1], rows[length(rows) / 2],
rows[length(rows)]),labels = c("Thu", "Fri", "Sat"))
# Relabel y ticks 0..3000 as 0..6 kilowatts
# NOTE(review): presumably the power column was read as factor levels rather
# than numeric values -- verify against the loaded data
axis(2, at = seq(0, 3000, 1000), labels = seq(0, 6, 2))
dev.off()
|
rm(list = ls())
# Set working directory
dir <- dirname(rstudioapi::getActiveDocumentContext()$path)
setwd(dir);
# Load required packages
require(tidyverse)
# Load the data
reserving_daily <- readRDS("data/reserving_data_daily.rds")
reserving_yearly <- readRDS("data/reserving_data_yearly.rds")
# Inspect the data
head(reserving_daily)
# Yearly indicators are defined as the number of elapsed years since 2010
# Settlement year == 3 implies the claim settled in 2013 (=2010 + 3)
# Development year is defined as the number of years elapsed since the occurrence of the claim
# Development year 1 refers to the year in which the claim occurred
head(reserving_yearly)
#### Your turn: exercise 1 ####
# Q1: visualize reporting and settlement delay.
ggplot(data = reserving_daily) +
theme_bw() +
geom_density(aes(reporting_delay, fill = 'reporting delay'),
alpha = .5) +
geom_density(aes(settlement_delay, fill = 'settlement_delay'),
alpha = .5) +
xlab('delay in days') +
xlim(c(0, 1000))
# Q2: when was the last payment registered in the data set?
max(reserving_daily$payment_date)
# Q3: what is the average number of payments per claim?
reserving_daily %>%
group_by(accident_number) %>%
summarise(payments = sum(payment_size > 0)) %>%
ungroup() %>%
summarise(average = mean(payments))
# Q4: calculate the number of claims per accident year.
reserving_yearly %>%
filter(development_year == 1) %>%
group_by(accident_year) %>%
summarise(num_claims = n())
# censoring:
observed_daily <- reserving_daily %>%
filter(payment_date <= as.Date('2020-12-31'))
unobserved_daily <- reserving_daily %>%
filter(payment_date > as.Date('2020-12-31'))
observed_yearly <- reserving_yearly %>%
filter(calendar_year <= 10,
reporting_year <= 10)
unobserved_yearly <- reserving_yearly %>%
filter(calendar_year > 10 | reporting_year > 10)
# IBNR and RBNS:
reserve_actual <- sum(unobserved_yearly$size)
reserve_actual
# same result from daily data
sum(unobserved_daily$payment_size)
## The RBNS reserve is much larger than the IBNR reserve
unobserved_yearly %>%
mutate(reported = (reporting_year <= 10)) %>%
group_by(reported) %>%
summarise(reserve = sum(size))
#### Reserving data structures - part 2 ####
# Incremental triangle:
observed_yearly %>%
group_by(accident_year, development_year) %>%
summarise(value = sum(size)) %>%
pivot_wider(values_from = value,
names_from = development_year,
names_prefix = 'DY.')
# More sophisticated function to create incremental triangles:
## rows: aggregation variable for the rows
## columns: aggregation variable for the columns
## variable: variable that will be aggregated in the cells of the triangle
## lower_na: fill the lower triangle with NA's
# Build an incremental run-off triangle from claim-level data.
#
# Args:
#   data:     data frame with one row per observation unit.
#   rows:     name (string) of the column indexing triangle rows; assumed to
#             hold 0-based integers (e.g. accident_year 0..n-1).
#   columns:  name (string) of the column indexing triangle columns; assumed to
#             hold 1-based integers (e.g. development_year 1..n).
#   variable: name (string) of the numeric column summed within each cell.
#   lower_na: if TRUE, fill the unobserved lower triangle (cells past the
#             latest observed diagonal) with NA.
#
# Returns: an n x n numeric matrix of per-cell sums of `variable`.
incremental_triangle <- function(data,
                                 rows = 'accident_year',
                                 columns = 'development_year',
                                 variable = 'size',
                                 lower_na = TRUE) {
  data_triangle <- data %>%
    group_by(!!sym(rows), !!sym(columns)) %>%
    summarise(value = sum(!!sym(variable))) %>%
    ungroup()
  # `[[` extracts the column as a plain vector; `data_triangle[, rows]` returns
  # a one-column tibble, and max() over a data frame is needlessly fragile.
  n <- max(data_triangle[[rows]]) + 1
  triangle <- matrix(0, nrow = n, ncol = n)
  # Fill cells via an (i, j) index-pair matrix; +1 converts 0-based row
  # indices to 1-based matrix indices.
  triangle[cbind(data_triangle[[rows]] + 1, data_triangle[[columns]])] <- data_triangle$value
  if (lower_na) {
    # A cell (i, j) with i + j > n + 1 lies beyond the latest observed diagonal.
    triangle[row(triangle) + col(triangle) > n + 1] <- NA
  }
  return(triangle)
}
# Cumulative run-off triangle: row-wise cumulative sums of the incremental
# triangle built by incremental_triangle(). Same arguments; returns an
# n x n numeric matrix.
cumulative_triangle <- function(data,
                                rows = 'accident_year',
                                columns = 'development_year',
                                variable = 'size',
                                lower_na = TRUE) {
  incr <- incremental_triangle(data, rows, columns, variable, lower_na)
  # apply() lays its per-row results out column-wise, so transpose back.
  t(apply(incr, 1, cumsum))
}
incremental_triangle(observed_yearly,
variable = 'payment',
lower_na = TRUE)
cumulative_triangle(observed_yearly,
variable = 'payment')
#### Claims reserving with triangles ####
# diy approach to chainladder:
triangle <- cumulative_triangle(observed_yearly, variable = 'size')
l <- nrow(triangle)
## compute development factors
f <- rep(0, l-1)
for(j in 1:(l-1)) {
f[j] <- sum(triangle[1:(l-j), j+1]) / sum(triangle[1:(l-j), j])
}
f
## complete the triangle
triangle_completed <- triangle
for(j in 2:l) {
triangle_completed[l:(l-j+2), j] <- triangle_completed[l:(l-j+2), j-1] * f[j-1]
}
triangle_completed
## cumulative to incremental triangle
cbind(triangle_completed[, 1],
t(apply(triangle_completed, 1, diff)))
## cum2incr using the {ChainLadder} package
require(ChainLadder)
cum2incr(triangle_completed)
## calculating the reserve estimate
triangle_completed_incr <- cum2incr(triangle_completed)
lower_triangle <- row(triangle_completed_incr) + col(triangle_completed_incr) > l+1
lower_triangle
reserve_cl <- sum(triangle_completed_incr[lower_triangle])
data.frame(reserve_cl = reserve_cl,
reserve_actual = reserve_actual,
difference = reserve_cl - reserve_actual,
relative_difference_pct = (reserve_cl - reserve_actual) / reserve_actual * 100)
# Using the {ChainLadder} package
require(ChainLadder)
triangle <- cumulative_triangle(observed_yearly, variable = 'size')
MackChainLadder(triangle)
# Using a GLM
triangle <- incremental_triangle(observed_yearly,
variable = 'size')
triangle_long <- data.frame(
occ.year = as.numeric(row(triangle)),
dev.year = as.numeric(col(triangle)),
size = as.numeric(triangle))
head(triangle_long)
## fit the GLM
fit <- glm(size ~ factor(occ.year) + factor(dev.year),
data = triangle_long,
family = poisson(link = log))
summary(fit)
coef_cl <- coefficients(fit)
plot(coef_cl[2:10], main = 'coefficients accident year')
plot(coef_cl[11:18], main = 'coefficients development year')
## fill the lower triangle
lower_triangle <- triangle_long$occ.year + triangle_long$dev.year > l + 1
triangle_long$size[lower_triangle] <- predict(fit, newdata = triangle_long[lower_triangle, ], type = 'response')
triangle_long %>%
pivot_wider(values_from = size,
names_from = dev.year,
names_prefix = 'DY.')
reserve_glm <- sum(triangle_long$size[lower_triangle])
reserve_glm
#### your turn 2: estimating the number of future payments ####
# Q1: Compute the actual number of future payments from the unobserved data set.
payment_actual <- sum(unobserved_yearly$payment)
payment_actual
# Q2: Create a cumulative triangle containing the number of payments per accident and development year.
triangle <- cumulative_triangle(observed_yearly,
variable = 'payment')
# Q3: Estimate the future number of payments using the chain ladder method from the {ChainLadder} package.
require(ChainLadder)
cl <- MackChainLadder(triangle)
cl
# Q4: Compute the difference between the estimated and actual number of payments.
# Express this error in terms of standard deviations?
ultimate <- sum(cum2incr(cl$FullTriangle))
already_paid <- sum(cum2incr(cl$Triangle), na.rm = TRUE)
payment_cl <- ultimate - already_paid
sigma_cl <- as.numeric(cl$Total.Mack.S.E)
error = payment_actual - payment_cl
round(c(error = error,
pct_error = error / payment_actual * 100,
std.dev = error / sigma_cl),2)
#### When the chain ladder method fails ####
# inspecting a range of triangles to get insights in the underlying dynamics
triangle_open <- incremental_triangle(
observed_yearly %>%
mutate(open = calendar_year <= settlement_year),
variable = 'open')
triangle_open
triangle_open_end <- incremental_triangle(
observed_yearly %>%
mutate(open_end = (calendar_year <= settlement_year) & (close == 0)),
variable = 'open_end')
triangle_open_end
triangle_payment <- incremental_triangle(
observed_yearly,
variable = 'payment')
triangle_payment / triangle_open
triangle_size <- incremental_triangle(
observed_yearly,
variable = 'size')
triangle_size / triangle_payment
# inspecting evolutions in claim frequency:
claims<- observed_daily %>%
group_by(accident_number) %>%
slice(1) %>%
ungroup()
occ_intensity <- claims %>%
group_by(accident_date) %>%
summarise(count = n())
require(zoo)
occ_intensity$moving_average <-
rollmean(occ_intensity$count, 30, na.pad = TRUE)
ggplot(occ_intensity) +
theme_bw() +
geom_point(aes(x = accident_date, y = count)) +
geom_line(aes(x = accident_date, y = moving_average),
size = 1, color = 'blue') +
ggtitle('Evolution of claim frequency')
# inspecting evolutions in the distribution of claims within an accident year:
require(lubridate)
claims <- claims %>%
mutate(start_year = floor_date(accident_date, unit = 'year'),
time = as.numeric(accident_date - start_year) / 366,
accident_year = year(accident_date),
reporting_year = year(reporting_date)) %>%
filter(accident_year == reporting_year)
ggplot(claims) +
theme_bw() +
geom_density(aes(x = time,
group = factor(accident_year),
color = factor(accident_year)))
#### Fixing the chain ladder method ####
## Monthly chain ladder
require(lubridate)
claims <- observed_daily %>%
group_by(accident_number) %>%
slice(1) %>%
ungroup() %>%
mutate(start_month = floor_date(accident_date, unit = 'month'),
time = as.numeric(accident_date - start_month) / 31,
accident_month = format(accident_date, '%Y%m'),
reporting_month = format(reporting_date, '%Y%m')) %>%
filter(accident_month == reporting_month)
ggplot(claims) +
theme_bw() +
geom_density(aes(x = time,
group = factor(accident_month),
color = factor(accident_month))) +
theme(legend.position = 'none')
# Constructing a monthly triangle
triangle_month <- observed_daily %>%
mutate(accident_month = year(accident_date)*12 + month(accident_date) - 2010*12,
development_month = year(payment_date)*12 + month(payment_date) - 2010*12 - accident_month) %>%
group_by(accident_month, development_month) %>%
summarise(size = sum(payment_size)) %>%
ungroup() %>%
complete(expand.grid(accident_month = 1:132, development_month = 0:131), fill = list(size = 0)) %>%
mutate(size = ifelse(accident_month + development_month > 132, NA, size)) %>%
arrange(development_month) %>%
pivot_wider(names_from = development_month, values_from = size) %>%
arrange(accident_month)
triangle_month <- as.matrix(triangle_month[, 2:132])
cl <- MackChainLadder(incr2cum(triangle_month))
summary(cl)$Totals
## Chainladder by occurrence month
require(lubridate)
claims <- observed_daily %>%
group_by(accident_number) %>%
slice(1) %>%
ungroup() %>%
mutate(start_month = floor_date(accident_date, unit = 'month'),
time = as.numeric(accident_date - start_month) / 31,
accident_year = format(accident_date, '%Y'),
reporting_year = format(reporting_date, '%Y'),
month = format(accident_date, '%B')) %>%
filter(accident_year == reporting_year)
ggplot(claims) +
facet_wrap( ~ month, ncol = 3) +
theme_bw() +
geom_density(aes(x = time,
group = factor(accident_year),
color = factor(accident_year)))
# Add accident date to reserving_yearly
reserving_yearly <- reserving_yearly %>%
left_join(reserving_daily %>%
group_by(accident_number) %>%
slice(1) %>%
ungroup() %>%
select(accident_number, accident_date))
reserving_yearly <- reserving_yearly %>%
mutate(accident_month = format(accident_date, '%B'))
# Compute data for runoff triangles by month
triangles <- reserving_yearly %>%
group_by(accident_month, accident_year, development_year) %>%
summarise(size = sum(size)) %>%
ungroup() %>%
complete(expand.grid(accident_month = unique(accident_month),
accident_year = 0:10, development_year = 1:11),
fill = list(size = 0)) %>%
mutate(size = ifelse(accident_year + development_year > 11, NA, size))
triangles %>%
filter(accident_month == 'April') %>%
arrange(development_year) %>%
pivot_wider(names_from = development_year, values_from = size)
# Estimate chain ladder glm
fit <- glm(size ~ factor(development_year) * accident_month + factor(accident_year) * accident_month,
data = triangles,
family = poisson(link = 'log'))
# compute reserve
reserve_group <- sum(predict(fit, newdata = triangles %>% filter(is.na(size)), type = 'response'))
reserve_group
|
/scripts/day2/day2_reserving_complete.R
|
no_license
|
katrienantonio/workshop-loss-reserv-fraud
|
R
| false
| false
| 12,663
|
r
|
rm(list = ls())
# Set working directory
dir <- dirname(rstudioapi::getActiveDocumentContext()$path)
setwd(dir);
# Load required packages
require(tidyverse)
# Load the data
reserving_daily <- readRDS("data/reserving_data_daily.rds")
reserving_yearly <- readRDS("data/reserving_data_yearly.rds")
# Inspect the data
head(reserving_daily)
# Yearly indicators are defined as the number of elapsed years since 2010
# Settlement year == 3 implies the claim settled in 2013 (=2010 + 3)
# Development year is defined as the number of years elapsed since the occurrence of the claim
# Development year 1 refers to the year in which the claim occurred
head(reserving_yearly)
#### Your turn: exercise 1 ####
# Q1: visualize reporting and settlement delay.
ggplot(data = reserving_daily) +
theme_bw() +
geom_density(aes(reporting_delay, fill = 'reporting delay'),
alpha = .5) +
geom_density(aes(settlement_delay, fill = 'settlement_delay'),
alpha = .5) +
xlab('delay in days') +
xlim(c(0, 1000))
# Q2: when was the last payment registered in the data set?
max(reserving_daily$payment_date)
# Q3: what is the average number of payments per claim?
reserving_daily %>%
group_by(accident_number) %>%
summarise(payments = sum(payment_size > 0)) %>%
ungroup() %>%
summarise(average = mean(payments))
# Q4: calculate the number of claims per accident year.
reserving_yearly %>%
filter(development_year == 1) %>%
group_by(accident_year) %>%
summarise(num_claims = n())
# censoring:
observed_daily <- reserving_daily %>%
filter(payment_date <= as.Date('2020-12-31'))
unobserved_daily <- reserving_daily %>%
filter(payment_date > as.Date('2020-12-31'))
observed_yearly <- reserving_yearly %>%
filter(calendar_year <= 10,
reporting_year <= 10)
unobserved_yearly <- reserving_yearly %>%
filter(calendar_year > 10 | reporting_year > 10)
# IBNR and RBNS:
reserve_actual <- sum(unobserved_yearly$size)
reserve_actual
# same result from daily data
sum(unobserved_daily$payment_size)
## The RBNS reserve is much larger than the IBNR reserve
unobserved_yearly %>%
mutate(reported = (reporting_year <= 10)) %>%
group_by(reported) %>%
summarise(reserve = sum(size))
#### Reserving data structures - part 2 ####
# Incremental triangle:
observed_yearly %>%
group_by(accident_year, development_year) %>%
summarise(value = sum(size)) %>%
pivot_wider(values_from = value,
names_from = development_year,
names_prefix = 'DY.')
# More sophisticated function to create incremental triangles:
## rows: aggregation variable for the rows
## columns: aggregation variable for the columns
## variable: variable that will be aggregated in the cells of the triangle
## lower_na: fill the lower triangle with NA's
# Build an incremental run-off triangle from claim-level data.
# rows/columns/variable are column names (strings); `rows` is assumed to hold
# 0-based integers and `columns` 1-based integers. When lower_na = TRUE the
# unobserved lower triangle (cells past the latest diagonal) is set to NA.
# Returns an n x n numeric matrix of per-cell sums of `variable`.
incremental_triangle <- function(data,
rows = 'accident_year',
columns = 'development_year',
variable = 'size',
lower_na = TRUE) {
data_triangle <- data %>%
group_by(!!sym(rows), !!sym(columns)) %>%
summarise(value = sum(!!sym(variable))) %>%
ungroup()
# n = dimension of the square triangle matrix (row indices are 0-based, hence +1)
n <- max(data_triangle[, rows])+1
triangle <- matrix(0, nrow = n, ncol = n)
# fill cells via an (i, j) index-pair matrix; +1 converts 0-based rows to 1-based indices
triangle[cbind(data_triangle[[rows]]+1, data_triangle[[columns]])] <- data_triangle$value
if(lower_na) {
# a cell (i, j) with i + j > n + 1 lies beyond the latest observed diagonal
triangle[row(triangle) + col(triangle) > n+1] <- NA
}
return(triangle)
}
# Cumulative run-off triangle: row-wise cumulative sums of the incremental
# triangle built by incremental_triangle(). Same arguments; returns an
# n x n numeric matrix.
cumulative_triangle <- function(data,
rows = 'accident_year',
columns = 'development_year',
variable = 'size',
lower_na = TRUE) {
incremental <- incremental_triangle(data, rows, columns, variable, lower_na)
# apply() lays its per-row results out column-wise, so transpose back
t(apply(incremental, 1, cumsum))
}
incremental_triangle(observed_yearly,
variable = 'payment',
lower_na = TRUE)
cumulative_triangle(observed_yearly,
variable = 'payment')
#### Claims reserving with triangles ####
# diy approach to chainladder:
triangle <- cumulative_triangle(observed_yearly, variable = 'size')
l <- nrow(triangle)
## compute development factors
f <- rep(0, l-1)
for(j in 1:(l-1)) {
f[j] <- sum(triangle[1:(l-j), j+1]) / sum(triangle[1:(l-j), j])
}
f
## complete the triangle
triangle_completed <- triangle
for(j in 2:l) {
triangle_completed[l:(l-j+2), j] <- triangle_completed[l:(l-j+2), j-1] * f[j-1]
}
triangle_completed
## cumulative to incremental triangle
cbind(triangle_completed[, 1],
t(apply(triangle_completed, 1, diff)))
## cum2incr using the {ChainLadder} package
require(ChainLadder)
cum2incr(triangle_completed)
## calculating the reserve estimate
triangle_completed_incr <- cum2incr(triangle_completed)
lower_triangle <- row(triangle_completed_incr) + col(triangle_completed_incr) > l+1
lower_triangle
reserve_cl <- sum(triangle_completed_incr[lower_triangle])
data.frame(reserve_cl = reserve_cl,
reserve_actual = reserve_actual,
difference = reserve_cl - reserve_actual,
relative_difference_pct = (reserve_cl - reserve_actual) / reserve_actual * 100)
# Using the {ChainLadder} package
require(ChainLadder)
triangle <- cumulative_triangle(observed_yearly, variable = 'size')
MackChainLadder(triangle)
# Using a GLM
triangle <- incremental_triangle(observed_yearly,
variable = 'size')
triangle_long <- data.frame(
occ.year = as.numeric(row(triangle)),
dev.year = as.numeric(col(triangle)),
size = as.numeric(triangle))
head(triangle_long)
## fit the GLM
fit <- glm(size ~ factor(occ.year) + factor(dev.year),
data = triangle_long,
family = poisson(link = log))
summary(fit)
coef_cl <- coefficients(fit)
plot(coef_cl[2:10], main = 'coefficients accident year')
plot(coef_cl[11:18], main = 'coefficients development year')
## fill the lower triangle
lower_triangle <- triangle_long$occ.year + triangle_long$dev.year > l + 1
triangle_long$size[lower_triangle] <- predict(fit, newdata = triangle_long[lower_triangle, ], type = 'response')
triangle_long %>%
pivot_wider(values_from = size,
names_from = dev.year,
names_prefix = 'DY.')
reserve_glm <- sum(triangle_long$size[lower_triangle])
reserve_glm
#### your turn 2: estimating the number of future payments ####
# Q1: Compute the actual number of future payments from the unobserved data set.
payment_actual <- sum(unobserved_yearly$payment)
payment_actual
# Q2: Create a cumulative triangle containing the number of payments per accident and development year.
triangle <- cumulative_triangle(observed_yearly,
variable = 'payment')
# Q3: Estimate the future number of payments using the chain ladder method from the {ChainLadder} package.
require(ChainLadder)
cl <- MackChainLadder(triangle)
cl
# Q4: Compute the difference between the estimated and actual number of payments.
# Express this error in terms of standard deviations?
ultimate <- sum(cum2incr(cl$FullTriangle))
already_paid <- sum(cum2incr(cl$Triangle), na.rm = TRUE)
payment_cl <- ultimate - already_paid
sigma_cl <- as.numeric(cl$Total.Mack.S.E)
error = payment_actual - payment_cl
round(c(error = error,
pct_error = error / payment_actual * 100,
std.dev = error / sigma_cl),2)
#### When the chain ladder method fails ####
# inspecting a range of triangles to get insights in the underlying dynamics
triangle_open <- incremental_triangle(
observed_yearly %>%
mutate(open = calendar_year <= settlement_year),
variable = 'open')
triangle_open
triangle_open_end <- incremental_triangle(
observed_yearly %>%
mutate(open_end = (calendar_year <= settlement_year) & (close == 0)),
variable = 'open_end')
triangle_open_end
triangle_payment <- incremental_triangle(
observed_yearly,
variable = 'payment')
triangle_payment / triangle_open
triangle_size <- incremental_triangle(
observed_yearly,
variable = 'size')
triangle_size / triangle_payment
# inspecting evolutions in claim frequency:
claims<- observed_daily %>%
group_by(accident_number) %>%
slice(1) %>%
ungroup()
occ_intensity <- claims %>%
group_by(accident_date) %>%
summarise(count = n())
require(zoo)
occ_intensity$moving_average <-
rollmean(occ_intensity$count, 30, na.pad = TRUE)
ggplot(occ_intensity) +
theme_bw() +
geom_point(aes(x = accident_date, y = count)) +
geom_line(aes(x = accident_date, y = moving_average),
size = 1, color = 'blue') +
ggtitle('Evolution of claim frequency')
# inspecting evolutions in the distribution of claims within an accident year:
require(lubridate)
claims <- claims %>%
mutate(start_year = floor_date(accident_date, unit = 'year'),
time = as.numeric(accident_date - start_year) / 366,
accident_year = year(accident_date),
reporting_year = year(reporting_date)) %>%
filter(accident_year == reporting_year)
ggplot(claims) +
theme_bw() +
geom_density(aes(x = time,
group = factor(accident_year),
color = factor(accident_year)))
#### Fixing the chain ladder method ####
## Monthly chain ladder
require(lubridate)
claims <- observed_daily %>%
group_by(accident_number) %>%
slice(1) %>%
ungroup() %>%
mutate(start_month = floor_date(accident_date, unit = 'month'),
time = as.numeric(accident_date - start_month) / 31,
accident_month = format(accident_date, '%Y%m'),
reporting_month = format(reporting_date, '%Y%m')) %>%
filter(accident_month == reporting_month)
ggplot(claims) +
theme_bw() +
geom_density(aes(x = time,
group = factor(accident_month),
color = factor(accident_month))) +
theme(legend.position = 'none')
# Constructing a monthly triangle
# accident_month: months elapsed since Jan 2010 (1..132 over the
# 11-year window); development_month: lag in months between the
# payment and the occurrence.
triangle_month <- observed_daily %>%
  mutate(accident_month = year(accident_date)*12 + month(accident_date) - 2010*12,
         development_month = year(payment_date)*12 + month(payment_date) - 2010*12 - accident_month) %>%
  group_by(accident_month, development_month) %>%
  summarise(size = sum(payment_size)) %>%
  ungroup() %>%
  # Fill unobserved (month, lag) cells with 0, then blank out the
  # future half of the triangle (cells beyond month 132) with NA.
  complete(expand.grid(accident_month = 1:132, development_month = 0:131), fill = list(size = 0)) %>%
  mutate(size = ifelse(accident_month + development_month > 132, NA, size)) %>%
  arrange(development_month) %>%
  pivot_wider(names_from = development_month, values_from = size) %>%
  arrange(accident_month)
# Drop the accident_month id column, keep the 131 development columns.
triangle_month <- as.matrix(triangle_month[, 2:132])
# Mack chain ladder on the cumulated triangle; $Totals holds the
# aggregate ultimate/IBNR estimates.
cl <- MackChainLadder(incr2cum(triangle_month))
summary(cl)$Totals
## Chainladder by occurrence month
# library() fails fast if lubridate is missing; require() would only warn.
library(lubridate)
# One row per claim (first daily record), with within-month position,
# year labels, and the occurrence month name for faceting.
claims <- observed_daily %>%
  group_by(accident_number) %>%
  slice(1) %>%
  ungroup() %>%
  mutate(start_month = floor_date(accident_date, unit = 'month'),
         # Fraction of the month elapsed (31 used for all months).
         time = as.numeric(accident_date - start_month) / 31,
         accident_year = format(accident_date, '%Y'),
         reporting_year = format(reporting_date, '%Y'),
         month = format(accident_date, '%B')) %>%
  # Keep only claims reported within their accident year.
  filter(accident_year == reporting_year)
ggplot(claims) +
  facet_wrap( ~ month, ncol = 3) +
  theme_bw() +
  geom_density(aes(x = time,
                   group = factor(accident_year),
                   color = factor(accident_year)))
# Add accident date to reserving_yearly
# (the first daily record of each claim carries its accident date)
# NOTE(review): left_join() uses the implicit natural join here —
# presumably accident_number is the only shared column; confirm.
reserving_yearly <- reserving_yearly %>%
  left_join(reserving_daily %>%
              group_by(accident_number) %>%
              slice(1) %>%
              ungroup() %>%
              select(accident_number, accident_date))
# Month name of the occurrence date, used to group the triangles.
reserving_yearly <- reserving_yearly %>%
  mutate(accident_month = format(accident_date, '%B'))
# Compute data for runoff triangles by month
# Fill unobserved cells with 0, then mark future cells (beyond the
# 11-year observation window) as NA so they can be predicted below.
triangles <- reserving_yearly %>%
  group_by(accident_month, accident_year, development_year) %>%
  summarise(size = sum(size)) %>%
  ungroup() %>%
  complete(expand.grid(accident_month = unique(accident_month),
                       accident_year = 0:10, development_year = 1:11),
           fill = list(size = 0)) %>%
  mutate(size = ifelse(accident_year + development_year > 11, NA, size))
# Inspect one month's triangle in wide form.
triangles %>%
  filter(accident_month == 'April') %>%
  arrange(development_year) %>%
  pivot_wider(names_from = development_year, values_from = size)
# Estimate chain ladder glm
# Poisson GLM with month-specific development- and accident-year
# effects: the interactions make this equivalent to fitting a separate
# chain ladder model per occurrence month.
fit <- glm(size ~ factor(development_year) * accident_month + factor(accident_year) * accident_month,
           data = triangles,
           family = poisson(link = 'log'))
# compute reserve
# Sum of the fitted values on the NA (future) cells = total reserve.
reserve_group <- sum(predict(fit, newdata = triangles %>% filter(is.na(size)), type = 'response'))
reserve_group
|
\name{laest}
\alias{laest}
\title{An example function from the book Cichosz, P. (2015): Data Mining Algorithms: Explained Using R}
\description{An example function from Chapter 2 of the book Cichosz, P. (2015): Data Mining Algorithms: Explained Using R. See Appendix B or http://www.wiley.com/go/data_mining_algorithms for more details.}
\usage{See Section 2.4, Example 2.4.32.}
\arguments{See Section 2.4, Example 2.4.32.}
\details{See Section 2.4, Example 2.4.32.}
\value{See Section 2.4, Example 2.4.32.}
\references{Cichosz, P. (2015): Data Mining Algorithms: Explained Using R. Wiley.}
\author{
Pawel Cichosz <p.cichosz@elka.pw.edu.pl>
}
\note{
}
\seealso{
\code{\link{mest}}
\code{\link{laprob}}
}
\examples{
laest(0, 10, 2)
mest(0, 10, 2)
laest(10, 10, 2)
mest(10, 10, 2)
}
\keyword{univar}
|
/man/laest.Rd
|
no_license
|
42n4/dmr.stats
|
R
| false
| false
| 811
|
rd
|
\name{laest}
\alias{laest}
\title{An example function from the book Cichosz, P. (2015): Data Mining Algorithms: Explained Using R}
\description{An example function from Chapter 2 of the book Cichosz, P. (2015): Data Mining Algorithms: Explained Using R. See Appendix B or http://www.wiley.com/go/data_mining_algorithms for more details.}
\usage{See Section 2.4, Example 2.4.32.}
\arguments{See Section 2.4, Example 2.4.32.}
\details{See Section 2.4, Example 2.4.32.}
\value{See Section 2.4, Example 2.4.32.}
\references{Cichosz, P. (2015): Data Mining Algorithms: Explained Using R. Wiley.}
\author{
Pawel Cichosz <p.cichosz@elka.pw.edu.pl>
}
\note{
}
\seealso{
\code{\link{mest}}
\code{\link{laprob}}
}
\examples{
laest(0, 10, 2)
mest(0, 10, 2)
laest(10, 10, 2)
mest(10, 10, 2)
}
\keyword{univar}
|
with(ae88cdfeda52d4c65889e358cb0183765, {ROOT <- 'D:/SEMOSS_v4.0.0_x64/SEMOSS_v4.0.0_x64/semosshome/db/Atadata2__3b3e4a3b-d382-4e98-9950-9b4e8b308c1c/version/80bb2a25-ac5d-47d0-abfc-b3f3811f0936';source("D:/SEMOSS_v4.0.0_x64/SEMOSS_v4.0.0_x64/semosshome/R/Recommendations/advanced_federation_blend.r");a2Hrpdwy3col1<- as.character(FRAME878836$location);linkazfOFV <- data.table("col1"=c("null"), "col2"=c("null")); linkazfOFV <- unique(linkazfOFV);aC2XtVrvb<- curate(a2Hrpdwy3col1,linkazfOFV);aC2XtVrvb <- as.data.table(aC2XtVrvb);names(aC2XtVrvb)<-"av5XX5QWX";FRAME878836 <- cbind(FRAME878836,aC2XtVrvb);FRAME878836 <- FRAME878836[,-c("location")];colnames(FRAME878836)[colnames(FRAME878836)=="av5XX5QWX"] <- "location";rm(aC2XtVrvb,linkazfOFV,a2Hrpdwy3col1,a2Hrpdwy3, best_match, best_match_nonzero, best_match_zero, blend, curate, self_match );});
|
/80bb2a25-ac5d-47d0-abfc-b3f3811f0936/R/Temp/aadAqhrZJcCBr.R
|
no_license
|
ayanmanna8/test
|
R
| false
| false
| 850
|
r
|
with(ae88cdfeda52d4c65889e358cb0183765, {ROOT <- 'D:/SEMOSS_v4.0.0_x64/SEMOSS_v4.0.0_x64/semosshome/db/Atadata2__3b3e4a3b-d382-4e98-9950-9b4e8b308c1c/version/80bb2a25-ac5d-47d0-abfc-b3f3811f0936';source("D:/SEMOSS_v4.0.0_x64/SEMOSS_v4.0.0_x64/semosshome/R/Recommendations/advanced_federation_blend.r");a2Hrpdwy3col1<- as.character(FRAME878836$location);linkazfOFV <- data.table("col1"=c("null"), "col2"=c("null")); linkazfOFV <- unique(linkazfOFV);aC2XtVrvb<- curate(a2Hrpdwy3col1,linkazfOFV);aC2XtVrvb <- as.data.table(aC2XtVrvb);names(aC2XtVrvb)<-"av5XX5QWX";FRAME878836 <- cbind(FRAME878836,aC2XtVrvb);FRAME878836 <- FRAME878836[,-c("location")];colnames(FRAME878836)[colnames(FRAME878836)=="av5XX5QWX"] <- "location";rm(aC2XtVrvb,linkazfOFV,a2Hrpdwy3col1,a2Hrpdwy3, best_match, best_match_nonzero, best_match_zero, blend, curate, self_match );});
|
library(patentsview)
library(tidyverse)
library(shiny)
library(data.table)
# query of patent database: patents granted in 2016 Q1
project_query <- qry_funs$and(
  qry_funs$gte(patent_date = "2016-01-01"),
  qry_funs$lte(patent_date = "2016-03-31")
)
# original dataframe: fetch all pages of matching patents with the
# inventor/assignee fields the app uses below
project_result = search_pv(
  query = project_query,
  fields = c("patent_number",
             "patent_date",
             "inventor_id",
             "inventor_last_name",
             "inventor_lastknown_city",
             "inventor_lastknown_state",
             "assignee_id",
             "assignee_organization",
             "assignee_lastknown_state",
             "assignee_country"),
  all_pages = TRUE
)
# unnested original data frame: one row per patent x inventor x assignee
# NOTE(review): unnest(.drop = ) is deprecated in tidyr >= 1.0 —
# confirm the installed tidyr version still accepts it.
unnested_project_result = project_result$data$patents %>%
  unnest(inventors, .drop = FALSE) %>%
  unnest(assignees)
unnested_project_result[1:5, ]
#--------------------------------------------
#core objective #1
#Print summary in console: distinct patents, inventors and assignees
core1_df = unnested_project_result %>%
  summarise("Total number of patents:" = n_distinct(patent_number),
            "Total number of inventors:" = n_distinct(inventor_id),
            "Total number of assignees:" = n_distinct(assignee_id))
summary_stats_dt = as.data.table(core1_df)
summary_stats_dt
#---------------------------------------
# Core objective #2: columns shown in the DataTable tab
core2_df = unnested_project_result%>%
  select(patent_number,
         patent_date,
         inventor_last_name,
         inventor_lastknown_city,
         assignee_organization,
         assignee_lastknown_state)
patents_dt = as.data.table(core2_df)
head(patents_dt)
#patents_table[1:5, ]
#str(patents_table)
#---------------------------------------
# core objective #3 - print top 5 assignees
# Patent count per assignee organization.
core3_df = core2_df %>%
  group_by(assignee_organization) %>%
  summarise(count = n())
head(core3_df)
colnames(core3_df) <- c("assignee_org", "num_patents")
# Drop unassigned patents (NA organization) and rank by patent count.
# filter(!is.na(...)) states the intent that na.exclude() only implied:
# the original na.exclude(df, assignee_org) silently ignored its second
# argument and dropped rows with NA in any column — equivalent here
# because num_patents (from n()) is never NA.
newcore3 = core3_df %>%
  select(assignee_org, num_patents) %>%
  filter(!is.na(assignee_org)) %>%
  arrange(desc(num_patents))
result = newcore3[1:5, ]
result
# core objective #3 - bar plot 5 top assignees
# Frequency table of assignee organizations, NAs excluded, then the
# five most frequent.
newtable = table(unnested_project_result$assignee_organization,
                 exclude = NA)
table3 = sort(newtable, decreasing = TRUE)
table4 = head(table3, n = 5)
#horizontal bar plot (wide left margin for the long organization names)
par(mar = c(5,9,4,2))
assignees_plot2 = barplot(table4,
                          #xlab = "Number of Patents", horiz = TRUE,
                          #main = "Top Assignee Organizations",
                          xlim = c(0, 8000),
                          cex.names = .40,
                          las = 2,
                          col = "blue")
# vertical bar plot (tall bottom margin for the rotated labels)
par(mar=c(9,4,2,2))
assignees_plot = barplot(table4,
                         ylab = "Number of Patents",
                         main = "Top Assignee Organizations",
                         ylim = c(0, 8000),
                         cex.names = .40,
                         las = 2,
                         col = "blue"
)
#------------------------------------
#core objective 4 - drop down menu state of assignee organization
#-----------------------------------
#core objective 5 - text box query investor's last name
#-----------------------------------
#menu objective 2
# Patents per inventor id.
inventor_df = unnested_project_result %>%
  group_by (inventor_id) %>%
  summarise(number_patents = n())
head(inventor_df)
inventor_df2 = unnested_project_result %>%
  select(inventor_id,
         inventor_last_name)
# Explicit join key: inventor_id is the only shared column. Naming it
# silences dplyr's "Joining by" message and guards against accidental
# joins on extra shared columns.
inventor_3 = inventor_df2 %>%
  left_join(inventor_df, by = "inventor_id") %>%
  arrange(desc(number_patents))
unique(inventor_3)
# Same result built with a direct call; inventor_joined is what the
# shiny server renders.
inventor_joined =
  left_join(inventor_df2, inventor_df, by = "inventor_id") %>%
  arrange(desc(number_patents))
str(inventor_joined)
head(unique(inventor_joined))
#-----------------------------------
# menu objective #3
# Patent count per assignee country, NA rows removed, sorted desc.
menu3_df = unnested_project_result %>%
  group_by(assignee_country) %>%
  summarise(count = n())
menu3_df
colnames(menu3_df) <- c("Country", "Total")
menu3 = na.omit(menu3_df) %>%
  select(Country, Total) %>%
  #na.exclude(assignee_org) %>%
  arrange(desc(Total))
menu3
#menu3_result = menu3[1:5, ]
#menu3_result
menu3_dt = as.data.table(menu3)
menu3_dt
#-----------------------------------
# menu objective #4
# Assignee organizations holding more than 10 patents.
head(newcore3)
patents_over_10 = filter(newcore3, num_patents > 10 )
patents_over_10_dt = as.data.table(patents_over_10)
head(patents_over_10_dt)
#-----------------------------------
# shiny app
# UI layout: three tabs — console-style summary, a filterable data
# table, and an analysis tab with sliders driving the plots/tables.
ui <- fluidPage(
  # Give the page a title
  titlePanel("CIS 4730 Group Project"),
  #--------------------------------
  #tab #1 - Summary
  tabsetPanel(
    id = 'dataset',
    tabPanel("Summary", verbatimTextOutput("summary")),
    #---------------------------------------------------
    #tab #2 - DataTable
    # Filters: assignee state drop-down and inventor-last-name text box.
    tabPanel("DataTable",
             selectInput("assignee_state", "Assignee State:",
                         c("All", sort(unique(patents_dt$assignee_lastknown_state)))
             ),
             hr(),
             textInput("inventor", "Inventor's last name contains
                (e.g., Zeng)  Note: case sensitive"),
             hr(),
             dataTableOutput("mytable2")
    ),
    #--------------------------------------
    # tab #3 - AnalyzeData
    tabPanel("AnalyzeData",
             # Generate a row with a sidebar
             sidebarLayout(
               # Define the sidebar
               sidebarPanel(
                 # Input: Slider for barplot - number of top assignees by # of patents
                 sliderInput("number",
                             "Number of top assignees requested:",
                             value = 5,
                             min = 1,
                             max = 10),
                 hr(),
                 #helpText("Top Assignee Organizations")
                 # Input: Slider for the number of top inventors by # of patents
                 sliderInput("n",
                             "Number of top inventors requested:",
                             value = 5,
                             min = 1,
                             max = 10),
                 hr(),
                 # Input: Slider for countries are most interested in obtaining patents by assignee country
                 sliderInput("total",
                             "Number of Countries interested in obtaining patents:",
                             value = 5,
                             min = 1,
                             max = 10),
                 hr(),
                 # checkbox - show assignee org with more than 10 patents
                 checkboxInput(inputId = "over_10_patents",
                               label = strong("Show assignee organizations
                                  with more than 10 patents"),
                               value = FALSE),
                 hr()
               ),
               # Outputs fed by the server: bar plot plus three tables.
               mainPanel(
                 plotOutput("patentsPlot"),
                 dataTableOutput("show"),
                 tableOutput("view"),
                 tableOutput("country")
               )
             )
    )
  )
)
# Server: renders the summary, the filterable data table, and the
# analysis outputs from the pre-computed tables above.
server <- function(input, output) {
  #-----------------------------------
  # summary tab
  output$summary <- renderPrint(summary_stats_dt)
  #-----------------------------------
  # data table tab
  # output data table & filter data based on selections
  output$mytable2 <- renderDataTable({
    mydata <- patents_dt
    state <- patents_dt$assignee_lastknown_state
    #drop down menu - filter data table by assignee
    if (input$assignee_state != "All") {
      mydata <- mydata[state == input$assignee_state, ]
    }
    # text box filter by inventor
    # %like% is data.table's pattern match (case-sensitive, per the UI label)
    if (input$inventor != "") {
      inventor <- input$inventor
      mydata <- mydata[mydata$inventor_last_name %like% inventor]
    }
    mydata
  })
  #---------------------------------
  # analyze data tab
  #output table - assignee organizations with more than 10 patents
  # (renders nothing while the checkbox is unchecked)
  output$show <- renderDataTable({
    if (input$over_10_patents) {
      patents_over_10_dt
    }
  })
  #output bar plot
  # slider "number" controls how many top assignees are shown
  output$patentsPlot <- renderPlot({
    plot_table <- head(table3, input$number)
    # Render a barplot
    barplot(plot_table,
            ylab = "Number of Patents",
            main = "Top Assignee Organizations",
            ylim = c(0, 8000),
            cex.names = .35,
            col = "blue")
  })
  #output top inventors table
  output$view <- renderTable({
    head(unique(inventor_joined), n = input$n)
  })
  #render top county table
  output$country <- renderTable({
    head(menu3_dt, input$total)
  }, bordered = TRUE)
}
shinyApp(ui = ui, server = server)
|
/app.r
|
no_license
|
cbarlow6/shiny-team-project
|
R
| false
| false
| 8,423
|
r
|
library(patentsview)
library(tidyverse)
library(shiny)
library(data.table)
# query of patent database
project_query <- qry_funs$and(
qry_funs$gte(patent_date = "2016-01-01"),
qry_funs$lte(patent_date = "2016-03-31")
)
# original dataframe
project_result = search_pv(
query = project_query,
fields = c("patent_number",
"patent_date",
"inventor_id",
"inventor_last_name",
"inventor_lastknown_city",
"inventor_lastknown_state",
"assignee_id",
"assignee_organization",
"assignee_lastknown_state",
"assignee_country"),
all_pages = TRUE
)
# unnested original data frame
unnested_project_result = project_result$data$patents %>%
unnest(inventors, .drop = FALSE) %>%
unnest(assignees)
unnested_project_result[1:5, ]
#--------------------------------------------
#core objective #1
#Print summary in console
core1_df = unnested_project_result %>%
summarise("Total number of patents:" = n_distinct(patent_number),
"Total number of inventors:" = n_distinct(inventor_id),
"Total number of assignees:" = n_distinct(assignee_id))
summary_stats_dt = as.data.table(core1_df)
summary_stats_dt
#---------------------------------------
# Core objective #2
core2_df = unnested_project_result%>%
select(patent_number,
patent_date,
inventor_last_name,
inventor_lastknown_city,
assignee_organization,
assignee_lastknown_state)
patents_dt = as.data.table(core2_df)
head(patents_dt)
#patents_table[1:5, ]
#str(patents_table)
#---------------------------------------
# core objective #3 - print top 5 assignees
# Patent count per assignee organization.
core3_df = core2_df %>%
  group_by(assignee_organization) %>%
  summarise(count = n())
head(core3_df)
colnames(core3_df) <- c("assignee_org", "num_patents")
# Drop unassigned patents (NA organization) and rank by patent count.
# filter(!is.na(...)) states the intent that na.exclude() only implied:
# the original na.exclude(df, assignee_org) silently ignored its second
# argument and dropped rows with NA in any column — equivalent here
# because num_patents (from n()) is never NA.
newcore3 = core3_df %>%
  select(assignee_org, num_patents) %>%
  filter(!is.na(assignee_org)) %>%
  arrange(desc(num_patents))
result = newcore3[1:5, ]
result
# core objective #3 - bar plot 5 top assignees
newtable = table(unnested_project_result$assignee_organization,
exclude = NA)
table3 = sort(newtable, decreasing = TRUE)
table4 = head(table3, n = 5)
#horizontal bar plot
par(mar = c(5,9,4,2))
assignees_plot2 = barplot(table4,
#xlab = "Number of Patents", horiz = TRUE,
#main = "Top Assignee Organizations",
xlim = c(0, 8000),
cex.names = .40,
las = 2,
col = "blue")
# vertical bar plot
par(mar=c(9,4,2,2))
assignees_plot = barplot(table4,
ylab = "Number of Patents",
main = "Top Assignee Organizations",
ylim = c(0, 8000),
cex.names = .40,
las = 2,
col = "blue"
)
#------------------------------------
#core objective 4 - drop down menu state of assignee organization
#-----------------------------------
#core objective 5 - text box query investor's last name
#-----------------------------------
#menu objective 2
# Patents per inventor id.
inventor_df = unnested_project_result %>%
  group_by (inventor_id) %>%
  summarise(number_patents = n())
head(inventor_df)
inventor_df2 = unnested_project_result %>%
  select(inventor_id,
         inventor_last_name)
# Explicit join key: inventor_id is the only shared column. Naming it
# silences dplyr's "Joining by" message and guards against accidental
# joins on extra shared columns.
inventor_3 = inventor_df2 %>%
  left_join(inventor_df, by = "inventor_id") %>%
  arrange(desc(number_patents))
unique(inventor_3)
# Same result built with a direct call; inventor_joined is what the
# shiny server renders.
inventor_joined =
  left_join(inventor_df2, inventor_df, by = "inventor_id") %>%
  arrange(desc(number_patents))
str(inventor_joined)
head(unique(inventor_joined))
#-----------------------------------
# menu objective #3
menu3_df = unnested_project_result %>%
group_by(assignee_country) %>%
summarise(count = n())
menu3_df
colnames(menu3_df) <- c("Country", "Total")
menu3 = na.omit(menu3_df) %>%
select(Country, Total) %>%
#na.exclude(assignee_org) %>%
arrange(desc(Total))
menu3
#menu3_result = menu3[1:5, ]
#menu3_result
menu3_dt = as.data.table(menu3)
menu3_dt
#-----------------------------------
# menu objective #4
head(newcore3)
patents_over_10 = filter(newcore3, num_patents > 10 )
patents_over_10_dt = as.data.table(patents_over_10)
head(patents_over_10_dt)
#-----------------------------------
# shiny app
ui <- fluidPage(
# Give the page a title
titlePanel("CIS 4730 Group Project"),
#--------------------------------
#tab #1 - Summary
tabsetPanel(
id = 'dataset',
tabPanel("Summary", verbatimTextOutput("summary")),
#---------------------------------------------------
#tab #2 - DataTable
tabPanel("DataTable",
selectInput("assignee_state", "Assignee State:",
c("All", sort(unique(patents_dt$assignee_lastknown_state)))
),
hr(),
textInput("inventor", "Inventor's last name contains
(e.g., Zeng) Note: case sensitive"),
hr(),
dataTableOutput("mytable2")
),
#--------------------------------------
# tab #3 - AnalyzeData
tabPanel("AnalyzeData",
# Generate a row with a sidebar
sidebarLayout(
# Define the sidebar
sidebarPanel(
# Input: Slider for barplot - number of top assignees by # of patents
sliderInput("number",
"Number of top assignees requested:",
value = 5,
min = 1,
max = 10),
hr(),
#helpText("Top Assignee Organizations")
# Input: Slider for the number of top inventors by # of patents
sliderInput("n",
"Number of top inventors requested:",
value = 5,
min = 1,
max = 10),
hr(),
# Input: Slider for countries are most interested in obtaining patents by assignee country
sliderInput("total",
"Number of Countries interested in obtaining patents:",
value = 5,
min = 1,
max = 10),
hr(),
# checkbox - show assignee org with more than 10 patents
checkboxInput(inputId = "over_10_patents",
label = strong("Show assignee organizations
with more than 10 patents"),
value = FALSE),
hr()
),
mainPanel(
plotOutput("patentsPlot"),
dataTableOutput("show"),
tableOutput("view"),
tableOutput("country")
)
)
)
)
)
server <- function(input, output) {
#-----------------------------------
# summary tab
output$summary <- renderPrint(summary_stats_dt)
#-----------------------------------
# data table tab
# output data table & filter data based on selections
output$mytable2 <- renderDataTable({
mydata <- patents_dt
state <- patents_dt$assignee_lastknown_state
#drop down menu - filter data table by assignee
if (input$assignee_state != "All") {
mydata <- mydata[state == input$assignee_state, ]
}
# text box filter by inventor
if (input$inventor != "") {
inventor <- input$inventor
mydata <- mydata[mydata$inventor_last_name %like% inventor]
}
mydata
})
#---------------------------------
# analyze data tab
#output table - assignee organizations with more than 10 patents
output$show <- renderDataTable({
if (input$over_10_patents) {
patents_over_10_dt
}
})
#output bar plot
output$patentsPlot <- renderPlot({
plot_table <- head(table3, input$number)
# Render a barplot
barplot(plot_table,
ylab = "Number of Patents",
main = "Top Assignee Organizations",
ylim = c(0, 8000),
cex.names = .35,
col = "blue")
})
#output top inventors table
output$view <- renderTable({
head(unique(inventor_joined), n = input$n)
})
#render top county table
output$country <- renderTable({
head(menu3_dt, input$total)
}, bordered = TRUE)
}
shinyApp(ui = ui, server = server)
|
#' Cancels one or more SLURM jobs
#' @param x character vector - the SLURM ids
#' @return The result of \code{systemSubmit} on the scancel call.
cancelJob <- function(x) {
  x <- as.character(x)
  # scancel accepts several ids in one call; collapsing is a no-op for
  # a single id, so the original length(x) > 1 check is unnecessary.
  ids <- paste(x, collapse = " ")
  # TRUE, not T: T is an ordinary variable that can be reassigned.
  systemSubmit(paste("scancel", ids),
               wait = rSubmitterOpts$TIME_WAIT_FAILED_CMD,
               ignore.stdout = TRUE)
}
|
/R/cancelJob.R
|
no_license
|
pablo-gar/rSubmitter
|
R
| false
| false
| 296
|
r
|
#' Cancels one or more SLURM jobs
#' @param x character vector - the SLURM ids
#' @return The result of \code{systemSubmit} on the scancel call.
cancelJob <- function(x) {
  x <- as.character(x)
  # scancel accepts several ids in one call; collapsing is a no-op for
  # a single id, so the original length(x) > 1 check is unnecessary.
  ids <- paste(x, collapse = " ")
  # TRUE, not T: T is an ordinary variable that can be reassigned.
  systemSubmit(paste("scancel", ids),
               wait = rSubmitterOpts$TIME_WAIT_FAILED_CMD,
               ignore.stdout = TRUE)
}
|
library(tidyverse)
library(glue)
library(cowplot)
theme_set(theme_cowplot(14))
# Traits and the two top-SNP counts whose results are compared below.
vals_traits <- c("bmi", "weight", "waist", "hip", "height", "whr")
# vals_traits <- "height"
ntop1 <- 500
ntop2 <- 1000
vals_chr <- 1:22
vals_est <- c("mean", "median")
# Four SNP filters f0..f3 (f0 = no filter; see the switch below).
vals_filt <- paste0("f", 0:3)
# Genome-wide significance threshold (third option, 5e-8) reused for
# thr1_lmm and thr3; thr1_lr is the nominal 0.05.
thr2 <- c(1e-3, 1e-5, 5e-8)[3]
thr1_lmm <- thr2
thr1_lr <- 0.05
thr3 <- thr2
# For each trait: compute three gamma estimators (trace ratio, median/
# mean of squared SE ratios, median/mean of squared z ratios) under
# each SNP filter, collecting everything into one tibble.
tab <- lapply(vals_traits, function(trait) {
  cat("trait", trait, "\n")
  # Trace-based gamma: ratio of the `mult` values at the two top-SNP counts.
  h2 <- glue("out/h2/{ntop1}/{trait}.tsv.gz") %>% read_tsv
  gamma1 <- h2$mult
  h2 <- glue("out/h2/{ntop2}/{trait}.tsv.gz") %>% read_tsv
  gamma2 <- h2$mult
  cat(" - gamma 1 & 2", gamma1, "/", gamma2, "\n")
  gamma <- gamma2/gamma1
  cat(" - gamma", gamma, "\n")
  # Per-chromosome association results, suffixed _lr (ntop1) / _lmm (ntop2).
  t1 <- glue("out/lmm_loco_pcs_top/{ntop1}/{trait}.{vals_chr}.tsv.gz") %>%
    lapply(read_tsv) %>% bind_rows
  t1 <- select(t1, snp, beta, se, zscore, pval) %>%
    dplyr::rename(b_lr = beta, se_lr = se, z_lr = zscore, p_lr = pval)
  t2 <- glue("out/lmm_loco_pcs_top/{ntop2}/{trait}.{vals_chr}.tsv.gz") %>%
    lapply(read_tsv) %>% bind_rows
  t2 <- select(t2, snp, beta, se, zscore, pval) %>%
    dplyr::rename(b_lmm = beta, se_lmm = se, z_lmm = zscore, p_lmm = pval)
  # Explicit join key: after the renames, snp is the only shared column.
  t <- left_join(t2, t1, by = "snp")
  lapply(vals_filt, function(filt) {
    cat("filter", filt, "\n")
    t <- switch(filt,
      "f0" = t,
      "f1" = filter(t, p_lr < thr1_lr & p_lmm < thr1_lmm),
      "f2" = filter(t, p_lr < thr2 & p_lmm < thr2),
      "f3" = filter(t, p_lr < thr3 & p_lmm < thr3),
      stop("filt"))
    print(t)  # progress/debug output, kept on purpose
    # Per-SNP squared ratios of SEs and z-scores.
    vals_se2 <- with(t, (se_lr / se_lmm)^2)
    vals_z2 <- with(t, (z_lmm / z_lr)^2)
    # vals_z2 <- with(t, (z_lr / z_lmm)^2)
    lapply(vals_est, function(est) {
      se2 <- switch(est, "median" = median(vals_se2), "mean" = mean(vals_se2), stop("est"))
      z2 <- switch(est, "median" = median(vals_z2), "mean" = mean(vals_z2), stop("est"))
      # One row per estimator; quartiles only exist for se2/z2.
      tibble(
        gamma = c(gamma, se2, z2),
        estimator = c("trace", "se2", "z2"),
        q25 = c(NA, quantile(vals_se2, 0.25, na.rm = TRUE), quantile(vals_z2, 0.25, na.rm = TRUE)),
        q75 = c(NA, quantile(vals_se2, 0.75, na.rm = TRUE), quantile(vals_z2, 0.75, na.rm = TRUE))) %>%
        mutate(trait = trait, filter = filt, m = nrow(t), est = est)
    }) %>% bind_rows
  }) %>% bind_rows
}) %>% bind_rows
## plot
# NB: filter se2 out
# ptab <- filter(tab, estimator != "se2")
ptab <- tab
# ptab <- filter(ptab, !(estimator == "z2" & filter == "f0"))
# ptab <- filter(ptab, filter != "f0" & est == "median")
# Keep only the median-based estimates for plotting.
ptab <- filter(ptab, est == "median")
# Bars are drawn relative to `offset` so small differences around 1
# remain visible; axis labels are shifted back below.
offset <- 0.9
# ylims <- c(0, 0.5)
p <- ggplot(ptab, aes(filter, gamma - offset, fill = estimator)) +
  geom_bar(stat = "identity", position = position_dodge()) +
  geom_errorbar(aes(ymin = q25 - offset, ymax = q75 - offset),
                width = 0.3, position = position_dodge(0.9)) +
  # SNP count per filter, printed just above the baseline
  geom_text(aes(x = filter, y = 0, label = m), vjust = -1.5, size = 3.5) +
  # dotted reference line at gamma = 1
  geom_hline(yintercept = 1 - offset, linetype = 3, color = "grey20")
p <- p + facet_wrap(est ~ trait, scales = "free", ncol = 3)
# p <- ggplot(ptab, aes(trait, gamma - offset, fill = estimator, group = estimator)) +
#   geom_bar(stat = "identity", position = position_dodge(0.9)) +
#   geom_errorbar(aes(ymin = q25 - offset, ymax = q75 - offset, group = estimator),
#                 width = 0.3, position = position_dodge(0.9)) +
#   geom_hline(yintercept = 1 - offset, linetype = 3, color = "grey")
# p <- p + facet_grid(est ~ filter)
p <- p +
  # undo the offset in the axis labels so they read as raw gamma
  scale_y_continuous(labels = function(x) x + offset) +
  theme(legend.position = "top") +
  labs(x = NULL, y = NULL)
ggsave("tmp.png", plot = p, dpi = 100, width = 12, height = 6)
|
/scripts/extra/07-fig-mult.R
|
permissive
|
variani/paper-neff
|
R
| false
| false
| 3,613
|
r
|
library(tidyverse)
library(glue)
library(cowplot)
theme_set(theme_cowplot(14))
vals_traits <- c("bmi", "weight", "waist", "hip", "height", "whr")
# vals_traits <- "height"
ntop1 <- 500
ntop2 <- 1000
vals_chr <- 1:22
vals_est <- c("mean", "median")
vals_filt <- paste0("f", 0:3)
thr2 <- c(1e-3, 1e-5, 5e-8)[3]
thr1_lmm <- thr2
thr1_lr <- 0.05
thr3 <- thr2
# For each trait: compute three gamma estimators (trace ratio, median/
# mean of squared SE ratios, median/mean of squared z ratios) under
# each SNP filter, collecting everything into one tibble.
tab <- lapply(vals_traits, function(trait) {
  cat("trait", trait, "\n")
  # Trace-based gamma: ratio of the `mult` values at the two top-SNP counts.
  h2 <- glue("out/h2/{ntop1}/{trait}.tsv.gz") %>% read_tsv
  gamma1 <- h2$mult
  h2 <- glue("out/h2/{ntop2}/{trait}.tsv.gz") %>% read_tsv
  gamma2 <- h2$mult
  cat(" - gamma 1 & 2", gamma1, "/", gamma2, "\n")
  gamma <- gamma2/gamma1
  cat(" - gamma", gamma, "\n")
  # Per-chromosome association results, suffixed _lr (ntop1) / _lmm (ntop2).
  t1 <- glue("out/lmm_loco_pcs_top/{ntop1}/{trait}.{vals_chr}.tsv.gz") %>%
    lapply(read_tsv) %>% bind_rows
  t1 <- select(t1, snp, beta, se, zscore, pval) %>%
    dplyr::rename(b_lr = beta, se_lr = se, z_lr = zscore, p_lr = pval)
  t2 <- glue("out/lmm_loco_pcs_top/{ntop2}/{trait}.{vals_chr}.tsv.gz") %>%
    lapply(read_tsv) %>% bind_rows
  t2 <- select(t2, snp, beta, se, zscore, pval) %>%
    dplyr::rename(b_lmm = beta, se_lmm = se, z_lmm = zscore, p_lmm = pval)
  # Explicit join key: after the renames, snp is the only shared column.
  t <- left_join(t2, t1, by = "snp")
  lapply(vals_filt, function(filt) {
    cat("filter", filt, "\n")
    t <- switch(filt,
      "f0" = t,
      "f1" = filter(t, p_lr < thr1_lr & p_lmm < thr1_lmm),
      "f2" = filter(t, p_lr < thr2 & p_lmm < thr2),
      "f3" = filter(t, p_lr < thr3 & p_lmm < thr3),
      stop("filt"))
    print(t)  # progress/debug output, kept on purpose
    # Per-SNP squared ratios of SEs and z-scores.
    vals_se2 <- with(t, (se_lr / se_lmm)^2)
    vals_z2 <- with(t, (z_lmm / z_lr)^2)
    # vals_z2 <- with(t, (z_lr / z_lmm)^2)
    lapply(vals_est, function(est) {
      se2 <- switch(est, "median" = median(vals_se2), "mean" = mean(vals_se2), stop("est"))
      z2 <- switch(est, "median" = median(vals_z2), "mean" = mean(vals_z2), stop("est"))
      # One row per estimator; quartiles only exist for se2/z2.
      tibble(
        gamma = c(gamma, se2, z2),
        estimator = c("trace", "se2", "z2"),
        q25 = c(NA, quantile(vals_se2, 0.25, na.rm = TRUE), quantile(vals_z2, 0.25, na.rm = TRUE)),
        q75 = c(NA, quantile(vals_se2, 0.75, na.rm = TRUE), quantile(vals_z2, 0.75, na.rm = TRUE))) %>%
        mutate(trait = trait, filter = filt, m = nrow(t), est = est)
    }) %>% bind_rows
  }) %>% bind_rows
}) %>% bind_rows
## plot
# NB: filter se2 out
# ptab <- filter(tab, estimator != "se2")
ptab <- tab
# ptab <- filter(ptab, !(estimator == "z2" & filter == "f0"))
# ptab <- filter(ptab, filter != "f0" & est == "median")
ptab <- filter(ptab, est == "median")
offset <- 0.9
# ylims <- c(0, 0.5)
p <- ggplot(ptab, aes(filter, gamma - offset, fill = estimator)) +
geom_bar(stat = "identity", position = position_dodge()) +
geom_errorbar(aes(ymin = q25 - offset, ymax = q75 - offset),
width = 0.3, position = position_dodge(0.9)) +
geom_text(aes(x = filter, y = 0, label = m), vjust = -1.5, size = 3.5) +
geom_hline(yintercept = 1 - offset, linetype = 3, color = "grey20")
p <- p + facet_wrap(est ~ trait, scales = "free", ncol = 3)
# p <- ggplot(ptab, aes(trait, gamma - offset, fill = estimator, group = estimator)) +
# geom_bar(stat = "identity", position = position_dodge(0.9)) +
# geom_errorbar(aes(ymin = q25 - offset, ymax = q75 - offset, group = estimator),
# width = 0.3, position = position_dodge(0.9)) +
# geom_hline(yintercept = 1 - offset, linetype = 3, color = "grey")
# p <- p + facet_grid(est ~ filter)
p <- p +
scale_y_continuous(labels = function(x) x + offset) +
theme(legend.position = "top") +
labs(x = NULL, y = NULL)
ggsave("tmp.png", plot = p, dpi = 100, width = 12, height = 6)
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/regressionNetworkViz.R
\name{regressionNetworkViz}
\alias{regressionNetworkViz}
\title{Visualize a regression result by a d3 network visualization.}
\usage{
regressionNetworkViz(mylm, sigthresh = 0.05, whichviz = "Sankey",
outfile = "temp.html", mygroup = 0, logvals = TRUE, verbose = FALSE,
correlateMyOutcomes = NA, corthresh = 0.9, zoom = F, doFDR = TRUE)
}
\arguments{
\item{mylm}{lm model output from bigLMStats}
\item{sigthresh}{significance threshold}
\item{whichviz}{which visualization method}
\item{outfile}{significance threshold}
\item{mygroup}{color each entry by group membership}
\item{logvals}{bool}
\item{verbose}{bool}
\item{correlateMyOutcomes}{not sure, see code}
\item{corthresh}{correlation threshold}
\item{zoom}{zooming factor}
\item{doFDR}{bool}
}
\value{
html file is output
}
\description{
Use either a force directed graph or a Sankey graph to show relationships
between predictors and outcome variables. correlateMyOutcomes should
correspond to the outcome variables ...
}
\examples{
\dontrun{
colnames(brainpreds)<-paste('Vox',c(1:ncol(brainpreds)),sep='')
colnames( mylm$beta.pval )<-colnames(brainpreds)
demognames<-rownames(mylm$beta.pval)
myout = regressionNetworkViz( mylm , sigthresh=0.05, outfile='temp2.html')
}
}
\author{
Avants BB
}
|
/man/regressionNetworkViz.Rd
|
permissive
|
alainlompo/ANTsR
|
R
| false
| true
| 1,369
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/regressionNetworkViz.R
\name{regressionNetworkViz}
\alias{regressionNetworkViz}
\title{Visualize a regression result by a d3 network visualization.}
\usage{
regressionNetworkViz(mylm, sigthresh = 0.05, whichviz = "Sankey",
outfile = "temp.html", mygroup = 0, logvals = TRUE, verbose = FALSE,
correlateMyOutcomes = NA, corthresh = 0.9, zoom = F, doFDR = TRUE)
}
\arguments{
\item{mylm}{lm model output from bigLMStats}
\item{sigthresh}{significance threshold}
\item{whichviz}{which visualization method}
\item{outfile}{significance threshold}
\item{mygroup}{color each entry by group membership}
\item{logvals}{bool}
\item{verbose}{bool}
\item{correlateMyOutcomes}{not sure, see code}
\item{corthresh}{correlation threshold}
\item{zoom}{zooming factor}
\item{doFDR}{bool}
}
\value{
html file is output
}
\description{
Use either a force directed graph or a Sankey graph to show relationships
between predictors and outcome variables. correlateMyOutcomes should
correspond to the outcome variables ...
}
\examples{
\dontrun{
colnames(brainpreds)<-paste('Vox',c(1:ncol(brainpreds)),sep='')
colnames( mylm$beta.pval )<-colnames(brainpreds)
demognames<-rownames(mylm$beta.pval)
myout = regressionNetworkViz( mylm , sigthresh=0.05, outfile='temp2.html')
}
}
\author{
Avants BB
}
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sanapiwrapper.R
\name{availableSince}
\alias{availableSince}
\title{Get the earliest date for which the Santiment metric is available.}
\usage{
availableSince(metric, slug)
}
\arguments{
\item{metric}{metric}
\item{slug}{project}
}
\value{
earliest date
}
\description{
Get the earliest date for which the Santiment metric is available.
}
\examples{
availableSince('daily_active_addresses', 'ethereum')
}
|
/man/availableSince.Rd
|
no_license
|
josefansinger/sanapiwrapper
|
R
| false
| true
| 485
|
rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sanapiwrapper.R
\name{availableSince}
\alias{availableSince}
\title{Get the earliest date for which the Santiment metric is available.}
\usage{
availableSince(metric, slug)
}
\arguments{
\item{metric}{metric}
\item{slug}{project}
}
\value{
earliest date
}
\description{
Get the earliest date for which the Santiment metric is available.
}
\examples{
availableSince('daily_active_addresses', 'ethereum')
}
|
# Fuzzing/regression fixture (valgrind run): calls the internal
# multivariance:::match_rows with a 1x7 matrix A of extreme doubles and
# a 1x1 matrix B, then prints the structure of whatever comes back.
testlist <- list(A = structure(c(2.31584307392677e+77, 9.53818252170334e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0), .Dim = c(1L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result)
|
/multivariance/inst/testfiles/match_rows/AFL_match_rows/match_rows_valgrind_files/1613109943-test.R
|
no_license
|
akhikolla/updatedatatype-list3
|
R
| false
| false
| 257
|
r
|
# Fuzzing/regression fixture (valgrind run): calls the internal
# multivariance:::match_rows with a 1x7 matrix A of extreme doubles and
# a 1x1 matrix B, then prints the structure of whatever comes back.
testlist <- list(A = structure(c(2.31584307392677e+77, 9.53818252170334e+295, 1.22810536108214e+146, 4.12396251261199e-221, 0, 0, 0), .Dim = c(1L, 7L)), B = structure(0, .Dim = c(1L, 1L)))
result <- do.call(multivariance:::match_rows,testlist)
str(result)
|
# Plot global active power for 1-2 Feb 2007 and save it as plot2.png.
mydata <- read.csv("household_power_consumption.txt", header = TRUE, sep = ";")
# Keep only the two target days (dates stored as d/m/Y strings).
data_f <- mydata[which(mydata$Date == "1/2/2007" | mydata$Date == "2/2/2007"), ]
# Combine date and time into a POSIXlt timestamp for the x axis.
data_f$datetime <- strptime(paste(data_f$Date, data_f$Time), "%d/%m/%Y %H:%M:%S")
# Render directly to the PNG device: dev.copy() duplicates the screen
# plot and can resize/clip it, while drawing into png() reproduces the
# plot exactly at the device size.
png(file = "plot2.png")
par(mar = c(4, 4, 1, 1))
# as.character() first: the column may have been read as a factor.
plot(data_f$datetime, as.numeric(as.character(data_f$Global_active_power)),
     type = "l", ylab = "Global Active Power (kilowatts)", xlab = "")
dev.off()
|
/plot2.R
|
no_license
|
linmapitt/Explore_Data_Project_1
|
R
| false
| false
| 458
|
r
|
# plot2.R: plot Global Active Power over 2007-02-01/02 and save to plot2.png.
mydata <- read.csv("household_power_consumption.txt", header = TRUE, sep = ";")

# Keep only the two target dates (Date is stored as d/m/Y character strings).
data_f <- mydata[which(mydata$Date == "1/2/2007" | mydata$Date == "2/2/2007"), ]

# Combine the separate Date and Time columns into a single datetime value.
data_f$datetime <- strptime(paste(data_f$Date, data_f$Time),
                            "%d/%m/%Y %H:%M:%S")

# Write directly to the PNG device instead of dev.copy(): dev.copy() snapshots
# an already-open screen device at whatever size it happens to be, and fails
# in non-interactive sessions with no device. png() is deterministic.
png(file = "plot2.png", width = 480, height = 480)
par(mar = c(4, 4, 1, 1))
plot(data_f$datetime,
     as.numeric(as.character(data_f$Global_active_power)),
     type = "l",
     ylab = "Global Active Power (kilowatts)", xlab = "")
dev.off()
|
# Examples for the eaf package: empirical attainment function plots.
library(eaf)
### Name: eaf-package
### Title: Plots of the Empirical Attainment Function
### Aliases: eaf-package _PACKAGE eaf
### Keywords: graphs package
### ** Examples
data(gcp2x2)

# Drop the TSinN1 algorithm and remove its now-unused factor level.
tabucol <- subset(gcp2x2, alg != "TSinN1")
tabucol$alg <- tabucol$alg[drop = TRUE]

eafplot(time + best ~ run, data = tabucol,
        subset = tabucol$inst == "DSJC500.5")
eafplot(time + best ~ run | inst, groups = alg, data = gcp2x2)
eafplot(time + best ~ run | inst, groups = alg, data = gcp2x2,
        percentiles = c(0, 50, 100), include.extremes = TRUE,
        cex = 1.4, lty = c(2, 1, 2), lwd = c(2, 2, 2),
        col = c("black", "blue", "grey50"))

# Median (50th percentile) attainment surfaces for two bundled data sets.
extdata_dir <- file.path(system.file(package = "eaf"), "extdata")
alg1 <- read.data.sets(file.path(extdata_dir, "ALG_1_dat"))
alg2 <- read.data.sets(file.path(extdata_dir, "ALG_2_dat"))
eafplot(alg1, alg2, percentiles = c(50))
eafplot(list(A1 = alg1, A2 = alg2), percentiles = c(50))
eafdiffplot(alg1, alg2)
## Save to a PDF file
# dev.copy2pdf(file="eaf.pdf", onefile=TRUE, width=5, height=4)
|
/data/genthat_extracted_code/eaf/examples/eaf-package.Rd.R
|
no_license
|
surayaaramli/typeRrh
|
R
| false
| false
| 910
|
r
|
# Examples for the eaf package: empirical attainment function plots.
library(eaf)
### Name: eaf-package
### Title: Plots of the Empirical Attainment Function
### Aliases: eaf-package _PACKAGE eaf
### Keywords: graphs package
### ** Examples
data(gcp2x2)

# Drop the TSinN1 algorithm and remove its now-unused factor level.
tabucol <- subset(gcp2x2, alg != "TSinN1")
tabucol$alg <- tabucol$alg[drop = TRUE]

eafplot(time + best ~ run, data = tabucol,
        subset = tabucol$inst == "DSJC500.5")
eafplot(time + best ~ run | inst, groups = alg, data = gcp2x2)
eafplot(time + best ~ run | inst, groups = alg, data = gcp2x2,
        percentiles = c(0, 50, 100), include.extremes = TRUE,
        cex = 1.4, lty = c(2, 1, 2), lwd = c(2, 2, 2),
        col = c("black", "blue", "grey50"))

# Median (50th percentile) attainment surfaces for two bundled data sets.
extdata_dir <- file.path(system.file(package = "eaf"), "extdata")
alg1 <- read.data.sets(file.path(extdata_dir, "ALG_1_dat"))
alg2 <- read.data.sets(file.path(extdata_dir, "ALG_2_dat"))
eafplot(alg1, alg2, percentiles = c(50))
eafplot(list(A1 = alg1, A2 = alg2), percentiles = c(50))
eafdiffplot(alg1, alg2)
## Save to a PDF file
# dev.copy2pdf(file="eaf.pdf", onefile=TRUE, width=5, height=4)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.