blob_id
stringlengths
40
40
directory_id
stringlengths
40
40
path
stringlengths
2
327
content_id
stringlengths
40
40
detected_licenses
listlengths
0
91
license_type
stringclasses
2 values
repo_name
stringlengths
5
134
snapshot_id
stringlengths
40
40
revision_id
stringlengths
40
40
branch_name
stringclasses
46 values
visit_date
timestamp[us]date
2016-08-02 22:44:29
2023-09-06 08:39:28
revision_date
timestamp[us]date
1977-08-08 00:00:00
2023-09-05 12:13:49
committer_date
timestamp[us]date
1977-08-08 00:00:00
2023-09-05 12:13:49
github_id
int64
19.4k
671M
star_events_count
int64
0
40k
fork_events_count
int64
0
32.4k
gha_license_id
stringclasses
14 values
gha_event_created_at
timestamp[us]date
2012-06-21 16:39:19
2023-09-14 21:52:42
gha_created_at
timestamp[us]date
2008-05-25 01:21:32
2023-06-28 13:19:12
gha_language
stringclasses
60 values
src_encoding
stringclasses
24 values
language
stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
7
9.18M
extension
stringclasses
20 values
filename
stringlengths
1
141
content
stringlengths
7
9.18M
061c9f389d253a2d45146c411f1b1e158059695c
f971b00d50be32357c35ab530be3a6a8baf70318
/1_simulate_phenotypes.R
865ef0a9d635b7db940444fea3a2a81ff3927863
[]
no_license
joegage/GWAS_AUC
34439470dfe8739f76e1820d9778472d6f05dcdc
5b10420143c58bf9bb3b152cdf13bca2cda6fc91
refs/heads/master
2020-03-12T16:49:41.264086
2018-04-24T16:09:59
2018-04-24T16:09:59
130,724,447
1
0
null
null
null
null
UTF-8
R
false
false
1,685
r
1_simulate_phenotypes.R
source("simPheno.R") genoFile <- "~/Dropbox/papers/tassel_GWAS_paper/analyses/widiv/genos_phenos_farmCPU.rda" load(genoFile) rm(allPhenos); gc() GM$chrom <- as.character(GM$chrom) GM$pos <- as.character(GM$pos) h2 <- seq(0.1, 1, 0.1) nCausativeValues <- c(10,100, 1000) nReps <- 10 GDfreqs <- colMeans(GD[,-1]) GDfreqs <- apply(cbind(GDfreqs, 1-GDfreqs), 1, min) keep <- GDfreqs > 0.02 & GM$chrom %in% 1:10 print(table(keep)) GD <- GD[,c(TRUE, keep)] GM <- GM[keep, ] set.seed(345) for(nCausative in nCausativeValues){ causativeSNPs <- sample(2:ncol(GD), nCausative) genotypes <- t(as.matrix(GD[,causativeSNPs])) alleleFreqs <- rowMeans(genotypes) simulated <- simPheno(h2, nCausative, nrow(GD), alleleFreqs, genotypes, nReps) simulated$effects <- data.frame(SNP=colnames(GD)[causativeSNPs], chr=GM[causativeSNPs, "chrom"], pos=GM[causativeSNPs, "pos"], effect=simulated$effects) params <- expand.grid(1:nReps, h2*100) colnames(simulated$pheno) <- paste0("c", nCausative, "h", params[,2], "r", params[,1]) assign(paste0("c", nCausative), simulated) } allPhenos <- data.frame(taxa=GD$taxa, c10$pheno, c100$pheno, c1000$pheno) save("allPhenos", "GD", "GM", file="simulated_phenos_genos.rda") write.table(allPhenos, "all_simulated_phenos.txt", row.names=FALSE, col.names=TRUE, sep="\t", quote=FALSE) for(nCausative in nCausativeValues){ fileName <- paste0("c", nCausative, "_effects.txt") outDat <- get(paste0("c", nCausative))$effects write.table(outDat, fileName, row.names=FALSE, col.names=TRUE, quote=FALSE, sep="\t") }
2495ca5b29c1335d9d054bbed7aaf955488b6421
94fa6de1769db0c8bc2862c67f4e7d80674fd395
/Part 4 - Clustering/Section 25 - Hierarchical Clustering/data_preprocessing_template.R
4dda8560fa35c41593ba3f51e99c7544d9c2b65d
[]
no_license
samehamin/Machine-Learning-A-Z
76d6f04f25c0c7a22f70004027d7212b794a9ac7
d83bff72bf7da1181fd9a70725010842285ee756
refs/heads/master
2021-05-06T15:46:35.964520
2018-11-17T20:23:16
2018-11-17T20:23:16
113,660,183
3
0
null
null
null
null
UTF-8
R
false
false
492
r
data_preprocessing_template.R
# Data Preprocessing Template # Importing the dataset dataset = read.csv('Mall_Customers.csv') X = dataset[4:5] # Using the dendrogram to find the optimal number of clusters dendrogram = hclust(dist(X, method = 'euclidean'), method = 'ward.D') plot(dendrogram, main = paste('Dendrogram'), xlab = 'Customers', ylab = 'Euclidean distances') # Fitting heirachical clustering to the mall dataset hc = hclust(dist(X, method = 'euclidean'), method = 'ward.D') y_hc = cutree(hc, 5)
c50afb252a3dea2aa7e67e26f463f13497ed8854
66539bfe3ccea078550d60b55b1d20b8b484dc90
/R/lis3.R
949e0d2de1ed21f6d898cc51888d0910e218849d
[]
no_license
xinyue-L/PML
ee1e189f32ecbaad4f24d710d517e818d9a231b6
79f67a8d3443b514e4536daa4579524881096292
refs/heads/master
2020-07-05T17:48:39.537834
2020-05-07T03:59:40
2020-05-07T03:59:40
202,718,263
0
1
null
2020-05-07T04:01:11
2019-08-16T11:46:35
R
UTF-8
R
false
false
645
r
lis3.R
#' Activity data for three individuals #' #' A synthetic data list of three elements, each consisting of an activity matrix for one individual, and #' each column of an activity matrix is one-day activity observation. Therefore, an activity matrix is #' \code{nob} by \code{nday}. It is a named list, the name of which is the individual ID. It is for illustration #' purposes of the functions in the package \code{PML} only. #' #' @docType data #' #' @usage data(lis3) #' #' @format An object of class \code{list}. #' #' @keywords datasets #' #' @examples #' data(lis3) #' pa3 <- form(lis3) #' #' @seealso \code{\link{form}}, \code{\link{pa3}}
0db6f5eae5976bd339237cc6a54c9d171399f90c
8228230ee94076f3afaa84a8cd1ad7fa6bb3e7cf
/e5_reshape_if_else.R
955da2c308fa92c3e180f815bfa186127aabc593
[]
no_license
agaspard98/borealis
36f1b95889ebf71f2fba8469c6903bc42225ef23
5ad23bfa83fec14b8d2737322889e524a57b72fa
refs/heads/master
2023-09-05T03:33:07.154842
2021-10-31T11:02:50
2021-10-31T11:02:50
411,773,526
0
0
null
null
null
null
UTF-8
R
false
false
3,943
r
e5_reshape_if_else.R
#Exercise #5 #Practice the following skills: # reshape data frames # if_else statements #Part 1: version control -------------------------------------------- # Please track changes to your script using version control. Include in your homework the URL to your completed homework assignment available on your #GitHub repository #URL: https://github.com/agaspard98/borealis.git #Part 2: reshaping data frames --------------------------------------------- #Using the 'batting.2008.Rdata' dataset, please do the following setwd("~/2021fallclasses/Intro to R/assignments/assignment 5") load("~/2021fallclasses/Intro to R/assignments/assignment 5/batting.2008.Rdata") library(tidyverse) head(d) #1) Using the spread() function, please create a data frame that shows the total number of home runs (HR) for each birthYear for #each team (use the teamID column) aggregated by player da <- d[, c("teamID", "HR", "nameLast", "nameFirst")] dab <- data.frame(r1=names(da$birthYear), t(da) dac <- spread(data = dab, key = teamID, value = HR) #2) Subset the data for the Houston Astros (HOU). Using the gather() function, create a new data frame that has 3 columns: # (1) playerID, (2) variable containing (AB, R, H, 2B, 3B) and (3) column with the corresponding values for the stats. ha = d[d$teamID == "HOU", c("playerID", "AB", "R", "H", "2B", "3B")] hb = gather(data = ha, key = player.stat, value = stat.value, 2:6) #3) Repeat the process for Question 2. However, this time, please use the melt() function from the 'reshape2' package. hamelt <- melt(data = ha, id = c("playerID"), variable.name = c("player.stat")) #4) Using the dcast() function from the 'reshape2 package', find the mean number of AB, R, H, 2B, 3B for each player (use 'playerID' to aggregate). 
hadcast <- dcast(data = ha, formula = playerID~3B, fun.aggregate = mean) #Part 3: if_else statements ------------------------------- # one condition #5) generate an if_else statment to test if the value of a numeric object is positive number x = 7 if(x > 0){ print("positive number") } else { print("negative number") } #6) using the two objects below, please generate an if_else statement that uses logical operator (i.e, !=, ==, >, etc.) # in the test x <- 5 y <- 8 if(x != y){ print("True") } else { print("False") } #7) Hamlet's quandry: In Shakespeare's play Hamlet, the lead character has a famous speech "to be or not to be". browseURL("https://www.poetryfoundation.org/poems/56965/speech-to-be-or-not-to-be-that-is-the-question") # Write an if_else statement using the "to be' or 'not to be' for outcomes of the 'yes' and 'no' arguments respectively. suffer = 7 take.arms = 12 if(suffer != take.arms){ print("To Be") } else { print("Not to be") } #two or more conditions #8) create an 'if else' statement that returns (in order of preference) your four ideal pizza toppings pepercini = 1 peperoni = 2 sausage = 3 cheese = 4 if(pepercini = 1 & peperoni = 2) { print("Pepercini, peperoni, sausage, cheese") } else if(pepercini = 3 & peperoni =4){ print("Sausage, cheese, pepercini, peperoni") } #two or more conditions joined (new content) #To join two or more conditions into a single if statement, use logical operators viz. && (and), || (or) and ! (not). #example: x <- 7 y <- 5 z <- 2 if(x > y && x > z) { print("x is greater") } #9) generate your own 'if' statement using multiple conditions in one line if(z < y && z < x) { print("z is less than") } } #New content: understanding a common warning/error message when using if_else. # The follow set of code will generate a warning message. Explain in a comment what the warning is telling you. v <- 1:6 if(v %% 2) { print("odd") } else { print("even") } # V has 6 numbers within it, but the if else statement is not the same length. 
R will evaluate the if else statement for only the first value in the v vector.
b6148da056da8043b030ed43c8376742449dd0b8
54a538a87ce7a0bf2313e96cfee6776fdc26227e
/LTLABview_collector.R
eb968086799e0313850d0708ed4e9d4f43e92482
[]
no_license
ubbikas/LTLABcollector
7d6e360094d2944199691f7e6b6d3501db8825c9
17c8027c37ad417e7e673fd780d18a9be5574a24
refs/heads/master
2021-01-01T05:15:42.035641
2016-05-14T10:33:02
2016-05-14T10:33:02
56,992,986
0
0
null
null
null
null
UTF-8
R
false
false
13,675
r
LTLABview_collector.R
# loading required libraries suppressPackageStartupMessages(require(dplyr)) suppressPackageStartupMessages(require(xml2)) suppressPackageStartupMessages(require(methods)) if (Sys.info()[[4]] == "ANDRIUS-PC") setwd("D:/-=Works=-/R/GitHub/LTLABview") if (Sys.info()[[4]] == "LTLAB-DEV") setwd("C:/R/GitHub/LTLABview") refresh_rate <- 5 last_db_update_time <- 0 update_count <- 0 current_conveyor_ids <- NA current_project <- "No production" last_project <- "Noname" current_conveyor <- data.frame() # choosing between real or testing tracedata file names #tracedata_DB_name <- "//M85399/1_m85399/tracedata.db3" tracedata_DB_name <- "tracedata.db3" ltlab_smt_db <- "LTLAB_SMT_DB.sqlite" conn_status <- list(AX_DB_TBL = "NOT_OK", SMT_DB = "NOT_OK", SMT_PROJ = "NOT_OK", PP_DATA = "NOT_OK") last_status <- data.frame() # functions saves current connectios status to csv file status_to_csv <- function(date = 0) { data <- data.frame(AX_DB_TBL = conn_status$AX_DB_TBL, SMT_DB = conn_status$SMT_DB, SMT_PROJ = conn_status$SMT_PROJ, PP_DATA = conn_status$PP_DATA) last_status <- data.frame(AX_DB_TBL = last_status$AX_DB_TBL, SMT_DB = last_status$SMT_DB, SMT_PROJ = last_status$SMT_PROJ, PP_DATA = last_status$PP_DATA) if (!identical(data, last_status)) { last_status <<- data data["Update"] <- date print("Status change:") print(data) write.csv(data, "status.csv", row.names = FALSE) } } status_to_csv(date = Sys.time()) # conveyorIDlist - list of the previous IDs in production, if some of them # changes it means they are finished and are writen to finished roduction # csv file with PCBfinishedtoCSV conveyorIDlist <- c() PCBfinished <- function(){ conveyorIDlist <- c(conveyorIDlist, current_conveyor_ids) %>% tail(2) conveyorIDlist <<- conveyorIDlist if (length(conveyorIDlist) < 2){ return() } else { finished <- setdiff(unlist(conveyorIDlist[1]), unlist(conveyorIDlist[2])) print(paste("Finished: ", finished)) if (length(finished) > 0) { tryCatch({ PCBfinishedtoCSV(finished) print("All data 
writen!!!!") }, error = function(e) { print(e) status_to_csv(date = Sys.time()) next() }) } } } # function to write data of finished PCBs to csv file; # name of the file - current date PCBfinishedtoCSV <- function(idlist) { for (id in idlist) { proj <- current_project date <- format(Sys.time(), format = "%Y-%m-%d") time <- strftime(Sys.time(), format="%H:%M:%S") PCBquant <- current_project_PCB_quantity PCBcomp <- current_project_comp_quantity fileName <- file.path("data", paste(date, "csv", sep = ".")) all <- data.frame(ID = id, PROJECT = proj, DATE = date, TIME = time, QUANTITY = PCBquant, COMPONENTS = PCBcomp, row.names = NULL, stringsAsFactors = FALSE) if (file.exists(fileName)) { conn_status$UPDATE <- time write.table(all, file = fileName, sep = ",", append = TRUE, row.names = FALSE, col.names = FALSE) } else { conn_status$UPDATE <- time write.table(all, file = fileName, sep = ",", row.names = FALSE, col.names = TRUE) } } } # next section needs to be looped forever ------------------------------------- while (TRUE) { Sys.sleep(refresh_rate) print("-----------------------------------------------------") system.time({ # checking if tracedata DB from production exists if (file.exists(tracedata_DB_name)) { if (conn_status$AX_DB_TBL == "NOT_OK") { tryCatch({ tracedata_DB <- src_sqlite(tracedata_DB_name) }, error = function(e) { print("DB is not available") conn_status$AX_DB_TBL <- "NOT_OK" status_to_csv(date = Sys.time()) e } ) -> tracedata_DB_tryCatch if(inherits(tracedata_DB_tryCatch, "error")) next() conn_status$AX_DB_TBL <- "OK" print("AX_DB_TBL - OK") } } else { conn_status$AX_DB_TBL <- "NOT_OK" status_to_csv(date = Sys.time()) print("AX_DB_TBL - NOT OK!!!!") next() } if (last_db_update_time == 0) { tryCatch({ print("First update") last_db_update_time <- file.info(tracedata_DB_name)$mtime transportmap <- tbl(tracedata_DB, sql("SELECT * FROM transportmap")) current_conveyor <- as.data.frame( transportmap %>% filter(lPos %in% c(1:49)) %>% select(idPCB, lPos, 
strPP) %>% arrange(lPos) %>% mutate(lPos2 = lPos - 1.2)) if (!dim(current_conveyor)[1] == 0) { current_project <- as.data.frame(current_conveyor %>% select(strPP))[1,] } }, error = function(e) { print("DB is busy 1") print(e) conn_status$AX_DB_TBL <- "NOT_OK" status_to_csv(date = Sys.time()) e } ) -> transportmap_tryCatch if(inherits(transportmap_tryCatch, "error")) next() } # checking if local SMT DB exists if(file.exists(ltlab_smt_db)) { if (conn_status$SMT_DB == "NOT_OK") { tryCatch({ smt_DB <- src_sqlite(ltlab_smt_db) projects <- tbl(smt_DB, sql("SELECT * FROM PROJEKTAI")) all_projects_names <- projects %>% select(PROJEKTAS) conn_status$SMT_DB <- "OK" print("SMT_DB - OK") }, error = function(e) { print("SMT DB is busy") print(e) conn_status$SMT_DB <- "NOT_OK" status_to_csv(date = Sys.time()) e } ) -> smt_DB_tryCatch if(inherits(smt_DB_tryCatch, "error")) next() } } else { conn_status$SMT_DB <- "NOT_OK" status_to_csv(date = Sys.time()) print("SMT_DB - NOT OK!!!!") next() } print(paste("Current project:", current_project)) tryCatch({ db_update_time <- file.info(tracedata_DB_name)$mtime if ((as.numeric(Sys.time()) - as.numeric(db_update_time) > 120) && (dim(current_conveyor)[1] == 0)) { current_project <- "No production" conn_status$SMT_PROJ <- "NOT_OK" conn_status$PP_DATA <- "NOT_OK" if (last_project != current_project) { current_project_data <- data.frame(current_project = NA, current_project_name = NA, current_project_side = NA, current_project_comp_quantity = NA, current_project_PCB_quantity = NA, CycleTime = NA, PCBLength = NA, PCBWidth = NA, BoardPitch = NA, MaximumNumberOfBoards = NA, StopperInX = NA) print(current_project_data) write.csv(current_project_data, "current_project_data.csv", row.names = FALSE) last_project <- current_project } print("No production!!!!") status_to_csv(date = Sys.time()) next() } db_update_time_diff <- as.numeric(db_update_time) - as.numeric(last_db_update_time) last_db_update_time <- db_update_time update_count <- 
c(update_count, db_update_time_diff) %>% tail(2) if(update_count[1] > 0 && update_count[2] == 0){ print("DB update found") transportmap_new <- as.data.frame( transportmap %>% filter(lPos %in% c(1:49)) %>% select(idPCB, lPos, strPP) %>% arrange(lPos) %>% select(idPCB, lPos)) current_conveyor_pcb_pos <- current_conveyor %>% select(idPCB, lPos) if(!identical(transportmap_new, current_conveyor_pcb_pos)) { print("Transport IDs changed!") current_conveyor <- as.data.frame( transportmap %>% filter(lPos %in% c(1:49)) %>% select(idPCB, lPos, strPP) %>% arrange(lPos) %>% mutate(lPos2 = lPos - 1.2)) current_conveyor_ids <- current_conveyor %>% select(idPCB) %>% as.data.frame() if (!dim(current_conveyor)[1] == 0) { current_project <- as.data.frame(current_conveyor %>% select(strPP))[1,] } write.csv(current_conveyor, "current_conveyor.csv", row.names = FALSE) print(current_conveyor) } else { print("No transport change!!!!") status_to_csv(date = Sys.time()) next() } } else { print("No DB update!!!!") status_to_csv(date = Sys.time()) next() } }, error = function(e) { print("DB is busy 2") conn_status$AX_DB_TBL <- "NOT_OK" status_to_csv(date = Sys.time()) e } ) -> current_conveyor_tryCatch if(inherits(current_conveyor_tryCatch, "error")) next() if (last_project != current_project) { tryCatch({ current_project_name <- current_project %>% sub("_BOT","",.) %>% sub("_TOP","",.) 
current_project_side <- ifelse(grepl("_BOT$", current_project), c("BOT"), c("TOP")) all_names <- all_projects_names %>% as.data.frame() %>% .[["PROJEKTAS"]] if (current_project_name %in% all_names) { current_project_comp_quantity <- projects %>% filter(PROJEKTAS == current_project_name) %>% select(ifelse(current_project_side == "TOP", TOP_KOMP, BOT_KOMP)) %>% as.data.frame() %>% as.numeric() conn_status$SMT_PROJ <- "OK" print(paste("SMT_PROJ - OK - ", current_project_comp_quantity)) } else { print("SMT_PROJ - NOT OK!!!!") conn_status$SMT_PROJ <- "NOT_OK" status_to_csv(date = Sys.time()) next() } pp_file_name <- file.path("PP", paste(current_project, "PP", sep = ".")) if (file.exists(pp_file_name)) { PnPFileXML <- read_xml(pp_file_name) CycleTime <- xml_find_all(PnPFileXML, ".//CycleTime") %>% xml_text() %>% as.numeric() PCBLength <- xml_find_all(PnPFileXML, ".//PCBLength") %>% xml_text() %>% as.numeric() PCBWidth <- xml_find_all(PnPFileXML, ".//PCBWidth") %>% xml_text() %>% as.numeric() BoardPitch <- xml_find_all(PnPFileXML, ".//BoardPitch") %>% xml_text() %>% as.numeric() MaximumNumberOfBoards <- xml_find_all(PnPFileXML, ".//MaximumNumberOfBoards") %>% xml_text() %>% as.numeric() StopperInX <- xml_find_all(PnPFileXML, ".//StopperInX") %>% xml_text() %>% as.numeric() current_project_PCB_quantity <- xml_find_all(PnPFileXML, ".//CircuitId") %>% xml_text() %>% as.numeric() %>% max() current_project_data <- data.frame(current_project, current_project_name, current_project_side, current_project_comp_quantity, current_project_PCB_quantity, CycleTime, PCBLength, PCBWidth, BoardPitch, MaximumNumberOfBoards, StopperInX) print(current_project_data) write.csv(current_project_data, "current_project_data.csv", row.names = FALSE) conn_status$PP_DATA <- "OK" print("PP_DATA - OK") } else { print("PP_DATA - NOT OK!!!!") conn_status$PP_DATA <- "NOT_OK" status_to_csv(date = Sys.time()) next() } last_project <- current_project }, error = function(e) { print("SMT DB is busy 2") 
conn_status$SMT_PROJ <- "NOT_OK" status_to_csv(date = Sys.time()) e } ) -> smt_DB_2_tryCatch if(inherits(smt_DB_2_tryCatch, "error")) next() } PCBfinished() status_to_csv(date = Sys.time()) print("Cycle finished!!!!!!!!!!!!!!!!") }) -> timing print(timing) }
e5c2485511ad136959cc3d889fc56d095fe5c73c
5b6179f52aa5df28dc2527b0dcfa809e8e502c5d
/Codes/Map_study_sites.R
b27bce77506bc95005638ffd790597d424fcfe19
[]
no_license
jms5151/EVP
e9de694f9e062ca149a2cac5e5d72e8f7d98f3f0
3ef8208d10353d6d058b0ea8085d9cc0a14ba20a
refs/heads/master
2021-06-05T18:05:24.776867
2020-06-12T02:53:07
2020-06-12T02:53:07
150,645,258
0
1
null
null
null
null
UTF-8
R
false
false
8,904
r
Map_study_sites.R
# https://eriqande.github.io/rep-res-web/lectures/making-maps-with-R.html library(ggplot2) library(ggmap) library(maps) library(mapdata) library(ggrepel) source("C:/Users/Jeremy/Box Sync/R_functions/Google_maps_API_token.R") # set sites ecuador.sites <-data.frame(longitude = c(-80.2225, -79.9554, -79.6109, -79.6109) , latitude = c(-3.4764, -3.2581, -3.7173, -3.6573) , Site = c("Huaquillas", "Machala", "Zaruma", "Portovelo")) kenya.sites <- data.frame(longitude = c(34.636908, 34.767957, 39.668207, 39.566111) , latitude = c(-0.035266, -0.091702, -4.043477, -4.287500) , Site = c("Chulaimbo", "Kisumu", "Msambweni", "Ukunda")) # get basemaps # ecuador <- get_googlemap(center = c(-79.6109, -2.7173), maptype = "terrain", source = "google", zoom = 8, style='feature:all|element:labels|visibility:off') # kenya <- get_googlemap(center = c(37.9083, 0.1769), maptype = "terrain", source = "google", zoom = 6, style='feature:all|element:labels|visibility:off') ecuador <- get_googlemap(center = c(-79.6109, -2.0173), maptype = "terrain", source = "google", zoom = 7, style='feature:all|element:labels|visibility:off') kenya <- get_googlemap(center = c(36.9083, -2.0173), maptype = "terrain", source = "google", zoom = 7, style='feature:all|element:labels|visibility:off') # make maps ggmap(ecuador) + geom_point(data = ecuador.sites, mapping = aes(x = longitude, y = latitude)) + geom_label_repel(aes(x = longitude, y = latitude, label = Site) , xlim = c(ecuador.sites$longitude,0.05), ylim = c(ecuador.sites$latitude,0.05) , min.segment.length = 0.5, direction='both' , data = ecuador.sites) + ggtitle("A. Ecuador") + ylab("Latitude") + xlab("Longitude") ggmap(kenya) + geom_point(data = kenya.sites, mapping = aes(x = longitude, y = latitude)) + geom_label_repel(aes(x = longitude, y = latitude, label = Site) , xlim = c(kenya.sites$longitude,0.05), ylim = c(kenya.sites$latitude,0.05) , min.segment.length = 0.5, direction='both' , data = kenya.sites) + ggtitle("B. 
Kenya") + ylab("Latitude") + xlab("Longitude") # ------------------------------------------------------------------------------------------- # https://www.r-spatial.org/r/2018/10/25/ggplot2-sf-3.html ---------------------------------- # load libraries library("cowplot") library("googleway") library("ggplot2") library("ggrepel") library("ggspatial") library("sf") library("rnaturalearth") library("rnaturalearthdata") library("rgeos") # set theme and get world map theme_set(theme_bw()) world <- ne_countries(scale='medium', returnclass = 'sf') # set study site coordinates kenya.sites <- st_as_sf(data.frame(longitude = c(34.636908, 34.767957, 39.668207, 39.566111) , latitude = c(-0.035266, -0.091702, -4.043477, -4.287500)) , coords = c("longitude", "latitude"), crs = 4210, agr = "constant") ecuador.sites <- st_as_sf(data.frame(longitude = c(-80.2225, -79.9554, -79.6109, -79.6109) , latitude = c(-3.4764, -3.2581, -3.7173, -3.6573)) # last latitude is actually -3.6873; I shifted it for visualization , coords = c("longitude", "latitude"), crs = 4248, agr = "constant") # Map of world with insets for Ecuador and Kenya ----------------------------------------- gworld <- ggplot(data = world) + geom_sf(aes(fill = region_wb)) + #, show.legend = FALSE geom_rect(xmin = -83, xmax = -73, ymin = -7, ymax = 4, fill = NA, colour = "black", size = 1.5) + geom_rect(xmin = 31, xmax = 45, ymin = -6.5, ymax = 6.5, fill = NA, colour = "black", size = 1.5) + scale_fill_viridis_d(option = "plasma", name="") + theme(panel.background = element_rect(fill = "azure"), panel.border = element_rect(fill = NA)) kenyamap <- ggplot(data = world) + geom_sf(aes(fill = region_wb)) + annotate(geom = "text", x = 37.7, y = 2, label = "Kenya", fontface = "bold", color = "grey22", size = 4) + geom_sf(data = kenya.sites, size = 2, shape = 21, fill = "black", color='white') + coord_sf(xlim = c(33, 43), ylim = c(-5, 5.5), expand = FALSE) + scale_fill_viridis_d(option = "plasma") + theme(legend.position = "none", 
axis.title.x = element_blank(), axis.title.y = element_blank(), axis.text.x = element_blank(), axis.text.y = element_blank(), axis.ticks = element_blank(), panel.background = element_rect(fill = "azure"), panel.border = element_rect(fill = NA), plot.background = element_blank()) ecuadormap <- ggplot(data = world) + geom_sf(aes(fill = region_wb)) + annotate(geom = "text", x = -78.1, y = -0.5, label = "Ecuador", fontface = "bold", color = "grey22", size = 4) + geom_sf(data = ecuador.sites, size = 2, shape = 21, fill = "black", color='white') + coord_sf(xlim = c(-81, -75), ylim = c(-5.1, 1.6), expand = FALSE) + scale_fill_viridis_d(option = "plasma") + theme(legend.position = "none", axis.title.x = element_blank(), axis.title.y = element_blank(), axis.text.x = element_blank(), axis.text.y = element_blank(), axis.ticks = element_blank(), panel.background = element_rect(fill = "azure"), panel.border = element_rect(fill = NA), plot.background = element_blank()) gworld + annotation_custom(grob = ggplotGrob(ecuadormap), xmin = -210, xmax = -75, ymin = -90, ymax = 25) + annotation_custom(grob = ggplotGrob(kenyamap), xmin = 40, xmax = 175, ymin = -50, ymax = 65) # Map of Ecuador and Kenya ------------------------------------------------------------- gworld <- ggplot(data = world) + geom_sf(fill = "transparent", show.legend = FALSE) + geom_rect(xmin = -83, xmax = -73, ymin = -7, ymax = 4, fill = NA, colour = "black", size = 1.5) + geom_rect(xmin = 31, xmax = 45, ymin = -6.5, ymax = 6.5, fill = NA, colour = "black", size = 1.5) + scale_fill_viridis_d(option = "plasma", name="") + theme(panel.background = element_rect(fill = NA), panel.border = element_rect(fill = NA)) kenyamap2 <- ggplot(data = world) + geom_sf(aes(fill = region_wb)) + annotate(geom = "text", x = 37.7, y = 2, label = "Kenya", fontface = "bold", color = "grey22", size = 6) + annotate(geom = "text", x = 35, y = 0.25, label = "Chulaimbo", fontface = "italic", color = "grey22", size = 4) + annotate(geom = "text", 
x = 35.1, y = -0.35, label = "Kisumu", fontface = "italic", color = "grey22", size = 4) + annotate(geom = "text", x = 38.6, y = -3.8, label = "Msambweni", fontface = "italic", color = "grey22", size = 4) + annotate(geom = "text", x = 38.7, y = -4.2, label = "Ukunda", fontface = "italic", color = "grey22", size = 4) + geom_sf(data = kenya.sites, size = 2, shape = 21, fill = "black", color='white') + coord_sf(xlim = c(33, 43), ylim = c(-5, 5.5), expand = FALSE) + scale_fill_viridis_d(option = "plasma") + theme(legend.position = "none", axis.title.x = element_blank(), axis.title.y = element_blank(), panel.background = element_rect(fill = "azure"), panel.border = element_rect(fill = NA)) ecuadormap2 <- ggplot(data = world) + geom_sf(aes(fill = region_wb)) + annotate(geom = "text", x = -78.1, y = -0.5, label = "Ecuador", fontface = "bold", color = "grey22", size = 6) + annotate(geom = "text", x = -79.3, y = -3.2, label = "Machala", fontface = "italic", color = "grey22", size = 4) + annotate(geom = "text", x = -79.5, y = -3.45, label = "Huaquillas", fontface = "italic", color = "grey22", size = 4) + annotate(geom = "text", x = -79.1, y = -3.65, label = "Zaruma", fontface = "italic", color = "grey22", size = 4) + annotate(geom = "text", x = -79, y = -3.85, label = "Portovelo", fontface = "italic", color = "grey22", size = 4) + geom_sf(data = ecuador.sites, size = 2, shape = 21, fill = "black", color='white') + coord_sf(xlim = c(-81, -75), ylim = c(-5.1, 1.6), expand = FALSE) + scale_fill_viridis_d(option = "plasma") + theme(legend.position = "none", axis.title.x = element_blank(), axis.title.y = element_blank(), panel.background = element_rect(fill = "azure"), panel.border = element_rect(fill = NA)) ggplot() + coord_equal(xlim = c(0, 2), ylim = c(0, 1), expand = FALSE) + annotation_custom(ggplotGrob(ecuadormap2), xmin = 0, xmax = 1, ymin = 0, ymax = 1) + annotation_custom(ggplotGrob(kenyamap2), xmin = 1, xmax = 2, ymin = 0, ymax = 1) + theme_void() ggplot() + 
coord_equal(xlim = c(0, 2), ylim = c(0, 2), expand = FALSE) + annotation_custom(ggplotGrob(ecuadormap2), xmin = 0, xmax = 1, ymin = 0, ymax = 1) + annotation_custom(ggplotGrob(kenyamap2), xmin = 1, xmax = 2, ymin = 0, ymax = 1) + annotation_custom(ggplotGrob(gworld), xmin = 0, xmax = 2, ymin = 1, ymax = 2) + theme_void()
872cf39055313085e1b092ebcefa0d2650ca2eb6
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/naniar/examples/add_any_miss.Rd.R
950d10f5330387b9412f88993e8d4a1e4638679f
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
265
r
add_any_miss.Rd.R
library(naniar) ### Name: add_any_miss ### Title: Add a column describing presence of any missing values ### Aliases: add_any_miss ### ** Examples airquality %>% add_any_miss() airquality %>% add_any_miss(Ozone) airquality %>% add_any_miss(Ozone, Solar.R)
e7b0d352096a51d67df6dcee35d0bbb74fe67cbe
90bfb6687a16fc6d79dd99a3dc11828ad2c0321b
/scripts/hgnc_symbol_checker.R
e2c574f3938ee93d0420658fc4a79ef712e07df4
[]
no_license
pilarcacheiro/wol
e028960f797212d885195ae4907fc05127511382
2ef44e6a37ac9a938354f081bb662993a98331c5
refs/heads/master
2022-10-05T14:40:34.123580
2020-06-08T21:35:36
2020-06-08T21:35:36
270,823,761
1
1
null
null
null
null
UTF-8
R
false
false
5,002
r
hgnc_symbol_checker.R
###############################################################################
### Script: hgnc_symbol_checker.R
### Purpose: check gene symbols and retrieve the corresponding HGNC id
###          (protein-coding genes only; the input file can be changed).
### Input:   "gene_with_protein_product.txt" (see README) and a character
###          vector of gene symbols to check.
### Output:  data frame with 3 columns:
###          "HGNC.ID"     - the HGNC id, or "-" if none was found;
###          "Gene.Symbol" - the gene symbol as provided (trimmed);
###          "Type"        - how the symbol was resolved (Approved.Symbol,
###                          Synonym.Symbol, Previous.Symbol,
###                          Ambiguous.Symbol, Notfound.ProteinCoding.Symbol).
###############################################################################
hgnc.checker <- function(gene.symbols, gene.file) {
  # These packages supply the unqualified verbs used below (filter, mutate,
  # rename, separate_rows, ...).  data.table was previously attached here as
  # well but is never used in this function, so it is no longer loaded.
  library(tidyr)
  library(dplyr)

  # Rows whose current approved `symbol` matches one of the input genes.
  check.approved <- function(input.genes, database) {
    return(database %>%
             dplyr::select(hgnc_id, symbol) %>%
             mutate_if(is.factor, as.character) %>%
             filter(symbol != "") %>%
             filter(symbol %in% input.genes) %>%
             dplyr::rename(Gene.Symbol = symbol, HGNC.ID = hgnc_id) %>%
             mutate_if(is.factor, as.character) %>%
             mutate(Type = "Approved.Symbol"))
  }

  # Rows whose pipe-separated `alias_symbol` list contains an input gene.
  check.synonyms <- function(input.genes, database) {
    return(database %>%
             dplyr::select(hgnc_id, alias_symbol) %>%
             mutate_if(is.factor, as.character) %>%
             filter(alias_symbol != "") %>%
             tidyr::separate_rows(alias_symbol, sep = "\\|") %>%
             dplyr::rename(HGNC.ID = hgnc_id) %>%
             mutate(Gene.Symbol = trimws(alias_symbol)) %>%
             dplyr::select(HGNC.ID, Gene.Symbol) %>%
             filter(Gene.Symbol %in% input.genes) %>%
             mutate_if(is.factor, as.character) %>%
             mutate(Type = "Synonym.Symbol"))
  }

  # Rows whose pipe-separated `prev_symbol` list contains an input gene.
  check.previous <- function(input.genes, database) {
    return(database %>%
             dplyr::select(hgnc_id, prev_symbol) %>%
             mutate_if(is.factor, as.character) %>%
             filter(prev_symbol != "") %>%
             tidyr::separate_rows(prev_symbol, sep = "\\|") %>%
             dplyr::rename(HGNC.ID = hgnc_id) %>%
             mutate(Gene.Symbol = trimws(prev_symbol)) %>%
             dplyr::select(HGNC.ID, Gene.Symbol) %>%
             filter(Gene.Symbol %in% input.genes) %>%
             mutate_if(is.factor, as.character) %>%
             mutate(Type = "Previous.Symbol"))
  }

  # Replace every row for a symbol that mapped to more than one id with a
  # single "Ambiguous.Symbol" row carrying no id.
  check.duplicates.symbol <- function(file.to.check.symbols, duplicates.symbol) {
    if (length(duplicates.symbol) == 0) {
      return(file.to.check.symbols)
    }
    final.nodup.symbol <- file.to.check.symbols %>%
      filter(!Gene.Symbol %in% duplicates.symbol)
    duplicate.symbols.df <- data.frame(HGNC.ID = rep("-", length(duplicates.symbol)),
                                       Gene.Symbol = duplicates.symbol,
                                       Type = "Ambiguous.Symbol")
    return(rbind(final.nodup.symbol, duplicate.symbols.df))
  }

  # Blank out ids that were claimed by more than one distinct input symbol.
  check.duplicates.id <- function(file.to.check.ids, duplicates.id) {
    if (length(duplicates.id) == 0) {
      return(file.to.check.ids)
    }
    final.nodup.id <- file.to.check.ids %>%
      filter(!HGNC.ID %in% duplicates.id)
    duplicate.ids.df <- file.to.check.ids %>%
      filter(HGNC.ID %in% duplicates.id) %>%
      mutate(HGNC.ID = "-", Type = "Ambiguous.Symbol")
    return(rbind(final.nodup.id, duplicate.ids.df))
  }

  genes <- trimws(gene.symbols)
  hgnc <- gene.file

  # Resolve in decreasing order of confidence; a gene matched at one level is
  # excluded from the lookups at the following levels.
  hgnc.approved <- check.approved(genes, hgnc)
  hgnc.synonyms <- check.synonyms(genes[!genes %in% hgnc.approved$Gene.Symbol],
                                  hgnc)
  hgnc.previous <- check.previous(genes[!genes %in% c(hgnc.approved$Gene.Symbol,
                                                      hgnc.synonyms$Gene.Symbol)],
                                  hgnc)

  # Anything still unmatched is reported with a "-" id.
  genes.not.found <- genes[!genes %in% c(hgnc.approved$Gene.Symbol,
                                         hgnc.synonyms$Gene.Symbol,
                                         hgnc.previous$Gene.Symbol)]
  hgnc.notfound <- data.frame(HGNC.ID = rep("-", length(genes.not.found)),
                              Gene.Symbol = genes.not.found) %>%
    mutate_if(is.factor, as.character) %>%
    mutate(Type = "Notfound.ProteinCoding.Symbol")

  hgnc.all <- rbind(hgnc.approved, hgnc.synonyms, hgnc.previous, hgnc.notfound)

  # BUGFIX: wrap in unique() -- a symbol occurring 3+ times in hgnc.all was
  # previously listed more than once by duplicated(), producing several
  # identical "Ambiguous.Symbol" rows for a single input symbol.
  duplicates.symbol <- unique(hgnc.all$Gene.Symbol[duplicated(hgnc.all$Gene.Symbol)])
  results.noduplicated.symbol <- check.duplicates.symbol(hgnc.all, duplicates.symbol)

  # "-" is a placeholder, not a real id, so it must never count as duplicated.
  duplicates.id <- unique(results.noduplicated.symbol$HGNC.ID[
    duplicated(results.noduplicated.symbol$HGNC.ID) &
      results.noduplicated.symbol$HGNC.ID != "-"])
  results.final <- check.duplicates.id(results.noduplicated.symbol, duplicates.id)
  return(results.final)
}
2c638fdd3b2bec551d977f39119ef02e2e27072d
52802ff28ca37aa7d028c1411b2e3948ecbf02f0
/text_mining/tidy_text/Ch03_tf_idf.R
49041e9e037006d7d10c74ce83723439a25282c9
[]
no_license
PyRPy/ML_Py_Templates
a4cd06c5e0cc54ccf544269ae4bf3a8aece15b85
677d29207c459bbc9e89e0f1239a1792f128a413
refs/heads/master
2022-02-23T06:18:51.466170
2022-02-16T22:52:02
2022-02-16T22:52:02
167,294,425
0
0
null
null
null
null
UTF-8
R
false
false
5,250
r
Ch03_tf_idf.R
# word and document frequency: tf-idf
# Term frequency in Jane Austen's novels
# What are the most commonly used words in Jane Austen's novels?
library(dplyr)
library(janeaustenr)
library(tidytext)

# One row per (book, word): n = number of times the word occurs in that book.
book_words <- austen_books() %>%
  unnest_tokens(word, text) %>%
  count(book, word, sort = TRUE)

# Total number of words per book, joined back so each row can carry n/total.
total_words <- book_words %>%
  group_by(book) %>%
  summarize(total = sum(n))

book_words <- left_join(book_words, total_words)
book_words

# let's look at the distribution of n/total for each novel, the
# number of times a word appears in a novel divided by the total number
# of terms (words) in that novel.
library(ggplot2)

# xlim clips the extreme right tail so the bulk of the distribution is visible.
ggplot(book_words, aes(n/total, fill = book)) +
  geom_histogram(show.legend = FALSE) +
  xlim(NA, 0.0009) +
  facet_wrap(~book, ncol = 2, scales = "free_y")

# --- Zipf's law ---
# long-tailed distributions are so common in any given corpus of natural language
# (a word's frequency is roughly inversely proportional to its rank).
freq_by_rank <- book_words %>%
  group_by(book) %>%
  mutate(rank = row_number(),
         `term frequency` = n/total)
freq_by_rank

# Zipf's law is often visualized by plotting rank on the x-axis and term frequency on
# the y-axis, on logarithmic scales (a power law shows up as a straight line).
freq_by_rank %>%
  ggplot(aes(rank, `term frequency`, color = book)) +
  geom_line(size = 1.1, alpha = 0.8, show.legend = FALSE) +
  scale_x_log10() +
  scale_y_log10()

# view this as a broken power law with, say, three sections;
# fit a line to the middle (most stable) section only.
rank_subset <- freq_by_rank %>%
  filter(rank < 500,
         rank > 10)

lm(log10(`term frequency`) ~ log10(rank), data = rank_subset)
# Coefficients:
# (Intercept)  log10(rank)
#     -0.6226      -1.1125

# Overlay the fitted slope (close to -1, the classic Zipf exponent).
freq_by_rank %>%
  ggplot(aes(rank, `term frequency`, color = book)) +
  geom_abline(intercept = -0.62, slope = -1.1, color = "gray50", linetype = 2) +
  geom_line(size = 1.1, alpha = 0.8, show.legend = FALSE) +
  scale_x_log10() +
  scale_y_log10()

# --- The bind_tf_idf function ---
# Adds tf, idf and tf_idf columns given (term, document, count) columns.
book_words <- book_words %>%
  bind_tf_idf(word, book, n)
book_words

# look at terms with high tf-idf in Jane Austen's works.
# Terms with the highest tf-idf across the novels (drop `total`, no longer needed).
book_words %>%
  select(-total) %>%
  arrange(desc(tf_idf))

# visualization for these high tf-idf words: top 15 per book.
# The factor() call fixes word order so the bars sort within each facet.
book_words %>%
  arrange(desc(tf_idf)) %>%
  mutate(word = factor(word, levels = rev(unique(word)))) %>%
  group_by(book) %>%
  top_n(15) %>%
  ungroup() %>%
  ggplot(aes(word, tf_idf, fill = book)) +
  geom_col(show.legend = FALSE) +
  labs(x = NULL, y = "tf-idf") +
  facet_wrap(~book, ncol = 2, scales = "free") +
  coord_flip()

# !!! This is the point of tf-idf; it identifies words that are important to one document
# within a collection of documents.

# --- A corpus of physics texts ---
# Download four classic physics texts from Project Gutenberg (needs network
# access); meta_fields keeps the author attached to every line.
library(gutenbergr)
physics <- gutenberg_download(c(37729, 14725, 13476, 5001),
                              meta_fields = "author")
str(physics)

# find out how many times each word was used in each text
physics_words <- physics %>%
  unnest_tokens(word, text) %>%
  count(author, word, sort = TRUE)
physics_words

# calculate tf-idf, then visualize the high tf-idf words
# (explicit author factor levels control the facet order below).
plot_physics <- physics_words %>%
  bind_tf_idf(word, author, n) %>%
  arrange(desc(tf_idf)) %>%
  mutate(word = factor(word, levels = rev(unique(word)))) %>%
  mutate(author = factor(author, levels = c("Galilei, Galileo",
                                            "Huygens, Christiaan",
                                            "Tesla, Nikola",
                                            "Einstein, Albert")))

plot_physics %>%
  group_by(author) %>%
  top_n(15, tf_idf) %>%
  ungroup() %>%
  mutate(word = reorder(word, tf_idf)) %>%
  ggplot(aes(word, tf_idf, fill = author)) +
  geom_col(show.legend = FALSE) +
  labs(x = NULL, y = "tf-idf") +
  facet_wrap(~author, ncol = 2, scales = "free") +
  coord_flip()

# Very interesting indeed. One thing we see here is "eq" in the Einstein text?!
# Inspect the raw lines to see where these odd tokens come from.
library(stringr)
physics %>%
  filter(str_detect(text, "eq\\.")) %>%
  select(text)

# "K1" is the name of a coordinate system for Einstein:
physics %>%
  filter(str_detect(text, "K1")) %>%
  select(text)

# "AB", "RC", and so forth are names of rays, circles, angles, and so forth for Huygens.
# "AK" is another such geometric label appearing in the Huygens text.
physics %>%
  filter(str_detect(text, "AK")) %>%
  select(text)

# remove some of these less meaningful words: a custom stop-word list of the
# notation tokens found above, dropped from the counts via an anti-join.
mystopwords <- tibble(word = c("eq", "co", "rc", "ac", "ak", "bn",
                               "fig", "file", "cg", "cb", "cm"))
physics_words <- anti_join(physics_words, mystopwords, by = "word")

# Recompute tf-idf on the cleaned counts and keep the top 15 words per author
# (author factor levels again control the facet order).
plot_physics <- physics_words %>%
  bind_tf_idf(word, author, n) %>%
  arrange(desc(tf_idf)) %>%
  mutate(word = factor(word, levels = rev(unique(word)))) %>%
  group_by(author) %>%
  top_n(15, tf_idf) %>%
  ungroup() %>%
  mutate(author = factor(author, levels = c("Galilei, Galileo",
                                            "Huygens, Christiaan",
                                            "Tesla, Nikola",
                                            "Einstein, Albert")))

ggplot(plot_physics, aes(word, tf_idf, fill = author)) +
  geom_col(show.legend = FALSE) +
  labs(x = NULL, y = "tf-idf") +
  facet_wrap(~author, ncol = 2, scales = "free") +
  coord_flip()
d7d6f874f83e6f935ef8dfde3520a8a8009cd034
12918ea5a65ded9a9f371c7b0759a6eea51cf5ab
/ratio_over_season.R
ff72068734fe6157b1b597ee7acf9225183aa9a9
[]
no_license
dengyishuo/BPL
17673e1a39941deca086d1feb9b6a0027a38ce23
94c91d50de3350d0e1e4b2454dda2ba2abc9300c
refs/heads/master
2021-01-20T12:00:46.224195
2014-04-21T16:37:17
2014-04-21T16:37:17
null
0
0
null
null
null
null
UTF-8
R
false
false
2,497
r
ratio_over_season.R
# Compare actual vs bookmaker-implied total goals, week by week, over eight
# English Premier League seasons (380 matches per season, 3040 observations).

# read the eight season files
d1 <- read.csv("PL2012-13.csv", header = TRUE)
d2 <- read.csv("PL2011-12.csv", header = TRUE)
d3 <- read.csv("PL2010-11.csv", header = TRUE)
d4 <- read.csv("PL2009-10.csv", header = TRUE)
d5 <- read.csv("PL2008-09.csv", header = TRUE)
d6 <- read.csv("PL2007-08.csv", header = TRUE)
d7 <- read.csv("PL2006-07.csv", header = TRUE)
d8 <- read.csv("PL2005-06.csv", header = TRUE)

# columns used by the analysis: match date, teams, full-time home/away goals
# and the bookmakers' implied Poisson rates for home/away goals
label <- c("Date", "HomeTeam", "AwayTeam", "FTHG", "FTAG",
           "Lambda.H.", "Lambda.A.")
data1 <- d1[, label]
data2 <- d2[, label]
data3 <- d3[, label]
data4 <- d4[, label]
data5 <- d5[, label]
data6 <- d6[, label]
data7 <- d7[, label]
data8 <- d8[, label]

# Aggregate one season's matches by week.
#
# data: data frame with the columns in `label`; Date formatted as %d/%m/%Y.
# Returns a named list (positionally compatible with the original unnamed
# list, so temp[[3]] etc. still works):
#   data         input with a 0-based week index and total-goal columns added
#   nweek        number of weeks spanned by the season (may differ by season)
#   mean_actual  mean actual total goals per week (weeks with no games dropped)
#   mean_implied mean implied total goals per week
#   ratio        mean_actual / mean_implied, per week
process_date <- function(data) {
  data[, "Date"] <- as.Date(data[, "Date"], "%d/%m/%Y")
  data <- data[order(data[, "Date"]), ]
  # convert dates to a 0-based week index relative to the first match day
  data[, "Date"] <- as.numeric(data[, "Date"] - data[1, "Date"]) %/% 7
  nweek <- max(data[, "Date"]) + 1
  # total goals per match: actually scored vs implied by the Poisson rates
  actual_total_goal <- data[, "FTHG"] + data[, "FTAG"]
  implied_total_goal <- data[, "Lambda.H."] + data[, "Lambda.A."]
  data <- data.frame(data, actual_total_goal, implied_total_goal)
  a1 <- rep(0, nweek)  # mean actual total goals, per week
  a2 <- rep(0, nweek)  # mean implied total goals, per week
  a3 <- rep(0, nweek)  # ratio actual / implied, per week
  for (i in seq_len(nweek)) {
    in_week <- data[, "Date"] == (i - 1)
    a1[i] <- mean(data[in_week, "actual_total_goal"])
    a2[i] <- mean(data[in_week, "implied_total_goal"])
    a3[i] <- a1[i] / a2[i]
  }
  # weeks with no games yield NaN means (mean of an empty vector): drop them
  list(data = data,
       nweek = nweek,
       mean_actual = a1[!is.na(a1)],
       mean_implied = a2[!is.na(a2)],
       ratio = a3[!is.na(a3)])
}

# NOTE(review): ggplot2 is attached but never used below (all plots are base
# graphics); library() now fails loudly if it is missing, unlike require()
library(ggplot2)

# for each season, plot the three weekly series on a single set of axes
for (i in 1:8) {
  assign(paste("list", i, sep = ""),
         process_date(get(paste("data", i, sep = ""))))
  temp <- get(paste("list", i, sep = ""))
  df <- data.frame(temp[[3]], temp[[4]], temp[[5]])
  ts.plot(df, lty = 1:3, main = paste("For season", i, sep = " "),
          ylim = c(0, 5.2))
  ts.name <- c("Average Total Goal", "Average Implied Goal",
               "Ratio of actual over implied")
  legend(0.5, 5.2, ts.name, lty = 1:3, cex = 0.5)
}
3a5269c902754910b9b08436dcf23729e37ca8e6
58f02ca234748a1e0214774f2cc07628fb6dbad4
/FMBNanalyzer/FMBNanalyzer_v_2_3_1.R
ca506b1fc5bd8a2255b9bcad08f257767e3da2e0
[ "MIT" ]
permissive
ep142/FoodMicrobionet
0bd2bed7a2127267f2534c461633cff1165d54c4
599578aa801af4d87acc7673370fb588b9413281
refs/heads/master
2023-01-27T19:52:30.431777
2023-01-08T10:57:56
2023-01-08T10:57:56
229,555,940
4
1
MIT
2022-09-01T14:42:42
2019-12-22T11:14:05
HTML
UTF-8
R
false
false
64,174
r
FMBNanalyzer_v_2_3_1.R
# FMBN analyzer v2.3.1 beta
# Graphical and numerical analysis of FoodMicrobionet (FMBN) data generated
# with the ShinyFMBN app.
# Workflow:
#   1. perform a search in ShinyFMBN and export the __agg__ file: see the
#      ShinyFMBN manual on Mendeley Data (dataset 8fwwjpm79y/4) for details;
#   2. put the file (found in output -> aggdata inside the app folder) in a
#      new folder containing this template;
#   3. create an RStudio project for that folder;
#   4. set the options for filtering, saving etc. below.

# Preparatory steps -------------------------------------------------------

# install/load packages ----------------------------------------------------
# plyr:: must be used explicitly because of conflicts; there are also
# potential conflicts between vegan and bipartite.
.cran_packages <- c("plyr", "reshape2", "gplots", "tidyverse", "randomcoloR",
                    "bipartite", "RColorBrewer", "ggrepel", "vegan")
.inst <- .cran_packages %in% installed.packages()
if (any(!.inst)) {
  install.packages(.cran_packages[!.inst])
}
sapply(.cran_packages, require, character.only = TRUE)

opar <- par(no.readonly = TRUE)  # saved so plotting code can restore defaults
set.seed(1234)                   # reproducible rarefaction and palettes

# list files in working directory, for convenience only
filelist <- list.files()

# Load data ----------------------------------------------------------------
# (generated with the ShinyFMBN app or with FMBNmakefilesv5_x)
# there must be only one file ending with _agg.RDS in the project folder;
# if several match, keep the first one.
input_file_name <- filelist[str_detect(filelist, "_agg.RDS")]
# BUGFIX(review): was `length(input_file_name > 1)`, which measures the
# length of a logical vector instead of comparing the length itself
if (length(input_file_name) > 1) input_file_name <- input_file_name[1]
file_name_prefix <- str_remove(input_file_name, "_agg.RDS")
file_name <- basename(file_name_prefix)

input_data <- readRDS(input_file_name)
# input_data is a list with the following elements:
#   studies          the study table
#   OTU_table        OTU table (taxa as columns), absolute frequencies
#   OTU_table_relf   OTU table (taxa as rows, a_label), relative frequencies
#   sample_metadata  the sample metadata
#   taxa_metadata    the taxa metadata
#   edge_table       OTU_table_relf in long format, frequencies in %
#   node_table       info on sample and OTU nodes
#   i_graph          an igraph bipartite object
#   references       data frame containing the references
#   version          text describing the FMBN version
#   sample_agg       aggregation level for samples
#   tax_agg          aggregation level for taxa
#   diagnostic_f     diagnostic code on the state of the app (unused here)

# sanity check: the input must have exactly the expected structure
expected_names <- c("studies", "OTU_table", "OTU_table_relf",
                    "sample_metadata", "taxa_metadata", "edge_table",
                    "node_table", "i_graph", "references", "version",
                    "sample_agg", "tax_agg", "diagnostic_f")
# identical() also catches a NULL or shorter names vector, which the previous
# element-wise == comparison silently accepted
if (!identical(names(input_data), expected_names)) {
  cat("UNKNOWN ERROR: there must be something wrong with your data")
}

# Set options ---------------------------------------------------------------

# options for saving graphs
savegraph <- TRUE       # TRUE: save graphs (some graphs won't be displayed);
                        # FALSE: don't save
graphresolution <- 300  # in dpi
graphsizein <- 7        # size in inches, overridden in some graphs
graphtype <- "tif"      # alternative value is "pdf"
gfileext <- ifelse(graphtype == "pdf", ".pdf", ".tiff")

# options for number of OTU groups in sample and OTU palettes
overrides <- FALSE    # TRUE allows more than 15 categories in sample palettes
overrideOTU <- FALSE  # TRUE allows more than 15 categories in OTU palettes

# The "Other" column has no taxonomic meaning unless it aggregates several
# OTUs (see the FMBN_make_files* script) and is removed by default. The
# "Chloroplast" and "Mitochondria" columns only indicate contamination with
# organellar DNA/RNA and are also removed. Set the flags to FALSE to keep
# these columns in the OTU tables. Eukaryota can also be removed.
removeOther <- TRUE
removeKonly <- TRUE          # remove taxa identified only at Kingdom level
removeChloroplast <- TRUE
removeMitochondria <- TRUE
removeEukaryota <- TRUE

# options for filtering samples and OTUs
# filtersamples: apply the sample filter (overridden if sample_agg is
# "exp. code"); filterOTUs: apply the max relative abundance filter
filtersamples <- TRUE
filterOTUs <- TRUE
# OTUs whose maximum relative abundance is < abtreshold are removed or pooled
# (see below); keep the value low or too many OTUs are lost between
# filtering and rarefaction
abtreshold <- 0.005
# taxa prevalence filter (only applicable when sample aggregation is "sample")
prev_filter <- FALSE
prev_treshold <- 0.05     # as a fraction of samples
prev_filter_flag <- FALSE
# flags set to TRUE once the corresponding filtering is actually performed
filtersamples_flag <- FALSE
filterOTU_flag <- FALSE
# samples/sample groups with fewer than minsampleab sequences are removed
minsampleab <- 1000
# rarefy? if TRUE, vegan::rrarefy() extracts a random rarefied matrix; with
# raremin TRUE the depth is the closest hundred below the minimum number of
# sequences, otherwise raren is used.
# (rarefaction defaults to FALSE here, although it is always performed for
# the bipartite analysis)
# option for performing rarefaction analysis on the unfiltered matrix
dorareanalysis <- FALSE
rareoption <- TRUE
raremin <- TRUE
raren <- 10000
rarefy_flag <- FALSE  # set to TRUE once rarefaction is performed

# pooling of filtered OTUs: if TRUE, abundances of OTUs removed by filtering
# are pooled and shown as "Other" in bar charts; does not apply to heat maps
# and MDS
pool <- TRUE

# maximum number of OTUs allowed in stacked bar charts; also the size of the
# random qualitative palette generated below
maxOTUcats <- 25

# maximum number of "species" for which prevalence and abundance is saved
# (capped later at the number of species actually present in the data)
topp <- 50
# maximum number of "species" in box plots, heat maps and NMDS; must be <topp
topn <- 25
# max number of genera in the bonus plot
topx <- 19
# max "samples" or sample categories for barplots and boxplots
max_samples <- 25
# TRUE: log10(rel. abundance) colour scale in heat maps; FALSE: linear scale
loghmscale <- TRUE

# make a qualitative colour palette
n <- maxOTUcats
rpalette <- distinctColorPalette(n)
names(rpalette) <- NULL

# user defined functions for repetitive tasks -------------------------------

# Coarse taxonomic filtering of an OTU matrix (samples x taxa).
# Optionally removes the "Other" column, chloroplast and mitochondrial
# sequences, Eukaryota and taxa identified only at Kingdom level, according
# to the remove* flags set above (read from the calling environment, as is
# input_data). Returns the filtered matrix.
filter_taxa_1 <- function(OTUmatrix) {
  if (removeOther) {
    OTUmatrix <- OTUmatrix[, colnames(OTUmatrix) != "Other", drop = FALSE]
  }
  # BUGFIX(review): the original used OTUmatrix[, -which(...)]; when no
  # column matched, -which(...) was integer(0) and ALL columns were dropped.
  # Logical indexing handles the empty case correctly.
  if (removeChloroplast) {
    chl_labels <- input_data$taxa_metadata %>%
      dplyr::filter(label == "Chloroplast" | class == "Chloroplast") %>%
      pull(label)
    OTUmatrix <- OTUmatrix[, !(colnames(OTUmatrix) %in% chl_labels),
                           drop = FALSE]
  }
  if (removeMitochondria) {
    mit_labels <- input_data$taxa_metadata %>%
      dplyr::filter(label == "Mitochondria" | family == "Mitochondria") %>%
      pull(label)
    OTUmatrix <- OTUmatrix[, !(colnames(OTUmatrix) %in% mit_labels),
                           drop = FALSE]
  }
  if (removeEukaryota) {
    euk_labels <- input_data$taxa_metadata %>%
      dplyr::filter(domain == "Eukaryota") %>%
      pull(label)
    OTUmatrix <- OTUmatrix[, !(colnames(OTUmatrix) %in% euk_labels),
                           drop = FALSE]
  }
  if (removeKonly) {
    Konly <- input_data$taxa_metadata %>%
      dplyr::filter(is.na(phylum)) %>%
      pull(label)
    OTUmatrix <- OTUmatrix[, !(colnames(OTUmatrix) %in% Konly),
                           drop = FALSE]
  }
  return(OTUmatrix)
}

# Build colour palettes for OTUs at the class and family level.
# Returns a list with: palette (one colour per OTU), text/colors for the
# legend, the OTU-to-colour lookup data frame, and the taxonomic level
# actually used for colouring ("class" or "family").
make_OTU_palettes <- function(OTUmatrix,
                              taxa_metadata = input_data$taxa_metadata,
                              tax_agg = input_data$tax_agg,
                              override_OTU = overrideOTU) {
  ET3 <- taxa_metadata %>% dplyr::filter(label %in% colnames(OTUmatrix))
  # fill NAs at each rank with information from the rank above
  ET3 <- ET3 %>%
    mutate(domain = ifelse(is.na(domain), "Other", domain)) %>%
    mutate(phylum = ifelse(is.na(phylum), domain, phylum)) %>%
    mutate(class = ifelse(is.na(class), phylum, class)) %>%
    mutate(order = ifelse(is.na(order), class, order)) %>%
    mutate(family = ifelse(is.na(family), order, family))
  ET3$class <- as.factor(ET3$class)
  if (tax_agg == "class") {
    # with class-level aggregation, family duplicates class
    ET3$family <- ET3$class
  } else {
    ET3$family <- as.factor(ET3$family)
  }
  ncolsfamily <- nlevels(ET3$family)
  ncolsclass <- nlevels(ET3$class)
  # brewer "Set3" for up to 12 categories, random distinct colours otherwise
  familypalette <- if (ncolsfamily <= 12) {
    brewer.pal(ncolsfamily, "Set3")[1:ncolsfamily]
  } else {
    distinctColorPalette(ncolsfamily)
  }
  classpalette <- if (ncolsclass <= 12) {
    brewer.pal(ncolsclass, "Set3")[1:ncolsclass]
  } else {
    distinctColorPalette(ncolsclass)
  }
  OTUcolors <- ET3[, c("label", "class", "family")]
  fcolors <- data.frame(family = levels(OTUcolors$family),
                        fcolor = familypalette)
  ccolors <- data.frame(class = levels(OTUcolors$class),
                        ccolor = classpalette)
  # merge the palettes with label and lineage info
  OTUcolors <- merge(OTUcolors, ccolors, by = "class")
  OTUcolors <- merge(OTUcolors, fcolors, by = "family")
  OTUcolors <- arrange(OTUcolors, label)
  # choose the level used for colouring: family, unless there are too many
  # families (and override_OTU is FALSE) or taxa are aggregated at class level
  catOTUs <- "class"
  if (tax_agg != "class") {
    catOTUs <- if (override_OTU) {
      "family"
    } else {
      ifelse(nlevels(OTUcolors$family) <= 15, "family", "class")
    }
  }
  # BUGFIX(review): the original assigned the palette objects only inside the
  # tax_agg != "class" branch, so the function failed with an "object not
  # found" error when taxa were aggregated at the class level; the assignment
  # is now unconditional.
  if (catOTUs == "class") {
    myOTUpalette <- as.character(OTUcolors$ccolor)
    myOTUtext <- as.character(ccolors$class)
    myOTUcolors <- as.character(ccolors$ccolor)
  } else {
    myOTUpalette <- as.character(OTUcolors$fcolor)
    myOTUtext <- as.character(fcolors$family)
    myOTUcolors <- as.character(fcolors$fcolor)
  }
  OTU_palette_list <- list(
    palette = myOTUpalette,
    text = myOTUtext,
    colors = myOTUcolors,
    OTU_colors_df = OTUcolors,
    cat_OTU_flag = catOTUs
  )
  return(OTU_palette_list)
}

# Fix the sample_metadata ----------------------------------------------------

# keep a copy of sample_metadata
sample_metadata <- input_data$sample_metadata
row.names(sample_metadata) <- sample_metadata$label
sample_metadata_f <- sample_metadata  # will be modified by sample filtering
# note that input_data$sample_agg can take the values "sample" and "exp. code"
code" #########1#########2#########3#########4#########5#########6#########7#########8 # Create a "filtered" version of the OTU table ------------------------------- #########1#########2#########3#########4#########5#########6#########7#########8 tOTUm <- input_data$OTU_table totseqs <- rowSums(tOTUm[complete.cases(tOTUm),]) OTUmatrixf <- tOTUm[complete.cases(tOTUm),] # sample filter first # optionally remove samples/sample groups with less than minsampleab sequences # filtered samples will be also removed from the sample metadata table # reset filtersamples if aggregation is exp. code if(input_data$sample_agg == "exp. code") filtersamples <-F if (filtersamples){ samples_to_keep <- which(totseqs >= minsampleab) if(length(samples_to_keep) < nrow(OTUmatrixf)){ OTUmatrixf <- OTUmatrixf[samples_to_keep,] totseqs <- rowSums(OTUmatrixf) keepMetadata <- which(row.names(sample_metadata) %in% names(samples_to_keep)) sample_metadata_f <- sample_metadata_f[keepMetadata,] } filtersamples_flag <- T } # optionally remove "Other", "Chloroplast", "Mitochondria" and "Eukaryota" # and taxa with identification only at the level of Kingdom OTUmatrixf <- filter_taxa_1(OTUmatrix = OTUmatrixf) # optionally rarefy the samples with vegan::rrarefy() if (rareoption){ rarenn <- ifelse(raremin, 100*floor(min(totseqs)/100), raren) OTUmatrixf <- vegan::rrarefy(OTUmatrixf, rarenn) # drop colums with 0 sum OTUmatrixf <- OTUmatrixf[, colSums(OTUmatrixf)>0] OTUtokeep <- colnames(OTUmatrixf) rarefy_flag <- T } # optionally filter OTUs which are rare and/or have low prevalence if (filterOTUs){ OTUmatrixf_relab <- OTUmatrixf/rowSums(OTUmatrixf) maxrelab <- apply(OTUmatrixf_relab, 2, max) # OTU to keep (mar rel. 
abundance > abtreshold AND prevalence > prev_treshold) prevdf <- apply(X = OTUmatrixf, MARGIN = 2, FUN = function(x){sum(x > 0)}) min_rel_ab <- apply(X = OTUmatrixf_relab, MARGIN = 2, FUN = function(x){min(x)}) max_rel_ab <- apply(X = OTUmatrixf_relab, MARGIN = 2, FUN = function(x){max(x)}) # Add taxonomy and total read counts to this data.frame prevdf <- data.frame(Prevalence = prevdf, TotalAbundance = colSums(OTUmatrixf), min_rel_ab = min_rel_ab, max_rel_ab = max_rel_ab) prevdf <- prevdf %>% rownames_to_column(var = "label") %>% left_join(., select(input_data$taxa_metadata, label, phylum:species)) if(prev_filter){ pass_prev_filter <- dplyr::filter(select(prevdf, label, Prevalence), Prevalence > floor(nrow(sample_metadata)*prev_treshold)) %>% pull(label) OTUtokeep <- intersect( names(which(maxrelab >= abtreshold)), pass_prev_filter) } else { OTUtokeep <- names(which(maxrelab >= abtreshold)) } prevdf <- prevdf %>% mutate(relAbundance = TotalAbundance/sum(TotalAbundance), pass_maxrelab_treshold = ifelse(label %in% OTUtokeep, "T", "F")) # make a plot # prevalence vs abundance graph OTUmatrixf <- OTUmatrixf[, which(colnames(OTUmatrixf_relab) %in% OTUtokeep)] f_seq_ret <- round(sum(OTUmatrixf)/sum(tOTUm),4) title_text <- "Prevalence vs. abundance, by Phylum" subtitle_text <- paste("using the filters you retain ", length(OTUtokeep), " taxa (triangles) out of ", ncol(tOTUm), " (", f_seq_ret*100, "% of init. seqs.)", sep ="") # a prevalence and abundance plot prev_ab_plot <- ggplot(prevdf, aes(x = TotalAbundance, y = Prevalence / nrow(OTUmatrixf), shape = as.factor(pass_maxrelab_treshold), color = phylum)) + geom_point(size = 2, alpha = 0.7) + facet_wrap( ~ phylum) + geom_hline(yintercept = ifelse(prev_filter, prev_treshold, 0), alpha = 0.5, linetype = 2) + labs(x = "total abundance", y = "Prevalence [Frac. Samples]", shape = 'pass ab. 
treshold', title = title_text, subtitle = subtitle_text) + scale_x_log10() + scale_y_continuous(minor_breaks = seq(0, 1, 0.05)) + theme(legend.position = "none", plot.title = element_text(hjust = 0.5), plot.subtitle = element_text(hjust = 0.5), axis.text.x = element_text(angle = 90)) print(prev_ab_plot) filterOTU_flag <- T if(prev_filter) prev_filter_flag <- T } prevdf <- prevdf %>% mutate(relprev = Prevalence/nrow(OTUmatrixf)) %>% arrange(-relprev, - relAbundance) filtOTU <- ncol(tOTUm)-ncol(OTUmatrixf) # fix sample and taxa sample_metadata_f <- sample_metadata_f %>% mutate_at(vars(llabel, s_type, L1, L4, L6, nature, process, spoilage, studyId, target1, target2), as.factor) taxa_metadata_f <- input_data$taxa_metadata %>% dplyr::filter(label %in% colnames(OTUmatrixf)) # maybe should create factors here # print a summary cat("The original OTU table has", ncol(tOTUm), "OTUs and ", nrow(tOTUm), "samples/sample groups.", "\n", "\n", sep = " ") cat("After filtering there are", "\n", nlevels(sample_metadata$llabel), "llabel groups", "\n", sep = " ") cat(nlevels(sample_metadata_f$L1), "food groups", "\n", sep = " ") cat(nlevels(sample_metadata_f$L4), "food subgroups", "\n", sep = " ") cat("After filtering/rarefaction you have removed", filtOTU, "OTU out of", ncol(tOTUm), "\n", "\n", sep = " ") topp <- ifelse(topp<nrow(prevdf), topp, nrow(prevdf)) toppprev <- slice(prevdf,1:topp) %>% select(label, phylum, class, order, family, genus, relAbundance, min_rel_ab, max_rel_ab, relprev) write_tsv(toppprev, str_c("file_name_prefix","topp.txt",sep="_")) #########1#########2#########3#########4#########5#########6#########7#########8 # Make palettes for samples ----------------------------------------------- # The script will determine if sample labels are llabels or sample ids, based # # on aggSample, loaded with the input data. # # If the number of L4 levels is <=15 L4 will be used for palettes. # # L1 will be used otherwise. 
To override this set overrides to T (unwise, may # # result in problems in legends). # #########1#########2#########3#########4#########5#########6#########7#########8 catsamples <- "L4" maxslevels <- max(c(nlevels(sample_metadata_f$L1), nlevels(sample_metadata_f$L4))) if(maxslevels>12) { lspalette <- distinctColorPalette(maxslevels) names(lspalette) <- NULL } if (!overrides){ catsamples <- ifelse(nlevels(sample_metadata_f$L4)<=15, "L4","L1") } samplenames <- data.frame(label = row.names(OTUmatrixf), stringsAsFactors = F) cat("There are ", nrow(OTUmatrixf), " samples in the data set, belonging to", "\n", nlevels(sample_metadata_f$L1), " food groups and ", nlevels(sample_metadata_f$L4), " subgroups.", "\n", catsamples, " will be used for palettes.", sep ="") groupcolors <- inner_join(samplenames, select(sample_metadata_f, label, L1, L4)) ncolfoodg <- nlevels(groupcolors$L1) ncolfoodsg <- nlevels(groupcolors$L4) foodgpalette <- if(ncolfoodg<=12) { brewer.pal(ncolfoodg, "Paired")[1:ncolfoodg] } else { distinctColorPalette(ncolfoodg) } foodsgpalette <- if(ncolfoodsg<=12) { brewer.pal(ncolfoodsg, "Paired")[1:ncolfoodsg] } else { distinctColorPalette(ncolfoodsg) } foodgpalette <- data.frame(cbind(L1 = levels(groupcolors$L1), fgcolor = foodgpalette)) foodsgpalette <- data.frame(L4 = levels(groupcolors$L4), fsgcolor = foodsgpalette) # Merge the palettes with sample names and metadata groupcolors <- merge(groupcolors, foodgpalette, by = "L1") groupcolors <- merge(groupcolors, foodsgpalette, by = "L4") groupcolors <- groupcolors %>% dplyr::arrange(label) # Set the palette and colors and text for legends if (catsamples == "L1") { myspalette <- as.character(groupcolors$fgcolor) mystext <- as.character(foodgpalette$L1) myscolors <- as.character(foodgpalette$fgcolor) } else { myspalette <- as.character(groupcolors$fsgcolor) mystext <- as.character(foodsgpalette$L4) myscolors <- as.character(foodsgpalette$fsgcolor) } # Show the palette and plots a legend in a horizontal format # 
It might be a good idea to save the legend graph ltitle <- paste("Sample groups ", "(", catsamples, ", FoodEx2 class.)", sep = "") plot(0, 0, type = "n", bty = "n", xaxt = "n", yaxt = "n", ann = FALSE) legend("center", pch=15, legend = mystext, col = myscolors, bty = "n", # ncol = if(length(mystext>10)) 2 else 1, title = ltitle) if (savegraph) { par(pin = c(graphsizein,graphsizein)) if (graphtype == "pdf") { pdf(file = paste(file_name, "fpal.pdf", sep = ""), width = graphsizein, height = graphsizein, paper = "default") } else { tiff(filename = paste(file_name, "fpal.tif", sep = ""), width = graphsizein, height = graphsizein, units = "in", res = graphresolution, compression = "none") } plot(0, 0, type = "n", bty = "n", xaxt = "n", yaxt = "n", ann = FALSE) legend("center", pch=15, legend = mystext, col = myscolors, bty = "n", ncol = if(length(mystext>10)) 2 else 1, title = ltitle) par(opar) dev.off() } # Clean up (if you want to recreate a palette you have to run again) rm(foodgpalette, foodsgpalette, ncolfoodg, ncolfoodsg, samplenames) #########1#########2#########3#########4#########5#########6#########7#########8 # Rarefaction analysis and diversity indices (optional) ----------------------- #########1#########2#########3#########4#########5#########6#########7#########8 if (dorareanalysis){ richness <- t(estimateR(tOTUm)) # should be saved or given option to save or printed rarecurve(tOTUm, step = 10, col = myspalette) if (savegraph) { par(pin = c(graphsizein,graphsizein)) if (graphtype == "pdf") { pdf(file = paste(file_name, "rarecurve.pdf", sep = ""), width = graphsizein, height = graphsizein, paper = "default") } else { tiff(filename = paste(file_name, "rarecurve.tif", sep = ""), width = graphsizein, height = graphsizein, units = "in", res = graphresolution, compression = "none") } rarecurve(tOTUm, step = 10, col = myspalette) par(opar) dev.off() } } #########1#########2#########3#########4#########5#########6#########7#########8 # Bar plots at the genus, 
family and class level -------------------------- # Using rel. frequencies before filtering/rarefaction (if any). # # The plots are generated only if the number of categories is <=maxOTUcats # # You can change this by setting maxOTUcats in line 119 # # Note: this section will attempt to use a color brewer qualitative palette # # if the number of categories for the taxonomic level are <=12, # # otherwise it will use a default palette and difference in colours may be # # difficult to perceive. # #########1#########2#########3#########4#########5#########6#########7#########8 # need the following because OTUmatrixf_relab is only created if filterOTUs is T if(!exists("OTUmatrixf_relab")) OTUmatrixf_relab <- OTUmatrixf/rowSums(OTUmatrixf) # column Other may be close to 0 if no taxa filtering is applied if (pool){ OTUmatrixf_relab_2 <- cbind(OTUmatrixf_relab,1-rowSums(OTUmatrixf_relab)) colnames(OTUmatrixf_relab)[ncol(OTUmatrixf_relab)] <-"Other" } else { OTUmatrixf_relab_2 <- OTUmatrixf_relab/rowSums(OTUmatrixf_relab) } otm <- as.data.frame(OTUmatrixf_relab_2) %>% rownames_to_column(var = "source") edge_table_filtered <- pivot_longer(otm, cols = 2:ncol(otm), names_to = "target", values_to = "weight") # join taxonomic information and recode factors to move Other at the top edge_table_filtered <- left_join(edge_table_filtered, select(taxa_metadata_f, label, class:species), by = c("target" = "label")) %>% dplyr::filter(weight >0) # setting flags for bar plots bpgenusf <- F bpfamilyf <- F bpclassf <- F if (input_data$tax_agg != "class"){ if (input_data$tax_agg != "family") { # Genus level e_table_f_agg <- edge_table_filtered %>% group_by(source, genus) %>% dplyr::summarise(weight = sum(weight)) %>% mutate(genus = ifelse(is.na(genus), "Other", genus)) taxa_levels <- unique(e_table_f_agg$genus) if ("Other" %in% taxa_levels) { taxa_levels <- c("Other",setdiff(taxa_levels,"Other")) } e_table_f_agg$genus <- factor(e_table_f_agg$genus, levels = taxa_levels) n_taxa <- 
length(levels(e_table_f_agg$genus)) if (n_taxa<=maxOTUcats) { bpgenusf <- T sbplot <- ggplot(data = e_table_f_agg, aes(x = source, y= weight, fill = genus)) if (n_taxa>=12) { bpgenus <- sbplot + geom_col(position = "fill") + scale_fill_manual(values = rpalette[1:n_taxa]) + labs(title = "Relative abundance, genus", x= "Samples/Sample groups", y= "Relative abundance") + theme(axis.text.x = element_text(angle = 90, hjust = 0, vjust = 0.5), panel.background= element_rect(fill = "white"), plot.title = element_text(hjust = 0.5)) print(bpgenus) } else { bpgenus <- sbplot + geom_col(position = "fill") + labs(title = "Relative abundance, genus", x= "Samples/Sample groups", y= "Relative abundance") + scale_fill_brewer(type = "qual", palette = "Paired") + theme(axis.text.x = element_text(angle = 90, hjust = 0, vjust = 0.5), panel.background= element_rect(fill = "white"), plot.title = element_text(hjust = 0.5)) print(bpgenus) } } else { # con shiny non deve restituire un messaggio ma un grafico # o devo mettere un campo output con un messaggio cat("Too many genera, genus bar chart aborted...","\n","\n") } } # Family level e_table_f_agg <- edge_table_filtered %>% group_by(source, family) %>% dplyr::summarise(weight = sum(weight)) %>% mutate(family = ifelse(is.na(family), "Other", family)) taxa_levels <- unique(e_table_f_agg$family) if ("Other" %in% taxa_levels) { taxa_levels <- c("Other",setdiff(taxa_levels,"Other")) } e_table_f_agg$family <- factor(e_table_f_agg$family, levels = taxa_levels) n_taxa <- length(levels(e_table_f_agg$family)) if (n_taxa <= maxOTUcats) { bpfamilyf <- T sbplot <- ggplot(data = e_table_f_agg, aes(x = source, y= weight, fill = family)) if (nlevels(e_table_f_agg$Family)>=12) { bpfamily <- sbplot + geom_col(position = "fill") + scale_fill_manual(values = rpalette[1:n_taxa]) + labs(title = "Relative abundance, Family", x= "Samples/Sample groups", y= "Relative abundance") + theme(axis.text.x = element_text(angle = 90, hjust = 0, vjust = 0.5), 
panel.background= element_rect(fill = "white"), plot.title = element_text(hjust = 0.5)) bpfamily } else { bpfamily <- sbplot + geom_col(position = "fill") + labs(title = "Relative abundance, Family", x= "Samples/Sample groups", y= "Relative abundance") + scale_fill_brewer(type = "qual", palette = "Paired") + theme(axis.text.x = element_text(angle = 90, hjust = 0, vjust = 0.5), panel.background= element_rect(fill = "white"), plot.title = element_text(hjust = 0.5)) bpfamily } } else { cat("Too many families, family bar chart aborted...","\n","\n") } } else { cat("OTU aggregated at the Class level, Genus and Family plots skipped") } # Class level e_table_f_agg <- edge_table_filtered %>% group_by(source, class) %>% dplyr::summarise(weight = sum(weight)) %>% mutate(class = ifelse(is.na(class), "Other", class)) taxa_levels <- unique(e_table_f_agg$class) if ("Other" %in% taxa_levels) { taxa_levels <- c("Other",setdiff(taxa_levels,"Other")) } e_table_f_agg$class <- factor(e_table_f_agg$class, levels = taxa_levels) n_taxa <- length(levels(e_table_f_agg$class)) sbplot <- ggplot(data = e_table_f_agg, aes(x = source, y= weight, fill = class)) if (n_taxa<=maxOTUcats){ bpclassf <- T if (n_taxa>=12) { bpclass <- sbplot + geom_col(position = "fill") + scale_fill_manual(values = rpalette[1:n_taxa]) + labs(title = "Relative abundance, Class", x= "Samples/Sample groups", y= "Relative abundance") + theme(axis.text.x = element_text(angle = 90, hjust = 0, vjust = 0.5), panel.background= element_rect(fill = "white"), plot.title = element_text(hjust = 0.5)) bpclass } else { bpclass <- sbplot + geom_col(position = "fill") + labs(title = "Relative abundance, Class", x= "Samples/Sample groups", y= "Relative abundance") + scale_fill_brewer(type = "qual", palette = "Paired") + theme(axis.text.x = element_text(angle = 90, hjust = 0, vjust = 0.5), panel.background= element_rect(fill = "white"), plot.title = element_text(hjust = 0.5)) bpclass } } else { cat("Too many classes, class bar chart 
aborted...","\n","\n") } # saving barplots if (savegraph) { if (bpgenusf){ ggsave(filename = str_c(file_name_prefix, "bp","genus",gfileext), plot = bpgenus, width = 7, height = 5, dpi = graphresolution) } if (bpfamilyf){ ggsave(filename = str_c(file_name_prefix, "bp","family",gfileext), plot = bpfamily, width = 7, height = 5, dpi = graphresolution) } if (bpclassf){ ggsave(filename = str_c(file_name_prefix, "bp","class",gfileext), plot = bpclass, width = 7, height = 5, dpi = graphresolution) } } # Box plots for the n (see topn) most abundant taxa ------------------------------- # I am selecting the top n most abundant and prevalent taxa (genera, families, classes, # depending on taxonomic aggregation in the input file) # with some exceptions all further analysis will be performed on these taxa topntaxa <- dplyr::filter(toppprev, !is.na(get(input_data$tax_agg))) topn <- ifelse(topn<nrow(topntaxa), topn, nrow(topntaxa)) topntaxa <- topntaxa %>% slice(1:topn) %>% pull(label) edge_table_filtered <- edge_table_filtered %>% dplyr::filter(target %in% topntaxa) # now join the metadata for samples sample_data <- sample_metadata_f %>% select(label, studyId, llabel, foodId, L1, L4, L6) edge_table_boxplot <- left_join(edge_table_filtered, sample_data, by = c("source" = "label")) # get the variable to use on x axis # it will be the lowest aggregation level with <= max_samples levels sample_nlevels <- sample_data %>% summarize(across(.fns = n_distinct)) %>% select(llabel, foodId, L6, L4, L1) xaxisvariable <- colnames(sample_nlevels)[which(sample_nlevels[1,]<=max_samples)[1]] # now plot topnboxplot <- ggplot(edge_table_boxplot, mapping = aes(x = get(xaxisvariable), y = weight)) topnboxplot + geom_boxplot() + geom_jitter(alpha = 0.2) + facet_wrap(~target) + labs(x= xaxisvariable, y= "rel.ab.") + scale_y_log10(breaks = c(1E-5, 1E-4, 1E-3, 1E-2, 1E-1, 1)) + theme_bw() + theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5), strip.text = element_text(size = 6)) 
ggsave(filename = str_c(file_name_prefix, "bxp","topntaxa",gfileext), width = 9, height = 7, dpi = graphresolution) #########1#########2#########3#########4#########5#########6#########7#########8 # NMDS with vegan package ------------------------------------------------- #########1#########2#########3#########4#########5#########6#########7#########8 # NMDS with vegan package using Bray-Curtis distance as default # using OTUmatrixf (absolute abundances) works usually better, but triggers # the autotransform option in metaMDS; if you want you can use relative # abundances (use OTUmatrixf3) # filtering OTUmatrixf (removing samples) samples_2_keep <- which(row.names(OTUmatrixf) %in% groupcolors$label) MDSmatrix <- OTUmatrixf MDSall <- metaMDS(MDSmatrix, trymax = 50) MDSall stressplot(MDSall) plot(MDSall, type = "t") # get proper plots using ggplot2 # make data frames, add metadata for row names, sort to use palettes # Create palette for OTUs. See above for instructions to override defaults # this should be transformed into a function but I don't have time OTU_palette_data <- make_OTU_palettes(OTUmatrixf) OTUcolors <- OTU_palette_data$OTU_colors_df myOTUcolors <- OTU_palette_data$colors catOTUs <- OTU_palette_data$cat_OTU_flag speciescoord <- as.data.frame(MDSall$species) speciescoord <- speciescoord[order(row.names(speciescoord)),] %>% rownames_to_column("label") %>% left_join(OTUcolors) speciescoord$olabel <- OTUcolors[,which(colnames(OTUcolors)== catOTUs)] samplecoord <-as.data.frame(MDSall$points) samplecoord <- samplecoord[order(row.names(samplecoord)),] samplecoord$slabel <- groupcolors[,which(colnames(groupcolors)== catsamples)] rangex <- max(c(max(samplecoord$MDS1),max(speciescoord$MDS1))) maxx <- rangex minx <- min(min(samplecoord$MDS1), min(speciescoord$MDS1)) minx <- if_else(abs(minx)-abs(trunc(minx)) < 0.5, trunc(minx)-0.5,trunc(minx)-1) maxx <- if_else(abs(maxx)-abs(trunc(maxx)) < 0.5, trunc(maxx)+0.5,trunc(maxx)+1) rangey <- 
max(c(max(samplecoord$MDS2),max(speciescoord$MDS2))) miny <- min(min(samplecoord$MDS2), min(speciescoord$MDS2)) miny <- if_else(abs(miny)-abs(trunc(miny)) < 0.5, trunc(miny)-0.5, trunc(miny)-1) maxy <- rangey maxy <- if_else(abs(maxy)-abs(trunc(maxy)) < 0.5, trunc(maxy)+0.5,trunc(maxy)+1) # get the group with less than 12 levels, if any sample_nlevels_MDS <- sample_data %>% summarize(across(.fns = n_distinct)) %>% select(L6, L4, L1) colorv_MDS <- colnames(sample_nlevels_MDS)[which(sample_nlevels[1,]<=max_samples)[1]] # sample MDS plot. MDSsampleplot <- ggplot(data = samplecoord, aes(x = MDS1, y= MDS2)) # may be use ggrepel MDSsampleplot + geom_point(aes(colour = str_wrap(slabel,20))) + scale_color_manual(values = myscolors) + scale_x_continuous(limits = c(minx, maxx), breaks = seq(minx, maxx,0.1)) + scale_y_continuous(limits = c(miny, maxy), breaks = seq(miny, maxy, 0.1)) + labs(title = "MDS, samples", x= "dim(1)", y= "dim(2)", colour = catsamples) + theme_bw() + theme(plot.title = element_text(hjust = 0.5)) # saving sample metadata write_tsv(sample_metadata_f, str_c(file_name_prefix, "sample_metadata.txt", sep = "_")) samplecoord_2 <- samplecoord %>% rownames_to_column(var = "label") %>% left_join(.,select(sample_metadata_f, label, L1:L6)) # need to change a variable name samplecoord_2 <- samplecoord_2 %>% rename(llabel = label) MDSsampleplot_2 <- ggplot(data = samplecoord_2, aes(x = MDS1, y= MDS2)) if(sample_nlevels_MDS[1,colorv_MDS]<=12){ MDSsampleplot_2 + geom_point(aes(colour = str_wrap(get(colorv_MDS),20))) + geom_text_repel(mapping = aes(label = llabel), size = I(2.0), alpha = I(0.6), max.overlaps = 20) + scale_color_brewer(type = "qual", palette = "Paired") + scale_x_continuous(limits = c(minx, maxx), breaks = seq(minx, maxx,0.1)) + scale_y_continuous(limits = c(miny, maxy), breaks = seq(miny, maxy, 0.1)) + labs(title = "MDS, samples", x= "dim(1)", y= "dim(2)", colour = colorv_MDS) + theme_bw() + theme(plot.title = element_text(hjust = 0.5)) } else{ 
MDSsampleplot_2 + geom_point(aes(colour = str_wrap(get(colorv_MDS),20))) + geom_text_repel(mapping = aes(label = llabel), size = I(2.0), alpha = I(0.6), max.overlaps = 20) + scale_color_manual(values = rpalette) + scale_x_continuous(limits = c(minx, maxx), breaks = seq(minx, maxx,0.1)) + scale_y_continuous(limits = c(miny, maxy), breaks = seq(miny, maxy, 0.1)) + labs(title = "MDS, samples", x= "dim(1)", y= "dim(2)", colour = colorv_MDS) + theme_bw() + theme(plot.title = element_text(hjust = 0.5)) } if (savegraph) { ggsave(filename =str_c(file_name_prefix, "_MDSsamples",gfileext), width = 7, height = 5, dpi = graphresolution) } # a small change speciescoord <- speciescoord %>% rename(llabel = label) # species MDS plot. MDSOTUplot <- ggplot(data = speciescoord, aes(x = MDS1, y= MDS2)) MDSOTUplot + geom_point(aes(colour = olabel)) + geom_text_repel(mapping = aes(label = llabel), size = 2.0, alpha = 0.6, max.overlaps = 20) + scale_color_manual(values = myOTUcolors) + scale_x_continuous(limits = c(minx,maxx), breaks = seq(minx,maxx,0.5)) + scale_y_continuous(limits = c(miny,maxy), breaks = seq(miny,maxy,0.5)) + labs(title = "Non-metric MDS, OTUs", x= "dim(1)", y= "dim(2)", colour = catOTUs) + theme_bw()+ theme(plot.title = element_text(hjust = 0.5)) if (savegraph) { ggsave(filename = str_c(file_name_prefix, "_MDSspecies", gfileext), width = 7, height = 5, dpi = graphresolution) } # biplot MDSsampleplot + geom_point(aes(colour = slabel)) + geom_text_repel(label = row.names(samplecoord), size = 3, color = "red", alpha = 0.6, max.overlaps = 20) + geom_text_repel(data = speciescoord, aes(x= MDS1, y= MDS2, label = llabel), size = I(2.5), alpha = I(0.6), max.overlaps = 20) + scale_color_manual(values = myscolors) + scale_x_continuous(limits = c(minx,maxx), breaks = seq(minx,maxx,0.5)) + scale_y_continuous(limits = c(miny,maxy), breaks = seq(miny,maxy,0.5)) + labs(x= "dim(1)", y= "dim(2)", colour = catsamples) + theme_bw() + theme(legend.key.size = unit(0.1, "in"), legend.text 
= element_text(size = 6)) if (savegraph) { ggsave(filename = str_c(file_name_prefix, "_MDSbp",gfileext), width = 7, height = 5, dpi = graphresolution) } #########1#########2#########3#########4#########5#########6#########7#########8 # Heatmaps ---------------------------------------------------------------- #########1#########2#########3#########4#########5#########6#########7#########8 # create a palette of 100 colours for OTU abundance colscale <- colorRampPalette(c("lightyellow", "orange", "brown"), space = "rgb")(100) # remove the columns with the pooled OTUs (if any) from the filtered matrix # of relative frequencies OTUmatrixf_relab_hm <- OTUmatrixf_relab[, topntaxa] if (any(colnames(OTUmatrixf_relab_hm) == "Other")){ OTUmatrixf_relab_hm <- OTUmatrixf_relab_hm[, -which(colnames(OTUmatrixf_relab_hm) == "Other")] } # Re-create palette for OTUs. See above for instructions to override defaults OTU_palette_data <- make_OTU_palettes(OTUmatrixf_relab_hm) OTUcolors <- OTU_palette_data$OTU_colors_df myOTUcolors <- OTU_palette_data$colors catOTUs <- OTU_palette_data$cat_OTU_flag myOTUtext <- OTU_palette_data$text myOTUpalette <- OTU_palette_data$palette cat("The aggregation level for OTUs is ", input_data$tax_agg, "\n", "There are ", ncol(OTUmatrixf_relab_hm), " OTU in this set.", "\n", "The category used for OTU palettes is ", catOTUs, ".\n", sep="") # Show the palette and plot a legend in a horizontal format. 
It might be a good # idea to save the palette par(opar) plot(0, 0, type = "n", bty = "n", xaxt = "n", yaxt = "n", ann = FALSE) legend("center", pch=15, legend = myOTUtext, y.intersp = 0.8, col = myOTUcolors, bty = "n", title = catOTUs, ncol = 1) if (savegraph) { if (graphtype == "pdf") { pdf(file = paste(file_name, "hmOTUpal.pdf", sep = ""), width = graphsizein, height = graphsizein, paper = "default") } else { tiff(filename = paste(file_name, "hmOTUpal.tif", sep = ""), width = graphsizein, height = graphsizein, units = "in", res = graphresolution, compression = "none") } plot(0, 0, type = "n", bty = "n", xaxt = "n", yaxt = "n", ann = FALSE) legend("center", pch=15, legend = myOTUtext, y.intersp = 0.8, col = myOTUcolors, bty = "n", title = catOTUs, ncol = if(length(myOTUtext>10)) 2 else 1) dev.off() } # calculate distance matrices and dendrograms sample.dist <- vegdist(OTUmatrixf_relab_hm, method = "bray") # may generate warnings sample.clus <- hclust(sample.dist, "aver") OTU.dist <- vegdist(t(OTUmatrixf_relab_hm), method = "bray") OTU.clus <- hclust(OTU.dist, "aver") # may generate warnings: it is probably related to the order packages are loaded hm <- OTUmatrixf_relab_hm keyscale <- "rel. abundance" if (loghmscale) { # lbound gives a default to 0 abundances using max number of seqs per sample lbound <- 10^(-ceiling(log10(max(rowSums(tOTUm))))) hm <- aaply(hm, c(1,2), function(x,minprop) log10(x+minprop),minprop=lbound) keyscale <- "log10(rel.abundance)" } # heat map with legend for abundance levels, margins, lhei, lwid, srtCol, # offsetRow and offsetCol may require tinkering. 
# Size temporarily reset for saving heatmap.2(hm, Rowv = as.dendrogram(sample.clus), Colv = as.dendrogram(OTU.clus), srtCol = 30, col = colscale, breaks = 101, symbreaks = F, ColSideColors = myOTUpalette, RowSideColors = myspalette, margins = c(7,11), trace = "both", density.info = "none", tracecol = "orange1", lhei = c(2.5,7.5), lwid = c(2.5,7.5), offsetRow = 0.5, offsetCol = 0.5, key.title = "NA", key.xlab = keyscale, keysize = 0.1, symkey = F ) if (savegraph) { if (graphtype == "pdf") { pdf(file = paste(file_name, "hm2.pdf", sep = ""), width = 10, height = 7, paper = "a4r") } else { tiff(filename = paste(file_name, "hm2.tif", sep = ""), width = 10, height = 7, units = "in", res = graphresolution, compression = "none") } heatmap.2(hm, Rowv = as.dendrogram(sample.clus), Colv = as.dendrogram(OTU.clus), srtCol = 30, col = colscale, breaks = 101, symbreaks = F, ColSideColors = myOTUpalette, RowSideColors = myspalette, margins = c(7,11), trace = "both", density.info = "none", tracecol = "orange1", lhei = c(2.5,7.5), lwid = c(2.5,7.5), offsetRow = 0.5, offsetCol = 0.5, key.title = "NA", key.xlab = keyscale, keysize = 0.1, symkey = F ) dev.off() } par(opar) #########1#########2#########3#########4#########5#########6#########7#########8 # Network analysis with the bipartite package ----------------------------- # This will be performed only if sample aggregation is Llabel. # # Rarefaction and filtering are performed by default. # #########1#########2#########3#########4#########5#########6#########7#########8 # Calculate and plot degree distributions, separately for OTU and samples # using the unfiltered matrix (Other and chloroplast are removed) # exponential, power law and truncated power law are given in black, dark grey # and light grey, respectively. Results are saved in objects for further use. # The defaults for this section are slightly different and filtering is # performed from scratch if(input_data$sample_agg == "exp. 
code"){ OTUmatrixbip <- tOTUm[complete.cases(tOTUm),] # filter chloroplast, mitochondria etc. if applicable OTUmatrixbip <- filter_taxa_1(OTUmatrix = OTUmatrixbip) OTUdd <- as.data.frame(degreedistr(OTUmatrixbip, level = "higher", plot.it = FALSE)) degreedistr(OTUmatrixbip, level = "higher") Sampledd <- as.data.frame(degreedistr(OTUmatrixbip, level = "lower", plot.it = FALSE)) degreedistr(OTUmatrixbip, level = "lower") # Filtering and rarefaction performed # remove samples is the sample filter was used if (filtersamples_flag){ OTUmatrixbip <- OTUmatrixbip[samples_to_keep,] } OTUmatrixbip2 <- OTUmatrixbip/rowSums(OTUmatrixbip) maxrelabbip <- apply(OTUmatrixbip2, 2, max) OTUtokeepbip <- names(which(maxrelabbip >= abtreshold)) OTUmatrixbip <- OTUmatrixbip[, which(colnames(OTUmatrixbip2) %in% OTUtokeepbip)] totseqsbip <- rowSums(OTUmatrixbip) rarennbip <- ifelse(raremin, 100*floor(min(totseqsbip)/100), raren) OTUmatrixbip <- vegan::rrarefy(OTUmatrixbip, rarennbip) rarefy_flag <- T OTUmatrixbip <- OTUmatrixbip[,colSums(OTUmatrixbip)>0] OTUtokeepbip <- colnames(OTUmatrixbip) filtOTUbip <- ncol(tOTUm)-ncol(OTUmatrixbip) # prints a summary cat("The original OTU/taxa abundance table has", ncol(tOTUm), "OTUs/taxa and ", nrow(tOTUm), "samples/sample groups.", "\n", "\n", sep = " ") cat("After filtering there are", "\n", n_distinct(sample_metadata$llabel), "llabel groups", "\n", sep = " ") cat(n_distinct(sample_metadata$L1), "food groups", "\n", sep = " ") cat(n_distinct(sample_metadata$L4), "food subgroups", "\n", sep = " ") cat("After filtering/rarefaction you have removed", filtOTU, "OTU/TAXA out of", ncol(tOTUm), "\n", "\n", sep = " ") #########1#########2#########3#########4#########5#########6#########7#########8 # Rebuild palette for OTUs: palettes are made for family and class. Family is # # is used if number of families <=15, otherwise class is used. 
To override # # this set overrideOTU <- T (may result in problems in legends) # #########1#########2#########3#########4#########5#########6#########7#########8 OTU_palette_data <- make_OTU_palettes(OTUmatrixbip) OTUcolors <- OTU_palette_data$OTU_colors_df myOTUcolors <- OTU_palette_data$colors catOTUs <- OTU_palette_data$cat_OTU_flag myOTUtext <- OTU_palette_data$text myOTUpalette <- OTU_palette_data$palette cat("The aggregation level for OTUs is ", input_data$tax_agg, "\n", "There are ", ncol(OTUmatrixbip), " OTU in this set.", "\n", "The category used for OTU palettes is ", catOTUs, ".\n", sep="") # Show the palette and plot a legend in a horizontal format. It might be a good # idea to save the palette graph plot(0, 0, type = "n", bty = "n", xaxt = "n", yaxt = "n", ann = FALSE) legend("center", pch=15, legend = myOTUtext, y.intersp = 0.8, col = myOTUcolors, bty = "n", ncol = if(length(myOTUtext)>10) 2 else 1, title = catOTUs) if (savegraph) { if (graphtype == "pdf") { pdf(file = paste(file_name, "bipOTUpal.pdf", sep = ""), width = graphsizein, height = graphsizein, paper = "default") } else { tiff(filename = paste(file_name, "bipOTUpal.tif", sep = ""), width = graphsizein, height = graphsizein, units = "in", res = graphresolution, compression = "none") } plot(0, 0, type = "n", bty = "n", xaxt = "n", yaxt = "n", ann = FALSE) legend("center", pch=15, legend = myOTUtext, y.intersp = 0.8, col = myOTUcolors, bty = "n", ncol = if(length(myOTUtext)>10) 2 else 1, title = catOTUs) dev.off() } #########1#########2#########3#########4#########5#########6#########7#########8 # Plot the filtered web. adjust y.lim and text.width to fit labels if needed. # The legend may need some tinkering. Adjust y.lim and twidths to fit labels # and legend if needed. The section has been tested with par(pin=c(13.5,6)). # In Rstudio you need to adjust the size of the plot pane. 
#########1#########2#########3#########4#########5#########6#########7#########8 ncol_top <- case_when( length(myOTUtext) <=5 ~ 1, length(myOTUtext) >5 && length(myOTUtext) <=10 ~ 2, length(myOTUtext) >10 && length(myOTUtext) <=15 ~ 3, length(myOTUtext) >15 ~4 ) ncol_bottom = case_when( length(mystext) <=5 ~ 1, length(mystext) >5 && length(mystext) <=10 ~ 2, length(mystext) >10 && length(mystext) <=15 ~ 3, length(mystext) >15 ~4 ) plotweb(OTUmatrixbip, text.rot = 90, y.lim=c(0,2.5), col.high = myOTUpalette, col.low = myspalette, low.y = 0.8) twidthOTU <- max(strwidth(myOTUtext))*0.8 sltitle <- ifelse(catsamples == "L1", "FoodEx2 class L1", "FoodEx2 class L4") legend("top", xjust = 0.5, yjust = 0, legend = myOTUtext, col = myOTUcolors, bty = "n", title = catOTUs, pch = 15, cex = 0.75, ncol = ncol_top, y.intersp = 0.6, text.width = twidthOTU) twidths <- max(strwidth(mystext))*0.6 legend("bottom", xjust = 0.5, yjust = 1, legend = mystext, col = myscolors, bty = "n", title = sltitle, y.intersp = 0.6, pch = 15, cex = 0.75, ncol = ncol_bottom, text.width = twidths) if (savegraph) { if (graphtype == "pdf") { pdf(file = paste(file_name, "bipnet.pdf", sep = ""), width = graphsizein*1.2, height = graphsizein, paper = "default") } else { tiff(filename = paste(file_name, "bipnet.tif", sep = ""), width = graphsizein*1.2, height = graphsizein, units = "in", res = graphresolution, compression = "none") } plotweb(OTUmatrixbip, text.rot = 90, y.lim=c(0,2.5), col.high = myOTUpalette, col.low = myspalette, low.y = 0.8) twidthOTU <- max(strwidth(myOTUtext))*0.8 sltitle <- ifelse(catsamples == "L1", "FoodEx2 class L1", "FoodEx2 class L4") legend("top", xjust = 0.5, yjust = 0, legend = myOTUtext, col = myOTUcolors, bty = "n", title = catOTUs, pch = 15, cex = 0.75, ncol = ncol_top, y.intersp = 0.6, text.width = twidthOTU) twidths <- max(strwidth(mystext))*0.6 legend("bottom", xjust = 0.5, yjust = 1, legend = mystext, col = myscolors, bty = "n", title = sltitle, y.intersp = 0.6, pch = 15, 
cex = 0.75, ncol = ncol_bottom, text.width = twidths) dev.off() } # restoring graphical parameters par(opar) #########1#########2#########3#########4#########5#########6#########7#########8 # Visualize the web in matrix style format visweb(OTUmatrixbip, type = "diagonal") #########1#########2#########3#########4#########5#########6#########7#########8 # Calculate H2' for the network (the higher the value the more selective # the OTUs) and d' (same for samples, ranges from 0, no specialisation, to 1, # perfect specialist). d' for OTUs is calculated later. Htwo <- H2fun(OTUmatrixbip) dprimefood <- dfun(OTUmatrixbip) Htwo dprimefood #########1#########2#########3#########4#########5#########6#########7#########8 # Calculate statistics for OTU nodes and sample nodes (filtered network) and # save them in data frames slevelstats <- specieslevel(OTUmatrixbip) OTUstats <- slevelstats$`higher level` # join with relative abundance OTUfreqs <- colSums(OTUmatrixbip)/sum(OTUmatrixbip) OTUstats <- cbind(OTUstats,OTUfreqs) samplestats <- slevelstats$`lower level` # join the Class OTUstats$label <- row.names(OTUstats) OTUstats <- left_join(OTUstats, select(input_data$taxa_metadata, label, class)) OTUstats$Class <- as.factor(OTUstats$class) # plot d values OTUdab <- OTUstats %>% dplyr::select(label, d, OTUfreqs, class) %>% dplyr::arrange(desc(d)) # if the number of taxa is > 50 only the first 50 (most specialist) will be used OTUdab_toplot <- if(nrow(OTUdab)>50) slice(OTUdab, 1:50) else OTUdab dabplotwarning <- ifelse(nrow(OTUdab)>50, str_wrap( str_c("Warning: only the 50 taxa with highest values of specialization, out of ", nrow(OTUdab), " taxa in the dataset are being plotted"), 50 ), "") dabplot <- ggplot(data = OTUdab_toplot, aes(x=d, y = reorder(factor(label), d), size = OTUfreqs, colour = class)) # use alternative palettes and styles depending on the number of categories dabtitle <- str_c("Specialization (d) and abundance plot") dabsubtitle <- ifelse(dabplotwarning !="", 
dabplotwarning, waiver()) if(n_distinct(OTUdab_toplot$class)<=12){ dabplot + geom_point() + scale_color_brewer(type = "qual", palette = "Paired") + labs(y = "taxa", size = "rel. ab.", title = dabtitle, subtitle = dabsubtitle) + theme_bw() + theme(plot.title = element_text(hjust = 0.5), plot.subtitle = element_text(hjust=0.5)) } else { dabplot_palette <- myOTUcolorsbip names(dabplot_palette) <- myOTUtext dabplot + geom_point() + scale_color_manual(values = dabplot_palette) + labs(y = "taxa", size = "rel. ab.", title = dabtitle, subtitle = dabsubtitle) + theme_bw() + theme(axis.text.y = element_text(size = 16/log2(n_distinct(OTUdab_toplot$class))), plot.title = element_text(hjust = 0.5), plot.subtitle = element_text(hjust=0.5)) } if (savegraph) { ggsave(filename = str_c(file_name_prefix, "_dabplot",gfileext), width = 7, height = 5, dpi = graphresolution) } # plot degree vs betweeness, only the OTUs with the top 20% values are labeled OTUstats$OTUlabel <- as.character(rep("",nrow(OTUstats))) lbl <- which(OTUstats$normalised.degree >= quantile(OTUstats$normalised.degree, 0.80)) OTUstats$OTUlabel[lbl] <- OTUstats$label[lbl] lbl <- which(OTUstats$weighted.betweenness != 0) OTUstats$OTUlabel[lbl] <- OTUstats$label[lbl] degbplot <- ggplot(data = OTUstats, aes(x = normalised.degree, y = weighted.betweenness, colour = Class, size = OTUfreqs)) if(n_distinct(OTUstats$class)<=12){ degbplot + geom_point() + geom_text_repel(aes(label = OTUlabel), colour = "black", alpha = 0.6, size = 3) + labs(x = "normalised degree", y = "weighted betweenness") + scale_color_brewer(type = "qual", palette = "Paired") + theme_bw() } else { degplot_palette <- myOTUcolors names(degplot_palette) <- myOTUtext degbplot + geom_point() + geom_text_repel(aes(label = OTUlabel), colour = "black", alpha = 0.6, size = 3) + labs(x = "normalised degree", y = "weighted betweenness") + scale_color_manual(values = degplot_palette) + theme_bw() } if (savegraph) { ggsave(filename = str_c(file_name_prefix, 
"_degbplot",gfileext), width = 7, height = 5, dpi = graphresolution) } #########1#########2#########3#########4#########5#########6#########7#########8 # Recalculate degree distributions, separately for OTU and samples # using the filtered matrix OTUddf <- as.data.frame(degreedistr(OTUmatrixbip, level = "higher", plot.it = FALSE)) degreedistr(OTUmatrixf, level = "higher") Sampleddf <- as.data.frame(degreedistr(OTUmatrixbip, level = "lower", plot.it = FALSE)) degreedistr(OTUmatrixbip, level = "lower") #########1#########2#########3#########4#########5#########6#########7#########8 # Compute and plot modules, may need to tamper with labsize mylabsize <- 0.6 moduleWebObject <- computeModules(OTUmatrixbip); plotModuleWeb(moduleWebObject, labsize = mylabsize) if (savegraph) { if (graphtype == "pdf") { pdf(file = paste(file_name, "modw.pdf", sep = ""), width = graphsizein, height = graphsizein, paper = "default") } else { tiff(filename = paste(file_name, "modw.tif", sep = ""), width = graphsizein, height = graphsizein, units = "in", res = graphresolution, compression = "none") } moduleWebObject <- computeModules(OTUmatrixbip); plotModuleWeb(moduleWebObject, labsize = mylabsize) dev.off() } # restoring graphical parameters par(opar) #########1#########2#########3#########4#########5#########6#########7#########8 # Calculate network level stats. Only some options are included. # # For small networks you may run # # netstats <- as.data.frame(networklevel(OTUmatrixf, index = "ALLBUTDD" # # Run this only at the end, may cause crashes for large matrices. 
# # Make sure you save files and export the plots before you run it # #########1#########2#########3#########4#########5#########6#########7#########8 netstats <- as.data.frame(networklevel(OTUmatrixbip, index = c("connectance", "weighted connectance", "nestedness", "weighted nestedness", "cluster coefficient"))) colnames(netstats)<-"index" #########1#########2#########3#########4#########5#########6#########7#########8 # Save individual dataframes (as .txt files) for future use # #########1#########2#########3#########4#########5#########6#########7#########8 write.table(OTUstats, file = str_c(file_name_prefix, "_OTUstats.txt"), append = FALSE, sep = "\t", col.names = TRUE, row.names = TRUE) write.table(samplestats, file = str_c(file_name_prefix, "_samplestats.txt"), append = FALSE, sep = "\t", col.names = TRUE, row.names = TRUE) write.table(netstats, file = str_c(file_name_prefix, "_netstats.txt", sep = ""), append = FALSE, sep = "\t", col.names = TRUE, row.names = TRUE) } #########1#########2#########3#########4#########5#########6#########7#########8 # Bonus plot -------------------------------------------------------------- # bar plot with the x taxa with the highest absolute abundance. # # skipped if aggregation level is other than genus or sample aggregation is # # other than exp.code. # The abundances of the remaining OTUs are optionally pooled. # # #########1#########2#########3#########4#########5#########6#########7#########8 if(input_data$sample_agg == "exp. 
code" & input_data$tax_agg == "genus"){ OTUsums <- colSums(OTUmatrixbip) # topx should be <20 and definitely <25 topxOTUnodesab <- names(OTUsums[order(OTUsums, decreasing = TRUE)])[1:topx] OTUmatrixf3 <- OTUmatrixbip[,which(colnames(OTUmatrixbip) %in% topxOTUnodesab)] if (pool) { if (rarefy_flag){ sampleSums <- round(rarennbip*(totseqs-totseqsbip)/totseqs,0)+rarennbip } else {sampleSums <- totseqs} } else { sampleSums <- rowSums(OTUmatrixbip) } OTUmatrixf3 <- OTUmatrixf3/sampleSums Other <- 1-rowSums(OTUmatrixf3) OTUmatrixf3 <- cbind(OTUmatrixf3, Other) ET5 <- melt(OTUmatrixf3, value.name = "Weight") colnames(ET5)[1:2] <- c("Sample", "Taxa") taxa_levels <- unique(ET5$Taxa) if ("Other" %in% taxa_levels) { taxa_levels <- c("Other",setdiff(taxa_levels,"Other")) } ET5$Taxa <- factor(ET5$Taxa, levels = taxa_levels) # make a palette ncols <- nlevels(ET5$Taxa) # make a qualitative color palette bp_palette <- distinctColorPalette(ncols) names(bp_palette) <- levels(ET5$OTU) mygtitle <- paste("Relative abundance, top", ncols, "taxa (based on tot seqs)") sbplot <- ggplot(data = ET5, aes(x = Sample, y= Weight, fill = Taxa)) if (topx<=12) { sbplot <- sbplot + geom_col(position = "fill") + labs(title = str_wrap(mygtitle,40), x= "Samples/Sample groups", y= "Relative abundance", fill = "taxa") + scale_fill_brewer(type = "qual", palette = "Paired") + theme(axis.text.x = element_text(angle = 90, hjust = 0, vjust = 0.5), legend.key.size = unit(0.2, "in"), plot.title = element_text(hjust = 0.5)) print(sbplot) } else { sbplot <- sbplot + geom_col(position = "fill") + labs(title = str_wrap(mygtitle,40), x= "Samples/Sample groups", y= "Relative abundance", fill = "taxa") + scale_fill_manual(values = bp_palette) + theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5), legend.key.size = unit(0.2, "in"), plot.title = element_text(hjust = 0.5)) print(sbplot) } if (savegraph) { ggsave(str_c(file_name_prefix, "_topxab", gfileext), width = 7, height = 5, dpi = graphresolution) } 
} #########1#########2#########3#########4#########5#########6#########7#########8 # Save an image of the workspace: please realize that some objects which are # recycled in the script must be created afresh (i.e. ET3, ET4, OTUmatrixf3) save.image(file = paste(file_name_prefix, "image.Rdata", sep = "")) # if you want to do some clean up use rm(list = ls()) before running # the script again # rm(list = ls()) #########1#########2#########3#########4#########5#########6#########7#########8 # Credits and citation ---------------------------------------------------- #########1#########2#########3#########4#########5#########6#########7#########8 # Script created by Eugenio Parente, 2018, modified in 2022. # # The checkpackages function is derived from http://tinyurl.com/jjwyzph # # The code for making qualitative palette is derived from: # # http://tinyurl.com/hawqufy # # The code for heat maps is modified from: http://tinyurl.com/hxxbmvz # # assume this is under GNU general public licence # # http://www.gnu.org/licenses/gpl-3.0.en.html # # if you decide to use this script in your work, please mention # # the FoodMicrobionet web site # # http://www.foodmicrobionet.org # # and cite relevant publications therein as appropriate # # References for packages used in this script: # all_packages <- c("base", .cran_packages) map(all_packages, citation) #########1#########2#########3#########4#########5#########6#########7#########8 # this script was successfully tested on the following systems: # iMac 21.5 inches late 2013, MacOS 10.15.7, R 4.1.2, RStudio 2021.09.0+351 # MacBook Pro Retina 13" Early 2015, 2.7 GHz Intel Core i5, MacOS 10.15.7, R 4.1.2, RStudio 2021.09.0+351 #########1#########2#########3#########4#########5#########6#########7#########8 # Future plans (in case I have time, which is unlikely): # transform the script in an interactive Rmarkdown document (almost done) # improve filtering # use function to automate/improve some of the actions i.e. 
# creation of palettes for samples # use functions from phyloseq and SpiecEasi to estimate networks # improve the way the format of the input data is checked # offer different abundance/prevalence filters # add options for aggregating taxa (if too many) # add an option for using absolute or relative abundances in vegan::metaMDS # maybe make alpha a function of the number of points in the MDS plots # automatically skip bipartite analysis if sample agg is not llabel # improve the control of options in plotweb
1463fe7d1f6c4c96524386eb03a50658ab326e13
8db74a17ef5a9218a0abd1dda219ed14ab1810ac
/plot3.R
35266b9c4c70e5400b581e452024a268adc8c015
[]
no_license
robertjacobson/ExData_Plotting1
915bf8165b8340a98ed650ff237258c5bb3c073c
a7fc240137dd1f258155345f35fe5751e115922d
refs/heads/master
2021-08-19T17:31:34.199499
2017-11-27T03:13:34
2017-11-27T03:13:34
112,108,990
0
0
null
2017-11-26T19:08:21
2017-11-26T19:08:21
null
UTF-8
R
false
false
948
r
plot3.R
## plot3.R -- Line plot of the three energy sub-metering series over
## Thu 2007-02-01 and Fri 2007-02-02, written to plot3.png.
##
## Expects a data frame `datas` (created by downloadDataset.R) containing at
## least the columns Date, Sub_metering_1, Sub_metering_2 and Sub_metering_3,
## one row per minute over the two days.
## NOTE(review): the schema of `datas` is inferred from usage here --
## confirm against downloadDataset.R.

# Load the dataset only if it is not already in the environment.
# BUG FIX: the original tested `length(datas) == 0`, which raises
# "object 'datas' not found" when `datas` does not exist at all --
# exactly the case the check was meant to handle.
if (!exists("datas")) {
  source('downloadDataset.R')
}

# Index of the first observation on Friday, used to place the "Fri" tick.
friIndex <- match(as.Date('2007-02-02'), datas$Date)
count <- nrow(datas)

plot_colors <- c('grey', 'red', 'blue')
max_y <- max(datas$Sub_metering_1, datas$Sub_metering_2, datas$Sub_metering_3)

png(file = 'plot3.png', width = 480, height = 480)

# Draw the three sub-metering series as overlaid lines (x = row index).
plot(datas$Sub_metering_1, type = 'l', ann = FALSE, axes = FALSE,
     col = plot_colors[1])
lines(datas$Sub_metering_2, type = 'l', ann = FALSE, col = plot_colors[2])
lines(datas$Sub_metering_3, type = 'l', ann = FALSE, col = plot_colors[3])

# X axis: label the start of each day by observation index.
axis(1, at = 0, lab = c('Thu'))
axis(1, at = friIndex, lab = c('Fri'))
axis(1, at = count, lab = c('Sat'))

# Y axis label and tick marks every 10 units.
title(ylab = 'Energy sub metering')
axis(2, las = 1, at = 10 * 0:max_y)

# Legend. Fixes: label typo 'Sub_meterng_1' -> 'Sub_metering_1'; the stray
# positional `max_y` (interpreted as the ignored y coordinate when x is a
# keyword) is dropped and `legend` is now passed by name.
legend("topright",
       legend = c('Sub_metering_1', 'Sub_metering_2', 'Sub_metering_3'),
       cex = 0.8, col = plot_colors, lwd = 2)

box()
dev.off()
be4a421eb187942de64e22960daeecc6842e6dec
217c2788d589bc0dff3993bd4a02c04b26296fb2
/dailyCovidRestimate.R
ed6db2094a18ecf508e11b95b2bccb26a08a7e8f
[]
no_license
tjibbed/UKFprojection
d3fdecf1a5deace4f4956a0c5bb924a62a4a1f99
37b300cb2052eec6ab58595a82350c9df65ec7c5
refs/heads/master
2023-02-13T09:46:00.873526
2021-01-14T10:20:34
2021-01-14T10:20:34
256,415,670
0
0
null
null
null
null
UTF-8
R
false
false
1,296
r
dailyCovidRestimate.R
## dailyCovidRestimate.R -- Daily R(t) estimation for COVID-19 incidence in
## three German Landkreise (Emmendingen, Freiburg, Hochschwarzwald), driven
## by helper functions sourced from the project files below.

library(ggplot2)
library(data.table)
library(stringr)
library(dplyr)
library(psych)
library(ggpubr)
library(animation)
library(tidyverse)
library(survival)
library(flexsurv)
library(lubridate)
library(survminer)
library(RCurl)
library(httr)
library(jsonlite)

# Project-local helpers; these define readLandKreisNames(),
# getInciForKreiseRKI(), singleRtEsti(), multiRtEsti(), getReShadedPlot(), ...
source("EarlyIncidencePrediction.R")
source("WithinHospModel.R")
source("FigurePlotting.R")
source("IncidenceModel.R")
source("Survival.R")
source("ExpertParameters.R")

readLandKreisNames()

# Pull the RKI incidence series for the selected Kreise.
rki_incidence <- getInciForKreiseRKI(c("Emmendi", "Freiburg", "Hochschwarzwald"),
                                     loadData = TRUE)
sum(rki_incidence$all)

# Discretised distributions handed to the estimators: an Exp(rate = 1/5)
# generation interval and a Gamma(1.87, 0.28) delay distribution.
gen_interval_exp <- pexp(1:100, 1 / 5) - pexp(0:99, 1 / 5)
delay_gamma <- pgamma(1:100, 1.87, 0.28) - pgamma(0:99, 1.87, 0.28)

# Single-run R(t) estimate (result auto-printed at top level).
singleRtEsti(rki_incidence$all,
             min(rki_incidence$Date),
             as.Date("2020-09-28"),
             gen_interval_exp,
             delay_gamma,
             runLength = 150,
             underreporting = 1)

# Multi-run (bootstrap) R(t) estimate.
# An Exp(rate = 1/3) generation interval, pexp(1:100, 1/3) - pexp(0:99, 1/3),
# was a commented-out alternative for the fourth argument in the original.
rt_runs <- multiRtEsti(rki_incidence$all,
                       min(rki_incidence$Date),
                       as.Date("2020-09-28"),
                       c(1, 0),
                       delay_gamma,
                       runLength = 150,
                       underreporting = 1,
                       numRuns = 1000)
warnings()

getReShadedPlot(rt_runs, ylimits = c(0, 6))
b1fb50fb738343851a28b4363c79ee3ef9bb0d7b
ae6580683da835a034d2e43a2b8023c43071bd05
/my_ann_orignal.R
c3be7fc86ffca3c9063d753ed7cf693783ec6063
[ "MIT" ]
permissive
agrutkarsh/Driving-Stereotype-Identification-of-Heavy-Goods-Vehicle-code-only-i.e.-without-dataset-
f25c3af2b71dd3c1655b6c4d3325a0411fc8ab6d
00035f560d42150bddf031e3eea31fd99560cc19
refs/heads/master
2021-04-29T14:29:07.706540
2018-02-16T16:31:41
2018-02-16T16:31:41
121,773,042
0
0
null
2018-02-16T16:30:31
2018-02-16T16:27:37
null
UTF-8
R
false
false
2,807
r
my_ann_orignal.R
## my_ann_orignal.R -- Train a 10-20-4 feed-forward neural network with
## online backpropagation (sigmoid activations) on labelled driving data,
## then classify the unlabelled rows.
##
## Input : output_4cls_unnormalised_data.csv
##   columns 1-10 : features (scaled by /300 below)
##   column  11   : class label 1..4, or the string 'NaN' for unlabelled rows
##   NOTE(review): column 11 is compared against the string 'NaN', so it is
##   assumed to be read as character data -- confirm with the actual file.
## Output: ann_result_on_unnormalised_data2.csv -- one predicted class per
##   unlabelled row, in file order.

setwd('C:/Users/psxua/Dropbox/projects/phd/codes/r/edinburgh series')

# lib.loc paths are machine-specific; kept from the original environment.
library("pracma", lib.loc="~/R/R-3.2.2/library")   # provides sigmoid()
library("cclust", lib.loc="~/R/R-3.2.2/library")
library("cluster", lib.loc="~/R/R-3.2.2/library")
library("clValid", lib.loc="~/R/R-3.2.2/library")

data <- read.csv("output_4cls_unnormalised_data.csv", header = FALSE)
data2 <- data[, 1:10]
data2 <- data2 / 300            # crude feature scaling

no_of_row <- nrow(data)
no_of_col <- ncol(data)
max_cls <- 4                    # number of output classes

# Split rows into labelled training data and unlabelled rows to classify.
pos <- cbind()                  # original row indices of unlabelled rows
nc <- rbind()                   # unlabelled feature rows
train <- rbind()                # labelled feature rows
out <- rbind()                  # class labels of the labelled rows
for (i in seq_len(no_of_row)) {
  if (data[i, 11] == 'NaN') {
    pos <- cbind(pos, i)
    n <- data2[i, 1:10]
    nc <- rbind(nc, n)
  } else {
    n <- data2[i, 1:10]
    train <- rbind(train, n)
    n <- data[i, 11]
    out <- rbind(out, n)
  }
}

# One-hot encode the training labels.
out1 <- array(0, dim = c(nrow(train), max_cls))
for (i in seq_len(nrow(train))) {
  for (j in seq_len(max_cls)) {
    if (out[i] == j) out1[i, j] <- 1
  }
}

trd <- t(train)                 # features in rows, one column per sample
nhl <- 20                       # hidden layer size
nol <- 4                        # output layer size
nil <- nrow(trd)                # input layer size (10 features)
ns <- ncol(trd)                 # number of training samples

# Random initial weights, uniform in [-1, 1].
wih <- rbind()                  # input -> hidden weights (nil x nhl)
for (i in seq_len(nil)) {
  temp <- -1 + 2 * (runif(nhl))
  wih <- rbind(wih, temp)
}
who <- rbind()                  # hidden -> output weights (nhl x nol)
for (i in seq_len(nhl)) {
  temp <- -1 + 2 * (runif(nol))
  who <- rbind(who, temp)
}

y <- out1
alpha <- 0.2                    # learning rate
se <- 1                         # sum of squared errors over the error matrix
epoc <- 0
yhl <- matrix(0, nhl, 1)        # hidden-layer activations
yp <- matrix(0, ns, nol)        # network outputs
error <- matrix(0, ns, nol)     # per-sample output errors
delo <- matrix(0, nol, 1)       # output-layer deltas
delh <- matrix(0, nhl, 1)       # hidden-layer deltas

# Online backpropagation for a fixed number of epochs.
while (epoc < 2000) {
  epoc <- epoc + 1
  for (i in seq_len(ns)) {
    # Forward pass: hidden layer (dot product of sample i with each column
    # of wih, squashed through the logistic sigmoid).
    for (j in seq_len(nhl)) {
      yhl[j, 1] <- sigmoid(sum(trd[, i] * wih[, j]), a = 1, b = 0)
    }
    # Forward pass: output layer, plus error and output-layer deltas.
    for (j in seq_len(nol)) {
      yp[i, j] <- sigmoid(sum(yhl[, 1] * who[, j]), a = 1, b = 0)
      error[i, j] <- y[i, j] - yp[i, j]
      delo[j, 1] <- yp[i, j] * (1 - yp[i, j]) * error[i, j]
    }
    # Update hidden -> output weights.
    for (j in seq_len(nhl)) {
      for (k in seq_len(nol)) {
        who[j, k] <- who[j, k] + alpha * yhl[j, 1] * delo[k, 1]
      }
    }
    # Back-propagate deltas to the hidden layer.
    for (j in seq_len(nhl)) {
      temp <- 0
      for (k in seq_len(nol)) {
        temp <- temp + delo[k, 1] * who[j, k]
      }
      delh[j, 1] <- yhl[j, 1] * (1 - yhl[j, 1]) * temp
    }
    # Update input -> hidden weights.
    # BUG FIX: the original inner loop was `for (k in nhl)`, which iterates
    # over the single value 20 and therefore trained only the LAST hidden
    # unit's incoming weights; all nhl hidden units must be updated (mirror
    # of the `1:nol` loop in the hidden -> output update above).
    for (j in seq_len(nil)) {
      for (k in seq_len(nhl)) {
        wih[j, k] <- wih[j, k] + alpha * trd[j, i] * delh[k, 1]
      }
    }
  }
  # Hoisted out of the per-sample loop: only the value after the last
  # sample was ever used, and the printed quantity (total SSE over the
  # whole error matrix) is identical.
  se <- sum((error^2))
  print(se)
  print(epoc)
}

# Classify the unlabelled rows with the trained network.
output <- rbind()
ynew <- matrix(0, nol, 1)
for (loop in seq_len(nrow(nc))) {
  x <- nc[loop, ]
  for (j in seq_len(nhl)) {
    yhl[j, 1] <- sigmoid(sum(t(x) * wih[, j]), a = 1, b = 0)
  }
  for (j in seq_len(nol)) {
    ynew[j, 1] <- sigmoid(sum(yhl[, 1] * who[, j]), a = 1, b = 0)
  }
  # Predicted class = index of the largest output; which.max() picks the
  # first maximum on ties, exactly like the original strict-< argmax loop.
  output <- rbind(output, which.max(ynew))
}

write.csv(output, file = "ann_result_on_unnormalised_data2.csv")
536a7d1c9d134abb27ad708f50723dd99a936078
b5e7b204795818d53e69488b3883a746a11c11ff
/plot4.r
acacfcc633a6eb272490093a050c4986eef0751e
[]
no_license
geoffness/ExData_Plotting1
b8a56eeafa6ae16b80b23a2b704b4e687fddceea
91ba9b445efbd7dd5114fa766937eb6b5bf3f16a
refs/heads/master
2021-01-18T07:37:36.626144
2015-09-13T11:22:58
2015-09-13T11:22:58
42,343,123
0
0
null
2015-09-12T03:27:20
2015-09-12T03:27:20
null
UTF-8
R
false
false
1,369
r
plot4.r
## Read the household energy consumption data, then draw a 2x2 panel of plots
## covering global active power, voltage, energy sub metering and global
## reactive power.
## Note: assumes presence of file "household_power_consumption.txt" in the
## working directory.

# Pull out only the rows for 1-2 Feb 2007 via grep, then re-attach the
# header line read from the raw file.
feb <- read.table(
  pipe('grep "^[1-2]/2/2007" "household_power_consumption.txt"'),
  sep = ";"
)
header <- readLines("household_power_consumption.txt", n = 1)
names(feb) <- strsplit(header, ";")[[1]]

# Derive a POSIX timestamp and an abbreviated weekday name per row.
feb$datetime <- strptime(paste(feb$Date, feb$Time), "%d/%m/%Y %H:%M:%S")
feb$weekday <- format(feb$datetime, "%a")

# Render the four panels into a png device.
png(filename = "plot4.png")
par(mfrow = c(2, 2))

plot(feb$datetime, feb$Global_active_power, type = "l",
     xlab = "", ylab = "Global Active Power")

plot(feb$datetime, feb$Voltage, type = "l",
     xlab = "datetime", ylab = "Voltage")

plot(feb$datetime, feb$Sub_metering_1, type = "l",
     ylab = "Energy sub metering", xlab = "")
lines(feb$datetime, feb$Sub_metering_2, col = "red")
lines(feb$datetime, feb$Sub_metering_3, col = "blue")
legend("topright",
       legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"),
       col = c("black", "red", "blue"), lty = c(1, 1, 1), bty = "n")

plot(feb$datetime, feb$Global_reactive_power, type = "l",
     xlab = "datetime", ylab = "Global_reactive_power")

dev.off()
e0beb5d01431c0eaa05a4873a0f816b2ff56a078
153415943e27c98bf6bd27366ce49835050f0899
/games.R
b47b787acadf6cdce94d0a69eaeb67f186a50b96
[]
no_license
ptoulis/long-term-causal-effects
a8f7746618df0188e58728e6035e9538fb8aebcb
7c955450978ca81e4cbe7e6d725d37cefb748ce0
refs/heads/master
2021-01-22T09:43:08.141192
2016-09-22T22:20:07
2016-09-22T22:20:07
68,416,870
5
1
null
null
null
null
UTF-8
R
false
false
2,156
r
games.R
# GAME = list(payoff matrix for agent A, payoff matrix for agent B, ...)
# mixedStrategy = (piA, piB, ...) where piX = action distribution for agent X.
#

# McKelvey game: two 3x3 payoff matrices parameterised by A and B.
game.mckelvey <- function(A=5, B=5) {
  payoff_a <- matrix(c(1, 0, 1,
                       0, 0, 0,
                       1, A, 1), nrow=3, byrow=TRUE)
  payoff_b <- matrix(c(1, 0, 1,
                       0, 0, 0,
                       1, B, 1), nrow=3, byrow=TRUE)
  list(payoff_a, payoff_b)
}

# Lieberman zero-sum game, scaled by the constant 3.373.
game.lieberman <- function() {
  payoff_a <- 3.373 * matrix(c(15, 0, -2,
                               0, -15, -1,
                               1, 2, 0), nrow=3, byrow=TRUE)
  payoff_b <- 3.373 * matrix(c(-15, 0, -1,
                               0, 15, -2,
                               2, 1, 0), nrow=3, byrow=TRUE)
  list(A=payoff_a, B=payoff_b)
}

# Rapoport 5x5 win/lose game; B's payoffs are the negated transpose of A's.
game.rapoport <- function(W, L) {
  payoff_a <- 1 * matrix(c(W, L, L, L, L,
                           L, L, W, W, W,
                           L, W, L, L, W,
                           L, W, L, W, L,
                           L, W, W, L, L), nrow=5, byrow=TRUE)
  list(A=payoff_a, B=-t(payoff_a))
}

# Index range of agentId's block within the flattened mixed-strategy profile.
# Each agent owns a contiguous run of nrow(game[[1]]) entries.
get.player.index <- function(mixed.strategy, game, agentId) {
  n_actions <- nrow(game[[1]])
  first <- (agentId - 1) * n_actions + 1
  last <- first + n_actions - 1
  stopifnot(last <= length(mixed.strategy), first > 0)
  list(start=first, end=last)
}

# Extract agent agentId's action distribution (as a column vector) from the
# full mixed-strategy profile; sanity-checks that it sums to 1.
player.mixedStrategy <- function(mixed.strategy, game, agentId) {
  idx <- get.player.index(mixed.strategy, game, agentId)
  dist <- matrix(mixed.strategy[idx$start:idx$end], ncol=1)
  stopifnot(abs(1 - sum(dist)) < 1e-3)
  dist
}

# All other agents' strategies, flattened out into a single column vector.
other.mixedStrategy <- function(mixed.strategy, game, agentId) {
  idx <- get.player.index(mixed.strategy, game, agentId)
  matrix(mixed.strategy[-(idx$start:idx$end)], ncol=1)
}
858ae6fdda6d94c51b690be5a7bffbff6b3f4cd2
721d96cf611a8ee4ee224e6dae7c92a9faf180c0
/R/FUNCTION-trim_v5.R
99227877a3d002334a437f1c95cc07354e892e94
[]
no_license
cran/astrochron
5b05df5f4f114cdf6df630cdfe0f649e5f45dbbf
c86d83297c0b75ce1ab1a026a25fe05e0e6cb3fe
refs/heads/master
2023-09-01T06:57:37.447424
2023-08-26T13:40:02
2023-08-26T14:30:40
21,361,937
5
1
null
null
null
null
UTF-8
R
false
false
2,333
r
FUNCTION-trim_v5.R
### This function is a component of astrochron: An R Package for Astrochronology
### Copyright (C) 2016 Stephen R. Meyers
###
###########################################################################
### trim: function to remove outliers - (SRM: March 9-10, 2012; June 30, 2012;
###                                      May 20, 2013; August 6, 2015; July 22, 2016)
###
### automatically remove outliers from time series.
### outliers are determined using boxplot
###########################################################################

# trim: drop outliers from a two-column stratigraphic series.
#
# Arguments:
#   dat     - two-column series: location in column 1, value in column 2.
#   c       - whisker-length multiplier passed to boxplot.stats(coef=);
#             values beyond c * IQR from the box are flagged as outliers.
#   genplot - if TRUE, draw a 2x2 summary (series with removed points in red,
#             histogram/density, boxplot, normal Q-Q plot).
#   verbose - if TRUE, report point counts before and after trimming.
#
# Returns: a data frame containing only the rows that survived trimming.
trim <- function (dat,c=1.5,genplot=T,verbose=T)
{
  if(verbose) cat("\n----- REMOVING OUTLIERS FROM STRATIGRAPHIC SERIES -----\n")
  ipts <- length(dat[,1])
  if(verbose) cat(" * Number of original data points=", ipts,"\n")
  # eval[i] is TRUE when point i is NOT among the boxplot-flagged outliers.
  eval <- !dat[,2] %in% boxplot.stats(dat[,2],coef=c)$out
  # Pre-size two ipts x 2 matrices: kept points go into `out`, rejected
  # points into `trimmed`; unused rows stay NA and are filtered out below.
  out <- rep(NA,ipts*2)
  dim(out) <- c(ipts,2)
  trimmed <- rep(NA,ipts*2)
  dim(trimmed) <- c(ipts,2)
  trimpts=0
  for (i in 1:ipts)
   {
     if (isTRUE(eval[i]))
      {
        out[i,1]= dat[i,1]
        out[i,2]=dat[i,2]
        trimpts = trimpts + 1
      }
     else
     {
        trimmed[i,1]= dat[i,1]
        trimmed[i,2]= dat[i,2]
     }
   }
  if(verbose) cat(" * Number of data points post-trimming=", trimpts,"\n")

### grab all data except 'NA' values, assign to dat2
  # NOTE(review): `out` is numeric, so out[,2] == 'NA' coerces to character
  # and yields NA (not TRUE) for the placeholder rows; subset() then drops
  # those NA-condition rows.  This works, but !is.na(out[,2]) would state
  # the intent directly — confirm before changing.
  dat2 <- data.frame(subset(out,!(out[,2] == 'NA')))
### grab all trimmed data except 'NA' values, assign to datrimmed
  datrimmed <- data.frame(subset(trimmed,!(trimmed[,2] == 'NA')))

  if(genplot)
   {
### plot data series. Note, cex is the factor by which to increase or decrease default symbol size
    par(mfrow=c(2,2))
    xrange = range(c(dat[,1]))
    yrange = range(c(dat[,2]))
    plot(dat2, cex=.5,xlim=xrange,ylim=yrange,xlab="Location",ylab="Value",main="Data Series")
    lines(dat2)
    # Overlay the removed points in red on the same axes.
    par(new=TRUE)
    plot(datrimmed, cex=.5, xlim=xrange,ylim=yrange,col="red")
### plot the denisty and the histogram together
    hist(dat2[,2],freq=F,xlab="Value",main="Distribution values"); lines(density(dat2[,2], bw="nrd0"),col="red"); grid()
### boxplot
    boxplot(dat2[,2])
### Normal probabilty plot (Normal Q-Q Plot)
    qqnorm(dat2[,2]); qqline(dat2[,2], col="red"); grid()
   }

  return(dat2)

### END function trim
}
c719af45b4d36200e0dc9b8b55721cf3f6e24e39
24bf55acfd9df65714d83975ae5e2e0988d2486e
/man/fbind.Rd
7237af1f6e578f1ecfdf66a52985e001e9bf43da
[]
no_license
pdaniele/foofactors
7764d789e5fd33712d4c5977eb872a306863154a
7e19df7176ba16ad0a0438e4aebe4721861c0957
refs/heads/master
2020-06-13T16:12:42.697155
2016-12-08T19:07:09
2016-12-08T19:07:09
75,359,928
0
1
null
null
null
null
UTF-8
R
false
true
472
rd
fbind.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/fbind.R \name{fbind} \alias{fbind} \title{Bind two factors} \usage{ fbind(a, b) } \arguments{ \item{a}{factor} \item{b}{factor} } \value{ factor } \description{ Create a new factor from two existing factors, where the new factor's levels are the union of the levels of the input factors } \examples{ fbind(iris$Species[c(1,51,101)], PlantGrowth$group[c(1,11,21)]) }
8f8907458e0386780c7d84a84a16c63173fd20ff
1e7f61b700940811ef7757d21b3690023ec9822a
/tests/dimchecks.R
d48367250063349653543b9bd09cc5adbe333aaa
[]
no_license
jwjeong1117/pomp
b2417b9514c583fbe19c3280878fab6d60817c00
5ce3725cc381039e310f1999b8d37631ac359c14
refs/heads/master
2021-01-22T11:21:53.811139
2015-10-20T21:15:27
2015-10-20T21:15:27
null
0
0
null
null
null
null
UTF-8
R
false
false
1,652
r
dimchecks.R
## Regression checks for how pomp's basic model components (rprocess,
## rmeasure, dmeasure, skeleton, partrans) recycle state columns against
## parameter columns when their counts differ.  Calls wrapped in try() are
## expected to error (incommensurate column counts); stopifnot() lines pin
## results that must be identical under recycling.
library(pomp)

set.seed(1420306530L)

# Build the ricker example and pull out its parameters and initial state.
pompExample(ricker)
po <- ricker
pars <- coef(po)
xstart <- init.state(po,params=pars)

# rprocess: single state/params, replicated states, replicated params.
rprocess(po,xstart,times=0:5,params=pars)[,1,]
rprocess(po,xstart=parmat(xstart,5),times=0:5,params=pars)[,3,]
rprocess(po,xstart=xstart,times=0:5,params=parmat(pars,3))[,3,]
# 2 state columns vs 3 parameter columns do not recycle — expected to error.
try(
  rprocess(po,xstart=parmat(xstart,2),times=0:5,params=parmat(pars,3))[,,3]
)
# ... but 2 vs 6 (an exact multiple) recycles cleanly.
rprocess(po,xstart=parmat(xstart,2),times=0:5,params=parmat(pars,6))[,,3]
x <- rprocess(po,xstart=parmat(xstart,2),times=0:5,params=parmat(pars,8))

# rmeasure: one parameter set against 8 state columns is fine; 8 vs 3 is
# expected to error; 8 vs 4 recycles.
rmeasure(po,x=x,params=pars,times=0:5)[,,3]
try(
  rmeasure(po,x=x,params=parmat(pars,3),times=0:5)[,,3]
)
rmeasure(po,x=x,params=parmat(pars,4),times=0:5)
x <- rprocess(po,xstart=xstart,times=0:5,params=pars)
rmeasure(po,x=x,params=parmat(pars,2),times=0:5)
y <- rmeasure(po,x=x,params=parmat(pars,4),times=0:5)

# dmeasure with a single observation column (drop=F keeps the 3-d shape).
dmeasure(po,x=x,y=y[,2,,drop=F],params=pars,times=0:5)
x <- rprocess(po,xstart=parmat(xstart,3),times=0:5,params=pars)
y <- rmeasure(po,x=x,params=pars,times=0:5)
# Multiple observation columns against multiple parameter columns — expected
# to error.
try(dmeasure(po,x=x,y=y,params=parmat(pars,3),times=0:5))
# A single observation column must give the same result whether or not the
# parameters are replicated.
f1 <- dmeasure(po,x=x,y=y[,1,,drop=F],params=parmat(pars,3),times=0:5)
f2 <- dmeasure(po,x=x,y=y[,1,,drop=F],params=pars,times=0:5)
stopifnot(identical(f1,f2))

# skeleton: replicating the parameters must reproduce the unreplicated
# result, block by block.
g1 <- skeleton(po,x=x,t=0:5,params=pars)
g2 <- skeleton(po,x=x,t=0:5,params=parmat(pars,3))
stopifnot(identical(g1,g2))
g3 <- skeleton(po,x=x,t=0:5,params=parmat(pars,6))
stopifnot(identical(g1,g3[,1:3,]))
stopifnot(identical(g1,g3[,4:6,]))

# partrans applied to a parmat must equal parmat of the transformed coefs.
pompExample(gompertz)
p <- parmat(coef(gompertz),5)
f1 <- partrans(gompertz,p,"inv")
f2 <- parmat(coef(gompertz,transform=TRUE),5)
stopifnot(identical(f1,f2))
07be9713b1c3a9bae2f3c0db18bd95cb630adab3
26f465c3aeab5838385e7008f8fc15f97deb74b9
/experiments/Experiment 3/algorithms/LOF.R
aaa805780d777f873e75924515bd58fd72e87e36
[]
no_license
msandim/master-thesis
3776038565897ccd838f5f88a06499a90f2e0e86
db60b549446e953747764e40feccc7cf1fcbe2d9
refs/heads/master
2021-09-14T01:23:39.003398
2018-05-07T02:51:41
2018-05-07T02:51:41
84,819,866
0
0
null
null
null
null
UTF-8
R
false
false
867
r
LOF.R
library(dbscan)
library(caret)

# Run Local Outlier Factor over a grid of neighbourhood sizes.
# `data` must contain an `outlier` column (the label) plus feature columns.
# Returns a data frame holding the label and one LOF score column per k
# (lof_03, lof_05, lof_08, lof_14, lof_19, lof_25, lof_30).
lof_apply <- function(data)
{
  features <- data %>% select(-outlier)
  scores <- data %>% select(outlier)

  # Centre and scale the features before computing distances.
  scaler <- preProcess(features, method = c("center", "scale"))
  features <- predict(scaler, features) %>% as.matrix

  # One score column per neighbourhood size, zero-padded in the name.
  for (k in c(3, 5, 8, 14, 19, 25, 30))
  {
    scores[[sprintf("lof_%02d", k)]] <- lof(features, k = k)
  }

  return(scores)
}

#lof_save <- function(data, algorithm, dataset, fold)
#{
#  for(parameter in names(data))
#  {
#    writePredictions(data[[name]],
#                     paste0(algorithm,":",parameter),
#                     dataset,
#                     fold)
#  }
#}
3d1419bd50b9b251dcb324310eacda3c69a6ad27
3f680c621d68cd817097e1a83915ceaead162e12
/man/getAllFunnelsIncAIC.Rd
80a9a8347fc7b2fb716123f3726f61634e676c8b
[]
no_license
rohan-shah/mpMap2
46273875750e7a564a17156f34439a4d93260d6c
c43bb51b348bdf6937e1b11298b9cdfe7a85e001
refs/heads/master
2021-05-23T20:34:59.327670
2020-07-19T10:24:09
2020-07-19T10:24:09
32,772,885
10
6
null
null
null
null
UTF-8
R
false
true
1,779
rd
getAllFunnelsIncAIC.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getAllFunnels.R
\name{getAllFunnelsIncAIC}
\alias{getAllFunnelsIncAIC}
\title{Get all funnels, including AIC lines}
\usage{
getAllFunnelsIncAIC(cross, standardised = FALSE)
}
\arguments{
\item{cross}{The object of class \code{mpcross} containing the pedigree of interest}

\item{standardised}{Should the output funnels be standardised?}
}
\value{
Matrix of mixing orders that contribute to the final population. Rows DO NOT refer to specific genetic lines.
}
\description{
Get every order of the founding lines, which makes a contribution to the final population
}
\details{
This function is similar to \code{\link{getAllFunnels}}, but more useful for populations with maintenance (or AIC) generations. It returns a list of all the mixing orders in the initial generations, which make a genetic contribution to the final population. Unlike for \code{\link{getAllFunnels}}, rows of the returned matrix DO NOT refer to specific genetic lines.
}
\examples{
set.seed(1)
pedigree <- fourParentPedigreeRandomFunnels(initialPopulationSize = 1000,
	selfingGenerations = 6, intercrossingGenerations = 1)
#Assume infinite generations of selfing in subsequent analysis
selfing(pedigree) <- "infinite"
#Generate random map
map <- qtl::sim.map(len = 100, n.mar = 101, anchor.tel = TRUE, include.x = FALSE)
#Simulate data
cross <- simulateMPCross(map = map, pedigree = pedigree, mapFunction = haldane, seed = 1L)
#Because we have maintenance in this experiment, we can't get out the funnels per genetic line
funnels <- getAllFunnels(cross)
dim(funnels)
funnels[1:10,]
#But we can get out a list of all the funnels that go into the experiment.
funnels <- getAllFunnelsIncAIC(cross)
dim(funnels)
funnels[1:10,]
}
84f64fe753f6c0e4bc6887562876d6bdb68726db
f335d68753c16e38f887e7cbaee903feeaa5d37b
/man/var_sinar.Rd
7d3004882b1e10fb430686e44544ac6a86e112d2
[]
no_license
cran/sinar
fd50c470ed8924d6ef03c45a8ad641c17341dde6
bd1f88640db4bf80a5dacb3f938ce842383b80e7
refs/heads/master
2023-01-07T03:42:14.314920
2020-11-06T16:00:06
2020-11-06T16:00:06
311,435,252
0
0
null
null
null
null
UTF-8
R
false
true
536
rd
var_sinar.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cls_sinar.R
\name{var_sinar}
\alias{var_sinar}
\title{Empirical estimate for the variance of innovations.}
\usage{
var_sinar(X)
}
\arguments{
\item{X}{An integer matrix where each cell is the observed value in the regular lattice.}
}
\value{
The estimated standard deviation in the \eqn{SINAR(1,1)}.
}
\description{
\eqn{\sigma^2_\epsilon} is the variance of the innovations for the \eqn{SINAR(1,1)} model.
}
\examples{
data("nematodes")
var_sinar(nematodes)
}
8e285cc88595bfdc2a487f21fe86de8580539673
89a1c2e27d5c39aa71a6f7e7680d10f362e3f0f6
/LawfulResidentStatusScrape.r
c3d16c53871fc9d759520fb6f76376427129dfbf
[]
no_license
azankhanyari/USImmigration_Visualization
f9e9d3b59d2ae4d944d82ea826c9562edbb4376d
547af6b6faac3f63a77be10a2b944fb5c09518da
refs/heads/master
2022-03-25T13:33:20.079910
2019-12-17T17:40:30
2019-12-17T17:40:30
228,669,965
0
0
null
null
null
null
UTF-8
R
false
false
517
r
LawfulResidentStatusScrape.r
library(rvest)

# Scrape DHS Yearbook 2017 Table 2 (persons obtaining lawful permanent
# resident status by region/country) and save the 2017 totals to csv.
url <- "https://www.dhs.gov/immigration-statistics/yearbook/2017/table2"

# BUGFIX: rvest's html() is deprecated (and removed in rvest >= 1.0);
# read_html() is its direct replacement.
table3 <- url %>%
  read_html() %>%
  html_nodes(xpath = '//*[@id="content-area"]/div/div/article/div[1]/div[2]/div/div/table') %>%
  html_table(fill = TRUE)

# html_table() returns a list of data frames; the xpath matches one table.
table3_unlist <- table3[[1]]

# Keep the country rows (2:88) and columns 1 (country) and 4 (2017 total).
NaturalisationByRegion <- table3_unlist[2:88, c(1, 4)]
names(NaturalisationByRegion) <- c('Country', 'Total People in 2017')

# NOTE(review): hard-coded absolute Windows path — consider making this
# relative to the working directory.
write.csv(NaturalisationByRegion,
          file = "C:\\Users\\Rick\\Documents\\DV CA\\LawfulResidenceByCountry.csv",
          row.names = FALSE)
d2e0f0b72eb9fa2fe7ad75159c71b9adf7f951b1
180dd82b3c98dd23bab21ba2566277cd3a2a3d51
/R/read_members.R
6922a94c7afb96d21ab4c1cbda51eca4f4580019
[ "Apache-2.0" ]
permissive
andrew-MET/harpIO
2e90861751a2b47ec8cdebde3de6d35414575cb7
976ddeca6337c31e7682d7a371552942cbb4cc66
refs/heads/master
2023-05-10T11:55:28.830541
2023-01-11T09:18:03
2023-01-11T09:18:03
275,838,990
0
3
NOASSERTION
2022-11-08T12:24:30
2020-06-29T14:27:58
R
UTF-8
R
false
false
7,580
r
read_members.R
# Read ensemble members from a forecast file
#
# \code{read_members} reads all ensemble members from a forecast file for a
# single lead time. It is possible to read from grib or netcdf files via the
# helper functions. It is assumed that grib files only contain one member and
# netcdf files contain all members.
#
# @param model_files Files to read. For NetCDF only 1 file is expected, but for
#   grib a vector of files can be passed. The character string 'mbr' must be
#   somewhere in the paths for the member numbers to be identified. It is
#   assumed that grib files only contain one member. For grib2 (yet to be
#   implemented) this might not be the case.
# @param parameter The parameter to read.
# @param members A vector of numbers identifying the members to read. This is
#   ignored for grib files as it is assumed that the members were already
#   decided when the filenames were obtained.
# @param file_type The forecast file format. The function can attempt to
#   ascertain the format from the file name, but if it can't \code{file_type}
#   must be passed as an argument.
# @param lead_time The lead time to read. Required for NetCDF files.
# @param ... Arguments to be passed to \code{read_netcdf} or \code{read_grib}.
#
# @return A list containing: \code{model_data} (3d field; 3rd dimension is
#   ensemble member), \code{x} and \code{y} (coordinates in the file's
#   projection), \code{member}, \code{proj4_string}, \code{parameter} and
#   \code{filename} (the input paths).
# NOT currently in an exportable state. Use mepsr for now.

read_members <- function(model_files, parameter, members = seq(0, 9), file_type = NULL, lead_time = NULL, ...) {

  # Work out the file type from the extension when not supplied explicitly;
  # fall back to looking for "grib" anywhere in the path.
  if (is.null(file_type)) {
    file_type <- tolower(tools::file_ext(model_files[1]))
    if (! file_type %in% c("grib", "grb", "nc", "nc4", "netcdf")) {
      if (stringr::str_detect(model_files[1], "grib")) {
        file_type = "grib"
      } else {
        stop("Unable to ascertain file type. Call the function with file_type = '<file_type>'",
          call. = FALSE
        )
      }
    } else {
      # Normalise the extension to one of "grib" / "netcdf".
      file_type <- switch(
        file_type,
        "grb" = "grib",
        "nc"  = "netcdf",
        "nc4" = "netcdf",
        file_type
      )
    }
  }

  if (tolower(file_type) == "grib") {

    if (!requireNamespace("Rgrib2", quietly = TRUE)) {
      stop("Package Rgrib2 required for read_members() - you can get it from HARP",
        call. = FALSE
      )
    }

    # Grib: one member per file; the first file is the control.  Read it to
    # establish the grid, domain and projection.
    num_perturbed_members <- length(model_files) - 1
    model_file    <- model_files[1]
    geofield_data <- read_grib(model_file, parameter)

    domain_data <- meteogrid::DomainExtent(geofield_data)
    x <- seq(domain_data$x0, domain_data$x1, domain_data$dx)
    y <- seq(domain_data$y0, domain_data$y1, domain_data$dy)

    # Build a proj4 string from the geofield's projection attributes.
    proj4_string <- paste0(
      "+",
      paste(
        meteogrid::proj4.list2str(attr(geofield_data, "domain")$projection),
        collapse = " +"
      )
    ) %>%
      stringr::str_replace("latlong", "longlat") %>%
      stringr::str_replace_all(" = ", "=") %>%
      stringr::str_replace_all(" =", "=") %>%
      stringr::str_replace_all("= ", "=")

    # Parse member numbers from the "mbr" element of each file path.
    # BUGFIX: this previously used
    #   members <- ifelse(length(model_files) > 1, <vector>, 0)
    # but ifelse() truncates its result to the length of the test (1), so
    # only the first member number ever survived.
    if (length(model_files) > 1) {
      members <- model_files %>%
        strsplit("/") %>%
        purrr::map(~ stringr::str_subset(., "mbr")) %>%
        purrr::map_dbl(readr::parse_number)
    } else {
      members <- 0
    }

    data_all <- array(NA, c(dim(geofield_data), num_perturbed_members + 1))
    data_all[, , 1] <- geofield_data

  } else if (tolower(file_type) == "netcdf") {

    if (!requireNamespace("ncdf4", quietly = TRUE)) {
      stop("Package ncdf4 required for read_members() - Please install from CRAN",
        call. = FALSE
      )
    }

    if (is.null(lead_time)) stop("lead_time must be supplied for NetCDF data")

    # NetCDF: all members live in a single file.
    model_file <- model_files[1]
    ncID <- ncdf4::nc_open(model_file)
    x <- ncdf4::ncvar_get(ncID, "x")
    y <- ncdf4::ncvar_get(ncID, "y")
    proj4_string <- ncdf4::ncatt_get(ncID, "projection_lambert", "proj4")$value
    num_members  <- length(members)
    nc_members   <- ncdf4::ncvar_get(ncID, "ensemble_member")
    ncdf4::nc_close(ncID)

    # If more members were requested than the file holds, fall back to
    # reading every member present in the file.
    if (num_members > length(nc_members)) {
      cat("\nWARNING: Number of members in file =", length(nc_members))
      cat("\n Number of members requested =", num_members)
      cat("\n All members will be read from the file")
      cat("\n")
      members <- nc_members
      num_members <- length(members)
    }

    num_perturbed_members <- num_members - 1
    data_all <- array(NA, c(length(x), length(y), num_perturbed_members + 1))
    data_all[, , 1] <- read_netcdf(model_file, parameter, members[1], lead_time, ...)

  } else {

    stop("Unknown file type: ", file_type, ". Can only deal with netcdf or grib",
      call. = FALSE
    )

  }

  #
  # Get perturbed members (slice 1 already holds the control / first member).
  #
  if (num_perturbed_members > 0) {
    pb <- utils::txtProgressBar(min = 1, max = num_perturbed_members, initial = 1, style = 3)
    for (member in 1:num_perturbed_members) {
      if (file_type == "grib") {
        model_file <- model_files[member + 1]
        data_all[, , member + 1] <- read_grib(model_file, parameter)
      } else if (file_type == "netcdf") {
        # BUGFIX: slice member + 1 must hold members[member + 1]; the
        # original read members[member], duplicating the first member into
        # slice 2 and never reading the last requested member.
        data_all[, , member + 1] <- read_netcdf(model_file, parameter, members[member + 1], lead_time, ...)
      }
      utils::setTxtProgressBar(pb, member)
    }
  }

  #
  # Convert units - it is assumed that when geopotential is requested,
  # geopotential height is what is wanted.
  #
  is_temperature <- function(x) {
    tolower(x) %in% c("t", "t2m", "sst") | stringr::str_detect(x, "temperature")
  }

  is_pressure <- function(x) {
    tolower(x) == "pmsl" | stringr::str_detect(x, "pressure")
  }

  is_geopotential <- function(x) {
    tolower(x) %in% c("z0m", "z") | stringr::str_detect(x, "geopotential")
  }

  # Heuristic unit fixes based on the data magnitude: Kelvin -> Celsius,
  # Pa -> hPa, geopotential -> geopotential height.
  if (is_temperature(parameter) & min(data_all, na.rm = TRUE) > 200) {
    data_all <- data_all - 273.15
  }

  if (is_pressure(parameter) & min(data_all, na.rm = TRUE) > 2000) {
    data_all <- data_all / 100
  }

  if (is_geopotential(parameter)) {
    data_all <- data_all / 9.80665
  }

  list(
    model_data   = data_all,
    x            = x,
    y            = y,
    member       = members,
    proj4_string = proj4_string,
    parameter    = parameter,
    filename     = model_files
  )
}
1dff54020061706aec70ff0eb94fab77e1517e57
690c3a0b5858c6f6873ba2e01093da27f7b36e04
/R codes/Critical Slowing Down_R Codes/Out-of-sample predictions/BTC_appendix.R
92eb2ada8db02dd5cd41b2de7229f703c8b92cdb
[]
no_license
stancld/bachelors_thesis
ca1552ac3d8ad31de5e27b6457258776b077e03e
d218639f0e76d9240f4225ae4cc57550ac9f8232
refs/heads/master
2023-03-26T16:18:58.484752
2021-03-29T21:57:08
2021-03-29T21:57:08
352,791,769
3
3
null
null
null
null
UTF-8
R
false
false
4,924
r
BTC_appendix.R
# Critical-slowing-down indicators for BTC-USD ahead of the late-2017 peak:
# detrend the log price with a Gaussian-kernel moving average, then compute
# rolling-window AR(1) coefficients and standard deviations of the residuals.
library(quantmod)
library(ggplot2)
library(gtable)
library(grid)
library(gridExtra)
library(tidyverse)
library(lubridate)

setwd("C:/Data/Skola/Bachelor's Thesis/R Codes")

# Download daily BTC-USD and work with the log of the adjusted close.
getSymbols("BTC-USD", env = .GlobalEnv, from = '2015-08-20', to = '2018-02-23',
           periodicity = 'daily')
data <- log(`BTC-USD`[, 6])

##### Period 1: everything up to 2017-11-15
start <- 1
end <- which(time(data) == '2017-11-15')
(data1 <- data[start:end])

# Detrend with a Gaussian-kernel moving average (bandwidth sigma = 5 days).
MA <- rep(NA, end - start + 1)
time_frame <- 1:end
gaussian_kernel <- function(sigma, r, t){
  (1 / (sqrt(2*pi) * sigma)) * exp(-((r-t)^2) / (2 * sigma^2))
}
for (t in time_frame){
  MA[t] <- sum(sapply(time_frame, function(x) gaussian_kernel(sigma = 5, r = x, t = t) * data1[x])) /
    sum(sapply(time_frame, function(x) gaussian_kernel(sigma = 5, r = x, t = t)))
}

# Residuals around the kernel trend.
residuals <- rep(NA, end - start + 1)
residuals[1:end] <- data1[1:end] - MA[1:end]
plot(residuals, type = 'l')

### Rolling 100-day window indicators
# AR(1): lag-1 autoregression coefficient of the residuals in each window.
residuals.ar <- residuals[1:nrow(data)]
AR <- rep(NA, end - 100)
end1 <- end - 100
for (i in 1:end1){
  AR[i] <- summary(lm(residuals.ar[(i+1):(i+99)] ~ residuals.ar[i:(i+98)]))$coefficients[2,1]
}
# Pad with NAs so the series lines up with the full date axis below.
AR <- c(rep(NA, 100), AR, rep(NA, 100))
plot(AR, type = 'l')

# Rolling standard deviation: sqrt of the window variance of the residuals.
variance <- rep(NA, end - 100)
for (i in 1:end1){
  start <- i
  end2 <- i + 100
  variance[i] <- sqrt(var(residuals[start:end2]))
}
plot(variance, type = 'l')
variance <- c(rep(NA, 100), variance, rep(NA, 100))
print(length(variance))
plot(variance, type = 'l')

###### PLOTS
# p1: log price (black) with the Gaussian-kernel trend (red); the dashed
# vertical line marks the end of the fitted period.
p1 <- ggplot(data = NULL, mapping = aes(x = decimal_date(time(data)), y = data$`BTC-USD.Adjusted`)) +
  geom_line(size = 0.8, colour = 'black') +
  geom_line(aes(x = decimal_date(time(data)), y = c(MA, rep(NA, 100))), size = 0.8, colour = 'red', alpha = 0.8) +
  geom_vline(xintercept = max(decimal_date(time(data1))), linetype = 'dashed', size = 0.8) +
  xlab("") +
  ylab("") +
  scale_x_continuous(limits = c(min(decimal_date(time(data))), max(decimal_date(time(data)))), expand = c(0,0)) +
  scale_y_continuous(limits = c(5.3, 10.5), expand = c(0,0)) +
  theme_gray() +
  theme(axis.text.x = element_text(face = 'bold', colour = 'black', size = 11),
        axis.text.y = element_text(face = 'bold', colour = 'black', size = 11),
        plot.title = element_text(face = 'bold', hjust = 0.5, size = 16)) +
  geom_label(aes(x = max(decimal_date(time(data))) - 0.07, y = 10.3, fontface = 2),
             label = 'BTC-USD', fill = 'grey')
p1

##### P2, residuals ------
# BUGFIX: `residuals` has length `end`, but the x aesthetic spans the whole
# series (end + 100 days), which makes ggplot fail on mismatched lengths.
# Pad the tail with NAs, as the commented-out line in the original intended.
residuals_padded <- c(residuals, rep(NA, 100))
p2 <- ggplot(data = NULL, mapping = aes(x = decimal_date(time(data)), y = residuals_padded)) +
  geom_segment(mapping = aes(xend = decimal_date(time(data)), yend = 0), size = 0.7) +
  xlab("") +
  ylab("") +
  theme_bw() +
  theme(axis.text.x = element_text(face = 'bold', colour = 'black', size = 11),
        axis.text.y = element_text(face = 'bold', colour = 'black', size = 11)) +
  scale_x_continuous(limits = c(min(decimal_date(time(data))), max(decimal_date(time(data)))), expand = c(0,0)) +
  geom_label(aes(x = max(decimal_date(time(data))) - 0.08, y = 0.2, fontface = 2),
             label = 'Residuals', fill = 'grey')
p2

##### P3, AR(1) ------
p3 <- ggplot(data = NULL, mapping = aes(x = decimal_date(time(data)), y = AR)) +
  geom_line(size = 0.8) +
  xlab("") +
  ylab("") +
  theme_bw() +
  theme(axis.text.x = element_text(face = 'bold', colour = 'black', size = 11),
        axis.text.y = element_text(face = 'bold', colour = 'black', size = 11)) +
  scale_x_continuous(limits = c(min(decimal_date(time(data))), max(decimal_date(time(data)))), expand = c(0,0)) +
  geom_label(aes(x = max(decimal_date(time(data))) - 0.046, y = 0.93, fontface = 2),
             label = 'AR(1)', fill = 'grey')
p3

##### P4, rolling std. dev ------
# BUGFIX: `variance` already holds sqrt(var(...)), i.e. a standard
# deviation; the original plotted sqrt(variance), taking the square root
# twice while labelling the panel 'Std. dev'.
p4 <- ggplot(data = NULL, mapping = aes(x = decimal_date(time(data)), y = variance)) +
  geom_line(size = 0.8) +
  xlab("") +
  ylab("") +
  theme_bw() +
  theme(axis.text.x = element_text(face = 'bold', colour = 'black', size = 11),
        axis.text.y = element_text(face = 'bold', colour = 'black', size = 11),
        axis.title.x = element_text(face = 'bold', colour = 'black', size = 14)) +
  scale_x_continuous(limits = c(min(decimal_date(time(data))), max(decimal_date(time(data)))), expand = c(0,0)) +
  geom_label(aes(x = max(decimal_date(time(data))) - 0.07, y = 0.245, fontface = 2),
             label = 'Std. dev', fill = 'grey')
p4

# Stack the four panels with aligned widths and write them to png.
g1 <- ggplotGrob(p1)
g2 <- ggplotGrob(p2)
g3 <- ggplotGrob(p3)
g4 <- ggplotGrob(p4)
g <- rbind(g1, g2, g3, g4, size = 'first')
g$widths <- unit.pmax(g1$widths, g2$widths, g3$widths, g4$widths)
grid.newpage()
grid.draw(g)

png('BTC.png', width = 900, height = 600)
grid.newpage()
grid.draw(g)
dev.off()
546de51905a5e5d73e981dceb64a28586b1e84d0
1648d74b87220c51ab86225e438dce2ff3cc72dc
/Churn_Prediction_Ensemble_Model.r
683155fa2f649d5161ad112d1f7bd405571068e9
[ "MIT" ]
permissive
s0yabean/ds_portfolio
fa69d138a9761790d80db696fc4ad2b26d36f605
446c001ba63ffb9aee18ac9d252ff88520eba145
refs/heads/master
2020-06-16T20:55:33.358912
2019-07-07T22:44:41
2019-07-07T22:44:41
195,700,875
1
0
null
null
null
null
UTF-8
R
false
false
14,037
r
Churn_Prediction_Ensemble_Model.r
################################################################################################ # Churn-Prediction Machine Learning Project # Description: A paid project for a venture-backed startup, helping to predict the likeliest customers # about to churn for a B2C subscription-based app. Here, I am going through a couple of popular # ML algorithms for this binary classification problem to find baseline performance, and also # trying out an ensemble method to see if performance improves. # Date: 2018 Spring # Author: Tony Tong ################################################################################################ library(caret) library(dplyr) library(pROC) library(vtreat) library(corrplot) ################################################################################################ # Parallel Processing (To speed up machine learning speeds) ################################################################################################ # Parallel Processing (Added by Tony. My Mac has an i5 processor, which has 4 cores) library(doParallel) cl <- makeCluster(detectCores()) registerDoParallel(cl) # run when rounding up machine learning project to stop the parallel processing # stopCluster(cl) ################################################################################################ # Obtaining and Pre-processing Data # Essentially here, I am running the data through a dummy variable conversion (converting text to numerics/ factors) # and vtreat, which is to standardise, normalise and deal with discrepancies like missing values in the data. # Workflow: Data -> dummyVar() -> vtreat() -> scale() for both TRAINING and TEST data. ################################################################################################ # Connect to db connection <- XXXXXXX::pgsql_connect("XXXXXXXXXXXXXXXXXXXXXXXXXXXX") # Training data. Positive class is set as default to be 0, when there is no renewal. 
train_data <- dbGetQuery(connection, "XXXXXXXXXXXXXXXXXXXXXXXXXXXX") # Test data. Key difference is that there is an extra date filter, to those before 21st Feb as there was an engineering change before. test_data <- dbGetQuery(connection,"XXXXXXXXXXXXXXXXXXXXXXXXXXXX") # Dummy variable transformation transform = dummyVars(" ~ .", data = train_data) train_data_dummy = data.frame(predict(transform, newdata = train_data)) str(train_data_dummy) # create a cross frame experiment (TRAINING data) cross_frame_experiment <- mkCrossFrameCExperiment(train_data_dummy, colnames(train_data_dummy), 'renewed', 1) plan <- cross_frame_experiment$treatments performance_stats <- plan$scoreFrame treated_train <- cross_frame_experiment$crossFrame # treat TEST data based on the same treatment plan on training data test_data_dummy = data.frame(predict(transform, newdata = test_data)) treated_test <- vtreat::prepare(plan, test_data_dummy, pruneSig = NULL) # Scaling treated_train[, 1:24] = scale(treated_train[, 1:24]) treated_test[, 1:24] = scale(treated_test[, 1:24]) # Converting to character as a downstream function does not work with numerics/ factors. 
################################################################################################
# Renewal-prediction modelling script (caret).
# Expects `treated_train` / `treated_test` data frames containing a 0/1 `renewed`
# column, with the caret, dplyr, corrplot and pROC packages loaded earlier.
################################################################################################

# caret's classProbs/twoClassSummary machinery needs a non-numeric outcome,
# so mirror the 0/1 `renewed` flag as "yes"/"no".
treated_train$renewed_char <- ifelse(treated_train$renewed == 1, "yes", "no")
treated_test$renewed_char <- ifelse(treated_test$renewed == 1, "yes", "no")

################################################################################################
# Measuring Correlation and picking top 20 variables
################################################################################################
correlation <- cor(treated_train)
corrplot(correlation, method = "circle", tl.cex = .5, tl.col = 'black')

# Rank predictors by |correlation with `renewed`|; slice(2:21) skips `renewed`
# itself (row 1) and keeps the next 20 variables.
vars =
  as.data.frame(correlation) %>%
  mutate(variable = colnames(correlation)) %>%
  select(variable, renewed) %>%
  arrange(desc(abs(renewed))) %>%
  dplyr::slice(2:21)

x_vars <- paste(unlist(vars[1:20,1]), collapse=' + ')
equation <- as.formula(paste("renewed_char", x_vars, sep = " ~ " ))

# str(treated_train)
# str(treated_test)

################################################################################################
# Creating a standard set of control parameters in train function
# Number and repeats are changeable parameters
################################################################################################
# NOTE(review): `repeats` is only meaningful for method = "repeatedcv"; with
# method = "cv" recent caret versions warn about / ignore it -- confirm intent.
myControl = trainControl(method = "cv",
                         number = 3,
                         repeats = 2,
                         search = 'grid',
                         classProbs = TRUE,
                         summaryFunction=twoClassSummary,
                         verboseIter = TRUE)

################################################################################################
# Machine Learning
# Methods tried:
# Random Forest (rf)
# Logistic Regression (glm)
# Xgboost (xgb)
# Ada (Boosted Classification Tree)
# Naive Bayes (nb)
# Support Vector Machine (svm)
################################################################################################

#################################
# Random Forest
#################################
rf_fit = train(equation,
               data = treated_train,
               method="rf",
               metric = 'ROC',
               trControl = myControl)
rf_fit

# testing data
rf_fit_prob <- predict(rf_fit, newdata = treated_test, type='prob')[,2]
roc(treated_test$renewed, rf_fit_prob) %>% auc
# NOTE(review): newer caret versions require both confusionMatrix() arguments to
# be factors with identical levels -- confirm this still runs on the installed version.
confusionMatrix(ifelse(rf_fit_prob>= 0.50, 1, 0), treated_test$renewed)

# Training Data
# ROC        Sens       Spec
# 0.8192454  0.8405315  0.6713205
# Testing Data
# ROC     Sens    Spec
# 0.72    0.6272  0.6926

#################################
# GLMNET (Logistic Regression)
#################################
glmnet_grid <- expand.grid(.alpha = c(0, 0.5, 1), .lambda=seq(0, 1, by=.01))

glmnet_fit <- train(equation,
                    data = treated_train,
                    method='glmnet',
                    tuneGrid=glmnet_grid,
                    metric='ROC',
                    trControl= myControl)
glmnet_fit$results

# testing data
glmnet_fit_prob <- predict(glmnet_fit, newdata = treated_test, type='prob')[,2]
roc(treated_test$renewed, glmnet_fit_prob) %>% auc
confusionMatrix(ifelse(glmnet_fit_prob >= 0.50, 1, 0), treated_test$renewed)

# Training Data
# ROC        Sens       Spec
# 0.8186528  0.8968968  0.6185666
# Testing Data
# ROC     Sens    Spec
# 0.7778  0.7629  0.6667

####################################################
# XGBOOST - extreme gradient boosting (tree method)
####################################################
# xgboost tree (0.9605899  0.8795028  0.9524339)
xgb_grid <- expand.grid(nrounds = 50,            #the maximum number of iterations
                        eta = c(0.01,0.1),       # shrinkage
                        max_depth = c(2,6,10),
                        gamma = 0,               #default=0
                        colsample_bytree = 1,    #default=1
                        min_child_weight = 1,    #default=1
                        subsample = c(.05, 1))

xgb_fit <- train(equation,
                 data = treated_train,
                 method='xgbTree',
                 tuneGrid=xgb_grid,
                 metric='ROC',
                 trControl=myControl)
xgb_fit

xgb_fit_prob <- predict(xgb_fit, newdata = treated_test, type='prob')[,2]
roc(treated_test$renewed, xgb_fit_prob ) %>% auc
confusionMatrix(ifelse(xgb_fit_prob >= 0.50, 1, 0), treated_test$renewed)

# Training Data
# ROC        Sens       Spec
# 0.8192677  0.8593222  0.6636708
# Testing Data
# ROC     Sens    Spec
# 0.7864  0.6681  0.7305

################################################################################################
# SVM Modelling
################################################################################################
svm_fit <- train(equation,
                 data = treated_train,
                 method='svmLinear2',
                 metric='ROC',
                 trControl=myControl)
svm_fit

svm_fit_prob <- predict(svm_fit, newdata = treated_test, type='prob')[,2]
roc(treated_test$renewed, svm_fit_prob ) %>% auc
confusionMatrix(ifelse(svm_fit_prob >= 0.50, 1, 0), treated_test$renewed)

# Training Data
# ROC        Sens       Spec
# 0.8079858  0.9309330  0.5743828
# Testing Data
# ROC     Sens    Spec
# 0.7191  0.7694  0.6527

################################################################################################
# NB Modelling
################################################################################################
nb_fit <- train(equation,
                data = treated_train,
                method='nb',
                metric='ROC',
                trControl=myControl)
nb_fit

nb_fit_prob <- predict(nb_fit, newdata = treated_test, type='prob')[,2]
roc(treated_test$renewed, nb_fit_prob ) %>% auc
confusionMatrix(ifelse(nb_fit_prob >= 0.50, 1, 0), treated_test$renewed)

# Training Data
# ROC        Sens       Spec
# 0.7977393  0.9090973  0.5955845
# Testing Data
# ROC    Sens    Spec
# 0.755  0.7543  0.6527

################################################################################################
# Boosted Classification Trees
################################################################################################
ada_fit <- train(equation,
                 data = treated_train,
                 method='ada',
                 metric='ROC',
                 trControl=myControl)
ada_fit

ada_fit_prob <- predict(ada_fit, newdata = treated_test, type='prob')[,2]
roc(treated_test$renewed, ada_fit_prob) %>% auc
confusionMatrix(ifelse(ada_fit_prob >= 0.50, 1, 0), treated_test$renewed)

# Training Data
# ROC        Sens       Spec
# 0.8219956  0.8313704  0.6812450
# Testing Data
# ROC     Sens    Spec
# 0.7841  0.7651  0.6567

################################################################################################
# Seeing results on training data (during resampling) to approximate performance of algorithms
################################################################################################
# Based on a subset training data (sample bags)
results =
  resamples(list(ada = ada_fit,
                 nb = nb_fit,
                 svm = svm_fit,
                 xgb = xgb_fit,
                 rf = rf_fit,
                 glmnet = glmnet_fit))
summary(results)
bwplot(results)
dotplot(results)
# dev.off() #if plot does not show

################################################################################################
# Ensemble Modelling
################################################################################################
# Adding all 6 to build an ensemble model: append each base model's predicted
# probability of the positive class as a new feature column.
treated_train$ada_fit_prob = predict(ada_fit, newdata = treated_train, type='prob')[2] %>% unlist()
treated_train$nb_fit_prob = predict(nb_fit, newdata = treated_train, type='prob')[2] %>% unlist()
treated_train$svm_fit_prob = predict(svm_fit, newdata = treated_train, type='prob')[2] %>% unlist()
treated_train$rf_fit_prob = predict(rf_fit, newdata = treated_train, type='prob')[2] %>% unlist()
treated_train$glmnet_fit_prob = predict(glmnet_fit, newdata = treated_train, type='prob')[2] %>% unlist()
treated_train$xgb_fit_prob = predict(xgb_fit, newdata = treated_train, type='prob')[2] %>% unlist()

treated_test$ada_fit_prob = predict(ada_fit, newdata = treated_test, type='prob')[2] %>% unlist()
treated_test$nb_fit_prob = predict(nb_fit, newdata = treated_test, type='prob')[2] %>% unlist()
treated_test$svm_fit_prob = predict(svm_fit, newdata = treated_test, type='prob')[2] %>% unlist()
treated_test$rf_fit_prob = predict(rf_fit, newdata = treated_test, type='prob')[2] %>% unlist()
treated_test$glmnet_fit_prob = predict(glmnet_fit, newdata = treated_test, type='prob')[2] %>% unlist()
treated_test$xgb_fit_prob = predict(xgb_fit, newdata = treated_test, type='prob')[2] %>% unlist()

# Scaling the probabilities just like all the other data.
# FIX(review): select the six stacked-probability columns by name instead of the
# fragile hard-coded positions 27:32 used originally -- same columns, robust to
# any change in column order.
prob_cols <- c("ada_fit_prob", "nb_fit_prob", "svm_fit_prob",
               "rf_fit_prob", "glmnet_fit_prob", "xgb_fit_prob")
treated_train[, prob_cols] = scale(treated_train[, prob_cols])
treated_test[, prob_cols] = scale(treated_test[, prob_cols])

# FIX(review): the original line was a placeholder string
# ("XXXXXXXXXXXXXXXXXXXXXXXXXXXX") which as.formula() cannot parse, so the whole
# ensemble section errored out. The meta-model regresses the class label on the
# six base-model probabilities created above.
equation_ensemble <- as.formula(paste("renewed_char",
                                      paste(prob_cols, collapse = " + "),
                                      sep = " ~ "))

#################################
# GLMNET (Logistic Regression)
# Rerunning the machine learning
# using glmnet (arbitrary)
#################################
glmnet_fit_ensemble <- train(equation_ensemble,
                             data = treated_train,
                             method='glmnet',
                             tuneGrid=glmnet_grid,
                             metric = 'ROC',
                             trControl = myControl)

glmnet_fit_ensemble_prob <- predict(glmnet_fit_ensemble, newdata = treated_test , type='prob')[,2]

# KEEP POSITIVE CLASS AT 0! (Did not renew, which is the default in the confusion matrix function)
roc(treated_test$renewed, glmnet_fit_ensemble_prob) %>% auc
confusionMatrix(ifelse(glmnet_fit_ensemble_prob >= 0.50, 1, 0), treated_test$renewed)

####################################################
# XGBOOST - extreme gradient boosting (tree method)
####################################################
xgb_grid <- expand.grid(nrounds = 50,            #the maximum number of iterations
                        eta = c(0.01,0.1),       # shrinkage
                        max_depth = c(2,6,10),
                        gamma = 0,               #default=0
                        colsample_bytree = 1,    #default=1
                        min_child_weight = 1,    #default=1
                        subsample = c(.05, 1))

xgb_fit_ensemble <- train(equation_ensemble,
                          data = treated_train,
                          method='xgbTree',
                          tuneGrid=xgb_grid,
                          metric = 'ROC',
                          trControl = myControl)

xgb_fit_ensemble_prob <- predict(xgb_fit_ensemble, newdata = treated_test , type='prob')[,2]

# KEEP POSITIVE CLASS AT 0! (Did not renew, which is the default in the confusion matrix function)
roc(treated_test$renewed, xgb_fit_ensemble_prob) %>% auc
confusionMatrix(ifelse(xgb_fit_ensemble_prob >= 0.50, 1, 0), treated_test$renewed)

####################################################
# Conclusion
####################################################
# For a single algorithm, boosted classification trees gave the best performance ROC-wise,
# so tree-based algorithms seem to work well for this problem type.
# During the ensembling, there was severe overfitting on the training data, as the training
# accuracy went up to 99% but testing accuracy remained similar to baseline.
# This suggests that the main bottleneck is not from algorithms; perhaps we need more/better
# data or fine-tuning of hyperparameters.
2a1dd8265d35f84307c76216c8a3c851da7c83a3
41972a07b4ff2f873dc0f786621bdf3b48279af2
/dataType/list.R
bcefbf01da8d89886b31db3aad5627787b8f1309
[]
no_license
juniuszhou/R_language
4920be49e26d6bbccd54839e27504f36b4b188c6
e4925034336b372df13b310b410c7e3d34daeef7
refs/heads/master
2021-01-18T22:58:19.408997
2016-05-12T07:36:47
2016-05-12T07:36:47
21,159,610
0
0
null
null
null
null
UTF-8
R
false
false
1,072
r
list.R
# _____________________________________________________
# Demonstration of R lists: a list "person" behaves like a map (name -> value).
# Vector elements can only be basic (atomic) types, while list elements can be
# any kind of R object.
# All elements of a vector must share one type, while the elements of a list
# may be objects of different kinds.
# Each item in the list can have a different type and a different length,
# and each item can carry a name.
person <- list(name="payal", x=2, y=9, year=1990)
person

# Accessing things inside a list --
person$name           # element by name, container dropped
person$x
person["name"]        # single-bracket: returns a one-element list

# A list whose elements carry no names.
q <- list(1,2,3)
q

# pairlist: Lisp-style linked list; grows element-by-element via assignment.
p <- pairlist()
p$a <- 1
p$a
p

a <- c(1,2,3)
b <- c(4,5,6)
l <- list(a, b)
l[1]                  # sub-list containing the first element

# Extracting internal data: `[` keeps the list wrapper, `[[` unwraps it.
a <- list(1, 2, 3)
a[1]                  # list of length 1
a[[1]]                # the numeric value itself

x<- list(a = c(0, 1, 2), b = c('a', 'b', 'c'))
# The following two forms select the same value, written differently:
# a recursive index vector vs. chained [[ ]].
x[[c(1,2)]]
x[[1]][[2]]

# NOTE: this does NOT filter "rows": x$a>0 is a length-3 logical recycled over
# a length-2 list, so the result is not a meaningful subset.
x[x$a>0]

x <- list(abc = c(0, 1, 2))
# Partial name matching with `$`: works only when the prefix is unambiguous.
x$a

y='a'
# The following doesn't work as a lookup: the name after `$` is taken literally
# and is never evaluated, so x$y looks for an element called "y".
x$y
#
e4c9b88e89971bcc5cb9ec88c5259f08b1da451b
74550574a2c3aacffc61e6f5bb215e25f35e430b
/R/moc.R
4409d64f62ef3d65a8ca12fc335625b90125ec49
[]
no_license
cran/moc
797ebc7b51f0bdc0d6badcd2b41152221dfa47de
d9e5491d9bdf1229b033f9ac6b441e4ffe023eb9
refs/heads/master
2021-06-03T19:05:14.538630
2019-02-28T21:32:16
2019-02-28T21:32:16
17,719,038
0
0
null
null
null
null
UTF-8
R
false
false
69,367
r
moc.R
.packageName <- "moc"
##
##           moc : Package to fit general multivariate finite mixtures models
##
## Copyright (C) 2000-2019 Bernard Boulerice
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License version 3 as published by
## the Free Software Foundation.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
##
## SYNOPSIS
#
# moc(y, density=NULL, joint=FALSE, groups=1,
#     gmu=NULL, gshape=NULL, gextra=NULL, gmixture=inv.glogit, expected = NULL,
#     pgmu=NULL, pgshape=NULL, pgextra=NULL, pgmix=NULL, check.length=TRUE,
#     scale.weight=FALSE, wt=1, data=NULL,
#     ndigit=10, gradtol=0.0001, steptol=gradtol, iterlim=100, print.level=1,...)
#
## DESCRIPTION
##
## Function to fit general nonlinear multivariate finite mixtures model.
##
## Arguments (as used below):
##   y         : response matrix (n subjects x nt variables).
##   density   : function(y, mu, shape, extra) giving the component density;
##               must take at most 4 arguments.
##   joint     : if TRUE the density is already joint over the nt variables,
##               otherwise per-variable densities are multiplied in C code.
##   groups    : number of mixture components (ng).
##   gmu/gshape/gextra : lists of ng functions mapping parameter vectors to
##               n x nt matrices of location/shape/extra values.
##   gmixture  : function mapping pgmix to mixture probabilities (n x ng or 1 x ng).
##   expected  : optional list of ng functions giving expected responses;
##               defaults to the location functions.
##   pgmu/pgshape/pgextra/pgmix : starting values for the parameter blocks.
##   wt        : prior weights, optionally rescaled to sum to n.
##   remaining arguments are passed to nlm().
## Returns an object of class "moc" (see the list built at the end).
##
moc<- function(y, density=NULL, joint=FALSE, groups=1,
               gmu=NULL, gshape=NULL, gextra=NULL, gmixture=inv.glogit, expected = NULL,
               pgmu=NULL, pgshape=NULL, pgextra=NULL, pgmix=NULL, check.length=TRUE,
               scale.weight=FALSE, wt=1, data=NULL,
               ndigit=10, gradtol=0.0001, steptol=gradtol, iterlim=100, print.level=1,...)
{
    call <- sys.call()
    ## When `data` is supplied, re-evaluate the user-supplied function arguments
    ## inside an environment built from it, so those functions can reference
    ## columns of `data` directly.
    if (!is.null(data)) {
        mocData <- list2env(eval(data))
        density <- eval(call$density,mocData)
        if(!is.null(call$gmixture)) gmixture <- eval(call$gmixture,mocData)
        gmu <- eval(call$gmu,mocData)
        gshape <- eval(call$gshape,mocData)
        gextra <- eval(call$gextra,mocData)
        expected <- eval(call$expected,mocData)
    }
#    attach(data,pos=2);on.exit(detach(2),add=TRUE)
#    thisEnv <- environment()
#    for( i in names( data ) ) {
#        assign( i, data[[i]], envir = thisEnv )
#    }
    resp<-as.matrix(eval(substitute(y)))
    ndim<-dim(resp)
    n<-ndim[1]      ## number of subjects
    nt<-ndim[2]     ## number of response variables
    ng<-groups      ## number of mixture components
    ## Default mixture link: inverse generalized logit over groups 2..ng vs group 1.
    inv.glogit<- if(ng==1) {function(gmix) {cbind(1)}} else
    {function(gmix) {rbind(c(1,exp(gmix)))/(1+sum(exp(gmix)))}}
    if(groups > 1) attr(inv.glogit,"parameters")<-paste(" G",2:groups," vs G1",sep="")
    ##
    ## check density
    ##
    if(!is.function(density)) stop("\ndensity must be a function\n")
    if(length(formals(density))>4) stop("\ndensity must not use more than 4 arguments\n")
    .density<-density
    ##
    ## count the number of parameters
    ##
    npl <- length(pgmu)
    nps <- length(pgshape)
    npext <- length(pgextra)
    npmix <- length(pgmix)
    np <- npl+nps+npmix+npext
    ##
    ## create local versions of functions and
    ## check that the functions return correct values
    ##
    #repn<-deparse(substitute(rep(1,n),list(n=n)))
    if(!is.list(gmu) & (length(gmu) != ng) & !is.null(gmu)) stop(paste("\ngmu must be a list of functions of length ",ng))
    if(!is.list(gshape) & (length(gshape) != ng) & !is.null(gshape)) stop(paste("\ngshape must be a list of functions of length ",ng))
    if(!is.list(gextra) & (length(gextra) != ng) & !is.null(gextra)) stop(paste("\ngextra must be a list of functions of length ",ng))
    .gmu<-list()
    .gshape<-list()
    .gextra<-list()
    .expected<-list()
    for( ig in 1:ng)
    {
        ##
        ## check gmu
        ##
        if(length(dim(gmu[[ig]](pgmu)))!=2 | dim(gmu[[ig]](pgmu))[2]!=nt)
            stop(paste("\ngmu in group",ig,"must return a matrix with vectors of length nvar = ",nt))
        if(any(is.na(gmu[[ig]](pgmu)))) stop(paste("\nThe gmu function returns NAs in group ",ig))
        ## A function returning a single row is rewritten (via deparse/parse) so
        ## that its result is replicated into an n x nt matrix; the original
        ## function's environment is preserved.
        if(dim(gmu[[ig]](pgmu))[1]==1)
        {
            fcall<-paste(deparse(gmu[[ig]],control=NULL)[1],collapse="")
            fbody<-paste(deparse(gmu[[ig]],control=NULL)[-1],collapse="\n")
            ftext<-paste(c(fcall,"{matrix(",fbody,",",n,",",nt,",byrow=TRUE)}"),collapse="")
            .gmu[[ig]]<-eval(parse(text=ftext))
            environment(.gmu[[ig]])<-environment(gmu[[ig]])
        } else
            if(dim(gmu[[ig]](pgmu))[1]==n) .gmu[[ig]]<-gmu[[ig]] else
                stop(paste("\ngmu in group",ig,"should return a matrix of length 1 or ",n))
        ##
        ## check gshape (same single-row expansion as gmu)
        ##
        if(is.null(gshape) & is.null(pgshape)) .gshape[[ig]]<- function(...) 1 else
        {
            if(length(dim(gshape[[ig]](pgshape)))!=2 | dim(gshape[[ig]](pgshape))[2]!=nt)
                stop(paste("\ngshape in group",ig,"must return a matrix with vectors of length nvar = ",nt))
            if(any(is.na(gshape[[ig]](pgshape)))) stop(paste("\nThe shape function returns NAs in group",ig))
            if(dim(gshape[[ig]](pgshape))[1]==1)
            {
                fcall<-paste(deparse(gshape[[ig]],control=NULL)[1],collapse="")
                fbody<-paste(deparse(gshape[[ig]],control=NULL)[-1],collapse="\n")
                ftext<-paste(c(fcall,"{matrix(",fbody,",",n,",",nt,",byrow=TRUE)}"),collapse="")
                .gshape[[ig]]<-eval(parse(text=ftext))
                environment(.gshape[[ig]])<-environment(gshape[[ig]])
            } else
                if(dim(gshape[[ig]](pgshape))[1]==n ) .gshape[[ig]]<-gshape[[ig]] else
                    stop("\ngshape in group",ig,"should return a matrix of length 1 or",n)
        }
        ##
        ## check gextra (its column count may differ from nt when check.length=FALSE)
        ##
        if(is.null(gextra) & is.null(pgextra)) .gextra[[ig]]<- function(...) 1 else
        {
            if(length(dim(gextra[[ig]](pgextra)))!=2 | (((lgext<-dim(gextra[[ig]](pgextra))[2])!=nt)&check.length))
                stop(paste("\ngextra in group",ig,"must return a matrix with vectors of length nvar =",nt))
            if(any(is.na(gextra[[ig]](pgextra)))) stop(paste("\nThe extra parameters function returns NAs in group",ig))
            if(dim(gextra[[ig]](pgextra))[1]==1)
            {
                fcall<-paste(deparse(gextra[[ig]],control=NULL)[1],collapse="")
                fbody<-paste(deparse(gextra[[ig]],control=NULL)[-1],collapse="\n")
                ftext<-paste(c(fcall,"{matrix(",fbody,",",n,",",ifelse(check.length,nt,lgext),
                               ",byrow=TRUE)}"),collapse="")
                .gextra[[ig]]<-eval(parse(text=ftext))
                environment(.gextra[[ig]])<-environment(gextra[[ig]])
            } else
                if(dim(gextra[[ig]](pgextra))[1]==n ) .gextra[[ig]]<-gextra[[ig]] else
                    stop(paste("\ngextra in group",ig,"should return a matrix of length 1 or",n))
        }
        ## Expected values default to the location function applied to the first
        ## npl parameters; otherwise the user-supplied expected function is
        ## checked and expanded the same way as gmu.
        if (is.null(expected)) {
            environment(.gmu[[ig]])<-globalenv()
            .expected[[ig]]<- eval(expression(function(p) {gmu[[k]](p[1:npl])}),
                                   list(k=ig,gmu=.gmu,npl=npl))
        } else {
            ptot <- c(pgmu,pgshape,pgextra,pgmix)
            ##
            ## check expected
            ##
            if(length(dim(expected[[ig]](ptot)))!=2 | dim(expected[[ig]](ptot))[2]!=nt)
                stop(paste("\nexpected in group",ig,"must return a matrix with vectors of length nvar =",nt))
            if(any(is.na(expected[[ig]](ptot)))) stop(paste("\tThe expected function returns NAs in group",ig))
            if(dim(expected[[ig]](ptot))[1]==1){
                fcall<-paste(deparse(expected[[ig]],control=NULL)[1],collapse="")
                fbody<-paste(deparse(expected[[ig]],control=NULL)[-1],collapse="\n")
                ftext<-paste(c(fcall,"{matrix(",fbody,",",n,",",nt,",byrow=TRUE)}"),collapse="")
                .expected[[ig]]<-eval(parse(text=ftext))
                environment(.expected[[ig]])<-environment(expected[[ig]])
            } else
                if(dim(expected[[ig]](ptot))[1]==n) .expected[[ig]]=expected[[ig]] else
                    stop(paste("\nexpected for group",ig,"should return a matrix of length 1 or",n))
            environment(.expected[[ig]])<-globalenv()
        }
    }
    ##
    ## check the returned values of the mixture function
    ##
    if(is.null(gmixture) & is.null(pgmix)) { .gmixture<-function(...) {1} } else
    {
        if(dim(gmixture(pgmix))[2]!=ng) stop(paste("\ngmixture must return a matrix with vectors of length groups=",ng))
        if(any(is.na(gmixture(pgmix)))) stop("\nThe mixture function returns NAs")
        if(any(gmixture(pgmix)<0) | any(abs(apply(gmixture(pgmix),1,sum)-1)>.Machine$double.eps^0.5))
            warning("\nThe mixture function components probabilities must be >=0 and sum to 1")
        if(dim(gmixture(pgmix))[1]==1 )
        {
            fcall<-paste(deparse(gmixture,control=NULL)[1],collapse="")
            fbody<-paste(deparse(gmixture,control=NULL)[-1],collapse="\n")
            ftext<-paste(c(fcall,"{matrix(",fbody,",",n,",",ng,",byrow=TRUE)}"),collapse="")
            .gmixture<-eval(parse(text=ftext))
            environment(.gmixture)<-environment(gmixture)
        } else
            if(dim(gmixture(pgmix))[1]==n) .gmixture<-gmixture else
                stop(paste("\ngmixture should return a matrix of length 1 or",n))
    }
    ##
    ## check and scale weights if necessary
    ##
    wt<-as.vector(wt)
    if((length(wt)==1) & (wt==1)) wt <- rep(1,n)
    if(length(wt)!=n) stop("\nwt must be the same length as the other variables")
    if(min(wt)<0) stop("\nAll weights must be non-negative")
    if(any(is.na(wt))) stop("\n weights contain NA\n")
    if((sum(wt)!=n) & scale.weight) {
        warning("\nThe weights have been rescaled to sum to the the sample size ",n,"\n")
        wt<-wt/mean(wt)
    }
    ##
    ## define the likelihood functions
    ##
    ## New Code
    ## jointlike collapses the nt per-variable densities of one component into a
    ## joint likelihood per subject (product over variables, done in compiled C);
    ## missing responses contribute a factor of 1. When joint=TRUE the density
    ## is already joint and is passed through unchanged.
    naresp <- which(is.na(resp))
    jointlike <- if(joint) { function(mdens) {mdens} } else {
        function(mdens) {
            mdens[naresp] <- 1
            .C("cjointlike", as.double(mdens), as.integer(dim(mdens)[1]),
               as.integer(dim(mdens)[2]), jll = double(dim(mdens)[1]),NAOK=TRUE,PACKAGE="moc")$jll
#            Cjointlike( as.double(mdens), as.integer(dim(mdens)[1]),as.integer(dim(mdens)[2]),
#                       jll = double(dim(mdens)[1]))$jll
        }
    }
    ## Weighted mixture -log-likelihood as a function of the full parameter
    ## vector p = (pgmu, pgshape, pgextra, pgmix); the mixture sum is computed
    ## by the compiled routine "mixloglike".
    loglike <- function(p)
    {
        parm <- split(p, rep(c("mu", "shape", "extra", "mix"), c(npl, nps, npext, npmix)))
        dens <- sapply(1:ng, function(ind) jointlike(.density(resp, .gmu[[ind]](parm$mu),
                                                              .gshape[[ind]](parm$shape), .gextra[[ind]](parm$extra))))
        .C("mixloglike", as.double(dens), as.double(.gmixture(parm$mix)),
           as.double(wt), as.integer(n), as.integer(ng),
           mll = double(1),NAOK=TRUE,PACKAGE="moc")$mll
#        Cmixloglike( as.double(dens), as.double(.gmixture(parm$mix)),
#                    as.double(wt), as.integer(n), as.integer(ng),
#                    mll = double(1))$mll
    }
    ##
    ##
    ## check that the likelihood returns an appropriate value and minimize
    ## (with np==0 there is nothing to estimate: just evaluate the likelihood)
    ##
    p<-c(pgmu,pgshape,pgextra,pgmix)
    if(is.na(loglike(p))) stop("\nLikelihood returns NAs: probably invalid initial values")
    if(np>0){
        dt<-system.time(z0 <- nlm(loglike,p=p,hessian=TRUE,
                                  ndigit=ndigit,gradtol=gradtol,steptol=steptol,
                                  iterlim=iterlim,print.level=print.level,...))[3]} else
    {dt<-system.time(z0 <- list(minimum=loglike(p),estimate=p,code=0,iterations=0))[3]}
    cat("\n Estimation took ",dt," seconds.\n")
    ##
    ## compute cov and se's: invert the Hessian only when it is finite and of
    ## full rank, otherwise fill the covariance matrix with NAs.
    ##
    if(np==0)cov <- NULL else
        if(np==1)cov <- 1/z0$hessian else {
            a <- if(any(is.na(z0$hessian)) | any(abs(z0$hessian)==Inf)) 0 else qr(z0$hessian)$rank
            if(a==np)cov <- solve(z0$hessian) else cov <- matrix(NA,ncol=np,nrow=np)
        }
    se <- sqrt(diag(cov))
    ##
    ## compute mixture and posterior probabilities (compiled "mixpost" routine)
    ##
    parm<-split(z0$estimate,rep(c("mu","shape","extra","mix"),c(npl,nps,npext,npmix)))
    pmix<-.gmixture(parm$mix)
    if(joint){
        post1<- .C("mixpost", as.double(sapply(1:ng,function(ind) .density(
            resp,.gmu[[ind]](parm$mu),.gshape[[ind]](parm$shape),
            .gextra[[ind]](parm$extra)))),
            as.double(pmix), as.double(wt), as.integer(n),
            as.integer(ng),post=double(n*ng), PACKAGE="moc")$post
    } else {
        post1<-.C("mixpost", as.double(sapply(1:ng,function(ind) jointlike(
            .density(resp,.gmu[[ind]](parm$mu),.gshape[[ind]](parm$shape),
                     .gextra[[ind]](parm$extra))))),
            as.double(pmix), as.double(wt), as.integer(n),
            as.integer(ng),post=double(n*ng), PACKAGE="moc")$post
    }
    dim(post1) <- c(n,ng)
    dimnames(post1)<-list(NULL,paste("Group",1:ng,sep=""))
    ##
    ## compute posterior prob., fitted and observed values
    ## (group means weighted by posterior probabilities and prior weights)
    ##
    mpost<-apply(post1*wt,2,mean)
    fitted.mean<-matrix(t(sapply(1:ng,function(ind)
        apply(.expected[[ind]](z0$estimate)*post1[,ind]*wt,2,mean,na.rm=TRUE)))/mpost,ng,nt)
    mpost<-sapply(1:ng,function(ind) apply(post1[,ind]*wt*ifelse(is.na(resp),NA,1),2,mean,na.rm=TRUE))
    observed.mean<-matrix(t(sapply(1:ng,function(ind)
        apply(post1[,ind]*wt*resp,2,mean,na.rm=TRUE))/mpost),ng,nt)
    dimnames(fitted.mean)<-list(paste("Group",1:ng,sep=""),
                                ifelse(is.na(nchar(temp<-dimnames(resp)[[2]])[1:nt]) ,
                                       paste("V",1:nt,sep=""),temp))
    dimnames(observed.mean)<-list(paste("Group",1:ng,sep=""),
                                  ifelse(is.na(nchar(temp<-dimnames(resp)[[2]])[1:nt]) ,
                                         paste("V",1:nt,sep=""),temp))
    ##
    ## clean functions environment and
    ## makes the weights callable
    ##
    environment(.density)<-globalenv()
    for(ig in 1:ng) {
        environment(.gmu[[ig]])<-globalenv()
        environment(.gshape[[ig]])<-globalenv()
        environment(.gextra[[ig]])<-globalenv()
    }
    environment(.gmixture)<-globalenv()
    attributes(.gmu)<-attributes(gmu)
    attributes(.gshape)<-attributes(gshape)
    attributes(.gextra)<-attributes(gextra)
    attributes(.gmixture)<-attributes(gmixture)
    attributes(.density)<-attributes(density)
    attributes(.expected)<-attributes(expected)
    gname<-paste("Group",1:ng,sep="")
    names(.gmu)<-gname
    names(.gshape)<-gname
    names(.gextra)<-gname
    names(.expected)<-gname
    ## Store the weights as an unevaluated call so the returned object can
    ## re-evaluate them later (e.g. by the methods below).
    wt<-match.call()$wt
    if(is.null(wt)) {wt<-call("rep",1,n)} else
    {
        if(scale.weight) {wt<-substitute(as.vector(a)/mean(as.vector(a)),list(a=wt))
        } else wt<-substitute(as.vector(a),list(a=wt))
    }
    if(any(.gmixture(parm$mix)<0) | any(abs(apply(rbind(.gmixture(parm$mix)),1,sum)-1)>.Machine$double.eps^0.5))
        warning("\nThe final mixture probablities are not all >=0 or don't sum to 1.\n")
    ##
    ## return a list of class moc
    ##
    moc.out <- list(
        call=call,
        data=match.call()$data,
        resp=substitute(as.matrix(txt),list(txt=match.call()$y)),
        density=.density,
        joint=joint,
        nsubject=n,
        nvar=nt,
        nobs=sum(!is.na(resp)),
        groups=ng,
        npar=c(npl,nps,npext,npmix),
        gmu=.gmu,
        gshape=.gshape,
        gextra=.gextra,
        gmixture=.gmixture,
        expected = .expected,
        prior.weights=wt,
        post.prob=post1,
        loglikelihood=-z0$minimum,
        df=sum(eval(wt))*nt-np,
        AIC=2*z0$minimum+2*np,
        BIC=2*z0$minimum+np*log(sum(eval(wt))*nt),
        coefficients=z0$estimate,
        cov=cov,
        hessian=z0$hessian,
        fitted.mean=fitted.mean,
        observed.mean=observed.mean,
        iterations=z0$iterations,
        code=z0$code,
        execution.time=dt)
    class(moc.out) <- "moc"
    return(moc.out)
}

## update.moc(): rebuild (and optionally evaluate) a moc() call from a fitted
## object, keeping only the requested `groups`, restarting from `parm`, and
## applying the parameter constraints encoded in `what` (see update.fun below).
## Extra ... arguments override the corresponding arguments of the original call.
update.moc <- function(object,groups=1:object$groups,parm=object$coef,what=NULL,evaluate=FALSE,...)
{
    if(!inherits(object,"moc")) stop("\n\tObject must be of class moc\n")
    ## Names of arguments that may legally be overridden through ... .
    moc.args <- c("y", "density", "joint", "groups", "gmu", "gshape", "gextra", "gmixture",
                  "expected", "pgmu", "pgshape", "pgextra", "pgmix", "check.length",
                  "scale.weight", "wt", "data", "ndigit", "gradtol", "steptol", "iterlim", "print.level")
    ocall <- as.list(object$call)
    names(ocall)[2] <- "y"
    ng <- length(groups)
    ocall$groups <- ng
    arg.list <- as.list(match.call(expand.dots=FALSE))$...
    valid.arg <- match(names(arg.list),moc.args)
    if(length(valid.arg)) if(any(is.na(valid.arg))) stop(paste("\tInvalid moc arguments\n",names(arg.list)[is.na(valid.arg)]))
    pnames <- c("mu","shape","extra","mix")
    if(!all(groups %in% 1:object$groups)) stop(paste("\n\tgroups should be a vector of valid group numbers for",paste(substitute(object)),"\n"))
    ## Split old and new parameter vectors into the four named blocks.
    oldparm <- split(object$coefficients,rep(pnames,object$npar))
    objpnam <- pnames[which(object$npar>0)]
    if(!is.list(parm)) {
        parm <- split(parm,rep(pnames,object$npar))
        parm <- replace(list(mu=NULL,shape=NULL,extra=NULL,mix=NULL),names(parm),parm)
    }
    if((length(unlist(parm)) != sum(object$npar)) | !all(pnames %in% names(parm))) {
        stop("\n\tparm should be a named list (mu,shape,extra,mix) or vector of corect length\n")}
    if(!all(object$npar == sapply(parm,length))) stop("\n\tYou supplied the wrong number of parameters\n")
    ## With no constraints (`what` is NULL) just plug the starting values back
    ## into the original call and return (or evaluate) it.
    if(is.null(what)){
        ocall$pgmu <- parm$mu
        ocall$pgshape <- parm$shape
        ocall$pgextra <- parm$extra
        ocall$pgmix <- parm$mix
        for(ln in names(arg.list)) ocall[[ln]] <- arg.list[[ln]]
        if(evaluate) return(eval(as.call(ocall))) else
            return(as.call(ocall))
    }
    if(!is.list(what)) {
        what <- split(what,rep(pnames,object$npar))
        what <- replace(list(mu=NULL,shape=NULL,extra=NULL,mix=NULL),names(what),what)
    }
    if((length(unlist(what)) != sum(object$npar)) | !all(pnames %in% names(what))) {
        stop("\n\twhat should be a named list (mu,shape,extra,mix) or vector of correct length\n")}
    if(!all(object$npar == sapply(what,length))) stop("\n\tYou supplied the wrong number of constraints in what\n")
    ## update.fun(): rewrite one user function so that it takes only the free
    ## parameters. Constraint coding read from the code below:
    ##   what == 0 or NA  -> parameter is free;
    ##   what  > 0        -> equality constraint (entries sharing the same
    ##                       positive code collapse to a single free parameter);
    ##   what  < 0        -> parameter fixed at the supplied `parm` value.
    ## The original function is deparse()d and re-parse()d with its argument
    ## replaced by c(...) mixing fixed literals and np[i] references.
    update.fun <- function(fun,parm,oldparm,what,funlist=TRUE)
    {
        what <- replace(what,what==0,NA)
        pfree <- which(is.na(what))
        if(length(pfree)) {
            npfree <- 1:length(pfree)
            freena <- which(is.na(parm) & is.na(what))
            parm[freena] <- oldparm[freena]
        } else npfree <- NULL
        ## Positive codes: equality-constrained groups of parameters.
        wnegpos <- replace(what,what<0,NA)
        peq <- which(!is.na(wnegpos))
        if(length(peq)){
            npeq <- unclass(factor(wnegpos,exclude=NA))[peq]+length(npfree)
            unieq <- sapply(split(parm[peq],npeq),function(x) length(unique(x)))
            if(any(unieq>1)) stop("\n\tSome free parameters with equality constraints have different values")
            if(any(is.na(parm[peq]))) stop("\n\tYou are trying to put equality constraint on 'NA' values\n")
            peq.first <- sapply(split(peq,npeq),function(x) x[1])
        } else {
            npeq <- NULL
            unieq <- NULL
            peq.first <- NULL
        }
        ## Negative codes: parameters fixed at their supplied values.
        wnegpos <- replace(what,what>0,NA)
        pfix <- which(!is.na(wnegpos))
        if(length(pfix)) {
            npfix <- unclass(factor(wnegpos,exclude=NA))[pfix]
            unifix <- sapply(split(parm[pfix],npfix),function(x) length(unique(x)))
            if(any(is.na(parm[pfix]))) stop("\n\tYou are trying to fix some parameters with 'NA' value")
            fixna <- which(is.na(parm) & !is.na(wnegpos))
            parm[fixna] <- oldparm[fixna]
        } else {
            npfix <- NULL
            unifix <- NULL
        }
        startval <- parm[c(pfree,peq.first)]
        ## Build the argument string: fixed values become literals, free and
        ## equality-constrained entries become np[i] references.
        oparstr <- character(length(c(pfree,peq)))
        oparstr[pfix] <- paste(parm[pfix])
        oparstr[c(pfree,peq)] <- paste("np[",c(npfree,npeq),"]",sep="")
        if(funlist) {
            ## Rebuild one wrapper per requested group, indexing into the
            ## original list of group functions.
            newfun <- as.pairlist(lapply(groups,function(gr)
                eval(parse(text=paste("function(np){",deparse(fun,width.cutoff=500,control=NULL),
                                      "[[",gr,"]](c(",paste(oparstr,collapse=","),"))}")),parent.frame(2))))
            names(newfun) <- paste("Group",groups,sep="")
        } else {
            newfun <- eval(parse(text=paste("function(np){",deparse(fun,width.cutoff=500,control=NULL),
                                            "(c(",paste(oparstr,collapse=","),"))}")),parent.frame(2))
        }
        attributes(newfun) <- attributes(eval(fun,parent.frame(2)))
        if(!is.null(attr(newfun,"parameters"))) attr(newfun,"parameters") <-
            tapply(attr(newfun,"parameters")[c(pfree,peq)],c(npfree,npeq),paste,collapse=",")
        list(newfun=newfun,startval=startval,fixval=parm[pfix],newpar=c(npfree,npeq),oldpar=c(pfree,peq))
    }
    newpar <- alist(mu=NULL,shape=NULL,extra=NULL,mix=NULL)
    ## Rewrite gmu/gshape/gextra (mixture handled separately below).
    for(pn in pnames[-4]) {
        if(!is.null(tmp <- ocall[[paste("g",pn,sep="")]]))
        {
            newfun <- update.fun(tmp,parm[[pn]],oldparm[[pn]],what[[pn]])
            ## For some reason statements like
            # assign(paste("ocall$g",pn,sep=""),newfun$newfun)
            # if(!is.null(ocall[[paste("pg",pn,sep="")]])) assign(paste("ocall$pg",pn,sep=""), newfun$startval)
            ## don't do the assignment and statement like
            # ocall[[paste("g",pn,sep="")]] <- newfun$newfun
            ## works only part time because it does strong type checking and don't force the assignment
            ## with incompatible types.
            ## So, we have to do it the long way !
            switch(pn,
                   mu={ocall$gmu <- newfun$newfun
                       if(!is.null(ocall[["pgmu"]])) ocall$pgmu <- newfun$startval},
                   shape={ocall$gshape <- newfun$newfun
                          if(!is.null(ocall[["pgshape"]])) ocall$pgshape <- newfun$startval},
                   extra={ocall$gextra <- newfun$newfun
                          if(!is.null(ocall[["pgextra"]])) ocall$pgextra <- newfun$startval})
            newpar[[pn]] <- newfun$newpar
        }}
    ## Mixture function: dropped entirely for a one-group model, otherwise
    ## rewritten like the others (defaulting to inv.glogit when absent).
    if(length(groups)==1) {
        ocall$pgmix <- NULL
        ocall$gmixture <- NULL
    } else {
        if(!is.null(ocall$gmixture)) {
            newgmixture <- update.fun(ocall$gmixture,parm$mix,oldparm$mix,what$mix,funlist=FALSE)
            ocall$gmixture <- newgmixture$newfun
            ocall$pgmix <- newgmixture$startval
            newpar$mix <- newgmixture$newpar
        } else {
            newgmixture <- update.fun(as.name("inv.glogit"),parm$mix,oldparm$mix,what$mix,funlist=FALSE)
            ocall$gmixture <- newgmixture$newfun
            ocall$pgmix <- newgmixture$startval
            newpar$mix <- newgmixture$newpar
        }}
    ## The expected function takes the full parameter vector, so its constraint
    ## vector is assembled by offsetting each block's codes into a global coding.
    if(!is.null(xnam <- ocall$expected)) {
        xwhat <- numeric(0)
        for(pn in pnames){
            tmp <- what[[pn]]
            if(any(lz <- (tmp<0))) {tmp[lz] <- tmp[lz]+min(xwhat[xwhat<0],0)}
            if(any(gz <- (tmp>=0))) {tmp[gz] <- (newpar[[pn]]+max(xwhat[xwhat>0],0))*(tmp[gz]!=0)}
            xwhat <- c(xwhat,tmp)}
        newexpected <- update.fun(xnam,c(unlist(parm)),c(unlist(oldparm)),xwhat)
        ocall$expected <- newexpected$newfun
    }
    for(ln in names(arg.list)) ocall[[ln]] <- arg.list[[ln]]
    ## Re-bind the stored functions to an environment built from the original
    ## data so re-evaluation of the call can find the data columns.
    if(!is.null(eval(object$data))) {
        mocData <- list2env(eval(object$data))
        environment(object$density) <- mocData
        environment(object$gmixture) <- mocData
        for (i in 1:object$groups) {
            environment(object$gmu[[i]]) <- mocData
            environment(object$gshape[[i]]) <- mocData
            environment(object$gextra[[i]]) <- mocData
            environment(object$expected[[i]]) <- mocData
        }
#        attach(eval(object$data),name=deparse(object$data,control=NULL),pos=2)
#        on.exit(detach(2),add=TRUE)
    }
    if(evaluate) eval(as.call(ocall),parent.frame()) else as.call(ocall)
}

## Generic accessor for posterior probabilities of a fitted mixture model.
post<-function(object,...) UseMethod("post")

## post.moc(): posterior mixture-group probabilities for each subject;
## uses the probabilities stored at fit time when available, otherwise
## recomputes them from the model functions via Bayes' rule.
post.moc<-function(object, ...)
{
    if(!inherits(object,"moc")) stop("\n\tObject must be of class moc\n")
    ## Fast path: probabilities computed at fit time are returned directly.
    if(!is.null(object$post.prob)) {return(invisible(object$post.prob))} else {
        ## Re-bind the stored functions to the original data environment so the
        ## density/location/shape functions can see the data columns.
        if(!is.null(eval(object$data))) {
            mocData <- list2env(eval(object$data))
            environment(object$density) <- mocData
            environment(object$gmixture) <- mocData
            for (i in 1:object$groups) {
                environment(object$gmu[[i]]) <- mocData
                environment(object$gshape[[i]]) <- mocData
                environment(object$gextra[[i]]) <- mocData
                environment(object$expected[[i]]) <- mocData
            }
#            attach(eval(object$data),name=deparse(object$data,control=NULL),pos=2)
#            on.exit(detach(2),add=TRUE)
        }
        parm<-split(object$coefficients,rep(c("mu","shape","extra","mix"),object$npar))
        mix<-object$gmixture(parm$mix)
        ## With one group the posterior is trivially 1; otherwise compute
        ## prior-weighted component likelihoods (product over variables unless
        ## the density is already joint) and normalize row-wise.
        if(object$groups==1) cbind(1) else {
            if(object$joint) {
                post1<-sapply(1:object$groups,function(ind)
                    object$density(as.matrix(eval(object$resp)),object$gmu[[ind]](parm$mu),
                                   object$gshape[[ind]](parm$shape),object$gextra[[ind]](parm$extra)))*mix
            } else
            {post1<-sapply(1:object$groups,function(ind)
                apply(object$density(as.matrix(eval(object$resp)),object$gmu[[ind]](parm$mu),
                                     object$gshape[[ind]](parm$shape),object$gextra[[ind]](parm$extra)),
                      1,prod,na.rm=TRUE))*mix
            }
            dimnames(post1)<-list(NULL,paste("Group",1:object$groups,sep=""))
            post1 <- post1/apply(post1,1,sum)
            obj.name <- deparse(match.call()$object)
            object$post.prob <- post1
#            eval(substitute(assign(obj.name,object,pos=sys.frame())))
            attr(post1,"moc.name") <- obj.name
            invisible(post1)
        }
    }
}

## fitted.moc(): expected response values for each subject, variable and
## mixture group, returned invisibly as an n x nvar x groups array.
fitted.moc<-function(object,...)
{
    if(!inherits(object,"moc")) stop("\n\tObject must be of class moc\n")
    if(!is.null(eval(object$data))){
        mocData <- list2env(eval(object$data))
        environment(object$density) <- mocData
        environment(object$gmixture) <- mocData
        for (i in 1:object$groups) {
            environment(object$gmu[[i]]) <- mocData
            environment(object$gshape[[i]]) <- mocData
            environment(object$gextra[[i]]) <- mocData
            environment(object$expected[[i]]) <- mocData
        }
#        attach(eval(object$data),name=deparse(object$data,control=NULL),pos=2);
#        on.exit(detach(2),add=TRUE)
    }
    n<-object$nsubject
    dim1<-c(n,object$nvar,object$groups)
    fit<-array(NA,dim=dim1)
    ## One slab per mixture group, filled with the expected-value functions
    ## evaluated at the fitted coefficients.
    for(ig in 1:object$groups) fit[,,ig]<-object$expected[[ig]](object$coefficients)
    dimnames(fit)<-list(NULL,
                        ifelse(is.na(nchar(temp<-dimnames(eval(object$resp))[[2]])[1:dim1[2]]),
                               paste("V",1:dim1[2],sep=""),temp),paste("Group",1:dim1[3],sep=""))
    attr(fit,"moc.name") <- deparse(match.call()$object,control=NULL)
    invisible(fit)
}

## residuals.moc(): residuals of a fitted moc model as an array, with several
## types: "response" (raw), "deviance" (signed square-root likelihood-ratio
## contributions), "mixture" and "gradient" (both delegated to npmle.gradient).
## `post.weight` multiplies residuals by the posterior probabilities; `within`
## rescales the posterior weights to mean 1 within each group.
residuals.moc<-function(object,...,type=c("deviance","response","mixture","gradient"),post.weight=TRUE,within=FALSE)
{
    thiscall <- sys.call()
    if(!inherits(object,"moc")) stop("\n\tObject must be of class moc\n")
    if(!is.null(eval(object$data))){
        mocData <- list2env(eval(object$data))
        environment(object$density) <- mocData
        environment(object$gmixture) <- mocData
        for (i in 1:object$groups) {
            environment(object$gmu[[i]]) <- mocData
            environment(object$gshape[[i]]) <- mocData
            environment(object$gextra[[i]]) <- mocData
            environment(object$expected[[i]]) <- mocData
        }
#
#        attach(eval(object$data),name=deparse(object$data,control=NULL),pos=2)
#        on.exit(detach(2),add=TRUE)
    }
    choices<-c("deviance","response","mixture","gradient")
    type<-match.arg(type,choices)
    n<-object$nsubject
    nt<-object$nvar
    ng<-object$groups
    dim1<-c(n,nt,ng)
    parm<-split(object$coefficients,rep(c("mu","shape","extra","mix"),object$npar))
    wts<-eval(object$prior.weights)
    wpost<-post.moc(object)
    if (within) wpost<-t(t(wpost)/apply(wpost,2,mean))
    y<-as.matrix(eval(object$resp))
    res<-array(y,dim=dim1)-fitted(object)
    if(type=="mixture"){
        ## Compute the empirical distribution
        res <- npmle.gradient(object,gradient=FALSE,average=FALSE)
        res <- array(res,append(dim(res),1,1))
        dimnames(res) <- list(NULL,NULL,"mixture")
        post.weight <- FALSE
    } else
        if(type=="gradient")
        {
            res <- npmle.gradient(object,gradient=TRUE,average=FALSE)
            res <- array(res,append(dim(res),1,1))
            dimnames(res) <- list(NULL,NULL,paste("Group",1:dim1[3],sep=""))
            post.weight <- FALSE
        } else {
            ## "response"/"deviance": computed per group from the model functions.
            for(ig in 1:object$groups)
            {
                m<-object$gmu[[ig]](parm$mu)
                s<-object$gshape[[ig]](parm$shape)
                extra<-object$gextra[[ig]](parm$extra)
                switch(type,
                       response= res[,,ig]<-res[,,ig],
                       deviance= res[,,ig]<-
                           sqrt(2*wts*(log(object$density(y,y,s,extra)/object$density(y,m,s,extra))))*
                           sign(res[,,ig])
                       )
            }
            dimnames(res)<-list(NULL,
                                ifelse(is.na(nchar(temp<-dimnames(eval(object$resp))[[2]])[1:dim1[2]]),
                                       paste("V",1:dim1[2],sep=""),temp),paste("Group",1:dim1[3],sep=""))
        }
    if(post.weight) res<-res*array(wpost[,rep(1:ng,rep(nt,ng))],dim=dim1)
    class(res)<-c("residuals.moc","residuals")
    attr(res,"type")<-type
    attr(res,"post.weight")<-post.weight
    attr(res,"within")<-within
    attr(res,"moc.name") <- deparse(match.call()$object,control=NULL)
    invisible(res)
}

print.moc<-function(x,digits=5,expand=TRUE,transpose=FALSE,...)
# print.moc body: prints the model specification (density, link functions,
# starting values), the ML estimates with standard errors and Wald tests,
# fit statistics, and posterior/prior mixture summaries.  Returns the object
# invisibly, augmented with the printed tables.
{ object<-x
  if(!inherits(object,"moc")) stop("\n\tObject must be of class moc\n")
  # Rebind model functions to the saved data environment (see post.moc).
  if(!is.null(eval(object$data))){
    mocData <- list2env(eval(object$data))
    environment(object$density) <- mocData
    environment(object$gmixture) <- mocData
    for (i in 1:object$groups) {
      environment(object$gmu[[i]]) <- mocData
      environment(object$gshape[[i]]) <- mocData
      environment(object$gextra[[i]]) <- mocData
      environment(object$expected[[i]]) <- mocData
    }
    # attach(eval(object$data),name=deparse(object$data,control=NULL),pos=2)
    # on.exit(detach(2),add=TRUE)
  }
  cat("\n\t\t",object$groups,"mixtures MOC model\n\n\n")
  cat("Response: ",deparse(object$resp,control=NULL),"\n\n")
  if(object$joint) cat("joint ")
  cat("Density: ")
  cat(deparse(object$call$density,control=NULL),"\n")
  if(expand) print(object$density,...)
  # Echo each user-supplied component of the call with its starting values;
  # expand=TRUE also prints the function definitions themselves.
  if(!is.null(object$call$gmu)) {
    cat("\nLocation: ")
    cat(deparse(object$call$gmu,control=NULL),"( pgmu = ",deparse(object$call$pgmu,control=NULL)," )\n")
    if(expand) print.listof(object$gmu,... )
  }
  if(!is.null(object$call$expected)) {
    cat("\nExpectation: ")
    cat(deparse(object$call$expected,control=NULL),"\n")
    if(expand) print.listof(object$expected,... )
  }
  if(!is.null(object$call$gshape)) {
    cat("\nShape: ")
    cat(deparse(object$call$gshape,control=NULL),"( pgshape = ",deparse(object$call$pgshape,control=NULL)," )\n")
    if(expand) print.listof(object$gshape,...)
  }
  if(!is.null(object$call$gextra)) {
    cat("\nExtra parameter: ")
    cat(deparse(object$call$gextra,control=NULL),"( pgextra = ",deparse(object$call$pgextra,control=NULL)," )\n")
    if(expand) print.listof(object$gextra,...)
  }
  cat("\nMixture: ")
  if(!is.null(object$call$gmixture)) {
    cat(deparse(object$call$gmixture,control=NULL),"( pgmix = ",deparse(object$call$pgmix,control=NULL)," )\n")
  } else {
    # Default mixture link when none was supplied in the call.
    cat("inv.glogit( pgmix = ",deparse(object$call$pgmix,control=NULL)," )\n")
  }
  if(expand) print(object$gmixture,...)
  cat("\n\n\t\t\tMaximum Likelihood Estimates\n\n")
  cat(formatC(c("",""," Standard","Wald Chi-Sq:"),width=13),"\n")
  cat(formatC(c("Parameter ","Estimate"," Error"," Param = 0"," Prob > Wald"),width=13),"\n")
  object.coef<-split(object$coef,rep(c("mu","shape","extra","mix"),object$npar))
  object.se<-split(sqrt(diag(object$cov)),rep(c("mu","shape","extra","mix"),object$npar))
  nl<-object$npar[1];ns<-object$npar[2];nextra<-object$npar[3];nm<-object$npar[4]
  # One estimate/SE/Wald table per non-empty parameter family; each table is
  # also stored back on the (local) object under a named component.
  if(nl>0) {
    cat("\nLocation:\n\n")
    param<-attr(object$gmu,"parameters")
    if (length(param) != nl) param<-" "
    coeftable<-matrix(cbind(est<-object.coef$mu,se<-object.se$mu,w<-(est/se)^2, (1-pchisq(w,1))),nl,4)
    coeftable<-formatC(matrix(apply(coeftable,2,format,digits=digits,width=13),nl,4),width=13)
    cat(paste(formatC(param,width=-13),apply(coeftable,1,paste,collapse=" "),collapse="\n"),"\n")
    object$Location<-coeftable
  }
  if(ns>0) {
    cat("\nShape:\n\n")
    param<-attr(object$gshape,"parameters")
    if (length(param) != ns) param<-" "
    coeftable<-matrix(cbind(est<-object.coef$shape,se<-object.se$shape, w<-(est/se)^2,(1-pchisq(w,1))),ns,4)
    coeftable<-formatC(matrix(apply(coeftable,2,format,digits=digits,width=13),ns,4),width=13)
    cat(paste(formatC(param,width=-13),apply(coeftable,1,paste,collapse=" "),collapse="\n"),"\n")
    object$Shape<-coeftable
  }
  if(nextra>0) {
    cat("\nExtra Parameters:\n\n")
    param<-attr(object$gextra,"parameters")
    if (length(param) != nextra) param<-" "
    coeftable<-matrix(cbind(est<-object.coef$extra, se<-object.se$extra,w<-(est/se)^2, (1-pchisq(w,1))),nextra,4)
    coeftable<-formatC(matrix(apply(coeftable,2,format,digits=digits,width=13),nextra,4),width=13)
    cat(paste(formatC(param,width=-13),apply(coeftable,1,paste,collapse=" "),collapse="\n"),"\n")
    object$Extra<-coeftable
  }
  if(nm>0) {
    cat("\nMixture:\n\n")
    param<-attr(object$gmixture,"parameters")
    if (length(param) != nm) param<-" "
    coeftable<-matrix(cbind(est<-object.coef$mix, se<-object.se$mix, w<-(est/se)^2,(1-pchisq(w,1))),nm,4)
    coeftable<-formatC(matrix(apply(coeftable,2,format,digits=digits,width=13),nm,4),width=13)
    cat(paste(formatC(param,width=-13),apply(coeftable,1,paste,collapse=" "),collapse="\n"),"\n")
    object$Mixture<-coeftable
    cat("\nMean Mixture Probabilities:\n")
    mmix<-apply(object$gmixture(est),2,weighted.mean,eval(object$prior.weights),na.rm=TRUE)
    names(mmix)<-paste("Group",1:object$groups,sep="")
    print(mmix,digits=5)
    object$MixtProb<-mmix
  }
  modelfit<-cbind("-2*logLikelihood"=-2*object$loglik, AIC=object$AIC,AIC(object,k="BIC")[-1])
  dimnames(modelfit)[[1]]<-" "
  cat("\n")
  print(modelfit)
  object$ModelFit<-modelfit
  coeftable<-apply(post.moc(object),2,weighted.mean,eval(object$prior.weights),na.rm=TRUE)
  object$PostMixtProb<-coeftable
  if(object$code>2) {cat("\n\nWARNING - CONVERGENCE NOT ACHIEVED - ERROR",object$code,"\n\n\n")}
  if(object$groups>1) {
    cat("\n\nMean Posterior Mixture Probabilities:\n")
    print(coeftable,digits=5)}
  cat("\n")
  cat("\nPosterior mean predicted values:\n")
  if(transpose) tmp <- t(object$fitted.mean) else tmp <- object$fitted.mean
  print(tmp,digits=digits)
  cat("\nPosterior mean observed values:\n")
  if(transpose) tmp <- t(object$observed.mean) else tmp <- object$observed.mean
  print(tmp,digits=digits)
  cat("\n")
  invisible(object) }

# npmle.gradient: NPMLE gradient diagnostics.  With gradient=TRUE, returns
# per-subject gradient values dens_g / mixture_dens - 1 for each group;
# with gradient=FALSE, compares the empirical distribution of the observed
# response patterns to the fitted mixture density.  average=TRUE collapses
# the result to column means.
npmle.gradient <- function(object,parm=object$coef,gradient=TRUE,average=FALSE)
{ new.parm <- parm
  if(!inherits(object,"moc")) stop("\n\tObject must be of class moc\n")
  if(length(new.parm)!=sum(object$npar)) stop(paste("\n\tYou should supply",sum(object$npar),"parameter values in parm\n"))
  # Rebind model functions to the saved data environment (see post.moc).
  if(!is.null(eval(object$data))) {
    mocData <- list2env(eval(object$data))
    environment(object$density) <- mocData
    environment(object$gmixture) <- mocData
    for (i in 1:object$groups) {
      environment(object$gmu[[i]]) <- mocData
      environment(object$gshape[[i]]) <- mocData
      environment(object$gextra[[i]]) <- mocData
      environment(object$expected[[i]]) <- mocData
    }
    # attach(eval(object$data),name=deparse(object$data,control=NULL),pos=2)
    # on.exit(detach(2),add=TRUE)
  }
  parm <- split(new.parm,rep(c("mu","shape","extra","mix"),object$npar))
  pmix <- object$gmixture(parm$mix)
  # Per-subject density under each group (product over variables).
  dens <- sapply(1:object$groups, function(ind) apply(as.matrix(object$density(eval(object$resp), object$gmu[[ind]](parm$mu), object$gshape[[ind]](parm$shape), object$gextra[[ind]](parm$extra))), 1, prod, na.rm = TRUE))
  mix.dens <- apply(dens*pmix,1,sum)
  if(!gradient){ ## Compute the empirical distribution
    # Identify identical response patterns by pasting their formatted values,
    # then give each pattern its weighted empirical frequency.
    tmp <- as.matrix(apply(format(eval(object$resp),digits=20),1,paste,collapse=""))
    tmp.ind <- by(1:dim(tmp)[1],factor(tmp),I)
    dens.emp <- matrix(NA,dim(tmp)[1])
    remove("tmp")
    wts <- eval(object$prior.weights)
    for(i in 1:length(tmp.ind)) dens.emp[tmp.ind[[i]]] <- sum(wts[tmp.ind[[i]]])/sum(wts)
    val <-as.matrix(dens.emp/mix.dens-1)
    dimnames(val) <- list(NULL,"empirical")
  } else {
    val <- as.matrix(dens/mix.dens-1)
    dimnames(val) <- list(NULL,paste("Group",1:object$groups,sep=""))
  }
  if(average) val <- apply(val,2,mean,na.rm=TRUE)
  val }

# confint.moc: confidence intervals for (functions of) the parameters
# (body follows below).
confint.moc <- function(object,parm=list(),level=0.95,profiling=c("none","simple","complete"),...)
# confint.moc body: delta-method (asymptotic) confidence intervals for
# arbitrary smooth functions of the parameters given as formulas in p1..pk,
# optionally complemented by likelihood-profile intervals.  "simple"
# profiling re-evaluates the likelihood on a grid; "complete" profiling
# re-fits the model at each grid point (slow).
{ opt.args <- as.list(match.call())
  estimate <- parm
  l <- length(estimate)
  # Default: one identity formula ~p_i per parameter.
  if(l==0) {
    estimate <- lapply(paste("~p",1:sum(object$npar),sep=""),as.formula)
    l <- sum(object$npar)
  }
  profiling <- match.arg(profiling,c("none","simple","complete"))
  # Symbolic derivatives of each requested function w.r.t. all parameters.
  grad <- lapply(estimate,function(expr) deriv(expr,paste("p",1:sum(object$npar),sep="")))
  parm <- as.list(structure(object$coef,names=paste("p",1:sum(object$npar),sep="")))
  val.grad <- lapply(grad,function(gr) with(parm,eval(gr)))
  val <- unlist(val.grad)
  grad <- structure(unlist(lapply(val.grad,attr,"gradient")),dim=c(sum(object$npar),l))
  # Delta-method covariance of the transformed estimates.
  cova <- as.matrix(t(grad)%*%object$cov%*%grad)
  table.univ <- data.frame(estimate=val,st.dev=sqrt(diag(cova)),"Lower.ACI"=val+qnorm(0.5-level/2)*sqrt(diag(cova)), "Upper.ACI"=val+qnorm(0.5+level/2)*sqrt(diag(cova)),row.names= sapply(estimate,deparse,control=NULL,width.cutoff=500))
  if(length(estimate)>1){
    # Conditional variances given all other transformed estimates.
    cov.jointcond <- sapply(1:length(diag(cova)),function(i) cova[i,i]-cova[i,-i]%*%solve(cova[-i,-i])%*%cova[-i,i])
    table.joint <- data.frame(estimate=val,st.dev=sqrt( cov.jointcond),"Lower.ACI"=val+qnorm(0.5-level/2)*sqrt( cov.jointcond), "Upper.ACI"=val+qnorm(0.5+level/2)*sqrt( cov.jointcond),row.names=sapply(estimate,deparse,control=NULL,width.cutoff=500))
  } else {table.joint <- "NOT APPLICABLE"}
  if(profiling=="simple" | profiling=="complete") {
    # Grid of offsets (in standard-error units) around each estimate;
    # may be overridden through an "offscal" argument in the call.
    offscal <- eval(opt.args[[match("offscal",names(opt.args))]])
    if (is.null(offscal)) { offscal <- c(-5,-4,seq(-3,3,0.5),4,5)}
    pparm <- t(object$coef + sqrt(c(diag(object$cov)))%o%offscal)
    dimnames(pparm) <- list(NULL, paste("p",1:sum(object$npar),sep=""))
    devilike <- list()
    profci <- matrix(NA,sum(object$npar),3)
    profci[,1] <- object$coef
    dimnames(profci) <- list(paste("p",1:sum(object$npar),sep=""),c("estimate","Lower.CI","Upper.CI"))
    if(profiling == "simple"){
      df <- 1 ## A better approximation probably requires adjustment of this
      # NOTE(review): object$loglike relies on partial matching of
      # object$loglikelihood — confirm no other component shares the prefix.
      llkfun <- function(object,p,fix) {
        llk <- loglike.moc(object,parm=p,evaluate=FALSE)[,2]
        c(llk+2*object$loglike,p)}
    } else if(profiling == "complete"){
      df <- 1 ## Look forward for adjustment of this one too
      iterlim <- eval(opt.args[[match("iterlim",names(opt.args))]])
      if (is.null(iterlim)) {iterlim <- 15}
      # Refit with the profiled parameter held fixed (fix marks it with -1).
      llkfun <- function(object,p,fix) {
        llk <- try(update.moc(object,parm=p,what=fix, evaluate=TRUE,iterlim = iterlim,print.level=0))
        pmiss <- which(fix!=0)
        llk <- c(llk$loglikelihood,append(llk$coefficients,p[pmiss],pmiss-1))
        c(-2*llk[1]+2*object$loglikelihood,llk[-1])
      }
      cat("\n Wait for complete profiling to be done: be aware, this can take a long time!\n")
      # Silence the many refits by sinking output to a text connection.
      mocmessf <- textConnection("mocmess","w",local=TRUE)
      sink(mocmessf)
      on.exit({sink();close(mocmessf)},add=TRUE)
    }
    for(i in 1:sum(object$npar)) {
      devilike[[paste("p",i,sep="")]] <- t(sapply(pparm[,i],function(vp) llkfun(object,replace(object$coef,i,vp),replace(rep(0,length(object$coef)),i,-1)) ))
      dimnames(devilike[[i]]) <- list(NULL,c("deviance",paste("p",1:sum(object$npar),sep="")))
      # Interpolate the deviance curve and invert it on each side of the
      # estimate to get the lower/upper profile-likelihood bounds.
      tmp <- approx(devilike[[i]][,i+1],pchisq(devilike[[i]][,1],df),n=75,ties="ordered",rule=2)
      profci[i,2] <- approx(c(1,tmp$y[tmp$x<=object$coef[i]]), c(-Inf,tmp$x[tmp$x<=object$coef[i]]), xout=level,ties="mean", rule=1,yleft=-Inf,yright=Inf)$y
      profci[i,3] <- approx(c(tmp$y[tmp$x>=object$coef[i]],1), c(tmp$x[tmp$x>=object$coef[i]],Inf),xout=level,ties="mean", rule=1,yleft=-Inf,yright=Inf)$y
    }
  } else {profci <- "NOT REQUESTED"; devilike <- "NOT REQUESTED"}
  # Quadratic form for joint (elliptical) confidence regions, carrying its
  # own environment so val and cova survive after this call returns.
  ellip <- function(p) {(val-p)%*%solve(cova)%*%(val-p)}
  ellip.env <- new.env()
  assign("val",val,envir=ellip.env)
  assign("cova",cova,envir=ellip.env)
  environment(ellip) <- ellip.env
  structure(list(devilike,ellip,table.univ, table.joint,profci), names=c(paste(profiling,"likelihood profiling"), "ellip", paste("Univariate",100*level,"% CI"),paste("Joint conditional",100*level,"% CI"), paste(profiling,"likelihood rejection",100*level,"% CI")), moc.names=paste(substitute(object))) }

# AIC.moc: model-comparison table for one or more moc fits.  k=2 gives AIC,
# k=0 gives -2*logLik, k="BIC" gives BIC together with entropy and ICL-BIC,
# any other numeric k gives a generalized AIC.
AIC.moc <- function(object,...,k=2)
{ mlist<- list(object,...)
  if(!all(sapply(mlist,inherits,"moc"))) stop("\n\tAll objects must be of class moc\n")
  ml<-length(mlist)
  cnames<-as.character(match.call()[-1])
  names(mlist)<- cnames[1:ml]
  nobslist<-sapply(mlist,function(tmp) tmp$nobs)
  nglist<-sapply(mlist,function(tmp) tmp$groups)
  # Warn when the requested statistic is not appropriate for comparing the
  # supplied models (different group counts or observation counts).
  if((k==0) & ((max(nglist)!=min(nglist)) | (max(nobslist)!=min(nobslist)))) warning("log Likelihood should only be used to compare nested models on the same observations with the same number of mixture.\nTry using AIC or BIC or ICL-BIC instead.\n") else {if((k!="BIC") & (max(nobslist)!=min(nobslist))) warning("AIC like statistics should not be used to compare models with differing number of observations, use BIC or ICL-BIC instead.\n")}
  if(k=="BIC") val<-as.data.frame(t(sapply(mlist,function(tmp){
    #attach(eval(tmp$data),name=deparse(tmp$data,control=NULL),pos=2)
    #on.exit(detach(2),add=TRUE)
    bic<-tmp$BIC
    po<-post.moc(tmp);
    # Weighted posterior entropy; ICL-BIC = BIC + 2*entropy.
    entropy<--sum(ifelse(po==0,0,eval(tmp$prior.weight)*po*log(po)))
    c(-2*tmp$loglikelihood,bic,entropy,bic+2*entropy,tmp$df)}))) else val<-as.data.frame(t(sapply(mlist,function(tmp) c(-2*tmp$loglikelihood+k*sum(tmp$npar),tmp$df))))
  names(val)<-c(switch(as.character(k),"0"="-2*logLik","2"=c("AIC"), BIC=c("-2*logLik","BIC","Entropy","ICL-BIC"), "generalized AIC"),"Df")
  row.names(val)<-cnames[1:ml]
  val }

# entropy: generic dispatcher.
entropy <- function(object,...) UseMethod("entropy")

# entropy.default: row-wise entropy (and entropy standardized by log of the
# number of columns) of a matrix of probabilities whose rows sum to 1.
entropy.default <- function(object,...)
{ obj2 <- as.matrix(object)
  # NOTE(review): the error message below contains typos ("lies", "ans");
  # it is a runtime string and is deliberately left unchanged here.
  if(!all((obj2 >= 0) & (obj2 <= 1)) | any(abs(apply(obj2,1,sum)-1) > .Machine$double.eps^0.5)) stop("The probabilities must lies in [0,1] ans sum to 1 ")
  # 0*log(0) is treated as 0.
  en <- apply(obj2,1,function(pro) -sum(ifelse(pro==0,0,pro*log(pro))))
  en <- cbind(en,en/log(dim(obj2)[2]))
  dimnames(en) <- list(dimnames(obj2)[[1]],c("entropy","std.entropy"))
  return(en) }

# entropy.moc: prior/posterior entropy summary table for one or more moc
# models (body follows below).
entropy.moc <- function(object,...)
{ mlist<- list(object,...)
  # entropy.moc body: for each model, total and mean-standardized prior and
  # posterior mixture entropies and the percent reduction from prior to
  # posterior.
  if(!all(sapply(mlist,inherits,"moc"))) stop("\n\tAll objects must be of class moc\n")
  ml<-length(mlist)
  cnames<-as.character(match.call()[-1])
  names(mlist)<- cnames[1:ml]
  val<-as.data.frame(t(sapply(mlist,function(tmp){
    # Rebind model functions to the saved data environment (see post.moc).
    if(!is.null(eval(tmp$data))) {
      mocData <- list2env(eval(tmp$data))
      environment(tmp$density) <- mocData
      environment(tmp$gmixture) <- mocData
      for (i in 1:tmp$groups) {
        environment(tmp$gmu[[i]]) <- mocData
        environment(tmp$gshape[[i]]) <- mocData
        environment(tmp$gextra[[i]]) <- mocData
        environment(tmp$expected[[i]]) <- mocData
      }
    }
    # attach(eval(tmp$data),name=deparse(tmp$data,control=NULL),pos=2)
    # on.exit(detach(2),add=TRUE)
    parm<-split(tmp$coef,rep(c("mu","shape","extra","mix"),tmp$npar))
    pri <- tmp$gmixture(parm$mix)
    po<-post.moc(tmp)
    # Weighted entropies; 0*log(0) treated as 0.
    pri.entropy<--sum(ifelse(pri==0,0,eval(tmp$prior.weight)*pri*log(pri)))
    post.entropy<--sum(ifelse(po==0,0,eval(tmp$prior.weight)*po*log(po)))
    c(tmp$groups,pri.entropy,post.entropy, pri.entropy/log(tmp$groups)/sum(eval(tmp$prior.weight)),post.entropy/log(tmp$groups)/sum(eval(tmp$prior.weight)), 1-post.entropy/pri.entropy)})))
  names(val)<-c("Groups","Total Prior Entropy","Total Posterior Entropy", "Mean Prior Standardized Entropy","Mean Posterior Standardized Entropy", "% Reduction")
  row.names(val)<-cnames[1:ml]
  val }

# logLik.moc: standard logLik extractor; returns the log-likelihood with
# "df" and "nobs" attributes (nobs = nvar * nsubject) as a "logLik" object.
logLik.moc <- function(object,...)
{ if(!inherits(object,"moc")) stop("\n\tObject must be of class moc\n")
  val <- object$loglikelihood
  attr(val,"df") <- object$df
  attr(val,"nobs")<-object$nvar*object$nsubject
  class(val) <- "logLik"
  attr(val,"moc.name") <- deparse(match.call()$object,control=NULL)
  val }

# loglike.moc: evaluate -2*log-likelihood of the model at an arbitrary
# parameter vector parm (via the compiled "mixloglike" routine), next to the
# fitted value.  With evaluate=TRUE the model is also re-fitted starting
# from parm and that estimate's -2*logLik is appended.
loglike.moc <- function(object,parm=object$coef, evaluate=FALSE)
{ new.parm <- parm
  if(!inherits(object,"moc")) stop("\n\tObject must be of class moc.\n")
  if(length(new.parm)!=sum(object$npar)) stop(paste("\n\tYou should supply",sum(object$npar),"parameter values in parm\n"))
  parm<-split(new.parm,rep(c("mu","shape","extra","mix"),object$npar))
  # Rebind model functions to the saved data environment (see post.moc).
  if(!is.null(eval(object$data))){
    mocData <- list2env(eval(object$data))
    environment(object$density) <- mocData
    environment(object$gmixture) <- mocData
    for (i in 1:object$groups) {
      environment(object$gmu[[i]]) <- mocData
      environment(object$gshape[[i]]) <- mocData
      environment(object$gextra[[i]]) <- mocData
      environment(object$expected[[i]]) <- mocData
    }
    # attach(eval(object$data),name=deparse(object$data,control=NULL),pos=2)
    # on.exit(detach(2),add=TRUE)
  }
  pmix <- object$gmixture(parm$mix)
  dens <- sapply(1:object$groups, function(ind) apply(as.matrix(object$density(eval(object$resp), object$gmu[[ind]](parm$mu), object$gshape[[ind]](parm$shape), object$gextra[[ind]](parm$extra))), 1, prod, na.rm = TRUE))
  loglike <- .C("mixloglike", as.double(dens), as.double(pmix), as.double(eval(object$prior.weights)), as.integer(object$nsubject), as.integer(object$groups), mll = double(1),NAOK=TRUE,PACKAGE="moc")$mll
  # NOTE(review): the new value is 2*loglike against -2*object$loglikelihood,
  # which suggests the C routine returns the negative log-likelihood —
  # confirm against src/mixloglike before relying on the sign.
  loglike <- matrix(c(-2*object$loglikelihood,2*loglike),nrow=1,ncol=2,dimnames=list("-2*loglike",c("original","new.parm")))
  new.moc <- NULL
  if(evaluate) {
    # Re-run the original fitting call with parm as starting values.
    new.call <- object$call
    new.call$pgmu <- parm$mu
    new.call$pgshape <- parm$shape
    new.call$pgextra <- parm$extra
    new.call$pgmix <- parm$mix
    new.call$print.level <- 0
    new.moc <- eval(new.call,envir=parent.frame())
  }
  val <- cbind(loglike,"new.estimate"=-2*new.moc$loglikelihood)
  attr(val,"moc.name") <- deparse(match.call()$object,control=NULL)
  attr(val,"parameters") <- list("original"=object$coef,"new.parm"=new.parm,"new.estimate"=new.moc$coef)
  val }

# obsfit.moc: posterior-weighted observed and expected means of FUN of the
# response, cross-classified by the grouping variables in "along", together
# with mean prior and posterior mixture probabilities.
obsfit.moc<-function(object,along=list(cons=rep(1,object$nsubject)),FUN=function(x) x)
{ if(!inherits(object,"moc")) stop("\n\tObject must be of class moc\n")
  # Rebind model functions to the saved data environment (see post.moc).
  if(!is.null(eval(object$data))){
    mocData <- list2env(eval(object$data))
    environment(object$density) <- mocData
    environment(object$gmixture) <- mocData
    for (i in 1:object$groups) {
      environment(object$gmu[[i]]) <- mocData
      environment(object$gshape[[i]]) <- mocData
      environment(object$gextra[[i]]) <- mocData
      environment(object$expected[[i]]) <- mocData
    }
    # attach(eval(object$data),name=deparse(object$data,control=NULL),pos=2)
    # on.exit(detach(2),add=TRUE)
  }
  along.name <- substitute(along)
  # Wrap a bare vector into a named one-element list so by() can use it.
  if(!is.list(along)) eval(parse(text=paste("along <-list(",substitute(along),"=along)")))
  n<-object$nsubject
  nt<-object$nvar
  ng<-object$groups
  parm<-split(object$coef,rep(c("mu","shape","extra","mix"),object$npar))
  wts<-eval(object$prior.weight)
  post.obj<-post.moc(object)*wts
  mpost<-by(post.obj,along,sapply,mean,na.rm=TRUE)
  wts<-by(wts,along,mean)
  tmp <- object$gmixture(parm$mix)
  names(tmp) <- paste("Group",1:object$groups,sep="")
  dimnames(tmp) <- list(NULL,paste("Group",1:object$groups,sep=""))
  gmix.mean <-by(tmp,along,sapply,mean)
  # Expected values of FUN(response), laid out as n x (nvar*groups).
  tmp<-matrix(NA,n,nt*ng)
  for (ig in 1:object$groups) tmp[,(1:nt)+(ig-1)*nt]<-FUN(object$expected[[ig]](object$coef))
  fitted.mean<-by(tmp*array(apply(post.obj,2,rep,nt),c(n,nt*ng)), along,sapply,mean,na.rm=TRUE)
  # Mean posterior mass restricted to non-missing fitted cells (denominator).
  mpost.fitted<-by(ifelse(is.na(tmp),NA,1)*array(apply(post.obj,2,rep,nt),c(n,nt*ng)), along,sapply,mean,na.rm=TRUE)
  tmp<-FUN(array(eval(object$resp),c(n,nt*ng)))
  observed.mean<-by(tmp*array(apply(post.obj,2,rep,nt),c(n,nt*ng)),along,sapply,mean,na.rm=TRUE)
  mpost.observed<-by(ifelse(is.na(tmp),NA,1)*array(apply(post.obj,2,rep,nt),c(n,nt*ng)), along,sapply,mean,na.rm=TRUE)
  nlist<-dim(mpost)
  fitmean <- list()
  obsmean <- list()
  # Reshape each cell's weighted sums into a groups x nvar matrix of means.
  for (i in 1:prod(nlist))
  { if(!is.null(fitted.mean[[i]])){
      fitmean[[i]] <- matrix(t(array(fitted.mean[[i]]/mpost.fitted[[i]],c(nt,ng))),ng,nt, dimnames=list(paste("Group",1:ng,sep=""), ifelse(is.na(nchar(temp<-dimnames(eval(object$resp))[[2]])[1:nt]) , paste("V",1:nt,sep=""),temp)))
    }
    if(!is.null(observed.mean[[i]])) {
      obsmean[[i]] <- matrix(t(array(observed.mean[[i]]/mpost.observed[[i]],c(nt,ng))),ng,nt, dimnames=list(paste("Group",1:ng,sep=""), ifelse(is.na(nchar(temp<-dimnames(eval(object$resp))[[2]])[1:nt]) , paste("V",1:nt,sep=""),temp)))
    }
    gmix.mean[[i]]<-gmix.mean[[i]]/wts[[i]]
  }
  attributes(fitmean) <- attributes(fitted.mean)
  attributes(obsmean) <- attributes(observed.mean)
  val<-list("Mean Prior Probabilities"=gmix.mean, "Mean function Expected Values"=fitmean, "Mean function Observed Values"=obsmean, "Mean Posterior Probabilities"=mpost)
  structure(val,moc.name=deparse(match.call()$object,control=NULL),FUN=substitute(FUN),along=deparse(along.name,control=NULL)) }

# plot.moc: plot observed and fitted posterior mean profiles against a
# covariate (body follows below).
plot.moc<-function(x,against=1:x$nvar,main="",xlab="",ylab="",prob.legend=TRUE,scale=FALSE,group.colors=rainbow(x$groups),...)
# plot.moc body: draws each group's posterior mean observed values (points)
# and fitted values (lines) against "against", optionally standardized
# (scale=TRUE) and with a mixture-probability legend in the right margin.
{ if(main == "") main <- deparse(match.call()$x)
  if(!inherits(x,"moc")) stop("Not an object of class moc")
  # Rebind model functions to the saved data environment (see post.moc).
  if(!is.null(eval(x$data))){
    mocData <- list2env(eval(x$data))
    environment(x$density) <- mocData
    environment(x$gmixture) <- mocData
    for (i in 1:x$groups) {
      environment(x$gmu[[i]]) <- mocData
      environment(x$gshape[[i]]) <- mocData
      environment(x$gextra[[i]]) <- mocData
      environment(x$expected[[i]]) <- mocData
    }
    # attach(eval(x$data),name=deparse(x$data,control=NULL),pos=2)
    # on.exit(detach(2),add=TRUE)
  }
  if(prob.legend) {
    # Reserve outer right margin for the legend; restored on exit.
    oldpar<-par("oma"=c(0,0,0,8),"las"=1)
    legend.text<-paste("Group",1:x$groups,"=", formatC(apply(x$gmixture(x$coef[(sum(x$npar[1:3])+1):sum(x$npar[1:4])]), 2,mean),digits=4))
    on.exit(par(oldpar),add=TRUE)
  }
  # Duplicate the x-axis when plotting both observed and fitted columns.
  if(dim(as.matrix(against))[2]==1) {w<-cbind(against)} else {w<-cbind(against,against)}
  if(scale) {
    # Weighted mean and (weighted) standard deviation per response variable.
    center<-apply(eval(x$resp),2,function(v) weighted.mean(v,eval(x$prior.weights),na.rm=TRUE))
    scale<-sqrt(apply(eval(x$resp),2,function(v) mean(eval(x$prior.weights)*v[nav<-!is.na(v)]^2)/ mean(eval(x$prior.weights)[nav]))-center^2)
  } else {center<-rep(0,x$nvar);scale<-rep(1,x$nvar)}
  # Empty canvas sized for both observed and fitted profiles.
  matplot(w,cbind((t(x$observed.mean)-center)/scale,(t(x$fitted.mean)-center)/scale), type="n",main=main,xlab=xlab,ylab=ylab,col=group.colors,...)
  matpoints(against,(t(x$observed.mean)-center)/scale,col=group.colors,...)
  matlines(against,(t(x$fitted.mean)-center)/scale,type="o",pch=20,cex=.75,col=group.colors,...)
  mocname <- deparse(match.call()$x)
  if(prob.legend) {
    mtext(legend.text,side=4,outer=TRUE,at=((1:x$groups)+3)/(x$groups+6),col=group.colors)
  }
  invisible(list(moc.name = mocname,against=against,fitted=x$fitted,observed=x$observed,center=center,scale=scale,graphic.par=par())) }

# density.moc: fitted marginal mixture density of selected response
# variables, optionally split by a factor "along".  plot chooses between
# returning the values ("none"), a density plot, a pp-plot (fitted vs
# empirical CDF) or a pq-plot (both CDFs vs the variable).
density.moc <- function(x,var=NULL,along=NULL,plot=c("none","pp-plot","density","pq-plot"),type="l",...)
{ object <- x
  if(is.null(var)) stop("argument var should be supplied, there is no default!")
  if(!all(var%in%(1:object$nvar))) stop(paste("var should be given as an integer list between 1 and nvar =",object$nvar))
  if(is.null(along)) {
    # Single stratum covering every subject.
    along <- factor(rep(TRUE,object$nsubject))
    subtitle <- NULL
  } else {
    subtitle <- paste(",",deparse(substitute(along),width.cutoff=20,control=NULL),"=",collapse=" ")
    along <- factor(along)
  }
  plot <- match.arg(paste(plot),c("none","pp-plot","density","pq-plot"))
  # Rebind model functions to the saved data environment (see post.moc).
  if(!is.null(eval(object$data))) {
    mocData <- list2env(eval(object$data))
    environment(object$density) <- mocData
    environment(object$gmixture) <- mocData
    for (i in 1:object$groups) {
      environment(object$gmu[[i]]) <- mocData
      environment(object$gshape[[i]]) <- mocData
      environment(object$gextra[[i]]) <- mocData
      environment(object$expected[[i]]) <- mocData
    }
    # attach(eval(object$data),name=deparse(object$data,control=NULL),pos=2)
    # on.exit(detach(2),add=TRUE)
  }
  parm<-split(object$coefficients,rep(c("mu","shape","extra","mix"),object$npar))
  mix <- object$gmixture(parm$mix)
  # Mask out the non-selected variables with NA so prod(...,na.rm=TRUE)
  # marginalizes over them.
  y <- array(NA,dim(eval(object$resp)))
  y[,var] <- eval(object$resp)[,var]
  dens.pred <- apply(sapply(1:object$groups,function(ind) apply(as.matrix(object$density(y,object$gmu[[ind]](parm$mu), object$gshape[[ind]](parm$shape),object$gextra[[ind]](parm$extra))),1,prod,na.rm=TRUE))*mix,1,sum)
  vname <- dimnames(eval(object$resp))[[2]][var]
  if(is.null(vname)) vname <- c(paste("V",var,sep=""))
  remove("y")
  val <- data.frame(dens.pred,eval(object$resp)[, var])
  names(val) <- c("density",vname)
  if(plot %in% c("density","pp-plot","pq-plot")){
    # One panel row per variable, one column per (non-FALSE) stratum.
    npcol <- nlevels(along)-("FALSE" %in% levels(along))
    oldpar<-par(mfrow=c(length(var),npcol));on.exit(par(oldpar),add=TRUE)
    for(i in 1:length(var)) {
      for(lname in levels(along)[which(levels(along)!="FALSE")]) {
        select <- which(along==lname)
        # Order by variable value (then density) for line plots/CDFs.
        ind <- order(val[select,i+1],val[select,1],na.last=NA)
        tmp <- val[select,c(i+1,1)][ind,]
        if(nlevels(along)<=1) subname <- NULL else subname <- lname
        if(plot == "density") plot(tmp,type=type,xlab=paste(vname[i],paste(subtitle,subname)),ylab="density",...)
        if(plot %in% c("pp-plot","pq-plot")) {
          tmp2 <- ecdf(tmp[,1])
          tmp <- approx(tmp[,1],tmp[,2],rule=2,method="linear",yleft=0,yright=0, ties="ordered",n=max(75,length(val[,1])))
          if(plot=="pp-plot"){
            # Fitted CDF against empirical CDF with the 45-degree reference.
            plot(tmp2(tmp$x),cumsum(tmp$y)/sum(tmp$y), type=type,xlab="Empirical CDF",ylab="Predicted CDF",xlim=c(0,1),ylim=c(0,1), sub=paste(vname[i],paste(subtitle,subname)),...)
            lines(c(0,1),c(0,1))
          } else {
            plot(tmp$x,cumsum(tmp$y)/sum(tmp$y), type=type,xlab=paste(vname[i],paste(subtitle,subname)), ylab="Predicted CDF",ylim=c(0,1),...)
            lines( tmp$x,tmp2(tmp$x),type="s",lty=2)
          }} }}} else {val} }

# profiles.postCI: posterior-weighted empirical confidence limits of each
# data column within each mixture group, obtained by inverting the
# posterior-weighted cumulative distribution (linear or step interpolation).
profiles.postCI <- function(object,data=NULL,level=0.95,interpolate=TRUE)
{ if(!inherits(object,"moc")) stop("\n\tNot an object of class moc\n")
  CI <- level
  # A scalar level is converted to symmetric two-sided probabilities.
  if(length(CI)==1) CI <- c((1-CI)/2,(1+CI)/2)
  if(!all((CI>=0) & (CI<=1))) stop("\nlevel should lie in [0,1]\n")
  if(is.null(data)) data <- eval(object$resp)
  if(dim(as.matrix(data))[1] != object$nsubject) stop(paste("\ndata should have",object$nsubject,"subjects\n"))
  data <- data.frame(data)
  vname <- dimnames(data)[[2]]
  mpost <- post.moc(object)*eval(object$prior.weights)
  ordind <- sapply(data,order)
  # Total posterior mass on non-missing values, per group and column.
  mpost.na <- matrix(sapply(data,function(x) apply(as.matrix(na.omit(data.frame(x,mpost))[,-1]),2,sum)),object$group,dim(data)[2])
  if(interpolate) method="linear" else method="constant"
  val <- lapply(1:object$groups,function(gr) sapply(1:dim(ordind)[2],function(colid) approx( cumsum(mpost[ordind[,colid],gr])/mpost.na[gr,colid],data[ordind[,colid],colid], ties="ordered",f=0,method=method,rule=2,xout=CI)$y) )
  names(val) <- paste("Group",1:object$groups,sep="")
  for(i in 1:length(val)) {
    dimnames(val[[i]]) <- list(c(paste("Lower_",CI[1],sep=""),paste("Upper_",CI[2],sep="")),vname)
    # Unattainable limits become infinite bounds.
    val[[i]][1,is.na(val[[i]][1,])] <- -Inf
    val[[i]][2,is.na(val[[i]][2,])] <- Inf
  }
  attr(val,"CI") <- CI
  attr(val,"moc.name") <- paste(substitute(object))
  val }
# profilesplot: generic dispatcher.
profilesplot<-function(x,...) UseMethod("profilesplot")

# profilesplot.moc: plots subject profiles (type="subject"), pairwise
# variable scatterplots ("variable") or pairwise posterior-probability
# scatterplots ("posterior"), coloring each subject by mixing the group
# colors according to its posterior probabilities.
profilesplot.moc<-function(x,against=1:x$nvar,main=NULL,xlab="",ylab="",col.legend=TRUE,scale=FALSE,group.colors=rainbow(x$groups),type="subject",...)
{ if(!inherits(x,"moc")) stop("Not an object of class moc")
  # Rebind model functions to the saved data environment (see post.moc).
  if(!is.null(eval(x$data))) {
    mocData <- list2env(eval(x$data))
    environment(x$density) <- mocData
    environment(x$gmixture) <- mocData
    for (i in 1:x$groups) {
      environment(x$gmu[[i]]) <- mocData
      environment(x$gshape[[i]]) <- mocData
      environment(x$gextra[[i]]) <- mocData
      environment(x$expected[[i]]) <- mocData
    }
    # attach(eval(x$data),name=deparse(x$data,control=NULL),pos=2)
    # on.exit(detach(2),add=TRUE)
  }
  type <- match.arg(type,c("subject","variable","posterior"))
  if (is.null(main)) main <- paste(paste(x$resp,collapse=" "),type,"profiles")
  # Convert colors to RGB fractions so they can be posterior-mixed below.
  group.colors<-col2rgb(group.colors)/256
  if(col.legend) {
    oldpar<-par("oma"=c(0,0,0,6),"las"=1);on.exit(par(oldpar),add=TRUE)
    legend.text<-paste("Group",1:x$groups)
  }
  if(scale) {
    center<-apply(eval(x$resp),2,mean,na.rm=TRUE)
    scale<-apply(eval(x$resp),2,sd,na.rm=TRUE)
  } else {center<-rep(0,x$nvar);scale<-rep(1,x$nvar)}
  # Each subject's color = posterior-probability mixture of the group colors.
  group.rgb<-t(apply(post.moc(x),1,function(y) (group.colors%*%y)))
  if(type=="subject") {
    matplot((against),(t(eval(x$resp))-center)/scale,type="o",pch=20,cex=0.75, main=main,xlab=xlab,ylab=ylab,col=rgb(group.rgb[,1],group.rgb[,2],group.rgb[,3]))
  } else {
    if(type=="variable") pairs(eval(x$resp),col=rgb(group.rgb[,1],group.rgb[,2],group.rgb[,3]),upper.panel=NULL,main=main)
    else if(type=="posterior") pairs(post.moc(x),col=rgb(group.rgb[,1],group.rgb[,2],group.rgb[,3]),upper.panel=NULL,main=main)
  }
  if(col.legend) {
    mtext(legend.text,side=4,outer=TRUE,at=((1:x$groups)+3)/(x$groups+6), col=rgb(group.colors[1,],group.colors[2,],group.colors[3,]))
  }
  mocname <- deparse(match.call()$x)
  invisible(list("moc.name" = mocname,against=substitute(against),col.legend=col.legend,scale=scale,group.colors=group.colors, type=type,graphic.par=par())) }

# entropyplot: generic dispatcher.
entropyplot <- function(x,...) UseMethod("entropyplot")

# entropyplot.moc: plots sorted prior and posterior subject entropies (raw
# or standardized) as two curves with a shaded band between them, the band
# colored by posterior-mixed group colors.
entropyplot.moc <- function(x,main=NULL,std=TRUE,lwd=1.5,col=c("red3","green3","gray95"),shade.gr.col=gray(1-0.5*(0:(x$groups-1))/(x$groups-1)),legend=TRUE,...)
{ if(!inherits(x,"moc")) stop("Not an object of class moc")
  if(x$groups==1) stop("Sorry, but there is no entropy for a single group!\n")
  # Rebind model functions to the saved data environment (see post.moc).
  if(!is.null(eval(x$data))) {
    mocData <- list2env(eval(x$data))
    environment(x$density) <- mocData
    environment(x$gmixture) <- mocData
    for (i in 1:x$groups) {
      environment(x$gmu[[i]]) <- mocData
      environment(x$gshape[[i]]) <- mocData
      environment(x$gextra[[i]]) <- mocData
      environment(x$expected[[i]]) <- mocData
    }
    # attach(eval(x$data),name=deparse(x$data,control=NULL),pos=2)
    # on.exit(detach(2),add=TRUE)
  }
  parm<-split(x$coef,rep(c("mu","shape","extra","mix"),x$npar))
  prior.entropy <- entropy(x$gmixture(parm$mix))
  posterior.entropy <- entropy(post.moc(x))
  # Column 1 of entropy() is raw entropy, column 2 is standardized.
  max.ent <- ifelse(std,1,log(x$groups))
  if(std) {col.ind <- 2} else { col.ind <- 1}
  order.pripost <- order(prior.entropy[,col.ind],posterior.entropy[,col.ind])
  n <- length(order.pripost)
  moc.name <- deparse(match.call()$x)
  if (is.null(main)) main <- paste("Prior and posterior",ifelse(std,"standardized",""),"entropy for",moc.name)
  plot(c(0,1),c(0,max.ent),xaxt="n",xlim=c(0,1),ylim=c(0,max.ent),type="n",main=main,xlab="",ylab="")
  if(!is.null(shade.gr.col)) { shade.gr.col<- mix.colors.moc(x,shade.gr.col) } else { shade.gr.col <- rep(col[3],n) }
  # Shade the quadrilateral between consecutive prior/posterior points.
  for(i in 0:(n-1)) polygon(cbind((i:(i + 1))/(n - 1), ((i + 1):i)/(n - 1)), cbind(prior.entropy[order.pripost[(i+1):(i+2)], col.ind], posterior.entropy[order.pripost[(i+2):(i+1)], col.ind]), col=shade.gr.col[order.pripost[(i+1)]],border=NA)
  lines((0:(n-1))/(n-1),prior.entropy[order.pripost,col.ind],col=col[1],lwd=lwd)
  lines(((n-1):0)/(n-1),posterior.entropy[order.pripost[n:1],col.ind],col=col[2],lwd=lwd)
  if(legend) legend(1,0,c("Prior","Posterior"),lwd=lwd,col=col[1:2],ncol=2,xjust=1,bty="n") }
# plot.residuals.moc: one residual panel per requested mixture group,
# plotted against the subject index, the observation index within subject,
# or a running index; points are colored by posterior-mixed group colors and
# sunflowerplot() can replace plot() for overplotted data.
plot.residuals.moc<-function(x,against="Index",groups=1:dim(x)[3],sunflower=FALSE,group.colors=NULL,...)
{ if(sunflower) thisplot<-sunflowerplot else thisplot<-plot
  if(!inherits(x,"residuals.moc")) stop("Not an object of class residuals.moc !")
  if (!all(groups %in% (1:dim(x)[3]))) stop("You requested residuals for non-existing groups !")
  # The originating moc object is recovered by name from the "moc.name"
  # attribute — it must still exist in the calling environment.
  if(is.null(group.colors)) group.colors=rainbow(eval(as.name(attr(x,"moc.name")))$groups)
  group.rgb <- mix.colors.moc(eval(as.name((attr(x,"moc.name")))),group.colors=group.colors)
  dim1<-dim(x)
  again<-substitute(against)
  oldpar<-par(mfrow=c(length(groups),1),oma=c(0,0,2,0));on.exit(par(oldpar),add=TRUE)
  vname<-deparse(substitute(against),control=NULL)
  # Translate the keywords into concrete x-axis vectors.
  if(again=="Subject") against<-c(rep(1:dim1[1],dim1[2]))
  if(again=="Observation") against<-factor(c(rep(1:dim1[2],rep(dim1[1],dim1[2]))))
  if(again=="Index") against<-1:(dim1[1]*dim1[2])
  tmp <- function(i) {
    z<-na.omit(data.frame(group.rgb,c(against),c(x[,,i])))
    tmp.rgb <- as.vector(z[,1])
    z <- z[,-1]
    dimnames(z)<-list(dimnames(z)[[1]],c(vname,attr(x,"type")))
    thisplot(z,main=ifelse(attr(x,"type")=="mixture","All groups",paste("Group",i)),col=tmp.rgb,...)
    # Mark the boundaries between variables on the running index axis.
    if(again=="Index") abline(v=(1:dim1[2])*dim1[1]+0.5)
  }
  res.apply<-sapply(groups,tmp)
  mtext(paste("MOC ",attr(x,"type")," residuals of",attr(x,"moc.name")),side=3,outer=TRUE,font=2)
  mocname <- deparse(match.call()$x,control = NULL)
  attr(res.apply,"moc.name") <- mocname
  invisible(list(moc.name = mocname ,against=deparse(again,control=NULL),groups=deparse(substitute(groups),control=NULL), sunflower=sunflower,group.colors=group.colors,graphic.par=par())) }

# coef.moc: coefficient extractor; split=TRUE returns the coefficients as a
# list split into the four parameter families.
coef.moc <- function(object,split=FALSE,...)
{ if(split) return(split(object$coef,rep(c("mu","shape","extra","mixture"),object$npar))) else return(object$coef) }

## Generalized logit and inverse logit with respect to a reference group

# inv.glogit: inverse generalized logit — maps gmix to a row vector of
# probabilities with the reference group's implicit coefficient fixed at 1.
inv.glogit<-function(gmix,ref=1) {rbind(append(exp(gmix),1,ref-1))/(1+sum(exp(gmix)))}

# glogit: generalized logit of a probability vector relative to group ref.
glogit<-function(p,ref=1) {log(p[-ref]/p[ref])}

## Mix group colors according to posterior mixture probabilities

# mix.colors.moc: returns (invisibly) one color per subject, obtained as the
# posterior-probability-weighted average of the group colors in RGB space.
# NOTE(review): this calls post(object), a generic presumably defined
# elsewhere in the package — confirm it dispatches to post.moc.
mix.colors.moc<-function(object,group.colors=rainbow(object$groups))
{ if(!inherits(object,"moc")) stop("Not an object of class moc")
  if(length(group.colors)!=object$groups) stop("Oooups ! Wrong number of colors: should be the same as number of groups.")
  group.rgb<-t(apply(post(object),1,function(y) ((col2rgb(group.colors)/256)%*%y)))
  invisible(rgb(group.rgb[,1],group.rgb[,2],group.rgb[,3])) }

# .onAttach: package startup hook — prints the DESCRIPTION file and points
# the user to the bundled Utils, Examples, Sweave and GUI resources.
.onAttach <- function(libname,pkgname)
{ utils.path <- system.file("Utils",package=pkgname)
  packageStartupMessage("\n",paste(readLines(system.file("DESCRIPTION",package=pkgname)),collapse="\n"), "\n",appendLF = TRUE)
  packageStartupMessage(" Supplementary utility functions can be found in directory:\n\t", utils.path,": { ", paste(dir(utils.path, pattern="*.R$"),collapse=", ")," }\n")
  packageStartupMessage(" Extended examples can be found in ",system.file("Examples",package=pkgname))
  packageStartupMessage("\n Directory ",system.file("Sweave",package=pkgname), " contains files\n\t which help using Sweave to make reports of moc models.\n")
  packageStartupMessage(" A GUI is also available in ", system.file("GUI","moc-gui.R",package=pkgname))
  packageStartupMessage("\n\n You must source those files to use their functionalities.")
  packageStartupMessage("\n See the specific files for documentations.\n\n") }

# mocUtils: with no argument, lists the utility scripts shipped in the
# package's Utils directory; with a filename, sources that script into a
# fresh environment, attaches it to the search path and returns it.
# NOTE(review): the trailing cat() below runs even after a successful load,
# so the function's visible value is then NULL and the "Did not returned
# anything ?" message always prints — confirm whether this is intended.
mocUtils <- function(filename)
{ if(missing(filename)) return(dir(system.file("Utils",package="moc"),pattern="*.[Rr]$"))
  name <-paste(filename,sep="")
  if((file <- system.file("Utils",filename,package="moc")) != ""){
    assign(name,new.env())
    source(file,local=eval(parse(text=name)))
    #
    attach(eval(parse(text=name)),name=name)
    cat("\n",name," returned!\n")
    eval(parse(text=name))
  }
  cat("\n Did not returned anything ?\n") }

# .onUnload: unload the package's compiled code when the namespace unloads.
.onUnload <- function(libpath) library.dynam.unload("moc", libpath)
47a215d5fc8222b7c726a9b5fba1b93223feab4d
d746fef241f9a0e06ae48cc3b1fe72693c43d808
/ark_87287/d7jc7h/d7jc7h-013/rotated.r
70d0fcb7cbd95373869c270511d1f9e4aa44da19
[ "MIT" ]
permissive
ucd-library/wine-price-extraction
5abed5054a6e7704dcb401d728c1be2f53e05d78
c346e48b5cda8377335b66e4a1f57c013aa06f1f
refs/heads/master
2021-07-06T18:24:48.311848
2020-10-07T01:58:32
2020-10-07T01:58:32
144,317,559
5
0
null
2019-10-11T18:34:32
2018-08-10T18:00:02
JavaScript
UTF-8
R
false
false
195
r
rotated.r
r=0.21 https://sandbox.dams.library.ucdavis.edu/fcrepo/rest/collection/sherry-lehmann/catalogs/d7jc7h/media/images/d7jc7h-013/svc:tesseract/full/full/0.21/default.jpg Accept:application/hocr+xml
7ed3f95070be28178a4b1dceabc82cf3cc80093f
7e4fd49824b4ef4922fb024a227dd704ed78dfb2
/cachematrix.R
844f73c579d206c2895dbfcfa112ae37580cd721
[]
no_license
shprite/coursera
37769906b67486e62fd6eb964141913738066301
f79bc8c8fe74ced5f988dc766a02efd9b857df71
refs/heads/master
2021-05-12T10:11:38.198669
2018-01-13T15:27:06
2018-01-13T15:27:06
117,348,903
0
0
null
null
null
null
UTF-8
R
false
false
1,142
r
cachematrix.R
## Cache the inverse of a matrix so that repeated solves are free.

## makeCacheMatrix: build a "cache-aware matrix" object.
## Returns a list of accessor closures sharing the matrix `x` and its
## lazily computed inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL

  # Replace the stored matrix and invalidate the stale inverse.
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL
  }

  # Retrieve the stored matrix.
  get <- function() {
    x
  }

  # Store a freshly computed inverse in the cache.
  setinv <- function(solve) {
    cached_inverse <<- solve
  }

  # Retrieve the cached inverse (NULL when not yet computed).
  getinv <- function() {
    cached_inverse
  }

  list(set = set, get = get, setinv = setinv, getinv = getinv)
}

## cacheSolve: return the inverse of the matrix wrapped by `x` (a list made
## by makeCacheMatrix), computing it with solve() on the first call and
## reusing the cached value afterwards. Extra arguments go to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinv()
  if (is.null(cached)) {
    fresh <- solve(x$get(), ...)
    x$setinv(fresh)
    return(fresh)
  }
  message("getting cached data")
  cached
}
e530a6f63aa84e54dfae3440398b09b0d6fa3850
d0a994376c57bf849f2229b6afdc3b6c515e30d5
/pollutantmean.R
086ee3101e90c68aaec86b0933a512790aa31a4b
[]
no_license
vttoonses/R-Week-2
19490c2b49903147adc634f3f977679c3df00af8
45c3e0212ce3580cea1b9e4e0e5f025cd7713726
refs/heads/master
2021-08-31T13:37:54.607859
2017-12-21T14:23:02
2017-12-21T14:23:02
114,809,592
0
0
null
null
null
null
UTF-8
R
false
false
1,305
r
pollutantmean.R
## Compute the mean level of a pollutant ("sulfate" or "nitrate") across a
## set of monitor CSV files stored in `directory` (files 001.csv ... 332.csv).
##
## Args:
##   directory: path containing the monitor data files (single string).
##   pollutant: "sulfate" or "nitrate" (single string).
##   id:        integer vector of monitor IDs, a subset of 1:332, no repeats.
## Returns: mean of all non-NA measurements of `pollutant` across `id`.
## Raises: an error for any invalid argument (messages unchanged).
pollutantmean <- function(directory, pollutant, id = 1:332) {

  # Validate the function parameters
  if (length(directory) != 1)
    stop("A single directory must be specified for the test data.")
  if (!dir.exists(directory))
    stop(paste("The specified test data directory", directory, "does not exist."))
  if (length(pollutant) != 1)
    stop("A single pollutant type of \"sulfate\" or \"nitrate\" must be specified.")
  if (!(pollutant %in% c("sulfate", "nitrate")))
    stop(paste("Unknown pollutant:", pollutant, ". Please specify either \"sulfate\" or \"nitrate\"."))
  idLength <- length(id)
  if (idLength < 1 || idLength > 332 || any(!(id %in% 1:332)) || anyDuplicated(id) > 0)
    stop("The id vector may only contain values between 1 and 332 inclusive with no repeats.")

  # Read one monitor's file and return its non-NA measurements of `pollutant`.
  # Paths are built with file.path() instead of setwd(): the original version
  # changed the working directory and would leave it changed permanently if
  # any read.csv() call failed before the restore.
  readFileData <- function(monitorId) {
    fileData <- read.csv(file.path(directory, sprintf("%03d.csv", monitorId)))
    targetData <- fileData[pollutant]
    targetData[!(is.na(targetData))]
  }

  # Pool all retained measurements across the requested monitors.
  # lapply + unlist instead of sapply: stable return type on edge cases.
  testData <- unlist(lapply(id, readFileData))

  # Return the mean of all that data
  mean(testData)
}
394afbc0e217fb55198edf6d9ea84a1c8beb4f06
7c17d45e324d26d6ac1eb42aed7c1a7db3b0238d
/z_r_proj/02_ajusta_modelo.R
af732600134f7537adcf27c417a5fb0a7446bb11
[]
no_license
mhnk77/dpa-project
4e267df5649f44c41795eb19e041709669b1a96e
a2cc2b4488f002de0e206726b2f633964c2d6efb
refs/heads/main
2023-05-14T03:57:45.993203
2021-05-24T15:51:48
2021-05-24T15:51:48
null
0
0
null
null
null
null
UTF-8
R
false
false
1,057
r
02_ajusta_modelo.R
#
# Fit a random-forest inspection-outcome classifier with tidymodels and
# save the fitted workflow to disk. Reads the cleaned inspections table,
# builds a preprocessing recipe, fits a 1000-tree ranger forest, then
# prints variable importance and the forest's stored predictions.
library(stringr)
library(tidymodels)

# Cleaned inspection data; rows with any missing value are dropped.
dta <- readRDS('z_r_proj/dta_clean.rds') %>%
  drop_na()

# Preprocessing recipe: keep the ID columns out of the predictors, expand
# the inspection date into day-of-week / month features, lump very rare
# facility/inspection types into "other", and factor-encode the strings
# (risk as an ordered factor).
rcp <- recipe(results ~ ., data = dta) %>%
  update_role(inspection_id, inspection_date, new_role = 'ID') %>%
  # update_role(violations, inspection_date, new_role = 'trash') %>%
  step_date(inspection_date, features = c('dow', 'month')) %>%
  step_rm(inspection_date) %>%
  step_other(facility_type, threshold = 0.0002) %>%
  step_other(inspection_type, threshold = 0.0002) %>%
  step_string2factor(facility_type, zip, inspection_type, results) %>%
  step_string2factor(risk, ordered = T)

# 1000-tree classification forest via ranger; impurity-based importance,
# 8 worker threads.
rf_mod <- rand_forest(mode = 'classification', trees = 1000) %>%
  set_engine('ranger', importance = 'impurity', num.threads = 8)

# Bundle model + recipe into a single workflow and fit on the full data.
wflow <- workflow() %>%
  add_model(rf_mod) %>%
  add_recipe(rcp)

mod_fit <- wflow %>%
  fit(data = dta)

mod_fit %>% saveRDS('z_r_proj/mod_fit_factor.rds')

# Inspect the underlying ranger fit: relative variable importance
# (printed, not saved) and the forest's stored predictions.
fit_obj <- pull_workflow_fit(mod_fit)$fit

fit_obj$variable.importance %>%
  tidy() %>%
  arrange(-x) %>%
  mutate(imp_rel = x/sum(x))

fit_obj$predictions
aeb1cfcbe4094de03e3b788875a2b40ca1fb06be
86920ac8061f5b29b86733ec332469bf83761202
/scripts/03_combine_kelp_temp_waves.R
db8b8a90f903fa6d042f62a441eb3b9cda04401e
[]
no_license
jebyrnes/temp_kelp_change
64b5c54dd58213d51b356017e2512867ef156e5e
b8891d25038ffab02410e5afd4b81d0f0c4d4cea
refs/heads/master
2021-10-11T14:11:31.283204
2021-10-04T19:53:09
2021-10-04T19:53:09
51,555,573
0
0
null
2021-10-04T19:53:10
2016-02-11T23:36:10
R
UTF-8
R
false
false
5,723
r
03_combine_kelp_temp_waves.R
## Combine kelp/temperature data with Reguero wave time series:
## parse the per-sheet wave data from an Excel workbook, aggregate the
## monthly sheets to annual summaries, merge everything into one table,
## compute per-site long-term means and per-year deviations, and write
## the merged result to CSV.
library(dplyr)
library(readr)
library(tidyr)
library(readxl)

filename <- "wave_data_reguero/timeseries_v2.xls"

# Functional pipeline: restrict any data frame to the study years.
dat_year_filter <- . %>% filter(Year >= 1952 & Year <= 2008) #first data point in kelp, last da

#### 1) Load the different pieces of the wave timeseries
unique_lat_long_tab <- read_excel(filename, sheet=2) #updated nov 2nd
#unique_lat_long_tab <- read_excel("wave_data_reguero/timeseries_new_points.xls", sheet=2)
#unique_lat_long_tab <- read_excel("wave_data_reguero/timeseries.xls", sheet=2)
#unique_lat_long_tab2 <- read_excel("wave_data_reguero/timeseries.xls", sheet=2)
#unique_lat_long_tab
#lat_long <- unique(paste("X", unique_lat_long_tab$LonGOW, unique_lat_long_tab$LatGOW, sep="_"))

#Unique coordinates in the data
gow_coords <- read_excel(filename, sheet=3)
gow_coords_names <- paste("X", gow_coords$GOWLon, gow_coords$GOWLat, sep="_")

## Most of the data uses the same format, so, here's a function to parse it.
## Reads one sheet of the workbook (columns = GOW grid points named
## "X_<lon>_<lat>", rows = Year or Year+Month), reshapes it to long format
## and splits the coordinate name back into numeric GOWLon / GOWLat.
## NOTE(review): gather() is superseded by pivot_longer() in current tidyr;
## kept as-is to preserve behavior.
parse_wave_data <- function(sheet, month=FALSE){
    wave_col_name <- c("Year", "Month", gow_coords_names)
    if(!month) wave_col_name <- wave_col_name[-2]
    wd <- read_excel(filename, sheet=sheet, col_names=wave_col_name)
    if(month){
        #forgive this hack - but something odd was happening with tidyr
        #hopefully it will be unnecessary once my issue on github
        #is resolved
        nameFrame <- tibble(gow_coords = names(wd)[-c(1:2)],
                            newName = paste("X", 1:(ncol(wd)-2), sep="_"))
        names(wd) <- c("Year", "Month", nameFrame$newName)
        wd <- wd %>% gather(newName, measurement, -Year, -Month) %>%
            left_join(nameFrame) %>% dplyr::select(-newName)
        # wd <- wd %>% gather(gow_coords, measurement, -Year, -Month)
    }else{
        nameFrame <- tibble(gow_coords = names(wd)[-1],
                            newName = paste("X", 1:(ncol(wd)-1), sep="_"))
        names(wd) <- c("Year", nameFrame$newName)
        wd <- gather(wd, newName, measurement, -Year) %>%
            left_join(nameFrame) %>% dplyr::select(-newName)
        # wd <- wd %>% gather(gow_coords, measurement, -Year)
    }
    # Split "X_<lon>_<lat>" into numeric coordinate columns.
    wd %>% mutate(gow_coords = gsub("X_", "", gow_coords)) %>%
        separate(gow_coords, c("GOWLon", "GOWLat"), "_") %>%
        mutate(GOWLon = as.numeric(GOWLon), GOWLat = as.numeric(GOWLat))
}

# Annual sheets (4-6): one measurement per year and grid point.
annual_mean_wave_energy <- parse_wave_data(sheet = 4) %>%
    dplyr::rename(mean_wave_energy = measurement)
annual_mean_wave_height <- parse_wave_data(sheet = 5) %>%
    dplyr::rename(mean_wave_height = measurement)
annual_q95_wave_height <- parse_wave_data(sheet = 6) %>%
    dplyr::rename(q95_wave_height = measurement)

# Monthly sheets (7-9): aggregated below to annual max / sum / sd summaries.
monthly_mean_wave_energy <- parse_wave_data(sheet = 7, month=TRUE)
monthly_sum_wave_energy <- parse_wave_data(sheet = 8, month=TRUE)

annual_max_wave_energy <- monthly_mean_wave_energy %>%
    group_by(Year, GOWLon, GOWLat) %>%
    dplyr::summarise(max_mean_wave_energy = max(measurement, na.rm=T),
                     sd_mean_wave_energy = sd(measurement, na.rm=T))

sum_wave_energy <- monthly_sum_wave_energy %>%
    group_by(Year, GOWLon, GOWLat) %>%
    dplyr::summarise(max_sum_wave_energy = max(measurement, na.rm=T),
                     total_wave_energy = sum(measurement, na.rm=T),
                     sd_sum_wave_energy = sd(measurement, na.rm=T))

monthly_max_wave_height <- parse_wave_data(sheet = 9, month=TRUE)
annual_max_wave_height <- monthly_max_wave_height %>%
    group_by(Year, GOWLon, GOWLat) %>%
    dplyr::summarise(max_wave_height = max(measurement, na.rm=T),
                     sd_max_wave_height = sd(measurement, na.rm=T))

# Retained (commented-out) exploratory plotting helper from the original:
#
# qplot_wave_trends <- function(adf, preprocess=F, preprocessFun = max){
# if(preprocess) {
# adf <- adf %>% group_by(Year, GOWLon, GOWLat) %>%
# summarize(measurement = preprocessFun(measurement, na.rm=T)) %>%
# ungroup()
# }
#
# ggplot(adf %>% group_by(GOWLon, GOWLat) %>%
# mutate(measurement = measurement-mean(measurement, na.rm=T)) %>%
# ungroup(),
# aes(x=Year, y = measurement,
# color=abs(as.numeric(GOWLat)), group=factor(paste(GOWLon, GOWLat)))) +
# geom_line(alpha=0.1) +
# scale_color_continuous(low="red", high="blue", guide=guide_colorbar(title="GOWLat")) +
# stat_smooth(method="lm", fill=NA, color="grey", alpha=0.5) +
# stat_smooth(method="lm", fill=NA, color="black", alpha=0.5, mapping=aes(group=1)) +
# theme_bw(base_size=17) +
# ylab("anomoly")
# }

##### 2) Merge into one mega-frame
wave_data <- left_join(
    left_join(
        left_join(annual_mean_wave_energy, annual_mean_wave_height),
        left_join(annual_max_wave_energy, sum_wave_energy)),
    left_join(annual_max_wave_height, annual_q95_wave_height)) %>%
    dplyr::rename(LonGOW = GOWLon, LatGOW = GOWLat)

# Attach original sampling coordinates and restrict to the study years.
wave_data_full <- left_join(unique_lat_long_tab, wave_data) %>%
    dplyr::rename(Latitude = `orig Lat`, Longitude = `orig Lon`) %>%
    dat_year_filter %>%
    select(!contains("sd_")) #meh. not going to work with these, & want to minimize columns

##### 3) Calculate averages for each lat/long during the entire dataset period
wave_data_avg <- wave_data_full %>%
    group_by(Longitude, Latitude, LonGOW, LatGOW) %>%
    summarize(across(mean_wave_energy:q95_wave_height,
                     ~mean(.x, na.rm=TRUE),
                     .names = "{.col}_site")) %>%
    ungroup()

##### 4) Load and merge with wave/temp data
kelp_temp_data <- read.csv("derived_data/raw_data_with_temp.csv")

# Merge everything, then express each yearly wave metric as a deviation
# from that site's long-term mean ("*_dev" columns).
rd_wave <- list(
    kelp_temp_data,
    wave_data_full,
    wave_data_avg
) %>%
    purrr::reduce(left_join) %>%
    mutate(across(mean_wave_energy:q95_wave_height,
                  ~.x - get(glue::glue("{cur_column()}_site")),
                  .names = "{.col}_dev"))

##### 6) Write it all out
write_csv(rd_wave, "derived_data/raw_data_with_temp_waves.csv")
3a3d00afcd6e3f7f2face0a500a7cc6ad97febd9
299585457e6f3fd9c3e82769db4d82becc67d05a
/man/backbone.Rd
8330a9b84be672789614e149e3022376acf9fa94
[]
no_license
jcfisher/backbone
39ca2d01f5e99cab28fefd456a69b02a5a6c9fa8
89fd0f4b0d786534d398559d8cf72db1abb65b16
refs/heads/master
2020-08-05T06:28:38.456269
2019-10-03T15:42:41
2019-10-03T15:42:41
212,430,280
0
0
null
2019-10-03T15:42:45
2019-10-02T20:01:09
null
UTF-8
R
false
true
2,045
rd
backbone.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/backbone.R \docType{package} \name{backbone} \alias{backbone} \alias{backbone-package} \title{backbone: Extracts the Backbone from Weighted Graphs} \description{ Provides methods for extracting from a weighted graph a binary or signed backbone that retains only the significant edges. The user may input a weighted graph, or a bipartite graph from which a weighted graph is first constructed via projection. Backbone extraction methods include the stochastic degree sequence model (Neal, Z. P. (2014). <doi:10.1016/j.socnet.2014.06.001>), hypergeometric model (Neal, Z. (2013). <doi:10.1007/s13278-013-0107-y>), the fixed degree sequence model (Zweig, K. A., and Kaufmann, M. (2011). <doi:10.1007/s13278-011-0021-0>), as well as a universal threshold method. } \details{ Some features of the package are: \itemize{ \item '\code{\link{universal}}': returns a unipartite backbone matrix in which values are set to 1 if above the given upper parameter threshold, and set to -1 if below the given lower parameter threshold, and are 0 otherwise. \item '\code{\link{sdsm}}': computes the proportion of generated edges above or below the observed value using the stochastic degree sequence model. Once computed, use \code{\link{backbone.extract}} to return the backbone matrix for a given alpha value. \item '\code{\link{fdsm}}': computes the proportion of generated edges above or below the observed value using the fixed degree sequence model. Once computed, use \code{\link{backbone.extract}} to return the backbone matrix for a given alpha value. \item '\code{\link{hyperg}}': returns a binary or signed adjacency matrix containing the backbone that retains only the significant edges. \item '\code{\link{backbone.extract}}': returns a positive or signed adjacency matrix containing the backbone: only the significant edges. 
} For additional documentation and background on the package functions, see \code{browseVignettes("backbone")}. }
3571ddf3a4da571c670bf3a93ff79ec4db192605
41cf136f375fb670d61fb88af26bdb2bbee434c3
/man/kill_flow.Rd
d90c5328e57894a9ec617752f5ea64a21681bb55
[]
no_license
gitter-badger/flowr
41503fa2e4a4c0187a9864442307ee17740374ed
2db159f2721ee8a3ae3aaf34dc0847b842a1c891
refs/heads/master
2020-12-03T09:15:08.286834
2015-06-05T17:57:00
2015-06-05T17:57:00
36,954,410
0
0
null
2015-06-05T20:41:25
2015-06-05T20:41:25
null
UTF-8
R
false
false
755
rd
kill_flow.Rd
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/status.R
\name{kill_flow}
\alias{kill_flow}
\title{kill_flow}
\usage{
kill_flow(x, wd, fobj, kill_cmd = "bkill", jobid_col = "job_sub_id")
}
\arguments{
\item{x}{either path to flow [character] or fobj object of class \link{flow}}

\item{wd}{path to a specific flow directory which needs to be killed}

\item{fobj}{an object of class \link{flow}}

\item{kill_cmd}{The command used to kill. Default is 'bkill' (LSF). One can use qdel for 'torque', 'sge' etc.}

\item{jobid_col}{Advanced use. The column name in 'flow_details.txt' file used to fetch jobids to kill}
}
\description{
kill_flow
}
\examples{
\dontrun{

## example for terminal
flowr kill_flow wd=path_to_flow_directory

}
}
68d0e8e487f92f5efb83953800502c214c6df649
20fb140c414c9d20b12643f074f336f6d22d1432
/man/NISTradTOgray.Rd
5ebf101c9bac9ab2e0d25ab9a7ca4dd4f3e8ab1e
[]
no_license
cran/NISTunits
cb9dda97bafb8a1a6a198f41016eb36a30dda046
4a4f4fa5b39546f5af5dd123c09377d3053d27cf
refs/heads/master
2021-03-13T00:01:12.221467
2016-08-11T13:47:23
2016-08-11T13:47:23
27,615,133
0
0
null
null
null
null
UTF-8
R
false
false
722
rd
NISTradTOgray.Rd
\name{NISTradTOgray} \alias{NISTradTOgray} \title{Convert rad to gray } \usage{NISTradTOgray(rad)} \description{\code{NISTradTOgray} converts from rad (absorbed dose) (rad) to gray (Gy) } \arguments{ \item{rad}{rad (absorbed dose) (rad) } } \value{gray (Gy) } \source{ National Institute of Standards and Technology (NIST), 2014 NIST Guide to SI Units B.8 Factors for Units Listed Alphabetically \url{http://physics.nist.gov/Pubs/SP811/appenB8.html} } \references{ National Institute of Standards and Technology (NIST), 2014 NIST Guide to SI Units B.8 Factors for Units Listed Alphabetically \url{http://physics.nist.gov/Pubs/SP811/appenB8.html} } \author{Jose Gama} \examples{ NISTradTOgray(10) } \keyword{programming}
5e9519fdbfcd8020c353bb2c5c974f10ea5ab76b
deaec1ac710c5a9388317a188c6005fd39c67ec2
/05_ShotogunMetagenome/15_VisualizeAll.R
b41ef99999fe6f208974261cae826bb584b7b417
[ "LicenseRef-scancode-unknown-license-reference", "MIT" ]
permissive
ong8181/interaction-capacity
57bf4cd7a833b953a12b28f28013019e8e624f7b
7b7f5160225468a91c7241fa78b999baf9cd7efa
refs/heads/main
2023-04-06T17:11:39.353785
2022-05-02T05:05:48
2022-05-02T05:05:48
235,936,633
0
0
null
null
null
null
UTF-8
R
false
false
2,182
r
15_VisualizeAll.R
####
#### Compare the results of phyloFlash and qMiSeq
#### Visualize all figures
####
## Loads four previously saved ggplot objects, annotates each with its
## plot/date/taxa-count labels, arranges them in a grid with a shared
## legend, and saves the combined figure as PDF plus an RDS object.

# Load libraries
library(tidyverse); packageVersion("tidyverse") # 1.3.0, 2021.7.23
library(cowplot); packageVersion("cowplot") # 1.1.1, 2021.7.23
library(ggsci); packageVersion("ggsci") # 2.9, 2021.7.23
theme_set(theme_cowplot())

# Generate output folder: "<this script's name minus .R>Out".
# NOTE(review): relies on rstudioapi, so this only works inside RStudio.
(fn <- basename(rstudioapi::getSourceEditorContext()$path))
output_folder <- paste0(str_sub(fn, end = -3), "Out"); rm(fn)
dir.create(output_folder)

# ---------------------------------------------- #
# Load ggplot objects
# ---------------------------------------------- #
g1 <- readRDS(sprintf("%s/g1_s001.obj", output_folder))
g2 <- readRDS(sprintf("%s/g1_s002.obj", output_folder))
g3 <- readRDS(sprintf("%s/g1_s003.obj", output_folder))
g4 <- readRDS(sprintf("%s/g1_s004.obj", output_folder))

# Per-panel titles: sampling plot, date, and detected-taxa counts.
g1 <- g1 + labs(title = "Plot 1\n2017/7/14",
                subtitle = "No. of detected taxa\nqMiSeq = 136\nShotgun = 118")
g2 <- g2 + labs(title = "Plot 1\n2017/7/15",
                subtitle = "No. of detected taxa\nqMiSeq = 129\nShotgun = 99")
g3 <- g3 + labs(title = "Plot 5\n2017/7/14",
                subtitle = "No. of detected taxa\nqMiSeq = 82\nShotgun = 116")
g4 <- g4 + labs(title = "Plot 5\n2017/7/15",
                subtitle = "No. of detected taxa\nqMiSeq = 91\nShotgun = 115")

# ---------------------------------------------- #
# Compile all figures
# ---------------------------------------------- #
# Share a single legend: strip it from each panel, add it as a grid cell.
g_legend <- get_legend(g1)
g5 <- g1 + theme(legend.position = "none", plot.margin = margin(.8,.8,.8,.8,"cm"))
g6 <- g2 + theme(legend.position = "none")
g7 <- g3 + theme(legend.position = "none")
g8 <- g4 + theme(legend.position = "none")
g_all <- plot_grid(g5, g6, g7, g8, NULL, g_legend,
                   ncol = 6, rel_widths = c(1,1,1,1,0.2,1),
                   labels = c("a","b","c","d",NA,NA),
                   align = "hv", axis = "lrbt")
g_obj <- list(g1, g2, g3, g4)

# ---------------------------------------------- #
# Save output
# ---------------------------------------------- #
ggsave(file = sprintf("%s/qMiSeq_Shotgun_All.pdf", output_folder),
       plot = g_all, width = 16, height = 7)
saveRDS(g_obj, sprintf("%s/qMiSeq_Shotgun_All.obj", output_folder))
9fc57f5a1662cba4016f68ad50a812406e8187af
99a79794582dfab5cf27cbe290410b3b06c7f4df
/00_scripts/20_GBM/GBM_13.R
7c94c37d8ab42ea61977473eb2a714cd616260db
[]
no_license
PLStenger/Pearl_Oyster_Colour_BS_Seq
01db29172513ec8a84689ea187eff787cdd52c87
c80162bd330573689f929554edae26cd32038917
refs/heads/master
2021-07-17T14:44:47.677414
2020-06-22T06:20:16
2020-06-22T06:20:16
183,000,705
1
0
null
null
null
null
UTF-8
R
false
false
444
r
GBM_13.R
#!/usr/bin/env Rscript
## For one BS-seq sample: for every unique value of column V1 of the input
## table, append that feature's name and the mean of column V2 to the
## sample's gene-body-methylation summary file.

setwd("/home/datawork-ihpe/Pearl_Oyster_Colour_BS_Seq/06_bismark")

dat <- read.table("Index_7.2-V-613_R1_paired_bismark_bt2_pe.deduplicated.bam_sorted_clean.bam.bed_correspondance.bed_base.txt")

# Hoisted out of the loop: the original recomputed unique(dat$V1) on every
# iteration and iterated with 1:length(...), which misbehaves (1:0) when
# the table is empty; seq_along() is safe for zero features.
feature_ids <- unique(dat$V1)

for (i in seq_along(feature_ids)) {
  pos_name <- as.character(feature_ids[i])
  newdata <- dat[which(dat$V1 == pos_name), ]
  # Compute before redirecting, then append this record to the summary file
  # and restore console output immediately.
  record <- c(pos_name, mean(newdata$V2))
  sink("Index_7.2-V-613_GBM.txt", append = TRUE)
  print(record)
  sink()
}
505b2860874fec9b97327dab2798ba5039b6c064
bcf64441bd84791260d509236fd45ce1e8bf86f2
/R/draw_serve_locations.R
14455bf8717e6fba952b023024bd6ea4a17d1a50
[]
no_license
buddyscott/esv
69e66d93fedb64022701ff717956764e32bedd2a
4bf8d8328b3df99c34d974567b7c09161ba83426
refs/heads/master
2022-12-15T18:46:43.550957
2020-09-05T14:30:56
2020-09-05T14:30:56
null
0
0
null
null
null
null
UTF-8
R
false
false
3,532
r
draw_serve_locations.R
## Plot serve locations on a tennis-court diagram: court surface, lines,
## net, serve regions, and the sampled serve/return positions (first and
## second serves, mirrored onto one half of the court), then save as JPG.
## Court-drawing helpers and the shot_df_sample data come from the three
## sourced project files below.
library(ggplot2)
library(dplyr)
library(DT)

source('static_variables.R')
source('plotting_utilities.R')
source('sample_data_prep.R')

g <- ggplot() +
  # Court surface rectangles, filled by in-bounds / out-of-bounds ID.
  geom_rect(data = draw_tennis_court_surface_ggplot(),
            mapping = aes(xmin = xmin, xmax = xmax, ymin = ymin, ymax = ymax, fill = ID)) +
  # Court lines and the (dashed) net.
  geom_path(data = draw_tennis_court_lines_ggplot(),
            mapping = aes(x = x, y = y, group = ID), color = 'white') +
  geom_path(data = draw_tennis_net_ggplot(),
            mapping = aes(x = x, y = y), color = 'white', linetype = 'dashed') +
  # Server positions for first/second serves ('1S'/'2S'); points with a
  # negative x are reflected through the origin so all serves are shown
  # from one side of the court.
  geom_point(data = shot_df_sample %>%
               filter(plc %in% c('1S', '2S')) %>%
               mutate(x = ifelse(striker_x_coordinate < 0, -striker_x_coordinate, striker_x_coordinate),
                      y = ifelse(striker_x_coordinate < 0, -striker_y_coordinate, striker_y_coordinate)),
             mapping = aes(x = x, y = y), color = 'white', alpha = 0.25) +
  # Returner positions, mirrored onto the opposite side (cyan).
  geom_point(data = shot_df_sample %>%
               filter(plc %in% c('1S', '2S')) %>%
               mutate(x = ifelse(returner_x_coordinate < 0, returner_x_coordinate, -returner_x_coordinate),
                      y = ifelse(returner_x_coordinate < 0, returner_y_coordinate, -returner_y_coordinate)),
             mapping = aes(x = x, y = y), color = 'cyan', alpha = 0.25) +
  # Serve-region overlay: dotted boundaries plus region numbers, drawn on
  # both halves (second geom_text negates coordinates and flips the angle).
  geom_path(data = draw_region_lines_ggplot(),
            mapping = aes(x = x, y = y, group = ID),
            color = 'white', alpha = 0.5, linetype = 'dotted') +
  geom_text(data = draw_region_numbers_ggplot(),
            mapping = aes(x = x, y = y, label = region_number),
            color = 'white', size = 5, alpha = 0.5, angle = 270) +
  geom_text(data = draw_region_numbers_ggplot(),
            mapping = aes(x = -x, y = -y, label = region_number),
            color = 'white', size = 5, alpha = 0.5, angle = 90) +
  scale_fill_manual(values = c('ob_surface' = '#658B4E', 'ib_surface' = '#2A5387')) +
  # Axis breaks at the court landmarks (baselines, alleys, service lines),
  # using the dimensions defined in static_variables.R.
  scale_y_continuous(limits = c(-max_y, max_y),
                     breaks = c(round(-baseline_y - doubles_alley_width, 3),
                                round(-baseline_y, 3),
                                0,
                                round(baseline_y, 3),
                                round(baseline_y + doubles_alley_width,3)),
                     expand = c(0,0)) +
  scale_x_continuous(limits = c(-max_x, max_x),
                     breaks = c(round(-baseline_x, 3),
                                round(-service_line, 3),
                                0,
                                round(service_line, 3),
                                round(baseline_x, 3)),
                     expand = c(0,0)) +
  guides(fill = FALSE, size = FALSE, alpha = FALSE, colour = FALSE) +
  ylab('y (m)') +
  xlab('x (m)') +
  # Equal aspect ratio so the court is not distorted.
  coord_fixed(ratio = 1, xlim = NULL, ylim = NULL, expand = TRUE) +
  theme(line = element_blank(),
        plot.margin = unit(c(0, 0, 0, 0), "mm"),
        legend.title = element_blank(),
        panel.background = element_rect(colour = NULL, fill = NULL))

ggsave('../plots/serve_locations.jpg', g, height = 150, width = 200, unit = 'mm')
a3b2be59e974102aa9cd8b657605ec757fd2f098
09024c65f99a196f63d742947ce8e3026d4b9088
/DEG-analysis.R
b5881f1739f45bd1aa788f38b44b1846dab7fc06
[]
no_license
Xianglaichi/MSI
f8c4527d6effca4606bf5a4fac96ea682b8453c6
f57d5888fd4dc3cd8ca2f808b783e7757c0d55ab
refs/heads/main
2023-02-13T18:05:28.020094
2021-01-06T09:04:45
2021-01-06T09:04:45
327,251,680
0
0
null
null
null
null
UTF-8
R
false
false
3,012
r
DEG-analysis.R
## Differential-expression analysis of MSI-High vs MSI-Low samples across
## cancer types with limma, followed by a per-cancer bar chart of the number
## of up/down-regulated genes. Operates on pre-existing globals:
## datalist_new1 (expression matrices), summaryrfs (sample annotations with
## an MSI column), cancername1 (cancer-type names).

# Align each expression matrix's columns with its annotation table's sample IDs.
for (i in 1:30){
  print(i)
  datalist_new1[[i]]<-datalist_new1[[i]][,match(summaryrfs[[i]]$id,colnames(datalist_new1[[i]]))]}

# Build a design matrix per cancer: indicator columns High/Low from the MSI
# label; drop the id and the first 8 annotation columns, then coerce to a
# numeric matrix for limma.
pan_group<-list()
for (i in 1:30){
  print(i)
  pan_group[[cancername1[i]]]<-summaryrfs[[i]]
  pan_group[[cancername1[i]]]<-within(pan_group[[cancername1[i]]],{
    High<-NA
    Low<-NA
    High[MSI == 'MSI-H'] = '1'
    High[MSI == 'MSI-L'] = '0'
    Low[MSI == 'MSI-H'] = '0'
    Low[MSI == 'MSI-L'] = '1'
  })
  rownames(pan_group[[cancername1[i]]])<-pan_group[[cancername1[i]]]$id
  pan_group[[cancername1[i]]]$id<-NULL
  pan_group[[cancername1[i]]]<-pan_group[[cancername1[i]]][,-c(1:8)]
}
pan_group<-lapply(pan_group,function(df){
  df[] <- lapply(df, function(x) {
    # Both branches do the same conversion; kept as written in the original.
    if(is.factor(x)) as.numeric(as.character(x)) else as.numeric(as.character(x))
  })
  return(df)
})
pan_group<-lapply(pan_group,as.matrix)

# Fit the High - Low contrast with limma and keep the full moderated-t table.
# NOTE(review): only cancers 27:30 are fitted here, while the loops below
# index DEG[[1:26]] -- the ranges look inconsistent; confirm against the
# full analysis before reuse.
DEG<-list()
for (i in 27:30){
  print(i)
  fit <- lmFit(datalist_new1[[i]],pan_group[[i]])
  contrast.matrix <- makeContrasts(High - Low,levels=pan_group[[i]])
  fit2 <- contrasts.fit(fit,contrast.matrix)
  fit2 <- eBayes(fit2)
  # NOTE(review): `lfc <- log(1,2)` assigns 0 to a variable rather than
  # passing the `lfc=` argument by name; `lfc = log(1,2)` was likely
  # intended (same effective threshold of 0) -- confirm.
  all_diff <- topTable(fit2, adjust.method = 'fdr',coef=1,p.value = 1,lfc <- log(1,2),number = 60000,sort.by = 'logFC')
  all_diff$logP<- -log10(all_diff$adj.P.Val)
  DEG[[cancername1[i]]]<-all_diff
  DEG[[cancername1[i]]]$ID<-rownames(DEG[[cancername1[i]]])
}
cancernameDEG<-names(DEG)
DEGtest<-DEG
DEG<-DEGtest

# Tag each table with its cancer name and gene IDs.
for (i in 1:26){
  print(i)
  DEG[[i]]$cancer<-cancernameDEG[i]
  DEG[[i]]$gene<-rownames(DEG[[i]])
}

# Significance filters: FDR < 0.05 and |logFC| > 0.5.
DEG<-lapply(DEG,function(df){
  df<-df%>%filter(df$adj.P.Val<0.05)
})
DEG<-lapply(DEG,function(df){
  df<-df%>%filter(abs(df$logFC)>0.5)
})

# Label direction of regulation by the sign of logFC.
for (i in 1:26){
  print(i)
  DEG[[i]]<-within(DEG[[i]],{
    group<-NA
    group[logFC > 0 ] = 'up-regulated'
    group[logFC < 0 ] = '-down-regulated'
  })
}
DEG<-lapply(DEG,function(df){
  df$group<-as.factor(df$group)
  return(df)
})

# Per cancer: two rows (High = down-count first level, Low = up-count) plus
# the total, for the grouped bar chart below.
DEGbar<-data.frame()
for (i in 1:26){
  print(i)
  DEGbar[2*i-1,1]<-cancernameDEG[i]
  DEGbar[2*i-1,2]<-summary(DEG[[i]]$group)[1]
  DEGbar[2*i-1,3]<-'High'
  DEGbar[2*i-1,4]<-summary(DEG[[i]]$group)[1]+summary(DEG[[i]]$group)[2]
  DEGbar[2*i,1]<-cancernameDEG[i]
  DEGbar[2*i,2]<-summary(DEG[[i]]$group)[2]
  DEGbar[2*i,3]<-'Low'
  DEGbar[2*i,4]<-summary(DEG[[i]]$group)[1]+summary(DEG[[i]]$group)[2]
}
colnames(DEGbar)<-c('cancer','value','Group','sum')

# Horizontal grouped bar chart, cancers ordered by total DEG count.
DEGbar%>%mutate(cancer = fct_reorder(cancer, desc(sum)))%>% # reorder bars by descending total; pipe continues
  ggplot(aes(x=cancer, y=value,fill = Group)) +
  geom_bar(stat="identity",alpha=0.7, width=0.8,position=position_dodge(0.8)) +
  theme_bw()+
  scale_fill_manual(values=c('High' = '#FF3333','Low' = '#0099FF'))+ # manually set the two group colors
  coord_flip() +
  xlab("") +
  ylab("")+
  theme(axis.text.x = element_text(size = 13, angle = 0, hjust = 0, vjust = 0),
        axis.text.y = element_text(size = 15, angle = 0, hjust = 0, vjust = 0))

write.csv(DEGbar,file = "/DEGbar.csv")
92f25660751dc728a97fe4a877d641545e693ec5
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/fullfact/examples/observLmer.Rd.R
be0bf9147a40b4fb5013f3e6d59f92b771c69cc3
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
287
r
observLmer.Rd.R
## Extracted example code for fullfact::observLmer (variance components for
## normal data): fits dam/sire variance components on the bundled Chinook
## salmon offspring-length dataset and prints the result.
library(fullfact)

### Name: observLmer
### Title: Variance components for normal data
### Aliases: observLmer

### ** Examples

data(chinook_length) #Chinook salmon offspring length
length_mod1<- observLmer(observ=chinook_length,dam="dam",sire="sire",response="length")
length_mod1
ed6dddd75c394998eccba121183bfe1e3a9fa203
160622f50fc2fe9a6aaa3095849f7a8bd2caa496
/man/b_grid.Rd
5581746d2cd5e8c8590e059c48edd32a69268b69
[]
no_license
jayhesselberth/platetools
53fd0d16deca84ec6efd0c1f838d7a9fed835a1b
617a33fc4a3482b85fc1fd8b38dcc82a53a10176
refs/heads/master
2020-03-22T01:45:56.557163
2018-06-25T13:36:13
2018-06-25T13:36:13
139,328,253
1
0
null
2018-07-01T12:42:49
2018-07-01T12:42:48
null
UTF-8
R
false
true
1,013
rd
b_grid.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/b_grid.R \name{b_grid} \alias{b_grid} \title{Plots multiple b-scored normalised platemaps} \usage{ b_grid(data, well, plate_id, plate = 96) } \arguments{ \item{data}{Numerical values to be plotted} \item{well}{Vector of well identifiers e.g "A01"} \item{plate_id}{Vector of plate identifiers e.g "Plate_1"} \item{plate}{Number of wells in complete plate (96, 384 or 1536)} } \value{ ggplot plot } \description{ Transforms numerical values using the b-score normalisation process to account for row and column effects. Uses well and plate labels to plot the normalised values in the form of microtitre plates. Works for 96, 384 and 1536 well plates. } \examples{ df01 <- data.frame(well = num_to_well(1:96), vals = rnorm(96), plate = 1) df02 <- data.frame(well = num_to_well(1:96), vals = rnorm(96), plate = 2) df <- rbind(df01, df02) b_grid(data = df$vals, well = df$well, plate_id = df$plate, plate = 96) }
75ed5fdbb72e09cc3034ad857816f9976ecdb01d
bc930a6e02dd38d046a7904b31b4d3557ef8a284
/R/event_counter.R
9aa6c5d23388db738237cff53bc7adbb31dff804
[ "MIT" ]
permissive
kalimu/shinyEventLogger
ea66443f427f60d4e45b1405c7fcb12b247c977b
d6dd6d5ceec57b2e44cff8fa7a83b2e018973396
refs/heads/master
2021-07-25T09:08:21.928018
2019-02-11T06:59:48
2019-02-11T07:00:29
157,194,640
30
5
NOASSERTION
2020-04-28T22:08:28
2018-11-12T10:15:58
R
UTF-8
R
false
false
629
r
event_counter.R
# Retrieve the per-session event counter stored in the dynamically scoped
# `log_settings_session` environment. dynGet() searches the calling stack,
# so this only works when called (directly or indirectly) from a frame in
# which `log_settings_session` is visible; otherwise the `ifnotfound`
# stop() fires with a pointer to 'set_logging_session'.
get_event_counter <- function() {

  settings_session <- dynGet("log_settings_session",
                             minframe = 1L,
                             inherits = TRUE,
                             ifnotfound = stop(paste0(
                               "'log_settings_session' not found. ",
                               "Have you call 'set_logging_session'?"
                             )))

  settings_session$event_counter

} # end of get_event_counter

# Increment the per-session event counter in place. Environments have
# reference semantics, so assigning into `settings_session` mutates the
# caller's `log_settings_session` directly -- no value needs to be
# returned. NOTE(review): unlike get_event_counter(), no `ifnotfound` is
# supplied here, so a missing setting raises dynGet's default error.
increment_event_counter <- function() {

  settings_session <- dynGet("log_settings_session",
                             minframe = 1L,
                             inherits = TRUE)

  settings_session$event_counter <- settings_session$event_counter + 1

} # end of increment_event_counter
2ec697bde65f7fd94ba5bfea48788b8b55c56478
f6c871a87f64a5ca6d8dcf6931e05e6dc0fb0c1f
/PreferencesPaper/SweepReaderFC.R
a97527ebe0f630c189584265947c5590625c5ba3
[]
no_license
CreanzaLab/SongEvolutionModel
073342a07eaeca061f92b6798b2ab1ff40567b58
449087c85280e11b97924575d2e3c618169a0f8b
refs/heads/master
2020-06-13T18:23:13.603932
2019-08-15T22:57:51
2019-08-15T22:57:51
194,747,721
0
1
null
2019-08-15T22:57:52
2019-07-01T21:52:31
R
UTF-8
R
false
false
5,978
r
SweepReaderFC.R
## Read stitched parameter-sweep results for the female-choice song-evolution
## model and produce the summary figures: one multi-panel PDF per female
## preference type (QuadPlots), per-run average-song heatmaps (SongPlot),
## and syllable-frequency trend plots. Plotting/loading helpers (LoadData,
## QuadPlots, ReloadParam, getAverageSong, SongPlot) come from the sourced
## project file below.
setwd("D:/Prelim2")
rm(list=objects())
source("Source_SweepSquareReaderFC.R")
#install_github("CreanzaLab/SongEvolutionModel/R-Package", force=TRUE, ref="FemalChoiceExpansion")

#Stitcher(file.path(getwd(),"StaticFinal"))
#Stitcher(file.path(getwd(),"SkipFinal"))

# Load the stitched sweep (2000 birds, 20 replicates) and order runs by
# preference type, learning strategy, then (reversed) Vertical and Social
# transmission flags -- the order used on every x-axis below.
Data <- LoadData(file.path(getwd(),"StaticFinal", "Stitched"), 2000, 20)
Ordered <- order(Data$FinalData2$Preference,
                 Data$FinalData2$PLrnStrtgy,
                 rev(as.logical(Data$FinalData2$Vertical)),
                 rev(as.logical(Data$FinalData2$Social)),
                 decreasing = FALSE)
# Axis labels for QuadPlots; Label4 is a placeholder overwritten per figure.
Labels <- list(Label1=paste(c("TRUE", "FALSE")),
               Label2=paste(c("TRUE", "FALSE")), #Tutor
               Label3=levels(as.factor(Data$FinalData2$PLrnStrtgy)),
               Label4="lewl")
write.csv(Data$FinalData2[,], "TableofKeyParams.csv")

# One PDF per preference type: each covers 24 runs of the ordered sweep and
# uses the matching level of Preference as Label4.
pdf("Common.pdf", width=6, height=8.5)
Labels[['Label4']] <- levels(as.factor(Data$FinalData2$Preference))[1]
QuadPlots(Labels, Ordered, subset=1:24, FALSE, mar=c(0,3,1.5,.5), End=FALSE, mfrow=c(5,1),
          Color = c(rgb(1,1,1), rgb(1,1,1),rgb(.85,.95,1), rgb(.8,.9,.95), rgb(.75,.85,.9), rgb(.7,.8,.85),rgb(.65,.75,.8) ))
mtext(side=2, at=c(.5, .9), line=-.45, text=c("Vertical","Social"), cex=.7, font=2)
dev.off()

pdf("Match.pdf", width=6, height=8.5)
Labels[['Label4']] <- levels(as.factor(Data$FinalData2$Preference))[2]
QuadPlots(Labels, Ordered, subset=25:48, FALSE, mar=c(0,3,1.5,.5), End=FALSE, mfrow=c(5,1),
          Color = c(rgb(1,1,1), rgb(1,1,1),rgb(.85,.95,1), rgb(.8,.9,.95), rgb(.75,.85,.9), rgb(.7,.8,.85),rgb(.65,.75,.8) ))
mtext(side=2, at=c(.5, .9), line=-.45, text=c("Vertical","Social"), cex=.7, font=2)
dev.off()

pdf("Noise.pdf", width=6, height=8.5)
Labels[['Label4']] <- levels(as.factor(Data$FinalData2$Preference))[3]
QuadPlots(Labels, Ordered, subset=49:72, FALSE, mar=c(0,3,1.5,.5), End=FALSE, mfrow=c(5,1),
          Color = c(rgb(1,1,1), rgb(1,1,1),rgb(.85,.95,1), rgb(.8,.9,.95), rgb(.75,.85,.9), rgb(.7,.8,.85),rgb(.65,.75,.8) ))
mtext(side=2, at=c(.5, .9), line=-.45, text=c("Vertical","Social"), cex=.7, font=2)
dev.off()

pdf("Rare.pdf", width=6, height=8.5)
Labels[['Label4']] <- levels(as.factor(Data$FinalData2$Preference))[4]
QuadPlots(Labels, Ordered, subset=73:96, FALSE, mar=c(0,3,1.5,.5), End=FALSE, mfrow=c(5,1),
          Color = c(rgb(1,1,1), rgb(1,1,1),rgb(.85,.95,1), rgb(.8,.9,.95), rgb(.75,.85,.9), rgb(.7,.8,.85),rgb(.65,.75,.8) ))
mtext(side=2, at=c(.5, .9), line=-.45, text=c("Vertical","Social"), cex=.7, font=2)
dev.off()

pdf("Rep.pdf", width=6, height=8.5)
Labels[['Label4']] <- levels(as.factor(Data$FinalData2$Preference))[5]
QuadPlots(Labels, Ordered, subset=97:120, FALSE, mar=c(0,3,1.5,.5), End=FALSE, mfrow=c(5,1),
          Color = c(rgb(1,1,1), rgb(1,1,1),rgb(.85,.95,1), rgb(.8,.9,.95), rgb(.75,.85,.9), rgb(.7,.8,.85),rgb(.65,.75,.8) ))
mtext(side=2, at=c(.5, .9), line=-.45, text=c("Vertical","Social"), cex=.7, font=2)
dev.off()

pdf("SexySyls.pdf", width=6, height=8.5)
Labels[['Label4']] <- levels(as.factor(Data$FinalData2$Preference))[6]
QuadPlots(Labels, Ordered, subset=121:144, FALSE, mar=c(0,3,1.5,.5), End=FALSE, mfrow=c(5,1),
          Color = c(rgb(1,1,1), rgb(1,1,1),rgb(.85,.95,1), rgb(.8,.9,.95), rgb(.75,.85,.9), rgb(.7,.8,.85),rgb(.65,.75,.8) ))
mtext(side=2, at=c(.5, .9), line=-.45, text=c("Vertical","Social"), cex=.7, font=2)
dev.off()

pdf("Social.pdf", width=6, height=8.5)
Labels[['Label4']] <- levels(as.factor(Data$FinalData2$Preference))[7]
QuadPlots(Labels, Ordered, subset=145:168, FALSE, mar=c(0,3,1.5,.5), End=FALSE, mfrow=c(5,1),
          Color = c(rgb(1,1,1), rgb(1,1,1),rgb(.85,.95,1), rgb(.8,.9,.95), rgb(.75,.85,.9), rgb(.7,.8,.85),rgb(.65,.75,.8) ))
mtext(side=2, at=c(.5, .9), line=-.45, text=c("Vertical","Social"), cex=.7, font=2)
dev.off()

# For each run that produced a male-song file, rebuild the average song
# matrix and plot it to its own PDF.
path <- file.path(getwd(), "StaticFinal")
for(i in 1:168){
  if(file.exists(file.path(path, paste0(i, "MSong.csv")))){
    P <- ReloadParam(file.path(path, "Stitched", paste0(i, ".semp")))
    Songs <- read.csv(file.path(path, paste0(i, "MSong.csv")), header=FALSE, fileEncoding = "UTF-8-BOM")
    Songs <- getAverageSong(P, Songs)
    Songs <- matrix(unlist(Songs), ncol=101, nrow=P$MaxRSize)
    pdf(paste0("Songs/Songer-",i, ".pdf"))
    SongPlot(P,Songs, thin=1)
    dev.off()
  }
}

# Per run: mean frequency (at the final column, scaled per bird) of
# template syllables (rows 1-5), rare-template syllables (rows 6-7) and
# non-template syllables (rows 8+).
SyllableFrequency <- data.frame(Template=numeric(168), RareTemplate=numeric(168), NonTemplate=numeric(168))
for(i in 1:168){
  if(file.exists(file.path(path, paste0(i, "MSong.csv")))){
    P <- ReloadParam(file.path(path, "Stitched", paste0(i, ".semp")))
    Songs <- read.csv(file.path(path, paste0(i, "MSong.csv")), header=FALSE, fileEncoding = "UTF-8-BOM")
    Songs <- getAverageSong(P, Songs)
    Songs <- matrix(unlist(Songs), ncol=101, nrow=P$MaxRSize)
    SyllableFrequency[i,] <-c(mean(Songs[1:5,101])/P$numBirds,
                              mean(Songs[6:7,101])/P$numBirds,
                              mean(Songs[8:500,101])/P$numBirds)
  }
}
SyllableFrequency[,'Temp-nonTemp'] <-SyllableFrequency$Template - SyllableFrequency$NonTemplate

# Trend plots across the ordered sweep; grey verticals mark the boundaries
# between 24-run preference blocks.
par(mfrow=c(3,1), mgp=c(1.5, .5, 0), mar=c(2.5,2.5,1,1))
plot(1:168,SyllableFrequency$Template[Ordered], type='b', pch=19, ylim=c(0,1), cex=.6, lty=6,
     panel.first = {abline(v=seq(24, 168, by=25)+.5, col="grey80")})
plot(1:168,SyllableFrequency$NonTemplate[Ordered], type='b', pch=19, ylim=c(0,1), cex=.6, lty=6,
     panel.first = {abline(v=seq(24, 168, by=25)+.5, col="grey80")})
plot(1:168,SyllableFrequency$`Temp-nonTemp`[Ordered], type='b', pch=19, ylim=c(-.2,1), cex=.6, lty=6,
     panel.first = {abline(v=seq(24, 168, by=25)+.5, col="grey80")})

Results <- read.csv("TableofMagic.csv")
9ae8b9dfbca3501716ccadadea58abd2f474bd9e
77645a8337ba03357128505872dc77cb4d49e2bf
/R/lookup-title-descr.R
b430727821237440ea6f4bb55add30617457c22e
[ "MIT" ]
permissive
jameelalsalam/naicsmatch
0316ae97cb212d791229d04ad64789b39ac39191
a43a3f408ca2eb3be4b10c2120a03376ad830c0f
refs/heads/main
2021-07-11T17:02:53.204673
2020-04-01T10:09:47
2020-04-01T10:09:47
162,162,674
0
0
null
null
null
null
UTF-8
R
false
false
837
r
lookup-title-descr.R
#' Lookup Titles and Descriptions of NAICS Codes
#'
#' Convenience functions. Focus on most recent NAICS code classifications.
#' Lookup for prior vintages may be added in the future.
#'
#' @param naics a character vector of NAICS codes to look up
#' @param naics_listing a lookup table of naics codes and titles, defaults to most recent
#' @aliases naics_descr
#' @import dplyr
#' @export
#' @examples
#' naics_title(c("332710", "332722", "332723"))
naics_title <- function(naics, naics_listing = naicsmatch::naics_2017) {
  # Join the requested codes against the supplied lookup table and return
  # the matching titles (NA where a code is not found).
  # Fix: the join previously referenced `naics_2017_listing`, an undefined
  # name, instead of the `naics_listing` argument.
  tibble::tibble(naics = naics) %>%
    left_join(naics_listing, by = "naics") %>%
    pull(title)
}

#' @rdname naics_title
#' @export
#' @examples
#' naics_descr(c("332710", "332722", "332723"))
naics_descr <- function(naics, naics_listing = naicsmatch::naics_2017) {
  # Same join as naics_title(), returning the description column.
  # The lookup table is now an argument (default unchanged) for consistency
  # with naics_title(); previously the table name was hard-coded.
  tibble::tibble(naics = naics) %>%
    left_join(naics_listing, by = "naics") %>%
    pull(descr)
}
1b48b3d817dc03d4154deda30f0cbf731e1324fb
c0fec0769584c9bd2d2a322f948e99283253ab9e
/man/Motif-class.Rd
3ff02f33d9ab5ceda62e6da1bf0afd79e36d73eb
[ "MIT" ]
permissive
Puriney/signac
ed88d42b8f9af470badf9fee4ff6f867a6eaea73
cd99cf2212320785f34dfd5db58e14b59e89edc5
refs/heads/master
2023-01-28T05:41:10.679212
2020-12-04T17:13:54
2020-12-04T17:13:54
273,932,199
0
0
NOASSERTION
2020-09-16T15:15:39
2020-06-21T15:30:09
R
UTF-8
R
false
true
1,028
rd
Motif-class.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/objects.R \docType{class} \name{Motif-class} \alias{Motif-class} \alias{Motif} \title{The Motif class} \description{ The Motif class is designed to store DNA sequence motif information, including motif PWMs or PFMs, motif positions, and metadata. } \section{Slots}{ \describe{ \item{\code{data}}{A sparse, binary, feature x motif matrix. Columns correspond to motif IDs, rows correspond to genomic features (peaks or bins). Entries in the matrix should be 1 if the genomic feature contains the motif, and 0 otherwise.} \item{\code{pwm}}{A named list of position weight matrices} \item{\code{motif.names}}{A list containing the name of each motif} \item{\code{positions}}{A \code{\link[GenomicRanges]{GRangesList}} object containing exact positions of each motif.} \item{\code{meta.data}}{A dataframe for storage of additional information related to each motif. This could include the names of proteins that bind the motif.} }} \concept{motifs}
3990d20fbcd9e8247d0d078bcbe717a16f20d93f
c7add6406714aad477f5b3690914fcff0befeed9
/R/consensusPeakList.R
588986f7dc8bec38014519a35cc28658e50e3f86
[]
no_license
gtluu/rspeclust
37a006aa53e7504a004a9b8475f6226fed37b3d1
1a212a18cc11ffefba3ca4e46d051d7201873218
refs/heads/master
2022-12-05T14:21:40.634741
2020-08-20T21:31:46
2020-08-20T21:31:46
288,889,087
0
0
null
null
null
null
UTF-8
R
false
false
1,458
r
consensusPeakList.R
#' Get Consensus Peak List
#'
#' Generate a consensus peak list with average m/z values from multiple peak
#' lists.
#'
#' @param peakLists \code{list} of single column \code{dataframes} with the
#'   column name 'mz'.
#' @param tol \code{double} value to be used for m/z tolerance when merging
#'   peak lists.
#' @param cutoff \code{numeric} value; peaks that appear in \code{n} peak lists
#'   are kept if \code{n >= cutoff}.
#' @return \code{dataframe} with each row representing shared peaks from
#'   multiple peak lists based on m/z value along with a column for number of
#'   peak lists it appears in.
#' @examples
#'
#' peakList1 <- data.frame('mz'=c(615.3456, 489.6651, 375.1968))
#' peakList2 <- data.frame('mz'=c(615.3589, 453.3596, 357.9618))
#' peakList3 <- data.frame('mz'=c(615.3358, 861.3456, 198.3557))
#'
#' peakLists <- list(peakList1, peakList2, peakList3)
#'
#' consensus <- consensusPeakList(peakLists, tol=0.2, cutoff=0.5)
#'
#' @export
consensusPeakList <- function(peakLists, tol, cutoff) {
  # Combine peak lists into a single wide data frame: one m/z column per
  # input list, rows aligned within `tol`.  (Note: roxygen tag fixed from
  # @example to @examples so the inline example renders/runs.)
  bigDf <- combinePeakLists(peakLists, tol = tol)
  # Count how many peak lists each consensus peak was observed in, and keep
  # only peaks meeting the cutoff.  Computing `n` before adding any extra
  # columns avoids the original add-then-strip shuffle of the N column.
  n <- rowSums(!is.na(bigDf))
  keep <- n >= cutoff
  bigDf <- bigDf[keep, , drop = FALSE]
  n <- n[keep]
  # Average m/z across the lists containing each peak, then append counts.
  bigDf$average <- rowMeans(bigDf, na.rm = TRUE)
  bigDf$N <- n
  bigDf
}
80dc39decdf534fa563f12405d9adebf070b7da7
cf770445f090d49f1a93b706f881a32b35b6777d
/R/make_target_ls.R
b6e29069cce37fd5ed837a48714c935c672e7cf7
[]
no_license
devincaughey/estsubpop
70ad85b33a52353c62816fe8676082498cac2bfb
9bf4a32f8ca04d29c8307c8923e5d3264778a830
refs/heads/master
2020-03-25T09:22:01.395227
2019-06-11T02:26:10
2019-06-11T02:26:10
143,662,471
3
0
null
null
null
null
UTF-8
R
false
false
857
r
make_target_ls.R
#' @export
make_target_ls <- function (design_list, formulae_list, periods, period_var = "YEAR") {
  # Build, for each period, a named list of weighted frequency tables: one
  # table per (design, formula) pair, computed on the subset of the design
  # whose `period_var` column equals that period.  Tables whose total
  # frequency is zero are dropped.
  out <- setNames(vector("list", length(periods)), periods)
  for (p in seq_along(periods)) {
    period_targets <- list()
    for (d in seq_along(design_list)) {
      dsgn <- design_list[[d]]
      formulae <- formulae_list[[d]]
      for (f in seq_along(formulae)) {
        # survey's subset() method evaluates the expression against the
        # design's variables, falling back to this frame for `dsgn` etc.
        in_period <- subset(dsgn, dsgn$variables[, period_var] == periods[p])
        tab <- as.data.frame(survey::svytable(formulae[[f]], in_period))
        tab$Freq <- tab$Freq / length(formulae)  # avoid double-counting
        if (sum(tab$Freq) > 0) {
          # Name each table "<design index> | <collapsed formula>"; append
          # rather than assign by name so duplicate names are preserved.
          nm <- paste(d, paste(formulae[[f]], collapse = ""), sep = " | ")
          period_targets <- c(period_targets, setNames(list(tab), nm))
        }
      }
    }
    out[[p]] <- period_targets
  }
  out
}
6070785668ec8d1941ce1986eb904461dc793e2f
a4c82295d300bae1b76287504d968b61836a222f
/tests/testthat.R
2530bb5684295dd4cc54e7302e20db6611b7bcfb
[ "MIT" ]
permissive
sainathadapa/stubthat
29ddbd4166509d9abb864a14a24eb908965d866c
6fcfcdbba6f605ef5dd093d455822ade30643682
refs/heads/master
2022-05-12T09:14:02.326260
2022-05-07T13:31:08
2022-05-07T13:31:08
41,420,394
20
6
NOASSERTION
2022-05-03T12:39:37
2015-08-26T10:47:41
R
UTF-8
R
false
false
60
r
testthat.R
# Standard R package test entry point: load testthat plus the package under
# test, then run every test file under tests/testthat/.
library(testthat)
library(stubthat)

test_check("stubthat")
16976c1289a80ce747854df571acc5a3d605fbfe
307daa5d64a3e1e5ab2b6e4e85c83133e9d43e7b
/man/getCVLData.Rd
c582ea951b8b6e37cd957d2e460cefa771b65747
[]
no_license
hkim207/ahri-1
80be69695ee74acb2f10fc9b63b74998f02e04f3
d2a671671c1b8cf66bc97d6d7135581254ebb30e
refs/heads/master
2022-12-13T16:51:37.618854
2020-09-26T20:05:22
2020-09-26T20:05:22
null
0
0
null
null
null
null
UTF-8
R
false
true
358
rd
getCVLData.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/getVLData.R \name{getCVLData} \alias{getCVLData} \title{getCVLData} \usage{ getCVLData(Args) } \arguments{ \item{Args}{requires Args (\code{\link{setArgs}}) for filepath and setting age} } \value{ data.frame } \description{ Pulls in Community Viral Load data } \keyword{internal}
6266cd224d3f5d97f88f42a275cda6027f8141c0
2132efc961fd422dea6ba2f6ab205e14993556e4
/tests/testthat/test_biodiversidad.R
e61e138e0d098dfc5fb9afc7031e36303d5993e4
[]
no_license
datasketch/GanaderiaSostenible
3f62e05879535520a2acc7105a1b120f1928f0af
4e78982713bfe6b1e4720d71c8a8ac50d0f7f703
refs/heads/master
2020-09-09T16:00:53.379680
2020-03-02T15:28:00
2020-03-02T15:28:00
221,488,970
1
2
null
null
null
null
UTF-8
R
false
false
3,977
r
test_biodiversidad.R
# Unit tests for the biodiversity estimators biodiv_area() / biodiv_area2()
# (species richness predicted from area, region, and land-cover type).
context("Biodiversidad")

test_that("Biodiversidad works",{
  # Smoke tests: the estimator runs for several area / region / cover-type
  # combinations (results are printed, not asserted).
  area <- 10
  region <- "Eje Cafetero"
  pop2 <- biodiv_area(10, "Eje Cafetero", "bosque_secundario")
  pop2
  pop2 <- biodiv_area(1000, region, "bosque_secundario")
  pop2
  area <- 10
  region <- "Bajo Magdalena"
  tipo_cobertura <- "bosque_secundario"
  pop <- biodiv_area(10, region, "bosque_secundario")
  pop2
  pop2 <- biodiv_area(1000, region, "bosque_secundario")
  pop2
  areas <- c(100)
  region <- "Eje Cafetero"
  total <- biodiv_area(areas, region, t = 10, tipo_cobertura = "arboles_dispersos")
  total
})

test_that("Biodiversidad aves 2", {
  region <- "Bajo Magdalena"
  tipo_cobertura <- "bosque_secundario"
  area <- 10
  region <- "Bajo Magdalena"
  tipo_cobertura <- "bosque_secundario"
  especies1 <- biodiv_area(area, region, tipo_cobertura)
  especies1
  especies2 <- biodiv_area2(area, region, tipo_cobertura)
  especies2
  # Edge cases: zero and missing area.
  biodiv_area2(0, region, tipo_cobertura)
  biodiv_area2(NA, region, tipo_cobertura)
  #biodiv_area2(NULL, region, tipo_cobertura)
  expect_equal(0,biodiv_area2(0, "Eje Cafetero", "bosque_primario"))
  expect_equal(as.numeric(NA), biodiv_area2(NA, region, tipo_cobertura))
  # Cover types sharing a species-area curve must give identical estimates.
  expect_equal(biodiv_area2(1, "Eje Cafetero", "bosque_secundario"),biodiv_area2(1, "Eje Cafetero", "bosque_primario"))
  expect_equal(biodiv_area2(1, "Eje Cafetero", "silvopastoriles"),biodiv_area2(1, "Eje Cafetero", "arboles_dispersos"))
  expect_equal(biodiv_area2(1, "Eje Cafetero", "cercas_vivas"),biodiv_area2(1, "Eje Cafetero", "arboles_dispersos"))
  expect_equal(0, biodiv_area2(area = 0, region = region, tipo_cobertura = 'silvopastoriles'))
  ## Known reference values (rounded species counts).
  expect_equal(26,round(biodiv_area2(1, "Eje Cafetero", "bosque_primario")))
  expect_equal(4,round(biodiv_area2(1, "Eje Cafetero", "silvopastoriles")))
  expect_equal(691,round(biodiv_area2(301, "Eje Cafetero", "bosque_primario")))
  expect_equal(38,round(biodiv_area2(1001, "Eje Cafetero", "silvopastoriles")))

  # Load the sars package (species-area relationship fitting).
  library (sars)
  # Species area relationship
  ### Example for predicting the number of species.
  # # Load the fitted area-species functions shipped with the package.
  load(system.file("biodiversity", "funciones_area_especies.RData", package = "GanaderiaSostenible"))
  # Predict the number of species given the area for each region.
  # Note that area is in square meters (1 ha = 10,000 m2).
  # sar_pred(Bajo_Magdalena_bosque_secundario, area = c(10000, 100000, 1000000, 10000000)) # predict 1ha, 10ha, 100ha, 1000ha
  # sar_pred(Bajo_Magdalena_silvopastoriles, area = c(10000, 100000, 1000000, 10000000)) # predict 1ha, 10ha, 100ha, 1000ha
  # sar_pred(Boyaca_y_Santander_bosque_secundario, area = c(10000, 100000, 1000000, 10000000)) # predict 1ha, 10ha, 100ha, 1000ha
  # sar_pred(Boyaca_y_Santander_silvopastoriles, area = c(10000, 100000, 1000000, 10000000)) # predict 1ha, 10ha, 100ha, 1000ha
  # sar_pred(Eje_Cafetero_bosque_secundario, area = c(10000, 100000, 1000000, 10000000)) # predict 1ha, 10ha, 100ha, 1000ha
  # sar_pred(Eje_Cafetero_silvopastoriles, area = c(10000, 100000, 1000000, 10000000)) # predict 1ha, 10ha, 100ha, 1000ha
  # sar_pred(Piedemonte_del_Meta_bosque_secundario, area = c(10000, 100000, 1000000, 10000000)) # predict 1ha, 10ha, 100ha, 1000ha
  # sar_pred(Piedemonte_del_Meta_silvopastoriles, area = c(10000, 100000, 1000000, 10000000)) # predict 1ha, 10ha, 100ha, 1000ha
  # sar_pred(Valle_del_Rio_Cesar_bosque_secundario, area = c(10000, 100000, 1000000, 10000000)) # predict 1ha, 10ha, 100ha, 1000ha
  # sar_pred(Valle_del_Rio_Cesar_silvopastoriles, area = c(10000, 100000, 1000000, 10000000)) # predict 1ha, 10ha, 100ha, 1000ha
  #sar_pred(Eje_Cafetero_bosque_secundario, area = c(10000, 100000, 1000000, 10000000)) # predict 1ha, 10ha, 100ha, 1000ha
  # The package estimate for 1 ha must match a direct sars prediction.
  x <- sars::sar_pred(Eje_Cafetero_bosque_secundario, area = c(10000, 100000, 1000000, 10000000)) # predict 1ha, 10ha, 100ha, 1000ha
  expect_equal(x$Prediction[1], biodiv_area2(1, "Eje Cafetero", "bosque_secundario"))
})
52101fc949aad3da2b8c2b328cf2eb271c09ab3d
b3affc2e8d14674a9db30759047077fc0ae72301
/Fin3380/Fin3380_Quiz4/fin3380_quiz4_mywork.R
dc0f43ff09324088a7e6bc7a58976f3630367921
[]
no_license
HeyBrawlStar/RstudioPractice
57e46ed61ac347dfbf229949e097882d9e77f51c
b520c658c93a38b8e1fc06421b6e4b7d641349df
refs/heads/master
2023-01-24T02:45:09.543682
2020-12-06T01:56:11
2020-12-06T01:56:11
295,978,579
0
0
null
null
null
null
UTF-8
R
false
false
2,042
r
fin3380_quiz4_mywork.R
# FIN3380 Quiz 4: compute a 14-day RSI (Wilder smoothing) for the Dow Jones
# index and plot it against the index level for 2006.

library(zoo)
library(dplyr)

# Script assumes dj.csv lives in this directory.
setwd("/Users/guangyaohe/Desktop/FIN3380/Fin3380_Quiz4")

dj.rsi <- read.csv("dj.csv")
# Drop zero observations.  (Fix: the original used
# dj.rsi[-which(dj.rsi$dji == 0), ], which silently drops EVERY row when no
# zeros are present, because -integer(0) selects nothing.)
dj.rsi <- dj.rsi[dj.rsi$dji != 0, ]
dj.rsi <- na.omit(dj.rsi)

# Daily change in the index.
dj.rsi <- dj.rsi %>%
  select(date, dji) %>%
  arrange(date) %>%
  mutate(date = as.Date(date),
         delta = dji - lag(dji, 1))
head(dj.rsi, 3)

# Indicator columns for up and down days.
dj.rsi <- dj.rsi %>%
  mutate(up = ifelse(delta > 0, 1, 0),
         down = ifelse(delta < 0, 1, 0))
head(dj.rsi)

# Magnitude of the up / down moves (both stored as positive values).
dj.rsi <- dj.rsi %>%
  mutate(up.val = delta * up,
         down.val = -delta * down)
head(dj.rsi)

# 14-day simple averages (dropping the first row, whose delta is NA); these
# seed the smoothed averages below.
dj.rsi <- dj.rsi[2:nrow(dj.rsi), ] %>%
  mutate(up.first.avg = rollapply(up.val, width = 14, align = 'right',
                                  fill = NA, FUN = mean, na.rm = TRUE),
         down.first.avg = rollapply(down.val, width = 14, align = 'right',
                                    fill = NA, FUN = mean, na.rm = TRUE))
head(dj.rsi, 20)

# initialise the up.avg and down.avg with up.first.avg and down.first.avg values
dj.rsi$up.avg <- dj.rsi$up.first.avg
dj.rsi$down.avg <- dj.rsi$down.first.avg

# Wilder smoothing for t >= 15: 13/14 of yesterday's average plus 1/14 of
# today's move.
for (i in 15:length(dj.rsi$up.avg)) {
  dj.rsi$up.avg[i] <- dj.rsi$up.avg[i - 1] * 13 / 14 + dj.rsi$up.val[i] * 1 / 14
  dj.rsi$down.avg[i] <- dj.rsi$down.avg[i - 1] * 13 / 14 + dj.rsi$down.val[i] * 1 / 14
}
head(dj.rsi, 20)

# RS = upside wema/downside wema
# RSI = 100-100/(1+RS)
dj.rsi$rs <- dj.rsi$up.avg / dj.rsi$down.avg
dj.rsi$rsi <- 100 - 100 / (1 + dj.rsi$rs)
head(dj.rsi, 20)

# Restrict to 2006 and overlay the index level on the RSI plot.
dj.rsi <- dj.rsi %>% filter(substr(date, 1, 4) == '2006')

plot(dj.rsi$rsi ~ dj.rsi$date,
     ylab = 'RSI (14-Day Moving Avg)', xlab = 'Date',
     ylim = c(0, 100), lwd = 2, type = 'l',
     main = 'DJ: RSI (14-Day Moving Avg)')
abline(h = c(30, 70), col = 'red')  # conventional oversold / overbought bands
par(new = TRUE)
plot(dj.rsi$dji ~ dj.rsi$date, xlab = '', ylab = '', yaxt = 'n',
     type = 'l', col = 'lightblue')
legend("topleft",
       c('14-Day Moving Avg', 'Moving Avg Abline', 'dji'),
       lty = c(1, 1, 1), col = c("black", "red", 'lightblue'))
axis(4)
4badcebdcc2412d9c23d24231a04d033d226fb53
dae9dece43c6fe692fdf00e45bcab0129b4f3787
/examples/api-key-authentication/1-deploy.R
9550337c13a93b6565315e97b4f552b6226cc1d3
[]
no_license
Sandy4321/RestRserve
31b2f83a47b51bff1be8bc9456cf9ec5f425a291
1ee89a0864ea9b67ca581a0ccfe2fa4917978d7b
refs/heads/master
2020-03-17T04:48:39.978576
2018-04-16T05:35:21
2018-04-16T05:35:21
null
0
0
null
null
null
null
UTF-8
R
false
false
525
r
1-deploy.R
#!/usr/bin/env Rscript

# Deploy the RestRserve application in R/app.R into the ./run directory,
# then ship the API key listing alongside it.

app_path   <- "R/app.R"
deploy_dir <- "run"

# Server configuration: TLS on 8002, plain HTTP on 6312.
deploy_conf <- c(
  "http.tls.port" = "8002",
  "tls.key"       = normalizePath("run/cert/server.key"),
  "tls.cert"      = normalizePath("run/cert/server.cert"),
  # you may need also put public keys (CA certs) provided by Certificate Authority (CA)
  # "tls.ca" = normalizePath("run/cert/server.ca"),
  "encoding" = "utf8",
  "port"     = "6312"
)

RestRserve::restrserve_deploy(app_path, dir = deploy_dir,
                              configuration = deploy_conf)

file.copy("api-keys.txt", file.path(deploy_dir, "api-keys.txt"),
          overwrite = TRUE)
8223dee4b4b7a8fdd0f506714cbfbe17ccaf7c48
587636c9ce7053359f4e094e835b2c04a0662982
/riveredge.R
7565c77046f654c0e78a310f50164c2688b6be00
[]
no_license
RomeoAlphaYankee/DataScienceR
99678099a18ad4acc3d5ed6b43b286e933380e86
89fb71d38c235317c4e8c203385d76c72d64f484
refs/heads/master
2022-05-06T20:01:05.590064
2022-03-19T15:36:35
2022-03-19T15:36:35
219,862,131
0
1
null
null
null
null
UTF-8
R
false
false
1,951
r
riveredge.R
# Google-Trends interest in "Thousand Islands" and Google-Analytics traffic
# to the Riveredge Resort site, summarized and plotted by month/year.

library(dplyr)
library(readr)
library(lubridate)
library(ggplot2)

# --- Google Trends data ---------------------------------------------------
ti <- read.csv("C:/Users/angel/Downloads/multiTimeline.csv", skip = 2)
head(ti)
names(ti) <- c("week", "data")

# Mean / median weekly search interest, June-September, by month and year.
ti %>%
  mutate(date = ymd(week)) %>%
  mutate(year = year(date), month = month(date)) %>%
  filter(month >= 6, month <= 9) %>%
  group_by(month, year) %>%
  summarise(mean = mean(data), median = median(data))

# Same summary through October, plotted as grouped bars.
ti %>%
  mutate(date = ymd(week)) %>%
  mutate(year = year(date), month = month(date)) %>%
  filter(month >= 6, month <= 10) %>%
  group_by(month, year) %>%
  summarise(mean = mean(data), median = median(data)) %>%
  ggplot(aes(x = month, y = mean, fill = factor(year))) +
  geom_col(position = "dodge") +
  ggtitle("Google Searches for Thousand Islands") +
  labs(fill = "Year")

# Full weekly time series.
ti %>%
  mutate(date = ymd(week)) %>%
  mutate(year = year(date)) %>%
  ggplot(aes(x = date, y = data)) +
  geom_line(color = "blue", lwd = 1.25) +
  ggtitle("Google Searches for Thousand Islands", "2020 vs. 2019") +
  xlab("Date") +
  ylab("Searches")

# --- Google Analytics data ------------------------------------------------
rr <- read_csv("C:/Users/angel/Downloads/Analytics Riveredge Resort Pages 20171205-20201211.csv",
               skip = 19)
head(rr)
rr$`Month Index` <- as.numeric(rr$`Month Index`)
tail(rr)

# Calendar month for each row: the export starts in December 2017, then
# cycles January-December.
rr$month <- 12
rr$month[-1] <- 1:12
rr <- rr[-nrow(rr), ]  # drop the trailing summary row

# Calendar year for each row.  (Fix: a dead within()/ifelse() statement was
# removed here -- it used invalid syntax `& <= 12` and a nonexistent
# `Month.Index` column, and its result was discarded anyway; the explicit
# vector below is the working assignment.  Assigning directly also avoids a
# stray global `year` masking lubridate::year.)
rr$year <- c(2017, rep(2018, 12), rep(2019, 12), rep(2020, 12))
head(rr)
str(rr)

# Page views June-October, by month and year.
rr %>%
  filter(month >= 6, month <= 10) %>%
  ggplot(aes(x = month, y = Pageviews, fill = factor(year))) +
  geom_col(position = "dodge") +
  ggtitle("Web Traffic to Riveredge Resort", "Page Views 2018 - 2020") +
  xlab("Month") +
  labs(fill = "Year")
cd05148169b09b1dd534d7702e38366778068689
ada4a328c22ba799276e6e827ff10fc2a17aaab4
/vectorized.R
cad31db9e0f7435695a196eac43528a122a19637
[]
no_license
giannelli/slotmachine
aee8fb1ac7cbfb70c3f19df6fba12f84812ba0fb
b1fb4e0af9e3f4df1a06f5eb946acc0b4c8863c3
refs/heads/master
2020-04-01T14:44:30.371705
2018-10-25T14:40:08
2018-10-25T14:40:08
153,306,124
0
0
null
null
null
null
UTF-8
R
false
false
6,477
r
vectorized.R
# Hands-On Programming with R
# Chapter 10 : Speed
#
# Timing comparisons between loop-based and vectorized code, using the
# slot-machine simulation from earlier chapters.

# Repeat what we need - should package this up.

# Draw three slot-machine symbols with the book's fixed probabilities.
get_symbols <- function() {
  wheel <- c("DD", "7", "BBB", "BB", "B", "C", "0")
  sample(wheel, size = 3, replace = TRUE,
         prob = c(0.03, 0.03, 0.06, 0.1, 0.25, 0.01, 0.52))
}

# Score one pull: diamonds are wild and double the prize each.
score <- function(symbols) {
  diamonds <- sum(symbols == "DD")
  cherries <- sum(symbols == "C")
  # identify case
  # since diamonds are wild, only nondiamonds
  # matter for three of a kind and all bars
  slots <- symbols[symbols != "DD"]
  same <- length(unique(slots)) == 1
  bars <- slots %in% c("B", "BB", "BBB")
  # assign prize
  if (diamonds == 3) {
    prize <- 100
  } else if (same) {
    payouts <- c("7" = 80, "BBB" = 40, "BB" = 25,
                 "B" = 10, "C" = 10, "0" = 0)
    prize <- unname(payouts[slots[1]])
  } else if (all(bars)) {
    prize <- 5
  } else if (cherries > 0) {
    # diamonds count as cherries
    # so long as there is one real cherry
    prize <- c(0, 2, 5)[cherries + diamonds + 1]
  } else {
    prize <- 0
  }
  # double for each diamond
  prize * 2^diamonds
}

# One play: score a random pull, keeping the symbols as an attribute.
play <- function() {
  symbols <- get_symbols()
  structure(score(symbols), symbols = symbols, class = "slots")
}

# --- Loop vs. vectorized absolute value ----------------------------------
abs_loop <- function(vec) {
  for (i in 1:length(vec)) {
    if (vec[i] < 0) {
      vec[i] <- -vec[i]
    }
  }
  vec
}

abs_sets <- function(vec) {
  negs <- vec < 0
  vec[negs] <- vec[negs] * -1
  vec
}

long <- rep(c(-1, 1), 5000000)
length(long)
system.time(abs_loop(long))
system.time(abs_sets(long))
system.time(abs(long))

## page 175 -- loop vs. vectorized symbol translation
change_symbols <- function(vec){
  for (i in 1:length(vec)){
    if (vec[i] == "DD") {
      vec[i] <- "joker"
    } else if (vec[i] == "C") {
      vec[i] <- "ace"
    } else if (vec[i] == "7") {
      vec[i] <- "king"
    } else if (vec[i] == "B") {
      vec[i] <- "queen"
    } else if (vec[i] == "BB") {
      vec[i] <- "jack"
    } else if (vec[i] == "BBB") {
      vec[i] <- "ten"
    } else {
      vec[i] <- "nine"
    }
  }
  vec
}

vec <- c("DD", "C", "7", "B", "BB", "BBB", "0")
change_symbols(vec)
many <- rep(vec, 1000000)
system.time(change_symbols(many))

# Vectorized: one logical-subset assignment per symbol.
change_vec <- function (vec) {
  vec[vec == "DD"] <- "joker"
  vec[vec == "C"] <- "ace"
  vec[vec == "7"] <- "king"
  vec[vec == "B"] <- "queen"
  vec[vec == "BB"] <- "jack"
  vec[vec == "BBB"] <- "ten"
  vec[vec == "0"] <- "nine"
  vec
}
system.time(change_vec(many))

# Fastest: a single named-vector lookup.
change_vec2 <- function(vec){
  tb <- c("DD" = "joker", "C" = "ace", "7" = "king",
          "B" = "queen", "BB" = "jack", "BBB" = "ten", "0" = "nine")
  unname(tb[vec])
}
system.time(change_vec2(many))

# page 178 -- effect of preallocation.
system.time({
  output <- rep(NA, 1000000)
  for (i in 1:1000000) {
    # Fix: the original had `output[i] < i + 1`, a comparison that discards
    # its result; the book's code assigns with `<-` so the preallocated
    # version actually fills the vector.
    output[i] <- i + 1
  }
})

system.time({
  output <- NA
  for (i in 1:1000000) {
    output[i] <- i + 1
  }
})

system.time({
  winnings <- vector(length = 1000000)
  for (i in 1:1000000) {
    winnings[i] <- play()
  }
  print(paste("Average win: ", mean(winnings)))
})
## 0.9366984 - very close to calculated expected value worked out in chapter 9

###### Vectorize simulation Page 180 ###############

# Draw 3 * n symbols at once, returned as an n x 3 matrix.
get_many_symbols <- function(n) {
  wheel <- c("DD", "7", "BBB", "BB", "B", "C", "0")
  vec <- sample(wheel, size = 3 * n, replace = TRUE,
                prob = c(0.03, 0.03, 0.06, 0.1, 0.25, 0.01, 0.52))
  matrix(vec, ncol = 3)
}
get_many_symbols(10)

# symbols should be a matrix with a column for each slot machine window
score_many <- function(symbols) {
  # Step 1: Assign base prize based on cherries and diamonds ---------
  ## Count the number of cherries and diamonds in each combination
  cherries <- rowSums(symbols == "C")
  diamonds <- rowSums(symbols == "DD")
  ## Wild diamonds count as cherries
  prize <- c(0, 2, 5)[cherries + diamonds + 1]
  ## ...but not if there are zero real cherries
  ### (cherries is coerced to FALSE where cherries == 0)
  prize[!cherries] <- 0
  # Step 2: Change prize for combinations that contain three of a kind
  same <- symbols[, 1] == symbols[, 2] & symbols[, 2] == symbols[, 3]
  payoffs <- c("DD" = 100, "7" = 80, "BBB" = 40,
               "BB" = 25, "B" = 10, "C" = 10, "0" = 0)
  prize[same] <- payoffs[symbols[same, 1]]
  # Step 3: Change prize for combinations that contain all bars ------
  bars <- symbols == "B" | symbols == "BB" | symbols == "BBB"
  all_bars <- bars[, 1] & bars[, 2] & bars[, 3] & !same
  prize[all_bars] <- 5
  # Step 4: Handle wilds ---------------------------------------------
  ## combos with two diamonds
  two_wilds <- diamonds == 2
  ### Identify the nonwild symbol
  one <- two_wilds & symbols[, 1] != symbols[, 2] &
    symbols[, 2] == symbols[, 3]
  two <- two_wilds & symbols[, 1] != symbols[, 2] &
    symbols[, 1] == symbols[, 3]
  three <- two_wilds & symbols[, 1] == symbols[, 2] &
    symbols[, 2] != symbols[, 3]
  ### Treat as three of a kind
  prize[one] <- payoffs[symbols[one, 1]]
  prize[two] <- payoffs[symbols[two, 2]]
  prize[three] <- payoffs[symbols[three, 3]]
  ## combos with one wild
  one_wild <- diamonds == 1
  ### Treat as all bars (if appropriate)
  wild_bars <- one_wild & (rowSums(bars) == 2)
  prize[wild_bars] <- 5
  ### Treat as three of a kind (if appropriate)
  one <- one_wild & symbols[, 1] == symbols[, 2]
  two <- one_wild & symbols[, 2] == symbols[, 3]
  three <- one_wild & symbols[, 3] == symbols[, 1]
  prize[one] <- payoffs[symbols[one, 1]]
  prize[two] <- payoffs[symbols[two, 2]]
  prize[three] <- payoffs[symbols[three, 3]]
  # Step 5: Double prize for every diamond in combo ------------------
  unname(prize * 2^diamonds)
}

# Simulate n plays fully vectorized; returns one row per play.
play_many <- function(n) {
  symb_mat <- get_many_symbols(n = n)
  data.frame(w1 = symb_mat[, 1], w2 = symb_mat[, 2], w3 = symb_mat[, 3],
             prize = score_many(symb_mat))
}

system.time(play_many(1000000))

## Vectorised scoring - Ten Million!
system.time({
  plays <- play_many(10000000)
  print(mean(plays$prize))
})
# [1] 0.9371745
#   user  system elapsed
#  8.706   1.917  10.622

## Non vectorised scoring - 'Only' One Million
system.time({
  winnings <- vector(length = 1000000)
  for (i in 1:1000000) {
    winnings[i] <- play()
  }
  print(paste("Average win: ", mean(winnings)))
})
# [1] "Average win:  0.93637"
#   user  system elapsed
# 39.424   0.002  39.420

# #######################
# symbols <- matrix(
#   c("DD", "DD", "DD",
#     "C", "DD", "0",
#     "B", "B", "B",
#     "B", "BB", "BBB",
#     "C", "C", "0",
#     "7", "DD", "DD"), nrow = 6, byrow = TRUE)
# symbols
ef78545214e168ab79bfa992f16d007f6af6752b
bbcec89cbc999b348f012b091525af29e267c4c8
/man/impute_missing_per_quantile.Rd
7a99c43f18e3d1571aca2697d9fcab1042b42f46
[]
no_license
shinyfluba/covidEnsembles
3ec08a70205d145bd0597d75bd1c763b2907f2fb
fedbc5c8921afd221ec4584a3bd63152eb4fba8b
refs/heads/master
2023-04-04T08:10:35.323665
2021-03-25T14:49:15
2021-03-25T14:49:15
null
0
0
null
null
null
null
UTF-8
R
false
true
1,102
rd
impute_missing_per_quantile.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/qra_fit_convenience.R \name{impute_missing_per_quantile} \alias{impute_missing_per_quantile} \title{Impute missing values for each quantile level in a quantile forecast matrix It is assumed that in each row, all quantiles for a given model are either missing or available.} \usage{ impute_missing_per_quantile(qfm, impute_method = "mean") } \arguments{ \item{qfm}{a QuantileForecastMatrix} \item{impute_method}{character string specifying method for imputing; currently only 'mean' is supported} } \value{ list of two items: \enumerate{ \item 'qfm_imputed' the input QuantileForecastMatrix object with missing values imputed \item 'weight_transfer' a square matrix of dimension equal to the number of unique models in qfm. Entry \link{i, j} is the proportion of imputed observations for model j that are attributable to model i. } } \description{ Impute missing values for each quantile level in a quantile forecast matrix It is assumed that in each row, all quantiles for a given model are either missing or available. }
1da95af3a5ba19aadbdbc72210070c89914e8cd5
7a7a325a0f01d92a41e7c35278e7f938452fed84
/man/YPR.Rd
25d842db1edcb43b58ad0fbf15c61f37665bc896
[]
no_license
IMPRESSPROJECT/Rfishpop
cca94973e345a7841634b5ab4a7ba5fdebf42e24
c4055e61b7126d9ab0b4264855f39584405a8a16
refs/heads/master
2022-08-30T10:35:13.002516
2022-08-26T16:51:16
2022-08-26T16:51:16
252,695,701
0
5
null
2020-11-05T10:49:10
2020-04-03T10:04:49
R
UTF-8
R
false
true
3,069
rd
YPR.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/YPR.R \name{YPR} \alias{YPR} \title{Yield-per-Recruit} \usage{ YPR(Pop.Mod, f.grid, Fish.years, Bio.years, plot, Method, par) } \arguments{ \item{Pop.Mod}{A list containing the components returned by Population.Modeling function (main function).} \item{f.grid}{A sequence of fishing efforts.} \item{Fish.years}{The number of recent years to estimate the mean of SEL (selectivity, see information about such elements in Sum.Pop.Mod function).} \item{Bio.years}{The number of recent years to estimate the mean of M and WC (natural mortality and catch weight, see information about such elements in Population.Modeling function and Sum.Pop.Mod function).} \item{plot}{A vector of two elements. The first one is a logical parameter. By default is equal to TRUE, which means that a biomass per recruit graph is done. The second element refers to which iteration must be plotted.} \item{Method}{The procedure to obtain the age vector of weight (catches), selectivity and natural mortality. By default is "mean" which means that the mean of the last "Bio.years" is used. The alternative option is "own", the user can introduce these elements.} \item{par}{If Method="own" it is a list containing the matrices whose columns report for each iteration the age vector of weight (catches), natural mortality, and selectivity. In other case is equal to NULL.} } \value{ An array whose third dimension corresponds to the iterations. For each iteration the array contains a matrix reporting the yield-per-recruit for a range of overall fishing mortalities. } \description{ Return yield-per-Recruit for each iteration. } \details{ The function return the yield-per-recruit. 
} \examples{ ctrPop<-list(years=seq(1980,2020,by=1),niter=2,N0=15000,ages=0:15,minFage=2, maxFage=5,tc=0.5,seed=NULL) number_ages<-length(ctrPop$ages);number_years<-length(ctrPop$years) Mvec=c(1,0.6,0.5,0.4,0.35,0.35,0.3,rep(0.3,9)) M<-matrix(rep(Mvec,number_years),ncol = number_years) colnames(M)<-ctrPop$years rownames(M)<-ctrPop$ages ctrBio<-list(M=M,CV_M=0.2, L_inf=20, t0=-0.25, k=0.3, CV_L=0, CV_LC=0, a=6*10^(-6), b=3, a50_Mat=1, ad_Mat=-0.5,CV_Mat=0) ctrSEL<-list(type="cte", par=list(cte=0.5),CV_SEL=0) f=matrix(rep(0.5,number_years),ncol=number_years,nrow=2,byrow=TRUE) ctrFish<-list(f=f,ctrSEL=ctrSEL) a_BH=15000; b_BH=50; CV_REC_BH=0 SR<-list(type="BH",par=c(a_BH,b_BH,CV_REC_BH)) Pop.Mod<-Population.Modeling(ctrPop=ctrPop,ctrBio=ctrBio,ctrFish=ctrFish,SR=SR) f.grid<-seq(0.00,1.5,by=0.01) YPR(Pop.Mod,f.grid,3,3,plot=c(TRUE,1), Method="mean",par=NULL) # The following commented lines refers to par argument. # If par is not NULL must be something like (assuming that WC, M, # SEL are defined previously). # par=list(); par$WC<-WC; par$SEL<-SEL; par$M<-M # YPR(Pop.Mod,f.grid,plot=c(TRUE,1),Method="own",par=par) } \author{ \itemize{ \item{Marta Cousido-Rocha} \item{Santiago Cerviño López} \item{Maria Grazia Pennino} } }
5205eaec2c952a1ccc8b0c6679944da2db3ac558
71a051ad748a7b22f427edd81d791443f4c90dea
/man/rgt.Rd
8739b3e6489153a2f337e91aec7428542511d9e3
[]
no_license
knausb/GeneticDiff
d13c419503591de43979dc1016383770d5094b1e
6058edb2bd4e68152b42e80ca2807bfc370204f3
refs/heads/master
2021-01-01T19:45:04.216663
2017-09-24T21:42:58
2017-09-24T21:42:58
98,671,915
0
0
null
null
null
null
UTF-8
R
false
true
4,679
rd
rgt.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/RcppExports.R \name{rgt} \alias{rgt} \title{rgt - random genotypes} \usage{ rgt(nsamp = 4L, nvar = 3L, pphased = as.numeric(c(0.5)), pploid = as.numeric(c(0, 1)), pallele = as.numeric(c(0.5, 0.5)), verbose = 0L) } \arguments{ \item{nsamp}{number of samples (columns) to simulate} \item{nvar}{number of variants (rows) to simulate} \item{pphased}{probability each genotype will be phased} \item{pploid}{probability each genotype will be a ploidy level} \item{pallele}{probability of each allele} \item{verbose}{should verbose output be produced (1) or not (0)} } \value{ A matrix with variants (loci) in rows and samples in columns. } \description{ Generate a matrix of random genotypes. } \details{ The function \code{rgt} generates a matrix of random genotypes. This matrix consists of loci or variants in rows and samples in columns where the number of loci and samples is arbitrary. Genotypes can be delimited by either the forward slash (/), for unphased genotypes, or the pipe (|), for phased genotypes. The matrix of genotypes generated by this function is intended to be compatible with a matrix of genotypes extracted from variant cal format data (VCF). The alleles are encoded in a zero-based numeric manner consistent with the VCF specification. Here, 0 is the first (typically the reference) allele, 1 is the first alternate allele, 2 is the second alternate allele, and so on. While these genotypes are intended to conform to the VCF specification it is hoped they will be generalizable to broader applications. The parameter \strong{nvar} determines the number of loci or variants (rows) to simulate. This integer value is arbitrary to accomodate a range of locus counts. The parameter \strong{nsamp} determines the number of samples (columns) to simulate. This integer value is arbitrary to accomodate a range of sample sizes. 
The parameter \strong{pphased} indicates the probability that a genotype will be phased. This parameter consists of a vector containing one element ranging in value from zero to one and describes the probability an individual genotype will be phased. For example, \code{rgt(pphased=c(0))} will produce genotypes that have a probability of zero for being phased. The code \code{rgt(pphased=c(1))} will produce genotypes that have a probability of one for being phased. The code \code{rgt(pphased=c(0.2))} will produce genotypes that have a probability of 0.2 that they will be phased. The probability for each genotype returned is calculated independently. The parameter \strong{pploid} indicates the probability a genotype will be of a particular ploidy or copy number. This is an n-element vector where each element indicates the probability a genotype will be n-ploid. For example, \code{rgt(pploid=c(1))} will produce genotypes that have a probability of 1 for being one-ploid (haploid). The code \code{rgt(pploid=c(0,1))} will produce genotypes that have a probability of 0 for being one-ploid and a probability of 1 for being two-ploid (diploid). The code \code{rgt(pploid=c(0,0,1))} will produce genotypes that have a probability of 0 for being one-ploid, a probability of 0 for being two-ploid (diploid), and a probability of 1 for being three-ploid (triploid). The code \code{rgt(pploid=c(0.1,0.5,0.4))} will produce genotypes that have a probability of 0.1 for being one-ploid, 0.5 for being two-ploid, and 0.4 for being three-ploid. The values in this vector are of arbitrary length but should sum to one. The parameter \strong{pallele} indicates the probability of each allele to occur in a genotype. This is an n-element vector where each element indicates the probability of the nth allele occuring. Here the first element is the probability of the 0th allele. For example, \code{rgt(pallele = c(1))} will produce genotypes where the 0th allele (0) will occur with a probability of 1. 
The code \code{rgt(pallele = c(0,1))} will produce genotypes where the 0th allele (0) will occur with a probability of 0 and the 1st allele will occur with a probability of 1. The code \code{rgt(pallele = c(0,0,1))} will produce genotypes where the 0th allele (0) will occur with a probability of 0, the 1st allele will occur with a probability of 0, and the 2nd allele will occur with a probability of 1. The code \code{rgt(pallele = c(0.2,0.7,0.1))} will produce genotypes where the 0th allele (0) will occur with a probability of 0.2, the 1st allele will occur with a probability of 0.7, and the 2nd allele will occur with a probability of 0.1. The values in this vector are of arbitrary length but should sum to one. } \examples{ rgt() }
4ea988c024b617ee7e4eafd64de8367118d3b821
ee79ad37d2885aac5f5cecb9eea61fafee117a11
/simultaneous-selection/analysis/analyses_rep/model_4.R
0fd970f658dc8c32c61792ad06ee193ef7d9609b
[]
no_license
vullab/ColorBinding
d33a3609b89f0916a026cae846742e1f065938ea
b3c6e718f1233ad9b4568a51f794cf3b5966145f
refs/heads/master
2021-03-16T10:26:02.634673
2018-12-16T04:04:29
2018-12-16T04:04:29
65,759,803
1
1
null
null
null
null
UTF-8
R
false
false
8,521
r
model_4.R
# model_4.R -- "model 4" of the color-binding report models.
# The model predicts a 10 x 10 matrix of response probabilities over
# (A-response, B-response) pairs: options 1:5 are the A-type alternatives,
# options 6:10 the B-type alternatives.  The prediction is a mixture of
# "remembered the whole item", "remembered a part", "remembered a color only",
# and pure-guess components; the fitBindingModel* functions score a parameter
# vector against an observed response matrix by sum of squared residuals.

library("VGAM")   # dlaplace()
#library(R.basic) # mirror.matrix(); required by rotate90()/rotate270() below

# Notes kept from the original author:
# looking good, debatable whether or not should do the whole color or
# flipping - whole color should not rely on part maybe?
# need separate colors for asymmetrical bars

# Rotate a matrix 90 degrees clockwise.
# NOTE(review): mirror.matrix() comes from R.basic, whose library() call is
# commented out above, so rotate90()/rotate270() will error unless R.basic is
# attached -- confirm before using these two helpers (rotate180 is self-contained).
rotate90<- function(x) {
  t(mirror.matrix(x))
}

# Rotate matrix 180 clockwise (reverse all cells, then restore the dimensions).
rotate180 <- function(x) {
  xx <- rev(x);
  dim(xx) <- dim(x);
  xx;
}

# Rotate matrix 270 clockwise.
rotate270 <- function(x) {
  mirror.matrix(t(x))
}

# Squared-error objective for the binding model with no report flipping
# (prob_flip is fixed at 0).
#
# x    - parameter vector:
#        x[1] p(report whole item)   x[4] sd of the whole-item kernel
#        x[2] p(report part)         x[5] sd of the part kernel
#        x[3] p(report color)        x[6] sd of the color kernel
#        A and B share the part/color parameters in this model variant; the
#        fully free 10-parameter mapping is kept below as a comment.
# data - observed 10 x 10 response matrix the model is fit against.
# plot - if TRUE, draw the fit and residual images (quartz() is macOS-only).
#
# Returns sum(residual^2).  Out-of-range parameter vectors are penalized by
# setting residual to the scalar 10000 (objective value 1e8).
fitBindingModel <- function(x,data,plot = FALSE){
  # All entries non-negative and the three probabilities at most 1.
  # Note x[0:3] selects x[1:3]: a zero index is silently dropped in R.
  if (all(x>=0) & all(x[0:3] <=1)){
    # fill out parameters
    # (alternative, fully free parameterization -- unused:
    #  p_whole_item = x[1]; p_part_A = x[2]; p_part_B = x[3];
    #  p_color_A = x[4]; p_color_B = x[5]; sd_whole = x[6]; sd_part_A = x[7];
    #  sd_part_B = x[8]; sd_color_A = x[9]; sd_color_B = x[10])
    p_whole_item = x[1]
    p_part_A = x[2]
    p_part_B = x[2]
    p_color_A = x[3]
    p_color_B = x[3]
    sd_whole = x[4]
    sd_part_A = x[5]
    sd_part_B = x[5]
    sd_color_A = x[6]
    sd_color_B = x[6]
    prob_flip = 0
    # (p_part_A + p_part_B) <= 1 &
    if ( (p_color_A + p_color_B) <= 1){
      #get the model fit
      fit <- runBindingModel(p_whole_item, p_part_A, p_part_B,
                             p_color_A, p_color_B,
                             sd_whole, sd_part_A, sd_part_B,
                             sd_color_A, sd_color_B, prob_flip)
      #find the residual
      residual = fit - data
      if (plot){
        quartz()
        image(sqrt(matrix(rev(fit),nrow=10,byrow=TRUE)[(10:1),] ),col=gray((0:128)/128))
        quartz()
        image(matrix(rev(residual),nrow=10,byrow=TRUE)[(10:1),] ,col=gray((0:128)/128))
      }
    }else{
      # color probabilities sum to more than one: large fixed penalty
      residual = 10000
    }
  }else{
    # parameters out of range: large fixed penalty
    residual = 10000
  }
  return(sum(residual^2))
}

# Same objective as fitBindingModel(), but with a free flip probability:
# x[7] = probability the whole predicted matrix is flipped (A/B exchanged).
# All other parameters are mapped exactly as in fitBindingModel().
fitBindingModel2 <- function(x,data,plot = FALSE){
  if (all(x>=0) & all(x[0:3] <=1) & all(x[7] <=1)){
    # fill out parameters
    # (alternative, fully free parameterization -- unused:
    #  p_whole_item = x[1]; p_part_A = x[2]; p_part_B = x[3];
    #  p_color_A = x[4]; p_color_B = x[5]; sd_whole = x[6]; sd_part_A = x[7];
    #  sd_part_B = x[8]; sd_color_A = x[9]; sd_color_B = x[10])
    p_whole_item = x[1]
    p_part_A = x[2]
    p_part_B = x[2]
    p_color_A = x[3]
    p_color_B = x[3]
    sd_whole = x[4]
    sd_part_A = x[5]
    sd_part_B = x[5]
    sd_color_A = x[6]
    sd_color_B = x[6]
    prob_flip = x[7]
    # (p_part_A + p_part_B) <= 1 &
    if ( (p_color_A + p_color_B) <= 1){
      #get the model fit
      fit <- runBindingModel(p_whole_item, p_part_A, p_part_B,
                             p_color_A, p_color_B,
                             sd_whole, sd_part_A, sd_part_B,
                             sd_color_A, sd_color_B, prob_flip)
      #find the residual
      residual = fit - data
      if (plot){
        quartz()
        image(sqrt(matrix(rev(fit),nrow=10,byrow=TRUE)[(10:1),] ),col=gray((0:128)/128))
        quartz()
        image(matrix(rev(residual),nrow=10,byrow=TRUE)[(10:1),] ,col=gray((0:128)/128))
      }
    }else{
      # color probabilities sum to more than one: large fixed penalty
      residual = 10000
    }
  }else{
    # parameters out of range: large fixed penalty
    residual = 10000
  }
  return(sum(residual^2))
}

# Build the 10 x 10 predicted response-probability matrix.
# Memory for each component is modeled as a discrete Laplace kernel over the
# five positions of the relevant response group (A = rows, B = columns).
# prob_flip mixes the matrix with its A/B-swapped version.
runBindingModel <- function(p_whole_item, p_part_A, p_part_B,
                            p_color_A, p_color_B,
                            sd_whole, sd_part_A, sd_part_B,
                            sd_color_A, sd_color_B, prob_flip){
  # normalize a kernel so it sums to one
  prob_norm <- function(x){ return(x/sum(x))}
  #browser()
  # Laplace (double-exponential) kernel for each memory level, on offsets -2:2
  whole_prob<- prob_norm(dlaplace(-2:2,0,sd_whole))
  partA_prob<- prob_norm(dlaplace(-2:2,0,sd_part_A))
  partB_prob<- prob_norm(dlaplace(-2:2,0,sd_part_B))
  colorA_prob<- prob_norm(dlaplace(-2:2,0,sd_color_A))
  colorB_prob<- prob_norm(dlaplace(-2:2,0,sd_color_B))
  guess_prob<- rep(1/10,10)
  # set up individual component matrices (one per memory combination)
  whole_matrix <- matrix(0, nrow = 10, ncol = 10)
  whole_swap_matrix <- matrix(0, nrow = 10, ncol = 10)
  Apart_Bpart_matrix <- matrix(0, nrow = 10, ncol = 10)
  Apart_Acolor_matrix <- matrix(0, nrow = 10, ncol = 10)
  Apart_Bcolor_matrix <- matrix(0, nrow = 10, ncol = 10)
  Apart_G_matrix <- matrix(0, nrow = 10, ncol = 10)
  Acolor_Bpart_matrix <- matrix(0, nrow = 10, ncol = 10)
  Acolor_Acolor_matrix <- matrix(0, nrow = 10, ncol = 10)
  Acolor_Bcolor_matrix <- matrix(0, nrow = 10, ncol = 10)
  Acolor_DR_matrix <- matrix(0, nrow = 10, ncol = 10)
  Bcolor_Bpart_matrix <- matrix(0, nrow = 10, ncol = 10)
  Bcolor_Acolor_matrix <- matrix(0, nrow = 10, ncol = 10)
  Bcolor_Bcolor_matrix <- matrix(0, nrow = 10, ncol = 10)
  Bcolor_DR_matrix <- matrix(0, nrow = 10, ncol = 10)
  G_Bpart_matrix <- matrix(0, nrow = 10, ncol = 10)
  #Acolor_DR_matrix <- matrix(0, nrow = 10, ncol = 10)
  #Bcolor_DR_matrix <- matrix(0, nrow = 10, ncol = 10)
  GG_matrix <- matrix(0, nrow = 10, ncol = 10)
  Aoptions<- 1:5
  Boptions<- 6:10
  # A is rows, B is columns
  whole_matrix[Aoptions,Boptions] <- diag(whole_prob)
  Apart_Bpart_matrix[Aoptions,Boptions] <- partA_prob %*% t(partB_prob)
  Apart_Acolor_matrix[Aoptions,Aoptions] <- partA_prob %*% t(colorA_prob)
  Apart_Bcolor_matrix[Aoptions,Boptions] <- partA_prob %*% t(colorB_prob)
  Apart_G_matrix[Aoptions,1:10] <- partA_prob %*% t(guess_prob)
  Acolor_Bpart_matrix[Aoptions,Boptions] <- colorA_prob %*% t(partB_prob)
  Acolor_Acolor_matrix[Aoptions,Aoptions] <- colorA_prob %*% t(colorA_prob)
  Acolor_Bcolor_matrix[Aoptions,Boptions] <- colorA_prob %*% t(colorB_prob)
  Acolor_DR_matrix[Aoptions,Aoptions] <- diag(colorA_prob)
  Bcolor_Bpart_matrix[Boptions,Boptions] <- colorB_prob %*% t(partB_prob)
  Bcolor_Acolor_matrix[Boptions,Aoptions] <- colorB_prob %*% t(colorA_prob)
  Bcolor_Bcolor_matrix[Boptions,Boptions] <- colorB_prob %*% t(colorB_prob)
  Bcolor_DR_matrix[Boptions,Boptions] <- diag(colorB_prob)
  G_Bpart_matrix[1:10,Boptions]<- guess_prob %*% t(partB_prob)
  GG_matrix[1:10,1:10]<- guess_prob %*% t(guess_prob)
  # only used if considering the possibility of whole with no parts
  whole_swap_matrix[Boptions,Aoptions] <- diag(whole_prob)
  #browser()
  # if you want to allow whole colors only:
  #browser()
  # putting everything together -- mixture weights over all components.
  # NOTE(review): in the "Acolor, no part B" branch below, Acolor_Acolor_matrix
  # gets weight (p_color_A * p_color_B); by symmetry with the Bcolor branch
  # ((p_color_B * p_color_B) on Bcolor_Bcolor_matrix) one might expect
  # (p_color_A * p_color_A) here -- confirm against the model write-up.
  #get the whole thing
  finalMatrix <- p_whole_item * whole_matrix +
    #p_whole_item * (1 - (1 - p_part_A) * (1 - p_part_B)) * whole_matrix +
    #p_whole_item * ((1 - p_part_A) * (1 - p_part_B)) * whole_swap_matrix +
    #dont get the whole thing
    (1 - p_whole_item) * (
      #get part A
      (p_part_A * p_part_B) * Apart_Bpart_matrix + # TFL-Sample parts
      (1-p_part_B) * ( # TFL-If you don't remember part B then
        (p_part_A * p_color_A) * Apart_Acolor_matrix + # TFL-Guess an A color
        (p_part_A * p_color_B) * Apart_Bcolor_matrix + # TFL-Guess a B color
        (p_part_A * (1 - p_color_A - p_color_B)) * Apart_G_matrix ) + # TFL-Or just guess
      #dont get part A
      (1-p_part_A) * (
        (p_color_A * p_part_B) * Acolor_Bpart_matrix + # TFL-Guess A color for A
        (1-p_part_B) * ( # TFL-If you don't remember B part but remember color of A
          (p_color_A * p_color_B) * Acolor_Acolor_matrix + # TFL-Guess an A color
          (p_color_A * p_color_B) * Acolor_Bcolor_matrix + # TFL-Guess a B color
          (p_color_A * (1 - p_color_A - p_color_B)) * Acolor_DR_matrix ) + # TFL--Guess any color
        (p_color_B * p_part_B) * Bcolor_Bpart_matrix + # TFL-Guess a B color for A
        (1-p_part_B) * (
          (p_color_B * p_color_A) * Bcolor_Acolor_matrix +
          (p_color_B * p_color_B) * Bcolor_Bcolor_matrix +
          (p_color_B * (1 - p_color_A - p_color_B)) * Bcolor_DR_matrix ) +
        ((1 - p_color_A - p_color_B) * p_part_B) * G_Bpart_matrix + # TFL-Guess color for A
        (1-p_part_B) * (
          ((1 - p_color_A - p_color_B) * p_color_A) * Acolor_DR_matrix +
          ((1 - p_color_A - p_color_B) * p_color_B) * Bcolor_DR_matrix +
          ((1 - p_color_A - p_color_B) * (1 - p_color_A - p_color_B)) * GG_matrix )))
  # mix in the A/B-flipped matrix with probability prob_flip
  return((1-prob_flip) * finalMatrix + prob_flip * rotate180(t(rotate180(finalMatrix))) )
}
#image(sqrt(matrix(rev(finalMatrix),nrow=10,byrow=TRUE)[(10:1),] ),col=gray((0:128)/128))
27d6676a6eb9f4e5567cda6d62bd83799736d1e9
5ec06dab1409d790496ce082dacb321392b32fe9
/clients/r/generated/R/ComDayCqDamIdsImplIDSPoolManagerImplProperties.r
476ec884ca2009a97902c7874aa3d646013c8e38
[ "Apache-2.0" ]
permissive
shinesolutions/swagger-aem-osgi
e9d2385f44bee70e5bbdc0d577e99a9f2525266f
c2f6e076971d2592c1cbd3f70695c679e807396b
refs/heads/master
2022-10-29T13:07:40.422092
2021-04-09T07:46:03
2021-04-09T07:46:03
190,217,155
3
3
Apache-2.0
2022-10-05T03:26:20
2019-06-04T14:23:28
null
UTF-8
R
false
false
8,237
r
ComDayCqDamIdsImplIDSPoolManagerImplProperties.r
# Adobe Experience Manager OSGI config (AEM) API
#
# Swagger AEM OSGI is an OpenAPI specification for Adobe Experience Manager (AEM) OSGI Configurations API
#
# OpenAPI spec version: 1.0.0-pre.0
# Contact: opensource@shinesolutions.com
# Generated by: https://openapi-generator.tech

#' ComDayCqDamIdsImplIDSPoolManagerImplProperties Class
#'
#' Generated model class holding the OSGi configuration properties of
#' com.day.cq.dam.ids.impl.IDSPoolManagerImpl.  Each field stores a
#' ConfigNodeProperty* R6 object (defined elsewhere in this package), as
#' shown by the types constructed in \code{fromJSON}/\code{fromJSONString}.
#'
#' @field max.errors.to.blacklist a ConfigNodePropertyInteger
#' @field retry.interval.to.whitelist a ConfigNodePropertyInteger
#' @field connect.timeout a ConfigNodePropertyInteger
#' @field socket.timeout a ConfigNodePropertyInteger
#' @field process.label a ConfigNodePropertyString
#' @field connection.use.max a ConfigNodePropertyInteger
#'
#' @importFrom R6 R6Class
#' @importFrom jsonlite fromJSON toJSON
#' @export
ComDayCqDamIdsImplIDSPoolManagerImplProperties <- R6::R6Class(
  'ComDayCqDamIdsImplIDSPoolManagerImplProperties',
  public = list(
    `max.errors.to.blacklist` = NULL,
    `retry.interval.to.whitelist` = NULL,
    `connect.timeout` = NULL,
    `socket.timeout` = NULL,
    `process.label` = NULL,
    `connection.use.max` = NULL,
    # Store any supplied fields.  Every argument is optional; each supplied
    # value must already be an R6 object (enforced via R6::is.R6).
    initialize = function(`max.errors.to.blacklist`, `retry.interval.to.whitelist`, `connect.timeout`, `socket.timeout`, `process.label`, `connection.use.max`){
      if (!missing(`max.errors.to.blacklist`)) {
        stopifnot(R6::is.R6(`max.errors.to.blacklist`))
        self$`max.errors.to.blacklist` <- `max.errors.to.blacklist`
      }
      if (!missing(`retry.interval.to.whitelist`)) {
        stopifnot(R6::is.R6(`retry.interval.to.whitelist`))
        self$`retry.interval.to.whitelist` <- `retry.interval.to.whitelist`
      }
      if (!missing(`connect.timeout`)) {
        stopifnot(R6::is.R6(`connect.timeout`))
        self$`connect.timeout` <- `connect.timeout`
      }
      if (!missing(`socket.timeout`)) {
        stopifnot(R6::is.R6(`socket.timeout`))
        self$`socket.timeout` <- `socket.timeout`
      }
      if (!missing(`process.label`)) {
        stopifnot(R6::is.R6(`process.label`))
        self$`process.label` <- `process.label`
      }
      if (!missing(`connection.use.max`)) {
        stopifnot(R6::is.R6(`connection.use.max`))
        self$`connection.use.max` <- `connection.use.max`
      }
    },
    # Convert the non-NULL fields to a named list of JSON-ready values,
    # delegating to each field object's own toJSON().  NULL fields are omitted.
    toJSON = function() {
      ComDayCqDamIdsImplIDSPoolManagerImplPropertiesObject <- list()
      if (!is.null(self$`max.errors.to.blacklist`)) {
        ComDayCqDamIdsImplIDSPoolManagerImplPropertiesObject[['max.errors.to.blacklist']] <- self$`max.errors.to.blacklist`$toJSON()
      }
      if (!is.null(self$`retry.interval.to.whitelist`)) {
        ComDayCqDamIdsImplIDSPoolManagerImplPropertiesObject[['retry.interval.to.whitelist']] <- self$`retry.interval.to.whitelist`$toJSON()
      }
      if (!is.null(self$`connect.timeout`)) {
        ComDayCqDamIdsImplIDSPoolManagerImplPropertiesObject[['connect.timeout']] <- self$`connect.timeout`$toJSON()
      }
      if (!is.null(self$`socket.timeout`)) {
        ComDayCqDamIdsImplIDSPoolManagerImplPropertiesObject[['socket.timeout']] <- self$`socket.timeout`$toJSON()
      }
      if (!is.null(self$`process.label`)) {
        ComDayCqDamIdsImplIDSPoolManagerImplPropertiesObject[['process.label']] <- self$`process.label`$toJSON()
      }
      if (!is.null(self$`connection.use.max`)) {
        ComDayCqDamIdsImplIDSPoolManagerImplPropertiesObject[['connection.use.max']] <- self$`connection.use.max`$toJSON()
      }
      ComDayCqDamIdsImplIDSPoolManagerImplPropertiesObject
    },
    # Populate fields from a JSON string; keys absent from the JSON leave the
    # corresponding fields untouched.  ConfigNodePropertyInteger and
    # ConfigNodePropertyString are classes defined elsewhere in this package.
    fromJSON = function(ComDayCqDamIdsImplIDSPoolManagerImplPropertiesJson) {
      ComDayCqDamIdsImplIDSPoolManagerImplPropertiesObject <- jsonlite::fromJSON(ComDayCqDamIdsImplIDSPoolManagerImplPropertiesJson)
      if (!is.null(ComDayCqDamIdsImplIDSPoolManagerImplPropertiesObject$`max.errors.to.blacklist`)) {
        max.errors.to.blacklistObject <- ConfigNodePropertyInteger$new()
        max.errors.to.blacklistObject$fromJSON(jsonlite::toJSON(ComDayCqDamIdsImplIDSPoolManagerImplPropertiesObject$max.errors.to.blacklist, auto_unbox = TRUE))
        self$`max.errors.to.blacklist` <- max.errors.to.blacklistObject
      }
      if (!is.null(ComDayCqDamIdsImplIDSPoolManagerImplPropertiesObject$`retry.interval.to.whitelist`)) {
        retry.interval.to.whitelistObject <- ConfigNodePropertyInteger$new()
        retry.interval.to.whitelistObject$fromJSON(jsonlite::toJSON(ComDayCqDamIdsImplIDSPoolManagerImplPropertiesObject$retry.interval.to.whitelist, auto_unbox = TRUE))
        self$`retry.interval.to.whitelist` <- retry.interval.to.whitelistObject
      }
      if (!is.null(ComDayCqDamIdsImplIDSPoolManagerImplPropertiesObject$`connect.timeout`)) {
        connect.timeoutObject <- ConfigNodePropertyInteger$new()
        connect.timeoutObject$fromJSON(jsonlite::toJSON(ComDayCqDamIdsImplIDSPoolManagerImplPropertiesObject$connect.timeout, auto_unbox = TRUE))
        self$`connect.timeout` <- connect.timeoutObject
      }
      if (!is.null(ComDayCqDamIdsImplIDSPoolManagerImplPropertiesObject$`socket.timeout`)) {
        socket.timeoutObject <- ConfigNodePropertyInteger$new()
        socket.timeoutObject$fromJSON(jsonlite::toJSON(ComDayCqDamIdsImplIDSPoolManagerImplPropertiesObject$socket.timeout, auto_unbox = TRUE))
        self$`socket.timeout` <- socket.timeoutObject
      }
      if (!is.null(ComDayCqDamIdsImplIDSPoolManagerImplPropertiesObject$`process.label`)) {
        process.labelObject <- ConfigNodePropertyString$new()
        process.labelObject$fromJSON(jsonlite::toJSON(ComDayCqDamIdsImplIDSPoolManagerImplPropertiesObject$process.label, auto_unbox = TRUE))
        self$`process.label` <- process.labelObject
      }
      if (!is.null(ComDayCqDamIdsImplIDSPoolManagerImplPropertiesObject$`connection.use.max`)) {
        connection.use.maxObject <- ConfigNodePropertyInteger$new()
        connection.use.maxObject$fromJSON(jsonlite::toJSON(ComDayCqDamIdsImplIDSPoolManagerImplPropertiesObject$connection.use.max, auto_unbox = TRUE))
        self$`connection.use.max` <- connection.use.maxObject
      }
    },
    # Render the object as a JSON string via sprintf.
    # NOTE(review): unlike toJSON(), this assumes all six fields are set; a
    # NULL field makes `$toJSON()` fail with "attempt to apply non-function".
    toJSONString = function() {
      sprintf(
        '{ "max.errors.to.blacklist": %s, "retry.interval.to.whitelist": %s, "connect.timeout": %s, "socket.timeout": %s, "process.label": %s, "connection.use.max": %s }',
        self$`max.errors.to.blacklist`$toJSON(),
        self$`retry.interval.to.whitelist`$toJSON(),
        self$`connect.timeout`$toJSON(),
        self$`socket.timeout`$toJSON(),
        self$`process.label`$toJSON(),
        self$`connection.use.max`$toJSON()
      )
    },
    # Populate all six fields from a JSON string (unconditionally, unlike
    # fromJSON, which skips missing keys).
    fromJSONString = function(ComDayCqDamIdsImplIDSPoolManagerImplPropertiesJson) {
      ComDayCqDamIdsImplIDSPoolManagerImplPropertiesObject <- jsonlite::fromJSON(ComDayCqDamIdsImplIDSPoolManagerImplPropertiesJson)
      ConfigNodePropertyIntegerObject <- ConfigNodePropertyInteger$new()
      self$`max.errors.to.blacklist` <- ConfigNodePropertyIntegerObject$fromJSON(jsonlite::toJSON(ComDayCqDamIdsImplIDSPoolManagerImplPropertiesObject$max.errors.to.blacklist, auto_unbox = TRUE))
      ConfigNodePropertyIntegerObject <- ConfigNodePropertyInteger$new()
      self$`retry.interval.to.whitelist` <- ConfigNodePropertyIntegerObject$fromJSON(jsonlite::toJSON(ComDayCqDamIdsImplIDSPoolManagerImplPropertiesObject$retry.interval.to.whitelist, auto_unbox = TRUE))
      ConfigNodePropertyIntegerObject <- ConfigNodePropertyInteger$new()
      self$`connect.timeout` <- ConfigNodePropertyIntegerObject$fromJSON(jsonlite::toJSON(ComDayCqDamIdsImplIDSPoolManagerImplPropertiesObject$connect.timeout, auto_unbox = TRUE))
      ConfigNodePropertyIntegerObject <- ConfigNodePropertyInteger$new()
      self$`socket.timeout` <- ConfigNodePropertyIntegerObject$fromJSON(jsonlite::toJSON(ComDayCqDamIdsImplIDSPoolManagerImplPropertiesObject$socket.timeout, auto_unbox = TRUE))
      ConfigNodePropertyStringObject <- ConfigNodePropertyString$new()
      self$`process.label` <- ConfigNodePropertyStringObject$fromJSON(jsonlite::toJSON(ComDayCqDamIdsImplIDSPoolManagerImplPropertiesObject$process.label, auto_unbox = TRUE))
      ConfigNodePropertyIntegerObject <- ConfigNodePropertyInteger$new()
      self$`connection.use.max` <- ConfigNodePropertyIntegerObject$fromJSON(jsonlite::toJSON(ComDayCqDamIdsImplIDSPoolManagerImplPropertiesObject$connection.use.max, auto_unbox = TRUE))
    }
  )
)
0cd70364ce97e4007ebc740ec25403115d0b0554
16e799f7242507c6168cbea9c2da9c7522d36a85
/assignments/fishery_stats.R
932ff2a696ab11ebe38485b8e79ee2cd787c18d3
[]
no_license
eristig/ESM262-1
6fc035328563d684da502fab3d6e4c85f73ccdec
b2414d073f624424f312eeca0d69a3ff91ae4310
refs/heads/master
2021-02-19T03:47:50.641382
2020-03-05T21:45:21
2020-03-05T21:45:21
null
0
0
null
null
null
null
UTF-8
R
false
false
934
r
fishery_stats.R
### Assignment 2: Fishing

# Possible fish species and fishing locations.
possible.fish <- c("salmon", "steelhead", "shark", "tuna", "cod")
possible.locations <- c("A", "B", "C", "D", "E")

# One random price (uniform on [5, 10]) per species.
fish.prices <- runif(min = 5, max = 10, n = length(possible.fish))
prices <- data.frame(fish = possible.fish, price = fish.prices)

# Use sample() to simulate a random recording of catch by a fisherman:
# 50 fish from the net at each of two sites, with different species mixes.
catch_reef <- sample(possible.fish, size = 50, prob = c(0.2, 0.2, 0.1, 0.1, 0.4), replace = TRUE)
catch_ocean <- sample(possible.fish, size = 50, prob = c(0.4, 0.2, 0.1, 0.1, 0.2), replace = TRUE)
catch <- data.frame(catch_reef, catch_ocean)
catch

# fishery_stats: summarize the most common species caught at each location.
#
# catch - a data frame with one column per location; each column is a
#         character or factor vector of species names, one entry per fish.
#
# Returns a data frame with one row per location: the location name (V1),
# the most frequently caught species there (V2), and its count (V3).
#
# Fix: the original used summary(catch[[i]]), which only tabulates counts for
# factor columns; since R 4.0 data.frame() keeps strings as character, and
# summary() of a character vector returns Length/Class/Mode instead of counts.
# table() tabulates both character and factor columns correctly.
fishery_stats <- function(catch) {
  data <- as.data.frame(matrix(nrow = ncol(catch), ncol = 3))
  for (i in seq_len(ncol(catch))) {
    counts <- table(catch[[i]])  # species counts for this location
    data[i, 1] <- colnames(catch)[i]
    data[i, 2] <- names(which.max(counts))
    data[i, 3] <- max(counts)
  }
  return(data)
}

fishery_stats(catch)
15092a39aa8e57bd5cf78d620f36cf5116aab83e
8114393f498302505eb893a8f3f62b5200c65fe5
/man/predict.CauseSpecificCox.Rd
2294479d8ab53787af0a5aa2fd5a5135bb84be22
[]
no_license
PablitoCho/riskRegression
838fae9645819eaa0aa5fc51f9b462c22ccc84ad
d5e995d991a2de2196ab9751b69b606fea347743
refs/heads/master
2020-12-13T19:57:41.632436
2017-06-26T12:37:23
2017-06-26T12:37:23
null
0
0
null
null
null
null
UTF-8
R
false
true
4,661
rd
predict.CauseSpecificCox.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/predict.CauseSpecificCox.R \name{predict.CauseSpecificCox} \alias{predict.CauseSpecificCox} \title{Predicting absolute risk from cause-specific Cox models} \usage{ \method{predict}{CauseSpecificCox}(object, newdata, times, cause, landmark = NA, keep.times = 1L, keep.newdata = 1L, keep.strata = 1L, se = FALSE, band = FALSE, iid = FALSE, average.iid = FALSE, nSim.band = 10000, logTransform = FALSE, productLimit = TRUE, conf.level = 0.95, store.iid = "full", ...) } \arguments{ \item{object}{The fitted cause specific Cox model} \item{newdata}{A data frame containing the values of the variables in the right hand side of 'coxph' for each subject.} \item{times}{Vector of times at which to return the estimated absolute risk.} \item{cause}{Identifies the cause of interest among the competing events.} \item{landmark}{the starting time for the computation of the cumulative risk} \item{keep.times}{Logical. If \code{TRUE} add the evaluation times to the output.} \item{keep.newdata}{Logical. If \code{TRUE} add the value of the covariates used to make the prediction in the output list.} \item{keep.strata}{Logical. If \code{TRUE} add the value of the strata used to make the prediction in the output list.} \item{se}{Logical. If \code{TRUE} add the standard errors to the output.} \item{band}{Logical. If \code{TRUE} add the confidence band to the output.} \item{iid}{Logical. If \code{TRUE} add the influence function to the output.} \item{average.iid}{Logical. If \code{TRUE} add the average of the influence function over \code{newdata} to the output.} \item{nSim.band}{the number of simulations used to compute the quantiles for the confidence bands.} \item{logTransform}{Should the confidence intervals/bands be computed on the log(-log) scale and be backtransformed. Otherwise they are computed on the original scale and truncated (if necessary).} \item{productLimit}{Logical. 
If \code{TRUE} the survival is computed using the product limit estimator.
Otherwise the exponential approximation is used (i.e. exp(-cumulative hazard)).}

\item{conf.level}{Level of confidence.}

\item{store.iid}{Implementation used to estimate the influence function and the standard error.
Can be \code{"full"} or \code{"minimal"}. See the details section of \code{\link{calcSeCSC}}.}

\item{...}{not used}
}
\value{
A list containing:
\itemize{
\item{absRisk}: (data table) the predictions for each subject (in rows) and each time (in columns).
\item{absRisk.se}: (data table) the standard errors of the predictions.
\item{absRisk.iid}: (array) the value of the influence of each subject used to fit the object (dim 3)
for each subject in newdata (dim 1) and each time (dim 2).
\item{times}: (vector) the evaluation times.
}
}
\description{
Apply formula to combine two or more Cox models into absolute risk (cumulative incidence function)
}
\details{
Note: for Cox regression models with time varying covariates it does not make sense to use this function,
because the predicted risk has to be a measurable function of the data available at the time origin.

When setting \code{logTransform} to \code{TRUE}, the standard error that is returned is before back-transformation to the original scale.
} \examples{ set.seed(5) d <- sampleData(80,outcome="comp") nd <- sampleData(4,outcome="comp") d$time <- round(d$time,1) ttt <- sort(sample(x = unique(d$time), size = 10)) # coxph function CSC.fit <- CSC(Hist(time,event)~ X3+X8,data=d, method = "breslow") x= predict(CSC.fit,newdata=nd,times=1:10,cause=1,se=1L) px=print(x) px x2 = predict(CSC.fit,newdata=nd,times=1:10,cause=1,se=1L, logTransform = TRUE) predCSC <- predict(CSC.fit, newdata = d, cause = 2, times = ttt) predCSC.se <- predict(CSC.fit, newdata = d[1:5,], cause = 2, times = ttt, se = TRUE,keep.newdata=TRUE) predCSC.iid <- predict(CSC.fit, newdata = d[1:5,], cause = 2, times = ttt, iid = TRUE) # predCSC.se$absRisk.se # sqrt(apply(predCSC.iid$absRisk.iid[,1,]^2,1,function(x){sum(x)})) ## strata CSC.fit.s <- CSC(list(Hist(time,event)~ strata(X1)+X2+X9, Hist(time,event)~ X2+strata(X4)+X8+X7),data=d, method = "breslow") predict(CSC.fit.s,cause=1,times=ttt,se=1L) # cph function CSC.cph <- CSC(Hist(time,event)~ X1+X2,data=d, method = "breslow", fitter = "cph") predict(CSC.cph, newdata = d, cause = 2, times = ttt) # landmark analysis T0 <- 1 predCSC_afterT0 <- predict(CSC.fit, newdata = d, cause = 2, times = ttt[ttt>T0], landmark = T0) predCSC_afterT0 } \author{ Brice Ozenne broz@sund.ku.dk, Thomas A. Gerds tag@biostat.ku.dk }
4ef4c7fdf9dd047830dbd312f6c61c0b72201829
5d1e0788c8732862077768cf3bf27e2e1dd9affa
/plot1.R
bf500f6c7bafa03202127dea921d4eeb1159652f
[]
no_license
trier257/ExData_Plotting1
2fe346ca3b676353edf4fec97e0e7726c7155c6b
d86572b2a800942b9f34c75beecdca90e78c0871
refs/heads/master
2020-11-30T16:17:27.282069
2014-06-07T01:03:16
2014-06-07T01:03:16
null
0
0
null
null
null
null
UTF-8
R
false
false
1,373
r
plot1.R
# plot1.R -- histogram of Global Active Power for 2007-02-01 and 2007-02-02.
# This script requires household_power_consumption.txt (already unzipped) in
# the current working directory.  The dataset is large but still fits in
# memory on an ordinary PC.
power_df <- read.table(
  "household_power_consumption.txt",
  sep = ";", header = TRUE, stringsAsFactors = FALSE, na.strings = "?"
)

# Combine the character Date and Time fields into a single date-time value
# using strptime.
# http://www.stat.berkeley.edu/classes/s133/dates.html
power_df$DateTime <- paste(power_df$Date, power_df$Time)
power_df$DateTime <- strptime(power_df$DateTime, format = "%d/%m/%Y %H:%M:%S")

# The assignment only uses Feb 1, 2007 through the end of Feb 2, 2007.
# Keep just those rows, and only Global_active_power plus DateTime
# (columns 3 and 10).
window_start <- strptime("01/02/2007 00:00:00", format = "%d/%m/%Y %H:%M:%S")
window_end   <- strptime("03/02/2007 00:00:00", format = "%d/%m/%Y %H:%M:%S")
in_window <- power_df$DateTime >= window_start & power_df$DateTime < window_end
gap_df <- power_df[which(in_window), c(3, 10)]

# Open the PNG device at the required 480x480; the reference figure has a
# transparent background.
png(file = "plot1.png", bg = "transparent", width = 480, height = 480)

# Histogram matching the assignment's plot 1.
hist(
  gap_df$Global_active_power,
  main = "Global Active Power",
  xlab = "Global Active Power (kilowatts)",
  col = "red"
)

dev.off()
4e4673d5a60f39c6d5380e15e7b77c6314da205c
c4eceeca4f0ef6f9aa3cc803c50db867da22fc12
/R/hgrm2.R
3eb43db34adace4a8cfbc46ae997b926034b6c38
[]
no_license
xiangzhou09/hIRT
f55191377777aa8e59d21d1c6cef6349d7e3b284
b33d90f13287f60f7feffd0c061858340dff40a6
refs/heads/master
2021-12-30T05:05:26.010542
2021-12-14T21:14:23
2021-12-14T21:14:23
97,876,422
8
1
null
null
null
null
UTF-8
R
false
false
11,011
r
hgrm2.R
#' Hierarchical Graded Response Models with Known Item Parameters #' #' \code{hgrm2} fits a hierarchical graded response model where the item parameters #' are known and supplied by the user. #' #' @param y A data frame or matrix of item responses. #' @param x An optional model matrix, including the intercept term, that predicts the #' mean of the latent preference. If not supplied, only the intercept term is included. #' @param z An optional model matrix, including the intercept term, that predicts the #' variance of the latent preference. If not supplied, only the intercept term is included. #' @param item_coefs A list of known item parameters. The parameters of item \eqn{j} are given #' by the \eqn{j}th element, which should be a vector of length \eqn{H_j}, containing #' \eqn{H_j - 1} item difficulty parameters (in descending order) and one item discrimination #' parameter. #' @param control A list of control values #' \describe{ #' \item{max_iter}{The maximum number of iterations of the EM algorithm. #' The default is 150.} #' \item{eps}{Tolerance parameter used to determine convergence of the #' EM algorithm. Specifically, iterations continue until the Euclidean #' distance between \eqn{\beta_{n}} and \eqn{\beta_{n-1}} falls under \code{eps}, #' where \eqn{\beta} is the vector of item discrimination parameters. #' \code{eps}=1e-4 by default.} #' \item{max_iter2}{The maximum number of iterations of the conditional #' maximization procedures for updating \eqn{\gamma} and \eqn{\lambda}. #' The default is 15.} #' \item{eps2}{Tolerance parameter used to determine convergence of the #' conditional maximization procedures for updating \eqn{\gamma} and #' \eqn{\lambda}. Specifically, iterations continue until the Euclidean #' distance between two consecutive log likelihoods falls under \code{eps2}. #' \code{eps2}=1e-3 by default.} #' \item{K}{Number of Gauss-Legendre quadrature points for the E-step. 
#' The default is 25.}
#'   \item{C}{[-C, C] sets the range of integral in the E-step. \code{C}=4 by default.}
#' }
#'
#' @return An object of class \code{hgrm}.
#'  \item{coefficients}{A data frame of parameter estimates, standard errors,
#'    z values and p values.}
#'  \item{scores}{A data frame of EAP estimates of latent preferences and
#'    their approximate standard errors.}
#'  \item{vcov}{Variance-covariance matrix of parameter estimates.}
#'  \item{log_Lik}{The log-likelihood value at convergence.}
#'  \item{N}{Number of units.}
#'  \item{J}{Number of items.}
#'  \item{H}{A vector denoting the number of response categories for each item.}
#'  \item{ylevels}{A list showing the levels of the factorized response categories.}
#'  \item{p}{The number of predictors for the mean equation.}
#'  \item{q}{The number of predictors for the variance equation.}
#'  \item{control}{List of control values.}
#'  \item{call}{The matched call.}
#' @importFrom rms lrm.fit
#' @importFrom pryr compose
#' @importFrom pryr partial
#' @import stats
#' @export
#' @examples
#'
#' y <- nes_econ2008[, -(1:3)]
#' x <- model.matrix( ~ party * educ, nes_econ2008)
#' z <- model.matrix( ~ party, nes_econ2008)
#'
#' n <- nrow(nes_econ2008)
#' id_train <- sample.int(n, n/4)
#' id_test <- setdiff(1:n, id_train)
#'
#' y_train <- y[id_train, ]
#' x_train <- x[id_train, ]
#' z_train <- z[id_train, ]
#'
#' mod_train <- hgrm(y_train, x_train, z_train)
#'
#' y_test <- y[id_test, ]
#' x_test <- x[id_test, ]
#' z_test <- z[id_test, ]
#'
#' item_coefs <- lapply(coef_item(mod_train), `[[`, "Estimate")
#'
#' model_test <- hgrm2(y_test, x_test, z_test, item_coefs = item_coefs)
hgrm2 <- function(y, x = NULL, z = NULL, item_coefs, control = list()) {

    # match call (stored in the returned object)
    cl <- match.call()

    # check y and convert y into a data.frame if needed
    if (missing(y)) stop("`y` must be provided.")
    if ((!is.data.frame(y) && !is.matrix(y)) || ncol(y) == 1L)
        stop("'y' must be either a data.frame or a matrix with at least two columns.")
    if (is.matrix(y)) y <- as.data.frame(y)

    # number of units and items
    N <- nrow(y)
    J <- ncol(y)

    # convert each y_j into an integer vector (1 = lowest category)
    y[] <- lapply(y, factor, exclude = c(NA, NaN))
    ylevels <- lapply(y, levels)
    y[] <- lapply(y, as.integer)
    if (!is.na(invalid <- match(TRUE, vapply(y, invalid_grm, logical(1L)))))
        stop(paste(names(y)[invalid], "does not have at least two valid responses"))
    H <- vapply(y, max, integer(1L), na.rm = TRUE)

    # extract item parameters supplied by the user
    if (missing(item_coefs)) stop("`item_coefs` must be supplied.")
    if (!is.list(item_coefs) || length(item_coefs) != J)
        stop("`item_coefs` must be a list of `ncol(y)` elements")
    item_coefs_H <- vapply(item_coefs, length, integer(1L))
    # FIX: all.equal() returns a character string (not FALSE) on mismatch, so
    # the original `!all.equal(...)` threw "invalid argument type" instead of
    # the intended message; wrap in isTRUE() and drop names before comparing.
    if (!isTRUE(all.equal(unname(item_coefs_H), unname(H))))
        stop("`item_coefs` do not match the number of response categories in `y`")
    # alpha_j: item cutpoints padded with +/-Inf; beta_j: discrimination
    alpha <- lapply(item_coefs, function(x) c(Inf, x[-length(x)], -Inf))
    beta <- vapply(item_coefs, function(x) x[[length(x)]], double(1L))

    # check x and z (both should contain an intercept column); default to
    # intercept-only design matrices
    x <- x %||% as.matrix(rep(1, N))
    z <- z %||% as.matrix(rep(1, N))
    if (!is.matrix(x)) stop("`x` must be a matrix.")
    if (!is.matrix(z)) stop("`z` must be a matrix.")
    if (nrow(x) != N || nrow(z) != N)
        stop("both 'x' and 'z' must have the same number of rows as 'y'")
    p <- ncol(x)
    q <- ncol(z)
    colnames(x) <- colnames(x) %||% paste0("x", 1:p)
    colnames(z) <- colnames(z) %||% paste0("x", 1:q)

    # control parameters (user values override the defaults)
    con <- list(max_iter = 150, max_iter2 = 15, eps = 1e-03, eps2 = 1e-03, K = 25, C = 4)
    con[names(control)] <- control

    # point package-internal utility functions at this call's environment
    environment(loglik_grm) <- environment(theta_post_grm) <- environment(dummy_fun_grm) <- environment(tab2df_grm) <- environment()

    # Gauss-Legendre quadrature points, scaled to [-C, C]
    K <- con[["K"]]
    theta_ls <- con[["C"]] * GLpoints[[K]][["x"]]
    qw_ls <- con[["C"]] * GLpoints[[K]][["w"]]

    # simple imputation of missing responses (for the PCA start values only)
    y_imp <- y
    if (anyNA(y)) y_imp[] <- lapply(y, impute)

    # PCA-based initial values of theta_eap (standardized first component)
    theta_eap <- {
        tmp <- princomp(y_imp, cor = TRUE)$scores[, 1]
        (tmp - mean(tmp, na.rm = TRUE))/sd(tmp, na.rm = TRUE)
    }

    # initial values of gamma (mean equation) and lambda (variance equation)
    lm_opr <- tcrossprod(solve(crossprod(x)), x)
    gamma <- lm_opr %*% theta_eap
    lambda <- rep(0, q)
    fitted_mean <- as.double(x %*% gamma)
    fitted_var <- rep(1, N)

    # EM algorithm (item parameters are fixed; only gamma/lambda are updated)
    for (iter in seq_len(con[["max_iter"]])) {

        # store previous parameters for the convergence check
        gamma_prev <- gamma
        lambda_prev <- lambda

        # E-step: posterior weights w_ik of each quadrature point per unit
        posterior <- Map(theta_post_grm, theta_ls, qw_ls)
        w <- {
            tmp <- matrix(unlist(posterior), N, K)
            t(sweep(tmp, 1, rowSums(tmp), FUN = "/"))
        }

        # (item-parameter maximization from hgrm() intentionally disabled here,
        # since item parameters are supplied by the user:)
        # pseudo_tab <- Map(dummy_fun_grm, y, H)
        # pseudo_y <- lapply(pseudo_tab, tab2df_grm, theta_ls = theta_ls)
        # pseudo_lrm <- lapply(pseudo_y, function(df) lrm.fit(df[["x"]], df[["y"]], weights = df[["wt"]])[["coefficients"]])
        # beta <- vapply(pseudo_lrm, function(x) x[[length(x)]], double(1L))
        # alpha <- lapply(pseudo_lrm, function(x) c(Inf, x[-length(x)], -Inf))

        # EAP and VAP estimates of latent preferences
        theta_eap <- t(theta_ls %*% w)
        theta_vap <- t(theta_ls^2 %*% w) - theta_eap^2

        # M-step: mean and variance regressions
        gamma <- lm_opr %*% theta_eap
        r2 <- (theta_eap - x %*% gamma)^2 + theta_vap
        if (ncol(z) == 1) lambda <- log(mean(r2)) else {
            # iterate weighted mean regression and Gamma/log variance
            # regression until the working log-likelihood stabilizes
            s2 <- glm.fit(x = z, y = r2, intercept = FALSE, family = Gamma(link = "log"))[["fitted.values"]]
            loglik <- -0.5 * (log(s2) + r2/s2)
            LL0 <- sum(loglik)
            dLL <- 1
            for (m in seq_len(con[["max_iter2"]])) {
                gamma <- lm.wfit(x, theta_eap, w = 1/s2)[["coefficients"]]
                r2 <- (theta_eap - x %*% gamma)^2 + theta_vap
                var_reg <- glm.fit(x = z, y = r2, intercept = FALSE, family = Gamma(link = "log"))
                s2 <- var_reg[["fitted.values"]]
                loglik <- -0.5 * (log(s2) + r2/s2)
                LL_temp <- sum(loglik)
                dLL <- LL_temp - LL0
                if (dLL < con[["eps2"]]) break
                LL0 <- LL_temp
            }
            lambda <- var_reg[["coefficients"]]
        }
        fitted_mean <- as.double(x %*% gamma)
        fitted_var <- exp(as.double(z %*% lambda))
        cat(".")
        # convergence: relative change in gamma below eps
        if (sqrt(mean((gamma/gamma_prev - 1)^2)) < con[["eps"]]) {
            cat("\n converged at iteration", iter, "\n")
            break
        } else if (iter == con[["max_iter"]]) {
            stop("algorithm did not converge; try increasing `max_iter` or decreasing `eps`")
        } else next
    }

    gamma <- setNames(as.double(gamma), paste0("x", colnames(x)))
    lambda <- setNames(as.double(lambda), paste0("z", colnames(z)))

    # inference: marginal likelihood pieces at each quadrature point
    pik <- matrix(unlist(Map(partial(dnorm, x = theta_ls), mean = fitted_mean,
        sd = sqrt(fitted_var))), N, K, byrow = TRUE) * matrix(qw_ls, N, K, byrow = TRUE)
    Lijk <- lapply(theta_ls, function(theta_k) exp(loglik_grm(alpha = alpha,
        beta = beta, rep(theta_k, N))))  # K-list
    Lik <- vapply(Lijk, compose(exp, partial(rowSums, na.rm = TRUE), log), double(N))
    Li <- rowSums(Lik * pik)

    # log likelihood
    log_Lik <- sum(log(Li))

    # outer product of gradients for the covariance matrix
    environment(sj_ab_grm) <- environment(si_gamma) <- environment(si_lambda) <- environment()
    # s_ab <- unname(Reduce(rbind, lapply(1:J, sj_ab_grm)))
    s_gamma <- vapply(seq_len(N), si_gamma, double(p))
    s_lambda <- vapply(seq_len(N), si_lambda, double(q))

    # covariance matrix (item parameters are fixed, so only gamma/lambda enter)
    s_all <- rbind(s_gamma, s_lambda)
    s_all[is.na(s_all)] <- 0
    covmat <- tryCatch(solve(tcrossprod(s_all)), error = function(e) {
        warning("The information matrix is singular; SE calculation failed.")
        matrix(NA, nrow(s_all), nrow(s_all))
    })

    # standard errors: zeros for the (fixed) item parameters, then gamma/lambda
    sH <- sum(H)
    gamma_indices <- (sH - 1):(sH + p - 2)
    lambda_indices <- (sH + p - 1):(sH + p + q - 2)
    se_all <- c(rep(0, sH), sqrt(diag(covmat)))

    # name se_all and covmat
    names_ab <- unlist(lapply(names(alpha), function(x) {
        tmp <- alpha[[x]]
        paste(x, c(paste0("y>=", seq(2, length(tmp) - 1)), "Dscrmn"))
    }))
    names(se_all) <- c(names_ab, names(gamma), names(lambda))
    rownames(covmat) <- colnames(covmat) <- c(names(gamma), names(lambda))

    # item coefficients (cutpoints without the Inf padding, plus discrimination)
    coef_item <- Map(function(a, b) c(a[-c(1L, length(a))], Dscrmn = b), alpha, beta)

    # all coefficients
    coef_all <- c(unlist(coef_item), gamma, lambda)
    coefs <- data.frame(Estimate = coef_all, Std_Error = se_all,
        z_value = coef_all/se_all, p_value = 2 * (1 - pnorm(abs(coef_all/se_all))))
    rownames(coefs) <- names(se_all)

    # ability parameter estimates
    theta <- data.frame(post_mean = theta_eap, post_sd = sqrt(theta_vap),
        prior_mean = fitted_mean, prior_sd = sqrt(fitted_var))

    # output
    out <- list(coefficients = coefs, scores = theta, vcov = covmat,
        log_Lik = log_Lik, N = N, J = J, H = H, ylevels = ylevels, p = p,
        q = q, control = con, call = cl)
    class(out) <- c("hgrm", "hIRT")
    out
}
30b2864595e7aaa5479a9e4ef4aa4779a8398705
00cf6a882ff6436daf8e7cb511c6334dc0c387cf
/00_build_product_metadata.R
1f118a605fb2591590d78b37ac41b26ff2f7f810
[]
no_license
dahcase/earth_engine
09183f95c4fc114e45d10dbb573f9549ca01e72f
43d8fdc500ffba0f274b5234edbe70fdefd3aca0
refs/heads/master
2021-06-26T12:55:39.076946
2019-02-10T01:40:25
2019-02-10T01:40:25
116,109,534
0
1
null
null
null
null
UTF-8
R
false
false
3,764
r
00_build_product_metadata.R
# Build per-product metadata tables for satellite (mostly MODIS) datasets.
# Each table row is one product/variable combination with its valid value
# range (lower/upper), QA settings and scaling factor.
# NOTE(review): presumably consumed by the Earth Engine extraction scripts in
# this repository -- confirm downstream usage before renaming columns.
library('data.table')

#Land surface temperature (MOD11A2 & MYD11A2)
lst = setDT(expand.grid(product =c('MOD11A2','MYD11A2'), version = '006',
                        variables = c('LST_Day_1km', 'LST_Night_1km'),
                        lower = 7500, upper = 65535, qa_na = 0, scale = .02,
                        sensor = 'MODIS', qa_bits = 8,
                        year_start = 2001, year_end = 2016, stringsAsFactors = F))
# QA layer mirrors the day/night suffix of the variable name (QC_Day / QC_Night)
lst[ , qa_layer := paste0('QC_', tstrsplit(variables, "_")[[2]])]
lst[ , qa_product := product]
# MYD11A2 rows get a later start year -- presumably because the Aqua record
# begins in 2002; TODO confirm.
lst[ product == 'MYD11A2',year_start := 2002]

#NDVI and EVI from MOD13A1 and MYD13A1
vis = setDT(expand.grid(product = c('MOD13A1','MYD13A1'), version = '006',
                        variables = c('EVI', 'NDVI'),
                        lower = -2000, upper = 10000, qa_na = 65535, scale = .0001,
                        qa_layer = 'DetailedQA', qa_bits = 16, sensor = 'MODIS',
                        year_start = 2001, year_end = 2016, pixel_size = 500,
                        stringsAsFactors = F))
vis[,qa_product:= product]

#night time lights
# (no variable/QA metadata here: pulled straight from a GEE image collection)
ntl = setDT(expand.grid(product = 'DMSPNTL', version = 4, sensor = 'DMSPNTL',
                        gee_collection_id = 'NOAA/DMSP-OLS/CALIBRATED_LIGHTS_V4',
                        stringsAsFactors = F))

#Reflectance
reflect = setDT(expand.grid(product = c('MOD09A1'), version = '006',
                            variables = paste0('sur_refl_b0',1:7),
                            lower = -100, upper = 16000, qa_na = 4294967295,
                            scale = .0001, qa_layer = 'QA', qa_bits = 32,
                            sensor = 'MODIS',
                            year_start = 2001, year_end = 2016, stringsAsFactors = F))

#Albedo reflect
albedo = setDT(expand.grid(product = c('MCD43A3'), version = '006',
                           variables = c(paste0('Albedo_BSA_Band',c(1:7,'_vis','_nir','_shortwave')),
                                         paste0('Albedo_WSA_Band',c(1:7,'_vis','_nir','_shortwave'))),
                           lower = 0, upper = 32766, qa_na = 2, scale = .001,
                           qa_layer = 'QA', qa_bits = 1, sensor = 'MODIS',
                           year_start = 2001, year_end = 2016, stringsAsFactors = F))
#overwrite the QA layer names
# (one mandatory-quality band per albedo band/spectral grouping)
albedo[,qa_layer:= paste0('BRDF_Albedo_Band_Mandatory_Quality_',
                          c(paste0('Band',1:7),'vis','nir','shortwave'))]

#brdf reflect
brdf = setDT(expand.grid(product = 'MCD43A4', version = '006',
                         variables = c(paste0('Nadir_Reflectance_Band',1:7)),
                         lower = 0, upper = 32766,
                         qa_product = 'MCD43A4', #MCD43A2 is the full QA
                         scale = .001, qa_na = -9999, sensor = 'MODIS',
                         year_start = 2001, year_end = 2016, stringsAsFactors = F))
#overwrite the QA layer names
brdf[,qa_layer:= paste0('BRDF_Albedo_Band_Mandatory_Quality_', c(paste0('Band',1:7)))]
a95b6579538dafa0e870253da2a49859395e383a
a24efd391f493d356c0fe3f94ebc36e6eebd4714
/R/days_to_numeric.r
d4d284a7aa4dc0052b3e621bf6d74f8a619d94f2
[]
no_license
dabidou025/Lockdown-Forecast
7eb0bfe8fedb5f2289b9796cc5c1b6bc3023848c
400f10c18c85e53bb5851c488471bce80c957dc0
refs/heads/main
2023-04-01T07:40:25.668489
2021-04-04T11:41:24
2021-04-04T11:41:24
354,528,256
0
0
null
null
null
null
UTF-8
R
false
false
461
r
days_to_numeric.r
days_to_numeric = function (data) { data$WeekDays[data$WeekDays=='Monday'] = 1 data$WeekDays[data$WeekDays=='Tuesday'] = 2 data$WeekDays[data$WeekDays=='Wednesday'] = 3 data$WeekDays[data$WeekDays=='Thursday'] = 4 data$WeekDays[data$WeekDays=='Friday'] = 5 data$WeekDays[data$WeekDays=='Saturday'] = 6 data$WeekDays[data$WeekDays=='Sunday'] = 7 data$WeekDays = as.numeric(data$WeekDays) return (data$WeekDays) }
d673d9b9a2661dfb5a3cb4c7b4c18629c34776ee
a4bcc17bab2f4ac27c37f8f0027168211a9bc24d
/man/lookupGenes.Rd
41d42707bb4298724c2794264127d34c0b562fde
[]
no_license
hcnh174/hlsgr
7da6307f535038d00e9030eab61e1a0045fc745a
23b0d0cce887eef200c90fccb5a667b4fc1f37cd
refs/heads/master
2023-04-13T18:44:28.519716
2023-04-03T05:38:45
2023-04-03T05:38:45
262,727,333
0
0
null
null
null
null
UTF-8
R
false
true
455
rd
lookupGenes.Rd
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rnaseq.R
\name{lookupGenes}
\alias{lookupGenes}
\title{Return a vector of gene names corresponding to a vector of Ensembl IDs}
\usage{
lookupGenes(ensembl_ids)
}
\arguments{
\item{ensembl_ids}{vector of ensembl IDs}
}
\value{
A vector of gene names, one per element of \code{ensembl_ids}.
}
\description{
Return a vector of gene names corresponding to a vector of Ensembl IDs
}
\examples{
lookupGenes(c('ENSG00000211459', 'ENSG00000211958'))
}
f66993d4ba8a1afc2c1b0a134e4dd73c4c3f4718
be2d33a5547d69d7339351366c5a1a309f4455dd
/fcd/R/plan.R
c9b4b096a7e8469c91106c1aca082d1a189a573c
[ "MIT" ]
permissive
thebioengineer/drake-examples
3e3f1f6e0ae9942b70333238875dd65849650ee7
e7009496a9fa8495faa32806eb191ad9ae33786d
refs/heads/master
2020-08-04T19:49:58.862394
2019-10-09T16:28:03
2019-10-09T16:28:03
212,259,584
0
0
null
2019-10-02T05:07:05
2019-10-02T05:07:04
null
UTF-8
R
false
false
383
r
plan.R
#Experimental Conditions conds <- fcdConds(nPeople = c(10000,1000,100), pEdge = c(0.75,0.5,0.25,0.1), topN = c(NA, 3, 5, 10)) plan <- drake_plan( ddist_i = target( fcdSim(nSims = 50, nPeople = nPeople,pEdge = pEdge, topN = topN), transform = map(nPeople, pEdge, topN, .data = !!conds) ), ddist = target( bind_rows(ddist_i), transform = combine(ddist_i) ) )
a05afa87352479a24c6788ecf347fc3e86be442a
ccd5803bb5b558dcd89b4818b7a42b00cf63e610
/R/format.R
a318fc8fdb8049eec20651b5562ca5fd26fbeee6
[ "MIT" ]
permissive
labouz/recipes
bbcdedce9116745df3bb69d88d9aacce26026fe8
620ebfe67d59e3e440fe203b51bdb87ceb542702
refs/heads/master
2021-11-15T09:57:24.236122
2021-04-22T18:41:14
2021-04-22T18:41:14
237,512,319
0
0
NOASSERTION
2021-04-23T23:39:04
2020-01-31T20:40:30
HTML
UTF-8
R
false
false
1,043
r
format.R
#' Helpers for printing step functions #' #' @param x A vector of objects. #' @param sep A character string for separating values. #' @param width An integer for when to split the output over lines. #' @return A character string #' @keywords internal #' @export format_ch_vec <- function(x, sep = ", ", width = options()$width - 9) { widths <- nchar(x) sep_wd <- nchar(sep) adj_wd <- widths + sep_wd if (sum(adj_wd) >= width) { keepers <- max(which(cumsum(adj_wd) < width)) - 1 if (length(keepers) == 0 || keepers < 1) { x <- paste(length(x), "items") } else { x <- c(x[1:keepers], "...") } } paste0(x, collapse = sep) } #' @keywords internal #' @rdname format_ch_vec #' @export format_selectors <- function(x, width = options()$width - 9) { ## convert to character without the leading ~ x_items <- lapply(x, function(x) { expr_deparse(quo_get_expr(x)) }) x_items <- unlist(x_items) format_ch_vec(x_items, width = width, sep = ", ") }
75c8c10127ac91ef6081bc9db1b9392fc1292731
4bcffce1d7b8094e5dd462705ad73834da8aed0e
/plot4.R
ba87a636aba245a677b6c46e0cf29f0a99859458
[]
no_license
NicoHeredia/ExData_Plotting1
ff02715aa8aacc3c8ff344c7528459b53bd83708
0b47ef482913dd7488cb60806496e9205e007639
refs/heads/master
2022-11-14T07:26:44.862702
2020-07-12T20:45:12
2020-07-12T20:45:12
279,129,446
0
0
null
2020-07-12T19:00:08
2020-07-12T19:00:07
null
UTF-8
R
false
false
1,211
r
plot4.R
FileDat <- "household_power_consumption.txt" PowerData <- read.table(FileDat, header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".") SubsetDat <- PowerData[PowerData$Date %in% c("1/2/2007","2/2/2007") ,] #str(SubsetDat) GlobalActPower <- as.numeric(SubsetDat$Global_active_power) TimeDat <- strptime(paste(SubsetDat$Date, SubsetDat$Time, sep=" "), "%d/%m/%Y %H:%M:%S") SubMet1 <- as.numeric(SubsetDat$Sub_metering_1) SubMet2 <- as.numeric(SubsetDat$Sub_metering_2) SubMet3 <- as.numeric(SubsetDat$Sub_metering_3) GlobalReactPower <- as.numeric(SubsetDat$Global_reactive_power) Volt <- as.numeric(SubsetDat$Voltage) #plotting png("plot4.png", width=480, height=480) par(mfrow=c(2,2)) plot(TimeDat, GlobalActPower, type="l", xlab="", ylab="Global Active Power") plot(TimeDat, Volt, type="l", xlab="datetime", ylab="Voltage") plot(TimeDat, SubMet1, type="l", xlab="", ylab="Energy sub metering") lines(TimeDat, SubMet2, type="l", col="red") lines(TimeDat, SubMet3, type="l", col="blue") legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, lwd=2.5, col=c("black", "red", "blue")) plot(TimeDat, GlobalReactPower, type="l", xlab="datetime", ylab="Global_reactive_power") dev.off()
38645f54fdf875f9ee7c951901b7d230bdf2409c
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
/data/genthat_extracted_code/taRifx/examples/munch.Rd.R
db766865e270d49851232e72b4902fc55575584f
[]
no_license
surayaaramli/typeRrh
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
66e6996f31961bc8b9aafe1a6a6098327b66bf71
refs/heads/master
2023-05-05T04:05:31.617869
2019-04-25T22:10:06
2019-04-25T22:10:06
null
0
0
null
null
null
null
UTF-8
R
false
false
250
r
munch.Rd.R
library(taRifx) ### Name: munch ### Title: Recursively delete entries containing 'what' before entry ### pointed to by 'which' ### Aliases: munch ### ** Examples x <- c("a","","b","","","","","c","d","","","","e","") munch( x, c(3,8,9,13) )
15c1b669319f7f8a4937aa738f5d1fcf5211a535
212e49d0b5df150e4d0681451925689b9e152eba
/MESSAR_WEBSERVER/helper.r
280bb300966644aba086178a2dbdc94c356c3439
[]
no_license
daniellyz/MESSAR
7e3fa9a6fbfba378a37ded366bad6db89d6a226b
ebbe4d0b849d074d36d447d5488464ac4773b991
refs/heads/master
2023-04-14T02:19:20.990578
2023-03-28T17:06:07
2023-03-28T17:06:07
153,620,833
1
0
null
2019-07-03T14:57:32
2018-10-18T12:33:50
R
UTF-8
R
false
false
9,930
r
helper.r
# helper.r -- utility functions for the MESSAR web server.
#
# NOTE(review): this file contained unresolved git merge conflict markers
# ("<<<<<<< HEAD" / "=======" / ">>>>>>> 7b7522d0..."), which made it
# unparseable by R. The conflicts are resolved as follows:
#   * where the two branches diverged (the aggregate_rules() summary tables),
#     the HEAD side is kept -- it is a superset adding the F1 metric alongside
#     CONFIDENCE/LIFT/MCC;
#   * functions present on only one branch (mcc_calculator, fdr_compute,
#     get_spectrum_list) are all retained so no existing caller breaks.
# TODO: confirm this resolution against the intended branch history.

# Matthews correlation coefficient for each association rule, derived from its
# support, confidence and the marginal probabilities Px and Py.
mcc_calculator <- function(rules){
  P_A_B = rules$support/rules$Py
  tpr = rules$support                     # true positive rate
  fpr = (1 - P_A_B)*rules$Py              # false positive rate
  fnr = (1 - rules$confidence)*rules$Px   # false negative rate
  tnr = 1 - tpr - fpr - fnr               # true negative rate
  MCC = (tpr*tnr - fpr*fnr)/sqrt((tpr+fpr)*(tpr+fnr)*(tnr+fpr)*(tnr+fnr))
  return(MCC)
}

# Return the indices of reference features whose components all match, within
# `ppm_search` ppm and with the same feature type ('mass'/'mass_diff'), one of
# the query masses or mass differences. Returns NULL when nothing matches.
search_rules <- function(ref_feature, ref_feature_type, mass, mass_diff, ppm_search){
  # Combine masses and mass differences into one typed query vector:
  test_feature = c(mass, mass_diff)
  test_feature_type = c(rep('mass', length(mass)), rep("mass_diff", length(mass_diff)))
  NF = length(ref_feature)
  # Search ref features:
  matched_feature = c()
  for (i in 1:NF){
    valid = 1
    feature = ref_feature[[i]]
    feature_type = ref_feature_type[[i]]
    for (f in 1:length(feature)){
      # ppm error of every query feature against this reference component
      errors = abs((feature[f]-test_feature)/feature[f])*1000000
      min_error = min(errors)
      index_error = which.min(errors)
      if ((min_error <= ppm_search) & (test_feature_type[index_error] == feature_type[f])){
        # component matched: keep checking the remaining components
      } else {
        valid = 0
        break
      }
    }
    if (valid == 1){ # every component of the reference feature matched
      matched_feature = c(matched_feature, i)
    }
  }
  return(matched_feature)
}

# Parts-per-million error of x relative to ref (vectorized).
ppm_calc <- function(x, ref){
  errors = abs(x-ref)/ref*1000000
  return(errors)
}

# Similarity of candidate SMILES strings to the true structure using MACCS
# fingerprints (requires the rcdk/fingerprint packages).
compute_similariy <- function(candidates, true_structure){
  # Compare list of candidates (SMILES code) with the true structure
  candidates_finger <- lapply(parse.smiles(candidates), get.fingerprint, type='maccs')
  true_structure_finger <- get.fingerprint(parse.smiles(true_structure)[[1]], type="maccs")
  fp.distance = sapply(candidates_finger, function(x) 1-distance(x,true_structure_finger))
  return(fp.distance)
}

# Pairwise similarity between two SMILES strings; falls back to 1 when either
# structure cannot be parsed/fingerprinted.
fn <- function(x, y){
  x_finger <- try(get.fingerprint(parse.smiles(x)[[1]], type='maccs'), silent=T)
  y_finger <- try(get.fingerprint(parse.smiles(y)[[1]], type='maccs'), silent=T)
  if ((class(x_finger)!="try-error") && (class(y_finger)!="try-error")){
    fp.distance = 1-distance(x_finger,y_finger)
  } else {
    fp.distance = 1
  }
  return(fp.distance)
}

# First number (optional sign/decimals) found in a string (stringr).
numextract <- function(string){
  as.numeric(str_extract(string, "\\-*\\d+\\.*\\d*"))
}

# Group extracted rules by substructure and build SUM / MEDIAN / MAX summary
# tables of CONFIDENCE, LIFT, MCC and F1 per substructure.
aggregate_rules <- function(rules_extracted){
  # Group rules by their substructure
  combined_type = aggregate(rules_extracted$SPECTRAL_FEATURE_TYPE, list(rules_extracted$SUBSTRUCTURE), toString)[,2]
  combined_features = aggregate(rules_extracted$SPECTRAL_FEATURE, list(rules_extracted$SUBSTRUCTURE), toString)[,2]
  combined_type = sapply(combined_type, clean_feature_text)
  combined_features = sapply(combined_features, clean_feature_text)

  sum_confidence = aggregate(rules_extracted$CONFIDENCE, list(rules_extracted$SUBSTRUCTURE), sum)
  sum_lift = aggregate(rules_extracted$LIFT, list(rules_extracted$SUBSTRUCTURE), sum)[,2]
  sum_mcc = aggregate(rules_extracted$MCC, list(rules_extracted$SUBSTRUCTURE), sum)[,2]
  sum_f1 = aggregate(rules_extracted$F1, list(rules_extracted$SUBSTRUCTURE), sum)[,2]
  sum_aggregated = cbind.data.frame(sum_confidence, sum_lift, sum_mcc, sum_f1)
  colnames(sum_aggregated) = c("SUBSTRUCTURE", "CONFIDENCE", "LIFT", "MCC", "F1")

  median_confidence = aggregate(rules_extracted$CONFIDENCE, list(rules_extracted$SUBSTRUCTURE), median)
  median_lift = aggregate(rules_extracted$LIFT, list(rules_extracted$SUBSTRUCTURE), median)[,2]
  median_mcc = aggregate(rules_extracted$MCC, list(rules_extracted$SUBSTRUCTURE), median)[,2]
  median_f1 = aggregate(rules_extracted$F1, list(rules_extracted$SUBSTRUCTURE), median)[,2]
  median_aggregated = cbind.data.frame(median_confidence, median_lift, median_mcc, median_f1)
  colnames(median_aggregated) = c("SUBSTRUCTURE", "CONFIDENCE", "LIFT", "MCC", "F1")

  max_confidence = aggregate(rules_extracted$CONFIDENCE, list(rules_extracted$SUBSTRUCTURE), max)
  max_lift = aggregate(rules_extracted$LIFT, list(rules_extracted$SUBSTRUCTURE), max)[,2]
  max_mcc = aggregate(rules_extracted$MCC, list(rules_extracted$SUBSTRUCTURE), max)[,2]
  max_f1 = aggregate(rules_extracted$F1, list(rules_extracted$SUBSTRUCTURE), max)[,2]
  max_aggregated = cbind.data.frame(max_confidence, max_lift, max_mcc, max_f1)
  colnames(max_aggregated) = c("SUBSTRUCTURE", "CONFIDENCE", "LIFT", "MCC", "F1")

  return(list(type = combined_type, features = combined_features,
              sum_aggregated = sum_aggregated,
              median_aggregated = median_aggregated,
              max_aggregated = max_aggregated))
}

# Rank substructures by one aggregated score (e.g. "CONFIDENCE"/"LIFT"/"MCC"/
# "F1") under a given aggregation type, keeping only scores at or above the
# pb_threshold percentile.
eval_rules_aggregated <- function(rule_aggregated, aggregate_type, score_type, pb_threshold){
  # Pick a type of aggregated rule evaluation
  if (aggregate_type=="SUM"){score = rule_aggregated$sum_aggregated[, score_type]}
  if (aggregate_type=="MEDIAN"){score = rule_aggregated$median_aggregated[, score_type]}
  if (aggregate_type=="MAX"){score = rule_aggregated$max_aggregated[, score_type]}
  aggregated = cbind.data.frame(rule_aggregated$sum_aggregated$SUBSTRUCTURE, score)
  colnames(aggregated) = c("SUBSTRUCTURE", score_type)
  threshold = quantile(aggregated[,2], probs = pb_threshold/100, na.rm = T)
  aggregated = aggregated[aggregated[,2] >= threshold, ]
  aggregated = aggregated[order(aggregated[,2], decreasing = T), ]
  return(aggregated)
}

# Inline an image file as an HTML <img> tag with a data URI.
img_uri <- function(x) {sprintf('<img src="%s"/>', knitr::image_uri(x))}

# Strip brackets, quotes and spaces from a toString()-collapsed feature list.
clean_feature_text <- function(feature){
  feature = str_replace_all(feature, "\\[", "")
  feature = str_replace_all(feature, " ", "")
  feature = str_replace_all(feature, "\\]", "")
  feature = str_replace_all(feature, "\\'", "")
  return(feature)
}

# Grid search over MCC/LIFT thresholds keeping the false discovery rate
# (fraction of decoy rules among retained ones) below fdr_thr while maximizing
# the number of retained target rules; fdr = -1 signals that no threshold
# pair achieves the requested FDR.
fdr_compute <- function(rules, decoy, fdr_thr){
  combined_label = c(rep("Target", nrow(rules)), rep("Decoy", nrow(decoy)))
  combined_mcc = c(rules$MCC, decoy$MCC)
  combined_lift = c(rules$LIFT, decoy$LIFT)
  NC = length(combined_label)
  NG = 100 # Number of grids
  grid_mcc = seq(min(combined_mcc), max(combined_mcc), length.out = NG)
  grid_lift = seq(min(combined_lift), max(combined_lift), length.out = NG)
  fdr_matrix = matrix(1, NG, NG)
  valid_matrix = matrix(0, NG, NG) # Nb of valid (target) rules
  for (i in 1:NG){
    for (j in 1:NG){
      valid = which(combined_mcc >= grid_mcc[i] & combined_lift >= grid_lift[j])
      if (length(valid) > 0){
        selected_label = combined_label[valid]
        fdr = sum(selected_label == "Decoy")/length(valid)
        fdr_matrix[i,j] = fdr
        valid_matrix[i,j] = sum(selected_label == "Target")
      }
    }
  }
  if (min(fdr_matrix) <= fdr_thr){
    index_fdr = which(fdr_matrix <= fdr_thr, arr.ind = T) # cells with fdr below fdr_thr
    LS = valid_matrix[index_fdr]          # Nb of target rules for each such cell
    max_size = which(LS == max(LS))       # cells keeping the most target rules
    grid_fdr = index_fdr[max_size[length(max_size)],] # highest threshold among them
    mcc_min = grid_mcc[grid_fdr[1]]
    lift_min = grid_lift[grid_fdr[2]]
    fdr = min(fdr_matrix)
  } else { # No filter applied if FDR optimization not possible
    max_size = nrow(rules)
    mcc_min = 0
    lift_min = 1
    fdr = -1 # Failed!
  }
  return(list(max_size = max_size, mcc_min = mcc_min, lift_min = lift_min, fdr = fdr))
}

# Match combined rule features back to a raw mass list: index1 holds positions
# of matched fragment masses, index2 the (from, to) index pairs of matched
# mass differences.
rule_mass_match <- function(rule_types, rule_features, masslist, ppm_search){
  rule_types = strsplit(rule_types, ",")[[1]]
  rule_features = as.numeric(strsplit(rule_features, ",")[[1]])
  # Match to raw masslist:
  index1 = c() # Index of matched mass in raw masslist
  rule_fragments = rule_features[rule_types == "mass"]
  if (length(rule_fragments) > 0){
    for (fragment in rule_fragments){
      errors = ppm_calc(masslist, fragment)
      valid = which(errors <= ppm_search)
      if (length(valid) > 0){index1 = c(index1, valid[1])}
    }
  }
  # Match to raw mass diff:
  index2 = list() # List of matched mass differences (from...to...)
  rule_mdiff = rule_features[rule_types == "mass_diff"]
  rule_mdiff = unique(rule_mdiff)
  dist_mass = data.matrix(dist(masslist))
  k = 0
  if (length(rule_mdiff) > 0){
    for (mdiff in rule_mdiff){
      errors = abs((dist_mass - mdiff)/mdiff*1000000)
      valid = which(errors < ppm_search, arr.ind = T)
      if (nrow(valid) > 0){
        k = k + 1
        index2[[k]] = as.numeric(valid[1,])
      }
    }
  }
  return(list(index1 = index1, index2 = index2))
}

# Extract each spectrum's (m/z, intensity) pairs into a two-column matrix
# (reads the @mz / @intensity S4 slots of the spectrum objects).
get_spectrum_list <- function(dat){
  spectrum_list = list()
  for (i in 1:length(dat)){
    spectrum = cbind(dat[[i]]@mz, dat[[i]]@intensity)
    spectrum = matrix(spectrum, ncol = 2)
    spectrum_list[[i]] = spectrum
  }
  return(spectrum_list)
}
b1a97b33b93fb325844de4c2532143569d8348ee
9b40acbd06b56f1df9de7438516a3230ba8c2cc4
/server.R
9244cd4b24caa9e992224d9b8d6e03538eec7021
[ "MIT" ]
permissive
Huade/UN_IdealPoints
7ca7c4ec059df758bda10c7e2677b4eefbf4290e
9276aca27ad69dd4efacaa64217c15d7671201a3
refs/heads/master
2016-09-06T07:03:32.325359
2015-04-21T15:17:33
2015-04-21T15:17:33
31,746,396
2
3
null
2015-04-01T14:17:15
2015-03-06T01:31:12
R
UTF-8
R
false
false
3,252
r
server.R
# Shiny server for the UN ideal-points trend viewer: draws per-country
# ideal-point time series and ships them to the browser as a Plotly graph.
# NOTE(review): renderGraph(), gg2list() and the fig$kwargs$layout structure
# belong to the legacy (pre-ggplotly) plotly R API -- confirm the pinned
# plotly version before upgrading.
library(shiny)
library(ggplot2)
library(plotly)
library(ggthemes)

shinyServer(function(input, output, session) {

  output$trendPlot <- renderGraph({
    # NOTE(review): print() writes to the R console, not the UI -- presumably
    # a placeholder for a proper validate()/need() message.
    if (length(input$name)==0) print("Please select at least one country")
    else {
      # `Ideal_Point_Data` is assumed to be loaded elsewhere (e.g. global.R)
      # -- TODO confirm.
      df_trend <- subset(Ideal_Point_Data, Name %in% input$name)

      # Graph title: "A", "A and B", or "A, B, and C" depending on how many
      # countries are selected
      if (length(input$name)>2) {
        j_names_comma <- paste(input$name[-length(input$name)], collapse = ', ')
        j_names <- paste(j_names_comma, ", and ", input$name[length(input$name)], sep="")
      } else{
        j_names <- paste(input$name, collapse = ' and ')
      }

      graph_title <- paste("Ideal Points for ", j_names, sep="")

      # Base ggplot: one line per country; the white hlines at +/- 2
      # presumably pad the visible y-range -- confirm intent.
      ggideal_point <- ggplot(df_trend)+
        geom_hline(aes(yintercept = 2), color = "white")+
        geom_hline(aes(yintercept = -2), color = "white")+
        geom_line(aes(x=Year, y=Ideal.point, by=Name, color=Name))+
        labs(x = "Year")+
        labs(y = "Ideology")+
        labs(title = graph_title)+
        scale_colour_hue("clarity",l=70, c=150)+
        theme_few()

      # This converts the ggplot2 graph into Plotly's format.
      # This is a list of lists which declaratively describe every attribute
      # of the plotly graph
      fig <- gg2list(ggideal_point)

      # All elements except the last are the data traces (one per line)
      data <- list()
      for(i in 1:(length(fig)-1)){data[[i]]<-fig[[i]]}

      layout <- fig$kwargs$layout
      layout$annotations <- NULL # Remove the existing annotations (the legend label)
      layout$annotations <- list()

      # Add colored text annotations next to the end of each line
      # More about plotly annotations: https://plot.ly/r/reference/#annotation
      # Each key that we update is documented in that link above.
      for(i in 1:(length(data))){ # data is a list of the lines in the graph
        layout$annotations[[i]] <- list(
          text = data[[i]]$name, # The text label of the annotation, e.g. "Canada"
          font = list(color = data[[i]]$line$color), # Match the font color to the line color
          showarrow = FALSE, # Don't show the annotation arrow
          y = data[[i]]$y[[length(data[[i]]$y)]], # set the y position of the annotation to the last point of the line
          yref = "y1", # the "y" coordinates above are with respect to the yaxis
          x = 1, # set the x position of the graph to the right hand side of the graph
          xref = "paper", # the x coordinates are with respect to the "paper", where 1 means the right hand side of the graph and 0 means the left hand side
          xanchor = "left" # position the x coordinate with respect to the left of the text
        );
      }

      layout$showlegend <- FALSE # remove the legend (the annotations replace it)
      layout$margin$r <- 170 # increase the size of the right margin to accommodate more room for the annotation labels

      # Send this message up to the browser client, which will get fed through to
      # Plotly's javascript graphing library embedded inside the graph
      return(list(
        list(
          id="trendPlot",
          task="newPlot",
          data=data,
          layout=layout
        )
      ))
    }
  })
})
36c6f23ecb6b74239c55c7672a8b68c3120c551a
a67fa37fb1ae2a624f2fc306f1b6eab457fc8197
/Hidden Markov Models/HMM.R
cae505347c13d5caf94a66a5a522cd4520571a57
[ "MIT" ]
permissive
PedramKasebzadeh/Machine_Learning
a350ef5a05c2ae953624a69c6bb66d8f6331d0ca
3ac60aa38722d1a6817a8347e58954892f9ebbb0
refs/heads/main
2023-01-24T02:19:30.162358
2020-12-11T12:15:20
2020-12-11T12:15:20
320,540,864
0
0
null
null
null
null
UTF-8
R
false
false
4,006
r
HMM.R
# Hidden Markov model lab: a robot moves around a ring of 10 sectors (S1-S10);
# each step it stays or advances one sector, and the sensor reports a sector
# within +/- 2 of the true one (with ring wrap-around).

# Task 1 - Build a hidden Markov model (HMM)
# Defining the trasmissions and the emissions probabilites matrices.
set.seed(12345)
library("HMM")

# creating the emissons probability matrix: 0.2 for the true sector and its
# two neighbours on each side; the corner assignments below add the ring
# wrap-around cells
ep= diag(0.2,10)
ep[row(ep) == (col(ep) -1)] = 0.2
ep[row(ep) == (col(ep) -2)] = 0.2
ep[row(ep) == (col(ep) +1)] = 0.2
ep[row(ep) == (col(ep) +2)] = 0.2
# NOTE: two-vector indexing fills the whole cross product of these rows and
# columns; the two spurious cells it creates are zeroed on the next line
ep[c(1,1,2,9,10,10),c(9,10,10,1,1,2)]=0.2
ep[9,2]=0;ep[2,9]=0
ep

# transProbs matrix: stay put or advance one sector, each with prob. 0.5
# (row 10 wraps back to sector 1)
tp = diag(0.5,10)
tp[row(tp)==(col(tp)-1)]=0.5
tp[10,1]=0.5
tp

Sectors=paste0("S",1:10)
# uniform start distribution over the 10 sectors
hmm = initHMM(Sectors,Sectors, startProbs = rep(0.1,10),transProbs = tp, emissionProbs = ep)

# Task 2 - Simulate the HMM for 100 time steps*
simulations <- simHMM(hmm, 100)

# Task 3 - Discard the hidden states from the sample obtained above.
# observations
simulations$observation
observation=simulations$observation
# filtering (forward probabilities, returned on the log scale)
logfo = forward(hmm,observation)
loge = exp(logfo)
# smoothing
observation=simulations$observation
posterior = posterior(hmm,observation)
# the most probable path is
mostpro = viterbi(hmm, simulations$observation)

# Task 4 - Compute the accuracy of the filtered and smoothed probability distributions, and of the most probable path.
#head(prop.table(exp(logfo)))
# NOTE(review): this shadows HMM::forward with a matrix; later calls of
# forward(hmm, ...) still resolve to the function because R skips non-function
# bindings when a name is used in call position.
forward = prop.table(loge,2)
# The accuracy for filtered probability distributions
#simulations$states
forpat = apply(forward,2,which.max)
forpath= rownames(forward)[forpat]
for_acc = length(which(forpath==simulations$states))/100
for_acc
# The accuracy for smoothed probability distributions
smoo = apply(posterior,MARGIN = 2,FUN=which.max)
smoopath= rownames(posterior)[smoo]
smoo_acc = length(which(smoopath==simulations$states))/100
smoo_acc
# The accuracy for most probable path:
mos_acc = length(which(mostpro==simulations$states))/100
mos_acc
# Smoothing gives us the highest accuracy and thats is due to the fact that it uses both previous and the future values to make a predictrion.

# Task 5 -Repeat the previous task with different simulated samples. In general, the smoothed distributions should be more accurate than the filtered distributions.
# (100 fresh simulations; accuracies accumulated per replicate)
f100 = rep(0,100)
smoo100=rep(0,100)
mos100=rep(0,100)
for(i in 1:100){
  simulations <- simHMM(hmm, 100)
  observation=simulations$observation
  logfo = forward(hmm,observation)
  posterior = posterior(hmm,observation)
  mostpro = viterbi(hmm, simulations$observation)
  #forward acc
  filtered = prop.table(exp(logfo),2)
  forpat = apply(filtered, 2,which.max)
  forwpath = rownames(logfo)[forpat]
  #forwpath= table(forpat==simulations$states)
  f100[i] = length(which(forwpath==simulations$states))/100
  # post acc
  smoo = apply(posterior,MARGIN = 2,FUN=which.max)
  smoopath= rownames(posterior)[smoo]
  smoo100[i] = length(which(smoopath==simulations$states))/100
  # mo acc
  mos100[i] = length(which(mostpro==simulations$states))/100
}
cat("The accuracy for filter is:",mean(f100))
cat("\nThe accuracy for smoothing is:",mean(smoo100))
cat("\nThe accuracy for path is:",mean(mos100))
# The reason for the smoothing to be the most accurate is that it uses all observations, including observations in the future, # which the filtering function doesn't. And viterbi has a logical constraint which could be useful but in this case will result in # less accuracy comparing to the smoothing.

# Task 6 - Is it true that the more observations you have the better you know where the robot is?
library(entropy)
# empirical entropy of the filtering distribution at each time step
ento=apply(prop.table(loge),2,entropy.empirical)
plot(ento,type = "l")
# As we can see in the plot the entropy does not converges to a stable value, which means having more data doesn't help us with accuracy.

# Task 7 - Consider any of the samples above of length 100. Compute the probabilities of the hidden states for the time step 101.
#filterProbs * trasmission to get 101
# (one-step-ahead prediction: filtered distribution at t=100 times transProbs)
probablities = t(forward[,100])%*%tp
mostp = which.max(probablities)
mostp
probablities
# The probabilities of the hidden state for the time step 101 are: `r probablities` and the most probable is: `r mostp`
bf04ac00cde577bdfd319df1c028a025b66d2209
cbd3b0f76677e4e5c00bafced19939549182ff3f
/cachematrix.R
3badc6ef8885fa05fa6ef54e6325e6e29fb4c342
[]
no_license
ginoandolini/ProgrammingAssignment2
d6dc2922c2dac18420bc270c692ed1bba8a6558f
7b4043852fb8ed6eb773ae434c9252929139334f
refs/heads/master
2020-04-01T19:19:37.525165
2014-05-20T10:41:06
2014-05-20T10:41:06
null
0
0
null
null
null
null
UTF-8
R
false
false
1,255
r
cachematrix.R
## makeCacheMatrix makes a special matrix ## Sets the value of the matrix, set ## Gets the value of the matrix , get ## Sets the value of the matrix inverse, setinverse ## Gets the value of the matrix inverse, getinverse makeCacheMatrix <- function(x = numeric()) { m <- NULL set <- function(y) { x <<- y m <<- NULL } get <- function() x setinverse <- function(solve) m <<- solve getinverse <- function() m list(set = set, get = get, setinverse = setinverse, getinverse = getinverse) } ## Calculates the inverse of the special "matrix" created with makeCacheMatrix ## First checks to see if the inverse has already been calculated ## If so gets the inverse from the cache and skips the computation ## Otherwise, it calculates the inverse of the data and sets the value of the inverse in the cache via the setinverse function cacheSolve <- function(x, ...) { ## Return a matrix that is the inverse of 'x' m <- x$getinverse() if(!is.null(m)) { message("getting cached data") return(m) } data <- x$get() m <- solve(data, ...) x$setinverse(m) m }
c9d64ac63c1a3b7f19c451097476055a3d33a196
ecd850028010252cd24d7d67aa3ef3b8065bf109
/man/parallelCountNbd.Rd
81f51188730bed3e4daa424abf88265e443dbe5c
[]
no_license
EricMarcon/SpatDiv
de98877555e9d92ec7859d4ce18bf5dbb47bf2a1
c0346e1105130d18dc8b978a415569f653ae0cf7
refs/heads/master
2023-05-26T18:42:38.506723
2023-05-21T08:42:49
2023-05-21T08:42:49
111,210,097
1
0
null
null
null
null
UTF-8
R
false
true
657
rd
parallelCountNbd.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/RcppExports.R \name{parallelCountNbd} \alias{parallelCountNbd} \title{parallelCountNbd} \usage{ parallelCountNbd(r, NbSpecies, x, y, Type, Weight) } \arguments{ \item{r}{The vector of distances to take into account.} \item{NbSpecies}{The number of species of the community.} \item{x, y}{The coordinates of the points.} \item{Type}{A vector containing the species of each point (as integer, i.e. the factor code).} \item{Weight}{A vector containing the weights of points.} } \description{ Create a 3-D array containing the number of neighbors around each point, per species }
8f37165ade6eae5855a7723931f56be58d0738a0
f04c8b91b2e26b0c4342525533032a0d33ad1333
/R/link_tag.R
4035f482c2f7e7bef2e8f4ee77068794d5c24eff
[]
no_license
polymathematic/Longform
c567c9d883ba2c02a03a484546a7a62ad9a1b973
d03557ac07d6e39e0baf30c4dfa26d070c60a0b6
refs/heads/master
2020-03-15T21:22:07.825613
2020-01-14T18:00:50
2020-01-14T18:00:50
132,353,511
0
0
null
null
null
null
UTF-8
R
false
false
343
r
link_tag.R
#' Add a link tag #' #' Format an HTML image tag #' #' @param text The text of the link #' @param url The URL the link points to #' @export link_tag <- function(text, url){ #Create tag output <- sprintf("[%s](%s)", text, url) #Class as image tag class(output) <- "link_tag" #Return output return(output) }
2c34b013e33aca29c72663b2205888299878b947
31325bac0088dbc4725f01794ff72413d5856c64
/R/I_quadModMat.R
16589c32707e11f9a573c5e9b1385dd304376d59
[]
no_license
mfasiolo/synlik_dev
6b2bae4f357cbe028050917f5a264e1698fcfe76
2508506cffe09e752c6fa221e6431997f739168e
refs/heads/master
2021-03-27T10:56:50.668629
2018-05-30T10:37:00
2018-05-30T10:37:00
17,673,353
0
2
null
null
null
null
UTF-8
R
false
false
506
r
I_quadModMat.R
# Creates model matrix for quadratic multivariate regression .quadModMat <- function(X, center = FALSE, scale = FALSE, deriv = FALSE) { nPar <- ncol(X) X <- scale(X, center = center, scale = scale) M <- cbind(1, X, X ^ 2 / ifelse(deriv, 2, 1)) # Creating all mixed terms if(nPar > 1){ comb <- combn(nPar, 2) tmp <- lapply(1:ncol(comb), function(jj) X[ , comb[1, jj]] * X[ , comb[2, jj]]) M <- cbind(M, do.call("cbind", tmp)) } return( M ) }
ffa6d326540f1b20c72bd427c20368fa17f81312
c3e2451daec7c223e6bca5e8ec5d29ea3efa5c6a
/inst/Example_AR1_AR2.R
2c392b7119d40e116380d86e754d54aa1802a16a
[]
no_license
pierrejacob/bayeshscore
d0336efc16dd0456ffa2c3f6fbe51aabbcf3f3f8
8f148f4074e09de4911d5645a9781c8aa844d38d
refs/heads/master
2021-09-22T10:56:27.652463
2018-09-08T18:21:28
2018-09-08T18:21:28
63,806,619
5
0
null
null
null
null
UTF-8
R
false
false
12,235
r
Example_AR1_AR2.R
################################################################################################## # Example - AR(1) vs AR(2) ################################################################################################## library(bayeshscore) library(ggplot2) library(gridExtra) library(doParallel) library(foreach) library(wesanderson) set.seed(19) # Define model nu0 = 1 sigma02 = 1 nb_models = 2 model = function(i){ if (i==1){return(get_model_AR1(nu0, sigma02))} if (i==2){return(get_model_AR2(nu0, sigma02))} } # set algorithmic parameters algorithmic_parameters = list() algorithmic_parameters$Ntheta = 2^10 algorithmic_parameters$verbose = TRUE algorithmic_parameters$resampling = function(normw) ssp_resampling_n(normw, runif(length(normw))) # The remaining algorithmic parameters are set to their default values via util_default.R #-------------------------------------------------------------------------------------------- repl = 5 #number of replications registerDoParallel(cores=5) #number of workers in parallel #-------------------------------------------------------------------------------------------- nobservations = 1000 ############################################################################################# # Case 3: true model = AR(1) ############################################################################################# true_model = 1 true_theta = c(0.6,0.8) observations1 = simulateData(model(true_model),true_theta,nobservations) # observations in a matrix of dimensions dimy x nobservations #-------------------------------------------------------------------------------------------- results_all1 = data.frame() #-------------------------------------------------------------------------------------------- ### Compute logevidence and hscore for each model for (m in 1:nb_models){ results = foreach(i=1:repl,.packages=c('bayeshscore'),.verbose = TRUE) %dorng% { hscore(observations1, model(m), algorithmic_parameters) } for (r in 1:repl){ results_all1 = 
rbind(results_all1,data.frame(logevidence = results[[r]]$logevidence, hscore = results[[r]]$hscore, time = 1:nobservations, model = factor(m), repl = r)) } } ############################################################################################# # Case 4: true model = AR(2) ############################################################################################# true_model = 2 true_theta = c(0.25,0.5,0.75^2) observations2 = simulateData(model(true_model),true_theta,nobservations) # observations in a matrix of dimensions dimy x nobservations #-------------------------------------------------------------------------------------------- results_all2 = data.frame() #-------------------------------------------------------------------------------------------- ### Compute logevidence and hscore for each model for (m in 1:nb_models){ results = foreach(i=1:repl,.packages=c('bayeshscore'),.verbose = TRUE) %dorng% { hscore(observations2, model(m), algorithmic_parameters) } for (r in 1:repl){ results_all2 = rbind(results_all2,data.frame(logevidence = results[[r]]$logevidence, hscore = results[[r]]$hscore, time = 1:nobservations, model = factor(m), repl = r)) } } ############################################################################################## #-------------------------------------------------------------------------------------------- #-------------------------------------------------------------------------------------------- # Generate plots for paper #-------------------------------------------------------------------------------------------- #-------------------------------------------------------------------------------------------- # Compute the Hyvarinen factor results_all = list(results_all1,results_all2) logbayesfactors = data.frame() h_factors = data.frame() BF_plots = list() HF_plots = list() for (r in 1:repl) { for (i in 1:nb_models) { results = results_all[[i]] logbayes_factor = subset(results,model==1&repl==r)$logevidence - 
subset(results,model==2&repl==r)$logevidence logbayesfactors = rbind(logbayesfactors,data.frame(case = factor(i), time = 1:nobservations, repl = r, logbayesfactor = logbayes_factor, type = factor(paste("Case ",toString(i))))) h_factor = subset(results,model==2&repl==r)$hscore - subset(results,model==1&repl==r)$hscore h_factors = rbind(h_factors,data.frame(case = factor(i), time = 1:nobservations, repl = r, hfactor = h_factor, type = factor(paste("Case ",toString(i))))) } } # log Bayes factor ggplot(logbayesfactors) + geom_line(aes(time, logbayesfactor, color = case, group = interaction(case,repl))) + geom_hline(yintercept = 0,linetype="dotted",size=1) + ylab("log-Bayes factor [1 vs 2]") + facet_grid(. ~ type) + xlab("Number of observations") + # guides(colour = guide_legend(override.aes = list(size = 2))) + theme(strip.text.y = element_text(size = 12, colour = "black")) + theme(legend.text=element_text(size=12)) + theme(legend.title=element_text(size=12)) + theme(legend.position="none") + theme(axis.title.y=element_text(margin=margin(0,10,0,0))) + theme(axis.title.x=element_text(margin=margin(10,0,0,0))) # ggsave("example_AR1_AR2_logBF_1_vs_2.png",dpi = 300,width = 10,height = 5) # Hyvarinen factor labels.df = data.frame(x = rep(375,2), y = c(-220,30), text = c("Case 4","Case 3"), type = factor(c("Case 4","Case 3"))) colors = c(wes_palette("Darjeeling")[1],"mediumblue") axis_ticktextsize = 10 axis_titlesize = 12 ggplot() + geom_label(data = labels.df, aes(x,y,label = text,color=type), color = colors, fontface = "bold") + theme(legend.position="none") + scale_color_manual(values = colors[2:1]) + geom_line(data = h_factors, aes(time, hfactor, color = case, group = interaction(case,repl)),alpha=0.6) + geom_hline(yintercept = 0,linetype=2) + xlab("Number of observations") + ylab("H-factor [1 vs. 
2]") + ylim(c(min(h_factors$hfactor),30+max(h_factors$hfactor))) + theme(strip.text.y = element_text(size = 12, colour = "black")) + theme(legend.text=element_text(size=12)) + theme(legend.title=element_text(size=12)) + theme(legend.position="none") + theme(axis.text.x = element_text(size = axis_ticktextsize), axis.text.y = element_text(size = axis_ticktextsize), axis.title.x = element_text(size = axis_titlesize, margin=margin(10,0,0,0)), axis.title.y = element_text(size = axis_titlesize, angle = 90, margin = margin(0,10,0,0)), strip.text.x = element_text(size = axis_titlesize, colour = "black"), strip.background = element_rect(fill="gray88"), panel.background = element_rect(fill="gray95",linetype = "solid", colour="white"), legend.position = "none") # ggsave("example_AR1_AR2_Hyvarinen_factor_1_vs_2_10_by_5.png",dpi = 300,width = 10,height = 5) # ggsave("example_AR1_AR2_Hyvarinen_factor_1_vs_2_5_by_5.png",dpi = 300,width = 5,height = 5) # log Bayes factor labels.df = data.frame(x = rep(375,2), y = c(-70,12), text = c("Case 4","Case 3"), type = factor(c("Case 4","Case 3"))) colors = c("tomato3","dodgerblue") ggplot() + geom_label(data = labels.df, aes(x,y,label = text,color=type), color = colors, fontface = "bold") + theme(legend.position="none") + scale_color_manual(values = colors[2:1]) + geom_line(data = logbayesfactors, aes(time, logbayesfactor, color = case, group = interaction(case,repl)),alpha=0.6) + geom_hline(yintercept = 0,linetype=2) + xlab("Number of observations") + ylab("log-Bayes factor [1 vs. 
2]") + ylim(c(min(h_factors$hfactor),30+max(h_factors$hfactor))) + theme(strip.text.y = element_text(size = 12, colour = "black")) + theme(legend.text=element_text(size=12)) + theme(legend.title=element_text(size=12)) + theme(axis.text.x = element_text(size = axis_ticktextsize), axis.text.y = element_text(size = axis_ticktextsize), axis.title.x = element_text(size = axis_titlesize, margin=margin(10,0,0,0)), axis.title.y = element_text(size = axis_titlesize, angle = 90, margin = margin(0,10,0,0)), strip.text.x = element_text(size = axis_titlesize, colour = "black"), strip.background = element_rect(fill="gray88"), panel.background = element_rect(fill="gray95",linetype = "solid", colour="white"), legend.position = "none") # ggsave("example_AR1_AR2_logBF_1_vs_2_10_by_5.png",dpi = 300,width = 10,height = 5) # ggsave("example_AR1_AR2_logBF_1_vs_2_5_by_5.png",dpi = 300,width = 5,height = 5) #-------------------------------------------------------------------------------------------- #-------------------------------------------------------------------------------------------- #-------------------------------------------------------------------------------------------- crit.df = data.frame() crit.df = rbind(crit.df,data.frame(logbayesfactors[-4], value = logbayesfactors$logbayesfactor, crit = factor("LBF"))) crit.df = rbind(crit.df,data.frame(h_factors[-4], value = h_factors$hfactor, crit = factor("HF"))) relabelling = function(x){ if(x==levels(crit.df$type[1])[1]){return (factor(3))} if(x==levels(crit.df$type[1])[2]){return (factor(4))} } crit.df$type = sapply(crit.df$type, function(x)relabelling(x)) #-------------------------------------------------------------------------------------------- case_label <- list( '3'=expression(paste("Case 3: ",M[1]," nested in ", M[2],", both well-specified",sep="")), '4'=expression(paste("Case 4: ",M[1]," nested in ", M[2], ", only ", M[2]," well-specified",sep="")) ) case_labeller <- function(variable,value){ return(case_label[value]) } 
labels.df = data.frame(x = c(900,900,rep(875,2)), y = c(5.5,1.5,-280,-65), text = rep(c("HF 1 vs. 2","log-BF 1 vs. 2"),4), crit = factor(rep(c("HF","LBF"),4)), type = factor(rep(3:4,each=2))) hline.df = data.frame(y = c(-5,10), type = factor(3)) axis_titlesize = 18 axis_ticktextsize = 15 ggplot() + geom_label(data = labels.df, aes(x,y,label = text,color=crit), fontface = "bold",size=5) + theme(legend.position="none") + scale_color_manual(values = colors[2:1]) + geom_line(data = crit.df, aes(time, value, color = crit, group = interaction(crit,repl)),alpha=0.85) + geom_hline(yintercept = 0,linetype=2) + geom_hline(data = hline.df, aes(yintercept = y),linetype=2, alpha = 0) + xlab("Number of observations") + ylab("") + # ylim(c(min(h_factors$hfactor),30+max(h_factors$hfactor))) + facet_wrap( ~ type, ncol=2, scales="free", labeller = case_labeller) + theme(axis.text.x = element_text(size = axis_ticktextsize), axis.text.y = element_text(size = axis_ticktextsize), axis.title.x = element_text(size = axis_titlesize, margin=margin(20,0,0,0)), axis.title.y = element_text(size = axis_titlesize, angle = 90, margin = margin(0,20,0,0)), strip.text.x = element_text(size = axis_titlesize, colour = "black"), strip.text.y = element_text(size = axis_titlesize, colour = "black"), strip.background = element_rect(fill="gray88"), panel.background = element_rect(fill="gray95",linetype = "solid", colour="white"), legend.position = "none") # ggsave("example_AR1_AR2_15_by_6.png",dpi = 300,width = 15,height = 6) # ggsave("example_AR1_AR2_15_by_6.pdf",dpi = 300,width = 15,height = 6)
e85c9aa76fe4fd9ec4b57c4cd1e1ac1b827bd56f
4bef412a97da9bde0e505bcaa4fe0c91d7226b6b
/R/BB_Prior_prob_phi.R
b1bddda0d5d85b083a68314a45360f8ed2504535
[]
no_license
cran/BEDASSLE
3578af6a84a08fe6cbe3dc157c0139e053c4bbbd
e5fb9b7a2430614f806ff16e9465384e3600d496
refs/heads/master
2022-04-30T11:00:10.441717
2022-04-10T18:12:30
2022-04-10T18:12:30
17,677,835
2
1
null
null
null
null
UTF-8
R
false
false
76
r
BB_Prior_prob_phi.R
BB_Prior_prob_phi <- function(phi){ stats::dexp(1/phi,rate=5,log=TRUE) }
9cce927a9746a1a287e08165029beab1604acdb2
e3ce3ad557ebd51429ed7acfea936723149a8d4c
/R/makeMultiObjectiveFunction.R
b824d9c94f25530c8f686f1fb809f5ff2cf5b44c
[]
permissive
jakobbossek/smoof
87512da9d488acfe3a7cc62aa3539a99e82d52ba
d65247258fab57d08a5a76df858329a25c0bb1b8
refs/heads/master
2023-03-20T02:05:12.632661
2023-03-08T13:59:27
2023-03-08T13:59:27
22,465,741
32
27
BSD-2-Clause
2022-01-21T10:02:19
2014-07-31T10:39:43
R
UTF-8
R
false
false
1,869
r
makeMultiObjectiveFunction.R
#' Generator for multi-objective target functions. #' #' @template arg_name #' @template arg_id #' @template arg_description #' @template arg_fn #' @template arg_has_simple_signature #' @template arg_par_set #' @param n.objectives [\code{integer(1)}]\cr #' Number of objectives of the multi-objective function. #' @template arg_noisy #' @template arg_fn_mean #' @param minimize [\code{logical}]\cr #' Logical vector of length \code{n.objectives} indicating if the corresponding #' objectives shall be minimized or maximized. #' Default is the vector with all components set to \code{TRUE}. #' @template arg_vectorized #' @template arg_constraint_fn #' @param ref.point [\code{numeric}]\cr #' Optional reference point in the objective space, e.g., for hypervolume computation. #' @return [\code{function}] Target function with additional stuff attached as attributes. #' @examples #' fn = makeMultiObjectiveFunction( #' name = "My test function", #' fn = function(x) c(sum(x^2), exp(x)), #' n.objectives = 2L, #' par.set = makeNumericParamSet("x", len = 1L, lower = -5L, upper = 5L) #' ) #' print(fn) #' @export makeMultiObjectiveFunction = function( name = NULL, id = NULL, description = NULL, fn, has.simple.signature = TRUE, par.set, n.objectives = NULL, noisy = FALSE, fn.mean = NULL, minimize = NULL, vectorized = FALSE, constraint.fn = NULL, ref.point = NULL) { smoof.fn = makeObjectiveFunction( name, id, description, fn, has.simple.signature, par.set, n.objectives, noisy, fn.mean, minimize, vectorized, constraint.fn ) if (!is.null(ref.point)) { assertNumeric(ref.point, len = n.objectives, any.missing = FALSE, all.missing = FALSE) } smoof.fn = setAttribute(smoof.fn, "ref.point", ref.point) class(smoof.fn) = c("smoof_multi_objective_function", class(smoof.fn)) return(smoof.fn) }
7376f58618041f78ca0c75c8345ce649693a5bf0
8ea032a59e2ef9b5d822d86b537e3e84e13d8322
/BOOK/Doit_R/가연/07_데이터 정제 - 빠진 데이터, 이상한 데이터 제거하기.R
d008b40839df80caf8300c0e4258f7fcc7527876
[]
no_license
KimJiSeong1994/Data-Analysis_Study_Changwon
987e9067f3f067196d92f163f26688952919d440
f6e221e76f7557863263dd8e7e4849eb50f75b62
refs/heads/master
2021-08-17T09:42:15.679370
2020-08-26T08:35:50
2020-08-26T08:35:50
216,145,887
6
3
null
null
null
null
UHC
R
false
false
4,552
r
07_데이터 정제 - 빠진 데이터, 이상한 데이터 제거하기.R
#####07-1 빠진 데이터를 찾아라! - 결측치 정제하기##### #결측치 찾기 df <- data.frame(sex=c("M","F",NA,"M","F"), score=c(5,4,3,4,NA)) is.na(df) #결측치 확인(결측치=TRUE) table(is.na(df)) #결측치 빈도 출력 table(is.na(df$sex)) #sex 결측치 빈도 출력 table(is.na(df$score)) #score 결측치 빈도 출력 mean(df$sex) mean(df$score) #결측치가 포함된 데이터를 함수 적용하면 NA가 출력 #결측치 제거하기 library(dplyr) df %>% filter(is.na(score)) #score가 NA인 데이터만 출력 df %>% filter(!is.na(score)) df_n <- df %>% filter(!is.na(score)) mean(df_n$score);sum(df_n$score) df_n1 <- df %>% filter(!is.na(score)&!is.na(sex)) #여러 변수 결측치 동시제거 df_n2 <- na.omit(df) #결측치가 하나라도 있으면 모두 제거 #함수의 결측치 제외 기능 mean(df$score, na.rm=T) #na.rm=T 파라미터 기능 사용 sum(df$score, na.rm=T) exam <- read.csv("C:/rtest/Doit_R/Data/csv_exam.csv") exam[c(3,8,15),"math"] <- NA #3,8,15행의 math에 NA 할당 exam %>% summarise(mean_math=mean(math)) exam %>% summarise(mean_math=mean(math,na.rm=T)) exam %>% summarise(mean_math=mean(math,na.rm=T), sum_math=sum(math,na.rm=T), median_math=median(math,na.rm=T)) #평균값으로 결측치 대체하기 mean(exam$math, na.rm=T) #결측치 제외하고 math 평균 산출 exam$math <- ifelse(is.na(exam$math),55,exam$math) #math가 NA면 55로 대체 table(is.na(exam$math)) mean(exam$math) #####연습문제##### library(ggplot2) mpg <- as.data.frame(ggplot2::mpg) mpg[c(65,124,131,153,212),"hwy"] <- NA #Q1. drv 변수와 hwy변수에 결측치가 몇 개 있는지 알아보기 table(is.na(mpg$drv)) table(is.na(mpg$hwy)) #Q2. filter()이용해 hwy 결측치 제거하고 어떤 drv(구동방식)의 hwy평균이 높은지 알아보기. 하나의 dplyr 구문으로 만들어야함. mpg %>% select(drv,hwy) %>% filter(!is.na(hwy)) %>% group_by(drv) %>% summarise(mean_hwy=mean(hwy)) %>% arrange(desc(mean_hwy)) mpg %>% group_by(drv) %>% summarise(mean_hwy=mean(hwy,na.rm=T)) %>% arrange(desc(mean_hwy)) #Q2. 정답은 이거(select 없어도 됨) mpg %>% filter(!is.na(hwy)) %>% group_by(drv) %>% summarise(mean_hwy=mean(hwy)) #####07-2 이상한 데이터를 찾아라! 
- 이상치 정제하기##### outlier <- data.frame(sex=c(1,2,1,3,2,1), score=c(5,4,3,4,2,6)) #이상치 확인하기 table(outlier$sex) table(outlier$score) #결측 처리하기 outlier$sex <- ifelse(outlier$sex==3,NA,outlier$sex) outlier outlier$score <- ifelse(outlier$score>5,NA,outlier$score) outlier outlier %>% filter(!is.na(sex)&!is.na(score)) %>% group_by(sex) %>% summarise(mean_score=mean(score)) #이상치 제거하기 - 극단적인 값 #극단치가 있으면 분석 결과가 왜곡될 수 있기 때문에 분석 전 제거해야함 #상자 그림으로 극단치 기준 정하기 boxplot(mpg$hwy) #상자 그림 통계치 출력 #출력 결과는 차례대로 아래쪽 극단치 경계/1사분위수/중앙값/3사분위수/위쪽 극단치 경계 boxplot(mpg$hwy)$stats #결측 처리하기 #12~37 벗어나면 NA 할당 mpg$hwy <- ifelse(mpg$hwy<12|mpg$hwy>37,NA,mpg$hwy) table(mpg$hwy) table(is.na(mpg$hwy)) mpg %>% group_by(drv) %>% summarise(mean_hwy=mean(hwy,na.rm = T)) #####연습문제2##### mpg[c(10,14,58,93),"drv"] <- "k" mpg[c(29,43,129,203),"cty"] <- c(3,4,39,42) #Q1. drv에 이상치가 있는지 확인. 이상치 결측 처리 후 확인. 결측 처리 할 때는 %in% 기호 활용하기. table(mpg$drv) mpg$drv <- ifelse(mpg$drv=="k",NA,mpg$drv) #Q1. 정답은 이거 table(mpg$drv) mpg$drv <- ifelse(mpg$drv %in% c("4","f","r"), mpg$drv, NA) table(mpg$drv) #Q2. 상자 그림 이용해 cty에 이상치가 있는지 확인하기. 상자 그림의 통계치를 이용해 정상 범위 벗어난 값을 결측 처리하고 다시 상자 그림으로 확인 boxplot(mpg$cty)$stats mpg$cty <- ifelse(mpg$cty>26|mpg$cty<9,NA,mpg$cty) boxplot(mpg$cty) #Q3. 이상치를 제외한 drv별로 cty평균이 어떻게 다른지 알아보기. 하나의 dplyr구문으로 만들어야함 mpg %>% filter(!is.na(drv)&!is.na(cty)) %>% group_by(drv) %>% summarise(cty_mean=mean(cty))
e156792dc8f179c43edf59ec5638e6a8d342a0cd
e948abc55d3973e14fd3d2201796069213860272
/code/NYC_HVS_Analysis.R
32420c77189e0c986333875ba299f427b15abbae
[]
no_license
madisonvolpe/evictions_nyc
426eba0911cd64a89fa3eea5b4eab385168a7bb0
7309888b3d9786879e644d242b854af75a2408c7
refs/heads/master
2020-06-05T10:29:02.971180
2019-06-17T19:51:09
2019-06-17T19:51:09
192,409,157
0
0
null
null
null
null
UTF-8
R
false
false
18,410
r
NYC_HVS_Analysis.R
suppressPackageStartupMessages(library(raster)) suppressPackageStartupMessages(library(survey)) suppressPackageStartupMessages(library(plyr)) suppressPackageStartupMessages(library(tidyverse)) suppressPackageStartupMessages(library(ggplot2)) suppressPackageStartupMessages(library(sf)) # for shapefiles suppressPackageStartupMessages(library(maptools)) suppressPackageStartupMessages(library(rgdal)) suppressPackageStartupMessages(library(rgeos)) suppressPackageStartupMessages(library(reshape2)) suppressPackageStartupMessages(library(kableExtra)) suppressPackageStartupMessages(library(gridExtra)) suppressPackageStartupMessages(library(plotly)) suppressPackageStartupMessages(library(geosphere)) ## Pre-processing! options(scipen = 999) # Before anything else read in shapefile and join the sub-borough areas to dataset # read in shapefile shp <- rgdal::readOGR(dsn = "data/NYC_HVS/NYC_Sub_borough_Area") shp@data$id = shp@data$bor_subb # transform shape for ggplot2 shp.points = fortify(shp, region="id") shp.df = plyr::join(shp.points, shp@data, by="id") rm(shp.points) names(shp.df)[8] <- "sba" # Bring in code for creating SEs from replicate weights source("code/ReplicateWeights.R") # Some notes this is data aggregated by the NYC HPD for their 2019 Data Expo - it is simplified, which is good for me # It only includes only household-level occupied records # It has both household weights + replicate weights ! # Load in Data hvs <- read.csv("data/NYC_HVS/NYC_HVS_2017_HO.csv") # The more detailed variable names are the first row - extract them out varnames <- hvs[1,] # remove more detailed names hvs <- hvs[-1,] # add sub-borough name from shpfile data to dataset it will make analysis easier! 
hvs$sba <- as.numeric(as.character(hvs$sba)) hvs$sba.name <- shp.df$NAME[match(hvs$sba, shp.df$sba)] sum(is.na(hvs$sba.name)) #check none missing # recode race - hispanic origin levels(hvs$X_1e) <- list(No=c("1"), Yes=c("2", "3", "4", "5", "6", "7")) # recode race - all levels(hvs$X_1f) <- list(White = c("1"), Black = c("2"), Asian = c("4", "5", "6", "7", "8"), PI = c("9"), Native = c("3"), TwoRaces = c("10")) # make a white v. minority categpry for simpler evaluation hvs <- hvs %>% mutate(RaceSimplified = case_when( X_1e == "No" & X_1f == "White" ~ "White", #white people X_1e == "Yes"& X_1f == "White" ~ "Minority", # white hispanics as minority X_1e == "No" & X_1f != "White" ~ "Minority", # nonwhite non hispanics as minority X_1e == "Yes"& X_1f != "White" ~ "Minority")) # recode income, rather make continuous and then make income categories hvs$hhinc <- as.numeric(as.character(hvs$hhinc)) hvs$hhinc[hvs$hhinc < 0] <- -1 hvs$hhinc[hvs$hhinc == 9999999] <- 0 #hvs$hhinc<- cut(hvs$hhinc, seq(-1,3000000,25000)) # recode hh moving variable (add labels) hvs$X_6 <- factor(hvs$X_6, levels = c("1","2","3","4","5","6","7","8", "9", "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "20", "98", "99", "Reason for Moving"), labels = c("Change in Employment Status", "Looking for Work", "Commuting Reasons", "School", "Other Financial/ Employment Reason", "Needed Larger House or Apartment", "Widowed, Divorced, Deceased", "Newly Married", "To be close to Relatives", "Establish Separate Household", "Other Family Reason", "Wanted this neighborhood/ better services", "Other neighborhood reason", "Wanted to own residence", "Wanted to rent residence", "Wanted greater housing affordability", "Wanted better quality housing", "Evicted, displaced, landlord harassment", "Other housing reason", "Any other reason", "Not Reported", "Not Applicable", "X")) # Examine the Reason for HH Moving Variable # Subset moved people moved <- filter(hvs, X_6 != "Not Reported" & X_6 != "Not 
Applicable") # Okay so we see that (from respondents there are only 3,266 observations that moved) - obviously # this is more when we add up the sampling weights etc, so let's do that now # The full Sample ('Recently Moved') Estimated # just add up the household weights! # note household weights have 5 implied decimal places (so after will divide by 100,000) moved$hhweight <- as.numeric(as.character(moved$hhweight)) sum(moved$hhweight)/100000 # The Full-Sample Recently Moved Estimate for all of NYC is : 781,263 (those who moved after 2013) # Calculate the SE for this estimate rep.wts.SE(moved) # the SE is 10,945.73 # Which reason for moving categories dominated? moved %>% select(X_6) %>% group_by(X_6) %>% summarise(n = n()) %>% arrange(desc(n)) %>% ggplot(aes(x=X_6, y = n)) + geom_bar(stat = "identity") + coord_flip() + ggtitle('Moved Reason - 2017 NYC HVS') # When people said 'they were evicted/displaced', where do they reside now? moved %>% select(sba.name, X_6) %>% filter(X_6 == 'Evicted, displaced, landlord harassment') %>% group_by(sba.name) %>% summarise(n=n()) %>% arrange(desc(n)) %>% ggplot(aes(x=sba.name, y=n))+ geom_bar(stat = 'identity') + coord_flip() + ggtitle("Where do Evicted/Displaced People Move?") # Naive analysis! # Estimates for how many people are moving to these neighborhoods after being evicted/displaced # Will include the full sample estimate (sum of hhweight) # Will also include the SE estimate from the replicate weights # Basically Estimates by Neighborhood ! 
# will include estimate (sum of hhweight) # SE estimate (using replicate weights) evicted.displaced <- moved %>% filter(X_6 == 'Evicted, displaced, landlord harassment') EstbyNeighborhood.ed <- rep.wts.grp.SE(evicted.displaced, sba.name) #create table estimate # kable(EstbyNeighborhood.ed, format = 'html', col.names = c('SBA', 'Sample Estimate', 'Var', 'SE')) # Let's map this so people could see where people move/stay after being evicted/displaced # add neighborhoods to EstbyNeighborhood that are not in original df SBA <- unique(shp.df$NAME) SBAnoEvicted <- SBA[!SBA %in% EstbyNeighborhood.ed$sba.name] SBAnoEvicted <- data.frame(sba.name = SBAnoEvicted, N0 = 0, Var = 0, SE = 0) # adds those neighborhoods, where evicted/displaced people were not residing EstbyNeighborhood.ed <- rbind(EstbyNeighborhood.ed, SBAnoEvicted) names(EstbyNeighborhood.ed)[1] <- "NAME" #now join these figures to shapefile shp.df.evic <-join(shp.df, EstbyNeighborhood.ed, by = "NAME") # lets map this map.evic <- ggplot(shp.df.evic) + aes(long,lat,group=group) + geom_polygon(aes(fill=N0)) + scale_fill_continuous(type = "viridis")+ geom_path(color="white") + theme_bw() + ggtitle("Neighborhoods where Evicted/Displaced/Harassed People Reside") # How About People who Move for Greater Housing Affordability? Do people who move from displacement move to same areas # as people seeking greater Housing Affordability? 
moved %>% select(sba.name, X_6) %>% filter(X_6 == 'Wanted greater housing affordability') %>% group_by(sba.name) %>% summarise(n=n()) %>% arrange(desc(n)) %>% ggplot(aes(x=sba.name, y=n))+ geom_bar(stat = 'identity') + coord_flip() + ggtitle("Where do People that Want Housing Affordability Move?") housing.afford <- moved %>% filter(X_6 == 'Wanted greater housing affordability') EstbyNeighborhood.ha <- rep.wts.grp.SE(housing.afford, sba.name) # kable(EstbyNeighborhood.ha, format = 'html', col.names = c('SBA', 'Sample Estimate', 'Var', 'SE')) # add neighborhoods to EstbyNeighborhood.ha that are not in original df SBA <- unique(shp.df$NAME) SBAnoha <- SBA[!SBA %in% EstbyNeighborhood.ha$sba.name] SBAnoha <- data.frame(sba.name = SBAnoha, N0 = 0, Var = 0, SE = 0) # adds those neighborhoods, where evicted/displaced people were not residing EstbyNeighborhood.ha <- rbind(EstbyNeighborhood.ha, SBAnoha) names(EstbyNeighborhood.ha)[1] <- "NAME" #now join these figures to shapefile shp.df.ha <-join(shp.df, EstbyNeighborhood.ha, by = "NAME") # create map map.ha<-ggplot(shp.df.ha) + aes(long,lat,group=group) + geom_polygon(aes(fill=N0)) + scale_fill_continuous(type = "viridis")+ geom_path(color="white") + theme_bw() + ggtitle("Neighborhoods where People that Moved Seeking Housing Affordability Reside") # Combination of maps map.evic map.ha ######### Go a Level Deeper and Expand on this Analysis Adding some Racial Component ##### #### EVICTIONS/ DISPLACEMENT # Evicted/displaced/harassed in total sum(evicted.displaced$hhweight)/100000 #13,623.21 in total were evicted in 2017 rep.wts.SE(evicted.displaced) # the SE is 1816.878 # How many people are evicted/displaced/harassed that are a minority (Full Sample Estimate) evicted.minority <- moved %>% filter(X_6 == 'Evicted, displaced, landlord harassment' & RaceSimplified == 'Minority') sum(evicted.minority$hhweight)/100000 #9,692.036 minorities are displaced throughout NYC rep.wts.SE(evicted.minority) # the SE is 1574.457! 
# How many people are evicted/displaced/harassed that are white (Full Sample Estimate) evicted.white <- moved %>% filter(X_6 == 'Evicted, displaced, landlord harassment' & RaceSimplified == 'White') sum(evicted.white$hhweight)/100000 #3931.175 whites are displaced throughout NYC rep.wts.SE(evicted.white) # the SE is 1030.248! # Not a surprise but most evictions/displaced/landlord harassments are for minorities (blacks, hispanics, asians,etc.) # What neighborhoods are recently evicted/displaced people ending up in based on minority v. nonminority status ? moved %>% select(sba.name,borough,X_6, RaceSimplified, hhweight) %>% filter(X_6 == 'Evicted, displaced, landlord harassment') %>% group_by(sba.name, RaceSimplified) %>% summarise(n=n(),amt=ceiling((sum(hhweight)/100000))) %>% arrange(desc(amt)) %>% ggplot(aes(x=sba.name, y = n, fill = RaceSimplified)) + geom_bar(stat = 'identity', position = 'dodge') + facet_grid(~RaceSimplified) + coord_flip() + ggtitle("Where do Evicted/Displaced People Reside based on Race?") # create a table for this with Full Sample estimate, Var, and SE EstbyNeighborhood.edRace <- rep.wts.2grps.SE(evicted.displaced, sba.name, RaceSimplified) # kable(EstbyNeighborhood.edRace, format = 'html', col.names = c('SBA',"Status",'Sample Estimate', 'Var', 'SE')) # let's map this #before I join with shpfile I need to make this wide and where only interested in Full Sample estimate so that is what I'll keep EstbyNeighborhood.edRaceWide <- spread(EstbyNeighborhood.edRace[1:3], RaceSimplified, N0) EstbyNeighborhood.edRaceWide <- EstbyNeighborhood.edRaceWide %>% mutate_at(vars(Minority, White), function(x) as.numeric(as.character(x))) EstbyNeighborhood.edRaceWide$Minority[is.na(EstbyNeighborhood.edRaceWide$Minority)] <- 0 EstbyNeighborhood.edRaceWide$White[is.na(EstbyNeighborhood.edRaceWide$White)] <- 0 # add in neighborhoods not in EstbyNeighborhood.edRaceWide SBA <- unique(shp.df$NAME) SBAnoEvictedRaceWide <- SBA[!SBA %in% 
EstbyNeighborhood.edRaceWide$sba.name] SBAnoEvictedRaceWide <- data.frame(sba.name = SBAnoEvictedRaceWide, Minority = 0, White = 0) # adds those neighborhoods, where evicted/displaced people were not residing EstbyNeighborhood.edRaceWide<- rbind(EstbyNeighborhood.edRaceWide, SBAnoEvictedRaceWide) names(EstbyNeighborhood.edRaceWide)[1] <- "NAME" # join to shapefile with (N0 estimate for all evicted/displaced already -- shp.df.evic) shp.df.evic <-join(shp.df.evic, EstbyNeighborhood.edRaceWide, by = "NAME") # make map #need to find centroid # Get polygons centroids centroids <- as.data.frame(centroid(shp)) colnames(centroids) <- c("long_cen", "lat_cen") centroids <- data.frame("id" = shp$bor_subb, centroids) # Join centroids with dataframe shp.df.evic <- plyr::join(shp.df.evic, centroids, by = "id") # Minority Map ggplot(shp.df.evic) + aes(long,lat,group=group) + geom_polygon(aes(fill=N0)) + scale_fill_continuous(low = '#E0EEEE', high = '#0000FF')+ labs(fill='Level of Evicted/Displaced') + geom_point(aes(x=long_cen,y=lat_cen,col = Minority), alpha = 0.9)+ scale_color_continuous(low = '#FFFFFF', high = '#CD0000')+ geom_path(color="white") + theme_bw() + ggtitle("Where do Evicted/Displaced Minorities Reside?") # White Map ggplot(shp.df.evic) + aes(long,lat,group=group) + geom_polygon(aes(fill=N0)) + scale_fill_continuous(low = '#E0EEEE', high = '#0000FF')+ labs(fill='Level of Evicted/Displaced') + geom_point(aes(x=long_cen,y=lat_cen,col = White), alpha = 0.9)+ scale_color_continuous(low = '#FFFFFF', high = '#CD0000')+ geom_path(color="white") + theme_bw() + ggtitle("Where do Evicted/Displaced White People Reside?") #### WANTED GREATER HOUSING AFFORDABILITY # Let's make the white vs/ minority affordability estimates (tables + maps) # How many people moved for greater affordability sum(housing.afford$hhweight)/100000 #36,101.14 moved for greater housing affordability rep.wts.SE(housing.afford) # the SE is 3027.933 # How many people moved for greater housing affordability 
that are a minority (Full Sample Estimate) ha.minority <- moved %>% filter(X_6 == "Wanted greater housing affordability" & RaceSimplified == 'Minority') sum(ha.minority$hhweight)/100000 #22550.74 minorities moved for greater housing affordability rep.wts.SE(ha.minority) # the SE is 2681.442! # How many people moved for greater housing affordability that are white (Full Sample Estimate) ha.white <- moved %>% filter(X_6 == "Wanted greater housing affordability" & RaceSimplified == 'White') sum(ha.white$hhweight)/100000 #13550.4 whites moved for greater housing affordability rep.wts.SE(ha.white) # the SE is 1842.22! # where do whites v. minorities move when they seek greater housing affordability... moved %>% select(sba.name,borough,X_6, RaceSimplified, hhweight) %>% filter(X_6 == "Wanted greater housing affordability") %>% group_by(sba.name, RaceSimplified) %>% summarise(n=n(),amt=ceiling((sum(hhweight)/100000))) %>% arrange(desc(amt)) %>% ggplot(aes(x=sba.name, y = n, fill = RaceSimplified)) + geom_bar(stat = 'identity', position = 'dodge') + facet_grid(~RaceSimplified) + coord_flip() + ggtitle("Where do People Move for Greater Housing Affordability?") # create a table for this with Full Sample estimate, Var, and SE EstbyNeighborhood.haRace <- rep.wts.2grps.SE(housing.afford, sba.name, RaceSimplified) # kable(EstbyNeighborhood.haRace, format = 'html', col.names = c('SBA',"Status",'Sample Estimate', 'Var', 'SE')) # making maps # let's map this #before I join with shpfile I need to make this wide and where only interested in Full Sample estimate so that is what I'll keep EstbyNeighborhood.haRaceWide <- spread(EstbyNeighborhood.haRace[1:3], RaceSimplified, N0) EstbyNeighborhood.haRaceWide <- EstbyNeighborhood.haRaceWide %>% mutate_at(vars(Minority, White), function(x) as.numeric(as.character(x))) EstbyNeighborhood.haRaceWide$Minority[is.na(EstbyNeighborhood.haRaceWide$Minority)] <- 0 EstbyNeighborhood.haRaceWide$White[is.na(EstbyNeighborhood.haRaceWide$White)] <- 0 # 
add in neighborhoods not in EstbyNeighborhood.edRaceWide SBAnohaRaceWide <- SBA[!SBA %in% EstbyNeighborhood.haRaceWide$sba.name] SBAnohaRaceWide <- data.frame(sba.name = SBAnohaRaceWide, Minority = 0, White = 0) # adds those neighborhoods, where evicted/displaced people were not residing EstbyNeighborhood.haRaceWide<- rbind(EstbyNeighborhood.haRaceWide, SBAnohaRaceWide) names(EstbyNeighborhood.haRaceWide)[1] <- "NAME" # join to shapefile with (N0 estimate for all evicted/displaced already -- shp.df.evic) shp.df.ha <-join(shp.df.ha, EstbyNeighborhood.haRaceWide, by = "NAME") # join centroids shp.df.ha <- plyr::join(shp.df.ha, centroids, by = "id") # Maps # Minority Map ggplot(shp.df.ha) + aes(long,lat,group=group) + geom_polygon(aes(fill=N0)) + scale_fill_continuous(low = '#E0EEEE', high = '#0000FF')+ labs(fill='Level of Housing Affordability') + geom_point(aes(x=long_cen,y=lat_cen,col = Minority), alpha = 0.9)+ scale_color_continuous(low = '#FFFFFF', high = '#CD0000')+ geom_path(color="white") + theme_bw() + ggtitle("Where do Minorities seeking Housing Affordability Reside?") # White Map ggplot(shp.df.ha) + aes(long,lat,group=group) + geom_polygon(aes(fill=N0)) + scale_fill_continuous(low = '#E0EEEE', high = '#0000FF')+ labs(fill='Level of Evicted/Displaced') + geom_point(aes(x=long_cen,y=lat_cen,col = White), alpha = 0.9)+ scale_color_continuous(low = '#FFFFFF', high = '#CD0000')+ geom_path(color="white") + theme_bw() + ggtitle("Where do White People seeking Housing Affordability Reside?") ######### Go another Level Deeper and Expand on this Analysis Adding Income #####
b9468a781002cb7dbbd9dc2178424690e543468f
618e4a8505b6a5dfa8e768b474bb2bc0352f39b3
/server.r
a6fcf4f6e758a22cdef41d1632dcb975cf0eec5e
[]
no_license
eua89/buoyviz_r_shiny
1c660b21279b182283a40365a21f28558c7d4895
c1bef095c43867ff32fc81e022489de55232ea45
refs/heads/master
2021-01-21T18:24:59.099422
2017-05-05T14:49:37
2017-05-05T14:49:37
92,047,336
0
0
null
null
null
null
UTF-8
R
false
false
1,022
r
server.r
library(quantmod)
library(ggplot2)
library(scales)
# source("helper.r")

# Shiny server: fetches buoy observations as CSV from a local endpoint and
# renders a data table, a time-series plot, and a caption for the selected
# buoy (input$select1) and variable (input$select2).
shinyServer(function(input, output) {

  # Reactive data source: downloads the CSV for the selected buoy ID and
  # parses the split date columns into a single formatted timestamp.
  buoy_data <- reactive({
    # Build the URL from the selected buoy ID (e.g. "WAQM3"); the original
    # code first hard-coded the WAQM3 URL and then immediately overwrote
    # it, so that dead assignment is removed here.
    fn <- paste0("http://localhost:5000/", input$select1, ".csv")
    f <- read.csv(fn, header = TRUE, sep = ",")

    # Combine year/month/day/hour/minute columns into one timestamp,
    # stored as formatted text; it is re-parsed where needed below.
    f$Date <- format(as.POSIXct(
      paste(f$X.YY, f$MM, f$DD, f$hh, f$mm, sep = "/"),
      format = "%Y/%m/%d/%H/%M"
    ))

    # Column 10 holds dissolved oxygen readings -- TODO confirm against the
    # upstream CSV schema; renamed so it plots with a friendly label.
    colnames(f)[10] <- "O2"
    f
  })

  # Raw table of the measurement columns (positions 6..16).
  output$table <- renderTable({
    data <- buoy_data()
    data[6:16]
  })

  # Time series of the chosen variable against the observation timestamp.
  output$plot <- renderPlot({
    data <- buoy_data()
    # Keep POSDATE as a real column so aes_string() resolves it from the
    # data frame rather than from the enclosing environment (the original
    # relied on a free variable, which is fragile).
    data$POSDATE <- as.POSIXct(data$Date)
    ggplot(data, aes_string(x = "POSDATE", y = input$select2)) +
      geom_line() +
      # BUG FIX: the axis format was "%Y-%m-%d %mm", where "%mm" renders
      # the month number followed by a literal "m" (month appears twice,
      # minutes never).  Hours:minutes ("%H:%M") is what a datetime axis
      # with sub-daily data needs.
      scale_x_datetime(labels = date_format("%Y-%m-%d %H:%M"))
  })

  # Caption describing what is currently plotted.
  output$text <- renderText({
    paste(input$select2, "of buoy with ID=", input$select1)
  })
})
2aaff223b91e46bb018147d878cbb319e0091390
d8ec0122f6b914db27fdcb9d657195c18b925aa9
/Introduction-to-R/ggplot2.R
9c328e9224e2bf3205ff9f99d4bba96ed9b1219a
[]
no_license
sssridha/R
5c8576849a26e7beecc2325f5ec3ce782ebb63c4
9d061143286a96c5b89818538d7251e2997bc788
refs/heads/master
2020-07-04T02:17:24.488420
2016-11-19T06:05:26
2016-11-19T06:05:26
null
0
0
null
null
null
null
UTF-8
R
false
false
2,350
r
ggplot2.R
### ggplot2 is the most common used graphics package in R. It has great flexibility,
### allows for a wide variety of graphs, and has an easy to understand grammar.
### Additional resources:
### https://www.rstudio.com/wp-content/uploads/2015/03/ggplot2-cheatsheet.pdf
### http://docs.ggplot2.org/current/

# Install the ggplot2 library and load it.
# NOTE(review): install.packages() runs on every execution of this script;
# in practice install once interactively and keep only library() here.
install.packages("ggplot2")
library(ggplot2)

# The prompts below are exercise stubs for students to fill in; the built-in
# `diamonds` data set (shipped with ggplot2) is the working data.

# Assign the diamonds data
# View the structure of diamonds
# Create a ggplot and assign just price to the aesthetics.
# Create a histogram of price by adding a geometry.
# Assign the ggplot to p without a geometry.
# Create a density plot by adding geom_density() to p
# Create a histogram of p, set bins = 50, and add a title.
# Create a scatter plot carat and price
# Color the scatterplot by the cut variable
# Color the scatterplot by the x variable
# Color the scatterplot blue by placing the color inside geom_point()
# Add a title and customize the x and y axis
# Color the scatterplot blue by placing the color inside geom_point()
# Add a title and customize the x and y axis

## Practice:
## Assign the economics data
## View the structure of economics
## create a histogram of pce
## create a histogram of psavert
## Create a scatterplot of psavert and pce, add geom_smooth() to the plot
## Create a scatterplot of psavert and pce, add geom_smooth(method = lm) to the plot.
## Add a title, label the x-axis "Personal Savings Rate", and y-axis "Personal Consumption Expenditures".

# Line Graph
# Create the variable unemployment_rate in the economics data frame by dividing unemploy by pop.
# Create a line graph of unemployment_rate on date

# Bar Chart
str(diamonds)
# Create a barchart of the count of cut
# Use dplyr to create a barchart of mean price by cut, add the fill option

# Create a scatterplot of carat and price. Use facet_wrap on cut.

## Practice:
# NOTE(review): setwd() with a user-specific path makes the script
# non-portable; it assumes the course folder layout with titanic1.csv and
# titanic2.csv present in ~/Documents/Introduction-to-R.
setwd("~/Documents/Introduction-to-R")
titanic1 = read.csv("titanic1.csv")
titanic2 = read.csv("titanic2.csv")
# Join the two halves of the Titanic data on the shared "Id" key.
titanic = merge(titanic1, titanic2, by = "Id")
str(titanic)
## Create a bar plot of the survival rates on pclass.
## Add a title and custom axis labels.
## Create a bar plot of the survival rates of the pclass.
## Use facet_wrap with the sex variable.
## Add a title and custom axis labels.
52295cfed1c55dd0b7aca5ee59cf0302f82662ab
2bec5a52ce1fb3266e72f8fbeb5226b025584a16
/BigVAR/man/show-methods.Rd
af3ed58a261ebdbbbe1ee74e14a9a1612c2fa2ef
[]
no_license
akhikolla/InformationHouse
4e45b11df18dee47519e917fcf0a869a77661fce
c0daab1e3f2827fd08aa5c31127fadae3f001948
refs/heads/master
2023-02-12T19:00:20.752555
2020-12-31T20:59:23
2020-12-31T20:59:23
325,589,503
9
2
null
null
null
null
UTF-8
R
false
true
780
rd
show-methods.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/BigVARObjectClass.R \docType{methods} \name{show.BigVAR} \alias{show.BigVAR} \alias{show,BigVAR-method} \title{Default show method for an object of class BigVAR} \usage{ \S4method{show}{BigVAR}(object) } \arguments{ \item{object}{\code{BigVAR} object created from \code{ConstructModel}} } \value{ Displays the following information about the BigVAR object: \itemize{ \item{Prints the first 5 rows of \code{Y}} \item{ Penalty Structure} \item{ Relaxed Least Squares Indicator} \item{Maximum lag order} \item{ VARX Specifications (if applicable)} \item{Start, end of cross validation period} } } \description{ Default show method for an object of class BigVAR } \seealso{ \code{\link{constructModel}} }
6907bccec3416bbb22bd2836a35a12e38583d3fa
932b7a024ad453706c4a03a9e9791c5a5512c892
/man/getBinaryConfusionMatrix.Rd
e23775f86b43dca4ff29d0144a9022cd75610e5a
[]
no_license
dputler/AlteryxPredictive
428eb2f836999cd44312e7a8548c5c95c5a29fb1
e35a0795c86e19cad7b022a3044a10158dbb5511
refs/heads/master
2020-03-06T19:20:54.612405
2018-04-02T18:24:30
2018-04-02T18:24:30
127,025,821
0
0
null
2018-03-27T17:56:39
2018-03-27T17:56:38
null
UTF-8
R
false
true
526
rd
getBinaryConfusionMatrix.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/interactiveutils.R \name{getBinaryConfusionMatrix} \alias{getBinaryConfusionMatrix} \title{Helpful wrapper around fitted and actual values for generating confusion matrix} \usage{ getBinaryConfusionMatrix(fitted_values, actual_values) } \arguments{ \item{fitted_values}{fitted values} \item{actual_values}{actual values} } \value{ confusion matrix } \description{ Helpful wrapper around fitted and actual values for generating confusion matrix }
af822deeb9f06017f60a81753c17ddffc534c7fd
36c8308493104441ae3aa3606ee63e2f555f86b1
/man/convert.stress.level.Rd
028a3b20ee4ad91c123eabaadb02c1c74ad7d6ea
[]
no_license
cran/ALTopt
26e07bd47875d27311aa89bf1e43b91c9bea8481
325843e8028f0c6e2f086c0ec04a73e05d76e9ce
refs/heads/master
2020-04-29T09:34:57.600803
2019-12-12T22:10:02
2019-12-12T22:10:02
29,384,214
0
1
null
null
null
null
UTF-8
R
false
true
1,560
rd
convert.stress.level.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ALTopt.R \name{convert.stress.level} \alias{convert.stress.level} \title{Coding and decoding stress level} \usage{ convert.stress.level(lowStLv, highStLv, actual = NULL, stand = NULL) } \arguments{ \item{lowStLv}{a numeric vector containing the actual lowest stress level of each stress variable in design region.} \item{highStLv}{a numeric vector containing the actual highest stress level of each stress variable in design region.} \item{actual}{a data frame or numeric vector containing the design points in actual units.} \item{stand}{a data frame or numeric vector containing the design points in standardized units.} } \value{ When \code{actual} is provided, the function converts it to the standardized units and when \code{stand} is provided, the function converts it to the actual units. } \description{ Convert the stress levels from the actual levels to standardized levels, and vice versa. } \examples{ \dontrun{ # Generating D optimal design in coded unit. Design <- altopt.rc(optType = "D", N = 100, tc = 100, nf = 2, alpha = 1, formula = ~x1 + x2 + x1:x2, coef = c(0, -4.086, -1.476, 0.01)) # Transform the coded unit to actual stress variable's level. convert.stress.level(lowStLv = c(34.834, 4.094), highStLv = c(30.288, 4.5), stand = Design$opt.design.rounded) # Transform the actual stress level to coded units. use <- c(38.281, 3.219) convert.stress.level(lowStLv = c(34.834, 4.094), highStLv = c(30.288, 4.5), actual = use) } }
c02a0fd78a6acc1d095ad4d473efb30ca3b01d4c
5a1bf799691965bfba48f1733b2fdbbfe2050c58
/R/database_functions.R
fcc6c3fbf8c99c384fa49b579c45ace4ff29adb1
[]
no_license
vedhav/shipdashboard
33e2cc367def8db6700b22ead79fe1983f889fc9
e41d3f8045bc2ce48e3921fe8f341129d7c07e04
refs/heads/main
2023-01-24T11:10:17.906535
2020-11-21T19:16:53
2020-11-21T19:16:53
313,908,951
0
0
null
null
null
null
UTF-8
R
false
false
4,258
r
database_functions.R
# Reap all unclosed db connections that occur due to exceptions or timeouts.
# Emits status via message() (stderr) so it does not pollute captured stdout.
# Returns (invisibly) the number of connections closed.
kill_dbx_connections <- function () {
  # NOTE(review): DBI::dbListConnections() is deprecated upstream; it still
  # works with RMySQL, but consider tracking connections explicitly long-term.
  all_cons <- dbListConnections(RMySQL::MySQL())
  for (con in all_cons) dbx::dbxDisconnect(con)
  message(length(all_cons), " connections killed.")
  # Typo fix: "swith" -> "switch".
  message("If you have not set up the MySQL database please switch to local_file branch of the repo { git checkout local_file } so you can use this app without it")
  invisible(length(all_cons))
}

#' Open a dbx MySQL connection, retrying once after reaping stale connections
#'
#' Shared by \code{insert_into_db} and \code{get_data_from_db} (the retry
#' logic was previously duplicated verbatim in both).
#'
#' @param db_name Database name.
#' @param host_name User name for the database.
#' @param host_password Password for the database.
#' @param host_ip Host address of the database.
#' @return An open dbx connection. The caller is responsible for closing it
#'   (register \code{on.exit(dbx::dbxDisconnect(conn), add = TRUE)} right away).
connect_to_db <- function(db_name, host_name, host_password, host_ip) {
  open_conn <- function() {
    dbx::dbxConnect(
      adapter = "mysql",
      user = host_name,
      password = host_password,
      host = host_ip,
      port = default_db_port,  # global default, defined elsewhere in this project
      dbname = db_name
    )
  }
  tryCatch(open_conn(), error = function(err) {
    # A failed connect is usually caused by connections leaked by a previous
    # error or timeout: reap them and retry exactly once.
    kill_dbx_connections()
    open_conn()
  })
}

#' Escape backslashes in every character column
#'
#' dbx does not escape literal backslashes for MySQL, so each "\" is doubled
#' before being sent (hence the quadruple-escaped replacement at the regex
#' level). Previously duplicated in both public functions.
#'
#' @param values A data frame or named list of columns.
#' @return \code{values} with backslashes escaped in all character columns.
escape_backslashes <- function(values) {
  for (field in seq_along(values)) {
    if (typeof(values[[field]]) == "character") {
      values[[field]] <- gsub("[\\]", "\\\\\\\\", values[[field]])
    }
  }
  values
}

#' To insert new values inside the MySQL table
#'
#' @param table_name A string which contains the MySQL table name where the data has to be inserted
#' @param values A tbl_df/tbl/data.frame which is the same structure as the MySQL table
#' @param db_name A string which contains the name of the database
#' @param host_name A string which contains the username to access the database
#' @param host_password A string which contains the password to access the database
#' @param host_ip A string which contains the host address of the database
#'
#' @return Returns the result obtained after the insert, NULL if the data was inserted successfully
insert_into_db <- function(table_name, values, db_name = default_db_name,
                           host_name = default_host_name,
                           host_password = default_host_password,
                           host_ip = default_host_ip) {
  values <- escape_backslashes(values)
  conn <- connect_to_db(db_name, host_name, host_password, host_ip)
  # Register the cleanup *before* doing any work with the connection, so it
  # is released even if the insert errors (the original registered it only
  # after the insert, leaking the connection on failure).
  on.exit(dbx::dbxDisconnect(conn), add = TRUE)
  result <- suppressWarnings(dbx::dbxInsert(conn, table_name, values,
                                            batch_size = 1000))
  return(result)
}

#' To fetch data from database using select query
#'
#' @param query A string with the select query
#' @param params A list of optional parameters that fill the ? in the query
#' @param db_name A string which contains the name of the database
#' @param host_name A string which contains the username to access the database
#' @param host_password A string which contains the password to access the database
#' @param host_ip A string which contains the host address of the database
#'
#' @return A tibble which is the result of the query
get_data_from_db <- function(query, params = NULL, db_name = default_db_name,
                             host_name = default_host_name,
                             host_password = default_host_password,
                             host_ip = default_host_ip) {
  if (query == "") {
    stop("The query string cannot be empty")
  }
  conn <- connect_to_db(db_name, host_name, host_password, host_ip)
  # Release the connection even if the query below errors.
  on.exit(dbx::dbxDisconnect(conn), add = TRUE)
  if (!is.null(params)) {
    params <- escape_backslashes(params)
    result <- suppressWarnings(dbx::dbxSelect(conn, query, params))
  } else {
    result <- suppressWarnings(dbx::dbxSelect(conn, query))
  }
  # Drain the extra result sets produced when the query is a stored
  # procedure; otherwise the next statement on this connection fails.
  while (RMySQL::dbMoreResults(conn)) {
    try(RMySQL::dbNextResult(conn))
  }
  return(tibble::as_tibble(result))
}
3da4e0b0bb2e30c59a72ca86a7983c9efe756099
0f2d3dcaf08ef634b59e3d7d2a88ee41fea94596
/ycombinator-plot/code.R
a85c7aa2b36953b454ca12427350d59bfbe69442
[ "MIT" ]
permissive
kaustavSen/dataviz-makeover
80b944dcd7fd1aa4f88e44913b8e66c202dac95a
75eb709fee36945deeee686a193fb496d34f5061
refs/heads/main
2023-08-29T04:34:30.897398
2021-10-30T13:09:53
2021-10-30T13:09:53
367,793,854
0
0
null
null
null
null
UTF-8
R
false
false
1,210
r
code.R
library(tidyverse)
library(ggthemes)
library(ggtext)
library(scales)

# Annual share of YC-funded companies whose main focus is an emerging market.
yc <- read_csv("ycombinator-plot/data.csv")

# Convert the textual "percent" column (e.g. "12%") into a proportion.
yc <- mutate(yc, percent = parse_number(percent) / 100)

subtitle <- "% of companies funded each year which have their main focus in the emerging markets of Africa, Latin America & Middle East"

# Step chart of the emerging-market share by year. The plot expression is
# left at top level (auto-printed) so ggsave() below picks it up via its
# default plot = last_plot().
ggplot(yc, aes(year, percent)) +
  geom_step(color = "#B40F20", size = 1.05) +
  scale_x_continuous(labels = number_format(accuracy = 1, big.mark = "")) +
  scale_y_continuous(
    labels = percent_format(accuracy = 1),
    limits = c(0, 0.3),
    expand = expansion(mult = 0),
    breaks = seq(0, 0.3, 0.1)
  ) +
  coord_cartesian(clip = "off") +
  labs(
    title = "Y Combinator's <span style='color: #B40F20'>emerging</span> markets focus",
    subtitle = str_wrap(subtitle)
  ) +
  theme_fivethirtyeight(base_family = "Inter") +
  theme(
    plot.title.position = "plot",
    plot.title = element_markdown(family = "Poppins", hjust = 0.5,
                                  margin = margin(b = 5)),
    plot.subtitle = element_text(hjust = 0.5, lineheight = 1.2,
                                 color = "grey60", size = rel(0.9),
                                 margin = margin(b = 10))
  )

ggsave("ycombinator-plot/plot.png", width = 8, height = 6)
754bd186f8b1090c0455dcd04ac2358d1eecf36c
0c3fa446b7aa8e5f48201e139fd7e19c351ae54d
/src/prepare_ecdc_data/R/corrections_02August.R
125c062335b564784bd1f219a987253b1caa9962
[]
no_license
mrc-ide/covid19-forecasts-orderly
477b621129ebc93dbd4e75ae4d54fbda0ad1669c
2210ea70dc7b38d91fc1678ab0c2bb9b83c5ddcb
refs/heads/main
2023-02-06T17:58:18.937033
2022-08-02T16:39:50
2022-08-02T16:39:50
254,094,782
33
12
null
2022-08-02T16:39:51
2020-04-08T13:25:08
R
UTF-8
R
false
false
2,272
r
corrections_02August.R
######################################################################
######################################################################
######################################################################
######################################################################
########### Corrections 02nd August ##################################
######################################################################
######################################################################
######################################################################

# Manual corrections applied to the reported case/death counts on 02 Aug 2020.
# NOTE(review): this script mutates `raw_data` (the ECDC daily counts) and
# reads `who` (the WHO daily reports); both must already be in scope from the
# enclosing task, and `dplyr` must be attached for `desc()` to resolve below
# -- confirm against the orderly workflow that sources this file.

# The reporting window being corrected: the week ending 02 August 2020.
last_week <- seq(from = as.Date("2020-07-26"),
                 to = as.Date("2020-08-02"),
                 by = "1 day")

# Guatemala: replace the ECDC counts for the whole week with the WHO counts.
who_Guatemala <- who[who$country == "Guatemala" &
                       who$date_reported %in% last_week, ]

ecdc_Guatemala <- raw_data[raw_data$`Countries.and.territories` == "Guatemala", ]
ecdc_Guatemala <- ecdc_Guatemala[ecdc_Guatemala$DateRep %in% last_week, ]

df <- dplyr::left_join(who_Guatemala, ecdc_Guatemala,
                       by = c("date_reported" = "DateRep"))
df <- dplyr::arrange(df, desc(date_reported))

# NOTE(review): the two assignments below align the rows of `df` with the
# matching rows of `raw_data` *positionally*; this is only correct if the
# Guatemala rows of `raw_data` for these dates are also ordered by
# descending date -- verify the upstream sort order.
raw_data$Cases[raw_data$`Countries.and.territories` == "Guatemala" &
                 raw_data$DateRep %in% df$date_reported] <- df$new_cases

raw_data$Deaths[raw_data$`Countries.and.territories` == "Guatemala" &
                  raw_data$DateRep %in% df$date_reported] <- df$new_deaths

# Kosovo: hard-coded death counts for 01-02 August.
raw_data$Deaths[raw_data$`Countries.and.territories` == "Kosovo" &
                  raw_data$DateRep == "2020-08-01"] <- 15

raw_data$Deaths[raw_data$`Countries.and.territories` == "Kosovo" &
                  raw_data$DateRep == "2020-08-02"] <- 10

# Saudi Arabia: hard-coded death counts for 31 July - 02 August.
raw_data$Deaths[raw_data$`Countries.and.territories` == "Saudi_Arabia" &
                  raw_data$DateRep == "2020-07-31"] <- 26

raw_data$Deaths[raw_data$`Countries.and.territories` == "Saudi_Arabia" &
                  raw_data$DateRep == "2020-08-01"] <- 24

raw_data$Deaths[raw_data$`Countries.and.territories` == "Saudi_Arabia" &
                  raw_data$DateRep == "2020-08-02"] <- 21

# Sudan: hard-coded counts for 02 August.
raw_data$Deaths[raw_data$`Countries.and.territories` == "Sudan" &
                  raw_data$DateRep == "2020-08-02"] <- 6

raw_data$Cases[raw_data$`Countries.and.territories` == "Sudan" &
                 raw_data$DateRep == "2020-08-02"] <- 94

# Venezuela: hard-coded counts for 30-31 July.
raw_data$Deaths[raw_data$`Countries.and.territories` == "Venezuela" &
                  raw_data$DateRep == "2020-07-31"] <- 4

raw_data$Cases[raw_data$`Countries.and.territories` == "Venezuela" &
                 raw_data$DateRep == "2020-07-30"] <- 3
17c805cb769d1a49900c584cb563f7faded7ff79
49e30fa0003f379dc1aa21396f5678927296e629
/Basket/code/model/add_none.R
4a87d5d577d87f97c71aba97329cbafee66a24cc
[]
no_license
xiaofeifei1800/Kaggle
8ce6e74b4bf7e379ae4c08afa984dfb49311148c
ece120bdc8fb37ac6f409548abb04c1d503332e5
refs/heads/master
2021-09-10T17:02:19.107623
2018-03-29T20:40:41
2018-03-29T20:40:41
63,451,543
0
0
null
null
null
null
UTF-8
R
false
false
705
r
add_none.R
# Post-process basket-reorder predictions: merge the "None" model's output
# into the per-product predictions and build a Kaggle submission file
# (one row per order, space-separated product ids, or the literal "None").
# NOTE(review): absolute local paths -- this script only runs on the
# author's machine; parameterize before reuse.
test = fread("/Users/xiaofeifei/I/Kaggle/Basket/result1.csv")
none = fread("/Users/xiaofeifei/I/Kaggle/Basket/None1.csv")
# Drop the first row and the third column of the "None" table
# (data.table negative i/j indexing), then keep only rows labelled "None".
# NOTE(review): assumes None1.csv was read with default V1..Vn headers and
# that V2 holds the predicted label -- confirm against the file.
none = none[-1,-3]
none = none[none$V2 == "None"]
colnames(none) = c("order_id","product_id")
# Rows kept from the "None" table are treated as positive predictions.
none$reordered = 1
# Binarize the reorder probability at 0.21 (presumably an F1-tuned cutoff
# -- TODO confirm where this threshold comes from).
test$reordered <- (test$reordered > 0.21) * 1
test = rbind(test, none)
# One row per order: space-separated list of predicted products.
submission <- test %>%
  filter(reordered == 1) %>%
  group_by(order_id) %>%
  summarise(
    products = paste(product_id, collapse = " ")
  )
# Orders with no positive prediction at all get the literal "None".
missing <- data.frame(
  order_id = unique(test$order_id[!test$order_id %in% submission$order_id]),
  products = "None"
)
submission <- submission %>% bind_rows(missing) %>% arrange(order_id)
fwrite(submission, file = "none_test.csv", row.names = F)
bc25203b162a7f524fa66d7269219dad188c2943
c96d12a58c06ae3d19cd48ef60fc2051ba73d7d8
/R/9_edge_bundling_alternative.R
61242fe7bc83ef0f2624a8586e6f275b6701fca5
[]
no_license
rafapereirabr/stockholm_access_travelbehavior
c5757398d532da66d8066caaedbe64e6ceeccc8f
a49c3d67dbbd2c728d34c422de91ec35d837fe50
refs/heads/main
2023-08-08T13:15:03.762770
2023-07-26T20:18:08
2023-07-26T20:18:08
380,514,032
0
0
null
null
null
null
UTF-8
R
false
false
1,266
r
9_edge_bundling_alternative.R
# Exploratory alternative to edge bundling: collapse individual trip records
# into per-(group, x, y) counts and draw them as overlaid LINESTRINGs.
# NOTE(review): `df_plot_income` must already exist in the calling
# environment (built by an earlier script in this project) -- confirm which
# script produces it and what its columns mean before reusing.
setDT(df_plot_income)
df_plot_income[, count := 1]
df_plot_income <- df_plot_income[order(group, period, income)]

# Aggregate: number of records at each (group, x, y) vertex.
ddd <- df_plot_income[, .(count = sum(count)), by=.(group,x, y)]
unique(ddd)

# One LINESTRING per `group`, keeping the remaining attribute columns
# (keep = T) so `income` survives for the subset below.
df_line <- sfheaders::sf_linestring(obj = ddd, x = 'x', y= 'y', linestring_id = 'group', keep = T)
head(df_line)

library(stplanr)
library(sf)

# Low-income flows only; overline() merges overlapping segments and sums
# `count` where lines coincide (the bundling-like aggregation step).
m <- subset(df_line, income == 'Low')
m <- stplanr::overline(sl = m, attrib = "count", fun = sum, ncores = 4, simplify = F)
head(m)

# Map the aggregated segment counts.
t <- ggplot(data = m, aes(color=count)) +
  geom_sf( alpha=.1) +
  coord_sf()

# (Commented-out earlier attempt at a tile-based version, kept for reference.)
# #
# ggplot(mm, aes(factor(base_area_orig), factor(base_area_dest), fill= count)) +
#   geom_tile() +
#   st_coordinates()
# #
#   scale_fill_viridis_c(option = 'E', direction = ) +
#   facet_wrap(.~income, nrow=1 ) +
#   theme_classic() +
#   theme(axis.line=element_blank(),
#         axis.text=element_blank(),
#         axis.ticks=element_blank(),
#         axis.title=element_blank(),
#         strip.background=element_rect(colour=NA, fill="gray95") )

ggsave(t, file= './figures/test_count_overLOW.png', dpi=300, width = 16, height = 8, units = 'cm')
19bf498b4f238572919d5d8e257517e347e6e6cb
885b9041aa386150e9b4afa077ba8c6c157858c5
/ui.R
beb3aee444eb8440f9f12c580cf1f169aba78739
[]
no_license
liujiashen9307/Hoitalent-small-project
2a5fff22bf030e2ef0ad98c1ea5299e782c34d23
8a204f991d842343088d6146de6dff5e5cc5ee16
refs/heads/master
2020-09-15T22:37:35.509443
2016-12-04T19:04:26
2016-12-04T19:04:26
73,515,169
0
0
null
null
null
null
UTF-8
R
false
false
6,310
r
ui.R
library(shiny)
library(shinythemes)
library(plotly)
library(wordcloud)

# Data loaded once at app start-up.
# NOTE(review): `dt` is never referenced in this UI definition -- presumably
# it is consumed by server.R via the shared global environment; confirm
# before removing.
dt <- read.csv('HT.csv')
# Background choices: rows 2..16 and 19 of the first column of Background.csv.
bg <- read.csv('Background.csv')[c(2:16,19),1]
bg<- as.character(bg)

# Single-page UI: a sidebar whose contents switch with the active tab
# (conditionalPanel on input.tab) plus a main panel with three tabs.
shinyUI(fluidPage(theme = shinytheme("sandstone"),
  titlePanel(h3(strong('Find the position you may be interested in'))),
  sidebarPanel(
    # Sidebar shown while the "Dashboard" tab is active: explanation plus
    # the two background selectors driving the dashboard plots.
    conditionalPanel('input.tab=="Dashboard"',
      h4("Description"),
      p(h5('By selecting two of your most matched backgrounds, you can check:')),
      p(h5('a. What extra backgrounds do positions you can apply need')),
      p(h5('b. Which company may be your potential employer?')),
      p(h5('c. Which language skills you better have for being more competitive ')),
      p(h5('d. How many years of experience and degree you better have')),
      p(h5('e. What are the key words in the job description?')),
      br(),
      h4('Choose two of your background'),
      selectInput('bg1','Background 1',choices = bg,selectize = TRUE),
      selectInput('bg2','Background 2',choices = bg,selected = bg[7],selectize = TRUE)
    ),
    # Sidebar for the keyword-based recommender tab: free-text input plus
    # the action button that triggers the search in server.R.
    conditionalPanel('input.tab=="Position Recommendation"',
      h4('Description'),
      p(h5('This part can be used as a simple search engine based on the content in job description')),
      br(),
      p('However, it is also a prototype of personalized recommender. If one gets the full data set of the browsing history of all users, he or she can use the similar model to make a recommendation to a specific user based on his or her background, education, language, experience and browsing history.'),
      br(),
      p('Type in the words and click "Find the Position"'),
      br(),
      h4('Key words of your dreamed job'),
      textInput('text',label = 'Use space to seperate different words. More words you provide, more accurate the algorithm will be. 
Default keywords are provided below already.',value ='Data Python Machine Learning',placeholder = 'Please type in the key words of your dream job, and we will match the most suitable job for you.',width = '600px'),
      actionButton("go", "Find the Position")
    ),
    # Sidebar for the "About" tab: author contact links.
    conditionalPanel('input.tab=="About"',
      h4('Contact Author'),
      p(h5('Jiashen Liu')),
      p(h5('Graduate of Rotterdam School of Management, Erasmus Univerisity')),
      p(
        a(h5("LinkedIn"),href="https://nl.linkedin.com/in/jiashen-liu-4658aa112",target="_blank"),
        a(h5("Github"),href="https://github.com/liujiashen9307/",target="_blank"),
        a(h5("Kaggle"),href="https://www.kaggle.com/jiashenliu",target="_blank")
      )
    )
  ),
  mainPanel(
    tabsetPanel(
      # `id='tab'` is what the conditionalPanel() expressions above test.
      id='tab',
      # Tab 1: dashboard of six plotly charts plus a wordcloud (plot7),
      # all rendered by server.R.
      tabPanel('Dashboard',
        h4('A: Explore the extra background and required experience'),
        # NOTE(review): splitLayout()'s argument is `cellWidths` (capital W);
        # `cellwidths=` does not partial-match it and is silently passed to
        # `...` as an HTML attribute, so the 50/50 split likely never takes
        # effect -- verify in the rendered app.
        splitLayout(cellwidths=c("50%","50%"),plotlyOutput("plot1"),plotlyOutput("plot2")),
        h4('B: Explore the potential employer and location of company'),
        splitLayout(cellwidths=c("50%","50%"),plotlyOutput("plot3"),plotlyOutput("plot4")),
        h4('C: Explore required languages and degree requirement'),
        splitLayout(cellwidths=c("50%","50%"),plotlyOutput("plot5"),plotlyOutput("plot6")),
        h4("D: Briefly explore what terms are in Job Description"),
        plotOutput('plot7')
      ),
      # Tab 2: table of the top matches from the keyword recommender.
      tabPanel('Position Recommendation',
        h4('TOP 5 Positions picked by the words you typed in'),
        dataTableOutput('dt')
      ),
      # Tab 3: static "about" text and links.
      tabPanel('About',
        h4("Overall"),
        p('Two parts of contents are included in this small project. 
The first one serves as the visualization of data set while the second part, which implements a KNN model, builds a small recommendation system based on the key words you type in',align='Justify'),
        h4('Data Source'),
        p("All data are from Hoitalent.com , the largest job portal for English Jobs in the Netherlands",align='Justify'),
        h4('Packages Used'),
        p("Python Packages: bs4, requests, pandas."),
        p("R Packages: Shiny, Shinythemes, plotly, wordcloud, FNN,tm, snowballC,sqldf,ggplot2, and RColorBrewer",align='Justify'),
        h4("Link of Code"),
        p('If you are interested, you can find both the web scraping script and Shiny Scripts in the below github link.',align='Justify'),
        a(h5("Click me for the Code"),href="https://github.com/liujiashen9307/Hoitalent-small-project",target="_blank")
      )
    )
  )
))
93bfd9fbd7cf27fcab23076400815ff0c1a08242
117936196834fbda370de297d6f5a77846bf45e9
/manuscript_functions/metacom_sim_fx.R
5939fcb6ed63b33c5aebef29a08c201e580ed61d
[]
no_license
javirudolph/testingHMSC
a79dc2ffcdec967ed45d23e46151044d1365ab51
61c3e1b035b8095c45755833d2ab0ebc1179a6fb
refs/heads/master
2021-06-16T04:27:22.878177
2021-03-11T18:46:51
2021-03-11T18:46:51
170,368,566
4
2
null
null
null
null
UTF-8
R
false
false
7,355
r
metacom_sim_fx.R
# These are the functions needed for the processes regarding the metacommunity
# simulations. Each individual function is explained in the Supplementary
# information.
#
# Each of these functions describes one process acting on the metacommunity;
# they are used in main_sim_fx.R to generate an occupancy matrix for species
# in patches.
#
# Conventions: N = number of patches, R = number of species, D = number of
# environmental variables. Occurrence matrices are N x R; environmental
# matrices are N x D; niche-parameter matrices are D x R.

###################################################################################################
# Colonization
###################################################################################################

# Immigration -------------------------------------------------------------
#' @title Metacommunity process: immigration
#' @description Propagule pressure: the effect of immigration is a weighted
#'   average of the occurrence probability of each species in the
#'   neighborhood of a patch, plus a constant contribution from outside the
#'   simulated metacommunity.
#' @param Y N x R species occurrence matrix.
#' @param K N x N patch connectivity matrix (see \code{get_K}).
#' @param m scalar in [0, 1]: immigration from outside the metacommunity.
#' @return N x R matrix of immigration pressure.
#' @keywords immigration metacommunity
I_f <- function(Y, K, m) {
  N <- nrow(Y)
  R <- ncol(Y)
  # (1 - m) * neighborhood average + m: external immigration acts as a floor.
  (1 - m) * (K %*% Y) / (K %*% matrix(1, nrow = N, ncol = R)) + m
}

# Patch Connectivity ------------------------------------------------------
#' @title Metacommunity process: patch connectivity
#' @description Connectivity matrix for a set of patches under an
#'   exponential dispersal kernel; self-connectivity (the diagonal) is zero.
#' @param XY N x 2 matrix/data frame of patch coordinates.
#' @param alpha dispersal parameter of the exponential kernel.
#' @return N x N connectivity matrix.
#' @keywords patch
get_K <- function(XY, alpha) {
  distMat <- as.matrix(dist(XY, method = "euclidean", upper = TRUE, diag = TRUE))
  ConMat <- exp((-1 / alpha) * distMat)
  diag(ConMat) <- 0
  ConMat
}

# Environment -------------------------------------------------------------
#' @title Metacommunity process: Environmental filtering with gaussian response
#' @description Probability of establishing a viable local population given
#'   the environmental conditions in each patch (Gaussian niche response,
#'   multiplied across environmental variables).
#' @param E N x D matrix of environmental covariates per patch.
#' @param u_c D x R niche optima for each species and variable.
#' @param s_c D x R niche breadths for each species and variable.
#' @return N x R matrix of establishment probabilities.
#' @keywords environmental filtering gaussian response
S_f_gaussian <- function(E, u_c, s_c) {
  R <- ncol(u_c)
  N <- nrow(E)
  D <- ncol(E)
  # Fix: the original used partially matched argument names (nr=/nc=);
  # spell out nrow/ncol.  seq_len() is safe for D == 0 (S stays all ones).
  S <- matrix(1, nrow = N, ncol = R)
  for (i in seq_len(D)) {
    optima <- matrix(u_c[i, ], nrow = N, ncol = R, byrow = TRUE)
    breadth <- matrix(s_c[i, ], nrow = N, ncol = R, byrow = TRUE)^2
    S <- S * exp(-(E[, i] - optima)^2 / breadth)
  }
  S
}

#' @title Metacommunity process: Environmental filtering with quadratic response
#' @description Probability of establishing a viable local population given
#'   the environmental conditions in each patch (downward quadratic niche
#'   response, truncated at zero, multiplied across variables).
#' @param E N x D matrix of environmental covariates per patch.
#' @param u_c D x R niche optima for each species and variable.
#' @param s_c D x R niche breadths (full width at zero) per species/variable.
#' @return N x R matrix of establishment probabilities (non-negative).
#' @keywords environmental filtering quadratic response
S_f_quadratic <- function(E, u_c, s_c) {
  R <- ncol(u_c)
  N <- nrow(E)
  D <- ncol(E)
  S <- matrix(1, nrow = N, ncol = R)
  for (i in seq_len(D)) {
    optima <- matrix(u_c[i, ], nrow = N, ncol = R, byrow = TRUE)
    breadth <- matrix(s_c[i, ], nrow = N, ncol = R, byrow = TRUE)
    # Parabola equal to 1 at the optimum, 0 at optimum +- breadth/2.
    S <- S * ((-1 / (breadth / 2)^2) * (E[, i] - optima)^2 + 1)
    # Truncate negative suitabilities at zero; pmax() preserves the matrix
    # dims (the original ifelse() gave the same values less directly).
    S <- pmax(S, 0)
  }
  S
}

# Interactions ------------------------------------------------------------
#' @title Metacommunity process: ecological interactions
#' @description Sum of ecological interactions experienced by every species
#'   in every patch.
#' @param A R x R species interaction matrix.
#' @param Y N x R species occurrence matrix.
#' @return N x R matrix of summed interactions.
#' @keywords ecological interactions
sum_interactions <- function(A, Y) {
  t(A %*% t(Y))
}

#' @title Metacommunity process: colonization
#' @description Total effect of ecological interactions on the colonization
#'   probability (logistic-shaped response in the interaction sum).
#' @param v N x R matrix (or scalar) of summed interactions.
#' @param d_c sensitivity to interactions.
#' @param c_0 colonization probability at zero interactions.
#' @param c_max colonization probability at maximal interactions.
#' @return Colonization probabilities, same shape as \code{v}.
C_f <- function(v, d_c, c_0, c_max) {
  # Same value as the original c_max * (...)^-1; written as a division for
  # clarity (this resolves the "is this the same?" question left in the
  # original source -- yes, x^-1 == 1/x elementwise).
  c_max / (1 + ((1 / c_0) - 1) * exp(-v * d_c))
}

###################################################################################################
# Extinction
###################################################################################################

# Environment Extinction --------------------------------------------------
#' @title Metacommunity process: extinction
#' @description Response of the extinction probability to the local
#'   environment; extinction is minimal (zero) at the environmental optimum
#'   and rises away from it.
#' @param E N x D matrix of environmental covariates per patch.
#' @param u_e D x R environmental optima (extinction minima) per species.
#' @param s_e D x R widths controlling how fast extinction rises off-optimum.
#' @return N x R matrix of extinction responses.
M_f <- function(E, u_e, s_e) {
  R <- ncol(u_e)
  N <- nrow(E)
  D <- ncol(E)
  M <- matrix(1, nrow = N, ncol = R)
  for (i in seq_len(D)) {
    optima <- matrix(u_e[i, ], nrow = N, ncol = R, byrow = TRUE)
    minima <- matrix(s_e[i, ], nrow = N, ncol = R, byrow = TRUE)^2
    M <- M * (1 - exp(-(E[, i] - optima)^2 / minima))
  }
  M
}

# Interaction Extinction --------------------------------------------------
#' @title Metacommunity process: interactions and extinction
#' @description Effect of ecological interactions on extinction at the patch
#'   level: extinction is larger under negative interaction sums and smaller
#'   under positive ones, bounded below by \code{e_min}. At v = 0 the value
#'   is exactly \code{e_0}.
#' @param v N x R matrix of summed interactions.
#' @param d_e sensitivity to interactions.
#' @param e_0 extinction probability at zero interactions.
#' @param e_min extinction probability floor at maximal interactions.
#' @return N x R matrix of extinction probabilities.
E_f <- function(v, d_e, e_0, e_min) {
  N <- nrow(v)
  R <- ncol(v)
  e_min_mat <- matrix(e_min, nrow = N, ncol = R, byrow = TRUE)
  e_min_mat + 1 / (1 / (1 - e_min_mat) +
                     (1 / (e_0 - e_min_mat) - 1 / (1 - e_min_mat)) * exp(d_e * v))
}
663346b03d4a4a618faeadbb82fda2d9e30844f5
4b4294254cab67348af2f98f73cc6f5341a2b3bb
/Milestone_Report/milestone_report.R
ca6f4931c9a905ae0419b5323fb9213af22a2390
[]
no_license
jtzingsheim1/Text-Prediction-Model-and-App
f9bddd406353379be7dc12ae4dbe0a55cf8a67ef
b70a14758c6777471acd68c12fd01b5a349c1770
refs/heads/master
2020-06-14T07:19:08.277047
2019-08-01T22:50:14
2019-08-01T22:50:14
194,945,608
0
0
null
null
null
null
UTF-8
R
false
false
8,075
r
milestone_report.R
# Coursera Data Science Specialization Capstone Project 1 Script----------------

# Milestone report for predictive text project

# The purpose of this script is to complete the basic requirements behind the
# first capstone project from Johns Hopkins University within the Data Science
# Specialization on Coursera.
#
# The review criteria for this part of the project are:
# - Does the link lead to an HTML page describing the exploratory analysis of
#   the training data set?
# - Has the data scientist done basic summaries of the three files? Word
#   counts, line counts and basic data tables?
# - Has the data scientist made basic plots, such as histograms to illustrate
#   features of the data?
# - Was the report written in a brief, concise style, in a way that a non-data
#   scientist manager could appreciate?
#
# This project will begin to meet the objectives by building a script to work
# with the data. This script will serve as the basis for an R Markdown file
# which will create the actual deliverable of the html report.

library(tidyverse)
library(quanteda)


# Part 0) Function definitions--------------------------------------------------

DLAndUnzipData <- function(data.filename = "Coursera-SwiftKey.zip") {
  # Downloads and unzips the capstone dataset if needed, returns folder name
  #
  # Args:
  #   data.filename: An optional name for the zip file, to replace the default
  #
  # Returns:
  #   A character value of the name of the folder containing the data

  # Check if the file already exists, download if it does not
  if (!file.exists(data.filename)) {
    message("Downloading Data File")
    url <- paste0("https://d396qusza40orc.cloudfront.net/dsscapstone/dataset/",
                  "Coursera-SwiftKey.zip")
    download.file(url, data.filename)
  }

  # Check if the file is already unzipped, unzip if needed
  data.folder <- "final"
  if (!file.exists(data.folder)) {
    message("Unzipping Data File")
    unzip(data.filename)
  }

  # Return directory of unzipped file contents as character
  return(data.folder)
}

AssembleCorpus <- function(n.lines,
                           file.selection = c("blogs", "news", "twitter"),
                           sub.dir = c("en_US", "de_DE", "fi_FI", "ru_RU")) {
  # Reads in specified number of lines from the specified file, assembles
  # corpus
  #
  # Args:
  #   n.lines: The number of lines to read in from the text with readLines()
  #   file.selection: Which file to read from: blogs, news, or twitter
  #   sub.dir: The subdirectory to read in files from, "en_US" by default
  #
  # Returns:
  #   A corpus of the text from the selected file, one "text" per line

  # Check and set arguments
  file.selection <- match.arg(file.selection)
  sub.dir <- match.arg(sub.dir)

  # Download and unzip the data, store folder name and file path
  filename <- paste(sub.dir, file.selection, "txt", sep = ".")  # File name
  filepath <- file.path(DLAndUnzipData(), sub.dir, filename)    # File path
  file.corpus <- filepath %>%
    readLines(n = n.lines) %>%  # Read in text
    corpus()                    # Convert to corpus

  # Set metadata for the corpus
  # Fixed: seq_len() instead of 1:ndoc(), which yields c(1, 0) for an empty
  # corpus
  docnames(file.corpus) <- paste0(file.selection, seq_len(ndoc(file.corpus)))
  file.corpus$metadata$source <- filename
  file.corpus$metadata$file.size <- file.info(filepath)$size
  file.corpus$metadata$rows.read <- ndoc(file.corpus)

  # Return the corpus
  return(file.corpus)
}

AssembleSummary <- function(corpus.object) {
  # Assembles a data frame from the metadata of a corpus
  #
  # Args:
  #   corpus.object: The corpus from which to extract the metadata
  #
  # Returns:
  #   A data frame of the metadata

  # Extract metadata from corpus and convert to data frame
  corpus.metadata <- corpus.object %>%
    metacorpus() %>%
    as.data.frame(stringsAsFactors = FALSE)

  # Return the metadata as a data frame
  return(corpus.metadata)
}


# Part 1) Load the data, create tokens, and build summary table-----------------

# Set how many lines to read in from the text files
chunk.size <- 1000

# Read in text data and assemble into corpora
blogs.corp <- AssembleCorpus(n.lines = chunk.size, file.selection = "blogs")
news.corp <- AssembleCorpus(n.lines = chunk.size, file.selection = "news")
twitter.corp <- AssembleCorpus(n.lines = chunk.size, file.selection = "twitter")

# Tokenize and clean text
# The predictive model will not attempt to predict: numbers, punctuation,
# symbols, twitter handles, hyphens, or urls, so these are all removed.
# Fixed: spell out TRUE rather than the reassignable shorthand T.
blogs.tkn <- tokens(blogs.corp, remove_numbers = TRUE, remove_punct = TRUE,
                    remove_symbols = TRUE, remove_twitter = TRUE,
                    remove_hyphens = TRUE, remove_url = TRUE)
news.tkn <- tokens(news.corp, remove_numbers = TRUE, remove_punct = TRUE,
                   remove_symbols = TRUE, remove_twitter = TRUE,
                   remove_hyphens = TRUE, remove_url = TRUE)
twitter.tkn <- tokens(twitter.corp, remove_numbers = TRUE, remove_punct = TRUE,
                      remove_symbols = TRUE, remove_twitter = TRUE,
                      remove_hyphens = TRUE, remove_url = TRUE)

# Count the number of words in each token object and add to the corpus metadata
blogs.corp$metadata$word.count <- sum(ntoken(blogs.tkn))
news.corp$metadata$word.count <- sum(ntoken(news.tkn))
twitter.corp$metadata$word.count <- sum(ntoken(twitter.tkn))

# Construct a table that summarizes the corpora
summary.table <- bind_rows(AssembleSummary(blogs.corp),
                           AssembleSummary(news.corp),
                           AssembleSummary(twitter.corp))
summary.table <- summary.table %>%
  select(-created) %>%
  mutate(file.size = round(file.size / (1024 ^ 2), 1))  # Bytes to MB


# Part 2) Combine texts, explore unigram----------------------------------------

# Remove unneeded objects to free up memory (deliberate: the full data set is
# large and this script runs close to the memory limit)
rm(DLAndUnzipData, AssembleCorpus, AssembleSummary, chunk.size, blogs.corp,
   news.corp, twitter.corp, summary.table)

# Combine tokens into a single object
all.tkn <- blogs.tkn + news.tkn + twitter.tkn
rm(blogs.tkn, news.tkn, twitter.tkn)

# Build dfm of unigrams and convert to dataframe
unigram <- all.tkn %>%
  dfm() %>%
  textstat_frequency()

# Plot unigram frequency by index
plot(x = seq_len(nrow(unigram)), y = unigram$frequency,
     xlab = "Word Index (sorted)", ylab = "Word Frequency [count]")

# Display the top ten unigrams and frequencies
unigram %>%
  select(feature:docfreq) %>%
  slice(1:10) %>%
  print()

# Check how many words are needed to cover 50 and 90 percent of occurances
occurances <- sum(unigram$frequency)  # Total count of word occurances

# Create a table that includes the cumulative frequencies and fraction
frequencies <- unigram %>%
  mutate(cum.freq = cumsum(frequency), cum.frac = cum.freq / occurances) %>%
  select(cum.frac)

# Find index of 50th percentile
frequencies %>%
  filter(cum.frac <= 0.5) %>%
  nrow() %>%
  print()

# Find index of 90th percentile
frequencies %>%
  filter(cum.frac <= 0.9) %>%
  nrow() %>%
  print()

# Plot cumulative occurance fraction by word index
plot(x = seq_len(nrow(frequencies)), y = frequencies$cum.frac,
     xlab = "Word Index (sorted)", ylab = "Cumulative Occurance Fraction")
rm(occurances, frequencies)
rm(unigram)


# Part 3) Explore bigram and trigram--------------------------------------------

# Convert the 1-gram tokens to 2-gram tokens
all.tkn.bi <- tokens_ngrams(all.tkn, n = 2)

# Build dfm of bigrams and convert to dataframe
bigram <- all.tkn.bi %>%
  dfm() %>%
  textstat_frequency()
rm(all.tkn.bi)

# Check total number of bigrams
bigram %>%
  nrow() %>%
  print()

# Display the top ten bigrams and frequencies
bigram %>%
  select(feature:docfreq) %>%
  slice(1:10) %>%
  print()
rm(bigram)

# Convert the 1-gram tokens to 3-gram tokens
all.tkn.tri <- tokens_ngrams(all.tkn, n = 3)
rm(all.tkn)

# Build dfm of trigrams and convert to dataframe
trigram <- all.tkn.tri %>%
  dfm() %>%
  textstat_frequency()
rm(all.tkn.tri)

# Check total number of trigrams
trigram %>%
  nrow() %>%
  print()

# Display the top ten trigrams and frequencies
trigram %>%
  select(feature:docfreq) %>%
  slice(1:10) %>%
  print()
rm(trigram)
b62edbe782fd3147cd7124749f960f02ffbe802a
bd1609c4c212dfe7169ed071dab297d6342e1dcf
/examples/createAndUpdateCoordinates.R
d4e9ab366ff22cb988ff25f7d78fd537c852f9ec
[]
no_license
restevesd/RtweetsDb
86a5871c13683abf7ec5a4bc0dc3053f7c94b658
efe25474a4cdb0c020fa03ae5c956a576ec74498
refs/heads/master
2021-01-26T07:20:38.778298
2015-07-06T12:09:54
2015-07-06T12:09:54
null
0
0
null
null
null
null
UTF-8
R
false
false
451
r
createAndUpdateCoordinates.R
## Working directory for this script should be the root of the project
## If this file is in working directory, you can run command
## setwd('..')

# Load the project's database helper functions into the current session.
source('RtweetsDb.R')

# Create the database table/model that will hold place-name coordinates.
createCoordinatesModel()

# Look up coordinates for each place name and add them to the model.
# NOTE(review): the overlapping name sets suggest repeated names are
# deduplicated or skipped on later calls -- confirm against RtweetsDb.R.
lookupAndAddCoordinates(c("Barcelona", "Zaragoza"))
lookupAndAddCoordinates(c("Barcelona", "Antofagasta"))
lookupAndAddCoordinates(c("Barcelona", "Antofagasta", "Warszawa", "Denton", "Coventry"))
lookupAndAddCoordinates(c("Barcelona", "Bialystok"))
eb6b08d6e947e347c0c09252724fd0e31820a660
0f13169c31b56ae9769850f9d909eed4909b6874
/tests/testthat/test-components.R
4c452865e7bb93ad8f28d441f7c0f3d4ec1ae5c3
[]
no_license
hathawayj/osfr
d2a5f962839954ca86618f9ae5ee040f74ae3492
e01f61c90ecc3a44cfb2f3feb2e47a07f87fd082
refs/heads/master
2020-03-06T19:23:14.243628
2018-05-03T16:04:05
2018-05-03T16:04:05
127,026,594
0
1
null
2018-03-27T18:03:41
2018-03-27T18:03:34
R
UTF-8
R
false
false
671
r
test-components.R
context("component operations")

# Exercises the full lifecycle of a component: creation (with and without
# optional arguments), privacy updates, and deletion. The steps share state
# (a parent project and a component id), so their order matters.
test_that("create, update, delete", {
  parent_id <- create_project('tmp')

  # Creation fails without a parent project or a title; with a title it
  # returns a character id, and description/private are accepted as extras
  expect_error(create_component())
  expect_error(create_component(parent_id))
  expect_is(create_component(parent_id, title = 'Test'), 'character')
  expect_is(create_component(parent_id, title = 'Test', description = 'Text'),
            'character')
  expect_is(create_component(parent_id, title = 'Test', description = 'Text',
                             private = FALSE),
            'character')

  # Updating needs an id; toggling privacy both ways should report success
  component_id <- create_component(parent_id, title = 'Test')
  expect_error(update_component())
  expect_true(update_component(component_id, private = TRUE))
  expect_true(update_component(component_id, private = FALSE))

  # Deletion needs an id and reports success
  expect_error(delete_component())
  expect_true(delete_component(component_id))
})
cc9f6fb9f10604e919f01aaf0f6f24f7f7dc4c44
1f0fac7707f85b34bea60416d27d84cfbfad9169
/man/ruch_gracz_anty_yolo_zakup_malego_psa.Rd
9924d5213df3164a9ae91c11424f73e0fc1cec40
[]
no_license
nkneblewska/SuperFarmer
65dd1e44e15d6e38c2407c790dc62fdf2b3c7619
3b65fa7a6fa457c6cd4f288b53ea4fe5f5d2e959
refs/heads/master
2020-06-19T15:50:57.439890
2016-12-07T07:49:34
2016-12-07T07:49:34
74,902,062
0
0
null
null
null
null
WINDOWS-1250
R
false
true
473
rd
ruch_gracz_anty_yolo_zakup_malego_psa.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/ruch_gracz_anty_yolo_zakup_malego_psa.R \name{ruch_gracz_anty_yolo_zakup_malego_psa} \alias{ruch_gracz_anty_yolo_zakup_malego_psa} \title{Zakup małego psa} \usage{ ruch_gracz_anty_yolo_zakup_malego_psa(stan_gracza) } \arguments{ \item{stan_gracza}{Wektor będący stanem stada gracza} } \description{ Funkcja pomocnicza strategii ANTY-YOLO, kupująca małego psa obronnego, jeśli nas stać }
7532a656e60292fdb89dbeea519188d6a48d9f39
20ba172691d30cb84d60c374053d5d4a546d48d5
/man/emp_vario.Rd
3d5a97d4b7720b63a0cff3e4fb01a9d0a07237f8
[]
no_license
sebastian-engelke/graphicalExtremes
b597b20c8a72a09e8cccd9876eb1e2b0cba3d851
4ad26c9f64e0f8ff8e4a96830c36be41c0330574
refs/heads/master
2023-09-01T15:52:11.310214
2023-04-03T10:05:06
2023-04-03T10:05:06
219,372,705
15
6
null
2022-09-16T14:03:03
2019-11-03T22:13:08
R
UTF-8
R
false
true
1,392
rd
emp_vario.Rd
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/estimation_param.R \name{emp_vario} \alias{emp_vario} \alias{emp_vario_pairwise} \title{Estimation of the variogram matrix \eGamma of a Huesler--Reiss distribution} \usage{ emp_vario(data, k = NULL, p = NULL) emp_vario_pairwise(data, k = NULL, p = NULL, verbose = FALSE) } \arguments{ \item{data}{Numeric \nxd matrix, where \code{n} is the number of observations and \code{d} is the dimension.} \item{k}{Integer between 1 and \code{d}. Component of the multivariate observations that is conditioned to be larger than the threshold \code{p}. If \code{NULL} (default), then an average over all \code{k} is returned.} \item{p}{Numeric between 0 and 1 or \code{NULL}. If \code{NULL} (default), it is assumed that the \code{data} are already on multivariate Pareto scale. Else, \code{p} is used as the probability in the function \code{\link[=data2mpareto]{data2mpareto()}} to standardize the \code{data}.} \item{verbose}{Print verbose progress information} } \value{ Numeric \dxd matrix. The estimated variogram of the Huesler--Reiss distribution. } \description{ Estimates the variogram of the Huesler--Reiss distribution empirically. } \details{ \code{emp_vario_pairwise} calls \code{emp_vario} for each pair of observations. This is more robust if the data contains many \code{NA}s, but can take rather long. }